serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,601 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <math.h>
extern "C"
{
// Draw one sample from Exponential(rate a) by inverse CDF: x = -log(U)/a.
// curand_uniform returns a value in (0, 1], so the log argument is never 0.
// FIX: use logf (single precision) instead of log, which forced a
// double-precision path inside a float-only kernel.
__device__ float rand_expon(float a, curandState *state)
{
return -logf(curand_uniform(state))/a;
} // END rand_expon
// Acceptance probability Psi for Robert's exponential rejection sampler on
// a one-sided truncated normal (z is the exponential proposal, alpha the
// optimal rate, mu_minus the standardized truncation point).
// BUG FIX: the original wrote 1/2 (integer division == 0), so both branches
// evaluated expf(0) and Psi was identically 1, accepting every proposal.
__device__ float psi_calc(float mu_minus, float alpha, float z)
{
float psi;
float d = alpha - z;
// Compute Psi
if(mu_minus < alpha){
psi = expf(-0.5f * d * d);
}
else {
float m = mu_minus - alpha;
psi = expf(0.5f * (m * m - d * d));
}
return psi;
}
// Sample n truncated normals TN(mu[i], sigma[i], lo[i], hi[i]), one thread per
// sample, writing the draw into vals[i]. Strategy: naive rejection from the
// untruncated normal first; if that fails maxtries times, fall back to
// Robert's exponential rejection method on the binding tail. vals[i] = -999
// marks failure of both methods.
// NOTE(review): mu_len/sigma_len/lo_len/hi_len are accepted but never used
// (parameter recycling is not implemented) — indexing assumes length >= n.
// NOTE(review): pow() here promotes to double; powf would keep the kernel
// single-precision — left unchanged to preserve exact behavior.
__global__ void rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int rng_seed_a, int rng_seed_b, int rng_seed_c,
int maxtries)
{
int accepted = 0;
int numtries = 0;
float x;
float u;
float alpha;
float psi;
float z;
float a;
float mu_minus;
int left_trunc = 0;
// Figure out which thread and block you are in and map these to a single index, "idx"
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Check: if index idx < n generate a sample, else in unneeded thread
if(idx<n){
// Setup the RNG: per-thread seed derived from the three host-supplied seeds
curandState rng;
curand_init(rng_seed_a + idx*rng_seed_b, rng_seed_c, 0, &rng);
// Sample the truncated normal
// i.e. pick off mu and sigma corresponding to idx and generate a random sample, x
// if that random sample, x, is in the truncation region, update the return value to x, i.e. vals[idx]=x
// if x is not in the trunc region, try again until you get a sample in the trunc region or if more than maxtries,
// move on to Robert's approx method
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
x = mu[idx] + sigma[idx]*curand_normal(&rng);
if(x >= lo[idx] && x <= hi[idx]){
accepted = 1;
vals[idx] = x;
}
}
// Robert's approx method
// We don't want to write both trunc algos for left and right tail truncations, just use
// right tail trancation. If we want to sample from Y~N(mu, sigma, -Inf, b), we transform
// first X~N(mu, sigma, -b+2*mu, Inf), use only right truncation, sample from the right
// tail to get a X, then transform back Y=2*mu-X to get left truncation sample if needed in Robert.
// NOTE(review): this setup runs even when the naive loop already accepted —
// harmless (the while below is skipped when accepted == 1), just wasted work.
if(lo[idx] < mu[idx]) { // then left truncation
left_trunc = 1;
a = -1*hi[idx] + 2*mu[idx]; // flip up to right tail
}
else {
a = lo[idx]; // right truncation from a=lo[idx] to infinity
}
// Standardize the (possibly flipped) truncation point.
mu_minus = (a-mu[idx])/sigma[idx];
// need to find mu_minus but that depends on if lower trunc or upper trunc
// Optimal exponential rate for Robert's proposal.
alpha = (mu_minus + sqrtf(pow(mu_minus,2) + 4))/2;
numtries = 1; // If couldn't get sample naively, reset and try Robert
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
// Need random expon for Robert no curand_expon function so do inverse CDF
// F(x) = 1-exp(-alpha*x) --> F^1(x) = -log(U)/alpha where U~Unif[0,1]
// u = curand_uniform(&rng);
// x = -1 * log(u)/alpha; // x is now random expon by inverse CDF
z = mu_minus + rand_expon(alpha, &rng);
// Compute Psi = probability of acceptance
psi = psi_calc(mu_minus, alpha, z);
// Check if Random Unif[0,1] < Psi, if so accept, else reject and try again
u = curand_uniform(&rng);
if (u < psi){
accepted = 1; // we now have our vals[idx]
if (left_trunc == 1){ // since originally left trunc, and flip back to left tail and final transform
vals[idx] = mu[idx] - sigma[idx]*z;
}
else { // right truncation originally so we're done after final transform
vals[idx] = mu[idx] + sigma[idx]*z;
}
}
}
if(accepted == 0){ // Just in case both naive and Roberts fail
vals[idx] = -999;
}
} // END if (idx<n)
return;
} // END rtruncnorm_kernel
} // END extern "C"
|
13,602 | #include "includes.h"
// Attenuates activations by beta = 1 - alpha at every (batch, spatial)
// position where the ground-truth map is zero, across all channels.
// One thread per flat (batch * size) position; launch must cover batches*size.
// NOTE(review): the gate reads gt_gpu[i] (flat batch*spatial index) while the
// write is per-channel — assumes gt_gpu is laid out [batch, size] with one
// value shared by all channels. TODO confirm against the caller.
// a_avg_gpu is accepted but unused in this kernel.
__global__ void assisted_activation2_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int xy = i % size; // spatial offset within one sample
int b = i / size; // batch index
float beta = 1 - alpha;
if (b < batches) {
for (int c = 0; c < channels; ++c) {
if(gt_gpu[i] == 0)
output[xy + size*(c + channels*b)] *= beta;
}
}
}
13,603 | #define THREADS_PER_BLOCK 3
#define TOTAL_BLOCKS 1
//make sure numbers above match the matlab script
__device__ int blockSums [TOTAL_BLOCKS];
__constant__ int VECTOR_SIZE;
// Dot product of a and b (length VECTOR_SIZE, set via the __constant__
// symbol): each thread multiplies one element pair into shared memory,
// thread 0 sums the block's products and atomically accumulates into *c.
// *c must be zero-initialized by the host before launch.
// FIXES vs original: (1) __syncthreads() was called inside the divergent
// `threadIdx.x == 0` branch — undefined behavior; (2) shared memory was read
// uninitialized when VECTOR_SIZE < THREADS_PER_BLOCK; (3) block 0 summed
// blockSums[] written by other blocks with no grid-wide synchronization —
// a race. atomicAdd (the commented-out intent) replaces that.
__global__ void dot_product (int* a, int*b, int*c)
{
__shared__ int multiplicationStorage [THREADS_PER_BLOCK];
// Zero-fill so the summation below never reads uninitialized shared memory.
multiplicationStorage[threadIdx.x] = 0;
if (threadIdx.x < VECTOR_SIZE)
multiplicationStorage[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // barrier reached by all threads, outside any branch
if (threadIdx.x == 0){
//compute sum; never index past the shared array even if VECTOR_SIZE is larger
int tempSum = 0;
for (int i = 0; i < VECTOR_SIZE && i < THREADS_PER_BLOCK; i++){
tempSum+=multiplicationStorage[i];
}
blockSums[blockIdx.x]=tempSum; // kept for inspection from the host
atomicAdd(c, tempSum);
}
}
|
13,604 | // prefix_sums.cpp : Defines the entry point for the application.
//
#include <thrust/scan.h>
#include <iostream>
// Demonstrate Thrust inclusive and exclusive prefix sums on a small array.
int main()
{
int values[6] = { 1, 0, 2, 2, 1, 3 };
int scanned[6];
// scanned becomes {1, 1, 3, 5, 6, 9}
thrust::inclusive_scan(values, values + 6, scanned);
std::cout << "inclusive scan = ";
for (int idx = 0; idx != 6; ++idx)
std::cout << scanned[idx] << ", ";
std::cout << std::endl;
// scanned becomes {0, 1, 1, 3, 5, 6}
thrust::exclusive_scan(values, values + 6, scanned);
std::cout << "exclusive scan = ";
for (int idx = 0; idx != 6; ++idx)
std::cout << scanned[idx] << ", ";
std::cout << std::endl;
return 0;
}
|
13,605 | #include<cstdio>
#include<vector>
#include<string>
#include<cuda_runtime.h>
// #include <thrust/sequence.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h> // thrust::host/device
#define BLOCK_SIZE 256
using namespace std;
// Arithmetic mean of the timing samples, accumulated in double to limit
// rounding error, returned as float.
// FIX: the original divided by size() unconditionally, so an empty vector
// produced 0/0 (NaN); return 0 for that case instead.
float average(const std::vector<float> &timing) {
if (timing.empty()) return 0.0f;
double avg = 0;
for (std::vector<float>::const_iterator it = timing.begin(); it != timing.end(); it++) avg += *it;
avg /= timing.size();
return avg;
}
// Print up to the first ten and last ten host-side values, labelled by flag.
// FIX: guard both loops against len < 10 — the original read out of bounds
// for short arrays — matching the checks print_device_info already does.
// Also corrects the "frist" typo in the labels.
void print_info(int *data, int len, std::string flag) {
printf("%s first ten:\n", flag.c_str());
for (int i=0; i<10 && i<len; i++){
printf("%d ", data[i]);
}
printf("\n");
if (len > 10) {
printf("%s last ten:\n", flag.c_str());
for (int i=len -10; i<len; i++){
printf("%d ", data[i]);
}
printf("\n");
}
}
// Copy len ints from device memory and print up to the first/last ten,
// labelled by flag. FIX: the original leaked the temporary host buffer on
// every call; also corrects the "frist" typo in the label.
void print_device_info(int *device_data, int len, std::string flag) {
int*data = (int*)malloc(len * sizeof(int));
cudaMemcpy(data, device_data, len*sizeof(int), cudaMemcpyDeviceToHost);
printf("%s first ten:\n", flag.c_str());
for (int i=0; i<10 && i < len; i++){
printf("%d ", data[i]);
}
printf("\n");
if (len>10) {
printf("%s last ten:\n", flag.c_str());
for (int i=len -10; i<len; i++){
printf("%d ", data[i]);
}
printf("\n");
}
free(data); // was leaked in the original
}
// interleved addressing, divergent warps branch
// time: 33.3 ms
// Reduction v1: interleaved addressing with a modulo test — maximally
// divergent warps (baseline, ~33 ms). Shared memory: blockDim.x * sizeof(int),
// passed as the dynamic launch argument. Assumes blockDim.x is a power of two.
__global__ void reduce_kernel1(int*A, int*out, int len_a) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad so the tail block reduces cleanly
if (idx < len_a) sA[tid] = A[idx];
__syncthreads();
for (int s=1; s<blockDim.x; s*=2) {
if(tid%(2*s)== 0) {
// FIX: bound the shared-memory index by blockDim.x. The original
// compared tid+s (a shared index) against len_a (the global length) —
// the wrong quantity; zero-padding makes the blockDim.x bound correct.
if (tid+s < blockDim.x) sA[tid] += sA[tid+s];
}
__syncthreads();
}
if (tid==0) out[bid] = sA[0];
}
// inerleved addressing, bands conflict
// time: 20 ms
// Reduction v2: interleaved addressing without the modulo — less warp
// divergence than v1, but the strided shared-memory pattern causes bank
// conflicts (~20 ms). Shared memory: blockDim.x * sizeof(int).
// Assumes blockDim.x is a power of two.
__global__ void reduce_kernel2(int*A, int*out, int len_a) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad the tail block
if (idx < len_a) sA[tid] = A[idx];
__syncthreads();
for (int s=1; s<blockDim.x; s*=2) {
// thread tid owns the pair starting at 2*s*tid
int run_id = 2 * s * tid;
if(run_id <blockDim.x) {
if (run_id+s < blockDim.x) sA[run_id] += sA[run_id+s];
}
__syncthreads();
}
if (tid==0) out[bid] = sA[0];
}
// sequential addressing
// time: 17.9 ms
// Reduction v3: sequential addressing — conflict-free shared-memory access
// (~17.9 ms). Shared memory: blockDim.x * sizeof(int).
// Assumes blockDim.x is a power of two (halving loop).
__global__ void reduce_kernel3(int*A, int*out, int len_a) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad the tail block
if (idx < len_a) sA[tid] = A[idx];
__syncthreads();
for (int s=blockDim.x/2; s>0; s>>=1) {
if(tid <s) {
sA[tid] += sA[tid+s];
}
__syncthreads();
}
if (tid==0) out[bid] = sA[0];
}
// first add during load
// time:10.78 ms
// Reduction v4: each block covers 2*blockDim.x inputs and performs the first
// add while loading (~10.78 ms). Launch with half the blocks of v3.
// Shared memory: blockDim.x * sizeof(int); blockDim.x must be a power of two.
__global__ void reduce_kernel4(int*A, int*out, int len_a) {
int idx = blockDim.x * (blockIdx.x * 2) + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad the tail block
if (idx < len_a) sA[tid] = A[idx];
if (idx + blockDim.x < len_a) sA[tid] += A[idx + blockDim.x]; // first add during load
__syncthreads();
for (int s=blockDim.x/2; s>0; s>>=1) {
if(tid <s) {
sA[tid] += sA[tid+s];
}
__syncthreads();
}
if (tid==0) out[bid] = sA[0];
}
// unroll the last warp
// Unrolled reduction of the final 64 shared-memory entries by a single warp.
// Caller invokes this only for tid < 32; requires blockDim.x >= 64 with the
// array zero-padded (reads sdata[tid+32]). `volatile` forces each store to
// memory so other lanes observe it without a barrier.
// NOTE(review): this relies on pre-Volta implicit warp-synchronous execution;
// on Volta+ independent thread scheduling it needs __syncwarp() between
// steps — confirm the target architecture.
__device__ void warpFunc(volatile int* sdata, int tid) {
sdata[tid] += sdata[tid+32];
sdata[tid] += sdata[tid+16];
sdata[tid] += sdata[tid+8];
sdata[tid] += sdata[tid+4];
sdata[tid] += sdata[tid+2];
sdata[tid] += sdata[tid+1];
}
// Compile-time-unrolled warp reduction: blockSize must equal the launch's
// blockDim.x so dead steps compile out. Same volatile / warp-synchronous
// caveats as warpFunc (NOTE(review): needs __syncwarp on Volta+ — confirm
// target architecture).
template <unsigned int blockSize>
__device__ void warpReduce(volatile int *sdata, int tid) {
if (blockSize >=64) sdata[tid] += sdata[tid+32];
if (blockSize >=32) sdata[tid] += sdata[tid+16];
if (blockSize >=16) sdata[tid] += sdata[tid+8];
if (blockSize >=8) sdata[tid] += sdata[tid+4];
if (blockSize >=4) sdata[tid] += sdata[tid+2];
if (blockSize >=2) sdata[tid] += sdata[tid+1];
}
// time: 8.0 ms
// Reduction v5: v4 (first add during load, 2 blocks' worth per block) plus an
// unrolled barrier-free final warp via warpFunc (~8.0 ms).
// Requires blockDim.x to be a power of two and >= 64; shared memory
// blockDim.x * sizeof(int).
__global__ void reduce_kernel5(int *A, int *out, int len_a) {
int idx = blockDim.x * (blockIdx.x * 2) + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad the tail block
if (idx < len_a) sA[tid] = A[idx];
if (idx + blockDim.x < len_a) sA[tid] += A[idx + blockDim.x];
__syncthreads();
for (int s=blockDim.x/2; s>32; s>>=1) {
if(tid <s) {
sA[tid] += sA[tid+s];
}
__syncthreads();
}
if (tid<32) warpFunc(sA, tid); // last 64 entries: warp-synchronous finish
if (tid==0) out[bid] = sA[0];
}
// completely unroll
// time: 8.03 ms
// Reduction v6: tree completely unrolled for block sizes up to 1024
// (~8.03 ms). Requires blockDim.x to be a power of two and >= 64;
// shared memory blockDim.x * sizeof(int).
__global__ void reduce_kernel6(int *A, int *out, int len_a) {
int idx = blockDim.x * (blockIdx.x * 2) + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
extern __shared__ int sA[];
sA[tid] = 0; // zero-pad the tail block
if (idx < len_a) sA[tid] = A[idx];
if (idx + blockDim.x < len_a) sA[tid] += A[idx + blockDim.x];
__syncthreads();
if (blockDim.x >= 1024) { if (tid < 512) sA[tid] += sA[tid+512]; __syncthreads();}
if (blockDim.x >= 512) { if (tid < 256) sA[tid] += sA[tid+256]; __syncthreads();}
if (blockDim.x >= 256) { if (tid < 128) sA[tid] += sA[tid+128]; __syncthreads();}
if (blockDim.x >= 128) { if (tid < 64) sA[tid] += sA[tid+64]; __syncthreads();}
if (tid<32) warpFunc(sA, tid);
if (tid==0) out[bid] = sA[0];
}
// load as much as data
// Reduction v7: grid-stride accumulation during load, then a fully unrolled
// shared-memory tree. The template parameter blockSize must equal the
// launch's blockDim.x for warpReduce<blockSize> to be correct.
// Shared memory: blockDim.x * sizeof(int).
template<unsigned int blockSize>
__global__ void reduce_kernel7(int *A, int *out, int len_a) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int bid = blockIdx.x;
int tid = threadIdx.x;
int gridSize = gridDim.x * blockDim.x; // stride between this thread's elements
extern __shared__ int sA[];
sA[tid] = 0;
// Grid-stride loop: accumulate every gridSize-th element.
// (FIX: the original repeated the bounds test inside the loop body; the
// loop condition already guarantees it.)
for (int i = idx; i < len_a; i += gridSize) {
sA[tid] += A[i];
}
__syncthreads();
if (blockDim.x >= 1024) { if (tid < 512) sA[tid] += sA[tid+512]; __syncthreads();}
if (blockDim.x >= 512) { if (tid < 256) sA[tid] += sA[tid+256]; __syncthreads();}
if (blockDim.x >= 256) { if (tid < 128) sA[tid] += sA[tid+128]; __syncthreads();}
if (blockDim.x >= 128) { if (tid < 64) sA[tid] += sA[tid+64]; __syncthreads();}
if (tid<32) warpReduce<blockSize>(sA, tid);
if (tid==0) out[bid] = sA[0];
}
// Multi-pass reduction of device array A (length len_a) using reduce_kernel3.
// A is used as scratch: its contents are destroyed. Returns the total.
// FIXES vs original: (1) proper ceil-division for the initial block count
// (the original launched one extra block when len_a divided evenly);
// (2) tmp was leaked on every call; (3) a 1-element input read tmp
// uninitialized — the result is now read from A, which holds the final
// value in every case.
int reduce(int*A, int len_a, int numThreads){
int numBlocks = (len_a + numThreads - 1) / numThreads;
int*tmp;
cudaMalloc((void**)&tmp, numBlocks*sizeof(int));
while(len_a>1) {
printf("len:%d, numBlocks:%d, numThreads:%d\n", len_a, numBlocks, numThreads);
reduce_kernel3<<<numBlocks, numThreads, numThreads*sizeof(int)>>>(A, tmp, len_a);
// time: 17.9 ms
len_a = numBlocks;
numBlocks = (numBlocks+numThreads-1)/numThreads;
// copy the partial sums back so the next pass reduces them in place
cudaMemcpy(A, tmp, sizeof(int) * len_a, cudaMemcpyDeviceToDevice);
}
int result;
cudaMemcpy(&result, A, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(tmp); // was leaked in the original
return result;
}
// Multi-pass reduction using the two-elements-per-thread kernel (v5): each
// block pre-adds 2*numThreads inputs, so the grid is halved vs reduce().
// A is used as scratch and destroyed. Returns the total.
// FIXES vs original: tmp was leaked; result is read from A so a 1-element
// input (loop never runs) no longer reads tmp uninitialized.
int reduce_2(int*A, int len_a, int numThreads){
int numBlocks = (len_a + numThreads*2 -1) / numThreads/2;
int*tmp;
cudaMalloc((void**)&tmp, numBlocks*sizeof(int));
while(len_a>1) {
printf("len:%d, numBlocks:%d, numThreads:%d\n", len_a, numBlocks, numThreads);
reduce_kernel5<<<numBlocks, numThreads, numThreads*sizeof(int)>>>(A, tmp, len_a);
// time: 8.0 ms (reduce_kernel4 ~10.78 ms, reduce_kernel6 ~8.03 ms)
len_a = numBlocks;
numBlocks = (numBlocks+numThreads*2-1)/numThreads/2;
cudaMemcpy(A, tmp, sizeof(int) * len_a, cudaMemcpyDeviceToDevice);
}
int result;
cudaMemcpy(&result, A, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(tmp); // was leaked in the original
return result;
}
// Multi-pass reduction using the grid-stride kernel (v7): each thread
// accumulates count_per_threads inputs during load.
// NOTE(review): reduce_kernel7<256> hard-codes the template block size —
// numThreads must be 256 for the final warp reduction to be correct.
// FIXES vs original: tmp was leaked; result read from A so a 1-element input
// no longer reads tmp uninitialized.
int reduce_3(int*A, int len_a, int numThreads){
int count_per_threads = 16; // inputs folded per thread during load
int numBlocks = (len_a + numThreads*count_per_threads -1) / numThreads/count_per_threads;
int*tmp;
cudaMalloc((void**)&tmp, numBlocks*sizeof(int));
while(len_a>1) {
printf("len:%d, numBlocks:%d, numThreads:%d\n", len_a, numBlocks, numThreads);
reduce_kernel7<256><<<numBlocks, numThreads, numThreads*sizeof(int)>>>(A, tmp, len_a);
len_a = numBlocks;
numBlocks = (numBlocks+numThreads*count_per_threads-1)/numThreads/count_per_threads;
cudaMemcpy(A, tmp, sizeof(int) * len_a, cudaMemcpyDeviceToDevice);
}
int result;
cudaMemcpy(&result, A, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(tmp); // was leaked in the original
return result;
}
// Benchmark driver: fills a 2^30-element int array with 1s, reduces it with
// thrust::reduce as a reference, then times the hand-written reduction.
// NOTE(review): 2^30 ints is 4 GiB per host buffer (two of them) and per
// device buffer — requires a large-memory machine; confirm before running.
// NOTE(review): reduce_2 destroys d_A's contents, so with loops > 1 every
// iteration after the first reduces an already-reduced array.
// cudaEventDestroy is never called on start/end (minor per-iteration leak),
// and the printf of the result sits inside the timed region.
int main() {
// int len_a = 256 * 256 * 256;
int len_a = 1024*1024*1024;
int numThreads = BLOCK_SIZE;
int* A = (int*) malloc(len_a * sizeof(int));
int* th_h_A = (int*) malloc(len_a * sizeof(int));
// file a,b
for (int i=0; i<len_a; i++) A[i] = 1;
for (int i=0; i<len_a; i++) th_h_A[i] = 1;
// print_info(A, len_a, "A");
int *d_A, *th_A;
cudaMalloc((void**)&d_A, len_a * sizeof(int));
cudaMalloc((void**)&th_A, len_a * sizeof(int));
cudaMemcpy(d_A, A, len_a * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(th_A, A, len_a * sizeof(int), cudaMemcpyHostToDevice);
int th_result;
// thrust reduce (reference result; sum 2^30 fits in int)
th_result = thrust::reduce(thrust::device, th_A, th_A+len_a, 0);
// th_result = thrust::reduce(thrust::host, th_h_A, th_h_A+len_a, 0);
printf("th result:%d\n", th_result);
vector<float> times;
int loops = 1;
for (int i=0; i<loops; i++) {
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, NULL);
// int result = reduce(d_A, len_a, numThreads);
int result = reduce_2(d_A, len_a, numThreads);
// int result = reduce_3(d_A, len_a, numThreads);
printf("result:%d\n", result);
cudaEventRecord(end, NULL);
cudaEventSynchronize(end);
float time = 0;
cudaEventElapsedTime(&time, start, end);
times.push_back(time);
}
printf("reduce avg time:%lf\n", average(times));
free(A);
free(th_h_A);
cudaFree(d_A);
cudaFree(th_A);
}
13,606 | #include <iostream>
#include <iomanip>
#include <chrono>
#include <thread>
using namespace std;
constexpr int BLOCK_DIM = 16;
// determines if this node (pixel) is inside the circle
// result is stored in a [16*16] array
// thread 0 then computes the number of "in" nodes (value from 0 to 16*16)
// Flags which grid nodes fall inside the unit quarter-circle (x,y in [0,1)),
// then thread 0 of each block counts its block's "in" nodes into
// block_counts. Launch with BLOCK_DIM x BLOCK_DIM threads per block.
__global__ void flagKernel(unsigned *block_counts) {
bool __shared__ ins[BLOCK_DIM*BLOCK_DIM];
// compute our coordinate in the global grid
unsigned i = blockIdx.x*blockDim.x + threadIdx.x; // my i
unsigned j = blockIdx.y*blockDim.y + threadIdx.y; // my j
unsigned Ni = gridDim.x*blockDim.x; // total number of nodes in x
unsigned Nj = gridDim.y*blockDim.y; // total number of nodes in y
// flattened thread index within the block, u = ty*bw + tx
unsigned u = threadIdx.y*blockDim.x + threadIdx.x;
float x = i/(float)Ni; // compute x in [0,1)
float y = j/(float)Nj; // y in [0,1)
ins[u] = (x*x + y*y <= 1.0f); // inside the (quarter) circle?
// wait for all threads in the block to finish writing ins[]
__syncthreads();
// let the first thread in the block add up "ins"
if (u==0) {
unsigned count = 0;
// BUG FIX: the original tested ins[u] (always thread 0's flag) here, so
// each block counted either 0 or all of its nodes. Test ins[k] instead.
for (int k=0;k<blockDim.x*blockDim.y;k++)
if (ins[k]) count++;
// flattened index for the block
int block_u = blockIdx.y*gridDim.x+blockIdx.x;
// store the per-block sum in global memory
block_counts[block_u] = count;
}
}
// this kernel adds up block-level sums to the global sum
// this could be further optimized by splitting up the sum over threads
// this kernel adds up block-level sums to the global sum
// Intended to run as a single thread (<<<1,1>>>); a serial loop over the
// per-block counts. Fine for modest grids; could be parallelized (e.g. a
// tree reduction) if the block count grows large.
__global__ void addKernel(dim3 numBlocks, unsigned *block_counts, unsigned long *glob_count) {
// compute total number of blocks
unsigned N = numBlocks.x*numBlocks.y;
unsigned long sum = 0;
for (int i=0;i<N;i++)
sum+=block_counts[i];
// store result in global memory
*glob_count = sum;
}
// Estimate pi by counting grid nodes inside the unit quarter-circle on the
// GPU: flagKernel produces per-block counts, addKernel totals them, and
// pi ~= 4 * N_in / N_tot. The final blocking cudaMemcpy provides the
// host/device synchronization before the result is read.
// NOTE(review): no CUDA error checking anywhere — kernel failures would
// surface only as a wrong pi value.
int main() {
// grab starting time
auto time_start = chrono::high_resolution_clock::now();
// figure out how many samples I should process
size_t N = BLOCK_DIM*1000; // grid size
// figure out our grid size
dim3 threadsPerBlock(BLOCK_DIM, BLOCK_DIM);
dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
// allocate memory on the GPU
unsigned *block_counts;
cudaMalloc((void**)&block_counts, numBlocks.x*numBlocks.y*sizeof(unsigned));
unsigned long *N_in_gpu; // GPU variable to hold the total N_in
unsigned long N_in; // CPU variable to hold this data
cudaMalloc((void**)&N_in_gpu, sizeof(N_in));
// launch the kernel to flag nodes, each block has BLOCK_DIM*BLOCK_DIM threads
flagKernel<<<numBlocks, threadsPerBlock>>>(block_counts);
// launch kernel to add up per-block "in" counts
addKernel<<<1, 1>>>(numBlocks, block_counts, N_in_gpu);
// transfer N_in from the GPU to the CPU
cudaMemcpy(&N_in, N_in_gpu, sizeof(N_in), cudaMemcpyDeviceToHost);
auto time_now = chrono::high_resolution_clock::now();
chrono::duration<double> time_delta = time_now-time_start;
// compute pi and show the result on rank 0 (root) using the global data
size_t N_tot = N*N;
double pi = 4*N_in/(double)N_tot;
cout<<"Using a "<<N<<"x"<<N<<" grid ("<<N_tot<<" samples), pi is "<<pi
<<" in "<<setprecision(3)<<time_delta.count()<<" seconds"<<endl;
// be a good neighbor and free memory
cudaFree(block_counts);
cudaFree(N_in_gpu);
return 0;
}
|
13,607 | #ifndef BLACKCAT_TENSOR_FUNCTIONS
#define BLACKCAT_TENSOR_FUNCTIONS
namespace BC {
#ifdef __CUDACC__
#define __BC_gcpu__ __host__ __device__
#define BLACKCAT_GPU_ENABLED
#else
#define __BC_gcpu__
#endif
namespace NN_Functions {
// Logistic function: 1 / (1 + e^-t).
// FIX: the original computed std::pow(2.71828, -t) — a truncated constant
// for e plus a slower generic pow; std::exp(-t) is exact and direct.
struct Sigmoid {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T t) const {
return 1 / (1 + std::exp(-t));
}
};
// In-place logistic function: overwrites t with 1 / (1 + e^-t) and returns it.
// FIX: replaces std::pow(2.71828, -t) (truncated e constant) with std::exp(-t).
struct SigmoidAssign {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T& t) const {
return t = 1 / (1 + std::exp(-t));
}
};
// Sigmoid derivative expressed via the cached sigmoid output: s' = s * (1 - s).
struct CachedSigmoidDeriv {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T s) const {
return (1 - s) * s;
}
};
// In-place sigmoid derivative: replaces the cached sigmoid output s with
// s * (1 - s) and returns the new value.
struct CachedSigmoidDerivAssign {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T& s) const {
s *= (1 - s);
return s;
}
};
// Hyperbolic tangent.
// FIX: the original built tanh from powf(2.71828, ±t) — a truncated constant
// for e, and powf truncates double inputs to float. std::tanh is exact and
// preserves the argument's precision.
struct Tanh {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T t) const {
return std::tanh(t);
}
};
// In-place hyperbolic tangent: overwrites t with tanh(t) and returns it.
// FIX: replaces the powf(2.71828, ±t) construction (truncated e, float-only
// powf) with std::tanh.
struct TanhAssign {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T& t) const {
return t = std::tanh(t);
}
};
// Tanh derivative via the cached tanh output: 1 - t^2.
// FIX: the original used powf(t, 2), which truncates a double argument to
// float; a plain multiply keeps full precision and is cheaper.
struct CachedTanhDeriv {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T t) const {
return 1 - t * t;
}
};
// In-place tanh derivative: overwrites the cached tanh output t with 1 - t^2.
// FIX: powf(t, 2) truncated double arguments to float — use a multiply;
// also removes the unused constant `e`.
struct CachedTanhDerivAssign {
template<class T>
__BC_gcpu__ inline __attribute__((always_inline)) T operator () (T& t) const {
return t = 1 - t * t;
}
};
// Expression-template entry points. Convention throughout: the lvalue
// overload applies the in-place (*Assign) functor — mutating the tensor —
// while the rvalue overload applies the non-mutating functor.
// NOTE(review): unExpr presumably builds a lazy unary expression over the
// tensor — confirm against the tensor class's definition.
template<template<class,class> class tensor, class T, class ml>
auto sigmoid(tensor<T, ml>& x) {
return x.unExpr(SigmoidAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto sigmoid(tensor<T, ml>&& x) {
return x.unExpr(Sigmoid());
}
// Derivative taken from the *cached* sigmoid output, not the raw input.
template<template<class,class> class tensor, class T, class ml>
auto sigmoidDeriv(tensor<T, ml>& x) {
return x.unExpr(CachedSigmoidDerivAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto sigmoidDeriv(tensor<T, ml>&& x) {
return x.unExpr(CachedSigmoidDeriv());
}
template<template<class,class> class tensor, class T, class ml>
auto tanh(tensor<T, ml>& x) {
return x.unExpr(TanhAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto tanh(tensor<T, ml>&& x) {
return x.unExpr(Tanh());
}
// Derivative taken from the *cached* tanh output.
template<template<class,class> class tensor, class T, class ml>
auto tanhDeriv(tensor<T, ml>& x) {
return x.unExpr(CachedTanhDerivAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto tanhDeriv(tensor<T, ml>&& x) {
return x.unExpr(CachedTanhDeriv());
}
}
// Short aliases for the NN_Functions entry points:
//   g = sigmoid, gd = sigmoid derivative, h = tanh, hd = tanh derivative.
// Same convention as NN_Functions: lvalue overload mutates via the *Assign
// functor, rvalue overload is non-mutating.
namespace NN_Abreviated_Functions {
using namespace NN_Functions;
template<template<class,class> class tensor, class T, class ml>
auto g(tensor<T, ml>& x) {
return x.unExpr(SigmoidAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto g(tensor<T, ml>&& x) {
return x.unExpr(Sigmoid());
}
// template<template<class,class> class tensor, class T, class ml>
// auto gd(tensor<T, ml>& x) {
// return x.unExpr(CachedSigmoidDerivAssign());
// }
// NOTE(review): unlike every sibling, the lvalue gd applies the NON-assign
// functor (the Assign version is commented out above) — the tensor is not
// mutated. Possibly intentional; confirm before "fixing".
template<template<class,class> class tensor, class T, class ml>
auto gd(tensor<T, ml>& x) {
return x.unExpr(CachedSigmoidDeriv());
}
template<template<class,class> class tensor, class T, class ml>
auto h(tensor<T, ml>& x) {
return x.unExpr(TanhAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto h(tensor<T, ml>&& x) {
return x.unExpr(Tanh());
}
template<template<class,class> class tensor, class T, class ml>
auto hd(tensor<T, ml>& x) {
return x.unExpr(CachedTanhDerivAssign());
}
template<template<class,class> class tensor, class T, class ml>
auto hd(tensor<T, ml>&& x) {
return x.unExpr(CachedTanhDeriv());
}
}
}
#endif
|
13,608 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void print_info(unsigned int size);
// Each thread prints its own thread index and block index (debug demo).
__global__ void print_info_kernel()
{
const int threadId = threadIdx.x;
const int blockId = blockIdx.x;
printf("Hello CUDA I’m a thread %d from block %d \n" , threadId , blockId);
}
// Entry point: launch the hello-thread demo with 100 threads per block.
int main()
{
print_info(100);
return 0;
}
// Launch print_info_kernel on device 0 with 4 blocks of `size` threads each,
// then check for both launch errors (cudaGetLastError) and asynchronous
// execution errors (cudaDeviceSynchronize).
// FIX: both error messages referred to "addKernel", a kernel that does not
// exist in this program — they now name print_info_kernel.
void print_info(unsigned int size)
{
cudaSetDevice(0);
cudaError_t cudaStatus;
print_info_kernel<<<4, size>>>();
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "print_info_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching print_info_kernel!\n", cudaStatus);
}
}
|
13,609 | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <math.h>
#define DOWNSAMPLE 25
#define SAMPLE_FREQ 800e6
#define LO_FREQ 302e6
#define THREADS_PER_BLOCK 512
#define BLOCKS_PER_GRID 32 * DOWNSAMPLE
// keeps things a multiple of COS_TABLE_LENGTH to avoid edge effects
#define BLOCK_SIZE THREADS_PER_BLOCK * BLOCKS_PER_GRID
#define SECONDS_OF_DATA 10
#define COS_TABLE_LENGTH 8000
// how many loops of block size to do
#define INPUT_SCALE 1.0f
// input may need scaling to see tones clearly...
__constant__ float cTaps[512];
cudaEvent_t t_start, t_stop;
__constant__ float cCos[COS_TABLE_LENGTH];
// Mixer: multiply each 8-bit input sample by the local-oscillator entry from
// the cCos constant table. The first blockDim.x entries of both buffers hold
// the previous block's tail (FIR memory), so sample idx lives at offset
// idx + blockDim.x.
// FIX: fold lo_offset into the table index BEFORE the modulo. The original
// added it after ((k % len) + lo_offset), which both breaks phase wrap-around
// and can index past the end of the cCos table whenever lo_offset > 0.
__global__ void mix(char *input_buffer, float *output_buffer, int lo_interval, int cos_table_length, int lo_offset)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int table_idx = (lo_interval*idx + lo_offset) % cos_table_length;
float mix_value = cCos[table_idx] * ((float)input_buffer[idx + blockDim.x] / INPUT_SCALE);
output_buffer[idx + blockDim.x] = mix_value;
}
// Decimating FIR filter: each thread produces one output sample by convolving
// the no_taps constant-memory taps (cTaps) against the mixed input, stepping
// `downsample` input samples per output. The first blockDim.x entries of
// input_buffer are the previous block's tail, hence the +blockDim.x offset.
// The (downsample / 8.0f) factor rescales for the decimation gain.
// NOTE(review): if no_taps > blockDim.x, input_buffer[fir_idx - i] can reach
// before the tail region for the first outputs — confirm the taps file never
// exceeds THREADS_PER_BLOCK entries.
// upsample, poly_interval, lo_interval and cos_table_length are unused here;
// kept for signature parity with the polyphase variant.
__global__ void simple_fir(float *input_buffer, float *output_buffer, int no_taps, int upsample, int downsample, int poly_interval, int lo_interval, int cos_table_length)
{
float temp_output = 0;
int fir_idx = (blockIdx.x*blockDim.x + threadIdx.x)*downsample + blockDim.x;
// idx starts from end of memory buffer which is blockDim.x in length
for (int i=0; i < no_taps; i++) {
temp_output += cTaps[i] * (float)input_buffer[fir_idx - i];
}
output_buffer[blockIdx.x*blockDim.x + threadIdx.x] = temp_output * (downsample / 8.0f);
}
// Narrowing cast of each float sample to signed 8-bit.
// No bounds guard: the launch configuration must cover exactly the buffer
// length. NOTE(review): values outside [-128, 127] wrap on the char cast —
// assumes the FIR gain keeps samples within 8-bit range; confirm.
__global__ void float_cast(float *in, char *out)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
out[idx] = (char) in[idx];
}
// Digital down-converter: reads 8-bit samples from <data_file>, mixes them
// with a table-driven local oscillator, low-pass FIR filters with taps from
// <fir_taps_file>, decimates by DOWNSAMPLE, and writes 8-bit output to
// stdout (diagnostics go to stderr).
// Usage: prog <data_file> <fir_taps_file>
int main(int argc, char **argv) {
int write_block = BLOCK_SIZE;
// 1 MB worth of data at a time...
// this should be enough to allow at least a couple of output points per thread when tap overlap
int sample_rate = SAMPLE_FREQ;
// our adc sampling frequency
int upsample = 1;
int downsample = DOWNSAMPLE;
// coefficients to sort out the output sample rate
// in this case giving us 128 MHz sampling
int lo_freq = LO_FREQ;
// the mixing frequency for the DDC
int cos_table_length = COS_TABLE_LENGTH;
// the number of samples in the sin lookup table
int cos_table_size = sizeof(float) * cos_table_length;
int lo_interval = int(((float)cos_table_length / sample_rate) * lo_freq);
// the stepping interval through the lo sin table. May result in slightly different lo_freq from that
// specified. The user is informed of this.
int lo_offset = 0;
// as we move from block to block our we need an lo_offset to maintain phase...
int lo_remainder = (write_block * lo_interval) % cos_table_length;
// the remainder at the end of each loop (i.e. the last point per block from the loop)
int no_taps = 0;
// number of filter taps. Calculated once filter data is loaded.
int no_output_samples = int(((float)write_block / downsample));
// overall number of output samples to produce for this block
int loops = int(SECONDS_OF_DATA * (sample_rate / (float)write_block));
fprintf(stderr,"%i, %i, %i, %i\n", loops, SECONDS_OF_DATA, sample_rate, write_block);
int fh;
char *data_file;
char *fir_taps_file;
float et;
struct stat stat_buf;
float *fir_taps;
char *base_buffer;
char *host_char_buffer;
float *output_buffer;
float *upsample_buffer;
float *cos_table;
// host buffers
char *device_char_buffer;
char *memory_buffer;
float *device_fir_taps;
float *device_output_buffer;
float *device_upsample_buffer;
float *device_float_buffer;
float *device_fir_buffer;
// device buffers
if (argc > 2) {
data_file = argv[1];
fir_taps_file = argv[2];
} else { printf("Please supply both data and fir_taps filenames...\n"); return -1;}
fprintf(stderr,"Producing %i output samples per block (%i samples).\n",no_output_samples,write_block);
base_buffer = (char*)malloc(write_block);
host_char_buffer = (char*)malloc(no_output_samples);
output_buffer = (float*)malloc(sizeof(float) * no_output_samples);
upsample_buffer = (float*)malloc(sizeof(float) * upsample * (write_block + THREADS_PER_BLOCK));
cos_table = (float*)malloc(cos_table_size);
memset(host_char_buffer, (char) 0, no_output_samples);
memset(base_buffer, (char) 0, write_block);
memset(upsample_buffer, (char) 0, sizeof(float) * upsample * (write_block + THREADS_PER_BLOCK));
// zero as we use part of this for our initial zero padding block
// NOTE(review): the open/fstat/read results below are unchecked; a missing
// or short taps file silently yields a bad filter.
fh = open(fir_taps_file, O_RDONLY);
fstat(fh, &stat_buf);
no_taps = stat_buf.st_size / sizeof(float);
fprintf(stderr,"Using %i tap FIR filter.\n",no_taps);
fir_taps = (float*)malloc(sizeof(float) * no_taps);
read(fh, fir_taps, sizeof(float) * no_taps);
close(fh);
fprintf(stderr,"Preparing sin lookup table...\n");
for (int i=0; i < cos_table_length; i++) {
cos_table[i] = 2 * cos(i * (2*M_PI/cos_table_length));
}
fprintf(stderr,"Allocating block storage on GPU...\n");
cudaEventCreate(&t_start);
cudaEventCreate(&t_stop);
cudaMalloc((void**)&device_char_buffer, write_block + THREADS_PER_BLOCK);
// device buffer with space for initial zero padding
cudaMalloc((void**)&memory_buffer, THREADS_PER_BLOCK);
// previous loop memory
cudaMalloc((void**)&device_output_buffer, sizeof(float) * no_output_samples);
cudaMalloc((void**)&device_float_buffer, sizeof(float) * write_block);
cudaMalloc((void**)&device_upsample_buffer, sizeof(float) * upsample * (write_block + THREADS_PER_BLOCK));
cudaMalloc((void**)&device_fir_buffer, sizeof(float) * upsample * write_block);
cudaMalloc((void**)&device_fir_taps, sizeof(float) * no_taps);
// allocate the device storage
// NOTE(review): cTaps is declared with 512 entries but nothing checks
// no_taps <= 512 before the symbol copy below — confirm tap-file size.
cudaMemcpy(device_fir_taps, fir_taps, sizeof(float) * no_taps, cudaMemcpyHostToDevice);
// copy the filter taps to the device
cudaMemcpyToSymbol(cTaps, fir_taps, sizeof(float) * no_taps);
cudaMemcpyToSymbol(cCos, cos_table, cos_table_size);
cudaMemcpy(device_upsample_buffer, upsample_buffer, sizeof(float) * upsample * write_block, cudaMemcpyHostToDevice);
cudaMemcpy(device_output_buffer, upsample_buffer, sizeof(float) * no_output_samples, cudaMemcpyHostToDevice);
cudaMemcpy(device_fir_buffer, upsample_buffer, sizeof(float) * upsample * write_block, cudaMemcpyHostToDevice);
cudaMemcpy(device_float_buffer, upsample_buffer, sizeof(float) * write_block, cudaMemcpyHostToDevice);
cudaMemcpy(device_char_buffer, base_buffer, write_block + THREADS_PER_BLOCK, cudaMemcpyHostToDevice);
cudaMemcpy(memory_buffer, base_buffer, THREADS_PER_BLOCK, cudaMemcpyHostToDevice);
// init host memory to zero
fprintf(stderr,"Actual lo freq is: %f MHz (interval: %i, reminader: %i)\n", (lo_interval / ((float)cos_table_length / sample_rate)) / 1e6, lo_interval, lo_remainder);
fprintf(stderr,"GPU Configuration: blocks per grid: %i, threads per block: %i\n",BLOCKS_PER_GRID, THREADS_PER_BLOCK);
fh = open(data_file, O_LARGEFILE);
// read(fh, base_buffer, write_block * LOOPS);
fprintf(stderr,"Producing %.2f s of data (%i loops reading a total of %.2f Mbytes of data)\n", loops * (write_block / float(sample_rate)), loops, loops * (write_block / float(1024*1024)));
// Main streaming loop: one write_block of input per iteration.
for (int i=0; i <= loops; i++) {
read(fh, base_buffer, write_block);
//start = i * write_block;
//fprintf(stderr,"Loop %i (start: %li, lo_offset: %i).\n",i,start, lo_offset);
cudaEventRecord(t_start, 0);
cudaMemcpy(device_char_buffer+THREADS_PER_BLOCK, base_buffer, write_block, cudaMemcpyHostToDevice);
// need to recal lo_offset each loop
lo_offset = (i * lo_remainder) % cos_table_length;
// on the offchance that each block does not loop through the cos table exactly
// we adjust the starting offset to match the end of the previous loop
// polyphase method
int poly_interval = 0;
// hardcoded for now... l = 4; m = 25
dim3 threads(THREADS_PER_BLOCK, 1);
// the downsample spaced blocks are indexed by thread.x and the upsample number of fir filters are indexed by y
dim3 blocks(BLOCKS_PER_GRID / downsample,1);
mix<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(device_char_buffer, device_float_buffer, lo_interval, cos_table_length, lo_offset);
//poly_fir<<<blocks, threads>>>(device_char_buffer, memory_buffer, device_output_buffer, no_taps, upsample, downsample, poly_interval, lo_interval, cos_table_length);
simple_fir<<<blocks, threads>>>(device_float_buffer, device_output_buffer, no_taps, upsample, downsample, poly_interval, lo_interval, cos_table_length);
float_cast<<<no_output_samples/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(device_output_buffer, device_char_buffer);
//fill_memory_buffer<<<1, THREADS_PER_BLOCK>>>(device_char_buffer, memory_buffer, write_block - THREADS_PER_BLOCK);
// fill memory buffer for next loop
cudaMemcpy(host_char_buffer, device_char_buffer, no_output_samples, cudaMemcpyDeviceToHost);
cudaMemcpy(device_char_buffer, base_buffer + write_block - THREADS_PER_BLOCK, THREADS_PER_BLOCK, cudaMemcpyHostToDevice);
// prime memory buffer for next trip
cudaEventRecord(t_stop, 0);
cudaEventSynchronize(t_stop);
cudaEventElapsedTime(&et, t_start, t_stop);
if (i == 0) {for (int j=0; j < 20; j++) { fprintf(stderr,"%i ",host_char_buffer[j]); }}
write(1, host_char_buffer, no_output_samples);
//write(1, output_buffer, sizeof(float) * no_output_samples);
//if (i % 20 == 0) fprintf(stderr,"Loop done. CUDA time is %f ms\n", et);
}
// NOTE(review): read() on fd 1 (stdout) — presumably a wait-for-input hack;
// confirm intent. Host/device buffers are not freed (process exit reclaims).
read(1, output_buffer, 1);
return 0;
}
|
13,610 | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
// Element-wise (Hadamard) product: results[i] = argumentsA[i] * argumentsB[i].
// Launch layout: one thread per element, flattened from a (blockIdx.x,
// blockIdx.y, threadIdx.x) decomposition with blockIdx.x fastest-varying.
// `size` is the element count; out-of-range threads return without touching memory.
__global__ void fHadamard( const float* argumentsA, const float* argumentsB, float* results, const long size ) {
const long X = gridDim.x;
// Widened to long: `size` is long, and for large grids the flattened index
// gridDim.y * gridDim.x * threadIdx.x overflows a 32-bit int.
const long index = (long)gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
if(index < size) {
results[index] = argumentsA[index] * argumentsB[index];
}
}
13,611 | #include <algorithm>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cuda_runtime.h"
// #include "ray_tracing_GPU.h"
// Axis index aliases: component selectors for 3-vectors (v[X], v[Y], v[Z]).
#define X 0
#define Y 1
#define Z 2
// Checks the cudaError_t of a CUDA runtime call; on failure prints file/line
// and aborts the process.
#define MY_CUDA_CHECK(call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
// Checks cudaGetLastError() (e.g. after a kernel launch); aborts on failure.
#define MY_CHECK_ERROR(errorMessage) { \
cudaError_t err = cudaGetLastError(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\
exit(EXIT_FAILURE); \
} \
}
// Cross product: dest = v1 x v2 (3-component arrays).
#define CPROD(dest,v1,v2) \
dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \
dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \
dest[2]=v1[0]*v2[1]-v1[1]*v2[0];
// Dot product of two 3-component arrays.
#define DPROD(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])
// #define DPROD1(v1,v2) \
// return (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]);
// Component-wise subtraction: dest = v1 - v2.
#define SUB(dest,v1,v2) \
dest[0]=v1[0]-v2[0]; \
dest[1]=v1[1]-v2[1]; \
dest[2]=v1[2]-v2[2];
// Min/max of three scalars, written into the caller's `min`/`max` locals.
#define FINDMINMAX(x0,x1,x2,min,max) \
min = max = x0; \
if(x1<min) min=x1;\
if(x1>max) max=x1;\
if(x2<min) min=x2;\
if(x2>max) max=x2;
// The AXISTEST_* macros below are the edge-axis separating-axis tests of the
// Akenine-Moller triangle/box overlap algorithm. They `return 0` (no overlap)
// directly, so they may only be expanded inside a function returning int that
// has v0/v1/v2, boxhalfsize, and the p0/p1/p2/min/max/rad locals in scope.
// NOTE(review): none of these macros is expanded anywhere in this file --
// triBoxOverlap() performs only the plane test.
/******** X_AXIS test *********/
#define AXISTEST_X01(a, b, fa, fb) \
p0 = a*v0[Y] - b*v0[Z]; \
p2 = a*v2[Y] - b*v2[Z]; \
if(p0<p2) {min=p0; max=p2;} else {min=p2; max=p0;} \
rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \
if(min>rad || max<-rad) return 0;
#define AXISTEST_X2(a, b, fa, fb) \
p0 = a*v0[Y] - b*v0[Z]; \
p1 = a*v1[Y] - b*v1[Z]; \
if(p0<p1) {min=p0; max=p1;} else {min=p1; max=p0;} \
rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \
if(min>rad || max<-rad) return 0;
/*********** Y_AXIS test ************/
#define AXISTEST_Y02(a, b, fa, fb) \
p0 = -a*v0[X] + b*v0[Z]; \
p2 = -a*v2[X] + b*v2[Z]; \
if(p0<p2) {min=p0; max=p2;} else {min=p2; max=p0;} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \
if(min>rad || max<-rad) return 0;
#define AXISTEST_Y1(a, b, fa, fb) \
p0 = -a*v0[X] + b*v0[Z]; \
p1 = -a*v1[X] + b*v1[Z]; \
if(p0<p1) {min=p0; max=p1;} else {min=p1; max=p0;} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \
if(min>rad || max<-rad) return 0;
/***************** Z_AXIS test ************************/
#define AXISTEST_Z12(a, b, fa, fb) \
p1 = a*v1[X] - b*v1[Y]; \
p2 = a*v2[X] - b*v2[Y]; \
if(p2<p1) {min=p2; max=p1;} else {min=p1; max=p2;} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \
if(min>rad || max<-rad) return 0;
#define AXISTEST_Z0(a, b, fa, fb) \
p0 = a*v0[X] - b*v0[Y]; \
p1 = a*v1[X] - b*v1[Y]; \
if(p0<p1) {min=p0; max=p1;} else {min=p1; max=p0;} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \
if(min>rad || max<-rad) return 0;
// Simple 3-D point in world coordinates.
// NOTE(review): __device__ __host__ qualifiers on a struct *definition* have
// no effect (they qualify functions); harmless but misleading -- confirm intent.
__device__ __host__ struct Point3D {
double x;
double y;
double z;
};
// Triangle as three vertices, each an {x, y, z} array (see X/Y/Z macros).
__device__ __host__ struct Triangle {
double v0[3];
double v1[3];
double v2[3];
};
// Growable array of ints usable from host or device code.
// Fields: `ptr` -> heap storage, `size` -> current capacity, `count` -> used.
__device__ __host__ struct Index
{
int size;
int count;
int *ptr;
// Starts with room for 100 entries.
__device__ __host__ Index()
{
ptr = (int *)malloc(sizeof(int) *100);
size = 100;
count = 0;
}
// Appends `value`, growing the buffer geometrically (2x) when full.
// The original grew capacity by ONE element per insert, so N inserts copied
// O(N^2) ints; doubling keeps insertion amortized O(1).
__device__ __host__ void Insert(int value){
if(count == size){
int newsize = size * 2;
int *newptr = (int *)malloc(sizeof(int)*newsize);
if(newptr == NULL){
// Allocation failed: drop the value rather than write out of bounds.
return;
}
for (int index = 0; index<size; index++){
newptr[index] = ptr[index];
}
free(ptr);
ptr = newptr;
size = newsize;
}
ptr[count] = value;
count+=1;
}
};
// Axis-aligned grid cell plus cached geometry and per-cube triangle bookkeeping.
__device__ __host__ struct Cube {
double xmin, xmax;
double ymin, ymax;
double zmin, zmax;
double boxhalfsize[3]; // half extents; filled by GETHALFSIZE
double center[3]; // cell center; filled by GETCENTER
int reference; // index of the top (last) cube of this cell's y-column (set in createGrid3D)
int counter=0; // number of triangles overlapping this cube (see TriangleCount*)
int *triangleIndexes; // heap array of overlapping triangle indexes; allocated by callers
// int triangleIndexes[12];
int index =0; // number of valid entries in triangleIndexes
};
// A ray (origin + direction) plus bookkeeping of which grid cubes it hits.
__device__ __host__ struct Ray {
// "Rreze" = ray
double orig[3];
double dir[3];
// NOTE(review): "={-1}" initializes only element 0 to -1; elements 1..3 are 0.
// Fixed capacity of 4 -- writers must never record more than 4 cubes. Confirm.
int intersectedCubes[4]={-1};
int index; // number of valid entries in intersectedCubes
int intersects=0;
};
using namespace std;
/******************Declaring global variables ***************/
Ray *rays; // host ray array, allocated in SetRays
int numRays; // number of rays
Cube boundingBox; // scene bounds, filled by SetBoundingBox
bool *isInside; // per-ray result buffer (allocated in RayTracingGPU)
bool *isInPlane; // per-ray on-surface result buffer
Triangle *triangles; // host triangle array, allocated in SetTriangles
int numTriangles; // number of triangles
/****************** Declaring functions*********************/
// Forward declarations for the definitions below. A verbatim duplicate of the
// `calcAlloc` prototype was removed (it appeared twice in the original).
// NOTE(review): the 6-argument IsInPlane declared here has no matching
// definition in this file -- the defined overload takes
// (Ray*, Triangle*, bool*, int, Cube*). Unused unless called elsewhere.
__device__ __host__ double dot_product(double *v1, double *v2);
void GETCENTER(Cube cube, double *center);
void GETHALFSIZE(Cube cube, double *boxhalfsize);
double into_double(char *str);
void getTriangles(Triangle *triangles);
void generateRandoms(double *direction, double *origin);
void getRays(Ray *rays);
int getNumberOfCubes(Point3D lower, Point3D upper, double delta);
void createGrid3D(Cube *cubes, Point3D lower, Point3D upper, double delta, Index &frontGrid);
__device__ __host__ bool rayTriangleIntersect(Ray ray, Triangle tri);
__device__ __host__ int planeBoxOverlap(double *normal, double *vert, double *maxbox);
__device__ __host__ int triBoxOverlap(double *boxcenter, double *boxhalfsize, Triangle triverts);
__host__ __device__ double TriArea(double *P1, double *P2, double *P3);
__device__ __host__ bool IsInPlaneTest(Ray ray, Triangle tri, double epsilon);
__device__ __host__ bool PointIsInSurface(Ray ray, Triangle tri, double epsilon);
__device__ __host__ int rayBoxIntersection_yaxis(double *rayorigin, Cube cube);
__global__ void TriangleCount_GPU(Triangle *triangles, Cube *cubes, int *numCubes, int *numTriangles, bool *CubeTriMapp);
__global__ void calcAlloc(Cube *cubes, int *frontGrid, int *cubesPerBlock, int *indexes, int *counter);
__global__ void findUnique(Cube *cubes, int *end, int *cubesPerBlock, bool *CubeTriMapp, int *numTriangles, int *numCubes);
__global__ void appendTriangles(Cube *cubes, int *indexes, int *frontGrid, int *numTriangles, int *numCubes, bool *CubeTriMapp );
__global__ void getIntersectedCubes_GPU( Ray *rays, Cube *cubes, int *numrays, int *indexes, Cube *boundingBox, int *frontGrid);
void IsInPlane(Ray *rays, Triangle *tri, bool *isInside, int nr_rays, int nr_triangles, double epsilon);
__global__ void IsInPlaneGPU(Ray *rays, Triangle *tri, bool *isInside, int *n_r, int *n_tr, Cube *cubes);
__global__ void ray_tracingGPU(Ray *ray, Triangle *tri, bool *results, int *n_r, int *n_tr, Cube *cubes);
extern "C" void SetRays(float *xyz_flat, int nr_rays);
extern "C" void SetTriangles(double *tri_coordinates, int nr_triangles);
extern "C" void SetBoundingBox(double *x, double *y, double *z);
extern "C" bool *RayTracingGPU(int *numRays_, int *numTriangles);
/**************Defining the functions *****************/
// Scalar (dot) product of two 3-component vectors.
__device__ __host__ double dot_product(double *v1, double *v2){
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2];
}
// Writes the midpoint of `cube` into center[3] = {x, y, z}.
void GETCENTER(Cube cube, double *center)
{
center[0] = 0.5 * (cube.xmax + cube.xmin);
center[1] = 0.5 * (cube.ymax + cube.ymin);
center[2] = 0.5 * (cube.zmax + cube.zmin);
}
// Writes the half extents of `cube` into boxhalfsize[3] = {x, y, z}.
void GETHALFSIZE(Cube cube, double *boxhalfsize)
{
boxhalfsize[0] = 0.5 * (cube.xmax - cube.xmin);
boxhalfsize[1] = 0.5 * (cube.ymax - cube.ymin);
boxhalfsize[2] = 0.5 * (cube.zmax - cube.zmin);
}
// Parses the leading double from `str` (thin wrapper over strtod);
// any trailing text after the number is ignored.
double into_double(char *str){
char *endptr;
return strtod(str, &endptr);
}
// Loads triangles from a hardcoded whitespace-separated text file: three
// consecutive lines per triangle (vertex v0, then v1, then v2).
// The FIRST token of every line is skipped -- strtok is advanced once at the
// top of the inner loop before any value is read.
// NOTE(review): this assumes each line begins with a label/ignored field;
// confirm against the actual file format.
// The `r = 0` add/subtract below is a disabled perturbation knob: with r == 0
// both branches are no-ops.
void getTriangles(Triangle *triangles){
char * line = NULL;
size_t len = 0;
ssize_t read;
FILE *fp = fopen("/home/diko/MOBIUS/MOEBIUS/BACKEND/SCRIPTS/TEMP/triangles.txt","r");
if(fp == NULL){
perror(" unable to open file ");
exit(1);
}
int index = 0;
// nr_vec cycles 0 -> 1 -> 2: which vertex of the current triangle this line fills.
int nr_vec =0;
while ((read = getline(&line, &len, fp)) != -1) {
int j =0;
char* token = strtok(line," ");
while(token != NULL){
// Advance first: drops the line's leading token (see note above).
token = strtok(NULL," ");
if(nr_vec == 0){
if(token != NULL){
double token_ = into_double(token);
double r = 0;
if(token_ < 0){
token_ = token_ - r;
}
else token_ = token_ +r;
triangles[index].v0[j] = token_;
j = j+1;
}
}
else if(nr_vec == 1){
if(token != NULL){
double token_ = into_double(token);
double r = 0;
if(token_ < 0){
token_ = token_ - r;
}
else token_ = token_ +r;
triangles[index].v1[j] = token_;
j = j+1;
}
}
else{
if(token != NULL){
double token_ = into_double(token);
double r = 0;
if(token_ < 0){
token_ = token_ - r;
}
else token_ = token_ +r;
triangles[index].v2[j] = token_;
j = j+1;
}
}
}
// Completed the third vertex line: move to the next triangle.
if(nr_vec == 2){
index+=1;
nr_vec=0;
}
else nr_vec+=1;
}
fclose(fp);
// getline() allocates the line buffer; release it once at the end.
if (line)
free(line);
}
// Despite the name, writes a FIXED, nearly-y-axis direction into `direction`
// for every ray (the small x/z components avoid axis-aligned degeneracies).
// `origin` is accepted for signature compatibility but not used.
void generateRandoms(double *direction, double *origin)
{
direction[1] = 1.0;
direction[0] = 0.0002;
direction[2] = 0.0002;
}
// Loads ray origins from a hardcoded whitespace-separated text file (one
// point per line; the first three tokens are x, y, z) into the preallocated
// `rays` array, and assigns every ray the shared fixed direction.
// NOTE(review): there is no bounds check against the size of `rays`; assumes
// the file has at most as many lines as were allocated -- confirm.
void getRays(Ray *rays){
char * line = NULL;
size_t len = 0;
ssize_t read;
FILE *fp = fopen("/home/diko/MOBIUS/MOEBIUS/BACKEND/SCRIPTS/TEMP/points.txt","r");
if(fp == NULL){
perror(" unable to open file ");
exit(1);
}
int index = 0;
while ((read = getline(&line, &len, fp)) != -1) {
int j =0;
char* token = strtok(line," ");
while(token != NULL && j < 3){
rays[index].orig[j] =into_double(token);
//printf("\n token %d %d is : %.5lf",index,j,rays[index].orig[j]);
token = strtok(NULL," ");
j = j+1;
}
generateRandoms(rays[index].dir, rays[index].orig);
index+=1;
}
fclose(fp);
// getline() allocates the line buffer; release it once.
if (line)
free(line);
}
// Number of delta-sized cells needed to tile the box [lower, upper] in 3-D
// (each axis rounded up independently).
int getNumberOfCubes(Point3D lower, Point3D upper, double delta){
const int nx = ceil((upper.x - lower.x)/delta);
const int ny = ceil((upper.y - lower.y)/delta);
const int nz = ceil((upper.z - lower.z)/delta);
return nx * ny * nz;
}
// Builds the uniform acceleration grid: fills `cubes` with delta-sized cells
// tiling [lower, upper], iterating x (outer), z (middle), y (inner), so each
// consecutive run of yn cubes forms one vertical (y) column. The index of
// every column-top cube (k == yn-1) is appended to frontGrid.
void createGrid3D(Cube *cubes, Point3D lower, Point3D upper, double delta, Index &frontGrid){
/***********************************************
Input: Empty allocated memory for cubes, lower point and upper point
Output: It is a void function, will modify the array of cubes
***********************************************/
// double Y_MAX = upper.y;
const int xn = ceil((upper.x - lower.x)/delta);
const int yn = ceil((upper.y - lower.y)/delta);
const int zn = ceil((upper.z - lower.z)/delta);
double x = 0;
double y = 0;
double z = 0;
// `block` counts completed (x,z) columns; `index` is the flat cube index.
int block = 0;
int index = 0;
for(int i = 0; i<xn; i++){
x = lower.x;
x = x+delta*i;
for(int j = 0; j<zn; j++){
z = lower.z;
z = z+delta*j;
for(int k =0; k<yn; k++){
y = lower.y;
y = y + delta*k;
cubes[index].xmax = x+delta;
cubes[index].ymax = y+delta;
cubes[index].zmax = z+delta;
cubes[index].xmin = x;
cubes[index].ymin = y;
cubes[index].zmin = z;
// Cache derived geometry used by the overlap tests.
GETCENTER(cubes[index], cubes[index].center);
GETHALFSIZE(cubes[index], cubes[index].boxhalfsize);
// Top cell of this y-column: record it as part of the "front grid".
if(k== yn-1)
{
frontGrid.Insert(index);
}
// reference = flat index of the top cube of this cell's column.
cubes[index].reference = (block+1)*yn -1;
index +=1;
}
block+=1;
}
}
}
// Moller-Trumbore ray/triangle intersection. Returns true when the ray hits
// the triangle strictly in front of its origin (t > epsilon). Rays (near-)
// parallel to the triangle plane (|det| < epsilon) are rejected.
__device__ __host__ bool rayTriangleIntersect(Ray ray, Triangle tri){
double ZERO = 0.0;
double ONE = 1.0;
const double epsilon = 1.e-10;
double edge1[3] ={}; double edge2[3] ={} ; double pvec[3]={};double tvec[3] = {}; double qvec[3] = {};
//Calculate edges
SUB(edge1,tri.v1, tri.v0);
SUB(edge2,tri.v2, tri.v0);
CPROD(pvec,ray.dir, edge2);
//Calculate the determinant
double det = dot_product(edge1, pvec);
if( det>-epsilon && det < epsilon) return false; //It is parallel with the triangle
//Calculate the inverse determinant
double invDet = ONE / det; // f
//calculate the 'u' of barycentric coordinates
SUB(tvec,ray.orig, tri.v0); //s
double prod = dot_product(tvec, pvec); // s dot prod h
double u = prod * invDet; //f * s dot prod h
//Check if u is inside the allowed bounds
if(u < ZERO || u > ONE) return false;
//calculate the 'v' of barycentric coordinates and check if it is inside the desired interval to continue
CPROD(qvec,tvec, edge1);
double prod1 = dot_product(ray.dir, qvec);
double v = prod1 * invDet;
if(v < ZERO || u + v > ONE ) return false;
// Distance along the ray; only hits in front of the origin count.
double t_prod = dot_product(edge2, qvec);
double t =t_prod * invDet;
return (t>epsilon);
}
// Tests whether the plane with the given `normal` passing through `vert`
// intersects the axis-aligned box centered at the origin with half extents
// `maxbox`. Returns 1 on overlap, 0 otherwise (Akenine-Moller plane/box test:
// vmin/vmax are the box corners extreme along the normal, shifted so the
// plane passes through the origin).
__device__ __host__ int planeBoxOverlap(double *normal, double *vert, double *maxbox)
{
double vmin[3], vmax[3];
for (int q = 0; q <= 2; q++)
{
double v = vert[q];
if (normal[q] > 0.0f)
{
vmin[q] = -maxbox[q] - v;
vmax[q] = maxbox[q] - v;
}
else
{
vmin[q] = maxbox[q] - v;
vmax[q] = -maxbox[q] - v;
}
}
double dmin = normal[0]*vmin[0] + normal[1]*vmin[1] + normal[2]*vmin[2];
double dmax = normal[0]*vmax[0] + normal[1]*vmax[1] + normal[2]*vmax[2];
// Overlap iff the near corner is on/behind the plane and the far corner on/ahead.
return !(dmin > 0.0f) * (dmax >= 0.0f);
}
// Triangle/AABB overlap test: box given by center + half extents, triangle by
// its three vertices (translated into box-local coordinates first).
// NOTE(review): only the triangle-PLANE vs box test is performed -- the
// edge-axis SAT tests of the full Akenine-Moller algorithm (the AXISTEST_*
// macros above) are never invoked, so this can report overlap for triangles
// whose plane crosses the box but whose area does not. Confirm the
// conservative behaviour is intended.
// Cleanup: removed nine unused locals (min/max/p0/p1/p2/rad/fex/fey/fez) and
// the dead e2 edge computation, none of which affected the result.
__device__ __host__ int triBoxOverlap(double *boxcenter, double *boxhalfsize, Triangle triverts)
{
double v0[3]={0};
double v1[3]={0};
double v2[3]={0};
double normal[3]={0};
double e0[3]={0};
double e1[3]={0};
// Translate the triangle so the box is centered at the origin.
SUB(v0,triverts.v0,boxcenter);
SUB(v1,triverts.v1,boxcenter);
SUB(v2,triverts.v2,boxcenter);
// Two edges are enough to form the plane normal.
SUB(e0,v1,v0);
SUB(e1,v2,v1);
CPROD(normal,e0,e1);
return planeBoxOverlap(normal, v0, boxhalfsize);
}
// Area of triangle P1P2P3: |(P2-P1) x (P3-P1)| / 2.
__host__ __device__ double TriArea(double *P1, double *P2, double *P3)
{
double u[3], w[3], c[3];
for (int i = 0; i < 3; i++) {
u[i] = P2[i] - P1[i];
w[i] = P3[i] - P1[i];
}
c[0] = u[1]*w[2] - u[2]*w[1];
c[1] = u[2]*w[0] - u[0]*w[2];
c[2] = u[0]*w[1] - u[1]*w[0];
return sqrt(c[0]*c[0] + c[1]*c[1] + c[2]*c[2]) / 2;
}
// True when the ray ORIGIN lies within +/- epsilon of the plane spanned by
// the triangle: builds the plane normal (a,b,c) from the two edge vectors and
// evaluates the implicit plane equation at the origin.
__device__ __host__ bool IsInPlaneTest(Ray ray, Triangle tri, double epsilon)
{
//Find the plane equation starting with coordinates a, b, c and then distance d
double a1, b1, c1;
double a2, b2, c2;
double a, b, c; //rate of normal vector of the plane
double d; //distance
// Edge vectors v0->v1 and v0->v2.
a1 = tri.v1[0] -tri.v0[0];
b1 = tri.v1[1] -tri.v0[1];
c1 = tri.v1[2] -tri.v0[2];
a2 = tri.v2[0] -tri.v0[0];
b2 = tri.v2[1] -tri.v0[1];
c2 = tri.v2[2] -tri.v0[2];
// Normal = edge1 x edge2 (component-wise cross product).
a = b1 * c2 - b2 * c1;
b = a2 * c1 - a1 * c2;
c = a1 * b2 - b1 * a2;
d = (- a * tri.v0[0] - b * tri.v0[1] - c * tri.v0[2]);
//Check if point is in plane
return (a*ray.orig[0]+b*ray.orig[1]+c*ray.orig[2]+d<=0+epsilon && a*ray.orig[0]+b*ray.orig[1]+c*ray.orig[2]+d>=0-epsilon );
// return (a*ray.orig[0]+b*ray.orig[1]+c*ray.orig[2]+d==0 );
}
// True when the ray ORIGIN lies on the triangle: classic area test -- the
// three sub-triangles formed with the origin must sum to the triangle's area.
// NOTE(review): the comparison is sqrt(sum)/sqrt(tri0) - 1, not sum/tri0 - 1;
// the square roots soften the relative tolerance. Confirm this is intended
// (the exact form would be sum == tri0, as in the commented-out line).
__device__ __host__ bool PointIsInSurface(Ray ray, Triangle tri, double epsilon)
{
// const double epsilon = 1.e-7;
double tri0, tri1, tri2, tri3;
tri0 = TriArea(tri.v0, tri.v1, tri.v2);
tri1 = TriArea(ray.orig, tri.v0, tri.v1);
tri2 = TriArea(ray.orig, tri.v0, tri.v2);
tri3 = TriArea(ray.orig, tri.v1, tri.v2);
double sum = tri1 + tri2 +tri3;
double res = sqrt(sum)/sqrt(tri0)-1;
return(res <= epsilon && res >= -epsilon);
// return sum == tri0;
}
// Returns 1 when a +y-directed ray starting at `rayorigin` passes through
// `cube`: the origin must lie inside the cube's x/z footprint and below its
// top face. The lower y bound is deliberately NOT tested (see the first
// commented-out variant), so every cube above the origin in its column matches.
__device__ __host__ int rayBoxIntersection_yaxis(double *rayorigin, Cube cube)
{
// const double epsilon = 1.e-5;
double xmin = cube.xmin;
double xmax = cube.xmax;
double zmin = cube.zmin;
double zmax = cube.zmax;
double ymax = cube.ymax;
double ymin = cube.ymin; // unused here; kept for the commented-out variant below
return !(rayorigin[0]<xmin || rayorigin[0] >xmax || rayorigin[2]<zmin || rayorigin[2]>zmax || rayorigin[1]>ymax);
// return !(rayorigin[0]<xmin || rayorigin[0] >xmax || rayorigin[2]<zmin || rayorigin[2]>zmax || rayorigin[1]>ymax || rayorigin[1]<ymin);
// return !(rayorigin[0]<xmin-epsilon || rayorigin[0] >xmax+epsilon || rayorigin[2]<zmin-epsilon || rayorigin[2]>zmax+epsilon);
}
// Kernel: one logical work item per (triangle, cube) pair, processed with a
// grid-stride loop. Fills the triangle-major overlap bit matrix CubeTriMapp
// (row = triangle, column = cube) and atomically accumulates the number of
// overlapping triangles into each cube's `counter`.
__global__ void TriangleCount_GPU(Triangle *triangles, Cube *cubes, int *numCubes, int *numTriangles, bool *CubeTriMapp)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
for (int i=idx; i<(*numCubes)*(*numTriangles) ; i+=num_threads)
{
// Decompose the flat pair index into (triangle row, cube column).
int triIndex = (int)i/(*numCubes);
int cubeIndex = i%(*numCubes);
int intersects = (int) (triBoxOverlap(cubes[cubeIndex].center, cubes[cubeIndex].boxhalfsize, triangles[triIndex])==1);
CubeTriMapp[i]=(intersects>0);
// Many threads may target the same cube concurrently -> atomic accumulate.
atomicAdd(&cubes[cubeIndex].counter, intersects);
}
}
// Kernel: for every front-grid (column-top) cube listed in `indexes`, sums
// the triangle counters of the cubes below it in the same y-column into the
// top cube's counter (sizing later triangleIndexes allocations). One column
// per work item, grid-stride. Note j runs over [start, end): the top cube's
// own counter is not re-added.
__global__ void calcAlloc(Cube *cubes, int *frontGrid, int *cubesPerBlock, int *indexes, int *counter)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
for(int i=idx; i<*frontGrid; i+=num_threads)
{
int end = indexes[i];
int start = end - (*cubesPerBlock-1);
for(int j=start; j<end; j++)
{
cubes[end].counter+=cubes[j].counter;
}
}
}
// Kernel: OR-reduces the overlap bits of one whole y-column into its top cube
// `*end`, then atomically counts the distinct triangles into cubes[*end].index.
// One triangle per work item (grid-stride); the host launches one instance of
// this kernel per column, each on its own stream.
__global__ void findUnique(Cube *cubes, int *end, int *cubesPerBlock, bool *CubeTriMapp, int *numTriangles, int *numCubes)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
int start = *end -(*cubesPerBlock-1);
for(int triIndex=idx; triIndex<*numTriangles; triIndex+=num_threads)
{
// Each thread owns one triangle row, so the OR into the top cube's bit is race-free.
for(int cubeIndex=start; cubeIndex<*end; cubeIndex++)
{
CubeTriMapp[triIndex*(*numCubes)+(*end)]|=CubeTriMapp[triIndex*(*numCubes)+cubeIndex];
}
atomicAdd(&cubes[*end].index,(int)(CubeTriMapp[triIndex*(*numCubes)+(*end)]==true));
}
}
// Kernel: rebuilds the compacted triangle-index list of each front-grid cube
// from the (already OR-reduced) overlap matrix; one cube per work item,
// grid-stride. Uses a write-then-conditionally-advance pattern: every slot is
// written, but only kept (cursor advanced) when the overlap bit is set.
// NOTE(review): assumes triangleIndexes was allocated to at least
// cubes[].index entries beforehand -- confirm against the caller.
__global__ void appendTriangles(Cube *cubes, int *indexes, int *frontGrid, int *numTriangles, int *numCubes, bool *CubeTriMapp )
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
for(int i=idx; i<*frontGrid; i+=num_threads)
{
int cubeIndex=indexes[i];
// int counter = 0;
cubes[cubeIndex].index=0;
for(int triIndex=0; triIndex<*numTriangles; triIndex+=1)
{
int index = triIndex*(*numCubes) + cubeIndex;
cubes[cubeIndex].triangleIndexes[cubes[cubeIndex].index] = triIndex;
cubes[cubeIndex].index+=1*(int)(CubeTriMapp[index]==true);
}
}
}
// Kernel: for every ray (grid-stride), records which front-grid cubes contain
// the ray origin in x/z; `index` ends up as the hit count. Same
// write-then-conditionally-advance pattern as appendTriangles.
// NOTE(review): Ray::intersectedCubes holds only 4 entries, but this loop can
// record one per intersecting front-grid cube -- confirm rays can never hit
// more than 4 cells.
__global__ void getIntersectedCubes_GPU( Ray *rays, Cube *cubes, int *numrays, int *indexes, Cube *boundingBox, int *frontGrid)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int num_threads = gridDim.x*blockDim.x;
for(int index = idx; index<*numrays; index+=num_threads)
{
rays[index].index = 0;
for(int frontGridIndex=0; frontGridIndex<*frontGrid; frontGridIndex++)
{
int intersects = (int)rayBoxIntersection_yaxis(rays[index].orig, cubes[indexes[frontGridIndex]]);
rays[index].intersectedCubes[rays[index].index] = indexes[frontGridIndex];
rays[index].index+=1*intersects;
}
}
}
// Kernel: crossing-number (parity) containment test, one ray per work item
// (grid-stride). Counts Moller-Trumbore intersections against the triangles
// of the ray's FIRST recorded cube only (intersectedCubes[0]); an odd count
// marks the origin as inside. Rays with no recorded cube leave results[]
// untouched.
__global__ void ray_tracingGPU(Ray *ray, Triangle *tri, bool *results, int *n_r, int *n_tr, Cube *cubes){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int num_threads = gridDim.x*blockDim.x;
for(int rayIndex = idx; rayIndex <*n_r; rayIndex+=num_threads)
{
int count = 0;
if(ray[rayIndex].index > 0)
{
for (int j = 0; j<cubes[ray[rayIndex].intersectedCubes[0]].index; j=j+1){
count += (int)rayTriangleIntersect(ray[rayIndex],tri[cubes[ray[rayIndex].intersectedCubes[0]].triangleIndexes[j]]);
}
results[rayIndex] = (count % 2 !=0);
}
}
}
// Kernel: flags ray origins that lie ON the surface, one ray per work item
// (grid-stride). Walks the triangle list of the ray's first recorded cube;
// the unusual increment j += 1 + counter*listLength jumps past the loop bound
// as soon as one triangle matches (a branch-free early exit).
__global__ void IsInPlaneGPU(Ray *rays, Triangle *tri, bool *isInside, int *n_r, int *n_tr, Cube *cubes)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int num_threads = gridDim.x*blockDim.x;
const double epsilon = 1.e-10;
for(int rayIndex=idx; rayIndex<*n_r; rayIndex+=num_threads)
{
if(rays[rayIndex].index > 0)
{
int counter = 0;
for(int j=0; j<cubes[rays[rayIndex].intersectedCubes[0]].index;)
{
counter+=(int)(PointIsInSurface(rays[rayIndex], tri[cubes[rays[rayIndex].intersectedCubes[0]].triangleIndexes[j]], epsilon)==true);
j+=1+counter*cubes[rays[rayIndex].intersectedCubes[0]].index;
}
isInside[rayIndex]=(counter>0);
}
}
}
// Kernel: per-cube compaction of the overlap matrix into triangleIndexes, one
// cube per work item (grid-stride). Same write-then-conditionally-advance
// pattern as appendTriangles, but over ALL cubes.
// NOTE(review): cubes[].index is not reset here (the reset is commented out);
// relies on the in-class `index = 0` initializer, i.e. a single pass only --
// confirm this kernel is never run twice on the same cubes.
__global__ void appendALL(Cube *cubes,int *numCubes,int *numTriangles, bool *CubeTriMapp, Triangle *triangles)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
for(int cubeIndex=idx; cubeIndex<*numCubes; cubeIndex+=num_threads)
{
// cubes[cubeIndex].index = 0;
for(int triIndex=0; triIndex<*numTriangles; triIndex+=1)
{
int index = triIndex*(*numCubes) + cubeIndex;
// if(cubeIndex == 0) printf("Index: %d", index);
cubes[cubeIndex].triangleIndexes[cubes[cubeIndex].index] = triIndex;
int intersects = CubeTriMapp[index];
cubes[cubeIndex].index+=1*intersects;
// atomicAdd(&cubes[cubeIndex].index, intersects);
}
}
}
// Kernel: per-ray linear search for the FIRST cube whose x/z footprint
// contains the ray origin (and whose top face is above it). The
// `cubeIndex += *numCubes` inside the hit branch pushes the loop variable
// past the bound, acting as a break.
__global__ void firstCube(Cube *cubes, Ray *rays, Triangle *triangles,int *numRays, int *numCubes)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned int num_threads = gridDim.x * blockDim.x;
for(int rayIndex=idx; rayIndex<*numRays; rayIndex+=num_threads)
{
rays[rayIndex].index=0;
for(int cubeIndex=0; cubeIndex<*numCubes;)
{
if(rayBoxIntersection_yaxis(rays[rayIndex].orig, cubes[cubeIndex]))
{
rays[rayIndex].intersectedCubes[rays[rayIndex].index] = cubeIndex;
rays[rayIndex].index +=1;
cubeIndex+=*numCubes;
}
cubeIndex+=1;
}
// printf("RayIndex: %d \n",rays[rayIndex].index);
}
}
/************************ END of GPU FUNCTIONS ************************************/
// Copies a flat [x0,y0,z0, x1,y1,z1, ...] float array into the global `rays`
// array (widening to double) and assigns every ray the shared fixed
// direction. Overwrites the globals numRays and rays.
extern "C" void SetRays(float *xyz_flat, int nr_rays)
{
numRays = nr_rays;
rays = (Ray *)malloc(sizeof(Ray)*numRays);
for(int i = 0; i < nr_rays; ++i)
{
for(int c = 0; c < 3; ++c)
{
rays[i].orig[c] = xyz_flat[i*3 + c];
}
generateRandoms(rays[i].dir, rays[i].orig);
}
}
// Copies a flat array of 9 doubles per triangle (v0.xyz, v1.xyz, v2.xyz) into
// the global `triangles` array. Overwrites the globals numTriangles/triangles.
extern "C" void SetTriangles(double *tri_coordinates, int nr_triangles)
{
numTriangles = nr_triangles;
triangles = (Triangle *)malloc(sizeof(Triangle)*nr_triangles);
for(int i = 0; i < nr_triangles; ++i)
{
const double *base = tri_coordinates + i*9;
for(int c = 0; c < 3; ++c)
{
triangles[i].v0[c] = base[c];
triangles[i].v1[c] = base[3 + c];
triangles[i].v2[c] = base[6 + c];
}
}
}
// extern "C" void printTriangles()
// Debug helper: dumps every triangle's three vertices to stdout.
void printTriangles()
{
for(int i = 0; i < numTriangles; i++)
{
double *verts[3] = { triangles[i].v0, triangles[i].v1, triangles[i].v2 };
for(int v = 0; v < 3; v++)
{
printf("Triangle:%d %lf %lf %lf \n", i, verts[v][0], verts[v][1], verts[v][2]);
}
}
}
// Records the scene's axis-aligned bounds ({min, max} per axis) into the
// global boundingBox used to build the acceleration grid.
extern "C" void SetBoundingBox(double *x, double *y, double *z)
{
boundingBox.xmin = x[0];
boundingBox.xmax = x[1];
boundingBox.ymin = y[0];
boundingBox.ymax = y[1];
boundingBox.zmin = z[0];
boundingBox.zmax = z[1];
}
/*************** CPU VERSION OF IS IN SURFACE *****************/
// CPU reference of TriangleCount_GPU: fills the triangle-major overlap matrix
// (row = triangle, column = cube) and counts overlapping triangles per cube.
void TriangleCount(Triangle *triangles, Cube *cubes, int numCubes, int numTriangles, bool *CubeTriMapp)
{
for(int cube = 0; cube < numCubes; cube++)
{
int hits = 0;
for(int tri = 0; tri < numTriangles; tri++)
{
const int overlaps = triBoxOverlap(cubes[cube].center, cubes[cube].boxhalfsize, triangles[tri]);
CubeTriMapp[tri*numCubes + cube] = (overlaps > 0);
hits += overlaps;
}
cubes[cube].counter = hits;
}
}
// CPU reference of appendALL: compacts each cube's overlapping triangle
// indexes (from the CubeTriMapp bit matrix) into cubes[].triangleIndexes,
// leaving cubes[].index as the entry count. Each slot is written and only
// kept (cursor advanced) when the overlap bit is set.
void appendALL_CPU(Cube *cubes,int numCubes,int numTriangles, bool *CubeTriMapp, Triangle *triangles)
{
for(int cube = 0; cube < numCubes; ++cube)
{
int next = 0;
for(int tri = 0; tri < numTriangles; ++tri)
{
cubes[cube].triangleIndexes[next] = tri;
if(CubeTriMapp[tri*numCubes + cube])
{
next += 1;
}
}
cubes[cube].index = next;
}
}
// CPU reference of firstCube: records, per ray, the first cube (in linear
// order) whose x/z footprint contains the ray origin below its top face,
// then stops searching for that ray.
void firstCube_CPU(Cube *cubes, Ray *rays, Triangle *triangles,int numRays, int numCubes)
{
for(int r = 0; r < numRays; ++r)
{
rays[r].index = 0;
for(int c = 0; c < numCubes; ++c)
{
if(rayBoxIntersection_yaxis(rays[r].orig, cubes[c]))
{
rays[r].intersectedCubes[rays[r].index] = c;
rays[r].index += 1;
break;
}
}
}
}
// CPU counterpart of IsInPlaneGPU: marks ray origins that lie ON the surface.
// The increment j += 1 + counter*listLength leaves the loop immediately after
// the first matching triangle (branch-free early exit).
void IsInPlane(Ray *rays, Triangle *tri, bool *isInside, int nr_rays, Cube *cubes)
{
const double epsilon = 1.e-10;
for(int rayIndex=0; rayIndex<nr_rays; rayIndex+=1)
{
if(rays[rayIndex].index > 0)
{
int counter = 0;
for(int j=0; j<cubes[rays[rayIndex].intersectedCubes[0]].index;)
{
counter+=(int)(PointIsInSurface(rays[rayIndex], tri[cubes[rays[rayIndex].intersectedCubes[0]].triangleIndexes[j]], epsilon)==true);
j+=1+counter*cubes[rays[rayIndex].intersectedCubes[0]].index;
}
isInside[rayIndex]=(counter>0);
}
}
}
// CPU counterpart of findUnique: ORs the overlap bits of one y-column into
// its top cube `end`, then counts the distinct triangles into cubes[end].index.
// NOTE(review): cubes[end].index is accumulated without a reset here; relies
// on the in-class `index = 0` initializer (single pass only) -- confirm.
void findUnique_CPU(Cube *cubes, int end, int cubesPerBlock, bool *CubeTriMapp, int numTriangles, int numCubes)
{
int start = end -(cubesPerBlock-1);
for(int triIndex=0; triIndex<numTriangles; triIndex+=1)
{
for(int cubeIndex=start; cubeIndex<end; cubeIndex++)
{
CubeTriMapp[triIndex*(numCubes)+(end)]|=CubeTriMapp[triIndex*(numCubes)+cubeIndex];
}
cubes[end].index+=(int)CubeTriMapp[triIndex*(numCubes)+(end)];
}
}
// CPU counterpart of appendTriangles: rebuilds the compacted triangle-index
// list of each front-grid (column-top) cube from the OR-reduced overlap
// matrix. Each slot is written and only kept when the overlap bit is set.
void appendTriangles_CPU(Cube *cubes, int *indexes, int frontGrid, int numTriangles, int numCubes, bool *CubeTriMapp )
{
for(int i = 0; i < frontGrid; ++i)
{
const int cube = indexes[i];
int next = 0;
for(int tri = 0; tri < numTriangles; ++tri)
{
cubes[cube].triangleIndexes[next] = tri;
if(CubeTriMapp[tri*numCubes + cube])
{
next += 1;
}
}
cubes[cube].index = next;
}
}
// void IsInPlane(Ray *rays, Triangle *tri, bool *isInside, int nr_rays, int nr_triangles, double epsilon)
// {
// for(int rayIndex=0; rayIndex<nr_rays; rayIndex+=1)
// {
// if(rays[rayIndex].index > 0)
// {
// int counter = 0;
// for(int j=0; j<nr_triangles; j=j+1)
// {
// counter+=(PointIsInSurface(rays[rayIndex], tri[j], epsilon)==true);
// // counter+=(IsInPlaneTest(rays[rayIndex], tri[j], epsilon)==true);
// // counter+=(SameSide(rays[rayIndex], tri[j])==true);
// }
// isInside[rayIndex]=(counter>0);
// }
// }
// }
// CPU counterpart of getIntersectedCubes_GPU: records, per ray, the
// front-grid cubes whose x/z footprint contains the ray origin.
// NOTE(review): Ray::intersectedCubes holds only 4 entries; more than 4 hits
// would write out of bounds -- confirm the geometry guarantees this.
void getIntersectedCubes(Ray *rays, Cube *cubes, int numrays, int *indexes, Cube boundingBox, int frontGrid)
{
for(int index = 0; index<numrays; index+=1)
{
rays[index].index = 0;
for(int frontGridIndex=0; frontGridIndex<frontGrid; frontGridIndex++)
{
int intersects = (int)rayBoxIntersection_yaxis(rays[index].orig, cubes[indexes[frontGridIndex]]);
rays[index].intersectedCubes[rays[index].index] = indexes[frontGridIndex];
rays[index].index+=1*intersects;
}
}
}
// CPU counterpart of ray_tracingGPU: crossing-number (parity) containment
// test against the triangles of the ray's FIRST recorded cube; an odd
// intersection count marks the origin as inside. Rays with no recorded cube
// leave results[] untouched.
void ray_tracing(Ray *ray, Triangle *tri, bool *results, int n_r, int n_tr, Cube *cubes)
{
for(int rayIndex = 0; rayIndex <n_r; rayIndex+=1)
{
int count = 0;
if(ray[rayIndex].index > 0)
{
for (int j = 0; j<cubes[ray[rayIndex].intersectedCubes[0]].index; j=j+1)
{
count += (int)rayTriangleIntersect(ray[rayIndex],tri[cubes[ray[rayIndex].intersectedCubes[0]].triangleIndexes[j]]);
}
results[rayIndex] = (count % 2 !=0);
}
}
}
/*************** END OF CPU VERSION OF IS IN SURFACE *****************/
// extern "C" void RayTracingGPU(int *numRays, int *numTriangles, bool *isInside, bool *isInPlane){
extern "C" bool *RayTracingGPU(int *numRays_, int *numTriangles){
// bool GPU = true;
bool GPU = false;
int nr_rays = *numRays_;
int nr_triangles = *numTriangles;
Point3D upper = {boundingBox.xmax, boundingBox.ymax, boundingBox.zmax};
Point3D lower = {boundingBox.xmin, boundingBox.ymin, boundingBox.zmin};
double delta = ((upper.x-lower.x) * (upper.z-lower.z)) / 500;
delta = sqrt(delta);
//number of cubes per each block
int cubesPerBlock = ceil((upper.y - lower.y)/delta);
int num = getNumberOfCubes(lower, upper,delta);
Cube *cubes_d;
if (GPU)
cudaMallocManaged((void**)&cubes_d, sizeof(Cube)*num);
else
cubes_d = (Cube *)malloc(sizeof(Cube)*num);
Index frontGrid;
// createGrid3D(cubes, lower, upper, delta,frontGrid);
createGrid3D(cubes_d, lower, upper, delta,frontGrid);
bool *CubeTriMapp = (bool *)malloc(sizeof(bool)*nr_triangles*num);
int frontGridSize = frontGrid.count;
int *indexes = new int[frontGrid.count];
//Copy indexes in array
for(int i = 0; i<frontGrid.count; i++)
{
indexes[i] = frontGrid.ptr[i];
}
printf("Num cubes: %d \n",num);
printf("Num Triangles: %d \n",nr_triangles);
printf("Num Rays: %d \n",nr_rays);
const double epsilon = 1.e-15 * (boundingBox.xmax - boundingBox.xmin);
/******************* Device memory allocation ********************/
Triangle *tri_d;
MY_CUDA_CHECK(cudaMalloc((void**)&tri_d, sizeof(Triangle)*nr_triangles));
MY_CUDA_CHECK(cudaMemcpy(tri_d, triangles,sizeof(Triangle)*nr_triangles,cudaMemcpyHostToDevice));
Ray *ray_d;
MY_CUDA_CHECK(cudaMalloc((void**)&ray_d, sizeof(Ray)*nr_rays));
MY_CUDA_CHECK(cudaMemcpy(ray_d, rays, sizeof(Ray)*nr_rays, cudaMemcpyHostToDevice));
bool *CubeTriMapp_d;
MY_CUDA_CHECK(cudaMalloc((void**)&CubeTriMapp_d, sizeof(bool)*num*nr_triangles));
MY_CUDA_CHECK(cudaMemcpy(CubeTriMapp_d, CubeTriMapp,sizeof(bool)*num*nr_triangles , cudaMemcpyHostToDevice));
int *cubesPerBlock_d;
MY_CUDA_CHECK(cudaMalloc((void**)&cubesPerBlock_d, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(cubesPerBlock_d, &cubesPerBlock, sizeof(int),cudaMemcpyHostToDevice));
int *frontGridSize_d;
MY_CUDA_CHECK(cudaMalloc((void**)&frontGridSize_d, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(frontGridSize_d, &frontGridSize, sizeof(int),cudaMemcpyHostToDevice));
int *indexes_d;
MY_CUDA_CHECK(cudaMalloc((void**)&indexes_d, sizeof(int)*frontGridSize));
MY_CUDA_CHECK(cudaMemcpy(indexes_d, indexes, sizeof(int)*frontGridSize,cudaMemcpyHostToDevice));
int *numRays_d;
MY_CUDA_CHECK(cudaMalloc((void**)&numRays_d, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(numRays_d, &nr_rays, sizeof(int),cudaMemcpyHostToDevice));
int *nr_triangles_d;
MY_CUDA_CHECK(cudaMalloc((void**)&nr_triangles_d, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(nr_triangles_d, &nr_triangles, sizeof(int),cudaMemcpyHostToDevice));
int *num_cubes_d;
MY_CUDA_CHECK(cudaMalloc((void**)&num_cubes_d, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(num_cubes_d, &num, sizeof(int),cudaMemcpyHostToDevice));
Cube *cube3d_d;
MY_CUDA_CHECK(cudaMalloc((void**)&cube3d_d, sizeof(Cube)));
MY_CUDA_CHECK(cudaMemcpy(cube3d_d, &boundingBox, sizeof(Cube),cudaMemcpyHostToDevice));
int *pointsInside;
MY_CUDA_CHECK(cudaMalloc((void**)&pointsInside, sizeof(int)));
int *countTri;
MY_CUDA_CHECK(cudaMalloc((void**)&countTri, sizeof(int)));
bool *results;
MY_CUDA_CHECK(cudaMalloc((void**)&results, sizeof(bool)*nr_rays*2));
bool *isInside_d;
MY_CUDA_CHECK(cudaMalloc((void**)&isInside_d, sizeof(bool)*nr_rays));
int *triCubMapp_d;
MY_CUDA_CHECK(cudaMalloc((void**)&triCubMapp_d, sizeof(int)*num));
/**************** End of device memory allocation *****************/
isInside = (bool *)malloc(2*nr_rays*sizeof(bool));
isInPlane = (bool *)malloc(nr_rays*sizeof(bool));
/**************** Begining of computations *****************/
if(GPU){
clock_t begin = clock();
TriangleCount_GPU<<<256,512>>>(tri_d, cubes_d, num_cubes_d, nr_triangles_d, CubeTriMapp_d); //Count number of triangles per cube
cudaDeviceSynchronize();
for(int i=0; i<num; i++)
{
MY_CUDA_CHECK(cudaMallocManaged((void**)&cubes_d[i].triangleIndexes, sizeof(int)*cubes_d[i].counter+1));
}
appendALL<<<256,256>>>(cubes_d, num_cubes_d, nr_triangles_d, CubeTriMapp_d,tri_d);
cudaDeviceSynchronize();
firstCube<<<256,256>>>(cubes_d, ray_d, tri_d, numRays_d, num_cubes_d);
cudaDeviceSynchronize();
// exit(0);
IsInPlaneGPU<<<256,256>>>(ray_d, tri_d, results, numRays_d, nr_triangles_d, cubes_d); //Find surface points
cudaDeviceSynchronize();
cudaStream_t streams_u[frontGridSize]; //using cuda streams to parallelise kernel execution
for(int i=0;i<frontGridSize; i++)
{
cudaStreamCreate(&streams_u[i]);
int reference = indexes[i];
int *cubeIndex;
MY_CUDA_CHECK(cudaMalloc((void**)&cubeIndex, sizeof(int)));
MY_CUDA_CHECK(cudaMemcpy(cubeIndex, &reference, sizeof(int),cudaMemcpyHostToDevice));
findUnique<<<8,256,0,streams_u[i]>>>(cubes_d,cubeIndex,cubesPerBlock_d, CubeTriMapp_d, nr_triangles_d, num_cubes_d);//find unique triangles per cube
}
cudaDeviceSynchronize();
for(int i=0;i<frontGridSize; i++)
{
cudaStreamDestroy(streams_u[i]); //Cuda streams must be destroyed at the end of execution
}
for(int i=0; i<frontGridSize; i++)
{
MY_CUDA_CHECK(cudaMalloc((void**)&cubes_d[indexes[i]].triangleIndexes, sizeof(int)*cubes_d[indexes[i]].index));
}
int numThreads = (frontGridSize + 32 - (frontGridSize%32))/2;
appendTriangles<<<2,196>>>(cubes_d,indexes_d, frontGridSize_d,nr_triangles_d, num_cubes_d, CubeTriMapp_d); //append triangles to cubes
cudaDeviceSynchronize();
getIntersectedCubes_GPU<<<32,256>>>(ray_d, cubes_d, numRays_d , indexes_d, cube3d_d, frontGridSize_d); //ray cube intersection
cudaDeviceSynchronize();
ray_tracingGPU<<<256,256>>>(ray_d, tri_d, isInside_d, numRays_d, nr_triangles_d, cubes_d); // ray tracing - ray triangle intersection Moller algorithm
cudaDeviceSynchronize();
clock_t end = clock();
double time_spent = (double)(end-begin)/ CLOCKS_PER_SEC;
MY_CUDA_CHECK(cudaMemcpy(isInside, results, sizeof(bool)*nr_rays,cudaMemcpyDeviceToHost));
MY_CUDA_CHECK(cudaMemcpy(isInPlane, isInside_d, sizeof(bool)*nr_rays,cudaMemcpyDeviceToHost));
// printf("\n time spent: %.10f\n",time_spent);
}
else{
clock_t begin1 = clock();
TriangleCount(triangles, cubes_d, num, nr_triangles, CubeTriMapp);
for(int i=0; i<num; i++)
{
cubes_d[i].triangleIndexes = (int *)malloc(sizeof(int)*cubes_d[i].counter+1);
}
appendALL_CPU(cubes_d,num, nr_triangles, CubeTriMapp, triangles);
firstCube_CPU(cubes_d, rays, triangles, nr_rays, num);
clock_t end1 = clock();
double time_spent1 = (double)(end1-begin1)/ CLOCKS_PER_SEC;
printf(" \n time spent for preprocessing: %.10f\n",time_spent1);
clock_t begin2 = clock();
IsInPlane(rays, triangles, isInside, nr_rays, cubes_d);
clock_t end2 = clock();
double time_spent2 = (double)(end2-begin2)/ CLOCKS_PER_SEC;
printf("time spent for Is In plane: %.10f\n",time_spent2);
for(int i=0;i<frontGridSize; i++)
{
int reference = indexes[i];
findUnique_CPU(cubes_d, reference, cubesPerBlock, CubeTriMapp, nr_triangles, num);
}
for(int i=0; i<frontGridSize; i++)
{
cubes_d[indexes[i]].triangleIndexes =(int *)malloc(sizeof(int)*cubes_d[indexes[i]].index);
}
clock_t begin3 = clock();
appendTriangles_CPU(cubes_d, indexes, frontGridSize, nr_triangles, num, CubeTriMapp);
getIntersectedCubes(rays, cubes_d, nr_rays, indexes, boundingBox, frontGridSize);
clock_t end3 = clock();
double time_spent3 = (double)(end3-begin3)/ CLOCKS_PER_SEC;
printf("time spent for pre-raytracing: %.10f\n",time_spent3);
clock_t begin4 = clock();
ray_tracing(rays, triangles, isInPlane, nr_rays, nr_triangles, cubes_d);
clock_t end4 = clock();
double time_spent4 = (double)(end4-begin4)/ CLOCKS_PER_SEC;
printf("time spent for raytracing: %.10f\n",time_spent4);
}
/**************** End of Computations*****************/
for(int i=0; i<nr_rays;i++){
isInside[i+nr_rays]= isInPlane[i]; //Copying the results to one unique array of size 2 x number of rays
}
for(int i=0; i<nr_rays;i++){
if (isInside[i]==true) isInside[nr_rays+i]= false; //Removing redundance
}
// int counter=0;
// for(int i=0; i<nr_rays;i++){
// if (isInside[i]==true) counter+=1;
// }
// printf("Counter C: %d \n",counter);
// FILE *fptr;
// // use appropriate location if you are using MacOS or Linux
// if (GPU)
// fptr = fopen("/home/diko/Desktop/programGPU.txt","w");
// else
// fptr = fopen("/home/diko/Desktop/programCPU.txt","w");
// if(fptr == NULL)
// {
// printf("Error!");
// exit(1);
// }
// for(int i=0; i<nr_rays; i++){
// if (isInside[i]==true) fprintf(fptr,"%d\n",i);
// }
// fclose(fptr);
// printf("Ray: %lf %lf %lf",rays[20182].orig[0],rays[20182].orig[1],rays[20182].orig[2]);
free(rays);
free(triangles);
free(isInPlane);
cudaFree(ray_d);
cudaFree(tri_d);
cudaFree(results);
cudaFree(pointsInside);
cudaFree(cube3d_d);
cudaFree(cubes_d);
cudaFree(cubesPerBlock_d);
cudaFree(indexes_d);
cudaFree(frontGridSize_d);
cudaFree(numRays_d);
cudaFree(triCubMapp_d);
cudaFree(isInside_d);
return isInside;
}
|
13,612 | #include <stdio.h>
// Lock-free parallel insertion of the n values in x[] into a binary search tree.
// Tree layout: child[2*k] / child[2*k+1] are the left/right slots of node k.
// A slot holds -1 (empty), -2 (locked by an inserting thread), or an inserted
// value; inserted values also serve as node indices for further descent (the
// code compares x[...] against `temp`, the value read from a slot, and then
// indexes child[2*temp + ...] with it).
// *root holds the value used to decide the direction taken from node 0.
__global__ void build_binary_tree(int *x, int *child, int *root, int n)
{
    int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
    int stride = blockDim.x*gridDim.x;
    int offset = 0;   // grid-stride offset (redundant second "offset = 0;" removed)
    bool newBody = true;
    int rootValue = *root;
    int childPath;    // 0 = left, 1 = right
    int temp;         // current node while descending
    // Grid-stride loop: each thread inserts x[bodyIndex + offset] until all
    // n elements are consumed.
    while((bodyIndex + offset) < n){
        if(newBody){
            // Starting a fresh insertion: descend from the root (node 0).
            newBody = false;
            temp = 0;
            childPath = 0;
            if(x[bodyIndex + offset] > rootValue){
                childPath = 1;
            }
        }
        int childIndex = child[temp*2 + childPath];
        // Walk down until an empty (-1) or locked (-2) slot is reached.
        while(childIndex >= 0){
            temp = childIndex;
            childPath = 0;
            if(x[bodyIndex + offset] > temp){
                childPath = 1;
            }
            childIndex = child[2*temp + childPath];
        }
        if(childIndex != -2){
            // Try to lock the empty slot (-1 -> -2); on success publish the
            // value, which simultaneously releases the lock.  On a lost race
            // the outer loop re-reads the slot and retries from `temp`.
            int locked = temp*2 + childPath;
            if(atomicCAS(&child[locked], childIndex, -2) == childIndex){
                if(childIndex == -1){
                    child[locked] = x[bodyIndex + offset];
                }
                offset += stride;
                newBody = true;
            }
        }
        // BUGFIX: a __syncthreads() used to sit here.  Threads leave this
        // while-loop after different iteration counts, so a block-wide
        // barrier at this point is divergent and therefore undefined
        // behavior.  The algorithm needs no barrier: slots are claimed with
        // atomicCAS and contended inserts simply retry.
    }
}
// Host driver: shuffles the values 1..n, inserts them into a device-side
// binary tree with build_binary_tree, and prints the child-slot table.
int main(){
    int n = 32;
    int *h_x;     // host values to insert
    int *d_x;     // device copy of the values
    int *h_root;
    int *d_root;
    int *h_child; // host copy of the child-slot table
    int *d_child;
    // allocate memory: 2*(n+1) child slots (left/right per possible node)
    h_x = (int*)malloc(n*sizeof(int));
    h_root = (int*)malloc(sizeof(int));
    h_child = (int*)malloc(2*(n+1)*sizeof(int));
    cudaMalloc((void**)&d_root, sizeof(int));
    cudaMalloc((void**)&d_x, n*sizeof(int));
    cudaMalloc((void**)&d_child, 2*(n+1)*sizeof(int));
    // byte pattern 0xFF is -1 for two's-complement int: every slot starts empty
    cudaMemset(d_child, -1, 2*(n+1)*sizeof(int));
    // fill h_x with 1..n
    for(int i=0;i<n;i++){
        h_x[i] = i+1;
    }
    // Fisher-Yates shuffle.  BUGFIX(portability): rand() (ISO C) replaces the
    // POSIX-only random(); left unseeded, as before, for reproducible runs.
    for(int i=0;i<n;i++){
        int j = rand() % (n-i);
        int temp = h_x[i];
        h_x[i] = h_x[i+j];
        h_x[i+j] = temp;
    }
    *h_root = h_x[0];
    for(int i=0;i<n;i++){
        printf("%d ", h_x[i]);
    }
    printf("\n");
    // copy data to device
    cudaMemcpy(d_root, h_root, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, h_x, n*sizeof(int), cudaMemcpyHostToDevice);
    // kernel call
    build_binary_tree<<< 16, 16>>>(d_x, d_child, d_root, n);
    // BUGFIX: the launch and execution were never checked, so a failed
    // kernel silently printed stale host data.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    err = cudaDeviceSynchronize();
    if(err != cudaSuccess){
        printf("kernel execution failed: %s\n", cudaGetErrorString(err));
    }
    // copy from device back to host
    cudaMemcpy(h_child, d_child, 2*(n+1)*sizeof(int), cudaMemcpyDeviceToHost);
    // print tree slot table
    for(int i=0;i<2*(n+1);i++){
        printf("%d ", h_child[i]);
    }
    printf("\n");
    // free memory
    free(h_x);
    free(h_root);
    free(h_child);
    cudaFree(d_x);
    cudaFree(d_root);
    cudaFree(d_child);
}
|
13,613 | #include <stdio.h>
#include <iostream>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include <time.h>
#define DATA_SIZE 9
#define THREAD_NUM 5
using namespace std;
int anData[DATA_SIZE];
//Generate random data
// Fill pnNumber[0..nSize) with pseudo-random digits in the range [0, 10).
void GenerateNumber(int *pnNumber, int nSize)
{
    for (int nIdx = 0; nIdx < nSize; ++nIdx)
    {
        pnNumber[nIdx] = rand() % 10;
    }
}
//Global function to calculate square kernel: call on a host computer, performed on the equipment
//Device kernel, launched from the host: each of the THREAD_NUM threads sums
//the squares of a contiguous slice of pnNum and writes its partial sum to
//pnResult[tid].  Thread 0 also reports the elapsed device clocks in
//*pclock_ttime.
__global__ static void sumofSquares(int *pnNum, int* pnResult, clock_t *pclock_ttime)
{
    int tid = threadIdx.x;
    int nSum = 0;
    int i;
    int nSize = 0;
    // elements per thread, rounded up when DATA_SIZE % THREAD_NUM != 0
    if (DATA_SIZE % THREAD_NUM) nSize = DATA_SIZE / THREAD_NUM + 1;
    else nSize = DATA_SIZE / THREAD_NUM;
    //start counting (thread 0 times the whole kernel)
    clock_t clock_tstart;
    if (tid == 0) clock_tstart = clock();
    // BUGFIX: when DATA_SIZE is not a multiple of THREAD_NUM the last
    // thread's slice ran past the end of pnNum (e.g. 9 elements over 5
    // threads made tid 4 read index 9); clamp the bound to DATA_SIZE.
    for (i = nSize * tid; i < (tid + 1) * nSize && i < DATA_SIZE; i++)
    {
        nSum += (pnNum[i] * pnNum[i]);
    }
    pnResult[tid] = nSum;
    //end: elapsed clocks measured on thread 0 only
    if (tid == 0) *pclock_ttime = clock() - clock_tstart;
}
// Host driver: generates DATA_SIZE random values, computes the sum of their
// squares on the GPU with one block of THREAD_NUM threads, reduces the
// per-thread partials on the CPU, and prints the result and device clocks.
int main(void)
{
    GenerateNumber(anData, DATA_SIZE); //Generate random data
    int *pnGpuData, *pnResult;
    clock_t *pclock_ttime; //device-side timing result
    int *nSummat;          //pinned host buffer for the per-thread partial sums
    cudaMallocHost((void**)&nSummat, sizeof(int) * THREAD_NUM);
    cudaMalloc((void**)&pnGpuData, sizeof(int) * DATA_SIZE);
    cudaMalloc((void**)&pnResult, sizeof(int) * THREAD_NUM);
    cudaMalloc((void**)&pclock_ttime, sizeof(clock_t));
    cudaMemcpy(pnGpuData, anData, sizeof(int)*DATA_SIZE, cudaMemcpyHostToDevice);
    sumofSquares <<< 1, THREAD_NUM, 0 >>>(pnGpuData, pnResult, pclock_ttime);
    // the blocking copies below also synchronize with the kernel
    cudaMemcpy(nSummat, pnResult, sizeof(int) * THREAD_NUM, cudaMemcpyDeviceToHost);
    clock_t pclocksum;
    cudaMemcpy(&pclocksum, pclock_ttime, sizeof(clock_t), cudaMemcpyDeviceToHost);
    // final reduction of the partial sums on the CPU
    int finishsum = 0;
    for (size_t i = 0; i < THREAD_NUM; i++)
    {
        finishsum = finishsum + nSummat[i];
    }
    // BUGFIX: clock_t is not guaranteed to be int, so "%d" was a broken
    // format specifier; cast to long and print with "%ld".
    printf("SuM = %d Time = %ld\n", finishsum, (long)pclocksum);
    cudaFree(pnGpuData);
    cudaFree(pnResult);
    cudaFree(pclock_ttime);
    cudaFreeHost(nSummat); // BUGFIX: the pinned allocation was never released
    system("pause");
    //return 0;
}
13,614 | /*
thrust::device_vector<float> td_A(nr_rows_A * nr_cols_A), td_B(nr_rows_B * nr_cols_B), td_C(nr_rows_C * nr_cols_C);
float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
// Allocate 3 arrays on GPU
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float)); cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float)); cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float));
// Fill the arrays A and B on GPU with random numbers
GPU_fill_rand(d_A, nr_rows_A, nr_cols_A);
GPU_fill_rand(d_B, nr_rows_B, nr_cols_B);
GPU_fill_rand(thrust::raw_pointer_cast(&td_A[0]), nr_rows_A, nr_cols_A);
GPU_fill_rand(thrust::raw_pointer_cast(&td_B[0]), nr_rows_B, nr_cols_B);
*/
/*
// Optionally we can print the data
std::cout << "A =" << std::endl;
print_matrix(td_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
print_matrix(td_B, nr_rows_B, nr_cols_B);
// Optionally we can copy the data back on CPU and print the arrays
cudaMemcpy(h_A,thrust::raw_pointer_cast(&td_A[0]),nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(h_B,thrust::raw_pointer_cast(&td_B[0]),nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "A =" << std::endl;
print_matrix(h_A, nr_rows_A, nr_cols_A);
std::cout << "B =" << std::endl;
print_matrix(h_B, nr_rows_B, nr_cols_B);
*/
/*
//Print the result
std::cout << "C =" << std::endl;
print_matrix(td_C, nr_rows_C, nr_cols_C);
// Copy (and print) the result on host memory
cudaMemcpy(h_C,thrust::raw_pointer_cast(&td_C[0]),nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost);
std::cout << "C =" << std::endl;
print_matrix(h_C, nr_rows_C, nr_cols_C);
//Free GPU memory
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
// Free CPU memory
free(h_A); free(h_B); free(h_C);
*/
// Multiply A and B on GPU
//gpu_blas_mmul(handle, tmA.devicePointer(), tmB.devicePointer(), tmC.devicePointer(), tmA.numRows(), tmA.numColumns(), tmB.numColumns());
//gpu_blas_mmul(handle, thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_C[0]), nr_rows_A, nr_cols_A, nr_cols_B);
/* float constalpha = 1;
float constbeta = 0;
unsigned int newChunk = 4, oldChunk = 6, size = 5;
cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, newChunk, size, &constalpha,
thrust::raw_pointer_cast(&W1[0]), oldChunk, &constbeta,
thrust::raw_pointer_cast(&W1[0]), oldChunk,
thrust::raw_pointer_cast(&W2[0]), newChunk);
*/
/* // summing up columns
thrust::device_vector<float> x(M);
thrust::fill(x.begin(), x.end(), 1);
cublasSgemv(handle, CUBLAS_OP_N, tmB.numRows(), tmB.numColumns(), &alpha, tmB.devicePointer(), tmB.numRows(),
thrust::raw_pointer_cast(&x[0]), 1, &beta,
thrust::raw_pointer_cast(&y[0]), 1);
*/
//tmC.multiplyByConstant(10.0);
//tmC.printBlasMajor(); // correct p
//tmC.printRowMajor();
/*
//C = alpha*op(A)*op(B) + beta*C
void matrixMatrixMultiply( cublasHandle_t &handle, float alpha, cublasOperation_t operationOnA, ThrustMatrix &A,
cublasOperation_t operationOnB, ThrustMatrix &B, float beta, ThrustMatrix &C ) {
if (A.numColumns() != B.numRows()) {
cout << "k does not match for matrix A and B, exiting\n"; return;
}
if (beta !=0 and !(C.numRows() == A.numRows() and C.numColumns() == B.numColumns())) {
cout << "size mismatch in C, exiting\n"; return;
}
// cublasSgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k,
// float *alpha, float *A, int lda, float *B, int ldb, float *beta, float *C, int ldc)
unsigned int m = A.numRows(), n = B.numColumns(), k = A.numColumns();
unsigned int &lda = m, &ldb = k, &ldc = m;
if (operationOnA == CUBLAS_OP_T) {
m = A.numColumns(); k = A.numRows();
}
if (operationOnB == CUBLAS_OP_T) {
m = A.numRows(); n = B.numRows();
}
//if (beta == 0)
C.resize(m, n);
cublasSgemm(handle, operationOnA, operationOnB, m, n, k, &alpha, A.devicePointer(), lda, B.devicePointer(), ldb, &beta, C.devicePointer(), ldc);
}
*/
/*
void matrixVectorMultiply( cublasHandle_t &handle, float alpha, cublasOperation_t operationOnA, ThrustMatrix &A,
float beta, thrust::device_vector<float> &x ) {
cublasSgemv(handle, operationOnA, A.numRows(), A.numColumns(), &alpha, A.devicePointer(), A.numRows(), thrust::raw_pointer_cast(x.data()), 1, &beta,
float *y, int incy)
}
cublasStatus_t cublasSgemv(cublasHandle_t handle, cublasOperation_t trans,
int m, int n,
const float *alpha,
const float *A, int lda,
const float *x, int incx,
const float *beta,
float *y, int incy)
y = alpha*op(A)*x + beta*y;
*/
/*
//cout << "A in row major =" << endl;
//tmA.printRowMajor();
//cout << "A in col major (what blas sees) =" << endl;
//tmA.printBlasMajor();
//cout << "B in col major =" << endl;
//tmB.printBlasMajor();
float alpha = 1.0/M, beta = 0;
//int m = ; n; k;
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, tmA.numRows(), tmB.numRows(), tmA.numColumns(), &alpha, tmA.devicePointer(), tmA.numRows(), tmB.devicePointer(), tmB.numRows(), &beta, tmC.devicePointer(), tmA.numRows());
*/
/* unsigned int oldchunkSize = R+K-1;
thrust::device_vector<float> U(ZTable.size() * oldchunkSize); // M is by definition equal to ZTable, or the list of data values, normalized
thrust::counting_iterator<unsigned int> countBegin(0); thrust::counting_iterator<unsigned int> countEnd = countBegin + U.size();
// generate B for k=1 (root case)
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin())),
thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end())),
RootCaseBSplineFunctor<float>(TTable, ZTable, oldchunkSize));
//printDeviceVector(U);
print_matrix_rowMajor<float>(TTable, R+K);
print_matrix_rowMajor<float>(U, oldchunkSize);
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin(), U.begin()+1, V.begin())),
thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end()-1, U.end(), V.end())),
BSplineFunctor<T>(TTable, ZTable, oldchunkSize, tempK));
*/
/*
for (map<string, thrust::device_vector<float> >::iterator it = listOfDeviceVectors.begin(); it != listOfDeviceVectors.end(); ++it) {
cout << it->first << ": " << it->second.size() << endl;
}
cout << "values for YPR199C:\t\t"; printDeviceVector<float>(listOfDeviceVectors["YPR199C"]);
*/
|
13,615 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define N 1024
// Thin wrapper: blocking copy of `size` bytes from host memory `src` to
// device memory `dest`.  The cudaMemcpy status is discarded, so callers get
// no error indication (NOTE(review): consider checking the return code).
void copyH2D(void* dest,void* src,std::size_t size){
cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice);
}
// Thin wrapper: blocking copy of `size` bytes from device memory `src` back
// to host memory `dest`.  The cudaMemcpy status is discarded, so callers get
// no error indication (NOTE(review): consider checking the return code).
void copyD2H(void* dest, void* src, std::size_t size) {
cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost);
}
// Element-wise vector addition c = a + b over N ints, one element per thread.
// The guard protects the tail when the grid provides more than N threads.
__global__ void device_add(int *a,int *b,int *c){
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) return;
    c[gid] = a[gid] + b[gid];
}
// Assign `val` to every one of the N entries of `data`.
void fill_arr(int *data,int val){
    for (int idx = N; idx-- > 0; ) {
        data[idx] = val;
    }
}
// Print one "a+b=c" line for each of the N element triples.
void print_equation(int *a,int *b,int *c){
    for (int idx = 0; idx < N; ++idx) {
        std::cout << a[idx] << "+" << b[idx] << "=" << c[idx] << '\n';
    }
}
// Driver: adds two N-element int arrays on the GPU and prints each equation.
int main(void){
int *a,*b,*c;        // pinned host arrays
int *d_a,*d_b,*d_c;  // device arrays
int int_size = N * sizeof(int);
//allocate host memory
/*
a = (int *)malloc(int_size);
fill_arr(a,1);
b = (int *)malloc(int_size);
fill_arr(b,2);
c = (int *)malloc(int_size);
*/
// pinned (page-locked) allocations, replacing the plain malloc above
cudaMallocHost(&a,int_size);
fill_arr(a,1);
cudaMallocHost(&b,int_size);
fill_arr(b,2);
cudaMallocHost(&c,int_size);
//allocate device memory
cudaMalloc(&d_a,int_size);
cudaMalloc(&d_b,int_size);
cudaMalloc(&d_c,int_size);
copyH2D(d_a, a, int_size);
copyH2D(d_b, b, int_size);
int blocks = 8;
// Without the cast, N/blocks would be integer division and the fractional
// part would be dropped.  (Translated from the original Japanese comment.)
int threads = std::ceil(static_cast<double>(N)/blocks);
device_add<<<blocks,threads>>>(d_a,d_b,d_c);
// the blocking copy also synchronizes with the kernel
copyD2H(c, d_c, int_size);
print_equation(a,b,c);
//free host memory
cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c);
//free(a);free(b);free(c);
//free device memory
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
13,616 | /*
NOTE: build with nvcc. run setup.bat first
can rename file with -o option
Can profile with
nvprof .\simple.exe
I had an issue with the profiler when I ran command line not as an admin.
Had an error that mentioned something about permission and users
https://developer.nvidia.com/nvidia-development-tools-solutions-ERR_NVGPUCTRPERM-permission-issue-performance-counters
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// Grid-stride element-wise addition: Y[i] = X[i] + Y[i] for i in [0, N).
// Correct for any launch configuration, including a single small block.
__global__
void Add(int N, float* X, float* Y)
{
    int Stride = blockDim.x * gridDim.x;
    for(int Idx = blockIdx.x * blockDim.x + threadIdx.x; Idx < N; Idx += Stride)
    {
        Y[Idx] += X[Idx];
    }
}
// Host driver: fills two 1M-element managed arrays, adds them on the GPU,
// and verifies every element equals 3.0f.
int main(void)
{
    int N = 1 << 20; // 1M elements
    float* X = NULL;
    float* Y = NULL;
    // unified (managed) memory: accessible from both host and device
    cudaMallocManaged(&X, N * sizeof(float));
    cudaMallocManaged(&Y, N * sizeof(float));
    for(int Index = 0; Index < N; Index++)
    {
        X[Index] = 1.0f;
        Y[Index] = 2.0f;
    }
    int BlockSize = 256;
    int NumBlocks = (N + BlockSize - 1) / BlockSize; // ceil-div
    Add<<<NumBlocks, BlockSize>>>(N, X, Y);
    // BUGFIX: launch-configuration errors were silent before
    cudaError_t Err = cudaGetLastError();
    if(Err != cudaSuccess)
    {
        printf("Launch failed: %s\n", cudaGetErrorString(Err));
    }
    cudaDeviceSynchronize();
    // 1.0f + 2.0f is exact in float, so exact comparison is valid here
    float ExpectedValue = 3.0f;
    for(int Index = 0; Index < N; Index++)
    {
        if(Y[Index] != ExpectedValue)
        {
            printf("Y has value %f at %d\n", Y[Index], Index);
        }
    }
    // BUGFIX: the managed allocations were leaked
    cudaFree(X);
    cudaFree(Y);
    printf("Complete\n");
    return 0;
}
13,617 | #include "includes.h"
// Sort each diagonal block's eigenvalues into ascending order (bubble sort)
// while swapping the matching eigenvector columns in step.  One thread owns
// one block: blocknums[b] is the block's first column index, blocksizes[b]
// its width.  eigenvectors is addressed as element (row, col) =
// eigenvectors[row * N + col].
// NOTE(review): there is no bounds guard on blockNumber — callers must
// launch exactly one thread per entry of blocknums/blocksizes.
__global__ void blockEigSort( float *eigenvalues, float *eigenvectors, int *blocknums, int *blocksizes, int N ) {
    int blockNumber = blockIdx.x * blockDim.x + threadIdx.x;
    int startspot = blocknums[blockNumber];
    int endspot = startspot + blocksizes[blockNumber] - 1;
    // Bubble sort for now, thinking blocks are relatively small.
    // BUGFIX: the original inner loop ran j < i, which performs too few
    // adjacent comparisons and leaves some inputs unsorted (e.g. {3,2,1}
    // ended as {2,1,3}).  A full pass over [startspot, endspot) per outer
    // iteration guarantees the block is sorted after endspot-startspot passes.
    for( int pass = startspot; pass < endspot; pass++ ) {
        for( int j = startspot; j < endspot; j++ ) {
            if( eigenvalues[j] > eigenvalues[j + 1] ) {
                float tmp = eigenvalues[j];
                eigenvalues[j] = eigenvalues[j + 1];
                eigenvalues[j + 1] = tmp;
                // Swap eigenvector columns j and j+1 row by row.
                // BUGFIX(idiom): the row counter used to be named `i`,
                // shadowing the outer sort index — renamed to `row`.
                for( int row = 0; row < N; row++ ) {
                    tmp = eigenvectors[row * N + j];
                    eigenvectors[row * N + j] = eigenvectors[row * N + j + 1];
                    eigenvectors[row * N + j + 1] = tmp;
                }
            }
        }
    }
}
13,618 | // to avoid highlight problems
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h> // imported for rand() which generates a number between 0 & RAND_MAX
#include <time.h> // imported for the time() function and also the clock function
#include <limits> // for a large value
#include <cmath> // for exponentiation
using namespace std;
// For each point, store in closestPoint[idx] the index of the nearest OTHER
// point by squared Euclidean distance in the x/y plane (z is not used in the
// distance).  Brute force: each thread scans all numberPoints candidates.
__global__ void FindClosestPoint(float3 *points, int *closestPoint, const int numberPoints)
{
    // flat global thread index: one thread owns one query point
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX(robustness): guard the grid tail so a launch whose thread count
    // exceeds numberPoints cannot read or write out of bounds.
    if (idx >= numberPoints) return;
    int distanceBetweenPoints = 9999999, tempDistance = 0;
    for (int j = 0; j < numberPoints; j++)
        if (idx != j) // dont check the distance between the point and itself
        {
            // BUGFIX(idiom): squaring via double-precision pow() is needlessly
            // slow in device code; plain float multiplies yield the same
            // integral values for these coordinate magnitudes (< 1000).
            float dx = points[idx].x - points[j].x;
            float dy = points[idx].y - points[j].y;
            tempDistance = (int)(dx * dx + dy * dy);
            if (tempDistance < distanceBetweenPoints)
            {
                distanceBetweenPoints = tempDistance;
                closestPoint[idx] = j;
            }
        }
}
// Host driver: generates random 3-D points, finds each point's nearest
// neighbour (x/y only) on the GPU, and reports the kernel's elapsed time.
int main()
{
    srand(time(NULL)); // used to initialize the seed for the random number generator
    const int numberPoints = 1000;
    clock_t startTime, endTime;
    float3 *points = new float3[numberPoints];
    float3 *pointsDeviceCopy;
    int *closestPointDevice, *closestPoint = new int[numberPoints];
    // initialize the points with random coordinates in [0, 1000)
    for (int i = 0; i < numberPoints; i++)
    {
        points[i].x = rand() % 1000;
        points[i].y = rand() % 1000;
        points[i].z = rand() % 1000;
    }
    // print the points initialized
    for (int i = 0; i < numberPoints; i++)
        cout << points[i].x << "\t" << points[i].y << "\t" << points[i].z << endl;
    cout << endl;
    // initialize memory in the GPU for calculation
    if (cudaMalloc(&pointsDeviceCopy, sizeof(float3) * numberPoints) != cudaSuccess)
    {
        cout << "Couldn't initialize memory in the GPU for pointsDeviceCopy" << endl;
        delete[] points;
        delete[] closestPoint;
        return 0;
    }
    if (cudaMalloc(&closestPointDevice, sizeof(int) * numberPoints) != cudaSuccess)
    {
        cout << "Couldn't initialize memory in the GPU for closestPointDevice" << endl;
        cudaFree(pointsDeviceCopy);
        delete[] points;
        delete[] closestPoint;
        return 0;
    }
    if (cudaMemcpy(pointsDeviceCopy, points, sizeof(float3) * numberPoints, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "Could not copy points to pointsDeviceCopy" << endl;
        cudaFree(pointsDeviceCopy);
        cudaFree(closestPointDevice);
        delete[] points;
        delete[] closestPoint;
        return 0;
    }
    // now find the distance between all points
    startTime = clock();
    // since a block can have upto 1024 elements, we can use a single block
    FindClosestPoint<<<1, numberPoints>>>(pointsDeviceCopy, closestPointDevice, numberPoints);
    // BUGFIX: a failed launch used to be silent; surface it here
    if (cudaGetLastError() != cudaSuccess)
    {
        cout << "Kernel launch failed!";
        cudaFree(pointsDeviceCopy);
        cudaFree(closestPointDevice);
        delete[] points;
        delete[] closestPoint;
        return 0;
    }
    // BUGFIX: stop the timer after the kernel alone; the measurement used to
    // include the device-to-host copy below
    cudaDeviceSynchronize();
    endTime = clock() - startTime;
    if (cudaMemcpy(closestPoint, closestPointDevice, sizeof(int) * numberPoints, cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        cout << "Could not get the output!";
        cudaFree(pointsDeviceCopy);
        cudaFree(closestPointDevice);
        delete[] points;
        delete[] closestPoint;
        return 0;
    }
    delete[] points;
    delete[] closestPoint;
    cudaFree(closestPointDevice);
    cudaFree(pointsDeviceCopy);
    cout << "Time it took was " << ((float)endTime / CLOCKS_PER_SEC) << endl;
    return 0;
}
|
13,619 | /*
* dfs.cpp
* GSPAN
*
* Created by Jinseung KIM on 09. 07. 19.
* Copyright 2009 KyungHee. All rights reserved.
*
*/
#include "gspan.cuh"
#include <cstring>
#include <string>
#include <iterator>
#include <set>
using namespace std;
// Append edge (vi, vj) with vertex labels li/lj and edge label lij to this
// DFS code.  The first edge keeps its full labels and initializes minLabel
// and maxId; later edges store -1 for labels implied by earlier edges.
// NOTE(review): minLabel is assigned the vertex id vi, not the label li —
// looks like it may be intended to be li; confirm against callers.
void DFSCode::add(int vi,int vj,int li,int lij,int lj)
{
if (nodeCount()==0)
{
push(vi,vj,li,lij,lj); //Push 1st edge to empty DFS_CODE
minLabel = vi;
maxId = vj;
return;
}
if(vi<vj)
{
push(vi,vj,-1,lij,lj);//build DFS_CODE forward (edge discovers new vertex vj)
maxId=vj;
}
else
{
push(vi,vj,-1,lij,-1);//build DFS_CODE backward (edge to an already-seen vertex)
}
}
// Undo the most recent add(): pop the edge and, if it was a forward edge
// (vi < vj, i.e. it had discovered a new vertex), roll back maxId.
void DFSCode::remove(int vi,int vj)
{
pop();
if (vi<vj)
{
--maxId;
}
}
// Rebuild this DFS code from graph g: for every vertex, emit its forward
// root edges (as produced by get_forward_root) with full vertex/edge labels.
void DFSCode::fromGraph(Graph& g){
clear();
EdgeList edges;
for(unsigned int from=0;from<g.size();++from)
{
if(get_forward_root(g,g[from],edges)==false)
continue; // vertex has no forward root edges
for(EdgeList::iterator it = edges.begin();it!=edges.end();++it)
push(from,(*it)->to,g[(*it)->from].label,(*it)->elabel,g[(*it)->to].label);
}
}
bool DFSCode::toGraph(Graph& g) //Convert this DFSCode into a graph.
{
g.clear(); //g is a vector<vertex>: each element is a vertex plus the edges attached to it.
for(DFSCode::iterator it = begin();it != end(); ++it){ //iterate over the DFS code
g.resize(std::max (it->from,it->to) +1); //grow g so it can hold every vertex id referenced so far
if(it->fromlabel != -1) //only assign a vertex label when it is valid
g[it->from].label = it->fromlabel; //
if(it->tolabel != -1)
g[it->to].label = it->tolabel;
g[it->from].push (it->from,it->to,it->elabel);
if(g.directed == false) //mirror each edge for undirected graphs
g[it->to].push (it->to,it->from,it->elabel);
}
g.buildEdge();
return (true);
}
unsigned int DFSCode::nodeCount(void) //count the vertices referenced by this DFS code
{
unsigned int nodecount = 0;
// the vertex count is one more than the largest vertex id used by any edge
for(DFSCode::iterator it = begin();it != end(); ++it)
nodecount = std::max(nodecount,(unsigned int) (std::max(it->from,it->to) + 1));
return (nodecount);
}
// Serialize the DFS code to os: "(fromlabel) elabel (of tolabel)" for the
// first edge, then " elabel (from f tolabel)" for forward edges and
// " elabel (b to)" for backward edges.  Returns os for chaining.
std::ostream& DFSCode::write(std::ostream& os)
{
if(size()==0) return os;
os<<"("<<(*this)[0].fromlabel<<") "<<(*this)[0].elabel<<" (of"<<(*this)[0].tolabel<<")";
for(unsigned int i=1;i<size();++i){
if((*this)[i].from < (*this)[i].to){ // forward edge
os<<" "<<(*this)[i].elabel<<" ("<<(*this)[i].from<<"f"<<(*this)[i].tolabel<<")";
}else{ // backward edge
os<<" "<<(*this)[i].elabel<<" (b"<<(*this)[i].to<<")";
}
}
return os;
}
|
13,620 |
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// Reduce a[0..N) into *gl_max via atomicMax, one element per thread.
// *gl_max must be pre-seeded by the caller with a value <= the true maximum
// (the host seeds it with 0 and the inputs are non-negative).
__global__ void globalMax(int *a, int N, int* gl_max)
{
    // flat global index from blockIdx.x, blockDim.x and threadIdx.x
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {   // guard the grid tail
        atomicMax(gl_max, a[i]);
    }
}
#define THREADS_PER_BLOCK 512
/* Host driver: fills an N-element array with random values in [0, 50),
   ships it to the GPU, launches globalMax, and prints the reduced maximum. */
int main(int argc, char*argv[])
{
    assert(argc == 2);
    int N = atoi(argv[1]);
    assert(N>0 && N<=10000000);
    int size = N * sizeof( int );
    time_t seed;
    srand((unsigned) time(&seed));
    /* device buffers for the input array and the running maximum */
    int *d_a;
    int *d_max;
    cudaMalloc( (void **) &d_a, size );
    cudaMalloc( (void **) &d_max, sizeof(int) );
    /* host input array and the host-side result slot */
    int *a = (int *)malloc( size );
    int cpu_max = 0;
    for( int k = 0; k < N; k++ )
    {
        a[k] = rand() % 50;
    }
    /* ship inputs to the device; the maximum starts at 0 */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_max, &cpu_max, sizeof(int), cudaMemcpyHostToDevice );
    /* one thread per element, rounded up to whole blocks */
    globalMax<<< (N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, N, d_max);
    /* the blocking copy doubles as synchronization with the kernel */
    cudaMemcpy( &cpu_max, d_max, sizeof(int), cudaMemcpyDeviceToHost );
    printf( "global max = %d\n", cpu_max);
    /* clean up */
    free(a);
    cudaFree( d_a );
    cudaFree( d_max );
    return 0;
} /* end main */
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#define XSIZE 1201
#define YSIZE 801
#define RADIUS 100
#define RADSTEP 1
#define ANGLESIZE 36
#define PI 3.141592653589793
// Read a YSIZE x XSIZE integer grid from dat.txt, then for every interior
// pixel compute, per radius and per 5-degree direction, the direction of
// minimal semivariance; write the anisotropy ratio (orthogonal/minimal
// variance) and the azimuth (degrees) for radii 1, 10, 50, 100 to text files.
int main()
{
    FILE *datTxt,*outputAnisotropy00,*outputAnisotropy09,*outputAnisotropy49,*outputAnisotropy99;
    FILE *outputAzimuth00,*outputAzimuth09,*outputAzimuth49,*outputAzimuth99;
    // BUGFIX(robustness): ~3.7 MB grid; `static` keeps it off the stack,
    // which can overflow on platforms with a small default stack size.
    static int data[YSIZE][XSIZE];
    FILE * inpCheck;
    inpCheck = fopen("inpCheck.txt","w");
    if(inpCheck == NULL) {
        // BUGFIX: the message used to name the wrong file (dat.txt)
        perror("Cannot open inpCheck.txt file");
        return (-1);
    }
    //One row holds XSIZE ints of at most 5 digits, space separated,
    //plus a trailing newline and terminator.
    // BUGFIX: the buffer was sized for 1200 columns although XSIZE is 1201.
    char line[XSIZE * 5 + 2 + XSIZE];
    memset(line, '\0', sizeof(line));
    char *startPtr,*endPtr;
    datTxt = fopen("dat.txt","r");
    if(datTxt == NULL) {
        perror("Cannot open dat.txt file");
        return (-1);
    }
    outputAnisotropy00 = fopen("outputDataAni00.txt","w");
    outputAnisotropy09 = fopen("outputDataAni09.txt","w");
    outputAnisotropy49 = fopen("outputDataAni49.txt","w");
    outputAnisotropy99 = fopen("outputDataAni99.txt","w");
    if((outputAnisotropy00 == NULL)||(outputAnisotropy09 == NULL)||(outputAnisotropy49 == NULL)||(outputAnisotropy99 == NULL)) {
        perror("Cannot open Anisotropy file");
        return (-1);
    }
    outputAzimuth00 = fopen("outputDataAzi00.txt","w");
    outputAzimuth09 = fopen("outputDataAzi09.txt","w");
    outputAzimuth49 = fopen("outputDataAzi49.txt","w");
    outputAzimuth99 = fopen("outputDataAzi99.txt","w");
    if((outputAzimuth00 == NULL)||(outputAzimuth09 == NULL)||(outputAzimuth49 == NULL)||(outputAzimuth99 == NULL)) {
        perror("Cannot open Azimuth file");
        return (-1);
    }
    // Parse the space-separated integer grid into data[row][col], echoing
    // every value to inpCheck.txt for verification.
    int i,j,Value;
    j = 0;
    // BUGFIX: a 5-digit token plus terminator needs more than 5 bytes; the
    // old tempVal[5] was left unterminated by strncpy, so atoi() read past
    // the end of the buffer.
    char tempVal[8];
    memset(tempVal,'\0',sizeof(tempVal));
    while(fgets(line,sizeof(line),datTxt)!=NULL) {
        startPtr = line;
        for(i=0;i<XSIZE;i++) {
            Value = 0;
            memset(tempVal,'\0',sizeof(tempVal));
            if(i != (XSIZE - 1)) {
                // tokens are delimited by a single space except the last column
                endPtr = strchr(startPtr,' ');
                strncpy(tempVal,startPtr,endPtr-startPtr);
                Value = atoi(tempVal);
                data[j][i] = Value;
                fprintf(inpCheck,"%d ",Value);
                endPtr = endPtr + 1;
                startPtr = endPtr;
            }
            else if(i == (XSIZE - 1)){
                strcpy(tempVal,startPtr);
                Value = atoi(tempVal);
                data[j][i] = Value;
                fprintf(inpCheck,"%d\n",Value);
            }
        }
        j++;
    }
    // Sample directions every 5 degrees (in radians) over half the circle.
    float angle[ANGLESIZE];
    for(int i=0;i<ANGLESIZE;i++) {
        angle[i] = i * 5 * PI/180;
    }
    //Initializing 3D matrix anisotropy
    float*** anisotropy;
    anisotropy = (float***)malloc(YSIZE * sizeof(float**));
    for(i = 0;i<YSIZE;i++) {
        anisotropy[i] = (float**)malloc(XSIZE * sizeof(float *));
        for(j = 0; j<XSIZE;j++) {
            anisotropy[i][j] = (float*)malloc(RADIUS * sizeof(float));
        }
    }
    //Initializing 3D matrix azimuth
    float*** azimuth;
    azimuth = (float***)malloc(YSIZE * sizeof(float**));
    for(i = 0;i<YSIZE;i++) {
        azimuth[i] = (float**)malloc(XSIZE * sizeof(float *));
        for(j = 0; j<XSIZE;j++) {
            azimuth[i][j] = (float*)malloc(RADIUS * sizeof(float));
        }
    }
    //Actual computation
    int xrad,yrad,x,y,xradOrtho,yradOrtho,xradOneEighty,yradOneEighty;
    float variance[RADIUS];
    float orientation[RADIUS];
    float ortho[RADIUS];
    float value,sum_value,avg_value;
    float valueOrtho,sum_valueOrtho,avg_valueOrtho;
    // BUGFIX: valueOneEighty was declared int, so its "* 0.5" scaling was
    // truncated; declared float to match the arithmetic of `value`.
    float valueOneEighty;
    sum_value = 0;
    avg_value = 0;
    sum_valueOrtho = 0;
    avg_valueOrtho = 0;
    for(y=0;y<YSIZE;y++) {
        for(x = 0;x<XSIZE;x++) {
            // skip pixels whose sampling circle would leave the grid
            if((y>(YSIZE - RADIUS - 1))||(y<(RADIUS + 1))) continue;
            if((x>(XSIZE - RADIUS - 1))||(x<(RADIUS + 1))) continue;
            for(i=0;i<100;i++){
                variance[i] = FLT_MAX;
                ortho[i] = FLT_MAX;
            }
            // For every direction, accumulate the semivariance along the ray
            // and its 180-degree mirror, plus along the orthogonal direction,
            // keeping per radius the direction with the smallest variance.
            for(i=0;i<ANGLESIZE;i++) {
                sum_value = 0;
                sum_valueOrtho = 0;
                for(j = 0;j<RADIUS;j+=RADSTEP) {
                    xrad = (int)round(cos(angle[i]) * (j+1) + x);
                    yrad = (int)round(sin(angle[i]) * (j+1) + y);
                    value = data[y][x] - data[yrad][xrad];
                    value = value * value * 0.5;
                    //Ortho computation
                    xradOrtho = (int)round(cos(angle[i]+PI/2) * (j+1) + x);
                    yradOrtho = (int)round(sin(angle[i]+PI/2) * (j+1) + y);
                    valueOrtho = data[y][x] - data[yradOrtho][xradOrtho];
                    valueOrtho = valueOrtho * valueOrtho *0.5;
                    sum_valueOrtho = sum_valueOrtho + valueOrtho;
                    avg_valueOrtho = sum_valueOrtho/(j+1);
                    //One eighty angle computation
                    xradOneEighty = (int)round(cos(angle[i]+PI) * (j+1) + x);
                    yradOneEighty = (int)round(sin(angle[i]+PI) * (j+1) + y);
                    valueOneEighty = data[y][x] - data[yradOneEighty][xradOneEighty];
                    valueOneEighty = valueOneEighty * valueOneEighty * 0.5;
                    sum_value = sum_value + value + valueOneEighty;
                    avg_value = sum_value/(2*(j+1));
                    //Fail safe to ensure there is no nan or inf
                    if(avg_value == 0) {
                        if((avg_valueOrtho < 1) && (avg_valueOrtho > 0)) {
                            avg_value = avg_valueOrtho;
                        }
                        else {
                            avg_value = 1;
                        }
                    }
                    if(avg_valueOrtho == 0) {
                        avg_valueOrtho = 1;
                    }
                    if(avg_value < variance[j]) {
                        variance[j] = avg_value;
                        orientation[j] = angle[i];
                        ortho[j] = avg_valueOrtho;
                    }
                }
            }
            // anisotropy = orthogonal variance over minimal variance;
            // azimuth = direction of minimal variance, converted to degrees
            for(j=0;j<RADIUS;j+=RADSTEP){
                anisotropy[y][x][j] = ortho[j]/variance[j];
                azimuth[y][x][j] = orientation[j] * 180/PI ;
            }
            // Writing to files: newline after the last written column,
            // tab separation otherwise
            if (x == (XSIZE - RADIUS - 1)) {
                fprintf(outputAnisotropy00,"%f",anisotropy[y][x][0]);
                fprintf(outputAzimuth00,"%f",azimuth[y][x][0]);
                fprintf(outputAnisotropy00,"\n");
                fprintf(outputAzimuth00,"\n");
                fprintf(outputAnisotropy09,"%f",anisotropy[y][x][9]);
                fprintf(outputAzimuth09,"%f",azimuth[y][x][9]);
                fprintf(outputAnisotropy09,"\n");
                fprintf(outputAzimuth09,"\n");
                fprintf(outputAnisotropy49,"%f",anisotropy[y][x][49]);
                fprintf(outputAzimuth49,"%f",azimuth[y][x][49]);
                fprintf(outputAnisotropy49,"\n");
                fprintf(outputAzimuth49,"\n");
                fprintf(outputAnisotropy99,"%f",anisotropy[y][x][99]);
                fprintf(outputAzimuth99,"%f",azimuth[y][x][99]);
                fprintf(outputAnisotropy99,"\n");
                fprintf(outputAzimuth99,"\n");
            }
            else {
                fprintf(outputAnisotropy00,"%f",anisotropy[y][x][0]);
                fprintf(outputAzimuth00,"%f",azimuth[y][x][0]);
                fprintf(outputAnisotropy00,"\t");
                fprintf(outputAzimuth00,"\t");
                fprintf(outputAnisotropy09,"%f",anisotropy[y][x][9]);
                fprintf(outputAzimuth09,"%f",azimuth[y][x][9]);
                fprintf(outputAnisotropy09,"\t");
                fprintf(outputAzimuth09,"\t");
                fprintf(outputAnisotropy49,"%f",anisotropy[y][x][49]);
                fprintf(outputAzimuth49,"%f",azimuth[y][x][49]);
                fprintf(outputAnisotropy49,"\t");
                fprintf(outputAzimuth49,"\t");
                fprintf(outputAnisotropy99,"%f",anisotropy[y][x][99]);
                fprintf(outputAzimuth99,"%f",azimuth[y][x][99]);
                fprintf(outputAnisotropy99,"\t");
                fprintf(outputAzimuth99,"\t");
            }
        }
    }
    fclose(datTxt);
    fclose(inpCheck);
    fclose(outputAnisotropy00);
    fclose(outputAnisotropy09);
    fclose(outputAnisotropy49);
    fclose(outputAnisotropy99);
    fclose(outputAzimuth00);
    fclose(outputAzimuth09);
    fclose(outputAzimuth49);
    fclose(outputAzimuth99);
    //Freeing 3D matrix anisotropy
    for(i = 0;i<YSIZE;i++) {
        for(j=0;j<XSIZE;j++) {
            free(anisotropy[i][j]);
        }
        free(anisotropy[i]);
    }
    free(anisotropy);
    //Freeing 3D matrix azimuth
    for(i = 0;i<YSIZE;i++) {
        for(j=0;j<XSIZE;j++) {
            free(azimuth[i][j]);
        }
        free(azimuth[i]);
    }
    free(azimuth);
    return 0;
}
|
// One Gaussian-elimination update step: for pivot row/column t, subtract
// m[row][t] * a[t][col] from a[row][col] for every row in (t, Size) and
// column in [t, Size); the yidx == 0 column of threads also updates the
// right-hand side vector b.  a_cuda is a Size x Size row-major matrix,
// m_cuda holds the elimination multipliers; j1 is unused here.
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
// guard both grid dimensions against the tail beyond the active submatrix
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
// xidx walks rows below the pivot (offset by t+1), yidx walks columns (offset by t)
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
// only the first column of threads touches b, so each row has one writer
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
|
13,623 | #include <iostream>
#include <stdio.h>
#include <string.h>
#include <cassert>
// Writes each element's own global linear index into dval.
// Expects a 2D grid of 1D blocks that exactly covers the array
// (no bounds check; the nword parameter is not used in the kernel).
__global__ void kernel(int* dval, int nword)
{
int linearBlock = gridDim.x * blockIdx.y + blockIdx.x; // 2D grid -> 1D block id
int gid = linearBlock * blockDim.x + threadIdx.x;      // global thread index
dval[gid] = gid;
}
// Host driver: fills a device array with each element's own index via a
// 2D grid of 1D blocks, copies the result back and reports mismatches.
// Fixed: every allocation, launch and copy was previously unchecked, so a
// failed cudaMalloc or launch silently produced garbage "mismatch" output.
int main( int argc, char** argv)
{
int nby = 6;
int nbx = 65535; // max 65535 blocks per grid dimension
int nthre = 512; // threads per block
int nword = nbx * nby * nthre;
int mem_size = sizeof(int) * nword;
printf("# threads: %d \n", nword);
printf("mem_size: %d Kbyte\n", mem_size >> 10);
int* hval = (int*) malloc(mem_size);
if (hval == NULL) {
fprintf(stderr, "host malloc of %d bytes failed\n", mem_size);
return 1;
}
int* dval = NULL;
if (cudaMalloc( (void**) &dval, mem_size) != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(cudaGetLastError()));
free(hval);
return 1;
}
dim3 grid(nbx, nby);
dim3 threads(nthre);
kernel<<< grid, threads >>>(dval, nword);
cudaError_t err = cudaGetLastError(); // catches bad launch configuration
if (err != cudaSuccess) {
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
free(hval);
cudaFree(dval);
return 1;
}
// Blocking copy: also synchronizes with the kernel and surfaces
// asynchronous execution errors.
err = cudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
free(hval);
cudaFree(dval);
return 1;
}
for(int i=0; i<nword; i++){
int z = hval[i];
if(i != z) printf("%d: %d\n", i, z);
}
free(hval);
cudaFree(dval);
return (0);
}
|
// Returns the index of the largest element in result[0..size-1].
// Ties keep the earliest index. result must point at least one element.
int MaxVectorComp(float *result, int size)
{
int best = 0;
for (int k = 1; k < size; k++) {
if (result[k] > result[best]) {
best = k;
}
}
return best;
}
13,625 |
#include "cuda_runtime.h"
#include <stdio.h>
// __global_- keyword in CUDA C/C++ indicates a function that
// it run on the devices and it is called from host code.
// This is the device components processed by NVIDIA compiler(nvcc).
// Empty kernel: exists only to demonstrate launching device code from host.
__global__ void mykernel(void)
{
}
// Host functions processed by standard host compiler. ex) GCC, VS including Nsight.
// Host entry point: launches the (empty) demo kernel with 1 block x 1 thread.
int main()
{
// Launch kernel from host code to device code for executing a function on the GPU!
mykernel <<<1, 1 >>>();
// Fixed: the launch is asynchronous; without a synchronize the process
// could exit before the kernel ran, and launch errors were never surfaced.
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(err));
return 1;
}
printf("Hello, CUDA!\n");
return 0;
}
|
// NOTE(review): despite the name ("addone"), this kernel SQUARES each
// element in place — confirm which behavior is intended.
// No bounds check: the launch configuration must cover exactly the array
// length, or out-of-range elements are written.
__global__ void cu_addone(int* a)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx]*=a[idx];
}
|
13,627 | //
// Created by igor on 10.04.2021.
//
#include "Vector3.cuh"
// Component-wise constructor.
__device__ __host__ Vector3::Vector3(float x, float y, float z) : x(x), y(y), z(z) {}
// Scales this vector to unit length in place and returns *this.
// NOTE(review): divides by norm() without a zero check — a zero-length
// vector yields inf/nan components; confirm callers never pass one.
__device__ __host__ Vector3& Vector3::normalize() {
float norm = this->norm();
x /= norm;
y /= norm;
z /= norm;
return *this;
}
// Euclidean length of the vector.
__device__ __host__ float Vector3::norm() const{
    // Use the float overload: the plain double sqrt forced a
    // float -> double -> float round trip, which is much slower in
    // __device__ code on most GPUs.
    return sqrtf(x*x+y*y+z*z);
}
// Dot product with r.
__device__ __host__ float Vector3::dot(const Vector3 r) const {
    return x * r.x + y * r.y + z * r.z;
}
// Squared length (avoids the sqrt of norm()).
__device__ __host__ float Vector3::squared() const {
    return x*x+y*y+z*z;
}
// Cross product: this x b (right-handed).
__device__ __host__ Vector3 Vector3::cross(Vector3 b) const {
    const Vector3& a = *this;
    return {
            a.y * b.z - a.z * b.y,
            a.z * b.x - a.x * b.z,
            a.x * b.y - a.y * b.x
    };
}
// Scalar multiplication, scalar on the left.
__device__ __host__ Vector3 operator*(float s, const Vector3& v) {
    return {s*v.x, s*v.y, s*v.z};
}
// Component-wise addition.
__device__ __host__ Vector3 operator+(const Vector3 &l, const Vector3 &r) {
    return {l.x + r.x, l.y + r.y, l.z + r.z};
}
// Component-wise subtraction.
__device__ __host__ Vector3 operator-(const Vector3 &l, const Vector3 &r) {
    return {l.x - r.x, l.y - r.y, l.z - r.z};
}
// Unary negation, implemented via scalar multiplication by -1.
__device__ __host__ Vector3 operator-(const Vector3 &l) {
    return -1 * l;
}
|
13,628 | #include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
// Functor for thrust::transform over a counting iterator: produces a
// uniform random float in [minValue, maxValue). Each call constructs a
// fresh engine and discards n draws, so element n gets a reproducible,
// index-dependent value.
struct initRandomPrg
{
float minValue, maxValue;
__host__ __device__
initRandomPrg(float _mnV=0.f, float _mxV=1.f) : minValue(_mnV), maxValue(_mxV) {};
__host__ __device__
float operator()(const unsigned int n) const
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> dist(minValue, maxValue);
rng.discard(n);
return dist(rng);
}
};
// Binary functor: element-wise float addition (hand-rolled equivalent of
// thrust::plus<float>, kept for comparison — see the commented-out line
// at the call site).
struct addPrg
{
//thrust::device_vector a, b;
__host__ __device__
//addPrg(thrust::device_vector _a, thrust::device_vector _b) : a(_a), b(_b) {};
addPrg() {};
__host__ __device__
float operator()(const float a, const float b) const
{
return a + b;
}
};
// Benchmark: fills two device vectors with index-seeded random floats, then
// repeatedly computes their element-wise sum with a custom functor.
int main() {
int N = 8000 * 8000; // 64,000,000 elements (an 8000 x 8000 image)
int iterations = 10; // repeat the same transform for timing purposes
auto x = thrust::device_vector<float>(N);
auto y = thrust::device_vector<float>(N);
auto output = thrust::device_vector<float>(N);
// initialize arrays with index-dependent random values
auto index_sequence_begin = thrust::counting_iterator<unsigned int>(0);
thrust::transform(
index_sequence_begin,
index_sequence_begin + N,
x.begin(),
initRandomPrg()
);
thrust::transform(
index_sequence_begin,
index_sequence_begin + N,
y.begin(),
initRandomPrg()
);
// add them up (same result every iteration; loop exists for benchmarking)
for (int i = 0; i < iterations; i++) {
thrust::transform(
x.begin(), x.end(),
y.begin(),
output.begin(),
//thrust::plus<float>()
addPrg()
);
}
/*for (int i = 0; i < N; i++) {
std::cout << x[i] + y[i] << ' ' << output[i] << '\n';
}*/
return 0;
}
|
13,629 | /*
* TopBottomUpdater.cpp
*
* Created on: 04 февр. 2016 г.
* Author: aleksandr
*/
#include "TopBottomUpdater.h"
#define Hx(M, N) Hx[(M) * (sizeY-1) + (N)]
#define Hy(M, N) Hy[(M) * (sizeY) + (N)]
#define Ez(M, N) Ez[(M) * (sizeY) + (N)]
#define epsilon(M, N) epsilon[(M) * (sizeY) + (N)]
#define sigma(M, N) sigma[(M) * (sizeY) + (N)]
// Per-column update of the top and bottom grid boundaries for a 2D FDTD
// step (periodic coupling between row 0 and row sizeY-1 through HxTemp).
// indx is the column index m. Field access goes through the Hx/Hy/Ez/
// epsilon/sigma macros defined above, which flatten (m, n) row-major.
// NOTE(review): 377.0 is presumably the free-space impedance (ohms), and
// S a Courant-type factor — confirm against the class definition.
__device__
void TopBottomUpdater::operator() (const int indx) {
    int m = indx;
    // Update the intermediate array (magnetic field linking the two edges)
    float Chxe = S / 377.0;
    HxTemp[m] = HxTemp[m] - Chxe*(Ez(m,0) - Ez(m, sizeY-1));
    // Lossy-material coefficients for the bottom row
    float loss = sigma(m, 0)/(2*epsilon(m, 0));
    float Cezh = S * 377.0 / epsilon(m,0) / (1+loss);
    float Ceze = (1-loss)/(1+loss);
    // Update along the bottom boundary
    Ez(m, 0) = Ceze*Ez(m, 0) + Cezh * ((Hy(m, 0) - Hy(m-1, 0)) - (Hx(m, 0) - HxTemp[m]));
    // Recompute coefficients for the top row, then update it
    loss = sigma(m, sizeY-1)/(2*epsilon(m, sizeY-1));
    Cezh = S * 377.0 / epsilon(m,sizeY-1) / (1+loss);
    Ceze = (1-loss)/(1+loss);
    // Update along the top boundary
    Ez(m, sizeY-1) = Ceze*Ez(m, sizeY-1) + Cezh * ((Hy(m, sizeY-1) - Hy(m-1, sizeY-1)) - (HxTemp[m] - Hx(m, sizeY - 2)));
}
|
13,630 | /*******************************************************************************
This program uses the Thrust library to perform vector arithmetic .
Author: Said Darham
*******************************************************************************/
#include <iostream>
#include <stdlib.h> //srand and rand
#include <math.h>
//Thrust libraries headers
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
//Timer struct declaration. Using CUDA EVENTS
typedef struct timer{
cudaEvent_t startEvent; // recorded when timing starts
cudaEvent_t stopEvent;  // recorded when timing stops
float time_ms;          // elapsed time between the two events, in milliseconds
} timerEvent;
/*******************************************************************************
PROFILER FUNCTIONS USING EVENTS
*******************************************************************************/
void startEventTimer(timerEvent *timer){
/* startEventTimer()
Creates both events and records the start event on the default stream.
Pair with stopEventTimer(), then freeEventTimer().
*/
cudaEventCreate(&timer->startEvent);
cudaEventCreate(&timer->stopEvent);
cudaEventRecord(timer->startEvent);
}
void stopEventTimer(timerEvent *timer){
/* stopEventTimer()
Records the stop event, waits for it to complete, and stores the
elapsed time since startEventTimer() in timer->time_ms (milliseconds).
*/
cudaEventRecord(timer->stopEvent);
cudaEventSynchronize(timer->stopEvent);
cudaEventElapsedTime(&timer->time_ms, timer->startEvent, timer->stopEvent);
}
void freeEventTimer(timerEvent *timer){
/* freeEventTimer()
Destroys both events; call once timing results have been read.
*/
cudaEventDestroy(timer->startEvent);
cudaEventDestroy(timer->stopEvent);
}
void checkDevices(void){
//Check devices, select a device, and print device names/properties.
cudaDeviceProp prop;
int deviceCount; //number of devices found
int devId = 0; // default device Id
cudaGetDeviceCount(&deviceCount);
if(deviceCount == 0){
std::cout << "No GPU Device Found\n";
exit(0);
}
// Fixed: cudaSetDevice was previously only called when exactly one device
// was found, so multi-GPU hosts never had a device selected explicitly.
cudaSetDevice(devId);
std::cout << "Number Of Devices Found: " << deviceCount << std::endl;
//Print device names and some basic associated properties
for (int i = 0; i<deviceCount; i++){
cudaGetDeviceProperties(&prop,i);
std::cout << "Device " << i << " Name: " << prop.name << std::endl;
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
}
}
void printArray(thrust::host_vector<int> array, int n){
// Print up to the first 10 of the array's n elements, space-separated.
// Fixed: the loop previously ran to a hard-coded 10 regardless of n,
// reading out of range for vectors with fewer than 10 elements.
for(int i = 0; i < n && i < 10; i++){
std::cout << array[i] << ' ';
}
std::cout << std::endl;
}
/*******************************************************************************
ARITHMETIC KERNEL FUNCTIONS
*******************************************************************************/
// Add Function
// Element-wise c[i] = a[i] + b[i], one thread per element.
__global__ void add(int *a, int *b, int *c, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
// Element-wise c[i] = a[i] - b[i], one thread per element.
__global__ void subtract(int *a, int *b, int *c, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] - b[id];
}
// Element-wise c[i] = a[i] * b[i], one thread per element.
__global__ void mult(int *a, int *b, int *c, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] * b[id];
}
// Moudulu function
__global__ void mod(int *a, int *b, int *c, int n){
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] % b[id];
}
__host__ static __inline__ int myRand(){
//pseudo-random integer in [0, 3], for use with thrust::generate()
int r = rand();
return r & 3; // rand() is non-negative, so this equals r % 4
}
/*******************************************************************************
CUDA KERNELS TEST
*******************************************************************************/
// Runs the four arithmetic kernels on pinned host buffers of totalThreads
// ints, timing H2D copies + kernels + D2H copies together with CUDA events.
// NOTE(review): no CUDA call here is error-checked; a failed allocation or
// launch would go unnoticed and the timing would be meaningless — confirm
// this is acceptable for a benchmark.
void executeCudaTest(int numBlocks, int blockSize, int totalThreads){
std::cout << "\n\t\t*****Executing Arithmetic Functions Using CUDA kernels*****" << std::endl;
// Host input/output vectors
int *h_a, *h_b, *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod;
// Device input/output vectors
int *d_a, *d_b, *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod;
// Size, in bytes, of each vector
const unsigned int bytes = totalThreads*sizeof(int);
// Allocate memory for each vector on host; pinned so async-capable and fast
cudaMallocHost((void**)&h_a, bytes);
cudaMallocHost((void**)&h_b, bytes);
cudaMallocHost((void**)&h_c_add, bytes);
cudaMallocHost((void**)&h_c_sub, bytes);
cudaMallocHost((void**)&h_c_mult, bytes);
cudaMallocHost((void**)&h_c_mod, bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c_add, bytes);
cudaMalloc(&d_c_sub, bytes);
cudaMalloc(&d_c_mult, bytes);
cudaMalloc(&d_c_mod, bytes);
//initialize the input vectors
for(int i = 0;i<totalThreads;i++){
//first array is 0 through number of threads
h_a[i] = i;
// second array is a random number between 0 and 3 (may be 0!)
h_b[i] = rand() % 4;
}
//create a struct which will contain info for timing using events
timerEvent timer;
//Transfer data host->device; the timed region covers copies AND kernels
startEventTimer(&timer);
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
//Execute the kernel arithmetic functions
add<<<numBlocks, blockSize>>>(d_a, d_b, d_c_add, totalThreads);
subtract<<<numBlocks, blockSize>>>(d_a, d_b, d_c_sub, totalThreads);
mult<<<numBlocks, blockSize>>>(d_a, d_b, d_c_mult, totalThreads);
mod<<<numBlocks, blockSize>>>(d_a, d_b, d_c_mod, totalThreads);
//Transfer data from device to host (blocking, so kernels finish first)
cudaMemcpy(h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost);
stopEventTimer(&timer);
std::cout << "Time Elaplsed For CUDA kernels: " << timer.time_ms << " ms" << std::endl;
//destroy Event timer
freeEventTimer(&timer);
//free up space on our GPU
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c_add);
cudaFree(d_c_sub);
cudaFree(d_c_mult);
cudaFree(d_c_mod);
//free up space on our CPU use cudaFreeHost since pinnned
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c_add);
cudaFreeHost(h_c_sub);
cudaFreeHost(h_c_mult);
cudaFreeHost(h_c_mod);
}
/*******************************************************************************
THRUST TEST
*******************************************************************************/
// Same four arithmetic operations as executeCudaTest, expressed with
// thrust::transform; timing covers H2D copies, transforms, and D2H copies.
// NOTE(review): h_b may contain zeros (myRand yields 0..3), so the
// thrust::modulus transform divides by zero for those elements — confirm.
void executeThrustTest(int totalThreads){
std::cout << "\n\t\t*****Executing Arithmetic Functions Using Thrust*****" << std::endl;
// host vectors
thrust::host_vector<int> h_a(totalThreads);
thrust::host_vector<int> h_b(totalThreads);
// device vectors
thrust::device_vector<int> d_c_add(totalThreads);
thrust::device_vector<int> d_c_sub(totalThreads);
thrust::device_vector<int> d_c_mult(totalThreads);
thrust::device_vector<int> d_c_mod(totalThreads);
//Initialize data arrays: a = 0..n-1
for( int i = 0; i < totalThreads; i++)
h_a[i] = i;
//generate random data (0..3) on the host_vector
thrust::generate(h_b.begin(), h_b.end(), myRand);
//create a struct which will contain info for timing using events
timerEvent timer;
startEventTimer(&timer);
//copy vectors from host to device (implicit H2D transfer)
thrust::device_vector<int> d_a = h_a;
thrust::device_vector<int> d_b = h_b;
//perform arithmetic functions with the built-in binary functors
thrust::transform(d_a.begin(), d_a.end(), d_b.begin(), d_c_add.begin(), thrust::plus<int>());
thrust::transform(d_a.begin(), d_a.end(), d_b.begin(), d_c_sub.begin(), thrust::minus<int>());
thrust::transform(d_a.begin(), d_a.end(), d_b.begin(), d_c_mult.begin(), thrust::multiplies<int>());
thrust::transform(d_a.begin(), d_a.end(), d_b.begin(), d_c_mod.begin(), thrust::modulus<int>());
//copy results from device to host (implicit D2H transfer)
thrust::host_vector<int> h_c_add = d_c_add;
thrust::host_vector<int> h_c_sub = d_c_sub;
thrust::host_vector<int> h_c_mult = d_c_mult;
thrust::host_vector<int> h_c_mod = d_c_mod;
stopEventTimer(&timer);
std::cout << "Time Elaplsed For Arithmetic using Thrust: " << timer.time_ms << " ms" << std::endl;
//destroy Event timer
freeEventTimer(&timer);
}
/*******************************************************************************
MAIN
*******************************************************************************/
// Entry point: parses optional "<totalThreads> <blockSize>" arguments,
// rounds the thread count up to a whole number of blocks, then runs the
// raw-CUDA and Thrust benchmarks.
int main(int argc, char** argv)
{
int totalThreads = (1 << 10);
int blockSize = 256;
// Both overrides must be supplied together (exactly two extra arguments).
if( argc == 3 ){
totalThreads = atoi(argv[1]);
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
std::cout << "\nUsing " << totalThreads << " Threads and " << blockSize << " BlockSize\n" ;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
std::cout << "Warning: Total thread count is not evenly divisible by the block size\n";
// Fixed: the original streamed the literal "%d" (a leftover printf
// placeholder) instead of the rounded-up value.
std::cout << "The total number of threads will be rounded up to " << totalThreads << "\n";
}
// get number of devices and print some basic properties
checkDevices();
executeCudaTest( numBlocks, blockSize, totalThreads);
executeThrustTest( totalThreads );
return 0;
}
|
13,631 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
// Element-wise product: d_z[i] = d_x[i] * d_y[i] for i in [0, N).
__global__ void vecProduct(int *d_x, int *d_y, int *d_z, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N) return; // grid may over-cover the array
d_z[i] = d_x[i] * d_y[i];
}
// Reads a vector size N, times a CPU loop and the vecProduct kernel.
// Fixes: (1) the kernel was launched with dimBlock(N, 1), which exceeds the
// 1024-threads-per-block limit and silently fails for N > 1024; now uses
// 256-thread blocks with a ceil-div grid. (2) scanf's EOF return (-1) was
// not caught and N was never checked positive. (3) N-sized stack VLAs could
// overflow the stack for large N; buffers are now heap-allocated.
int main() {
int N=0;
printf("%s","Enter the size of vector : ");
if( scanf( "%d", &N) != 1 || N <= 0 )
{
fprintf( stderr, "Expected a positive number as input\n");
exit(1);
}
size_t size = (size_t)N * sizeof(int);
int *h_x = (int*)malloc(size);
int *h_y = (int*)malloc(size);
int *h_z = (int*)malloc(size);
int *d_x, *d_y, *d_z;
int i = 0;
int total=0;
if (h_x == NULL || h_y == NULL || h_z == NULL) {
fprintf(stderr, "host allocation failed\n");
exit(1);
}
//Initialize vectors
for (i = 0; i < N; i++) {
h_x[i] = i;
h_y[i] = i;
h_z[i] = 0;
}
cudaEvent_t startC, stopC;
float elapsed_time_msC;
cudaEventCreate( &startC );
cudaEventCreate( &stopC );
cudaEventRecord( startC, 0 );
// CPU timing loop.
// NOTE(review): this computes a SUM while the kernel computes a PRODUCT;
// the result is overwritten by the device copy below, so it serves only
// as a timing baseline — confirm that is intentional.
for (i = 0; i < N; i++) {
h_z[i] =h_x[i]+h_y[i] ;
}
cudaEventRecord( stopC, 0 );
cudaEventSynchronize( stopC );
cudaEventElapsedTime( &elapsed_time_msC, startC, stopC );
printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC);
cudaMalloc(&d_x, size);
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
cudaMalloc(&d_y, size);
cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
cudaMalloc(&d_z, size);
// Valid launch configuration for any N; the kernel bounds-checks the tail.
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
cudaEvent_t start, stop;
float elapsed_time_ms;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
vecProduct <<< blocksPerGrid, threadsPerBlock >>> (d_x, d_y, d_z, N);
cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed_time_ms, start, stop );
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
for (i = 0; i < N; i++) {
total+= h_z[i];
}
printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);
free(h_x);
free(h_y);
free(h_z);
return 0;
}
|
13,632 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
// Fills ip[0..size-1] with pseudo-random floats in [0.0, 25.5]
// (rand() & 0xFF gives 0..255, then divided by 10).
// NOTE(review): reseeds rand() from the wall clock on EVERY call, so two
// calls within the same second produce identical data — confirm intended.
void initialData( float *ip, int size )
{
// generate different seed for random number
time_t t;
srand( (unsigned int) time (&t) );
for (int i=0; i<size; i++) {
ip[i] = (float)( rand() & 0xFF ) / 10.0f;
}
}
// CPU reference v1: C = A + B for a row-major nx-by-ny matrix,
// advancing a per-row base pointer for each operand.
void sumMatrixOnHost_1( float *A, float *B, float *C, const int nx,
const int ny )
{
for (int row = 0; row < ny; row++) {
float *a = A + row * nx;
float *b = B + row * nx;
float *c = C + row * nx;
for (int col = 0; col < nx; col++) {
c[col] = a[col] + b[col];
}
}
}
// CPU reference v2: C = A + B with explicit (row, col) -> flat indexing;
// the row base offset is hoisted out of the inner loop.
void sumMatrixOnHost_2( float *A, float *B, float *C, const int nx,
const int ny )
{
for (int iy = 0; iy < ny; iy++) {
const int rowBase = iy * nx;
for (int ix = 0; ix < nx; ix++) {
const int k = rowBase + ix;
C[k] = A[k] + B[k];
}
}
}
// CPU reference v3: treat both matrices as flat arrays of nx*ny floats.
void sumMatrixOnHost_3( float *A, float *B, float *C, const int nx,
const int ny )
{
const int total = nx * ny;
for (int k = 0; k != total; ++k) {
C[k] = A[k] + B[k];
}
}
// GPU matrix add: one thread per element over a 2D launch, with a bounds
// guard for partial edge blocks. Row-major nx-by-ny layout.
__global__ void sumMatrixOnDevice_1( float *A, float *B, float *C,
const int nx, const int ny )
{
size_t ix = threadIdx.x + blockIdx.x * blockDim.x; // column
size_t iy = threadIdx.y + blockIdx.y * blockDim.y; // row
if (ix < nx && iy < ny) {
C[ix + iy * nx] = A[ix + iy * nx] + B[ix + iy * nx];
}
return;
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
struct timeval tv;
gettimeofday( &tv, NULL );
return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
}
// Benchmarks three CPU matrix-add variants against the GPU kernel on
// 2^14 x 2^14 float matrices (2^28 elements, 1 GiB per matrix) and
// reports total error plus the GPU speed-up over the fastest CPU run.
// NOTE(review): malloc/cudaMalloc results are unchecked despite the 1 GiB
// per-matrix footprint — confirm target machines have the memory.
int main( int argc, char **argv )
{
// timing...
double startTime, cpuElapsed_1, cpuElapsed_2, cpuElapsed_3, gpuElapsed;
// data
int nx = 1<<14;
int ny = 1<<14;
int nxy = nx * ny;
size_t nBytes = nxy * sizeof(float);
printf("matrix dimensions = (%i, %i)\n", nx, ny);
printf("number of elements = %i\n", nxy);
printf("number of bytes in each matrix = %lu\n", nBytes);
float *h_A, *h_B, *h_C;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
initialData( h_A, nxy );
initialData( h_B, nxy );
// set up device
int dev = 0;
cudaSetDevice(dev);
float *d_A, *d_B, *d_C, *gpuRes;
cudaMalloc( (float **)&d_A, nBytes );
cudaMalloc( (float **)&d_B, nBytes );
cudaMalloc( (float **)&d_C, nBytes );
gpuRes = (float *)malloc(nBytes);
cudaMemcpy( d_A, h_A, nBytes, cudaMemcpyHostToDevice );
cudaMemcpy( d_B, h_B, nBytes, cudaMemcpyHostToDevice );
memset( h_C, 0, nBytes );
memset( gpuRes, 0, nBytes );
// Time each CPU variant; all three write the same result into h_C.
startTime = cpuSecond();
sumMatrixOnHost_1( h_A, h_B, h_C, nx, ny );
cpuElapsed_1 = cpuSecond() - startTime;
startTime = cpuSecond();
sumMatrixOnHost_2( h_A, h_B, h_C, nx, ny );
cpuElapsed_2 = cpuSecond() - startTime;
startTime = cpuSecond();
sumMatrixOnHost_3( h_A, h_B, h_C, nx, ny );
cpuElapsed_3 = cpuSecond() - startTime;
// 32x32 = 1024 threads per block; ceil-div grid covers the whole matrix.
dim3 block(32, 32);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
printf("block = (%i, %i, %i)\n", block.x, block.y, block.z);
printf("grid = (%i, %i, %i)\n", grid.x, grid.y, grid.z);
printf("number of threads / block = %i\n", block.x * block.y * block.z);
if (block.x * block.y * block.z > 1024) {
printf("Can only launch 1024 threads per block, not %i\n",
block.x * block.y * block.z);
exit(1);
}
printf("total launched = %i\n", block.x * grid.x * block.y * grid.y);
printf("total needed = %i\n", nxy);
// GPU timing: synchronize so the asynchronous launch is fully measured.
startTime = cpuSecond();
sumMatrixOnDevice_1<<< grid, block >>>( d_A, d_B, d_C, nx, ny );
cudaDeviceSynchronize();
printf("Launched with blockDim = (%i, %i, %i)\n", block.x, block.y, block.z);
printf("Launched with gridDim = (%i, %i, %i)\n", grid.x, grid.y, grid.z);
gpuElapsed = cpuSecond() - startTime;
cudaError_t error = cudaMemcpy( gpuRes, d_C, nBytes,
cudaMemcpyDeviceToHost );
if (error != cudaSuccess) {
printf("Error: %s:%d, ", __FILE__, __LINE__);
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));
exit(1);
}
// Accumulate the absolute CPU/GPU difference over every element.
double err = 0.0;
for (int i=0; i<nxy; i++) {
err += abs(h_C[i] - gpuRes[i]);
// printf("%3i:: %7.3f %7.3f %7.3f %7.3f\n", i, h_C[i], gpuRes[i], h_A[i], h_B[i]);
}
printf("Total error is %f\n", err);
printf("Time on CPU_v1 is %f\n", cpuElapsed_1);
printf("Time on CPU_v2 is %f\n", cpuElapsed_2);
printf("Time on CPU_v3 is %f\n", cpuElapsed_3);
printf("Time on GPU is %f\n", gpuElapsed);
// Speed-up is computed against the FASTEST of the three CPU variants.
printf("GPU speed-up over CPU is %.2f x\n",
(cpuElapsed_1 < cpuElapsed_2 ?
(cpuElapsed_1 < cpuElapsed_3 ? cpuElapsed_1 : cpuElapsed_3)
: (cpuElapsed_2 < cpuElapsed_3 ? cpuElapsed_2 : cpuElapsed_3)) / gpuElapsed);
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(gpuRes);
return 0;
}
// qsort comparator for ints: ascending order (-1 / 0 / +1).
int comp(const void * elem1, const void * elem2)
{
int a = *(const int*)elem1;
int b = *(const int*)elem2;
return (a > b) - (a < b);
}
// Sorts a fixed permutation of 0..255 and verifies that the sorted array
// equals the identity (i.e. the table is a true permutation).
// Fixed: "success:: all matched" was previously printed unconditionally,
// even after a mismatch had already printed "failed".
void checkSol()
{
int indices[] = { 96, 97 , 98 , 99 , 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 32 , 33 , 34 , 35 , 36 , 37 , 38 , 39 ,
40 , 41 , 42 , 43 , 44 , 45 , 46 , 47 , 48 , 49 , 50 , 51 , 52 , 53 , 54 , 55 , 56 , 57 , 58 , 59 ,
60 , 61 , 62 , 63 , 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191, 64 , 65 , 66 , 67 , 68 , 69 , 70 , 71 , 72 , 73 , 74 , 75 ,
76 , 77 , 78 , 79 , 80 , 81 , 82 , 83 , 84 , 85 , 86 , 87 , 88 , 89 , 90 , 91 , 92 , 93 , 94 , 95 ,
0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 , 16 , 17 , 18 , 19 ,
20 , 21 , 22 , 23 , 24 , 25 , 26 , 27 , 28 , 29 , 30 , 31 , 192, 193, 194, 195, 196, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 };
const int count = (int)(sizeof(indices)/sizeof(*indices));
qsort(indices, count, sizeof(*indices), comp);
int allMatched = 1;
for (int i = 0 ; i < count ; i++) {
// printf ("%d ", indices[i]);
if (i != indices[i]) {
printf("failed:: %d:%d\n", i, indices[i]);
allMatched = 0;
break;
}
}
if (allMatched)
printf("success:: all matched\n");
return;
}
|
13,633 | // Cuda example add1 by Oleksiy Grechnyev
// This one uses cudaMallocManaged() : unified CPU/GPU memory
#include <iostream>
#include <cmath>
// Kernel: This runs on the GPU (device) !
// Element-wise add y[i] += x[i] for i in [0, n), using a grid-stride loop
// (stride = total threads in the grid), so any launch configuration is
// correct. The device printf is demo/debug output — one line per thread.
__global__
void add(int n, float *x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
printf("thread = %d/%d, block = %d/%d, index = %d/%d \n", threadIdx.x, blockDim.x, blockIdx.x, gridDim.x, index, stride);
for (int i = index; i< n ; i += stride)
y[i] += x[i];
}
// This runs on the CPU (host)
// Demo host code: unified-memory vector add with a deliberately tiny launch
// (3 blocks x 4 threads) so the per-thread printf output stays readable.
// NOTE(review): cudaMallocManaged results are unchecked — a failed
// allocation would crash on the init loop below.
int main(){
int n = 1 << 20; // 1024**2
// Alloc unified CPU/GPU memory
float *x, *y;
cudaMallocManaged(&x, n*sizeof(float));
cudaMallocManaged(&y, n*sizeof(float));
// Initialize data
for (int i = 0; i< n ; ++i) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Automatic block size and number of blocks for max speed
// int blockSize = 256;
// int numBlocks = (n+ blockSize -1) / blockSize;
// Smaller values for the demo
int blockSize = 4;
int numBlocks = 3;
// Add
add<<<numBlocks, blockSize>>>(n, x, y);
// Wait for GPU, needed here because of cudaMallocManaged()
cudaDeviceSynchronize();
// Check the result: every element should be 1 + 2 = 3, so maxE should be 0
float maxE=0;
for (int i = 0; i< n ; ++i)
maxE = std::fmax(maxE, std::fabs(y[i] - 3.0f));
std::cout << "maxE = " << maxE << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
// FIR filter stage over complex (float2) data: each thread accumulates
// nTaps coefficient-weighted samples (spaced nChannels apart) for four
// consecutive nChannels-sized blocks of input at once, via fused
// multiply-adds, and writes one filtered complex value per block.
// Indexing: bl selects the group of 4 blocks; ypos + threadIdx.x selects
// the channel within a block.
// NOTE(review): despite the "_shared" suffix, this kernel uses no
// __shared__ memory — confirm the name against its sibling variants.
__global__ void Fir_SpB_shared(float2* __restrict__ d_data, float* __restrict__ d_coeff, int nTaps, int nChannels, int yshift, float2* __restrict__ d_spectra) {
int t = 0;
int bl= 4*blockIdx.x*nChannels;
int ypos = blockDim.x*blockIdx.y + yshift;
float2 ftemp1 = make_float2(0.0,0.0);
float2 ftemp2 = make_float2(0.0,0.0);
float2 ftemp3 = make_float2(0.0,0.0);
float2 ftemp4 = make_float2(0.0,0.0);
float temp;
// Walk the taps for this channel; the same coefficient multiplies the
// corresponding sample in each of the four data blocks.
for(t=ypos + threadIdx.x;t<(nTaps)*nChannels;t+=nChannels){
temp = d_coeff[t];
ftemp1.x = __fmaf_rn(temp,d_data[bl+t].x,ftemp1.x);
ftemp1.y = __fmaf_rn(temp,d_data[bl+t].y,ftemp1.y);
ftemp2.x = __fmaf_rn(temp,d_data[bl+nChannels+t].x,ftemp2.x);
ftemp2.y = __fmaf_rn(temp,d_data[bl+nChannels+t].y,ftemp2.y);
ftemp3.x = __fmaf_rn(temp,d_data[bl+2*nChannels+t].x,ftemp3.x);
ftemp3.y = __fmaf_rn(temp,d_data[bl+2*nChannels+t].y,ftemp3.y);
ftemp4.x = __fmaf_rn(temp,d_data[bl+3*nChannels+t].x,ftemp4.x);
ftemp4.y = __fmaf_rn(temp,d_data[bl+3*nChannels+t].y,ftemp4.y);
}
// Store the four accumulated outputs, one per data block.
t=bl + ypos + threadIdx.x;
d_spectra[t]=ftemp1;
d_spectra[t+nChannels]=ftemp2;
d_spectra[t+2*nChannels]=ftemp3;
d_spectra[t+3*nChannels]=ftemp4;
return;
}
|
// Lock-free inter-block barrier ("software global sync"):
//  1. thread 0 of every block publishes goalVal into Arrayin[bid];
//  2. one coordinator block busy-waits until every block has published,
//     then acknowledges each block through Arrayout (one slot per thread,
//     which requires numBlocks <= blockDim.x);
//  3. thread 0 of every block spins on its own Arrayout slot.
// NOTE(review): the coordinator is block 1 (bid == 1), not block 0 — a
// single-block launch would therefore never be acknowledged; confirm this
// is intentional.
// NOTE(review): relies on volatile reads only, with no memory fences —
// confirm the surrounding code's ordering requirements are met.
__device__ void __ib_sync_lockfree(int goalVal, volatile int *Arrayin, volatile int *Arrayout) {
int tx = threadIdx.x;// * blockDim.y + threadIdx.y;
int numBlocks = gridDim.x;// * gridDim.y;
int bid = blockIdx.x;// * gridDim.y + blockIdx.y;
if(tx == 0) {
Arrayin[bid] = goalVal;
}
if(bid == 1) {
if(tx < numBlocks) {
while (Arrayin[tx] != goalVal) {}
}
__syncthreads();
if(tx < numBlocks) {
Arrayout[tx] = goalVal;
}
}
if(tx == 0) {
while(Arrayout[bid] != goalVal) {}
}
__syncthreads();
}
__device__ volatile int g_mutex;
// Counter-based inter-block barrier: thread 0 of each block atomically
// increments the global g_mutex and spins until it reaches `goal`; the
// trailing __syncthreads() releases the rest of the block.
// NOTE(review): g_mutex is never reset here, so each successive barrier
// must be called with an increased goal — confirm callers do this.
__device__ void __ib_sync(int goal) {
// __syncthreads();
int tx = threadIdx.x;// * blockDim.y + threadIdx.y;
if (tx == 0) {
atomicAdd((int *)&g_mutex, 1);
while(g_mutex != goal) {}
}
__syncthreads();
}
|
13,636 | #include "includes.h"
// Element-wise signed power: dst[i] = sign(src[i]) * |src[i]|^eps.
// With eps == -1 this is the signed multiplicative inverse (hence the
// kernel name) for non-zero inputs; the eps notes below describe the
// intended "delta" variants.
__global__ void mult_inverse_array_kernel(const float *src_gpu, float *dst_gpu, int size, const float eps)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
float val = src_gpu[index];
// sign is +1 for val >= 0 (zero maps to +1), -1 otherwise
float sign = (val < 0) ? -1 : 1;
// eps = 1 by default
// eps = 2 - lower delta
// eps = 0 - higher delta (linear)
// eps = -1 - high delta (inverse number)
dst_gpu[index] = powf(fabs(val), eps) * sign;
}
}
13,637 | #include <iostream>
#include <fstream>
#include <stdlib.h>
#include <time.h>
/* Generates a mandelbrot set image
*
* The elements of bounds is the real min/max and imaginary min/max in that order
* iterations is the number of iterations of z^2+c on each pixel
* The percision is the real and imaginary step size in that order
*/
// One thread per pixel: iterates z <- z^2 + c for c determined by the pixel
// position within [left,right] x [down,up], and colors escaping points by
// their (log-scaled) escape iteration. Non-escaping points are left black
// (the output buffer is zeroed by the host before launch).
__global__
void mandelbrot(unsigned char* image,
const double left,
const double right,
const double down,
const double up,
const int iterations,
const double real_precision,
const double im_precision)
{
// Representation of the image is in row-major order. Thus, the (re,im) pixel is image[re+im*re_size]
int x = threadIdx.x + blockIdx.x * blockDim.x; // real part
int y = threadIdx.y + blockIdx.y * blockDim.y; // imaginary part
const int re_size = (int)((right-left)/real_precision);
const int im_size = (int)((up-down)/im_precision);
if (x < re_size && y < im_size) // check that it is in bounds
{
// c=r+it is the original number
double r = left + real_precision*x;
double t = down + im_precision*y;
// start from z=0 and declare temporary variables
double zr = 0.0, zt = 0.0, _zr, _zt;
for (int iteration = 1; iteration < iterations; ++iteration)
{
// perform an iteration of z^2+c
_zr = zr*zr - zt*zt + r;
_zt = zr*zt*2 + t;
zr = _zr;
zt = _zt;
// check for |z|>2 (or |z|^2>4)
if (zr*zr+zt*zt > 4.0)
{
// convert to image using a log distribution: q in [0, 511]
int q = (int)((sqrtf(iteration)*512)/sqrtf(iterations));
int s = 3*(x+y*re_size); // s:red, s+1:green, s+2:blue
if (q < 256)
{
// low q: blue fading out, red ramping up
image[s+2] = 255-q;
image[s] = q;
}
else
{
// high q: red fading out, green ramping up
image[s] = 511-q;
image[s+1] = q-256;
}
return;
}
}
// If it never reaches |z|>2, the point is then in the mandobrot set given the limitations of the program.
// Therefore, the point is represented in the image as black
}
}
// CPU reference for the mandelbrot kernel: iterates z <- z^2 + c from z = 0
// with c = r + t*i, returning the 0-based iteration at which |z| exceeds 2,
// or iterations-1 if it never escapes within iterations-1 steps.
int check(double r, double t, int iterations)
{
double zr = 0.0, zi = 0.0;
for (int it = 0; it < iterations - 1; ++it)
{
// one iteration of z^2 + c
const double nextR = zr * zr - zi * zi + r;
const double nextI = 2.0 * zr * zi + t;
zr = nextR;
zi = nextI;
// escape test: |z|^2 > 4 means |z| > 2
if (zr * zr + zi * zi > 4.0)
return it;
}
return iterations - 1;
}
// Renders a deep-zoom region of the Mandelbrot set on the GPU and writes it
// as a binary PPM (P6) image.
// NOTE(review): with 1e-8 precision the actual image is ~16000 x 11500 px
// (~552 MB RGB buffer) — the "3000x2000 / 6 megapixel" comment below is
// stale. calloc and fopen results are unchecked; a missing output directory
// (default path is /mnt/ramdisk) would crash on fprintf — confirm.
int main(int argc, char* argv[])
{
// the default bounds for the mandelbrot set is usually -2<Re(z)<1 and -1<Im(z)<1
// (these are a zoomed-in window, not the defaults)
double bounds[4] = {-0.235125-4e-5, -0.235125+12e-5, 0.827215-4e-5, 0.827215+7.5e-5};
// given precision of 0.001, it will generate 3000x2000 image which is about 6 megapixel
double precision[2] = {1e-8, 1e-8};
// this just fits the 3 8-bit channels
int iterations = 1 << 11;
unsigned char *d_image, *h_image;
int re_size, im_size;
// compute the size of the image
re_size = (int)((bounds[1]-bounds[0])/precision[0]);
im_size = (int)((bounds[3]-bounds[2])/precision[1]);
// allocate the GPU memory for the image
if (cudaMalloc(&d_image, 3*re_size*im_size*sizeof(char)) != cudaSuccess)
{
std::cout << "The device does not have enough memory. Program exited with error code -1." << std::endl;
return -1;
}
// set the memory to 0 (non-escaping pixels stay black)
if (cudaMemset(d_image, 0, 3*re_size*im_size*sizeof(char)) != cudaSuccess)
{
std::cout << "Failed to set memory to 0. Program exited with error code -2." << std::endl;
return -2;
}
// create the grid and blocks to call the method. 32x32=1024 threads.
dim3 grid((re_size+31)/32, (im_size+31)/32, 1);
dim3 block(32,32,1);
mandelbrot <<<grid, block>>> (d_image, bounds[0], bounds[1], bounds[2], bounds[3], iterations, precision[0], precision[1]);
cudaDeviceSynchronize();
h_image = (unsigned char*)calloc(3*re_size*im_size, sizeof(char));
if (cudaMemcpy(h_image, d_image, 3*re_size*im_size*sizeof(char), cudaMemcpyDeviceToHost) != cudaSuccess)
{
std::cout << "Failed to copy the memory from device to host. Program exited with error code -9";
return -9;
}
cudaFree(d_image);
#ifdef DEBUG
// check against CPU code
// NOTE(review): this compares a color-channel byte (GPU) with a raw escape
// iteration (CPU); the values are on different scales — confirm intent.
srand(time(NULL));
for (int i = 0; i < 20; ++i)
{
int x = rand() % re_size;
int y = rand() % im_size;
printf("z=%.2f+%.2fi GPU:%d CPU:%d\n", bounds[0]+(double)x*precision[0], bounds[2]+(double)y*precision[1], h_image[3*(x+y*re_size)], check(bounds[0]+(double)x*precision[0], bounds[2]+(double)y*precision[1], iterations));
}
#endif
// write the image
FILE *img;
if (argc > 1)
img = fopen(argv[1], "wb");
else
img = fopen("/mnt/ramdisk/image.ppm", "wb");
fprintf(img, "P6\n%d %d\n%d\n", re_size, im_size, 255);
fwrite(h_image, sizeof(char), 3*re_size*im_size, img);
fclose(img);
free(h_image);
return 0;
}
13,638 | //fail: data race
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include <assert.h>
#define N 2//64
// Deliberate data race (see the "fail: data race" header above): every
// launched thread writes the same element p[2]. This is a verification-tool
// test case, not a bug to fix.
__global__ void foo (int* p, int* q){
p[2] = q[2] + 1;
}
|
13,639 | #ifndef _CUDA_FLOAT_UTILITIES_CU_
#define _CUDA_FLOAT_UTILITIES_CU_
__device__ float
interpolate(float alpha, float x0, float x1);
__device__ float
clip(float x, float min, float max);
__device__ int
intervalComparison (float x, float lowerBound, float upperBound);
__device__ float
interpolate(float alpha, float x0, float x1)
{
return x0 + ((x1 - x0) * alpha);
}
__device__ float
clip(float x, float min, float max)
{
if (x < min) return min;
if (x > max) return max;
return x;
}
// ----------------------------------------------------------------------------
// classify a value relative to the interval between two bounds:
// returns -1 when below the lower bound
// returns 0 when between the bounds (inside the interval)
// returns +1 when above the upper bound
// Classify x against [lowerBound, upperBound]:
// -1 below the interval, +1 above it, 0 inside (bounds inclusive).
__device__ int
intervalComparison (float x, float lowerBound, float upperBound)
{
    int result = 0;
    if (x < lowerBound) {
        result = -1;
    } else if (x > upperBound) {
        result = +1;
    }
    return result;
}
#endif // _CUDA_FLOAT_UTILITIES_CU_ |
13,640 | #include <stdio.h>
#include <stdlib.h>
// One FDTD H-field update for a chunk of the domain starting at flat offset idx0.
// The flat index idx is decomposed into (i, j, k) on an nx*ny*nz grid where
// nyz == ny*nz; idx-1, idx-nz and idx-nyz are the k-1, j-1 and i-1 neighbours.
// The j>0 / k>0 / i>0 guards keep those backward-difference indices in bounds.
// NOTE(review): there is no idx upper-bound guard — the host must launch exactly
// the intended number of threads per chunk; confirm against the driver loop.
__global__ void update_h(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
// Curl of E with the 0.5 update coefficient folded in.
if( j>0 && k>0 ) hx[idx] -= 0.5*( ez[idx] - ez[idx-nz] - ey[idx] + ey[idx-1] );
if( i>0 && k>0 ) hy[idx] -= 0.5*( ex[idx] - ex[idx-1] - ez[idx] + ez[idx-nyz] );
if( i>0 && j>0 ) hz[idx] -= 0.5*( ey[idx] - ey[idx-nyz] - ex[idx] + ex[idx-nz] );
}
// One FDTD E-field update for a chunk of the domain starting at flat offset idx0.
// Mirror image of update_h: forward differences (idx+1, idx+nz, idx+nyz) of the
// H field, scaled by the per-cell coefficients cex/cey/cez; the upper-bound
// guards (j<ny-1 etc.) keep the forward indices inside the grid.
// NOTE(review): like update_h, there is no idx upper-bound guard — the launch
// must cover exactly the intended chunk; confirm against the driver loop.
__global__ void update_e(int nx, int ny, int nz, int nyz, int idx0, float *hx, float *hy, float *hz, float *ex, float *ey, float *ez, float *cex, float *cey, float *cez) {
int tx = threadIdx.x;
int idx = blockDim.x*blockIdx.x + tx + idx0;
int i = idx/(nyz);
int j = (idx - i*nyz)/nz;
int k = idx%nz;
if( j<ny-1 && k<nz-1 ) ex[idx] += cex[idx]*( hz[idx+nz] - hz[idx] - hy[idx+1] + hy[idx] );
if( i<nx-1 && k<nz-1 ) ey[idx] += cey[idx]*( hx[idx+1] - hx[idx] - hz[idx+nyz] + hz[idx] );
if( i<nx-1 && j<ny-1 ) ez[idx] += cez[idx]*( hy[idx+nyz] - hy[idx] - hx[idx+nz] + hx[idx] );
}
// Soft source term: add a sinusoid (time step tn) along the z-line that runs
// through the centre of the grid.  Launched with a single block; threads with
// threadIdx.x >= nz do nothing.
__global__ void update_src(int nx, int ny, int nz, int nyz, float tn, float *f) {
    int k = threadIdx.x;
    if (k < nz) {
        int center = (nx/2)*nyz + (ny/2)*nz + k;
        f[center] += sin(0.1*tn);
    }
}
// Zero-fill one chunk of f: global element index = launch index + idx0,
// guarded against the array length n.
__global__ void init_zero(int n, int idx0, float *f) {
    int gid = idx0 + blockDim.x*blockIdx.x + threadIdx.x;
    if (gid < n)
        f[gid] = 0;
}
// Driver for the simple FDTD simulation: allocates the nine field/coefficient
// arrays on the device, zeroes them in chunks, then leapfrogs H and E updates
// for tmax steps with a sinusoidal source, finally copying Ez back to the host.
int main() {
int i, n, nx, ny, nz, tn, tmax;
nx = 320;
ny = 480;
nz = 480;
n = nx*ny*nz;
tmax = 100000;
// Fixed: the format string has no conversions, so the three extra arguments
// previously passed here were meaningless.
printf("Simple FDTD simulation\n");
printf("Array size : %dx%dx%d\n", nx, ny, nz);
printf("Total used memory : %1.2f GB\n", n*4*9./(1024*1024*1024));
printf("Iteration : %d step\n", tmax);
// Host buffers: f receives the final Ez field, cf seeds the E coefficients.
float *f, *cf;
f = (float *) calloc (n, sizeof(float));
cf = (float *) calloc (n, sizeof(float));
for( i=0; i<n; i++ ) cf[i] = 0.5;
float *hx_gpu, *hy_gpu, *hz_gpu;
float *ex_gpu, *ey_gpu, *ez_gpu;
float *cex_gpu, *cey_gpu, *cez_gpu;
cudaMalloc ( (void**) &hx_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hy_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &hz_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &ez_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cex_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cey_gpu, n*sizeof(float) );
cudaMalloc ( (void**) &cez_gpu, n*sizeof(float) );
cudaMemcpy ( cex_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cey_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( cez_gpu, cf, n*sizeof(float), cudaMemcpyHostToDevice );
int ng = 6; // number of chunks the domain is split into per kernel
int tpb = 256; // threads per block
int bpg = n/tpb/ng; // blocks per grid (per chunk)
for( i=0; i<ng; i++) {
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hx_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hy_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, hz_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ex_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ey_gpu);
init_zero <<<dim3(bpg),dim3(tpb)>>> (n, i*bpg*tpb, ez_gpu);
}
// Main leapfrog loop: H update, then E update, then inject the source.
for( tn=0; tn<tmax; tn++ ) {
for( i=0; i<ng; i++) update_h <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu);
for( i=0; i<ng; i++) update_e <<<dim3(bpg),dim3(tpb)>>> (nx, ny, nz, ny*nz, i*bpg*tpb, hx_gpu, hy_gpu, hz_gpu, ex_gpu, ey_gpu, ez_gpu, cex_gpu, cey_gpu, cez_gpu);
update_src <<<dim3(1),dim3(512)>>> (nx, ny, nz, ny*nz, tn, ez_gpu);
}
cudaMemcpy( f, ez_gpu, n*sizeof(float), cudaMemcpyDeviceToHost );
printf("Complete.\n");
// Fixed: release device and host memory (originally leaked on exit).
cudaFree(hx_gpu); cudaFree(hy_gpu); cudaFree(hz_gpu);
cudaFree(ex_gpu); cudaFree(ey_gpu); cudaFree(ez_gpu);
cudaFree(cex_gpu); cudaFree(cey_gpu); cudaFree(cez_gpu);
free(f);
free(cf);
return 0;
}
|
13,641 | #include <iostream>
#include <cstdio>
#include <fstream>
#include <cstdlib>
using namespace std;
//#include <cuda_runtime.h>
#include "cuda.h"
//#include <sdkHelper.h>
#define TIMES 1
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n) with uniform pseudo-random floats in [0, 1].
void RandomInit(float* data, int n)
{
    for (int idx = 0; idx < n; ++idx)
        data[idx] = rand() / (float)RAND_MAX;
}
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process with file/line context when a CUDA runtime call fails.
// Invoked through the checkCudaErrors(...) macro above, which supplies
// __FILE__ / __LINE__ at the call site.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Check-and-clear the sticky CUDA error (e.g. a kernel launch failure) and
// abort with context if one is pending.  Invoked through the
// getLastCudaError(msg) macro, which supplies __FILE__ / __LINE__.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////__NAIVE_MATRIX_MULTIPLICATION_///////////////////////////////////////////////
// Device code
// Compute C = A * B
#define TILEWIDTH_X 16
#define TILEWIDTH_Y 16
#define TILE_WIDTH 16
// Tiled square matrix multiply: P = M * N, all Width x Width, row-major.
// One TILE_WIDTH x TILE_WIDTH thread block computes one output tile, staging
// M and N tiles through shared memory.
// Fixed: the original assumed Width was an exact multiple of TILE_WIDTH and
// had no bounds checks, so the ceil-divided grid used by the caller read and
// wrote out of bounds for any other Width.  Out-of-range tile slots are now
// zero-filled and out-of-range threads skip the final store.
__global__ void matrixMultiply(float* d_M, float* d_N, float* d_P,
                               int Width) {
    __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
    __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x; int ty = threadIdx.y;
    // Row/column of the P element this thread owns.
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;
    float Pvalue = 0;
    // Ceil-divide so partial edge tiles are processed too.
    int numTiles = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int m = 0; m < numTiles; ++m) {
        int mCol = m * TILE_WIDTH + tx; // column of M read by this thread
        int mRow = m * TILE_WIDTH + ty; // row of N read by this thread
        // Cooperative load with zero padding outside the matrices; padding
        // contributes 0 to the dot product so results are unchanged.
        ds_M[ty][tx] = (Row < Width && mCol < Width) ? d_M[Row * Width + mCol] : 0.0f;
        ds_N[ty][tx] = (mRow < Width && Col < Width) ? d_N[mRow * Width + Col] : 0.0f;
        __syncthreads(); // all loads done before any thread reads the tiles
        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += ds_M[ty][k] * ds_N[k][tx];
        __syncthreads(); // all reads done before the tiles are overwritten
    }
    if (Row < Width && Col < Width)
        d_P[Row * Width + Col] = Pvalue;
}
// Reference CPU matrix multiply: C = A * B, row-major.
// Caller guarantees numAColumns == numBRows, numCRows == numARows and
// numCColumns == numBColumns.
void MatrixMulOnHost(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
    for (int row = 0; row < numARows; ++row) {
        for (int col = 0; col < numBColumns; ++col) {
            float acc = 0;
            for (int k = 0; k < numAColumns; ++k)
                acc += A[row * numAColumns + k] * B[k * numBColumns + col];
            C[row * numCColumns + col] = acc;
        }
    }
}
// Host driver: builds random A (numARows x numAColumns) and B (numBRows x
// numBColumns), multiplies them on the GPU with matrixMultiply, reports kernel
// time/GFLOPS and returns 0.  Exits via exit(-1) on dimension mismatch or any
// CUDA error.  'optimzed'/'define' only select the banner text (kept for
// interface compatibility with existing callers).
// Fixed: the kernel was previously launched with HOST pointers (the device
// allocations and copies were commented out), and the reported time divided by
// a 'total_time' that was never set (always 0) — timing now uses CUDA events.
int MatrixMulti(int numARows, int numAColumns, int numBRows, int numBColumns, int blockx, int blocky, bool optimzed, bool define=false)
{
if(!optimzed)
printf("NAIVE MATRIX MULTIPLICATION\n");
else if(define)
printf("Optimzed MATRIX MULTIPLICATION with static shared memory allocation\n");
else
printf("Optimzed MATRIX MULTIPLICATION with static dynamic memory allocation\n");
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numCRows = numARows;       // C inherits A's row count
int numCColumns = numBColumns; // C inherits B's column count
int sizeA = numARows*numAColumns*sizeof(float);
int sizeB = numBRows*numBColumns*sizeof(float);
int sizeC = numCRows*numCColumns*sizeof(float);
if(numAColumns != numBRows)
{
cout<<"Error in inputs dimension! A columns != B rows"<<endl;
exit(-1);
}
// Host allocations and random initialisation.
hostA = (float*)malloc(sizeA);
hostB = (float*)malloc(sizeB);
hostC = (float*)malloc(sizeC);
RandomInit(hostA, numARows*numAColumns);
RandomInit(hostB, numBRows*numBColumns);
cout<<"The dimensions of A are "<<numARows<<" x "<<numAColumns<<endl;
cout<<"The dimensions of B are "<<numBRows<<" x "<<numBColumns<<endl;
// Device allocations and input upload.
checkCudaErrors(cudaMalloc(&deviceA, sizeA));
checkCudaErrors(cudaMalloc(&deviceB, sizeB));
checkCudaErrors(cudaMalloc(&deviceC, sizeC));
checkCudaErrors(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
dim3 dimBlock(blockx, blocky);
dim3 dimGrid((numCColumns+blockx-1)/blockx, (numCRows+blocky-1)/blocky);
// Time the kernel with CUDA events.
cudaEvent_t evStart, evStop;
checkCudaErrors(cudaEventCreate(&evStart));
checkCudaErrors(cudaEventCreate(&evStop));
checkCudaErrors(cudaEventRecord(evStart));
matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numAColumns);
getLastCudaError("kernel launch failure");
checkCudaErrors(cudaEventRecord(evStop));
checkCudaErrors(cudaEventSynchronize(evStop));
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
float msec = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msec, evStart, evStop));
double dSeconds = msec / 1000.0;
double dNumOps = 2.0 * (double)numARows * (double)numAColumns * (double)numBColumns;
double gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Download the result so hostC holds the product.
checkCudaErrors(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
// Cleanup.
checkCudaErrors(cudaEventDestroy(evStart));
checkCudaErrors(cudaEventDestroy(evStop));
checkCudaErrors(cudaFree(deviceA));
checkCudaErrors(cudaFree(deviceB));
checkCudaErrors(cudaFree(deviceC));
free(hostA);
free(hostB);
free(hostC);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point.  Expects six CLI arguments:
//   ARows ACols BRows BCols blockX blockY
int main(int argc,char *argv[])
{
// Fixed: the original tested argc < 6 but reads argv[6], which requires
// seven argv entries (program name + six arguments).
if(argc < 7)
printf("Unsuffcient number of arguments!\n");
else
{
MatrixMulti(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6]), false);
}
}
|
13,642 | #include <stdio.h>
#define N 256
#define THREADS_PER_BLOCK 256
//Test
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic (and optionally exit) when a CUDA call returns
// an error.  Invoked through the gpuErrchk(...) macro above, which supplies
// __FILE__ / __LINE__ at the call site.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Computes the standard deviation ("volatility") of the log returns of `price`
// entirely inside shared memory, printing the result from thread 0.
// The shared buffer is 256 slots and is indexed by the *global* thread index,
// so this kernel only works when launched as a single block with n <= 256
// (which is how main() launches it).
__global__ void add(double *price,int n) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ double returns[256];
//Calculate Log Returns: returns[i] = log(price[i]) - log(price[i-1]); slot 0 stays 0.
double logRt = 0.0;
if(index == 0) {
returns[0] = 0.0;
}else if(index < n) {
logRt = log(price[index]) - log(price[index-1]);
returns[index] = logRt;
}
__syncthreads();
//find average of returns
// Up-sweep tree reduction: after the loop, returns[n-1] holds the sum of all
// slots.  The loop bounds depend only on n (uniform across threads), so every
// thread reaches the __syncthreads() inside the loop the same number of times.
int idx = 2;
int back = 1;
while(idx <= (n+1)) {
if((index+1) % idx == 0) {
returns[index] = returns[index] + returns[index-back];
}
idx = idx * 2;
back = back * 2;
__syncthreads();
}
__syncthreads();
float ravg = returns[n-1]/n;
// Squared deviation of this thread's own return from the mean.
float rdiffSq = (logRt - ravg) * (logRt - ravg);
__syncthreads();
returns[index] = rdiffSq;
__syncthreads();
// Second up-sweep: sum of squared deviations accumulates into returns[n-1].
idx = 2;
back = 1;
while(idx <= (n + 1)) {
if((index+1) % idx == 0) {
returns[index] = returns[index] + returns[index-back];
}
idx = idx * 2;
back = back * 2;
__syncthreads();
}
__syncthreads();
if(index == 0) {
// NOTE(review): there are n-1 actual returns (slot 0 is a fixed 0), so the
// n-2 denominator looks like sample variance over n-1 values — confirm the
// intended degrees of freedom.
float vol = returns[n-1]/(n-2);
float sd = sqrt(vol);
printf("SD %f Volatility %f\n",sd,vol);
}
}
// Host driver: fills a price series 1..N, uploads it and runs the volatility
// kernel (which prints its own result).
int main(void) {
    double *hostPrice;
    double *d_price;
    const int bytes = N * sizeof(double);
    cudaMalloc((void **)&d_price, bytes);
    hostPrice = (double *)malloc(bytes);
    for (int i = 0; i < N; ++i)
        hostPrice[i] = i + 1;
    cudaMemcpy(d_price, hostPrice, bytes, cudaMemcpyHostToDevice);
    // N == THREADS_PER_BLOCK here, so this is a single-block launch.
    add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_price, N);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    free(hostPrice);
    cudaFree(d_price);
    return 0;
}
|
13,643 | extern "C"
// Element-wise in-place accumulation: accumulator[i] += addition[i]
// for every i < length; one thread per element, guarded for the grid tail.
__global__ void accumulationKernel (int length, float *accumulator, float *addition)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= length)
        return;
    accumulator[i] += addition[i];
}
13,644 | #include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// c = a + b element-wise; one thread per element with a tail guard.
__global__ void vector_add(int *a, int *b, int *c, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
// Fill both arrays with pseudo-random ints in [0, 100).
void init_vector(int *a, int *b, int n) {
    for (int idx = 0; idx < n; ++idx) {
        a[idx] = rand() % 100;
        b[idx] = rand() % 100;
    }
}
// Assert that c is the element-wise sum of a and b (aborts on mismatch).
void check_answer(int *a, int *b, int *c, int n) {
    for (int idx = 0; idx < n; ++idx)
        assert(c[idx] == a[idx] + b[idx]);
}
// Unified-memory vector-add demo: allocate managed buffers, prefetch them to
// the device, launch the kernel, then verify on the host.
int main() {
// Fixed: cudaGetDevice's return value (an error code) was previously
// assigned over the device id it had just written.
int id = 0;
cudaGetDevice(&id);
int n = 1 << 16; // Number of elements per array
size_t bytes = sizeof(int) * n; // Size of each array in bytes
int *a, *b, *c; // Unified memory pointers
// Fixed: the original also malloc'ed host buffers into these same pointers
// and then leaked them when cudaMallocManaged overwrote the pointers.
cudaMallocManaged(&a, bytes);
cudaMallocManaged(&b, bytes);
cudaMallocManaged(&c, bytes);
// Initialize vectors
init_vector(a, b, n);
// Set up threads
int BLOCK_SIZE = 256; // threadblock size
// Fixed: integer ceil-division instead of ceil() applied to an already
// truncated integer quotient.
int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
// Pre-fetch 'a' and 'b' to the device before the launch.
cudaMemPrefetchAsync(a, bytes, id);
cudaMemPrefetchAsync(b, bytes, id);
vector_add<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, n);
// Wait for the kernel before touching the results.
cudaDeviceSynchronize();
// Pre-fetch 'c' back to the host for the verification pass.
cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
// Check result
check_answer(a, b, c, n);
// Fixed: release the managed allocations (originally leaked).
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
13,645 | //Universidad del Valle de Guatemala
//Proyecto 4 - Calculo de Pi con Wallis y Nilakantha
//Programacion de Microprocesadores
//Integrantes:
//Bryann Alfaro 19372
//Diego Arredondo 19422
//Donaldo Garcia 19683
//Raul Jimenez 19017
//Diego Alvarez 19498
#include <stdio.h>
#include <cuda_runtime.h>
// Computes per-index Wallis-product factors into convergencia[]; the host
// multiplies convergencia[1..limite] together to approximate pi/2.
// Fixed: thread i == 0 previously passed the (i-1) <= limite test and read
// vectorN[-1], an out-of-bounds access.  Index 0 is never consumed by the
// host (its product loop starts at j = 1), so skipping it preserves output.
__global__ void serieWallis(float *convergencia, int *vectorN, int limite)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float operacion;
float operacion2;
if(i >= 1 && (i-1) <= limite){
operacion = (2.0f*(vectorN[i-1]))/((2.0f*(vectorN[i-1]))-1.0f);
operacion2 = (2.0f*(i))/((2.0f*(i))+1.0f);
convergencia[i]=operacion*operacion2;
}
}
// Nilakantha series terms: vector_2[k] = +/- 4 / (m*(m+1)*(m+2)) with
// m = 2*(k+1); terms with even (k+1) are negated.  The term is mirrored into
// vector_suma, which the host sums and adds to 3 to approximate pi.
// Fixed: removed the '#include <math.h>' that sat *inside* the function body —
// a header cannot be included at function scope in C++, and no math-library
// function is used here anyway.
// NOTE(review): only threadIdx.x is used, so indices repeat across blocks;
// each repeated thread just rewrites the same values.  n is unused.
__global__ void nila(double *vector_2, double *vector_suma, int n)
{
// identificador de hilo (thread id within the block)
int myID = threadIdx.x;
int myid2 = (threadIdx.x +1)*2.f;
if((threadIdx.x +1)%2 == 0)
{
vector_2[myID] = (4.f/(myid2*(myid2+1.f)*(myid2+2.f)))*-1.f;
}else{
vector_2[myID] = 4.f/(myid2*(myid2+1.f)*(myid2+2.f));
}
// escritura de resultados (write the result out)
vector_suma[myID] = vector_2[myID];
}
// Driver: approximates pi two ways concurrently — the Wallis product on
// stream1 and the Nilakantha series on stream2 — timing each kernel with
// events and printing both results.
int main(void){
// Streams, one per series.
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
int valorN =1000;
// Stream 1 (Wallis) buffers.
float result=1.0f;
size_t sizef = 10*valorN* sizeof(float);
size_t sizei = 10*valorN* sizeof(int);
// Stream 2 (Nilakantha) buffers.
double *hst_vector2, *hst_resultado;
double *dev_vector2, *dev_resultado;
int *host_vectorN= (int *)malloc(sizei);
float *host_vectorValor= (float *)malloc(sizef);
int *d_vectorN = NULL;
cudaMalloc((void **)&d_vectorN,sizei);
float *d_vectorValor = NULL;
cudaMalloc((void **)&d_vectorValor, sizef);
hst_vector2 = (double*)malloc(valorN * sizeof(double));
hst_resultado = (double*)malloc(valorN * sizeof(double));
cudaMalloc((void**)&dev_vector2, valorN * sizeof(double));
cudaMalloc((void**)&dev_resultado, valorN * sizeof(double));
// Fill 1..valorN and zero the Nilakantha scratch buffer.
for(int i = 1; i <= valorN; i++)
{
host_vectorN[i-1]=i;
// Fixed: was hst_vector2[i], which wrote one element past the end at
// i == valorN (buffer has valorN doubles).
hst_vector2[i-1] = 0;
}
// Timing events (shared between the two timed sections).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int threadsPerBlock = 1000;
int blocksPerGrid = (valorN + threadsPerBlock - 1) / threadsPerBlock;
// ---- Stream 1: Wallis ----
cudaMemcpyAsync(d_vectorN, host_vectorN,sizei, cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(d_vectorValor, host_vectorValor, sizef, cudaMemcpyHostToDevice, stream1);
cudaEventRecord(start);
serieWallis<<<blocksPerGrid+1,threadsPerBlock, 0, stream1>>>(d_vectorValor, d_vectorN, valorN);
cudaEventRecord(stop);
cudaMemcpyAsync(host_vectorValor, d_vectorValor, sizef, cudaMemcpyDeviceToHost, stream1);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// ---- Stream 2: Nilakantha ----
cudaMemcpyAsync(dev_vector2, hst_vector2, valorN * sizeof(double), cudaMemcpyHostToDevice, stream2);
// Fixed: removed a bogus pre-kernel copy whose pointer arguments were
// device->host but whose direction flag said cudaMemcpyHostToDevice, and
// which read dev_resultado before the kernel had produced anything.
cudaEventRecord(start);
nila <<< blocksPerGrid+1,threadsPerBlock, 0, stream2 >>>(dev_vector2, dev_resultado, valorN);
cudaEventRecord(stop);
cudaMemcpyAsync(hst_resultado, dev_resultado, valorN * sizeof(double), cudaMemcpyDeviceToHost, stream2);
cudaEventSynchronize(stop);
float milliseconds2 = 0;
cudaEventElapsedTime(&milliseconds2, start, stop);
// Make sure both streams (including the D2H copies) are finished.
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
// ---- Stream 1 report ----
printf("--------Stream 1--------\n");
printf("Milisegundos: %.5f\n",milliseconds);
printf("Segundos: %.5f\n",milliseconds/1000);
for(int j=1;j<=valorN;j++){
result*=(host_vectorValor[j]);
}
printf("\nPi con la serie de Wallis: %.16f\n\n",result*2);
// ---- Stream 2 report ----
printf("--------Stream 2--------\n");
printf("Milisegundos: %.5f\n",milliseconds2);
printf("Segundos: %.5f\n",milliseconds2/1000);
double suma = 0;
for (int i = 0; i < valorN; i++)
{
suma += hst_resultado[i]*1.f;
}
printf("\nPi con la serie de Nilakantha: %.16f ", suma+3.f);
printf("\n");
// Cleanup (fixed: events and the stream-2 buffers were originally leaked).
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(host_vectorN);
free(host_vectorValor);
free(hst_vector2);
free(hst_resultado);
cudaFree(d_vectorN);
cudaFree(d_vectorValor);
cudaFree(dev_vector2);
cudaFree(dev_resultado);
}
13,646 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Read n whitespace-separated ints from file s into a freshly malloc'ed array
// (caller owns and must free it).  Exits with a message if the file cannot
// be opened.
// Fixed: the original never checked fopen's result (NULL deref on a missing
// file) and never closed the stream (handle leak).
int * myloadFile(int n,char *s){
FILE *f=fopen(s,"r");
if(f==NULL)
{
fprintf(stderr,"cannot open %s\n",s);
exit(1);
}
int i;
int *a=(int *)malloc(sizeof(int)*n);
for(i=0;i<n;i++)
{
int x;
fscanf(f,"%d",&x);
a[i]=x;
}
fclose(f);
return a;
}
// Print each element of a as "index value", one pair per line.
void display(int *a,int n){
    for (int idx = 0; idx < n; ++idx)
        printf("%d %d \n", idx, a[idx]);
}
// C = A + B element-wise; one thread per element with a tail guard.
__global__ void addKernel(int* A, int* B, int* C, int N)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    C[gid] = A[gid] + B[gid];
}
// Debug kernel: each thread prints its global index plus block/thread ids.
// A, B, C and N are accepted only to match addKernel's signature; none is used.
__global__ void displayDemo(int* A, int* B, int* C, int N)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    printf("%d (blockId:%d,Thread Id:%d)\n", gid, blockIdx.x, threadIdx.x);
}
// Loads two int vectors from input1.txt/input2.txt, uploads them, runs the
// demo kernel and copies the (unmodified) result buffer back.
int main()
{
int N =4096;
size_t size = N * sizeof(int);
int* h_A = myloadFile(N,"input1.txt");
int* h_B = myloadFile(N,"input2.txt");
int* h_C = (int*)malloc(size);
int* d_A;
cudaMalloc(&d_A, size);
int* d_B;
cudaMalloc(&d_B, size);
int* d_C;
cudaMalloc(&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(N + threadsPerBlock - 1) / threadsPerBlock;
// NOTE(review): this launches displayDemo (an index printout) rather than
// addKernel, so d_C is never written and h_C below receives whatever was in
// device memory — kept as-is; confirm whether addKernel was intended here.
displayDemo<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//display(h_C,N);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Fixed: release the host buffers (originally leaked).
free(h_A);
free(h_B);
free(h_C);
}
|
13,647 | // to avoid highlight problems
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h> // imported for rand() which generates a number between 0 & RAND_MAX
#include <time.h> // imported for the time() function and also the clock function
#include <limits> // for a large value
#include <cmath> // for exponentiation
using namespace std;
// For each point, find the index of the nearest *other* point using squared
// 2D distance, store it in closestPoint[i], then print every (i, closest) pair.
// Only x and y are compared — z is ignored, matching the original behaviour
// (NOTE(review): confirm whether full 3D distance was intended).
// Fixed: distances were tracked in ints, truncating pow()'s double result;
// float accumulation keeps fractional distances and avoids the double-pow
// round trip per comparison.
void FindClosestPoint(float3 *points, const int numberPoints, int *closestPoint)
{
    for (int i = 0; i < numberPoints; i++)
    {
        float bestDistSq = std::numeric_limits<float>::max();
        for (int j = 0; j < numberPoints; j++)
            if (i != j) // don't compare a point against itself
            {
                float dx = points[i].x - points[j].x;
                float dy = points[i].y - points[j].y;
                float distSq = dx * dx + dy * dy;
                if (distSq < bestDistSq)
                {
                    bestDistSq = distSq;
                    closestPoint[i] = j;
                }
            }
    }
    // display the closest points
    std::cout << "The closest points :" << std::endl;
    for (int i = 0; i < numberPoints; i++)
        std::cout << i << "\t" << closestPoint[i] << std::endl;
}
// CPU-only driver: generate random 3D points, echo them, then time the
// brute-force closest-point search.
int main()
{
    srand(time(NULL)); // seed the PRNG once
    const int numberPoints = 1000;
    clock_t startTime, endTime;
    float3 *points = new float3[numberPoints];
    int *closestPoint = new int[numberPoints];
    // Random integer coordinates in [0, 1000) — x, y, z in that order so the
    // rand() sequence matches element by element.
    for (int idx = 0; idx < numberPoints; idx++)
    {
        points[idx].x = rand() % 1000;
        points[idx].y = rand() % 1000;
        points[idx].z = rand() % 1000;
    }
    // Echo the generated points.
    for (int idx = 0; idx < numberPoints; idx++)
        cout << points[idx].x << "\t" << points[idx].y << "\t" << points[idx].z << endl;
    cout << endl;
    // Time the search itself.
    startTime = clock();
    FindClosestPoint(points, numberPoints, closestPoint);
    endTime = clock() - startTime;
    cout << "Time it took was " << ((float) endTime / CLOCKS_PER_SEC) << endl;
}
|
13,648 | //make sure numbers above match the matlab script
__constant__ int num_row;
__constant__ int num_col;
// Element-wise matrix add c = a + b, addressed with the __constant__ num_row
// (and num_col in the commented variant) that the host must set beforehand.
// NOTE(review): there is no bounds check — the 2D launch must cover exactly
// num_row x num_col elements; confirm against the host launch configuration.
__global__ void matrix_addition (int* a, int* b, int* c)//each block calculates a row
{
// 2D coordinates of this thread's element.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
//c[x * num_col + y] = a[x * num_col + y] + b[x * num_col + y];
// Flat index with x varying fastest within a column of length num_row.
c[y * num_row + x] = a[y * num_row + x] + b[y * num_row + x];
}
|
13,649 | #include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <stdio.h> // JDB -- should not be needed
#include <chrono>
using namespace std;
#define num_data_type 10
//#define INPUT_FILE "./input_file.csv"
#define INPUT_FILE "./input/nla.csv"
//#define INPUT_FILE "./input/go_track_trackspoints.csv"
typedef std::chrono::high_resolution_clock Clock;
int D[4][256];
uint8_t E[4][256];
// Register one DFA transition in the global table: state --input--> next_state.
void add_transition (int state, uint8_t input, int next_state) {
    D[state][input] = next_state;
}
// Point every possible input byte of `state` at `next_state`
// (a catch-all that specific add_transition calls can then override).
void add_default_transition(int state, int next_state) {
    for (int c = 0; c < 256; ++c)
        D[state][c] = next_state;
}
// Register one emission: DFA in `state` reading `input` emits `value`.
void add_emission(int state, uint8_t input, uint8_t value) {
    E[state][input] = value;
}
// Set the emission for every input byte of `state` to `value`
// (a catch-all that specific add_emission calls can then override).
void add_default_emission(int state, uint8_t value) {
    for (int c = 0; c < 256; ++c)
        E[state][c] = value;
}
// Build the transition table D for a 4-state bracket scanner:
//   0 = outside [...]    1 = inside [...]
//   2 = escape inside    3 = escape outside
// Defaults first (0 and 1 self-loop; both escape states fall back to their
// base state on any byte), then the specific bracket/backslash transitions.
void Dtable_generate(void) {
for (int i = 0; i < 2; i++) {
add_default_transition(i ,i);
}
add_default_transition(2 , 1); // escaped byte consumed -> back inside brackets
add_default_transition(3, 0); // escaped byte consumed -> back outside
add_transition(0, '[', 1);
add_transition(0, '\\', 3);
add_transition(1, '\\', 2);
add_transition(1, ']', 0);
}
// Build the emission table E: emit 1 only for a ',' seen in state 0
// (outside brackets and not escaped); everything else emits 0.
void Etable_generate(void) {
    for (int s = 0; s < 3; ++s)
        add_default_emission(s, 0);
    add_emission(0, ',', 1);
}
// Run the D/E automaton over s, writing the per-character emission (1 at
// commas recognised in the base state) into comma_indices, which must have
// room for s.length() entries.
// Fixed: the tables were indexed with a plain char, which is negative for
// bytes >= 0x80 on signed-char platforms and indexed D/E out of bounds;
// the byte is now forced through uint8_t.
void parsedata (string s, uint8_t* comma_indices) {
//default states
int state = 0;
int emission = 0;
for (int i = 0; i < (int)s.length(); i++) {
uint8_t ch = (uint8_t)s[i];
emission = E[state][ch];
state = D[state][ch];
comma_indices[i] = emission;
}
}
// Scan INPUT_FILE once and return the length of its longest line.
const int max_length(){
    std::ifstream is(INPUT_FILE); // open file
    string line;
    size_t best = 0;
    while (getline(is, line)) {
        if (line.length() > best)
            best = line.length();
    }
    is.close();
    return (int)best;
}
// In-place sequential inclusive prefix sum over comma_indices[0..num_char).
// Note: uint8_t arithmetic wraps modulo 256.
void seq_scan(uint8_t* comma_indices, int num_char) {
    for (int i = 1; i < num_char; ++i)
        comma_indices[i] += comma_indices[i - 1];
}
// Scan INPUT_FILE line by line with an inline 3-state scanner and collect,
// per line, the indices of commas that occur outside {...} blocks.
// States: 0 = outside braces, 1 = inside {...}, 2 = just saw '\' inside braces.
// Prints the elapsed scan time in microseconds.
int main() {
auto t1 = Clock::now();
std::vector<std::vector<int>> output_vec;
std::ifstream is(INPUT_FILE);
string line;
int state = 0;
while(getline(is, line)) {
std::vector<int> temp_vec;
int len = line.length();
for(int i = 0; i < len; i++){
// Commas only count in the base (outside-braces) state.
if(state == 0 && line[i] == ',') {
temp_vec.push_back(i);
}
char c = line[i];
if(state == 0) {
if(c == '{')
state = 1;
else
state = 0;
}
else if(state == 1){
if( c == '}')
state = 0;
else if (c == '\\')
state = 2;
else
state = 1;
}
else {
// Fixed: the escape state previously transitioned to itself forever, so a
// single backslash disabled '}' detection for the rest of the input.  After
// consuming the escaped character we return to the inside-braces state,
// mirroring add_default_transition(2, 1) in the table-driven version above.
state = 1;
}
}
output_vec.push_back(temp_vec);
}
auto t2 = Clock::now();
cout << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() << " microseconds" << endl;
return 0;
}
|
13,650 | // This file is derived from the code at
// http://www.jcornwall.me.uk/2009/04/mersenne-twisters-in-cuda/
// which in turn is derived from the NVIDIA CUDA SDK example 'MersenneTwister'.
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// Some parts contain code from Makoto Matsumoto and Takuji Nishimura's dci.h
/* Copyright (C) 2001-2006 Makoto Matsumoto and Takuji Nishimura. */
/* This library is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public */
/* License as published by the Free Software Foundation; either */
/* version 2 of the License, or (at your option) any later */
/* version. */
/* This library is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */
/* See the GNU Library General Public License for more details. */
/* You should have received a copy of the GNU Library General */
/* Public License along with this library; if not, write to the */
/* Free Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA */
/* 02111-1307 USA */
//#include <cassert>
//#include <cstdio>
//#include <vector>
#define MT_MM 9
#define MT_NN 19
#define MT_WMASK 0xFFFFFFFFU
#define MT_UMASK 0xFFFFFFFEU
#define MT_LMASK 0x1U
#define MT_RNG_COUNT 32768
#define MT_SHIFT0 12
#define MT_SHIFTB 7
#define MT_SHIFTC 15
#define MT_SHIFT1 18
// Record format for MersenneTwister.dat, created by spawnTwisters.c
// size = 16 bytes
// One offline-generated parameter set per twister (loaded into MT[] from
// MersenneTwister.dat; see file header).
struct __align__(16) mt_struct_stripped {
unsigned int matrix_a; // twist-matrix parameter used in the recurrence
unsigned int mask_b; // tempering mask applied after SHIFTB
unsigned int mask_c; // tempering mask applied after SHIFTC
unsigned int seed; // initial seed for this twister's state
};
// Per-thread state object for a single twister.
// size = 84 bytes but aligned = 96 bytes
struct __align__(16) MersenneTwisterState {
unsigned int mt[MT_NN]; // circular state buffer of MT_NN words
int iState; // current read/write position in mt[]
unsigned int mti1; // cached copy of the next state word (mt[iState])
};
// Preloaded, offline-generated seed data structure.
__device__ static mt_struct_stripped MT[MT_RNG_COUNT];
// Hold the current states of the twisters
__device__ static MersenneTwisterState MTS[MT_RNG_COUNT];
// Seed the state buffer of twister `threadID` from its offline-generated seed
// in MT[], using the standard MT initialisation recurrence (constant
// 1812433253), then reset the circular-buffer position and cached word.
__device__ void MersenneTwisterInitialise(MersenneTwisterState *state, unsigned int threadID) {
state->mt[0] = MT[threadID].seed;
for(int i = 1; i < MT_NN; ++ i) {
state->mt[i] = (1812433253U * (state->mt[i - 1] ^ (state->mt[i - 1] >> 30)) + i) & MT_WMASK;
}
state->iState = 0;
state->mti1 = state->mt[0];
}
// Advance twister `threadID` one step and return the next tempered 32-bit
// value.  Uses the per-twister parameters in MT[threadID] and mutates *state.
__device__ unsigned int MersenneTwisterGenerate(MersenneTwisterState *state, unsigned int threadID) {
// Positions of the next word and the word MT_MM ahead, wrapped circularly.
int iState1 = state->iState + 1;
int iStateM = state->iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
unsigned int mti = state->mti1;
state->mti1 = state->mt[iState1];
unsigned int mtiM = state->mt[iStateM];
// MT recurrence: combine the upper bits of mt[i] with the lower bits of
// mt[i+1], shift, and conditionally XOR the twist matrix on the low bit.
unsigned int x = (mti & MT_UMASK) | (state->mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? MT[threadID].matrix_a : 0);
state->mt[state->iState] = x;
state->iState = iState1;
// Tempering transformation.
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & MT[threadID].mask_b;
x ^= (x << MT_SHIFTC) & MT[threadID].mask_c;
x ^= (x >> MT_SHIFT1);
return x;
}
// One thread per twister: seed MTS[tid] from its parameter record in MT[].
// NOTE(review): no bounds guard — the launch must supply exactly MT_RNG_COUNT
// threads; confirm against the host launch configuration.
__global__ void InitialiseAllMersenneTwisters() {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    MersenneTwisterInitialise(&MTS[tid], tid);
}
|
13,651 | #include <stdio.h>
// Demo: allocate matching host/device buffers, zero the device buffer with
// cudaMemset, copy it back and print the (all-zero) contents.
int main()
{
int dimx = 16;
int num_bytes = dimx*sizeof(int);
int *d_a=0, *h_a=0; // device and host pointers
h_a = (int*)malloc(num_bytes); //allocate mem on CPU side
cudaMalloc( (void**)&d_a, num_bytes); //allocate mem on GPU side
// did the mem allocation work?
if(0==h_a || 0==d_a)
{
// Fixed: typo in the message, and release whichever allocation did succeed
// (both were previously leaked on this path; free(NULL)/cudaFree(0) are no-ops).
printf("couldn't allocate memory\n");
free(h_a);
cudaFree(d_a);
return 1;
}
cudaMemset( d_a, 0, num_bytes); //set the GPU memory to 0
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost); //cp from GPU to CPU the pointer values
for(int i=0;i<dimx;i++)
printf("%d",h_a[i]);
printf("\n");
free(h_a); //free the CPU mem
cudaFree(d_a); //free the GPU mem
return 0;
}
|
13,652 | #include<stdio.h>
// Trivial demonstration kernel: every launched thread prints once.
// Device printf is serialized and intended for debugging/demos only.
__global__ void paral()
{
printf("paral\n");
}
int main()
{
// 5 blocks x 5 threads => 25 prints.
paral<<<5,5>>>();
// Device printf output is buffered; synchronize so it is flushed
// before the process exits.
cudaDeviceSynchronize();
}
|
13,653 | #include <iostream>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
#define BLOCK_SIZE 128
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Node of the hierarchical-clustering tree built in main().
// Leaves have left == right == nullptr; internal nodes own two children.
struct Tree {
int num_nodes;          // number of leaves contained in this subtree
Tree *left;
Tree *right;
float total_length;     // height of this node (half the merge distance)
float branch_length[2]; // edge lengths to left child [0] and right child [1]
// All fields are caller-supplied; no validation is performed.
Tree(int _num_nodes, float _length, Tree *_left, Tree *_right, float length1,
float length2)
: num_nodes(_num_nodes), left(_left), right(_right),
total_length(_length) {
branch_length[0] = length1;
branch_length[1] = length2;
}
};
// Block-wise minimum reduction with 8-way unrolling: each thread loads up
// to 8 elements (BLOCK_SIZE apart), zeros are treated as "no distance" and
// mapped to INFINITY, then a shared-memory tree reduction finds the block
// minimum and its index. One (value, index) pair is written per block.
// If input_idx is non-null it maps local indices back to original indices
// (used for the second reduction level).
// NOTE(review): the volatile warp-unrolled tail relies on pre-Volta
// implicit warp synchrony; on newer architectures __syncwarp()/shuffles
// would be the safe idiom — confirm target compute capability.
// NOTE(review): threads with i >= n keep min_idx = i (out of range); this
// only reaches the output if every candidate is INFINITY — verify callers
// never hit that case.
__global__ void getMin(float *input, int *input_idx, int n, float *output_val,
int *output_idx) {
__shared__ float smem_val[BLOCK_SIZE];
__shared__ int smem_idx[BLOCK_SIZE];
int tx = threadIdx.x;
int bx = blockIdx.x;
int i = tx + bx * BLOCK_SIZE * 8; // first of up to 8 strided loads
float min_val = INFINITY;
int min_idx = i;
if (i < n) {
float a1, a2, a3, a4, a5, a6, a7, a8;
// Load 8 candidates; out-of-range or zero entries become INFINITY
// (zero marks an already-merged / diagonal entry in the matrix).
a1 = input[i];
a1 = (a1 != 0.0f) ? a1 : INFINITY;
a2 = (i + BLOCK_SIZE) < n ? input[i + BLOCK_SIZE] : INFINITY;
a2 = (a2 != 0.0f) ? a2 : INFINITY;
a3 = (i + 2 * BLOCK_SIZE) < n ? input[i + 2 * BLOCK_SIZE] : INFINITY;
a3 = (a3 != 0.0f) ? a3 : INFINITY;
a4 = (i + 3 * BLOCK_SIZE) < n ? input[i + 3 * BLOCK_SIZE] : INFINITY;
a4 = (a4 != 0.0f) ? a4 : INFINITY;
a5 = (i + 4 * BLOCK_SIZE) < n ? input[i + 4 * BLOCK_SIZE] : INFINITY;
a5 = (a5 != 0.0f) ? a5 : INFINITY;
a6 = (i + 5 * BLOCK_SIZE) < n ? input[i + 5 * BLOCK_SIZE] : INFINITY;
a6 = (a6 != 0.0f) ? a6 : INFINITY;
a7 = (i + 6 * BLOCK_SIZE) < n ? input[i + 6 * BLOCK_SIZE] : INFINITY;
a7 = (a7 != 0.0f) ? a7 : INFINITY;
a8 = (i + 7 * BLOCK_SIZE) < n ? input[i + 7 * BLOCK_SIZE] : INFINITY;
a8 = (a8 != 0.0f) ? a8 : INFINITY;
// Sequential min over the 8 candidates, tracking the winning index.
min_val = a1;
min_idx = i;
if (a2 < min_val) {
min_val = a2;
min_idx = i + BLOCK_SIZE;
}
if (a3 < min_val) {
min_val = a3;
min_idx = i + 2 * BLOCK_SIZE;
}
if (a4 < min_val) {
min_val = a4;
min_idx = i + 3 * BLOCK_SIZE;
}
if (a5 < min_val) {
min_val = a5;
min_idx = i + 4 * BLOCK_SIZE;
}
if (a6 < min_val) {
min_val = a6;
min_idx = i + 5 * BLOCK_SIZE;
}
if (a7 < min_val) {
min_val = a7;
min_idx = i + 6 * BLOCK_SIZE;
}
if (a8 < min_val) {
min_val = a8;
min_idx = i + 7 * BLOCK_SIZE;
}
}
smem_val[tx] = min_val;
smem_idx[tx] = min_idx;
__syncthreads();
// in-place reduction in shared memory
// (each stage is guarded by blockDim.x so smaller blocks skip it)
if (blockDim.x >= 1024 && tx < 512 && smem_val[tx + 512] < smem_val[tx]) {
smem_val[tx] = min_val = smem_val[tx + 512];
smem_idx[tx] = min_idx = smem_idx[tx + 512];
}
__syncthreads();
if (blockDim.x >= 512 && tx < 256 && smem_val[tx + 256] < smem_val[tx]) {
smem_val[tx] = min_val = smem_val[tx + 256];
smem_idx[tx] = min_idx = smem_idx[tx + 256];
}
__syncthreads();
if (blockDim.x >= 256 && tx < 128 && smem_val[tx + 128] < smem_val[tx]) {
smem_val[tx] = min_val = smem_val[tx + 128];
smem_idx[tx] = min_idx = smem_idx[tx + 128];
}
__syncthreads();
if (blockDim.x >= 128 && tx < 64 && smem_val[tx + 64] < smem_val[tx]) {
smem_val[tx] = min_val = smem_val[tx + 64];
smem_idx[tx] = min_idx = smem_idx[tx + 64];
}
__syncthreads();
// unrolling warp (volatile forces shared-memory re-reads at each step)
if (tx < 32) {
volatile float *vsmem_val = smem_val;
volatile int *vsmem_idx = smem_idx;
if (vsmem_val[tx + 32] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 32];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 32];
}
if (vsmem_val[tx + 16] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 16];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 16];
}
if (vsmem_val[tx + 8] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 8];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 8];
}
if (vsmem_val[tx + 4] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 4];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 4];
}
if (vsmem_val[tx + 2] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 2];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 2];
}
if (vsmem_val[tx + 1] < vsmem_val[tx]) {
vsmem_val[tx] = min_val = vsmem_val[tx + 1];
vsmem_idx[tx] = min_idx = vsmem_idx[tx + 1];
}
}
// Thread 0 publishes the block result; indices are translated through
// input_idx when this is a second-level reduction pass.
if (tx == 0) {
output_val[bx] = min_val;
output_idx[bx] = (input_idx == nullptr) ? min_idx : input_idx[min_idx];
}
}
/*
void update(float *mat, int n, int idx1, int idx2, int num_nodes1,
int num_nodes2) {
int total_nodes = num_nodes1 + num_nodes2;
for (int i = 0; i < n; ++i) {
float val =
(mat[n * idx1 + i] * num_nodes1 + mat[n * idx2 + i] * num_nodes2) /
total_nodes;
mat[n * idx1 + i] = val;
mat[n * idx2 + i] = 0.0f;
mat[n * i + idx1] = val;
mat[n * i + idx2] = 0.0f;
}
mat[n * idx1 + idx1] = 0.0f;
mat[n * idx2 + idx1] = 0.0f;
mat[n * idx1 + idx2] = 0.0f;
mat[n * idx2 + idx2] = 0.0f;
}*/
// Merge cluster idx2 into cluster idx1 in the n x n distance matrix:
// row/column idx1 becomes the size-weighted average of the two clusters
// and row/column idx2 is zeroed. One thread handles one column; launch
// with at least n threads in total.
__global__ void update(float *mat, int n, int idx1, int idx2, int num_nodes1,
                       int num_nodes2) {
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (col >= n || col == idx1) {
    return; // out of range, or the merged row itself (handled below/diagonal)
  }
  if (col == idx2) {
    // Distance between the two merged clusters is cleared on both sides.
    mat[n * idx1 + col] = 0.0f;
    mat[n * col + idx1] = 0.0f;
    return;
  }
  const int total_nodes = num_nodes1 + num_nodes2;
  // Weighted average of the two old distances, weights = cluster sizes.
  const float merged =
      (mat[n * idx1 + col] * num_nodes1 + mat[n * idx2 + col] * num_nodes2) /
      total_nodes;
  mat[n * idx1 + col] = merged;
  mat[n * idx2 + col] = 0.0f;
  mat[n * col + idx1] = merged;
  mat[n * col + idx2] = 0.0f;
}
// Recursively free an entire tree.
// Fix: the previous version deleted only leaf nodes and returned without
// deleting internal nodes, leaking every non-leaf Tree. We now free in
// post-order (children first, then the node itself) and tolerate nullptr.
void cleanupTree(Tree *tree) {
  if (tree == nullptr) {
    return;
  }
  cleanupTree(tree->left);
  cleanupTree(tree->right);
  delete tree; // children already released, safe to free this node
}
// Print the tree in a Newick-like nested format:
// "(left: len0, right: len1)" recursively; leaves print nothing themselves.
// Assumes every internal node has both children non-null (true for trees
// built by main(), which always merges two nodes).
void printTree(Tree *tree) {
// Reach the leaf
if (tree->left == nullptr && tree->right == nullptr) {
return;
}
cout << "(";
printTree(tree->left);
cout << ": " << tree->branch_length[0] << ", ";
printTree(tree->right);
cout << ": " << tree->branch_length[1] << ")";
}
// Driver: agglomerative average-linkage clustering (UPGMA-style — verify)
// of a fixed 7x7 distance matrix on the GPU. Each round finds the matrix
// minimum with a two-level getMin reduction, merges the two closest
// clusters via the update kernel, and records the merge in a Tree.
int main() {
const int num_seqs = 7;
// Symmetric pairwise distance matrix (hard-coded demo input).
float h_a[num_seqs][num_seqs]{
{0.0f, 19.0f, 27.0f, 8.0f, 33.0f, 18.0f, 13.0f},
{19.0f, 0.0f, 31.0f, 18.0f, 36.0f, 1.0f, 13.0f},
{27.0f, 31.0f, 0.0f, 26.0f, 41.0f, 32.0f, 29.0f},
{8.0f, 18.0f, 26.0f, 0.0f, 31.0f, 17.0f, 14.0f},
{33.0f, 36.0f, 41.0f, 31.0f, 0.0f, 35.0f, 28.0f},
{18.0f, 1.0f, 32.0f, 17.0f, 35.0f, 0.0f, 12.0f},
{13.0f, 13.0f, 29.0f, 14.0f, 28.0f, 12.0f, 0.0f}};
// One singleton leaf per input sequence.
Tree *nodes[num_seqs];
for (int i = 0; i < num_seqs; ++i) {
nodes[i] = new Tree(1, 0.0f, nullptr, nullptr, 0.0f, 0.0f);
}
int n = num_seqs * num_seqs;
// Output sizes of the two reduction levels (each block emits one result).
int n_out_level0 = ceil((float)n / (BLOCK_SIZE * 8));
int n_out_level1 = ceil((float)n_out_level0 / (BLOCK_SIZE * 8));
float *h_val = (float *)malloc(sizeof(float) * n_out_level1);
int *h_idx = (int *)malloc(sizeof(int) * n_out_level1);
float *d_a;
float *d_val_level0, *d_val_level1;
int *d_idx_level0, *d_idx_level1;
CHECK(cudaMalloc((void **)&d_a, sizeof(float) * n));
CHECK(cudaMalloc((void **)&d_val_level0, sizeof(float) * n_out_level0));
CHECK(cudaMalloc((void **)&d_idx_level0, sizeof(int) * n_out_level0));
CHECK(cudaMalloc((void **)&d_val_level1, sizeof(float) * n_out_level1));
CHECK(cudaMalloc((void **)&d_idx_level1, sizeof(int) * n_out_level1));
CHECK(cudaMemcpy(d_a, h_a, sizeof(float) * n, cudaMemcpyHostToDevice));
Tree *root;
// Each iteration merges one pair, so num_seqs - 1 merges in total.
for (int remain = num_seqs; remain >= 2; --remain) {
// int idx = getMinIdx((float *)a, num_seqs * num_seqs);
getMin<<<n_out_level0, BLOCK_SIZE>>>(d_a, nullptr, n, d_val_level0,
d_idx_level0);
CHECK(cudaDeviceSynchronize());
// Second level reduces the per-block results; indices are mapped back.
getMin<<<n_out_level1, BLOCK_SIZE>>>(
d_val_level0, d_idx_level0, n_out_level0, d_val_level1, d_idx_level1);
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(h_val, d_val_level1, sizeof(float) * n_out_level1,
cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_idx, d_idx_level1, sizeof(int) * n_out_level1,
cudaMemcpyDeviceToHost));
// Final min over the handful of level-1 results on the host.
float val = h_val[0];
int idx = h_idx[0];
for (int i = 0; i < n_out_level1; ++i) {
if (h_val[i] < val) {
val = h_val[i];
idx = h_idx[i];
}
}
// Decode the flat matrix index into (row, col) = cluster pair.
int idx1 = idx / num_seqs;
int idx2 = idx % num_seqs;
if (idx1 > idx2) {
swap(idx1, idx2);
}
update<<<num_seqs, BLOCK_SIZE>>>(d_a, num_seqs, idx1, idx2,
nodes[idx1]->num_nodes,
nodes[idx2]->num_nodes);
// New node height = half the merge distance; branch lengths are the
// height difference to each child.
float length = val;
root = new Tree(nodes[idx1]->num_nodes + nodes[idx2]->num_nodes, length / 2,
nodes[idx1], nodes[idx2],
length / 2 - nodes[idx1]->total_length,
length / 2 - nodes[idx2]->total_length);
CHECK(cudaDeviceSynchronize());
nodes[idx1] = root; // merged cluster lives at slot idx1
}
printTree(root);
// Free device
CHECK(cudaFree(d_a));
CHECK(cudaFree(d_val_level0));
CHECK(cudaFree(d_idx_level0));
CHECK(cudaFree(d_val_level1));
CHECK(cudaFree(d_idx_level1));
// Free host
free(h_val);
free(h_idx);
// Clean up tree
cleanupTree(root);
return 0;
}
|
13,654 | /*
File name: test_device.cu
Date: 2009/03/31 23:53
Author: Lukas Vlcek
Copyright (C) 2009 Lukas Vlcek
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
in a file called COPYING along with this program; if not, write to
the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
02139, USA.
*/
#include <stdio.h>
// Abort the program if the most recent CUDA call failed.
// `msg` is a caller-supplied label included in the diagnostic line.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return; // nothing to report
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( status) );
    exit(EXIT_FAILURE);
}
// Query and print the properties of the currently active CUDA device.
// Fixes: the size_t fields of cudaDeviceProp (totalGlobalMem,
// sharedMemPerBlock, memPitch, totalConstMem, textureAlignment) were
// printed with %d, which is undefined behaviour on 64-bit hosts — they
// now use %zu; the malloc result is checked and the buffer is freed.
int main( int argc, char** argv)
{
    int count;
    cudaGetDeviceCount(&count);
    checkCUDAError("cudaGetDeviceCount");
    printf("Device count: %d\n", count);
    cudaGetDevice(&count);
    checkCUDAError("cudaGetDevice");
    printf("Active device: %d\n", count);
    cudaDeviceProp* prop = (cudaDeviceProp *)malloc(sizeof(cudaDeviceProp));
    if (prop == NULL) {
        fprintf(stderr, "Cuda error: malloc of cudaDeviceProp failed.\n");
        exit(EXIT_FAILURE);
    }
    cudaGetDeviceProperties(prop, count);
    checkCUDAError("cudaGetDeviceProperties");
    printf("Device no.%d properties:\n", count);
    printf("Name: %s\n", prop->name);
    printf("GlobMem: %zu\n", prop->totalGlobalMem);
    printf("ShMem/block: %zu\n", prop->sharedMemPerBlock);
    printf("Regs/block: %d\n", prop->regsPerBlock);
    printf("WarpSize: %d\n", prop->warpSize);
    printf("memPitch: %zu\n", prop->memPitch);
    printf("maxThreadsPerBlock: %d\n", prop->maxThreadsPerBlock);
    printf("maxThreadsDim: %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]);
    printf("maxGridSize: %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]);
    printf("ConstMem: %zu\n", prop->totalConstMem);
    printf("Compute capability: %d.%d\n", prop->major, prop->minor);
    printf("clockRate: %d\n", prop->clockRate);
    printf("textureAlignment: %zu\n", prop->textureAlignment);
    printf("deviceOverlap: %d\n", prop->deviceOverlap);
    printf("multiProcessorCount: %d\n", prop->multiProcessorCount);
    free(prop); // was previously leaked
    return 0;
}
/* end of test_device.cu */
|
13,655 | #include "includes.h"
// CUDA-C includes
extern "C"
//Adds two arrays
void runCudaPart();
// Main cuda function
// Element-wise in-place addition: ary1[i] = ary1[i] + ary2[i].
// Indexes with threadIdx.x only, so it must be launched as a single block
// whose thread count equals the array length (no bounds guard).
__global__ void addAry( int * ary1, int * ary2 )
{
    const int i = threadIdx.x;
    ary1[i] = ary1[i] + ary2[i];
}
13,656 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <stdlib.h>
//
//__global__ void unique_index_calc_threadIdx(int * data)
//{
// int tid = threadIdx.x;
// printf("threadIdx.x : %d - data : %d \n", tid, data[tid]);
//}
//
//
//__global__ void unique_gid_calculation(int * data)
//{
// int tid = threadIdx.x;
// int offset = blockIdx.x * blockDim.x;
// int gid = tid + offset;
//
// printf("blockIdx.x : %d, threadIdx.x : %d - data : %d \n",
// blockIdx.x, tid, data[gid]);
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,43,56,1,76,81,94,32 };
//
// for (int i = 0; i < array_size; i++)
// {
// printf("%d ", h_data[i]);
// }
// printf("\n \n");
//
// int * d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// dim3 block(4);
// dim3 grid(2);
//
// unique_index_calc_threadIdx << < grid, block >> > (d_data);
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//} |
13,657 | #include <stdio.h>
#include <stdlib.h>
#define MAX_DELAY 30
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
// Return microseconds elapsed since `start`; pass 0 to obtain an absolute
// wall-clock timestamp in microseconds (seconds * 1e6 + microseconds).
unsigned long long dtime_usec(unsigned long long start){
    timeval now;
    gettimeofday(&now, 0);
    const unsigned long long abs_usec =
        (unsigned long long)now.tv_sec * 1000000ULL + now.tv_usec;
    return abs_usec - start;
}
#define APPRX_CLKS_PER_SEC 1000000000ULL
// Busy-wait on the device for roughly `seconds` seconds by spinning on
// clock64(). APPRX_CLKS_PER_SEC is only an approximation of the real GPU
// clock rate, so the delay is approximate.
__global__ void delay_kernel(unsigned seconds){
unsigned long long dt = clock64();
while (clock64() < (dt + (seconds*APPRX_CLKS_PER_SEC)));
}
// Launch a single-thread kernel that spins for ~delay_t seconds and
// report the observed wall-clock duration of launch + sync.
int main(int argc, char *argv[]){
unsigned delay_t = 5; // seconds, approximately
// unsigned delay_t_r;
// if (argc > 1) delay_t_r = atoi(argv[1]);
// if ((delay_t_r > 0) && (delay_t_r < MAX_DELAY)) delay_t = delay_t_r;
unsigned long long difft = dtime_usec(0);
delay_kernel<<<1,1>>>(delay_t);
// The launch is asynchronous; the synchronize makes the host wait for
// the spin loop, so difft measures the full kernel duration.
cudaDeviceSynchronize();
difft = dtime_usec(difft);
printf("kernel duration: %fs\n", difft/(float)USECPSEC);
return 0;
}
|
13,658 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#define uint16_t unsigned short
namespace {
// Fallback number of integration intervals used when N is below 1.
const size_t defaultNUM = 64;
}
// Round N up to the nearest power of two (N itself if already a power
// of two). The computation is done in 16 bits, so it is only meaningful
// for 1 <= N <= 32768; larger values truncate/overflow (pre-existing
// limitation of the uint16_t interface, kept for compatibility).
// Fix: N == 0 previously spun forever in the mask-scan loop; it now
// returns 1, the smallest power of two.
uint16_t NextOrEqualPower2(size_t N)
{
    uint16_t size = static_cast<uint16_t>(N);
    if (size == 0)
        return 1; // guard: the scan below never terminates for 0
    uint16_t mask = 0x8000; //(16 byte, 32768, 1000 0000 0000 0000)
    // Walk the mask down until it lands on the highest set bit of size.
    while (!(mask & size))
        mask >>= 1;
    // Exact power of two -> return it; otherwise shift up one bit.
    return static_cast<uint16_t>(mask << (mask != size));
}
// Check for an error from the last executed CUDA runtime call
// Print a diagnostic (without aborting) if the most recent CUDA call failed.
void check_cuda_error(const char* message)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf("ERROR: %s: %s\n", message,
           cudaGetErrorString(status));
}
// Integrate sqrt(1 - x^2) over [0, 1] with the trapezoid rule: each thread
// computes the area of one trapezoid into dynamic shared memory, the block
// reduces them, and thread 0 (cacheIndex == i with i == 0 after the loop)
// atomically adds the block sum into *res. Requires blockDim.x to be a
// power of two for the halving reduction, and dynamic shared memory of
// blockDim.x * sizeof(T).
// NOTE(review): extern __shared__ inside a template is known to clash if
// the template is instantiated for more than one T in a translation unit;
// here only T = float is used — confirm before adding instantiations.
template <typename T>
__global__ void kernel(T* res,size_t num)
{
extern __shared__ T cache[];
const T step = 1 / (float)(num);
size_t cacheIndex = threadIdx.x;
cache[cacheIndex] = 0; // threads with tid >= num contribute 0
size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < num)
{
T x0 = step * tid;
T x1 = step * (tid + 1);
T y0 = sqrtf(1 - x0 * x0);
T y1 = sqrtf(1 - x1 * x1);
cache[cacheIndex] = (y0 + y1) * step / 2.f; // trapezoid area
}
__syncthreads();
// reduction (tree-style halving over shared memory)
size_t i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
// i == 0 here, so only thread 0 publishes the block's partial sum.
if (cacheIndex == i)
atomicAdd(res, cache[0]);
return;
}
// Host driver: estimate PI by integrating sqrt(1 - x^2) over [0, 1] with
// N trapezoids on the GPU and multiplying the quarter-circle area by 4.
// Fixes: the device accumulator was never initialised (cudaMalloc does not
// zero memory and the kernel only ever atomicAdd()s into it), and the
// deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main(int argc, char** argv)
{
    size_t N = 128;
    if (N < 1)
        N = defaultNUM;           // fall back to the default interval count
    N = NextOrEqualPower2(N);     // block reduction needs a power of two
    float* res_d;                 // result accumulator on the device
    float res = 0;
    cudaMalloc((void**)&res_d, sizeof(float));
    check_cuda_error("Allocating memory on GPU");
    // cudaMalloc returns uninitialised memory; start the accumulator at 0.
    cudaMemset(res_d, 0, sizeof(float));
    check_cuda_error("Zeroing accumulator on GPU");
    // Grid and block sizes on the GPU
    size_t THREADS_PER_BLOCK = std::min(std::max(64, static_cast<int>(N)), 1024);
    size_t BLOCKS_PER_GRID = ( N / THREADS_PER_BLOCK) + 1;
    kernel <<<BLOCKS_PER_GRID, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(float) >>> (res_d,N);
    cudaDeviceSynchronize();      // cudaThreadSynchronize() is deprecated
    check_cuda_error("Executing kernel");
    // Copy the result back to the host
    cudaMemcpy(&res, res_d, sizeof(float), cudaMemcpyDeviceToHost);
    check_cuda_error("Copying results from GPU");
    cudaFree(res_d);
    check_cuda_error("Freeing device memory");
    res *= 4;                     // quarter circle -> full circle
    printf("PI = %.12f\n", res);
    return 0;
}
|
13,659 | // Elapsed Real Time for input-4.txt: real 0m29.789s
#include <stdio.h>
#include <stdbool.h>
#include <cuda_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Kernel, run by each thread to count complete squares in parallel.
// Count 6x6 sub-squares of the grid that contain all 26 letters (pangram
// squares). Each thread owns one starting row r0 and slides a 6-wide
// window across all columns; per-thread totals go to output[r0].
// NOTE(review): device-side malloc/free per window iteration is very slow;
// a fixed local array would avoid it — confirm before optimizing.
// NOTE(review): knownLetters has 26 slots but `counter` is incremented for
// every distinct character seen; if the grid can contain more than 26
// distinct characters this writes past the buffer — verify input alphabet.
__global__ void countSquares( int rows, int cols, bool report, Row *grid, int *output) {
// Unique index for this worker.
int r0 = blockDim.x * blockIdx.x + threadIdx.x;
// Make sure I actually have something to work on.
if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
int total = 0;
//TODO logic goes here
int startIdxCols = 0; //this is actually column
int endIdxCols = startIdxCols + 6;
int startIdxRows = r0; //so this is actually row
int endIdxRows = startIdxRows + 6;
int colidx = 0;
int rowidx = 0;
char square[6][6];
while (endIdxCols <= cols) {
//fill in square 2d array (copy the current 6x6 window)
for (int i = startIdxRows; i < endIdxRows; i++) {
for (int j = startIdxCols; j < endIdxCols; j++) {
square[rowidx][colidx] = grid[i][j];
// printf("%c %d %d\n", grid[i][j], i, j);
// printf("%c\n", square[rowidx][colidx]);
colidx++;
}
rowidx++;
colidx = 0;
}
rowidx = 0;
//at this point square is made
//so check if it's valid square: count distinct characters seen
bool isValid = false;
char *knownLetters = (char *)malloc(26 * sizeof(char));
for (int i = 0; i < 26; i++) {
*(knownLetters + i) = '*';
}
int counter = 0;
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
bool exists = false;
for (int k = 0; k <= counter; k++) {
if (square[i][j] == *(knownLetters + k)) { //if letter is found
exists = true;
}
}
if (!exists) {
*(knownLetters + counter) = square[i][j];
counter++;
}
}
}
free(knownLetters);
// Exactly 26 distinct characters => all letters present.
if (counter == 26) {
isValid = true;
}
//End of boolean function
if (isValid) {
total++;
if (report) {
printf("%d %d\n", startIdxRows, startIdxCols);
}
}
// Slide the window one column to the right.
endIdxCols++;
startIdxCols++;
} //end of while loop
*(output + r0) = total; //save the total number of squares to the unique index
// printf("Total squares is %d with thread ID %d\n", total, r0);
} //end of if statement
}
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Read the grid of characters.
// Read the grid of characters from standard input into the globals
// rows/cols/grid. Format: "<rows> <cols>" followed by `rows`
// whitespace-separated row strings of length cols.
// Fixes: scanf and malloc results are now checked, and the row read is
// width-limited so an over-long line cannot overflow `buffer`.
void readGrid() {
  // Read grid dimensions.
  if ( scanf( "%d%d", &rows, &cols ) != 2 || rows < 1 || cols < 1 ) {
    fprintf( stderr, "Invalid grid dimensions.\n" );
    exit( EXIT_FAILURE );
  }
  if ( cols > MAX_WIDTH ) {
    fprintf( stderr, "Input grid is too wide.\n" );
    exit( EXIT_FAILURE );
  }
  // Make space to store the grid as a big, contiguous array.
  grid = (Row *) malloc( rows * sizeof( Row ) );
  if ( grid == NULL ) {
    fprintf( stderr, "Can't allocate grid.\n" );
    exit( EXIT_FAILURE );
  }
  // Read each row of the grid as a string, then copy everything
  // but the null terminator into the grid array.
  int rowCount = 0;
  char buffer[ MAX_WIDTH + 1 ];
  while ( rowCount < rows ) {
    // %16384s limits the read to MAX_WIDTH chars plus the terminator.
    if ( scanf( "%16384s", buffer ) != 1 ) {
      fprintf( stderr, "Missing grid row.\n" );
      exit( EXIT_FAILURE );
    }
    memcpy( grid[ rowCount++ ], buffer, cols );
  }
}
// General function to report a failure and exit.
// General function to report a failure and exit with a nonzero status.
static void fail( char const *message ) {
fprintf( stderr, "%s\n", message );
exit( 1 );
}
// Print out a usage message, then exit.
// Print out a usage message, then exit with a nonzero status.
static void usage() {
printf( "usage: square [report]\n" );
exit( 1 );
}
// Driver: read the grid from stdin, copy it to the device, count pangram
// 6x6 squares with one thread per starting row, then sum per-row totals
// on the host. Optional "report" argument prints each square's position.
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
// squareFound = false;
readGrid();
/** Array used to hold each squares found in each threads */
// int *reportedSquares = (int *)malloc(rows * sizeof(int));
// Allocate device memory for the grid and copy it over.
Row *rowGrid = NULL;
cudaMalloc((void **)&rowGrid, rows * sizeof(Row)); //allocate memory for 2 arrays
cudaMemcpy(rowGrid, grid, rows * sizeof(Row), cudaMemcpyHostToDevice);
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// Per-row result array on the device, zeroed before the launch.
// int *output = (int *)malloc(rows * sizeof(int));
int *output = NULL;
cudaMalloc((void **)&output, rows * sizeof(int));
cudaMemset(output, 0x00, rows * sizeof(int));
// printf("\n\n");
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, rowGrid, output);
if ( cudaGetLastError() != cudaSuccess )
fail( "Failure in CUDA kernel execution." );
// Copy the per-row counts back to the host and sum them.
// (cudaMemcpy is blocking, so it also waits for the kernel to finish.)
// int *squareNums = NULL;
int *mySquareNums = (int *)malloc(rows * sizeof(int));
// cudaMalloc((void **)&squareNums, rows * sizeof(int));
cudaMemcpy(mySquareNums, output, rows * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < rows; i++) {
// *(output + i) = 0;
// mySquareNums[i] = 0;
// }
int total = 0;
for (int i = 0; i < rows; i++) {
total += mySquareNums[i];
}
printf( "Squares: %d\n", total );
cudaFree(rowGrid);
cudaFree(output);
free(mySquareNums);
// Free memory on the device and the host.
free( grid );
cudaDeviceReset(); //reset the device
return 0;
}
|
13,660 | #include <stdio.h>
#include <chrono>
#include <iostream>
// Component-wise sum of two float3 vectors (usable from host and device).
__host__ __device__ float3 operator+(const float3 &a, const float3 &b)
{
    float3 result;
    result.x = a.x + b.x;
    result.y = a.y + b.y;
    result.z = a.z + b.z;
    return result;
}
// Component-wise difference of two float3 vectors (host and device).
__host__ __device__ float3 operator-(const float3 &a, const float3 &b)
{
    float3 result;
    result.x = a.x - b.x;
    result.y = a.y - b.y;
    result.z = a.z - b.z;
    return result;
}
// Advance one particle: move the position by one velocity step, then grow
// every velocity component by 0.1 * iteration index.
__host__ __device__ void update(float3 &p, float3 &v, const int it)
{
    const float dv = 0.1f * it;
    p.x += v.x;
    p.y += v.y;
    p.z += v.z;
    v.x += dv;
    v.y += dv;
    v.z += dv;
}
// Run `iterations` update steps on one particle per thread.
// NOTE(review): there is no bounds guard — the grid is rounded up in
// main(), so threads with id >= numParticles access past the end of the
// arrays. The particle count should be passed in and checked; confirm
// intended array sizing before relying on this kernel.
__global__ void simKernel(float3 *particles, float3 *velocities, int iterations)
{
const int id = blockIdx.x*blockDim.x + threadIdx.x;
for(size_t it = 0; it<iterations;++it)
{
update(particles[id], velocities[id], it);
}
}
// Read (numParticles, numIterations, blockSize) from stdin, run the
// particle simulation on the GPU with unified memory, and report the
// elapsed wall time. The timing window includes allocation, host-side
// initialisation, and freeing — not just kernel execution.
int main()
{
int numParticles, numIterations, blockSize;
std::cin >> numParticles >> numIterations >>blockSize;
int byteSize = numParticles * sizeof(float3);
//GPU SIMULATION:
auto start_gpu = std::chrono::high_resolution_clock::now();
float3 *dgpu_particles;
float3 *dgpu_velocities;
// Unified (managed) memory: accessible from both host and device.
cudaMallocManaged(&dgpu_particles, byteSize);
cudaMallocManaged(&dgpu_velocities, byteSize);
for(size_t i=0; i<numParticles; ++i)
{
dgpu_particles[i] = make_float3(.1f,.1f,.1f);
dgpu_velocities[i] = make_float3(.01f,.01f,.01f);
}
// One launch per iteration (inner iteration count fixed to 1), with a
// full device sync in between so host and device never race on the
// managed buffers.
for(int i=0; i<numIterations; ++i)
{
simKernel<<<(numParticles+blockSize-1)/blockSize, blockSize>>>(dgpu_particles,
dgpu_velocities, 1);
cudaDeviceSynchronize();
}
cudaFree(dgpu_particles);
cudaFree(dgpu_velocities);
auto end_gpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> time_gpu = end_gpu-start_gpu;
std::cout<<"GPU simulation time:"<<time_gpu.count()<<std::endl;
return 0;
}
// Return the sum of two ints; callable from both host and device code.
__host__ __device__ int sum(int a, int b) {
    int total = a;
    total += b;
    return total;
}
|
13,662 | #include "GPUReader.cuh"
#include <assert.h>
#include <fstream>
#include <iostream>
#include <regex>
#include <string>
#include <vector>
#include <time.h>
using namespace std;
#define NEW_READER
// Construct a reader bound to `inputFile`; with NEW_READER defined (it is,
// above) the whole file is parsed eagerly into the stream maps here.
GPUReader::GPUReader(string inputFile) {
this->FILENAME = inputFile;
#ifdef NEW_READER
readStreams();
#endif
}
// Default constructors: streams start empty; entries are appended by
// GPUReader::readStreams().
IntInStream::IntInStream() {
}
UnitInStream::UnitInStream() {
}
// Print an int array on one line as "name : [a, b, c]".
// Fix: len == 0 previously made `len - 1` wrap around (size_t), producing
// a runaway loop and a read of array[-1]; an empty array now prints
// "name : []" safely. The loop index is size_t to match len.
void printArray(int* array, size_t len, string name) {
printf("%s : [", name.c_str());
if (len == 0) {
printf("]\n");
return;
}
for (size_t i = 0; i + 1 < len; i++) {
printf("%d, ", array[i]);
}
printf("%d]\n", array[len-1]);
}
// Parse the whole input file into the GPUIntStreams / GPUUnitStreams maps.
// Expected line format (whitespace ignored): "<timestamp>: <name> = <value>"
// where <value> is an int (int stream) or "()" (unit stream). The stream
// kind is decided by whether stoi() on the RHS succeeds. Throws
// std::runtime_error on malformed lines.
void GPUReader::readStreams() {
fstream file;
clock_t start = clock();
file.open(this->FILENAME, ios::in);
printf("read file %s\n", this->FILENAME.c_str());
if (file.is_open()) {
string buf;
int i = 0; // 1-based line counter for error messages
while (getline(file, buf)) {
//printf("LINE %d\n", i);
i++;
// Strip all whitespace so positions of ':' and '=' are stable.
buf.erase(std::remove_if(buf.begin(), buf.end(),::isspace), buf.end());
size_t colPos = buf.find(':');
size_t eqPos = buf.find('=');
if (colPos == std::string::npos || eqPos == std::string::npos) {
char buff[50];
std::snprintf(buff, sizeof(buff), "Line %d: invalid pattern", i);
throw std::runtime_error(buff);
}
// Leading digits before ':' are the timestamp.
int timestamp = stoi(buf, nullptr);
string name = buf.substr(colPos+1, eqPos-colPos-1);
size_t post_eq = eqPos + 1;
try {
// If the RHS parses as an int, this line belongs to an int stream.
int value = stoi(buf.substr(post_eq));
// check if exists in map
if (this->GPUIntStreams.find(name) == this->GPUIntStreams.end()) {
//printf("Create int stream %s\n", name.c_str());
shared_ptr<IntInStream> s = make_shared<IntInStream>();
this->GPUIntStreams.insert(std::pair<string,shared_ptr<IntInStream>>(name, s));
}
if (this->GPUIntStreams.find(name) != this->GPUIntStreams.end()) {
//printf("Insert (%d, %d) int stream %s\n", timestamp, value, name.c_str());
this->GPUIntStreams.find(name)->second->timestamps.push_back(timestamp);
this->GPUIntStreams.find(name)->second->values.push_back(value);
} else {
throw std::runtime_error("Error in GPUIntStream map insertion for Stream \"" + name + "\"");
}
} catch (std::invalid_argument &ia) {
// stoi failed: must be a unit event, whose RHS is exactly "()".
// check unit event validity
if (buf.substr(post_eq) != "()") {
throw std::runtime_error("Invalid string \"" + buf.substr(post_eq) +
"\" at RHS of non-int stream");
}
// check if exists in map
if (this->GPUUnitStreams.find(name) == this->GPUUnitStreams.end()) {
//printf("Create unit stream %s\n", name.c_str());
shared_ptr<UnitInStream> s = make_shared<UnitInStream>();
this->GPUUnitStreams.insert(std::pair<string,shared_ptr<UnitInStream>>(name, s));
}
if (this->GPUUnitStreams.find(name) != this->GPUUnitStreams.end()) {
//printf("Insert %d in unit stream %s\n", timestamp, name.c_str());
this->GPUUnitStreams.find(name)->second->timestamps.push_back(timestamp);
//printf("last elem in %s: %d\n", name.c_str(), this->GPUUnitStreams.find(name)->second->timestamps.back());
//printf("Post insert unit stream %s\n", name.c_str());
} else {
throw std::runtime_error("Error in GPUUnitStream map insertion for Stream \"" + name + "\"");
}
}
}
}
clock_t dur = clock() - start;
printf("READING TOOK %ld us\n", dur*1000000/CLOCKS_PER_SEC);
}
#ifndef NEW_READER
// Legacy regex-based reader: scan the whole file for unit events of stream
// `name` ("<ts>: <name> = ()") and return them as a GPUUnitStream.
// NOTE(review): this is compiled out — NEW_READER is defined above, and
// the enclosing #ifndef excludes this block. It is also declared on class
// `Reader`, not `GPUReader`; confirm before re-enabling.
// Ownership of the malloc'd timestamp array presumably passes to
// GPUUnitStream — verify against its definition.
GPUUnitStream Reader::getGPUUnitStream(string name) {
fstream file;
file.open(this->FILENAME, ios::in);
vector<int> timestamps;
if (file.is_open()) {
string buf;
regex pattern("([0-9]+):\\s*([A-Za-z][0-9A-Za-z]*)\\s*=\\s*\\(\\)\\s*");
while (getline(file, buf)) {
// match each line to regex
smatch matches;
if (regex_match(buf, matches, pattern)) {
if (name.compare(matches[2]) == 0) {
int timestamp = stoi(matches[1]);
timestamps.push_back(timestamp);
}
}
}
file.close();
}
size_t timestampsCnt = timestamps.size();
if (timestampsCnt == 0) {
cout << "Warning: Stream " << name << " is not present in the input file but was read!" << "\n";
//exit(1);
}
int *timestampsA = (int*) malloc(timestampsCnt * sizeof(int));
memset(timestampsA, 0, timestampsCnt * sizeof(int));
copy(timestamps.begin(), timestamps.end(), timestampsA);
/*
printf("%s: size=%d\n", name.c_str(), timestampsCnt);
if (timestampsCnt < 10000) {
printArray(timestampsA, timestampsCnt, "ts (" + name + ")");
}
*/
GPUUnitStream readStream = GPUUnitStream(timestampsA, timestampsCnt);
return readStream;
}
// Legacy regex-based reader: scan the whole file for int events of stream
// `name` ("<ts>: <name> = <int>") and return them as a GPUIntStream.
// NOTE(review): compiled out (NEW_READER is defined above) and declared on
// class `Reader`, not `GPUReader` — confirm before re-enabling.
GPUIntStream Reader::getGPUIntStream(string name) {
fstream file;
file.open(this->FILENAME, ios::in);
vector<int> timestamps;
vector<int> values;
printf("read file %s\n", this->FILENAME.c_str());
if (file.is_open()) {
string buf;
// match each line to regex
regex pattern("([0-9]+):\\s*([A-Za-z][0-9A-Za-z]*)\\s*=\\s*(-?[0-9]+)\\s*");
while (getline(file, buf)) {
// match each line to regex
smatch matches;
if (regex_match(buf, matches, pattern)) {
if (name.compare(matches[2]) == 0) {
int timestamp = stoi(matches[1]);
int value = stoi(matches[3]);
timestamps.push_back(timestamp);
values.push_back(value);
}
}
}
file.close();
}
assert(timestamps.size() == values.size());
size_t timestampsCnt = timestamps.size();
if (timestampsCnt == 0) {
cout << "Warning: Stream " << name << " is not present in the input file but was read!" << "\n";
//exit(1);
}
// Copy the vectors into plain malloc'd arrays (ownership presumably
// passes to GPUIntStream — verify against its definition).
size_t mallocSize = timestampsCnt * sizeof(int);
int *timestampsA = (int*) malloc(mallocSize);
int *valuesA = (int*) malloc(mallocSize);
memset(timestampsA, 0, mallocSize);
memset(valuesA, 0, mallocSize);
copy(timestamps.begin(), timestamps.end(), timestampsA);
copy(values.begin(), values.end(), valuesA);
/*
printf("%s: size=%d\n", name.c_str(), timestampsCnt);
if (timestampsCnt < 10000) {
printArray(timestampsA, timestampsCnt, "ts (" + name + ")");
printArray(valuesA, timestampsCnt, "vs (" + name + ")");
}
*/
GPUIntStream readStream = GPUIntStream(timestampsA, valuesA, timestampsCnt);
return readStream;
}
#endif
#ifdef NEW_READER
// Return the named unit stream parsed by readStreams(), copying its
// timestamps into a fresh malloc'd array (ownership presumably passes to
// GPUUnitStream — verify). Throws std::runtime_error if the stream is
// unknown; see getUnitStreamDebug for a non-throwing variant.
shared_ptr<GPUUnitStream> GPUReader::getUnitStream(string name) {
if (this->GPUUnitStreams.find(name) != this->GPUUnitStreams.end()) {
vector<int> *timestamps = &this->GPUUnitStreams.find(name)->second->timestamps;
size_t mallocSize = timestamps->size() * sizeof(int);
size_t size = timestamps->size();
int *timestampsA = (int*) malloc(mallocSize);
copy(timestamps->begin(), timestamps->end(), timestampsA);
/*
printf("%s: size=%zu\n", name.c_str(), timestamps->size());
if (timestamps->size() < 10000) {
printArray(&(*timestamps)[0], timestamps->size(), "ts (" + name + ")");
}
*/
return std::make_shared<GPUUnitStream>(GPUUnitStream{timestampsA, size});
} else {
throw std::runtime_error("could not find unit stream \"" + std::string(name) + "\"");
}
}
// Like getUnitStream, but returns an empty stream instead of propagating
// the "not found" exception.
// Fix: the exception was caught by value (`catch (exception e)`), which
// slices the thrown std::runtime_error; it is now caught by const
// reference, the idiomatic and slice-free form.
shared_ptr<GPUUnitStream> GPUReader::getUnitStreamDebug(string name) {
shared_ptr<GPUUnitStream> result;
try {
result = getUnitStream(name);
} catch (const exception &e) {
// Stream not found, return an empty stream
printf("Stream was not found!\n");
int *timestampsA = (int*) malloc(0);
size_t size = 0;
result = std::make_shared<GPUUnitStream>(GPUUnitStream{timestampsA, size});
}
return result;
}
// Return the named int stream parsed by readStreams(), copying timestamps
// and values into fresh malloc'd arrays (ownership presumably passes to
// GPUIntStream — verify). Throws std::runtime_error if the stream is
// unknown; see getIntStreamDebug for a non-throwing variant.
shared_ptr<GPUIntStream> GPUReader::getIntStream(string name) {
if (this->GPUIntStreams.find(name) != this->GPUIntStreams.end()) {
vector<int> *timestamps = &this->GPUIntStreams.find(name)->second->timestamps;
vector<int> *values = &this->GPUIntStreams.find(name)->second->values;
size_t mallocSize = timestamps->size() * sizeof(int);
size_t size = timestamps->size();
int *timestampsA = (int*) malloc(mallocSize);
int *valuesA = (int*) malloc(mallocSize);
clock_t start = clock();
copy(timestamps->begin(), timestamps->end(), timestampsA);
copy(values->begin(), values->end(), valuesA);
clock_t time = clock() - start;
printf("MEMCPY TIME USED:: %ld\n", time*1000000/CLOCKS_PER_SEC);
/*
printf("%s: size=%zu\n", name.c_str(), size);
if (size < 10000) {
printArray(&(*timestamps)[0], timestamps->size(), "ts (" + name + ")");
printArray(&(*values)[0], values->size(), "vs (" + name + ")");
}
*/
return make_shared<GPUIntStream>(GPUIntStream{timestampsA, valuesA, size});
} else {
throw std::runtime_error("could not find int stream \"" + std::string(name) + "\"");
}
}
// Like getIntStream, but returns an empty stream instead of propagating
// the "not found" exception.
// Fix: the exception was caught by value (`catch (exception e)`), which
// slices the thrown std::runtime_error; it is now caught by const
// reference, the idiomatic and slice-free form.
shared_ptr<GPUIntStream> GPUReader::getIntStreamDebug(string name) {
shared_ptr<GPUIntStream> result;
try {
result = getIntStream(name);
} catch (const exception &e) {
// Stream not found, return an empty stream
printf("Stream was not found!\n");
int *timestampsA = (int*) malloc(0);
int *valuesA = (int*) malloc(0);
size_t size = 0;
result = make_shared<GPUIntStream>(GPUIntStream{timestampsA, valuesA, size});
}
return result;
}
#endif |
13,663 | #include "includes.h"
// Convert a 3-channel (interleaved RGB) image to single-channel grayscale:
// one thread per output pixel, 2D grid/block layout.
// NOTE(review): the weights 0.21/0.71/0.07 sum to 0.99, not 1.0; standard
// BT.709 luma uses 0.2126/0.7152/0.0722 — confirm whether the slight
// darkening is intentional.
__global__ void ch1(unsigned char* Pout, unsigned char* Pin, int width, int height) {
int channels = 3;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// check if pixel within range
if (col < width && row < height){
int gOffset = row * width + col;          // index into the gray output
int rgbOffset = gOffset * channels;       // index of R in the RGB input
unsigned char r = Pin[rgbOffset ];
unsigned char g = Pin[rgbOffset+1];
unsigned char b = Pin[rgbOffset+2];
Pout[gOffset] = 0.21f*r + 0.71f*g + 0.07f*b; // weighted luma, truncated to uchar
}
}
13,664 | /*
* dev1.c
*
* Copyright 2021 mike <mike@fedora33>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*
*/
#include <stdio.h>
#include <math.h>
#include <cuda.h>
// Multiply each of the first `size` elements of `squares` by 10 in place.
// One thread per element; surplus threads fall through the guard.
__global__ void kernel(ulong* squares, ulong size) {
    const ulong idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= size) return;
    squares[idx] *= 10;
}
// Host driver: builds the table 0^2..root_max^2 on the host, multiplies each
// entry by 10 on the device, and prints the table before and after.
int main(int argc, char **argv)
{
    cudaError_t error_id;
    ulong N = 1000;
    // Largest integer whose square does not exceed N.
    ulong root_max = (ulong)floor(sqrt((double)N));
    printf("%lu\n", root_max);  // BUG FIX: ulong takes %lu, not %ld
    const ulong size_squares = root_max + 1;
    ulong squares[size_squares];
    for (ulong x = 0; x < size_squares; x += 1) squares[x] = x * x;
    for (ulong x = 0; x < size_squares; x += 1) {
        printf("%lu ", squares[x]);
    }
    printf("\n");
    // allocate memory on device
    ulong *d_c;
    error_id = cudaMalloc((void**)&d_c, sizeof(ulong) * size_squares);
    if (error_id != cudaSuccess) {
        printf("cudaMalloc failed with %d\n", error_id);
        exit(1);
    }
    // copy data to device
    error_id = cudaMemcpy(d_c, squares, sizeof(ulong) * size_squares,
                          cudaMemcpyHostToDevice);
    if (error_id != cudaSuccess) {
        printf("cudaMemcpy to device failed with %d\n", error_id);
        exit(1);
    }
    // Set configuration parameters: one block of size_squares threads.
    // (Direct construction instead of the original's "dim3 g=(1);" form.)
    dim3 grid_size(1);
    dim3 block_size(size_squares);
    // launch kernel
    kernel<<<grid_size, block_size>>>(d_c, size_squares);
    // BUG FIX: a kernel launch returns no status; launch-configuration
    // errors must be fetched explicitly, and execution errors surface at
    // the next synchronizing call -- check both.
    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("kernel launch failed with %d\n", error_id);
        exit(1);
    }
    error_id = cudaDeviceSynchronize();
    if (error_id != cudaSuccess) {
        printf("cudaDeviceSynchronize failed with %d\n", error_id);
        exit(1);
    }
    // copy data back to host
    error_id = cudaMemcpy(squares, d_c, sizeof(ulong) * size_squares,
                          cudaMemcpyDeviceToHost);
    if (error_id != cudaSuccess) {
        printf("cudaMemcpy to host failed with %d\n", error_id);
        exit(1);
    }
    printf("Results:\n");
    for (ulong x = 0; x < size_squares; x += 1) {
        printf("%lu ", squares[x]);
    }
    printf("\n");
    // free memory
    error_id = cudaFree(d_c);
    if (error_id != cudaSuccess) {
        printf("cudaFree failed with %d\n", error_id);
        exit(1);
    }
    return 0;
}
|
13,665 | // SDSC Summer Institute 2018
// Andreas Goetz (agoetz@sdsc.edu)
// Hello World Program in CUDA C
//
// Contains a function that is executed on the device (GPU)
//
#include<stdio.h>
// Empty device kernel: performs no work; exists only so the host can
// demonstrate a kernel launch.
__global__ void my_kernel(void){
}
// Host entry point: launches the (empty) kernel once and prints a greeting.
int main(void) {
    // Launch a single thread of the empty kernel.
    my_kernel<<<1,1>>>();
    // FIX: kernel launches are asynchronous; block until the device has
    // finished so the process does not exit with work still outstanding.
    cudaDeviceSynchronize();
    printf("Hello World!\n");
    return 0;
}
|
13,666 | #define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include "cuda_runtime.h"
#include <stdio.h>
#include <cmath>
extern "C" {
// Feed-forward evaluation over a layered network.
// topology[l] holds the node count of layer l (TOPOLOGY_WIDTH layers),
// membank is the activation array and weights the matching weight array,
// both laid out as layer * NODE_WIDTH + node. One thread per (layer, node)
// slot; each thread spins until the previous layer has published its
// activations (a zero entry means "not written yet"), then writes its own.
// The final result (weighted sum of the last layer) lands in membank[0].
__global__ void kernel(int* topology, float* membank, float* weights, int TOPOLOGY_WIDTH, int NODE_WIDTH)
{
    // Flat global thread index, mapped to a (layer, node) pair.
    int threadIndex = threadIdx.x + (blockDim.x * blockIdx.x);
    int layer = threadIndex / NODE_WIDTH;
    int node  = threadIndex % NODE_WIDTH;
    // NOTE(review): "layer < TOPOLOGY_WIDTH + 1" admits layer == TOPOLOGY_WIDTH,
    // so topology[layer] may read one past the array if topology has exactly
    // TOPOLOGY_WIDTH entries -- confirm the intended layer count with the caller.
    if (layer > 0 && layer < TOPOLOGY_WIDTH + 1) {
        if (node < topology[layer]) {
            int terminate = 1000;  // spin guard so a missing producer cannot hang the kernel
            bool stop = false;
            while (!stop && terminate-- > 0) {
                stop = true;
                // BUG FIX: the accumulator must be reset on every retry; the
                // original declared it outside the while loop, so terms read
                // before an aborted pass were double-counted on the next pass.
                float nodeOut = 0.0f;
                // Accumulate contributions from every node of the layer above.
                int prev = layer - 1;
                for (int i = 0; i < topology[prev]; i++) {
                    int arrayIndex = prev * NODE_WIDTH + i;
                    if (membank[arrayIndex] == 0) {
                        // Producer has not written this activation yet: retry.
                        stop = false;
                        break;
                    }
                    nodeOut += membank[arrayIndex] * weights[arrayIndex];
                }
                if (!stop) continue;
                // All inputs were available: publish our activation.
                membank[layer * NODE_WIDTH + node] = nodeOut;
            }
        }
    }
    __syncthreads();
    // Final readout: weighted sum of the last layer into membank[0].
    // BUG FIX: the original let every thread run this loop with plain
    // assignment ("total = ..."), so `total` both raced across threads and
    // kept only the final term. Let one thread own the accumulation, use +=,
    // and synchronize before the shared value is read back.
    __shared__ float total;
    if (threadIdx.x == 0) {
        total = 0.0f;
        for (int i = 0; i < topology[TOPOLOGY_WIDTH - 1]; i++) {
            int arrayIndex = ((TOPOLOGY_WIDTH - 1) * NODE_WIDTH) + i;
            total += membank[arrayIndex] * weights[arrayIndex];
        }
    }
    __syncthreads();
    membank[0] = total;
}
// Host entry point: intentionally empty; this translation unit only
// provides the kernel above.
int main()
{
return 0;
}
} |
13,667 | #include <iostream>
#include <memory>
#include <cuda_runtime.h>
/** Device properties
 * Query the CUDA-capable devices in the system and print the hardware
 * attributes relevant for sizing kernel launches (memory capacities, clock
 * rates, per-block resource limits, thread/block/grid dimension limits),
 * then ask the runtime for a device supporting double precision
 * (compute capability >= 1.3) and make it current.
 *
 * Modeled on the deviceQuery sample shipped with the CUDA toolkit.
 */
int main(int argc, char **argv)
{
    int device_count = 0;   // number of CUDA-capable devices found
    int device;             // id of the currently selected device
    int driver_version;     // installed CUDA driver version
    int runtime_version;    // CUDA runtime version this binary links against

    // cudaGetDeviceCount returns the number of CUDA-enabled devices,
    // 0 when there are none.
    cudaGetDeviceCount(&device_count);
    if (device_count == 0)
    {
        std::cout << "There are no available devices that support CUDA.\n";
    }
    else
    {
        std::cout << "Detected " << device_count << " CUDA Capable devices.\n";
    }

    // Identify the active device and fetch its property record.
    cudaGetDevice(&device);
    std::cout << "ID of device: " << device << std::endl;
    cudaDeviceProp device_property;
    cudaGetDeviceProperties(&device_property, device);
    std::cout << "The type of hardware GPU is: " << device_property.name << std::endl;

    // Driver / runtime versions (encoded as 1000*major + 10*minor).
    cudaDriverGetVersion(&driver_version);
    std::cout << "CUDA Driver Version is: CUDA " << driver_version / 1000 << "." << (driver_version % 100) / 10 << std::endl;
    cudaRuntimeGetVersion(&runtime_version);
    // BUG FIX: the original printed driver_version on this line even though
    // it had just queried the runtime version into runtime_version.
    std::cout << "CUDA Runtime Driver Version is: CUDA " << runtime_version / 1000 << "." << (runtime_version % 100) / 10 << std::endl;

    // Global memory capacity.
    std::cout << "Total amount of global memory: " << (float)device_property.totalGlobalMem / 1048576.0f << " MBytes" << std::endl;
    // Number of streaming multiprocessors (typo "mutilprocessors" fixed).
    printf(" (%2d) multiprocessors\n", device_property.multiProcessorCount);
    // Core clock; clockRate is reported in kHz.
    std::cout << "GPU max clock rate: " << device_property.clockRate * 1e-6f << " GHz" << std::endl;
    // Memory clock (kHz) and bus width.
    std::cout << "Memory clock rate: " << device_property.memoryClockRate * 1e-3f << " MHz" << std::endl;
    std::cout << "Memory Bus Width: " << device_property.memoryBusWidth << "-bit" << std::endl;
    // L2 cache (may be absent on very old devices).
    if (device_property.l2CacheSize)
    {
        std::cout << "L2 Cache size: " << device_property.l2CacheSize << " bytes" << std::endl;
    }
    // Per-device / per-block resource limits ("Toal" typos fixed).
    std::cout << "Total amount of constant memory: " << device_property.totalConstMem << " bytes" << std::endl;
    std::cout << "Total amount of shared memory per block: " << device_property.sharedMemPerBlock << " bytes" << std::endl;
    std::cout << "Total amount of registers available per block: " << device_property.regsPerBlock << std::endl;
    // Thread/block/grid dimension limits: essential when configuring launches.
    std::cout << "Maximum number of threads per block: " << device_property.maxThreadsPerBlock << std::endl;
    std::cout << "Max dimension size of a thread block (x, y, z): " << "(" <<
        device_property.maxThreadsDim[0] << "," <<
        device_property.maxThreadsDim[1] << "," <<
        device_property.maxThreadsDim[2] << ")" << std::endl;
    std::cout << "Max dimension size of a grid size (x, y, z): " << "(" <<
        device_property.maxGridSize[0] << "," <<
        device_property.maxGridSize[1] << "," <<
        device_property.maxGridSize[2] << ")" << std::endl;

    // Ask the runtime for a device that supports double precision:
    // compute capability (major.minor) >= 1.3.
    memset(&device_property, 0, sizeof(cudaDeviceProp));
    device_property.major = 1;
    device_property.minor = 3;
    cudaChooseDevice(&device, &device_property);
    std::cout << "ID of device which supports double precision is: " << device << std::endl;
    // Make that device current for the application.
    cudaSetDevice(device);
    return 0;
}
13,668 |
/*
* Erik Palmer
* March 1, 2017
*
* This is stripped down version for public sharing. This code
* simulates elastic dumbbells based on the
* Upper Convective Maxwell (UCM) model.
*
* Global variables are used for important parameters, and
* computations are transferred between the CPU (host) and
* GPU (device) as needed for optimal efficiency.
*
* Species switching dynamics have been removed so this
* will only simulate the evolution of "active" dumbbells.
*
* To Compile:
* nvcc CUDA_FILENAME -lcurand -o EXECUTABLE_NAME
*
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
//#include <math_functions.h>
//Define Macros for Error handling
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n", __FILE__,__LINE__); \
return EXIT_FAILURE; }} while(0)
#define CURAND_CALL(x) do { if((x)!= CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n", __FILE__, __LINE__); \
return EXIT_FAILURE; }} while(0)
//Define Macro for Histogram debugging
#define PRINT_VAR(x) printf("" #x "\n ")
//Debugging Macros
#define PRINT_VAR_FLOAT_VALUE(x) printf("" #x "=%f\n", x)
#define PRINT_VAR_INT_VALUE(x) printf("" #x "=%d\n", x)
//* Also useful: printf("DEBUG LINE %d\n", __LINE__);
//___velocity field on-off matrix ____
// note that this matrix is multiplied by the inputted flowrate value
#define U11 0.0
#define U12 0.0
#define U21 1.0
#define U22 0.0
//``````````````````````````````
//___Simulation Constants from paper____
#define INIT_ACT_TO_DNG_RATIO 0.5
#define LITTLE_D 0.03 //Default 0.03
#define TAO_FUND 5e-6 //Default 5e-6
#define ZEE 10.0 //Default 10.0
#define ALPHA_ZERO 0.83 //Default 0.83
#define ALPHA_ONE 0.17 //Default 0.17
#define U_ZERO 14.0 //Default 14.0
#define D_FREE 12.0 //Default 12.0
//``````````````````````````````
//____Define Global Variables________
//For GPU
__device__ double devStepSizeMicro;
__device__ unsigned int devTimeStepsMicro;
__device__ double devFlowRate;
__device__ double devMaxSpringLength;
__device__ double devFreq;
//For CPU
static long hostNumberOfParticles = 0;
static double hostStepSizeMicro = 0;
static long hostTimeStepsMicro = 0;
static long hostTimeStepsMacro = 0;
static double hostFlowRate;
static double hostMaxSpringLength = 0;
static double hostFreq;
//``````````````````````````````````
/*
 * Function: ParseInput
 * Validates the six command-line arguments and parses them into the
 * file-scope host* globals. Returns 0 on success, EXIT_FAILURE otherwise.
 * Each argv[i] is first scanned character-by-character (digits and '.'
 * only), so signs and exponents are rejected before strtol/strtod run.
 */
int ParseInput(int argc, char *argv[]){
    int i;
    // Expect exactly six user arguments (argv[0] is the program name).
    if (argc != 7){
        printf("ERROR: Incorrect number of input arguments\n");
        printf("Format: ./Maxwell [number of particles] [micro step size]");
        printf(" [time steps micro] [time steps macro] [flow rate]"); //Max Spring Length Removed
        printf(" [SAOS frequency]\n");
        return EXIT_FAILURE;
    }
    char *argvCopy;
    printf("The running program is %s\n", argv[0]);
    for (i=1; i<argc; i++){
        // Walk argv[i] in place; argvCopy restores the pointer afterwards
        // so the strtol/strtod calls below see the full string again.
        argvCopy = argv[i];
        for (; *argv[i]!='\0'; argv[i]++){
            if (*argv[i]=='.') continue; //skip decimals
            if (isdigit(*argv[i])==0){
                printf("%s is not a number\n", argv[i]);
                return EXIT_FAILURE;
            }
        }
        argv[i] = argvCopy;
    }
    // errno is cleared once here; ERANGE from any of the conversions below
    // is checked collectively at the end of the function.
    errno = 0;
    hostNumberOfParticles = strtol(argv[1], NULL, 10);
    hostStepSizeMicro = strtod(argv[2], NULL);
    hostTimeStepsMicro = strtol(argv[3], NULL, 10);
    hostTimeStepsMacro = strtol(argv[4], NULL, 10);
    hostFlowRate = strtod(argv[5], NULL);
    //hostMaxSpringLength = strtod(argv[6], NULL); //turned off for UCM Maxwell
    hostFreq = strtod(argv[6], NULL);
    // strtol/strtod return 0 on failure, so a zero here means either a
    // failed conversion or a (disallowed) zero argument.
    if (hostNumberOfParticles==0){
        printf("Unable to convert %s to positive integer\n", argv[1]);
        return EXIT_FAILURE;
    }
    if (hostTimeStepsMicro==0){
        printf("Unable to convert %s to positive integer\n", argv[3]);
        return EXIT_FAILURE;
    }
    if (hostTimeStepsMacro==0){
        printf("Unable to convert %s to positive integer\n", argv[4]);
        return EXIT_FAILURE;
    }
    if (hostStepSizeMicro==0){
        printf("Unable to convert %s to double\n", argv[2]);
        return EXIT_FAILURE;
    }
    //commented out to allow zero flow rate
    /*
    if (hostFlowRate==0.0){
        printf("Unable to convert %s to positive double\n", argv[5]);
        return EXIT_FAILURE;
    }
    */
    //commented out for UCM Maxwell
    /*
    if (hostMaxSpringLength == 0){
        printf("Unable to convert %s to positive double\n", argv[6]);
        return EXIT_FAILURE;
    }
    */
    if (hostFreq == 0){
        printf("Unable to convert %s to positive double\n", argv[6]);
        return EXIT_FAILURE;
    }
    // Range overflow from any conversion above surfaces here.
    if (errno == ERANGE){
        printf("%s\n", strerror(errno));
        return EXIT_FAILURE;
    }
    return 0;
}
/*
 * Function PrintSimInfo
 * Prints the parameters of the current simulation to the terminal.
 */
void PrintSimInfo(){
    printf("___________Running Steady State UCM Maxwell Simulation_________________\n");
    // BUG FIX: hostNumberOfParticles, hostTimeStepsMacro and
    // hostTimeStepsMicro are `long`; the original printed them with %d,
    // which is undefined behavior wherever int and long differ in size.
    printf("|| Number of Particles: %ld\n", hostNumberOfParticles);
    printf("|| Total Time: %g \n", hostTimeStepsMicro * hostStepSizeMicro * hostTimeStepsMacro);
    printf("|| Flow Rate: %g \n", hostFlowRate);
    printf("|| Macro -- Steps: %ld, Step Size: %g\n", hostTimeStepsMacro, hostTimeStepsMicro * hostStepSizeMicro);
    printf("|| Micro -- Steps: %ld, Step Size: %1.12g\n", hostTimeStepsMicro, hostStepSizeMicro);
    printf("|| Maximum Spring Length: %g\n", hostMaxSpringLength );
    printf("|| SAOS Frequency: %g\n", hostFreq );
    printf(" - - - - - - - - - - - - - - - - - - - - - - - \n");
}
/*
 * Function OutputToFile
 * Writes a header describing the simulation parameters followed by the
 * per-macro-step ensemble averages (XX, XY, YY) as csv rows.
 */
void OutputToFile (double XX[], double XY[], double YY[], double time_spent, int count){
    FILE *OutputFile;
    // BUG FIX: the original declared a buffer sized exactly for
    // "MaxwellSSimData" and then did sprintf(buf, "%s.csv", buf) -- a
    // buffer overflow, and undefined behavior because source and
    // destination overlap. Declare the final filename directly.
    char OutputFileName[] = "MaxwellSSimData.csv"; //<---Filename
    OutputFile = fopen(OutputFileName, "w");
    if (OutputFile == NULL){
        fprintf(stderr, "Couldn't open output file: %s!\n", OutputFileName);
        exit(1);
    }
    // ____ Header for textfile _______________________
    //Description
    fprintf(OutputFile,"**********************************************************************\n");
    fprintf(OutputFile,"* Simulation For UCM Maxwell *\n");
    fprintf(OutputFile,"* *\n");
    fprintf(OutputFile,"* *\n");
    fprintf(OutputFile,"* *\n");
    fprintf(OutputFile,"* *\n");
    fprintf(OutputFile,"* *\n");
    fprintf(OutputFile,"**********************************************************************\n");
    fprintf(OutputFile,"TotalTime: %3.12g\n", hostTimeStepsMicro * hostStepSizeMicro * hostTimeStepsMacro);
    fprintf(OutputFile,"FlowRate: %g\n", hostFlowRate);
    fprintf(OutputFile,"MacroSteps: %ld\n", hostTimeStepsMacro);
    fprintf(OutputFile,"MacroStepSize: %3.12g\n", hostTimeStepsMicro * hostStepSizeMicro);
    fprintf(OutputFile,"MicroSteps: %ld\n", hostTimeStepsMicro);
    fprintf(OutputFile,"StepSize: %2.12g\n", hostStepSizeMicro);
    fprintf(OutputFile,"NumberOfParticles: %ld\n", hostNumberOfParticles);
    fprintf(OutputFile,"Runtime: %g\n", time_spent);
    fprintf(OutputFile,"MaxSpringLength: %g\n", hostMaxSpringLength);
    fprintf(OutputFile,"SAOSFrequency: %g\n", hostFreq);
    fprintf(OutputFile,"Initial-Active-to-Dangling-Ratio: %g\n", INIT_ACT_TO_DNG_RATIO);
    fprintf(OutputFile,"Potential-well-distance(d): %g\n", LITTLE_D );
    fprintf(OutputFile,"Tao_Fundamental: %g\n", TAO_FUND);
    fprintf(OutputFile,"Z: %g\n", ZEE);
    fprintf(OutputFile,"Alpha_Zero: %g\n", ALPHA_ZERO);
    fprintf(OutputFile,"Alpha_One: %g\n", ALPHA_ONE);
    fprintf(OutputFile,"U_Zero: %g\n", U_ZERO);
    fprintf(OutputFile,"D_Free: %g\n", D_FREE);
    //`````````````````````````````````````````````
    //____ print ensemble average at each macro time step ______
    fprintf(OutputFile," - - - - - - - - - - - - - - - - - - - - - - - \n");
    fprintf(OutputFile,"|| XX || XY || YY ||\n");
    fprintf(OutputFile," - - - - - - - - - - - - - - - - - - - - - - - \n");
    int k;
    for (k=0; k<count; k++){
        fprintf(OutputFile,"% 2.16g," , XX[k]);
        fprintf(OutputFile," % 2.16g," , XY[k]);
        fprintf(OutputFile," % 2.16g\n", YY[k]);
    }
    //```````````````````````````````````````````````````````
    fclose(OutputFile);
}
/*
 * GPU helper: probability that an active dumbbell with the given spring
 * length switches to the dangling state within one micro time step.
 * Uses equations (10), (11) and (13) of the paper; tau is recomputed on
 * every call (could be hoisted, as the original noted, since Tao_zero is
 * a compile-time constant expression).
 */
__device__ double ActiveToDanglingProb (double SpringLen){
    const double tauZero = TAO_FUND * exp ( U_ZERO );                                   // Equation (11)
    const double lenSq   = SpringLen * SpringLen;
    const double tau     = tauZero * exp ( - ( LITTLE_D * LITTLE_D * lenSq ) / U_ZERO ); // Equation (10)
    return 1.0 - exp( -2.0 * devStepSizeMicro / tau );                                  // Equation (13)
}
/*
 * GPU helper: probability that a dangling dumbbell with the given spring
 * length switches to the active state within one micro time step.
 * Implements equation (14) of the paper.
 */
__device__ double DanglingToActiveProb (double SpringLen) {
    const double rate = ALPHA_ZERO + ALPHA_ONE * SpringLen;
    return 1.0 - exp( -rate * devStepSizeMicro ); //Equation (14)
}
/*
 * Function: EvolveActive
 * GPU Function
 * Advances an active dumbbell by one micro step of the SDE
 * (Euler-Maruyama). Both component steps are computed from the pre-step
 * lengths before either pointer is written, so the statement order below
 * must be preserved.
 */
__device__ void EvolveActive (double *SpringLenX, double *SpringLenY, double randx, double randy, double *AvgSpringLifes, double *SimTime,
double totaltime){
    double SpringLenXStep, SpringLenYStep;
    double drag_coeff_active = 0.5; // set to 0.5 for comparison with analytic UCM result
    // Non-dimensional evolution equation, x component:
    // convection (U matrix, SAOS modulation) + drag + Brownian noise.
    SpringLenXStep = *SpringLenX
    + (U11 * *SpringLenX + U21 * devFreq * cos(devFreq * *SimTime) * *SpringLenY) * devStepSizeMicro * devFlowRate
    - drag_coeff_active * *SpringLenX * devStepSizeMicro
    + sqrt( devStepSizeMicro ) * randx;
    // y component (same structure; reads the still-unmodified *SpringLenX).
    SpringLenYStep = *SpringLenY
    + (U12 * *SpringLenX + U22 * *SpringLenY) * devStepSizeMicro * devFlowRate
    - drag_coeff_active * *SpringLenY * devStepSizeMicro
    + sqrt( devStepSizeMicro ) * randy;
    // Commit both components together.
    *SpringLenX = SpringLenXStep;
    *SpringLenY = SpringLenYStep;
}
/*
 * Function: EvolveDangling
 * GPU Function
 * Advances a dangling dumbbell by one micro step of the SDE
 * (Euler-Maruyama). Both components are derived from the pre-step
 * lengths, then written back together.
 */
__device__ void EvolveDangling(double *SpringLenX, double *SpringLenY, double randx, double randy, double *AvgSpringLifes, double *SimTime,
double totaltime){
    const double drag_coeff_dangle = 0.5; //For comparison with UCM
    // Snapshot the pre-step lengths so both updates see the same state.
    const double x = *SpringLenX;
    const double y = *SpringLenY;
    const double convect = devStepSizeMicro * devFlowRate;
    const double noise   = sqrt( drag_coeff_dangle * devStepSizeMicro );
    // Hookean dumbbell evolution: convection + drag + Brownian noise.
    *SpringLenX = x
        + (U11 * x + U21 * devFreq * cos(devFreq * *SimTime) * y) * convect
        - drag_coeff_dangle * x * devStepSizeMicro
        + noise * randx;
    *SpringLenY = y
        + (U12 * x + U22 * y) * convect
        - drag_coeff_dangle * y * devStepSizeMicro
        + noise * randy;
}
/*
 * Function: Micro_Steps
 * Runs the inner (micro) loop of the SDE on the GPU: one thread per
 * dumbbell, devTimeStepsMicro Euler steps per kernel call.
 * NOTE(review): there is no `i < nParticles` guard, so the launch
 * configuration must not create more threads than array entries --
 * confirm against the host-side grid/block computation.
 */
__global__ void Micro_Steps( double *SpringLenX, double *SpringLenY, int *SpeciesType,
curandState *states, curandState *ProbStates,
double *AvgSpringLifes, double *SimTime, double totaltime){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Copy RNG states to locals for efficiency; written back at the end.
    curandState localState = states[i];
    curandState localProbState = ProbStates[i];
    int j;
    double2 RandNorm;
    for(j=0; j < devTimeStepsMicro; j++){
        // Fresh pair of standard normals each step.
        RandNorm = curand_normal2_double(&localState);
        // Evolve the dumbbell according to its species.
        if (SpeciesType[i]==0){ //if active type
            EvolveActive(&SpringLenX[i], &SpringLenY[i], RandNorm.x, RandNorm.y, AvgSpringLifes, &SimTime[i], totaltime);
        }
        else if (SpeciesType[i]==1){ //if dangling type
            // BUG FIX: the original passed `SimTime` (i.e. &SimTime[0]) here,
            // so every dangling dumbbell read particle 0's clock instead of
            // its own. Pass &SimTime[i], matching the active branch.
            EvolveDangling(&SpringLenX[i], &SpringLenY[i], RandNorm.x, RandNorm.y, AvgSpringLifes, &SimTime[i], totaltime);
        }
        SimTime[i] += devStepSizeMicro;
    }
    // Persist the advanced RNG states.
    states[i] = localState;
    ProbStates[i] = localProbState;
}
/*
 * Function: RandomGenInit
 * Seeds one curand generator per thread, each from its own entry in
 * SeedList, so every thread draws an independent random sequence.
 */
__global__ void RandomGenInit(unsigned int *SeedList, curandState *states){
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(SeedList[tid], tid, 0, &states[tid]);
}
// Debug kernel: each thread prints the spring components of its dumbbell.
__global__ void PrintSpringLengths ( double *SpringLenX, double *SpringLenY) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    printf(" x:% f y:% f \n", SpringLenX[tid], SpringLenY[tid]);
}
/*
 * Function: RndNorm
 * CPU helper: draws a normal random number (mean 0, variance `Variance`)
 * from rand() via the Marsaglia polar method. The method yields two
 * independent normals per rejection loop; the spare is cached in static
 * state and returned on the next call.
 */
__host__ double RndNorm (void)
{
    double Variance = 1;
    static int HasSpareRandomNum = 0;  // 1 when a cached normal is available
    static double SpareRandomNum;      // the cached second normal
    if(HasSpareRandomNum == 1){
        HasSpareRandomNum = 0;
        return Variance * SpareRandomNum;
    }
    HasSpareRandomNum = 1;
    // FIX: u, v, s are pure scratch -- always assigned before use in the
    // loop below -- so the original's `static` storage was needless state.
    double u,v,s;
    do{
        // Rejection-sample a point uniformly inside the unit disk.
        u = ( rand() / ((double) RAND_MAX)) * 2 - 1;
        v = ( rand() / ((double) RAND_MAX)) * 2 - 1;
        s = u * u + v * v;
    } while (s >= 1 || s == 0);
    s = sqrt (-2.0 * log(s) / s);
    SpareRandomNum = v * s; //Save spare random number for next function call
    return Variance * u * s;
}
// Prints the fractions of active and dangling dumbbells in the ensemble.
__host__ void OutputRatio (int Active, int Dangling){
    const double total = (double) hostNumberOfParticles;
    printf("Active %f Dangling: %f\n", Active / total, Dangling / total);
}
/*
 * Computes the ensemble-average spring lifetime tau (equations 10/11,
 * Hookean springs) over all currently active dumbbells.
 * Returns 0.0 when no dumbbell is active.
 */
__host__ double AvgSpringLife ( double *SpringLenX, double *SpringLenY, int *SpeciesType){
    int j;
    double Tao_zero = TAO_FUND * exp ( U_ZERO );  // Equation (11)
    double Total = 0.0;
    double SpringLen;
    int ActiveCount = 0;
    for (j=0; j<hostNumberOfParticles; j++){
        if (SpeciesType[j] == 0){ //If active type
            ActiveCount++;
            SpringLen = sqrt( SpringLenX[j] * SpringLenX[j] + SpringLenY[j] * SpringLenY[j]);
            // Hookean springs, Equation (10).
            Total += Tao_zero * exp (- LITTLE_D * LITTLE_D * SpringLen * SpringLen / U_ZERO );
        }
    }
    // BUG FIX: the original divided by ActiveCount unconditionally and
    // returned NaN when the ensemble contained no active dumbbells.
    if (ActiveCount == 0) return 0.0;
    return Total / (double) ActiveCount;
}
int main(int argc, char *argv[]){
//_____Record Program Run Time
clock_t begin, end, end2;
begin = clock();
double time_spent, time_spent2;
//````````````````````````````````
// ____ Read Command Line Arguments _____
if (ParseInput(argc, argv)==EXIT_FAILURE){
exit(2);
}
//`````````````````````````````````````
PrintSimInfo(); //Output Simulation Variables to Terminal
//___ Set Global Variable Values _______
cudaMemcpyToSymbol(devStepSizeMicro, &hostStepSizeMicro, sizeof(double));
cudaMemcpyToSymbol(devTimeStepsMicro, &hostTimeStepsMicro, sizeof(unsigned int));
cudaMemcpyToSymbol(devFlowRate, &hostFlowRate, sizeof(double));
cudaMemcpyToSymbol(devMaxSpringLength, &hostMaxSpringLength, sizeof(double));
cudaMemcpyToSymbol(devFreq, &hostFreq, sizeof(double));
//```````````````````````````````````````
//____define block and thread structure______
dim3 block;
if (hostNumberOfParticles < 32){
block.x = hostNumberOfParticles;
block.y = 1;
}
else {
block.x=512;
block.y = 1;
}
dim3 grid ((hostNumberOfParticles + block.x -1) / block.x,1);
//`````````````````````````````````````
//__Variables for random number generation on GPU kernels
curandState *states = NULL;
curandState *ProbStates = NULL;
//``````````````````````````````````
//____allocate memory on GPU for random number generator states______
CUDA_CALL(cudaMalloc((void **)&states, sizeof(curandState) * hostNumberOfParticles ));
CUDA_CALL(cudaMalloc((void **)&ProbStates, sizeof(curandState) * hostNumberOfParticles ));
//`````````````````````````````````````````````````````````````````
//__create vectors of seeds_____
unsigned int *hostSeeds, *devSeeds;
unsigned int *hostProbSeeds, *devProbSeeds;
hostSeeds = (unsigned int *)malloc(hostNumberOfParticles*sizeof(unsigned int));
hostProbSeeds = (unsigned int *)malloc(hostNumberOfParticles*sizeof(unsigned int));
CUDA_CALL(cudaMalloc((void **)&devSeeds, sizeof(unsigned int) * hostNumberOfParticles));
CUDA_CALL(cudaMalloc((void **)&devProbSeeds, sizeof(unsigned int) * hostNumberOfParticles));
srand(time(NULL));
int i;
for (i=0; i<hostNumberOfParticles; i++){
hostSeeds[i] = rand();
hostProbSeeds[i] = rand();
}
//````````````````````````````
CUDA_CALL(cudaMemcpy(devSeeds, hostSeeds, sizeof(unsigned int) * hostNumberOfParticles, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(devProbSeeds, hostProbSeeds, sizeof(unsigned int) * hostNumberOfParticles, cudaMemcpyHostToDevice));
//___initialize kernel random number generator on GPU threads____
RandomGenInit<<< grid, block >>>(devSeeds, states);
CUDA_CALL( cudaPeekAtLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
RandomGenInit<<< grid, block >>>(devProbSeeds, ProbStates);
CUDA_CALL( cudaPeekAtLastError() );
CUDA_CALL( cudaDeviceSynchronize() );
//````````````````````````````````````
//____Spring Length variables____
double *devSpringLenX, *devSpringLenY;
double *hostSpringLenX, *hostSpringLenY;
//`````````````````````````````````
//___Dumbbell Species Type Variable___
int *devSpeciesType;
int *hostSpeciesType;
//``````````````````````````````````
//_______allocate memory on CPU
hostSpringLenX = (double*)malloc(hostNumberOfParticles*sizeof(double));
hostSpringLenY = (double*)malloc(hostNumberOfParticles*sizeof(double));
hostSpeciesType = (int*)malloc(hostNumberOfParticles*sizeof(int));
//`````````````````````````
//_____allocate memory on GPU for spring length
CUDA_CALL(cudaMalloc((double**)&devSpringLenX, hostNumberOfParticles*sizeof(double)));
CUDA_CALL(cudaMalloc((double**)&devSpringLenY , hostNumberOfParticles*sizeof(double)));
CUDA_CALL(cudaMalloc((int**)&devSpeciesType, hostNumberOfParticles*sizeof(int)));
//`````````````````````````````````````
//___Simulation Time____
//Variables for tracking time t throughout simulation
double *devSimTime, *hostSimTime;
hostSimTime = (double *)malloc(hostNumberOfParticles*sizeof(double));
CUDA_CALL(cudaMalloc((double**)&devSimTime,hostNumberOfParticles*sizeof(double)));
//````````````````````````````````````````````
//___ Set initial Spring Lengths to Normal Distribution
int l;
for (l=0; l < hostNumberOfParticles; l++){
hostSimTime[l] = 0.0;
//___ Set initial length randomly__
hostSpringLenX[l] = RndNorm(); //Starting from this appears to speed up
hostSpringLenY[l] = RndNorm(); // steady state for SAOS
//`````````````````````````````````
//___set initial species type__
hostSpeciesType[l] = 0; //Make all dumbbells active initially
//`````````````````````````````
}
//``````````````````````````````````````````````````
//____Copy spring lengths to Gpu device
CUDA_CALL(cudaMemcpy(devSpringLenX, hostSpringLenX, hostNumberOfParticles*sizeof(double), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(devSpringLenY, hostSpringLenY, hostNumberOfParticles*sizeof(double), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(devSpeciesType, hostSpeciesType, hostNumberOfParticles*sizeof(int), cudaMemcpyHostToDevice));
//````````````````````````````````````````````````
CUDA_CALL(cudaMemcpy(devSimTime, hostSimTime, hostNumberOfParticles*sizeof(double), cudaMemcpyHostToDevice));
//PrintSpringLengths<<< grid, block >>>(devSpringLenX, devSpringLenY); //print lengths to verify created correctly
//___ initialize variables to calculate and store ensemble average
double *Spring_AvgLen_XX;
double *Spring_AvgLen_XY;
double *Spring_AvgLen_YY;
Spring_AvgLen_XX = (double*)malloc((hostTimeStepsMacro+1)*sizeof(double));
Spring_AvgLen_XY = (double*)malloc((hostTimeStepsMacro+1)*sizeof(double));
Spring_AvgLen_YY = (double*)malloc((hostTimeStepsMacro+1)*sizeof(double));
int k;
double EnsembleAverageXX_Active = 0.0;
double EnsembleAverageXY_Active = 0.0;
double EnsembleAverageYY_Active = 0.0;
double EnsembleAverageXX_Dangling = 0.0;
double EnsembleAverageXY_Dangling = 0.0;
double EnsembleAverageYY_Dangling = 0.0;
int j;
//````````````````````````````````````````````````````````
int NumberOfActive = 0;
int NumberOfDangling = 0;
//_____calculate ensemble average at time = 0
for (j=0; j<hostNumberOfParticles; j++){
if (hostSpeciesType[j]==0){ //if dumbbell is Active type
NumberOfActive++;
//___Hookean Springs____
EnsembleAverageXX_Active += - hostSpringLenX[j] * hostSpringLenX[j];
EnsembleAverageXY_Active += - hostSpringLenX[j] * hostSpringLenY[j];
EnsembleAverageYY_Active += - hostSpringLenY[j] * hostSpringLenY[j];
//```````````````````
} else if (hostSpeciesType[j]==1){ //if dumbbell is Dangling type
NumberOfDangling++;
//___Hookean Springs____
EnsembleAverageXX_Dangling += -hostSpringLenX[j] * hostSpringLenX[j];
EnsembleAverageXY_Dangling += -hostSpringLenX[j] * hostSpringLenY[j];
EnsembleAverageYY_Dangling += -hostSpringLenY[j] * hostSpringLenY[j];
//```````````````````
} else {
printf("Error1: Unable to Classify Species Type\n");
}
}
if (NumberOfActive == 0){
Spring_AvgLen_XX[0] = EnsembleAverageXX_Dangling / (double)NumberOfDangling;
Spring_AvgLen_XY[0] = EnsembleAverageXY_Dangling / (double)NumberOfDangling;
Spring_AvgLen_YY[0] = EnsembleAverageYY_Dangling / (double)NumberOfDangling;
} else if ( NumberOfDangling == 0){
Spring_AvgLen_XX[0] = EnsembleAverageXX_Active / (double)NumberOfActive;
Spring_AvgLen_XY[0] = EnsembleAverageXY_Active / (double)NumberOfActive;
Spring_AvgLen_YY[0] = EnsembleAverageYY_Active / (double)NumberOfActive;
} else {
Spring_AvgLen_XX[0] = EnsembleAverageXX_Active / (double)NumberOfActive + EnsembleAverageXX_Dangling / (double)NumberOfDangling;
Spring_AvgLen_XY[0] = EnsembleAverageXY_Active / (double)NumberOfActive + EnsembleAverageXY_Dangling / (double)NumberOfDangling;
Spring_AvgLen_YY[0] = EnsembleAverageYY_Active / (double)NumberOfActive + EnsembleAverageYY_Dangling / (double)NumberOfDangling;
}
//``````````````````````````````````
//____To Calculate Average Length of all Active Dumbbells___
double *hostAverageSpringLife, *devAverageSpringLife;
hostAverageSpringLife = (double *)malloc(sizeof(double));
CUDA_CALL(cudaMalloc((double**)&devAverageSpringLife,sizeof(double)));
//```````````````````````````````````````````````````````
double totaltime = hostStepSizeMicro * hostTimeStepsMicro * hostTimeStepsMacro;
//_____ Main simulation loop (Macro Time) ____
for (k=1; k<=hostTimeStepsMacro; k++){
//Calculate Average Length of all Active dumbbells
*hostAverageSpringLife = AvgSpringLife(hostSpringLenX, hostSpringLenY, hostSpeciesType);
CUDA_CALL(cudaMemcpy(devAverageSpringLife,hostAverageSpringLife,sizeof(double),cudaMemcpyHostToDevice));
//Call function to perform computations on GPU
Micro_Steps<<<grid,block>>>(devSpringLenX,devSpringLenY,devSpeciesType,states,ProbStates,devAverageSpringLife,devSimTime,totaltime);
//read result from gpu(device) back to cpu(host)
CUDA_CALL(cudaMemcpy(hostSpringLenX, devSpringLenX, hostNumberOfParticles*sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(hostSpringLenY, devSpringLenY, hostNumberOfParticles*sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(hostSpeciesType, devSpeciesType, hostNumberOfParticles*sizeof(int), cudaMemcpyDeviceToHost));
//read sim time back from gpu(device) back to cpu(host)
CUDA_CALL(cudaMemcpy(hostSimTime, devSimTime, sizeof(double), cudaMemcpyDeviceToHost));
NumberOfActive = 0;
NumberOfDangling = 0;
EnsembleAverageXX_Active = 0;
EnsembleAverageXY_Active = 0;
EnsembleAverageYY_Active = 0;
EnsembleAverageXX_Dangling = 0;
EnsembleAverageXY_Dangling = 0;
EnsembleAverageYY_Dangling = 0;
//____ Calculate Ensemble Averages ______
for (j=0; j<hostNumberOfParticles; j++){
if (hostSpeciesType[j]==0){ //if dumbbell is Active type
NumberOfActive++;
//___Hookean Springs____
EnsembleAverageXX_Active += -hostSpringLenX[j] * hostSpringLenX[j];
EnsembleAverageXY_Active += -hostSpringLenX[j] * hostSpringLenY[j];
EnsembleAverageYY_Active += -hostSpringLenY[j] * hostSpringLenY[j];
//```````````````````
} else if (hostSpeciesType[j]==1){ //if dumbbell is Dangling type
NumberOfDangling++;
//____Hookean Springs_____
EnsembleAverageXX_Dangling += -hostSpringLenX[j] * hostSpringLenX[j];
EnsembleAverageXY_Dangling += -hostSpringLenX[j] * hostSpringLenY[j];
EnsembleAverageYY_Dangling += -hostSpringLenY[j] * hostSpringLenY[j];
//````````````````````````````
} else {
printf("Error2: Unable to Classify Species Type of Dumbbell[%d] with Type: %d \n", j, hostSpeciesType[j]);
exit(4);
}
}
if (NumberOfActive == 0){
Spring_AvgLen_XX[k] = EnsembleAverageXX_Dangling / (double)NumberOfDangling;
Spring_AvgLen_XY[k] = EnsembleAverageXY_Dangling / (double)NumberOfDangling;
Spring_AvgLen_YY[k] = EnsembleAverageYY_Dangling / (double)NumberOfDangling;
} else if ( NumberOfDangling == 0){
Spring_AvgLen_XX[k] = EnsembleAverageXX_Active / (double)NumberOfActive;
Spring_AvgLen_XY[k] = EnsembleAverageXY_Active / (double)NumberOfActive;
Spring_AvgLen_YY[k] = EnsembleAverageYY_Active / (double)NumberOfActive;
} else {
Spring_AvgLen_XX[k] = EnsembleAverageXX_Active / (double)NumberOfActive + EnsembleAverageXX_Dangling / (double)NumberOfDangling;
Spring_AvgLen_XY[k] = EnsembleAverageXY_Active / (double)NumberOfActive + EnsembleAverageXY_Dangling / (double)NumberOfDangling;
Spring_AvgLen_YY[k] = EnsembleAverageYY_Active / (double)NumberOfActive + EnsembleAverageYY_Dangling / (double)NumberOfDangling;
}
}
//``````````````End Macro loop``````````````
// __ stop computational clock ____
end = clock();
time_spent = double(end-begin)/ CLOCKS_PER_SEC;
//````````````````````````````````
//___Write Values to .csv file
OutputToFile(Spring_AvgLen_XX, Spring_AvgLen_XY, Spring_AvgLen_YY, time_spent, k);
//````````````````````````````
OutputRatio(NumberOfActive,NumberOfDangling);
//___ clean up memory ____
free(hostSimTime);
CUDA_CALL(cudaFree(devSimTime));
free(hostAverageSpringLife);
CUDA_CALL(cudaFree(devAverageSpringLife));
free(hostSeeds);
CUDA_CALL(cudaFree(devSeeds));
CUDA_CALL(cudaFree(states));
free(hostProbSeeds);
CUDA_CALL(cudaFree(devProbSeeds));
CUDA_CALL(cudaFree(ProbStates));
free(hostSpringLenX);
free(hostSpringLenY);
CUDA_CALL(cudaFree(devSpringLenX));
CUDA_CALL(cudaFree(devSpringLenY));
free(Spring_AvgLen_XX);
free(Spring_AvgLen_XY);
free(Spring_AvgLen_YY);
//```````````````````````
cudaDeviceReset();
// __ stop computational clock ____
end2 = clock();
time_spent2 = double(end2-begin)/ CLOCKS_PER_SEC;
printf("Runtime: %f\n\n", time_spent2);
//````````````````````````````````
return EXIT_SUCCESS;
}
|
// Gather selected rows of a row-major matrix into `result`.
// result[r][c] = matrix[indices[r]][c]; one thread per output element.
// Launch with a 2-D grid covering indicesLength x numColumns.
template<typename T>
__device__ void matrixRows(const T* matrix, const int* indices, T* result, const int numRows, const int numColumns, const int indicesLength) {
    // Global 2-D coordinates of the output element this thread owns.
    const int outRow = blockIdx.y * blockDim.y + threadIdx.y;
    const int outCol = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the grid may be larger than the output extent.
    if (outRow >= indicesLength || outCol >= numColumns) {
        return;
    }
    // The source row is looked up through the indices array.
    result[outRow * numColumns + outCol] = matrix[indices[outRow] * numColumns + outCol];
}
// Gather selected columns of a row-major matrix into `result`
// (result is numRows x indicesLength). One thread per output element.
template<typename T>
__device__ void matrixColumns(const T* matrix, const int* indices, T* result, const int numRows, const int numColumns, const int indicesLength) {
    const int outRow = blockIdx.y * blockDim.y + threadIdx.y;
    const int outCol = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip threads outside the output extent.
    if (outRow >= numRows || outCol >= indicesLength) {
        return;
    }
    // The source column is looked up through the indices array.
    result[outRow * indicesLength + outCol] = matrix[outRow * numColumns + indices[outCol]];
}
// Build a matrix of vector values: result[i][j] = vector[indices[i][j]].
// `indices` and `result` are row-major with the same shape
// (indicesNumRows x indicesNumColumns).
template<typename T>
__device__ void vectorValuesMatrix(const T* vector, const int* indices, T* result, const int length, const int indicesNumRows, const int indicesNumColumns) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < indicesNumRows && col < indicesNumColumns) {
        const int flat = row * indicesNumColumns + col;
        result[flat] = vector[indices[flat]];
    }
}
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
/*
This is a simple vector addition program.
C = A + B
*/
/*
CUDA Kernel Device Code starts with __global__ keyword
*/
/* Element-wise vector addition kernel: C[i] = A[i] + B[i].
   The guard lets extra threads from the rounded-up grid exit safely. */
__global__ void vadd( float *A, float *B, float *C, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    C[idx] = A[idx] + B[idx];
}
/*Host function*/
/* Host driver: allocates host/device vectors, runs vadd on the GPU and
   verifies the result against the host-side sum.
   Improvement: the error codes that were captured into `err` are now
   actually checked after every allocation and copy. */
int main()
{
    //call for error code to check CUDA calls
    cudaError_t err = cudaSuccess;
    //vector length
    int N = 50000;
    size_t size = N * sizeof(float);
    printf("Vector addition of %d elements.\n", N);
    //Allocate memory for host
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    //Error check: host allocations can fail for large N
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        printf("Failed to allocate host vectors.\n");
        exit(EXIT_FAILURE);
    }
    //Initialized value for host vectors
    for(int i=0; i< N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
        h_C[i] = 0.0f; //good practice in numerical method
    }
    //allocate device vectors, aborting on the first failure
    float *d_A = NULL;
    err = cudaMalloc( (void **)&d_A, size );
    if(err != cudaSuccess)
    {
        printf("Failed to allocate d_A, error code %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_B = NULL;
    err = cudaMalloc( (void **)&d_B, size );
    if(err != cudaSuccess)
    {
        printf("Failed to allocate d_B, error code %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_C = NULL;
    err = cudaMalloc( (void **)&d_C, size );
    if(err != cudaSuccess)
    {
        printf("Failed to allocate d_C, error code %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //Copy host vectors h_A, h_B to device vectors d_A, d_B
    printf("Copy input data from host memory to the CUDA device.\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if(err == cudaSuccess)
        err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        printf("Failed to copy data to device, error %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //Launch the vector addition kernel
    int threadsperblock = 256;
    int blockspergrid = (N + threadsperblock-1) / threadsperblock; //ceil-div
    printf("Launching CUDA kernel with %d blocks and %d threads.\n", blockspergrid,
            threadsperblock);
    vadd<<<blockspergrid, threadsperblock >>>(d_A, d_B, d_C, N); //the kernel call
    err = cudaGetLastError();
    //at least need to know the launch succeeded
    if(err != cudaSuccess)
    {
        printf("Failed to launch vadd kernel, error code %s.\n",
               cudaGetErrorString(err) );
        exit(EXIT_FAILURE);
    }
    //Copy device result vector to host result vector in host memory
    printf("Copying output data from CUDA device to host memory.\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if(err != cudaSuccess)
    {
        printf("Failed to copy data from device, error %s.\n", cudaGetErrorString(err) );
        exit(EXIT_FAILURE);
    }
    //verify result against the host-side sum
    for(int i=0; i<N; ++i){
        if( fabs(h_A[i] + h_B[i] - h_C[i] ) > 1.0e-5 ){
            printf("Result verification failed at element %d \n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Kernel success!\n");
    //Free device global memory
    err = cudaFree(d_A);
    err = cudaFree(d_B);
    err = cudaFree(d_C);
    //Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
13,671 |
#include <device_launch_parameters.h>
#include <cuda_runtime_api.h>
#include <cstdio>
// Device input vectors
int *d_a;
//Device output vector
int *d_b;
// Mathematical (always non-negative) modulo for b > 0; used for the
// toroidal wrap-around of the grid.
__device__ int mod(int a, int b) {
    const int r = a % b;
    return r < 0 ? r + b : r;
}
/* Advance the cellular automaton by one generation.
   A is the current grid and B the next; both are height*width, row-major,
   with cells 0 (dead) or 1 (alive). Edges wrap around (toroidal grid).
   Birth: dead cell with 3 or 6 live neighbours; survival: live cell with
   2 or 3 live neighbours. */
__global__ void update(int *A, int *B, int height, int width) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= height * width) return;
    const int h = index / width;
    const int w = index % width;
    int aliveNeighbours = 0;
    // Visit the 3x3 neighbourhood; (i | j) is zero only at the centre,
    // so the cell never counts itself.
    for (int i = -1; i < 2; ++i) {
        for (int j = -1; j < 2; ++j) {
            if (i | j) {
                aliveNeighbours += (A[mod(h + i, height) * width + mod(w + j, width)] != 0);
            }
        }
    }
    const int born    = (A[index] == 0) && (aliveNeighbours == 3 || aliveNeighbours == 6);
    const int survive = (A[index] == 1) && (aliveNeighbours == 2 || aliveNeighbours == 3);
    B[index] = born || survive;
}
// Allocate the two height*width device grids (current and next
// generation). Call before updateCuda(); pair with destroy().
// Improvement: cudaMalloc failures are no longer silently ignored.
void setUp(int height, int width) {
    const size_t bytes = (size_t)height * width * sizeof(int);
    cudaError_t err = cudaMalloc((void **) &d_a, bytes);
    if (err == cudaSuccess) {
        err = cudaMalloc((void **) &d_b, bytes);
    }
    if (err != cudaSuccess) {
        fprintf(stderr, "setUp: cudaMalloc failed: %s\n", cudaGetErrorString(err));
    }
}
// Release the device grids allocated by setUp().
// Improvement: the global pointers are reset so a second call (or a
// later setUp/destroy cycle) never frees a stale handle.
void destroy() {
    cudaFree(d_a);
    cudaFree(d_b);
    d_a = NULL;
    d_b = NULL;
}
// Copy grid A to the device, run one generation, copy the result into B.
// A and B are host pointers to height*width ints.
// Bug fix: the kernel was launched as <<<width*height, 1>>> — one thread
// per block, which serializes the GPU. The commented-out code in the
// original shows the intended blocked configuration; restored here.
void updateCuda(int *A, int *B, int height, int width) {
    // Size, in bytes, of each grid
    size_t bytes = height * width * sizeof(int);
    // Copy host grid to device
    cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
    // One thread per cell, 256 threads per block; round the grid size up
    // so every cell is covered (the kernel bounds-checks the tail).
    const int n = height * width;
    const int blockSize = 256;
    const int gridSize = (n + blockSize - 1) / blockSize;
    update<<< gridSize, blockSize >>>(d_a, d_b, height, width);
    // Copy result back to host (this cudaMemcpy synchronizes with the kernel)
    cudaMemcpy( B, d_b, bytes, cudaMemcpyDeviceToHost );
}
13,672 | /* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
// one kernel for GPU 1 and one for GPU 2
// One Jacobi sweep over GPU 1's half of the cube (first index i in the
// lower half). u: updated grid, v: previous iterate on this GPU,
// f: source term, v2: the OTHER GPU's previous iterate, read only for
// the halo plane at the split boundary.
__device__
void jacobi_gpuDual1(int N, double ***u, double ***v, double ***f, double ***v2, int iter_max) {
    //int counter = 0;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    //v[i][j][k] = u[i][j][k];
    //} while (counter <iter_max);
    // Interior points strictly inside this GPU's half (only half the i range).
    if(i > 0 && j > 0 && k > 0 && i<((N/2)-1) && j<(N-1) && k<(N-1)){
        //v[i][j][k] = u[i][j][k];
        u[i][j][k] = 1./6.*(v[i-1][j][k]+v[i+1][j][k]+v[i][j-1][k]+v[i][j+1][k]+v[i][j][k-1]+v[i][j][k+1] + 1./((N)*(N)) * f[i][j][k]);
    }
    // Split plane i == N/2-1: the i+1 neighbour lives on the other GPU,
    // so read plane 0 of v2 instead of v[i+1].
    if(i > 0 && j > 0 && k > 0 && i==(N/2-1) && j<(N-1) && k<(N-1)){
        u[i][j][k] = 1./6.*(v[i-1][j][k]+v2[0][j][k]+v[i][j-1][k]+v[i][j+1][k]+v[i][j][k-1]+v[i][j][k+1]+ 1./((N)*(N))* f[i][j][k]);
    }
}
// One Jacobi sweep over GPU 2's half of the cube. Mirror image of
// jacobi_gpuDual1: v1 is GPU 1's previous iterate, read only for the
// halo plane at i == 0 (whose i-1 neighbour is GPU 1's last plane).
__device__
void jacobi_gpuDual2(int N, double ***u, double ***v, double ***f, double ***v1, int iter_max) {
    //int counter = 0;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    //v[i][j][k] = u[i][j][k];
    //} while (counter <iter_max);
    // Interior points strictly inside this GPU's half (only half the i range).
    if(i > 0 && j > 0 && k > 0 && i<((N/2)-1) && j<(N-1) && k<(N-1)){
        //v[i][j][k] = u[i][j][k];
        u[i][j][k] = 1./6.*(v[i-1][j][k]+v[i+1][j][k]+v[i][j-1][k]+v[i][j+1][k]+v[i][j][k-1]+v[i][j][k+1] + 1./((N)*(N)) * f[i][j][k]);
        //printf("i=%i j=%i k=%i | u=%f v=%f f=%f\n", i, j, k, u[i][j][k], v[i][j][k], f[i][j][k]);
    }
    // Split plane i == 0: the i-1 neighbour is GPU 1's last plane (N/2-1).
    if(j > 0 && k>0 && i==0 &&j<N-1 && k<N-1){
        u[i][j][k] = 1./6.*(v1[(N/2)-1][j][k]+v[i+1][j][k]+v[i][j-1][k]+v[i][j+1][k]+v[i][j][k-1]+v[i][j][k+1] + 1./((N)*(N)) * f[i][j][k]);
        //printf("i=%i j=%i k=%i | u=%f v=%f f=%f\n", i, j, k, u[i][j][k], v[i][j][k], f[i][j][k]);
    }
}
// Kernel to be launched on a single thread
// Kernel wrapper: runs the GPU-1 half-domain Jacobi sweep.
__global__
void jacobi_dual1(int N, double ***u, double ***v, double ***f, double ***v2, int iter_max)
{
    jacobi_gpuDual1(N, u, v, f, v2, iter_max);
}
// Kernel wrapper: runs the GPU-2 half-domain Jacobi sweep.
__global__
void jacobi_dual2(int N, double ***u, double ***v, double ***f, double ***v1, int iter_max)
{
    jacobi_gpuDual2(N, u, v, f, v1,iter_max);
}
|
13,673 | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL_NO_SYNC( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CHECK( call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
cudaError err = cudaThreadSynchronize(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define htod cudaMemcpyHostToDevice
#define dtoh cudaMemcpyDeviceToHost
// Exclusive prefix sum on the host: B[i] = A[0] + ... + A[i-1], B[0] = 0.
// Serves as the reference result for the GPU scans.
void scanCPU(float *A, float *B, int size){
    B[0] = 0;
    int i = 1;
    while (i < size){
        // Each output extends the previous prefix by one input element.
        B[i] = B[i-1] + A[i-1];
        ++i;
    }
}
// Fill ip[0..size) with pseudo-random float values in [0, 512).
void initializeData(float *ip, const int size)
{
    for (int i = 0; i < size; i++)
    {
        ip[i] = (float)(rand() % 512);
    }
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
inline double cpuSecond()
{
    struct timeval tp;
    struct timezone tzp;
    int i = gettimeofday(&tp, &tzp); // return value intentionally unused
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Per-block exclusive scan (Hillis–Steele, double-buffered shared memory).
// Each block independently scans its own blockDim.x-wide slice of g_idata;
// no carry is propagated between blocks. Requires 2*blockDim.x floats of
// dynamic shared memory.
// Bug fix: results were written to g_odata[thid] (block-local index), so
// every block overwrote block 0's slice; the write now uses the global index.
__global__ void scanGPU1(float *g_idata, float *g_odata,int n){
    extern __shared__ float temp[];
    int thid = threadIdx.x;
    int pout = 0, pin = 1;
    // Base of this block's input slice.
    float *idata = blockDim.x*blockIdx.x+g_idata;
    // Shift the input right by one to make the scan exclusive.
    temp[thid] = (thid == 0) ? 0:idata[thid-1];
    __syncthreads();
    for (int offset = 1; offset < blockDim.x; offset = offset*2){
        pout = 1 - pout; // swap double buffer indices
        pin = 1 - pout;
        if (thid >= offset)
            temp[pout*blockDim.x+thid] = temp[pin*blockDim.x+thid] + temp[pin*blockDim.x + thid - offset];
        else
            temp[pout*blockDim.x+thid] = temp[pin*blockDim.x+thid];
        __syncthreads();
    }
    // Write this block's slice of the output (was a bare `thid` index).
    g_odata[blockDim.x*blockIdx.x + thid] = temp[pout*blockDim.x+thid];
}
// Work-efficient exclusive scan (Blelloch up-sweep/down-sweep) in shared
// memory. Requires n floats of dynamic shared memory.
// NOTE(review): the tree arithmetic appears to assume n == blockDim.x and
// n a power of two, and both load and store use block-local indices, so
// results are only meaningful for single-block launches — confirm against
// the launch configuration in main().
__global__ void scanGPU2(float *g_idata, float *g_odata, int n){
    extern __shared__ float temp[];// allocated on invocation
    int thid = threadIdx.x;
    int offset = 1;
    //temp[2*thid] = g_idata[2*thid]; // load input into shared memory
    //temp[2*thid+1] = g_idata[2*thid+1];
    temp[thid] = g_idata[thid];
    __syncthreads();
    // Up-sweep (reduce) phase: build partial sums up the tree.
    for (int d = n>>1; d > 0; d >>= 1){// build sum in place up the tre
        if (thid < d){
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            temp[bi] += temp[ai];
        }
        offset <<= 1; //multiply by 2 implemented as bitwise operation
        __syncthreads();
    }
    if (thid == 0) { temp[n - 1] = 0; } // clear the last element
    offset >>= 1;
    //__syncthreads();
    // Down-sweep phase: walk back down the tree, swapping and accumulating
    // to turn the reduction tree into an exclusive scan.
    for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
    {
        if (thid < d){
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
        offset >>= 1;
        __syncthreads();
    }
    g_odata[thid] = temp[thid];
}
// Compare the host reference against the GPU result element-wise.
// Prints each mismatching element and a final match/no-match summary.
// Bug fix: removed a stray unconditional printf (debug leftover) that
// dumped every element regardless of whether it matched.
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    float epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = 0;
            printf("host %f gpu %f at element %d \n", hostRef[i], gpuRef[i],i);
        }
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}
// Driver: build a random input, scan it on CPU (reference) and GPU, then
// compare. Optional argv[1] = element count, argv[2] = threads per block.
// Bug fixes: the argc tests were off by one, so a lone size argument (and
// the block-size argument) were silently ignored; h_A was never freed.
int main(int argc, char **argv){
    srand(time(NULL));
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp,dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    //malloc on the device side
    int size;
    // argv[1] exists whenever argc > 1 (was `argc > 2`).
    if(argc > 1) size = atoi(argv[1]);//1<<9; //1<<24;
    else size = 1024; //MAtrix size
    int nBytes = size*sizeof(float);
    cout << "matrix size is " << size << endl;
    //malloc on host side
    float *h_A, *cpuRef, *gpuRef;
    h_A = new float[size];
    cpuRef = new float[size];
    gpuRef = new float[size];
    //initialize the data at host side
    double iStart = cpuSecond();
    initializeData(h_A,size);
    double iElaps = cpuSecond() - iStart;
    memset(cpuRef,0,nBytes);
    memset(gpuRef,0,nBytes);
    iStart = cpuSecond();
    scanCPU(h_A,cpuRef,size);
    iElaps = cpuSecond() - iStart;
    cout << "cpu time is " << iElaps << endl;
    //malloc on device side
    float *d_A, *d_C;
    CHECK(cudaMalloc((void **) &d_A,nBytes));
    CHECK(cudaMalloc((void **) &d_C,nBytes));
    CHECK(cudaMemcpy(d_A,h_A,nBytes,htod));
    int input = 1024;
    // argv[2] exists whenever argc > 2 (was `argc > 3`).
    if(argc > 2) input = atoi(argv[2]); //block size
    dim3 block(input,1,1);
    dim3 grid((block.x+size-1)/block.x);
    //kernel
    cout << "kernal block size is " << block.x << endl;
    cout << "kernal grid size is " << grid.x << endl;
    cout << "scan alogrithm you choose: (0, 1)" << endl;
    int choice = 2;
    cout << "sample is" << h_A[0] <<' ' << h_A[1] << endl;
    if(choice == 1){
        iStart = cpuSecond();
        // scanGPU1 double-buffers, so it needs 2*block.x floats of shared memory
        scanGPU1<<<grid, block.x, block.x*2*sizeof(float)>>>(d_A,d_C,size);
        cudaDeviceSynchronize();
        iElaps = cpuSecond() - iStart;
    }
    else if(choice == 2){
        cout << "You choose option 2" << endl;
        iStart = cpuSecond();
        scanGPU2<<<grid, block.x, block.x*sizeof(float)>>>(d_A,d_C,size);
        cudaDeviceSynchronize();
        iElaps = cpuSecond() - iStart;
    }
    else return 1;
    cout << "GPU time is " << iElaps << endl;
    CHECK(cudaMemcpy(gpuRef,d_C,nBytes,dtoh));
    checkResult(cpuRef, gpuRef, size);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    delete [] h_A;
    delete [] cpuRef;
    delete [] gpuRef;
    cudaDeviceReset();
    return 0;
}
|
13,674 | #include "includes.h"
//********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ float d_Threshold[2];
__constant__ float d_Scales[8], d_Factor;
__constant__ float d_EdgeLimit;
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[1];
__constant__ float d_Kernel1[5];
__constant__ float d_Kernel2[12*16];
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
// Lowpass-filter d_Data with the 5-tap kernel in d_Kernel1 and subsample
// by 2 into d_Result. Each block covers a SCALEDOWN_W x SCALEDOWN_H input
// tile plus a 2-pixel apron (clamped at the image borders); five source
// rows at a time are staged through shared memory and combined with the
// same 5-tap kernel vertically. pitch / newpitch are the row strides (in
// elements) of the input / output images.
__global__ void ScaleDown_D(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) {
  // TODO: one element per thread in a block?
  __shared__ float inrow[SCALEDOWN_W + 4];
  __shared__ float brow[5 * (SCALEDOWN_W / 2)];
  // Per-row element offsets into the source (yRead) and destination (yWrite)
  __shared__ int yRead[SCALEDOWN_H + 4];
  __shared__ int yWrite[SCALEDOWN_H + 4];
  // Get thread index, which ranges from 0 to SCALEDOWN_W + 4
  const int tx = threadIdx.x;
  // Get indices in brow (five rotating row buffers of horizontal results)
  // TODO: move this out?
#define dx2 (SCALEDOWN_W / 2)
  const int tx0 = tx + 0 * dx2;
  const int tx1 = tx + 1 * dx2;
  const int tx2 = tx + 2 * dx2;
  const int tx3 = tx + 3 * dx2;
  const int tx4 = tx + 4 * dx2;
  // Top-left corner of this block's tile in the source image
  const int xStart = blockIdx.x * SCALEDOWN_W;
  const int yStart = blockIdx.y * SCALEDOWN_H;
  // Destination x coordinate (output is half resolution)
  const int xWrite = xStart / 2 + tx;
  int xRead = xStart + tx - 2;
  xRead = (xRead < 0 ? 0 : xRead);
  xRead = (xRead >= width ? width - 1 : xRead);
  const float *k = d_Kernel1;
  // Identify y read and write indices; threads with
  // tx >= SCALEDOWN_H + 4 skip this section
  if (tx < SCALEDOWN_H + 4) {
    // TODO: tx = 0 and tx = 1 are the same; why?
    int y = yStart + tx - 1;
    // Clamp at 0 and height - 1
    y = (y < 0 ? 0 : y);
    y = (y >= height ? height - 1 : y);
    // Read start index
    yRead[tx] = y * pitch;
    // Write start index
    yWrite[tx] = (yStart + tx - 4) / 2 * newpitch;
  }
  // Synchronize threads to ensure we have yRead and yWrite filled for current
  // warp
  __syncthreads();
  // For each thread (which runs 0 to SCALEDOWN_W + 4 - 1), loop through 0 to
  // SCALEDOWN_H + 4 - 1 by kernel size.
  for (int dy = 0; dy < SCALEDOWN_H + 4; dy += 5) {
    // yRead[dy + 0] is the y index to 0th row of data from source image (may
    // be the same as 1st, 2nd, etc row, depending on how close we are to the
    // edge of image). xRead is determined by thread id and starts from size
    // of kernel / 2 + 1 to the left of our current pixel
    inrow[tx] = d_Data[yRead[dy + 0] + xRead];
    // Once we synchronize, inrow should contain the data from the source
    // image corresponding to the first row in the current block. It is length
    // SCALEDOWN_W + 4.
    __syncthreads();
    // For the SCALEDOWN_W / 2 threads in block, compute the first of 5
    // indices for this thread. Convolve the 1-D kernel k with every other
    // 'pixel' in the block via 2 * tx
    if (tx < dx2) {
      brow[tx0] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
                  k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
                  k[2] * inrow[2 * tx + 2];
    }
    // Once we synchronize, brow[tx0] holds the horizontally filtered row
    __syncthreads();
    // Compute for SCALEDOWN_W / 2 threads in block. dy & 1 is true if dy is
    // odd. We require that dy is even and after we've completed at least one
    // iteration
    if (tx < dx2 && dy >= 4 && !(dy & 1)) {
      d_Result[yWrite[dy + 0] + xWrite] = k[2] * brow[tx2] +
                                          k[0] * (brow[tx0] + brow[tx4]) +
                                          k[1] * (brow[tx1] + brow[tx3]);
    }
    // And...this is all just the same as above. One big unrolled for loop.
    if (dy < (SCALEDOWN_H + 3)) {
      // yRead[dy + 1] is the y index to 1th row of data from source image
      // (may be the same as 1st, 2nd, etc row, depending on how close we are
      // to the edge of image). xRead is determined by thread id and starts
      // from size of kernel / 2 + 1 to the left of our current pixel
      inrow[tx] = d_Data[yRead[dy + 1] + xRead];
      __syncthreads();
      if (tx < dx2) {
        brow[tx1] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
                    k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
                    k[2] * inrow[2 * tx + 2];
      }
      __syncthreads();
      if (tx<dx2 && dy>=3 && (dy&1)) {
        d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
      }
    }
    if (dy<(SCALEDOWN_H+2)) {
      inrow[tx] = d_Data[yRead[dy+2] + xRead];
      __syncthreads();
      if (tx<dx2) {
        brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
      }
      __syncthreads();
      if (tx<dx2 && dy>=2 && !(dy&1)) {
        d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
      }
    }
    if (dy<(SCALEDOWN_H+1)) {
      inrow[tx] = d_Data[yRead[dy+3] + xRead];
      __syncthreads();
      if (tx<dx2) {
        brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
      }
      __syncthreads();
      if (tx<dx2 && dy>=1 && (dy&1)) {
        d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
      }
    }
    if (dy<SCALEDOWN_H) {
      inrow[tx] = d_Data[yRead[dy+4] + xRead];
      __syncthreads();
      if (tx<dx2) {
        brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
      }
      __syncthreads();
      if (tx<dx2 && !(dy&1)) {
        d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
      }
    }
    __syncthreads();
  }
}
13,675 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand_kernel.h>
// Current particle position in Cartesian coordinates (cm; the sphere
// radius is 0.5 cm — see position_random_generation).
struct particleposition
{
    double x;
    double y;
    double z;
};
// Candidate next position. Same layout as particleposition; kept as a
// separate type by the original author.
struct particlepositionnext
{
    double x;
    double y;
    double z;
};
// One flight step in spherical form: path length r, and the sampled
// direction (theda appears to hold cos(theta) — see
// particlemove_sphcor_generation; phi is the azimuthal angle).
struct particlemove_sph
{
    double r;
    double theda;
    double phi;
};
// Draw a uniformly random point inside the 0.5 cm radius sphere by
// rejection sampling from the enclosing cube.
// NOTE(review): curand_init is re-run with a fixed seed on every call,
// so repeated calls by the same thread reproduce the same sequence —
// confirm this is intended.
__device__ void position_random_generation (struct particleposition &GPU_pos)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    curandState localState;
    curand_init(37,i, 0, &localState);
    while (true)
    {
        GPU_pos.x=curand_uniform_double(&localState)-0.5;//sphere radius=0.5 cm
        GPU_pos.y=curand_uniform_double(&localState)-0.5;
        GPU_pos.z=curand_uniform_double(&localState)-0.5;
        // Accept only points inside the sphere: x^2+y^2+z^2 < 0.5^2
        if (GPU_pos.x*GPU_pos.x+GPU_pos.y*GPU_pos.y+GPU_pos.z*GPU_pos.z<0.25)
        {break;}
    }
}
// Sample one flight step: theda uniform in (-1, 1] (used downstream as
// cos(theta)), phi uniform in [0, 2*pi), and an exponential free-path
// length r = -ln(u)/sigma_total with sigma_total = 3.443.
// NOTE(review): logf() is the float intrinsic applied to a double value
// (precision loss), and the RNG is re-seeded with a fixed seed on every
// call — confirm both are intended.
__device__ void particlemove_sphcor_generation (struct particlemove_sph &GPU_movesph)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    curandState localState;
    curand_init(17,i, 0, &localState);
    GPU_movesph.theda=curand_uniform_double(&localState);
    GPU_movesph.theda=GPU_movesph.theda*2-1; // map (0,1] -> (-1,1]
    GPU_movesph.phi=curand_uniform_double(&localState);
    GPU_movesph.phi=GPU_movesph.phi*2*3.1415926536;
    GPU_movesph.r=curand_uniform_double(&localState);
    GPU_movesph.r=1/3.443*-logf(GPU_movesph.r);//r=1/sigma_total*-log(random 0~1)
}
// Advance a particle from GPU_pos by one flight of length r along the
// sampled direction, writing the result to posnew.
// theda is treated as cos(theta) (its sine is recovered via acosf).
__device__ void particlenextpos (struct particleposition &GPU_pos,struct particleposition &posnew,struct particlemove_sph &GPU_movesph)
{
    const double ct = GPU_movesph.theda;   // cos(theta)
    const double st = sinf(acosf(ct));     // sin(theta), float intrinsics as elsewhere in this file
    posnew.x = GPU_pos.x + GPU_movesph.r * ct * cosf(GPU_movesph.phi);
    posnew.y = GPU_pos.y + GPU_movesph.r * ct * sinf(GPU_movesph.phi);
    posnew.z = GPU_pos.z + GPU_movesph.r * st;
}
/* Track one particle per thread inside the 0.5 cm radius sphere.
 * Each particle starts at the origin and takes exponential flight steps
 * in sampled directions until it is absorbed, leaves the sphere, or has
 * scattered ~30 times. Per-particle outputs:
 *   absorp_thread[i] / scatter_thread[i]: set to 1 on absorption / any scatter
 *   track_thread[i]: length of the most recent in-sphere flight segment
 * Bug fix: the guard was `i<=GPU_N[0]`, which let thread i==N index one
 * element past the end of every per-particle array. */
__global__ void particlefunc(int *GPU_N,int *absorp_thread,int *scatter_thread,double *track_thread,struct particleposition *GPU_pos,struct particleposition *GPU_posnext,struct particlemove_sph *GPU_movesph,int *stp)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    double absorpportion=0;
    int scatcounter=0;
    if (i<GPU_N[0])
    {
        curandState localState;
        curand_init(37,i, 0, &localState);
        /*generate start point*/
        stp[i]=1;
        //position_random_generation(GPU_pos[i]);
        GPU_pos[i].x=0;
        GPU_pos[i].y=0;
        GPU_pos[i].z=0;
        /*generate angle and length*/
        while (true)
        {
            particlemove_sphcor_generation (GPU_movesph[i]);
            /*calculate new position*/
            particlenextpos (GPU_pos[i],GPU_posnext[i],GPU_movesph[i]);
            /*check position*/
            if (GPU_posnext[i].x*GPU_posnext[i].x+GPU_posnext[i].y*GPU_posnext[i].y+GPU_posnext[i].z*GPU_posnext[i].z<=0.25)/*interact in sphere*/
            {
                // NOTE(review): this overwrites the previous value, so only
                // the last flight segment is recorded — confirm intent.
                track_thread[i]=sqrtf((GPU_posnext[i].x-GPU_pos[i].x)*(GPU_posnext[i].x-GPU_pos[i].x)\
                +(GPU_posnext[i].y-GPU_pos[i].y)*(GPU_posnext[i].y-GPU_pos[i].y)\
                +(GPU_posnext[i].z-GPU_pos[i].z)*(GPU_posnext[i].z-GPU_pos[i].z));
                //divide into scattering and absorption
                absorpportion=curand_uniform_double(&localState);
                if (absorpportion<0.0064)//absorp XS portion
                {
                    absorp_thread[i]=1;
                    break;
                }
                else
                {
                    scatter_thread[i]=1;
                    GPU_pos[i].x=GPU_posnext[i].x;
                    GPU_pos[i].y=GPU_posnext[i].y;
                    GPU_pos[i].z=GPU_posnext[i].z;
                }
            }/*if end*/
            else/*die out of sphere*/
            {
                break;
            }/*else end*/
            if (scatcounter>=30) {break;}
            scatcounter=scatcounter+1;
        }
    }/*if end*/
    __syncthreads();
}
// Host driver: simulates N particles on the GPU and tallies absorption,
// scattering and track length over all particles.
// Bug fix: the per-particle result arrays were malloc'd uninitialized but
// the kernel only writes the slots of particles that actually absorb /
// scatter / interact, so garbage was summed; they are now zero-initialized
// with calloc. The host arrays are also freed before exit.
int main(int argc, char *argv[])
{
    int N=0,absorption=0, scattering=0;
    double track=0,trackflux;
    int* GPU_N;
    int* GPU_absorption;
    int* GPU_scattering;
    double* GPU_track;
    struct particleposition *pos;
    struct particleposition *posnext;
    struct particlemove_sph *move_sph;
    int* GPU_stp;
    int intsize=sizeof(int);
    int doublesize=sizeof(double);
    int* absorpar;
    int* scatteringar;
    double* trackar;
    cudaEvent_t start,stop;
    float time;
    /*user input*/
    N=1000000;
    printf("Simulate %d particles\n",N);
    /*calculate block and thread number*/
    int blockamount=0;
    blockamount=N/1024+1;
    printf("block %d \n",blockamount);
    /*host array space declaration (zeroed: the kernel sets slots only
      for particles that absorb/scatter/interact)*/
    absorpar=(int*) calloc(N,intsize);
    scatteringar=(int*) calloc(N,intsize);
    trackar=(double*) calloc(N,doublesize);
    /*cuda computation*/
    /*declare memory on device*/
    cudaMalloc((void**) &GPU_N, intsize);
    cudaMalloc((void**) &GPU_absorption, intsize*N);
    cudaMalloc((void**) &GPU_scattering, intsize*N);
    cudaMalloc((void**) &GPU_track, doublesize*N);
    cudaMalloc((void**) &pos, sizeof(particleposition)*N);
    cudaMalloc((void**) &posnext, sizeof(particleposition)*N);
    cudaMalloc((void**) &move_sph, sizeof(particlemove_sph)*N);
    cudaMalloc((void**) &GPU_stp, intsize*N);
    /*copy data to device*/
    cudaMemcpy(GPU_N,&N,intsize,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_absorption,absorpar,intsize*N,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_scattering,scatteringar,intsize*N,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_track,trackar,doublesize*N,cudaMemcpyHostToDevice);
    /*launch GPU computation, timed with CUDA events*/
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    particlefunc<<<blockamount,1024>>> (GPU_N,GPU_absorption,GPU_scattering,GPU_track,pos,posnext,move_sph,GPU_stp);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("run time=%f ms\n",time);
    /*copy results*/
    cudaMemcpy(scatteringar,GPU_scattering,intsize*N,cudaMemcpyDeviceToHost);
    cudaMemcpy(absorpar,GPU_absorption,intsize*N,cudaMemcpyDeviceToHost);
    cudaMemcpy(trackar,GPU_track,doublesize*N,cudaMemcpyDeviceToHost);
    /*free memories on GPU*/
    cudaFree(GPU_N);
    cudaFree(GPU_absorption);
    cudaFree(GPU_scattering);
    cudaFree(GPU_track);
    cudaFree(move_sph);
    cudaFree(posnext);
    cudaFree(pos);
    cudaFree(GPU_stp);
    /*sum all of the particles*/
    for (int i=0;i<N;++i)
    {
        scattering=scattering+scatteringar[i];
        absorption=absorption+absorpar[i];
        track=track+trackar[i];
    }
    printf ("escape probability=%lf\n",(double)(N-absorption)/N);
    printf ("absorption=%d\n",absorption);
    printf ("scattering=%d\n",scattering);
    printf ("track=%lf\n",track);
    trackflux=track/N;
    printf ("track length estimated flux=%lf\n",trackflux);
    /*free host memories*/
    free(absorpar);
    free(scatteringar);
    free(trackar);
    system("PAUSE");
    return 0;
}
|
13,676 | #include <iostream>
#include <stdio.h>
using namespace std;
// Empty kernel: exists only to demonstrate a kernel launch.
__global__ void myfunc()
{
}
// Launch a no-op kernel, wait for it to complete, then greet from the host.
int main(void)
{
    myfunc<<<1,1>>>();
    // Kernel launches are asynchronous; synchronize so the launch finishes
    // (and any launch error is surfaced) before the program exits.
    cudaDeviceSynchronize();
    cout<<"Hello World from host! \n";
    return 0;
}
|
13,677 | #include "cuda_runtime.h"
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
// Element-wise vector addition kernel: C[i] = A[i] + B[i].
// Bug fix: the grid is rounded up ((n-1)/Block+1 in main), so the tail
// block launches threads with i >= n; those previously wrote out of
// bounds. A bounds guard now skips them.
__global__ void Plus(float A[], float B[], float C[], int n){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n){
        C[i] = A[i] + B[i];
    }
}
// Compare CPU and GPU element-wise vector addition and time both paths.
// Bug fixes: removed a duplicated host-to-device copy of B; the kernel is
// now synchronized before the end timestamp (launches are asynchronous, so
// the old code timed only the launch); max_error is reset before the GPU
// check so it no longer inherits the CPU pass's sum.
int main(void){
    int n = 1024 * 1024;//default
    cout<<"Set size vector(use default enter -1 default is 1024*1024):";
    int sizeV;
    cin>>sizeV;
    if(sizeV>0){
        n=sizeV;
        cout<<"Using size "<<n<<" to calculate"<<endl;
    }else{
        cout<<"Now Using Default size 1024*1024 long..."<<endl;
    }
    struct timeval start, end;
    gettimeofday( &start, NULL );
    float*A, *Ad, *B, *Bd, *C, *Cd;
    int size = n * sizeof(float);
    A = (float*)malloc(size);
    B = (float*)malloc(size);
    C = (float*)malloc(size);
    for(int i=0;i<n;i++){
        A[i] = 20.0;
        B[i] = 10.0;
    }
    //CPU calc
    gettimeofday(&start,NULL);
    for(int i=0;i<n;i++){
        C[i] = A[i] + B[i];
    }
    gettimeofday( &end, NULL );
    float max_error = 0.0;
    for(int i=0;i<n;i++){
        max_error += fabs(30.0-C[i]);
    }
    cout << "max_error of CPU is " << max_error << endl;
    int timeuseCPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time for cpu is " << timeuseCPU<< "us" <<endl;
    //delete results
    for(int i=0;i<n;i++){
        C[i]=0;
    }
    //GPU calc
    cudaMalloc((void**)&Ad, size);
    cudaMalloc((void**)&Bd, size);
    cudaMalloc((void**)&Cd, size);
    // copy to device (the duplicated copy of B was removed)
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    int Block=1024;
    int Grid=(n-1)/Block+1; //ceil-div so the whole vector is covered
    // exec
    gettimeofday(&start,NULL);
    Plus<<<Grid, Block>>>(Ad, Bd, Cd, n);
    // wait for the kernel so the timestamp below measures execution,
    // not just the asynchronous launch
    cudaDeviceSynchronize();
    gettimeofday(&end,NULL);
    // return result to host
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    // check errors (accumulator reset so the GPU figure stands alone)
    max_error = 0.0;
    for(int i=0;i<n;i++)
    {
        max_error += fabs(30.0 - C[i]);
    }
    cout << "max error of GPU is " << max_error << endl;
    // free memory for gpu und host
    free(A);
    free(B);
    free(C);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    int timeuseGPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time use in GPU is " << timeuseGPU<< "us" <<endl;
    if(timeuseGPU<timeuseCPU){
        cout<<"GPU is faster than CPU for "<<timeuseCPU-timeuseGPU<<" us"<<endl;
    }
    return 0;
}
13,678 | #include <stdio.h>
#define N 10
// One block per element (launched <<<N,1>>>): the block index selects
// which element each block adds.
__global__ void add(int *a, int * b, int *c) {
    const int tid = blockIdx.x;
    c[tid] = a[tid] + b[tid];
}
/* Fill two N-element vectors, add them on the GPU (one block per
   element) and print each triple a, b, a+b. The status of the first
   cudaMalloc is printed (0 == cudaSuccess). */
int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    printf("%d\n", cudaMalloc( (void **)&dev_a, sizeof(int)*N));
    cudaMalloc( (void **)&dev_b, sizeof(int)*N);
    cudaMalloc( (void **)&dev_c, sizeof(int)*N);
    // Inputs: a holds 0..N-1, b holds the doubled sequence.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = 2*i;
    }
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    add<<<N,1>>>(dev_a, dev_b, dev_c);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d %d %d\n", a[i],b[i],c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
|
13,679 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
__device__ int _inv(double *m, double *invOut);
__device__ void mult(double *A, double *B, double *C);
__device__ void copy(double *A, double *B);
__device__ void _eye(double *data);
// TODO: device level link class
// TODO: block >= 2048 error
/*
* Params
* T: double(N, 4, 4) the final transform matrix of all points (shared)
* tool: double(N, 4, 4) the tool transform matrix of all points (shared)
* nlinks_pt: long(N,): the number of links associated with each (shared)
* link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints
* link_axes: long(max_nlinks, ): axes of all links
* link_isjoint: long(max_nlinks, ): 1/0 whether links are joints
* N: (int) number of points
* njoints: (int) number of joints
* out: (N, 6, njoints)
*/
// One thread per point tid: accumulates link transforms, and for every joint
// writes one 6-row column of the Jacobian `out[tid]` (rows 0-2 linear,
// rows 3-5 angular — inferred from the axis cases below; TODO confirm).
__global__ void _jacob0(double *T,
double *tool,
double *etool,
double *link_A,
long *nlinks_pt,
long *link_axes,
long *link_isjoint,
int N,
int max_nlinks,
int njoints,
double *out)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
double *T_i;
double *tool_i;
double *U;
double *temp;
double *etool_i;
double *invU;
double *link_iA;
// Per-thread 4x4 scratch matrices on the device heap.
// NOTE(review): malloc can return NULL under heap exhaustion; unchecked here.
U = (double*) malloc(sizeof(double) * 16);
invU = (double*) malloc(sizeof(double) * 16);
temp = (double*) malloc(sizeof(double) * 16);
int j = 0;           // output column index; advances once per joint link
tool_i = &tool[tid * 16];
etool_i = &etool[tid * 16];
_eye(U);             // U starts as the identity transform
T_i = &T[tid * 16];
// Grid-tail guard: release scratch and bail for threads beyond N.
if (tid >= N) {
free(U);
free(invU);
free(temp);
return;
}
long nlinks = nlinks_pt[tid];
double *link_A_tid = &link_A[tid * max_nlinks * 4 * 4];
// printf("Hello from tid %d nlinks %ld\n", tid, nlinks);
for (int i = 0; i < nlinks; i++) {
// printf("Hello from tid %d link_i %d link_axis %ld isjoint %ld \n", tid, i, link_axes[i], link_isjoint[i]);
if (link_isjoint[i] == 1) {
// Joint link: fold its transform into U, then emit a Jacobian column.
link_iA = &link_A_tid[i * 16];
mult(U, link_iA, temp);
copy(temp, U);
if (i == nlinks - 1) {
// Last link: append the end-effector tool transforms.
mult(U, etool_i, temp);
copy(temp, U);
mult(U, tool_i, temp);
copy(temp , U);
}
// temp = U^-1 * T_i : transform from this joint frame to the target.
_inv(U, invU);
mult(invU, T_i, temp);
double *out_tid = &out[tid * 6 * njoints];
// Column j depends on the joint axis: cases 0-2 produce a coupled
// linear part plus a rotation-axis angular part; cases 3-5 produce
// a pure translation direction with zero angular part.
if (link_axes[i] == 0) {
out_tid[0 * njoints + j] = U[0 * 4 + 2] * temp[1 * 4 + 3] - U[0 * 4 + 1] * temp[2 * 4 + 3];
out_tid[1 * njoints + j] = U[1 * 4 + 2] * temp[1 * 4 + 3] - U[1 * 4 + 1] * temp[2 * 4 + 3];
out_tid[2 * njoints + j] = U[2 * 4 + 2] * temp[1 * 4 + 3] - U[2 * 4 + 1] * temp[2 * 4 + 3];
out_tid[3 * njoints + j] = U[0 * 4 + 2];
out_tid[4 * njoints + j] = U[1 * 4 + 2];
out_tid[5 * njoints + j] = U[2 * 4 + 2];
}
else if (link_axes[i] == 1)
{
out_tid[0 * njoints + j] = U[0 * 4 + 0] * temp[2 * 4 + 3] - U[0 * 4 + 2] * temp[0 * 4 + 3];
out_tid[1 * njoints + j] = U[1 * 4 + 0] * temp[2 * 4 + 3] - U[1 * 4 + 2] * temp[0 * 4 + 3];
out_tid[2 * njoints + j] = U[2 * 4 + 0] * temp[2 * 4 + 3] - U[2 * 4 + 2] * temp[0 * 4 + 3];
out_tid[3 * njoints + j] = U[0 * 4 + 1];
out_tid[4 * njoints + j] = U[1 * 4 + 1];
out_tid[5 * njoints + j] = U[2 * 4 + 1];
}
else if (link_axes[i] == 2)
{
out_tid[0 * njoints + j] = U[0 * 4 + 1] * temp[0 * 4 + 3] - U[0 * 4 + 0] * temp[1 * 4 + 3];
out_tid[1 * njoints + j] = U[1 * 4 + 1] * temp[0 * 4 + 3] - U[1 * 4 + 0] * temp[1 * 4 + 3];
out_tid[2 * njoints + j] = U[2 * 4 + 1] * temp[0 * 4 + 3] - U[2 * 4 + 0] * temp[1 * 4 + 3];
out_tid[3 * njoints + j] = U[0 * 4 + 2];
out_tid[4 * njoints + j] = U[1 * 4 + 2];
out_tid[5 * njoints + j] = U[2 * 4 + 2];
}
else if (link_axes[i] == 3)
{
out_tid[0 * njoints + j] = U[0 * 4 + 0];
out_tid[1 * njoints + j] = U[1 * 4 + 0];
out_tid[2 * njoints + j] = U[2 * 4 + 0];
out_tid[3 * njoints + j] = 0.0;
out_tid[4 * njoints + j] = 0.0;
out_tid[5 * njoints + j] = 0.0;
}
else if (link_axes[i] == 4)
{
out_tid[0 * njoints + j] = U[0 * 4 + 1];
out_tid[1 * njoints + j] = U[1 * 4 + 1];
out_tid[2 * njoints + j] = U[2 * 4 + 1];
out_tid[3 * njoints + j] = 0.0;
out_tid[4 * njoints + j] = 0.0;
out_tid[5 * njoints + j] = 0.0;
}
else if (link_axes[i] == 5)
{
out_tid[0 * njoints + j] = U[0 * 4 + 2];
out_tid[1 * njoints + j] = U[1 * 4 + 2];
out_tid[2 * njoints + j] = U[2 * 4 + 2];
out_tid[3 * njoints + j] = 0.0;
out_tid[4 * njoints + j] = 0.0;
out_tid[5 * njoints + j] = 0.0;
}
j++;
}
else
{
// Fixed (non-joint) link: just accumulate its transform into U.
link_iA = &link_A_tid[i * 16];
mult(U, link_iA, temp);
copy(temp, U);
}
}
free(U);
free(invU);
free(temp);
}
__device__ void _eye(double *data)
{
    // Write the 4x4 identity into a row-major 16-element buffer.
    for (int r = 0; r < 4; r++)
    {
        for (int c = 0; c < 4; c++)
        {
            data[r * 4 + c] = (r == c) ? 1 : 0;
        }
    }
}
__device__ void copy(double *A, double *B)
{
    // Copy the 16 entries of 4x4 matrix A into B.
    for (int k = 0; k < 16; k++)
    {
        B[k] = A[k];
    }
}
__device__ void mult(double *A, double *B, double *C)
{
    // C = A * B for 4x4 row-major matrices (C must not alias A or B:
    // callers always pass a separate scratch buffer).
    const int N = 4;
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
        {
            double acc = 0;
            for (int t = 0; t < N; t++)
            {
                acc += A[row * N + t] * B[t * N + col];
            }
            C[row * N + col] = acc;
        }
    }
}
// Invert a 4x4 row-major matrix via cofactor (adjugate) expansion.
// Returns 1 on success; returns 0 and leaves invOut untouched when the
// matrix is singular (det == 0).
// Uses a fixed local scratch array instead of the original device-heap
// malloc/free pair: the malloc result was dereferenced without a NULL
// check, so heap exhaustion would have crashed the kernel, and per-call
// heap traffic was pure overhead for a constant 16-double buffer.
__device__ int _inv(double *m, double *invOut)
{
    double inv[16];
    double det;
    int i;
    inv[0] = m[5] * m[10] * m[15] -
    m[5] * m[11] * m[14] -
    m[9] * m[6] * m[15] +
    m[9] * m[7] * m[14] +
    m[13] * m[6] * m[11] -
    m[13] * m[7] * m[10];
    inv[4] = -m[4] * m[10] * m[15] +
    m[4] * m[11] * m[14] +
    m[8] * m[6] * m[15] -
    m[8] * m[7] * m[14] -
    m[12] * m[6] * m[11] +
    m[12] * m[7] * m[10];
    inv[8] = m[4] * m[9] * m[15] -
    m[4] * m[11] * m[13] -
    m[8] * m[5] * m[15] +
    m[8] * m[7] * m[13] +
    m[12] * m[5] * m[11] -
    m[12] * m[7] * m[9];
    inv[12] = -m[4] * m[9] * m[14] +
    m[4] * m[10] * m[13] +
    m[8] * m[5] * m[14] -
    m[8] * m[6] * m[13] -
    m[12] * m[5] * m[10] +
    m[12] * m[6] * m[9];
    inv[1] = -m[1] * m[10] * m[15] +
    m[1] * m[11] * m[14] +
    m[9] * m[2] * m[15] -
    m[9] * m[3] * m[14] -
    m[13] * m[2] * m[11] +
    m[13] * m[3] * m[10];
    inv[5] = m[0] * m[10] * m[15] -
    m[0] * m[11] * m[14] -
    m[8] * m[2] * m[15] +
    m[8] * m[3] * m[14] +
    m[12] * m[2] * m[11] -
    m[12] * m[3] * m[10];
    inv[9] = -m[0] * m[9] * m[15] +
    m[0] * m[11] * m[13] +
    m[8] * m[1] * m[15] -
    m[8] * m[3] * m[13] -
    m[12] * m[1] * m[11] +
    m[12] * m[3] * m[9];
    inv[13] = m[0] * m[9] * m[14] -
    m[0] * m[10] * m[13] -
    m[8] * m[1] * m[14] +
    m[8] * m[2] * m[13] +
    m[12] * m[1] * m[10] -
    m[12] * m[2] * m[9];
    inv[2] = m[1] * m[6] * m[15] -
    m[1] * m[7] * m[14] -
    m[5] * m[2] * m[15] +
    m[5] * m[3] * m[14] +
    m[13] * m[2] * m[7] -
    m[13] * m[3] * m[6];
    inv[6] = -m[0] * m[6] * m[15] +
    m[0] * m[7] * m[14] +
    m[4] * m[2] * m[15] -
    m[4] * m[3] * m[14] -
    m[12] * m[2] * m[7] +
    m[12] * m[3] * m[6];
    inv[10] = m[0] * m[5] * m[15] -
    m[0] * m[7] * m[13] -
    m[4] * m[1] * m[15] +
    m[4] * m[3] * m[13] +
    m[12] * m[1] * m[7] -
    m[12] * m[3] * m[5];
    inv[14] = -m[0] * m[5] * m[14] +
    m[0] * m[6] * m[13] +
    m[4] * m[1] * m[14] -
    m[4] * m[2] * m[13] -
    m[12] * m[1] * m[6] +
    m[12] * m[2] * m[5];
    inv[3] = -m[1] * m[6] * m[11] +
    m[1] * m[7] * m[10] +
    m[5] * m[2] * m[11] -
    m[5] * m[3] * m[10] -
    m[9] * m[2] * m[7] +
    m[9] * m[3] * m[6];
    inv[7] = m[0] * m[6] * m[11] -
    m[0] * m[7] * m[10] -
    m[4] * m[2] * m[11] +
    m[4] * m[3] * m[10] +
    m[8] * m[2] * m[7] -
    m[8] * m[3] * m[6];
    inv[11] = -m[0] * m[5] * m[11] +
    m[0] * m[7] * m[9] +
    m[4] * m[1] * m[11] -
    m[4] * m[3] * m[9] -
    m[8] * m[1] * m[7] +
    m[8] * m[3] * m[5];
    inv[15] = m[0] * m[5] * m[10] -
    m[0] * m[6] * m[9] -
    m[4] * m[1] * m[10] +
    m[4] * m[2] * m[9] +
    m[8] * m[1] * m[6] -
    m[8] * m[2] * m[5];
    // Expand the determinant along the first row of cofactors.
    det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12];
    if (det == 0) {
        return 0;   // singular: invOut deliberately left unmodified
    }
    det = 1.0 / det;
    for (i = 0; i < 16; i++)
        invOut[i] = inv[i] * det;
    return 1;
}
extern "C"{
/*
* Params
* T: double(N, 4, 4) the final transform matrix of all points (shared)
* tool: double(N, 4, 4) the tool transform matrix of all points (shared)
* nlinks_pt: long(N,): the number of links associated with each (shared)
* link_A: double(N, max_nlinks, 4, 4) the transformation matrix of all joints
* link_axes: long(max_nlinks, ): axes of all links
* link_isjoint: long(max_nlinks, ): 1/0 whether links are joints
* N: (int) number of points
* max_nlinks: (int) max number of links on the path
* njoints: (int) number of joints
* out: (N, 6, njoints)
*/
/*
 * Host wrapper: copies all inputs to the device, launches _jacob0 with one
 * thread per point, and copies the (N, 6, njoints) Jacobian back into `out`.
 * Parameters are documented in the comment block above.
 */
void jacob0(double *T,
            double *tool,
            double *etool,
            double *link_A,
            long *nlinks_pt,
            long *link_axes,
            long *link_isjoint,
            int N,
            int max_nlinks,
            int njoints,
            double *out)
{
    int block_size = 768;
    // Proper ceiling division.  The original (N + block_size) / block_size
    // launched one spare empty block whenever N was an exact multiple of
    // block_size (harmless due to the kernel's tid guard, but wasteful).
    int grid_size = (N + block_size - 1) / block_size;
    // printf("Block size %d N %d gid size %d\n", block_size, N, grid_size);
    double *d_T, *d_tool, *d_etool, *d_link_A;
    long *d_link_axes, *d_link_isjoint, *d_nlinks_pt;
    double *d_out;
    cudaMalloc((void**)&d_T, sizeof(double) * N * 16);
    cudaMalloc((void**)&d_tool, sizeof(double) * N * 16);
    cudaMalloc((void**)&d_etool, sizeof(double) * N * 16);
    cudaMalloc((void**)&d_link_A, sizeof(double) * N * max_nlinks * 16);
    cudaMalloc((void**)&d_nlinks_pt, sizeof(long) * N);
    cudaMalloc((void**)&d_link_axes, sizeof(long) * max_nlinks);
    cudaMalloc((void**)&d_link_isjoint, sizeof(long) * max_nlinks);
    cudaMalloc((void**)&d_out, sizeof(double) * N * 6 * njoints);
    // Transfer data from host to device memory
    cudaMemcpy(d_T, T, sizeof(double) * N * 16, cudaMemcpyHostToDevice);
    cudaMemcpy(d_tool, tool, sizeof(double) * N * 16, cudaMemcpyHostToDevice);
    cudaMemcpy(d_etool, etool, sizeof(double) * N * 16, cudaMemcpyHostToDevice);
    cudaMemcpy(d_link_A, link_A, sizeof(double) * N * max_nlinks * 16, cudaMemcpyHostToDevice);
    cudaMemcpy(d_nlinks_pt, nlinks_pt, sizeof(long) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_link_axes, link_axes, sizeof(long) * max_nlinks, cudaMemcpyHostToDevice);
    cudaMemcpy(d_link_isjoint, link_isjoint, sizeof(long) * max_nlinks, cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, out, sizeof(double) * N * 6 * njoints, cudaMemcpyHostToDevice);
    _jacob0<<<grid_size,block_size>>>(d_T,
                                      d_tool,
                                      d_etool,
                                      d_link_A,
                                      d_nlinks_pt,
                                      d_link_axes,
                                      d_link_isjoint,
                                      N,
                                      max_nlinks,
                                      njoints,
                                      d_out);
    // Surface both launch-configuration and asynchronous execution errors
    // (the original computed cudaerr but the report was commented out).
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    cudaMemcpy(out, d_out, sizeof(double) * N * 6 * njoints, cudaMemcpyDeviceToHost);
    // Deallocate device memory
    cudaFree(d_T);
    cudaFree(d_tool);
    cudaFree(d_nlinks_pt);
    cudaFree(d_etool);
    cudaFree(d_link_A);
    cudaFree(d_link_axes);
    cudaFree(d_link_isjoint);
    cudaFree(d_out);
}
}//extern "C" |
13,680 | #include <stdio.h>
// Global-scope spinlock flag: 0 = free, 1 = held.
static __device__ int d_state = 0;
// Acquire: spin until the CAS flips d_state 0 -> 1; the fence orders the
// critical section's reads after the acquisition.  Per the original note,
// an intra-warp spinlock like this requires independent thread scheduling
// (Volta and newer).
static __device__ void lock(void) { while(atomicCAS(&d_state, 0, 1) != 0); __threadfence(); }
// Release: fence publishes critical-section writes to other blocks before
// the flag clears, and atomicExch replaces the original plain (non-atomic)
// store, which had no ordering guarantee against those writes.
static __device__ void unlock(void) { __threadfence(); atomicExch(&d_state, 0); }
static __device__ int get_sum_node_to_community(const int node_communities[], const int csr_nnz, const int csr_num_rows, const int csr_data[], const int csr_indices[], const int csr_indptr[], const int gid, const int target_community) {
    /**
    Sum of edge weights from node `gid` to its neighbors currently assigned
    to `target_community`.  Self-loops are excluded.
    Invariant: gid of thread is <= csr num rows.
    **/
    int total = 0;
    // CSR row for this node spans [row_begin, row_end); the csr_nnz bound
    // additionally guards against running past the data array.
    const int row_begin = csr_indptr[gid];
    const int row_end = csr_indptr[gid+1];
    for (int idx = row_begin; idx < csr_nnz && idx < row_end; idx++) {
        const int nbr = csr_indices[idx];
        if (nbr != gid && node_communities[nbr] == target_community) {
            total += csr_data[idx];
        }
    }
    return total;
}
// For each node (one thread per node), evaluates the gain of moving the node
// into each neighbor's community, elects the per-block best candidate via a
// packed shared atomicMax, then updates the global best under a spinlock.
// NOTE(review): the gain formula resembles Louvain modularity delta — confirm
// against the host-side algorithm.
__global__ void kernel_compute_max_delta_b(const int nodes_count, const int node_communities[], const int node_degrees[], const int communities_sum_incidents[], const int communities_sum_inside[], const int csr_nnz, const int csr_num_rows, const int csr_data[], const int csr_indices[], const int csr_indptr[], const int sum_all_weights, volatile int* const best_global_node, volatile int* const best_global_community, volatile double* max_global_delta) {
int gid = threadIdx.x + blockIdx.x * blockDim.x; //gid is effectively node id
// Packed (delta << 32 | threadIdx.x) block-wide maximum.
__shared__ unsigned long long int val;
int best_local_node = gid; // gid == this node
int best_local_community = 0;
double max_local_delta = 0;
if (threadIdx.x == 0) val = 1025; //Init to val that threadIdX can't be
__syncthreads();
// Find local max delta
if (gid < nodes_count) {
int com_node = node_communities[gid];
best_local_community = com_node;//node_communities[gid]; //best community ground truth
double degc_totw = node_degrees[gid]/((double)sum_all_weights * 2.0);
double sum_node_to_own_c = get_sum_node_to_community(node_communities, csr_nnz, csr_num_rows, csr_data, csr_indices, csr_indptr, gid, com_node);
// Cost of removing this node from its current community; reused as the
// base term of every candidate move below.
double remove_cost = -sum_node_to_own_c + (communities_sum_incidents[com_node] - node_degrees[gid]) * degc_totw;
int ex_com_node = com_node;
com_node = -1;
//From here on have to use modified sum_inside and sum_incident for target community
//Iterate through my neighbors
int start = csr_indptr[gid];
int stop = csr_indptr[gid+1];
for (int i = start; i < csr_nnz && i < stop; i++) {
int neighbor = csr_indices[i];
int target_community = node_communities[neighbor];
double com_deg_mod = 0.0;
// Moving back into the old community: discount this node's own degree.
if (target_community == ex_com_node) {
com_deg_mod = (double)node_degrees[gid];
}
double dnc = get_sum_node_to_community(node_communities, csr_nnz, csr_num_rows, csr_data, csr_indices, csr_indptr, gid, target_community);
double incr = remove_cost + dnc - ((communities_sum_incidents[target_community] - com_deg_mod)*degc_totw);
if (incr > max_local_delta) {
max_local_delta = incr;
best_local_community = target_community;
}
}
// Find max among thread in block, then check for global max, if so, update best node and community
// Use shifted value to find max among threads in block.
// create and initialize a shared local val per block to share local max
unsigned long long int loc = max_local_delta * 0x08000000; // mult to preserve some precision. Recommended factor less than 2^27
loc = (loc << 32) + threadIdx.x; // shift out of threadIdx.x space
atomicMax(&val, loc); // block max
} // End if
__syncthreads();
// Only one thread per block will run the following code
// (the winner's threadIdx.x sits in val's low 32 bits; the 1025 sentinel
// matches no thread when no lane participated).
if (threadIdx.x == (val & 0xffffffff)) {
// Double-checked update of the global best under the spinlock.
if (max_local_delta > *max_global_delta) {
lock();
if (max_local_delta > *max_global_delta) {
*max_global_delta = max_local_delta;
*best_global_node = best_local_node;
*best_global_community = best_local_community;
}
unlock();
}
}
}
|
13,681 | #include <fstream>
#include <iostream>
#include <math.h>
#include <cmath>
#include <curand_kernel.h>
#include <cuda.h>
#include <string>
#include <time.h>
// Rotate `vec` in place about the axis of `w` by angle |w|*dt
// (Rodrigues' rotation formula, decomposed into parallel, perpendicular
// and cross components).
__device__ void rot( float *w, float *vec, const float dt)
{
    float wmag = sqrt(w[0]*w[0] + w[1]*w[1] + w[2]*w[2]);
    float invmag = 1.0f/wmag;
    // Unit rotation axis.
    float axis[3];
    axis[0] = w[0]*invmag;
    axis[1] = w[1]*invmag;
    axis[2] = w[2]*invmag;
    // Component of vec parallel to the axis (unchanged by the rotation).
    float proj = axis[0]*vec[0] + axis[1]*vec[1] + axis[2]*vec[2];
    float par[3];
    par[0] = axis[0]*proj;
    par[1] = axis[1]*proj;
    par[2] = axis[2]*proj;
    // Perpendicular remainder, rotated in the plane normal to the axis.
    float perp[3];
    perp[0] = vec[0] - par[0];
    perp[1] = vec[1] - par[1];
    perp[2] = vec[2] - par[2];
    // axis x vec, the quadrature direction for the sine term.
    float crs[3];
    crs[0] = axis[1]*vec[2] - axis[2]*vec[1];
    crs[1] = axis[2]*vec[0] - axis[0]*vec[2];
    crs[2] = axis[0]*vec[1] - axis[1]*vec[0];
    float cwt = cos(wmag*dt);
    float swt = sin(wmag*dt);
    vec[0] = par[0] + perp[0]*cwt + crs[0]*swt;
    vec[1] = par[1] + perp[1]*cwt + crs[1]*swt;
    vec[2] = par[2] + perp[2]*cwt + crs[2]*swt;
}
//-----------------------------------------------------------------------------
// Precess every nuclear spin about the electron spin of its Monte-Carlo
// replica: w = a[k] * s(replica), then rotate i_k by dt.  x-dimension
// indexes nuclei, y-dimension indexes replicas; spins are staged through
// dynamic shared memory in a strided copy.
__global__ void precessnucspins(float *i, float *a, float *s, const int ni, const float dt)
{
extern __shared__ float iloc[];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int groupid = blockIdx.x;
int nl = blockDim.x;
float w[3];
float store = 0;
// Strided load: each thread copies up to 3 floats of this block's slice
// of the spin array into shared memory (guarded by the 3*ni bound).
int sind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && sind < 3*ni; ++ii, sind += nl)
{
iloc[glid + ii*nl + 3*nl*glid1] = i[sind + 3*ni*ggid1];
}
__syncthreads();
if (ggid < ni)
{
// Precession frequency vector: hyperfine constant times the replica's
// electron spin.
store = a[ggid];
w[0] = store*s[3*ggid1];
w[1] = store*s[1 + 3*ggid1];
w[2] = store*s[2 + 3*ggid1];
rot (w, iloc+(3*glid+3*nl*glid1), dt);
}
__syncthreads();
// Strided write-back of the rotated spins.
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && wind < 3*ni; ++ii, wind += nl)
{
i[wind + 3*ni*ggid1] = iloc[glid + 3*nl*glid1 + ii*nl];
}
}
//-----------------------------------------------------------------------------
// Initialise one curand state per thread: same seed, thread index as the
// sequence number, and a per-thread offset of 4*mcs (presumably so draws
// across Monte-Carlo steps don't overlap between threads).
__global__ void setup_rand(curandState *state, unsigned long seed, const int mcs)
{
    unsigned tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    curand_init(seed, tid, 4*mcs*tid, &state[tid]);
}
//-----------------------------------------------------------------------------
// Draw a uniformly random direction for each electron spin with magnitude
// sqrt(3)/2 and write it to both s (working copy) and sinit (snapshot of
// the initial state).  One thread per Monte-Carlo replica.
__global__ void vecbuilds(float *s, float *sinit, curandState *state)
{
    extern __shared__ float sloc[];
    int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
    int nl = blockDim.x;
    int glid = threadIdx.x;
    int groupid = blockIdx.x;
    float v = curand_uniform(&state[ggid]);
    float g = curand_uniform(&state[ggid]);
    float m = sqrtf(3.0f/4.0f);
    // Uniform sphere point picking: phi uniform in [0, 2pi), cos(theta)
    // uniform in [-1, 1].  All-float literals and the f-suffixed math
    // functions fix the original's silent promotion to double
    // (2.0*M_PI*v and acos(2.0*g - 1.0) were evaluated in double).
    float phi = 2.0f*(float)M_PI*v;
    float th = acosf(2.0f*g - 1.0f);
    sloc[3*glid] = m*sinf(th)*cosf(phi);
    sloc[3*glid + 1] = m*sinf(th)*sinf(phi);
    sloc[3*glid + 2] = m*cosf(th);
    __syncthreads();
    // Strided (coalesced) copy of the shared staging buffer to global memory.
    int wind = 3*nl*groupid + glid;
    for(int ii = 0; ii < 3; ++ii, wind += nl)
    {
        s[wind] = sloc[glid + ii*nl];
        sinit[wind] = sloc[glid + ii*nl];
    }
}
//-----------------------------------------------------------------------------
// Draw a uniformly random direction for each nuclear spin in each replica.
// Magnitude depends on species: the first `nindium` sites get sqrt(99)/2 and
// the rest sqrt(15)/2 — consistent with |I| = sqrt(I(I+1)) for spin 9/2
// (indium) vs spin 3/2 (TODO confirm species assignment).
__global__ void vecbuildi(float *i, curandState *state, const int ni, const int nindium)
{
extern __shared__ float iloc[];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int nl = blockDim.x;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int groupid = blockIdx.x;
int ng = nl*gridDim.x;
float m = 0;
if (ggid < nindium){
m = sqrt(99.0f/4.0f);
} else {
m = sqrt(15.0f/4.0f);
}
// Uniform sphere point picking: phi uniform, cos(theta) uniform.
float v = curand_uniform(&state[ggid + ggid1*ng]);
float phi = 2.0f*M_PI*v;
float g = curand_uniform(&state[ggid + ggid1*ng]);
float th = acos(2.0f*g - 1.0f);
iloc[3*glid + 3*nl*glid1] = m*sin(th)*cos(phi);
iloc[3*glid + 3*nl*glid1 + 1] = m*sin(th)*sin(phi);
iloc[3*glid + 3*nl*glid1 + 2] = m*cos(th);
__syncthreads();
// Strided write-back of this block's slice (guarded by the 3*ni bound).
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && wind < 3*ni; ++ii, wind += nl)
{
i[wind + 3*ni*ggid1] = iloc[glid + 3*nl*glid1 + ii*nl];
}
}
//-----------------------------------------------------------------------------
// One pass of the iterative reduction that sums hyp[k]*i_k over nuclei to
// build the Overhauser-like field w (presumably; confirm against caller).
// count == 0: first pass — also multiplies each spin by its hyperfine
// constant.  Later passes just keep folding partial sums.  Reduction is in
// x (over nuclei); y indexes Monte-Carlo replicas.  n = log2(blockDim.x);
// a = number of still-active elements; size = row stride of wout.
__global__ void reduce(float *i, float *w, const int n, const int a, float *hyp,
const int ni, float *wout, const int count, const int size)
{
// Shared layout: blockDim.x floats of hyperfine constants, then the
// 3-component working tile.
extern __shared__ float locmem[];
float* store = locmem;
float* wtemp = store + blockDim.x;
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int nl = blockDim.x;
int ng = nl*gridDim.x;
int groupid = blockIdx.x;
if (count == 0)
{
// First pass: stage hyperfine constants and copy spins into w.
if (ggid < ni)
{
store[glid] = hyp[ggid];
}
int sind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && sind < 3*ni; ++ii, sind += nl)
{
w[sind + 3*ng*ggid1] = i[sind + 3*ni*ggid1];
}
__syncthreads();
// Weight each spin component by its nucleus' constant; the
// (glid - glid%3)/3 arithmetic maps a flat float index back to its
// nucleus index.
wtemp[glid + 3*nl*glid1] = store[(glid - glid%3)/3]*w[3*nl*groupid + glid + 3*ng*ggid1];
wtemp[glid + nl + 3*nl*glid1] = store[(glid + nl - (glid + nl)%3)/3]*w[3*nl*groupid + glid + nl + 3*ng*ggid1];
wtemp[glid + 2*nl + 3*nl*glid1] = store[(glid + 2*nl - (glid + 2*nl)%3)/3]*w[3*nl*groupid + glid + 2*nl + 3*ng*ggid1];
} else {
// Later passes: load previously-reduced partial sums unchanged.
wtemp[glid + 3*nl*glid1] = w[3*nl*groupid + glid + 3*ng*ggid1];
wtemp[glid + nl + 3*nl*glid1] = w[3*nl*groupid + glid + nl + 3*ng*ggid1];
wtemp[glid + 2*nl + 3*nl*glid1] = w[3*nl*groupid + glid + 2*nl + 3*ng*ggid1];
}
// Tree reduction: halve the active stride each step, summing 3-vectors.
#pragma unroll
for (int k=1; k < n; k++)
{
__syncthreads();
int b = nl >> k;
if (glid < b)
{
wtemp[3*glid + 3*nl*glid1] += wtemp[3*(glid + b)+ 3*nl*glid1];
wtemp[3*glid + 1 + 3*nl*glid1] += wtemp[3*(glid + b) + 1+ 3*nl*glid1];
wtemp[3*glid + 2 + 3*nl*glid1] += wtemp[3*(glid + b) + 2+ 3*nl*glid1];
}
}
__syncthreads();
// Thread 0 folds the last two partial 3-vectors and writes this block's
// result to slot (ggid >> n) of wout.
if (glid == 0)
{
wout[(ggid >> n)*3 + 3*size*ggid1] = wtemp[3*nl*glid1] + wtemp[3 + 3*nl*glid1];
wout[(ggid >> n)*3 + 1 + 3*size*ggid1] = wtemp[1 + 3*nl*glid1] + wtemp[4 + 3*nl*glid1];
wout[(ggid >> n)*3 + 2 + 3*size*ggid1] = wtemp[2 + 3*nl*glid1] + wtemp[5 + 3*nl*glid1];
}
// c = ceil(a / nl): number of valid outputs after this pass; zero the
// rest so the next pass sums clean padding.
int c = 0;
if (a%nl == 0)
{
c = a/nl;
}
else
{
c = a/nl + 1;
}
__syncthreads();
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3 && wind < 3*size; ++ii, wind += nl)
{
if (wind >= 3*c)
{
wout[wind + 3*size*ggid1] = 0;
}
}
}
//-----------------------------------------------------------------------------
// Advance each replica's electron spin one timestep: snapshot it into
// sstore[timestep x], then rotate it by dt about (reduced nuclear field w
// + external field wi).  `a` is the row stride of w, i.e. which reduction
// buffer slot holds each replica's field.  One thread per replica.
__global__ void precesselecspins(float *w, float *wi, float *s, const int size, const int x,
float *sstore, const int a, const float dt)
{
// Shared layout: 3*blockDim.x floats of spins, then 3*blockDim.x of fields.
extern __shared__ float locmem[];
float* sloc = locmem;
float* wloc = sloc + 3*blockDim.x;
float wtemp[3];
int ggid = (blockIdx.x * blockDim.x) + threadIdx.x;
int glid = threadIdx.x;
int nl = blockDim.x;
int groupid = blockIdx.x;
// Strided load of this block's electron spins into shared memory.
int sind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, sind += nl)
{
sloc[glid + ii*nl] = s[sind];
}
__syncthreads();
// Each replica's fully-reduced field sits at offset 3*a*ggid.
wloc[3*glid] = w[3*a*ggid];
wloc[3*glid + 1] = w[3*a*ggid + 1];
wloc[3*glid + 2] = w[3*a*ggid + 2];
// Record the pre-rotation spin for timestep x in the trajectory store.
sstore[3*x + 3*size*ggid] = sloc[3*glid];
sstore[3*x + 1 + 3*size*ggid] = sloc[3*glid + 1];
sstore[3*x + 2 + 3*size*ggid] = sloc[3*glid + 2];
// Total precession vector = nuclear field + external field.
wtemp[0] = wloc[3*glid] + wi[0];
wtemp[1] = wloc[1 + 3*glid] + wi[1];
wtemp[2] = wloc[2 + 3*glid] + wi[2];
rot (wtemp, sloc+(3*glid), dt);
__syncthreads();
// Strided write-back of the rotated spins.
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, wind += nl)
{
s[wind] = sloc[glid + ii*nl];
}
}
//-----------------------------------------------------------------------------
// Build per-timestep correlation terms from the stored spin trajectories:
// x and y components are multiplied by the replica's initial s_x
// (sinit[3*ggid1]) and the z component by the initial s_z, producing the
// integrands later summed into Rxx, Rxy, Rzz.  x-dimension indexes
// timesteps, y-dimension indexes replicas.
__global__ void prep2(float *sstore, float *output, const int size, float *sinit)
{
// Shared layout: trajectory tile, then the scaled output tile.
extern __shared__ float locmem[];
float* sstoreloc = locmem;
float* outputloc = sstoreloc + 3*blockDim.x*blockDim.y;
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int nl = blockDim.x;
int ng = nl*gridDim.x;
int groupid = blockIdx.x;
// store[0] = replica's initial x component, store[1] = initial z.
float store[2];
store[0] = sinit[3*ggid1];
store[1] = sinit[3*ggid1 + 2];
// Strided load of this block's trajectory slice (guarded by 3*size).
int sind = 3*nl*groupid + glid;
for (int ii = 0; ii < 3 && sind < 3*size; ++ii, sind += nl)
{
sstoreloc[glid + ii*nl + 3*nl*glid1] = sstore[sind + 3*size*ggid1];
}
__syncthreads();
outputloc[glid*3 + 3*nl*glid1] = store[0]*sstoreloc[glid*3 + 3*nl*glid1];
outputloc[glid*3 + 3*nl*glid1 + 1] = store[0]*sstoreloc[glid*3 + 3*nl*glid1 + 1];
outputloc[glid*3 + 3*nl*glid1 + 2] = store[1]*sstoreloc[glid*3 + 3*nl*glid1 + 2];
__syncthreads();
// Strided write of the scaled tile to global memory.
int wind = 3*nl*groupid + glid;
for(int ii = 0; ii < 3; ++ii, wind += nl)
{
output[wind + 3*ng*ggid1] = outputloc[glid + 3*nl*glid1 + ii*nl];
}
}
//-----------------------------------------------------------------------------
// One pass of the reduction along the y (replica) dimension, summing the
// correlation terms of replicas running in parallel.  n = log2(blockDim.y);
// a = number of still-active rows; called repeatedly with output/out
// ping-ponged until one row remains.
__global__ void reduce2(const int n, const int a, float *output, float *out)
{
extern __shared__ float sstoretemp[];
int ggid1 = (blockIdx.y * blockDim.y) + threadIdx.y;
int glid = threadIdx.x;
int glid1 = threadIdx.y;
int nl1 = blockDim.y;
int nl = blockDim.x;
int ng = nl*gridDim.x;
int groupid = blockIdx.x;
// Each thread stages three floats of its (timestep, replica) tile.
int sind = 3*nl*groupid + glid;
sstoretemp[glid + 3*nl*glid1] = output[sind + 3*ng*ggid1];
sstoretemp[glid + 3*nl*glid1 + nl] = output[sind + 3*ng*ggid1 + nl];
sstoretemp[glid + 3*nl*glid1 + 2*nl] = output[sind + 3*ng*ggid1 + 2*nl];
// Tree reduction over the y dimension: halve the active rows each step.
#pragma unroll
for (int k=1; k < n; k++)
{
__syncthreads();
int b = nl1 >> k;
if (glid1 < b)
{
sstoretemp[3*glid + 3*nl*glid1] += sstoretemp[3*glid+ 3*nl*(glid1+b)];
sstoretemp[3*glid + 1 + 3*nl*glid1] += sstoretemp[3*glid + 1 + 3*nl*(glid1+b)];
sstoretemp[3*glid + 2 + 3*nl*glid1] += sstoretemp[3*glid + 2 + 3*nl*(glid1+b)];
}
}
__syncthreads();
// Row 0 folds the last two partial rows and writes this block's result
// to row (ggid1 >> n) of out.
if (glid1 == 0)
{
out[sind + 3*ng*(ggid1 >> n)] = sstoretemp[glid] + sstoretemp[glid + 3*nl];
out[sind + nl + 3*ng*(ggid1 >> n)] = sstoretemp[glid + nl] + sstoretemp[glid + nl + 3*nl];
out[sind + 2*nl + 3*ng*(ggid1 >> n)] = sstoretemp[glid + 2*nl] + sstoretemp[glid + 2*nl + 3*nl];
}
// c = ceil(a / nl1): rows still valid after this pass; zero rows beyond
// it so the next pass sums clean padding.
int c = 0;
if (a%nl1 == 0)
{
c = a/nl1;
}
else
{
c = a/nl1 + 1;
}
if (ggid1 > c)
{
for (int ii = 0; ii < 3 && sind < 3*ng; ++ii, sind += nl)
{
out[sind + 3*ng*ggid1] = 0;
}
}
}
//-----------------------------------------------------------------------------
// Accumulate the fully-reduced (x, y, z) correlation components for each
// timestep of iteration j into the running Rxx / Rxy / Rzz arrays.
__global__ void tensors(float *output, float *Rxx, float *Rxy, float *Rzz, const int size, const int j)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= size) return;   // guard the grid tail
    Rxx[idx + j*size] += output[3*idx];
    Rxy[idx + j*size] += output[3*idx + 1];
    Rzz[idx + j*size] += output[3*idx + 2];
}
//-----------------------------------------------------------------------------
// Normalise the accumulated tensors by the Monte-Carlo sample count:
// each element becomes 2 * value / mcs.
__global__ void final(float *Rxx, float *Rxy, float *Rzz, const int mcs, const int xmax)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    float recipmcs = 1.0f/mcs;
    if (idx >= xmax) return;   // guard the grid tail
    Rxy[idx] = 2.0f*Rxy[idx]*recipmcs;
    Rzz[idx] = 2.0f*Rzz[idx]*recipmcs;
    Rxx[idx] = 2.0f*Rxx[idx]*recipmcs;
}
//-----------------------------------------------------------------------------
// Like final(), but writes the normalised values (2 * value / mcs) into
// separate temporary arrays, leaving the running accumulators untouched —
// used for intermediate snapshots mid-run.
__global__ void final_temp(float *Rxx, float *Rxy, float *Rzz, const int mcs, const int xmax, float *Rxxtemp, float *Rxytemp, float *Rzztemp)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    float recipmcs = 1.0f/mcs;
    if (idx >= xmax) return;   // guard the grid tail
    Rxytemp[idx] = 2.0f*Rxy[idx]*recipmcs;
    Rzztemp[idx] = 2.0f*Rzz[idx]*recipmcs;
    Rxxtemp[idx] = 2.0f*Rxx[idx]*recipmcs;
}
//-----------------------------------------------------------------------------
int main(void)
{
int nDevices;
clock_t t;
t = clock();
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
int ni = 1160;
int nindium = 553;
int local_size1 = 32;
int local_size2 = 32;
int global_blocks1 = (ni + local_size1 - 1)/local_size1;
int global_blocks2 = 32;
int global_size1 = global_blocks1*local_size1;
int global_size2 = global_blocks2*local_size2;
// Set up timestep
float dt = 1.0;
// Set up maxtime
float tmax = 3000.0;
// xmax - total number of timesteps
int xmax = tmax/dt;
// xmax must be a multiple of iterations
int iterations = 1;
int size = xmax/iterations;
// Global size for final kernels
int global_blocks_tensors = (size + local_size1 - 1)/local_size1;
int global_sizetensors = global_blocks_tensors * local_size1;
dim3 gridSizetensors = dim3 (global_blocks_tensors, global_blocks2);
// Global size for odd reductions
int global_blocks_odd = (global_blocks1 + local_size1 - 1)/local_size1;
int global_size_odd = global_blocks_odd*local_size1;
dim3 gridSizeodd = dim3 (global_blocks_odd, global_blocks2);
// Global size for final step
int global_blocks_final = (xmax + local_size1 - 1)/local_size1;
// Set up monte carlo iterations
int mcs = 1;
// Set up 2D workgroups
dim3 blockSize(local_size1, local_size2);
dim3 gridSize = dim3 (global_blocks1, global_blocks2);
// Set up electron spin and initial electron spin arrays
float *s, *sinit;
cudaMallocManaged(&s, 3*global_size2*sizeof(float));
cudaMallocManaged(&sinit, 3*global_size2*sizeof(float));
// Set up external field
float *wi;
cudaMallocManaged(&wi, 3*sizeof(float));
wi[0] = 0.0;
wi[1] = 0.0;
wi[2] = 0.0;
// Set up nuclear spin vector arrays
float *i;
cudaMallocManaged(&i, 3*global_size2*ni*sizeof(float));
// Set up state for random number generation
curandState *state;
cudaMallocManaged((void**)&state, global_size1*global_size2*sizeof(curandState));
// Set up the hyperfine constants
float *hyperfine;
cudaMallocManaged(&hyperfine, ni*sizeof(float));
std::ifstream hyp;
hyp.open("hyp.txt");
int p = 0;
for(std::string line; std::getline(hyp, line); )
{
hyperfine[p]=std::atof(line.c_str());
p += 1;
}
/*
hyperfine[0] =-0.999985;
hyperfine[1] =-0.7369246;
hyperfine[2] =0.511210;
hyperfine[3] =-0.0826998;
hyperfine[4] =0.0655341;
hyperfine[5] =-0.562082;
hyperfine[6] =-0.905911;
hyperfine[7] =0.357729;
hyperfine[8] =0.358593;
hyperfine[9] =0.869386;
hyperfine[10] =-0.232996;
hyperfine[11] =0.0388327;
hyperfine[12] =0.661931;
hyperfine[13] =-0.930856;
hyperfine[14] =-0.893077;
hyperfine[15] =0.0594001;
*/
// Set up omega vector
float *w;
cudaMallocManaged(&w, 3*global_size1*global_size2*sizeof(float));
// Set up output of omega vector
float *wout;
cudaMallocManaged(&wout, 3*global_size_odd*global_size2*sizeof(float));
// Set up tensor vectors
float *Rxx, *Rxy, *Rzz;
cudaMallocManaged(&Rxx, xmax*sizeof(float));
cudaMallocManaged(&Rxy, xmax*sizeof(float));
cudaMallocManaged(&Rzz, xmax*sizeof(float));
float *Rxxtemp, *Rxytemp, *Rzztemp;
cudaMallocManaged(&Rxxtemp, xmax*sizeof(float));
cudaMallocManaged(&Rxytemp, xmax*sizeof(float));
cudaMallocManaged(&Rzztemp, xmax*sizeof(float));
// Set up electron spin storage vector
float *sstore;
cudaMallocManaged(&sstore, 3*size*global_size2*sizeof(float));
// Set up output
float *output;
cudaMallocManaged(&output, 3*global_sizetensors*global_size2*sizeof(float));
float *out;
cudaMallocManaged(&out, 3*global_sizetensors*global_size2*sizeof(float));
// Work out logs
int n1 = log2f(local_size1);
int n2 = log2f(local_size2);
// Set up seed for random number generation
unsigned long seed = 1;
int pmax = 0;
float time = 0;
//-----------------------------------------------------------------------------
// Kernel Calls
// Call random number generation setup kernel
setup_rand<<<global_blocks1*global_blocks2, local_size1*local_size2>>>(state,seed,mcs);
for (int u = 0; u < mcs; ++u)
{
// Build electron spin vectors array
vecbuilds<<<global_blocks2, local_size2, 3*local_size2*sizeof(float)>>>(s, sinit, state);
// Build nuclear spin vector array
vecbuildi<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, state, ni, nindium);
// Precess the nuclear spins by dt/2 initially
precessnucspins<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, hyperfine, s, ni, dt/2.0);
for (int j = 0; j < iterations; ++j)
{
for (int x = 0; x < size; ++x)
{
int p = 0;
int a = global_size1;
while (a>1)
{
if (p%2 == 0)
{
reduce<<<gridSize, blockSize, (local_size1 + 3*local_size1*local_size2)*sizeof(float)>>>(i, w, n1, a, hyperfine, ni, wout, p, global_size_odd);
} else{
reduce<<<gridSizeodd, blockSize, (local_size1 + 3*local_size1*local_size2)*sizeof(float)>>>(i, wout, n1, a, hyperfine, ni, w, p, global_size1);
}
if (a%local_size1 == 0)
{
a = a/local_size1;
} else {
a = a/local_size1 + 1;
}
p = p + 1;
}
pmax = p;
if (pmax%2 == 0)
{
precesselecspins<<<global_blocks2,local_size2,2*3*local_size2*sizeof(float)>>>(w, wi, s, size, x, sstore, global_size1, dt);
} else {
precesselecspins<<<global_blocks2,local_size2,2*3*local_size2*sizeof(float)>>>(wout, wi, s, size, x, sstore, global_size_odd, dt);
}
precessnucspins<<<gridSize, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(i, hyperfine, s, ni, dt);
}
// Prepare sstore for Rxx, Rxy, Rzz calculation
prep2<<<gridSizetensors, blockSize, 2*3*local_size1*local_size2*sizeof(float)>>>(sstore, output, size, sinit);
// Reset b between each monte carlo step
int b = global_size2;
int g = 0;
// Reduction in the y direction (over different monte carlo steps running in parallel)
// note that global size in the x direction is now related to xmax (no longer ni)
while (b>1)
{
if (g%2 == 0)
{
reduce2<<<gridSizetensors, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(n2, b, output, out);
} else {
reduce2<<<gridSizetensors, blockSize, 3*local_size1*local_size2*sizeof(float)>>>(n2, b, out, output);
}
if (b%local_size2 == 0)
{
b = b/local_size2;
}
else
{
b = b/local_size2 + 1;
}
g = g + 1;
}
// Sum Rxx, Rxy, Rzz over different monte carlo step iterations - note that this is
// now a 1D workgroup size
if (g%2 ==0)
{
tensors<<<global_blocks_tensors, local_size1>>>(output, Rxx, Rxy, Rzz, size, j);
} else {
tensors<<<global_blocks_tensors, local_size1>>>(out, Rxx, Rxy, Rzz, size, j);
}
}
/*
if (u%5 == 0 && u != 0)
{
final_temp<<<global_blocks_final, local_size1>>>(Rxx, Rxy, Rzz, u*global_size2, xmax, Rxxtemp, Rxytemp, Rzztemp);
cudaDeviceSynchronize();
if (u%2 == 0)
{
std::ofstream Rzztemp2txt;
Rzztemp2txt.open("Rzz_w=0_93_temp2_1.txt");
time = 0;
for (int j = 0; j<xmax; ++j)
{
Rzztemp2txt << time << " " << Rzztemp[j] << "\n";
time += dt[0];
}
Rzztemp2txt.close();
} else {
std::ofstream Rzztemp1txt;
Rzztemp1txt.open("Rzz_w=0_93_temp1_1.txt");
time = 0;
for (int j = 0; j<xmax; ++j)
{
Rzztemp1txt << time << " " << Rzztemp[j] << "\n";
time += dt[0];
}
Rzztemp1txt.close();
}
}
*/
}
int h = mcs*global_size2;
final<<<global_blocks_final, local_size1>>>(Rxx, Rxy, Rzz, h, xmax);
cudaDeviceSynchronize();
t = clock() - t;
//auto end = std::chrono::high_resolution_clock::now();
//std::cout << "Time: " << std::chrono::duration_cast<std::chrono::microseconds>(end-start).count() << "us" << std::endl;
std::cout << "Time: " << t << std::endl;
/*
std::ofstream Rxxtxt;
Rxxtxt.open("Rxx_w=0.txt");
for (int j = 0; j<xmax; ++j)
{
Rxxtxt << time << " " << Rxx[j] << "\n";
time += dt[0];
//std::cout << time << " " << Rxx[j] << std::endl;
}
Rxxtxt.close();
std::ofstream Rxytxt;
Rxytxt.open("Rxy_w=0.txt");
time = 0;
for (int j = 0; j<xmax; ++j)
{
Rxytxt << time << " " << Rxy[j] << "\n";
time += dt[0];
//std::cout << time << " " << Rxy[j] << std::endl;
}
Rxytxt.close();
*/
std::ofstream Rzztxt;
Rzztxt.open("Rzz_w=0.txt");
time = 0;
for (int j = 0; j<xmax; ++j)
{
Rzztxt << time << " " << Rzz[j] << "\n";
time += dt;
std::cout << time << " " << Rzz[j] << std::endl;
}
Rzztxt.close();
cudaFree(s);
cudaFree(sinit);
cudaFree(state);
cudaFree(i);
cudaFree(hyperfine);
cudaFree(w);
cudaFree(Rxx);
cudaFree(Rxy);
cudaFree(Rzz);
cudaFree(sstore);
cudaFree(output);
cudaFree(wi);
cudaFree(out);
cudaFree(wout);
cudaFree(Rxxtemp);
cudaFree(Rxytemp);
cudaFree(Rzztemp);
return 0;
}
|
13,682 | #include <cstdlib>
#include <cstdio>
#include "testproc.cuh"
#include "kernels.cuh"
#define NAME(proc) #proc
// Dispatch one test scenario: 0 runs every kernel version in sequence,
// 1..3 run a single version, anything else is rejected.
// Returns EXIT_SUCCESS / EXIT_FAILURE for main() to propagate.
static int runScenario(int scenario, dim3 domain, bool verify)
{
    printf("----------------------------------------------\n");
    printf(" Test scenario %d\n\n", scenario);

    if (scenario == 0)
    {
        // Exercise all kernel variants back to back.
        runTest(RatesVersion1, domain, verify, NAME(RatesVersion1));
        runTest(RatesVersion2, domain, verify, NAME(RatesVersion2));
        runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>));
    }
    else if (scenario == 1)
    {
        runTest(RatesVersion1, domain, verify, NAME(RatesVersion1));
    }
    else if (scenario == 2)
    {
        runTest(RatesVersion2, domain, verify, NAME(RatesVersion2));
    }
    else if (scenario == 3)
    {
        runTest(RatesVersion3<256>, domain, verify, NAME(RatesVersion3<256>));
    }
    else
    {
        printf(" Unknown test scenario! Exiting...\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
// Entry point: parse the command line, sanity-check the active CUDA
// device, then run the requested scenario.
int main(int argc, char** argv)
{
    Configuration config = {0};
    // Non-zero means a usage/parse error was already reported.
    if (parseCommandline(argc, argv, config) > 0)
    {
        return EXIT_FAILURE;
    }

    int devID = 0;
    cudaDeviceProp deviceProp;
    cudaError_t error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }

    error = cudaGetDeviceProperties(&deviceProp, devID);
    // Prohibited compute mode means no kernel could ever launch: bail out.
    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }

    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    return runScenario(config.scenario, config.domain, config.verification);
}
|
13,683 | //pass
//--gridDim=[11377,1,1] --blockDim=[256,1,1]
#include "common.h"
// Compact the surviving edges of a coarsened graph: for each surviving
// edge id, remap its endpoint through verticesMapping and gather its
// weight. One thread per surviving edge; expects a 1D launch covering
// edgesCount threads.
__global__ void makeNewEdges(const uint *survivedEdgesIDs,
                             const uint *verticesMapping,
                             const uint *edges,
                             const float *weights,
                             uint *newEdges,
                             float *newWeights,
                             uint edgesCount)
{
    const uint tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= edgesCount)
        return;

    const uint edgeID = survivedEdgesIDs[tid];
    // Translate the old endpoint into the coarse vertex numbering.
    newEdges[tid]   = verticesMapping[edges[edgeID]];
    newWeights[tid] = weights[edgeID];
}
|
13,684 | #include "includes.h"
// In-place block-wise sum reduction: each block folds its slice of
// input[0..n) into input[blockIdx.x]. Launch with dynamic shared memory
// of blockDim.x * sizeof(int).
__global__ void SumReduction(int* input, int n)
{
    extern __shared__ int sm[];

    // Load this block's slice; out-of-range threads contribute the
    // additive identity so partial last blocks are handled.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sm[tid] = (i < n) ? input[i] : 0;
    __syncthreads();

    // Interleaved-addressing tree reduction; stride doubles each pass.
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        // FIX: also require tid + s < blockDim.x — without it, a
        // non-power-of-two block size reads past the end of sm[].
        if ((tid % (2 * s)) == 0 && (tid + s) < blockDim.x)
        {
            sm[tid] += sm[tid + s];
        }
        __syncthreads();
    }

    // Thread 0 publishes the block's partial sum to global memory.
    if (tid == 0) input[blockIdx.x] = sm[0];
}
13,685 | #include <time.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define W (8192)
#define N (8192)
#define THREADS_PER_BLOCK (1)
#define NUMBER_BLOCKS (N/THREADS_PER_BLOCK)
typedef float myFloat;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report any CUDA runtime failure with its source location; terminate
// the process unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// One block per SOM node: serially accumulate the squared Euclidean
// distance between the node's W-wide weight row and the input sample,
// then store the distance at x[blockIdx.x]. Only blockIdx is used, so
// launch with 1 thread per block.
__global__ void DistanceForBMUCalcBlocks(myFloat *input, myFloat *v, myFloat *x){
    const long long int row = (long long int)W * blockIdx.x;
    myFloat acc = 0;
    for(long long int i = 0; i < W; i++){
        const myFloat diff = v[row + i] - input[i];
        acc += diff * diff;
    }
    x[blockIdx.x] = sqrt(acc);
}
// Same distance computation, one thread per SOM node using a flattened
// grid index. Assumes the launch covers exactly N nodes (no tail guard,
// matching the caller's exact N/THREADS_PER_BLOCK configuration).
__global__ void DistanceForBMUCalcBlocksAndThreads(myFloat *input, myFloat *v, myFloat *x){
    const int node = threadIdx.x + blockIdx.x * blockDim.x;
    myFloat acc = 0;
    for(long long int i = 0; i < W; i++){
        const myFloat diff = v[node * W + i] - input[i];
        acc += diff * diff;
    }
    x[node] = sqrt(acc);
}
// Benchmark driver: builds a random N x W SOM weight matrix and one
// random training sample, computes all node distances on the GPU, and
// reports the Best Matching Unit (argmin distance) plus timings.
int main(int argc, char* argv[]){
    steady_clock::time_point t_i = steady_clock::now();
    srand(0); // fixed seed for reproducible data

    // Node weight matrix: N rows of W features.
    myFloat *v;
    myFloat *d_v;
    long long int size = N*W * sizeof(myFloat);
    gpuErrchk(cudaMalloc((void **)&d_v, size));
    v = (myFloat *)malloc(size);
    for(int i = 0; i < N*W; i++){
        v[i] = rand();
    }

    // One distance per node.
    myFloat *distances;
    myFloat *d_distances;
    long long int distanceArraySize = N * sizeof(myFloat);
    gpuErrchk(cudaMalloc((void **)&d_distances, distanceArraySize));
    distances = (myFloat *)malloc(distanceArraySize);

    // Single training sample of W features.
    myFloat *training;
    myFloat *d_training;
    long long int trainingSize = W * sizeof(myFloat);
    gpuErrchk(cudaMalloc((void **)&d_training, trainingSize));
    training = (myFloat *)malloc(trainingSize);
    for(int i = 0; i < W; i++){
        training[i] = rand();
    }

    int index = 0;
    steady_clock::time_point workI = steady_clock::now();
    for(int i = 0; i < 1; i++){
        gpuErrchk(cudaMemcpy(d_v, v, size, cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(d_training, training, trainingSize, cudaMemcpyHostToDevice));

        DistanceForBMUCalcBlocksAndThreads<<<NUMBER_BLOCKS, THREADS_PER_BLOCK>>>(d_training, d_v, d_distances);
        // Catch launch-configuration errors, then wait for completion.
        // (cudaThreadSynchronize() is deprecated; use the device variant.)
        gpuErrchk(cudaGetLastError());
        gpuErrchk(cudaDeviceSynchronize());

        gpuErrchk(cudaMemcpy(distances, d_distances, distanceArraySize, cudaMemcpyDeviceToHost));

        // Linear scan for the Best Matching Unit (smallest distance).
        myFloat dmin = distances[0];
        for(int j = 0; j < N; j++){
            if(distances[j] < dmin){
                dmin = distances[j];
                index = j;
            }
        }
    }
    steady_clock::time_point workF = steady_clock::now();
    cout<<"Total work execution time "<<duration_cast<milliseconds>(workF - workI).count()<<endl;
    cout<<"BMU is "<<index<<endl;

    // Release device and host buffers (the original leaked all of them).
    gpuErrchk(cudaFree(d_v));
    gpuErrchk(cudaFree(d_distances));
    gpuErrchk(cudaFree(d_training));
    free(v);
    free(distances);
    free(training);

    steady_clock::time_point t_f = steady_clock::now();
    cout<<"Total Execution Time: "<<duration_cast<milliseconds>(t_f - t_i).count()<<endl;
    return 0;
}
|
13,686 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Count launched threads using a two-level atomic: every thread bumps a
// shared-memory counter, then one thread per block folds the block total
// into the global counter *a, keeping global atomics to one per block.
__global__ void threadCounting_atomicShared(int *a){
    __shared__ int blockCount;
    if(threadIdx.x == 0)
        blockCount = 0;
    __syncthreads();

    // Every thread registers itself locally.
    atomicAdd(&blockCount, 1);
    __syncthreads();

    // One representative publishes the block's total.
    if(threadIdx.x == 0)
        atomicAdd(a, blockCount);
}
// Launch 10240 blocks x 512 threads, count them all via the shared-memory
// atomic kernel, and print the total (expected 10240 * 512).
int main(void){
    int total = 0;
    int *d_count;
    cudaMalloc((void**)&d_count, sizeof(int));
    cudaMemset(d_count, 0, sizeof(int)*1);

    threadCounting_atomicShared<<<10240,512>>>(d_count);
    cudaDeviceSynchronize();

    cudaMemcpy(&total, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", total);
    cudaFree(d_count);
}
|
13,687 | #include "includes.h"
// Fill a dimx-wide, dimy-tall row-major array with each cell's own
// linear index. Expects a 2D grid/block launch covering (dimx, dimy).
__global__ void kernel3( int *a, int dimx, int dimy )
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    // FIX: guard the tail — grids rarely divide the domain exactly, and
    // the original wrote out of bounds whenever they did not.
    if ( ix < dimx && iy < dimy )
    {
        int idx = iy*dimx + ix;
        a[idx] = idx;
    }
}
13,688 | #include "includes.h"
//VERSION 0.8 MODIFIED 10/25/16 12:34 by Jack
// The number of threads per blocks in the kernel
// (if we define it here, then we can use its value in the kernel,
// for example to statically declare an array in shared memory)
const int threads_per_block = 256;
// Forward function declarations
float GPU_vector_max(float *A, int N, int kernel_code, float *kernel_time, float *transfer_time);
float CPU_vector_max(float *A, int N);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void checkError();
// Main program
// Per-block maximum of a float vector: the lead thread (threadIdx.x == 0)
// of each block serially scans up to threads_per_block elements starting
// at its flattened offset and writes the block maximum to out[block_id].
// Assumes blockDim.x == threads_per_block.
__global__ void vector_max_kernel(float *in, float *out, int N) {
    // Determine the "flattened" block id and thread id
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;

    // FIX: also require thread_id < N — a block launched entirely past
    // the end of the vector previously read in[thread_id] out of bounds
    // (and wrote a garbage maximum for that block).
    if (threadIdx.x == 0 && thread_id < N) {
        // Clamp the scan range for the (possibly partial) last block.
        int end = threads_per_block;
        if (thread_id + threads_per_block > N)
            end = N - thread_id;

        // Seed with the first element (not 0.0) so the result is correct
        // even for all-negative inputs.
        float max = in[thread_id];
        for (int i = 1; i < end; i++) {
            if (max < in[thread_id + i])
                max = in[thread_id + i];
        }
        out[block_id] = max;
    }
}
13,689 | //fail: assertion
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#define N 2//32
// Every thread stores an all-ones uint4 into its own output slot.
__global__ void kernel(uint4 *out) {
    uint4 ones = {1, 1, 1, 1};
    out[threadIdx.x] = ones;
}
// Verification-tool test case: the kernel writes 1 to every uint4
// component, so the assertions below are EXPECTED to fail (see the
// "//fail: assertion" marker at the top of this file).
int main(){
    uint4 *a;      // host buffer
    uint4 *dev_a;  // device buffer
    a = (uint4*)malloc(N*sizeof(uint4));
    cudaMalloc((void**)&dev_a, N*sizeof(uint4));
    // NOTE(review): `a` is uninitialised here; this copy only primes
    // dev_a, which the kernel fully overwrites anyway.
    cudaMemcpy(dev_a, a, N*sizeof(uint4), cudaMemcpyHostToDevice);
    kernel<<<1,N>>>(dev_a);
    //ESBMC_verify_kernel_u(kernel,1,N,dev_a);
    cudaMemcpy(a, dev_a, N*sizeof(uint4), cudaMemcpyDeviceToHost);
    for (int i=0; i<N; i++){
        // NOTE(review): passing a uint4 struct to printf's %u is undefined
        // behaviour — looks like a debugging leftover; confirm intent.
        printf ("%u", a[i]);
        // These assertions are MEANT to trip: the kernel set every
        // component to 1, so a checker (e.g. ESBMC) reports a violation.
        assert(a[i].x != 1);
        assert(a[i].y != 1);
        assert(a[i].z != 1);
        assert(a[i].w != 1);
    }
    free(a);
    cudaFree(dev_a);
}
|
13,690 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// One thread per output element: C[k] = dot(A, row k of B) for k < wx,
// where B is stored row-major with rows of length n.
// Accumulation is int8_t on purpose — it wraps exactly like the CPU
// reference loop in main(), so the two results stay comparable.
__global__
void resolveWeight(int8_t *A,int8_t *B,int8_t *C, int n, int wx){
    int k = blockIdx.x*blockDim.x+threadIdx.x;
    if(k >= wx)
        return;                    // tail guard for the over-full grid
    int rowStart = k * n;
    int8_t acc = 0;
    for(int i = 0; i < n; i++){
        acc += A[i]*B[rowStart+i];
    }
    C[k] = acc;
}
// Benchmark harness: for sizes o = 1..9999, builds a random {-1,0,1}
// vector and matrix, computes the matrix-vector product sequentially on
// the CPU and in parallel on the GPU, and writes
// "size,cpu_ticks,gpu_ticks,error" rows to out.txt.
int main(){
    FILE *fout = fopen("out.txt","w");
    int T = 512; // threads per block
    int nodes,weightsX,weightsY;
    for(int o = 1; o < 10000;o++){
        if(o%1000 == 0){
            printf("Starting run %d\n" ,o);
        }
        fprintf(fout,"%d",o);
        nodes = o;
        weightsX = o;
        weightsY = o;

        int8_t *node, *d_node, *weight, *d_weight,*out,*gout,*d_out;
        node = (int8_t *)malloc(nodes*sizeof(int8_t));
        weight = (int8_t *)malloc(weightsX*weightsY*sizeof(int8_t));
        out = (int8_t *)malloc(weightsX*sizeof(int8_t));
        gout = (int8_t *)malloc(weightsX*sizeof(int8_t));

        // NOTE: reseeding inside the loop ties data to the wall clock
        // (identical within one second); kept to preserve behaviour.
        srand(time(0));
        // Input vector and weight matrix of values in {-1, 0, 1}.
        for(int i = 0;i<nodes;i++){
            node[i] = rand() % 3 - 1;
        }
        for(int i = 0;i<(weightsX*weightsY);i++){
            weight[i] = rand() % 3 - 1;
        }
        for(int i = 0;i<weightsX;i++){
            out[i] = 0;
            gout[i] = 0;
        }

        // Sequential (CPU) reference — int8_t accumulation wraps the same
        // way as the kernel's, so the error check below stays meaningful.
        unsigned int sstart = clock();
        for(int i = 0; i<weightsX;i++){
            for(int j = 0; j<weightsY;j++){
                out[i]+=(node[j]*weight[(nodes*i)+j]);
            }
        }
        int seqtime = clock()-sstart;
        fprintf(fout,",%d",seqtime);

        //CUDA parallel code
        cudaMalloc(&d_node,nodes*sizeof(int8_t));
        cudaMalloc(&d_weight,weightsX*weightsY*sizeof(int8_t));
        cudaMalloc(&d_out,weightsX*sizeof(int8_t));
        cudaMemcpy(d_node,node,nodes*sizeof(int8_t),cudaMemcpyHostToDevice);
        cudaMemcpy(d_weight,weight,weightsX*weightsY*sizeof(int8_t),cudaMemcpyHostToDevice);
        cudaMemcpy(d_out,gout,weightsX*sizeof(int8_t),cudaMemcpyHostToDevice);

        unsigned int pstart = clock();
        // FIX: the original launched with "weightsX+(T-1)/T" blocks, which
        // by operator precedence equals weightsX blocks — a massive
        // over-launch. The intended ceiling division is (weightsX+T-1)/T.
        resolveWeight<<<(weightsX+T-1)/T,T>>>(d_node,d_weight,d_out,nodes,weightsX);
        cudaDeviceSynchronize();
        int partime = clock()-pstart;
        fprintf(fout,",%d",partime);

        cudaMemcpy(gout,d_out,weightsX*sizeof(int8_t),cudaMemcpyDeviceToHost);

        // Accumulated CPU-vs-GPU discrepancy (0 when both agree).
        int err = 0;
        for(int i = 0; i < weightsX; i++){
            err += out[i]-gout[i];
        }
        fprintf(fout,",%d\n",err);

        free(node);
        free(weight);
        free(out);
        free(gout);
        // Tears down the context, releasing d_node/d_weight/d_out too.
        cudaDeviceReset();
    }
    fclose(fout);
}
|
13,691 | #include <cstdint>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <tuple>
#include <typeinfo>
#ifndef TARGET_ROOT
#define TARGET_ROOT 2
#endif
#ifndef FIND_DOUBLE
#define TARGET_FTYPE float
#define TARGET_ITYPE uint32_t
#define MANTISSA_SIZE ((1 << 24) - 1)
#define INITIAL_MAGIC (((127 - 127 / TARGET_ROOT) - 1) << 23)
#define SKIP_AMOUNT_MUL 1
#else
#define TARGET_FTYPE double
#define TARGET_ITYPE uint64_t
#define MANTISSA_SIZE ((IntT(1) << 53) - 1)
#define INITIAL_MAGIC ((IntT(1023 - 1023 / TARGET_ROOT) - 1) << 52)
#define SKIP_AMOUNT_MUL 179424673
#endif
// Compile-time exponentiation by squaring: returns x^n using O(log n)
// multiplies. The template recursion is resolved entirely at compile
// time, so the generated device code is branch-free straight-line math.
template <uint32_t n, typename T>
__device__ inline T ct_pow(const T x)
{
    if (n == 0) {
        return 1; // x^0 == 1 (base case of the recursion)
    } else {
        // x^n = (x^(n/2))^2, times an extra x when n is odd.
        const T part = ct_pow<n / 2, T>(x);
        return ((n & 1) ? (x * part * part) : (part * part));
    }
}
// One Halley-iteration refinement of x0 toward the n-th root of `value`,
// i.e. toward a root of f(x) = x^n - value (cubically convergent).
// NOTE(review): n - 2 wraps for n < 2 since n is uint32_t; callers here
// instantiate with n = TARGET_ROOT, presumably >= 2 — confirm if reused.
template <uint32_t n, typename T>
__device__ T halley_step(const T x0, const T value)
{
    const T fx = ct_pow<n>(x0) - value;               // f(x0)
    const T fpx = n * ct_pow<n - 1>(x0);              // f'(x0)
    const T fppx = n * (n - 1) * ct_pow<n - 2>(x0);   // f''(x0)
    // Halley: x1 = x0 - 2 f f' / (2 f'^2 - f f'')
    const T numer = 2 * fx * fpx;
    const T denom = 2 * fpx * fpx - fx * fppx;
    const T x1 = x0 - (numer / denom);
    return x1;
}
// One Newton-Raphson step toward the n-th root of `value`:
// x1 = x0 - (f(x0) - y) / f'(x0) with f(x) = x^n.
template <uint32_t n, typename T>
__device__ T newton_step(const T x0, const T value)
{
    const T fx = ct_pow<n>(x0) - value;
    const T fpx = n * ct_pow<n - 1>(x0);
    return x0 - fx / fpx;
}
// Functor for thrust::for_each: seeds every slot of the per-candidate
// error table with a sentinel far above any achievable relative error,
// so the first real measurement always wins the "best so far" test in
// FindErrorForMagics.
template <typename FloatT, typename IntT>
struct SetInitialErrorLevel {
    __device__ void operator()(const IntT index)
    {
        magic_max_error[index] = 10000; // sentinel: worse than any real error
    }
    FloatT* __restrict__ magic_max_error; // device array, one entry per candidate slot
};
// Functor for thrust::for_each: evaluates one candidate magic constant
// per index. For sample roots x in (0, 4] (step 0.001) it forms x^n,
// produces the bit-hack initial guess (reinterpret x^n as an integer,
// divide by n, add the magic), refines it with one Halley step, and
// tracks the worst relative error across all samples. If this candidate
// beats the best error recorded so far at this slot, both the error and
// the magic are stored (slot memory is reused across sweep rounds).
template <typename FloatT, typename IntT, uint32_t n>
struct FindErrorForMagics {
    __device__ void operator()(const IntT index)
    {
        IntT magic = INITIAL_MAGIC + magic_offset + index;
        FloatT error = 0.0;
        // Union to reinterpret the float's bit pattern as an integer
        // (the classic fast-inverse-square-root trick, generalised).
        union {
            FloatT f;
            IntT i;
        } packed;
        for (FloatT root_value = 0.001; root_value < 4.0001; root_value += 0.001) {
            const FloatT powered = ct_pow<n>(root_value);
            packed.f = powered;
            packed.i = magic + (packed.i / n); // integer-domain initial guess
            const FloatT approx = halley_step<n>(packed.f, powered);
            const FloatT current_error = abs((approx - root_value) / root_value);
            error = max(current_error, error); // keep worst-case error
        }
        // Reuse memory
        if (error < magic_max_error[index]) {
            magic_max_error[index] = error;
            magics[index] = magic;
        }
    }
    const IntT magic_offset;               // base offset of this sweep round
    FloatT* __restrict__ magic_max_error;  // best (lowest) worst-case error per slot
    IntT* __restrict__ magics;             // magic that achieved that error
};
// Brute-force search for the best "fast n-th root" magic constant:
// sweeps candidate magics across the mantissa space (strided by
// SKIP_AMOUNT_MUL), measures each candidate's worst-case relative error
// on the GPU via FindErrorForMagics, and returns {best error, best magic}.
std::pair<TARGET_FTYPE, TARGET_ITYPE> find_magic_number(void)
{
    using FloatT = TARGET_FTYPE;
    using IntT = TARGET_ITYPE;
    using KernelT = FindErrorForMagics<FloatT, IntT, TARGET_ROOT>;
    const IntT max_per_round = (1 << 14); // candidates evaluated per pass

    // Host and device tables: best error / best magic per candidate slot.
    FloatT* local_error = new FloatT[max_per_round];
    IntT* local_magics = new IntT[max_per_round];
    FloatT* gpu_errors;
    IntT* gpu_magics;
    cudaMalloc(&gpu_errors, sizeof(FloatT) * max_per_round);
    cudaMalloc(&gpu_magics, sizeof(IntT) * max_per_round);

    // Set the initial error level to be high
    thrust::for_each(thrust::counting_iterator<IntT>(0),
                     thrust::counting_iterator<IntT>(max_per_round),
                     SetInitialErrorLevel<FloatT, IntT>{ gpu_errors });

    // Each round tests max_per_round candidates starting at magic_offset.
    for (IntT magic_offset = 0; magic_offset < MANTISSA_SIZE;
         magic_offset += (max_per_round * SKIP_AMOUNT_MUL)) {
        thrust::for_each(thrust::counting_iterator<IntT>(0),
                         thrust::counting_iterator<IntT>(max_per_round),
                         KernelT{ magic_offset, gpu_errors, gpu_magics });
        // Print progress
        std::cerr << std::fixed << ((100.0 * magic_offset) / MANTISSA_SIZE)
                  << "% \r";
    }
    std::cerr << "\n";

    // Copy results back to the host.
    cudaMemcpy(local_error, gpu_errors, sizeof(FloatT) * max_per_round,
               cudaMemcpyDeviceToHost);
    // FIX: this copy was sized with sizeof(FloatT); it only worked because
    // FloatT and IntT happen to be the same width in both build modes.
    cudaMemcpy(local_magics, gpu_magics, sizeof(IntT) * max_per_round,
               cudaMemcpyDeviceToHost);

    // Host-side reduction: pick the slot with the smallest worst-case error.
    FloatT max_error = 1000;
    IntT current_magic = 0;
    for (IntT i = 0; i < max_per_round; ++i) {
        if (local_error[i] < max_error) {
            max_error = local_error[i];
            current_magic = local_magics[i];
        }
    }

    // FIX: the original called cudaFree(&gpu_errors) / cudaFree(&gpu_magics),
    // passing the address of the host pointer variable — that frees nothing
    // and returns an error. cudaFree takes the device pointer itself.
    cudaFree(gpu_errors);
    cudaFree(gpu_magics);
    delete[] local_error;
    delete[] local_magics;
    return std::make_pair(max_error, current_magic);
}
// Entry point: announce the configured root and precision (chosen via the
// TARGET_ROOT / FIND_DOUBLE compile-time switches), run the magic-number
// search, and print the result in the usual "magic + (i / n)" form.
int main(void)
{
    std::cout << "Finding x^" << TARGET_ROOT << " for type ";
#ifdef FIND_DOUBLE
    std::cout << "Double\n";
#else
    std::cout << "Float\n";
#endif
    auto result = find_magic_number(); // {worst-case relative error, magic}
    std::cout << "Relative error: " << std::fixed << result.first << "\n";
    std::cout << "Magic: 0x" << std::hex << result.second << " + (i / "
              << std::dec << TARGET_ROOT << ")\n";
}
13,692 | /*
* simulator_kernel_impl.cu
*
* Created on: Jul 18, 2014
* Author: bqian
*/
|
13,693 | #include "includes.h"
// One Game-of-Life generation for the interior cells of an n x n grid:
// reads the current state from val[], writes the next state into aux[].
// Border cells (row/column 0 and n-1) are never written.
__global__ void Evolve( int *val, int *aux, int n ) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if ( i <= 0 || i >= (n - 1) || j <= 0 || j >= (n - 1) )
        return;

    // Sum the eight neighbours of cell (i, j).
    int neighbours = 0;
    neighbours += val[ ( i - 1 ) * n + j ];      // up
    neighbours += val[ ( i - 1 ) * n + j + 1 ];  // up-right
    neighbours += val[ i * n + j + 1 ];          // right
    neighbours += val[ ( i + 1 ) * n + j + 1 ];  // down-right
    neighbours += val[ ( i + 1 ) * n + j ];      // down
    neighbours += val[ ( i + 1 ) * n + j - 1 ];  // down-left
    neighbours += val[ i * n + j - 1 ];          // left
    neighbours += val[ ( i - 1 ) * n + j - 1 ];  // up-left

    // Conway's rules: exactly 3 neighbours -> alive (birth or survival);
    // a live cell with <2 or >3 neighbours dies; otherwise unchanged.
    int next = val[ i * n + j ];
    if ( neighbours == 3 ) {
        next = 1;
    } else if ( next == 1 && ( neighbours < 2 || neighbours > 3 ) ) {
        next = 0;
    }
    aux[ i * n + j ] = next;
}
13,694 | #include <algorithm>
#include <iostream>
#include <vector>
#include <time.h>
#define MAX_BLOCK_SZ 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
typedef unsigned long long data_t;
// Abort the whole process on any CUDA runtime failure, reporting the
// offending expression (stringified by the CHECK macro below).
static inline void check(cudaError_t err, const char* context) {
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error: " << context << ": "
              << cudaGetErrorString(err) << std::endl;
    std::exit(EXIT_FAILURE);
}
#define CHECK(x) check(x, #x)
// Typed, checked cudaMemcpy wrapper: `num` counts elements of T, not bytes.
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
    const std::size_t bytes = num * sizeof(T);
    CHECK(cudaMemcpy(target, source, bytes, direction));
}
// Ceiling division for positive operands: the smallest k with k*b >= a.
static inline int divup(int a, int b) {
    return (a + b - 1) / b;
}
// Post-scan fix-up: each scan block covered 2*blockDim.x elements, so add
// that block's (already scanned) total to both elements owned by each
// thread. d_out may alias d_in (the caller passes the same buffer).
__global__
void gpu_add_block_sums(data_t* const d_out,
    const data_t* const d_in,
    data_t* const d_block_sums,
    const size_t numElems)
{
    const data_t blockOffset = d_block_sums[blockIdx.x];
    const data_t idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numElems)
        return;

    d_out[idx] = d_in[idx] + blockOffset;
    // Second element handled by this thread, one blockDim.x further on.
    const data_t second = idx + blockDim.x;
    if (second < numElems)
        d_out[second] = d_in[second] + blockOffset;
}
// Work-efficient (Blelloch) exclusive prefix sum over one block's slice of
// d_in, after GPU Gems 3 ch. 39. Each thread owns TWO elements, so a block
// scans max_elems_per_block = 2*blockDim.x values; the slice total is saved
// to d_block_sums[blockIdx.x] for the cross-block pass in sum_scan_blelloch.
// Launch with dynamic shared memory of shmem_sz * sizeof(data_t); shmem_sz
// includes extra room for the CONFLICT_FREE_OFFSET bank padding. The
// shmem_sz parameter itself is not read inside the kernel — it documents
// the launch configuration.
__global__
void gpu_prescan(data_t* const d_out,
    const data_t* const d_in,
    data_t* const d_block_sums,
    const data_t len,
    const data_t shmem_sz,
    const data_t max_elems_per_block)
{
    // Allocated on invocation
    extern __shared__ data_t s_out[];
    int thid = threadIdx.x;
    int ai = thid;                 // index of this thread's first element
    int bi = thid + blockDim.x;    // index of this thread's second element
    // Zero out the shared memory
    // Helpful especially when input size is not power of two
    s_out[thid] = 0;
    s_out[thid + blockDim.x] = 0;
    // If CONFLICT_FREE_OFFSET is used, shared memory size
    // must be a 2 * blockDim.x + blockDim.x/num_banks
    s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0;
    __syncthreads();
    // Copy d_in to shared memory
    // Note that d_in's elements are scattered into shared memory
    // in light of avoiding bank conflicts
    data_t cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
    if (cpy_idx < len)
    {
        s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
        if (cpy_idx + blockDim.x < len)
            s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
    }
    // For both upsweep and downsweep:
    // Sequential indices with conflict free padding
    // Amount of padding = target index / num banks
    // This "shifts" the target indices by one every multiple
    // of the num banks
    // offset controls the stride and starting index of
    // target elems at every iteration
    // d just controls which threads are active
    // Sweeps are pivoted on the last element of shared memory
    // Upsweep/Reduce step
    int offset = 1;
    for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset * ((thid << 1) + 1) - 1;
            int bi = offset * ((thid << 1) + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            s_out[bi] += s_out[ai];
        }
        offset <<= 1;
    }
    // Save the total sum on the global block sums array
    // Then clear the last element on the shared memory
    if (thid == 0)
    {
        d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
            + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
        s_out[max_elems_per_block - 1
            + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
    }
    // Downsweep step
    for (int d = 1; d < max_elems_per_block; d <<= 1)
    {
        offset >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset * ((thid << 1) + 1) - 1;
            int bi = offset * ((thid << 1) + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // Classic Blelloch swap-and-add on the padded indices.
            data_t temp = s_out[ai];
            s_out[ai] = s_out[bi];
            s_out[bi] += temp;
        }
    }
    __syncthreads();
    // Copy contents of shared memory to global memory
    if (cpy_idx < len)
    {
        d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
        if (cpy_idx + blockDim.x < len)
            d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
    }
}
// Host driver for an exclusive prefix sum of d_in[0..numElems) into d_out.
// Strategy: (1) block-local Blelloch scans via gpu_prescan, which also emit
// one total per block; (2) scan those per-block totals — with a single
// gpu_prescan launch when they fit in one block, otherwise by recursing on
// this same function; (3) add each block's scanned total back onto its
// slice with gpu_add_block_sums.
void sum_scan_blelloch(data_t* const d_out,
    const data_t* const d_in,
    const size_t numElems)
{
    // Zero out d_out
    CHECK(cudaMemset(d_out, 0, numElems * sizeof(data_t)));
    // Set up number of threads and blocks
    data_t block_sz = MAX_BLOCK_SZ / 2;
    data_t max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
    // If input size is not power of two, the remainder will still need a whole block
    // Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
    //data_t grid_sz = (data_t) std::ceil((double) numElems / (double) max_elems_per_block);
    // UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
    // add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
    data_t grid_sz = numElems / max_elems_per_block;
    // Take advantage of the fact that integer division drops the decimals
    if (numElems % max_elems_per_block != 0)
        grid_sz += 1;
    // Conflict free padding requires that shared memory be more than 2 * block_sz
    data_t shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS);
    // Allocate memory for array of total sums produced by each block
    // Array length must be the same as number of blocks
    data_t* d_block_sums;
    CHECK(cudaMalloc(&d_block_sums, sizeof(data_t) * grid_sz));
    CHECK(cudaMemset(d_block_sums, 0, sizeof(data_t) * grid_sz));
    // Sum scan data allocated to each block
    //gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(data_t) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
    gpu_prescan<<<grid_sz, block_sz, sizeof(data_t) * shmem_sz>>>(d_out,
        d_in,
        d_block_sums,
        numElems,
        shmem_sz,
        max_elems_per_block);
    // Sum scan total sums produced by each block
    // Use basic implementation if number of total sums is <= 2 * block_sz
    // (This requires only one block to do the scan)
    if (grid_sz <= max_elems_per_block)
    {
        // Dummy sink for the (single) block total of this inner scan.
        data_t* d_dummy_blocks_sums;
        CHECK(cudaMalloc(&d_dummy_blocks_sums, sizeof(data_t)));
        CHECK(cudaMemset(d_dummy_blocks_sums, 0, sizeof(data_t)));
        //gpu_sum_scan_blelloch<<<1, block_sz, sizeof(data_t) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
        gpu_prescan<<<1, block_sz, sizeof(data_t) * shmem_sz>>>(d_block_sums,
            d_block_sums,
            d_dummy_blocks_sums,
            grid_sz,
            shmem_sz,
            max_elems_per_block);
        CHECK(cudaFree(d_dummy_blocks_sums));
    }
    // Else, recurse on this same function as you'll need the full-blown scan
    // for the block sums
    else
    {
        // Scan must not run in place across blocks: snapshot the input first.
        data_t* d_in_block_sums;
        CHECK(cudaMalloc(&d_in_block_sums, sizeof(data_t) * grid_sz));
        CHECK(cudaMemcpy(d_in_block_sums, d_block_sums, sizeof(data_t) * grid_sz, cudaMemcpyDeviceToDevice));
        sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz);
        CHECK(cudaFree(d_in_block_sums));
    }
    // Add each block's total sum to its scan output
    // in order to get the final, global scanned array
    gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems);
    CHECK(cudaFree(d_block_sums));
}
// One local pass of a 2-bit LSD radix sort. Each block sorts its own
// max_elems_per_block-wide slice of d_in by the two bits starting at
// input_shift_width, producing: the block-sorted data (d_out_sorted),
// each element's rank among equal digits (d_prefix_sums), and a per-block
// digit histogram laid out digit-major — d_block_sums[digit * gridDim.x +
// blockIdx.x] — for the subsequent global scatter (gpu_glbl_shuffle).
// NOTE(review): indexing assumes blockDim.x == max_elems_per_block (one
// thread per element) and a power-of-two size for the Hillis-Steele scans
// — confirm against the host launch code.
__global__ void gpu_radix_sort_local(data_t* d_out_sorted,
    data_t* d_prefix_sums,
    data_t* d_block_sums,
    data_t input_shift_width,
    data_t* d_in,
    data_t d_in_len,
    data_t max_elems_per_block)
{
    // need shared memory array for:
    // - block's share of the input data (local sort will be put here too)
    // - mask outputs
    // - scanned mask outputs
    // - merged scaned mask outputs ("local prefix sum")
    // - local sums of scanned mask outputs
    // - scanned local sums of scanned mask outputs
    // for all radix combinations:
    //   build mask output for current radix combination
    //   scan mask ouput
    //   store needed value from current prefix sum array to merged prefix sum array
    //   store total sum of mask output (obtained from scan) to global block sum array
    // calculate local sorted address from local prefix sum and scanned mask output's total sums
    // shuffle input block according to calculated local sorted addresses
    // shuffle local prefix sums according to calculated local sorted addresses
    // copy locally sorted array back to global memory
    // copy local prefix sum array back to global memory
    extern __shared__ data_t shmem[];
    data_t* s_data = shmem;
    // s_mask_out[] will be scanned in place
    data_t s_mask_out_len = max_elems_per_block + 1;
    data_t* s_mask_out = &s_data[max_elems_per_block];
    data_t* s_merged_scan_mask_out = &s_mask_out[s_mask_out_len];
    data_t* s_mask_out_sums = &s_merged_scan_mask_out[max_elems_per_block];
    data_t* s_scan_mask_out_sums = &s_mask_out_sums[4];
    data_t thid = threadIdx.x;
    // Copy block's portion of global input data to shared memory
    data_t cpy_idx = max_elems_per_block * blockIdx.x + thid;
    if (cpy_idx < d_in_len)
        s_data[thid] = d_in[cpy_idx];
    else
        s_data[thid] = 0; // padding beyond the input contributes digit 0
    __syncthreads();
    // To extract the correct 2 bits, we first shift the number
    // to the right until the correct 2 bits are in the 2 LSBs,
    // then mask on the number with 11 (3) to remove the bits
    // on the left
    data_t t_data = s_data[thid];
    data_t t_2bit_extract = (t_data >> input_shift_width) & 3;
    // One pass per possible 2-bit digit value (0..3).
    for (data_t i = 0; i < 4; ++i)
    {
        // Zero out s_mask_out
        s_mask_out[thid] = 0;
        if (thid == 0)
            s_mask_out[s_mask_out_len - 1] = 0;
        __syncthreads();
        // build bit mask output(0 to 3, same as input data in paper)
        bool val_equals_i = false;
        if (cpy_idx < d_in_len)
        {
            val_equals_i = t_2bit_extract == i;
            s_mask_out[thid] = val_equals_i;
        }
        __syncthreads();
        // Scan mask outputs (Hillis-Steele)
        int partner = 0;
        data_t sum = 0;
        data_t max_steps = (data_t) log2f(max_elems_per_block);
        for (data_t d = 0; d < max_steps; d++) {
            partner = thid - (1 << d);
            if (partner >= 0) {
                sum = s_mask_out[thid] + s_mask_out[partner];
            }
            else {
                sum = s_mask_out[thid];
            }
            __syncthreads();
            s_mask_out[thid] = sum;
            __syncthreads();
        }
        // Shift elements to produce the same effect as exclusive scan
        data_t cpy_val = 0;
        cpy_val = s_mask_out[thid];
        __syncthreads();
        s_mask_out[thid + 1] = cpy_val;
        __syncthreads();
        if (thid == 0)
        {
            // Zero out first element to produce the same effect as exclusive scan
            s_mask_out[0] = 0;
            data_t total_sum = s_mask_out[s_mask_out_len - 1];
            s_mask_out_sums[i] = total_sum;
            // Digit-major layout so the host-side scan of d_block_sums
            // yields global digit offsets directly.
            d_block_sums[i * gridDim.x + blockIdx.x] = total_sum;
        }
        __syncthreads();
        if (val_equals_i && (cpy_idx < d_in_len))
        {
            // Record this element's rank among elements with digit i.
            s_merged_scan_mask_out[thid] = s_mask_out[thid];
        }
        __syncthreads();
    }//end loop here. complete local prefix sum
    // Scan mask output sums
    // Just do a naive scan since the array is really small
    if (thid == 0)
    {
        data_t run_sum = 0;
        for (data_t i = 0; i < 4; ++i)
        {
            s_scan_mask_out_sums[i] = run_sum;
            run_sum += s_mask_out_sums[i];
        }
    }// use s_scan_mask_out_sums for local shuffle, get index for each input data(0~3)
    __syncthreads();
    if (cpy_idx < d_in_len)
    {
        // Calculate the new indices of the input elements for sorting
        data_t t_prefix_sum = s_merged_scan_mask_out[thid];
        data_t new_pos = t_prefix_sum + s_scan_mask_out_sums[t_2bit_extract];
        __syncthreads();
        // Shuffle the block's input elements to actually sort them
        // Do this step for greater global memory transfer coalescing
        // in next step
        s_data[new_pos] = t_data; //0~3
        s_merged_scan_mask_out[new_pos] = t_prefix_sum;
        __syncthreads();
        // Copy block - wise prefix sum results to global memory
        // Copy block-wise sort results to global
        d_prefix_sums[cpy_idx] = s_merged_scan_mask_out[thid];
        d_out_sorted[cpy_idx] = s_data[thid];
    }
}
// Final scatter of one radix pass: each element's global destination is
//   d_scan_block_sums[digit * gridDim.x + blockIdx.x]  (scanned per-block
//   digit histogram = global base for this digit within this block)
// + d_prefix_sums[cpy_idx]                              (local rank among
//   equal digits, produced by gpu_radix_sort_local).
__global__ void gpu_glbl_shuffle(data_t* d_out,
    data_t* d_in,
    data_t* d_scan_block_sums,
    data_t* d_prefix_sums,
    data_t input_shift_width,
    data_t d_in_len,
    data_t max_elems_per_block)
{
    // d_scan_block_sums is prefix block sum
    // d_prefix_sums is local prefix sum
    data_t thid = threadIdx.x;
    data_t cpy_idx = max_elems_per_block * blockIdx.x + thid;
    if (cpy_idx < d_in_len)
    {
        data_t t_data = d_in[cpy_idx];
        data_t t_2bit_extract = (t_data >> input_shift_width) & 3;
        data_t t_prefix_sum = d_prefix_sums[cpy_idx];
        data_t data_glbl_pos = d_scan_block_sums[t_2bit_extract * gridDim.x + blockIdx.x]
            + t_prefix_sum; // max pos is less than 100,000,000 in our test case, data_t is sufficient
        // FIX: the original called __syncthreads() right here, inside the
        // divergent `if (cpy_idx < d_in_len)` branch — undefined behaviour
        // for the partially-full last block, and unnecessary anyway since
        // this kernel uses no shared memory. Removed.
        d_out[data_glbl_pos] = t_data;
    }
}
// Per-block sortedness check: each block loads its max_elems_per_block
// slice of d_in plus ONE overlap element (the first element of the next
// block), flags every adjacent out-of-order pair, sums the flags with a
// Hillis-Steele scan, and writes the block's violation count to
// d_out[blockIdx.x] (0 means this slice is sorted). Indexing assumes
// blockDim.x == max_elems_per_block.
__global__ void order_checking_local(data_t* d_in, data_t* d_out, data_t d_in_len, data_t max_elems_per_block)
{
    extern __shared__ data_t shmem[]; //dynamic shared memory
    //https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/
    //its length is (max_elems_per_block+ 1)+ max_elems_per_block
    // 1 is the first element in the next block, the last max_elems_per_block is comparison result
    data_t* s_data=shmem;
    data_t* s_comparison=&s_data[max_elems_per_block+1];
    data_t thid=threadIdx.x;// from 0 to max_elems_per_block-1
    // Copy block's portion of global input data to shared memory
    // one thread for one number
    data_t cpy_idx = max_elems_per_block * blockIdx.x + thid;
    if(cpy_idx<d_in_len)
        s_data[thid]=d_in[cpy_idx];
    else
        s_data[thid]=d_in[d_in_len-1]+(cpy_idx-d_in_len);//padding ensure is greater or equal to the previous one
    if(thid==0){
        data_t next_cpy_idx= max_elems_per_block *(blockIdx.x+1);//the first element in the next block
        if(next_cpy_idx<d_in_len)
            s_data[max_elems_per_block]=d_in[next_cpy_idx];
        else
            s_data[max_elems_per_block]=d_in[d_in_len-1]+(next_cpy_idx-d_in_len);//padding ensure is greater or equal to the previous one
    }
    //Wait for all threads to finish reading
    __syncthreads();
    //Perform order checking: 1 when this pair is out of order, else 0
    s_comparison[thid]=s_data[thid]>s_data[thid+1];
    //Wait for all threads to finish reading
    __syncthreads();
    //Perform reduction sum
    //Scan comparison result (Hillis-Steele)
    int partner=0;
    data_t sum=0;
    data_t max_steps = (data_t) log2f(max_elems_per_block);
    for (data_t d = 0; d < max_steps; d++) {
        partner = thid - (1 << d);
        if (partner >= 0) {
            sum = s_comparison[thid] + s_comparison[partner];
        }
        else {
            sum = s_comparison[thid];
        }
        __syncthreads();
        s_comparison[thid] = sum;
        __syncthreads();
    }
    //the last element of s_comparison now holds the INCLUSIVE sum, i.e. the
    //total number of out-of-order pairs seen by this block
    //if 0, then the current block is sorted
    d_out[blockIdx.x]=s_comparison[max_elems_per_block-1];
}
// Returns true when d_in[0 .. d_in_len-1] is already sorted (non-decreasing).
// Runs one order_checking_local block per MAX_BLOCK_SZ-sized slice and
// reduces the per-block verdicts on the host (verdict 0 == slice sorted).
bool partial_order_checking(data_t* d_in, data_t d_in_len)
{
    const int threads_per_block = MAX_BLOCK_SZ;
    const int elems_per_block = threads_per_block;
    const int num_blocks = divup(d_in_len, elems_per_block);
    // Shared memory: (elems_per_block + 1) data values + elems_per_block flags.
    data_t shmem_bytes = (2 * elems_per_block + 1) * sizeof(data_t);
    data_t *d_verdicts = NULL;
    CHECK(cudaMalloc((void**)&d_verdicts, num_blocks * sizeof(data_t)));
    order_checking_local<<<num_blocks, threads_per_block, shmem_bytes>>>(d_in, d_verdicts, d_in_len, elems_per_block);
    std::vector<data_t> verdicts(num_blocks);
    cuda_memcpy(verdicts.data(), d_verdicts, num_blocks, cudaMemcpyDeviceToHost);
    CHECK(cudaFree(d_verdicts));
    // Exit early on the first non-zero verdict: any non-zero block count
    // means the array is not sorted.
    bool sorted = true;
    for (const data_t v : verdicts) {
        if (v != 0) {
            sorted = false;
            break;
        }
    }
    return sorted;
}
// Full-array sortedness test, done as two overlapping halves.
// The second half starts one element early (at half-1) so the boundary pair
// (half-1, half) is also verified; && short-circuits like the original
// if/else, skipping the second check when the first half fails.
bool order_checking(data_t* d_in, data_t d_in_len)
{
    data_t half = d_in_len / 2;
    return partial_order_checking(d_in, half)
        && partial_order_checking(d_in + half - 1, d_in_len - half + 1);
}
// Parallel 4-way (2 bits per pass) LSD radix sort of data[0..n-1] on the GPU.
// Host driver: allocates device buffers, then for each 2-bit digit runs the
// block-local sort / block-sum scan / global shuffle pipeline, with an
// early-stop order check each iteration. Results are copied back into data.
void psort(int n, data_t *data) {
    if (n <= 0) return;
    const int block_size = MAX_BLOCK_SZ; // threads per block
    const int len = block_size;          // max_elems_per_block
    const int grid_size = divup(n, len);
    // Device copy of the input; also holds the final sorted output.
    data_t *d_in = NULL;
    CHECK(cudaMalloc((void**)&d_in, n * sizeof(data_t)));
    cuda_memcpy(d_in, data, n, cudaMemcpyHostToDevice);
    // Block-wise sorted intermediate.
    data_t *d_out = NULL;
    CHECK(cudaMalloc((void**)&d_out, n * sizeof(data_t)));
    // Per-element prefix sum within its block (local rank).
    data_t* d_prefix_sums;
    CHECK(cudaMalloc((void**)&d_prefix_sums, n * sizeof(data_t)));
    CHECK(cudaMemset(d_prefix_sums, 0, n * sizeof(data_t)));
    // Per-(digit, block) counts: 4 digits per block ("block sum" in the paper).
    data_t* d_block_sums;
    data_t d_block_sums_len = 4 * grid_size; // 4-way split
    CHECK(cudaMalloc(&d_block_sums, sizeof(data_t) * d_block_sums_len));
    CHECK(cudaMemset(d_block_sums, 0, sizeof(data_t) * d_block_sums_len));
    // Scanned block sums ("prefix block sum" in the paper).
    data_t* d_scan_block_sums;
    CHECK(cudaMalloc(&d_scan_block_sums, sizeof(data_t) * d_block_sums_len));
    CHECK(cudaMemset(d_scan_block_sums, 0, sizeof(data_t) * d_block_sums_len));
    // Shared memory for gpu_radix_sort_local: data tile, mask buffer,
    // merged scan output, and two 4-entry per-digit scratch arrays.
    data_t s_data_len = len;
    data_t s_mask_out_len = len + 1;
    data_t s_merged_scan_mask_out_len = len;
    data_t s_mask_out_sums_len = 4; // 4-way split
    data_t s_scan_mask_out_sums_len = 4;
    data_t shmem_sz = (s_data_len
        + s_mask_out_len
        + s_merged_scan_mask_out_len
        + s_mask_out_sums_len
        + s_scan_mask_out_sums_len)
        * sizeof(data_t);
    const data_t total_bits = sizeof(data_t) * 8;
    // BUG FIX: the loop bound is now strict (<). The original used <=, which
    // could reach shift_width == bit-width of data_t; shifting by the full
    // bit width is undefined behavior in C++. All digits are fully processed
    // by shift_width == total_bits - 2.
    for (data_t shift_width = 0; shift_width < total_bits; shift_width += 2)
    {
        if (order_checking(d_in, n))
        {
            // Early stop if already sorted. Based on experiments, order
            // checking is inexpensive, but it usually saves only one
            // iteration in our test case. (Saved-iteration count now derives
            // from sizeof(data_t) instead of a hard-coded 64.)
            std::cout << "order checking save " << (total_bits - shift_width) / 2 << " iteration" << std::endl;
            std::cout << "early stopping..." << std::endl;
            break;
        }
        // Sort each block's tile by the current 2-bit digit; record local
        // prefix sums and per-digit block counts.
        gpu_radix_sort_local<<<grid_size, block_size, shmem_sz>>>(d_out,
            d_prefix_sums,
            d_block_sums,
            shift_width,
            d_in,
            n,
            len);
        // Scan the global block-sum array (prefix block sum in the paper).
        sum_scan_blelloch(d_scan_block_sums, d_block_sums, d_block_sums_len);
        // Scatter/shuffle the block-wise sorted array to final positions.
        gpu_glbl_shuffle<<<grid_size, block_size>>>(d_in,
            d_out,
            d_scan_block_sums,
            d_prefix_sums,
            shift_width,
            n,
            len);
    }
    cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost);
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    CHECK(cudaFree(d_scan_block_sums));
    CHECK(cudaFree(d_block_sums));
    CHECK(cudaFree(d_prefix_sums));
}
#include <vector> // vector
/* Block size for the kernel launches: CUDA kernels will be launched with a
   2-D block of blockDimSize x blockDimSize threads. */
constexpr int blockDimSize = 8;
/* your job is to write convolveGPU:
convolveGPU will be called with blockSize blockDimSize x blockDimSize
and gridsize ⌈height/blockDimSize⌉x⌈width/blockDimSize⌉.
Each thread may have to compute more than one pixel. You will need to stride the computation.
Look at convolveCPU below for more info.
*/
/* GPU convolution kernel (implements the previously empty stub).
   Mirrors convolveCPU: for every interior pixel, compute the dot product of
   the kernel with the surrounding sub-image for each of the 3 interleaved
   channels. Launched with a blockDimSize x blockDimSize block and a grid of
   ceil(height/blockDimSize) x ceil(width/blockDimSize): blockIdx.x spans
   rows, blockIdx.y spans columns (matching convolveFrames). Grid-stride
   loops let each thread cover more than one pixel. Border pixels within
   half a kernel of the edge are left untouched, as in the CPU version. */
__global__ void convolveGPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
    const int halfKernelHeight = kernelHeight / 2;
    const int halfKernelWidth = kernelWidth / 2;
    const int rowStride = gridDim.x * blockDim.x;
    const int colStride = gridDim.y * blockDim.y;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x + halfKernelHeight;
         i < height - halfKernelHeight; i += rowStride) {
        for (int j = blockIdx.y * blockDim.y + threadIdx.y + halfKernelWidth;
             j < width - halfKernelWidth; j += colStride) {
            float redDot = 0.0f, greenDot = 0.0f, blueDot = 0.0f;
            for (int k = -halfKernelHeight; k <= halfKernelHeight; k += 1) {
                for (int l = -halfKernelWidth; l <= halfKernelWidth; l += 1) {
                    const float w = kernel[(k + halfKernelHeight) * kernelWidth + (l + halfKernelWidth)];
                    const int pixel = (i + k) * width * 3 + (j + l) * 3;
                    redDot   += in[pixel + 2] * w; // channel 2 = red
                    greenDot += in[pixel + 1] * w; // channel 1 = green
                    blueDot  += in[pixel + 0] * w; // channel 0 = blue
                }
            }
            const int outPixel = i * width * 3 + j * 3;
            out[outPixel + 2] = redDot;
            out[outPixel + 1] = greenDot;
            out[outPixel + 0] = blueDot;
        }
    }
}
/* A CPU example of the convolve kernel */
/* CPU reference implementation of the convolution.
   For every interior pixel (those at least half a kernel away from each
   edge) compute the dot product of the kernel with the surrounding
   sub-image, independently for each of the three interleaved channels
   (layout: index 0 = blue, 1 = green, 2 = red). Border pixels in `out`
   are left untouched. */
void convolveCPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
    const int halfH = kernelHeight / 2;
    const int halfW = kernelWidth / 2;
    /* visit every interior pixel */
    for (int row = halfH; row < height - halfH; row += 1) {
        for (int col = halfW; col < width - halfW; col += 1) {
            /* per-channel accumulators: [0]=blue, [1]=green, [2]=red */
            float dot[3] = {0.0f, 0.0f, 0.0f};
            for (int dy = -halfH; dy <= halfH; dy += 1) {
                for (int dx = -halfW; dx <= halfW; dx += 1) {
                    const float w = kernel[(dy + halfH) * kernelWidth + (dx + halfW)];
                    const int pixel = (row + dy) * width * 3 + (col + dx) * 3;
                    dot[0] += in[pixel + 0] * w;
                    dot[1] += in[pixel + 1] * w;
                    dot[2] += in[pixel + 2] * w;
                }
            }
            /* write the three channel results for this pixel */
            const int outPixel = row * width * 3 + col * 3;
            out[outPixel + 0] = dot[0];
            out[outPixel + 1] = dot[1];
            out[outPixel + 2] = dot[2];
        }
    }
}
/* call the convolveGPU function on each frame */
/* Launch convolveGPU on every frame, distributing the launches round-robin
   over the supplied streams, and return the total elapsed time in seconds
   as measured by CUDA events. */
float convolveFrames(std::vector<float *> const& framesIn, std::vector<float *> &framesOut, int width, int height, float const* kernel, int kernelWidth, int kernelHeight,
                     cudaStream_t *streams, int numStreams) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* 2-D launch config: x covers rows, y covers columns (ceiling division) */
    dim3 blockSize(blockDimSize, blockDimSize);
    dim3 gridSize((height + blockSize.x - 1) / blockSize.x,
                  (width + blockSize.y - 1) / blockSize.y);
    cudaEventRecord(start, 0);
    for (size_t i = 0; i < framesIn.size(); ++i) {
        cudaStream_t stream = streams[i % numStreams];
        convolveGPU<<<gridSize, blockSize, 0, stream>>>(framesIn.at(i), framesOut.at(i), width, height, kernel, kernelWidth, kernelHeight);
    }
    /* drain every stream before recording stop on the default stream */
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* convert milliseconds to seconds */
    return elapsedMs / 1000.0f;
}
#include <stdio.h>
// Prints one identification line per launched thread (block index, grid
// size, thread index, block size). Device-side printf output appears after
// the next host synchronization point.
__global__ void gpu_kernel()
{
    int block_idx, grid_dim;
    block_idx = blockIdx.x;
    grid_dim = gridDim.x;
    // Fixed typo in the message: "form" -> "from".
    printf("Hello from b #%d of %d, t #%d of %d!\n",
           block_idx, grid_dim, threadIdx.x, blockDim.x);
}
// Launch the hello-world kernel on 2 blocks of 2 threads and wait for it.
// Kernel launches are asynchronous and report no error directly, so both
// the launch (cudaGetLastError) and the execution (cudaDeviceSynchronize)
// are checked explicitly.
int main()
{
    gpu_kernel<<<2, 2>>>();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
#include <cuda_runtime.h>
#include <stdio.h>
/*
* Display a variety of information on the first CUDA device in this system,
* including driver version, runtime version, compute capability, bytes of
* global memory, etc.
*/
// Abort with file/line context when a CUDA runtime call fails.
// Usage: CHECK(cudaMalloc(...)); -- wraps any expression returning cudaError_t.
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Query the first CUDA device and print its name plus the sm_XY compute
// capability string to pass to nvcc.
int main(int argc, char **argv) {
int deviceCount = 0;
// NOTE(review): return value deliberately not wrapped in CHECK -- on a
// machine with no CUDA device this call may report an error rather than
// count == 0, and aborting here would skip the friendly message below.
// Confirm against the runtime version in use before changing.
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("There are no devices that support CUDA\n");
exit(EXIT_SUCCESS);
}
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
int dev = 0;
CHECK(cudaSetDevice(dev));
// Properties struct filled in by the runtime for device `dev`.
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
// major/minor form the compute capability, e.g. sm_86.
printf("Please use: sm_%d%d\n", deviceProp.major, deviceProp.minor);
exit(EXIT_SUCCESS);
}
|
#include <cmath>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for every i < n.
// The first thread of the grid also prints a marker so the host side can
// confirm the kernel actually ran.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x; // global thread id
    if (gid < n) {
        c[gid] = a[gid] + b[gid];
    }
    if (gid == 0) {
        printf("GPU works\n");
    }
}
// Adds two generated vectors (sin^2(i) and cos^2(i)) on the GPU and writes
// the n = 100000 results directly into the caller-provided buffer c.
// Precondition: c must have room for at least 100000 doubles.
extern void wrapper(double *c)
{
    printf("Initializing GPU data...\n");
    int n = 100000;                    // size of vectors
    double *h_a, *h_b;                 // host input vectors
    double *d_a, *d_b;                 // device input vectors
    double *d_c;                       // device output vector
    size_t bytes = n * sizeof(double); // size, in bytes, of each vector
    // BUG FIX: removed the unused host output buffer h_c -- it was allocated
    // and freed without ever being written; results go straight into c.
    h_a = new double[n];               // allocate memory for vectors on host
    h_b = new double[n];
    cudaMalloc(&d_a, bytes);           // allocate memory for vectors on device
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    for (int i = 0; i < n; i++)        // initialize host vectors
    {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    int blockSize, gridSize;
    blockSize = 1024;                           // number of threads in each block
    gridSize = (int)ceil((float)n / blockSize); // number of thread blocks in a grid
    printf("... Initialized.\nExecuting kernel...\n");
    // execute the kernel; the blocking cudaMemcpy below also synchronizes it
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
    printf("... Kernel executed\n");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] h_a;
    delete[] h_b;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#define KERNEL_SIZE 20
__constant__ int kernel[KERNEL_SIZE];
// 1-D convolution with the constant-memory kernel of KERNEL_SIZE taps.
// input:  padded array of length l + 2*(KERNEL_SIZE/2)
// output: array of length l; output[tid] = sum_j input[tid+j] * kernel[j]
// Launch: 1-D grid covering at least l threads; dynamic shared memory of
// (blockDim.x + 2*(KERNEL_SIZE/2)) ints.
__global__ void conv1d(int *input, int *output, int l) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int s_input[];
    int r = KERNEL_SIZE / 2;       // halo radius on each side
    int d = r * 2;                 // total halo width
    int n_padded = blockDim.x + d; // shared tile length
    int n_input = l + d;           // length of the padded global input
    // Stage this block's main slice of the input into shared memory.
    // BUG FIX: guard against the grid overshooting the padded input when
    // l is not a multiple of blockDim.x (out-of-bounds global read).
    if (tid < n_input) {
        s_input[threadIdx.x] = input[tid];
    }
    // The first d threads also stage the tile's right-hand halo.
    int offset = threadIdx.x + blockDim.x;
    int g_offset = blockDim.x * blockIdx.x + offset;
    if (offset < n_padded && g_offset < n_input) {
        s_input[offset] = input[g_offset];
    }
    // Barrier sits outside all conditionals so every thread reaches it.
    __syncthreads();
    // BUG FIX: only threads with a valid output element compute/write;
    // previously every thread wrote output[tid] unconditionally.
    if (tid < l) {
        int temp = 0;
        for (int j = 0; j < KERNEL_SIZE; j++) {
            temp += s_input[threadIdx.x + j] * kernel[j];
        }
        output[tid] = temp;
    }
}
// Driver: build a zero-padded random input, run the conv1d kernel once, and
// report the wall-clock time of copy + kernel + copy-back.
int main() {
    int l = 20480;           // number of output elements
    int i;
    int r = KERNEL_SIZE / 2; // halo radius
    int n = l + r * 2;       // padded input length
    int *host_input, *host_kernel, *host_output;
    int *dev_input, *dev_output;
    cudaMalloc((void**)&dev_input, sizeof(int) * n);
    // BUG FIX: the kernel writes l results and the D2H copy below reads
    // sizeof(int) * l bytes, but this buffer was previously allocated with
    // only KERNEL_SIZE ints -- an out-of-bounds device write/read.
    cudaMalloc((void**)&dev_output, sizeof(int) * l);
    cudaMallocHost((void**)&host_input, sizeof(int) * n);
    cudaMallocHost((void**)&host_kernel, sizeof(int) * KERNEL_SIZE);
    cudaMallocHost((void**)&host_output, sizeof(int) * l);
    // Zero-pad r elements on each side; random payload in the middle.
    for (i = 0; i < n; i++) {
        if ((i < r) || (i >= l + r)) {
            host_input[i] = 0;
        }
        else {
            host_input[i] = round(rand());
        }
    }
    for (i = 0; i < KERNEL_SIZE; i++) {
        host_kernel[i] = round(rand());
    }
    printf("Start convolution\n");
    clock_t start_time = clock();
    cudaMemcpy(dev_input, host_input, sizeof(int) * n, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(kernel, host_kernel, sizeof(int) * KERNEL_SIZE);
    int block = 256;
    int grid = (l + block - 1) / block;
    size_t sharemem = sizeof(int) * (block + r * 2); // tile + halo
    conv1d<<<grid, block, sharemem>>>(dev_input, dev_output, l);
    // The blocking D2H copy also synchronizes the kernel before timing stops.
    cudaMemcpy(host_output, dev_output, sizeof(int) * l, cudaMemcpyDeviceToHost);
    clock_t end_time = clock();
    printf("Time consuming of 1D convolution of %d array with %d kernel is %f ms.\n", l, KERNEL_SIZE, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFree(dev_input);
    cudaFree(dev_output);
    cudaFreeHost(host_input);
    cudaFreeHost(host_kernel);
    cudaFreeHost(host_output);
    return 0;
}
|
// Kernel timer demo
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
# define N 24
// Funcion que suma de dos vectores de n elementos
__global__
void suma(int *vector_1, int *vector_2, int*vector_suma, int n)
{
// identificador de hilo
int myID = threadIdx.x;
// obtenemos el vector invertido
vector_2[myID] = vector_1[n -1 - myID];
// escritura de resultados
vector_suma[myID] = vector_1[myID] + vector_2[myID];
}
// Timing demo: launch the `suma` kernel (vector plus its reversal) once on
// a single block of N threads, and measure the kernel with CUDA events.
int main(int argc, char** argv) {
// declarations
int *hst_vector1, *hst_vector2, *hst_resultado;
int *dev_vector1, *dev_vector2, *dev_resultado;
// host allocations
hst_vector1 = (int*)malloc(N * sizeof(int));
hst_vector2 = (int*)malloc(N * sizeof(int));
hst_resultado = (int*)malloc(N * sizeof(int));
// device allocations
cudaMalloc((void**)&dev_vector1, N * sizeof(int));
cudaMalloc((void**)&dev_vector2, N * sizeof(int));
cudaMalloc((void**)&dev_resultado, N * sizeof(int));
// vector initialization: random digits in vector 1, zeros in vector 2
for (int i=0; i<N; i++) {
hst_vector1[i] = rand() % 10;
hst_vector2[i] = 0;
}
printf("> Vector de %d elementos\n", N);
printf("> Lanzamiento con %d bloque de %d hilos\n", 1, N);
// Timing
// event declarations
cudaEvent_t start;
cudaEvent_t stop;
// event creation
cudaEventCreate(&start);
cudaEventCreate(&stop);
// copy input data to the device
cudaMemcpy(dev_vector1, hst_vector1, N*sizeof(int), cudaMemcpyHostToDevice);
// start mark
cudaEventRecord(start, 0);
// launch the kernel with a single block of N (24) threads
suma <<< 1, N >>> (dev_vector1, dev_vector2, dev_resultado, N);
// stop mark (recorded asynchronously, after the kernel on stream 0)
cudaEventRecord(stop, 0);
// copy results back from the device
cudaMemcpy(hst_vector2, dev_vector2, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hst_resultado, dev_resultado, N*sizeof(int), cudaMemcpyDeviceToHost);
// GPU-CPU synchronization: wait until the stop event has been recorded
cudaEventSynchronize(stop);
// elapsed time in milliseconds
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n> Tiempo de ejecucion: %f ms\n", elapsedTime);
// print results
printf("VECTOR 1:\n");
for (int i=0; i<N; i++) {
printf("%.2d ", hst_vector1[i]);
}
printf("\n");
printf("VECTOR 2:\n");
for (int i=0; i<N; i++) {
printf("%.2d ", hst_vector2[i]);
}
printf("\n");
printf("SUMA:\n");
for (int i=0; i<N; i++) {
printf("%.2d ", hst_resultado[i]);
}
printf("\n");
printf("****");
printf("<pulsa [INTRO] para finalizar>");
getchar();
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.