serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,801 | #include "includes.h"
__global__ void saxpy_float4s ( float* y, float* x, float a, clock_t * timer_vals)
{
// Vectorized SAXPY (y = a*x + y) processing four floats per iteration through
// float4 loads/stores. Assumes x and y are 16-byte aligned and sized for
// NUM_ITERS * CTA_COUNT * COMPUTE_THREADS_PER_CTA elements (macros from includes.h).
float4 * src = (float4 *)x;
float4 * dst = (float4 *)y;
for (int iter = 0; iter < NUM_ITERS/4; iter++) {
// Strided element index: each pass covers one full grid's worth of float4s.
unsigned int elem = iter * COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + threadIdx.x;
const float4 xv = src[elem];
const float4 yv = dst[elem];
float4 out;
out.x = a * xv.x + yv.x;
out.y = a * xv.y + yv.y;
out.z = a * xv.z + yv.z;
out.w = a * xv.w + yv.w;
dst[elem] = out;
}
} |
22,802 | #include "includes.h"
__global__ void NmDistanceGradKernel(int b, int n, const float *xyz1, int m, const float *xyz2, const float *grad_dist1, const int *idx1, float *grad_xyz1, float *grad_xyz2) {
// Backward pass for the nearest-neighbour squared distance: for each point in
// xyz1 and its matched point idx1 in xyz2, scatter +/- 2*grad*(p1-p2) into the
// two gradient buffers. Grid-stride over batches (x) and points (y); atomics
// because several points may share the same match in xyz2.
for (int batch = blockIdx.x; batch < b; batch += gridDim.x) {
for (int p = threadIdx.x + blockIdx.y * blockDim.x; p < n; p += blockDim.x * gridDim.y) {
const int src = (batch * n + p) * 3;
const int match = idx1[batch * n + p];
const int dst = (batch * m + match) * 3;
const float dx = xyz1[src + 0] - xyz2[dst + 0];
const float dy = xyz1[src + 1] - xyz2[dst + 1];
const float dz = xyz1[src + 2] - xyz2[dst + 2];
const float g = grad_dist1[batch * n + p] * 2;
atomicAdd(&(grad_xyz1[src + 0]), g * dx);
atomicAdd(&(grad_xyz1[src + 1]), g * dy);
atomicAdd(&(grad_xyz1[src + 2]), g * dz);
atomicAdd(&(grad_xyz2[dst + 0]), -(g * dx));
atomicAdd(&(grad_xyz2[dst + 1]), -(g * dy));
atomicAdd(&(grad_xyz2[dst + 2]), -(g * dz));
}
}
} |
22,803 | #include<iostream>
#include<algorithm>
#include<stdio.h>
#include<fstream>
#include <stdlib.h>
using namespace std;
#define REPEAT 1
#define Real double
#define STRIDE 1
#define CACHELINE 8
__global__ void VecAdd(Real* A, int* N, Real* d_time);
int main(int argc, char* argv[])
{
// Times the VecAdd kernel for an N-element array (N from argv[1]) and appends
// "size_in_KiB elapsed" to a fixed result file.
if(argc != 2)
{
std::cout << "Wrong number of argument!! Exiting program !!!";
return 1; // was 0: a usage error should signal failure to the shell
}
int N = atoi(argv[1]);
Real *A, *d_A;
int *d_N;
std::ofstream fp;
fp.open("/home/hpc/ihpc/ihpc002h/gpu-exp/Master-thesis/data/result.txt", std::ofstream::app);
double *d_time, h_time;
A = (Real*)malloc(N*sizeof(Real));
cudaMalloc(&d_A, N*sizeof(Real));
cudaMalloc(&d_time, sizeof(double));
cudaMalloc(&d_N, sizeof(int)); // was sizeof(double): d_N holds a single int
// Pointer-chase style initialization: A[i] points STRIDE elements ahead, mod N.
for(int i=0; i < N ; i++)
{
A[i] = (i+STRIDE)%N;
}
cudaMemcpy(d_A, A, N*sizeof(Real), cudaMemcpyHostToDevice );
cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice );
VecAdd<<<1, 1>>>(d_A, d_N, d_time);
// Wait for the kernel before reading its device-side timing result
// (previously the sync came after the copy-back).
cudaDeviceSynchronize();
cudaMemcpy(&h_time, d_time, sizeof(double), cudaMemcpyDeviceToHost);
fp << N*8.0/1024.0 << " " << h_time << std::endl;
cudaFree(d_A);
cudaFree(d_time); // was leaked
cudaFree(d_N);    // was leaked
free(A);          // was leaked
fp.close();
return 0;
}
|
22,804 | #include <iostream>
#include <vector>
#include <random>
#include <cuda_runtime.h>
using TheType = float;
constexpr auto TheSize = 65536u*128u;
constexpr auto TheSizeInBytes = TheSize*sizeof(TheType);
constexpr auto TheInnerLoop = 256u;
__global__ void add(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: ys[i] = sum over a TheInnerLoop-wide window of xs1[i+j] + xs2[i+j].
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window: `size` was previously unused and the kernel relied
// on the grid being sized exactly to TheSize - TheInnerLoop.
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
res += xs1[i+j] + xs2[i+j];
}
ys[i] = res;
}
__global__ void mul(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: ys[i] = sum over a TheInnerLoop-wide window of xs1[i+j] * xs2[i+j].
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
res += xs1[i+j] * xs2[i+j];
}
ys[i] = res;
}
__global__ void div(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: ys[i] = sum over a TheInnerLoop-wide window of xs1[i+j] / xs2[i+j].
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
res += xs1[i+j] / xs2[i+j];
}
ys[i] = res;
}
__global__ void std_sqrt(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: sums std::sqrt over a TheInnerLoop-wide window of xs1.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
res += std::sqrt(xs1[i+j]);
}
ys[i] = res;
}
__global__ void std_sin(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: sums std::sin over a TheInnerLoop-wide window of xs1.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
res += std::sin(xs1[i+j]);
}
ys[i] = res;
}
__global__ void poly_sin(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: degree-7 polynomial sin approximation, expanded form.
// Double literals are intentional here — poly_sin3 is the float-literal variant.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
const auto x = xs1[i+j];
res += -0.000182690409228785*x*x*x*x*x*x*x+0.00830460224186793*x*x*x*x*x-0.166651012143690*x*x*x+x;
}
ys[i] = res;
}
__global__ void poly_sin2(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: same polynomial as poly_sin, Horner-factored form,
// double literals (poly_sin4 is the float-literal variant).
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
const auto x = xs1[i+j];
res += x*x*x*(x*x*(-0.000182690409228785*x*x+0.00830460224186793)-0.166651012143690)+x;
}
ys[i] = res;
}
__global__ void poly_sin3(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: poly_sin with float literals (no double promotion).
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
const auto x = xs1[i+j];
res += -0.000182690409228785f*x*x*x*x*x*x*x+0.00830460224186793f*x*x*x*x*x-0.166651012143690f*x*x*x+x;
}
ys[i] = res;
}
__global__ void poly_sin4(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: Horner-factored polynomial with float literals.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
auto res = 0.f;
for(auto j = 0u; j < TheInnerLoop; ++j) {
const auto x = xs1[i+j];
res += x*x*x*(x*x*(-0.000182690409228785f*x*x+0.00830460224186793f)-0.166651012143690f)+x;
}
ys[i] = res;
}
__global__ void logical_and(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: short-circuit && chain over 4-wide windows of self-comparisons.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
bool all_gt = true;
for(auto j = 0u; j < TheInnerLoop - 3; ++j) {
// Fix: the third term read xs1[i+j] + 2 — the "+2" belonged inside the index
// to match the self-comparison pattern of the other terms.
all_gt = all_gt && (xs1[i+j] > xs1[i+j]) && (xs1[i+j+1] > xs1[i+j+1]) && (xs1[i+j+2] > xs1[i+j+2]) && (xs1[i+j+3] > xs1[i+j+3]);
}
ys[i] = all_gt ? 1.f : 0.f;
}
__global__ void bit_and(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: non-short-circuit & chain over the same comparisons as logical_and.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
bool all_gt = true;
for(auto j = 0u; j < TheInnerLoop - 3; ++j) {
// Fix: third term had the "+2" outside the index (xs1[i+j] + 2).
all_gt = all_gt & (xs1[i+j] > xs1[i+j]) & (xs1[i+j+1] > xs1[i+j+1]) & (xs1[i+j+2] > xs1[i+j+2]) & (xs1[i+j+3] > xs1[i+j+3]);
}
ys[i] = all_gt ? 1.f : 0.f;
}
__global__ void mul_and(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: boolean conjunction expressed as multiplication.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
bool all_gt = true;
for(auto j = 0u; j < TheInnerLoop - 3; ++j) {
// Fix: third term had the "+2" outside the index (xs1[i+j] + 2).
all_gt = all_gt * (xs1[i+j] > xs1[i+j]) * (xs1[i+j+1] > xs1[i+j+1]) * (xs1[i+j+2] > xs1[i+j+2]) * (xs1[i+j+3] > xs1[i+j+3]);
}
ys[i] = all_gt ? 1.f : 0.f;
}
// Exchanges a and b through a temporary; brace-wrapped so it can follow a bare `if`.
#define swap(a, b) {auto c = a; a = b; b = c;}
__global__ void sort(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: branchy 3-element bubble sort per window position,
// folded into a weighted checksum.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
float checksum = 0.;
for(auto j = 0u; j < TheInnerLoop - 2; ++j) {
double s[3] = {xs1[i+j], xs1[i+j+1], xs1[i+j+2]};
if(s[0] > s[1])
swap(s[0], s[1]);
if(s[1] > s[2])
swap(s[1], s[2]);
if(s[0] > s[1])
swap(s[0], s[1]);
// Fix: s[3] was an out-of-bounds read of a 3-element array (UB);
// the maximum after sorting is s[2].
checksum += s[0] + 2*s[1] + 3*s[2];
}
ys[i] = checksum;
}
__global__ void nano_sort(const float *xs1, const float *xs2, float *ys, int size) {
// Benchmark kernel: branchless rank-based 3-element sort per window position.
int i = (blockDim.x * blockIdx.x + threadIdx.x);
// Guard the sliding window (`size` was previously unused).
if (i + (int)TheInnerLoop > size) return;
float checksum = 0.;
for(auto j = 0u; j < TheInnerLoop - 2; ++j) {
double sortable[3] = {xs1[i+j], xs1[i+j+1], xs1[i+j+2]};
const auto a = sortable[0];
const auto b = sortable[1];
const auto c = sortable[2];
// Each element's rank (number of elements it beats) is its final slot;
// the >= on later elements breaks ties without collisions.
sortable[int(a > b) + int(a > c)] = a;
sortable[int(b >= a) + int(b > c)] = b;
sortable[int(c >= a) + int(c >= b)] = c;
// Fix: sortable[3] was an out-of-bounds read; the maximum is sortable[2].
checksum += sortable[0] + 2*sortable[1] + 3*sortable[2];
}
ys[i] = checksum;
}
// Evaluates a CUDA call; on failure prints the error string with the source line
// and returns -1 from the enclosing function (intended for use inside main).
#define attempt(smth) {auto s=(smth);if(s!=cudaSuccess){std::cout << cudaGetErrorString(s) << " at " << __LINE__ << "\n"; return -1;}}
// Times one launch of kernel `smth` over the shared d_xs/d_ys/d_zs buffers with
// CUDA events and prints the elapsed milliseconds.
// Fixes: the last macro line previously ended with a stray '\', splicing the
// following source line into the macro body; and both events were leaked.
#define measure(smth) {\
/*timestamp start*/\
cudaEvent_t start;\
cudaEventCreate(&start);\
cudaEventRecord(start, 0);\
cudaEvent_t stop;\
cudaEventCreate(&stop); /*here so it wouldn't interfere with the measurement*/\
\
/* run it*/\
int threadsPerBlock = 256;\
int blocksPerGrid = (TheSize - TheInnerLoop + threadsPerBlock - 1) / threadsPerBlock;\
smth<<<blocksPerGrid, threadsPerBlock>>>(d_xs, d_ys, d_zs, TheSize);\
attempt(cudaGetLastError());\
attempt(cudaDeviceSynchronize());\
\
/* timestamp stop*/\
cudaEventRecord(stop, 0); \
cudaEventSynchronize(stop);\
float elapsedTime;\
cudaEventElapsedTime(&elapsedTime, start, stop);\
std::cout << "Time of " << #smth << ": " << elapsedTime << "\n";\
cudaEventDestroy(start);\
cudaEventDestroy(stop);}
// Micro-benchmark driver: fill two random input arrays, copy them to the
// device, and time each kernel once via the measure() macro (which launches,
// syncs, and prints the elapsed time).
int main(void)
{
// prepare the data
std::mt19937 rng(0); // fixed seed: runs are reproducible
std::uniform_real_distribution<TheType> distribution(0.f, 1.f);
std::vector<TheType> xs(TheSize);
std::vector<TheType> ys(TheSize);
std::vector<TheType> zs(TheSize);
for (TheType &number : xs) number = distribution(rng);
for (TheType &number : ys) number = distribution(rng);
// do the allocations
float *d_xs = nullptr;
float *d_ys = nullptr;
float *d_zs = nullptr;
attempt(cudaMalloc((void **)&d_xs, TheSizeInBytes));
attempt(cudaMalloc((void **)&d_ys, TheSizeInBytes));
attempt(cudaMalloc((void **)&d_zs, TheSizeInBytes));
// and copying
// d_zs is left uninitialized on purpose: every kernel overwrites it.
attempt(cudaMemcpy(d_xs, xs.data(), TheSizeInBytes, cudaMemcpyHostToDevice));
attempt(cudaMemcpy(d_ys, ys.data(), TheSizeInBytes, cudaMemcpyHostToDevice));
measure(add);
measure(mul);
measure(div);
measure(std_sqrt);
measure(std_sin);
measure(poly_sin);
measure(poly_sin2);
measure(poly_sin3);
measure(poly_sin4);
measure(logical_and);
measure(bit_and);
measure(mul_and);
measure(sort);
measure(nano_sort);
// back (for debug, don't really want it)
attempt(cudaMemcpy(zs.data(), d_zs, TheSizeInBytes, cudaMemcpyDeviceToHost));
attempt(cudaFree(d_xs));
attempt(cudaFree(d_ys));
attempt(cudaFree(d_zs));
return 0;
}
|
22,805 | #include <stdio.h>
#define MATRIX_ROWS 5
#define MATRIX_COLUMNS 5
#define SHARED_MEMORY_PADDING 1
__global__
// Fills a MATRIX_ROWS x MATRIX_COLUMNS matrix with idx+idy, staging each value
// through a shared-memory tile before writing it out.
void createMatrixStatic(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS];
// Fix: the column index must advance with blockIdx.x and the row index with
// blockIdx.y — the original crossed them, which was harmless only while the
// grid was 1x1.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < MATRIX_COLUMNS && idy < MATRIX_ROWS) {
matrix[idy][idx] = idx + idy;
out[idy*MATRIX_COLUMNS+idx] = matrix[idy][idx];
}
}
__global__
// Shared-memory padding demo: the +SHARED_MEMORY_PADDING column widens each row
// of the tile (the classic stride-padding layout).
// NOTE(review): indexing uses threadIdx.y, so this kernel expects a 2D thread
// block covering the whole matrix; a 1D launch only writes row 0 — confirm the
// launch configuration in main.
void createMatrixStaticPadding(float* out) {
__shared__ float matrix[MATRIX_ROWS][MATRIX_COLUMNS+SHARED_MEMORY_PADDING];
// Flattened element index within the block.
int idx = threadIdx.y * blockDim.x + threadIdx.x;
matrix[threadIdx.y][threadIdx.x] = idx;
__syncthreads();
out[idx] = matrix[threadIdx.y][threadIdx.x];
}
// Prints a row-major MATRIX_ROWS x MATRIX_COLUMNS matrix of small integers.
// Fix: iterate y over rows and x over columns, and stride rows by the column
// count — the original swapped them and only worked because the matrix is square.
void printMatrix(float* matrix) {
for (int y = 0; y < MATRIX_ROWS; y++) {
for (int x = 0; x < MATRIX_COLUMNS; x++) {
printf("%-3d ", int(matrix[y*MATRIX_COLUMNS+x]));
}
printf("\n");
}
}
int main(void) {
// Demonstrates the two shared-memory kernels on a 5x5 matrix and prints each result.
printf("\n");
dim3 block(MATRIX_ROWS, MATRIX_COLUMNS);
dim3 grid((MATRIX_ROWS+block.x-1)/block.x, (MATRIX_COLUMNS+block.y-1)/block.y);
float* host_matrix = (float*)malloc(MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
float* device_matrix;
cudaMalloc((float**)&device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float));
createMatrixStatic<<<grid,block>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStatic\n");
printMatrix(host_matrix);
printf("\n");
// Fix: createMatrixStaticPadding indexes with threadIdx.y, so it needs the
// full 2D block. The previous 1-D launch (<<<grid.x,block.x>>>) only wrote
// 5 of the 25 elements and the printout showed stale data.
createMatrixStaticPadding<<<1,block>>>(device_matrix);
cudaDeviceSynchronize();
cudaMemcpy(host_matrix, device_matrix, MATRIX_ROWS*MATRIX_COLUMNS*sizeof(float), cudaMemcpyDeviceToHost);
printf("createMatrixStaticPadding\n");
printMatrix(host_matrix);
printf("\n");
free(host_matrix);
cudaFree(device_matrix);
cudaDeviceReset();
printf("\n");
return 0;
} |
22,806 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#define N 1024 * 1024
// Evaluates a CUDA call and exits with a message on failure.
// NOTE(review): relies on a `cudaError_t err` variable already being declared
// in the enclosing scope (see main) — a hidden dependency of this macro.
#define CHECK_CUDA_ERR(x) { \
err = x; \
if (err != cudaSuccess) { \
printf("cuda error with %s in line %d\n",cudaGetErrorString(err),__LINE__); \
exit(1); \
} }
// Times a single sizeof(float)*N host-to-device cudaMemcpy with CUDA events.
// NOTE(review): buf is pageable (malloc) and its contents are uninitialized —
// acceptable for a pure bandwidth measurement, but pinned memory
// (cudaMallocHost) would transfer faster; confirm intent.
int main()
{
cudaError_t err; // required by the CHECK_CUDA_ERR macro
cudaEvent_t start_event,stop_event;
CHECK_CUDA_ERR ( cudaEventCreate(&start_event) );
CHECK_CUDA_ERR ( cudaEventCreate(&stop_event) );
void * buf = malloc(sizeof(float) * N);
void * dBuf;
CHECK_CUDA_ERR ( cudaMalloc(&dBuf,sizeof(float) * N) );
// Bracket the copy with events; the copy is blocking, but the device sync
// below guarantees the stop event has completed before it is read.
CHECK_CUDA_ERR ( cudaEventRecord(start_event,0) ) ;
CHECK_CUDA_ERR ( cudaMemcpy(dBuf,buf,sizeof(float) * N, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR ( cudaEventRecord(stop_event,0) );
CHECK_CUDA_ERR ( cudaDeviceSynchronize() );
float ms = 100.f; // placeholder; overwritten by cudaEventElapsedTime
CHECK_CUDA_ERR ( cudaEventElapsedTime(&ms,start_event,stop_event));
printf("%f m second cost\n",ms);
CHECK_CUDA_ERR ( cudaEventDestroy(start_event) );
CHECK_CUDA_ERR ( cudaEventDestroy(stop_event) );
free(buf);
CHECK_CUDA_ERR ( cudaFree(dBuf) );
return 0;
}
|
22,807 | #include "includes.h"
__device__ int tex_i(const int * ptData,int y,int x,int step)
{
// Row-major fetch: element (y, x) of a matrix whose row stride is `step`.
return *(ptData + y * step + x);
}
// One thread per candidate keypoint: keep the candidate only if its score
// strictly beats all 8 neighbours (3x3 non-maximum suppression), then append
// it to locFinal/responseFinal at a slot reserved via an atomic counter.
// NOTE(review): `g_counter` is not declared in this file — assumed to be a
// __device__ unsigned int from includes.h, zeroed before launch; confirm.
// NOTE(review): candidates on the image border make the neighbour reads go
// outside the score matrix — assumes the detector excludes the 1-pixel border.
__global__ void nonmaxSuppression(const short2* kpLoc_Device, int count, const int* score_DeviceMat,int cols,int rows,short2* locFinal, float* responseFinal)
{
const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (kpIdx < count)
{
short2 loc = kpLoc_Device[kpIdx];
// Score of the candidate itself (row-major score matrix, row stride = cols).
int score_Device = tex_i( score_DeviceMat,loc.y, loc.x,cols);
bool ismax =
score_Device > tex_i( score_DeviceMat,loc.y - 1, loc.x - 1,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y - 1, loc.x ,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y - 1, loc.x + 1,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y , loc.x - 1,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y , loc.x + 1,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y + 1, loc.x - 1,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y + 1, loc.x ,cols) &&
score_Device > tex_i( score_DeviceMat,loc.y + 1, loc.x + 1,cols);
if (ismax)
{
// atomicInc with wrap value -1 acts as a plain atomic increment here.
const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
locFinal[ind] = loc;
responseFinal[ind] = static_cast<float>(score_Device);
}
}
} |
22,808 | #include <stdio.h>
#include <cuda.h>
__global__ void calculate_g_image_gpu(float* in, float* out, int w, int h){
// Computes the edge-stopping diffusivity g = exp(-(|grad|^2)^2 / (2*lambda^2))
// from central differences of the w x h row-major image `in`.
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
// Fix: exclude the last row/column as well — the stencil reads i+1 and j+1,
// so the original bound (i < h, j < w) read out of bounds on the far edges.
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {
float dx = (in[(i+1)*w+j] - in[(i-1)*w+j]) / 2.0f;
float dy = (in[i*w+j+1] - in[i*w+j-1]) / 2.0f;
float val = dx*dx + dy*dy;
float lambda = 3.5f;
// Single-precision throughout (pow/exp previously promoted to double).
out[i*w+j] = expf(-(val*val) / (2.0f*lambda*lambda));
}
}
__global__ void copy_gpu(float* src, float* dest, int w, int h) {
// Element-wise copy of a w*h buffer, one thread per element.
int x = blockDim.x * blockIdx.x + threadIdx.x;
// Fix: guard against grid overshoot — the launcher rounds the grid up, so
// trailing threads previously wrote past the end of both buffers.
if (x < w * h) {
dest[x] = src[x];
}
}
__device__ float arithmetic_mean_gpu(float n1, float n2) {
// Mean of two floats; 0.5f keeps the math single precision — the original
// /2.0 promoted every call to double and back.
return (n1 + n2) * 0.5f;
}
__global__ void apply_stencil_gpu(float* in, float* out, float* g, int w, int h, float time_step) {
// One explicit diffusion step: out = in + dt * sum_over_neighbours(conductance * (neighbour - center)),
// where each edge conductance is the mean of g at the cell and that neighbour.
// Output is clamped to the [0, 255] pixel range.
int x = blockDim.x * blockIdx.x + threadIdx.x;
int j = x % w;
int i = x / w;
// Fix: exclude the last row/column — the stencil reads i+1 / j+1, which
// previously went out of bounds at i == h-1 / j == w-1.
if (1 <= i && i < h - 1 && 1 <= j && j < w - 1) {
float gc = g[i*w+j];
float g_up = arithmetic_mean_gpu(gc, g[(i-1)*w+j]);
float g_down = arithmetic_mean_gpu(gc, g[(i+1)*w+j]);
float g_left = arithmetic_mean_gpu(gc, g[i*w+j-1]);
float g_right = arithmetic_mean_gpu(gc, g[i*w+j+1]);
// Fix: the center weight previously summed only three conductances and
// omitted the (i-1) term, so the four neighbour contributions did not
// balance the loss from the center pixel.
float val = in[i*w+j] * (1 - time_step*(g_up + g_down + g_left + g_right));
val += in[(i+1)*w+j] * time_step * g_down;
val += in[(i-1)*w+j] * time_step * g_up;
val += in[i*w+j+1] * time_step * g_right;
val += in[i*w+j-1] * time_step * g_left;
val = (val < 0 ? 0 : val);
val = (val > 255 ? 255 : val);
out[i*w+j] = val;
}
}
void gpu_func(float* in, float* out, float* g_img, int w, int h, int n_iters){
// Runs n_iters diffusion steps on a w x h image using managed memory, then
// copies the final image and the last diffusivity map back to the host buffers.
int device = 0;
int n = w*h;
cudaSetDevice(device);
float* in_dev;
float* out_dev;
float* g_dev;
cudaMallocManaged(&in_dev, n * sizeof(float));
cudaMallocManaged(&out_dev, n * sizeof(float));
cudaMallocManaged(&g_dev, n * sizeof(float));
for (int i = 0; i < n; i++) in_dev[i] = in[i];
dim3 blockDim(16);
// Fix: round the grid up so every element gets a thread. The original ternary
// was inverted — it under-launched when n wasn't a multiple of the block size
// and launched one extra block when it was.
dim3 gridDim((n + blockDim.x - 1) / blockDim.x);
for (int t=0; t<n_iters; t++) {
calculate_g_image_gpu<<<gridDim, blockDim>>>(in_dev, g_dev, w, h);
// NOTE(review): the iteration index t is passed as the time step, so the
// step size grows each iteration — confirm this is intended.
apply_stencil_gpu<<<gridDim, blockDim>>>(in_dev, out_dev, g_dev, w, h, t);
copy_gpu<<<gridDim, blockDim>>>(out_dev, in_dev, w, h);
}
cudaDeviceSynchronize();
printf("Done executing CUDA kernel\n");
for (int i = 0; i < n; i++) out[i] = out_dev[i];
for (int i = 0; i < n; i++) g_img[i] = g_dev[i];
cudaFree(in_dev);
cudaFree(out_dev);
cudaFree(g_dev);
}
|
22,809 | #include <stdlib.h>
#include "cuda.h"
#include <iostream>
#define RADIUS 3 //TODO: Change to larger values
#define N 1000000
void initializeWeights(float* weights) {
// Symmetric 7-tap smoothing kernel; the taps sum to 1.0.
const float taps[7] = {0.05f, 0.10f, 0.20f, 0.30f, 0.20f, 0.10f, 0.05f};
for (int i = 0; i < 7; ++i) {
weights[i] = taps[i];
}
}
void initializeArray(float* in) {
// Fills the N-element array with pseudo-random digits 0..9.
// rand() is never seeded, so the sequence is identical every run.
for (int idx = 0; idx != N; ++idx)
in[idx] = (float)(rand() % 10);
}
__global__ void applyStencil1D(int sIdx, int eIdx, const float* weights, float* in, float* out) {
// Weighted 1D stencil over in[i-RADIUS .. i+RADIUS] for i in [sIdx, eIdx);
// the result is additionally divided by the window width (2*RADIUS+1),
// preserving the original scaling.
int i = sIdx + blockIdx.x * blockDim.x + threadIdx.x;
if (i < eIdx) {
// Accumulate in a register instead of read-modify-writing out[i] in global
// memory once per tap (same tap order, so results are bit-identical).
float acc = 0.0f;
for (int j = -RADIUS; j <= RADIUS; j++) {
acc += weights[j + RADIUS] * in[i + j];
}
out[i] = acc / (2 * RADIUS + 1);
}
}
int main() {
// Host driver: build the weights and input, run the 1D stencil over the
// interior [RADIUS, N-RADIUS), and copy the result back.
int wsize = 2 * RADIUS + 1;
//allocate resources
float* weights = new float[wsize];
float* in = new float[N];
float* out = new float[N];
initializeWeights(weights);
initializeArray(in);
float* d_weights;
cudaMalloc(&d_weights, wsize * sizeof(float));
float* d_in;
cudaMalloc(&d_in, N * sizeof(float));
float* d_out;
cudaMalloc(&d_out, N * sizeof(float));
cudaMemcpy(d_weights, weights, wsize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_in, in, N*sizeof(float), cudaMemcpyHostToDevice);
applyStencil1D <<<(N+511) / 512, 512 >>> (RADIUS, N - RADIUS, d_weights, d_in, d_out);
// Surface launch/runtime errors — kernel failures were previously silent.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
}
cudaMemcpy(out, d_out, N*sizeof(float), cudaMemcpyDeviceToHost);
//free resources
delete[] weights;
delete[] in;
delete[] out;
cudaFree(d_weights);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
22,810 | /// managed mamory analysis - cuda lab cpu->gpu only mamory access
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
using namespace std::chrono;
__global__
void deviceKernel(int *a, int N)
{
// Writes 1 to every element; the stride loop makes the kernel correct for
// any N regardless of the launch configuration.
const int step = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += step)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
// CPU counterpart of deviceKernel: sets every element of a[0..N) to 1.
int *end = a + N;
for (int *p = a; p != end; ++p)
{
*p = 1;
}
}
int main(int argc, char** argv)
{
// Usage: prog <exponent> <output_file>
// Compares first-touch timing of a managed allocation on host vs. device.
// Fix: argv[1]/argv[2] were dereferenced without checking argc.
if (argc < 3) {
printf("usage: %s <exponent> <outfile>\n", argv[0]);
return 1;
}
char* pEnd;
const int N = 2<<strtol(argv[1], &pEnd, 10); //2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
FILE *f;
f = fopen(argv[2], "a");
if (f == NULL) {
printf("cannot open %s\n", argv[2]);
return 1;
}
if (strtol(argv[1], &pEnd, 10) == 10) {
fprintf(f, "NumElement\t\tHost\t\tDevice\n");
}
fprintf(f, "%d\t\t", N);
auto start = high_resolution_clock::now();
hostFunction(a, N);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
// Fix: duration.count() is a 64-bit integer; printing it with %d was
// undefined behavior. Cast to long long and use %lld.
fprintf(f, "%lld\t\t", (long long)duration.count());
printf("Host: %lld us \n", (long long)duration.count());
start = high_resolution_clock::now();
deviceKernel<<<256, 256>>>(a, N);
cudaDeviceSynchronize();
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
fprintf(f, "%lld\n", (long long)duration.count());
fclose(f);
printf("Device: %lld us \n", (long long)duration.count());
cudaFree(a);
return 0;
}
|
22,811 | //Mesh Laplacian
//Author: Weiyue Wang
//Reference: https://github.com/charlesq34/pointnet-autoencoder/blob/master/tf_ops/nn_distance/tf_nndistance_g.cu
// https://github.com/PointCloudLibrary/pcl/blob/master/tools/mesh_sampling.cpp
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include <stdio.h>
#include <assert.h>
__device__ void getPoint(const float *vertices, int v_id, float *p){
// Copies vertex v_id's (x, y, z) out of the packed [x0 y0 z0 x1 ...] array.
const float *src = vertices + 3 * v_id;
p[0] = src[0];
p[1] = src[1];
p[2] = src[2];
}
__device__ void getTriangle(const int *triangles, int t_id, int &v1, int &v2, int &v3){
// Unpacks the three vertex indices of triangle t_id from the flat index array.
const int *tri = triangles + 3 * t_id;
v1 = tri[0];
v2 = tri[1];
v3 = tri[2];
}
__host__ void getTriangle_cpu(const int *triangles, int t_id, int &v1, int &v2, int &v3){
// Host-side twin of getTriangle: unpack triangle t_id's vertex indices.
const int *tri = triangles + 3 * t_id;
v1 = tri[0];
v2 = tri[1];
v3 = tri[2];
}
__host__ bool findnb_cpu(const int *nb, int v_id){
// Linear scan of the fixed-size (20-slot) neighbour list for v_id.
for (int slot = 0; slot < 20; ++slot){
if (nb[slot] == v_id)
return true;
}
return false;
}
__device__ bool findnb(const int *nb, int v_id){
// Device-side twin of findnb_cpu: scan the 20-slot neighbour list for v_id.
for (int slot = 0; slot < 20; ++slot){
if (nb[slot] == v_id)
return true;
}
return false;
}
__device__ void setLaplacian(float *laplacian, int *count, int v_id1, int v_id2, float * p1, float * p2){
// Adds the edge (v_id1, v_id2) difference contribution to both endpoints'
// umbrella sums and bumps each endpoint's neighbour count. Atomics are
// required because many triangles share vertices across threads.
for (int c = 0; c < 3; ++c){
const float d = p2[c] - p1[c];
atomicAdd(&laplacian[3 * v_id1 + c], d);
atomicAdd(&laplacian[3 * v_id2 + c], -d);
}
atomicAdd(&count[v_id1], 1);
atomicAdd(&count[v_id2], 1);
}
__global__ void InitMeshLaplacianKernel(const int b, const int maxnverts, float* laplacian, int* count){
// Zeroes the per-vertex laplacian accumulators (3 floats each) and the
// per-vertex neighbour counts for all batches.
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int v_id=threadIdx.x+blockIdx.y*blockDim.x; v_id<maxnverts; v_id+=blockDim.x*gridDim.y){
for (int i_c = 0; i_c < 3; i_c++){
laplacian[i*maxnverts*3+v_id*3+i_c] = 0;
}
// Fix: count was previously re-zeroed once per component inside the loop.
count[i*maxnverts+v_id] = 0;
}
}
}
__global__ void AvgMeshLaplacianKernel(const int b, const int maxnverts, float* laplacian, int* count){
// Divides each vertex's accumulated laplacian by its neighbour count,
// leaving vertices with zero neighbours untouched.
for (int batch=blockIdx.x; batch<b; batch+=gridDim.x){
for (int v=threadIdx.x+blockIdx.y*blockDim.x; v<maxnverts; v+=blockDim.x*gridDim.y){
const int cnt = count[batch*maxnverts+v];
if (cnt != 0){
for (int k = 0; k < 3; k++)
laplacian[batch*maxnverts*3+v*3+k] /= cnt;
}
}
}
}
// Builds per-vertex umbrella Laplacian sums from a triangle mesh: for each
// triangle edge not yet present in the endpoint's neighbour list, record the
// neighbour and accumulate the endpoint-difference contribution.
// Preconditions: nb must be pre-filled with -1 (done in the launcher); each
// vertex may have at most 20 neighbours.
// NOTE(review): the findnb check, the count[] read, and the nb[] writes are
// not performed atomically as a unit, so two threads processing triangles
// that share a vertex can race and duplicate or drop neighbour entries —
// confirm whether this is tolerated downstream.
__global__ void MeshLaplacianKernel(const int b, const int * nverts, const int maxnverts, const float * vertices, const int * ntriangles, const int maxntriangles, const int * triangles,
float * laplacian, int* nb, int* count){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int n_triangles = ntriangles[i];
for (int triangle_id=threadIdx.x+blockIdx.y*blockDim.x; triangle_id < n_triangles; triangle_id+=blockDim.x*gridDim.y){
int v1, v2, v3;
float p1[3], p2[3], p3[3];
// Fetch the triangle's vertex ids and positions for this batch.
getTriangle(&triangles[i*maxntriangles*3], triangle_id, v1, v2, v3);
getPoint(&vertices[i*maxnverts*3], v1, p1);
getPoint(&vertices[i*maxnverts*3], v2, p2);
getPoint(&vertices[i*maxnverts*3], v3, p3);
// Process each of the three edges once per mesh: skip edges already recorded.
if (!findnb(&nb[(i*maxnverts+v1)*20], v2)){
nb[(i*maxnverts+v1)*20+count[i*maxnverts+v1]] = v2;
nb[(i*maxnverts+v2)*20+count[i*maxnverts+v2]] = v1;
setLaplacian(&laplacian[i*maxnverts*3], &count[i*maxnverts], v1, v2, p1, p2);
}
if (!findnb(&nb[(i*maxnverts+v1)*20], v3)){
nb[(i*maxnverts+v1)*20+count[i*maxnverts+v1]] = v3;
nb[(i*maxnverts+v3)*20+count[i*maxnverts+v3]] = v1;
setLaplacian(&laplacian[i*maxnverts*3], &count[i*maxnverts], v1, v3, p1, p3);
}
if (!findnb(&nb[(i*maxnverts+v2)*20], v3)){
nb[(i*maxnverts+v2)*20+count[i*maxnverts+v2]] = v3;
nb[(i*maxnverts+v3)*20+count[i*maxnverts+v3]] = v2;
setLaplacian(&laplacian[i*maxnverts*3], &count[i*maxnverts], v2, v3, p2, p3);
}
}
__syncthreads();
}
}
// Host wrapper: zero the accumulators, mark all neighbour slots empty (-1),
// scatter edge contributions, then divide each vertex sum by its neighbour
// count. All work is issued on the default stream, so it executes in order.
void MeshLaplacianKernelLauncher( \
/*inputs*/ const int b, const int * n_verts, const int maxn_verts, const float * vertices, const int * n_triangles, const int maxn_triangles, const int * triangles, \
/*outputs*/ float * laplacian, int * count, int * nb){
InitMeshLaplacianKernel<<<dim3(2,8,1),512>>>(b, maxn_verts, laplacian, count);
cudaMemset(nb, -1, b*maxn_verts*20*sizeof(int));
MeshLaplacianKernel<<<64,512>>>(b, n_verts, maxn_verts, vertices, n_triangles, maxn_triangles, triangles, laplacian, nb, count);
AvgMeshLaplacianKernel<<<dim3(32,16,1),512>>>(b, maxn_verts, laplacian, count);
}
/****************** Gradient ******************/
// Accumulates the gradient of laplacian[v_id1] with respect to an edge's two
// endpoints: since laplacian[v1] summed (p2 - p1), d/d(v1) = -1 and
// d/d(v2) = +1 per neighbour, so v1 receives -grad and v2 receives +grad.
// NOTE(review): the forward pass also divides by the neighbour count; that
// scaling is applied separately in AvgGradKernel — confirm the two stay consistent.
__device__ void setLaplaciangrad (const float * grad_laplacian, float *grad_verts, int v_id1, int v_id2){
atomicAdd(&grad_verts[3 * v_id1], - 1 * grad_laplacian[3 * v_id1]);
atomicAdd(&grad_verts[3 * v_id1+1], - 1 * grad_laplacian[3 * v_id1+1]);
atomicAdd(&grad_verts[3 * v_id1+2], - 1 * grad_laplacian[3 * v_id1+2]);
atomicAdd(&grad_verts[3 * v_id2], grad_laplacian[3 * v_id1]);
atomicAdd(&grad_verts[3 * v_id2+1], grad_laplacian[3 * v_id1+1]);
atomicAdd(&grad_verts[3 * v_id2+2], grad_laplacian[3 * v_id1+2]);
}
__global__ void MeshLaplacianGradKernel(const int b, const int maxnverts, const int *nverts, const int * nb, const float * grad_laplacian, float* grad_verts){
// Scatters laplacian gradients back to vertices by walking each vertex's
// neighbour list (-1 terminated, at most 20 entries per vertex).
for (int batch=blockIdx.x; batch<b; batch+=gridDim.x){
const int nv = nverts[batch];
for (int v1=threadIdx.x+blockIdx.y*blockDim.x; v1 < nv; v1+=blockDim.x*gridDim.y){
const int *neighbours = &nb[(batch*maxnverts+v1)*20];
for (int slot=0; slot<20; slot++){
const int v2 = neighbours[slot];
if (v2 == -1)
break;
setLaplaciangrad(&grad_laplacian[batch*maxnverts*3], &grad_verts[batch*maxnverts*3], v1, v2);
}
}
}
}
__global__ void AvgGradKernel(const int b, const int maxnverts, float* grad_verts, const int* cumulativeCounts){
// Normalizes each vertex's accumulated gradient by its neighbour count,
// leaving vertices with zero count untouched.
for (int batch=blockIdx.x; batch<b; batch+=gridDim.x){
for (int v=threadIdx.x+blockIdx.y*blockDim.x; v<maxnverts; v+=blockDim.x*gridDim.y){
const int cnt = cumulativeCounts[batch*maxnverts+v];
if (cnt != 0){
for (int k = 0; k < 3; k++)
grad_verts[batch*maxnverts*3+v*3+k] /= cnt;
}
}
}
}
// Host wrapper for the backward pass: clear the output gradient buffer,
// scatter per-edge gradients, then average each vertex's gradient by its
// neighbour count. Default-stream launches execute in order.
void MeshLaplacianGradKernelLauncher(const int b, const int maxnverts, const int * nverts, const float * grad_laplacian, const int * count, const int * nb,
float* grad_verts){
cudaMemset(grad_verts, 0, b*maxnverts*3*sizeof(float));
MeshLaplacianGradKernel<<<dim3(32,16,1),512>>>(b, maxnverts, nverts, nb, grad_laplacian, grad_verts);
AvgGradKernel<<<dim3(32,16,1),512>>>(b, maxnverts, grad_verts, count);
}
#endif
|
22,812 | #include "includes.h"
__global__ void x_calculation(float * x ,float * r,float * r_squared ,int size)
{
// Conjugate-gradient style update x += alpha * r, where the scalar alpha is
// broadcast to every thread from r_squared[0].
const int tid = blockDim.x * blockIdx.x + threadIdx.x ;
if (tid >= size)
return;
x[tid] += r_squared[0] * r[tid] ;
} |
22,813 | /*
* makeEigenvalues()
* float* eigenvalues: Will be populated by the function
* float** eigenvectors: Will be populated by the function
* float* blockHessian: A linear array containing the block Hessian
* matrices in sorted order. Note that these
* have different sizes. Thus block 1 (aXa) will
* occupy slots 0 to a*a-1, block 2 (bXb) will
* occupy slots a*a to b*b-1, and so on.
* float* array1D: Eigenvectors as a linear array.
* int* evecstarts: Array of starting positions of every eigenvector
* in array1D.
* int* blocknums: Array of starting atoms of each block.
* int* blocksize: Array of sizes of each block.
* int N: Total degrees of freedom.
*/
__global__ void makeEigenvalues( float *eigenvalues, float *blockHessian, int *blocknums, int *blocksizes, int *hessiannums, int N, int numblocks ) {
// One thread per degree of freedom (0..N-1): copy that DOF's diagonal entry
// out of its block Hessian.
const int dof = blockIdx.x * blockDim.x + threadIdx.x;
if( dof >= N ) {
return;
}
// Locate the block containing this DOF's atom. blocknums holds starting atom
// numbers (3 DOFs per atom), so the owning block is the last one whose start
// is <= dof/3. NOTE(review): assumes blocknums[0] <= dof/3 for every DOF.
const int atom = dof / 3;
int blk = 0;
while( blk < numblocks && blocknums[blk] <= atom ) {
blk++;
}
blk--;
// Offset of this DOF inside its block; the block spans 3*blocksizes[blk] DOFs.
const int off = dof - 3 * blocknums[blk];
// Diagonal element (off, off) of block blk, which starts at hessiannums[blk].
eigenvalues[dof] = blockHessian[ hessiannums[blk] + off * ( 3 * blocksizes[blk] ) + off ];
}
|
22,814 | #include <stdio.h>
#include <stdlib.h>
const int INF = 1000000000;
int V = 20010;
void input(char *inFileName);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
int n, m; // Number of vertices, edges
int* host_ptr = NULL;
size_t pitch;
// for device
int* device_ptr = NULL;
__global__ void gpu_phase1(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, size_t pitch);
__global__ void gpu_phase2(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, int pos, size_t pitch);
__global__ void gpu_phase3(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, size_t pitch);
int main(int argc, char* argv[])
{
// Usage: prog <input_file> <output_file> <block_size>
// Fix: argv[1..3] were dereferenced without checking argc.
if (argc < 4) {
printf("usage: %s input_file output_file block_size\n", argv[0]);
return 1;
}
input(argv[1]);
int B = atoi(argv[3]);
// allocate memory for device (pitched so each row is aligned)
cudaMallocPitch(&device_ptr, &pitch, V*sizeof(int), V);
cudaMemcpy2D(device_ptr, pitch, host_ptr, V*sizeof(int), V*sizeof(int), V, cudaMemcpyHostToDevice);
block_FW(B);
cudaMemcpy2D(host_ptr, V*sizeof(int), device_ptr, pitch, V*sizeof(int), V, cudaMemcpyDeviceToHost);
cudaFree(device_ptr);
output(argv[2]);
free(host_ptr);
return 0;
}
void input(char *inFileName)
{
// Reads "n m" followed by m directed edges "a b v" into the host adjacency
// matrix (row stride V = n + 10; diagonal 0, missing edges INF).
FILE *infile = fopen(inFileName, "r");
// Fix: fopen/fscanf results were previously unchecked; a bad path or header
// would read garbage into n and m.
if (infile == NULL) {
printf("cannot open input file %s\n", inFileName);
exit(1);
}
if (fscanf(infile, "%d %d", &n, &m) != 2) {
printf("bad input header in %s\n", inFileName);
exit(1);
}
// Malloc host memory
V = n + 10;
host_ptr = (int*)malloc((size_t)V * V * sizeof(int));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) host_ptr[i*V+j] = 0;
else host_ptr[i*V+j] = INF;
}
}
while (--m >= 0) {
int a, b, v;
if (fscanf(infile, "%d %d %d", &a, &b, &v) != 3) {
printf("bad edge record in %s\n", inFileName);
exit(1);
}
host_ptr[a*V+b] = v;
}
fclose(infile);
}
void output(char *outFileName)
{
// Clamps unreachable distances back to INF and writes each row's first n
// ints in binary.
FILE *outfile = fopen(outFileName, "w");
// Fix: fopen was previously unchecked; fwrite on NULL is undefined behavior.
if (outfile == NULL) {
printf("cannot open output file %s\n", outFileName);
exit(1);
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (host_ptr[i*V+j] >= INF)
host_ptr[i*V+j] = INF;
}
fwrite(&host_ptr[i*V], sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b)
{
// Integer ceiling of a/b, computed by biasing the numerator by b-1.
const int biased = a + b - 1;
return biased / b;
}
// Blocked Floyd–Warshall driver. For each round r: update the pivot block
// (phase 1), the blocks in the pivot's row/column (phase 2, pos selects the
// orientation), and all remaining blocks (phase 3). Dynamic shared memory per
// launch holds 1, 2, or 3 BxB int tiles respectively.
// NOTE(review): assumes B*B threads fit the per-block limit (i.e. B <= 32)
// and that 3*B*B ints fit in shared memory — confirm for large B.
void block_FW(int B)
{
int round = ceil(n, B);
dim3 blocks = {1, 1};
dim3 threads = {(unsigned int)B, (unsigned int)B};
printf("B: %d, Round: %d\n", B, round);
for (unsigned int r = 0; r < round; ++r) {
if (r % 10 == 0)
printf("%d %d\n", r, round);
/* Phase 1*/
blocks = {1, 1};
gpu_phase1<<<blocks, threads, B*B*1*sizeof(int)>>>(device_ptr, B, r, r, r, n, pitch/sizeof(int));
/* Phase 2*/
// Blocks strictly left of / above the pivot exist only when r > 0.
if (r > 0) {
// left
blocks = {1, r};
gpu_phase2<<<blocks, threads, B*B*2*sizeof(int)>>>(device_ptr, B, r, r, 0, n, 1, pitch/sizeof(int));
// up
blocks = {r, 1};
gpu_phase2<<<blocks, threads, B*B*2*sizeof(int)>>>(device_ptr, B, r, 0, r, n, 0, pitch/sizeof(int));
}
if (r < round - 1) {
// right
blocks = {1, round - r -1};
gpu_phase2<<<blocks, threads, B*B*2*sizeof(int)>>>(device_ptr, B, r, r, r +1, n, 1, pitch/sizeof(int));
// down
blocks = {round - r -1, 1};
gpu_phase2<<<blocks, threads, B*B*2*sizeof(int)>>>(device_ptr, B, r, r +1, r, n, 0, pitch/sizeof(int));
}
/* Phase 3*/
// The four quadrants around the pivot row/column; degenerate rounds
// (first/last) have only one non-empty quadrant.
if (r == 0) {
// down right
blocks = {round - r -1, round - r -1};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, r +1, r +1, n, pitch/sizeof(int));
}
else if (r == round - 1) {
// upper left
blocks = {r, r};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, 0, 0, n, pitch/sizeof(int));
}
else {
// down right
blocks = {round - r -1, round - r -1};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, r +1, r +1, n, pitch/sizeof(int));
// upper left
blocks = {r, r};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, 0, 0, n, pitch/sizeof(int));
// upper right
blocks = {r, round -r -1};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, 0, r +1, n, pitch/sizeof(int));
// down left
blocks = {round - r -1, r};
gpu_phase3<<<blocks, threads, B*B*3*sizeof(int)>>>(device_ptr, B, r, r +1, 0, n, pitch/sizeof(int));
}
}
}
// Dynamic shared memory, sized at launch (B*B ints for phase 1).
extern __shared__ int shared_mem[];
// Phase 1: relax the pivot block (Round, Round) entirely in shared memory.
// Launch: one BxB thread block; dynamic shared memory = B*B*sizeof(int);
// `pitch` is passed pre-divided by sizeof(int) (see block_FW).
// NOTE(review): the tile load/store indexes dist with i,j up to (Round+1)*B-1,
// which can exceed n — assumes the padded allocation (V = n + 10 rows) covers
// the overhang; confirm for B > 10.
__global__ void gpu_phase1(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, size_t pitch)
{
// Row stride of the pitched allocation, in ints.
int V = pitch;
int tid = threadIdx.y * B + threadIdx.x;
int i = (block_start_x + blockIdx.x) * B + threadIdx.x;
int j = (block_start_y + blockIdx.y) * B + threadIdx.y;
// need self block - (b_i, b_j)
shared_mem[tid] = dist[j*V+i];
__syncthreads();
for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
if (i < n && j < n) {
//===== change new posision by: =====//
// new_i = origin_i - B * B_i
// new_j = origin_j - B * B_j
//===================================//
int k_new = k - B * Round;
int i_new = i - B * Round;
int j_new = j - B * Round;
int tmp = shared_mem[k_new*B+i_new] + shared_mem[j_new*B+k_new];
if (tmp < shared_mem[tid]) {
shared_mem[tid] = tmp;
}
}
__syncthreads();
}
dist[j*V+i] = shared_mem[tid];
}
extern __shared__ int shared_mem[];
__global__ void gpu_phase2(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, int pos, size_t pitch)
{
// Blocked Floyd-Warshall, phase 2: relax the tiles sharing a row or a
// column with the pivot tile. Dynamic shared memory = B*B*2 ints:
// [0, B*B) holds the pivot tile, [B*B, 2*B*B) holds this block's tile.
// `pos` selects the dependency direction (1 and 0 match the two groups
// of call sites above).
int V = pitch; // row stride in elements (caller passes pitch/sizeof(int))
int tid = threadIdx.y * B + threadIdx.x;
int b_i = block_start_x + blockIdx.x; // tile block-column
int b_j = block_start_y + blockIdx.y; // tile block-row
int i = b_i * B + threadIdx.x; // global column
int j = b_j * B + threadIdx.y; // global row
// need self block - (b_i, b_j) & pivot block - (Round, Round)
// NOTE(review): loads and final store unguarded by i<n/j<n; presumably
// the pitched allocation pads the matrix -- confirm.
shared_mem[tid+B*B] = dist[j*V+i];
shared_mem[tid] = dist[(Round*B+threadIdx.y)*V+(Round*B+threadIdx.x)];
__syncthreads();
for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
if (i < n && j < n) {
// up, down
if (pos == 1) {
int k_new_1 = k - B * Round;
int i_new_1 = i - B * Round;
int k_new_2 = k - B * b_i;
int j_new_2 = j - B * b_j;
int tmp = shared_mem[k_new_1*B+i_new_1] + shared_mem[j_new_2*B+k_new_2+B*B];
if (tmp < shared_mem[tid+B*B]) {
shared_mem[tid+B*B] = tmp;
}
}
// left, right
else {
int k_new_1 = k - B * Round;
int j_new_1 = j - B * Round;
int i_new_2 = i - B * b_i;
int k_new_2 = k - B * b_j;
int tmp = shared_mem[k_new_2*B+i_new_2+B*B] + shared_mem[j_new_1*B+k_new_1];
if (tmp < shared_mem[tid+B*B]) {
shared_mem[tid+B*B] = tmp;
}
}
}
__syncthreads(); // later k steps read tile cells updated by other threads
}
dist[j*V+i] = shared_mem[tid+B*B];
}
__global__ void gpu_phase3(int* dist, int B, int Round, int block_start_x, int block_start_y, int n, size_t pitch)
{
// Blocked Floyd-Warshall, phase 3: relax every remaining tile using the
// already-finished pivot-row and pivot-column tiles. Dynamic shared
// memory = B*B*3 ints: [0,B*B) self tile, [B*B,2B*B) the pivot-column
// tile for this row, [2B*B,3B*B) the pivot-row tile for this column.
int V = pitch; // row stride in elements
int tid = threadIdx.y * B + threadIdx.x;
int b_i = block_start_x + blockIdx.x;
int b_j = block_start_y + blockIdx.y;
int i = b_i * B + threadIdx.x; // global column
int j = b_j * B + threadIdx.y; // global row
// need self block - (b_i, b_j) & row / column block
// NOTE(review): loads and final store unguarded by i<n/j<n; presumably
// the pitched allocation pads the matrix -- confirm.
shared_mem[tid] = dist[j*V+i];
shared_mem[tid+B*B] = dist[(b_j*B+threadIdx.y)*V+(Round*B+threadIdx.x)]; // left, right
shared_mem[tid+B*B*2] = dist[(Round*B+threadIdx.y)*V+(b_i*B+threadIdx.x)]; // up , down
__syncthreads();
for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
if (i < n && j < n) {
// left, right
int i_new_1 = i - B * b_i;
int k_new_1 = k - B * Round;
// up, down
int k_new_2 = k - B * Round;
int j_new_2 = j - B * b_j;
int tmp = shared_mem[k_new_1*B+i_new_1+B*B*2] + shared_mem[j_new_2*B+k_new_2+B*B];
if (tmp < shared_mem[tid]) {
shared_mem[tid] = tmp;
}
}
__syncthreads();
}
dist[j*V+i] = shared_mem[tid];
}
|
22,815 | //**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Vecotr adder *
//Description: This program is for testing GPU performance with one *
// stencil. *
// *
// *
//File Name: pb2b_gpu.cu *
//File Version: 1.0 *
//Baseline: Homework_0 *
// *
//Course: ECGR6090- Heterogeneous Computing *
// *
//Programmed by: Roy Liu *
//Under Suppervision of: Dr. Hamed Tabkhi *
// *
//Input file: No *
// *
//Output:Time of program running *
//**********************************************************************
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<cuda_runtime.h>
#define N 10000
#define RADIUS 8
#define BLOCK_SIZE 128
void random_ints(int *r, int n);
// 1D stencil: out[i] = sum of the (1 + 2*RADIUS) input values starting
// at in[i]. The host allocates n + 2*RADIUS elements, so reading up to
// in[i + 2*RADIUS] is in bounds for every computed output i < n.
// Fix: the original did `result += in[i]; in += j;`, walking the base
// pointer by a triangular offset (0,1,3,6,...) instead of summing the
// window, so most outputs were wrong.
__global__ void stencil_1d(int *in, int *out)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int result = 0;
for (int j = 0; j < (1 + 2*RADIUS); j++)
{
result += in[i + j];
}
out[i] = result;
}
int main()
{
// Times a one-shot 1D stencil: fill the input on the host, run the
// kernel once, copy the result back, and report wall-clock time.
//for counting run time
struct timeval start, end;
float timer;
gettimeofday(&start, NULL);
int*in, *d_in, *out, *d_out;
int n;
n = N;
// buffers carry 2*RADIUS extra elements of halo past the n real ones
int size = (n+2*RADIUS)*sizeof(int);
// data initializing
in = (int *)malloc(size); random_ints(in, n);
out = (int *)malloc(size);
//for (int i=0;i<n;i++) printf("%d\n",a[i]);//for testing
cudaMalloc((void**)&d_in, size);
cudaMalloc((void**)&d_out, size);
// CPU TO GPU
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
// Define kernel,block:(1024*1024/512)512 threds each block
// NOTE(review): n/BLOCK_SIZE truncates (10000/128 = 78 blocks = 9984
// threads), so the last n % BLOCK_SIZE = 16 outputs are never written;
// a ceil-div grid plus an in-kernel bound guard would cover them.
dim3 dimGrid(n/BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE); //each block has X threads
// kernel
stencil_1d<<<dimGrid, dimBlock>>>(d_in, d_out);
// blocking copy also synchronizes with the kernel above
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
// cleanup
free(in);
free(out);
cudaFree(d_in);
cudaFree(d_out);
gettimeofday(&end, NULL);
timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("Data number is: %d\nBlocksize is: %d\nRadius is: %d\nRunning time is: %f ms\n", n,BLOCK_SIZE,RADIUS,timer/1000);
return 0;
}
//**********************************************************************
// Function Name: random_ints *
// Description: - Generate random integer *
// Input : None *
// Output : Random integer *
// Return: None *
//**********************************************************************
void random_ints(int* r, int n)
{
// Fill r[0 .. n + 2*RADIUS - 1] (payload plus halo) with pseudo-random
// values, halved so the stencil sums stay well inside int range.
const int total = n + 2 * RADIUS;
for (int idx = 0; idx < total; ++idx)
{
r[idx] = rand() / 2;
}
}
|
22,816 | #include "includes.h"
// Per-row normalized logistic accuracy: one thread per row idx walks its
// row (column-major stride `height`), counts predictions on the correct
// side of 0.5 among labeled entries (t >= 0), and writes the fraction to
// out[idx] (0 when the row has no labeled entries).
// Fixes vs. original: removed the `__syncthreads()` that sat inside a
// divergent `if (idx < height)` branch with a per-thread loop count --
// that is undefined behavior, and no shared memory is used so no barrier
// is needed; 0.5 literals are now float to avoid double promotion.
__global__ void kLogisticCorrectNormalized(float* mat, float* targets, float* out, unsigned int height, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < height) {
float correct = 0;
float total = 0;
float p, t;
for (int i = idx; i < width * height; i += height) {
p = mat[i];
t = targets[i];
// t < 0 marks an unlabeled entry: skip it in both counts
correct += (t < 0) ? 0 : (((t >= 0.5f && p >= 0.5f) || (t < 0.5f && p < 0.5f)) ? 1 : 0);
total += (t < 0) ? 0 : 1;
}
out[idx] = (total > 0) ? (correct / total) : 0;
}
}
22,817 | #include <stdio.h>
#include <cuda.h>
#define N 1024
__global__ void dkernel(unsigned *a, unsigned chunksize) {
// Each thread increments its own contiguous chunk: thread t owns
// a[t*chunksize .. (t+1)*chunksize - 1], so chunks never overlap.
unsigned base = chunksize * threadIdx.x;
for (unsigned k = 0; k < chunksize; ++k) {
a[base + k]++;
}
}
int main() {
// Launch N/chunksize threads, each incrementing a chunksize-element
// slice of the N-element device array.
unsigned *a, chunksize = 32;
cudaMalloc(&a, sizeof(unsigned) * N);
// fix: the original incremented uninitialized device memory; zero it
// first so every element deterministically ends up as 1
cudaMemset(a, 0, sizeof(unsigned) * N);
dkernel<<<1, N/chunksize>>>(a, chunksize);
cudaDeviceSynchronize();
cudaFree(a); // fix: the device allocation was leaked
return 0;
}
|
22,818 | #include <cstring>
#include <fstream>
#include <iostream>
#ifndef SHA256_H
#define SHA256_H
#include <string>
class SHA256
{
// SHA-256 hash (FIPS 180-4 layout): call init(), then update() any
// number of times, then final() to read the 32-byte digest.
protected:
const static unsigned int sha256_k[]; // the 64 round constants
static const unsigned int SHA224_256_BLOCK_SIZE = (512/8); // 64-byte message block
public:
void init();
void update(const unsigned char *message, unsigned int len);
void final(unsigned char *digest); // digest must hold DIGEST_SIZE bytes
static const unsigned int DIGEST_SIZE = ( 256 / 8);
protected:
void transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements);
unsigned int m_tot_len; // bytes already hashed in completed blocks
unsigned int m_len; // bytes currently buffered in m_block
unsigned char m_block[2*SHA224_256_BLOCK_SIZE]; // partial-block buffer
unsigned int m_h[8]; // running hash state
};
std::string sha256(std::string input);
#define SHA2_SHFR(x, n) (x >> n)
#define SHA2_ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define SHA2_ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define SHA2_CH(x, y, z) ((x & y) ^ (~x & z))
#define SHA2_MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define SHA256_F1(x) (SHA2_ROTR(x, 2) ^ SHA2_ROTR(x, 13) ^ SHA2_ROTR(x, 22))
#define SHA256_F2(x) (SHA2_ROTR(x, 6) ^ SHA2_ROTR(x, 11) ^ SHA2_ROTR(x, 25))
#define SHA256_F3(x) (SHA2_ROTR(x, 7) ^ SHA2_ROTR(x, 18) ^ SHA2_SHFR(x, 3))
#define SHA256_F4(x) (SHA2_ROTR(x, 17) ^ SHA2_ROTR(x, 19) ^ SHA2_SHFR(x, 10))
#define SHA2_UNPACK32(x, str) \
{ \
*((str) + 3) = (unsigned char) ((x) ); \
*((str) + 2) = (unsigned char) ((x) >> 8); \
*((str) + 1) = (unsigned char) ((x) >> 16); \
*((str) + 0) = (unsigned char) ((x) >> 24); \
}
#define SHA2_PACK32(str, x) \
{ \
*(x) = ((unsigned int) *((str) + 3) ) \
| ((unsigned int) *((str) + 2) << 8) \
| ((unsigned int) *((str) + 1) << 16) \
| ((unsigned int) *((str) + 0) << 24); \
}
#endif
using std::string;
using std::cout;
using std::endl;
const unsigned int SHA256::sha256_k[64] = //UL = uint32
{0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
__device__ unsigned int t11;
__device__ unsigned int t12;
__global__ void kernel1(unsigned char* sub_block_d, unsigned int *w_d, unsigned int *wv, unsigned int *m_h) {
// Launched with 16 threads: seeds the working variables and unpacks
// part of the message block into the schedule.
int j = threadIdx.x;
// NOTE(review): wv and m_h are allocated as 8-element buffers by the
// caller, but all 16 threads execute this store -- threads 8..15 write
// past the end of wv and read past the end of m_h. Confirm intent.
wv[j] = m_h[j];
if ( j >= 8 ) {
// NOTE(review): reference SHA-256 packs w[0..15]; this `j >= 8` guard
// skips w[0..7], which are then consumed uninitialized -- looks like
// the condition is inverted or should be j < 16.
SHA2_PACK32(&sub_block_d[j << 2], &w_d[j]);
// printf("%d %d\n", sub_block_d[j << 2], w_d[j]);
}
}
__global__ void kernel2(unsigned int *w_d, unsigned int *wv, unsigned int *sha256_k) {
// Launched with 64 threads; intended as the SHA-256 schedule expansion
// plus the 64 compression rounds.
// NOTE(review): the compression recurrence below is strictly sequential
// in the round index, yet all 64 threads run it concurrently with no
// synchronization, and t11/t12 are single __device__ globals shared by
// every thread -- this is a data race and cannot yield a correct hash.
// The schedule expansion (j >= 16) also reads w_d entries written by
// sibling threads without any barrier.
int j = threadIdx.x;
if (j >= 16){
w_d[j] = SHA256_F4(w_d[j - 2]) + w_d[j - 7] + SHA256_F3(w_d[j - 15]) + w_d[j - 16];
}
t11 = wv[7] + SHA256_F2(wv[4]) + SHA2_CH(wv[4], wv[5], wv[6])
+ sha256_k[j] + w_d[j];
t12 = SHA256_F1(wv[0]) + SHA2_MAJ(wv[0], wv[1], wv[2]);
wv[7] = wv[6];
wv[6] = wv[5];
wv[5] = wv[4];
wv[4] = wv[3] + t11;
wv[3] = wv[2];
wv[2] = wv[1];
wv[1] = wv[0];
wv[0] = t11 + t12;
}
__global__ void kernel3(unsigned int *wv, unsigned int *m_h) {
// Fold the working variables back into the running hash state,
// one lane per state word (launched with 8 threads).
const int lane = threadIdx.x;
m_h[lane] += wv[lane];
}
void SHA256::transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements)
{
// Drives the three device kernels over each 64-byte block of `message`.
// Fix vs. original: every device buffer was cudaMalloc'ed INSIDE the
// per-block loop and never freed (one leak per buffer per block). The
// buffers are now allocated once up front and released at the end; the
// per-iteration copies and kernel launches are unchanged.
unsigned int w[64];
unsigned int wv[8];
unsigned int *wv1;
unsigned int *w_d;
unsigned int *sha256_k1;
unsigned int *wv_d;
unsigned int *m_h_d;
const unsigned char *sub_block;
unsigned char *sub_block_d;
int i;
cudaMalloc((void **) &sub_block_d, sizeof(unsigned char) * number_of_elements);
cudaMalloc((void **) &w_d, sizeof(unsigned int) * 64);
cudaMalloc((void **) &wv_d, sizeof(unsigned int) * 8);
cudaMalloc((void **) &m_h_d, sizeof(unsigned int) * 8);
cudaMalloc((void **) &wv1, sizeof(unsigned int) * 8);
cudaMalloc((void **) &sha256_k1, sizeof(unsigned int) * 64);
// round constants never change: upload once
cudaMemcpy(sha256_k1, sha256_k, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
for (i = 0; i < (int) block_nb; i++) {
sub_block = message + (i << 6);
///////////////////////// K 1 ////////////////////////////////
cudaMemcpy(sub_block_d, sub_block, sizeof(unsigned char) * number_of_elements, cudaMemcpyHostToDevice);
// NOTE(review): w is uninitialized on the first iteration, exactly as
// in the original -- preserved, but worth confirming upstream.
cudaMemcpy(w_d, w, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
cudaMemcpy(wv_d, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
cudaMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
kernel1<<<1, 16>>>(sub_block_d, w_d, wv_d, m_h_d);
cudaDeviceSynchronize();
cudaMemcpy(w, w_d, sizeof(unsigned int) * 64, cudaMemcpyDeviceToHost);
cudaMemcpy(wv, wv_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
//////////////////////// K 2 ////////////////////////////////
cudaMemcpy(w_d, w, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
cudaMemcpy(wv1, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
kernel2<<<1, 64>>>(w_d, wv1, sha256_k1);
cudaDeviceSynchronize();
cudaMemcpy(wv, wv1, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(w, w_d, sizeof(unsigned int) * 64, cudaMemcpyDeviceToHost);
//////////////////////////K 3///////////////////////////////
cudaMemcpy(wv1, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
cudaMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
kernel3<<<1, 8>>>(wv1, m_h_d);
cudaDeviceSynchronize();
cudaMemcpy(wv, wv1, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
}
cudaFree(sub_block_d);
cudaFree(w_d);
cudaFree(wv_d);
cudaFree(m_h_d);
cudaFree(wv1);
cudaFree(sha256_k1);
}
void SHA256::init()
{
// SHA-256 initial hash values (FIPS 180-4: first 32 bits of the
// fractional parts of the square roots of the first eight primes).
static const unsigned int H0[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
for (int i = 0; i < 8; ++i)
m_h[i] = H0[i];
m_len = 0;
m_tot_len = 0;
}
void SHA256::update(const unsigned char *message, unsigned int len)
{
// Stream `len` bytes into the hash, buffering any partial 64-byte block
// in m_block between calls (standard SHA-256 update pattern).
unsigned int block_nb;
unsigned int new_len, rem_len, tmp_len;
const unsigned char *shifted_message;
tmp_len = SHA224_256_BLOCK_SIZE - m_len; // room left in the buffered block
rem_len = len < tmp_len ? len : tmp_len;
memcpy(&m_block[m_len], message, rem_len);
if (m_len + len < SHA224_256_BLOCK_SIZE) {
// still no full block: just buffer and return
m_len += len;
return;
}
new_len = len - rem_len;
// fix: this expression was computed twice in the original; the duplicate
// assignment is removed
block_nb = new_len / SHA224_256_BLOCK_SIZE;
shifted_message = message + rem_len;
transform(m_block, 1, SHA224_256_BLOCK_SIZE); // the block this call completed
transform(shifted_message, block_nb, SHA224_256_BLOCK_SIZE); // the full blocks that follow
rem_len = new_len % SHA224_256_BLOCK_SIZE;
memcpy(m_block, &shifted_message[block_nb << 6], rem_len); // stash the tail
m_len = rem_len;
m_tot_len += (block_nb + 1) << 6;
}
void SHA256::final(unsigned char *digest)
{
// Standard SHA-256 finalization: append 0x80, zero-pad, append the
// message bit length, transform the last block(s), then emit the state
// words big-endian into `digest`.
unsigned int block_nb;
unsigned int pm_len;
unsigned int len_b;
int i;
// one extra block when the 9 trailer bytes don't fit the current one
block_nb = (1 + ((SHA224_256_BLOCK_SIZE - 9)
< (m_len % SHA224_256_BLOCK_SIZE)));
len_b = (m_tot_len + m_len) << 3; // total message length in bits
pm_len = block_nb << 6; // padded length in bytes
memset(m_block + m_len, 0, pm_len - m_len);
m_block[m_len] = 0x80;
SHA2_UNPACK32(len_b, m_block + pm_len - 4);
// NOTE(review): block_nb * len_b is passed as number_of_elements (the
// device copy byte count) -- block_nb << 6 looks like the intended
// value; confirm against transform()'s use of that parameter.
transform(m_block, block_nb, block_nb * len_b);
for (i = 0 ; i < 8; i++) {
SHA2_UNPACK32(m_h[i], &digest[i << 2]);
}
}
std::string sha256(std::string input)
{
unsigned char digest[SHA256::DIGEST_SIZE];
memset(digest,0,SHA256::DIGEST_SIZE);
SHA256 ctx = SHA256();
ctx.init();
ctx.update( (unsigned char*)input.c_str(), input.length());
ctx.final(digest);
char buf[2*SHA256::DIGEST_SIZE+1];
buf[2*SHA256::DIGEST_SIZE] = 0;
for (int i = 0; i < SHA256::DIGEST_SIZE; i++)
sprintf(buf+i*2, "%02x", digest[i]);
return std::string(buf);
}
int main(int argc, char *argv[])
{
string input = "apple";
string output1 = sha256(input);
cout << "sha256('"<< input << "'):" << output1 << endl;
return 0;
} |
22,819 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void cal(int *a, int *b, int x1)
{
// One block per element (launched <<<n, 1>>>): b[i] = x1*a[i] + b[i],
// a saxpy-style update in place on b.
const int idx = blockIdx.x;
b[idx] += x1 * a[idx];
}
int main()
{
// Read two n-element integer vectors and a scalar, compute
// b = alpha*a + b on the GPU, and print the result.
int a[20], b[20];
int n, i;
printf("Enter size");
scanf("%d",&n);
// fix: clamp n to the fixed host-array capacity so the input loops
// below cannot overflow a[] / b[]
if (n < 0) n = 0;
if (n > 20) n = 20;
printf("\nEnter set 1 \n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2 \n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int alpha;
printf("Enter AA ");
scanf("%d", &alpha);
// fix: the original also allocated d_c, d_d and d_e, never used them,
// and never freed any device buffer; the dead allocations are removed
// and the live ones are freed below
int *d_a, *d_b;
int size = sizeof(int) * 20;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
cal<<<n, 1>>>(d_a, d_b, alpha);
cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
printf("\nExpansion res \n");
for(i = 0; i < n; i++)
printf("%d ", b[i]);
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
22,820 | #include "includes.h"
__global__ void devicetodevicecopy(double *dphi, double *dpsix, double *dpsiy, double *mphi, double *mpsix, double *mpsiy, unsigned int nx, unsigned int TileSize)
{
// Copy three nx-wide 2D fields (phi, psix, psiy) element-wise from the
// d* buffers into the matching m* buffers, one thread per grid point.
// NOTE(review): there is no bounds guard -- assumes the launch exactly
// tiles the arrays (grid * TileSize == extent in each axis); confirm
// at the call sites.
unsigned int col = blockIdx.x * TileSize + threadIdx.x;
unsigned int row = blockIdx.y * TileSize + threadIdx.y;
unsigned int idx = row * nx + col;
mphi[idx] = dphi[idx];
mpsix[idx] = dpsix[idx];
mpsiy[idx] = dpsiy[idx];
}
22,821 | #include<iostream>
using namespace std;
__global__ void add(int *a,int *b,int *c,int n)
{
// Element-wise c = a + b with a tail guard for threads past n.
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= n)
return;
c[gid] = a[gid] + b[gid];
}
int main()
{
// Add two n-element vectors on the GPU, timing the kernel with events.
cout<<"Enter the no of elements"<<endl;
int n;
cin>>n;
int a[n],b[n],c[n];
for(int i=0;i<n;i++)
{
a[i]=b[i]=i+1;
}
int *ad,*bd,*cd;
int size=n*sizeof(int);
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaMalloc(&ad,size);
cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
cudaMalloc(&bd,size);
cudaMemcpy(bd,b,size,cudaMemcpyHostToDevice);
cudaMalloc(&cd,size);
// fix: the original launched a fixed 256x32 = 8192 threads, silently
// skipping every element past 8191 when n is large; size the grid from
// n with a ceiling division instead (the kernel guards the tail)
dim3 block(32,1);
dim3 grid((n+31)/32,1);
cudaEventRecord(start);
add<<<grid,block>>>(ad,bd,cd,n);
cudaEventRecord(end);
cudaEventSynchronize(end);
float time=0;
cudaEventElapsedTime(&time,start,end);
cudaMemcpy(c,cd,size,cudaMemcpyDeviceToHost);
for(int i=0;i<n;i++)
{
cout<<c[i]<<endl;
}
cout<<"The time required is"<<time<<endl;
// fix: device buffers and events were leaked
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
cudaEventDestroy(start);
cudaEventDestroy(end);
}
|
22,822 | #include "includes.h"
__global__ void Add(float *a, float *b, float *c)
{
// Record the launch geometry into three arrays: the lane within the
// block, the block index, and the global index, guarded against the
// tail beyond N.
const int gid = threadIdx.x + blockDim.x * blockIdx.x;
if (gid >= N)
return;
a[gid] = threadIdx.x; // lane within the block
b[gid] = blockIdx.x;  // block index
c[gid] = gid;         // global thread index
}
22,823 | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "cuda.h"
__device__ void bar(int* p) {
// Each thread zeroes its own slot; distinct threadIdx.x values mean no
// write-write race (this file is a GPUVerify test expected to pass --
// see the //pass header -- so the code is deliberately left as-is).
p[threadIdx.x] = 0;
}
__global__ void foo() {
// 10-element shared scratch, matching --blockDim=10 in the test header;
// passing the decayed pointer through bar() exercises the verifier's
// interprocedural aliasing (--no-inline).
__shared__ int A[10];
int* p = A;
bar(p);
}
|
22,824 | #include <stdio.h>
#define LENGTH 16
#define THREADNUM 4
#define BLOCKNUM 2
static void HandleError(cudaError_t err,
const char *file,
int line) {
// Print a readable diagnostic and abort if a CUDA API call failed.
if (err == cudaSuccess)
return;
printf("%s in %s at line %d\n",
cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
int getThreadNum() {
// Print basic capability info for device 0 and return its maximum
// number of threads per block.
cudaDeviceProp prop;
int count = 0;
HANDLE_ERROR(cudaGetDeviceCount(&count));
printf("gpu num %d\n", count);
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
printf("max thread num: %d\n", prop.maxThreadsPerBlock);
printf("max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void dot_product(float *a, float *b, float *r) {
// Per-block partial dot product: each block accumulates pairwise
// products into shared memory (striding by the total thread count so
// any LENGTH is covered), tree-reduces them, and writes its partial
// sum to r[blockIdx.x]. Assumes blockDim.x == THREADNUM, a power of 2.
const int lane = threadIdx.x;
const int block = blockIdx.x;
const int stride = THREADNUM * BLOCKNUM;
__shared__ float partial[THREADNUM];
partial[lane] = 0;
for (int g = lane + block * blockDim.x; g < LENGTH; g += stride) {
partial[lane] += a[g] * b[g];
}
__syncthreads(); // all partials written before the reduction reads them
for (int half = THREADNUM / 2; half > 0; half /= 2) {
if (lane < half) {
partial[lane] += partial[lane + half];
}
__syncthreads(); // barrier outside the divergent if: all threads reach it
}
if (lane == 0) {
r[block] = partial[0];
}
}
int main(int argc, char* argv[]) {
// Dot product of two LENGTH-element vectors: each block produces a
// partial sum on the device; the host adds the per-block partials.
float a[LENGTH];
float b[LENGTH];
for (int i = 0; i < LENGTH; i++) {
a[i] = i * (i + 1);
b[i] = i * (i - 2);
}
float *aGpu;
cudaMalloc((void**)&aGpu, LENGTH * sizeof(float));
cudaMemcpy(aGpu, a, LENGTH * sizeof(float), cudaMemcpyHostToDevice);
float *bGpu;
cudaMalloc((void**)&bGpu, LENGTH * sizeof(float));
cudaMemcpy(bGpu, b, LENGTH * sizeof(float), cudaMemcpyHostToDevice);
float *rGpu;
cudaMalloc((void**)&rGpu, BLOCKNUM * sizeof(float));
dot_product<<<BLOCKNUM, THREADNUM>>>(aGpu, bGpu, rGpu);
float r[BLOCKNUM];
// blocking copy synchronizes with the kernel before reading results
cudaMemcpy(r, rGpu, BLOCKNUM * sizeof(float), cudaMemcpyDeviceToHost);
float result = 0.0;
for (int i = 0; i < BLOCKNUM; i++) {
printf("r[%d]: %f\n", i, r[i]);
result += r[i];
}
printf("result is: %f\n", result);
// fix: the three device buffers were leaked in the original
cudaFree(aGpu);
cudaFree(bGpu);
cudaFree(rGpu);
return 0;
}
|
22,825 | #include<stdio.h>
#define N 10000
// Kernel definition
__global__
void VecAdd(int* A, int* B, int* C)
{
// Element-wise C = A + B over the N-element vectors.
// Fix: the original used only threadIdx.x, so with more than one block
// every block rewrote the same first-block range and elements past
// blockDim were never computed. Use the global index and guard the tail.
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
//printf("%i ",C[i]);
}
int main()
{
// Add two N-element vectors of ones on the GPU and print the result.
int A[N],B[N],C[N],*d_a,*d_b,*d_c;
int i;
for(i=0;i<N;i++){
A[i]=1;
B[i]=1;
}
cudaMalloc((void**)&d_a,N*sizeof(int));
cudaMalloc((void**)&d_b,N*sizeof(int));
cudaMalloc((void**)&d_c,N*sizeof(int));
cudaMemcpy(d_a,A,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,B,N*sizeof(int),cudaMemcpyHostToDevice);
// (fix: dropped the original's upload of the uninitialized host array C
// -- the kernel overwrites d_c entirely, so that copy only moved garbage)
// Fix: the original launched <<<(N/1024)+1, N>>>, i.e. N = 10000 threads
// per block, which exceeds the 1024-thread hardware limit so the launch
// fails and C comes back unchanged. Use 1024-thread blocks and a
// ceil-div grid instead.
VecAdd<<<(N+1023)/1024, 1024>>>(d_a, d_b, d_c);
cudaMemcpy(C,d_c,N*sizeof(int),cudaMemcpyDeviceToHost);
for(i=0;i<N;i++){
printf("%i ",C[i]);
}
printf("\n");
// fix: device memory was leaked
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
22,826 | #include <cstdio>
#include <cmath>
__global__ void vector_add(double *C, const double *A, const double *B, int N)
{
// Element-wise C = A + B; one thread per element.
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// The grid may be larger than N: guard the tail.
if (tid >= N)
return;
C[tid] = A[tid] + B[tid];
}
int main(void)
{
// Exercise vector_add on a 20-element problem and print the sums.
const int N = 20;
const int ThreadsInBlock = 128;
double *dA, *dB, *dC;
double hA[N], hB[N], hC[N];
// Host test data: hA[i] = i, hB[i] = i^2.
for(int i = 0; i < N; ++i) {
hA[i] = (double) i;
hB[i] = (double) i * i;
}
// Device buffers and uploads.
cudaMalloc((void**)&dA, sizeof(double)*N);
cudaMalloc((void**)&dB, sizeof(double)*N);
cudaMalloc((void**)&dC, sizeof(double)*N);
cudaMemcpy((void*)dA,(void*)hA,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy((void*)dB,(void*)hB,N*sizeof(double),cudaMemcpyHostToDevice);
// A single 128-thread block covers all 20 elements; the kernel's bound
// check disposes of the surplus threads.
vector_add <<<1,ThreadsInBlock>>> (dC,dA,dB,N);
// Blocking copy also synchronizes with the kernel.
cudaMemcpy((void*)hC,(void*)dC,N*sizeof(double),cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
printf("%5.1f\n", hC[i]);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
return 0;
}
22,827 | #include "TerrainModifier.cuh"
__global__ void submerge(float** map, int width, int height){
// Flip the sign of every height value at or below WATERLEVEL, marking
// submerged cells with negative elevation. Strides by the full launch
// extent in both axes, so any launch geometry covers the whole map.
int col0 = threadIdx.x + blockIdx.x * blockDim.x;
int row0 = threadIdx.y + blockIdx.y * blockDim.y;
int colStep = gridDim.x * blockDim.x;
int rowStep = gridDim.y * blockDim.y;
for (int row = row0; row < height; row += rowStep) {
for (int col = col0; col < width; col += colStep) {
float v = map[row][col];
if (v <= WATERLEVEL) {
map[row][col] = -v;
}
}
}
}
void submergeTerrain(float** map, int width, int height) {
// Host wrapper: launch `submerge` over the whole map and wait for it.
// NOTE(review): dim3(BLOCKSIZE, BLOCKSIZE) is passed as the GRID and
// dim3(MAXBLOCKS, MAXBLOCKS) as the BLOCK -- from the names these look
// swapped. The kernel's stride loops keep results correct either way,
// but confirm MAXBLOCKS*MAXBLOCKS <= 1024 or the launch will fail.
submerge<<<dim3(BLOCKSIZE, BLOCKSIZE), dim3(MAXBLOCKS, MAXBLOCKS)>>>(map, width, height);
cudaDeviceSynchronize();
}
22,828 | //Input-------------------------------------------------------------------------------------------------
#define WindowDimension 3 // this is the dimension of the window.
#define PatchSigma 0.01 // this is h squared , mentioned in the report
#define Sigma 0.05 // this is the sigma squared , mentioned in the report
#define FILENAME "images/rasp_noise.csv" // path to the csv of the image you want to use as input
//End of input------------------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//fast exp , sacrifising some accuracy
float expf_fastCPU(float a) {
    // Schraudolph-style fast exponential: synthesize the IEEE-754 bit
    // pattern of approximately e^a from a linear transform, trading a few
    // percent of accuracy for speed.
    union { float f; int x; } bits;
    bits.x = (int) (12102203 * a + 1064866805);
    return bits.f;
}
//Calculate the mean of each pixel , using the moving-window technique
void meansCPU(float* pixels , int *windowSize, int* iconLine, float *means){
// For every pixel of the iconLine x iconLine image, gather its
// windowSize x windowSize neighborhood (falling back to mirrored
// offsets at the borders) and store the Gaussian-weighted mean of that
// patch in means[pixel].
int xIndex = 0;
for(xIndex = 0; xIndex<*iconLine*(*iconLine); xIndex++){
int* index = &xIndex;
int i, j;
int lim = (*windowSize)/2; // window half-width
int dimension = *windowSize;
float result[WindowDimension*WindowDimension]; // gathered patch values
for(i=-lim; i<=lim; i++){
for(j=-lim; j<=lim; j++){
int location = (*index) + i +j*(*iconLine);
// row-wrap / out-of-image tests for the candidate neighbor:
// a horizontal step that changed rows means we ran off an edge
bool outLeftorRight = (location/(*iconLine) != ((*index)/(*iconLine) + j) ) || location<0;
bool outUpOrDown = location/(*iconLine) <0 || location/(*iconLine) >= (*iconLine);
bool ifResult = !(outLeftorRight || outUpOrDown);
int resultIndex = (j+lim)*(*windowSize) + (i+lim);
//if the window is completely inside the image
if(ifResult){
result[resultIndex] = pixels[location];
}
//if part of the window is outside of the image
else{
// try the three mirrored offsets (-i,-j), (-i,+j), (+i,-j)
// in turn until one lands inside the image
location = (*index) - i -j*(*iconLine);
outLeftorRight =location/(*iconLine) != ((*index)/(*iconLine) - j) || location<0;
outUpOrDown = location/(*iconLine) <0 || location/(*iconLine) >= (*iconLine);
if(!outLeftorRight && !outUpOrDown){
result[resultIndex] = pixels[location];
continue;
}
location = (*index) -i +j*(*iconLine);
outLeftorRight =location/(*iconLine) != ((*index)/(*iconLine) + j) || location<0;
outUpOrDown = location/(*iconLine) <0 || location/(*iconLine) >= (*iconLine);
if(!outLeftorRight && !outUpOrDown){
result[resultIndex] = pixels[location];
continue;
}
// last resort: assumed in-bounds -- TODO confirm for corner pixels
location = (*index) +i -j*(*iconLine);
result[resultIndex] = pixels[location];
}
}
}
// Gaussian-weighted average of the gathered patch
float mean=0;
float patchSigma = PatchSigma;
float tmp = 0; // running sum of weights for normalization
for(i=0; i<*windowSize*(*windowSize); i++){
int x = i%dimension - dimension/2;
int y = i/dimension - dimension/2;
float fx = (float)x;
float fy = (float)y;
float arithmitis = fx*fx + fy*fy; // squared distance from window center
float paronomastis = 2*M_PI*patchSigma;
mean = mean + result[i]*expf_fastCPU(-arithmitis/paronomastis)*0.5;
tmp = tmp+expf_fastCPU(-arithmitis/paronomastis)*0.5;
}
mean = mean/tmp;
means[xIndex] = mean;
}
}
//denoise the image using the formulas mentioned in the report
void denoiseCPU(float* pixels, float* sigma, int* imageDimension,int* windowDimension,float* means, float* result){
int xIndex =0;
for(xIndex=0; xIndex<*imageDimension*(*imageDimension); xIndex++){
int windowSize;
int imageSize;
imageSize = *imageDimension*(*imageDimension);
windowSize = *windowDimension*(*windowDimension);
float mean1 = means[xIndex];
int i=0;
float sumW = 0 ;
float sumP = 0;
for(i=0; i<imageSize; i++){
float mean2 = means[i];
float tmp = (mean1 - mean2)*(mean1-mean2)*(-1);
float weight = exp(tmp/(*sigma));
sumW = sumW + weight;
sumP = sumP + weight*pixels[i];
}
sumP = sumP/sumW;
result[xIndex] = sumP;
}
}
//read the csv and put it in a float array
float* readCSVfile(char* filename, int* dimension){
// Parse a square CSV of floats. Returns a malloc'ed dimension*dimension
// array (caller frees) and writes the detected row length to *dimension.
FILE *file;
file = fopen(filename, "r");
int local_dimension = 256; // initial guess, corrected after the first row
float* result = (float*)malloc(local_dimension*local_dimension*sizeof(float)); // dont forget to free the memory afterwards
if(file == NULL){
printf("The file could not be opened/ does not exits");
exit(1);
}
char* line =NULL;
size_t length = 0 ;
ssize_t read;
const char delimeters[] = ", ";
char* number ;
int outer_counter = 0;
// NOTE(review): `!=` binds tighter than `=`, so `read` receives the
// comparison result (0/1), not the byte count its type suggests. The
// loop condition still terminates correctly at EOF, but don't use
// `read` as a length.
while ((read = getline(&line, &length, file) != -1)){
number = strtok(line, delimeters);
float fnumber = atof(number);
int counter = 0;
while( number !=NULL){
result[counter + outer_counter*local_dimension] = fnumber;
counter++;
number = strtok(NULL, delimeters);
if(number != NULL)
fnumber = atof(number);
}
// resize once the actual row width is known (assumes it shows up on
// the first row; later rows reuse the corrected dimension)
if(counter != local_dimension){
local_dimension = counter;
result = (float*)realloc(result, counter*counter*sizeof(float));
}
outer_counter++;
}
fclose(file);
if(line)
free(line);
*dimension = local_dimension;
return result;
}
//save a float array to csv
void floatToCSV(char* filename, int dimension, float* arr){
// Append a dimension x dimension float array to `filename` as CSV,
// one matrix row per output line; NaN entries are replaced with 0
// in-place before writing.
FILE* out = fopen(filename, "a");
if (out == NULL) {
printf("something went wrong when saving");
exit(1);
}
for (int idx = 0; idx < dimension * dimension; idx++) {
if (idx != 0 && idx % dimension == 0)
fputs("\n", out);
if (arr[idx] != arr[idx]) // NaN is the only value unequal to itself
arr[idx] = 0;
char text[10];
sprintf(text, "%f", arr[idx]);
fputs(text, out);
fputs(",", out);
}
fclose(out);
}
int main(void){
// Load the noisy image, compute per-pixel window means, run the
// non-local-means denoiser, time it, and write the result to CSV.
float sigma = Sigma;
int windowDimension = WindowDimension;
int dimension ;
float *image ;
image = readCSVfile(FILENAME, &dimension);
float* means = (float*)malloc(dimension*dimension*sizeof(float));
float* finalPixel = (float*)malloc(sizeof(float)*dimension*dimension);
clock_t start, end;
double cpu_time_used;
start = clock();
//the whole algorithm gets executed here
meansCPU(image, &windowDimension, &dimension, means);
denoiseCPU(image, &sigma, &dimension, &windowDimension, means, finalPixel);
//-------------------------------------
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("program took %f seconds to execute \n", cpu_time_used);
floatToCSV("imageAfter.csv", dimension, finalPixel);
// fix: all three heap buffers were leaked in the original
free(image);
free(means);
free(finalPixel);
return 0;
}
22,829 | #include "includes.h"
__global__ void cunnx_WindowGate_updateGradInput_kernel( float *gradInput, float *error, float* targetCentroids, const float *centroids,const float *input, const float *outputIndice, const float* output, const float* gradOutput, int inputSize, int outputSize, int outputWindowSize, float c, float d, float e, float lr)
{
// One block per sample k: backprop through a window-gate layer.
// Step 1: reduce the centroid gradient over the output window.
// Step 2 (thread 0): take a gradient step on the centroid and publish a
// rescaled target centroid through shared slot [WINDOWGATE_THREADS].
// Step 3: build a Gaussian target around that centroid, accumulate the
// NLL into error[k], and write grad w.r.t. the input distribution.
// Assumes blockDim.x == WINDOWGATE_THREADS (the reduction width) --
// TODO confirm at the launch site.
__shared__ float buffer[WINDOWGATE_THREADS+1];
unsigned int tx = threadIdx.x;
unsigned int k = blockIdx.x;
const float *gradOutput_k = gradOutput + outputWindowSize*k;
const float *output_k = output + outputWindowSize*k;
const float *input_k = input + inputSize*k;
float *gradInput_k = gradInput + inputSize*k;
float centroid = centroids[k];
// get gradient of centroid
buffer[tx] = 0;
for (unsigned int i=tx; i<outputWindowSize; i+=blockDim.x)
{
buffer[tx] += gradOutput_k[i]*output_k[i]*((float)(i+1) - centroid);
}
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
{
int outputIdx = outputIndice[k];
float gradCentroid = buffer[0]*c;
centroid -= (lr*gradCentroid); // gradient step, scaled by lr
centroid += outputIdx-1; // shift from window-relative to absolute
centroid /= (float)(outputSize); // normalize to [0, 1)
targetCentroids[k] = centroid;
buffer[WINDOWGATE_THREADS] = centroid*(float)(inputSize); // broadcast in input units
}
__syncthreads(); // all threads wait for the published target centroid
float targetCentroid = buffer[WINDOWGATE_THREADS];
buffer[tx] = 0;
// target is a gaussian blur
for (int i=tx; i<inputSize; i+=blockDim.x)
{
float target = (float)(i+1)-targetCentroid;
target = d*expf(target*target*e); // e is expected negative for a bell shape -- confirm caller
float input = input_k[i];
// dot product of logProbInput and probTarget (NLL)
buffer[tx] -= logf(input + 0.0000001)*target;
// grad input w.r.t. NLL
gradInput_k[i] = -target/(input + 0.0000001);
}
// add (reduce)
for (unsigned int stride = WINDOWGATE_THREADS >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
if (tx == 0)
error[k] = buffer[tx];
}
22,830 | #include "includes.h"
// Fix: the original macros expanded without parentheses around either the
// arguments or the whole expression, so uses like `max(x & y, z)` or
// `max(a, b) + 1` parsed incorrectly. Fully parenthesized now.
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
struct Edge{
// single packed 64-bit payload per edge (encoding defined by the callers)
long long int x;
};
///*
//*/
__global__ void initialize_active_edges(bool* active_edges, int e){
// Mark all e edges active: one thread per edge, with a tail guard for
// threads past the end of the array.
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= e)
return;
active_edges[gid] = true;
}
22,831 | // Samuel Grenon
// CS 443
// Dr. Mock
// Problem 4:
#include "stdio.h"
#define COLUMNS 8
#define ROWS 8
__global__ void add(int * a, int*b) {
// Per-row tree reduction in GLOBAL memory: block bid destructively sums
// row bid of the ROWS x COLUMNS matrix `a` and writes the row total to
// b[bid]. Assumes blockDim.x == COLUMNS and COLUMNS is a power of two
// (matches the <<<8,8>>> launch in main).
int cacheIndex = threadIdx.x;
int i = blockDim.x/2;
while(i > 0){
if(cacheIndex < i){
// readers (cacheIndex) and writers (cacheIndex + i) are disjoint
// within one step, so only the inter-step barrier below is needed
a[blockIdx.x*COLUMNS+cacheIndex] += a[blockIdx.x*COLUMNS+cacheIndex + i];
}
__syncthreads();
i/=2;
}
if(threadIdx.x == 0) {
b[blockIdx.x] = a[blockIdx.x*COLUMNS]; // slot 0 now holds the row sum
}
}
int main() {
// Sum an 8x8 matrix two ways -- serially on the CPU and with a per-row
// GPU reduction -- and print both totals for comparison.
int a[ROWS][COLUMNS], b[COLUMNS];
int *dev_a;
int *dev_b;
int sum = 0;
int cudaSum = 0;
cudaMalloc((void **)&dev_a, ROWS*COLUMNS*sizeof(int));
cudaMalloc((void **)&dev_b, COLUMNS*sizeof(int));
for (int y = 0; y< ROWS; y++)
for(int x = 0; x < COLUMNS; x++){
a[y][x] = x+y;
sum += a[y][x];
}
printf("The exact sum is: %d \n", sum);
cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
// (fix: the original also uploaded the uninitialized host array b here;
// the kernel overwrites every element of dev_b, so copying that garbage
// served no purpose and is removed)
add<<<8,8>>>(dev_a, dev_b);
cudaMemcpy(b, dev_b, COLUMNS*sizeof(int), cudaMemcpyDeviceToHost);
for(unsigned int i = 0; i < COLUMNS; i++){
cudaSum += b[i];
}
printf("The cuda sum is: %d \n", cudaSum);
cudaFree(dev_a);
cudaFree(dev_b);
}
22,832 | #include "sha512.cuh"
#define ROTRIGHT(a, b) (((a) >> (b)) | ((a) << (64 - (b))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x, 28) ^ ROTRIGHT(x, 34) ^ ROTRIGHT(x, 39))
#define EP1(x) (ROTRIGHT(x, 14) ^ ROTRIGHT(x, 18) ^ ROTRIGHT(x, 41))
#define SIG0(x) (ROTRIGHT(x, 1) ^ ROTRIGHT(x, 8) ^ ((x) >> 7))
#define SIG1(x) (ROTRIGHT(x, 19) ^ ROTRIGHT(x, 61) ^ ((x) >> 6))
__constant__ uint64_t sha512_kernel[] = {
0x428a2f98d728ae22ul, 0x7137449123ef65cdul, 0xb5c0fbcfec4d3b2ful, 0xe9b5dba58189dbbcul, 0x3956c25bf348b538ul,
0x59f111f1b605d019ul, 0x923f82a4af194f9bul, 0xab1c5ed5da6d8118ul, 0xd807aa98a3030242ul, 0x12835b0145706fbeul,
0x243185be4ee4b28cul, 0x550c7dc3d5ffb4e2ul, 0x72be5d74f27b896ful, 0x80deb1fe3b1696b1ul, 0x9bdc06a725c71235ul,
0xc19bf174cf692694ul, 0xe49b69c19ef14ad2ul, 0xefbe4786384f25e3ul, 0x0fc19dc68b8cd5b5ul, 0x240ca1cc77ac9c65ul,
0x2de92c6f592b0275ul, 0x4a7484aa6ea6e483ul, 0x5cb0a9dcbd41fbd4ul, 0x76f988da831153b5ul, 0x983e5152ee66dfabul,
0xa831c66d2db43210ul, 0xb00327c898fb213ful, 0xbf597fc7beef0ee4ul, 0xc6e00bf33da88fc2ul, 0xd5a79147930aa725ul,
0x06ca6351e003826ful, 0x142929670a0e6e70ul, 0x27b70a8546d22ffcul, 0x2e1b21385c26c926ul, 0x4d2c6dfc5ac42aedul,
0x53380d139d95b3dful, 0x650a73548baf63deul, 0x766a0abb3c77b2a8ul, 0x81c2c92e47edaee6ul, 0x92722c851482353bul,
0xa2bfe8a14cf10364ul, 0xa81a664bbc423001ul, 0xc24b8b70d0f89791ul, 0xc76c51a30654be30ul, 0xd192e819d6ef5218ul,
0xd69906245565a910ul, 0xf40e35855771202aul, 0x106aa07032bbd1b8ul, 0x19a4c116b8d2d0c8ul, 0x1e376c085141ab53ul,
0x2748774cdf8eeb99ul, 0x34b0bcb5e19b48a8ul, 0x391c0cb3c5c95a63ul, 0x4ed8aa4ae3418acbul, 0x5b9cca4f7763e373ul,
0x682e6ff3d6b2b8a3ul, 0x748f82ee5defb2fcul, 0x78a5636f43172f60ul, 0x84c87814a1f0ab72ul, 0x8cc702081a6439ecul,
0x90befffa23631e28ul, 0xa4506cebde82bde9ul, 0xbef9a3f7b2c67915ul, 0xc67178f2e372532bul, 0xca273eceea26619cul,
0xd186b8c721c0c207ul, 0xeada7dd6cde0eb1eul, 0xf57d4f7fee6ed178ul, 0x06f067aa72176fbaul, 0x0a637dc5a2c898a6ul,
0x113f9804bef90daeul, 0x1b710b35131c471bul, 0x28db77f523047d84ul, 0x32caab7b40c72493ul, 0x3c9ebe0a15c9bebcul,
0x431d67c49c100d4cul, 0x4cc5d4becb3e42b6ul, 0x597f299cfc657e2aul, 0x5fcb6fab3ad6faecul, 0x6c44198c4a475817ul};
// One SHA-512 compression of a single 128-byte block into ctx->state,
// following FIPS 180-4: 80-word message-schedule expansion followed by
// 80 rounds of the compression function.
__device__ void sha512_transform(Sha512Context *ctx, const uint8_t data[128]) {
uint64_t a, b, c, d, e, f, g, h, i, j, t1, t2, m[80];
// Words 0..15: big-endian load of the input block.
#pragma unroll 16
for (i = 0, j = 0; i < 16; ++i, j += 8) {
m[i] = (static_cast<uint64_t>(data[j + 0]) << 56) | (static_cast<uint64_t>(data[j + 1]) << 48) |
(static_cast<uint64_t>(data[j + 2]) << 40) | (static_cast<uint64_t>(data[j + 3]) << 32) |
(static_cast<uint64_t>(data[j + 4]) << 24) | (static_cast<uint64_t>(data[j + 5]) << 16) |
(static_cast<uint64_t>(data[j + 6]) << 8) | (static_cast<uint64_t>(data[j + 7]));
}
// Words 16..79: schedule expansion (i continues from 16).
#pragma unroll 80
for (; i < 80; ++i) {
m[i] = m[i - 16] + SIG0(m[i - 15]) + SIG1(m[i - 2]) + m[i - 7];
}
// Working variables start from the current chaining state.
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
f = ctx->state[5];
g = ctx->state[6];
h = ctx->state[7];
// 80 compression rounds; sha512_kernel holds the round constants K.
#pragma unroll 80
for (i = 0; i < 80; ++i) {
t1 = h + EP1(e) + CH(e, f, g) + sha512_kernel[i] + m[i];
t2 = EP0(a) + MAJ(a, b, c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
// Fold the working variables back into the chaining state.
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
ctx->state[5] += f;
ctx->state[6] += g;
ctx->state[7] += h;
}
// Reset the context to the standard SHA-512 initial hash values.
__device__ void sha512_init(Sha512Context *ctx) {
    const uint64_t iv[8] = {
        0x6a09e667f3bcc908UL, 0xbb67ae8584caa73bUL,
        0x3c6ef372fe94f82bUL, 0xa54ff53a5f1d36f1UL,
        0x510e527fade682d1UL, 0x9b05688c2b3e6c1fUL,
        0x1f83d9abfb41bd6bUL, 0x5be0cd19137e2179UL};
    ctx->dataLen = 0;
    ctx->bitLen = 0;
    for (int w = 0; w < 8; ++w) {
        ctx->state[w] = iv[w];
    }
}
// Absorb `len` bytes into the context, running one transform for every full
// 128-byte block accumulated in the internal buffer.
// Fix: the original iterated with `auto i = 0` (an int) against a size_t
// bound — signed overflow / truncation for len > INT_MAX; use size_t.
__device__ void sha512_update(Sha512Context *ctx, const uint8_t data[], size_t len) {
    for (size_t i = 0; i < len; ++i) {
        ctx->data[ctx->dataLen] = data[i];
        ctx->dataLen++;
        if (ctx->dataLen == 128) {
            sha512_transform(ctx, ctx->data);
            ctx->bitLen += 1024;
            ctx->dataLen = 0;
        }
    }
}
// Finish the hash: apply the 0x80 / zero padding, append the message length,
// and run the final transform(s). After this, ctx->state holds the digest.
__device__ void sha512_final(Sha512Context *ctx) {
uint32_t i;
i = ctx->dataLen;
// Pad whatever data is left in the buffer.
if (ctx->dataLen < 112) {
ctx->data[i++] = 0x80;
while (i < 112) {
ctx->data[i++] = 0x00;
}
} else {
// Not enough room for the length field: pad out this block, transform,
// then start a fresh block of zeros for the length.
ctx->data[i++] = 0x80;
while (i < 128) {
ctx->data[i++] = 0x00;
}
sha512_transform(ctx, ctx->data);
memset(ctx->data, 0, 112);
}
// Append to the padding the total message's length in bits and transform.
// Only the low 64 bits of SHA-512's 128-bit length field are used here;
// bytes 112..119 are zeroed below.
ctx->bitLen += ctx->dataLen * 8;
ctx->data[127] = ctx->bitLen;
ctx->data[126] = ctx->bitLen >> 8;
ctx->data[125] = ctx->bitLen >> 16;
ctx->data[124] = ctx->bitLen >> 24;
ctx->data[123] = ctx->bitLen >> 32;
ctx->data[122] = ctx->bitLen >> 40;
ctx->data[121] = ctx->bitLen >> 48;
ctx->data[120] = ctx->bitLen >> 56;
memset(ctx->data + 112, 0, 8);
sha512_transform(ctx, ctx->data);
}
// Serialize the eight 64-bit state words into `hash` in big-endian byte
// order (64 output bytes total).
__device__ void sha512_write_output(Sha512Context *ctx, uint8_t hash[]) {
    for (int word = 0; word < 8; ++word) {
        for (int b = 0; b < 8; ++b) {
            hash[word * 8 + b] = (ctx->state[word] >> (56 - b * 8)) & 0x000000ff;
        }
    }
}
|
22,833 | #include "includes.h"
// Per-voxel: add splat values (optionally normalized by accumulated weights)
// to the input volume. One thread per voxel; c_volSize / c_normalize are
// device constants defined elsewhere in the project.
__global__ void kernel_normalize_and_add_to_output(float * dev_vol_in, float * dev_vol_out, float * dev_accumulate_weights, float * dev_accumulate_values)
{
    unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    unsigned int j = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
    unsigned int k = __umul24(blockIdx.z, blockDim.z) + threadIdx.z;
    if (i >= c_volSize.x || j >= c_volSize.y || k >= c_volSize.z)
    {
        return;
    }
    // Index row major into the volume
    long int out_idx = i + (j + k * c_volSize.y) * (c_volSize.x);
    // Fix: use fabsf() and a float literal. The original `abs(...)` can
    // resolve to the integer abs() (truncating the weight to int before the
    // comparison) and `1e-6` forced double arithmetic.
    const float eps = 1e-6f;
    // Divide the output volume's voxels by the accumulated splat weights
    // unless the accumulated splat weights are equal to zero
    if (c_normalize)
    {
        if (fabsf(dev_accumulate_weights[out_idx]) > eps)
            dev_vol_out[out_idx] = dev_vol_in[out_idx] + (dev_accumulate_values[out_idx] / dev_accumulate_weights[out_idx]);
        else
            dev_vol_out[out_idx] = dev_vol_in[out_idx];
    }
    else
        dev_vol_out[out_idx] = dev_vol_in[out_idx] + dev_accumulate_values[out_idx];
}
22,834 | #include <cuda.h>
#include <stdio.h>
#define N 100000
// Single-thread scalar addition: c[0] = a[0] + b[0].
__global__ void kernel_add(int* a, int* b, int* c){
    c[0] = a[0] + b[0];
}
int main(int argc, char** argv){
    // Heap-allocated host scalars mirroring the device buffers.
    int* host_a = (int*) malloc(sizeof(int));
    int* host_b = (int*) malloc(sizeof(int));
    int* host_c = (int*) malloc(sizeof(int));
    if (host_a == NULL || host_b == NULL || host_c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    int* device_a; cudaMalloc((void**) &device_a, sizeof(int));
    int* device_b; cudaMalloc((void**) &device_b, sizeof(int));
    int* device_c; cudaMalloc((void**) &device_c, sizeof(int));
    // Repeat the full host<->device round trip N times (benchmark-style).
    for(int i = 0; i < N; i++){
        *host_a = 7;
        *host_b = 2;
        cudaMemcpy(device_a, host_a, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(device_b, host_b, sizeof(int), cudaMemcpyHostToDevice);
        kernel_add<<<1,1>>>(device_a, device_b, device_c);
        cudaMemcpy(host_c, device_c, sizeof(int), cudaMemcpyDeviceToHost);
    }
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    printf("%d\n", *host_c);
    // The original leaked all three host allocations.
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
|
// Tile `pattern` (pat_size) across `out` (out_size), shifted by `offset`.
__global__ void tile(float* out, int2 out_size, float* pattern, int2 pat_size, int2 offset){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= out_size.x || row >= out_size.y){
        return;
    }
    const int src_col = (col + offset.x) % pat_size.x;
    const int src_row = (row + offset.y) % pat_size.y;
    out[col + out_size.x * row] = pattern[src_col + src_row * pat_size.x];
}
// Add `offset` to each model value; write to `out` only when the shifted
// value is strictly positive (other elements of `out` are left untouched).
__global__ void fuse(float* out, int2 dim, float* model, float offset){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= dim.x || row >= dim.y){
        return;
    }
    const int idx = col + row * dim.x;
    const float shifted = model[idx] + offset;
    if (shifted > 0){
        out[idx] = shifted;
    }
}
|
22,836 | /*
* The Game of Life
*
* a cell is born, if it has exactly three neighbours
* a cell dies of loneliness, if it has less than two neighbours
* a cell dies of overcrowding, if it has more than three neighbours
* a cell survives to the next generation, if it does not die of loneliness
* or overcrowding
*
* In this version, a 2D array of ints is used. A 1 cell is on, a 0 cell is off.
* The game plays a number of steps (given by the input), printing to the screen each time. 'x' printed
* means on, space means off.
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char bool_t;
typedef unsigned char cell_t;
#define TILE_SIZE 8
#define KERNEL_SIZE 3
#define SHARED_MEMORY_SIZE (TILE_SIZE + KERNEL_SIZE - 1)
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Report a failed CUDA API call with file/line context; optionally abort.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/**
* Allocates the board as a 1D array instead of the previous 2D array
* @param flat_size
* @param outer_grid_size
* @return An allocated board of the given flat_size
*/
/**
 * Allocates the board as a 1D array and zeroes the KERNEL_SIZE/2-wide
 * padding border on all four sides (interior cells are left uninitialized
 * for read_file_flat to fill).
 * @param flat_size       total number of cells (outer_grid_size^2)
 * @param outer_grid_size padded grid edge length
 * @return An allocated board of the given flat_size
 */
cell_t *allocate_board_flat(unsigned int flat_size, int outer_grid_size) {
    cell_t *board = (cell_t *) malloc(sizeof(cell_t) * flat_size);
    // Fail fast instead of letting the loop below write through NULL.
    if (board == NULL) {
        fprintf(stderr, "allocate_board_flat: out of memory (%u cells)\n", flat_size);
        exit(EXIT_FAILURE);
    }
    // Since there is no dependency between the indices we can collapse the loop
    // Only if the grid is large enough
#pragma omp parallel for collapse(2) if(outer_grid_size >= 5000)
    for (int i = 0; i < outer_grid_size; ++i) {
        for (int k = 0; k < KERNEL_SIZE / 2; ++k) {
            // Fill first rows
            board[k * outer_grid_size + i] = 0;
            // Fill last rows
            board[(outer_grid_size - (k + 1)) * outer_grid_size + i] = 0;
            // Fill left columns
            board[i * outer_grid_size + k] = 0;
            // Fill right columns
            board[i * outer_grid_size + (outer_grid_size - (k + 1))] = 0;
        }
    }
    return board;
}
/**
* Processes one step of the game of life
* @param d_board The original board
* @param d_newboard The new board
* @param inner_size The inner grid size
* @param outer_size The padded outer grid size
*/
// Expects blockDim == (TILE_SIZE, TILE_SIZE); the grid tiles the inner
// (unpadded) board. Each block stages a (TILE_SIZE + KERNEL_SIZE - 1)^2
// window of the padded board into shared memory, then applies the
// Game-of-Life rules to its TILE_SIZE^2 interior cells.
__global__ void playKernelSMDynamic(const cell_t *d_board, cell_t *d_newboard, int inner_size, int outer_size) {
unsigned short bx = blockIdx.x;
unsigned short by = blockIdx.y;
unsigned short tx = threadIdx.x;
unsigned short ty = threadIdx.y;
// Calculate the row and col for the output array
unsigned short row = by * TILE_SIZE + ty + (KERNEL_SIZE / 2);
unsigned short col = bx * TILE_SIZE + tx + (KERNEL_SIZE / 2);
__shared__ cell_t ds_neighbors[SHARED_MEMORY_SIZE][SHARED_MEMORY_SIZE];
// These indices are the inner grid's indices
// For example, with a 5000x5000 padded on each side, we get a 5002x5002 grid
// To access the index (0, 0) of the inner grid, we must access the index (1, 1) of the padded grid
unsigned short idx_inner_x = tx + (KERNEL_SIZE / 2);
unsigned short idx_inner_y = ty + (KERNEL_SIZE / 2);
unsigned short blockIndex = ty + tx * TILE_SIZE;
// Using unsigned short reduces the duration of each kernel by ~100 us (~930 us to ~830 us)
// Here, each thread is responsible for loading between one and two values into the shared memory array
for (unsigned short incr = blockIndex; incr < SHARED_MEMORY_SIZE * SHARED_MEMORY_SIZE; incr += TILE_SIZE * TILE_SIZE) {
unsigned short ry = incr % SHARED_MEMORY_SIZE;
unsigned short rx = incr / SHARED_MEMORY_SIZE;
unsigned short gy = ry + by * TILE_SIZE;
unsigned short gx = rx + bx * TILE_SIZE;
// Required to avoid accessing out of bounds
if (gy < outer_size && gx < outer_size) {
ds_neighbors[ry][rx] = d_board[gy * outer_size + gx];
}
}
// Required so we don't fill the outer padded grid
if (row > inner_size || col > inner_size) {
return;
}
// Sync threads now, no need to wait for the threads that exit
__syncthreads();
unsigned short a = 0;
// Instead of using the serial function that had multiple conditions checking for boundaries, simply loop over all
// neighbors as the shared memory array is padded on each side too
for (unsigned short j = 0; j < KERNEL_SIZE; ++j) {
for (unsigned short i = 0; i < KERNEL_SIZE; ++i) {
a += ds_neighbors[j + idx_inner_y - (KERNEL_SIZE / 2)][i + idx_inner_x - (KERNEL_SIZE / 2)];
}
}
// The loop above included the cell itself; remove it to get the true
// neighbour count.
a -= ds_neighbors[idx_inner_y][idx_inner_x];
// Life rules: 2 neighbours -> unchanged, 3 -> alive, otherwise -> dead.
if (a == 2)
d_newboard[row * outer_size + col] = ds_neighbors[idx_inner_y][idx_inner_x];
if (a == 3)
d_newboard[row * outer_size + col] = 1;
if (a < 2)
d_newboard[row * outer_size + col] = 0;
if (a > 3)
d_newboard[row * outer_size + col] = 0;
}
/**
* Print the game of the life board from a flat 1D array
* @param board The board
* @param inner_size The inner grid size
* @param outer_size The padded outer grid size
*/
/**
 * Print the game of life board from a flat 1D array: live cells print 'x',
 * dead cells print a space. Only the inner (unpadded) region is printed.
 * @param board      The board (padded, row-major)
 * @param inner_size The inner grid size
 * @param outer_size The padded outer grid size
 */
void print_flat(cell_t *board, int inner_size, int outer_size) {
    for (int row = 0; row < inner_size; row++) {
        for (int col = 0; col < inner_size; col++) {
            cell_t alive = board[(row + (KERNEL_SIZE / 2)) * outer_size + (col + (KERNEL_SIZE / 2))];
            printf("%c", alive ? 'x' : ' ');
        }
        printf("\n");
    }
}
/**
* Read the game of the life board into a flat 1D array from a file
* @param f The file to read
* @param board The allocated 1D array
* @param inner_size The inner grid size
* @param outer_size The padded outer grid size
*/
/**
 * Read the game of life board into a flat 1D array from a file. Cells not
 * covered by a line (short lines, missing lines) are filled as dead, so the
 * inner region is always fully initialized.
 *
 * Fixes vs the original:
 *  - `strlen(s) - 1` underflowed to SIZE_MAX on an empty line, making every
 *    column read out of bounds of `s`; the newline is now stripped safely.
 *  - fgets() failure (EOF / short file) is handled as an all-dead row.
 *  - the line buffer `s` was leaked.
 *
 * @param f The file to read
 * @param board The allocated 1D array
 * @param inner_size The inner grid size
 * @param outer_size The padded outer grid size
 */
void read_file_flat(FILE *f, cell_t *board, int inner_size, int outer_size) {
    int i, j;
    size_t len;
    char *s = (char *) malloc(inner_size + 10);
    if (s == NULL) {
        fprintf(stderr, "read_file_flat: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (j = 0; j < inner_size; j++) {
        /* get a string; treat a missing line as an empty one */
        if (fgets(s, inner_size + 10, f) == NULL) {
            s[0] = '\0';
        }
        // fgets keeps the trailing '\n'; drop it from the usable length
        len = strlen(s);
        if (len > 0 && s[len - 1] == '\n') {
            len--;
        }
        /* copy the string to the life board */
        for (i = 0; i < inner_size; i++) {
            // If i is less than the line's length, read the value at that
            // index, otherwise fill with a blank (0). Only the 'inner' part
            // is written; the padding border stays zero.
            board[(j + (KERNEL_SIZE / 2)) * outer_size + (i + (KERNEL_SIZE / 2))] = i < len ? s[i] == 'x' : 0;
        }
    }
    free(s);
}
int main() {
    // Host variables
    unsigned short size, steps, i, grid_size, outer_grid_size;
    unsigned int flat_size;
    FILE *f_in;
    cell_t *h_prev;
    bool_t writeOutput = 1, evenSteps;
    // Device variables
    cell_t *d_prev, *d_next;
    f_in = stdin;
    // Read the input file header.
    // Fix: the original used "%d %d" with unsigned short targets — scanf
    // then writes int-sized values into 2-byte objects (undefined
    // behavior). "%hu" is the matching conversion; also check the result.
    if (fscanf(f_in, "%hu %hu", &size, &steps) != 2) {
        fprintf(stderr, "failed to read grid size and step count\n");
        return EXIT_FAILURE;
    }
    // Create a border around the grid to avoid dealing with boundary conditions
    // Assuming a KERNEL_SIZE of 3, a 5000x5000 grid will be padded with 1 extra row/column on each side, resulting
    // in a 5002x5002 grid
    outer_grid_size = size + (2 * (KERNEL_SIZE / 2));
    flat_size = outer_grid_size * outer_grid_size;
    evenSteps = steps % 2 == 0;
    // Allocate a 'flat' array instead of the previous 2D array so the memory is contiguous for faster accesses
    h_prev = allocate_board_flat(flat_size, outer_grid_size);
    read_file_flat(f_in, h_prev, size, outer_grid_size);
    fclose(f_in);
    // Assuming we only have square grids, the grid size is the same for both dimensions
    // Round up so a grid size that's not a multiple of TILE_SIZE is still fully covered
    grid_size = int(ceilf((float) size / TILE_SIZE));
    dim3 dimGrid(grid_size, grid_size, 1);
    // In our case, a TILE_SIZE of 8 gives the best results, with 16 and 32 being slightly slower (+ ~20 ms)
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    // Allocate device arrays
    gpuErrchk(cudaMalloc((void **) &d_prev, flat_size * sizeof(cell_t)));
    gpuErrchk(cudaMalloc((void **) &d_next, flat_size * sizeof(cell_t)));
    // Copy the data from the host array to the device array
    gpuErrchk(cudaMemcpy(d_prev, h_prev, flat_size * sizeof(cell_t), cudaMemcpyHostToDevice));
    for (i = 0; i < int(ceilf((float) steps / 2)); i++) {
        // Instead of using cudaMemcpy and a buffer or swapping pointers, run the same kernel with the variables
        // inverted; the data remains on the GPU at all times
        playKernelSMDynamic<<<dimGrid, dimBlock>>>(d_prev, d_next, size, outer_grid_size);
        // Only execute the second half-step when it is actually needed
        // (e.g. 500 steps divides by 2, but 11 steps would otherwise run 12)
        if (evenSteps || (2 * i + 1) < steps) {
            playKernelSMDynamic<<<dimGrid, dimBlock>>>(d_next, d_prev, size, outer_grid_size);
        }
    }
    // Copy data back from the device array to the host array
    gpuErrchk(cudaMemcpy(h_prev, evenSteps ? d_prev : d_next, flat_size * sizeof(cell_t), cudaMemcpyDeviceToHost));
    // Deallocate device arrays
    gpuErrchk(cudaFree(d_next));
    gpuErrchk(cudaFree(d_prev));
    if (writeOutput) {
        print_flat(h_prev, size, outer_grid_size);
    }
    free(h_prev);
    return EXIT_SUCCESS;
}
|
22,837 | #include <iostream>
#include <cstring>
#include <fstream>
#include <algorithm>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define EPS 1e-3
//#define WRITE_TO_FILE
using namespace std;
// Error handler for CUDA API calls
// Print the failing call's error string with file/line context and abort.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( error ) (HandleError( error, __FILE__, __LINE__ ))
// One Jacobi sweep of the tridiagonal system using a shared-memory tile.
// U has Nn+2 entries (one ghost cell at each end); valid unknowns are
// U[1..Nn]. Each 1024-thread block stages its slice plus two halo cells.
//
// Fixes vs the original:
//  - __syncthreads() was inside the divergent `tid<Nn+1` branch — undefined
//    behavior whenever Nn is not a multiple of the block size.
//  - in a partial last block, the right neighbour of the last active thread
//    was never loaded into shared memory (read of uninitialized data), and
//    the stid==1024 halo load could read past the end of U.
__global__ void iter_kernel_shared(double *U,double *Unext,double *M,double *F,int Nn,double* err)
{
    int tid=threadIdx.x+blockIdx.x*blockDim.x+1;
    int stid=threadIdx.x+1;
    __shared__ double Utemp[1026];
    // Stage the tile; guard every global read individually so each element a
    // computing thread will touch is valid (U is indexable up to Nn+1).
    if(tid<Nn+2)
        Utemp[stid]=U[tid];
    if(stid==1)
        Utemp[0]=U[tid-1];
    if(stid==1024 && tid+1<Nn+2)
        Utemp[1025]=U[tid+1];
    // Barrier reached by ALL threads of the block, divergence-free.
    __syncthreads();
    if(tid<Nn+1)
    {
        double unext;
        unext=Utemp[stid]+1.0/M[(tid-1)*3+1]*(F[tid]-M[(tid-1)*3]*Utemp[stid-1]-M[(tid-1)*3+1]*Utemp[stid]-M[(tid-1)*3+2]*Utemp[stid+1]);
        Unext[tid]=unext;
        err[tid]=abs(unext-Utemp[stid]);
    }
}
// One Jacobi sweep reading U directly from global memory (reference version
// of iter_kernel_shared). Writes the updated value and the pointwise change.
__global__ void iter_kernel(double *U,double *Unext,double *F,double *M,int Nn,double* err);
__global__ void iter_kernel(double *U,double *Unext,double *M,double *F,int Nn,double* err)
{
    const int tid = threadIdx.x + blockIdx.x*blockDim.x + 1;
    if (tid >= Nn+1)
        return;
    const double lower = M[(tid-1)*3];
    const double diag  = M[(tid-1)*3+1];
    const double upper = M[(tid-1)*3+2];
    const double residual = F[tid] - lower*U[tid-1] - diag*U[tid] - upper*U[tid+1];
    const double unext = U[tid] + 1.0/diag*residual;
    Unext[tid] = unext;
    err[tid] = abs(unext - U[tid]);
}
// GPU solver for the 1-D heat equation on [0,L] up to time T with implicit
// Euler steps of size tau on N intervals; each step's tridiagonal system is
// solved by Jacobi iteration (shared-memory kernel variant). Returns the
// elapsed GPU time in seconds (CUDA events report ms; scaled by 1e-3).
float solveGPUshared(double L, double T, double tau, int N)
{
#ifdef WRITE_TO_FILE
ofstream ofile("../datagpu.dat");
ofile.precision(16);
int counter=0, writeeach=1;
#endif
cudaEvent_t start,stop;
float gputime=0;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
// U/Unext: device solution buffers with 2 ghost cells; Uloc: host mirror.
double *U,*Unext,*Uloc;
double *M,*Mdev,*Fdev,*errdev;
int Nn=N+1;
int Nplus=Nn+2;
double h=L/N,t=0.0;
size_t size=Nplus*sizeof(double);
size_t sizeM=3*Nn*sizeof(double);
Uloc=new double[Nplus];
M=new double[Nn*3];
double maxerr;
HANDLE_ERROR( cudaMalloc(&U,size) );
HANDLE_ERROR( cudaMalloc(&Unext,size) );
HANDLE_ERROR( cudaMalloc(&Mdev,sizeM) );
HANDLE_ERROR( cudaMalloc(&Fdev,size) );
HANDLE_ERROR( cudaMalloc(&errdev,size) );
thrust::device_ptr<double> err_ptr = thrust::device_pointer_cast(errdev);
// Tridiagonal matrix stored row-wise as [lower, diag, upper] triples;
// first and last rows encode the boundary conditions.
M[0]=0.0;
M[1]=1.0;
M[2]=0.0;
for(int i=1;i<Nn-1;i++)
{
M[i*3]=-tau/(h*h);
M[i*3+1]=1.0+2.0*tau/(h*h);
M[i*3+2]=-tau/(h*h);
}
M[(Nn-1)*3]=-2.0*tau/(h*h);
M[(Nn-1)*3+1]=1.0+2.0*tau/(h*h);
M[(Nn-1)*3+2]=0.0;
HANDLE_ERROR( cudaMemcpy(Mdev,M,sizeM,cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemset(U,0,size) );
HANDLE_ERROR( cudaMemset(Fdev,0,size) );
memset(Uloc,0,size);
dim3 threads(1024,1,1),blocks(Nn%1024==0?Nn/1024:Nn/1024+1,1,1);
HANDLE_ERROR( cudaEventRecord(start) );
while(t<T-0.5*tau)
{
// Right-hand side for this step: previous solution plus boundary terms.
HANDLE_ERROR( cudaMemcpy(Fdev,U,size,cudaMemcpyDeviceToDevice) );
double a=0.0;
// NOTE(review): Uloc is refreshed from the device only when WRITE_TO_FILE
// is defined, so this flux boundary term appears to use a stale (zero)
// value, unlike solveCPU which uses the current U[N] — confirm intended.
double b=5.0*2.0*tau/h+Uloc[N+1];
HANDLE_ERROR( cudaMemcpy(&Fdev[1],&a,sizeof(double),cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(&Fdev[N+1],&b,sizeof(double),cudaMemcpyHostToDevice) );
// Jacobi sweeps until the max pointwise update drops below EPS.
do{
iter_kernel_shared<<<blocks,threads>>>(U,Unext,Mdev,Fdev,Nn,errdev);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
thrust::device_ptr<double> max_ptr = thrust::max_element(err_ptr+1, err_ptr + Nn+1);
maxerr=max_ptr[0];
swap(U,Unext);
}while(maxerr>EPS);
t+=tau;
#ifdef WRITE_TO_FILE
if(counter%writeeach==0)
{
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<Nn;i++)
ofile<<Uloc[i+1]<<endl;
ofile<<endl;
ofile<<endl;
}
counter++;
#endif
}
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stop) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&gputime,start,stop) );
#ifdef WRITE_TO_FILE
ofile.close();
#endif
delete[] Uloc;
delete[] M;
HANDLE_ERROR( cudaFree(U) );
HANDLE_ERROR( cudaFree(Unext) );
HANDLE_ERROR( cudaFree(Mdev) );
HANDLE_ERROR( cudaFree(Fdev) );
HANDLE_ERROR( cudaFree(errdev) );
HANDLE_ERROR( cudaEventDestroy(start) );
HANDLE_ERROR( cudaEventDestroy(stop) );
return 1e-3*gputime;
}
// Same solver as solveGPUshared but using the plain global-memory Jacobi
// kernel (iter_kernel). Returns the elapsed GPU time in seconds.
float solveGPU(double L, double T, double tau, int N)
{
#ifdef WRITE_TO_FILE
ofstream ofile("../datagpu.dat");
ofile.precision(16);
int counter=0, writeeach=1;
#endif
cudaEvent_t start,stop;
float gputime=0;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
// U/Unext: device solution buffers with 2 ghost cells; Uloc: host mirror.
double *U,*Unext,*Uloc;
double *M,*Mdev,*Fdev,*errdev;
int Nn=N+1;
int Nplus=Nn+2;
double h=L/N,t=0.0;
size_t size=Nplus*sizeof(double);
size_t sizeM=3*Nn*sizeof(double);
Uloc=new double[Nplus];
M=new double[Nn*3];
double maxerr;
HANDLE_ERROR( cudaMalloc(&U,size) );
HANDLE_ERROR( cudaMalloc(&Unext,size) );
HANDLE_ERROR( cudaMalloc(&Mdev,sizeM) );
HANDLE_ERROR( cudaMalloc(&Fdev,size) );
HANDLE_ERROR( cudaMalloc(&errdev,size) );
thrust::device_ptr<double> err_ptr = thrust::device_pointer_cast(errdev);
// Tridiagonal matrix stored row-wise as [lower, diag, upper] triples.
M[0]=0.0;
M[1]=1.0;
M[2]=0.0;
for(int i=1;i<Nn-1;i++)
{
M[i*3]=-tau/(h*h);
M[i*3+1]=1.0+2.0*tau/(h*h);
M[i*3+2]=-tau/(h*h);
}
M[(Nn-1)*3]=-2.0*tau/(h*h);
M[(Nn-1)*3+1]=1.0+2.0*tau/(h*h);
M[(Nn-1)*3+2]=0.0;
HANDLE_ERROR( cudaMemcpy(Mdev,M,sizeM,cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemset(U,0,size) );
memset(Uloc,0,size);
dim3 threads(1024,1,1),blocks(Nn%1024==0?Nn/1024:Nn/1024+1,1,1);
HANDLE_ERROR( cudaEventRecord(start) );
while(t<T-0.5*tau)
{
// Right-hand side for this step: previous solution plus boundary terms.
HANDLE_ERROR( cudaMemcpy(Fdev,U,size,cudaMemcpyDeviceToDevice) );
double a=0.0;
// NOTE(review): Uloc is refreshed from the device only when WRITE_TO_FILE
// is defined, so this boundary term appears to use a stale (zero) value,
// unlike solveCPU which uses the current U[N] — confirm intended.
double b=5.0*2.0*tau/h+Uloc[N+1];
HANDLE_ERROR( cudaMemcpy(&Fdev[1],&a,sizeof(double),cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(&Fdev[N+1],&b,sizeof(double),cudaMemcpyHostToDevice) );
// Jacobi sweeps until the max pointwise update drops below EPS.
do{
iter_kernel<<<blocks,threads>>>(U,Unext,Mdev,Fdev,Nn,errdev);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
thrust::device_ptr<double> max_ptr = thrust::max_element(err_ptr+1, err_ptr + Nn+1);
maxerr=max_ptr[0];
swap(U,Unext);
}while(maxerr>EPS);
t+=tau;
#ifdef WRITE_TO_FILE
if(counter%writeeach==0)
{
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<Nn;i++)
ofile<<Uloc[i+1]<<endl;
ofile<<endl;
ofile<<endl;
}
counter++;
#endif
}
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stop) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&gputime,start,stop) );
#ifdef WRITE_TO_FILE
ofile.close();
#endif
delete[] Uloc;
delete[] M;
HANDLE_ERROR( cudaFree(U) );
HANDLE_ERROR( cudaFree(Unext) );
HANDLE_ERROR( cudaFree(Mdev) );
HANDLE_ERROR( cudaFree(Fdev) );
HANDLE_ERROR( cudaFree(errdev) );
HANDLE_ERROR( cudaEventDestroy(start) );
HANDLE_ERROR( cudaEventDestroy(stop) );
return 1e-3*gputime;
}
// Serial reference solver: same implicit-Euler / Jacobi scheme as the GPU
// versions, run on the host. Returns the elapsed CPU time in seconds.
float solveCPU(double L, double T, double tau, int N)
{
#ifdef WRITE_TO_FILE
ofstream ofile("../datacpu.dat");
ofile.precision(16);
int counter=0, writeeach=1;
#endif
float cputime=0;
double *U,*Unext,*F;
double *M;
int Nn=N+1;
double h=L/N,t=0.0;
F=new double[Nn];
U=new double[Nn];
Unext=new double[Nn];
M=new double[Nn*3];
double maxerr;
// Tridiagonal matrix stored row-wise as [lower, diag, upper] triples.
M[0]=0.0;
M[1]=1.0;
M[2]=0.0;
for(int i=1;i<Nn-1;i++)
{
M[i*3]=-tau/(h*h);
M[i*3+1]=1.0+2.0*tau/(h*h);
M[i*3+2]=-tau/(h*h);
}
M[(Nn-1)*3]=-2.0*tau/(h*h);
M[(Nn-1)*3+1]=1.0+2.0*tau/(h*h);
M[(Nn-1)*3+2]=0.0;
memset(U,0,Nn*sizeof(double));
cputime=clock();
while(t<T-0.5*tau)
{
// Right-hand side: Dirichlet value at the left end, flux term (using the
// CURRENT solution value U[N]) at the right end.
F[0]=0.0;
F[N]=5.0*2.0*tau/h+U[N];
for(int i=1;i<Nn-1;i++)
F[i]=U[i];
// Jacobi sweeps until the max pointwise update drops below EPS.
do{
maxerr=0;
Unext[0]=U[0]+1.0/M[1]*(F[0]-M[1]*U[0]-M[2]*U[1]);
for(int i=1;i<Nn-1;i++)
Unext[i]=U[i]+1.0/M[i*3+1]*(F[i]-M[i*3]*U[i-1]-M[i*3+1]*U[i]-M[i*3+2]*U[i+1]);
Unext[Nn-1]=U[Nn-1]+1.0/M[(Nn-1)*3+1]*(F[Nn-1]-M[(Nn-1)*3]*U[Nn-2]-M[(Nn-1)*3+1]*U[Nn-1]);
for(int i=0;i<Nn;i++)
{
double err=abs(Unext[i]-U[i]);
if(err>maxerr)maxerr=err;
}
swap(U,Unext);
}while(maxerr>EPS);
t+=tau;
#ifdef WRITE_TO_FILE
if(counter%writeeach==0)
{
for(int i=0;i<Nn;i++)
ofile<<U[i]<<endl;
ofile<<endl;
ofile<<endl;
}
counter++;
#endif
}
// clock() delta converted to seconds (stored in a float accumulator).
cputime=(double)(clock()-cputime)/CLOCKS_PER_SEC;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
delete[] U;
delete[] Unext;
delete[] M;
delete[] F;
return cputime;
}
// Run all three solvers on the same problem and report the best speedup.
int main()
{
    float gpu,gpushared,cpu;
    gpu=solveGPU(1.0,50.0,0.01,1000000);
    cout<<"GPU Time: "<<gpu<<endl;
    gpushared=solveGPUshared(1.0,50.0,0.01,1000000);
    // Fix: the original printed "GPU Time" for both runs, making the two
    // timings indistinguishable in the output.
    cout<<"GPU Shared Time: "<<gpushared<<endl;
    cpu=solveCPU(1.0,50.0,0.01,1000000);
    cout<<"CPU Time: "<<cpu<<endl;
    cout<<"Max ratio:"<<cpu/min(gpu,gpushared)<<endl;
    return 0;
}
|
22,838 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (launched with a single
 * thread). Chains transcendental ops through `comp` and prints the final
 * value; the exact operation sequence is significant for reproducing
 * compiler/floating-point behavior, so it must not be simplified. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
float tmp_1 = coshf(acosf(var_3 + log10f(cosf((-1.9336E-44f + (-1.9337E-20f * +0.0f * var_4))))));
comp = tmp_1 * var_5 * log10f(-1.5986E36f / expf(acosf(asinf((var_6 + (var_7 * var_8 + +1.4863E9f))))));
for (int i=0; i < var_1; ++i) {
float tmp_2 = -1.4378E-41f;
comp += tmp_2 - (-1.2722E-41f + (var_9 - (+0.0f + var_10 - (var_11 * var_12))));
comp += (var_13 * var_14);
comp = coshf((-1.5624E-44f * -1.8369E-37f / (var_15 + -1.9608E1f + var_16)));
}
if (comp == (-1.6344E-37f / +1.6938E36f)) {
comp += +1.9019E-41f + +1.6335E-36f * var_17 / +0.0f + -1.3284E-42f;
float tmp_3 = +1.5643E36f;
comp += tmp_3 / cosf((+1.1861E-44f - +1.8515E2f));
}
for (int i=0; i < var_2; ++i) {
comp += +0.0f + +1.4281E-37f;
comp = +1.1717E-44f + var_18 * var_19;
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array with every entry set to v.
 * Returns NULL on allocation failure (the original wrote through a
 * potentially-NULL pointer). Caller owns and must free() the result. */
float* initPointer(float v) {
    const int count = 10;
    float *ret = (float*) malloc(sizeof(float) * count);
    if (ret == NULL)
        return NULL;
    for (int i = 0; i < count; ++i)
        ret[i] = v;
    return ret;
}
int main(int argc, char** argv) {
    /* Program variables */
    // The kernel consumes 20 command-line values; fail with a usage message
    // instead of dereferencing missing argv entries (the original
    // segfaulted when fewer arguments were supplied).
    if (argc < 21) {
        fprintf(stderr, "usage: %s v1 v2 ... v20\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
    cudaDeviceSynchronize();
    return 0;
}
|
22,839 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
#include <stdlib.h>
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })
// Per-CUDA-block max + argmax over one logical row of `block_dim` floats.
// Launched with 1024 threads per block (see host wrapper); lanes past
// block_dim are padded with -inf. OFFSET_BANK staggers shared-memory
// indices to reduce bank conflicts. x_argmax may be NULL (max only).
__global__ void blockreduce_argmax_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *x_max,
uint32_t *x_argmax)
{
__shared__ float cache[1024 + 32];
__shared__ uint32_t cache_idx[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
// Padding lanes: -inf never wins the max comparison below.
cache[OFFSET_BANK(tid)] = -CUDART_INF_F;
}
cache_idx[OFFSET_BANK(tid)] = tid;
__syncthreads();
// Interleaved tree reduction; each step keeps the larger value and carries
// its original index along.
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim && cache[OFFSET_BANK(tid)] < cache[OFFSET_BANK(tid + s)]) {
cache[OFFSET_BANK(tid)] = cache[OFFSET_BANK(tid + s)];
cache_idx[OFFSET_BANK(tid)] = cache_idx[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
// Lane 0 publishes the row result (OFFSET_BANK(0) == 0).
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
x_max[block] = cache[0];
if (NULL != x_argmax) {
x_argmax[block] = cache_idx[0];
}
}
}
}
// Launch one fixed 1024-thread CUDA block per reduction row. Requires
// block_dim <= 1024; xs_argmax may be NULL when only maxima are needed.
extern "C" void arraydiff_cuda_kernel_blockreduce_max_argmax_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *xs,
    float *xs_max,
    uint32_t *xs_argmax,
    cudaStream_t stream)
{
  dim3 grid(num_blocks);
  dim3 threads(1024);
  blockreduce_argmax_kernel<<<grid, threads, 0, stream>>>(
      block_dim, num_blocks, xs, xs_max, xs_argmax);
}
/*__global__ void blockreduce_sum_kernel(
uint32_t block_dim,
uint32_t num_blocks,
const float *xs,
float *xs_sum)
{
__shared__ float cache[1024 + 32];
uint32_t tid = threadIdx.x;
uint32_t block = blockIdx.x;
uint32_t i = tid + block * block_dim;
if (tid < block_dim && block < num_blocks) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = 0.0f;
}
__syncthreads();
for (uint32_t s = 1; s < blockDim.x; s *= 2) {
if (tid < block_dim && block < num_blocks) {
if (tid % (2*s) == 0 && (tid + s) < block_dim) {
cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < block_dim && block < num_blocks) {
if (tid == 0) {
xs_sum[block] = cache[0];
}
}
}*/
// Per-CUDA-block sum over one logical row of `block_dim` floats. Stages one
// value per thread (zero-padded past block_dim), delegates the reduction to
// the shared helper, then lane 0 writes the block total.
__global__ void blockreduce_sum_kernel(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *xs,
    float *xs_sum)
{
  __shared__ float cache[1024];
  const uint32_t tid = threadIdx.x;
  const uint32_t block = blockIdx.x;
  const bool active = tid < block_dim && block < num_blocks;
  cache[tid] = active ? xs[tid + block_dim * block] : 0.0f;
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  if (active && tid == 0) {
    xs_sum[block] = cache[0];
  }
}
// Launch one fixed 1024-thread CUDA block per reduction row.
// Requires block_dim <= 1024.
extern "C" void arraydiff_cuda_kernel_blockreduce_sum_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *xs,
    float *xs_sum,
    cudaStream_t stream)
{
  dim3 grid(num_blocks);
  dim3 threads(1024);
  blockreduce_sum_kernel<<<grid, threads, 0, stream>>>(
      block_dim, num_blocks, xs, xs_sum);
}
// Gather: y[row] = x[index[row] + dim * row], one thread per batch row.
__global__ void reduce_index_fwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *x,
    const uint32_t *index,
    float *y)
{
  const uint32_t row = threadIdx.x + blockDim.x * blockIdx.x;
  if (row >= batch_sz) {
    return;
  }
  y[row] = x[index[row] + dim * row];
}
// One thread per batch row; 1024-thread blocks with a ceil-divided grid.
extern "C" void arraydiff_cuda_kernel_reduce_index_fwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *x,
    const uint32_t *index,
    float *y,
    cudaStream_t stream)
{
  const size_t threads = 1024;
  const size_t blocks = (batch_sz + threads - 1) / threads;
  reduce_index_fwd_f32_kernel<<<blocks, threads, 0, stream>>>(
      dim, batch_sz, x, index, y);
}
// Scatter-accumulate gradient: dx[index[row] + dim * row] += dy[row].
// Each row touches a distinct dx element, so no atomics are needed.
__global__ void reduce_index_bwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *dy,
    const uint32_t *index,
    float *dx)
{
  const uint32_t row = threadIdx.x + blockDim.x * blockIdx.x;
  if (row >= batch_sz) {
    return;
  }
  dx[index[row] + dim * row] += dy[row];
}
// One thread per batch row; 1024-thread blocks with a ceil-divided grid.
extern "C" void arraydiff_cuda_kernel_reduce_index_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *dy,
    const uint32_t *index,
    float *dx,
    cudaStream_t stream)
{
  const size_t threads = 1024;
  const size_t blocks = (batch_sz + threads - 1) / threads;
  reduce_index_bwd_f32_kernel<<<blocks, threads, 0, stream>>>(
      dim, batch_sz, dy, index, dx);
}
|
22,840 | #include <iostream>
#include <chrono>
using test_t = uint64_t;
constexpr std::size_t N = 5000;
constexpr std::size_t block_size = 1 << 7;
constexpr std::size_t num_threads = 240 * block_size;
constexpr std::size_t test_count = 1 << 16;
__constant__ test_t const_mem[N];
// Baseline kernel: executes the same loop structure as the memory-reading
// benchmarks but touches no input array, so its runtime approximates the
// pure loop/arithmetic overhead that the host subtracts from the others.
template <std::size_t test_count>
__global__ void read_dummy(test_t* const result){
    const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t acc = static_cast<test_t>(0);
    for(std::size_t rep = 0; rep < test_count; rep++){
        for(std::size_t n = 0; n < N; n++){
            acc += static_cast<test_t>(1.9);
        }
    }
    // Store the sum so the loops cannot be optimized away entirely.
    result[gid] = acc;
}
// Reads the entire __constant__ array test_count times per thread and
// accumulates, storing the sum so the reads are not dead-code eliminated.
template <std::size_t test_count>
__global__ void constant_read(test_t* const result){
    const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t acc = static_cast<test_t>(0);
    for(std::size_t rep = 0; rep < test_count; rep++){
        for(std::size_t n = 0; n < N; n++){
            acc += const_mem[n];
        }
    }
    result[gid] = acc;
}
// Reads the same N-element global array test_count times per thread via
// ordinary loads and accumulates the values.
template <std::size_t test_count>
__global__ void global_read(const test_t* const mem, test_t* const result){
    const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t acc = static_cast<test_t>(0);
    for(std::size_t rep = 0; rep < test_count; rep++){
        for(std::size_t n = 0; n < N; n++){
            acc += mem[n];
        }
    }
    result[gid] = acc;
}
// Same access pattern as global_read, but forces loads through the
// read-only data cache with __ldg.
template <std::size_t test_count>
__global__ void global_ldg_read(const test_t* const mem, test_t* const result){
    const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t acc = static_cast<test_t>(0);
    for(std::size_t rep = 0; rep < test_count; rep++){
        for(std::size_t n = 0; n < N; n++){
            acc += __ldg(&mem[n]);
        }
    }
    result[gid] = acc;
}
// Like global_ldg_read, but each block reads its own private N-element
// slice of the input (offset blockIdx.x * N), so blocks do not share the
// same cache lines.
template <std::size_t test_count>
__global__ void global_ldg_dist_read(const test_t* const mem_head, test_t* const result){
    const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
    const test_t* const slice = mem_head + blockIdx.x * N;
    test_t acc = static_cast<test_t>(0);
    for(std::size_t rep = 0; rep < test_count; rep++){
        for(std::size_t n = 0; n < N; n++){
            acc += __ldg(&slice[n]);
        }
    }
    result[gid] = acc;
}
// Stages the __constant__ array into shared memory each repetition, then
// sums it out of shared memory.
//
// Fixes vs. the original:
//  * The staging loop used the *global* thread id as its starting offset,
//    so every block except block 0 skipped smem[0 .. gid-1] and then read
//    uninitialized shared memory. The cooperative copy must be indexed by
//    the block-local threadIdx.x.
//  * There was no __syncthreads() between the cooperative copy and the
//    reads (nor before the next repetition overwrites smem) — a data race.
template <std::size_t test_count>
__global__ void constant_shared_read(test_t* const result){
    const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t sum = static_cast<test_t>(0);
    __shared__ test_t smem[N];
    for(std::size_t i = 0; i < test_count; i++){
        // First, stage constant memory -> smem cooperatively.
        for(std::size_t index = threadIdx.x; index < N; index += block_size){
            smem[index] = const_mem[index];
        }
        __syncthreads();  // copy must complete before any thread reads
        for(std::size_t n = 0; n < N; n++){
            sum += smem[n];
        }
        __syncthreads();  // reads must finish before the next copy
    }
    result[tid] = sum;
}
// Stages a global-memory array into shared memory each repetition, then
// sums it out of shared memory.
//
// Fixes vs. the original:
//  * The staging loop used the *global* thread id as its starting offset,
//    so every block except block 0 skipped smem[0 .. gid-1] and then read
//    uninitialized shared memory. The cooperative copy must be indexed by
//    the block-local threadIdx.x.
//  * There was no __syncthreads() between the cooperative copy and the
//    reads (nor before the next repetition overwrites smem) — a data race.
template <std::size_t test_count>
__global__ void global_shared_read(const test_t* const mem, test_t* const result){
    const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    test_t sum = static_cast<test_t>(0);
    __shared__ test_t smem[N];
    for(std::size_t i = 0; i < test_count; i++){
        // First, stage gmem -> smem cooperatively.
        for(std::size_t index = threadIdx.x; index < N; index += block_size){
            smem[index] = mem[index];
        }
        __syncthreads();  // copy must complete before any thread reads
        for(std::size_t n = 0; n < N; n++){
            sum += smem[n];
        }
        __syncthreads();  // reads must finish before the next copy
    }
    result[tid] = sum;
}
// Invokes `func` once and returns the wall-clock time it took, in seconds
// (microsecond resolution).
template <class Func>
double get_elapsed_time(Func func){
    namespace chr = std::chrono;
    const auto t_begin = chr::system_clock::now();
    func();
    const auto t_end = chr::system_clock::now();
    const auto us = chr::duration_cast<chr::microseconds>(t_end - t_begin).count();
    return us / 1.e6;
}
// Benchmarks effective read bandwidth of several memory paths (constant,
// global, __ldg, per-block global slices, shared-memory staging).
// Each section times the measured kernel (t0) and a no-memory dummy kernel
// (t1), then reports bandwidth computed from t0 - t1 (loop overhead
// subtracted). The d_global buffers are never initialized; the values read
// are arbitrary, which is acceptable for a pure bandwidth measurement.
int main(){
    // Converts a net elapsed time [s] into GiB/s for a nominal volume of
    // num_threads * N * test_count elements of sizeof(test_t) bytes.
    auto get_speed = [](double dt){return (num_threads * N * test_count * sizeof(test_t) / dt / (1lu<<30));};
    {
        // --- constant memory ---
        test_t *d_result;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        const auto t0 = get_elapsed_time(
            [&d_result](){
                constant_read<test_count><<<(num_threads + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result](){
                // NOTE(review): this overhead baseline launches a grid sized
                // by N, while the measured kernel above used num_threads; the
                // "dist ldg" and "shared" sections below launch both with the
                // same grid. Verify which is intended — subtracting t1 from
                // t0 assumes comparable launch configurations.
                read_dummy<test_count><<<(N + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"constant : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
    }
    {
        // --- plain global-memory loads ---
        test_t *d_result;
        test_t *d_global;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        cudaMalloc(reinterpret_cast<void**>(&d_global), N * sizeof(test_t));
        const auto t0 = get_elapsed_time(
            [&d_result, &d_global](){
                global_read<test_count><<<(num_threads + block_size - 1)/block_size, block_size>>>(d_global, d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result, &d_global](){
                // NOTE(review): same N-sized baseline grid as above.
                read_dummy<test_count><<<(N + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"global : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
        cudaFree(d_global);
    }
    {
        // --- global loads through the read-only cache (__ldg) ---
        test_t *d_result;
        test_t *d_global;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        cudaMalloc(reinterpret_cast<void**>(&d_global), N * sizeof(test_t));
        const auto t0 = get_elapsed_time(
            [&d_result, &d_global](){
                global_ldg_read<test_count><<<(num_threads + block_size - 1)/block_size, block_size>>>(d_global, d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result, &d_global](){
                // NOTE(review): same N-sized baseline grid as above.
                read_dummy<test_count><<<(N + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"global ldg : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
        cudaFree(d_global);
    }
    {
        // --- __ldg with one private N-element slice per block ---
        constexpr auto grid_size = (num_threads + block_size - 1)/block_size;
        test_t *d_result;
        test_t *d_global;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        // One slice per block, hence the * grid_size.
        cudaMalloc(reinterpret_cast<void**>(&d_global), N * sizeof(test_t) * grid_size);
        const auto t0 = get_elapsed_time(
            [&d_result, &d_global](){
                global_ldg_dist_read<test_count><<<grid_size, block_size>>>(d_global, d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result, &d_global](){
                read_dummy<test_count><<<grid_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"dist ldg : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
        cudaFree(d_global);
    }
    {
        // --- constant memory staged through shared memory ---
        test_t *d_result;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        const auto t0 = get_elapsed_time(
            [&d_result](){
                constant_shared_read<test_count><<<(num_threads + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result](){
                // NOTE(review): same N-sized baseline grid as above.
                read_dummy<test_count><<<(N + block_size - 1)/block_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"s constant : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
    }
    {
        // --- global memory staged through shared memory ---
        constexpr auto grid_size = (num_threads + block_size - 1)/block_size;
        test_t *d_result;
        test_t *d_global;
        cudaMalloc(reinterpret_cast<void**>(&d_result), num_threads * sizeof(test_t));
        cudaMalloc(reinterpret_cast<void**>(&d_global), N * sizeof(test_t));
        const auto t0 = get_elapsed_time(
            [&d_result, &d_global](){
                global_shared_read<test_count><<<grid_size, block_size>>>(d_global, d_result);
                cudaDeviceSynchronize();
            });
        const auto t1 = get_elapsed_time(
            [&d_result, &d_global](){
                read_dummy<test_count><<<grid_size, block_size>>>(d_result);
                cudaDeviceSynchronize();
            });
        std::cout<<"shared : "<<get_speed(t0 - t1)<<" GB/s"<<std::endl;
        cudaFree(d_result);
        cudaFree(d_global);
    }
}
|
22,841 | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
#include <cufft.h>
#include <math_constants.h>
//Round a / b to nearest higher integer value
// Ceiling division: smallest integer >= a/b (for positive a and b).
int cuda_iDivUp(int a, int b)
{
    return (a + b - 1) / b;
}
// complex math functions
// Complex conjugate of a float2 interpreted as (re, im).
__device__
float2 conjugate(float2 arg)
{
    float2 r = arg;
    r.y = -r.y;
    return r;
}
// Euler's formula: e^{i*arg} = cos(arg) + i*sin(arg).
__device__
float2 complex_exp(float arg)
{
    float2 r;
    r.x = cosf(arg);
    r.y = sinf(arg);
    return r;
}
// Component-wise complex addition.
__device__
float2 complex_add(float2 a, float2 b)
{
    float2 r;
    r.x = a.x + b.x;
    r.y = a.y + b.y;
    return r;
}
// Complex product: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
__device__
float2 complex_mult(float2 ab, float2 cd)
{
    float2 r;
    r.x = ab.x * cd.x - ab.y * cd.y;
    r.y = ab.x * cd.y + ab.y * cd.x;
    return r;
}
// generate wave heightfield at time t based on initial heightfield and dispersion relationship
// Builds the time-dependent frequency-space heightfield from the initial
// spectrum h0 using the deep-water dispersion relation w(k) = sqrt(g*|k|):
//   H(k,t) = h0(k) * e^{i w t} + conj(h0(mirror)) * e^{-i w t}
// One thread per output texel; the grid may overshoot width/height, so all
// memory accesses are guarded by the bounds check below.
__global__ void generateSpectrumKernel(float2* h0, float2 *ht, unsigned int width, unsigned int height, float t, float patchSize)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int i = y*width+x;
    // calculate wave-vector coordinates for this texel
    float2 k;
    k.x = CUDART_PI_F * x / (float) patchSize;
    k.y = 2.0f * CUDART_PI_F * y / (float) patchSize;
    // calculate dispersion w(k) = sqrt(9.81 * |k|)
    float k_len = sqrtf(k.x*k.x + k.y*k.y);
    float w = sqrtf(9.81f * k_len);
    if ((x < width) && (y < height)) {
        float2 h0_k = h0[i];
        // Mirrored sample standing in for h0(-k).
        // NOTE(review): only y is mirrored here ((height-1)-y), not x —
        // confirm this matches the layout produced by the h0 generator.
        float2 h0_mk = h0[(((height-1)-y)*width)+x];
        float2 h_tilda = complex_add( complex_mult(h0_k, complex_exp(w * t)),
                                      complex_mult(conjugate(h0_mk),
                                                   complex_exp(-w * t)) );
        // output frequency-space complex values
        ht[i] = h_tilda;
    }
}
// generate slope by partial differences in spatial domain
// Computes a slope field from heightfield h via central differences in the
// spatial domain; border texels get zero slope.
//
// Fix: the original wrote slopeOut[i] unconditionally, so threads with
// x >= width or y >= height (launched by the rounded-up grid) wrote out of
// bounds. All accesses are now guarded by an explicit bounds check.
__global__ void calculateSlopeKernel(float* h, float2 *slopeOut, unsigned int width, unsigned int height)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    if ((x >= width) || (y >= height)) return;  // grid overshoot guard
    unsigned int i = y*width+x;
    float2 slope = make_float2(0.0f, 0.0f);
    if ((x > 0) && (y > 0) && (x < width-1) && (y < height-1)) {
        slope.x = h[i+1] - h[i-1];          // d/dx, central difference
        slope.y = h[i+width] - h[i-width];  // d/dy, central difference
    }
    slopeOut[i] = slope;
}
// Launches generateSpectrumKernel over a width x height domain in 8x8 tiles
// (grid rounded up; the kernel bounds-checks the overshoot).
extern "C"
void cudaGenerateSpectrumKernel(float2* d_h0, float2 *d_ht,
                                unsigned int width, unsigned int height,
                                float animTime, float patchSize)
{
    const dim3 threads(8, 8, 1);
    const dim3 blocks(cuda_iDivUp(width, threads.x), cuda_iDivUp(height, threads.y), 1);
    generateSpectrumKernel<<<blocks, threads>>>(d_h0, d_ht, width, height, animTime, patchSize);
}
// Launches calculateSlopeKernel over a width x height domain in 8x8 tiles.
extern "C"
void cudaCalculateSlopeKernel( float* hptr, float2 *slopeOut,
                               unsigned int width, unsigned int height)
{
    const dim3 threads(8, 8, 1);
    const dim3 blocks(cuda_iDivUp(width, threads.x), cuda_iDivUp(height, threads.y), 1);
    calculateSlopeKernel<<<blocks, threads>>>(hptr, slopeOut, width, height);
}
|
22,842 | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdlib.h>
#include<stdio.h>
#include<string.h>
// Each thread copies the `size`-character source string `a` into its own
// slot of the output buffer: thread t fills b[t*size .. t*size + size - 1].
__global__ void multipleStrings(char* a , char* b,int size)
{
    const int base = threadIdx.x * size;
    for(int k = 0; k < size; k++)
    {
        b[base + k] = a[k];
    }
}
// Reads a string and a repeat count n, copies the string to the GPU, has
// each of n threads write one copy into the output buffer, then prints the
// concatenation and the elapsed GPU time.
//
// Fixes vs. the original:
//  * b was never NUL-terminated — the device only writes n*size characters,
//    so printf("%s") / strlen read past the end of the buffer (UB).
//  * host buffers and the CUDA events were leaked.
int main()
{
    cudaError_t error;
    int n;
    int size;
    printf("Enter the value of n \n");
    scanf("%d",&n);
    printf("Enter the size of the string \n");
    scanf("%d",&size);
    char *a = (char*)malloc(sizeof(char)*(size+1));
    printf("Enter the string \n");
    scanf("%s",a);
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    char *b = (char*)malloc(sizeof(char)*(n*size+1));
    char *d_a , *d_b;
    int size1 = sizeof(char)*(size+1);   // source string + terminator
    int size2 = sizeof(char)*(size*n+1); // n copies + terminator
    cudaMalloc((void**)&d_a,size1);
    cudaMalloc((void**)&d_b,size2);
    error = cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
    multipleStrings<<<1,n>>>(d_a,d_b,size);
    cudaMemcpy(b,d_b,size2,cudaMemcpyDeviceToHost);
    b[n*size] = '\0';  // device never wrote the terminator
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    printf("string = %s \n",b);
    printf("Time taken = %f \n",elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    free(a);
    free(b);
    return 0;
}
|
22,843 | #include <iostream>
#include <stdio.h>
#define N 100
#define ITERS 5
// 5-point stencil: b[x,y] = (sum of existing up/down/left/right neighbours
// of a[x,y]) / 4. Missing border neighbours contribute 0 while the divisor
// stays 4, matching the original behaviour. One block per grid point
// (launched as an N x N grid of single-thread blocks).
//
// Fix: the accumulator and divisor were double literals (0.0, 4.0), which
// silently promoted the arithmetic to double; use float literals instead.
__global__ void stencil(float* a, float* b) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * N;
    float acc = 0.0f;
    if (y > 0)   acc += a[(y-1)*N + x];
    if (y < N-1) acc += a[(y+1)*N + x];
    if (x > 0)   acc += a[y*N + (x-1)];
    if (x < N-1) acc += a[y*N + (x+1)];
    b[offset] = acc * 0.25f;  // exact: division by a power of two
}
// Copies one element per block from `from` to `to` (grid is N x N blocks).
__global__ void copy(float* to, float* from) {
    const int idx = blockIdx.x + blockIdx.y * N;
    to[idx] = from[idx];
}
// Runs ITERS Jacobi-style stencil sweeps on an N x N grid, then prints the
// resulting grid and its sum.
//
// Fixes vs. the original:
//  * dev_b was seeded by copying the *uninitialized* host array b (reading
//    indeterminate memory); use cudaMemset instead — the stencil overwrites
//    every element anyway.
//  * removed an empty nested loop that printed nothing.
int main() {
    float a[N*N], b[N*N];
    float *dev_a, *dev_b;
    dim3 blocks(N, N);
    cudaMalloc((void**)&dev_a, N*N*sizeof(float));
    cudaMalloc((void**)&dev_b, N*N*sizeof(float));
    // initial condition: a[i][j] = i + j
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i*N+j] = static_cast<float>(i+j);
        }
    }
    cudaMemcpy(dev_a, a, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(dev_b, 0, N*N*sizeof(float));
    for (int num_it = 0; num_it < ITERS; num_it++) {
        stencil<<<blocks, 1>>>(dev_a, dev_b);   // b <- stencil(a)
        copy<<<blocks, 1>>>(dev_a, dev_b);      // a <- b for next sweep
    }
    cudaMemcpy(b, dev_b, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    // two leading blank lines, preserving the original output format
    std::cout << std::endl;
    std::cout << std::endl;
    // print the result grid and accumulate its sum
    float sum = 0.0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            sum += b[i*N+j];
            std::cout << b[i*N+j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << "sum is " << sum << std::endl;
    cudaFree(dev_a);
    cudaFree(dev_b);
    return 0;
}
|
22,844 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
int row = 480000;
int col = 464;
float *gmat, *gsum;
// For each row k (rows distributed over threads with a stride loop):
// computes the row mean into sum[k] and subtracts it from every element of
// that row (mean-centering).
//
// Fix: the original accumulated directly into sum[k] with +=, so the result
// depended on sum[] arriving zero-initialized from the host (it was passed
// uninitialized malloc memory). Accumulate in a register initialized to
// zero and overwrite sum[k] instead.
__global__ void get_average(float *arr, float *sum, int row, int col){
    for(int k = threadIdx.x; k < row; k += blockDim.x){
        float acc = 0.0f;
        for(int q = 0; q < col; q++){
            acc += arr[col*k + q];
        }
        const float mean = acc / col;
        sum[k] = mean;
        for(int q = 0; q < col; q++){
            arr[col*k + q] -= mean;
        }
    }
}
// Loads a row x col matrix from matrix.csv, mean-centers every row on the
// GPU, and writes the centered matrix to B1.csv.
//
// Fixes vs. the original:
//  * sum_array was passed to the kernel uninitialized while the kernel
//    accumulates with += — allocate it zeroed with calloc.
//  * input/output FILE handles were never closed; host and device buffers
//    were leaked.
int main(){
    char *record, *line;
    char buffer[400000];
    int i = 0;
    int grid = 1;
    int block = 512;
    float *arr = (float*)malloc(row * col * sizeof(float));
    // zero-initialized: the kernel accumulates row sums with +=
    float *sum_array = (float*)calloc(row, sizeof(float));
    FILE *fstream = fopen("matrix.csv","r");
    if(fstream == NULL){
        printf("\n file opening failed ");
        return -1;
    }
    printf("Start load csv\n");
    while((line = fgets(buffer, sizeof(buffer),fstream))!= NULL){
        record = strtok(line, ",");
        while(record != NULL){
            float a = atof(record);
            arr[i++] = a;
            record = strtok(NULL, ",");
        }
    }
    fclose(fstream);
    printf("Load Finish%lf\n",arr[5]);
    // Create GPU memory
    int err= cudaMalloc((void**)&gmat, row*col*sizeof(float));
    printf("err is %d\n",err);
    cudaMalloc((void**)&gsum, row*sizeof(float));
    cudaMemcpy(gmat, arr, row*col*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(gsum, sum_array, row*sizeof(float), cudaMemcpyHostToDevice);
    printf("Start calaute\n");
    // Use Cuda kernel
    get_average<<<grid,block>>>(gmat, gsum, row, col);
    cudaMemcpy(arr, gmat, row*col*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(sum_array, gsum, row*sizeof(float), cudaMemcpyDeviceToHost);
    // write the mean-centered matrix, one row per line, row index first
    printf("Now write\n");
    FILE *fp;
    fp = fopen("B1.csv","w+");
    for(int r = 0; r < row; r++){
        fprintf(fp,"\n%d", r+1);
        for(int c = 0; c < col; c++)
            fprintf(fp,",%lf ", arr[c + r*col]);
    }
    fclose(fp);
    cudaFree(gmat);
    cudaFree(gsum);
    free(arr);
    free(sum_array);
    return 0;
}
|
22,845 | // Code adapted from MATLAB implementation at https://people.ece.cornell.edu/land/courses/ece5760/LABS/s2016/lab3.html
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 512 // grid side length
#define RHO 0.5 // related to pitch
#define ETA 2e-4 // related to duration of sound
#define BOUNDARY_GAIN 0.75 // clamped edge vs free edge
// Prints every grid cell as "(i,j): value", one grid row per output line,
// followed by a blank line.
void print_grid(float **grid) {
    int r, c;
    for (r = 0; r < N; r++) {
        for (c = 0; c < N; c++) {
            printf("(%d,%d): %f ", r, c, grid[r][c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Explicit finite-difference simulation of a struck square drum membrane
// using three time levels (u <- current, u1 <- previous, u2 <- two back).
// Records the centre-node displacement each step, prints it, and reports
// wall-clock time at the end.
//
// Fix: argv[1] was dereferenced without checking argc, crashing when the
// iteration count was omitted.
int main(int argc, char** argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <num_iterations>\n", argv[0]);
        return 1;
    }
    // number of time steps to simulate
    int T = atoi(argv[1]);
    struct timeval start_time, end_time;
    // allocate and zero the three time levels of the N x N grid
    float **u = (float **) malloc(N * sizeof(float *));
    float **u1 = (float **) malloc(N * sizeof(float *));
    float **u2 = (float **) malloc(N * sizeof(float *));
    int i, j;
    for (i = 0; i < N; i++) {
        u[i] = (float *)malloc(N * sizeof(float));
        u1[i] = (float *)malloc(N * sizeof(float));
        u2[i] = (float *)malloc(N * sizeof(float));
        for (j = 0; j < N; j++) {
            u[i][j] = 0;
            u1[i][j] = 0;
            u2[i][j] = 0;
        }
    }
    printf("Size of grid: %d nodes\n", N*N);
    gettimeofday(&start_time, NULL);
    // simulate drum strike: unit displacement at the centre node
    u1[N/2][N/2] = 1;
    float *audio = (float *)malloc(T * sizeof(float));
    float sum_of_neighbors, previous_value, previous_previous_value;
    float **temp;
    int t;
    for (t = 0; t < T; t++) {
        // update interior points with the damped wave-equation stencil
        for (i = 1; i < N-1; i++) {
            for (j = 1; j < N-1; j++) {
                sum_of_neighbors = u1[i-1][j] + u1[i+1][j] + u1[i][j-1] + u1[i][j+1];
                previous_value = u1[i][j];
                previous_previous_value = u2[i][j];
                u[i][j] = (RHO * (sum_of_neighbors -4*previous_value) + 2*previous_value -(1-ETA)*previous_previous_value)/(1+ETA);
            }
        }
        // boundary condition on the four sides
        for (i = 1; i < N-1; i++) {
            u[0][i] = BOUNDARY_GAIN * u[1][i]; // top
            u[N-1][i] = BOUNDARY_GAIN * u[N-2][i]; // bottom
            u[i][0] = BOUNDARY_GAIN * u[i][1]; // left
            u[i][N-1] = BOUNDARY_GAIN * u[i][N-2]; // right
        }
        // update corners
        u[0][0] = BOUNDARY_GAIN * u[1][0];
        u[N-1][0] = BOUNDARY_GAIN * u[N-2][0];
        u[0][N-1] = BOUNDARY_GAIN * u[0][N-2];
        u[N-1][N-1] = BOUNDARY_GAIN * u[N-1][N-2];
        // record and print the centre-node displacement
        audio[t] = u[N/2][N/2];
        printf("%f,\n", audio[t]);
        // rotate the time levels by pointer swap (cheap — no copying)
        temp = u2;
        u2 = u1;
        u1 = u;
        u = temp;
    }
    gettimeofday(&end_time, NULL);
    unsigned long long time_elapsed = 1000 * (end_time.tv_sec - start_time.tv_sec) + (end_time.tv_usec - start_time.tv_usec) / 1000;
    printf("Time Elapsed [%llu ms]\n", time_elapsed);
    // free grid memory
    for (i = 0; i < N; i++) {
        free(u[i]);
        free(u1[i]);
        free(u2[i]);
    }
    free(u);
    free(u1);
    free(u2);
    free(audio);
    return 0;
}
|
22,846 | #include <cuda.h>
#include <iostream>
// Adds blockIdx.x + a*threadIdx.x into each thread's slot of `data`
// (layout: 8 consecutive entries per block).
__global__ void simpleKernel(int* data, int a)
{
    const int slot = blockIdx.x * 8 + threadIdx.x;
    data[slot] += blockIdx.x + a * threadIdx.x;
}
int main()
{
    const int numElems = 8;
    int hA[numElems * 2], *dA;
    // Allocate a device array and zero every entry.
    cudaMalloc((void**) &dA, sizeof(int) * numElems * 2);
    cudaMemset(dA, 0, numElems * 2 * sizeof(int));
    // Launch two blocks of numElems threads with a random addend in [0, RANGE].
    const int RANGE = 10;
    int a = rand() % (RANGE + 1);
    simpleKernel<<<2, numElems>>>(dA, a);
    // Fetch the result back into hA (blocking copy, so no explicit sync needed).
    cudaMemcpy(&hA, dA, sizeof(int) * numElems * 2, cudaMemcpyDeviceToHost);
    // Print the values space-separated to confirm things look good.
    std::cout << hA[0];
    for (int i = 1; i < numElems * 2; i++)
        std::cout << " " << hA[i];
    std::cout << std::endl;
    // Release the device allocation.
    cudaFree(dA);
    return 0;
}
22,847 | // Cuda example add2 by Oleksiy Grechnyev
// This one uses classical memory management
#include <iostream>
#include <cmath>
#include <vector>
// Kernel: This runs on the GPU (device) !
// Grid-stride vector add: y[i] += x[i] for every i in [0, n).
// The printf is demo instrumentation showing each thread's launch position.
__global__
void add(int n, float *x, float *y){
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    printf("thread = %d/%d, block = %d/%d, index = %d/%d \n", threadIdx.x, blockDim.x, blockIdx.x, gridDim.x, first, step);
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
// This runs on the CPU (host)
int main(){
    int n = 1 << 20; // 1024**2 elements
    int nF = n*sizeof(float); // buffer size in bytes
    // Create data in the CPU memory (host): x = 2.0, y = 1.0 everywhere
    std::vector<float> x(n, 2.0f), y(n, 1.0f);
    // Allocate GPU (device) memory
    float *dX, *dY;
    cudaMalloc(&dX, nF);
    cudaMalloc(&dY, nF);
    // Copy Host->Device (the original comment said Device->Host, which was
    // backwards — the direction argument below is authoritative)
    cudaMemcpy(dX, x.data(), nF, cudaMemcpyHostToDevice);
    cudaMemcpy(dY, y.data(), nF, cudaMemcpyHostToDevice);
    // Automatic block size and number of blocks for max speed
    // int blockSize = 256;
    // int numBlocks = (n+ blockSize -1) / blockSize;
    // Smaller values for the demo
    int blockSize = 4;
    int numBlocks = 3;
    // Add: y += x on the device (the grid-stride loop covers all n elements)
    add<<<numBlocks, blockSize>>>(n, dX, dY);
    // Not needed in this example — the blocking cudaMemcpy below synchronizes
    // cudaDeviceSynchronize();
    // Copy Device->Host (the original comment said Host->Device)
    cudaMemcpy(y.data(), dY, nF, cudaMemcpyDeviceToHost);
    // Check the result: maxE should be 0 (every y[i] should now be 3.0f)
    double maxE=0;
    for (int i = 0; i< n ; ++i)
        maxE = std::fmax(maxE, std::fabs(y[i] - 3.0f));
    std::cout << "maxE = " << maxE << std::endl;
    // Free device memory
    cudaFree(dX);
    cudaFree(dY);
    return 0;
}
|
22,848 | #include "includes.h"
// Writes a 0/1 flag per element: 1 when the element is a multiple of 32,
// 0 otherwise. One thread per element; out-of-range threads exit early.
__global__ void predicate(int *d_array, int d_numberOfElements,int *d_predicateArray)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= d_numberOfElements)
        return;
    d_predicateArray[i] = (d_array[i] % 32 == 0) ? 1 : 0;
}
22,849 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
void define_xs_or_ys(float* xs, float dx, float x0, int gsize);
// Fills xs[0..gsize) with the arithmetic sequence x0, x0+dx, x0+2*dx, ...
void define_xs_or_ys(float* xs, float dx, float x0, int gsize){
    for(int k = 0; k < gsize; ++k)
        xs[k] = x0 + k * dx;
}
// Demo: fills a thrust host_vector through a raw-pointer view and prints it.
int main(){
    const int gsize = 10;
    const float dx = 1;
    const float x0 = 0.5;
    thrust::host_vector<float> H_xs(gsize, -1);
    thrust::host_vector<float> H_ys(gsize, -1);
    // Raw views into the vectors' storage.
    float* xs = thrust::raw_pointer_cast(&H_xs[0]);
    float* ys = thrust::raw_pointer_cast(&H_ys[0]);
    define_xs_or_ys(xs, dx, x0, gsize);
    for(int k = 0; k < gsize; ++k)
        std::cout << xs[k] << std::endl;
    return 0;
}
22,850 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#include <assert.h>
#define min(x,y) (y + ((x - y) & ((x - y) >> (sizeof(long) * 8 - 1))))
const int Tile_Width = 1;
const int WIDTH = 3;
// Prints the WIDTH x WIDTH matrix space-separated, newline at row ends
// (" \n"[cond] selects ' ' or '\n'), followed by a blank line.
void print_matrix(long *m) {
    for (int r = 0; r < WIDTH; r++)
        for (int c = 0; c < WIDTH; c++)
            printf("%ld%c", m[r * WIDTH + c], " \n"[c == WIDTH-1]);
    printf("\n");
}
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
  // Debug builds: print the CUDA error string and abort via assert.
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  // Always pass the status through so calls can be chained or inspected.
  return result;
}
// Element-wise minimum across ndsize stacked WIDTH x WIDTH matrices:
// Pd[r][c] = min over k of Nd[k][r][c]. One thread per output cell
// (`min` here is the branchless macro defined at the top of the file).
__global__ void reduction(long* Pd, long* Nd, int ndsize) {
    const int r = blockIdx.y * Tile_Width + threadIdx.y;
    const int c = blockIdx.x * Tile_Width + threadIdx.x;
    long best = LONG_MAX;
    for (int k = 0; k < ndsize; ++k)
        best = min(best, Nd[k * WIDTH * WIDTH + r * WIDTH + c]);
    Pd[r * WIDTH + c] = best;
}
// malloc wrapper that terminates the program on allocation failure.
void *emalloc(size_t size) {
    void *p = malloc(size);
    if (p == NULL) {
        fprintf(stderr, "ERROR: Failed to malloc.\n");
        exit(1);
    }
    return p;
}
// Reads one WIDTH x WIDTH matrix from the open file into slot `ind` of the
// flat array `m`, after consuming one header/separator line.
void file_to_matrix(FILE *path_matr, long* m, int ind) {
    char line[20];
    // skip the line preceding this matrix's values
    fscanf(path_matr, " %[^\n]", line);
    long *dst = m + ind * WIDTH * WIDTH;
    for (int r = 0; r < WIDTH; r++)
        for (int c = 0; c < WIDTH; c++)
            fscanf(path_matr, "%ld", &dst[r * WIDTH + c]);
}
// Reads n WIDTH x WIDTH matrices from the file named on the command line,
// computes their element-wise minimum on the GPU, prints timing and the
// result matrix.
//
// Fixes vs. the original:
//  * M was sized with sizeof(long*) instead of sizeof(long) — same size on
//    LP64 platforms, but semantically wrong and non-portable.
//  * the input file handle was never closed.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        fprintf(stderr, "Syntax: %s <matrix file>\n", argv[0]);
        return EXIT_FAILURE;
    }
    FILE *path_matr = fopen(argv[1], "r");
    if (path_matr == NULL) {
        fprintf(stderr, "ERROR: Invalid file to matrices.\n");
        exit(1);
    }
    int n_matr; // number of matrices in the file
    fscanf(path_matr, "%d", &n_matr);
    long* M = (long*) emalloc(n_matr * WIDTH * WIDTH * sizeof(long));
    long* P = (long*) emalloc(WIDTH * WIDTH * sizeof(long));
    for (int i = 0; i < n_matr; i++) {
        file_to_matrix(path_matr, M, i);
    }
    fclose(path_matr);
    checkCuda( cudaSetDevice(0) );
    cudaDeviceReset();
    // allocate device matrices (linearized)
    long* Nd = NULL;
    long* Pd = NULL;
    checkCuda( cudaMalloc((void**) &Nd, n_matr * WIDTH * WIDTH * sizeof(long)) );
    checkCuda( cudaMalloc((void**) &Pd, WIDTH * WIDTH * sizeof(long)) );
    struct timeval begin, end;
    gettimeofday(&begin, NULL);
    // copy host memory to device
    checkCuda( cudaMemcpy(Nd, M, n_matr * WIDTH * WIDTH * sizeof(long), cudaMemcpyHostToDevice) );
    checkCuda( cudaMemcpy(Pd, P, WIDTH * WIDTH * sizeof(long), cudaMemcpyHostToDevice) );
    // execute the kernel
    printf("Execute the kernel...\n");
    int GridSize = (WIDTH + Tile_Width-1) / Tile_Width;
    dim3 gridDim(GridSize, GridSize);
    dim3 blockDim(Tile_Width, Tile_Width);
    cudaProfilerStart();
    reduction<<< gridDim, blockDim >>>(Pd, Nd, n_matr);
    cudaProfilerStop();
    // copy result from device to host (blocking, so it also synchronizes)
    checkCuda( cudaMemcpy( P, Pd, WIDTH * WIDTH * sizeof(long),cudaMemcpyDeviceToHost) );
    gettimeofday(&end, NULL);
    double gpuTime = 1000000*(double)(end.tv_sec - begin.tv_sec);
    gpuTime += (double)(end.tv_usec - begin.tv_usec);
    // print times
    printf("Execution Time (microseconds): %9.2f\n", gpuTime);
    print_matrix(P);
    // clean up memory
    free(M);
    free(P);
    checkCuda( cudaFree(Nd) );
    checkCuda( cudaFree(Pd) );
    return 0;
}
|
22,851 | #include "global_defines.cuh"
#include <numeric>
void LBM::relaxation(){
/*One-step density relaxation process
c.......density relaxation: a single time relaxation with relaxation
c parameter omega is applied here. This step is only "local",
c nothing is propagated through the lattice.
c*/
if(data_location==GPU)
copy_data_from_device_to_host();
int x,y,z;
FLOATING u_x=0.0, u_y=0.0, u_z=0.0, u_squ=0.0, rho=0.0, reciprocal_rho=0.0;
FLOATING u_n[19];
// FLOATING n_equ[19];
FLOATING buff[19];
// FLOATING u_n_squared[19];
//FLOATING two_x_c_squ_sqared;
const FLOATING omega_x_t_0=omega*t_0, omega_x_t_1=omega*t_1, omega_x_t_2=omega*t_2;
FLOATING omega_x_rho_x_t_0, omega_x_rho_x_t_1, omega_x_rho_x_t_2;
FLOATING temp_factor;
FLOATING u_n__over__c_squ[19];
FLOATING u_n__over__c_squ__squared_and_halved[19];
//....square speed of sound
/* compute the out let velocity with a convevtive boundary condition
c.....loop over all nodes
c.....attention: actual densities are stored after the propagation
c step in the help-array n_hlp !*/
#pragma unroll
for (z = 0 ; z< lz ; ++z){
#pragma unroll
for (y = 0 ; y< ly ; ++y){
#pragma unroll
for (x = 0 ; x< lx; ++x){
/*c.........only free nodes are considered here
!if (.not. obstacles[z][y][x]) then
c...........integral local density
c...........initialize variable ro*/
//memory optimised implementation
buff[0]=D3_hlp.Q0[index(z,y,x)];
buff[1]=D3_hlp.Q1[index(z,y,x)];
buff[2]=D3_hlp.Q2[index(z,y,x)];
buff[3]=D3_hlp.Q3[index(z,y,x)];
buff[4]=D3_hlp.Q4[index(z,y,x)];
buff[5]=D3_hlp.Q5[index(z,y,x)];
buff[6]=D3_hlp.Q6[index(z,y,x)];
buff[7]=D3_hlp.Q7[index(z,y,x)];
buff[8]=D3_hlp.Q8[index(z,y,x)];
buff[9]=D3_hlp.Q9[index(z,y,x)];
buff[10]=D3_hlp.Q10[index(z,y,x)];
buff[11]=D3_hlp.Q11[index(z,y,x)];
buff[12]=D3_hlp.Q12[index(z,y,x)];
buff[13]=D3_hlp.Q13[index(z,y,x)];
buff[14]=D3_hlp.Q14[index(z,y,x)];
buff[15]=D3_hlp.Q15[index(z,y,x)];
buff[16]=D3_hlp.Q16[index(z,y,x)];
buff[17]=D3_hlp.Q17[index(z,y,x)];
buff[18]=D3_hlp.Q18[index(z,y,x)];
rho=accumulate(buff, buff+DENSITIES, 0.0);
reciprocal_rho=1.0/rho;
switch(obstacles[index(z,y,x)]){
case 1:
u_x = 0.0;
u_y = 0.0;
u_z = 0.0;
break;
default:
u_x = 0.0;
u_x = reciprocal_rho*(buff[1] + buff[7] + buff[10] +buff[11] + buff[14]-
(buff[3] + buff[8] + buff[9] +buff[12] + buff[13]));
u_y = 0.0;
u_y = reciprocal_rho*(buff[2]+buff[8]+buff[7]+buff[16] + buff[15] -
(buff[4] + buff[9] + buff[10] +buff[17] + buff[18]));
u_z = 0.0;
u_z = reciprocal_rho*(buff[5]+buff[13]+buff[14]+buff[15]+buff[18]-
(buff[6]+buff[12]+buff[11]+buff[16]+buff[17]));
break;
}//switch(obstacles[index(z,y,x)])
//original implementation
// rho=0.0;
// rho+=D3_hlp.Q0[index(z,y,x)]+D3_hlp.Q1[index(z,y,x)]+D3_hlp.Q2[index(z,y,x)]+D3_hlp.Q3[index(z,y,x)];
// rho+=D3_hlp.Q4[index(z,y,x)]+D3_hlp.Q5[index(z,y,x)]+D3_hlp.Q6[index(z,y,x)]+D3_hlp.Q7[index(z,y,x)];
// rho+=D3_hlp.Q8[index(z,y,x)]+D3_hlp.Q9[index(z,y,x)]+D3_hlp.Q10[index(z,y,x)]+D3_hlp.Q11[index(z,y,x)];
// rho+=D3_hlp.Q12[index(z,y,x)]+D3_hlp.Q13[index(z,y,x)]+D3_hlp.Q14[index(z,y,x)]+D3_hlp.Q15[index(z,y,x)];
// rho+=D3_hlp.Q16[index(z,y,x)]+D3_hlp.Q17[index(z,y,x)]+D3_hlp.Q18[index(z,y,x)];
// reciprocal_rho=1.0/rho;
//...........x-, and y- velocity components
// switch(obstacles[index(z,y,x)]){
// case 1:
// u_x = 0.0;
// u_y = 0.0;
// u_z = 0.0;
// break;
// default:
// u_x = (FLOATING) reciprocal_rho*(D3_hlp.Q1[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// D3_hlp.Q11[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] -
// (D3_hlp.Q3[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] +
// D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)]));
//
// u_y = (FLOATING) reciprocal_rho*(D3_hlp.Q2[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] +
// D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q15[index(z,y,x)] -
// (D3_hlp.Q4[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// D3_hlp.Q17[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)]));
//
// u_z = (FLOATING) reciprocal_rho*(D3_hlp.Q5[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] +
// D3_hlp.Q15[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)] -
// (D3_hlp.Q6[index(z,y,x)] + D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q11[index(z,y,x)] +
// D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q17[index(z,y,x)]));
// break;
// }//switch(obstacles[index(z,y,x)])
u_squ = (FLOATING) u_x*u_x + u_y*u_y + u_z*u_z;
temp_factor= 0.5*(2.0* c_squ - u_squ)/c_squ;
//u_squ = (FLOATING) pow(u_x,2) + pow(u_y,2) + pow(u_z,2);
/*...........n- velocity compnents (n = lattice node connection vectors)
c...........this is only necessary for clearence, and only 3 speeds would
c...........be necessary*/
//WARNING!!!! o pinakas autos exei tropopoihmena indices!!!!
u_n[0]= 0.0; //SHOULD NEVER USED!
u_n[1] = u_x;
u_n[2] = u_y;
u_n[3] = - u_x;
u_n[4] = - u_y;
u_n[5] = u_z;
u_n[6] = - u_z;
u_n[7] = u_x + u_y;
u_n[8] = - u_x + u_y;
u_n[9] = - u_x - u_y;
u_n[10] = u_x - u_y;
u_n[11] = u_x - u_z;
u_n[12] = - u_x - u_z;
u_n[13] = - u_x + u_z;
u_n[14] = u_x + u_z;
u_n[15] = u_z + u_y;
u_n[16] = - u_z + u_y;
u_n[17] = - u_z - u_y;
u_n[18] = u_z - u_y;
#pragma unroll
for(int i=0; i<DENSITIES; ++i){
u_n__over__c_squ[i]=reciprocal_c_squ*u_n[i];
u_n__over__c_squ__squared_and_halved[i]=0.5*u_n__over__c_squ[i]*u_n__over__c_squ[i];
}
/*c...........equilibrium densities
c...........this can be rewritten to improve computational performance
c...........considerabely !
c
c...........zero velocity density
c*/
//memory optimised implementation! WARNING!!! different from the original case!
//two_x_c_squ_sqared=2.0*c_squ*c_squ;
omega_x_rho_x_t_0=omega_x_t_0*rho;
omega_x_rho_x_t_1=omega_x_t_1*rho;
omega_x_rho_x_t_2=omega_x_t_2*rho;
// //...........relaxation step
//omega_x_rho_x_t_0*(1.0 - 0.5*u_squ/c_squ);
D3.Q0[index(z,y,x)]=buff[0]*one_minus_omega+omega_x_rho_x_t_0*(u_n__over__c_squ__squared_and_halved[0]+u_n__over__c_squ[0]+temp_factor);
D3.Q1[index(z,y,x)]=buff[1]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[1]+u_n__over__c_squ[1]+temp_factor);
D3.Q2[index(z,y,x)]=buff[2]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[2]+u_n__over__c_squ[2]+temp_factor);
D3.Q3[index(z,y,x)]=buff[3]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[3]+u_n__over__c_squ[3]+temp_factor);
D3.Q4[index(z,y,x)]=buff[4]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[4]+u_n__over__c_squ[4]+temp_factor);
D3.Q5[index(z,y,x)]=buff[5]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[5]+u_n__over__c_squ[5]+temp_factor);
D3.Q6[index(z,y,x)]=buff[6]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[6]+u_n__over__c_squ[6]+temp_factor);
D3.Q7[index(z,y,x)]= buff[ 7]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 7]+u_n__over__c_squ[ 7]+temp_factor);
D3.Q8[index(z,y,x)]= buff[ 8]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 8]+u_n__over__c_squ[ 8]+temp_factor);
D3.Q9[index(z,y,x)]= buff[ 9]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 9]+u_n__over__c_squ[ 9]+temp_factor);
D3.Q10[index(z,y,x)]=buff[10]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[10]+u_n__over__c_squ[10]+temp_factor);
D3.Q11[index(z,y,x)]=buff[11]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[11]+u_n__over__c_squ[11]+temp_factor);
D3.Q12[index(z,y,x)]=buff[12]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[12]+u_n__over__c_squ[12]+temp_factor);
D3.Q13[index(z,y,x)]=buff[13]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[13]+u_n__over__c_squ[13]+temp_factor);
D3.Q14[index(z,y,x)]=buff[14]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[14]+u_n__over__c_squ[14]+temp_factor);
D3.Q15[index(z,y,x)]=buff[15]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[15]+u_n__over__c_squ[15]+temp_factor);
D3.Q16[index(z,y,x)]=buff[16]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[16]+u_n__over__c_squ[16]+temp_factor);
D3.Q17[index(z,y,x)]=buff[17]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[17]+u_n__over__c_squ[17]+temp_factor);
D3.Q18[index(z,y,x)]=buff[18]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[18]+u_n__over__c_squ[18]+temp_factor);
//original implementation
// n_equ[0] = t_0 * rho*(1.0 - u_squ / (2.0 * c_squ));
//
// //...........axis speeds (factor: t_1)
// #pragma unroll
// for (int i = 1 ; i< 7; ++i){
// n_equ[i] = t_1 * rho*(1.0 + u_n[i] / c_squ
// + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// - u_squ / (2.0 * c_squ));
// }
//
// //...........diagonal speeds (factor: t_2)
// #pragma unroll
// for (int i = 7 ; i< 19; ++i){
// n_equ[i] = t_2 * rho*(1.0 + u_n[i] / c_squ
// + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// - u_squ / (2.0 * c_squ));
// }
// D3.Q0[index(z,y,x)]=D3_hlp.Q0[index(z,y,x)]+omega*(n_equ[0] - D3_hlp.Q0[index(z,y,x)]);
// D3.Q1[index(z,y,x)]=D3_hlp.Q1[index(z,y,x)]+omega*(n_equ[1] - D3_hlp.Q1[index(z,y,x)]);
// D3.Q2[index(z,y,x)]=D3_hlp.Q2[index(z,y,x)]+omega*(n_equ[2] - D3_hlp.Q2[index(z,y,x)]);
// D3.Q3[index(z,y,x)]=D3_hlp.Q3[index(z,y,x)]+omega*(n_equ[3] - D3_hlp.Q3[index(z,y,x)]);
// D3.Q4[index(z,y,x)]=D3_hlp.Q4[index(z,y,x)]+omega*(n_equ[4] - D3_hlp.Q4[index(z,y,x)]);
// D3.Q5[index(z,y,x)]=D3_hlp.Q5[index(z,y,x)]+omega*(n_equ[5] - D3_hlp.Q5[index(z,y,x)]);
// D3.Q6[index(z,y,x)]=D3_hlp.Q6[index(z,y,x)]+omega*(n_equ[6] - D3_hlp.Q6[index(z,y,x)]);
// D3.Q7[index(z,y,x)]=D3_hlp.Q7[index(z,y,x)]+omega*(n_equ[7] - D3_hlp.Q7[index(z,y,x)]);
// D3.Q8[index(z,y,x)]=D3_hlp.Q8[index(z,y,x)]+omega*(n_equ[8] - D3_hlp.Q8[index(z,y,x)]);
// D3.Q9[index(z,y,x)]=D3_hlp.Q9[index(z,y,x)]+omega*(n_equ[9] - D3_hlp.Q9[index(z,y,x)]);
// D3.Q10[index(z,y,x)]=D3_hlp.Q10[index(z,y,x)]+omega*(n_equ[10] - D3_hlp.Q10[index(z,y,x)]);
// D3.Q11[index(z,y,x)]=D3_hlp.Q11[index(z,y,x)]+omega*(n_equ[11] - D3_hlp.Q11[index(z,y,x)]);
// D3.Q12[index(z,y,x)]=D3_hlp.Q12[index(z,y,x)]+omega*(n_equ[12] - D3_hlp.Q12[index(z,y,x)]);
// D3.Q13[index(z,y,x)]=D3_hlp.Q13[index(z,y,x)]+omega*(n_equ[13] - D3_hlp.Q13[index(z,y,x)]);
// D3.Q14[index(z,y,x)]=D3_hlp.Q14[index(z,y,x)]+omega*(n_equ[14] - D3_hlp.Q14[index(z,y,x)]);
// D3.Q15[index(z,y,x)]=D3_hlp.Q15[index(z,y,x)]+omega*(n_equ[15] - D3_hlp.Q15[index(z,y,x)]);
// D3.Q16[index(z,y,x)]=D3_hlp.Q16[index(z,y,x)]+omega*(n_equ[16] - D3_hlp.Q16[index(z,y,x)]);
// D3.Q17[index(z,y,x)]=D3_hlp.Q17[index(z,y,x)]+omega*(n_equ[17] - D3_hlp.Q17[index(z,y,x)]);
// D3.Q18[index(z,y,x)]=D3_hlp.Q18[index(z,y,x)]+omega*(n_equ[18] - D3_hlp.Q18[index(z,y,x)]);
if (x == lx-2) {
u_previous_spatial_boundary[index2D(z,y)] = u_x;
v_previous_spatial_boundary[index2D(z,y)] = u_y;
w_previous_spatial_boundary[index2D(z,y)] = u_z;
u_current[index2D(z,y)] = u_x;
v_current[index2D(z,y)] = u_y;
w_current[index2D(z,y)] = u_z;
}//if (x == lx-2)
}//for (x = 0 ; x< lx; ++x)
}//for (y = 0 ; y< ly ; ++y)
}//for (z = 0 ; z< lz ; ++z)
#ifdef DEBUG
cout << " #LBM relaxation OK!" << endl;
#endif
}
/* One-step D3Q19 lattice-Boltzmann density relaxation (BGK collision).
 *
 * A single-time relaxation with parameter omega is applied at every
 * lattice node. The step is purely local: nothing is propagated through
 * the lattice. Post-propagation densities are read from the helper
 * lattice D3_hlp and the relaxed values are written to D3.
 *
 * Side effects beyond D3: at the penultimate x-slice (x == lx-2) the
 * macroscopic velocity components are saved into the spatial AND
 * temporal boundary buffers (the temporal copy corresponds to
 * time_unit == 0), for use by the convective outlet boundary condition.
 *
 * NOTE(review): the division by rho below is unguarded -- assumes the
 * initial densities make rho strictly positive at every node; confirm.
 */
void LBM::initial_relaxation(){
// lattice coordinates
int x,y,z;
// macroscopic velocity components, squared speed, and local density
FLOATING u_x=0.0, u_y=0.0, u_z=0.0, u_squ=0.0, rho=0.0;
// BGK relaxation time from kinematic viscosity nu; omega = 1/tau
const FLOATING tau=3.0*nu + 0.5, omega = 1.0 /tau; // omega=1.0/(3.0*nu+0.5);
// u_n[i]: projection of the velocity onto lattice direction i;
// n_equ[i]: equilibrium distribution for direction i
FLOATING u_n[19], n_equ[19];
/* Loop over all nodes.
 * Attention: the actual (post-propagation) densities are stored in the
 * helper lattice D3_hlp. */
for (z = 0 ; z< lz ; ++z){
for (y = 0 ; y< ly ; ++y){
for (x = 0 ; x< lx; ++x){
/* All nodes are processed here (obstacle nodes only have their
 * velocity zeroed below).
 * Integral local density: sum of all 19 distribution values. */
rho=0.0;
rho+=D3_hlp.Q0[index(z,y,x)]+D3_hlp.Q1[index(z,y,x)]+D3_hlp.Q2[index(z,y,x)]+D3_hlp.Q3[index(z,y,x)];
rho+=D3_hlp.Q4[index(z,y,x)]+D3_hlp.Q5[index(z,y,x)]+D3_hlp.Q6[index(z,y,x)]+D3_hlp.Q7[index(z,y,x)];
rho+=D3_hlp.Q8[index(z,y,x)]+D3_hlp.Q9[index(z,y,x)]+D3_hlp.Q10[index(z,y,x)]+D3_hlp.Q11[index(z,y,x)];
rho+=D3_hlp.Q12[index(z,y,x)]+D3_hlp.Q13[index(z,y,x)]+D3_hlp.Q14[index(z,y,x)]+D3_hlp.Q15[index(z,y,x)];
rho+=D3_hlp.Q16[index(z,y,x)]+D3_hlp.Q17[index(z,y,x)]+D3_hlp.Q18[index(z,y,x)];
// x-, y-, z- velocity components: zero inside solid obstacles,
// otherwise (sum of distributions moving in the + direction minus
// sum moving in the - direction) / rho
if ( obstacles[index(z,y,x)]==1 ) {
u_x = 0.0;
u_y = 0.0;
u_z = 0.0;
}else{
u_x = (FLOATING) (D3_hlp.Q1[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
D3_hlp.Q11[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] -
(D3_hlp.Q3[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] +
D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)])) / rho;
u_y = (FLOATING) (D3_hlp.Q2[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] +
D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q15[index(z,y,x)] -
(D3_hlp.Q4[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
D3_hlp.Q17[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)])) / rho;
u_z = (FLOATING) (D3_hlp.Q5[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] +
D3_hlp.Q15[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)] -
(D3_hlp.Q6[index(z,y,x)] + D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q11[index(z,y,x)] +
D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q17[index(z,y,x)])) / rho;
}
// squared speed (the cast binds only to the first product, which is
// harmless: all terms are already FLOATING)
u_squ = (FLOATING) u_x*u_x + u_y*u_y + u_z*u_z;
//u_squ = (FLOATING) pow(u_x,2) + pow(u_y,2) + pow(u_z,2);
/* Per-direction velocity components (n = lattice node connection
 * vectors). This is only for clarity; only 3 speeds would strictly
 * be necessary. */
// WARNING: this table uses remapped direction indices (differs from
// the textbook D3Q19 ordering)!
u_n[0]= 0.0; // rest direction; never read (the loops below start at 1)
u_n[1] = u_x;
u_n[2] = u_y;
u_n[3] = - u_x;
u_n[4] = - u_y;
u_n[5] = u_z;
u_n[6] = - u_z;
u_n[7] = u_x + u_y;
u_n[8] = - u_x + u_y;
u_n[9] = - u_x - u_y;
u_n[10] = u_x - u_y;
u_n[11] = u_x - u_z;
u_n[12] = - u_x - u_z;
u_n[13] = - u_x + u_z;
u_n[14] = u_x + u_z;
u_n[15] = u_z + u_y;
u_n[16] = - u_z + u_y;
u_n[17] = - u_z - u_y;
u_n[18] = u_z - u_y;
/* Equilibrium densities (second-order expansion in u).
 * This could be rewritten to improve computational performance
 * considerably.
 * Zero-velocity (rest) density, weight t_0: */
n_equ[0] = t_0 * rho*(1.0 - u_squ / (2.0 * c_squ));
// axis-aligned speeds (weight factor: t_1)
for (int i = 1 ; i< 7; ++i){
n_equ[i] = t_1 * rho*(1.0 + u_n[i] / c_squ
+ (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
- u_squ / (2.0 * c_squ));
}
// diagonal speeds (weight factor: t_2)
for (int i = 7 ; i< 19; ++i){
n_equ[i] = t_2 * rho*(1.0 + u_n[i] / c_squ
+ (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
- u_squ / (2.0 * c_squ));
}
// relaxation step: f_new = f + omega * (f_eq - f), per direction
D3.Q0[index(z,y,x)]=D3_hlp.Q0[index(z,y,x)]+omega*(n_equ[0] - D3_hlp.Q0[index(z,y,x)]);
D3.Q1[index(z,y,x)]=D3_hlp.Q1[index(z,y,x)]+omega*(n_equ[1] - D3_hlp.Q1[index(z,y,x)]);
D3.Q2[index(z,y,x)]=D3_hlp.Q2[index(z,y,x)]+omega*(n_equ[2] - D3_hlp.Q2[index(z,y,x)]);
D3.Q3[index(z,y,x)]=D3_hlp.Q3[index(z,y,x)]+omega*(n_equ[3] - D3_hlp.Q3[index(z,y,x)]);
D3.Q4[index(z,y,x)]=D3_hlp.Q4[index(z,y,x)]+omega*(n_equ[4] - D3_hlp.Q4[index(z,y,x)]);
D3.Q5[index(z,y,x)]=D3_hlp.Q5[index(z,y,x)]+omega*(n_equ[5] - D3_hlp.Q5[index(z,y,x)]);
D3.Q6[index(z,y,x)]=D3_hlp.Q6[index(z,y,x)]+omega*(n_equ[6] - D3_hlp.Q6[index(z,y,x)]);
D3.Q7[index(z,y,x)]=D3_hlp.Q7[index(z,y,x)]+omega*(n_equ[7] - D3_hlp.Q7[index(z,y,x)]);
D3.Q8[index(z,y,x)]=D3_hlp.Q8[index(z,y,x)]+omega*(n_equ[8] - D3_hlp.Q8[index(z,y,x)]);
D3.Q9[index(z,y,x)]=D3_hlp.Q9[index(z,y,x)]+omega*(n_equ[9] - D3_hlp.Q9[index(z,y,x)]);
D3.Q10[index(z,y,x)]=D3_hlp.Q10[index(z,y,x)]+omega*(n_equ[10] - D3_hlp.Q10[index(z,y,x)]);
D3.Q11[index(z,y,x)]=D3_hlp.Q11[index(z,y,x)]+omega*(n_equ[11] - D3_hlp.Q11[index(z,y,x)]);
D3.Q12[index(z,y,x)]=D3_hlp.Q12[index(z,y,x)]+omega*(n_equ[12] - D3_hlp.Q12[index(z,y,x)]);
D3.Q13[index(z,y,x)]=D3_hlp.Q13[index(z,y,x)]+omega*(n_equ[13] - D3_hlp.Q13[index(z,y,x)]);
D3.Q14[index(z,y,x)]=D3_hlp.Q14[index(z,y,x)]+omega*(n_equ[14] - D3_hlp.Q14[index(z,y,x)]);
D3.Q15[index(z,y,x)]=D3_hlp.Q15[index(z,y,x)]+omega*(n_equ[15] - D3_hlp.Q15[index(z,y,x)]);
D3.Q16[index(z,y,x)]=D3_hlp.Q16[index(z,y,x)]+omega*(n_equ[16] - D3_hlp.Q16[index(z,y,x)]);
D3.Q17[index(z,y,x)]=D3_hlp.Q17[index(z,y,x)]+omega*(n_equ[17] - D3_hlp.Q17[index(z,y,x)]);
D3.Q18[index(z,y,x)]=D3_hlp.Q18[index(z,y,x)]+omega*(n_equ[18] - D3_hlp.Q18[index(z,y,x)]);
// at the penultimate x-slice, save the velocity into the previous
// and current slice buffers for the outlet boundary condition
if (x == lx-2) {
u_previous_spatial_boundary[index2D(z,y)] = u_x;
v_previous_spatial_boundary[index2D(z,y)] = u_y;
w_previous_spatial_boundary[index2D(z,y)] = u_z;
u_current[index2D(z,y)] = u_x;
v_current[index2D(z,y)] = u_y;
w_current[index2D(z,y)] = u_z;
// the following 3 lines correspond to time_unit==0
u_previous_temporal_boundary[index2D(z,y)] = u_current[index2D(z,y)];
v_previous_temporal_boundary[index2D(z,y)] = v_current[index2D(z,y)];
w_previous_temporal_boundary[index2D(z,y)] = w_current[index2D(z,y)];
}//if (x == lx-2)
}//for (x = 0 ; x< lx; ++x)
}//for (y = 0 ; y< ly ; ++y)
}//for (z = 0 ; z< lz ; ++z)
#ifdef DEBUG
cout << " #LBM relaxation OK!" << endl;
#endif
}
//__global__
//void relaxation_kernel(int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small,
// FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega,
// FLOATING reciprocal_c_squ,lattice D3, lattice D3_hlp, int *obstacles_d,
// FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary,
// FLOATING *u_current, FLOATING *v_current, FLOATING *w_current){
//
//
//
//
//
// /*One-step density relaxation process
//
// c.......density relaxation: a single time relaxation with relaxation
// c parameter omega is applied here. This step is only "local",
// c nothing is propagated through the lattice.
// c*/
//
// int x,y,z;
// FLOATING u_x=0.0, u_y=0.0, u_z=0.0, u_squ=0.0, rho=0.0, reciprocal_rho=0.0;
//
//
//
//
// FLOATING u_n[19];
// // FLOATING n_equ[19];
// FLOATING buff[19];
//
// // FLOATING u_n_squared[19];
// //FLOATING two_x_c_squ_sqared;
// const FLOATING omega_x_t_0=omega*t_0, omega_x_t_1=omega*t_1, omega_x_t_2=omega*t_2;
// FLOATING omega_x_rho_x_t_0, omega_x_rho_x_t_1, omega_x_rho_x_t_2;
// FLOATING temp_factor;
// FLOATING u_n__over__c_squ[19];
// FLOATING u_n__over__c_squ__squared_and_halved[19];
//
// //....square speed of sound
// /* compute the outlet velocity with a convective boundary condition
// c.....loop over all nodes
// c.....attention: actual densities are stored after the propagation
// c step in the help-array n_hlp !*/
//
//
//#pragma unroll
// for (z = 0 ; z< lz ; ++z){
//#pragma unroll
// for (y = 0 ; y< ly ; ++y){
//#pragma unroll
// for (x = 0 ; x< lx; ++x){
//
// /*c.........only free nodes are considered here
// !if (.not. obstacles[z][y][x]) then
// c...........integral local density
// c...........initialize variable ro*/
// //memory optimised implementation
// buff[0]=D3_hlp.Q0[index(z,y,x)];
// buff[1]=D3_hlp.Q1[index(z,y,x)];
// buff[2]=D3_hlp.Q2[index(z,y,x)];
// buff[3]=D3_hlp.Q3[index(z,y,x)];
// buff[4]=D3_hlp.Q4[index(z,y,x)];
// buff[5]=D3_hlp.Q5[index(z,y,x)];
// buff[6]=D3_hlp.Q6[index(z,y,x)];
// buff[7]=D3_hlp.Q7[index(z,y,x)];
// buff[8]=D3_hlp.Q8[index(z,y,x)];
// buff[9]=D3_hlp.Q9[index(z,y,x)];
// buff[10]=D3_hlp.Q10[index(z,y,x)];
// buff[11]=D3_hlp.Q11[index(z,y,x)];
// buff[12]=D3_hlp.Q12[index(z,y,x)];
// buff[13]=D3_hlp.Q13[index(z,y,x)];
// buff[14]=D3_hlp.Q14[index(z,y,x)];
// buff[15]=D3_hlp.Q15[index(z,y,x)];
// buff[16]=D3_hlp.Q16[index(z,y,x)];
// buff[17]=D3_hlp.Q17[index(z,y,x)];
// buff[18]=D3_hlp.Q18[index(z,y,x)];
//
// rho=0.0;
// for(int k=0; k<DENSITIES; ++k)
// rho+=buff[k];
//
// // rho=accumulate(buff, buff+DENSITIES, 0.0);
//
// reciprocal_rho=1.0/rho;
//
//
//
//
// switch(obstacles_d[index(z,y,x)]){
// case 1:
// u_x = 0.0;
// u_y = 0.0;
// u_z = 0.0;
// break;
// default:
// u_x = 0.0;
// u_x = reciprocal_rho*(buff[1] + buff[7] + buff[10] +buff[11] + buff[14]-
// (buff[3] + buff[8] + buff[9] +buff[12] + buff[13]));
//
// u_y = 0.0;
// u_y = reciprocal_rho*(buff[2]+buff[8]+buff[7]+buff[16] + buff[15] -
// (buff[4] + buff[9] + buff[10] +buff[17] + buff[18]));
//
// u_z = 0.0;
// u_z = reciprocal_rho*(buff[5]+buff[13]+buff[14]+buff[15]+buff[18]-
// (buff[6]+buff[12]+buff[11]+buff[16]+buff[17]));
// break;
// }//switch(obstacles[index(z,y,x)])
//
// //original implementation
// // rho=0.0;
// // rho+=D3_hlp.Q0[index(z,y,x)]+D3_hlp.Q1[index(z,y,x)]+D3_hlp.Q2[index(z,y,x)]+D3_hlp.Q3[index(z,y,x)];
// // rho+=D3_hlp.Q4[index(z,y,x)]+D3_hlp.Q5[index(z,y,x)]+D3_hlp.Q6[index(z,y,x)]+D3_hlp.Q7[index(z,y,x)];
// // rho+=D3_hlp.Q8[index(z,y,x)]+D3_hlp.Q9[index(z,y,x)]+D3_hlp.Q10[index(z,y,x)]+D3_hlp.Q11[index(z,y,x)];
// // rho+=D3_hlp.Q12[index(z,y,x)]+D3_hlp.Q13[index(z,y,x)]+D3_hlp.Q14[index(z,y,x)]+D3_hlp.Q15[index(z,y,x)];
// // rho+=D3_hlp.Q16[index(z,y,x)]+D3_hlp.Q17[index(z,y,x)]+D3_hlp.Q18[index(z,y,x)];
// // reciprocal_rho=1.0/rho;
//
// //...........x-, and y- velocity components
//
// // switch(obstacles[index(z,y,x)]){
// // case 1:
// // u_x = 0.0;
// // u_y = 0.0;
// // u_z = 0.0;
// // break;
// // default:
// // u_x = (FLOATING) reciprocal_rho*(D3_hlp.Q1[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q11[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] -
// // (D3_hlp.Q3[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] +
// // D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)]));
// //
// // u_y = (FLOATING) reciprocal_rho*(D3_hlp.Q2[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q15[index(z,y,x)] -
// // (D3_hlp.Q4[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q17[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)]));
// //
// // u_z = (FLOATING) reciprocal_rho*(D3_hlp.Q5[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] +
// // D3_hlp.Q15[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)] -
// // (D3_hlp.Q6[index(z,y,x)] + D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q11[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q17[index(z,y,x)]));
// // break;
// // }//switch(obstacles[index(z,y,x)])
//
//
//
//
// u_squ = (FLOATING) u_x*u_x + u_y*u_y + u_z*u_z;
// temp_factor= 0.5*(2.0* c_squ - u_squ)/c_squ;
// //u_squ = (FLOATING) pow(u_x,2) + pow(u_y,2) + pow(u_z,2);
//
//
// /*...........n- velocity components (n = lattice node connection vectors)
// c...........this is only necessary for clarity, and only 3 speeds would
// c...........be necessary*/
//
//
// //WARNING!!!! this array uses remapped (modified) direction indices!!!!
// u_n[0]= 0.0; //SHOULD NEVER USED!
// u_n[1] = u_x;
// u_n[2] = u_y;
// u_n[3] = - u_x;
// u_n[4] = - u_y;
// u_n[5] = u_z;
// u_n[6] = - u_z;
// u_n[7] = u_x + u_y;
// u_n[8] = - u_x + u_y;
// u_n[9] = - u_x - u_y;
// u_n[10] = u_x - u_y;
// u_n[11] = u_x - u_z;
// u_n[12] = - u_x - u_z;
// u_n[13] = - u_x + u_z;
// u_n[14] = u_x + u_z;
// u_n[15] = u_z + u_y;
// u_n[16] = - u_z + u_y;
// u_n[17] = - u_z - u_y;
// u_n[18] = u_z - u_y;
//
//#pragma unroll
// for(int i=0; i<DENSITIES; ++i){
// u_n__over__c_squ[i]=reciprocal_c_squ*u_n[i];
// u_n__over__c_squ__squared_and_halved[i]=0.5*u_n__over__c_squ[i]*u_n__over__c_squ[i];
// }
//
// /*c...........equilibrium densities
// c...........this can be rewritten to improve computational performance
// c...........considerabely !
// c
// c...........zero velocity density
// c*/
// //memory optimised implementation! WARNING!!! different from the original case!
//
// //two_x_c_squ_sqared=2.0*c_squ*c_squ;
// omega_x_rho_x_t_0=omega_x_t_0*rho;
// omega_x_rho_x_t_1=omega_x_t_1*rho;
// omega_x_rho_x_t_2=omega_x_t_2*rho;
//
//
// // //...........relaxation step
//
//
// //omega_x_rho_x_t_0*(1.0 - 0.5*u_squ/c_squ);
// D3.Q0[index(z,y,x)]=buff[0]*one_minus_omega+omega_x_rho_x_t_0*(u_n__over__c_squ__squared_and_halved[0]+u_n__over__c_squ[0]+temp_factor);
//
//
//
//
// D3.Q1[index(z,y,x)]=buff[1]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[1]+u_n__over__c_squ[1]+temp_factor);
// D3.Q2[index(z,y,x)]=buff[2]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[2]+u_n__over__c_squ[2]+temp_factor);
// D3.Q3[index(z,y,x)]=buff[3]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[3]+u_n__over__c_squ[3]+temp_factor);
// D3.Q4[index(z,y,x)]=buff[4]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[4]+u_n__over__c_squ[4]+temp_factor);
// D3.Q5[index(z,y,x)]=buff[5]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[5]+u_n__over__c_squ[5]+temp_factor);
// D3.Q6[index(z,y,x)]=buff[6]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[6]+u_n__over__c_squ[6]+temp_factor);
//
//
//
// D3.Q7[index(z,y,x)]= buff[ 7]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 7]+u_n__over__c_squ[ 7]+temp_factor);
// D3.Q8[index(z,y,x)]= buff[ 8]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 8]+u_n__over__c_squ[ 8]+temp_factor);
// D3.Q9[index(z,y,x)]= buff[ 9]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 9]+u_n__over__c_squ[ 9]+temp_factor);
// D3.Q10[index(z,y,x)]=buff[10]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[10]+u_n__over__c_squ[10]+temp_factor);
// D3.Q11[index(z,y,x)]=buff[11]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[11]+u_n__over__c_squ[11]+temp_factor);
// D3.Q12[index(z,y,x)]=buff[12]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[12]+u_n__over__c_squ[12]+temp_factor);
// D3.Q13[index(z,y,x)]=buff[13]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[13]+u_n__over__c_squ[13]+temp_factor);
// D3.Q14[index(z,y,x)]=buff[14]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[14]+u_n__over__c_squ[14]+temp_factor);
// D3.Q15[index(z,y,x)]=buff[15]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[15]+u_n__over__c_squ[15]+temp_factor);
// D3.Q16[index(z,y,x)]=buff[16]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[16]+u_n__over__c_squ[16]+temp_factor);
// D3.Q17[index(z,y,x)]=buff[17]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[17]+u_n__over__c_squ[17]+temp_factor);
// D3.Q18[index(z,y,x)]=buff[18]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[18]+u_n__over__c_squ[18]+temp_factor);
//
// //original implementation
// // n_equ[0] = t_0 * rho*(1.0 - u_squ / (2.0 * c_squ));
// //
// // //...........axis speeds (factor: t_1)
// // #pragma unroll
// // for (int i = 1 ; i< 7; ++i){
// // n_equ[i] = t_1 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
// //
// // //...........diagonal speeds (factor: t_2)
// // #pragma unroll
// // for (int i = 7 ; i< 19; ++i){
// // n_equ[i] = t_2 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
//
// // D3.Q0[index(z,y,x)]=D3_hlp.Q0[index(z,y,x)]+omega*(n_equ[0] - D3_hlp.Q0[index(z,y,x)]);
// // D3.Q1[index(z,y,x)]=D3_hlp.Q1[index(z,y,x)]+omega*(n_equ[1] - D3_hlp.Q1[index(z,y,x)]);
// // D3.Q2[index(z,y,x)]=D3_hlp.Q2[index(z,y,x)]+omega*(n_equ[2] - D3_hlp.Q2[index(z,y,x)]);
// // D3.Q3[index(z,y,x)]=D3_hlp.Q3[index(z,y,x)]+omega*(n_equ[3] - D3_hlp.Q3[index(z,y,x)]);
// // D3.Q4[index(z,y,x)]=D3_hlp.Q4[index(z,y,x)]+omega*(n_equ[4] - D3_hlp.Q4[index(z,y,x)]);
// // D3.Q5[index(z,y,x)]=D3_hlp.Q5[index(z,y,x)]+omega*(n_equ[5] - D3_hlp.Q5[index(z,y,x)]);
// // D3.Q6[index(z,y,x)]=D3_hlp.Q6[index(z,y,x)]+omega*(n_equ[6] - D3_hlp.Q6[index(z,y,x)]);
// // D3.Q7[index(z,y,x)]=D3_hlp.Q7[index(z,y,x)]+omega*(n_equ[7] - D3_hlp.Q7[index(z,y,x)]);
// // D3.Q8[index(z,y,x)]=D3_hlp.Q8[index(z,y,x)]+omega*(n_equ[8] - D3_hlp.Q8[index(z,y,x)]);
// // D3.Q9[index(z,y,x)]=D3_hlp.Q9[index(z,y,x)]+omega*(n_equ[9] - D3_hlp.Q9[index(z,y,x)]);
// // D3.Q10[index(z,y,x)]=D3_hlp.Q10[index(z,y,x)]+omega*(n_equ[10] - D3_hlp.Q10[index(z,y,x)]);
// // D3.Q11[index(z,y,x)]=D3_hlp.Q11[index(z,y,x)]+omega*(n_equ[11] - D3_hlp.Q11[index(z,y,x)]);
// // D3.Q12[index(z,y,x)]=D3_hlp.Q12[index(z,y,x)]+omega*(n_equ[12] - D3_hlp.Q12[index(z,y,x)]);
// // D3.Q13[index(z,y,x)]=D3_hlp.Q13[index(z,y,x)]+omega*(n_equ[13] - D3_hlp.Q13[index(z,y,x)]);
// // D3.Q14[index(z,y,x)]=D3_hlp.Q14[index(z,y,x)]+omega*(n_equ[14] - D3_hlp.Q14[index(z,y,x)]);
// // D3.Q15[index(z,y,x)]=D3_hlp.Q15[index(z,y,x)]+omega*(n_equ[15] - D3_hlp.Q15[index(z,y,x)]);
// // D3.Q16[index(z,y,x)]=D3_hlp.Q16[index(z,y,x)]+omega*(n_equ[16] - D3_hlp.Q16[index(z,y,x)]);
// // D3.Q17[index(z,y,x)]=D3_hlp.Q17[index(z,y,x)]+omega*(n_equ[17] - D3_hlp.Q17[index(z,y,x)]);
// // D3.Q18[index(z,y,x)]=D3_hlp.Q18[index(z,y,x)]+omega*(n_equ[18] - D3_hlp.Q18[index(z,y,x)]);
//
// if (x == lx-2) {
// u_previous_spatial_boundary[index2D(z,y)] = u_x;
// v_previous_spatial_boundary[index2D(z,y)] = u_y;
// w_previous_spatial_boundary[index2D(z,y)] = u_z;
//
// u_current[index2D(z,y)] = u_x;
// v_current[index2D(z,y)] = u_y;
// w_current[index2D(z,y)] = u_z;
//
// }//if (x == lx-2)
// }//for (x = 0 ; x< lx; ++x)
// }//for (y = 0 ; y< ly ; ++y)
// }//for (z = 0 ; z< lz ; ++z)
//#ifdef DEBUG
// cout << " #LBM relaxation OK!" << endl;
//#endif
//}
//
//__global__
//void relaxation_kernel_v2(int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small,
// FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega,
// FLOATING reciprocal_c_squ,
// FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3,
// FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7,
// FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11,
// FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15,
// FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18,
// FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3,
// FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7,
// FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11,
// FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15,
// FLOATING *Q16, FLOATING *Q17, FLOATING *Q18,
// int *obstacles_d,
// FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary,
// FLOATING *u_current, FLOATING *v_current, FLOATING *w_current){
//
//
//
//
//
// /*One-step density relaxation process
//
// c.......density relaxation: a single time relaxation with relaxation
// c parameter omega is applied here. This step is only "local",
// c nothing is propagated through the lattice.
// c*/
//
// int x,y,z;
// FLOATING u_x=0.0, u_y=0.0, u_z=0.0, u_squ=0.0, rho=0.0, reciprocal_rho=0.0;
//
//
//
//
// FLOATING u_n[19];
// // FLOATING n_equ[19];
// FLOATING buff[19];
//
// // FLOATING u_n_squared[19];
// //FLOATING two_x_c_squ_sqared;
// const FLOATING omega_x_t_0=omega*t_0, omega_x_t_1=omega*t_1, omega_x_t_2=omega*t_2;
// FLOATING omega_x_rho_x_t_0, omega_x_rho_x_t_1, omega_x_rho_x_t_2;
// FLOATING temp_factor;
// FLOATING u_n__over__c_squ[19];
// FLOATING u_n__over__c_squ__squared_and_halved[19];
//
// //....square speed of sound
// /* compute the outlet velocity with a convective boundary condition
// c.....loop over all nodes
// c.....attention: actual densities are stored after the propagation
// c step in the help-array n_hlp !*/
//
// //
// //#pragma unroll
// // for (z = 0 ; z< lz ; ++z){
// //#pragma unroll
// // for (y = 0 ; y< ly ; ++y){
// //#pragma unroll
// // for (x = 0 ; x< lx; ++x){
//
// const int tid=blockIdx.x*blockDim.x+threadIdx.x;
// int rest;
// int end_of_memory=lz*ly*(lx);
//
// z=(int) (tid/(ly*lx));
// rest=tid-z;
// y=(int)(rest/lx);
// x=rest-y;
//
// if (tid<end_of_memory){
// /*c.........only free nodes are considered here
// !if (.not. obstacles[z][y][x]) then
// c...........integral local density
// c...........initialize variable ro*/
// //memory optimised implementation
// buff[0]=hlp_Q0[index(z,y,x)];
// buff[1]=hlp_Q1[index(z,y,x)];
// buff[2]=hlp_Q2[index(z,y,x)];
// buff[3]=hlp_Q3[index(z,y,x)];
// buff[4]=hlp_Q4[index(z,y,x)];
// buff[5]=hlp_Q5[index(z,y,x)];
// buff[6]=hlp_Q6[index(z,y,x)];
// buff[7]=hlp_Q7[index(z,y,x)];
// buff[8]=hlp_Q8[index(z,y,x)];
// buff[9]=hlp_Q9[index(z,y,x)];
// buff[10]=hlp_Q10[index(z,y,x)];
// buff[11]=hlp_Q11[index(z,y,x)];
// buff[12]=hlp_Q12[index(z,y,x)];
// buff[13]=hlp_Q13[index(z,y,x)];
// buff[14]=hlp_Q14[index(z,y,x)];
// buff[15]=hlp_Q15[index(z,y,x)];
// buff[16]=hlp_Q16[index(z,y,x)];
// buff[17]=hlp_Q17[index(z,y,x)];
// buff[18]=hlp_Q18[index(z,y,x)];
//
// rho=0.0;
// for(int k=0; k<DENSITIES; ++k)
// rho+=buff[k];
//
// // rho=accumulate(buff, buff+DENSITIES, 0.0);
//
// reciprocal_rho=1.0/rho;
//
//
//
//
// switch(obstacles_d[index(z,y,x)]){
// case 1:
// u_x = 0.0;
// u_y = 0.0;
// u_z = 0.0;
// break;
// default:
// u_x = 0.0;
// u_x = reciprocal_rho*(buff[1] + buff[7] + buff[10] +buff[11] + buff[14]-
// (buff[3] + buff[8] + buff[9] +buff[12] + buff[13]));
//
// u_y = 0.0;
// u_y = reciprocal_rho*(buff[2]+buff[8]+buff[7]+buff[16] + buff[15] -
// (buff[4] + buff[9] + buff[10] +buff[17] + buff[18]));
//
// u_z = 0.0;
// u_z = reciprocal_rho*(buff[5]+buff[13]+buff[14]+buff[15]+buff[18]-
// (buff[6]+buff[12]+buff[11]+buff[16]+buff[17]));
// break;
// }//switch(obstacles[index(z,y,x)])
//
// //original implementation
// // rho=0.0;
// // rho+=D3_hlp.Q0[index(z,y,x)]+D3_hlp.Q1[index(z,y,x)]+D3_hlp.Q2[index(z,y,x)]+D3_hlp.Q3[index(z,y,x)];
// // rho+=D3_hlp.Q4[index(z,y,x)]+D3_hlp.Q5[index(z,y,x)]+D3_hlp.Q6[index(z,y,x)]+D3_hlp.Q7[index(z,y,x)];
// // rho+=D3_hlp.Q8[index(z,y,x)]+D3_hlp.Q9[index(z,y,x)]+D3_hlp.Q10[index(z,y,x)]+D3_hlp.Q11[index(z,y,x)];
// // rho+=D3_hlp.Q12[index(z,y,x)]+D3_hlp.Q13[index(z,y,x)]+D3_hlp.Q14[index(z,y,x)]+D3_hlp.Q15[index(z,y,x)];
// // rho+=D3_hlp.Q16[index(z,y,x)]+D3_hlp.Q17[index(z,y,x)]+D3_hlp.Q18[index(z,y,x)];
// // reciprocal_rho=1.0/rho;
//
// //...........x-, and y- velocity components
//
// // switch(obstacles[index(z,y,x)]){
// // case 1:
// // u_x = 0.0;
// // u_y = 0.0;
// // u_z = 0.0;
// // break;
// // default:
// // u_x = (FLOATING) reciprocal_rho*(D3_hlp.Q1[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q11[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] -
// // (D3_hlp.Q3[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] +
// // D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)]));
// //
// // u_y = (FLOATING) reciprocal_rho*(D3_hlp.Q2[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q15[index(z,y,x)] -
// // (D3_hlp.Q4[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q17[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)]));
// //
// // u_z = (FLOATING) reciprocal_rho*(D3_hlp.Q5[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] +
// // D3_hlp.Q15[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)] -
// // (D3_hlp.Q6[index(z,y,x)] + D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q11[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q17[index(z,y,x)]));
// // break;
// // }//switch(obstacles[index(z,y,x)])
//
//
//
//
// u_squ = (FLOATING) u_x*u_x + u_y*u_y + u_z*u_z;
// temp_factor= 0.5*(2.0* c_squ - u_squ)/c_squ;
// //u_squ = (FLOATING) pow(u_x,2) + pow(u_y,2) + pow(u_z,2);
//
//
// /*...........n- velocity compnents (n = lattice node connection vectors)
// c...........this is only necessary for clearence, and only 3 speeds would
// c...........be necessary*/
//
//
// //WARNING!!!! this array uses remapped (modified) direction indices!!!!
// u_n[0]= 0.0; //SHOULD NEVER USED!
// u_n[1] = u_x;
// u_n[2] = u_y;
// u_n[3] = - u_x;
// u_n[4] = - u_y;
// u_n[5] = u_z;
// u_n[6] = - u_z;
// u_n[7] = u_x + u_y;
// u_n[8] = - u_x + u_y;
// u_n[9] = - u_x - u_y;
// u_n[10] = u_x - u_y;
// u_n[11] = u_x - u_z;
// u_n[12] = - u_x - u_z;
// u_n[13] = - u_x + u_z;
// u_n[14] = u_x + u_z;
// u_n[15] = u_z + u_y;
// u_n[16] = - u_z + u_y;
// u_n[17] = - u_z - u_y;
// u_n[18] = u_z - u_y;
//
//#pragma unroll
// for(int i=0; i<DENSITIES; ++i){
// u_n__over__c_squ[i]=reciprocal_c_squ*u_n[i];
// u_n__over__c_squ__squared_and_halved[i]=0.5*u_n__over__c_squ[i]*u_n__over__c_squ[i];
// }
//
// /*c...........equilibrium densities
// c...........this can be rewritten to improve computational performance
// c...........considerabely !
// c
// c...........zero velocity density
// c*/
// //memory optimised implementation! WARNING!!! different from the original case!
//
// //two_x_c_squ_sqared=2.0*c_squ*c_squ;
// omega_x_rho_x_t_0=omega_x_t_0*rho;
// omega_x_rho_x_t_1=omega_x_t_1*rho;
// omega_x_rho_x_t_2=omega_x_t_2*rho;
//
//
// // //...........relaxation step
//
//
// //omega_x_rho_x_t_0*(1.0 - 0.5*u_squ/c_squ);
// Q0[index(z,y,x)]=buff[0]*one_minus_omega+omega_x_rho_x_t_0*(u_n__over__c_squ__squared_and_halved[0]+u_n__over__c_squ[0]+temp_factor);
//
//
//
//
// Q1[index(z,y,x)]=buff[1]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[1]+u_n__over__c_squ[1]+temp_factor);
// Q2[index(z,y,x)]=buff[2]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[2]+u_n__over__c_squ[2]+temp_factor);
// Q3[index(z,y,x)]=buff[3]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[3]+u_n__over__c_squ[3]+temp_factor);
// Q4[index(z,y,x)]=buff[4]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[4]+u_n__over__c_squ[4]+temp_factor);
// Q5[index(z,y,x)]=buff[5]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[5]+u_n__over__c_squ[5]+temp_factor);
// Q6[index(z,y,x)]=buff[6]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[6]+u_n__over__c_squ[6]+temp_factor);
//
//
//
// Q7[index(z,y,x)]= buff[ 7]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 7]+u_n__over__c_squ[ 7]+temp_factor);
// Q8[index(z,y,x)]= buff[ 8]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 8]+u_n__over__c_squ[ 8]+temp_factor);
// Q9[index(z,y,x)]= buff[ 9]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 9]+u_n__over__c_squ[ 9]+temp_factor);
// Q10[index(z,y,x)]=buff[10]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[10]+u_n__over__c_squ[10]+temp_factor);
// Q11[index(z,y,x)]=buff[11]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[11]+u_n__over__c_squ[11]+temp_factor);
// Q12[index(z,y,x)]=buff[12]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[12]+u_n__over__c_squ[12]+temp_factor);
// Q13[index(z,y,x)]=buff[13]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[13]+u_n__over__c_squ[13]+temp_factor);
// Q14[index(z,y,x)]=buff[14]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[14]+u_n__over__c_squ[14]+temp_factor);
// Q15[index(z,y,x)]=buff[15]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[15]+u_n__over__c_squ[15]+temp_factor);
// Q16[index(z,y,x)]=buff[16]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[16]+u_n__over__c_squ[16]+temp_factor);
// Q17[index(z,y,x)]=buff[17]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[17]+u_n__over__c_squ[17]+temp_factor);
// Q18[index(z,y,x)]=buff[18]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[18]+u_n__over__c_squ[18]+temp_factor);
//
// //original implementation
// // n_equ[0] = t_0 * rho*(1.0 - u_squ / (2.0 * c_squ));
// //
// // //...........axis speeds (factor: t_1)
// // #pragma unroll
// // for (int i = 1 ; i< 7; ++i){
// // n_equ[i] = t_1 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
// //
// // //...........diagonal speeds (factor: t_2)
// // #pragma unroll
// // for (int i = 7 ; i< 19; ++i){
// // n_equ[i] = t_2 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
//
// // D3.Q0[index(z,y,x)]=D3_hlp.Q0[index(z,y,x)]+omega*(n_equ[0] - D3_hlp.Q0[index(z,y,x)]);
// // D3.Q1[index(z,y,x)]=D3_hlp.Q1[index(z,y,x)]+omega*(n_equ[1] - D3_hlp.Q1[index(z,y,x)]);
// // D3.Q2[index(z,y,x)]=D3_hlp.Q2[index(z,y,x)]+omega*(n_equ[2] - D3_hlp.Q2[index(z,y,x)]);
// // D3.Q3[index(z,y,x)]=D3_hlp.Q3[index(z,y,x)]+omega*(n_equ[3] - D3_hlp.Q3[index(z,y,x)]);
// // D3.Q4[index(z,y,x)]=D3_hlp.Q4[index(z,y,x)]+omega*(n_equ[4] - D3_hlp.Q4[index(z,y,x)]);
// // D3.Q5[index(z,y,x)]=D3_hlp.Q5[index(z,y,x)]+omega*(n_equ[5] - D3_hlp.Q5[index(z,y,x)]);
// // D3.Q6[index(z,y,x)]=D3_hlp.Q6[index(z,y,x)]+omega*(n_equ[6] - D3_hlp.Q6[index(z,y,x)]);
// // D3.Q7[index(z,y,x)]=D3_hlp.Q7[index(z,y,x)]+omega*(n_equ[7] - D3_hlp.Q7[index(z,y,x)]);
// // D3.Q8[index(z,y,x)]=D3_hlp.Q8[index(z,y,x)]+omega*(n_equ[8] - D3_hlp.Q8[index(z,y,x)]);
// // D3.Q9[index(z,y,x)]=D3_hlp.Q9[index(z,y,x)]+omega*(n_equ[9] - D3_hlp.Q9[index(z,y,x)]);
// // D3.Q10[index(z,y,x)]=D3_hlp.Q10[index(z,y,x)]+omega*(n_equ[10] - D3_hlp.Q10[index(z,y,x)]);
// // D3.Q11[index(z,y,x)]=D3_hlp.Q11[index(z,y,x)]+omega*(n_equ[11] - D3_hlp.Q11[index(z,y,x)]);
// // D3.Q12[index(z,y,x)]=D3_hlp.Q12[index(z,y,x)]+omega*(n_equ[12] - D3_hlp.Q12[index(z,y,x)]);
// // D3.Q13[index(z,y,x)]=D3_hlp.Q13[index(z,y,x)]+omega*(n_equ[13] - D3_hlp.Q13[index(z,y,x)]);
// // D3.Q14[index(z,y,x)]=D3_hlp.Q14[index(z,y,x)]+omega*(n_equ[14] - D3_hlp.Q14[index(z,y,x)]);
// // D3.Q15[index(z,y,x)]=D3_hlp.Q15[index(z,y,x)]+omega*(n_equ[15] - D3_hlp.Q15[index(z,y,x)]);
// // D3.Q16[index(z,y,x)]=D3_hlp.Q16[index(z,y,x)]+omega*(n_equ[16] - D3_hlp.Q16[index(z,y,x)]);
// // D3.Q17[index(z,y,x)]=D3_hlp.Q17[index(z,y,x)]+omega*(n_equ[17] - D3_hlp.Q17[index(z,y,x)]);
// // D3.Q18[index(z,y,x)]=D3_hlp.Q18[index(z,y,x)]+omega*(n_equ[18] - D3_hlp.Q18[index(z,y,x)]);
//
// if (x == lx-2) {
// u_previous_spatial_boundary[index2D(z,y)] = u_x;
// v_previous_spatial_boundary[index2D(z,y)] = u_y;
// w_previous_spatial_boundary[index2D(z,y)] = u_z;
//
// u_current[index2D(z,y)] = u_x;
// v_current[index2D(z,y)] = u_y;
// w_current[index2D(z,y)] = u_z;
//
// }//if (x == lx-2)
// }//if memory!
// // }//for (x = 0 ; x< lx; ++x)
// // }//for (y = 0 ; y< ly ; ++y)
// // }//for (z = 0 ; z< lz ; ++z)
//
//}
//
//__global__
//void relaxation_kernel_v3(int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small,
// FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega,
// FLOATING reciprocal_c_squ,
// FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3,
// FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7,
// FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11,
// FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15,
// FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18,
// FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3,
// FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7,
// FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11,
// FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15,
// FLOATING *Q16, FLOATING *Q17, FLOATING *Q18,
// int *obstacles,
// FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary,
// FLOATING *u_current, FLOATING *v_current, FLOATING *w_current){
//
//
//
//
//
// /*One-step density relaxation process
//
// c.......density relaxation: a single time relaxation with relaxation
// c parameter omega is applied here. This step is only "local",
// c nothing is propagated through the lattice.
// c*/
//
// int x,y,z;
// FLOATING u_x=0.0, u_y=0.0, u_z=0.0, u_squ=0.0, rho=0.0, reciprocal_rho=0.0;
//
//
//
//
// FLOATING u_n[19];
// // FLOATING n_equ[19];
// FLOATING buff[19];
//
// // FLOATING u_n_squared[19];
// //FLOATING two_x_c_squ_sqared;
// const FLOATING omega_x_t_0=omega*t_0, omega_x_t_1=omega*t_1, omega_x_t_2=omega*t_2;
// FLOATING omega_x_rho_x_t_0, omega_x_rho_x_t_1, omega_x_rho_x_t_2;
// FLOATING temp_factor;
// FLOATING u_n__over__c_squ[19];
// FLOATING u_n__over__c_squ__squared_and_halved[19];
//
// //....square speed of sound
// /* compute the out let velocity with a convevtive boundary condition
// c.....loop over all nodes
// c.....attention: actual densities are stored after the propagation
// c step in the help-array n_hlp !*/
//
// //
// //#pragma unroll
// // for (z = 0 ; z< lz ; ++z){
// //#pragma unroll
// // for (y = 0 ; y< ly ; ++y){
// //#pragma unroll
// // for (x = 0 ; x< lx; ++x){
//
// const int tid=blockIdx.x*blockDim.x+threadIdx.x;
// int rest;
// int end_of_memory=lz*ly*(lx);
//
// z=(int) (tid/(ly*lx));
// rest=tid-z;
// y=(int)(rest/lx);
// x=rest-y;
//
// if (tid<end_of_memory){
// /*c.........only free nodes are considered here
// !if (.not. obstacles[z][y][x]) then
// c...........integral local density
// c...........initialize variable ro*/
// //memory optimised implementation
// buff[0]=hlp_Q0[tid];
// buff[1]=hlp_Q1[tid];
// buff[2]=hlp_Q2[tid];
// buff[3]=hlp_Q3[tid];
// buff[4]=hlp_Q4[tid];
// buff[5]=hlp_Q5[tid];
// buff[6]=hlp_Q6[tid];
// buff[7]=hlp_Q7[tid];
// buff[8]=hlp_Q8[tid];
// buff[9]=hlp_Q9[tid];
// buff[10]=hlp_Q10[tid];
// buff[11]=hlp_Q11[tid];
// buff[12]=hlp_Q12[tid];
// buff[13]=hlp_Q13[tid];
// buff[14]=hlp_Q14[tid];
// buff[15]=hlp_Q15[tid];
// buff[16]=hlp_Q16[tid];
// buff[17]=hlp_Q17[tid];
// buff[18]=hlp_Q18[tid];
//
// rho=0.0;
// for(int k=0; k<DENSITIES; ++k)
// rho+=buff[k];
//
// // rho=accumulate(buff, buff+DENSITIES, 0.0);
//
// reciprocal_rho=1.0/rho;
//
//
//
//
// switch(obstacles[tid]){
// case 1:
// u_x = 0.0;
// u_y = 0.0;
// u_z = 0.0;
// break;
// default:
// u_x = 0.0;
// u_x = reciprocal_rho*(buff[1] + buff[7] + buff[10] +buff[11] + buff[14]-
// (buff[3] + buff[8] + buff[9] +buff[12] + buff[13]));
//
// u_y = 0.0;
// u_y = reciprocal_rho*(buff[2]+buff[8]+buff[7]+buff[16] + buff[15] -
// (buff[4] + buff[9] + buff[10] +buff[17] + buff[18]));
//
// u_z = 0.0;
// u_z = reciprocal_rho*(buff[5]+buff[13]+buff[14]+buff[15]+buff[18]-
// (buff[6]+buff[12]+buff[11]+buff[16]+buff[17]));
// break;
// }//switch(obstacles[index(z,y,x)])
//
// //original implementation
// // rho=0.0;
// // rho+=D3_hlp.Q0[index(z,y,x)]+D3_hlp.Q1[index(z,y,x)]+D3_hlp.Q2[index(z,y,x)]+D3_hlp.Q3[index(z,y,x)];
// // rho+=D3_hlp.Q4[index(z,y,x)]+D3_hlp.Q5[index(z,y,x)]+D3_hlp.Q6[index(z,y,x)]+D3_hlp.Q7[index(z,y,x)];
// // rho+=D3_hlp.Q8[index(z,y,x)]+D3_hlp.Q9[index(z,y,x)]+D3_hlp.Q10[index(z,y,x)]+D3_hlp.Q11[index(z,y,x)];
// // rho+=D3_hlp.Q12[index(z,y,x)]+D3_hlp.Q13[index(z,y,x)]+D3_hlp.Q14[index(z,y,x)]+D3_hlp.Q15[index(z,y,x)];
// // rho+=D3_hlp.Q16[index(z,y,x)]+D3_hlp.Q17[index(z,y,x)]+D3_hlp.Q18[index(z,y,x)];
// // reciprocal_rho=1.0/rho;
//
// //...........x-, and y- velocity components
//
// // switch(obstacles[index(z,y,x)]){
// // case 1:
// // u_x = 0.0;
// // u_y = 0.0;
// // u_z = 0.0;
// // break;
// // default:
// // u_x = (FLOATING) reciprocal_rho*(D3_hlp.Q1[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q11[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] -
// // (D3_hlp.Q3[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] +
// // D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)]));
// //
// // u_y = (FLOATING) reciprocal_rho*(D3_hlp.Q2[index(z,y,x)] + D3_hlp.Q8[index(z,y,x)] + D3_hlp.Q7[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q15[index(z,y,x)] -
// // (D3_hlp.Q4[index(z,y,x)] + D3_hlp.Q9[index(z,y,x)] + D3_hlp.Q10[index(z,y,x)] +
// // D3_hlp.Q17[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)]));
// //
// // u_z = (FLOATING) reciprocal_rho*(D3_hlp.Q5[index(z,y,x)] + D3_hlp.Q13[index(z,y,x)] + D3_hlp.Q14[index(z,y,x)] +
// // D3_hlp.Q15[index(z,y,x)] + D3_hlp.Q18[index(z,y,x)] -
// // (D3_hlp.Q6[index(z,y,x)] + D3_hlp.Q12[index(z,y,x)] + D3_hlp.Q11[index(z,y,x)] +
// // D3_hlp.Q16[index(z,y,x)] + D3_hlp.Q17[index(z,y,x)]));
// // break;
// // }//switch(obstacles[index(z,y,x)])
//
//
//
//
// u_squ = (FLOATING) u_x*u_x + u_y*u_y + u_z*u_z;
// temp_factor= 0.5*(2.0* c_squ - u_squ)/c_squ;
// //u_squ = (FLOATING) pow(u_x,2) + pow(u_y,2) + pow(u_z,2);
//
//
// /*...........n- velocity compnents (n = lattice node connection vectors)
// c...........this is only necessary for clearence, and only 3 speeds would
// c...........be necessary*/
//
//
// //WARNING!!!! o pinakas autos exei tropopoihmena indices!!!!
// u_n[0]= 0.0; //SHOULD NEVER USED!
// u_n[1] = u_x;
// u_n[2] = u_y;
// u_n[3] = - u_x;
// u_n[4] = - u_y;
// u_n[5] = u_z;
// u_n[6] = - u_z;
// u_n[7] = u_x + u_y;
// u_n[8] = - u_x + u_y;
// u_n[9] = - u_x - u_y;
// u_n[10] = u_x - u_y;
// u_n[11] = u_x - u_z;
// u_n[12] = - u_x - u_z;
// u_n[13] = - u_x + u_z;
// u_n[14] = u_x + u_z;
// u_n[15] = u_z + u_y;
// u_n[16] = - u_z + u_y;
// u_n[17] = - u_z - u_y;
// u_n[18] = u_z - u_y;
//
//#pragma unroll
// for(int i=0; i<DENSITIES; ++i){
// u_n__over__c_squ[i]=reciprocal_c_squ*u_n[i];
// u_n__over__c_squ__squared_and_halved[i]=0.5*u_n__over__c_squ[i]*u_n__over__c_squ[i];
// }
//
// /*c...........equilibrium densities
// c...........this can be rewritten to improve computational performance
// c...........considerabely !
// c
// c...........zero velocity density
// c*/
// //memory optimised implementation! WARNING!!! different from the original case!
//
// //two_x_c_squ_sqared=2.0*c_squ*c_squ;
// omega_x_rho_x_t_0=omega_x_t_0*rho;
// omega_x_rho_x_t_1=omega_x_t_1*rho;
// omega_x_rho_x_t_2=omega_x_t_2*rho;
//
//
// // //...........relaxation step
//
//
// //omega_x_rho_x_t_0*(1.0 - 0.5*u_squ/c_squ);
// Q0[tid]=buff[0]*one_minus_omega+omega_x_rho_x_t_0*(u_n__over__c_squ__squared_and_halved[0]+u_n__over__c_squ[0]+temp_factor);
//
//
//
//
// Q1[tid]=buff[1]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[1]+u_n__over__c_squ[1]+temp_factor);
// Q2[tid]=buff[2]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[2]+u_n__over__c_squ[2]+temp_factor);
// Q3[tid]=buff[3]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[3]+u_n__over__c_squ[3]+temp_factor);
// Q4[tid]=buff[4]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[4]+u_n__over__c_squ[4]+temp_factor);
// Q5[tid]=buff[5]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[5]+u_n__over__c_squ[5]+temp_factor);
// Q6[tid]=buff[6]*one_minus_omega+omega_x_rho_x_t_1*(u_n__over__c_squ__squared_and_halved[6]+u_n__over__c_squ[6]+temp_factor);
//
//
//
// Q7[tid]= buff[ 7]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 7]+u_n__over__c_squ[ 7]+temp_factor);
// Q8[tid]= buff[ 8]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 8]+u_n__over__c_squ[ 8]+temp_factor);
// Q9[tid]= buff[ 9]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[ 9]+u_n__over__c_squ[ 9]+temp_factor);
// Q10[tid]=buff[10]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[10]+u_n__over__c_squ[10]+temp_factor);
// Q11[tid]=buff[11]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[11]+u_n__over__c_squ[11]+temp_factor);
// Q12[tid]=buff[12]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[12]+u_n__over__c_squ[12]+temp_factor);
// Q13[tid]=buff[13]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[13]+u_n__over__c_squ[13]+temp_factor);
// Q14[tid]=buff[14]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[14]+u_n__over__c_squ[14]+temp_factor);
// Q15[tid]=buff[15]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[15]+u_n__over__c_squ[15]+temp_factor);
// Q16[tid]=buff[16]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[16]+u_n__over__c_squ[16]+temp_factor);
// Q17[tid]=buff[17]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[17]+u_n__over__c_squ[17]+temp_factor);
// Q18[tid]=buff[18]*one_minus_omega+omega_x_rho_x_t_2*(u_n__over__c_squ__squared_and_halved[18]+u_n__over__c_squ[18]+temp_factor);
//
// //original implementation
// // n_equ[0] = t_0 * rho*(1.0 - u_squ / (2.0 * c_squ));
// //
// // //...........axis speeds (factor: t_1)
// // #pragma unroll
// // for (int i = 1 ; i< 7; ++i){
// // n_equ[i] = t_1 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
// //
// // //...........diagonal speeds (factor: t_2)
// // #pragma unroll
// // for (int i = 7 ; i< 19; ++i){
// // n_equ[i] = t_2 * rho*(1.0 + u_n[i] / c_squ
// // + (u_n[i]*u_n[i]) / (2.0 * c_squ *c_squ)
// // - u_squ / (2.0 * c_squ));
// // }
//
// // D3.Q0[index(z,y,x)]=D3_hlp.Q0[index(z,y,x)]+omega*(n_equ[0] - D3_hlp.Q0[index(z,y,x)]);
// // D3.Q1[index(z,y,x)]=D3_hlp.Q1[index(z,y,x)]+omega*(n_equ[1] - D3_hlp.Q1[index(z,y,x)]);
// // D3.Q2[index(z,y,x)]=D3_hlp.Q2[index(z,y,x)]+omega*(n_equ[2] - D3_hlp.Q2[index(z,y,x)]);
// // D3.Q3[index(z,y,x)]=D3_hlp.Q3[index(z,y,x)]+omega*(n_equ[3] - D3_hlp.Q3[index(z,y,x)]);
// // D3.Q4[index(z,y,x)]=D3_hlp.Q4[index(z,y,x)]+omega*(n_equ[4] - D3_hlp.Q4[index(z,y,x)]);
// // D3.Q5[index(z,y,x)]=D3_hlp.Q5[index(z,y,x)]+omega*(n_equ[5] - D3_hlp.Q5[index(z,y,x)]);
// // D3.Q6[index(z,y,x)]=D3_hlp.Q6[index(z,y,x)]+omega*(n_equ[6] - D3_hlp.Q6[index(z,y,x)]);
// // D3.Q7[index(z,y,x)]=D3_hlp.Q7[index(z,y,x)]+omega*(n_equ[7] - D3_hlp.Q7[index(z,y,x)]);
// // D3.Q8[index(z,y,x)]=D3_hlp.Q8[index(z,y,x)]+omega*(n_equ[8] - D3_hlp.Q8[index(z,y,x)]);
// // D3.Q9[index(z,y,x)]=D3_hlp.Q9[index(z,y,x)]+omega*(n_equ[9] - D3_hlp.Q9[index(z,y,x)]);
// // D3.Q10[index(z,y,x)]=D3_hlp.Q10[index(z,y,x)]+omega*(n_equ[10] - D3_hlp.Q10[index(z,y,x)]);
// // D3.Q11[index(z,y,x)]=D3_hlp.Q11[index(z,y,x)]+omega*(n_equ[11] - D3_hlp.Q11[index(z,y,x)]);
// // D3.Q12[index(z,y,x)]=D3_hlp.Q12[index(z,y,x)]+omega*(n_equ[12] - D3_hlp.Q12[index(z,y,x)]);
// // D3.Q13[index(z,y,x)]=D3_hlp.Q13[index(z,y,x)]+omega*(n_equ[13] - D3_hlp.Q13[index(z,y,x)]);
// // D3.Q14[index(z,y,x)]=D3_hlp.Q14[index(z,y,x)]+omega*(n_equ[14] - D3_hlp.Q14[index(z,y,x)]);
// // D3.Q15[index(z,y,x)]=D3_hlp.Q15[index(z,y,x)]+omega*(n_equ[15] - D3_hlp.Q15[index(z,y,x)]);
// // D3.Q16[index(z,y,x)]=D3_hlp.Q16[index(z,y,x)]+omega*(n_equ[16] - D3_hlp.Q16[index(z,y,x)]);
// // D3.Q17[index(z,y,x)]=D3_hlp.Q17[index(z,y,x)]+omega*(n_equ[17] - D3_hlp.Q17[index(z,y,x)]);
// // D3.Q18[index(z,y,x)]=D3_hlp.Q18[index(z,y,x)]+omega*(n_equ[18] - D3_hlp.Q18[index(z,y,x)]);
//
// if (x == lx-2) {
// u_previous_spatial_boundary[index2D(z,y)] = u_x;
// v_previous_spatial_boundary[index2D(z,y)] = u_y;
// w_previous_spatial_boundary[index2D(z,y)] = u_z;
//
// u_current[index2D(z,y)] = u_x;
// v_current[index2D(z,y)] = u_y;
// w_current[index2D(z,y)] = u_z;
//
// }//if (x == lx-2)
// }//if memory!
// // }//for (x = 0 ; x< lx; ++x)
// // }//for (y = 0 ; y< ly ; ++y)
// // }//for (z = 0 ; z< lz ; ++z)
//
//}
__global__
void relaxation_kernel_v4(const int lx, const int ly, const int lz, const FLOATING reynolds, FLOATING nu, FLOATING r_small,
FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega,
FLOATING reciprocal_c_squ,
const FLOATING *hlp_Q0, const FLOATING *hlp_Q1, const FLOATING *hlp_Q2, const FLOATING *hlp_Q3,
const FLOATING *hlp_Q4, const FLOATING *hlp_Q5, const FLOATING *hlp_Q6, const FLOATING *hlp_Q7,
const FLOATING *hlp_Q8, const FLOATING *hlp_Q9, const FLOATING *hlp_Q10, const FLOATING *hlp_Q11,
const FLOATING *hlp_Q12, const FLOATING *hlp_Q13, const FLOATING *hlp_Q14, const FLOATING *hlp_Q15,
const FLOATING *hlp_Q16, const FLOATING *hlp_Q17, const FLOATING *hlp_Q18,
FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3,
FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7,
FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11,
FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15,
FLOATING *Q16, FLOATING *Q17, FLOATING *Q18,
const int *obstacles,
FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary,
FLOATING *u_current, FLOATING *v_current, FLOATING *w_current, FLOATING *u_current_temp){
    /* One-step D3Q19 BGK density relaxation.
       A single-time relaxation with parameter omega is applied to the
       post-streaming densities hlp_Q* and written to Q*.  The step is
       purely local: nothing is propagated through the lattice.
       Launch: 1D grid, one thread per lattice node (lz*ly*lx nodes). */

    // Flat node index owned by this thread.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int end_of_memory = lz * ly * lx;
    if (tid >= end_of_memory)
        return;

    // Recover (z, y, x) from the flat index.
    // BUGFIX(review): the previous decomposition computed
    //     rest = tid - z;   x = rest - y;
    // dropping the ly*lx and lx factors, so (x, y) were wrong for every
    // node beyond the first row and the x == lx-2 outlet column below was
    // mis-identified.
    const int z    = tid / (ly * lx);
    const int rest = tid - z * (ly * lx);
    const int y    = rest / lx;
    const int x    = rest - y * lx;

    // Per-thread scratch.
    // BUGFIX(review): u_n and buff were declared __shared__ even though
    // each thread fills them with data for its own node only -- with more
    // than one thread per block that is a data race.  They are now
    // thread-local, matching the (commented-out) v3 implementation.
    FLOATING u_n[19];   // projected velocity per lattice direction (index 0 unused)
    FLOATING buff[19];  // post-streaming densities of this node
    FLOATING u_n__over__c_squ[19];
    FLOATING u_n__over__c_squ__squared_and_halved[19];

    const FLOATING omega_x_t_0 = omega * t_0;
    const FLOATING omega_x_t_1 = omega * t_1;
    const FLOATING omega_x_t_2 = omega * t_2;

    // Load the 19 post-streaming densities for this node.
    buff[0]  = hlp_Q0[tid];
    buff[1]  = hlp_Q1[tid];
    buff[2]  = hlp_Q2[tid];
    buff[3]  = hlp_Q3[tid];
    buff[4]  = hlp_Q4[tid];
    buff[5]  = hlp_Q5[tid];
    buff[6]  = hlp_Q6[tid];
    buff[7]  = hlp_Q7[tid];
    buff[8]  = hlp_Q8[tid];
    buff[9]  = hlp_Q9[tid];
    buff[10] = hlp_Q10[tid];
    buff[11] = hlp_Q11[tid];
    buff[12] = hlp_Q12[tid];
    buff[13] = hlp_Q13[tid];
    buff[14] = hlp_Q14[tid];
    buff[15] = hlp_Q15[tid];
    buff[16] = hlp_Q16[tid];
    buff[17] = hlp_Q17[tid];
    buff[18] = hlp_Q18[tid];

    // Integral local density and its reciprocal.
    FLOATING rho = 0.0;
#pragma unroll
    for (int k = 0; k < DENSITIES; ++k)
        rho += buff[k];
    const FLOATING reciprocal_rho = 1.0 / rho;

    // Macroscopic velocity components; obstacle nodes (obstacles[tid] == 1)
    // are forced to zero velocity via the (1 - obstacle) mask.
    const FLOATING u_x = (1 - obstacles[tid]) * reciprocal_rho * (buff[1] + buff[7] + buff[10] + buff[11] + buff[14] -
                         (buff[3] + buff[8] + buff[9] + buff[12] + buff[13]));
    const FLOATING u_y = (1 - obstacles[tid]) * reciprocal_rho * (buff[2] + buff[8] + buff[7] + buff[16] + buff[15] -
                         (buff[4] + buff[9] + buff[10] + buff[17] + buff[18]));
    const FLOATING u_z = (1 - obstacles[tid]) * reciprocal_rho * (buff[5] + buff[13] + buff[14] + buff[15] + buff[18] -
                         (buff[6] + buff[12] + buff[11] + buff[16] + buff[17]));

    const FLOATING u_squ = (FLOATING) u_x * u_x + u_y * u_y + u_z * u_z;
    // Common term of the equilibrium distribution: 1 - u^2/(2 c^2), scaled.
    const FLOATING temp_factor = 0.5 * (2.0 * c_squ - u_squ) / c_squ;

    /* n-velocity components (n = lattice node connection vectors).
       WARNING: this table uses the modified direction indexing of this
       implementation, not the original ordering. */
    u_n[0]  = 0.0; // rest direction -- never used in the equilibrium term
    u_n[1]  =   u_x;
    u_n[2]  =   u_y;
    u_n[3]  = - u_x;
    u_n[4]  = - u_y;
    u_n[5]  =   u_z;
    u_n[6]  = - u_z;
    u_n[7]  =   u_x + u_y;
    u_n[8]  = - u_x + u_y;
    u_n[9]  = - u_x - u_y;
    u_n[10] =   u_x - u_y;
    u_n[11] =   u_x - u_z;
    u_n[12] = - u_x - u_z;
    u_n[13] = - u_x + u_z;
    u_n[14] =   u_x + u_z;
    u_n[15] =   u_z + u_y;
    u_n[16] = - u_z + u_y;
    u_n[17] = - u_z - u_y;
    u_n[18] =   u_z - u_y;

#pragma unroll
    for (int i = 0; i < DENSITIES; ++i) {
        u_n__over__c_squ[i] = reciprocal_c_squ * u_n[i];
        u_n__over__c_squ__squared_and_halved[i] = 0.5 * u_n__over__c_squ[i] * u_n__over__c_squ[i];
    }

    // Relaxation step: Q = (1-omega)*buff + omega*rho*t_k*(equilibrium terms).
    // t_0: rest direction, t_1: axis directions (1-6), t_2: diagonals (7-18).
    const FLOATING omega_x_rho_x_t_0 = omega_x_t_0 * rho;
    const FLOATING omega_x_rho_x_t_1 = omega_x_t_1 * rho;
    const FLOATING omega_x_rho_x_t_2 = omega_x_t_2 * rho;

    Q0[tid] = buff[0] * one_minus_omega + omega_x_rho_x_t_0 * (u_n__over__c_squ__squared_and_halved[0] + u_n__over__c_squ[0] + temp_factor);

    Q1[tid] = buff[1] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[1] + u_n__over__c_squ[1] + temp_factor);
    Q2[tid] = buff[2] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[2] + u_n__over__c_squ[2] + temp_factor);
    Q3[tid] = buff[3] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[3] + u_n__over__c_squ[3] + temp_factor);
    Q4[tid] = buff[4] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[4] + u_n__over__c_squ[4] + temp_factor);
    Q5[tid] = buff[5] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[5] + u_n__over__c_squ[5] + temp_factor);
    Q6[tid] = buff[6] * one_minus_omega + omega_x_rho_x_t_1 * (u_n__over__c_squ__squared_and_halved[6] + u_n__over__c_squ[6] + temp_factor);

    Q7[tid]  = buff[7]  * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[7]  + u_n__over__c_squ[7]  + temp_factor);
    Q8[tid]  = buff[8]  * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[8]  + u_n__over__c_squ[8]  + temp_factor);
    Q9[tid]  = buff[9]  * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[9]  + u_n__over__c_squ[9]  + temp_factor);
    Q10[tid] = buff[10] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[10] + u_n__over__c_squ[10] + temp_factor);
    Q11[tid] = buff[11] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[11] + u_n__over__c_squ[11] + temp_factor);
    Q12[tid] = buff[12] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[12] + u_n__over__c_squ[12] + temp_factor);
    Q13[tid] = buff[13] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[13] + u_n__over__c_squ[13] + temp_factor);
    Q14[tid] = buff[14] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[14] + u_n__over__c_squ[14] + temp_factor);
    Q15[tid] = buff[15] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[15] + u_n__over__c_squ[15] + temp_factor);
    Q16[tid] = buff[16] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[16] + u_n__over__c_squ[16] + temp_factor);
    Q17[tid] = buff[17] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[17] + u_n__over__c_squ[17] + temp_factor);
    Q18[tid] = buff[18] * one_minus_omega + omega_x_rho_x_t_2 * (u_n__over__c_squ__squared_and_halved[18] + u_n__over__c_squ[18] + temp_factor);

    // Record the outlet-adjacent velocity plane used by the convective
    // boundary condition, but only where the true outlet node is fluid.
    // TODO: improve by having a dedicated kernel collect this data.
    if (x == lx - 2 && obstacles[index(z, y, (lx - 1))] == 0) {
        u_previous_spatial_boundary[index2D(z, y)] = u_x;
        v_previous_spatial_boundary[index2D(z, y)] = u_y;
        w_previous_spatial_boundary[index2D(z, y)] = u_z;
        u_current[index2D(z, y)] = u_x;
        v_current[index2D(z, y)] = u_y;
        w_current[index2D(z, y)] = u_z;
        u_current_temp[index2D(z, y)] = u_x;
    }
}
// Host-side wrapper: ensures the lattice data is device-resident, runs one
// relaxation step over every lattice node, and blocks until it completes.
void LBM::cuda_relaxation(){
    // Lazily migrate the lattice to the GPU if it currently lives on the host.
    if (data_location == CPU)
        copy_data_from_host_to_device();

    // 1D launch: one thread per lattice node.
    const dim3 block_dim(threads_for_streaming_collision_and_relaxation, 1, 1);
    const dim3 grid_dim(blocks_for_streaming_collision_and_relaxation, 1, 1);

    relaxation_kernel_v4<<<grid_dim, block_dim>>>( lx, ly, lz, reynolds, nu, r_small,
            t_0, t_1, t_2, c_squ, omega, one_minus_omega,
            reciprocal_c_squ,
            D3_hlp_d.Q0, D3_hlp_d.Q1, D3_hlp_d.Q2, D3_hlp_d.Q3,
            D3_hlp_d.Q4, D3_hlp_d.Q5, D3_hlp_d.Q6, D3_hlp_d.Q7,
            D3_hlp_d.Q8, D3_hlp_d.Q9, D3_hlp_d.Q10, D3_hlp_d.Q11,
            D3_hlp_d.Q12, D3_hlp_d.Q13, D3_hlp_d.Q14, D3_hlp_d.Q15,
            D3_hlp_d.Q16, D3_hlp_d.Q17, D3_hlp_d.Q18,
            D3_d.Q0, D3_d.Q1, D3_d.Q2, D3_d.Q3,
            D3_d.Q4, D3_d.Q5, D3_d.Q6, D3_d.Q7,
            D3_d.Q8, D3_d.Q9, D3_d.Q10, D3_d.Q11,
            D3_d.Q12, D3_d.Q13, D3_d.Q14, D3_d.Q15,
            D3_d.Q16, D3_d.Q17, D3_d.Q18,
            obstacles_d,
            u_previous_spatial_boundary_d, v_previous_spatial_boundary_d, w_previous_spatial_boundary_d,
            u_current_d, v_current_d, w_current_d, u_current_temp_d);

    // Wait for the kernel so callers may safely read/copy the results.
    cudaDeviceSynchronize();
}
|
22,852 | #include <stdio.h>
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = ( (N+threadsPerBlock-1) / threadsPerBlock );
// Print the CUDA error string and abort the process on any failed API call;
// a cudaSuccess status is a no-op.
static void HandleError( cudaError_t err )
{
    if (err == cudaSuccess)
        return;
    printf( "%s \n", cudaGetErrorString( err ));
    exit( 1 );
}
// Block-level dot product: each block reduces its threads' elementwise
// products in shared memory and writes one partial sum to c[blockIdx.x];
// the host finishes the reduction.  The tree reduction below requires
// threadsPerBlock to be a power of two, and the launch must cover every
// element of a/b exactly (no bounds guard is performed here).
__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    const int tid = threadIdx.x;
    const int gid = blockIdx.x * blockDim.x + tid;

    // One elementwise product per thread.
    cache[tid] = a[gid] * b[gid];
    // The whole tile must be populated before any thread starts reducing.
    __syncthreads();

    // Pairwise tree reduction in shared memory.
    for (int stride = blockDim.x / 2; stride != 0; stride /= 2) {
        if (tid < stride)
            cache[tid] += cache[tid + stride];
        __syncthreads();
    }

    // Lane 0 publishes the block's partial sum.
    if (tid == 0)
        c[blockIdx.x] = cache[0];
}
// Host driver: computes the dot product of a = [0..N) and b = 2*a on the GPU
// (one partial sum per block), finishes the reduction on the CPU, and prints
// the result next to the closed-form value 2 * sum(i^2).
int main( void ) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // allocate memory on the CPU side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    HandleError( cudaMalloc( (void**)&dev_a,
                             N*sizeof(float) ) );
    HandleError( cudaMalloc( (void**)&dev_b,
                             N*sizeof(float) ) );
    HandleError( cudaMalloc( (void**)&dev_partial_c,
                             blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++)
    {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    HandleError( cudaMemcpy( dev_a, a, N*sizeof(float),
                             cudaMemcpyHostToDevice ) );
    HandleError( cudaMemcpy( dev_b, b, N*sizeof(float),
                             cudaMemcpyHostToDevice ) );

    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b,
                                            dev_partial_c );
    // BUGFIX(review): kernel launches return no status directly; surface
    // launch-configuration errors here instead of ignoring them.
    HandleError( cudaGetLastError() );

    // copy the array 'c' back from the GPU to the CPU
    // (blocking copy: also waits for the kernel to finish)
    HandleError( cudaMemcpy( partial_c, dev_partial_c,
                             blocksPerGrid*sizeof(float),
                             cudaMemcpyDeviceToHost ) );

    // finish up on the CPU side: sum the per-block partial results
    c = 0;
    for (int i=0; i<blocksPerGrid; i++)
    {
        c += partial_c[i];
    }

    // closed form for sum_{i=0}^{x} i^2
    #define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf( "Does GPU value %.6g = %.6g ?\n", c,
            2 * sum_squares( (float)(N - 1) ) );

    // free memory on the GPU side
    HandleError( cudaFree( dev_a ) );
    HandleError( cudaFree( dev_b ) );
    HandleError( cudaFree( dev_partial_c ) );

    // free memory on the CPU side
    free( a );
    free( b );
    free( partial_c );
    return 0;
}
|
22,853 | #include "includes.h"
// For each image, scale the upsampled max location back to the original
// resolution and add the pad start: offset = padStart + maxlocUpSample * ratio.
// All three arrays hold interleaved (x, y) pairs: the even slot is the x
// component (uses zoomInRatioX) and the odd slot the y component
// (uses zoomInRatioY).  Launch with at least nImages threads total.
__global__ void cudaKernel_maxlocPlusZoominOffset(float *offset, const int * padStart, const int * maxlocUpSample, const size_t nImages, float zoomInRatioX, float zoomInRatioY)
{
    const int img = threadIdx.x + blockDim.x * blockIdx.x; // image index
    if (img < nImages)
    {
        const int ix = 2 * img;  // x slot of this image's pair
        const int iy = ix + 1;   // y slot of this image's pair
        offset[ix] = padStart[ix] + maxlocUpSample[ix] * zoomInRatioX;
        offset[iy] = padStart[iy] + maxlocUpSample[iy] * zoomInRatioY;
    }
}
22,854 | #include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
// CPU reference implementation of elementwise vector addition:
// vec_Z[i] = vec_X[i] + vec_Y[i] for the first nword elements.
void calc_on_cpu(float* vec_X, float* vec_Y, float* vec_Z, int nword)
{
    for (int idx = 0; idx != nword; ++idx)
        vec_Z[idx] = vec_X[idx] + vec_Y[idx];
}
// GPU elementwise vector addition: vec_Z[i] = vec_X[i] + vec_Y[i].
// One thread per element.
// BUGFIX(review): nword was accepted but never used, so any launch whose
// grid overshoots the array length read/wrote out of bounds; the guard
// below makes the kernel safe for arbitrary grid sizes.
__global__ void kernel(float* vec_X, float* vec_Y, float* vec_Z, int nword)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int i = blockDim.x*bid + tid;
    if (i < nword)
        vec_Z[i] = vec_X[i] + vec_Y[i];
}
// Host driver: builds X = [0..nword) and Y = -X, adds them on the GPU
// (every sum should be 0), copies the result back, and prints each element.
int main( int argc, char** argv)
{
    const int nb    = 512;  // blocks, max 65535
    const int nthre = 128;  // threads per block, max 512
    const int nword = nb * nthre;
    const int mem_size = sizeof(float) * nword;

    printf("# threads: %d \n", nb*nthre);
    printf("mem_size: %d Kbyte\n", mem_size >> 10);

    // Host-side buffers.
    float* hval_X = (float*) malloc(mem_size);
    float* hval_Y = (float*) malloc(mem_size);
    float* hval_Z = (float*) malloc(mem_size);

    // Device-side buffers.
    float *dval_X, *dval_Y, *dval_Z;
    cudaMalloc( (void**) &dval_X, mem_size);
    cudaMalloc( (void**) &dval_Y, mem_size);
    cudaMalloc( (void**) &dval_Z, mem_size);

    // X[i] = i and Y[i] = -i, so every element of Z should come back 0.
    for (int i = 0; i < nword; ++i) {
        hval_X[i] = (float) i;
        hval_Y[i] = -hval_X[i];
    }

    cudaMemcpy(dval_X, hval_X, mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(dval_Y, hval_Y, mem_size, cudaMemcpyHostToDevice);

    dim3 grid(nb);
    dim3 threads(nthre);
    kernel<<< grid, threads >>>(dval_X, dval_Y, dval_Z, nword);

    // Blocking copy back; this also synchronizes with the kernel.
    cudaMemcpy(hval_Z, dval_Z, mem_size, cudaMemcpyDeviceToHost);

    // calc_on_cpu(hval_X, hval_Y, hval_Z, nword);

    for (int i = 0; i < nword; ++i) {
        printf("%d: %f + %f => %f\n", i, hval_X[i], hval_Y[i], hval_Z[i]);
    }

    free(hval_X);
    free(hval_Y);
    free(hval_Z);
    cudaFree(dval_X);
    cudaFree(dval_Y);
    cudaFree(dval_Z);
    return (0);
}
|
22,855 | // Find pixels within histogram range specified by user.
// Add to gray color's count value atomically, and filter
// out pixels not within histogram range.
// by Bruno Costa Rendon
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 3
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
int *out_histogram;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through wrapper for CUDA API status codes.  In DEBUG/_DEBUG builds a
// failure prints the error string and exits; in release builds the status is
// silently returned to the caller -- i.e. errors are NOT fatal there.
// NOTE(review): consider checking in release builds too, since every call
// site ignores the returned status.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
// Histogram-range filter.
// For every pixel whose gray value lies in [lower, upper]:
//   - atomically increments histogram[value - lower]
//   - copies the pixel into hist_image
// Pixels outside the range are written to hist_image as 0.
// Launch with a 2D grid of TILE_SIZE x TILE_SIZE blocks covering
// width x height; out-of-image threads are guarded out.
__global__ void kernel(unsigned char *input,
                       unsigned char *hist_image,
                       int* histogram,
                       int lower, int upper,
                       unsigned int height,
                       unsigned int width) {

    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    int index = x + y * width;

    if (x < width && y < height) {
        unsigned char v = input[index];
        // If pixel is within histogram range.
        if (v >= lower && v <= upper) {
            atomicAdd(&histogram[v - lower], 1);
            hist_image[index] = v;
        } else {
            hist_image[index] = 0;
        }
    }
    // BUGFIX(review): removed a trailing __syncthreads() -- the kernel uses
    // no shared memory and no thread reads another thread's output, so a
    // barrier at the end of the kernel had no effect.
}
// Host wrapper: uploads the image, runs the histogram-range kernel, and
// copies back the filtered image plus the [lower, upper] histogram.
// Uses the file-scope device pointers input_gpu / output_gpu / out_histogram.
void classify(unsigned char *int_mat,
unsigned char *hist_image,
int* histogram,
int lower, int upper,
unsigned int height,
unsigned int width) {
// Grid dimensions: ceil-divide the image by the tile size.
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
// Device buffers are padded to whole tiles; only height*width bytes are copied.
int size = XSize*YSize;
int hist_size = upper-lower+1;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu, size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu, size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&out_histogram, hist_size*sizeof(int)));
// Zero-fill so padded pixels and untouched histogram bins stay 0.
checkCuda(cudaMemset(output_gpu, 0, size*sizeof(unsigned char)));
checkCuda(cudaMemset(out_histogram, 0, hist_size*sizeof(int)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
int_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
// Wait for all threads to synchronize
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
printf("All memory allocated and set.\n");
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, out_histogram, lower, upper, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(hist_image,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
checkCuda(cudaMemcpy(histogram,
out_histogram,
hist_size*sizeof(int),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
checkCuda(cudaFree(out_histogram));
}
|
22,856 | #include "includes.h"
// For every column i in this thread's slice [step*threadId, (threadId+1)*step),
// scan rows downward from y and store in res[i] the first row index j-1 where
// the absolute difference between adjacent rows exceeds 10.
// NOTE(review): `row` here is used as the row stride of `data`; confirm callers
// pass the pitch, not the row count.
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
// Compute the id of the current thread
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) // each thread handles its own slice of columns
{
for (int j = y; j > 0; j--) // walk the rows here
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
// if the current difference exceeds the delta threshold, remember the y-coordinate
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
22,857 | #include "includes.h"
// Upper-triangular outer product: for every pair (row, col) with row <= col,
// accumulate vec[row] * vec[col] into a packed upper-triangular result array
// (row-major packing, diagonal included). One thread per pair, 2-D launch.
__global__ void outerProductSmartBruteForce(float* resultMatrix, float* vec, int vectorLength)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Discard threads outside the vector or strictly below the diagonal.
bool outOfWork = (row >= vectorLength) || (col >= vectorLength) || (row > col);
if (outOfWork)
return;
// Packed index into the upper triangle.
int packed = row * vectorLength + col - (row * (row + 1)) / 2;
resultMatrix[packed] += vec[row] * vec[col];
}
22,858 | #include <stdio.h>
#include <time.h>
#define ROUND 32768*32768 // 32k ^ 2 = 1073741824
// Busy-spin kernel: iterates ROUND times on a single GPU thread.
// NOTE: the empty loop body may be optimized away entirely by the compiler.
__global__ void outputFromGPU()
{
    int iter = 0;
    while (iter < ROUND) { ++iter; } // GPU
}
// Compare wall-clock time of an empty GPU kernel launch vs an empty CPU loop.
int main(void)
{
    printf(":: Ex1 ::\n");

    // GPU: time launch + completion of the (empty) kernel.
    clock_t t0 = clock();
    outputFromGPU<<<1,1>>>();
    cudaDeviceSynchronize();
    clock_t t1 = clock();
    float timeGPU = (float)(t1 - t0) / CLOCKS_PER_SEC;

    // CPU: time an equivalent empty loop.
    t0 = clock();
    for (int i = 0; i < ROUND; i++) {} // CPU
    t1 = clock();
    float timeCPU = (float)(t1 - t0) / CLOCKS_PER_SEC;

    printf("(GPU) : time %f sec.\n", timeGPU);
    printf("(CPU) : time %f sec.\n", timeCPU);
    return 0;
}
|
22,859 | #include "includes.h"
// One thread per hub node: walk the node's adjacency slice
// [neighbor_start[nid], neighbor_start[nid+1]) accumulating neighbor weights.
// Writes the inclusive running prefix per edge and the per-node total.
__global__ void find_all_sums_hub_kernel(int* hub, int nhub, float *node_weight, int *neighbor, int *neighbor_start, float *neighbor_accum_weight_result, float *sum_weight_result){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= nhub)
return;
int node = hub[tid];
float running = 0.0f;
int first = neighbor_start[node];
int last = neighbor_start[node + 1];
for (int e = first; e < last; e++) { // e indexes the flat neighbor array
running += node_weight[neighbor[e]];
neighbor_accum_weight_result[e] = running; // inclusive prefix sum
}
sum_weight_result[node] = running;
}
22,860 | #include <stdio.h>
#include <algorithm>
#include <iterator>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <vector>
#include <map>
#include <mutex>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
typedef struct
{
int id;
double x;
double y;
} City;
typedef struct
{
double cost;
vector<int> path;
} PathCost;
typedef struct
{
int threadId;
unsigned long long numPermutations;
} TSPArgs;
void printDistanceMatrix(float*h_distances, int numCities, int numFeatures);
double fRand(double fMin, double fMax);
vector<City> generateCities(int numCities, int gridDimX, int gridDimY);
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true);
void genKey(vector<int> set, int z, long long &key);
#define DEBUG false
int NUM_THREADS=1024;
int numCities = 16;
int numFeatures = 3;
vector<vector<int>> subsets;
map<long long int, PathCost> solutionsMap;
mutex m;
float* h_distances;
// Pairwise Euclidean distances: one thread per (row, column) pair of the
// numInstances x numInstances matrix. Attribute 0 is the city id and is
// skipped. Both symmetric entries are stored; the (i,j) and (j,i) threads
// write identical values, so the duplicate store is benign.
__global__ void computeDistances(int numInstances, int numAttributes, float* dataset, float* distances)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < numInstances * numInstances)
{
int row = tid / numInstances;      // first instance index
int column = tid % numInstances;   // second instance index
int base1 = row * numAttributes;
int base2 = column * numAttributes;
float sum = 0;
for (int a = 1; a < numAttributes; a++) // start at 1: skip the id attribute
{
float d = dataset[base1 + a] - dataset[base2 + a];
sum += d * d;
}
float dist = (float) sqrt(sum);
distances[row * numInstances + column] = dist;
distances[column * numInstances + row] = dist; // mirror entry
}
}
// Population count: number of set bits in n.
// Uses the hardware popcount intrinsic __popcll (single instruction) instead
// of the original up-to-64-iteration shift-and-mask loop; result is identical.
__device__ unsigned long long countNumBits(unsigned long long n)
{
    return (unsigned long long) __popcll(n);
}
// Device-global output cursor: next free subset slot in permutationsOfK.
__device__ unsigned long long curPosition = 0;
// Scan integers [lowerBound, upperBound); for each value with exactly k set
// bits, atomically claim a k-wide slot in permutationsOfK and write the
// positions of the set bits (i.e. one k-subset of city indices per slot).
// Output order is nondeterministic -- it depends on atomicAdd scheduling.
__global__ void findPermutations(char* permutationsOfK, int k, unsigned long long lowerBound, unsigned long long upperBound)
{
unsigned long long tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long long numToCheck = lowerBound + tid;
unsigned long long count = 0;
unsigned long long curBitPosition = 0;
if (numToCheck < upperBound)
{
if (countNumBits(numToCheck) == k)
{
// Reserve a slot; `added` is this subset's index in the output buffer.
unsigned long long added = atomicAdd(&curPosition, 1);
unsigned long long permutationStartPos = (added) * (unsigned long long) k;
// Emit the index of every set bit, lowest first.
while (numToCheck)
{
if (numToCheck & 1)
{
permutationsOfK[permutationStartPos + count] = curBitPosition;
count++;
}
numToCheck >>= 1;
curBitPosition++;
}
}
}
}
// pthread worker for the Held-Karp layer update: each thread processes a
// contiguous slice of the global `subsets` list, computing the best cost of
// reaching endpoint k through each subset and inserting it into solutionsMap.
// NOTE(review): solutionsMap[key] is read WITHOUT holding the mutex while
// sibling threads insert under it -- and map::operator[] can itself insert --
// so this is a data race on the shared map; confirm/fix locking.
// NOTE(review): the heap-allocated TSPArgs passed in is never deleted (leak).
void * computeCosts(void* args)
{
TSPArgs* tspArgs = (TSPArgs*) args;
int threadId = tspArgs->threadId;
int numPermutations = tspArgs->numPermutations;
// Evenly split the subsets across NUM_THREADS workers (ceil-divide).
int calcsPerTask = (numPermutations + NUM_THREADS - 1) / NUM_THREADS;
int beginIndex = threadId * calcsPerTask;
int endIndex = min(beginIndex + calcsPerTask, numPermutations);
double currentCost = 0;
long long key = 0x00000;
for (int i = beginIndex; i < endIndex; i++)
{
vector<int> set = subsets[i];
for (int k : set)
{
vector<int> kSet
{ k };
vector<int> diff;
// diff = set \ {k}: the cities visited before arriving at k.
set_difference(set.begin(), set.end(), kSet.begin(), kSet.end(), inserter(diff, diff.begin()));
double minCost = INT_MAX;
vector<int> minPath;
int bestM;
// we initialized 2 levels earlier so this for loop will always be able to run.
for (int m : diff)
{
vector<int> mSet
{ m }; // need to generate the key for k-1
vector<int> noMoreM; // get rid of m because thats where we're going
set_difference(diff.begin(), diff.end(), mSet.begin(), mSet.end(), inserter(noMoreM, noMoreM.begin()));
genKey(noMoreM, m, key);
// cost(subset, k) = min over m of cost(subset \ {m}, m) + d(m, k)
currentCost = solutionsMap[key].cost + h_distances[m * numCities + k];
if (currentCost < minCost)
{
minCost = currentCost;
minPath = solutionsMap[key].path;
bestM = m;
}
}
genKey(diff, k, key);
PathCost pathCost;
pathCost.cost = minCost;
minPath.push_back(bestM);
pathCost.path = minPath;
m.lock();
solutionsMap.insert(pair<long long, PathCost>(key, pathCost));
m.unlock();
}
}
return NULL;
}
// Held-Karp TSP driver: seeds the 2-city level of solutionsMap, then for each
// subset size generates all k-subsets on the GPU (findPermutations) and
// relaxes costs on the CPU via pthread workers (computeCosts). Finally closes
// the tour back to city 0 and returns the best path as City objects.
// NOTE(review): d_permutationsOfK is cudaMalloc'd and h_permutationsOfK
// malloc'd but neither is freed before return -- presumably acceptable for a
// one-shot program, but worth fixing.
vector<City> tsp(vector<City> cities, int numCities, float* distances, float* d_distances)
{
cudaEvent_t permutationsStart, permutationsStop;
cudaEventCreate(&permutationsStart);
cudaEventCreate(&permutationsStop);
float permutationMilliseconds = 0;
pthread_t *threads = (pthread_t*) malloc(NUM_THREADS * sizeof(pthread_t));
int* threadIds = (int*) malloc(NUM_THREADS * sizeof(int));
for (int i = 0; i < NUM_THREADS; i++)
threadIds[i] = i;
vector<int> cityNums;
// convert cities back to integer array
for (int i = 1; i < numCities; i++)
{
cityNums.push_back(i);
}
// calculate the highest layer number so we know how large we need to be for our permutation storage at worst
double currentCost = 0;
long long key = 0x00000;
int k = numCities % 2 == 0 ? numCities / 2 : (ceil(numCities / 2));
// initalize first 2 levels of the lookup table
// Base case: path 0 -> i -> j costs d(0,i) + d(i,j); keyed by ({i}, j).
for (int i = 1; i < numCities; i++)
{
for (int j = 1; j < numCities; j++)
{
if (i == j)
continue;
vector<int> iSet
{ i };
genKey(iSet, j, key);
PathCost pathCost;
vector<int> path
{ 0, i };
pathCost.path = path;
pathCost.cost = distances[i * numCities + j] + distances[0 + i];
solutionsMap.insert(pair<long long, PathCost>(key, pathCost));
}
}
char* d_permutationsOfK;
char *h_permutationsOfK = (char*) malloc(pow(2, numCities) * sizeof(char) * k);
gpuErrchk(cudaMalloc(&d_permutationsOfK, pow(2, numCities) * sizeof(char) * k));
unsigned long long finalPos;
unsigned long long numPossibilities = pow(2, numCities); // - pow(2, k - 1);
int threadsPerBlock = 1024;
unsigned long long blocksPerGrid = ((numPossibilities) + threadsPerBlock - 1) / threadsPerBlock;
unsigned long long*curPosPtr;
gpuErrchk(cudaGetSymbolAddress((void** )&curPosPtr, curPosition));
// One Held-Karp layer per subset size.
for (int subsetSize = 2; subsetSize < numCities; subsetSize++)
{
cudaEventRecord(permutationsStart);
// Reset the device output cursor before regenerating subsets.
gpuErrchk(cudaMemset(curPosPtr, 0, sizeof(unsigned long long)));
findPermutations<<<blocksPerGrid, threadsPerBlock, 0>>>(d_permutationsOfK, subsetSize, (unsigned long long) (pow(2, subsetSize) - 1),
(unsigned long long) pow(2, numCities));
// cudaDeviceSynchronize();
// The blocking symbol/memcpy copies below also synchronize with the kernel.
gpuErrchk(cudaMemcpyFromSymbol(&finalPos, curPosition, sizeof(unsigned long long), 0, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_permutationsOfK, d_permutationsOfK, finalPos * sizeof(char) * subsetSize, cudaMemcpyDeviceToHost));
cudaEventRecord(permutationsStop);
cudaEventSynchronize(permutationsStop);
cudaEventElapsedTime(&permutationMilliseconds, permutationsStart, permutationsStop);
printf("%i choose %i is %llu and took %f ms\n", numCities, subsetSize, finalPos, permutationMilliseconds);
// use the permutations we generated here
// remember the permutations are stored in k length 'arrays' within the 1-D array we have them in
// so we need to index them interestingly.
// converting to vector<vector<int>> so I don't have to rethink the logic at the current moment... definitely need to in the interests of speed
subsets.clear();
for (int pos = 0; pos < finalPos; pos++)
{
vector<int> permutation;
for (int l = 0; l < subsetSize; l++)
{
permutation.push_back(h_permutationsOfK[pos * subsetSize + l]);
}
subsets.push_back(permutation);
}
int numThreads = min(NUM_THREADS, (int)(finalPos / 4));
for (int i = 0; i < numThreads; i++)
{
TSPArgs* args = new TSPArgs;
args->threadId = threadIds[i];
args->numPermutations = finalPos;
int status = pthread_create(&threads[i], NULL, computeCosts, (void*) args);
}
for (int i = 0; i < numThreads; i++)
{
pthread_join(threads[i], NULL);
}
//
// // printf("we have %i subsets of size %i\n", counter, i);
}
// Close the tour: pick the endpoint m that minimizes full-path cost + d(m, 0).
double minCost = INT_MAX;
vector<int> minPath;
int bestM;
for (int m : cityNums)
{
vector<int> mSet
{ m }; // need to generate the key for k-1
vector<int> noMoreM; // get rid of m because thats where we're going
set_difference(cityNums.begin(), cityNums.end(), mSet.begin(), mSet.end(), inserter(noMoreM, noMoreM.begin()));
genKey(noMoreM, m, key);
currentCost = solutionsMap[key].cost + distances[m * numCities + 0];
if (currentCost < minCost)
{
minCost = currentCost;
vector<int> path = solutionsMap[key].path;
minPath = path;
bestM = m;
}
}
minPath.push_back(bestM);
minPath.push_back(0);
// Map city indices back to City objects for the caller.
vector<City> bestPath;
for (int i = 0; i < minPath.size(); i++)
{
bestPath.push_back(cities[minPath[i]]);
}
printf("Cost for this set of %i cities was %f\n", numCities, minCost);
free(threads);
free(threadIds);
return bestPath;
}
// Entry point: parse city/thread counts, build random cities, compute the
// pairwise distance matrix on the GPU, then solve TSP (GPU subsets + CPU
// threads) and report timings.
int main(int argc, char*argv[])
{
if (argc < 3) {
printf("./cuda-tsp numCities numThreads\n");
exit(-1);
}
else{
numCities=atoi(argv[1]);
NUM_THREADS=atoi(argv[2]);
}
float* d_distances;
float* h_dataset;
float* d_dataset;
// k = ceil(numCities / 2): worst-case subset size for permutation storage.
int k = numCities % 2 == 0 ? numCities / 2 : (ceil(numCities / 2));
cudaEvent_t allStart, allStop, distStart, distStop;
cudaEventCreate(&allStart);
cudaEventCreate(&allStop);
cudaEventCreate(&distStart);
cudaEventCreate(&distStop);
float allMilliseconds = 0, distMilliseconds = 0;
vector<City> cities = generateCities(numCities, 500, 500);
// Pinned host buffers for faster transfers.
cudaMallocHost(&h_dataset, sizeof(float) * numCities * numFeatures);
cudaMalloc(&d_dataset, sizeof(float) * numCities * numFeatures);
cudaMallocHost(&h_distances, sizeof(float) * numCities * numCities);
cudaMalloc(&d_distances, sizeof(float) * numCities * numCities);
for (int i = 0; i < numCities; i++) // convert cities vector to the array the distance computation kernel expects
{
h_dataset[i * numFeatures] = cities[i].id; //cities[i].id;
h_dataset[i * numFeatures + 1] = cities[i].x;
h_dataset[i * numFeatures + 2] = cities[i].y;
}
cudaEventRecord(allStart);
int threadsPerBlock = 1024;
int blocksPerGrid = ((numCities * numCities) + threadsPerBlock - 1) / threadsPerBlock;
cudaEventRecord(distStart);
gpuErrchk(cudaMemcpy(d_dataset, h_dataset, numCities * numFeatures * sizeof(float), cudaMemcpyHostToDevice));
// NOTE(review): h_distances is uninitialized here; this upload only warms the
// buffer and is overwritten by the kernel -- it could likely be dropped.
gpuErrchk(cudaMemcpy(d_distances, h_distances, numCities * numCities * sizeof(float), cudaMemcpyHostToDevice));
computeDistances<<<blocksPerGrid, threadsPerBlock, 0>>>(numCities, numFeatures, d_dataset, d_distances);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(h_distances, d_distances, numCities * numCities * sizeof(float), cudaMemcpyDeviceToHost));
cudaEventRecord(distStop);
cudaEventSynchronize(distStop);
gpuErrchk(cudaFree(d_dataset));
vector<City> solution = tsp(cities, numCities, h_distances, d_distances);
gpuErrchk(cudaPeekAtLastError());
// cudaDeviceSynchronize();
cudaEventRecord(allStop);
cudaEventSynchronize(allStop);
cudaEventElapsedTime(&distMilliseconds, distStart, distStop);
cudaEventElapsedTime(&allMilliseconds, allStart, allStop);
printf("The distance calculation for %i cities took %llu ms.\n", numCities, (long long unsigned int) distMilliseconds);
// printf("The permutations calculation for %i cities took %llu ms.\n", numCities, (long long unsigned int) permutationMilliseconds);
printf("The salesman traversed %i cities in %llu ms.\n", numCities, (long long unsigned int) allMilliseconds);
cudaFreeHost(h_dataset);
cudaFreeHost(h_distances);
cudaFree(d_distances);
return 0;
}
// Dump the numCities x numCities distance matrix to stdout, one row per line.
// (numFeatures is unused; kept for signature compatibility with callers.)
void printDistanceMatrix(float*h_distances, int numCities, int numFeatures)
{
for (int r = 0; r < numCities; r++)
{
const float *rowPtr = h_distances + r * numCities;
for (int c = 0; c < numCities; c++)
{
printf("%f ", rowPtr[c]);
}
printf("\n");
}
}
// Report a failed CUDA call with file/line context; optionally terminate.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort)
{
// https://stackoverflow.com/a/14038590
if (code == cudaSuccess)
return;
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
// Pseudo-random double uniformly distributed in [fMin, fMax], driven by rand().
double fRand(double fMin, double fMax)
{
double unit = rand() / (double) RAND_MAX; // in [0, 1]
return fMin + unit * (fMax - fMin);
}
// Build the city list. In DEBUG mode a fixed 10-city layout is returned
// (numCities and the grid dimensions are ignored); otherwise numCities cities
// get uniformly random coordinates in [0, gridDimX] x [0, gridDimY].
vector<City> generateCities(int numCities, int gridDimX, int gridDimY)
{
vector<City> cities;
if (DEBUG)
{
// Same fixed coordinates as before, expressed as a table.
const double fixed[10][2] = {
{ 323.05, 24.73 },
{ 24.56, 101.00 },
{ 275.87, 44.57 },
{ 114.67, 186.45 },
{ 164.11, 334.44 },
{ 485.90, 401.21 },
{ 333.49, 464.63 },
{ 133.37, 168.05 },
{ 362.79, 255.52 },
{ 378.74, 235.48 }
};
for (int i = 0; i < 10; i++)
{
City c;
c.id = i;
c.x = fixed[i][0];
c.y = fixed[i][1];
cities.push_back(c);
}
}
else
{
for (int i = 0; i < numCities; i++)
{
City c;
c.id = i;
c.x = fRand(0, gridDimX);
c.y = fRand(0, gridDimY);
cities.push_back(c);
}
}
return cities;
}
// Encode a (subset, endpoint) pair as a map key: the low 8 bits hold the
// endpoint city z, and bit (j + 8) is set for every city j in the subset.
// Fix: the shift previously used an int literal (1 << (j + 8)), which is
// undefined behavior once j + 8 >= 31 and would truncate the 64-bit key;
// shifting 1LL keeps the full long long range.
void genKey(vector<int> set, int z, long long &key)
{
key = 0;
key |= z;
for (int j : set)
{
key |= (1LL << (j + 8));
}
}
|
22,861 | #include<stdio.h>
#include<math.h>
#define BLOCK_SIZE 1024
// Per-price ROE (%) for an inverse contract, one thread per price point:
//   ROE = side * leverage * (q/entry - q/price) / entry_value * 100
// Fix: `quantity/entry_price` was int/int integer division, truncating the
// entry value whenever quantity is not a multiple of entry_price; the host
// reference code computes it as quantity/(double)entry_price, so the kernel
// now does the same.
__global__ void multithreads_inverse_calculate(
double* d_x_in, double* d_x_out, double entry_value, int d_n, int quantity, int entry_price, int leverage, int short_long
)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < d_n){
double entry_val = (double)quantity / entry_price; // was int/int: truncated
double exit_val = quantity / d_x_in[tid];          // int/double: already fine
d_x_out[tid] = short_long*leverage*(entry_val - exit_val)/entry_value*100;
}
}
// Per-price ROE (%) for a regular (linear) contract, one thread per price:
//   ROE = side * leverage * (price - entry) / entry * 100
__global__ void multithreads_normal_calculate(
double* d_x_in, double* d_xnormal_out, int d_n, int entry_price, int leverage, int short_long
)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= d_n)
return;
double delta = d_x_in[tid] - entry_price;
d_xnormal_out[tid] = short_long * leverage * delta / entry_price * 100;
}
// Compute ROE curves for a price range on host and device and compare.
// NOTE(review): `double x_in[n]` etc. are variable-length arrays -- not
// standard C++ and a stack-overflow risk for large price ranges.
// NOTE(review): the inverse kernel launch is commented out below, so
// xinverse_out is copied from device memory that was never written.
int main(){
int quantity, entry_price, exit_price, leverage, short_long;
quantity = 1;
entry_price = 1;
exit_price = 1000;
leverage = 1;
short_long = -1;
//ROE inverse calculate
double entry_value, exit_value, profit, roe_inverse;
entry_value = quantity/(double)entry_price;
exit_value = quantity/(double)exit_price;
profit = entry_value-exit_value;
roe_inverse = (profit/entry_value)*100*leverage*short_long;
//ROE normal calculate
double roe_normal;
roe_normal = short_long*leverage*((exit_price-entry_price)/(double)entry_price)*100;
//find array x
// Number of whole price units between entry and exit (absolute value).
int num_arr;
if(entry_price > exit_price){
num_arr = (entry_price - exit_price);
} else{
num_arr = (exit_price - entry_price);
}
// 10 sample points per price unit, inclusive of both endpoints.
int n = num_arr*10+1;
double x_in[n], xinverse_out[n], xnormal_out[n];
for(int i = 0; i < n; i++){
if(entry_price <= exit_price){
x_in[i] = i*0.1 + entry_price;
} else{
x_in[i] = i*0.1 + exit_price;
}
}
//copy data from host to device
double* d_x_in, *d_xinverse_out, *d_xnormal_out;
cudaMalloc((void **) &d_x_in, n*sizeof(double));
cudaMalloc((void **) &d_xinverse_out, n*sizeof(double));
cudaMalloc((void **) &d_xnormal_out, n*sizeof(double));
cudaMemcpy(d_x_in, &x_in, n*sizeof(double), cudaMemcpyHostToDevice);
//time record start
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//Kernel launch
/*multithreads_inverse_calculate<<<ceil(n/(double)BLOCK_SIZE), BLOCK_SIZE>>>(
d_x_in, d_xinverse_out, entry_value, n, quantity, entry_price, leverage,short_long);*/
//time record stop
multithreads_normal_calculate<<<ceil(n/(double)BLOCK_SIZE), BLOCK_SIZE>>>(
d_x_in,d_xnormal_out,n,entry_price,leverage,short_long);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millisec = 0;
cudaEventElapsedTime(&millisec, start, stop);
//Copy data from device back to host. and free all data allocate on device
cudaMemcpy(&xinverse_out, d_xinverse_out, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&xnormal_out, d_xnormal_out, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x_in);
cudaFree(d_xinverse_out);
cudaFree(d_xnormal_out);
//prit check output
printf("\nROE inverse %.2lf%%\n", roe_inverse);
printf("ROE normal %.2lf%%\n", roe_normal);
printf("num arr: %d\n", num_arr);
printf("n: %d\n", n);
/*for(int i = 0; i < n; i++){
//printf("%.2lf ", xinverse_out[i]);
}
for(int i=0; i<n; i++){
printf("%.2lf" , xnormal_out[i]);
}*/
printf("Time: %.2f ms\n", millisec);
}
|
22,862 | /*
* Created by Harshavardhan Patil on 9/30/16.
*
*
* Matrix Normalization using CUDA :
* - The generated input values are stored by inverting the matrix. i.e All the attributes of a column which needs to be normalized
* are stored as elements of a row. So that while normalizing the threads in a block will access nearby elements which will
* optimize the code
* - The number of threads is hard coded to 16. This value is finalized after multiple iterations with values (32, 64, 128, 256, 512).
* This value is used along with the size of the matrix to calculate number of blocks.
* - Each block is assigned to calculate sum and squares of one row.
* In a block each thread will read one element and put it in a shared memory. Once this is done, partial sum is calculated and
* each block stores it's partial sum in the global memory area using it's block Id as index.
* - Once partial sum calculation is done by all the blocks, another kernal function is launched with this partial
* sum as the input in a single block. This block calculates the final sum and squares.
* - Partial sum using reduction works only if the number of elements passed to the block is a power of 2.
* So to avoid wrong calculation when number of blocks is not power of 2 (in calculateFinalSum method).
* The input array argument lentgh is set to the nearest power of 2 for the number of blocks and value 0 is set to those indices
* which are greater than number of blocks.
* - Then the population standard deviation is calculated for that row using formula (sumOfSquares + N * powf(mean, 2.0) - 2 * mean * sumOfTheElements)/N;
* Where,
* N - Size of the Matrix
* - The above values are used to calculate standard score of each element in that row.
* - The computed values are stored in the output matrix at their inverse position. This operation is done for all the elements
*
* Steps to compile and execute
-----------------------------
1) Go to folder "/home/hpatil2/hw4"
2) run : qlogin -q interactive.q
3) run : cd /home/hpatil2/hw4
4) run : nvcc MatrixNormalizationCuda.cu -o MatrixNormalizationCuda
5) run : ./MatrixNormalizationCuda 15000 4
In step 5 : [argument1 (15000) is MATRIX_SIZE, this is mandatory to pass. and maximum value it can take is 15000]
[argument2 (4) is seed value, this is an optional field]
*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>
/* Program Parameters */
#define MAXN 15000 /* Max value of N */
int N; /* Matrix size */
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
/* returns a seed for srand based on the time */
/* Seed for srand derived from the microsecond part of the current time. */
unsigned int time_seed() {
struct timeval now;
struct timezone tz_unused;
gettimeofday(&now, &tz_unused);
return (unsigned int) now.tv_usec;
}
/* Set the program parameters from the command-line arguments */
/* Parse command-line arguments: argv[1] = matrix size N (required, 1..MAXN),
 * argv[2] = optional random seed. Seeds rand() and exits on bad usage.
 * Fix: removed the unused local `char uid[32]` (only referenced by the
 * never-expanded `randm()` junk macro). */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
/* Default: randomize from the clock; an explicit seed below overrides it. */
srand(time_seed()); /* Randomize */
if (argc == 3) {
seed = atoi(argv[2]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 2) {
N = atoi(argv[1]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
/* Fill A with pseudo-random floats in [0, ~2) and zero-initialize B. */
void initialize_inputs() {
printf("\nInitializing...\n");
for (int row = 0; row < N; row++) {
for (int col = 0; col < N; col++) {
A[row][col] = (float)rand() / 32768.0;
B[row][col] = 0.0;
}
}
}
/* Print input matrices */
/* Print matrix A when it is small enough to read (N < 10). */
void print_inputs() {
if (N >= 10)
return;
printf("\nA =\n\t");
for (int col = 0; col < N; col++) {
for (int row = 0; row < N; row++) {
printf("%5.2f%s", A[row][col], (row < N-1) ? ", " : ";\n\t");
}
}
}
/* Print the normalized matrix B when it is small enough to read (N < 10). */
void print_B() {
if (N >= 10)
return;
printf("\nB =\n\t");
for (int row = 0; row < N; row++) {
for (int col = 0; col < N; col++) {
printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
/*
* This method calculates sum and square of given block
*/
/*
 * Per-block partial sum and sum-of-squares over `input` (n floats).
 * Classic shared-memory tree reduction: requires blockDim.x to be a power of
 * two (the host launches with 16) and blockDim.x <= 512 (shared capacity).
 * Block b writes its partials to sumResults[b] / squareResults[b].
 */
__global__ void calculateBlockSum(const float *input, float *sumResults, float *squareResults, const size_t n)
{
__shared__ float smSum[512];
__shared__ float smSquare[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tx = threadIdx.x;
/* Out-of-range threads contribute 0 so the tree below stays uniform. */
float x = 0;
if(i < n) {
x = input[i];
}
smSum[tx] = x;
smSquare[tx] = x*x;
__syncthreads();
int j;
/* Halve the active range each step; barrier outside the divergent branch. */
for(j = blockDim.x / 2; j > 0; j >>= 1) {
if(tx < j) {
smSum[tx] = smSum[tx]+ smSum[tx + j];
smSquare[tx] = smSquare[tx] + smSquare[tx + j];
}
__syncthreads();
}
if(threadIdx.x == 0) {
sumResults[blockIdx.x] = smSum[0];
squareResults[blockIdx.x] = smSquare[0];
}
}
/*
* This method calculates final sum and square
*/
/*
 * Final tree reduction over the per-block partial sums and squares.
 * Launched as <<<1, totalBlocks>>> with `size` = totalBlocks rounded up to a
 * power of two; the host zero-fills sumResults/squareResults up to `size`, so
 * the halving loop is well formed.  Requires size <= 512 (shared capacity).
 *
 * Fix: only totalBlocks threads exist, so shared slots [totalBlocks, size)
 * were never written and the reduction read uninitialized shared memory.
 * Each thread now loads ALL slots assigned to it with a stride-blockDim loop;
 * the padded tail picks up the host-zeroed input values.  The tree itself is
 * safe because size/2 <= totalBlocks always holds (size is the next power of
 * two at or above totalBlocks).
 */
__global__ void calculateFinalSum(float *sumResults, float *squareResults, const size_t size, float *finalSumResult, float *finalSigmaResult) {
__shared__ float smSum[512];
__shared__ float smSquare[512];
int tx = threadIdx.x;
for (int idx = tx; idx < (int)size; idx += blockDim.x) {
smSum[idx] = sumResults[idx];
smSquare[idx] = squareResults[idx];
}
__syncthreads();
int i;
for(i = size/2; i > 0; i >>= 1) {
if(tx < i) {
smSum[tx] = smSum[tx] + smSum[tx + i];
smSquare[tx] = smSquare[tx] + smSquare[tx + i];
}
__syncthreads();
}
if(threadIdx.x == 0) {
finalSumResult[0] = smSum[0];
finalSigmaResult[0] = smSquare[0];
}
}
/*
 * Copy one stored row of A (one logical column, N floats) to the device,
 * reduce it in two kernel passes, and return a malloc'd 2-element array:
 * results[0] = sum of elements, results[1] = sum of squares.
 * NOTE(review): the returned buffer is malloc'd on every call -- the caller
 * must free it or it leaks once per column.
 */
float* calculateSum (float *input, size_t n, float *dMatrixA, float *sumValue, float *sigmaValue, size_t blockSize, size_t totalBlocks, int nextNearestPowerOf2, float *finalSumResult, float *finalSigmaResult) {
float *results = (float *)malloc(sizeof(float) * 2);
cudaMemcpy(dMatrixA, input, sizeof(float) * N, cudaMemcpyHostToDevice);
calculateBlockSum<<<totalBlocks, blockSize>>> (dMatrixA, sumValue, sigmaValue, n);
calculateFinalSum<<<1,totalBlocks>>>(sumValue, sigmaValue, nextNearestPowerOf2, finalSumResult, finalSigmaResult);
/* The blocking memcpys below double as synchronization for both kernels. */
cudaMemcpy(&results[0], &finalSumResult[0], sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&results[1], &finalSigmaResult[0], sizeof(float), cudaMemcpyDeviceToHost);
return results;
}
/*
 * Normalize each logical column of A into B using the GPU reductions:
 * B[row][column] = (A[column][row] - mu) / sigma  (A is stored transposed, so
 * a logical column is a stored row).
 * Fixes: (1) the 2-float result buffer returned by calculateSum was leaked on
 * every iteration; (2) finalSumResult / finalSigmaResult were cudaMalloc'd
 * but never freed.
 * NOTE(review): `sigma` is the population *variance*, not its square root, so
 * values are divided by variance rather than standard deviation.  Left
 * unchanged to preserve the program's existing output -- confirm intent.
 */
void matrixNorm() {
int row, column;
float mu, sigma;
float *sumValue = 0, *sigmaValue = 0, *dMatrixA = 0;
float *finalSumResult = 0, *finalSigmaResult = 0;
printf("Parallel Computing.\n");
size_t blockSize = 16;
size_t totalBlocks;
if(N%blockSize == 0){
totalBlocks = (N/blockSize);
} else {
totalBlocks = (N/blockSize) + 1;
}
/* Reduction width: totalBlocks rounded up to a power of two; the padded tail
 * of the partial-sum arrays stays zero thanks to the memsets below. */
int nextNearestPowerOf2 = pow(2, ceil(log(totalBlocks)/log(2)));
cudaMalloc((void**)&sumValue, sizeof(float) * (nextNearestPowerOf2));
cudaMemset(sumValue, 0, sizeof(float) * nextNearestPowerOf2);
cudaMalloc((void**)&sigmaValue, sizeof(float) * (nextNearestPowerOf2));
cudaMemset(sigmaValue, 0, sizeof(float) * nextNearestPowerOf2);
cudaMalloc((void**)&dMatrixA, sizeof(float) * N);
cudaMalloc((void**)&finalSumResult, sizeof(float));
cudaMalloc((void**)&finalSigmaResult, sizeof(float));
for (column=0; column < N; column++) {
mu = 0.0;
float *result;
result = calculateSum ((float *)A[column], N, dMatrixA, sumValue, sigmaValue, blockSize, totalBlocks, nextNearestPowerOf2,finalSumResult,finalSigmaResult);
mu = result[0] / (float) N;
/* population variance via the expanded form E[x^2] + mu^2 - 2*mu*E[x] */
sigma = (result[1] + N * powf(mu, 2.0) - 2 * mu * result[0])/(float)N;
free(result); /* fix: was leaked every iteration */
for (row=0; row < N; row++) {
if (sigma == 0.0) {
B[row][column] = 0.0;
} else {
B[row][column] = (A[column][row] - mu) / sigma;
}
}
}
cudaFree(sumValue);
cudaFree(sigmaValue);
cudaFree(dMatrixA);
cudaFree(finalSumResult);   /* fix: was never freed */
cudaFree(finalSigmaResult); /* fix: was never freed */
}
/* Entry point: parse N and optional seed, fill A, normalize into B on the
 * GPU, and report elapsed wall-clock and CPU times. */
int main(int argc, char **argv) {
struct timeval etstart, etstop;
struct timezone tzdummy;
clock_t etstart2, etstop2;
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop;
parameters(argc, argv);
initialize_inputs();
print_inputs();
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
matrixNorm();
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/* Convert wall-clock endpoints to microseconds for the elapsed-time print. */
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
print_B();
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("--------------------------------------------\n");
exit(0);
}
22,863 | #include <stdio.h>
#include <cuda_runtime.h>
#include <asm/unistd.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <vector>
#include <sys/time.h>
#include <assert.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; optionally terminate.
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true)
{
if (code == cudaSuccess)
return;
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
// Repeatedly publish an increasing counter through managed memory (*dsum) and
// zero-copy host-mapped memory (d_mapping[0]), optionally issuing a
// system-wide and/or block-wide fence each iteration to measure fencing cost.
// NOTE(review): the fences run *after* both stores in each iteration, so this
// measures fence throughput rather than enforcing an ordering between the two
// writes -- confirm that is the intent.
__global__ void kernel(int number_of_threads, float * dsum ,volatile int * d_mapping, int cnt, int fence_system_flag, int fence_block_flag)
{
int i;
/*printf("D: i am [%d] \n", blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x);
*/
for(i=0; i < cnt ; i++){
*dsum = i;
d_mapping[0] = *dsum;
if(fence_system_flag){
__threadfence_system();
}
if(fence_block_flag){
__threadfence_block();
}
}
}
// Benchmark driver: parse -b/-t/-n/-f/-s options, run the fence kernel, and
// print its elapsed time measured with CUDA events.
int main(int argc, char **argv)
{
int opt, BLOCKS = 1, THREADS = 1, cnt =10000, fence_system_flag =0, fence_block_flag = 0;
cudaEvent_t start, stop;
float elapsed_time =0;
gpuErrchk(cudaEventCreate(&start));
gpuErrchk(cudaEventCreate(&stop));
while ((opt = getopt(argc, argv, "b:t:n:f:s:")) != -1) {
switch (opt) {
case 'b':
BLOCKS = atoi(optarg);
break;
case 't':
THREADS = atoi(optarg);
break;
case 'n':
cnt = atoi(optarg);
break;
case 'f':
fence_system_flag = atoi(optarg);
break;
case 's':
fence_block_flag = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -b [blocks] -t [threads] -n [count of iterations] -f [fence_system] -s [fence_block]\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
// Managed counter written by the kernel (allocated 8 bytes, used as float).
float * dsum;
gpuErrchk(cudaMallocManaged((void **) &dsum, sizeof(uint64_t)));
// Pinned, mapped host buffer: the device writes it through d_mapping.
volatile int * h_mapping;
gpuErrchk(cudaHostAlloc( (void**)&h_mapping, sizeof(volatile int), cudaHostAllocMapped));
volatile int * d_mapping;
gpuErrchk(cudaHostGetDevicePointer((void**)&d_mapping,(void*)h_mapping,0));
*dsum = 0;
cudaEventRecord(start, 0);
kernel <<< BLOCKS, THREADS >>> (BLOCKS * THREADS,dsum,d_mapping, cnt, fence_system_flag, fence_block_flag);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaEventRecord(stop, 0));
gpuErrchk(cudaEventSynchronize(stop));
gpuErrchk(cudaEventElapsedTime
(&elapsed_time, start, stop));
// Sanity check that the kernel ran. NOTE(review): the last value stored is
// cnt-1, so this assert fires for -n 1 even on a successful run.
assert(*dsum != 0);
printf("H: elapsed_time is : %f \n", elapsed_time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
22,864 | #include <stdio.h>
#include <math.h>
// Row-major matrix product c = a x b; one thread per output element.
// The global y index selects the output row (bounded by a_shape[0]) and the
// global x index selects the output column (bounded by b_shape[1]).
__global__ void matmul(float * a, float * b, float * c, int * a_shape, int * b_shape) {
if ((blockDim.y * blockIdx.y + threadIdx.y) < a_shape[0] && (blockDim.x * blockIdx.x + threadIdx.x) < b_shape[1]) {
// Walk row `y` of a contiguously (stride 1) ...
int aMin = (blockDim.y * blockIdx.y + threadIdx.y) * a_shape[1];
int aMax = (blockDim.y * blockIdx.y + threadIdx.y + 1) * a_shape[1];
int aStep = 1;
// ... and column `x` of b with stride b_shape[1] (row-major column walk).
int bMin = blockDim.x * blockIdx.x + threadIdx.x;
int bMax = blockDim.x * blockIdx.x + threadIdx.x + b_shape[0] * b_shape[1];
int bStep = b_shape[1];
float temp = 0;
for (int ai = aMin, bi = bMin; ai < aMax && bi < bMax; ai += aStep, bi += bStep) {
temp += a[ai] * b[bi];
}
// bMin also happens to be the output column index, so c[row][col] is
// a_index + bMin.
int a_index = (blockDim.y * blockIdx.y + threadIdx.y) * b_shape[1];
c[a_index + bMin] = temp;
}
}
// Out-of-place transpose of a row-major a_shape[0] x a_shape[1] matrix:
// one thread per element, a_T[c][r] = a[r][c].
__global__ void transpose(float * a, float * a_T, int * a_shape) {
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int src = row * a_shape[1] + col;
    if (src < a_shape[0] * a_shape[1]) {
        // Destination matrix has a_shape[0] columns (the transposed width).
        int dst = col * a_shape[0] + row;
        a_T[dst] = a[src];
    }
}
// Per-row mean of a row-major a_shape[0] x a_shape[1] matrix.
// Produces a column vector: mean[r] = average of row r; one thread per row.
__global__ void row_mean(float * a, float * mean, int * a_shape) {
    int r = blockDim.x * blockIdx.x + threadIdx.x;
    if (r >= a_shape[0])
        return;
    int base = r * a_shape[1];
    float acc = 0;
    // Row elements are contiguous in row-major storage.
    for (int c = 0; c < a_shape[1]; c++) {
        acc += a[base + c];
    }
    mean[r] = acc / a_shape[1];
}
// Per-column mean of a row-major a_shape[0] x a_shape[1] matrix.
// Produces a row vector: mean[c] = average of column c; one thread per column.
__global__ void column_mean(float * a, float * mean, int * a_shape) {
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (c >= a_shape[1])
        return;
    float acc = 0;
    // Successive column elements are a_shape[1] apart in row-major storage.
    for (int i = c; i < c + a_shape[1] * a_shape[0]; i += a_shape[1]) {
        acc += a[i];
    }
    mean[c] = acc / a_shape[0];
}
// Per-row minimum and its column index of a row-major matrix.
// min_row/arg_min are column vectors of length a_shape[0]; one thread per row.
__global__ void min_row(float * a, int * a_shape, float * min_row, int * arg_min) {
    int row_num = (blockDim.x * blockIdx.x + threadIdx.x);
    if (row_num < a_shape[0]) {
        int start_idx = row_num * a_shape[1];
        int end_idx = start_idx + a_shape[1];
        // Seed with the first column, then scan the rest of the row.
        min_row[row_num] = a[start_idx];
        arg_min[row_num] = 0;
        // FIX: the original condition was `col < end_idx, index < a_shape[1]`
        // — a comma expression that silently discards the first test.  The two
        // conditions happen to be equivalent here, but combine them with &&
        // so both are actually enforced.
        for (int col = start_idx + 1, index = 1; col < end_idx && index < a_shape[1]; col++, index++) {
            if (a[col] < min_row[row_num]) {
                min_row[row_num] = a[col];
                arg_min[row_num] = index;
            }
        }
    }
}
// Sum of a 3-D row-major tensor over its last axis:
// result[i][j] = sum_k a[i][j][k]; one thread per (i, j) pair.
// Layout: a[i][j][k] = a[k + a_shape[2]*j + a_shape[2]*a_shape[1]*i].
__global__ void sum_axis3(float * a, int * a_shape, float * result) {
    int j = (blockDim.x * blockIdx.x + threadIdx.x);   // middle-axis index
    int i = (blockDim.y * blockIdx.y + threadIdx.y);   // first-axis index
    if (i >= a_shape[0] || j >= a_shape[1])
        return;
    // The k-run for (i, j) is contiguous.
    int base = (i * a_shape[1] + j) * a_shape[2];
    float acc = 0;
    for (int k = 0; k < a_shape[2]; k++) {
        acc += a[base + k];
    }
    result[i * a_shape[1] + j] = acc;
}
// Sum of a 3-D row-major tensor over its middle axis:
// result[i][k] = sum_j a[i][j][k]; one thread per (i, k) pair.
// Layout: a[i][j][k] = a[k + a_shape[2]*j + a_shape[2]*a_shape[1]*i].
__global__ void sum_axis2(float * a, int * a_shape, float * result) {
    int k = (blockDim.x * blockIdx.x + threadIdx.x);   // last-axis index
    int i = (blockDim.y * blockIdx.y + threadIdx.y);   // first-axis index
    if (i >= a_shape[0] || k >= a_shape[2])
        return;
    float acc = 0;
    // Consecutive j values are a_shape[2] elements apart.
    int idx = i * a_shape[1] * a_shape[2] + k;
    for (int j = 0; j < a_shape[1]; j++, idx += a_shape[2]) {
        acc += a[idx];
    }
    result[i * a_shape[2] + k] = acc;
}
// Sum of a 3-D row-major tensor over its first axis:
// result[j][k] = sum_i a[i][j][k]; one thread per (j, k) pair.
// Layout: a[i][j][k] = a[k + a_shape[2]*j + a_shape[2]*a_shape[1]*i].
__global__ void sum_axis1(float * a, int * a_shape, float * result) {
    int k = (blockDim.x * blockIdx.x + threadIdx.x);   // last-axis index
    int j = (blockDim.y * blockIdx.y + threadIdx.y);   // middle-axis index
    if (j >= a_shape[1] || k >= a_shape[2])
        return;
    float acc = 0;
    int plane = a_shape[2] * a_shape[1];   // stride between consecutive i slices
    int idx = j * a_shape[2] + k;
    for (int i = 0; i < a_shape[0]; i++, idx += plane) {
        acc += a[idx];
    }
    result[j * a_shape[2] + k] = acc;
}
// For each data point, writes the index of the nearest centroid in mu
// (squared Euclidean distance) into arg_min; one thread per data row.
// data is data_shape[0] x data_shape[1]; mu is mu_shape[0] x mu_shape[1].
__global__ void argmin_mu_diff(float * data, float * mu, int * data_shape, int * mu_shape, int * arg_min) {
    int data_id = blockDim.x * blockIdx.x + threadIdx.x;
    if (data_id < data_shape[0]) {
        int startIdx = data_id * data_shape[1];
        // NOTE(review): INT_MAX as a float sentinel assumes all squared
        // distances stay below ~2.1e9 — confirm, or switch to FLT_MAX.
        float min_diff = INT_MAX;
        // FIX: the best index is stored into an int array, so track it as an
        // int (the original used a float, forcing a float->int conversion).
        int arg_min_diff = -1;
        for (int i = 0; i < mu_shape[0]; i++) {
            float diff = 0;
            for (int dim = 0; dim < mu_shape[1]; dim++) {
                float d = data[startIdx + dim] - mu[i * mu_shape[1] + dim];
                diff += d * d;
            }
            if (diff < min_diff) {
                min_diff = diff;
                arg_min_diff = i;
            }
        }
        arg_min[data_id] = arg_min_diff;
    }
}
22,865 | #include <algorithm>
#include <cstdio>
#include <cstring>
// Prints up to the 8x8 top-left corner of a row-major h-by-w matrix,
// marking horizontally truncated rows and a vertically truncated matrix
// with "...".
static void print_matrix(const char *name, const float *matrix, int h, int w) {
    const int rows = std::min(h, 8);
    const int cols = std::min(w, 8);
    std::printf("%s = [\n", name);
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c)
            std::printf(" %5g", matrix[r*w+c]);
        // End the row, flagging columns that were cut off.
        std::puts(cols == w ? "" : " ...");
    }
    // Close the bracket, flagging rows that were cut off.
    std::puts(rows == h ? "]\n" : "... ]\n");
}
// Naive size x size matrix product q = a * b (row-major).
// Launched as a single block of (size, size) threads: thread (x, y)
// computes the element at row x, column y.
__global__ static void matrix_mul(float q[], const float a[], const float b[], int size) {
    int row = threadIdx.x;
    int col = threadIdx.y;
    float acc = 0;
    for (int k = 0; k < size; ++k)
        acc += a[row*size+k] * b[k*size+col];
    q[row*size+col] = acc;
}
// Fetches (and clears) the last CUDA runtime error; aborts with a message
// on failure, otherwise returns cudaSuccess to the caller.
static cudaError_t report_error(void) {
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        std::fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(status));
        std::abort();
    }
    return status;
}
// Driver: builds two size x size matrices (entries 1..size^2), multiplies
// them on the GPU with a single-block kernel, and either prints the result
// (normal mode) or emits only the elapsed seconds (argv[3] == "bench").
// NOTE(review): the single-block launch dim3(size, size) caps size at the
// per-block thread limit (1024 threads => size <= 32) — confirm intent.
int main(int argc, char *argv[]) {
int size = 8;
if(argc >= 2) {
std::sscanf(argv[1], "%d", &size);
}
// "bench" mode suppresses matrix printing and prints a bare time value.
bool bench = argc >= 4 && std::strcmp(argv[3], "bench") == 0;
float *a = new float[size*size];
float *b = new float[size*size];
for(int i = 0; i < size*size; ++i) {
a[i] = i+1;
}
for(int i = 0; i < size*size; ++i) {
b[i] = i+1;
}
if(!bench) {
print_matrix("a", a, size, size);
print_matrix("b", b, size, size);
}
float *d_a, *d_b, *d_c;
cudaMalloc(&d_a, size*size*sizeof *d_a); report_error();
cudaMalloc(&d_b, size*size*sizeof *d_b); report_error();
cudaMalloc(&d_c, size*size*sizeof *d_c); report_error();
cudaMemcpy(d_a, a, size*size*sizeof *a, cudaMemcpyHostToDevice); report_error();
cudaMemcpy(d_b, b, size*size*sizeof *b, cudaMemcpyHostToDevice); report_error();
// Time the kernel with a pair of CUDA events bracketing the launch.
cudaEvent_t start, end;
cudaEventCreate(&start); report_error();
cudaEventCreate(&end); report_error();
cudaEventRecord(start); report_error();
cudaEventSynchronize(start); report_error();
matrix_mul<<<1, dim3(size, size)>>>(d_c, d_a, d_b, size); report_error();
cudaEventRecord(end); report_error();
cudaEventSynchronize(end); report_error();
float elapsed;
cudaEventElapsedTime(&elapsed, start, end); report_error();
cudaEventDestroy(end); report_error();
cudaEventDestroy(start); report_error();
if(!bench) {
float *c = new float[size*size];
cudaMemcpy(c, d_c, size*size*sizeof *c, cudaMemcpyDeviceToHost); report_error();
print_matrix("a * b", c, size, size);
delete[] c;
// cudaEventElapsedTime returns milliseconds; convert to seconds.
std::printf("%.9g s elapsed\n", elapsed * 0.001f);
} else {
std::printf("%.9g", elapsed * 0.001f);
}
cudaFree(d_c); report_error();
cudaFree(d_b); report_error();
cudaFree(d_a); report_error();
delete[] b;
delete[] a;
return 0;
}
|
22,866 | /*
bfield.c
Computes B fields in toroidal coordinates with given coefficients
and calculates rms deviation from data
Written by Hee Sok Chung at ANL
July 10, 2016
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <TTree.h>
//#include <TFile.h>
#include <iostream>
#include <cuda_runtime.h>
/* hypergeometric function 1F2 */
double h1f2(double a, double b, double c, double z);
/* LegendreQ(m-1/2,n,cosh(x)) */
double LegendreQ(double m, double n, double z);
/* derivative of LegendreQ(m-1/2,n,cosh(x)) */
double DLegendreQ(int m, int n, double z);
/* coordinate transformation r, z -> zeta */
double zetaf(double rho, double z, double r0);
/* coordinate transformation r, z -> eta */
double etaf(double rho, double z, double r0);
typedef struct B_Struct{
double Prb[25];
}B_Struct;
using namespace std;
__global__ void thread_call(const double *cc,const double *cs,const double *sc,const double *ss,const double *zeta,const double *eta,const double *Tlegq,const double *Tdleq,const int *midx,const int *nidx,const int*ccidx,const double *dataread,double *dataoutZ,double *dataoutR,double *dataoutPhi,const double rr,const double bzero,const int dim1,int nphi);
// Driver for the toroidal B-field fit evaluation: reads fitted expansion
// coefficients (fitc.txt / outnames.txt) and probe data (data52.txt),
// precomputes LegendreQ tables at the 25 probe positions, evaluates the
// field on the GPU via thread_call, then writes per-probe results and RMS
// deviation statistics.
int main () {
double *cc, *cs, *sc, *ss, *lsigma;
int *ccidx;
int *midx;
int *nidx;
double *Tlegq;
double *Tdleq;
double *zeta, *eta;
double * dataread;
double * dataoutZ;
double * dataoutR;
double * dataoutPhi;
cudaError_t err = cudaSuccess;
zeta = (double*)malloc(30*sizeof(double));
eta = (double*)malloc(30*sizeof(double));
int nmax, mmax, nphi, dim1, dim2;
FILE *input, *iname, *datafile, *datac , *output;
int Nthread = 8;
// input = fopen ("fitcoeffN.txt", "r");
input = fopen ("fitc.txt", "r");
iname = fopen ("outnames.txt", "r");
output = fopen ("output52full.txt","w");
fscanf (iname, "%d", &mmax); // read m Maximum
fscanf (iname, "%d", &nmax); // read n Maximum
// fscanf (input, "%d", &mmax); // read m Maximum
// fscanf (input, "%d", &nmax); // read n Maximum
printf("MMax %d, NMax %d \n",mmax, nmax);
dim1=(nmax+1)*(mmax+1); // coefficients dimensions
double cterm, rr;
// int ccidx[mmax+2][nmax+2];
double zt, et, zt0, bzero, br, bz, bphi, bsize;
double difrms, dzrms, ffrms, rmsd;
double angles[30], radii[30], bfield[30][5];
int i, j, id, idx, md, nd, mdx, ndx, prb, na, ma, qidx, vid;
double datatmp, tp, tx, phi, wgt, iwgt, legq, dleq;
double phidz, phidp, phide, phide1, phide2;
B_Struct B_Measure;
B_Struct B_Fit;
B_Struct B_ZFit;
B_Struct B_RFit;
B_Struct B_PhiFit;
cc=(double *)malloc(sizeof*cc*(dim1+2));
cs=(double *)malloc(sizeof*cs*(dim1+2));
sc=(double *)malloc(sizeof*sc*(dim1+2));
ss=(double *)malloc(sizeof*ss*(dim1+2));
lsigma=(double *)malloc(sizeof*lsigma*(nmax+2));
int ccidx_dim = dim1+2;
ccidx=(int *)malloc((dim1+2)*(dim1+2)*sizeof(int));
if(ccidx==NULL){
printf("out of memory\n");
}
// NOTE(review): sizeof(int*) over-allocates these two arrays of int;
// harmless, but probably meant sizeof(int).
midx=(int *)malloc((dim1+2)*sizeof(int*));
nidx=(int *)malloc((dim1+2)*sizeof(int*));
bzero=61.789; // average B-field
zt0=6.5; // eigenfunction normalization point
// construct probe positions: probe 1 at the center, 8 at r=22.5,
// 16 at r=45 (angles evenly spaced).
angles[1]=0;
radii[1]=0;
for (i=2;i<=9;i++) {
angles[i]=(i-2.)*M_PI/4;
radii[i]=22.5;
}
for (i=10;i<=25;i++) {
angles[i]=(i-10.)*M_PI/8;
radii[i]=45.;
}
for (i=1;i<=25;i++) {
printf("probe %d location r=%.17g, theta=%.17g \n",i,radii[i],angles[i]);
}
// calculate toroidal coordinates
rr=7111.5; // toroid center
printf ("\nComputing coordinates\n");
for (i=1;i<=25;i++) {
zeta[i]=zetaf(radii[i]*sin(angles[i])+7112., \
-radii[i]*cos(angles[i]),rr);
eta[i]=etaf(radii[i]*sin(angles[i])+7112., \
-radii[i]*cos(angles[i]),rr);
}
/* initialize arrays */
na=0;
ma=0;
i=0;
nd=0;
md=0;
lsigma[0]=1.;
lsigma[nmax]=0.;
// Flatten (m, n) -> linear index i via ccidx, zero all coefficient
// arrays, and fill the Lanczos-style sigma factors.
for (nd=0;nd<=nmax;nd++) {
for (md=0;md<=mmax;md++) {
i+=1;
ccidx[md*ccidx_dim+nd]=i;
midx[i]=md;
nidx[i]=nd;
cc[i]=0.; cs[i]=0.; sc[i]=0.; ss[i]=0.;
}
if(nd>0&&nd<nmax) {
lsigma[nd]=sin(M_PI*(nd*1.)/(nmax*1.));
lsigma[nd]=lsigma[nd]/(M_PI*(nd*1.)/(nmax*1.));
}
}
for (idx=1;idx<=dim1*4;idx++) { // read coefficients
fscanf (iname, "%d \t %d \t %d", &id, &ma, &na);
// fscanf (input, "%d \t %d \t %d \t %lg", &id, &ma, &na, &tp);
fscanf (input, "%lg", &tp);
// printf("%d, %d, %lg \n", id, ccidx[ma*ccidx_dim+na], tp);
// if (tp!=tp) printf("%d, %d, %lg \n", id, ccidx[ma][na], tp);
if (id==1) {
// printf("cc \n");
cc[ccidx[ma*ccidx_dim+na]]=tp;
}
else if (id==2) {
// printf("cs \n");
cs[ccidx[ma*ccidx_dim+na]]=tp;
}
else if (id==3) {
// printf("sc \n");
sc[ccidx[ma*ccidx_dim+na]]=tp;
}
else if (id==4) {
// printf("ss \n");
ss[ccidx[ma*ccidx_dim+na]]=tp;
}
else {
printf("error \n");
}
// printf("%.20lg \n",tp);
}
fclose(input); // close data
fclose(iname); // close data
printf ("\nComputing b-fields\n");
nphi=0;
datac=fopen("data52.txt", "r"); // open data for counting
while(fscanf(datac, "%lg", &datatmp)>0) {
nphi++;
}
nphi=nphi/26; // azimuthal angle + 25 probes
fclose(datac); // close data file used for counting
//Loading data first
dataread = (double *)malloc(sizeof(double)*26*nphi);
dataoutZ = (double *)malloc(sizeof(double)*26*nphi);
dataoutR = (double *)malloc(sizeof(double)*26*nphi);
dataoutPhi = (double *)malloc(sizeof(double)*26*nphi);
double tmp;
datafile = fopen ("data52.txt", "r");
// Each record: azimuthal angle (degrees) followed by 25 probe readings
// (stored as mT offset, converted to absolute field here).
for (id=0;id<nphi;id++) { // loop over azimuthal slices
fscanf (datafile, "%lg", &tmp); // read azimuthal angle in degrees
dataread[id*26]=tmp*M_PI/180.;
for (prb=1;prb<=25;prb++){ // loop over 25 probes
fscanf (datafile, "%lg", &tmp);
dataread[id*26+prb]=tmp*0.001+61.7400000;
}
}
fclose(datafile);
//Zero output
for (id=0;id<nphi;id++) { // loop over azimuthal slices
for (prb=1;prb<=25;prb++){ // loop over 25 probes
dataoutZ[id*26+prb]=0.0;
dataoutR[id*26+prb]=0.0;
dataoutPhi[id*26+prb]=0.0;
}
}
//Calculate legq and dleq table
// LegendreQ values depend only on (m, n, probe), so they are tabulated
// once here instead of being recomputed per azimuthal slice on the GPU.
Tlegq=(double *)malloc((dim1+1)*26*sizeof(double));
Tdleq=(double *)malloc((dim1+1)*26*sizeof(double));
for (prb=0;prb<=25;prb++){ // loop over 25 probes
for (i=0;i<=dim1;i++){
Tlegq[i*26+prb]=0.0;
Tdleq[i*26+prb]=0.0;
}
}
for (prb=1;prb<=25;prb++){ // loop over 25 probes
for (i=1;i<=dim1;i++){
int md=midx[i]; // m index
int nd=nidx[i]; // n index
/* LegendreQ at probe */
Tlegq[i*26+prb]=LegendreQ(md,nd,zeta[prb])/LegendreQ(md,nd,zt0);
/* Derivative of LegendreQ at probe */
Tdleq[i*26+prb]=DLegendreQ(md,nd,zeta[prb])/LegendreQ(md,nd,zt0);
}
}
//
difrms=0.;
dzrms=0.;
ffrms=0.;
//Allocate memory in device
//Function Talbe
double *d_legq = NULL;
double *d_dleq = NULL;
int f_size = sizeof(double)*26*(dim1+1);
err = cudaMalloc((void **)&d_legq, f_size);
err = cudaMalloc((void **)&d_dleq, f_size);
//Cooridinates
double *d_zeta = NULL;
double *d_eta = NULL;
int c_size = sizeof(double)*30;
err = cudaMalloc((void **)&d_zeta, c_size);
err = cudaMalloc((void **)&d_eta, c_size);
//data storage;
double *d_Data = NULL;
double *d_DataoutZ = NULL;
double *d_DataoutR = NULL;
double *d_DataoutPhi = NULL;
size_t sizeData = sizeof(double)*26*nphi;
err = cudaMalloc((void **)&d_Data, sizeData);
err = cudaMalloc((void **)&d_DataoutZ, sizeData);
err = cudaMalloc((void **)&d_DataoutR, sizeData);
err = cudaMalloc((void **)&d_DataoutPhi, sizeData);
//ccidx
int *d_ccidx = NULL;
int size_ccidx = (dim1+2)*(dim1+2)*sizeof(int);
err = cudaMalloc((void **)&d_ccidx, size_ccidx);
int *d_midx = NULL;
int size_midx = (dim1+2)*sizeof(int);
err = cudaMalloc((void **)&d_midx, size_midx);
int *d_nidx = NULL;
int size_nidx = (dim1+2)*sizeof(int);
err = cudaMalloc((void **)&d_nidx, size_nidx);
//Vectors
double *d_cc = NULL;
double *d_cs = NULL;
double *d_sc = NULL;
double *d_ss = NULL;
int d_size = sizeof(double)*(dim1+2);
err = cudaMalloc((void **)&d_cc, d_size);
err = cudaMalloc((void **)&d_cs, d_size);
err = cudaMalloc((void **)&d_sc, d_size);
err = cudaMalloc((void **)&d_ss, d_size);
// NOTE(review): only the last cudaMalloc's status is actually checked;
// earlier failures are silently overwritten in err.
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device memory (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//copy matrix memory to device
err = cudaMemcpy(d_legq, Tlegq, f_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_dleq, Tdleq, f_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_zeta, zeta, c_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_eta, eta, c_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_Data, dataread, sizeData, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_DataoutZ, dataoutZ, sizeData, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_DataoutR, dataoutR, sizeData, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_DataoutPhi, dataoutPhi, sizeData, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_ccidx, ccidx, size_ccidx, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_midx, midx, size_midx, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_nidx, nidx, size_nidx, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_cc, cc, d_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_cs, cs, d_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_sc, sc, d_size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_ss, ss, d_size, cudaMemcpyHostToDevice);
//Start parallel computing
// 2-D launch: x covers the nphi azimuthal slices, y covers the 26
// probe slots (thread_call itself guards 1 <= prb <= 25).
dim3 DimBlock (16,16);
dim3 DimGrid (nphi/16+1, 26/16+1);
printf("CUDA kernel launch with %d blocks of %d threads\n", DimGrid.x * DimGrid.y, 256);
thread_call<<<DimGrid, DimBlock>>>(d_cc,d_cs,d_sc,d_ss,d_zeta,d_eta,d_legq,d_dleq,d_midx,d_nidx,d_ccidx,d_Data,d_DataoutZ,d_DataoutR,d_DataoutPhi,rr,bzero,dim1,nphi);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("GPU jobs done.\n");
//End parallel computing
err = cudaMemcpy(dataoutZ, d_DataoutZ, sizeData, cudaMemcpyDeviceToHost);
err = cudaMemcpy(dataoutR, d_DataoutR, sizeData, cudaMemcpyDeviceToHost);
err = cudaMemcpy(dataoutPhi, d_DataoutPhi, sizeData, cudaMemcpyDeviceToHost);
//open Root tree
/* TFile *outfile = new TFile("RootOut52.root","recreate");
TTree *Tree_Measured = new TTree ("Tree_Measured", "Measured field");
Tree_Measured->Branch("Phi",&phi,"Phi/D");
Tree_Measured->Branch("BField",&B_Measure,"Prb1/D:Prb2:Prb3:Prb4:Prb5:Prb6:Prb7:Prb8:Prb9:Prb10:Prb11:Prb12:Prb13:Prb14:Prb15:Prb16:Prb17:Prb18:Prb19:Prb20:Prb21:Prb22:Prb23:Prb24:Prb25");
TTree *Tree_Fit = new TTree ("Tree_Fit", "Fitted field");
Tree_Fit->Branch("Phi",&phi,"Phi/D");
Tree_Fit->Branch("BField",&B_Fit,"Prb1/D:Prb2:Prb3:Prb4:Prb5:Prb6:Prb7:Prb8:Prb9:Prb10:Prb11:Prb12:Prb13:Prb14:Prb15:Prb16:Prb17:Prb18:Prb19:Prb20:Prb21:Prb22:Prb23:Prb24:Prb25");
Tree_Fit->Branch("BFieldZ",&B_Fit,"Prb1/D:Prb2:Prb3:Prb4:Prb5:Prb6:Prb7:Prb8:Prb9:Prb10:Prb11:Prb12:Prb13:Prb14:Prb15:Prb16:Prb17:Prb18:Prb19:Prb20:Prb21:Prb22:Prb23:Prb24:Prb25");
Tree_Fit->Branch("BFieldR",&B_Fit,"Prb1/D:Prb2:Prb3:Prb4:Prb5:Prb6:Prb7:Prb8:Prb9:Prb10:Prb11:Prb12:Prb13:Prb14:Prb15:Prb16:Prb17:Prb18:Prb19:Prb20:Prb21:Prb22:Prb23:Prb24:Prb25");
Tree_Fit->Branch("BFieldPhi",&B_Fit,"Prb1/D:Prb2:Prb3:Prb4:Prb5:Prb6:Prb7:Prb8:Prb9:Prb10:Prb11:Prb12:Prb13:Prb14:Prb15:Prb16:Prb17:Prb18:Prb19:Prb20:Prb21:Prb22:Prb23:Prb24:Prb25");
*/
// Combine the fitted components, write per-probe records, and
// accumulate squared deviations for the RMS statistics below.
for (id=0;id<nphi;id++) { // loop over azimuthal slices
phi=dataread[id*26];
for (prb=1;prb<=25;prb++){ // loop over 25 probes
bz=dataoutZ[id*26+prb];
br=dataoutR[id*26+prb];
bphi=dataoutPhi[id*26+prb];
bsize=sqrt(br*br+bz*bz+bphi*bphi);
// printf("%Lf \n",bsize);
bfield[prb][4]=bsize;
bfield[prb][1]=bz;
bfield[prb][2]=br;
bfield[prb][3]=bphi;
B_Fit.Prb[prb-1]=bsize;
B_ZFit.Prb[prb-1]=bz;
B_RFit.Prb[prb-1]=br;
B_PhiFit.Prb[prb-1]=bphi;
B_Measure.Prb[prb-1]=dataread[id*26+prb];
//Output
fprintf(output,"%.17g %d %.17g %.17g %.17g %.17g %.17g\n",phi,prb,dataread[id*26+prb],bfield[prb][1],bfield[prb][2],bfield[prb][3],bfield[prb][4]);
rmsd=(dataread[id*26+prb]-bfield[prb][4])*(dataread[id*26+prb]-bfield[prb][4]);
difrms+=rmsd;
rmsd=rmsd/bzero/bzero;
dzrms+=(dataread[id*26+prb]-bz)*(dataread[id*26+prb]-bz);
ffrms+=(dataread[id*26+prb]-bzero)*(dataread[id*26+prb]-bzero);
}
// Tree_Measured->Fill();
// Tree_Fit->Fill();
}
//Tree_Measured->Write();
//Tree_Fit->Write();
//outfile->Close();
// Normalize to relative RMS and report in parts per million.
difrms=difrms/bzero/bzero/nphi/25;
dzrms=dzrms/bzero/bzero/nphi/25;
ffrms=ffrms/bzero/bzero/nphi/25;
difrms=sqrt(difrms)*1000000.;
dzrms=sqrt(dzrms)*1000000.;
ffrms=sqrt(ffrms)*1000000.;
printf("RMS fluctuation = %.17g ppm \n", ffrms);
printf("RMS difference (lin. approx.) = %.17g ppm \n", dzrms);
printf("RMS difference (real) = %.17g ppm \n", difrms);
free(cc);
free(cs);
free(sc);
free(ss);
free(ccidx);
free(midx);
free(nidx);
free(Tlegq);
free(Tdleq);
free(dataread);
free(dataoutZ);
free(dataoutR);
free(dataoutPhi);
return 0;
}
/* hypergeometric function 1F2
evaluated by truncating an infinite sum */
/* Hypergeometric series evaluated by truncating the infinite sum.
   Despite the name, the term ratio (a+i-1)(b+i-1)/((c+i-1) i) * z is that
   of the Gauss function 2F1(a, b; c; z).  Converges for 0 < z < 1; for z
   outside (tol, 1-tol) the sum is skipped and 1 is returned (the original
   guard value).  Idiom fixes vs. the original: double-precision fabs()
   instead of fabsl() on double operands, and the literal 1e-16 instead of
   pow(10., -16.). */
double h1f2(double a, double b, double c, double z){
    int i, imax;
    double err, tol, si, sii, f1f2;
    double errabs, errrel;
    imax=100000000;    /* 10^8 maximum iterations */
    sii=1.;            /* previous term of the series */
    err=1.;            /* estimated uncertainty */
    f1f2=1.;           /* running sum (first term) */
    i=0;               /* iterator */
    tol=1e-16;         /* error tolerance */
    if (z<tol||z>1.-tol) { /* z out of range or dangerously close to 0 or 1 */
        i=imax+1;
        f1f2=1./err;   /* guard value (1 here, since err starts at 1) */
    }
    while (err>tol&&i<=imax) {
        i++;
        si=(a+i-1.)*(b+i-1.)/(c+i-1.)*z/i*sii; /* next term */
        f1f2+=si;                              /* accumulate */
        errabs=fabs(si*z/(1.-z));  /* estimated absolute uncertainty */
        errrel=fabs(errabs/f1f2);  /* estimated relative uncertainty */
        /* keep the larger of the two estimates */
        err = (errabs>errrel) ? errabs : errrel;
        sii=si;
    }
    return f1f2;
}
/* Legendre function of the second kind, Q_{m+1/2}^n (cosh(z)).
Normalized to remove gamma function.
Regular inside the torus */
/* Legendre function of the second kind, Q_{m-1/2}^n(cosh(z)), up to a
   normalization that removes the gamma-function prefactor (regular inside
   the torus).  Evaluated via the hypergeometric series h1f2 with argument
   sech^2(z). */
double LegendreQ(double m, double n, double z){
    double sech2 = 1./cosh(z)/cosh(z);
    double prefactor = pow(tanh(z),n)/pow(cosh(z),m+.5);
    return prefactor*h1f2(.5*(m+n+.5),.5*(m+n+1.5),m+1.,sech2);
}
/* Derivative of Q_{m+1/2}^n (cosh(z)).
Normalized to remove gamma function.
*/
/* Derivative (w.r.t. z) of the normalized Legendre function used by
   LegendreQ above.  The m == 0 case needs its own closed form: the general
   branch passes m as the third (c) argument of h1f2, which must be
   nonzero. */
double DLegendreQ(int m, int n, double z){
double dlq, lq1, lq2;
if (m==0) {
/* special closed form for m = 0 */
dlq=-1/(8*pow(cosh(z),1.5))/sinh(z);
dlq=dlq*pow(tanh(z),n);
dlq=dlq*( (4.*pow(sinh(z),2.)-8.*n)* \
h1f2(n/2.+.25,n/2.+.75,1.,1/cosh(z)/cosh(z)) + \
(4.*n*n+8.*n+3.)*tanh(z)*tanh(z)* \
h1f2(n/2.+1.25,n/2.+1.75,2.,1/cosh(z)/cosh(z)));
}
else {
/* derivative from two neighboring-order functions (recurrence form) */
lq1=pow(tanh(z),n)/pow(cosh(z),m+.5);
lq1=lq1*h1f2(.5*(m+n+.5),.5*(m+n+1.5),m+1.,1./cosh(z)/cosh(z));
lq2=pow(tanh(z),n)/pow(cosh(z),m-.5);
lq2=lq2*h1f2(.5*(m+n-.5),.5*(m+n+.5),m*1.,1./cosh(z)/cosh(z));
dlq=(m-.5)/tanh(z)*lq1-(2.*m)/sinh(z)*lq2;
}
return dlq;
}
/* coordinate transformation r, z -> zeta */
/* Toroidal coordinate zeta for a point (rho, z) with focal radius r0:
   zeta = atanh(2*rho*r0 / (rho^2 + r0^2 + z^2)).
   Idiom fix: double-precision atanh() instead of atanhl(), since both the
   argument and the result are doubles. */
double zetaf(double rho, double z, double r0){
    return atanh(2.*rho*r0/(rho*rho+r0*r0+z*z));
}
/* coordinate transformation r, z -> eta */
/* Toroidal coordinate eta for (rho, z) with focal radius r0:
   eta = atan(2*r0*z / (rho^2 - r0^2 + z^2)), computed by a Taylor series
   near 0 for accuracy, and shifted by pi inside the focal circle
   (rho^2 + z^2 < r0^2).  Idiom fix: double-precision fabs/atan/sqrt
   instead of the long-double fabsl/atanl/sqrtl on double operands. */
double etaf(double rho, double z, double r0){
    double etax, xx;
    int i;
    xx=2.*r0*z/(rho*rho-r0*r0+z*z);
    if (fabs(xx)<0.001) {
        /* Taylor series of atan(x): x - x^3/3 + x^5/5 - ...;
           cos(pi*i) supplies the alternating sign. */
        etax=1.;
        for (i=1;i<=10;i++) {
            etax=etax+pow(xx,2.*i)/(2.*i+1.)*cos(M_PI*i);
        }
        etax=etax*xx;
    }
    else {
        etax=atan(xx);
    }
    /* Branch correction: shift by pi inside the focal circle.  For
       z*z > r0*r0 the sqrt argument is negative (NaN) and the comparison
       is false, matching the original sqrtl behavior. */
    if (rho<sqrt(r0*r0-z*z)) etax=etax+M_PI;
    return etax;
}
//Function for each thread call
//Function for each thread call
// One thread per (azimuthal slice id, probe prb): sums the toroidal
// harmonic expansion over all dim1 (m, n) coefficient indices, using the
// precomputed LegendreQ tables Tlegq/Tdleq (both indexed [i*26 + prb]),
// and writes the fitted Bz/Br/Bphi for that probe and slice.
__global__ void thread_call(const double *cc,const double *cs,const double *sc,const double *ss,const double *zeta,const double *eta,const double *Tlegq,const double *Tdleq,const int *midx,const int *nidx,const int*ccidx,const double *dataread,double *dataoutZ,double *dataoutR,double *dataoutPhi,const double rr,const double bzero,const int dim1,int nphi){
//Get index
int id = blockDim.x * blockIdx.x + threadIdx.x;
int prb = blockDim.y * blockIdx.y + threadIdx.y;
int ccidx_dim = dim1+2;
// Guard: probes are 1-based (slot 0 holds the azimuthal angle).
if(id<nphi && prb<26 && prb>0){
double phi=dataread[id*26];
double zt=zeta[prb]; // zeta coordinate at probe
double et=eta[prb]; // eta coordinate at probe
// printf("%Lf, %Lf \n", zt, et);
double wgt=sqrt(cosh(zt)-cos(et)); // weight func in toroidal coordinates
double br=0.;
double bz=bzero; // z-component starts from the average field
double bphi=0.;
for (int i=1;i<=dim1;i++){
int md=midx[i]; // m index
int nd=nidx[i]; // n index
// printf("%d, %d \n", md, nd);
/* LegendreQ at probe */
double legq=Tlegq[i*26+prb];
/* Derivative of LegendreQ at probe */
double dleq=Tdleq[i*26+prb];
// printf("(%d, %d, %Lf) : %Lf, %Lf \n", md,nd,zt,legq, dleq);
/* d(potential)/d(zeta): Fourier combination of the four coefficient
   families times the radial (LegendreQ) factor */
double phidz=cc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*cos(md*et)+ \
sc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*sin(md*et)+ \
cs[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*cos(md*et)+ \
ss[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*sin(md*et);
phidz=phidz*(sinh(zt)/2./wgt*legq + wgt*dleq);
/* d(potential)/d(phi) */
double phidp=-cc[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*cos(md*et)- \
sc[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*sin(md*et)+ \
cs[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*cos(md*et)+ \
ss[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*sin(md*et);
phidp=phidp*nd*wgt*legq;
/* d(potential)/d(eta), split into the weight-function part ... */
double phide1=cc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*cos(md*et)+ \
sc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*sin(md*et)+ \
cs[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*cos(md*et)+ \
ss[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*sin(md*et);
phide1=phide1*sin(et)/2./wgt*legq;
/* ... and the angular-derivative part */
double phide2=-cc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*sin(md*et)+ \
sc[ccidx[md*ccidx_dim+nd]]*cos(nd*phi)*cos(md*et)- \
cs[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*sin(md*et)+ \
ss[ccidx[md*ccidx_dim+nd]]*sin(nd*phi)*cos(md*et);
phide2=phide2*md*wgt*legq;
double phide=phide1+phide2;
// Field components from the potential gradient in toroidal coords.
br=br+sinh(zt)/rr*((1.-cosh(zt)*cos(et))/sinh(zt)*phidz-sin(et)*phide);
bz=bz+sinh(zt)/rr*(-sin(et)*phidz-(1.-cosh(zt)*cos(et))/sinh(zt)*phide);
bphi=bphi+(cosh(zt)-cos(et))/(rr*sinh(zt))*phidp;
}
dataoutZ[id*26+prb]=bz;
dataoutR[id*26+prb]=br;
dataoutPhi[id*26+prb]=bphi;
}
}
|
22,867 | /* Test: result = thread ID.
*
* CUDA equivalent of test_hello_ptx.ml kernel.
*/
/* Writes each thread's global linear ID into result (input is unused).
   FIX: restore the bounds guard that was commented out — without it, any
   grid that overshoots N writes past the end of result. */
__global__ void test(const float* input, float* result, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        result[i] = float(i);
}
|
22,868 | #include <iostream>
#include <cstdio>
#include <ctime>
#include <math.h>
//CUDA kernel function to add the elements of two arrays
//CUDA kernel: y = A*x for an n x n row-major matrix a, one thread per row.
__global__
void matvec(float *a, float *x, float *y, int n)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: the original tested `row < n` on every loop iteration and called
    // __syncthreads() twice — once inside the divergent `if (row < n)`
    // branch, which is unsafe (not all threads reach it), and in any case
    // unnecessary since no shared memory is used.  Guard once and exit.
    if (row >= n)
        return;
    float sum = 0.0f;
    // Dot product of row `row` of a with x.
    for (unsigned int i = 0; i < n; i++)
    {
        sum += x[i]*a[row*n+i];
    }
    y[row] = sum;
}
// Driver: builds an N x N tridiagonal system (4 on the diagonal, -1 off),
// multiplies it by the all-ones vector on the GPU using unified memory,
// and reports the first two results plus wall-clock time.
int main(void)
{
//Initialize the clock timer
clock_t start = clock();
//Set matrix and vector dimensions
int N = 10000;
//Allocate unified memory -- accessible from cpu or gpu
// NOTE(review): cudaMallocManaged return codes are not checked.
float *x, *y, *a;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
cudaMallocManaged(&a, N*N*sizeof(float));
// initialize x and y arrays on the host
for (unsigned int i = 0; i < N*N; ++i)
a[i] = 0.0;
// Tridiagonal fill: expected product y = A*1 is 2 at the ends, 2..3
// pattern elsewhere (4 - 1 - 1).
for (unsigned int i = 0; i < N; i++) {
a[i*N+i] = 4.0;
if (i > 0) a[i*N+i-1] = -1.0;
if (i < N-1) a[i*N+i+1] = -1.0;
x[i] = 1.0;
y[i] = 0.0;
}
//Set the block size and run the kernel
int blockSize = 1024;
int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div grid size
matvec<<<numBlocks, blockSize>>>(a, x, y, N);
//Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//Finish the timer
// NOTE(review): clock() measures host CPU time; the CUDA-event API would
// time the kernel itself more accurately.
double el = double(clock() - start) / CLOCKS_PER_SEC;
//Print the results
printf("y[0]=%8.4e and y[1]=%8.4e\n", y[0], y[1]);
printf("Number of elements in array %8.0f\n", float(N));
printf("Elapsed time: %8.8f seconds\n", el);
// Free memory
cudaFree(x);
cudaFree(y);
cudaFree(a);
return 0;
}
|
22,869 | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
/*
 * Microbenchmark kernel: issues a long chain of fma.rn.f32 PTX
 * instructions so arithmetic throughput / warp-divergence effects can be
 * timed.  Only lanes with (threadIdx.x % 32) < div run the FMA chain, so
 * `div` controls how many threads per warp are active.
 *   D   - single-float output slot (every thread stores to the same
 *         address; the surviving value is just some thread's tid*2.0 --
 *         the store only exists to keep the kernel from being optimized
 *         away).
 *   n   - outer iteration count; each iteration executes 256 FMAs.
 * NOTE(review): PTX register %r1111 is declared but never written before
 * being used as the FMA multiplier -- the numeric result is meaningless
 * by design; only the instruction stream matters here.
 */
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
/* lane index within the warp (0..31) */
int thread_id = threadIdx.x % 32;
if (thread_id < div) {
/* declare the 18 PTX registers and seed 17 of them (%r1111 stays
   uninitialized -- see note above) */
__asm volatile (
" .reg .f32 %r1111;\n\t"
" .reg .f32 %r1112;\n\t"
" .reg .f32 %r1113;\n\t"
" .reg .f32 %r1114;\n\t"
" .reg .f32 %r1115;\n\t"
" .reg .f32 %r1116;\n\t"
" .reg .f32 %r1117;\n\t"
" .reg .f32 %r1118;\n\t"
" .reg .f32 %r1119;\n\t"
" .reg .f32 %r1120;\n\t"
" .reg .f32 %r1121;\n\t"
" .reg .f32 %r1122;\n\t"
" .reg .f32 %r1123;\n\t"
" .reg .f32 %r1124;\n\t"
" .reg .f32 %r1125;\n\t"
" .reg .f32 %r1126;\n\t"
" .reg .f32 %r1127;\n\t"
" .reg .f32 %r1128;\n\t"
"mov.f32 %r1112, 4.4;\n\t"
"mov.f32 %r1113, %r1112;\n\t"
"mov.f32 %r1114, 2.2;\n\t"
"mov.f32 %r1115, 3.3;\n\t"
"mov.f32 %r1116, 1.23;\n\t"
"mov.f32 %r1117, 2.42;\n\t"
"mov.f32 %r1118, 3.34;\n\t"
"mov.f32 %r1119, 5.62;\n\t"
"mov.f32 %r1120, 2.56;\n\t"
"mov.f32 %r1121, 1.56;\n\t"
"mov.f32 %r1122, 2.56;\n\t"
"mov.f32 %r1123, 5.56;\n\t"
"mov.f32 %r1124, 8.56;\n\t"
"mov.f32 %r1125, 3.56;\n\t"
"mov.f32 %r1126, 5.56;\n\t"
"mov.f32 %r1127, 6.56;\n\t"
"mov.f32 %r1128, 0.56;\n\t"
);
/* per iteration: 16 repeats of 16 independent FMA lanes
   (%r1113..%r1128), i.e. 256 FMAs; each register forms its own
   dependency chain across the repeats */
for (int k = 0; k < n; k++) {
__asm volatile (
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
"fma.rn.f32 %r1113, %r1111, %r1113, %r1112;\n\t"
"fma.rn.f32 %r1114, %r1111, %r1114, %r1112;\n\t"
"fma.rn.f32 %r1115, %r1111, %r1115, %r1112;\n\t"
"fma.rn.f32 %r1116, %r1111, %r1116, %r1112;\n\t"
"fma.rn.f32 %r1117, %r1111, %r1117, %r1112;\n\t"
"fma.rn.f32 %r1118, %r1111, %r1118, %r1112;\n\t"
"fma.rn.f32 %r1119, %r1111, %r1119, %r1112;\n\t"
"fma.rn.f32 %r1120, %r1111, %r1120, %r1112;\n\t"
"fma.rn.f32 %r1121, %r1111, %r1121, %r1112;\n\t"
"fma.rn.f32 %r1122, %r1111, %r1122, %r1112;\n\t"
"fma.rn.f32 %r1123, %r1111, %r1123, %r1112;\n\t"
"fma.rn.f32 %r1124, %r1111, %r1124, %r1112;\n\t"
"fma.rn.f32 %r1125, %r1111, %r1125, %r1112;\n\t"
"fma.rn.f32 %r1126, %r1111, %r1126, %r1112;\n\t"
"fma.rn.f32 %r1127, %r1111, %r1127, %r1112;\n\t"
"fma.rn.f32 %r1128, %r1111, %r1128, %r1112;\n\t"
);
}
}
/* all threads reach this barrier: the divergent `if` above is closed */
__syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
/* every thread stores to the same slot (benign benchmark race) */
*D = I1;
__syncthreads();
}
// Print command-line usage. The original concatenated two adjacent string
// literals with no separator ("...<iterations>threads active per warp")
// and did not show the 4th required argument as a placeholder.
void usage() {
    std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <threads_active_per_warp>" << std::endl;
}
// Driver for the FMA microbenchmark kernel: parses launch parameters,
// times the kernel with CUDA events, and copies back the dummy result.
int main(int argc, char **argv)
{
    if (argc != 5) {
        usage();
        exit(1);
    }
    int num_blocks = atoi(argv[1]);
    int num_threads_per_block = atoi(argv[2]);
    int iterations = atoi(argv[3]);
    int divergence = atoi(argv[4]);
    // Allocate the result buffers. The original never allocated h_res and
    // passed the NULL global pointer to cudaMemcpy below.
    h_res = new float(0.0f);
    cudaMalloc((void**)&d_res, sizeof(float));
    // Time the kernel (cudaEventElapsedTime reports milliseconds)
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaProfilerStart();
    compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
    cudaProfilerStop();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    std::cout << "GPU Elapsed Time = " << time << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
    // Release the buffers (d_res was leaked before)
    cudaFree(d_res);
    delete h_res;
    h_res = NULL;
    return 0;
}
|
22,870 | #include <stdio.h>
//#include <stdlib.h>
#include <unistd.h>
#define BLOCK_SIZE 1024
#define GRID_SIZE 38400
extern "C" {
/* Element-wise vector ADD: C[i] = A[i] + B[i] (name kept for the callers'
 * sake even though it says "mul").  One thread per element; `size` is the
 * element count.
 * Fix vs. original: the __syncthreads() sat inside the divergent
 * `if (i < size)` branch, which is undefined behavior; no shared memory
 * is used, so the barrier (and the temp) served no purpose and is removed. */
__global__ void mul_matrix(int *A, int *B, int *C, int size){
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	if(i < size){
		C[i] = A[i] + B[i];
	}
}
// CUDA code here
// Host entry point: element-wise add of two int arrays on the GPU.
//   a_h/b_h/c_h : host buffers of `size` BYTES (size/4 ints)
//   device_id   : CUDA device to run on
// Fixes vs. original: frees ALL three device buffers (b_d and c_d were
// leaked), uses ceil-division for the grid so a non-multiple tail is
// covered, and checks the kernel launch for errors.
int cuda_matrixMul(int *a_h, int *b_h, int *c_h, int size, int device_id){
	cudaError_t err;
	int *a_d, *b_d, *c_d;
	printf("C: device id >> %d\n", device_id);
	cudaSetDevice(device_id);
	// allocate memory in the GPU device for a, b and c
	err = cudaMalloc((void **) & a_d, size);
	if (err != cudaSuccess){
		printf("CUDA error(1): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	err = cudaMalloc((void **) & b_d, size);
	if (err != cudaSuccess){
		printf("CUDA error(1): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	err = cudaMalloc((void **) & c_d, size);
	if (err != cudaSuccess){
		printf("CUDA error(1): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	// copy inputs from host to GPU device
	err = cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess){
		printf("CUDA error(4): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	err = cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess){
		printf("CUDA error(4): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	// convert byte count to element count (assumes 4-byte int)
	int N = size / 4;
	dim3 block(BLOCK_SIZE, 1, 1);
	// ceil-div: launch a partial tail block when N % BLOCK_SIZE != 0
	dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
	printf("C: Launch(size = %d)\n", N);
	mul_matrix<<<grid, block>>>(a_d, b_d, c_d, N);
	// a kernel launch does not return an error itself -- poll for it
	err = cudaGetLastError();
	if (err != cudaSuccess){
		printf("CUDA error(3): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	cudaDeviceSynchronize();
	err = cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess){
		printf("CUDA error(4): %s\n", cudaGetErrorString(err));
		exit(-1);
	}
	cudaDeviceSynchronize();
	// validate: callers fill a_h[i] = b_h[i] = i, so c must be i+i
	for(int i = 0; i < (size/4); i++) {
		if(c_h[i] != i+i){
			printf("C: Mismatch (c[%d] = %d)\n", i , c_h[i]);
			exit(-1);
		}
	}
	// free ALL device buffers (the original leaked b_d and c_d)
	cudaFree(a_d);
	cudaFree(b_d);
	cudaFree(c_d);
	return 0;
}
}
|
22,871 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include <time.h>
#define Mask_size 3 //filter size
#define Width 1024 // image width
#define Height 1024 // image height
#define N (Width*Height)
//---------------kernel-------------------
/* Apply four 3x3 masks to the same input image in one pass, producing four
 * convolved outputs (zero padding at the borders).  One thread computes
 * one output pixel; expects a 2D launch covering Width x Height.
 * Fix vs. original: the accumulation lines were a copy-paste error --
 * value1 also summed Mask2, value2 summed Mask3, value3/value4 were never
 * accumulated (always 0), and Mask4 was never read.  Now value_k uses
 * Mask_k for k = 1..4. */
__global__ void ConvExp (int *I_input, int *Mask1,int *Mask2,int *Mask3,int *Mask4, int *I_output1,int *I_output2,int *I_output3,int *I_output4)
{
/* Thread Row Index */
int Row = blockIdx.y * blockDim.y + threadIdx.y;
/* Thread column Index */
int Col = blockIdx.x * blockDim.x + threadIdx.x;
float value1 = 0;
float value2 = 0;
float value3 = 0;
float value4 = 0;
int Index = Row*Width+Col; //output Image index
/* convolution: one accumulator per mask, sharing the same pixel load */
for(int i=0; i<Mask_size; i++)
{
for(int j=0; j<Mask_size; j++)
{
int R_start = i + Row - 1;
int C_start = j + Col - 1;
if((C_start>= 0 && C_start < Width) && (R_start>= 0 && R_start < Height))
{
int pixel = I_input[R_start* Width + C_start];
value1 += Mask1[i * Mask_size + j] * pixel;
value2 += Mask2[i * Mask_size + j] * pixel;
value3 += Mask3[i * Mask_size + j] * pixel;
value4 += Mask4[i * Mask_size + j] * pixel;
}
}
}
if((Row < Height) && (Col < Width)) {
I_output1[Index] = value1; // convolved images, one per mask
I_output2[Index] = value2;
I_output3[Index] = value3;
I_output4[Index] = value4;
}
}
//----------------------------main-----------------------------------
// Driver: builds a constant test image and four constant 3x3 masks, runs
// the 4-mask convolution kernel, and reports timing/bandwidth.
// Fixes vs. original: (1) the mask-initialization inner loop had no
// braces, so only mask1 was filled inside the loop while mask2..4 were
// written once per row at index Row*Mask_size+Mask_size -- out of bounds
// on the last row and leaving the masks uninitialized; (2) the "%fn\n"
// printf format typos are corrected to "%f\n".
int main(void)
{
//-------------------------------------------------------------------
int *Image, *Output1,*Output2, *Output3, *Output4;
int *mask1, *mask2, *mask3, *mask4;
int SIZE= Width*Height*sizeof(int);
int Row,Col;
Image= (int *)malloc(SIZE);
Output1= (int *)malloc(SIZE);
Output2= (int *)malloc(SIZE);
Output3= (int *)malloc(SIZE);
Output4= (int *)malloc(SIZE);
mask1= (int *)malloc(Mask_size*Mask_size*sizeof(int));
mask2= (int *)malloc(Mask_size*Mask_size*sizeof(int));
mask3= (int *)malloc(Mask_size*Mask_size*sizeof(int));
mask4= (int *)malloc(Mask_size*Mask_size*sizeof(int));
//-------------------------------------------------------------------
int *d_image, *d_mask1,*d_mask2,*d_mask3,*d_mask4,*d_output1, *d_output2,*d_output3, *d_output4; /* pointer to device memory
for input image, mask and output */
//-----------------------------------------------------------
/* constant input image; zeroed outputs */
for(Row=0;Row<Width;Row++)
for(Col=0;Col<Height;Col++)
{
Image[Row*Width+Col]=1;
Output1[Row*Width+Col]=0;
Output2[Row*Width+Col]=0;
Output3[Row*Width+Col]=0;
Output4[Row*Width+Col]=0;
}
//-----------------------------------------------------------
/* fill every mask element (braces added -- see header comment) */
for(Row=0;Row<Mask_size;Row++)
{
for(Col=0;Col<Mask_size;Col++)
{
mask1[Row*Mask_size+Col]=1;
mask2[Row*Mask_size+Col]=2;
mask3[Row*Mask_size+Col]=3;
mask4[Row*Mask_size+Col]=4;
}
}
//------------------------------------------------------
/* Device Memory Allocation */
cudaMalloc(&d_image, (Width*Height)* sizeof(int));
cudaMalloc(&d_output1, (Width*Height)* sizeof(int));
cudaMalloc(&d_output2, (Width*Height)* sizeof(int));
cudaMalloc(&d_output3, (Width*Height)* sizeof(int));
cudaMalloc(&d_output4, (Width*Height)* sizeof(int));
cudaMalloc(&d_mask1, (Mask_size*Mask_size)* sizeof(int));
cudaMalloc(&d_mask2, (Mask_size*Mask_size)* sizeof(int));
cudaMalloc(&d_mask3, (Mask_size*Mask_size)* sizeof(int));
cudaMalloc(&d_mask4, (Mask_size*Mask_size)* sizeof(int));
//---------------------------------------------------------
cudaEvent_t start, stop; // Cuda API to measure time for Cuda Kernel Execution.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//--------------------------------------------------------
/*Copying Input Image to GPU Memory */
cudaMemcpy(d_image, Image, (Width*Height)* sizeof(int), cudaMemcpyHostToDevice);
/*Copying Mask to GPU Memory */
cudaMemcpy(d_mask1, mask1, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mask2, mask2, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mask3, mask3, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mask4, mask4, (Mask_size*Mask_size)* sizeof(int), cudaMemcpyHostToDevice);
/* Two Dimesional blocks with two dimensional threads */
dim3 grid(((Width-1)/Mask_size+1),((Height-1)/Mask_size+1));
/*Number of threads per block is 3x3=9 */
dim3 block(Mask_size,Mask_size);
//---------------------------------------------
printf ("GPU Executing Convolution Kernel...\n") ;
printf("\n");
//--------------------------------------------
/*Kernel Launch configuration*/
ConvExp <<<grid, block >>>(d_image, d_mask1,d_mask2, d_mask3,d_mask4,d_output1, d_output2,d_output3, d_output4);
/*copying output Image to Host Memory*/
cudaMemcpy(Output1, d_output1, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Output2, d_output2, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Output3, d_output3, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Output4, d_output4, (Width*Height)* sizeof(int), cudaMemcpyDeviceToHost);
//-------------------------------------------
cudaEventRecord(stop);
cudaEventSynchronize(stop); // Blocks CPU execution until Device Kernel finishes its job.
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU Execution Time for Convolution Kernel: %f\n", milliseconds); //GPU Execution Time (ms).
printf("Effective Bandwidth (GB/s): %f\n", N*4*2/milliseconds/1e6);
//N*4 is the total number of Bytes transferred and (1+1)=2 is for read Input Image and write Output Image.
printf("\n");
//------------------------------------------
free(Image);
free(Output1);
free(Output2);
free(Output3);
free(Output4);
free(mask1);
free(mask2);
free(mask3);
free(mask4);
cudaFree(d_image);
cudaFree(d_mask1);
cudaFree(d_mask2);
cudaFree(d_mask3);
cudaFree(d_mask4);
cudaFree(d_output1);
cudaFree(d_output2);
cudaFree(d_output3);
cudaFree(d_output4);
return 0;
}
|
22,872 | #include <stdio.h>
#include <cuda.h>
/**/
#define SAMPLE_INTERVAL 4 /* pick a sample every 4 elements */
/**/
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (including this key)
*/
static inline __device__ int get_rank_inclusive(int key, int* arr, int len);
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (excluding this key)
*/
static inline __device__ int get_rank_exclusive(int key, int* arr, int len);
/**/
/*
 * Merge corresponding sorted sub-blocks of the left and right halves of
 * `input` into `output` by rank computation: each element's output slot is
 * its own index plus its rank in the opposite sub-block.  Ties are broken
 * by using exclusive ranks for left elements and inclusive ranks for right
 * elements, so equal keys land in distinct slots.
 * NOTE(review): launched with a single block whose thread count bounds the
 * sub-block length (one thread per element) -- assumes every
 * sb_len_left[i]/sb_len_right[i] <= blockDim.x; confirm with the caller.
 */
__global__ void pairwise_merge(int* input, int half_size, int* sb_len_left, int* sb_len_right, int sb_num, int* output)
{
int i, other_rank, output_rank;
int* left_half = input;
int* right_half = input + half_size;
int* cur_output = output;
/* A loop through all pair of sub-blocks */
for(i=0;i<sb_num;++i)
{
/***************************************************************/
/* Perform the pair-wise merging of corresponding sub-blocks */
/***************************************************************/
/* place the left sub-block's elements */
if(threadIdx.x < sb_len_left[i])
{
int key = left_half[threadIdx.x];
/* use function get_rank_exclusive() to calculate the rank
of key in the right_half */
other_rank = get_rank_exclusive(key,right_half,sb_len_right[i]);
/* calculate the output rank of key */
output_rank = threadIdx.x + other_rank;
/* assign key to the correspoding position in the output array*/
cur_output[output_rank] = key;
}
/**/
/* place the right sub-block's elements; the INCLUSIVE rank breaks
ties against equal left-half keys */
if(threadIdx.x < sb_len_right[i])
{
int key = right_half[threadIdx.x];
/* use function get_rank_inclusive() to calculate the rank
of key in the left_half*/
other_rank = get_rank_inclusive(key,left_half,sb_len_left[i]);
/* calculate the output rank of key */
output_rank = threadIdx.x + other_rank;
/* assign key to the correspoding position in the output array*/
cur_output[output_rank] = key;
}
/****************************************************/
/* Advance past the sub-block pair just merged:      */
/* left_half, right_half and cur_output all move on  */
/****************************************************/
left_half += sb_len_left[i];
right_half += sb_len_right[i];
/**/
cur_output += sb_len_left[i];
cur_output += sb_len_right[i];
} /* end of the loop through all pair of sub-blocks */
/**/
}/* end of the kernel*/
/**/
void checkCUDAError(const char *msg);
/**/
// Driver: reads the two halves, sub-block counts and lengths from stdin,
// runs the pairwise merge kernel, and prints the merged array.
// Fix vs. original: cudaEventElapsedTime returns MILLISECONDS; the report
// claimed "(s)".
int main(int argc, char* argv[])
{
int i;
/**/
int* h_input, *h_output;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*******************/
/** READING INPUT **/
/*******************/
int half_size,size;
int sb_num; //number of sub-block
/* read the value of half_size from stdin*/
scanf("%d", &half_size);
size = half_size*2;
/* Allocate host memory */
h_input = (int*) malloc(sizeof(int)*size);
h_output = (int*) malloc(sizeof(int)*size);
/* read input from stdin */
for(i=0;i<size;++i) scanf("%d", &h_input[i]);
/* read the value of sb_num */
scanf("%d", &sb_num);
int *h_sb_len_left, *h_sb_len_right;
h_sb_len_left = (int*) malloc(sizeof(int)*sb_num);
h_sb_len_right = (int*) malloc(sizeof(int)*sb_num);
for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_left[i]);
for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_right[i]);
/**/
/******************************/
/* allocate device memories */
/******************************/
int* d_input, *d_output, *d_sb_len_left, *d_sb_len_right;
cudaMalloc(&d_input,sizeof(int)*size);
cudaMalloc(&d_output,sizeof(int)*size);
cudaMalloc(&d_sb_len_left,sizeof(int)*sb_num);
cudaMalloc(&d_sb_len_right,sizeof(int)*sb_num);
/* timed region: H2D copies + kernel + D2H copy */
cudaEventRecord(start,0);
/***********************************/
/* copy input data to device */
/***********************************/
cudaMemcpy(d_input, h_input, size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_sb_len_left, h_sb_len_left, sb_num*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_sb_len_right, h_sb_len_right, sb_num*sizeof(int), cudaMemcpyHostToDevice);
/* invoke the kernel, with 1 block, SAMPLE_INTERVAL threads */
pairwise_merge<<<1,SAMPLE_INTERVAL>>>(d_input,half_size,d_sb_len_left,d_sb_len_right,sb_num,d_output);
checkCUDAError("kernel invocation\n");
/* copy the sorted results back to host */
cudaMemcpy(h_output, d_output, sizeof(int)*size, cudaMemcpyDeviceToHost);
/**/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
/* cudaEventElapsedTime reports milliseconds */
fprintf(stderr,"Elapsed time = %f (ms)\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/*******************************************/
/* Print the final merge result */
/*******************************************/
printf("The sorted array is :\n");
for(int i=0;i<size;++i) printf("%d ",h_output[i]);
printf("\n");
/* free device memory */
cudaFree(d_sb_len_left);
cudaFree(d_sb_len_right);
cudaFree(d_input);
cudaFree(d_output);
/* free host memory */
free(h_input);
free(h_output);
free(h_sb_len_left);
free(h_sb_len_right);
/**/
return 0;
}
/*function to test CUDA command*/
/* Abort the program with a diagnostic if the most recent CUDA operation
   left an error pending; `msg` labels the failing call site. */
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg,
            cudaGetErrorString( status) );
    exit(EXIT_FAILURE);
}
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (including this key)
Naive implementation.
Binary search can be used to implement more efficient function
*/
/*
 * Rank of `key` in sorted array `arr` of length `len`, counting elements
 * <= key (i.e. equal keys are counted).  Linear scan; binary search would
 * be faster for long arrays.
 */
static inline __device__ int get_rank_inclusive(int key, int* arr, int len)
{
    int pos;
    for (pos = 0; pos < len; ++pos) {
        if (arr[pos] > key)
            break;
    }
    return pos;
}
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (excluding this key)
Naive implementation.
Binary search can be used to implement more efficient function
*/
/*
 * Rank of `key` in sorted array `arr` of length `len`, counting only
 * elements strictly < key (equal keys are NOT counted).  Linear scan;
 * binary search would be faster for long arrays.
 */
static inline __device__ int get_rank_exclusive(int key, int* arr, int len)
{
    int pos;
    for (pos = 0; pos < len; ++pos) {
        if (arr[pos] >= key)
            break;
    }
    return pos;
}
|
22,873 | #include "includes.h"
/* Element-wise addition c = a + b, one block per element (blockIdx.x is
   the element index); blocks beyond N do nothing. */
__global__ void add(int *a, int *b, int *c)
{
    const int idx = blockIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
22,874 | #include <stdio.h>
#define N 8
#define THREADS_PER_BLOCK 4
#define BLOCKS (N / THREADS_PER_BLOCK)
/*
 * Integer dot product: each thread multiplies one element pair into shared
 * memory; thread 0 of each block sums its block's partials and atomically
 * folds the block sum into the global result *res.
 * NOTE(review): there is no bounds guard on idx -- this relies on the
 * launch covering exactly the input length (BLOCKS * THREADS_PER_BLOCK
 * == N in this file); confirm before reusing with other sizes.
 */
__global__ void dot_product(int *a, int *b, int *res)
{
__shared__ int temp[THREADS_PER_BLOCK];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[idx] * b[idx];
/* all partial products must be visible before thread 0 reads them */
__syncthreads();
if(0 == threadIdx.x)
{
int sum = 0;
for(int i = 0; i < THREADS_PER_BLOCK; i++)
sum += temp[i];
/* synchronise adding result to sum because
* *res += sum; can result in a race condition
*/
atomicAdd(res, sum);
}
}
/* Fill arr[0..n) with a deterministic ramp (arr[k] = k); the rand() call
   is intentionally disabled so runs are reproducible. */
void random_ints(int *arr, int n)
{
    for (int k = 0; k < n; k++) {
        arr[k] = k; /*rand();*/
    }
}
/* Print arr[0..n) on one line, comma-separated with a trailing newline.
   Note: expects n >= 1 (the final element is printed unconditionally). */
void print_arr(int *arr, int n)
{
    int k;
    const int tail = n - 1;
    for (k = 0; k < tail; k++)
        printf("%i,", arr[k]);
    printf("%i\n", arr[tail]);
}
/* Driver: builds two ramp vectors, computes their dot product on the GPU,
   and prints the inputs and the result. */
int main(void)
{
    int *host_a, *host_b, *host_res;
    int *dev_a, *dev_b, *dev_res;
    const int bytes = N * sizeof(int);

    /* device buffers: two input vectors plus one scalar accumulator */
    cudaMalloc((void**) &dev_a, bytes);
    cudaMalloc((void**) &dev_b, bytes);
    cudaMalloc((void**) &dev_res, sizeof(int));

    /* host buffers, filled with deterministic data */
    host_a = (int*) malloc(bytes);
    host_b = (int*) malloc(bytes);
    host_res = (int*) malloc(sizeof(int));
    random_ints(host_a, N);
    random_ints(host_b, N);
    *host_res = 0;

    /* stage the inputs and the zeroed accumulator on the device */
    cudaMemcpy(dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_res, host_res, sizeof(int), cudaMemcpyHostToDevice);

    /* launch BLOCKS blocks of THREADS_PER_BLOCK threads */
    dot_product<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_res);

    /* blocking D2H copy also synchronizes with the kernel */
    cudaMemcpy(host_res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);

    print_arr(host_a, N);
    print_arr(host_b, N);
    printf("result = %i\n", *host_res);

    free(host_a);
    free(host_b);
    free(host_res);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);
    return 0;
}
|
22,875 | #include<cufft.h>
#include<stdio.h>
int main(){
    /* minimal program whose only purpose is verifying that the cuFFT
       library links successfully */
    const char *banner = "test for linking cufft library\n";
    printf("%s", banner);
    return 0;
}
|
22,876 | #include "cuda.h"
/* SAXPY kernel: z[i] = a * x[i] + y[i] for i in [0, n); one thread per
   element, tail threads past n are guarded out. */
__global__ void kernel_saxpy( int n, float a, float * x, float * y, float * z ) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;
    z[idx] = a * x[idx] + y[idx];
}
// Host launcher for kernel_saxpy on nblocks x nthreads threads.
// The launch is asynchronous: callers must synchronise before reading z.
// NOTE(review): no cudaGetLastError() after the launch.
void saxpy( int nblocks, int nthreads, int n, float a, float * x, float * y, float * z ) {
kernel_saxpy<<<nblocks, nthreads>>>( n, a, x, y, z );
}
|
22,877 | #include "includes.h"
// Clamp every element of x into [minVal, maxVal] in place.
// Grid-stride loop: any launch geometry covers all `size` elements.
// NaN inputs fail both comparisons and pass through unchanged.
__global__ void cudaSclamp_kernel(float* x, unsigned int size, float minVal, float maxVal)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step  = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < size; i += step) {
        float v = x[i];
        if (v < minVal)
            v = minVal;
        else if (v > maxVal)
            v = maxVal;
        x[i] = v;
    }
}
22,878 | #include "includes.h"
// Elementwise Phi^4 + Phi^6 term: t[i] = q^3 * (lambda + g * q^2).
__global__ void kernel_Phi4_Phi6(const int N, double *t, double *q, const double lambda, const double g)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;                     // tail guard
    const double qi = q[idx];
    // Grouping matches the original expression exactly (left-to-right).
    t[idx] = qi * qi * qi * (lambda + g * qi * qi);
}
22,879 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define NUM_BLOCKS 800
#define NUM_THREADS 1024
// Stage 1: per-block shared-memory tree reduction. Every thread contributes
// the constant 1, so each block writes blockDim.x (== NUM_THREADS) into
// cudaDeltaArray[blockIdx.x]; the cross-block sum happens in stage 2.
__global__ void localReductionKernel(int* cudaDeltaArray) {
__shared__ int sharedDeltaArray[NUM_THREADS];
unsigned int id = threadIdx.x;
sharedDeltaArray[id] = 1;
__syncthreads();
// Halving tree reduction; assumes blockDim.x is a power of two.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (id < s) {
sharedDeltaArray[id] = sharedDeltaArray[id] + sharedDeltaArray[id + s];
}
__syncthreads();
}
if (id == 0) {
cudaDeltaArray[blockIdx.x] = sharedDeltaArray[0];
}
}
/* Smallest power of two >= a (returns 1 for any a <= 1). */
int nextPowerOf2(int a){
    int p = 1;
    while (p < a)
        p <<= 1;
    return p;
}
// Stage 2: reduce the gridDim.x per-block partials IN GLOBAL MEMORY.
// b is rounded up to the next power of two so the halving loop covers the
// whole array; (id + s < gridDim.x) guards the ragged tail.
// NOTE(review): __syncthreads() only synchronises within one block, but the
// participating ids here span many blocks — there is no cross-block barrier,
// so this reduction is racy whenever gridDim.x exceeds one block's worth of
// ids. Confirm whether a second single-block launch was intended instead.
__global__ void globalReductionKernel(int* cudaDeltaArray) {
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
// Round gridDim.x up to a power of two (device-side nextPowerOf2).
int b = 1;
while (b < gridDim.x)
{
b = b << 1;
}
//printf("b=%d", b);
for (unsigned int s = b / 2; s > 0; s >>= 1) {
//printf("s=%d", s);
if ((id < s) && (id + s <gridDim.x)) {
printf("Id=%d niz[%d]=niz[%d]+niz[%d] => %d + %d\n", id, id, id, id + s, cudaDeltaArray[id], cudaDeltaArray[id + s]);
cudaDeltaArray[id] = cudaDeltaArray[id] + cudaDeltaArray[id + s];
}
__syncthreads();
}
}
// Driver: two-stage reduction producing NUM_BLOCKS * NUM_THREADS in delta.
// Fixes: the original uploaded an UNINITIALISED host array and leaked both
// the host array (new without delete[]) and the device buffer.
int main() {
    // Zero-initialise so the initial upload is defined.
    int* deltaArray = new int[NUM_BLOCKS]();
    int* cudaDeltaArray;
    cudaMalloc(&cudaDeltaArray, NUM_BLOCKS * sizeof(int));
    cudaMemcpy(cudaDeltaArray, deltaArray, NUM_BLOCKS * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimGrid(NUM_BLOCKS);
    dim3 dimBlock(NUM_THREADS);
    // Stage 1: per-block partials; stage 2: cross-block sum (see kernel notes).
    localReductionKernel <<< dimGrid, dimBlock >>> (cudaDeltaArray);
    globalReductionKernel <<< dimGrid, dimBlock >>> (cudaDeltaArray);
    int delta;
    // Blocking copy also synchronises with the preceding kernels.
    cudaMemcpy(&delta, &cudaDeltaArray[0], sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "Delta je " << delta;
    // Release device and host allocations (original leaked both).
    cudaFree(cudaDeltaArray);
    delete[] deltaArray;
    return 0;
}
22,880 | #include <stdio.h>
#include <iostream>
#include <ctime>
#include <unistd.h>
#include <cmath>
#include <sys/time.h>
#define N 1000000
#define BLOCK_SIZE 64
//#define TIME_CHECK clock()/float(CLOCKS_PER_SEC)
typedef unsigned long long timestamp;
//get time in microseconds
// Current wall-clock time in microseconds since the Unix epoch.
timestamp get_timestamp()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}
#define TIME_CHECK get_timestamp()
using namespace std;
float hArray[N];
float *dArray;
int blocks;
// Allocate the device-side result array (one float per series term).
// NOTE(review): the cudaMalloc return code is ignored.
void prologue(void) {
cudaMalloc((void**)&dArray, N*sizeof(float));
}
// Copy the per-term results back into hArray and release the device buffer.
// The blocking cudaMemcpy also synchronises with any pending kernel.
void epilogue(void) {
cudaMemcpy(hArray, dArray, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dArray);
}
// Kernel
// Kernel: term x of the Leibniz series for pi/4, arr[x] = (-1)^x / (2x+1).
__global__ void pi(float *arr) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;                       // tail guard
    double sign  = (idx % 2) ? -1 : 1;  // alternating numerator
    double denom = 2 * idx + 1;         // odd denominator
    arr[idx] = sign / denom;
}
// Computes pi two ways and reports timings:
//  - CPU: sums Leibniz terms until successive terms differ by <= eps (argv[1]),
//  - GPU: computes the first N terms in parallel, sums them on the host, and
//    times prologue / kernel / epilogue phases separately.
int main(int argc, char** argv)
{
timestamp gpu_start_time = 0;
timestamp gpu_post_prologue_time = 0;
timestamp gpu_post_computing_time = 0;
timestamp gpu_end_time = 0;
timestamp cpu_start_time = 0;
timestamp cpu_end_time = 0;
if(argc != 2)
return -1;
double eps = atof(argv[1]);
double x0=1, x1=10000;
double x2 = 1;
cout << eps << endl;
//cpu
cpu_start_time = TIME_CHECK;
int i = 1;
int mianownik = 1;
// Accumulate terms into x0 until consecutive terms agree within eps.
// (licznik/mianownik = numerator/denominator.)
while(abs(x2 - x1) > eps)
{
x1 = x2;
int licznik = (i%2)?-1:1;
i++;
mianownik += 2;
x2 = (float)licznik/(float)mianownik;
x0 += x2;
}
x0 = 4 * x0;
cpu_end_time = TIME_CHECK;
printf("%.10f\n", x0);
//gpu
int devCnt;
cudaGetDeviceCount(&devCnt);
if(devCnt == 0) {
perror("No CUDA devices available -- exiting.");
return 1;
}
gpu_start_time = TIME_CHECK;
prologue();
// Round the block count up so all N terms are covered.
blocks = N / BLOCK_SIZE;
if(N % BLOCK_SIZE)
blocks++;
gpu_post_prologue_time = TIME_CHECK;
pi<<<blocks, BLOCK_SIZE>>>(dArray);
// NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize
// is the modern equivalent.
cudaThreadSynchronize();
gpu_post_computing_time = TIME_CHECK;
epilogue();
// Host-side sum of the N per-term results, then *4 for pi.
double sum = 0;
for(int i=0;i<N;i++)
sum += hArray[i];
sum *= 4;
gpu_end_time = TIME_CHECK;
printf("%.10f\n", sum);
cout << "prologue\t" << gpu_post_prologue_time - gpu_start_time << endl;
cout << "counting\t" << gpu_post_computing_time - gpu_post_prologue_time << endl;
cout << "epilogue\t" << gpu_end_time - gpu_post_computing_time << endl;
cout << "cpu\t" << cpu_end_time - cpu_start_time << endl;
return 0;
}
|
22,881 | #include "includes.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
// Merge stage of the 256-bin histogram: block b sums bin b across all
// histogramCount partial histograms (strided loads over MERGE_THREADBLOCK_SIZE
// threads), tree-reduces the per-thread sums in shared memory, and writes the
// final count to d_Histogram[b]. Constants come from the histogram header.
__global__ void mergeHistogram256Kernel( uint *d_Histogram, uint *d_PartialHistograms, uint histogramCount )
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
// Barrier at the top of each step orders the previous step's writes.
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
22,882 |
#include <stdio.h>
#include <cuda.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
//use high as x value low as y for clustering
typedef struct day{
int month;
int date;
int year;
double high;
double low;
int cluster;
}day;
typedef struct center{
double x;
double y;
}center;
// One block per cluster: recompute the centroid (mean high/low) of all days
// currently assigned to cluster blockIdx.x.
// Fix: the original let EVERY thread zero the shared accumulators with no
// barrier before the atomicAdd loop, so a late-starting thread could wipe
// counts already accumulated by its peers. Thread 0 now initialises, and
// __syncthreads() orders init -> accumulate -> write-out.
__global__ void setCenters(day* data, center* centers, int k, int numDays) {
    __shared__ int nums[3];           // [0] = sum x, [1] = sum y, [2] = count
    int *sumx = &nums[0];
    int *sumy = &nums[1];
    int *cnt  = &nums[2];
    if (threadIdx.x == 0) {
        *sumx = 0;
        *sumy = 0;
        *cnt  = 0;
    }
    __syncthreads();                  // init complete before any accumulation
    // Block-stride scan over all days; accumulate members of this cluster.
    for (int idx = threadIdx.x; idx < numDays; idx += blockDim.x) {
        if (data[idx].cluster == blockIdx.x) {
            atomicAdd(cnt, 1);
            atomicAdd(sumx, (int)data[idx].high);
            atomicAdd(sumy, (int)data[idx].low);
        }
    }
    __syncthreads();                  // all adds visible before the write-out
    if (threadIdx.x == 0) {
        // NOTE(review): divides by zero if the cluster is empty — matches the
        // original behaviour; confirm callers guarantee non-empty clusters.
        centers[blockIdx.x].x = (double)(*sumx) / (double)(*cnt);
        centers[blockIdx.x].y = (double)(*sumy) / (double)(*cnt);
    }
}
// Assign each day to the nearest of the k centers (Euclidean distance in
// (high, low) space); atomically counts reassignments in *s so the host can
// detect convergence. Grid-stride loop over all numDays records.
// NOTE(review): every thread executes `*s = 0`, racing with other threads'
// atomicAdds — a late-arriving block can wipe counts already accumulated, so
// the host may observe an undercount and stop k-means early. The reset
// belongs on the host (cudaMemset before launch).
// NOTE(review): `min` starts at 1000 and `cluster` is uninitialised — if all
// centers are farther than 1000, an indeterminate cluster id is used.
__global__ void cluster(day* data, center* centers, int k, int numDays, int * s){
int numT=gridDim.x*blockDim.x;
int i=0;
int index;
int cluster;
*s=0;
while((index=threadIdx.x +blockIdx.x * blockDim.x+ numT*i) < numDays){
double min=1000;
for( int j=0; j< k; j++){
double x=data[index].high-centers[j].x;
x=x*x;
double y=data[index].low-centers[j].y;
y=y*y;
double dist=sqrt(x+y);
if(dist< min){
min=dist;
cluster=j;
}
}
// Count only actual reassignments; also stores the new assignment.
if(data[index].cluster!=cluster){
atomicAdd(s, 1);
data[index].cluster=cluster;
}
i++;
}
}
// Count, per (cluster, month) cell, how many days fall in that cell.
// Launch: k blocks of (12, 32) threads — threadIdx.x selects the month,
// and the 32 threadIdx.y lanes stride over all days.
// NOTE(review): month_data must be zeroed by the caller before launch — this
// kernel only atomicAdds into it.
__global__ void processData(day* data, int * month_data, int k, int numDays){
int i=0;
int month_index = blockIdx.x * 12 + threadIdx.x; //index for month_data
int data_index;//index for data
int month = threadIdx.x + 1;//month to look for
int cluster = blockIdx.x;//cluster to look for
// gridDim.y == 1 for the k-block 1-D grid, so the stride is blockDim.y (32).
while((data_index = threadIdx.y + blockDim.y * gridDim.y *i++) < numDays){
if((data[data_index].cluster == cluster) && (data[data_index].month == month) ){
atomicAdd(&month_data[month_index], 1);
}
}
}
// k-means over daily (high, low) temperature records read from a CSV file.
// argv[1] = k (cluster count), argv[2] = input file. Writes output.csv and a
// per-cluster month histogram to stdout.
// Fixes: bounded fscanf (station[] overflow), ceil block count (numDays/512
// launched 0 blocks for small inputs), zeroed d_month_data before atomicAdd,
// minimum setCenters thread count, and fclose on output.csv.
int main(int argc, char *argv[]) {
    printf("begin checks\n");
    if (argc < 3) {                 // need: k, input file
        printf("Missing Arguments");
        return 1;
    }
    int k = atoi(argv[1]);
    if (k < 1) {
        printf("invalid number of Clusters");
        return 1;
    }
    FILE *fp = fopen(argv[2], "r");
    if (fp == NULL) {
        perror("Failed to open file:");
        return 1;
    }
    day *data = (day*)malloc(sizeof(struct day));
    while ((fgetc(fp)) != '\n') {}  // skip the CSV header line
    int numDays = 0;
    int high = -1;
    int low, date, month, year;
    char station[15];
    // Fix: bound %[^,] to 14 chars + NUL — the unbounded original could
    // overflow station[15] on long station names.
    while (fscanf(fp, "%14[^,],%d/%d/%d,%d,%d", station, &month, &date, &year, &high, &low) == 6) {
        numDays++;
        data = (day*)realloc(data, sizeof(struct day) * numDays);
        data[numDays-1].date = date;
        data[numDays-1].high = high;
        data[numDays-1].low = low;
        data[numDays-1].month = month;
        data[numDays-1].year = year;
        data[numDays-1].cluster = -1;   // unassigned
    }
    fclose(fp);
    // Device copy of the day records.
    day *d_data;
    cudaMalloc((void **)&d_data, sizeof(struct day)*numDays);
    cudaMemcpy(d_data, data, sizeof(struct day)*numDays, cudaMemcpyHostToDevice);
    // Seed centers from spread-out data points.
    center *centers = (center*)malloc(sizeof(struct center) * k);
    for (int i = 0; i < k; i++) {
        centers[i].x = data[numDays/(i+2)].high;
        centers[i].y = data[numDays/(i+2)].low;
    }
    center *d_centers;
    cudaMalloc((void **)&d_centers, sizeof(struct center) * k);
    cudaMemcpy(d_centers, centers, sizeof(struct center) * k, cudaMemcpyHostToDevice);
    int temp = 1093;   // nonzero so the loop runs at least once
    int *s = &temp;    // number of reassignments in the last iteration
    int *d_s;
    cudaMalloc((void **)&d_s, sizeof(int));
    while (*s > 0) {
        *s = 0;
        // Fix: give d_s a defined starting value (the kernel's own in-thread
        // reset races with its atomicAdds — see NOTE on cluster()).
        cudaMemset(d_s, 0, sizeof(int));
        // Fix: round UP — numDays/512 is 0 for numDays < 512, which made the
        // launch invalid and read back garbage from d_s.
        int numB = (numDays + 511) / 512;
        cluster<<<numB, 512>>>(d_data, d_centers, k, numDays, d_s);
        cudaMemcpy(s, d_s, sizeof(int), cudaMemcpyDeviceToHost);
        if (*s > 0) {   // recompute centroids only if something moved
            int numT = ((numDays/k)/32)*32;   // multiple-of-32 thread count
            if (numT > 512) numT = 512;
            if (numT < 32)  numT = 32;        // fix: 0 threads was possible
            setCenters<<<k, numT>>>(d_data, d_centers, k, numDays);
        }
    }
    // Copy assignments back for printing.
    cudaMemcpy(data, d_data, sizeof(struct day)*numDays, cudaMemcpyDeviceToHost);
    fp = fopen("output.csv", "w");
    // Per-cluster listing in CSV format.
    for (int i = 0; i < k; i++) {
        fprintf(fp, "Cluster %d,Center,x=%f, y=%f\nDate,High,Low\n", i+1, centers[i].x, centers[i].y);
        for (int j = 0; j < numDays; j++) {
            if (i == data[j].cluster) {
                fprintf(fp, "%d/%d/%d,%f,%f,%d\n", data[j].month, data[j].date, data[j].year, data[j].high, data[j].low, data[j].cluster);
            }
        }
        fprintf(fp, "\n\n");
    }
    // Per-cluster, per-month day counts (k rows x 12 columns).
    int *month_data = (int*)malloc(k*12*sizeof(int));
    int *d_month_data;
    cudaMalloc((void **)&d_month_data, k*12*sizeof(int));
    // Fix: zero the counters — cudaMalloc does not initialise memory, so the
    // original atomicAdd-ed into garbage.
    cudaMemset(d_month_data, 0, k*12*sizeof(int));
    dim3 threads(12, 32);
    processData<<<k, threads>>>(d_data, d_month_data, k, numDays);
    cudaMemcpy(month_data, d_month_data, k * 12 * sizeof(int), cudaMemcpyDeviceToHost);
    fprintf(fp, "Cluster,JAN,FEB,MAR,APR,MAY,JUN,JUL,AUG,SEP,OCT,NOV,DEC\n");
    printf("%6s%5s%5s%5s%5s%5s%5s%5s%5s%5s%5s%5s%5s\n","Cluster","JAN", "FEB", "MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC");
    for (int i = 0; i < k; i++) {
        fprintf(fp, "%d", i+1);
        printf("%6d", i+1);
        for (int j = 0; j < 12; j++) {
            fprintf(fp, ",%d", month_data[i * 12 + j]);
            printf("%5d", month_data[i * 12 + j]);
        }
        fprintf(fp, "\n");
        printf("\n");
    }
    fclose(fp);   // fix: output.csv was never closed
    // Cleanup
    cudaFree(d_centers);
    cudaFree(d_s);
    cudaFree(d_data);
    cudaFree(d_month_data);
    free(data);
    free(centers);
    free(month_data);
    return 0;
}
|
22,883 | //this is a lite version of a GPU accelerated N-body simulation. Has to be run on an NVIDIA machine with CUDA enabled.
//the interaction is just gravitation
//the simulation trajectory is to be visualized in VMD
#include <cstdio>
#include <cstdlib>
#include <cmath>
#define N 9999 // number of bodies
#define MASS 0 // row in array for mass
#define X_POS 1 // row in array for x position
#define Y_POS 2 // row in array for y position
#define Z_POS 3 // row in array for z position
#define X_VEL 4 // row in array for x velocity
#define Y_VEL 5 // row in array for y velocity
#define Z_VEL 6 // row in array for z velocity
#define G 10 // "gravitational constant" (not really)
float dt = 0.05; // time interval
// each thread computes new position of one body
// One thread per body: accumulate pairwise forces from all OTHER bodies, then
// integrate velocity and position with explicit Euler over dt.
// Fix: the original inner loop condition `j < N && j != i` TERMINATES at
// j == i, so body i only felt forces from bodies 0..i-1; self is now skipped
// with `continue` and all N-1 partners contribute.
// NOTE(review): positions are read while other threads update them (no
// double-buffering) — unchanged from the original design.
__global__ void nbody(float *dev_body, float dt) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N) {
        // Accumulated force on body i.
        float Fx_dir = 0.0f;
        float Fy_dir = 0.0f;
        float Fz_dir = 0.0f;
        for (int j = 0; j < N; j++) {
            if (j == i) continue;   // fix: skip self instead of stopping
            float x_diff = dev_body[i*7+X_POS] - dev_body[j*7+X_POS];
            float y_diff = dev_body[i*7+Y_POS] - dev_body[j*7+Y_POS];
            float z_diff = dev_body[i*7+Z_POS] - dev_body[j*7+Z_POS];
            float rr = (x_diff * x_diff + y_diff * y_diff + z_diff * z_diff);
            float r = sqrtf(rr);    // float sqrt, avoids double promotion
            // NOTE(review): F ~ 1/r, not 1/r^2 — intentional per the
            // "gravitational constant (not really)" comment; left unchanged.
            float F = G * dev_body[i*7+MASS] * dev_body[j*7+MASS] / r;
            if (r > 10.0f) {
                // long range: attract
                Fx_dir += -F * x_diff / r;
                Fy_dir += -F * y_diff / r;
                Fz_dir += -F * z_diff / r;
            } else if (r > 0.5f) {
                // short range: repel, avoiding huge accelerations at small r
                Fx_dir -= -F * x_diff / r;
                Fy_dir -= -F * y_diff / r;
                Fz_dir -= -F * z_diff / r;
            }
        }
        // Explicit Euler: v += (F/m) * dt, then x += v * dt.
        dev_body[i*7+X_VEL] += Fx_dir * dt / dev_body[i*7+MASS];
        dev_body[i*7+Y_VEL] += Fy_dir * dt / dev_body[i*7+MASS];
        dev_body[i*7+Z_VEL] += Fz_dir * dt / dev_body[i*7+MASS];
        dev_body[i*7+X_POS] += dev_body[i*7+X_VEL] * dt;
        dev_body[i*7+Y_POS] += dev_body[i*7+Y_VEL] * dt;
        dev_body[i*7+Z_POS] += dev_body[i*7+Z_VEL] * dt;
    }
}
// Driver: initialises N bodies as two rotating "galaxies", steps the
// simulation argv[1] times, and emits PDB MODEL frames for VMD.
// Fix: the original called cudaFree(body) — a HOST pointer, and one that had
// already been free()d — and leaked dev_body; we free the device buffer.
int main(int argc, char **argv) {
    float *body;        // host copy: 7 floats (mass, pos, vel) per body
    float *dev_body;    // device copy
    int tmax = 0;
    if (argc != 2) {
        fprintf(stderr, "Format: %s { number of timesteps }\n", argv[0]);
        exit (-1);
    }
    tmax = atoi(argv[1]);
    int bodysize = N * 7 * sizeof(float);
    body = (float *)malloc(bodysize);
    cudaMalloc((void**) &dev_body, bodysize);
    // Assign each body a random position/velocity.
    for (int i = 0; i < N; i++) {
        body[i * 7 + MASS] = i%1001?1:1000;   //create several heavy regions
        body[i * 7 + X_POS] = (i%2?0.0:200.0)+drand48() * 50.0;   //define two galaxis
        body[i * 7 + Y_POS] = drand48() * 50.0;
        body[i * 7 + Z_POS] = drand48() * 20.0;   //a plate-like distribution
        body[i * 7 + X_VEL] = drand48() * 0.1/body[i * 7 + MASS];
        body[i * 7 + Y_VEL] = (i%2?-10:10)+drand48() * 1.0/body[i * 7 + MASS];   //angular momentum
        body[i * 7 + Z_VEL] = drand48() * 0.1/body[i * 7 + MASS];
    }
    // Print initial positions in PDB format.
    printf("MODEL %8d\n", 0);
    for (int i = 0; i < N; i++) {
        printf("%s%7d %s %s %s%4d %7.0f %7.0f %7.0f %4.2f %4.3f\n",
               "ATOM", i+1, "CA ", "GLY", "A", i+1, body[i * 7 + X_POS], body[i * 7 + Y_POS], body[i * 7 + Z_POS], 1.00, 0.00);
    }
    printf("TER\nENDMDL\n");
    // Copy nbody info over to the GPU once; it stays resident between steps.
    cudaMemcpy(dev_body, body, bodysize, cudaMemcpyHostToDevice);
    for (int t = 0; t < tmax; t++) {
        dim3 blockDim(1024);
        dim3 gridDim((int)ceil(N*1.0 / blockDim.x));
        nbody<<<gridDim, blockDim>>>(dev_body, dt);
        cudaThreadSynchronize();
        if(!(t%1))   //change output frequency by the mod factor
        {
            cudaMemcpy(body, dev_body, bodysize, cudaMemcpyDeviceToHost);
            printf("MODEL %8d\n", t+1);
            for (int i = 0; i < N; i++) {
                printf("%s%7d %s %s %s%4d %7.0f %7.0f %7.0f %4.2f %4.3f\n",
                       "ATOM", i+1, "CA ", "GLY", "A", i+1, body[i * 7 + X_POS], body[i * 7 + Y_POS], body[i * 7 + Z_POS], 1.00, 0.00);
            }
            printf("TER\nENDMDL\n");
        }
    }
    free(body);
    cudaFree(dev_body);   // fix: was cudaFree(body) on a freed host pointer
    return 0;
}
|
22,884 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Debug kernel: print (tid, gid, value) for every launched thread.
// No bounds guard — every thread dereferences input[gid], so the launch must
// not exceed the array length (mem_trs_test2 is the guarded variant).
__global__ void mem_trs_test(int * input){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]);
}
// Guarded variant: print (tid, gid, value) only for indices below `size`,
// so the launch may safely round threads up past the array length.
__global__ void mem_trs_test2(int * input, int size){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]);
}
// Driver: fills 150 random bytes, uploads them, and prints each element from
// the guarded kernel (160 launched threads, 10 idled by the size guard).
// Fix: the original leaked both h_input (malloc) and d_input (cudaMalloc,
// reclaimed only implicitly by cudaDeviceReset).
int main(int argc, char ** argv) {
    int size = 150;
    int byte_size = size * sizeof(int);
    int *h_input = (int *)malloc(byte_size);
    time_t t;
    srand((unsigned)time(&t));
    // Random values in 0..255.
    for (int i = 0; i < size; i++) {
        h_input[i] = (int)(rand() & 0xff);
    }
    int *d_input;
    cudaMalloc((void**)&d_input, byte_size);
    cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);
    dim3 block(32);
    dim3 grid(5);
    mem_trs_test2 <<<grid, block>>>(d_input, size);
    cudaDeviceSynchronize();   // wait for device printf output
    cudaFree(d_input);         // fix: release device buffer
    free(h_input);             // fix: release host buffer
    cudaDeviceReset();
    return 0;
}
|
22,885 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 512
/* Set every element of array[0..size-1] to 1. */
void fillArray(int *array, int size){
    for (int k = 0; k < size; k++)
        array[k] = 1;
}
// Write array[0..size-1] space-separated (newline-terminated) to
// "result.txt"; terminates the whole process if the file cannot be opened.
void printInFile(int *array, int size){
int i;
FILE *fPtr;
if((fPtr = fopen("result.txt", "w")) == NULL){
puts("error on file...");
exit(1);
}
printf("printing on file\n");
for(i = 0; i < size; i++){
fprintf(fPtr, "%d ", array[i]);
}
fprintf(fPtr, "\n");
fclose(fPtr);
}
/* Print array[0..size-1] space-separated to stdout, newline-terminated. */
void printArray(int *array, int size){
    for (int k = 0; k < size; k++)
        printf("%d ", array[k]);
    printf("\n");
}
// runs on device, called by host
// Elementwise c = a + b; assumes a single-block 1-D launch of exactly one
// thread per element (indexes by threadIdx.x only).
__global__ void gpuAdd(int *a, int *b, int *c){
    const int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}
// Adds two N-element vectors of ones on the GPU (one block of N threads) and
// writes the result (all 2s) to result.txt.
// NOTE(review): no CUDA error checking; the blocking result cudaMemcpy
// doubles as synchronisation with the kernel.
int main(int argc, char **argv)
{
int size = N * sizeof(int);
// host variables
int *a = (int *)malloc(size);
int *b = (int *)malloc(size);
int *c = (int *)malloc(size);
int *d_a, *d_b, *d_c; // device variables
int i;
// allocate device memory
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
fillArray(a, N);
fillArray(b, N);
// copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// kernel launch
puts("Launch kernel");
// 1 block of 512 threads (1D threads vector)
gpuAdd<<<1,N>>>(d_a, d_b, d_c);
// copy output to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
printInFile(c, N);
// free host memory
free(a);
free(b);
free(c);
// free device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
22,886 | #include "includes.h"
// Logistic sigmoid: 1 / (1 + e^-x), in [0, 1].
// Fix: the original used `exp` and double literals, promoting the whole
// expression to double precision per element; expf and 1.0f keep the math
// in float, which is what the interface advertises.
__device__ float activation_function(float x)
{
    return 1.0f / (1.0f + expf(-x));
}
// Apply the sigmoid to input[0..N) into output[0..N): each of the
// blockDim.x * gridDim.x threads handles the contiguous slice
// [N*pos/size, N*(pos+1)/size), partitioning the array exactly.
// NOTE(review): N * pos is an int product — for large N times many threads
// this can overflow; confirm launch sizes keep N*size within int range.
__global__ void apply_activation_function(float *input, float *output, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
output[idx] = activation_function(input[idx]);
}
}
22,887 |
// Mass-action kinetics RHS, one thread per grid cell: zero deriv[], then for
// every reaction compute rate = rateConst * product(reactant states) and
// subtract it from each reactant's derivative / add it to each product's.
// Arrays are cell-major: element [index + stride * tid]; reactId/prodId are
// padded to maxreact/maxprod entries per reaction.
extern "C" __global__
void solve_general(double *rateConst, double *state, double *deriv,
int *numReact, int *numProd, int *reactId, int *prodId,
int numcell, int numrxn, int numspec, int maxreact, int maxprod)
{
size_t tid;
int i_spec, i_rxn, i_react, i_prod;
double rate;
tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numcell) {
// Reset this cell's derivative vector before accumulating.
for (i_spec = 0; i_spec < numspec; ++i_spec)
deriv[i_spec+numspec*tid] = 0.0;
for (i_rxn = 0; i_rxn < numrxn; ++i_rxn) {
rate = rateConst[i_rxn+numrxn*tid];
for (i_react = 0; i_react < numReact[i_rxn]; ++i_react)
rate *= state[reactId[i_rxn*maxreact+i_react]+numspec*tid];
for (i_react = 0; i_react < numReact[i_rxn]; ++i_react)
deriv[reactId[i_rxn*maxreact+i_react]+numspec*tid] -= rate;
for (i_prod = 0; i_prod < numProd[i_rxn]; ++i_prod)
deriv[prodId[i_rxn*maxprod+i_prod]+numspec*tid] += rate;
}
}
}
|
22,888 | #include "includes.h"
// Systematic (low-variance) particle resampling, one block per feature
// (block-stride over n_features, blockDim.x particles per feature).
// Each thread draws the position r = randvals[n] + tid/n_particles and walks
// feature n's cumulative weights to find the source particle, then copies
// that particle's state into its own slot of the *_sampled arrays.
// NOTE(review): assumes each feature's weights sum to ~1 and randvals[n] lies
// in [0, 1/n_particles) — confirm with the caller.
__global__ void resampleFeaturesKernel(double* u, double* v, double* d, double* vu, double* vv, double* vd, double* weights, double* randvals, int n_features, double* u_sampled, double* v_sampled, double* d_sampled, double* vu_sampled, double* vv_sampled, double* vd_sampled)
{
// each block corresponds to 1 feature. there may be more features
// than the maximum number of blocks, so we use this for loop
int n_particles = blockDim.x ;
for ( int n = blockIdx.x ; n < n_features; n += gridDim.x ){
double interval = 1.0/n_particles ;
double r = randvals[n] + threadIdx.x*interval ;
int offset = blockDim.x*n ;
double c = weights[offset] ;
int idx = offset ;
// Walk the cumulative weight sum until it exceeds r; clamp at the last
// particle of this feature so idx never crosses into the next feature.
while ( r > c ){
c += weights[++idx] ;
if (idx == offset + n_particles){
idx-- ;
break ;
}
}
int idx_new = n*blockDim.x + threadIdx.x ;
u_sampled[idx_new] = u[idx] ;
v_sampled[idx_new] = v[idx] ;
d_sampled[idx_new] = d[idx] ;
vu_sampled[idx_new] = vu[idx] ;
vv_sampled[idx_new] = vv[idx] ;
vd_sampled[idx_new] = vd[idx] ;
}
}
22,889 | #include <stdio.h>
#include <cuda.h>
#include <assert.h>
// Check the result of a CUDA runtime call, aborting (assert) on failure.
// Fix: the original read `while(0) { ... }`, whose body NEVER executes — so
// every wrapped call (cudaMalloc, cudaMemcpy, ...) was silently skipped.
// The canonical do { ... } while(0) form executes the call exactly once and
// still swallows the trailing ';' safely inside if/else.
#define CUDA_WRAP(fct_call) \
    do { \
        cudaError_t rv = (fct_call); \
        assert(rv == cudaSuccess); \
    } while (0)
#define N 10
#define NUM_BLOCKS 2
// Elementwise add with a block-stride loop: block b handles indices
// b, b + gridDim.x, b + 2*gridDim.x, ... up to N.
__global__ void add(int *a, int *b, int *c) {
    for (int idx = blockIdx.x; idx < N; idx += gridDim.x)
        c[idx] = a[idx] + b[idx];
}
// Prints device info for every GPU, then runs a tiny vector-add example.
// Fix: the result copy had dst/src swapped — the original wrote the stale
// host array INTO dev_c (with a DeviceToHost kind), so the printed c[] was
// always the zeroed host values; we now copy device -> host correctly.
int main() {
    // Print out info about the GPUs
    int count;
    CUDA_WRAP( cudaGetDeviceCount(&count));
    printf("gpu count = %d\n", count);
    int runtimeVersion;
    CUDA_WRAP( cudaRuntimeGetVersion(&runtimeVersion));
    printf("runtime version = %d\n", runtimeVersion);
    int driverVersion;
    CUDA_WRAP( cudaDriverGetVersion(&driverVersion));
    printf("driver version = %d\n", driverVersion);
    printf("\n");
    for (int device = 0; device < count; device++) {
        cudaDeviceProp prop;
        CUDA_WRAP( cudaGetDeviceProperties(&prop, device));
        printf("device = %d:\n", device);
        printf(" name = %s\n", prop.name);
        printf(" totalGlobalMem = %zd\n", prop.totalGlobalMem);
        printf(" compute capability = %d.%d\n", prop.major, prop.minor);
        printf(" clockRate = %d\n", prop.clockRate);
        printf(" multiProcessorCount = %d\n", prop.multiProcessorCount);
        printf("\n");
    }
    // A very simple GPU compute example, adds two vectors on the GPU.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    size_t arr_size = N * sizeof(int);
    CUDA_WRAP( cudaMalloc((void **) &dev_a, arr_size));
    CUDA_WRAP( cudaMalloc((void **) &dev_b, arr_size));
    CUDA_WRAP( cudaMalloc((void **) &dev_c, arr_size));
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
        c[i] = 1000 + i;
    }
    CUDA_WRAP ( cudaMemcpy(dev_a, a, arr_size, cudaMemcpyHostToDevice));
    CUDA_WRAP ( cudaMemcpy(dev_b, b, arr_size, cudaMemcpyHostToDevice));
    CUDA_WRAP ( cudaMemcpy(dev_c, c, arr_size, cudaMemcpyHostToDevice));
    // Zero the host result so a failed copy-back is obvious.
    for (int i = 0; i < N; i++) {
        c[i] = 0;
    }
    add<<<NUM_BLOCKS,1>>>(dev_a, dev_b, dev_c);
    // Fix: copy the RESULT back (device -> host); args were swapped before.
    CUDA_WRAP( cudaMemcpy(c, dev_c, arr_size, cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        printf("%5d %5d %8d\n", a[i], b[i], c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
22,890 | #include <stdio.h>
#include <cuda.h>
#define N 500
#define BLOCKSIZE 64
#define ELEPERTHREAD 5
__device__ unsigned wlsize;
__device__ unsigned worklist[N * ELEPERTHREAD];
// Each thread reserves nelements[id] slots in the global worklist via
// atomicAdd on wlsize, then fills them with its own id.
// Fix: the launch rounds N (500) up to a whole number of BLOCKSIZE (64)
// blocks — 512 threads — so ids 500..511 read nelements OUT OF BOUNDS and
// appended garbage-sized runs; surplus threads are now guarded off.
__global__ void k1(unsigned *nelements) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= N) return;   // guard the rounded-up tail
    unsigned index = atomicAdd(&wlsize, nelements[id]);
    for (unsigned ii = 0; ii < nelements[id]; ++ii)
        worklist[index + ii] = id;
}
// Single-thread dump of the worklist built by k1 (launch with <<<1,1>>>).
__global__ void k2() {
    printf("Number of threads = %d, worklist size = %d\n", N, wlsize);
    for (unsigned pos = 0; pos < wlsize; ++pos)
        printf("%d ", worklist[pos]);
    printf("\n");
}
// Driver: builds random per-thread element counts, runs k1 to fill the
// device worklist, then k2 to print it.
// Fix: cudaMemset(&wlsize, ...) took the HOST address of a __device__ symbol,
// which is invalid — the symbol must be initialised via cudaMemcpyToSymbol.
// Also frees the nelements device buffer the original leaked.
int main() {
    unsigned zero = 0;
    cudaMemcpyToSymbol(wlsize, &zero, sizeof(unsigned));   // initialization
    unsigned hnelements[N];
    for (unsigned ii = 0; ii < N; ++ii) {
        hnelements[ii] = rand() % ELEPERTHREAD;
    }
    unsigned *nelements;
    cudaMalloc(&nelements, N * sizeof(unsigned));
    cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
    unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;   // ceil-div
    k1<<<nblocks, BLOCKSIZE>>>(nelements);
    cudaDeviceSynchronize();
    k2<<<1, 1>>>();
    cudaDeviceSynchronize();
    cudaFree(nelements);   // fix: was leaked
    return 0;
}
|
22,891 | #include <stdio.h>
#include <math.h>
#define N 1024
//Interleave addressing kernel_version
//Interleave addressing kernel_version
// Per-block interleaved-addressing reduction: block b sums its blockDim.x
// inputs in shared memory and writes the partial sum to d_out[b].
// Fix: the stride loop indexed shared memory with the GLOBAL thread id
// (2*s*id), which is only correct for a single-block launch; it now uses the
// local thread index so every block reduces its own tile. Dead commented-out
// code removed.
__global__ void interleaved_reduce(int *d_in, int *d_out)
{
    __shared__ int sm[N];
    const int tid = threadIdx.x;
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    sm[tid] = d_in[gid];
    __syncthreads();                 // tile loaded before reduction
    for (int s = 1; s < blockDim.x; s *= 2) {
        int j = 2 * s * tid;         // interleaved (strided) addressing
        if (j < blockDim.x)
            sm[j] += sm[j + s];
        __syncthreads();             // step complete before next stride
    }
    if (tid == 0)
        d_out[blockIdx.x] = sm[0];
}
//Contiguous addressing kernel version
__global__ void contiguous_reduce(int *d_in, int *d_out)
{
/*
int i = threadIdx.x;
int M = N/2;
for (int s = M; s > 0; s /= 2){
if (i < M){
printf("stride = %d, thread %d is active\n" , s, i);
d_in[i] = d_in[i] + d_in[i+s];
}
M = M/2;
}
if (i == 0)
d_out[0] = d_in[0];
*/
//using shared memory
__shared__ int sm[N];
int i = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sm[i] = d_in[id];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2){
if (i < s){
sm[i] += sm[i+s];
}
__syncthreads();
}
if (i == 0)
d_out[blockIdx.x] = sm[0];
}
// Driver: reduces 1..N with the single-block contiguous kernel and reports
// the sum and kernel time.
// Fixes: return 0 on success (original returned -1 unconditionally) and
// destroy the timing events (original leaked them). The inactive
// interleaved_reduce call remains available as an alternative kernel.
int main()
{
    int h_in[N];
    int h_out = 0;
    // CUDA events bracket the kernel for timing.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    for (int i = 0; i < N; i++)
        h_in[i] = i + 1;             // sum = N*(N+1)/2
    int *d_in, *d_out;
    cudaMalloc((void**) &d_in, N*sizeof(int));
    cudaMalloc((void**) &d_out, sizeof(int));
    cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
    // Single block of 1024 threads: the whole array in one reduction.
    cudaEventRecord(start);
    contiguous_reduce<<<1, 1024>>>(d_in, d_out);
    cudaEventRecord(stop);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    cudaFree(d_in);
    cudaFree(d_out);
    cudaEventDestroy(start);         // fix: events were leaked
    cudaEventDestroy(stop);
    printf("Output %d\n", h_out);
    printf("Time used: %f milliseconds\n", ms);
    return 0;                        // fix: was `return -1`
}
|
22,892 | #include "includes.h"
// Gather every s-th point (read stride s*pointDim) through shared memory and
// compact it to the front of A (write stride pointDim), one element per
// thread; the staging buffer plus barrier separates a block's strided reads
// from its compacted writes.
// NOTE(review): assumes blockDim.x <= 1024 (capacity of `mem`) and that one
// block's writes never overlap another block's reads — there is no inter-
// block synchronisation here; confirm with the launch logic.
__global__ void kernMoveMem(const size_t numPoints, const size_t pointDim, const size_t s, double* A) {
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
// Before
// [abc......] [def......] [ghi......] [jkl......]
// shared memory
// [adgj.....]
// After
// [a..d..g..] [j........] [ghi......] [.........]
__shared__ double mem[1024];
mem[threadIdx.x] = A[s * i * pointDim];
__syncthreads();
A[i * pointDim] = mem[threadIdx.x];
}
22,893 | #include "includes.h"
// Guarded elementwise copy: a[i] = b[i] for all i < n.
__global__ void cpy(float *a, float *b, int n) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    a[tid] = b[tid];
}
22,894 | #include "includes.h"
// Build bookmarks[z] = index of the first visibility in grid cell z, given
// vis_in sorted by (main_y, main_x) cell; grid-stride over q in [0, npts].
// Fix: the original unconditionally read vis_in[q-1] (out of bounds at q==0)
// and vis_in[npts] (out of bounds at q==npts) and only then overrode the
// results; out-of-range entries are no longer dereferenced.
__global__ void set_bookmarks(int2* vis_in, int npts, int blocksize, int blockgrid, int* bookmarks) {
    for (int q = threadIdx.x + blockIdx.x*blockDim.x; q <= npts; q += gridDim.x*blockDim.x) {
        int main_x, main_y, main_x_last, main_y_last;
        if (0 == q) {
            // Sentinel "previous cell" before the first visibility.
            main_x_last = -1;
            main_y_last = 0;
        } else {
            int2 last_vis = vis_in[q-1];
            main_x_last = last_vis.x/GCF_GRID/blocksize;
            main_y_last = last_vis.y/GCF_GRID/blocksize;
        }
        if (npts == q) {
            // Sentinel "past-the-end cell" so trailing cells get bookmarked.
            main_x = main_y = blockgrid;
        } else {
            int2 this_vis = vis_in[q];
            main_x = this_vis.x/GCF_GRID/blocksize;
            main_y = this_vis.y/GCF_GRID/blocksize;
        }
        // Cell boundary between q-1 and q: q starts every cell in between.
        if (main_x != main_x_last || main_y != main_y_last) {
            for (int z = main_y_last*blockgrid + main_x_last + 1;
                 z <= main_y*blockgrid + main_x; z++) {
                bookmarks[z] = q;
            }
        }
    }
}
22,895 | #include "includes.h"
/** Fill arr[0:tsize) with val — grid-stride loop, any launch geometry. **/
__global__ void gpu_array_init_r4__(size_t tsize, float *arr, float val)
{
    const size_t first  = blockIdx.x*blockDim.x + threadIdx.x;
    const size_t stride = gridDim.x*blockDim.x;
    for (size_t idx = first; idx < tsize; idx += stride)
        arr[idx] = val;
}
22,896 | #include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>
#define MAX_STREAMS 3
uint32_t *bufferA[MAX_STREAMS], *bufferB[MAX_STREAMS];
int flags[MAX_STREAMS] = {1,1,1};
int max_iteration = 10;
pthread_mutex_t lock;
// Abort (via assert) with a readable message when a CUDA runtime call fails;
// returns the status so calls can be wrapped inline: checkCuda(cudaMalloc(...)).
inline
cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
// Data initialization kernel
// Data initialization kernel: buff[i] = i, grid-stride over vector_size so
// any launch geometry covers the whole vector.
__global__ void init_data(uint32_t *buff, const int vector_size){
    const int stride = gridDim.x * blockDim.x;
    for (int pos = threadIdx.x + blockDim.x*blockIdx.x;
         pos < vector_size;
         pos += stride) {
        buff[pos] = pos;
    }
}
// Vector addition kernel
// "Vector addition" kernel: obuff[i] = ibuff[i] + ibuff[i] (doubles each
// element), grid-stride over vector_size.
__global__ void vector_add(uint32_t *ibuff, uint32_t *obuff, const int vector_size){
    const int stride = gridDim.x * blockDim.x;
    for (int pos = threadIdx.x + blockDim.x*blockIdx.x;
         pos < vector_size;
         pos += stride) {
        obuff[pos] = ibuff[pos] + ibuff[pos];
    }
}
// Read/write controller thread
void *read_write_controller(void *){
int i = 0;
printf("Starting read_write_controller\n");
while (i<max_iteration) {
sleep(0.0001);
if (flags[i%MAX_STREAMS] == 1){
// Printing bufferA and bufferB element
printf("A : %d\n",bufferA[i%MAX_STREAMS][1]);
printf("B : %d\n",bufferB[i%MAX_STREAMS][1]);
// updating flag (mutex protected)
pthread_mutex_lock(&lock);
flags[i%MAX_STREAMS] = 0;
printf("iteration (%d) : [%d,%d,%d]\n",i,flags[0],flags[1],flags[2]);
pthread_mutex_unlock(&lock);
i++;
}
}
return NULL;
}
// Pipelined producer/consumer demo: pinned host buffers feed managed device
// buffers over MAX_STREAMS CUDA streams while a pthread consumes results.
// Fixes: (1) memcpy kinds were swapped — the H2D upload was tagged
// DeviceToHost and the D2H download HostToDevice; (2) the mutex was
// initialised AFTER the thread using it was created; (3) sleep(0.0001)
// truncated to sleep(0), busy-spinning; (4) streams were never destroyed.
int main(){
    uint32_t *ibuff[MAX_STREAMS], *obuff[MAX_STREAMS];
    int result = 0, device_id = 0;
    int numBlocks, numThreadsPerBlock = 1024;
    int vector_size = 1024*1024;
    size_t size = vector_size*sizeof(uint32_t);
    cudaStream_t streams[MAX_STREAMS];
    for (int stream = 0; stream < MAX_STREAMS; stream++){
        cudaStreamCreate(&streams[stream]);
    }
    ////////////////////////////////////////////////////////////////
    // MEMORY ALLOCATION ON GPU (managed; prefer device residency)
    ////////////////////////////////////////////////////////////////
    printf("Memory allocation GPU\n");
    cudaDeviceGetAttribute (&result, cudaDevAttrConcurrentManagedAccess, device_id);
    for (int stream = 0; stream < MAX_STREAMS; stream++){
        checkCuda(cudaMallocManaged(&ibuff[stream], size));
        checkCuda(cudaMallocManaged(&obuff[stream], size));
        if (result) {
            checkCuda(cudaMemAdvise(ibuff[stream], size, cudaMemAdviseSetPreferredLocation, device_id));
            checkCuda(cudaMemAdvise(obuff[stream], size, cudaMemAdviseSetPreferredLocation, device_id));
        }
        checkCuda(cudaMemset(ibuff[stream], 0, size));
        checkCuda(cudaMemset(obuff[stream], 0, size));
    }
    ////////////////////////////////////////////////////////////////
    // MEMORY ALLOCATION ON HOST (pinned, required for true async copies)
    ////////////////////////////////////////////////////////////////
    printf("Memory allocation HOST\n");
    for (int stream = 0; stream < MAX_STREAMS; stream++){
        checkCuda(cudaHostAlloc(&bufferA[stream], size, cudaHostAllocDefault));
        checkCuda(cudaHostAlloc(&bufferB[stream], size, cudaHostAllocDefault));
    }
    for (int i = 0; i < vector_size; i++){
        for (int stream = 0; stream < MAX_STREAMS; stream++){
            bufferA[stream][i] = i + 1000*stream;
        }
    }
    ///////////////////////////////////////////////////////////////
    // RUNNING READ/WRITE CONTROLLER ON SPECIFIC THREAD
    ///////////////////////////////////////////////////////////////
    // Fix: initialise the mutex BEFORE starting the thread that locks it.
    if (pthread_mutex_init(&lock, NULL) != 0){
        printf("Mutex initialization failed.\n");
        return 1;
    }
    pthread_t thread;
    printf("Running thread \n");
    if (pthread_create(&thread, NULL, &read_write_controller, NULL)){
        fprintf(stderr, "Error creating thread \n");
        return 1;
    }
    ///////////////////////////////////////////////////////////////
    // RUNNING GPU KERNEL PIPELINING
    //////////////////////////////////////////////////////////////
    int stream = 0;
    cudaDeviceGetAttribute(&numBlocks, cudaDevAttrMultiProcessorCount, 0);
    for (int iteration = 0; iteration < max_iteration; iteration++){
        stream = iteration % MAX_STREAMS;
        // Wait until the controller has consumed this slot.
        // Fix: sleep(0.0001) was sleep(0); usleep gives a real pause.
        while (flags[stream] == 1){
            usleep(100);
        }
        // Fix: correct copy kinds — upload is HostToDevice, download is
        // DeviceToHost (they were swapped in the original).
        cudaMemcpyAsync(ibuff[stream], bufferA[stream], size, cudaMemcpyHostToDevice, streams[stream]);
        vector_add<<<4*numBlocks, numThreadsPerBlock, 0, streams[stream]>>>(ibuff[stream], obuff[stream], vector_size);
        cudaMemcpyAsync(bufferB[stream], obuff[stream], size, cudaMemcpyDeviceToHost, streams[stream]);
        // Slot refilled; the controller may consume it again.
        pthread_mutex_lock(&lock);
        flags[stream] = 1;
        pthread_mutex_unlock(&lock);
    }
    pthread_join(thread, NULL);
    printf("Completed %d iterations successfully\n", max_iteration);
    for (int i = 0; i < MAX_STREAMS; i++){
        cudaFreeHost(bufferA[i]);
        cudaFreeHost(bufferB[i]);
        cudaFree(ibuff[i]);
        cudaFree(obuff[i]);
        cudaStreamDestroy(streams[i]);   // fix: streams were leaked
    }
    pthread_mutex_destroy(&lock);
    return 0;
}
|
22,897 | #include "gpu_lib.cuh"
// Weighted element-wise blend of two byte arrays: c[i] = a[i]*0.7 + b[i]*0.3.
// One thread per element; threads whose index falls past n do nothing.
__global__ void add(unsigned char* a,unsigned char* b,unsigned char* c,int n)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] * 0.7f + b[idx] * 0.3f;
}
// Blends two 10-byte host arrays a and b into host array c on the GPU
// (c[i] = a[i]*0.7 + b[i]*0.3 via the `add` kernel).
// Fixes vs. original: cudaMalloc returns cudaError_t (not long); every
// allocation is checked; device buffers are freed on ALL paths, so a failed
// dev_c allocation no longer leaks dev_a/dev_b.
extern "C" void func(unsigned char* a,unsigned char *b,unsigned char *c)
{
    // Element count was hard-coded in six places; keep it in one.
    const int n = 10;
    const size_t bytes = n * sizeof(unsigned char);
    unsigned char* dev_a = NULL;
    unsigned char* dev_b = NULL;
    unsigned char* dev_c = NULL;
    cudaError_t status = cudaMalloc(&dev_a, bytes);
    if (status == cudaSuccess)
        status = cudaMalloc(&dev_b, bytes);
    if (status == cudaSuccess)
        status = cudaMalloc(&dev_c, bytes);
    if (status == cudaSuccess)
    {
        cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
        // Ceil-divide so any n (not just n <= 256) gets enough blocks.
        add<<<(n + 255) / 256, 256>>>(dev_a, dev_b, dev_c, n);
        cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    }
    // Unconditional cleanup: cudaFree(NULL) is a documented no-op, so this
    // is safe even when an allocation above failed.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
|
22,898 | #include "includes.h"
/*
Sample input file format:
1.Line : 6 => Number of nodes(int)
2.Line : 7 => Number of edges(int)
3.Line : 1 2 5.0 ----------------
4.Line : 2 3 1.5 |
5.Line : 1 3 2.1 |
6.Line : 1 4 1.2 |=> Edges
7.Line : 1 5 15.5 |
8.Line : 2 5 3.6 |
9.Line : 3 6 1.2-----------------
10.Line : 1 => Start node.
///////////////////////////////////////////////////////
Doesn't check any error condition.
*/
using namespace std;
// Edge list in structure-of-arrays layout: edge k runs from startPoints[k]
// to endPoints[k] with weight weights[k]. SoA keeps same-field accesses by
// consecutive GPU threads contiguous (coalesced), unlike an array of
// {start, end, weight} records.
typedef struct {
int* startPoints;
int* endPoints;
double* weights;
}Edge;
// This kernel will call queue size thread.
// Frontier expansion: one thread per edge (launch uses a single block with
// edge-count threads — note there is no bounds check, so the launch must
// supply exactly that many threads). An edge is appended to the queue when
// it leaves *currentVertex and its destination has not been visited.
// atomicAdd hands each appending thread a distinct slot.
__global__ void updateQueueKernel(int *queueu,int *queueSize, const int *startPoints, const int *endPoints,const int*visitedArray, const int *currentVertex ) {
	const int edge = threadIdx.x;
	// Short-circuit keeps endPoints[] unread unless the edge is relevant.
	if (startPoints[edge] == *currentVertex && visitedArray[endPoints[edge]] == 0)
	{
		const int slot = atomicAdd(queueSize, 1);
		queueu[slot] = edge;
	}
}
22,899 | // TODO: Make more generic
// Copies vector d_b (length `size`) into row `a_x` of the row-major
// matrix d_a: d_a[a_x*size + j] = d_b[j] for j in [0, size).
__global__ void subset_assignment_kernel(float *d_a, float *d_b, int a_x, int size) {
    // One thread per destination element; guard drops the grid tail.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < size) {
        d_a[a_x * size + j] = d_b[j];
    }
}
// TODO: Make more generic
// Copies a block of rows from d_b (`size` floats total, row width
// `non_width`) into d_a, shifting the destination rows by `a_x_start`.
__global__ void subset_slice_assignment_kernel(float *d_a, float *d_b, int a_x_start, int size, int non_width) {
    const int src = blockIdx.x * blockDim.x + threadIdx.x;
    if (src >= size) {
        return;
    }
    // Decompose the flat source index into (row, column), then offset the
    // row on the destination side.
    const int row = src / non_width;
    const int col = src - row * non_width;
    d_a[(a_x_start + row) * non_width + col] = d_b[src];
}
// TODO: Make more generic
// Copies vector d_b (length `size`) into d_a starting at offset `a_start`:
// d_a[a_start + i] = d_b[i].
__global__ void vector_subset_slice_assignment_kernel(float *d_a, float *d_b, int a_start, int size) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        d_a[a_start + i] = d_b[i];
    }
}
|
22,900 | #include "includes.h"
// Block-wise sum reduction: each block sums its slice of d_in into
// d_out[blockIdx.x]. Launch with dynamic shared memory of
// blockDim.x * sizeof(float).
// Fixes vs. original: the reduction predicate tested the GLOBAL index
// (idx_x % (stride*2)), which only coincides with the local test when
// blockDim.x is a power of two; and s_data[threadIdx.x + stride] could read
// past the shared array for non-power-of-two block sizes (e.g. blockDim=33,
// stride=16, tid=32 read s_data[48]). Behavior for power-of-two blocks is
// unchanged.
__global__ void reduction_kernel(float* d_out, float* d_in, unsigned int size)
{
	unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
	extern __shared__ float s_data[];
	// Pad the tail block with zeros so out-of-range threads are neutral.
	s_data[threadIdx.x] = (idx_x < size) ? d_in[idx_x] : 0.f;
	__syncthreads();
	// Interleaved-addressing tree reduction in shared memory.
	for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
	{
		// Local-index test + partner-bounds guard (see header comment).
		if ((threadIdx.x % (stride * 2)) == 0 && threadIdx.x + stride < blockDim.x)
			s_data[threadIdx.x] += s_data[threadIdx.x + stride];
		// Barrier is outside the branch: all threads must reach it.
		__syncthreads();
	}
	if (threadIdx.x == 0)
		d_out[blockIdx.x] = s_data[0];
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.