serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,901 | #include <string.h>
#include <math.h>
//ldoc on
/**
* ## Implementation
*
 * The actual work of computing the fluxes and speeds is done
* by local (`static`) helper functions that take as arguments
* pointers to all the individual fields. This is helpful to the
* compilers, since by specifying the `restrict` keyword, we are
* promising that we will not access the field data through the
* wrong pointer. This lets the compiler do a better job with
* vectorization.
*/
// Gravitational acceleration (m/s^2) shared by the flux and speed kernels.
static const float g = 9.8;
__device__
static
// Compute the shallow-water flux fields from the conserved variables
// (h, hu, hv). Grid-stride loop: each thread handles cells start,
// start+stride, ... so any launch configuration covers all ncell cells.
// The __restrict__ qualifiers promise the nine arrays do not alias.
void shallow2dv_flux(float* __restrict__ fh,
float* __restrict__ fhu,
float* __restrict__ fhv,
float* __restrict__ gh,
float* __restrict__ ghu,
float* __restrict__ ghv,
const float* __restrict__ h,
const float* __restrict__ hu,
const float* __restrict__ hv,
float g,
int ncell)
{
    const int start  = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = start; i < ncell; i += stride) {
        const float hi  = h[i];
        const float hui = hu[i];
        const float hvi = hv[i];
        const float inv_h = 1.0f / hi;
        const float half_gh2 = (0.5f * g) * hi * hi;   // pressure term g*h^2/2
        fh[i]  = hui;                                  // mass flux in x
        gh[i]  = hvi;                                  // mass flux in y
        fhu[i] = hui * hui * inv_h + half_gh2;
        fhv[i] = hui * hvi * inv_h;
        ghu[i] = hui * hvi * inv_h;
        ghv[i] = hvi * hvi * inv_h + half_gh2;
    }
}
__device__
static
// Compute global maximum wave speeds in x (cxy[0]) and y (cxy[1]) over all
// cells: |hu/h| + sqrt(g*h) and |hv/h| + sqrt(g*h). Grid-stride loop.
//
// BUG FIX: the original had every thread read cxy[0]/cxy[1], fold its own
// cells in, and write the result back — an unsynchronized read-modify-write
// that loses updates when many threads run this. Each thread now reduces
// into private registers and publishes once via atomicMax on the float's
// bit pattern; for non-negative IEEE-754 floats the integer ordering of the
// bits matches the float ordering, and speeds are always >= 0 here.
// (Assumes cxy[] was initialized to non-negative values by the host.)
void shallow2dv_speed(float* __restrict__ cxy,
const float* __restrict__ h,
const float* __restrict__ hu,
const float* __restrict__ hv,
float g,
int ncell)
{
    float cx = 0.0f;   // per-thread running max of x speed
    float cy = 0.0f;   // per-thread running max of y speed
    int indexX = blockIdx.x * blockDim.x + threadIdx.x;
    int cudaStrideX = blockDim.x * gridDim.x;
    for (int i = indexX; i < ncell; i += cudaStrideX) {
        float hi = h[i];
        float inv_hi = 1.0f/hi;
        float root_gh = sqrtf(g * hi);
        float cxi = fabsf(hu[i] * inv_hi) + root_gh;
        float cyi = fabsf(hv[i] * inv_hi) + root_gh;
        if (cx < cxi) cx = cxi;
        if (cy < cyi) cy = cyi;
    }
    // Single atomic publish per thread; max(existing, computed) as intended.
    atomicMax((int*)&cxy[0], __float_as_int(cx));
    atomicMax((int*)&cxy[1], __float_as_int(cy));
}
__global__
// Kernel wrapper: FU, GU and U are field-major arrays — three ncell-long
// planes (h, hu, hv) separated by field_stride. Unpacks the planes and
// forwards to the flux helper with the file-scope gravitational constant g.
void shallow2d_flux(float* FU, float* GU, const float* U,
int ncell, int field_stride)
{
shallow2dv_flux(FU, FU+field_stride, FU+2*field_stride,
GU, GU+field_stride, GU+2*field_stride,
U, U +field_stride, U +2*field_stride,
g, ncell);
}
__global__
// Kernel wrapper: folds the per-cell maximum wave speeds into cxy[0]/cxy[1].
// U holds three ncell-long planes (h, hu, hv) separated by field_stride.
// cxy[0..1] accumulate maxima across all launched threads; see
// shallow2dv_speed for the update discipline.
void shallow2d_speed(float* __restrict__ cxy, const float* __restrict__ U,
int ncell, int field_stride)
{
shallow2dv_speed(cxy, U, U+field_stride, U+2*field_stride, g, ncell);
}
|
7,902 | #include "includes.h"
/* Non-local Means -- Cuda implementation
*
* Graikos Alexandros - 8128
*/
// Index into the image including the mirrored (padded) border: the buffer is
// (m + 2*vertical_radius) x (n + 2*horizontal_radius) with the origin shifted
// by the radii. vertical_radius/horizontal_radius/n must be in scope.
#define image(i,j) image[((i) + vertical_radius)*(n+2*horizontal_radius) + ((j)+horizontal_radius)]
// Weight function between pixel (i,j) <-> (k,l)
#define weights(i,j,k,l) weights[((i)*n + (j))*m*n + ((k)*n + (l))]
// Gaussian filter matrix (patchSize[0] x patchSize[1], row-major)
#define gaussian_matrix(i,j) gaussian_matrix[(i)*patchSize[1] + (j)]
// Shared memory patch matrix with halo.
// NOTE(review): the row term pairs (i) with horizontal_radius and a
// blockDim.y-based stride, while the column term uses vertical_radius —
// consistent only when the block/patch is square (radii equal); confirm.
#define shared_memory_patch(patch,i,j) patch[((i)+horizontal_radius)*(blockDim.y+2*horizontal_radius) + ((j)+vertical_radius)]
// Filter output (m x n, row-major)
#define filtered_image(i,j) filtered_image[(i)*n + j]
// Start of dynamic shared memory: the gaussian filter sits at the front,
// followed by the target and local patch tiles (see nlm_kernel).
extern __shared__ float gaussian_matrix[];
/* Patch similarity: Gaussian-weighted squared distance mapped to a weight
   (the int version of pow, int_pow, is defined further below) */
// Gaussian-weighted squared Euclidean distance between the patchSize-sized
// neighborhoods around (local_x,local_y) in patch1 and (target_x,target_y)
// in patch2 (both shared-memory tiles), mapped to a similarity weight via
// exp(-distance / filter_sigma).
// NOTE(review): parameters m, n and patch_sigma are unused in this body —
// presumably kept for signature compatibility; confirm before removing.
__device__ float compare_patches (int m, int n, float *patch1, float *patch2, int local_x, int local_y, int target_x, int target_y, float patch_sigma, int *patchSize , float filter_sigma) {
int vertical_radius = (patchSize[0] - 1) / 2;
int horizontal_radius = (patchSize[1] - 1) / 2;
int k, l;
float euclidian_distance = 0;
for (k=-vertical_radius; k<=vertical_radius; k++) {
for (l=-horizontal_radius; l<=horizontal_radius; l++) {
// Filter value assigned to patch offset (k,l)
float gaussian_filter = gaussian_matrix(k+vertical_radius,l+horizontal_radius);
// Accumulate the (Gaussian-weighted, squared) difference between the
// local pixel and the target pixel at the same patch offset
euclidian_distance += gaussian_filter*gaussian_filter*
powf( shared_memory_patch(patch1, local_x+k, local_y+l) -
shared_memory_patch(patch2, target_x+k, target_y+l), 2);
}
}
return expf( -euclidian_distance / (filter_sigma));
}
// Forward declaration (redundant: the definition already appears above).
__device__ float compare_patches (int m, int n, float *patch1, float *patch2, int pixel_1_x, int pixel_1_y, int pixel_2_x, int pixel_2_y, float patch_sigma, int *patchSize , float filter_sigma);
/* Loads image tile (patch_x, patch_y) plus a mirrored halo of
 * vertical_radius x horizontal_radius pixels into the shared-memory tile
 * `patch`. One thread per interior pixel; border threads additionally copy
 * the mirrored halo cells. Must be called by every thread of the block —
 * it ends with __syncthreads(). */
__device__ void load_patch(float *patch, const float *image, int m, int n,int patch_x, int patch_y, int *patchSize) {
// Global pixel coordinates of this thread's cell within the chosen tile
int i = patch_x * blockDim.x + threadIdx.x;
int j = patch_y * blockDim.y + threadIdx.y;
int vertical_radius = (patchSize[0] - 1) / 2;
int horizontal_radius = (patchSize[1] -1 ) / 2;
// Copy thread assigned pixel to shared memory
shared_memory_patch(patch, threadIdx.x, threadIdx.y) = image(i,j);
// Copy the mirrored (padded) pixels into shared memory
// Left border (low x): reflect across the tile edge
if (threadIdx.x < vertical_radius) {
shared_memory_patch(patch, -threadIdx.x-1, threadIdx.y) = image(i-2*threadIdx.x-1,j);
// Upper left diagonal
if (threadIdx.y < horizontal_radius) {
shared_memory_patch(patch, -threadIdx.x-1, -threadIdx.y-1) = image(i-2*threadIdx.x-1, j-2*threadIdx.y-1);
}
}
// Upper border (low y)
if (threadIdx.y < horizontal_radius) {
shared_memory_patch(patch, threadIdx.x, -threadIdx.y-1) = image(i,j-2*threadIdx.y-1);
}
// Bottom border (high x)
if (threadIdx.x >= blockDim.x - vertical_radius) {
shared_memory_patch(patch, 2*blockDim.x - (threadIdx.x+1), threadIdx.y) = image(i+1+2*(blockDim.x-threadIdx.x-1), j);
// Bottom left diagonal
if (threadIdx.y < horizontal_radius) {
shared_memory_patch(patch, 2*blockDim.x-(threadIdx.x+1), -threadIdx.y-1) =
image(i+1+2*(blockDim.x-threadIdx.x-1),j-2*threadIdx.y-1);
}
}
// Right border (high y)
if (threadIdx.y >= blockDim.y - horizontal_radius) {
shared_memory_patch(patch, threadIdx.x, 2*blockDim.y - (threadIdx.y+1)) = image(i,j+1+2*(blockDim.y-threadIdx.y-1));
// Upper right diagonal
if (threadIdx.x < vertical_radius) {
shared_memory_patch(patch, -threadIdx.x-1, 2*blockDim.y-(threadIdx.y+1)) =
image(i-2*threadIdx.x-1, j+1+2*(blockDim.y-threadIdx.y-1));
}
}
// Bottom right diagonal
if (threadIdx.x >= (blockDim.x - vertical_radius) && threadIdx.y >= (blockDim.y - horizontal_radius)) {
shared_memory_patch(patch, 2*blockDim.x-(threadIdx.x+1),2*blockDim.y-(threadIdx.y+1)) =
image(i+1+2*(blockDim.x-threadIdx.x-1), j+1+2*(blockDim.y-threadIdx.y-1));
}
// Barrier so the whole tile is visible before any thread reads it.
__syncthreads();
}
// Integer exponentiation by repeated multiplication: returns a^b for b >= 0
// (returns 1 when b <= 0, matching the original loop's behavior).
__device__ int int_pow(int a, int b) {
    int result = 1;
    for (int e = 0; e < b; ++e)
        result *= a;
    return result;
}
// Non-local means filter: each thread produces one output pixel (i,j).
// Dynamic shared memory layout: [gaussian filter | target tile | local tile].
// Every block slides a target tile over the whole image, accumulating
// weight * pixel for its local pixels, and normalizes by Z at the end.
// NOTE(review): all branches below depend only on blockIdx / loop indices,
// so they are uniform per block and the __syncthreads() calls (including the
// one inside load_patch) are reached by every thread of the block.
__global__ void nlm_kernel (float const *image, float *filtered_image,int m, int n, float patch_sigma, int *patchSize, float filter_sigma) {
int vertical_radius = (patchSize[0] - 1) / 2;
int horizontal_radius = (patchSize[1] - 1) / 2;
// Compute gaussian filter (one entry per thread in the patch window).
// NOTE(review): x is paired with horizontal_radius and y with
// vertical_radius here — the opposite pairing from the rest of the file;
// harmless only for square patches, confirm for non-square patchSize.
if (threadIdx.x < patchSize[0] && threadIdx.y < patchSize[1]) {
gaussian_matrix(threadIdx.x, threadIdx.y) = exp(-(int_pow(threadIdx.x-horizontal_radius,2) +
int_pow(threadIdx.y-vertical_radius,2)) / (2*(patch_sigma*patch_sigma)));
}
__syncthreads();
// Pixel coordinates assigned to thread
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Addresses of shared memory matrices (laid out after the gaussian filter)
float *target_patch = gaussian_matrix + patchSize[0]*patchSize[1];
float *local_patch = target_patch + (blockDim.x+2*vertical_radius)*(blockDim.y+2*horizontal_radius);
// Load local image patch assigned to block to shared memory
load_patch(local_patch, image, m, n, blockIdx.x, blockIdx.y, patchSize);
int k, l;
int patch_x, patch_y;
// Z value (normalization constant) for current pixel
float Z = 0.f;
float filtered_pixel = 0.f;
// Load each target_patch from the image to shared memory and calculate
// weight between pixels in local patch and target patch
for (patch_x=0; patch_x<gridDim.x; patch_x++) {
for (patch_y=0; patch_y<gridDim.y; patch_y++) {
// Load image patch (x,y)
// !Do not load already present local_patch!
if (patch_x != blockIdx.x || patch_y != blockIdx.y) {
load_patch(target_patch, image, m, n, patch_x, patch_y, patchSize);
} else {
// Alias the already-loaded local tile instead of re-reading it.
target_patch = local_patch;
}
// Calculate weights
for (k=0; k<blockDim.x; k++) {
for (l=0; l<blockDim.y; l++) {
float weight;
// Weight between our pixel and target pixel's patch
// Consider case where local_patch equals target_patch
weight = compare_patches(m, n, local_patch, target_patch, threadIdx.x, threadIdx.y,
k, l, patch_sigma, patchSize, filter_sigma);
Z += weight;
// Add weight*pixel_value
filtered_pixel += weight*shared_memory_patch(target_patch,k,l);
}
}
// If we used local_patch for comparison reset target patch pointer
if (patch_x == blockIdx.x && patch_y == blockIdx.y) {
target_patch = gaussian_matrix + patchSize[0]*patchSize[1];
}
// Sync threads to load next patch
__syncthreads();
}
}
// Divide by Z to normalize weights
filtered_image(i,j) = filtered_pixel / Z;
return;
}
7,903 | # include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <sys/time.h>
#define x_max (1.25)
#define x_min (-2.25)
#define y_max (1.75)
#define y_min (-1.75)
#define count_max 400
#define n 64
#define task 32768
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double my_timer()
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
// Mandelbrot escape test for c = (x, y): iterate z <- z^2 + c up to
// count_max times. *value receives the first iteration k at which the orbit
// leaves the box [-2,2]^2, or 0 if it never escapes.
void explode ( float x, float y, int *value){
    float zr = x;
    float zi = y;
    *value = 0;
    for (int k = 1; k <= count_max; k++)
    {
        float zr_next = zr * zr - zi * zi + x;
        float zi_next = 2.0 * zr * zi + y;
        if (zr_next < -2.0 || 2.0 < zr_next || zi_next < -2.0 || 2.0 < zi_next)
        {
            *value = k;   // escaped at iteration k
            break;
        }
        zr = zr_next;
        zi = zi_next;
    }
}
/*
Carry out the iteration for each pixel, determining COUNT.
*/
/*
Fill count[] with Mandelbrot escape counts for a size x size grid whose
coordinate window is [x_min+index, x_max+index] x [y_min+index, y_max+index]
(the `index` offset shifts the window per task). Requires size >= 2
(the interpolation divides by size - 1).
NOTE(review): pixels are stored at count[i + j*size] while x derives from j
and y from i — transposed relative to the loop order, but det_pixel/set_img
index the same way, so the layout is consistent across the file.
*/
void get_pixel(int *count, float index, int size){
int i, j;
float x, y;
for ( i = 0; i < size; i++ )
{
for ( j = 0; j < size; j++ )
{
// Linear interpolation between the shifted window edges
x = ( ( float ) ( j ) * (x_max + index)
+ ( float ) ( size - j - 1 ) * (x_min + index) )
/ ( float ) ( size - 1 );
y = ( ( float ) ( i ) * (y_max + index)
+ ( float ) ( size - i - 1 ) * (y_min + index) )
/ ( float ) ( size - 1 );
explode ( x, y, &count[i + j * size] );
}
}
}
/*
Determine the coloring of each pixel.
*/
/*
Scan count[] (size x size entries) and store the maximum value in *c_max;
the maximum is also returned.
BUG FIX: the function was declared `int` but had no return statement —
undefined behavior if a caller ever uses the result; it now returns *c_max.
*/
int det_pixel(int *c_max, int *count, int size){
    int i, j;
    *c_max = 0;
    for ( j = 0; j < size; j++ )
    {
        for ( i = 0; i < size; i++ )
        {
            if ( *c_max < count[i+j*size] )
            {
                *c_max = count[i+j*size];
            }
        }
    }
    return *c_max;
}
/*
Set the image data.
*/
void set_img(int *r, int *g, int *b, int *count, int c_max, int size){
int i, j;
int c;
for ( i = 0; i < size; i++ )
{
for ( j = 0; j < size; j++ )
{
if ( count[i+j*size] % 2 == 1 )
{
r[i+j*size] = 255;
g[i+j*size] = 255;
b[i+j*size] = 255;
}
else
{
c = ( int ) ( 255.0 * sqrt ( sqrt ( sqrt (
( ( double ) ( count[i+j*size] ) / ( double ) ( c_max ) ) ) ) ) );
r[i+j*size] = 3 * c / 5;
g[i+j*size] = 3 * c / 5;
b[i+j*size] = c;
}
}
}
}
// Driver: reads one digit per task from rand.txt, sizes each task's image as
// (digit*32)^2, computes escape counts, colors, and reports elapsed time.
int main(){
    int i, j;
    int **r, **g, **b;
    int *c_max;
    int **count;
    int num_thread[task];
    int num_size[task];
    FILE *f;
    f = fopen("rand.txt", "r");
    // BUG FIX: fopen was unchecked; a missing file crashed inside fscanf.
    if (f == NULL) {
        fprintf(stderr, "cannot open rand.txt\n");
        return 1;
    }
    for(i = 0; i < task; i++) {
        // BUG FIX: check fscanf so a short file cannot leave
        // num_thread[i] uninitialized.
        if (fscanf(f, "%1d", &num_thread[i]) != 1) {
            fprintf(stderr, "rand.txt: expected %d digits\n", task);
            fclose(f);
            return 1;
        }
    }
    fclose(f);
    for(i = 0; i < task; i++)
        num_size[i] = num_thread[i]*32;
    double start_timer, end_timer;
    count = (int**)malloc(task * sizeof(int *));
    r = (int**)malloc(task * sizeof(int *));
    g = (int**)malloc(task * sizeof(int *));
    b = (int**)malloc(task * sizeof(int *));
    c_max = (int*)malloc(task * sizeof(int));
    for(i = 0; i < task; i++){
        count[i] = ( int * ) malloc ( num_size[i] * num_size[i] * sizeof ( int ) );
        r[i] = ( int * ) malloc ( num_size[i] * num_size[i] * sizeof ( int ) );
        g[i] = ( int * ) malloc ( num_size[i] * num_size[i] * sizeof ( int ) );
        b[i] = ( int * ) malloc ( num_size[i] * num_size[i] * sizeof ( int ) );
    }
    start_timer = my_timer();
    // Carry out the iteration for each pixel, determining COUNT.
    for(i = 0 ; i < task ; i++)
        get_pixel(count[i], (float)(i/(task/2.0)), num_size[i]);
    end_timer = my_timer();
    printf("Elapsed Time:%lf Sec.\n", end_timer - start_timer);
    // Determine the coloring of each pixel.
    for(i = 0; i < task; i++)
        det_pixel(&c_max[i], count[i], num_size[i]);
    // Set the image data.
    for(i = 0; i < task ; i++)
        set_img(r[i], g[i], b[i], count[i], c_max[i], num_size[i]);
#if 0
    // output results
    FILE *fp;
    fp = fopen("output1.txt", "w+");
    for(i = 0; i < task; i++)
        for(j = 0; j < n * n; j++)
            fprintf(fp, "%d, %d, %d\n", r[i][j], g[i][j], b[i][j]);
    fclose(fp);
#endif
    /* clean up */
    for(i = 0; i < task; i++){
        free(count[i]);
        free(r[i]);
        free(g[i]);
        free(b[i]);
    }
    // BUG FIX: the count pointer array itself was leaked.
    free(count);
    free(c_max);
    free(r);
    free(g);
    free(b);
    return 0;
}
|
7,904 | //# tKernel.in_.cu: simple function to test Kernel class
//# Copyright (C) 2013 ASTRON (Netherlands Institute for Radio Astronomy)
//# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
//#
//# This file is part of the LOFAR software suite.
//# The LOFAR software suite is free software: you can redistribute it and/or
//# modify it under the terms of the GNU General Public License as published
//# by the Free Software Foundation, either version 3 of the License, or
//# (at your option) any later version.
//#
//# The LOFAR software suite is distributed in the hope that it will be useful,
//# but WITHOUT ANY WARRANTY; without even the implied warranty of
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//# GNU General Public License for more details.
//#
//# You should have received a copy of the GNU General Public License along
//# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
//#
//# $Id: tKernel.in_.cu 24903 2013-05-14 23:50:58Z amesfoort $
extern "C" {
// test various "types" of args (for arg setting), esp. an immediate and a buffer
// Adds `inc` to each of the `size` input elements: out[i] = in[i] + inc.
// One thread per element; the i < size guard handles the grid tail.
// NOTE(review): `i` is a 32-bit unsigned compared against a size_t `size` —
// fine as long as the launch keeps sizes within 32 bits.
__global__ void testKernel(float *out, const float *in, size_t size, float inc)
{
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
out[i] = in[i] + inc;
}
}
}
|
7,905 | #include <iostream>
#include <vector>
// Thin wrapper around a pair of CUDA events for GPU interval timing.
// Usage: t.start(); ...work...; t.stop(); t.sync(); ms = t.elapsed();
struct Timer
{
    cudaEvent_t xstart, xstop;
    Timer()
    {
        cudaEventCreate(&xstart);
        cudaEventCreate(&xstop);
    }
    // BUG FIX: the original never released the events; destroy them so the
    // driver resources are reclaimed. (Do not copy Timer instances — the
    // events would then be destroyed twice.)
    ~Timer()
    {
        cudaEventDestroy(xstart);
        cudaEventDestroy(xstop);
    }
    // Record the interval start on the default stream.
    void start()
    {
        cudaEventRecord(xstart, 0);
    }
    // Record the interval end on the default stream.
    void stop()
    {
        cudaEventRecord(xstop, 0);
    }
    // Block the host until the stop event has completed.
    void sync()
    {
        cudaEventSynchronize(xstop);
    }
    // Milliseconds between the start and stop events (0 on error).
    float elapsed() const
    {
        float time = 0;
        cudaEventElapsedTime(&time, xstart, xstop);
        return time;
    }
};
/*
A = Ar x Ac
B = Br x Bc
with the constraint Ac == Br
Intended launch: matrixmul<<<dim3(Ar,Bc),Ac>>>(a,b,c)
Row-major layout, no pitch
Assuming Ac < 1024
*/
// Matrix-multiply kernel stub. The comment block above sketches the intended
// launch (<<<dim3(Ar,Bc), Ac>>>), but the body is deliberately empty and
// main() only has a placeholder where it would be invoked.
__global__ void mmulk(float * a, float * b, float * c, int Br, int Bc)
{
}
// Non-owning view over a row-major r x c matrix stored contiguously at p.
// `base` shifts the index origin (base=0 -> 0-based, base=1 -> 1-based).
template<class T,int base=0>
class MatrixWrap
{
public:
MatrixWrap(T * p, int a,int b): pp(p),r(a),c(b) {}
T operator() (int i,int j) const { return pp[(i-base)*c+(j-base)]; } // read element (i,j)
T &operator() (int i,int j) { return pp[(i-base)*c+(j-base)]; } // writable element (i,j)
int r,c; // rows, columns
T * pp; // borrowed pointer; caller retains ownership
};
// Naive host-side matrix product c = a * b; c is c.r x c.c with inner
// dimension a.c. Dimension agreement is assumed (TODO: assert a.c == b.r).
template <class T>
void mmul(MatrixWrap<T> a,MatrixWrap<T> b,MatrixWrap<T> c)
{
    for (int row = 0; row < c.r; ++row)
    {
        for (int col = 0; col < c.c; ++col)
        {
            T acc = 0;
            for (int k = 0; k < a.c; ++k)
                acc += a(row, k) * b(k, col);
            c(row, col) = acc;
        }
    }
}
// Fill `a` with the affine pattern a(i,j) = i*rv + j*cv + dv.
template <class T>
void minit(MatrixWrap<T,0> a,T rv, T cv, T dv)
{
    for (int row = 0; row < a.r; ++row)
        for (int col = 0; col < a.c; ++col)
            a(row, col) = row * rv + col * cv + dv;
}
// Stream a matrix: "(rows,cols)" header, then one row per line with
// space-separated entries.
template <class T>
std::ostream & operator << (std::ostream & ons, MatrixWrap<T> & w)
{
    ons << "(" << w.r << "," << w.c << ")\n";
    for (int row = 0; row < w.r; ++row)
    {
        for (int col = 0; col < w.c; ++col)
            ons << w(row, col) << " ";
        ons << std::endl;
    }
    return ons;
}
// Demo driver: builds 1x512 * 512x1 matrices, computes the CPU reference
// product, and times the (placeholder) GPU path.
// NOTE(review): the mmulk kernel is never actually launched (see the
// "invoke mmulk" placeholder), so dev_c is never written and mwCr prints
// whatever uninitialized device memory the final cudaMemcpy copied back.
// No CUDA calls are error-checked. `Br` is declared as an alias of Ac but
// never used afterwards.
int main(int argc, char const *argv[])
{
int Ar = 1, Ac = 512, Bc = 1;
int & Br = Ac;
int sizeA = sizeof(float)*Ar*Ac;
int sizeB = sizeof(float)*Ac*Bc;
int sizeC = sizeof(float)*Ar*Bc;
float *dev_a,*dev_b,*dev_c;
std::vector<float> a(sizeA/sizeof(float));
std::vector<float> b(sizeB/sizeof(float));
std::vector<float> c(sizeC/sizeof(float));
std::vector<float> cr(sizeC/sizeof(float));
MatrixWrap<float,0> mwA(&a[0],Ar,Ac);
MatrixWrap<float,0> mwB(&b[0],Ac,Bc);
MatrixWrap<float,0> mwC(&c[0],Ar,Bc);
MatrixWrap<float,0> mwCr(&cr[0],Ar,Bc);
cudaMalloc( (void**)&dev_a, sizeA );
cudaMalloc( (void**)&dev_b, sizeB );
cudaMalloc( (void**)&dev_c, sizeC);
// Deterministic test patterns; mwC/mwCr get distinct fills (1 vs 2) so it
// is visible which path produced the printed result.
minit<float>(mwA,2,1,0);
minit<float>(mwB,-8,-4,0);
minit<float>(mwC,0,0,1);
minit<float>(mwCr,0,0,2);
// CPU reference product into mwC.
mmul(mwA,mwB,mwC);
std::cout << mwA << std::endl;
std::cout << mwB << std::endl;
std::cout << mwC << std::endl;
Timer t;
t.start();
cudaMemcpy(dev_a,mwA.pp,sizeA,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,mwB.pp,sizeB,cudaMemcpyHostToDevice);
if(Ac < 1024)
{
// invoke mmulk
}
cudaMemcpy(mwCr.pp,dev_c,sizeC,cudaMemcpyDeviceToHost);
t.stop();
t.sync();
std::cout << mwCr << std::endl;
std::cout << t.elapsed() << std::endl;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
7,906 |
/* compile with:
nvcc -fatbin -O2
-gencode=arch=compute_20,code=sm_20
-gencode=arch=compute_30,code=sm_30
-gencode=arch=compute_35,code=sm_35
-gencode=arch=compute_50,code=sm_50
-gencode=arch=compute_52,code=sm_52
-gencode=arch=compute_60,code=sm_60
-gencode=arch=compute_61,code=sm_61
-gencode=arch=compute_62,code=sm_62
-gencode=arch=compute_62,code=compute_62
"kernelSource/forcefield.cu" -o "kernelBinaries/forcefield.bin"
See Maxwell compatibility guide for more info:
http://docs.nvidia.com/cuda/maxwell-compatibility-guide/index.html#building-maxwell-compatible-apps-using-cuda-6-0
*/
// Packed kernel arguments; __align__(8) keeps the doubles 8-byte aligned.
typedef struct __align__(8) {
int numPairs; // @ 0   total atom pairs to evaluate
int num14Pairs; // @ 4   leading pairs using the scaled (1-4) coulomb factor
double coulombFactor; // @ 8
double scaledCoulombFactor; // @ 16
double solvCutoff2; // @ 24  squared distance cutoff for solvation
bool useDistDepDielec; // @ 32
bool useHEs; // @ 33
bool useHVdw; // @ 34
bool useSubset; // @ 35
} ForcefieldArgs;
// sizeof = 40 (36 bytes of fields padded up to the 8-byte alignment;
// the original "sizeof = 36" note ignored the __align__(8) padding)
// Decode the 0-based atom index from a packed flag: the magnitude is
// index + 1 (the sign carries the hydrogen bit — see isHydrogen below).
__device__ int getAtomIndex(int flags) {
return abs(flags) - 1;
}
// The sign of the packed flag encodes the element class: positive marks a
// hydrogen, negative a heavy atom. (A flag of 0 would be ambiguous and is
// presumably never produced — TODO confirm upstream encoding.)
__device__ bool isHydrogen(int flags) {
return flags > 0;
}
// One thread per atom pair: accumulates electrostatic + van der Waals +
// solvation energy for its pair, then block-reduces the per-thread energies
// in shared memory and writes one partial sum per block to out[blockIdx.x].
// Launch requirements implied by the code below:
//  - dynamic shared memory: blockDim.x * sizeof(double)
//  - the reduction reads scratch[localId + offset] for offsets up to
//    blockDim.x/2, so blockDim.x must be a power of two — TODO confirm host
// precomputed[] holds 9 doubles per pair: Aij, Bij, charge, then
// (lambda, radius, alpha) for each of the two atoms (per the indices used).
extern "C" __global__ void calc(
const double *coords,
const int *atomFlags,
const double *precomputed,
const int *subsetTable,
const ForcefieldArgs *args,
double *out
) {
extern __shared__ double scratch[];
// start with zero energy (threads past numPairs contribute 0 to the sum)
double energy = 0;
int globalId = blockIdx.x*blockDim.x + threadIdx.x;
// which atom pair are we calculating?
if (globalId < args->numPairs) {
int i = globalId;
// are we using the subset?
if (args->useSubset) {
i = subsetTable[i];
}
// read atom flags and calculate all the things that use the atom flags in this scope
bool bothHeavy;
double r2 = 0;
{
int atom1Flags, atom2Flags;
{
int i2 = i*2;
atom1Flags = atomFlags[i2];
atom2Flags = atomFlags[i2 + 1];
}
bothHeavy = !isHydrogen(atom1Flags) && !isHydrogen(atom2Flags);
// calculate the squared radius (squared distance between the two atoms)
int atom1Index3 = getAtomIndex(atom1Flags)*3;
int atom2Index3 = getAtomIndex(atom2Flags)*3;
double d;
d = coords[atom1Index3] - coords[atom2Index3];
r2 += d*d;
d = coords[atom1Index3 + 1] - coords[atom2Index3 + 1];
r2 += d*d;
d = coords[atom1Index3 + 2] - coords[atom2Index3 + 2];
r2 += d*d;
}
int i9 = i*9;
// calculate electrostatics
if (bothHeavy || args->useHEs) {
double esEnergy = 1;
{
// 1-4 bonded pairs are stored first and use the scaled factor
bool is14Pair = globalId < args->num14Pairs;
esEnergy *= is14Pair ? args->scaledCoulombFactor : args->coulombFactor;
}
{
double charge = precomputed[i9 + 2];
esEnergy *= charge;
}
{
// distance-dependent dielectric divides by r^2 instead of r
esEnergy /= args->useDistDepDielec ? r2 : sqrt(r2);
}
energy += esEnergy;
}
// calculate vdw (12-6 Lennard-Jones form)
if (bothHeavy || args->useHVdw) {
double Aij, Bij;
{
Aij = precomputed[i9];
Bij = precomputed[i9 + 1];
}
// compute vdw
double r6 = r2*r2*r2;
double r12 = r6*r6;
energy += Aij/r12 - Bij/r6;
}
// calculate solvation (Gaussian EEF1-style term for each atom of the pair)
if (bothHeavy && r2 < args->solvCutoff2) {
double r = sqrt(r2);
{
double lambda1 = precomputed[i9 + 3];
double radius1 = precomputed[i9 + 4];
double alpha1 = precomputed[i9 + 5];
double Xij = (r - radius1)/lambda1;
energy -= alpha1*exp(-Xij*Xij)/r2;
}
{
double lambda2 = precomputed[i9 + 6];
double radius2 = precomputed[i9 + 7];
double alpha2 = precomputed[i9 + 8];
double Xji = (r - radius2)/lambda2;
energy -= alpha2*exp(-Xji*Xji)/r2;
}
}
}
// compute the energy sum in SIMD-style
// see url for a tutorial on GPU reductions:
// http://developer.amd.com/resources/articles-whitepapers/opencl-optimization-case-study-simple-reductions/
int localId = threadIdx.x;
scratch[localId] = energy;
__syncthreads();
for (int offset = 1; offset < blockDim.x; offset <<= 1) {
// sum this level of the reduction tree
int mask = (offset << 1) - 1;
if ((localId & mask) == 0) {
scratch[localId] += scratch[localId + offset];
}
__syncthreads();
}
// finally, if we're the 0 thread, write the summed energy for this work group
if (localId == 0) {
out[blockIdx.x] = scratch[0];
}
}
|
7,907 | #include <cuda.h>
#include<stdio.h>
#define SIZE 10
#define RAD 12
// Mark voxels lying on the surface of the sphere centered at (cx,cy,cz) with
// radius r: a cell is "surface" iff its 8 corners are neither all inside nor
// all outside the sphere.
// NOTE(review): x/y/z include the block offset, but `idx` is computed from
// threadIdx only — with more than one block, different blocks would write the
// same out[] slots. The host launches a single block, where this is safe.
__global__ void set_sphere(int cx, int cy, int cz, int r, bool* out)
{
// Voxel coordinates, shifted so the grid is centered on the sphere.
int x =threadIdx.x + blockIdx.x*blockDim.x - r + cx;
int y = threadIdx.y + blockIdx.y*blockDim.y - r + cy;
int z = threadIdx.z + blockIdx.z*blockDim.z - r + cz;
bool inside = false, outside = false;
int idx = threadIdx.z*blockDim.x*blockDim.y
+ threadIdx.y*blockDim.x + threadIdx.x;
out[ idx ] = 0;
// Test if block is on surface of sphere: classify each of the 8 corners.
for(int dx = 0; dx < 2; ++dx)
for(int dy = 0; dy < 2; ++dy)
for(int dz = 0; dz < 2; ++dz)
{
int vertex_dist =(x+dx - cx)*(x+dx - cx) +
(y+dy - cy)*(y+dy - cy) +
(z+dz - cz)*(z+dz - cz);
if(vertex_dist <= r*r)
inside = true;
else
outside = true;
}
// Surface iff corners straddle the sphere boundary.
out[idx] = inside && outside;
}
// Driver: rasterize a sphere surface into a SIZE^3 voxel grid on the GPU and
// print each z-slice as 0/1 characters.
int
main(){
    bool out[SIZE*SIZE*SIZE];
    bool *dev;
    dim3 blockDim(SIZE, SIZE, SIZE);
    cudaMalloc( (void**) &dev, sizeof(bool) * SIZE*SIZE*SIZE);
    set_sphere<<<1, blockDim>>>(SIZE, SIZE, SIZE, RAD, dev);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(out, dev, sizeof(bool)*SIZE*SIZE*SIZE, cudaMemcpyDeviceToHost);
    // BUG FIX: the device buffer was never released.
    cudaFree(dev);
    int x,y,z;
    printf("showing the results of 'out':\n");
    for(z = 0; z < SIZE; ++z){
        printf("\nhere's the surface for z = %d\n", z);
        for(y = 0; y < SIZE; ++y){
            for(x = 0; x < SIZE; ++x){
                printf(" %d ", out[x + y * SIZE + z * SIZE * SIZE] == true? 1 : 0);
            }
            printf("\n");
        }
    }
    return 0;
}
|
7,908 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
//System includes
#include <stdio.h>
#include <stdlib.h>
//Cuda includes
#include <cuda.h>
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, n).
// One thread per element; the guard handles the grid tail.
__global__ void vecAddKernel(float *A, float *B, float *C, int n){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        C[idx] = A[idx] + B[idx];
    }
}
// Host wrapper: computes h_C = h_A + h_B element-wise for n floats, staging
// through device memory. The blocking cudaMemcpy calls provide all needed
// synchronization with the kernel.
extern "C"
void vecAdd_par(float* h_A, float* h_B, float* h_C, int n){
int size = n*sizeof(float);
float *dA, *dB, *dC;
// Allocate device memory for A, B, C
cudaMalloc((void **) &dA,size);
cudaMalloc((void **) &dB,size);
cudaMalloc((void **) &dC,size);
// Copy A and B to device memory
cudaMemcpy(dA,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(dB,h_B,size,cudaMemcpyHostToDevice);
// Kernel launch with dA, dB, dC as prepared above.
// Thread blocks of 256, widened to 512 when n > 512.
int var=256;
if(n>512)
var=512;
dim3 DimBlock(var,1,1);
// Ceiling division so the grid covers all n elements.
dim3 DimGrid(((n-1)/DimBlock.x)+1,1,1);
vecAddKernel<<<DimGrid,DimBlock>>>(dA,dB,dC,n);
// Copy C back from the device
cudaMemcpy(h_C,dC,size,cudaMemcpyDeviceToHost);
// Free device memory for A, B and C
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
}
|
7,909 | #include <iostream>
using namespace std;
// Simple polymorphic base: stores a width/height pair; area() is virtual so
// derived classes dispatch through Polygon& / Polygon*.
// NOTE(review): no virtual destructor — deleting a derived object through a
// Polygon* would be undefined behavior; worth adding if heap polymorphism
// is ever used.
class Polygon {
public:
int width, height;
public:
// Set both dimensions (no validation).
void set_values(int a, int b) {
width = a;
height = b;
}
// Base-class area: a generic polygon reports area 0.
virtual int area() {
return 0;
}
};
// Rectangle: area = width * height (overrides Polygon::area).
class Rectangle: public Polygon {
public:
    int area () {
        const int w = width;
        const int h = height;
        return w * h;
    }
};
// Triangle: area = width * height / 2, integer division (overrides Polygon::area).
class Triangle: public Polygon {
public:
    int area () {
        const int doubled = width * height;
        return doubled / 2;
    }
};
// Print the dynamic type (typeid on a reference to a polymorphic type yields
// the runtime type) and the width, then virtually dispatch area().
// NOTE(review): printf and typeid rely on <cstdio>/<typeinfo> being pulled in
// transitively by <iostream>; include them explicitly if this fails to build.
int get_area(Polygon& poly) {
printf("t: %s\n", typeid(poly).name());
printf("w: %d\n", poly.width);
return poly.area();
}
int main () {
Polygon* rect = new Rectangle();
Polygon* trgl = new Triangle();
Polygon* poly = new Polygon();
rect->set_values(4, 5);
trgl->set_values(5, 5);
poly->set_values(6, 5);
printf("size for rect: %d\n", get_area(*rect));
printf("size for trgl: %d\n", get_area(*trgl));
printf("size for poly: %d\n", get_area(*poly));
return 0;
}
|
7,910 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Toy kernel for inspecting per-thread register usage: computes a constant
// (x1 + x2 + x3 = 5677) and writes it to results[gid] for in-range threads.
__global__ void register_usage_test(int *results, int size) {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int x1 = 3456;
int x2 = 1768;
int x3 = 453;
int x4 = x1 + x2 + x3;
if (gid < size) {
results[gid] = x4;
}
};
// Driver: launch the register-usage kernel over 2^22 ints.
// NOTE(review): h_ref is allocated but the results are never copied back —
// presumably leftover scaffolding; kept for compatibility, now freed.
int main() {
    int size = 1 << 22;
    int byte_size = sizeof(int) * size;
    int *h_ref = (int *)malloc(byte_size);
    int *d_results;
    cudaMalloc((void **)&d_results, byte_size);
    cudaMemset(d_results, 0, byte_size);
    dim3 blocks(128);
    dim3 grid((size + blocks.x - 1) / blocks.x);   // ceil-div grid
    printf("launching the kernel \n");
    register_usage_test<<<grid, blocks>>>(d_results, size);
    cudaDeviceSynchronize();
    // BUG FIX: both the host and device buffers were leaked.
    cudaFree(d_results);
    free(h_ref);
    return 0;
}
7,911 | #include "includes.h"
//Source: https://kb.iu.edu/d/bdmg
//INDIANA UNIVERSITY
/********************** mat_mul.cu ******************************/
#define M 256
#define P 128
#define N 64
#define BLKSIZ 16
/**********************************************************************/
// Tiled matrix multiply Cd = Ad * Bd with BLKSIZ x BLKSIZ shared-memory
// tiles. Expected launch: grid (M/BLKSIZ, N/BLKSIZ), block (BLKSIZ, BLKSIZ);
// Ad is M x P, Bd is P x N, Cd is M x N, all row-major.
__global__ void mat_mul(float *Ad, float *Bd, float *Cd) {
int m = blockIdx.x;
int n = blockIdx.y;
int i = threadIdx.x;
int j = threadIdx.y;
int k,p;
float c = 0.0;
__shared__ float As[BLKSIZ][BLKSIZ];
__shared__ float Bs[BLKSIZ][BLKSIZ];
for(p=0;p<P/BLKSIZ;p++) {
// Each thread stages one element of the A and B tiles.
As[i][j] = Ad[(m*BLKSIZ+i)*P+(p*BLKSIZ+j)];
Bs[i][j] = Bd[(p*BLKSIZ+i)*N+(n*BLKSIZ+j)];
__syncthreads();
for(k=0; k<BLKSIZ; k++) {
c += As[i][k] * Bs[k][j];
}
// BUG FIX: barrier before the next iteration overwrites the tiles —
// without it, fast threads clobber As/Bs while slower threads in the
// block are still reading them (shared-memory race).
__syncthreads();
}
Cd[(m*BLKSIZ+i)*N+(n*BLKSIZ+j)] = c;
}
7,912 | #include "includes.h"
// Backward pass of ReLU: dZ = dA where the forward input Z was non-negative,
// 0 elsewhere. One thread per element of the nRowsdZ x nColsdZ matrix.
// NOTE(review): the pass-through condition is Z >= 0, so the subgradient at
// exactly 0 is taken as 1 here (many implementations use Z > 0) — confirm
// this matches the forward definition used elsewhere.
__global__ void BackwardReLU(float* Z, float* dA, int nRowsdZ, int nColsdZ, float *dZ)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsdZ * nColsdZ)
{
if (Z[index] >= 0)
dZ[index] = dA[index];
else
dZ[index] = 0;
}
}
// Write 0 into every element of `in` (one thread per element, tail-guarded).
__global__ void zero_kernel(int * in, int length) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        in[i] = 0;
    }
}
|
7,914 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BLOCK 16
// Accumulate the quadratic form *v += u^T a u over the upper triangle
// (col >= row), counting off-diagonal terms twice — valid when `a` is
// symmetric (assumed, not checked). atomicAdd serializes the per-thread sums.
// Expected launch: 2D grid/block covering an n x n index space.
__global__ void quad(float *a, int n, float *u, float *v)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < n && col < n && col >= row) {
float sum = u[col]*a[row*n+col]*u[row];
if (col == row)
atomicAdd(v, sum);
else
// Off-diagonal term appears twice in the symmetric form.
atomicAdd(v, 2*sum);
}
}
// Host wrapper: computes u^T a u for an n x n matrix `a` (assumed symmetric)
// by launching `quad` over the upper triangle; returns the scalar result.
// NOTE(review): no CUDA error checking; the final blocking cudaMemcpy
// synchronizes with the kernel before v is read back.
float gpuquad(float *a, int n, float *u) {
float *da, *du, *dv;
float v = 0;
cudaMalloc((void **)&da, n * n * sizeof(float));
cudaMalloc((void **)&du, n * sizeof(float));
cudaMalloc((void **)&dv, sizeof(float));
cudaMemcpy(da, a, n * n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(du, u, n * sizeof(float), cudaMemcpyHostToDevice);
// Zero the accumulator on the device before the kernel adds into it.
cudaMemcpy(dv, &v, sizeof(float), cudaMemcpyHostToDevice);
int size = (n+BLOCK-1) / BLOCK;   // ceil(n / BLOCK) blocks per dimension
dim3 dimGrid(size, size);
dim3 dimBlock(BLOCK, BLOCK);
quad<<<dimGrid, dimBlock>>>(da, n, du, dv);
cudaMemcpy(&v, dv, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(du);
cudaFree(dv);
return v;
}
|
7,915 | #include <cstdio>
// Synchronize, then abort with file/line context if any CUDA error is pending.
// BUG FIX: the original expanded to `{ ... } while(0);` — a block followed by
// a separate empty while loop — because the leading `do` was missing; wrapped
// properly so the macro behaves as a single statement (safe in if/else).
// (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the modern
// equivalent — kept as-is for compatibility with old toolkits.)
#define ASSERT_NO_CUDA_ERROR() do { \
  cudaThreadSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  if (err != cudaSuccess) { \
    printf("Cuda error (%s/%d) in file '%s' in line %i\n", \
           cudaGetErrorString(err), err, __FILE__, __LINE__); \
    exit(1); \
  } \
} while(0)
// Record the 8 entries of the shared A[][] into this thread's trace slot for
// the current loop iteration, then advance the per-thread `loop` counter.
// Uses `lid`, `x`, `y`, `loop`, `A` and `trace` from the calling scope.
// BUG FIX: proper do { } while(0) wrapper — the leading `do` was missing, so
// the macro expanded to a block plus a stray empty while loop.
#define TRACE() do { \
  trace[(lid*x*y*8)+((loop)*8)+0] = A[0][0]; \
  trace[(lid*x*y*8)+((loop)*8)+1] = A[0][1]; \
  trace[(lid*x*y*8)+((loop)*8)+2] = A[0][2]; \
  trace[(lid*x*y*8)+((loop)*8)+3] = A[0][3]; \
  trace[(lid*x*y*8)+((loop)*8)+4] = A[1][0]; \
  trace[(lid*x*y*8)+((loop)*8)+5] = A[1][1]; \
  trace[(lid*x*y*8)+((loop)*8)+6] = A[1][2]; \
  trace[(lid*x*y*8)+((loop)*8)+7] = A[1][3]; \
  loop++; \
} while(0)
//set x and y through xyvals
// Barrier-behavior probe: thread 0 runs the outer loop xyvals[0] times and
// the inner loop xyvals[1] times, while all other threads use the swapped
// bounds. The total trip count x*y is the same for every thread, but the
// loop shapes differ — this appears designed to test __syncthreads() under
// differently-shaped (though count-matched) control flow; each iteration
// rotates the shared buffer A through a double-buffer swap.
__global__ void k2(int *xyvals, int *trace, int*final) {
__shared__ int A[2][4];
int buf, x, y, i, j;
int lid = threadIdx.x;
//initialize A
if (lid == 0) {
A[0][0] = 0; A[0][1] = 1; A[0][2] = 2; A[0][3] = 3;
A[1][0] = -1; A[1][1] = -1; A[1][2] = -1; A[1][3] = -1;
}
__syncthreads();
// Thread 0 takes (x,y) = (xyvals[0], xyvals[1]); others take the swap.
x = (lid == 0 ? xyvals[0] : xyvals[1]);
y = (lid == 0 ? xyvals[1] : xyvals[0]);
buf = i = 0;
int loop = 0;
while (i < x) {
j = 0;
while (j < y) {
__syncthreads();
TRACE();
// Rotate: each lane copies its right neighbor into the other buffer.
A[1-buf][lid] = A[buf][(lid+1)%4];
buf = 1 - buf;
j++;
}
i++;
}
__syncthreads();
if (lid == 0) {
final[0] = A[0][0]; final[1] = A[0][1];
final[2] = A[0][2]; final[3] = A[0][3];
final[4] = A[1][0]; final[5] = A[1][1];
final[6] = A[1][2]; final[7] = A[1][3];
}
}
#define GROUPSIZE 4
// Host driver: uploads the loop bounds, runs k2 with one block of GROUPSIZE
// threads, and prints the per-iteration trace of the shared array plus its
// final state.
// NOTE(review): atoi needs <cstdlib>; only <cstdio> is included at the top of
// the file, so this relies on a transitive include — add <cstdlib> if the
// build breaks.
int main(int argc, char **argv) {
// thread0 runs outer xyvals[0] times
// inner xyvals[1] times
// other threads do opposite
int xyvals[2];
if (argc == 3) {
xyvals[0] = atoi(argv[1]);
xyvals[1] = atoi(argv[2]);
} else {
xyvals[0] = 4;
xyvals[1] = 1;
}
int *d_xyvals;
size_t d_xyvals_size = sizeof(int)*2;
cudaMalloc((void **)&d_xyvals, d_xyvals_size);
cudaMemcpy(d_xyvals, xyvals, d_xyvals_size, cudaMemcpyHostToDevice);
// trace shared array A[] after each __syncthreads, for each thread
// number of trace items :=
// 8 values in A[]
// __syncthreads() hit (xyvals[0]*xyvals[1]) times
// by GROUPSIZE threads
int ntrace = 8 * (xyvals[0]*xyvals[1]) * GROUPSIZE;
int *trace = new int[ntrace];
// 99 is a sentinel: any slot still 99 afterwards was never written.
for (int i=0; i<ntrace; i++) {
trace[i] = 99;
}
int *d_trace;
size_t d_trace_size = sizeof(int)*ntrace;
cudaMalloc((void **)&d_trace, d_trace_size);
cudaMemcpy(d_trace, trace, d_trace_size, cudaMemcpyHostToDevice);
// also record the final state of A
int final[8];
for (int i=0; i<8; i++) {
final[i] = 99;
}
int *d_final;
size_t d_final_size = sizeof(int)*8;
cudaMalloc((void **)&d_final, d_final_size);
cudaMemcpy(d_final, final, d_final_size, cudaMemcpyHostToDevice);
// run kernel
printf("Set x and y through xyvals[%d,%d]...", xyvals[0], xyvals[1]);
ASSERT_NO_CUDA_ERROR();
k2<<</*gridDim=*/1, GROUPSIZE>>>(d_xyvals, d_trace, d_final);
ASSERT_NO_CUDA_ERROR();
printf("[done]\n");
// print out trace (blocking cudaMemcpy synchronizes with the kernel)
cudaMemcpy(trace, d_trace, d_trace_size, cudaMemcpyDeviceToHost);
int stride = 8 * (xyvals[0]*xyvals[1]);
for (int lid=0; lid<GROUPSIZE; lid++) {
printf("lid = %d\n", lid);
for (int xy=0; xy<(xyvals[0]*xyvals[1]); xy++) {
printf("(%d) A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
xy,
trace[(lid*stride)+(xy*8)+0], trace[(lid*stride)+(xy*8)+1],
trace[(lid*stride)+(xy*8)+2], trace[(lid*stride)+(xy*8)+3],
trace[(lid*stride)+(xy*8)+4], trace[(lid*stride)+(xy*8)+5],
trace[(lid*stride)+(xy*8)+6], trace[(lid*stride)+(xy*8)+7]
);
}
printf("---\n");
}
// print out final state
cudaMemcpy(final, d_final, d_final_size, cudaMemcpyDeviceToHost);
printf("final state\n");
printf(" A = {{%d,%d,%d,%d}, {%d,%d,%d,%d}}\n",
final[0],final[1],final[2],final[3],
final[4],final[5],final[6],final[7]);
cudaFree(d_xyvals);
cudaFree(d_trace);
cudaFree(d_final);
delete[] trace;
return 0;
}
|
7,916 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <cuda.h>
#include <math.h>
/* One Game-of-Life step on a toroidal (wrap-around) grid.
 * ccurrent/cnext are flattened arraysize x arraysize boards indexed as
 * [row * arraysize + col]; each thread updates exactly one cell of cnext.
 * The eight neighbour lookups below handle the torus wrap explicitly for
 * every edge/corner case instead of using modular arithmetic. */
__global__ void kernel_func(int *ccurrent, int *cnext,int arraysize)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;// get the row number
int col = blockIdx.x * blockDim.x + threadIdx.x;// get the col number
if(row >= arraysize || col >= arraysize)return;// discard threads outside the board
/*Because the ccurent array is a flattened 2d array we access each element ccurent[(row * arraysize) + column]*/
int neighbours = 0;//count the live neighbours
if((col-1) < 0){if(ccurrent[row * arraysize + (arraysize -1)] == 1 )neighbours++;}//West neighbour (wraps to last column)
else if(ccurrent[row * arraysize +(col-1)] == 1 )neighbours++;//west
if((row+1) > (arraysize-1))//South-West neighbour (south wraps to row 0)
{
if((col-1) < 0){if(ccurrent[(arraysize-1)] == 1)neighbours++;}
else if(ccurrent[(col-1)] == 1)neighbours++;
}
else if((col-1) < 0){if(ccurrent[(row+1) * arraysize + (arraysize-1)] == 1)neighbours++;}//sw
else if(ccurrent[(row+1) * arraysize +(col-1)] == 1 )neighbours++;//sw
if((row+1) > arraysize-1){if(ccurrent[col] == 1)neighbours++;}//South neighbour (wraps to row 0)
else if(ccurrent[(row+1) * arraysize + col] == 1 )neighbours++;//s
if((row+1) > (arraysize-1))//South-East neighbour
{
if((col+1) > (arraysize-1)){if(ccurrent[0] == 1)neighbours++;}
else if(ccurrent[col+1] == 1)neighbours++;
}
else if((col+1) > (arraysize-1)){if(ccurrent[(row+1) * arraysize] == 1)neighbours++;}//se
else if(ccurrent[(row+1) * arraysize + col+1] == 1 )neighbours++;//se
if((col+1) > (arraysize-1)){if(ccurrent[row * arraysize] == 1)neighbours++;}//East neighbour (wraps to column 0)
else if(ccurrent[row * arraysize + col+1] == 1 )neighbours++;//e
if((row-1) < 0)//North-East neighbour (north wraps to last row)
{
if((col+1) > (arraysize-1) ){if(ccurrent[(arraysize-1) * arraysize] == 1)neighbours++;}
else if(ccurrent[(arraysize-1)* arraysize + col+1] == 1)neighbours++;
}
else if((col+1) > arraysize-1){if(ccurrent[(row-1) * arraysize] == 1)neighbours++;}//ne
else if(ccurrent[(row-1) * arraysize + col+1] == 1 )neighbours++;//ne
if((row-1) < 0){if(ccurrent[(arraysize-1)* arraysize + col])neighbours++;}//North neighbour
else if(ccurrent[(row-1) * arraysize + col] == 1) neighbours++;//n
if((row-1) < 0)//North-West neighbour
{
if((col-1) < 0 ){if(ccurrent[(arraysize-1) * arraysize + arraysize-1] == 1)neighbours++;}
else if(ccurrent[(arraysize-1) * arraysize + col-1] == 1)neighbours++;
}
else if((col-1) < 0){if(ccurrent[(row-1) * arraysize + arraysize-1] == 1)neighbours++;}//nw
else if(ccurrent[(row-1) * arraysize + col-1] == 1 )neighbours++;//nw
if(ccurrent[row * arraysize +col] == 0)
{
if(neighbours == 3)cnext[row * arraysize + col]=1;//dead cell: becomes alive only with exactly 3 neighbours
else cnext[row * arraysize + col] = 0;
}
if(ccurrent[row * arraysize + col] == 1)
{
if(neighbours>1 && neighbours<4)cnext[row * arraysize + col] = 1;//live cell: survives with 2 or 3 neighbours
else cnext[row * arraysize + col] = 0;
}
}
/* Clear *dsame when the two generations differ anywhere, and clear *ddead
 * when any cell of the next generation is still alive. Both flags must be
 * preset to 1 by the host before launch. */
__global__ void kernel_checksimilarity(int *ccurrent, int *cnext, int arraysize, int *dsame, int *ddead)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= arraysize || row >= arraysize)
        return;
    const int idx = row * arraysize + col;
    if (ccurrent[idx] != cnext[idx])
        *dsame = 0;
    if (cnext[idx] == 1)
        *ddead = 0;
}
/* Game-of-Life driver: parses -g (generations) and -s (board size), runs the
 * simulation on the GPU, and periodically checks for a steady or dead board. */
int main(int argc, char *argv[])
{
    srand(time(NULL));
    unsigned int i, j, arraysize = 50, check = 31;
    int *same, *dead;
    int *dsame = NULL, *ddead = NULL;
    same = (int*)malloc(sizeof(int));
    dead = (int*)malloc(sizeof(int));
    /* BUG FIX: initialise the flags so the reports after the loop are well
     * defined even when the similarity check never runs (generations == 0). */
    *same = 0;
    *dead = 0;
    cudaMalloc(&dsame, sizeof(int));
    cudaMalloc(&ddead, sizeof(int));
    unsigned long generations = 50;
    /* BUG FIX: getopt() returns int; storing it in a char breaks the
     * comparison with -1 on platforms where char is unsigned. */
    int c;
    while ((c = getopt(argc, argv, "g:s:")) != -1)
    {
        switch (c)
        {
            case 'g':
                generations = atoi(optarg);
                break;
            case 's':
                arraysize = atoi(optarg);
                break;
            default:
                break;
        }
    }
    /* Host allocation: flat data buffers plus row-pointer views. */
    int *currentdata = (int*)malloc(arraysize * arraysize * sizeof(int));
    int **current = (int**)malloc(arraysize * sizeof(int*));
    int *nextdata = (int*)malloc(arraysize * arraysize * sizeof(int));
    int **next = (int**)malloc(arraysize * sizeof(int*));
    for (i = 0; i < arraysize; ++i)
    {
        current[i] = &(currentdata[i * arraysize]);
        next[i] = &(nextdata[i * arraysize]);
    }
    /* Random initial generation. */
    for (i = 0; i < arraysize; ++i)
    {
        for (j = 0; j < arraysize; ++j)
        {
            current[i][j] = rand() % 2;
            next[i][j] = 0;
        }
    }
    /* Device allocation and host-to-device transfer. */
    int *ccurrent = NULL;
    cudaMalloc(&ccurrent, (arraysize * arraysize * sizeof(int)));
    int *cnext = NULL;
    cudaMalloc(&cnext, (arraysize * arraysize * sizeof(int)));
    cudaMemcpy(ccurrent, currentdata, (arraysize * arraysize * sizeof(int)), cudaMemcpyHostToDevice);
    cudaMemcpy(cnext, nextdata, (arraysize * arraysize * sizeof(int)), cudaMemcpyHostToDevice);
    int *ctemp;
    cudaDeviceProp myCUDA;
    if (cudaGetDeviceProperties(&myCUDA, 0) == cudaSuccess)
    {
        printf("Using device %d: ", 0);
        /* BUG FIX: the original format string contained a mis-encoded byte
         * after %d; print a plain unit instead. */
        printf("%s; global mem: %d B; compute v%d.%d; clock: %d kHz\n", myCUDA.name, (int)myCUDA.totalGlobalMem, (int)myCUDA.major, (int)myCUDA.minor, (int)myCUDA.clockRate);
    }
    /* Square thread blocks sized from the device limit. */
    int threadsPerBlock = myCUDA.maxThreadsPerBlock;
    int temp = (int)sqrt(threadsPerBlock);
    printf("Maximum threads per block dimension = %d\n", temp);
    dim3 dimBlock(temp, temp);
    dim3 dimGrid((arraysize + dimBlock.x - 1) / dimBlock.x, (arraysize + dimBlock.y - 1) / dimBlock.y);
    float ttime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (i = 0; i < generations; ++i)
    {
        kernel_func<<<dimGrid, dimBlock>>>(ccurrent, cnext, arraysize);
        cudaDeviceSynchronize();
        /* Every `check` generations, test for a steady state or extinction. */
        if ((i % check) == 0)
        {
            *same = 1;
            *dead = 1;
            cudaMemcpy(dsame, same, sizeof(int), cudaMemcpyHostToDevice);
            cudaMemcpy(ddead, dead, sizeof(int), cudaMemcpyHostToDevice);
            kernel_checksimilarity<<<dimGrid, dimBlock>>>(ccurrent, cnext, arraysize, dsame, ddead);
            cudaDeviceSynchronize();
            cudaMemcpy(same, dsame, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(dead, ddead, sizeof(int), cudaMemcpyDeviceToHost);
            if ((*same == 1) || (*dead == 1)) break;
        }
        /* Swap the generation buffers for the next step. */
        ctemp = ccurrent;
        ccurrent = cnext;
        cnext = ctemp;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ttime, start, stop);
    printf("\nProcess exited at generation: %d with arraysize: %d x %d, after: %3.1f ms \n", i, arraysize, arraysize, ttime);
    if (*dead == 1) printf("Exited earlier because all cells are dead.\n");
    if (*same == 1) printf("Exited earlier because current and next generations are the same.\n");
    /* Release events, device, and host memory. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(ccurrent);
    cudaFree(cnext);
    cudaFree(dsame);
    cudaFree(ddead);
    free(same);
    free(dead);
    free(nextdata);
    free(next);
    free(currentdata);
    free(current);
    return 0;
}
7,917 | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <stdio.h>
/* Double-precision reference mish: x * tanh(softplus(x)),
 * with softplus computed stably as log1p(exp(x)). */
__device__ float reference(float x)
{
    const double xd = x;
    return (float)(xd * tanh(log1p(exp(xd))));
}
/* Fast single-precision mish using the fast-math intrinsics
 * __expf / __fdividef (reduced accuracy). With e = e^x and n = e^2 + 2e,
 * mish(x) = x * n / (n + 2). The two returns below are algebraically
 * equivalent (x - 2x/(n+2) == x*n/(n+2)); the split at -0.6f presumably
 * selects the form with the smaller rounding error in each range. */
__device__ float mish_final(float value)
{
    auto e = __expf(value);
    auto n = e * e + 2 * e;
    if (value <= -0.6f)
        return value * __fdividef(n, n + 2);
    return value - 2 * __fdividef(value, n + 2);
}
/* Naive half-precision mish: v * tanh(log(1 + exp(v))), kept for comparison
 * against the optimized variants below. */
__device__ half mish_half_old(half value)
{
    const half softplus = hlog(half(1) + hexp(value));
    return value * half(tanhf(softplus));
}
/* Optimized half-precision mish: with e = e^x and n = e^2 + 2e,
 * mish(x) = x * n / (n + 2). For x > 3.999 the result is returned as x
 * directly -- NOTE(review): presumably because mish(x) rounds to x in half
 * precision there and this also sidesteps hexp overflow for large inputs;
 * confirm against the `test_final` sweep. */
__device__ half mish_half_final(half value)
{
    if (value > half(3.999))
        return value;
    auto e = hexp(value);
    auto n = e * e + half(2) * e;
    return value * n / (n + half(2));
}
/* Accuracy sweep over [0, 6): compares three half-precision mish
 * formulations against the double-precision reference and prints the
 * absolute errors. Intended for a single-thread launch (every thread would
 * print the full sweep). */
__global__ void test()
{
    for (float x = 0; x < 6; x += 0.0001)
    {
        // double precision reference
        float ref = reference(x);
        half h = x;
        // expr1: naive formulation v * tanh(log(1 + exp(v)))
        float expr1 = [=] {
            return h * half(tanhf(hlog(half(1.0f) + hexp(h))));
        } ();
        // expr2: rational form x * n / (n + 2) with n = e^2 + 2e
        auto e = hexp(h);
        auto n = e * e + half(2) * e;
        float expr2 = h * n / (n + half(2));
        // expr3: identity passthrough (alternative form left commented)
        float expr3 = x; // h - half(2) * h / (n + half(2));
        double err1 = abs(double(ref) - double(expr1));
        double err2 = abs(double(ref) - double(expr2));
        double err3 = abs(double(ref) - double(expr3));
        // `temp` is only needed by the commented-out frexpf diagnostics below.
        int temp;
        printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
               x, ref, expr1, expr2, expr3,
               //frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
               0.0f, float(err1), float(err2), float(err3));
    }
}
/* Wide-range sweep: compare the final half-precision mish against the
 * double-precision reference over [-100, 100). Single-thread launch. */
__global__ void test_final()
{
    for (float x = -100; x < 100; x += 0.1)
    {
        const float ref = reference(x);
        const float got = mish_half_final(x);
        printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, got, abs(got - ref));
    }
}
/* Dense dump of mish_half_final over [-20, 50) for offline inspection. */
__global__ void dump()
{
    float x = -20;
    while (x < 50)
    {
        printf("%.7f %.7e\n", x, static_cast<float>(mish_half_final(x)));
        x += 0.0001;
    }
}
/* Launch the single-thread dump kernel and wait for its printf output. */
int main ()
{
    dump<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
7,918 | #include <stdio.h>
#define N 512
/****************************************************************************
* TERMINOLOGÍA *
* cada invocación paralela de add se llama block *
* el conjunto de blocks se llama grid *
* cada invocación puede referirse al indice de si bloque con blockIdx.x *
*****************************************************************************/
/* Element-wise vector add: one block per element, blockIdx.x is the index. */
__global__ void add(int*a, int*b, int*c) {
    const int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
int main(){
    int *a, *b, *c;        // host copies of a, b and c
    int *d_a, *d_b, *d_c;  // device copies of a, b and c
    const int size = N * sizeof(int);
    // Allocate device buffers.
    cudaMalloc((void**) &d_a, size);
    cudaMalloc((void**) &d_b, size);
    cudaMalloc((void**) &d_c, size);
    // Allocate host buffers and build the inputs.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for(int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
    }
    // Copy the inputs to the device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch the add kernel: one block per element.
    add<<<N,1>>>(d_a, d_b, d_c);
    // Copy the result back to the host.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++)
        printf("%d, ",c[i]);
    // Release memory.
    free(a);free(b);free(c);
    cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
}
|
7,919 | #include "includes.h"
/* Apply ghost-cell boundary conditions to both ends of three 1D arrays of
 * length nbrOfGrids: u1 and u3 copy their neighbouring interior value
 * (zero-gradient), while u2 copies with a sign flip -- NOTE(review):
 * presumably a reflective condition on a velocity/momentum-like field;
 * confirm with the caller. Every thread executing this kernel performs the
 * same writes, so it is intended for a single-thread launch. */
__global__ void boundaryCondition(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3) {
    d_u1[0] = d_u1[1];
    d_u2[0] = -d_u2[1];
    d_u3[0] = d_u3[1];
    d_u1[nbrOfGrids - 1] = d_u1[nbrOfGrids - 2];
    d_u2[nbrOfGrids - 1] = -d_u2[nbrOfGrids - 2];
    d_u3[nbrOfGrids - 1] = d_u3[nbrOfGrids - 2];
}
7,920 | #include <cuda_runtime.h>
#include <stdio.h>
/* Each launched thread prints the greeting once (10 lines for <<<1,10>>>). */
__global__ void HelloWorldFromGPU(void)
{
    printf("Hello world from GPU!\n");
}
/* Print from the host, then from 10 GPU threads. */
int main()
{
    printf("Hello world from CPU!\n");
    HelloWorldFromGPU<<<1,10>>>();
    /* BUG FIX: block until the kernel finishes so its printf buffer is
     * flushed; otherwise the process may tear down the context before the
     * device output appears. */
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
7,921 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated numerical stress kernel (file header says "Do not modify";
 * only comments added here). Exercises float arithmetic, intrinsics, and
 * array writes, then prints the accumulated value.
 * NOTE(review): -1.5792E19f / +0.0f deliberately produces infinity and many
 * literals are subnormal -- characteristic of generated compiler tests. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
    comp = +1.8369E13f + +1.5231E19f - -1.1416E-30f;
    float tmp_1 = (-1.6147E24f + var_3);
    comp = tmp_1 * +1.6122E-25f + +1.0583E-44f;
    comp += (var_4 / -1.2922E-30f + var_5 + var_6);
    if (comp == +1.3898E-42f / var_7 + (-1.1999E-36f + (var_8 / var_9 + var_10))) {
        float tmp_2 = sinf(+1.0847E-35f / (-1.8160E-43f * var_11));
        comp = tmp_2 / fabsf((var_12 * (-1.5792E19f / +0.0f)));
    }
    for (int i=0; i < var_1; ++i) {
        comp = (var_15 - var_16);
        var_13[i] = var_17 / (var_18 - -1.9131E-22f + (var_19 + +1.7892E34f));
        var_14[i] = +1.0286E-37f;
        comp += var_14[i] * var_13[i] * (var_20 * (var_21 / +1.8649E-23f));
    }
    for (int i=0; i < var_2; ++i) {
        comp = ldexpf(var_22 + (var_23 * (-0.0f - (+1.3815E-42f - +1.8187E-35f))), 2);
    }
    printf("%.17g\n", comp);
}
/* Allocate a 10-element host float array filled with v; caller owns it. */
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float)*count);
    for (int idx = 0; idx < count; ++idx) {
        buf[idx] = v;
    }
    return buf;
}
int main(int argc, char** argv) {
    /* BUG FIX: the generated kernel consumes argv[1]..argv[24]; bail out
     * early instead of dereferencing missing arguments. */
    if (argc < 25) {
        fprintf(stderr, "usage: %s <24 numeric arguments>\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float* tmp_14 = initPointer( atof(argv[14]) );
    float* tmp_15 = initPointer( atof(argv[15]) );
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    /* NOTE(review): tmp_14/tmp_15 are plain host allocations passed to a
     * device kernel; on systems without unified addressing the kernel's
     * writes to them will fault. Left as-is because this harness is
     * auto-generated -- confirm before changing. */
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
    cudaDeviceSynchronize();
    /* Release the host buffers leaked by the original version. */
    free(tmp_14);
    free(tmp_15);
    return 0;
}
|
7,922 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
using namespace std;
#define MIN(a,b) ({ \
__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
#define MAX(a,b) ({ \
__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define DIV(a,b) \
({((a) % (b) == 0) ? ((a) / (b)) : ((a) / (b) + 1); })
#define CUDA_SAFE_CALL_NO_SYNC(call) do { \
cudaError err = call; \
if (cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} } while(0)
#define HOST_CHECK_POINTER(p) ({ \
__typeof__ (p) __HOST_TEMP_POINTER = (p); \
(__HOST_TEMP_POINTER == NULL) ? ({ \
fprintf(stderr, "malloc error in file '%s' in line %i.\n", \
__FILE__, __LINE__); \
exit(EXIT_FAILURE); \
__HOST_TEMP_POINTER; \
}) : \
__HOST_TEMP_POINTER; })
/* Stage one element per thread into shared memory, increment it, and write
 * it back. Assumes blockDim.x <= 1024 (the static cache size).
 * BUG FIX: the original incremented cache[threadIdx.x] unconditionally, so
 * tail threads (offset >= arrayLength) read/modified uninitialized shared
 * memory; the increment now happens only for valid lanes. */
__global__ void kernelTemplate(int *array, int arrayLength) {
    __shared__ int cache[1024];
    unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x;
    if (offset < arrayLength) {
        cache[threadIdx.x] = array[offset];
    }
    __syncthreads();
    if (offset < arrayLength) {
        cache[threadIdx.x]++;
        array[offset] = cache[threadIdx.x];
    }
}
/* Host driver: zero an array, have the GPU increment each element, time the
 * kernel with CUDA events, and verify every element became 1. */
int main(int argc, char **argv) {
    int array_length = 10000;
    int *array = HOST_CHECK_POINTER((int *)malloc(array_length * sizeof(int)));
    /* BUG FIX: memset(array, 0, sizeof(array)) only zeroed sizeof(int*)
     * bytes (array is a pointer); clear the whole buffer. */
    memset(array, 0, array_length * sizeof(int));
    int *dev_array;
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&dev_array, array_length * sizeof(int)));
    CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(dev_array, array, array_length * sizeof(int), cudaMemcpyHostToDevice));
    dim3 blocksPerGrid(DIV(array_length, 1024));
    dim3 threadsPerBlock(1024);
    cudaEvent_t timer_start, timer_end;
    CUDA_SAFE_CALL_NO_SYNC(cudaEventCreate(&timer_start));
    CUDA_SAFE_CALL_NO_SYNC(cudaEventCreate(&timer_end));
    CUDA_SAFE_CALL_NO_SYNC(cudaEventRecord(timer_start, 0));
    kernelTemplate<<<blocksPerGrid, threadsPerBlock>>>(dev_array, array_length);
    CUDA_SAFE_CALL_NO_SYNC(cudaPeekAtLastError());
    CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
    CUDA_SAFE_CALL_NO_SYNC(cudaEventRecord(timer_end, 0));
    CUDA_SAFE_CALL_NO_SYNC(cudaEventSynchronize(timer_end));
    float timer_elapsed;
    CUDA_SAFE_CALL_NO_SYNC(cudaEventElapsedTime(&timer_elapsed, timer_start, timer_end));
    printf ("Time: %3.1f ms\n", timer_elapsed);
    CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(array, dev_array, array_length * sizeof(int), cudaMemcpyDeviceToHost));
    /* Verify: every element should have been incremented exactly once. */
    int flag = 0;
    for (int i = 0; i < array_length; i++) {
        if (array[i] != 1) {
            printf ("kernel failed\n");
            flag ++;
            break;
        }
    }
    if (flag == 0)
        printf ("kernel succeeded\n");
    /* Release events and memory (events were leaked before). */
    CUDA_SAFE_CALL_NO_SYNC(cudaEventDestroy(timer_start));
    CUDA_SAFE_CALL_NO_SYNC(cudaEventDestroy(timer_end));
    free(array);
    CUDA_SAFE_CALL_NO_SYNC(cudaFree(dev_array));
    return 0;
}
|
7,923 | #include "includes.h"
/* Compact the write permutation: rows belonging to a non-empty chunk
 * (d_gcs[chunk+1] > d_gcs[chunk]) are copied to the compressed position
 * d_gcs[chunk] * chunk_size + lane; rows of empty chunks are dropped. */
__global__ void compress_write_permutation(int *d_write_permutation, int *d_full_write_permutation, int *d_gcs, int total_pad_row_num, int chunk)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < total_pad_row_num) {
        const int chunk_id = row / chunk;
        if (d_gcs[chunk_id + 1] > d_gcs[chunk_id]) {
            const int lane = row % chunk;
            d_write_permutation[d_gcs[chunk_id] * chunk + lane] = d_full_write_permutation[row];
        }
    }
}
7,924 | #include "includes.h"
/* Neighbored-pairs block reduction: sums `input` within each block in place
 * and writes each block's partial sum to temp[blockIdx.x].
 * BUG FIXES vs. the original:
 *  - `gid > size` off-by-one allowed one out-of-range thread through;
 *  - the early `return` skipped __syncthreads() for tail threads, which is
 *    undefined behavior for a divergent barrier -- guards replace it;
 *  - input[gid + offset] could read past the end of the array. */
__global__ void redunction_neighbored_pairs_1(int * input, int * temp, int size)
{
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x;

    for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
    {
        if (tid % (2 * offset) == 0 && gid + offset < size)
        {
            input[gid] += input[gid + offset];
        }
        __syncthreads();  // all threads reach the barrier every iteration
    }
    if (tid == 0 && gid < size)
    {
        temp[blockIdx.x] = input[gid];
    }
}
7,925 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/* atomicAdd demo on a shared 10x10 accumulator: thread (0,0) zeroes Sum1,
 * then every thread adds (i+1)*(j+1) to each cell, and thread (0,0) prints
 * the totals. With the <<<1,16>>> launch in main the launch is 1-D, so
 * threadIdx.y is always 0 and each cell ends up at 16*(i+1)*(j+1). */
__global__ void addKernel()
{
    __shared__ int Sum1[10][10];
    int Sum[10][10];
    // Single-thread initialization of the shared accumulator.
    if( threadIdx.x ==0 && threadIdx.y ==0){
        for(int i=0; i<10; i++){
            for(int j=0; j<10; j++){
                Sum1[i][j] = 0;
            }
        }
    }
    __syncthreads();  // everyone waits for the zeroing before accumulating
    for(int i=0; i<10; i++){
        for(int j=0; j<10; j++){
            Sum[i][j] = (i+1)*(j+1);
            atomicAdd(&Sum1[i][j], Sum[i][j]);
        }
    }
    __syncthreads();  // all contributions visible before printing
    if( threadIdx.x ==0 && threadIdx.y ==0){
        for(int i=0; i<10; i++){
            for(int j=0; j<10; j++){
                printf("%d, ", Sum1[i][j]);
            }
        }
    }
}
int main()
{
    addKernel<<<1,16>>>();
    /* BUG FIX: without a synchronization the process can exit before the
     * kernel runs and its printf output is flushed. */
    cudaDeviceSynchronize();
    return 0;
}
|
7,926 | #define BLOCKS_NUM 10
#define BLOCK_SIZE 256
#include <iostream>
using namespace std;
/* Square each input element; one thread per element. */
__global__ void my_kernel(float *in, float *out) {
    const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    const float v = in[idx];
    out[idx] = v * v;
}
/* Fill a host buffer, square it on the GPU, and print the strided samples.
 * (Comments translated from Russian.) */
int main() {
    float data[BLOCKS_NUM * BLOCK_SIZE]; // host-side buffer
    for (int i=0; i<BLOCKS_NUM*BLOCK_SIZE; i+=BLOCK_SIZE)
        data[i]=i;
    cudaSetDevice(0); // select the device
    float *in, *out; // device buffers
    // Allocate GPU memory (size_t replaces the non-portable `uint`).
    size_t memory_size = sizeof(float) * BLOCKS_NUM * BLOCK_SIZE;
    cudaMalloc((void **)&in, memory_size);
    cudaMalloc((void **)&out, memory_size);
    // Copy the input to the device.
    cudaMemcpy(in, data, memory_size, cudaMemcpyHostToDevice);
    dim3 block(BLOCK_SIZE);
    dim3 grid(BLOCKS_NUM);
    // Launch the kernel.
    my_kernel<<<grid, block>>>(in, out);
    /* BUG FIX: cudaThreadSynchronize() is deprecated; use the modern
     * equivalent to wait for completion. */
    cudaDeviceSynchronize();
    // Copy the results back to the host.
    cudaMemcpy(data, out, memory_size, cudaMemcpyDeviceToHost);
    for (int i=0; i<BLOCKS_NUM*BLOCK_SIZE; i+=BLOCK_SIZE)
        std::cout<<data[i]<<" ";
    std::cout<<std::endl;
    // Release the GPU memory.
    cudaFree(in);
    cudaFree(out);
    return 0;
}
|
7,927 | #include<iostream>
using namespace std;
#define N 10
/* Element-wise vector add with one block per element. */
__global__ void add(int *a,int *b,int *c)
{
    const int tid = blockIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
/* Build two N-element vectors, add them on the GPU, and print the sums. */
int main()
{
    int a[N], b[N], c[N];
    int *dev_a;
    int *dev_b;
    int *dev_c;
    // Device buffers.
    cudaMalloc((void **)&dev_a, sizeof(int)*N);
    cudaMalloc((void **)&dev_b, sizeof(int)*N);
    cudaMalloc((void **)&dev_c, sizeof(int)*N);
    // Both inputs hold 0..N-1.
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i;
    }
    cudaMemcpy(dev_a, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int)*N, cudaMemcpyHostToDevice);
    add<<<N,1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    return 0;
}
|
7,928 | #include <stdio.h>
#define TPB 1024 //Thread por bloque
/* Reverse a list within one block: thread i writes its input element to the
 * mirrored position blockDim.x - 1 - i. (Comments translated from Spanish.) */
__global__ void invertirLista(int *in, int *out){
    const int src = threadIdx.x;           // this thread's source index
    const int dst = blockDim.x - 1 - src;  // mirrored destination index
    out[dst] = in[src];
}
/* Fill 0..TPB-1 on the host, reverse it on the GPU with one block of TPB
 * threads, and print input/output side by side. (Comments translated.) */
int main(){
    const unsigned int size = TPB * sizeof(int);  // bytes required
    int* h_in = (int*) malloc(size);              // host input buffer
    for (int i = 0; i < TPB; i++){                // fill with 0..TPB-1
        h_in[i] = i;
    }
    int *d_in; cudaMalloc((void**)&d_in, size);   // device input
    int *d_out; cudaMalloc((void**)&d_out, size); // device output
    cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
    invertirLista<<<1, TPB>>>(d_in, d_out);       // one block, TPB threads
    int* h_out = (int*) malloc(size);             // host output buffer
    cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
    cudaFree(d_in); cudaFree(d_out);              // release device memory
    printf(" IN / OUT \n");
    for (int i = 0; i < TPB; i++){
        printf(" %d / %d \n", h_in[i], h_out[i]);
    }
    free(h_in); free(h_out);                      // release host memory
    return 0;
}
7,929 | /************************************************************************************\
* *
* Copyright (c) 2014 Advanced Micro Devices, Inc.                                   *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#define BIG_NUM 99999999
/**
* @brief min.+
* @param num_rows Number of matrix rows (one per vertex)
* @param row CSR pointer array
* @param col CSR column array
* @param data Weight array
* @param x Input vector
* @param y Output vector
*/
__global__ void
spmv_min_dot_plus_kernel(const int num_rows, int *row, int *col, int *data,
                         int *x, int *y)
{
    // One thread per CSR row: (min, +) semiring "dot product" of the row
    // with x, seeded with the vertex's current distance x[tid].
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_rows)
        return;
    int best = x[tid];
    for (int i = row[tid]; i < row[tid + 1]; i++) {
        int candidate = data[i] + x[col[i]];
        if (candidate < best) {
            best = candidate;
        }
    }
    y[tid] = best;
}
/**
* @brief min.+
* @param num_nodes number of vertices
* @param height the height of the adjacency matrix (col-major)
* @param col the col array
* @param data the data array
* @param x the input vector
* @param y the output vector
*/
__global__ void
ell_min_dot_plus_kernel(const int num_nodes, const int height, int *col,
                        int *data, int *x, int *y)
{
    // One thread per vertex: (min, +) product over an ELLPACK matrix stored
    // column-major, so consecutive entries of a row are num_nodes apart.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    int best = x[tid];
    int offset = tid;
    for (int i = 0; i < height; i++) {
        int candidate = data[offset] + x[col[offset]];
        if (candidate < best) {
            best = candidate;
        }
        offset += num_nodes;
    }
    y[tid] = best;
}
/**
* @brief vector_init
* @param vector1 vector1
* @param vector2 vector2
* @param i source vertex id
* @param num_nodes number of vertices
*/
__global__ void
vector_init(int *vector1, int *vector2, const int i, const int num_nodes)
{
    // Distance initialization: the source vertex i starts at 0, every other
    // vertex at the BIG_NUM "infinity" sentinel; both vectors get the value.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    int value = (tid == i) ? 0 : BIG_NUM;
    vector1[tid] = value;
    vector2[tid] = value;
}
/**
* @brief vector_assign
* @param vector1 vector1
* @param vector2 vector2
* @param num_nodes number of vertices
*/
__global__ void
vector_assign(int *vector1, int *vector2, const int num_nodes)
{
    // Element-wise copy vector2 -> vector1.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_nodes)
        vector1[tid] = vector2[tid];
}
/**
* @brief vector_diff
* @param vector1 vector1
* @param vector2 vector2
* @param stop termination variable
* @param num_nodes number of vertices
*/
__global__ void
vector_diff(int *vector1, int *vector2, int *stop, const int num_nodes)
{
    // Raise the termination flag if the two vectors differ anywhere.
    // (All writers store the same value, so the race on *stop is benign.)
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < num_nodes && vector1[tid] != vector2[tid])
        *stop = 1;
}
|
7,930 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 512
/* Wrap a CUDA call, print the failing statement and error, and bail out of
 * the enclosing int-returning function on failure.
 * BUG FIX: the original printf calls had no %s conversion, so the #stmt text
 * and cudaGetErrorString() result were silently discarded. */
#define _check(stmt)                                                   \
    do {                                                               \
        cudaError_t err = stmt;                                        \
        if (err != cudaSuccess) {                                      \
            printf("Failed to run stmt %s\n", #stmt);                  \
            printf("Got CUDA error %s\n", cudaGetErrorString(err));    \
            return -1;                                                 \
        }                                                              \
    } while (0)
/* Entry point: require exactly two input-file arguments. */
int main(int argc, char** argv) {
    if (argc != 3) {
        printf("Usage: ./scan <input_data_1> <input_data_2>\n");
        return -1;
    }
    return 0;
}
|
7,931 | #include "includes.h"
/* 1D stencil in x applied to a 2D field (periodic in x), using a shared-
 * memory tile of nxLocal x nyLocal doubles followed by the numSten weights.
 * Launch contract (inferred from the indexing -- confirm with the caller):
 * blockDim.x == BLOCK_X, dynamic shared memory of
 * (nxLocal*nyLocal + numSten) * sizeof(double), and nx divisible by BLOCK_X.
 * Left/right halos of numStenLeft/numStenRight cells wrap around at the
 * domain edges. */
__global__ void kernel2DXp ( double* dataOutput, double* dataInput, const double* weights, const int numSten, const int numStenLeft, const int numStenRight, const int nxLocal, const int nyLocal, const int BLOCK_X, const int nx )
{
    // -----------------------------
    // Allocate the shared memory
    // -----------------------------
    extern __shared__ int memory[];
    double* arrayLocal = (double*)&memory;
    double* weigthsLocal = (double*)&arrayLocal[nxLocal * nyLocal];
    // Move the weigths into shared memory
    // (every thread writes the same values; redundant but race-free)
    #pragma unroll
    for (int k = 0; k < numSten; k++)
    {
        weigthsLocal[k] = weights[k];
    }
    // -----------------------------
    // Set the indexing
    // -----------------------------
    // True matrix index
    int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
    int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
    // Local matrix index (shifted right by the left halo width)
    int localIdx = threadIdx.x + numStenLeft;
    int localIdy = threadIdx.y;
    // Local sum variable
    double sum = 0.0;
    // Set index for summing stencil
    int stenSet;
    // -----------------------------
    // Set interior
    // -----------------------------
    arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
    // -----------------------------
    // Set x boundaries
    // -----------------------------
    // If block is in the interior: halos come from the neighbouring cells
    if (blockIdx.x != 0 && blockIdx.x != nx / BLOCK_X - 1)
    {
        if (threadIdx.x < numStenLeft)
        {
            arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
        }
        if (threadIdx.x < numStenRight)
        {
            arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
        }
    }
    // If block is on the left boundary: left halo wraps to the domain's right edge
    if (blockIdx.x == 0)
    {
        arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
        if (threadIdx.x < numStenLeft)
        {
            arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (nx - numStenLeft + threadIdx.x)];
        }
        if (threadIdx.x < numStenRight)
        {
            arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
        }
    }
    // Set the right boundary blocks: right halo wraps to the domain's left edge
    if (blockIdx.x == nx / BLOCK_X - 1)
    {
        arrayLocal[localIdy * nxLocal + threadIdx.x + numStenLeft] = dataInput[globalIdy * nx + globalIdx];
        if (threadIdx.x < numStenLeft)
        {
            arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
        }
        if (threadIdx.x < numStenRight)
        {
            arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + threadIdx.x];
        }
    }
    // -----------------------------
    // Compute the stencil
    // -----------------------------
    __syncthreads();
    stenSet = localIdy * nxLocal + threadIdx.x;
    #pragma unroll
    for (int k = 0; k < numSten; k++)
    {
        sum += weigthsLocal[k] * arrayLocal[stenSet + k];
    }
    __syncthreads();
    // -----------------------------
    // Copy back to global
    // -----------------------------
    dataOutput[globalIdy * nx + globalIdx] = sum;
}
7,932 | #include<stdio.h>
__global__ void myKernel()
{
    // NOTE(review): 1024*1024*1024 floats = 4 GiB of static shared memory,
    // vastly beyond the per-block limit -- presumably deliberate so that
    // main() can demonstrate the resulting error via cudaGetLastError();
    // confirm that intent before "fixing" the size.
    __shared__ float sdata[1024*1024*1024];
    sdata[blockIdx.x] = blockIdx.x;
}
/* Launch 100 single-thread blocks, then report the resulting error state. */
int main(void){
    myKernel<<<100,1>>>();
    cudaDeviceSynchronize();
    cudaError_t status = cudaGetLastError();
    printf("status: %s\n", cudaGetErrorString(status));
    return 0;
}
|
7,933 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
/* Device-side RNG functor: wraps an engine + uniform distribution copied by
 * value into the kernel. */
struct rng_gpu{
    thrust::minstd_rand eng;                      // base engine state (seeded on the host)
    thrust::uniform_real_distribution<double> d;  // target distribution
    /* Return the i-th sample of the stream.
     * BUG FIX: every device thread receives its own copy of `eng`, so the
     * original `return d(eng)` produced the identical first draw for every
     * element. Advance a local copy of the engine by i states so element i
     * gets a distinct, deterministic value. */
    __device__ __host__
    double operator() (const int &i){
        thrust::minstd_rand local = eng;
        local.discard(i);
        return d(local);
    }
};
/* Read a seed from stdin, print ten host-side draws from U(25, 40), then
 * generate ten more through the rng_gpu functor on the device and print
 * those. The functor receives copies of the engine/distribution state. */
int main()
{
    int seed;
    std::cin >> seed;
    thrust::minstd_rand eng(seed);
    thrust::uniform_real_distribution<double> d(25, 40);
    // Host-side reference draws (these advance the local engine only).
    for(int i = 0; i< 10; i ++)
    {
        std::cout << d(eng) << "\n";
    }
    rng_gpu rc = {.eng = eng, .d = d};
    thrust::device_vector <double> vec(10);
    // Device-side generation: one functor invocation per index 0..9.
    thrust::transform(thrust::make_counting_iterator<int>(0),
                      thrust::make_counting_iterator<int>(10),
                      vec.begin(),
                      rc);
    for(int i = 0; i< 10; i ++)
    {
        std::cout << vec[i] << "\n";
    }
    std::cout << "\n";
}
7,934 | #include <cuda.h>
#include <stdio.h>
int main() {
    // Intentionally empty placeholder: only verifies that the CUDA headers
    // compile and the program links.
    return 0;
}
|
7,935 | #include "includes.h"
/* Element-wise addition of two n x m matrices stored row-major with row
 * length n; one thread per element, out-of-range threads do nothing. */
__global__ void matadd(const float *a, const float *b, float *c, int n, int m){
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < n && row < m) {
        const int idx = row * n + col;
        c[idx] = a[idx] + b[idx];
    }
}
7,936 | /* Block size X: 32 */
/* Per-element max/min bounds for an FCT limiter: one block per mesh element
 * (blockIdx.x), 32 threads striding over vertical levels (see the
 * "Block size X: 32" note above). For active levels the result is the
 * max of fct_ttf_max and min of fct_ttf_min over the element's three nodes;
 * inactive levels below that (up to maxLevels - 1) get sentinel bounds.
 * Node indices in elementNodes are 1-based (hence the "- 1"). */
__global__ void fct_ale_a2(const int maxLevels, const int * __restrict__ nLevels, const int * __restrict__ elementNodes, double2 * __restrict__ UV_rhs, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min)
{
    const unsigned int element_index = (blockIdx.x * maxLevels);
    const unsigned int element_node0_index = (elementNodes[(blockIdx.x * 3)] - 1) * maxLevels;
    const unsigned int element_node1_index = (elementNodes[(blockIdx.x * 3) + 1] - 1) * maxLevels;
    const unsigned int element_node2_index = (elementNodes[(blockIdx.x * 3) + 2] - 1) * maxLevels;
    for ( unsigned int level = threadIdx.x; level < maxLevels + 1; level += 32 )
    {
        if ( level < nLevels[blockIdx.x] - 1 )
        {
            // Active level: bounds from the three surrounding nodes.
            double2 temp = make_double2(0.0, 0.0);
            temp.x = fmax(fct_ttf_max[element_node0_index + level], fct_ttf_max[element_node1_index + level]);
            temp.x = fmax(temp.x, fct_ttf_max[element_node2_index + level]);
            temp.y = fmin(fct_ttf_min[element_node0_index + level], fct_ttf_min[element_node1_index + level]);
            temp.y = fmin(temp.y, fct_ttf_min[element_node2_index + level]);
            UV_rhs[element_index + level] = temp;
        }
        else if ( level < maxLevels - 1 )
        {
            // Inactive level: sentinel bounds that never constrain the limiter.
            UV_rhs[element_index + level] = make_double2(-1.e+3, 1.e+3);
        }
    }
}
7,937 | /**
* Sensitivity Encoding (SENSE) Reconstruction
* SENSE is a Parallel MRI reconstruction method. The inputs of SENSE reconstruction are aliased data from MRI scanner and
* receiver coil sensitivity encoding matrices.
* The output of SENSE reconstruction is reconstructed MR image for clinical usage.
*
* Mathematically, SENSE can be represented as: U = C x M (1), where U is the aliased image obtained from scanner, M is MR image to be
* reconstructed and C is receiver coil sensitivity encoding matrix.
* In order to find M, the above equation can be written as: M = inv(C) x U (2).
 * Solving equation 2 requires inverting a large number of small encoding matrices (in this simple case the matrices are 2x2).
 * Performing this task iteratively involves significant computation time. A parallel implementation of SENSE on the GPU is presented in this work, where
* number of CUDA threads are launched as per required matrix inversions to perform the tasks in parallel hence reducing the computation
* time (one of the main limitation in MRI). The GPU implementation using NVIDIA Titan XP GPU is more than 10x faster compared to CPU implementation
* (core i7 with 8GB RAM)
* In this work, the size of U, C and M are 128x256x2, 256x256x2 and 256x256
*
* Note: The data in all the matrices is complex, therefore real part and imaginary part are handled separately in the code given below
*
*/
//declaration of header files
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define N 128
using namespace std;
//start of CUDA kernel "sense".
// SENSE unfolding kernel (acceleration factor 2): each thread reconstructs one
// pair of unaliased pixels by inverting its 2x2 complex coil-sensitivity
// matrix and applying the inverse to the two coil measurements at `index`.
// Complex arithmetic is done component-wise on separate real/imag arrays.
// Launch expectation: 128x256 threads total, one per aliased pixel; the
// constant 32768 (= 128*256) is the offset between the two folded planes.
__global__ void sense( float * d_i1imag, float * d_i2imag, float * d_i1real, float * d_i2real, float * d_c1imag, float * d_c2imag, float * d_c1real, float * d_c2real, float * d_rmreal, float * d_rmimag)
{
float d_detreal; // determinant real part
float d_detimag; // determinant imaginary part
float d_divtemp; // squared magnitude of the complex determinant
float d_sreal[2][2]; //encoding matrix of size 2x2 real part
float d_simag[2][2]; //encoding matrix of size 2x2 imaginary part
float d_rpreal[2]; //reconstructed pixels real part
float d_rpimag[2]; //reconstructed pixels imaginary part
float d_sinreal[2][2]; // inverse of 2x2 matrix (real part)
float d_sinimag[2][2]; // inverse of 2x2 matrix (imaginary part)
//CUDA thread index calculation; N is the aliased-image width (128)
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y +threadIdx.y;
int index=col+row*N;
//copying data from input receiver coil sensitivity matrix (real part)
d_sreal[0][0] = d_c1real[index];
d_sreal[0][1] = d_c1real[index+32768];
d_sreal[1][0] = d_c2real[index];
d_sreal[1][1] = d_c2real[index+32768];
//copying data from input receiver coil sensitivity matrix (imaginary part)
d_simag[0][0] = d_c1imag[index];
d_simag[0][1] = d_c1imag[index+32768];
d_simag[1][0] = d_c2imag[index];
d_simag[1][1] = d_c2imag[index+32768];
//complex determinant det = s00*s11 - s01*s10, expanded into real/imag parts
d_detreal = ((d_sreal[0][0]*d_sreal[1][1])-(d_simag[0][0] *d_simag[1][1]))-((d_sreal[0][1] * d_sreal[1][0]) - (d_simag[0][1] *d_simag[1][0]));
d_detimag = ((d_simag[0][0]*d_sreal[1][1])+(d_sreal[0][0] *d_simag[1][1]))-((d_simag[0][1] * d_sreal[1][0]) + (d_sreal[0][1] *d_simag[1][0]));
d_divtemp = (d_detreal*d_detreal) + (d_detimag * d_detimag);
//adjugate divided by the determinant (complex division via conj(det)/|det|^2):
//real part. NOTE(review): no guard against a (near-)singular matrix
//(d_divtemp == 0) -- confirm inputs guarantee invertibility.
d_sinreal[0][0] = ((d_sreal[1][1] * d_detreal) - (d_simag[1][1]*(-d_detimag))) /d_divtemp;
d_sinreal[0][1] = -(((d_sreal[0][1] * d_detreal) - (d_simag[0][1]*(-d_detimag)))) /d_divtemp;
d_sinreal[1][0] = -(((d_sreal[1][0] * d_detreal) - (d_simag[1][0]*(-d_detimag)))) /d_divtemp;
d_sinreal[1][1] = ((d_sreal[0][0] * d_detreal) - (d_simag[0][0]*(-d_detimag))) /d_divtemp;
//adjugate divided by the determinant (imaginary part)
d_sinimag[0][0] = ((d_simag[1][1] * d_detreal) + (d_sreal[1][1]*(-d_detimag)))/d_divtemp;
d_sinimag[0][1] = -(((d_simag[0][1] * d_detreal) + (d_sreal[0][1]*(-d_detimag))))/d_divtemp;
d_sinimag[1][0] = -(((d_simag[1][0] * d_detreal) + (d_sreal[1][0]*(-d_detimag))))/d_divtemp;
d_sinimag[1][1] = ((d_simag[0][0] * d_detreal) + (d_sreal[0][0]*(-d_detimag)))/d_divtemp;
//Multiply the 2x2 inverse with the 2x1 vector of aliased measurements
//(real part of the complex product)
d_rpreal[0] = ((d_i1real[index] * d_sinreal[0][0]) - (d_i1imag[index] * d_sinimag[0][0])) + ((d_i2real[index] * d_sinreal[0][1]) - (d_i2imag[index] * d_sinimag[0][1]));
d_rpreal[1] = ((d_i1real[index] * d_sinreal[1][0]) - (d_i1imag[index] * d_sinimag[1][0])) + ((d_i2real[index] * d_sinreal[1][1]) - (d_i2imag[index] * d_sinimag[1][1]));
//Multiply the 2x2 inverse with the 2x1 vector of aliased measurements
//(imaginary part of the complex product)
d_rpimag[0] = ((d_i1real[index] * d_sinimag[0][0]) + (d_i1imag[index] * d_sinreal[0][0])) + ((d_i2real[index] * d_sinimag[0][1]) + (d_i2imag[index] * d_sinreal[0][1]));
d_rpimag[1] = ((d_i1real[index] * d_sinimag[1][0]) + (d_i1imag[index] * d_sinreal[1][0])) + ((d_i2real[index] * d_sinimag[1][1]) + (d_i2imag[index] * d_sinreal[1][1]));
//Store the two unfolded pixels into the reconstructed image planes
d_rmreal[index] = d_rpreal[0];
d_rmreal[index+32768] = d_rpreal[1];
d_rmimag[index] = d_rpimag[0];
d_rmimag[index+32768] = d_rpimag[1];
}
// Read `count` comma-separated floats from `path` into `dst`.
// Exits on a missing file -- the original code dereferenced the NULL FILE*.
static void readMatrixFile(const char *path, float *dst, int count)
{
    FILE *fp = fopen(path, "r");
    if (fp == NULL) {
        printf("Error: could not open input file %s\n", path);
        exit(1);
    }
    float temp;
    for (int k = 0; k < count; k++) {
        fscanf(fp, "%f, ", &temp);
        dst[k] = temp;
    }
    fclose(fp);
}
// Write a rows x cols matrix to `path`, matching the original output format
// (" %f \t" per element, " \n" after each row).
static void writeMatrixFile(const char *path, const float *src, int rows, int cols)
{
    FILE *fp = fopen(path, "w");
    if (fp == NULL) {
        printf("Error: could not open output file %s\n", path);
        exit(1);
    }
    int k = 0;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            fprintf(fp, " %f \t", src[k]);
            k = k + 1;
        }
        fprintf(fp, " \n");
    }
    fclose(fp);
}
// Zero-initialized float array of `count` elements; exit(1) on failure
// (same failure behavior as the original calloc checks).
static float *allocFloats(int count)
{
    float *p = (float *) calloc(count, sizeof(float));
    if (p == NULL) exit(1);
    return p;
}
// SENSE reconstruction driver: loads aliased images and coil maps from text
// files, runs the `sense` kernel once (timed with CUDA events), and writes the
// reconstructed real/imaginary planes to text files.
int main ()
{
    const int maxim = 128*256; //aliased image size (128x256)
    const int maxx = 256*256;  //unaliased image / coil map size (256x256)
    const int ARRAY_BYTES1 = maxx * sizeof(float);  //full-size matrices
    const int ARRAY_BYTES2 = maxim * sizeof(float); //aliased matrices
    // 1x128 blocks of 128x2 threads = 32768 threads, one per aliased pixel.
    dim3 grid(1,128);
    dim3 block(128,2);
    // Host buffers (zero-initialized).
    float * h_rimreal  = allocFloats(maxx);  //reconstructed image, real
    float * h_rimimag  = allocFloats(maxx);  //reconstructed image, imaginary
    float * h_coil1imag = allocFloats(maxx); //coil 1 sensitivity, imaginary
    float * h_coil1real = allocFloats(maxx); //coil 1 sensitivity, real
    float * h_coil2imag = allocFloats(maxx); //coil 2 sensitivity, imaginary
    float * h_coil2real = allocFloats(maxx); //coil 2 sensitivity, real
    float * h_im1real = allocFloats(maxim);  //aliased image 1, real
    float * h_im1imag = allocFloats(maxim);  //aliased image 1, imaginary
    float * h_im2real = allocFloats(maxim);  //aliased image 2, real
    float * h_im2imag = allocFloats(maxim);  //aliased image 2, imaginary
    // Load inputs (same files and order as before; the repeated fscanf loops
    // are factored into readMatrixFile).
    readMatrixFile("im1real.txt",   h_im1real,   maxim);
    readMatrixFile("im1imag.txt",   h_im1imag,   maxim);
    readMatrixFile("im2real.txt",   h_im2real,   maxim);
    readMatrixFile("im2imag.txt",   h_im2imag,   maxim);
    readMatrixFile("coil1imag.txt", h_coil1imag, maxx);
    readMatrixFile("coil1real.txt", h_coil1real, maxx);
    readMatrixFile("coil2imag.txt", h_coil2imag, maxx);
    readMatrixFile("coil2real.txt", h_coil2real, maxx);
    // Device buffers.
    float * d_c1imag; float * d_c1real; float * d_c2imag; float * d_c2real;
    float * d_i1real; float * d_i1imag; float * d_i2real; float * d_i2imag;
    float * d_rmreal; float * d_rmimag;
    cudaMalloc( (void**) &d_rmreal, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_rmimag, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_c1imag, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_c1real, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_c2imag, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_c2real, ARRAY_BYTES1);
    cudaMalloc( (void**) &d_i1real, ARRAY_BYTES2);
    cudaMalloc( (void**) &d_i1imag, ARRAY_BYTES2);
    cudaMalloc( (void**) &d_i2real, ARRAY_BYTES2);
    cudaMalloc( (void**) &d_i2imag, ARRAY_BYTES2);
    // Upload inputs (and the zeroed output planes).
    cudaMemcpy(d_i1imag, h_im1imag, ARRAY_BYTES2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_i2imag, h_im2imag, ARRAY_BYTES2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_i1real, h_im1real, ARRAY_BYTES2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_i2real, h_im2real, ARRAY_BYTES2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c1imag, h_coil1imag, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c2imag, h_coil2imag, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c1real, h_coil1real, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c2real, h_coil2real, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rmreal, h_rimreal, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rmimag, h_rimimag, ARRAY_BYTES1, cudaMemcpyHostToDevice);
    // Time the reconstruction kernel with CUDA events.
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    sense<<<grid,block>>>( d_i1imag, d_i2imag, d_i1real, d_i2real, d_c1imag, d_c2imag, d_c1real, d_c2real, d_rmreal, d_rmimag);
    cudaDeviceSynchronize(); //wait for the kernel before recording the stop event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop) ;
    cudaEventElapsedTime(&time, start, stop) ;
    printf("Time for SENSE reconstruction: %3.1f ms \n", time);
    // Download and store the reconstructed planes.
    cudaMemcpy(h_rimreal, d_rmreal, ARRAY_BYTES1, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_rimimag, d_rmimag, ARRAY_BYTES1, cudaMemcpyDeviceToHost);
    writeMatrixFile("reconstructionreal.txt", h_rimreal, 256, 256);
    writeMatrixFile("reconstructionimag.txt", h_rimimag, 256, 256);
    printf("\n SENSE reconstruction is finished successfully \n");
    // free the host memory
    free (h_coil1imag);
    free (h_coil1real);
    free (h_coil2imag);
    free (h_coil2real);
    free (h_im1real);
    free (h_im1imag);
    free (h_im2real);
    free (h_im2imag);
    free (h_rimreal);
    free (h_rimimag);
    // free the device memory
    cudaFree (d_c1imag);
    cudaFree (d_c1real);
    cudaFree (d_c2imag);
    cudaFree (d_c2real);
    cudaFree (d_i1real);
    cudaFree (d_i1imag);
    cudaFree (d_i2real);
    cudaFree (d_i2imag);
    cudaFree (d_rmreal);
    cudaFree (d_rmimag);
    getchar(); //keep the console window open, as before
    return 0;
}
|
7,938 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define TOTAL_PRIMES 1000000
void start(unsigned long semiPrime);
void createPrimesArray(unsigned int *array);
__global__ void factorSemiprime(unsigned int* d_primes, unsigned int* d_solution, unsigned long semiPrime);
// Entry point: expects the semiprime to factor as the first command-line
// argument (base-10).
int main ( int argc, char *argv[] ) {
if (argc < 2) {
printf("Sorry, we need a command line argument\n");
printf("Run again with Semiprime you would like to factor\n");
exit(0);
}
else {
char* semiPrime = argv[1];
char* ptr;
unsigned long longSemiPrime;
// NOTE(review): no validation -- strtoul returns 0 for non-numeric input,
// which then flows into the search as-is.
longSemiPrime = strtoul(semiPrime, &ptr, 10);
start(longSemiPrime);
}
}
// Factor `semiPrime` on the GPU by testing all pairwise products of the first
// TOTAL_PRIMES primes loaded from disk; prints the factors or a not-found message.
void start(unsigned long semiPrime) {
    printf("\n**************Starting GPU***************\n");
    // Heap-allocate the prime table: TOTAL_PRIMES unsigned ints is ~4 MB,
    // which overflows a typical default stack when declared as a local array
    // (as the original did).
    unsigned int *primes = (unsigned int*) malloc(sizeof(unsigned int) * TOTAL_PRIMES);
    if (primes == NULL) {
        printf("Out of memory allocating primes array\n");
        return;
    }
    for (int i = 0; i < TOTAL_PRIMES; i++) {
        primes[i] = 0;
    }
    createPrimesArray(&primes[0]);
    // solution[0]/solution[1] receive the factor pair; 0/0 means "not found".
    unsigned int *solution = (unsigned int*) malloc(sizeof(unsigned int) * 2);
    if (solution == NULL) {
        printf("Out of memory allocating solution array\n");
        free(primes);
        return;
    }
    for (int i = 0; i < 2; i++) {
        solution[i] = 0;
    }
    unsigned int * d_primes;
    unsigned int * d_solution;
    // 2-D grid of 16x16 blocks covering all (x, y) prime-index pairs.
    int gridDimensions = (TOTAL_PRIMES / 16) + 1;
    dim3 dimGrid(gridDimensions, gridDimensions, 1);
    dim3 dimBlock(16, 16, 1);
    cudaMalloc((void**)&d_primes, TOTAL_PRIMES *sizeof(unsigned int));
    cudaMalloc((void**)&d_solution, 2 * sizeof(unsigned int));
    // Copy primes (and the zeroed solution slots) to the GPU.
    cudaMemcpy(d_primes, primes, TOTAL_PRIMES * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_solution, solution, 2 * sizeof(unsigned int), cudaMemcpyHostToDevice);
    factorSemiprime<<<dimGrid,dimBlock>>>(d_primes, d_solution, semiPrime);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(solution, d_solution, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    int totalPrimes = TOTAL_PRIMES;
    if (solution[0] == 0 && solution[1] == 0) {
        printf("%lu is not a semiprime with factors less than %d\n\n", semiPrime, totalPrimes);
    }
    else {
        printf("The prime factors of %lu are %u and %u\n\n", semiPrime, solution[0], solution[1]);
    }
    cudaFree(d_solution);
    cudaFree(d_primes);
    free(solution);
    free(primes);
}
// Fill `array` with TOTAL_PRIMES primes read from primes1.txt (whitespace-
// separated). Exits when the file is missing -- the original passed the NULL
// FILE* straight to fscanf -- and closes the handle, which was leaked before.
void createPrimesArray(unsigned int *array) {
    FILE *inputFile;
    unsigned int mostRecentNumber;
    inputFile = fopen("primes1.txt", "r");
    if (inputFile == NULL) {
        printf("Could not open primes1.txt\n");
        exit(1);
    }
    for (int i = 0; i < TOTAL_PRIMES; i++) {
        fscanf(inputFile, "%u", &mostRecentNumber);
        array[i] = mostRecentNumber;
    }
    fclose(inputFile);
}
// One thread per (x, y) prime-index pair; a thread whose pair multiplies to
// semiPrime writes the factors into d_solution[0..1]. The grid overshoots
// TOTAL_PRIMES, so out-of-range threads must exit.
__global__
void factorSemiprime(unsigned int* d_primes, unsigned int* d_solution, unsigned long semiPrime) {
    int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds check must be >= : the original used >, allowing index ==
    // TOTAL_PRIMES to read one element past the end of d_primes.
    if (xIndex >= TOTAL_PRIMES || yIndex >= TOTAL_PRIMES) {
        return;
    }
    unsigned int xValue = d_primes[xIndex];
    unsigned int yValue = d_primes[yIndex];
    // Widen before multiplying: the product of two ~1e9 primes overflows 32 bits.
    unsigned long value = (unsigned long)xValue * (unsigned long)yValue;
    if (value == semiPrime) {
        d_solution[0] = xValue;
        d_solution[1] = yValue;
    }
}
|
7,939 | #include <stdio.h>
// Function to print from device
// Prints each thread's block and thread index via device-side printf
// (debug/demo only; device printf serializes output).
__global__
void print(){
printf("Block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
int main(){
    // Launch 2 blocks of 16 threads; each thread prints its coordinates.
    print<<<2, 16>>>();
    // Wait for the kernel: launches are asynchronous and device printf output
    // is buffered, so returning immediately (as the original did) can end the
    // process before anything is printed.
    cudaDeviceSynchronize();
    return 0;
}
|
7,940 | #include "includes.h"
// Element-wise arithmetic benchmark: C[i] is a fixed 10-operation polynomial
// in A[i] and B[i]. One thread per element; out-of-range threads exit.
__global__ void sum10ops(float *A, float *B, float *C, const int N)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    const float x = A[tid];
    const float y = B[tid];
    C[tid] = x + y - x*x + 3*y - 4*x*y + y*y*7 - 8;
}
7,941 | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#define THREADS 32
#define BLOCKS 1
#define N (THREADS*BLOCKS)
#define BIT 32
// Host reference: LSD radix sort, one bit per pass, stable two-way partition.
// Scratch buffers are statically sized by N, so `length` must not exceed N.
// Fix: the original hard-coded N as the loop bound and ignored the `length`
// parameter entirely.
__host__ void cpuRadixSort(uint32_t* array, uint32_t length)
{
    static uint32_t cpu_temp0[N];
    static uint32_t cpu_temp1[N];
    for(int bit=0;bit<BIT;bit++){
        uint32_t base_cnt0 = 0;
        uint32_t base_cnt1 = 0;
        const uint32_t bit_mask = (1u << bit);
        for(uint32_t i=0;i<length;i++){
            const uint32_t elem = array[i];
            if((elem & bit_mask) > 0){
                cpu_temp1[base_cnt1] = elem;
                base_cnt1++;
            }else{
                cpu_temp0[base_cnt0] = elem;
                base_cnt0++;
            }
        }
        // Concatenate: zeros first preserves ordering from previous passes.
        for(uint32_t i=0;i<base_cnt0;i++){
            array[i] = cpu_temp0[i];
        }
        for(uint32_t i=0;i<base_cnt1;i++){
            array[i+base_cnt0] = cpu_temp1[i];
        }
    }
}
// Per-thread LSD radix sort over interleaved lists: thread `tid` owns elements
// tid, tid+num_list, tid+2*num_list, ... and sorts that strided sub-list.
// NOTE(review): this sorts each thread's sub-list independently; producing a
// fully sorted array still requires a merge step -- verify the caller's intent.
__device__ void gpuRadixSort(uint32_t* const array,
    uint32_t* const sort_tmp0,
    uint32_t* const sort_tmp1,
    const uint32_t num_list,
    const uint32_t num_elements,
    const uint32_t tid)
{
    // Iterate over all 32 bits of the key. Fix: the original looped
    // `bit < N` (the element count), which was only correct because N
    // happens to equal 32 in this configuration; bits >= 32 would also make
    // the shift below undefined.
    for(int bit=0;bit<32;bit++){
        uint32_t base_cnt0 = 0;
        uint32_t base_cnt1 = 0;
        const uint32_t bit_mask = (1u << bit);
        for(uint32_t i=0;i<num_elements;i+=num_list){
            const uint32_t elem = array[i + tid];
            if((elem & bit_mask) > 0){
                sort_tmp1[base_cnt1 + tid] = elem;
                base_cnt1+=num_list;
            }else{
                sort_tmp0[base_cnt0 + tid] = elem;
                base_cnt0+=num_list;
            }
        }
        // Concatenate the 0-partition then the 1-partition back into array.
        for(uint32_t i=0;i<base_cnt0;i+=num_list){
            array[i+tid] = sort_tmp0[i+tid];
        }
        for(uint32_t i=0;i<base_cnt1;i+=num_list){
            array[base_cnt0+i+tid] = sort_tmp1[i+tid];
        }
    }
    __syncthreads();
}
// Launch wrapper: one thread per interleaved list.
// NOTE(review): BIT (the key width, 32) is passed as gpuRadixSort's num_list
// (the list/thread count); this only works because BIT == THREADS*BLOCKS == 32
// in this configuration -- verify if the launch configuration changes. The
// `num_elements` parameter is also shadowed by the hard-coded N below.
__global__ void kernel_RadixSort(uint32_t* array, uint32_t* sort_tmp0, uint32_t* sort_tmp1, uint32_t num_elements){
uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x;
gpuRadixSort(array, sort_tmp0, sort_tmp1, (int)BIT, (int)N, idx);
}
// Driver: fill an array with N random bytes, radix-sort it on the GPU, and
// print the array before and after.
int main(int argc, char** argv){
    srand(time(NULL));
    uint32_t* array = (uint32_t*)malloc(N*sizeof(uint32_t));
    for(int i=0;i<N;i++){
        array[i] = rand()%256;
    }
    for(int i=0;i<N;i++){
        printf("%u\n", array[i]);
    }
    // CPU reference (disabled):
    // cpuRadixSort(array, N);
    // GPU path: one scratch buffer per bit value.
    uint32_t* d_array; cudaMalloc((void**)&d_array, N*sizeof(uint32_t));
    uint32_t* d_sort_tmp0; cudaMalloc((void**)&d_sort_tmp0, N*sizeof(uint32_t));
    uint32_t* d_sort_tmp1; cudaMalloc((void**)&d_sort_tmp1, N*sizeof(uint32_t));
    cudaMemcpy(d_array, array, N*sizeof(uint32_t), cudaMemcpyHostToDevice);
    // Fix: the original passed d_sort_tmp0 for BOTH scratch buffers, so the
    // 0-bit and 1-bit partitions overwrote each other and d_sort_tmp1 was
    // allocated but never used.
    kernel_RadixSort <<<BLOCKS, THREADS>>> (d_array, d_sort_tmp0, d_sort_tmp1, (uint32_t)N);
    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(array, d_array, N*sizeof(uint32_t), cudaMemcpyDeviceToHost);
    printf("\n\n");
    for(int i=0;i<N;i++){
        printf("%u\n", array[i]);
    }
    cudaFree(d_array);
    cudaFree(d_sort_tmp0);
    cudaFree(d_sort_tmp1);
    free(array);
    return 0;
}
|
7,942 | #include<stdio.h>
// Fill a[0..n) with the scatter pattern (i*77) mod n (widened to avoid
// 32-bit overflow of i*77).
// Fix: grid-stride loop must start at the GLOBAL thread id. The original
// started at threadIdx.x only, so every block redundantly wrote the same
// elements and indices with (i mod stride) >= blockDim.x were never written.
__global__ void pattern(int *__restrict a, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        a[i] = (long long)i*77%n;
    }
}
// Dependent-gather benchmark, software-pipelined 4-wide: keeps four
// independent b[a[...]] loads in flight per thread and accumulates them so
// the compiler cannot eliminate the work.
// Fix: the loop must start at the GLOBAL thread id; the original started at
// threadIdx.x only, so all blocks walked the same elements.
__global__ void torture(int *__restrict a, int *__restrict b, int *__restrict c, int n) {
    int s1 = 0, s2 = 0, s3 = 0, s4 = 0;
    int gs = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int b1=0,b2=0,b3=0,b4=0;
    for (; i < n-gs*3; i += gs * 4) {
        int aa1 = a[i], aa2 = a[i+gs], aa3 = a[i+gs*2], aa4 = a[i+gs*3];
        // Consume last iteration's loads before issuing this iteration's,
        // keeping the four load chains independent.
        s1 += b1;
        b1 = b[aa1];
        s2 += b2;
        b2 = b[aa2];
        s3 += b3;
        b3 = b[aa3];
        s4 += b4;
        b4 = b[aa4];
    }
    // Drain the pipeline and publish one result per global thread.
    s1 += b1; s2+=b2; s3+=b3; s4+=b4;
    c[threadIdx.x + blockIdx.x * blockDim.x] = s1 + s2 + s3 + s4;
}
// Simple dependent-gather benchmark: s += b[a[i]] over a grid-stride loop,
// result stored per global thread so the work is not dead-code eliminated.
// Fix: start at the GLOBAL thread id -- the original started at threadIdx.x
// only, so blocks duplicated each other's work and most of `a` was unread.
__global__ void torture2(int *__restrict a, int *__restrict b, int *__restrict c, int n) {
    int s = 0;
    int gs = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gs) {
        s += b[a[i]];
    }
    c[threadIdx.x + blockIdx.x * blockDim.x] = s;
}
// Benchmark driver: builds the index pattern, then runs the gather kernel
// ten times. Results (via `d`) are only read to force completion.
int main(){
int *a,*b,*c, d;
int n = 200000000;
// ~800 MB each for a and b; c holds one partial sum per launched thread.
cudaMalloc(&a, sizeof(int) * n);
cudaMalloc(&b, sizeof(int) * n);
cudaMalloc(&c, sizeof(int) * 10000);
pattern<<<10,1024>>>(a, n);
cudaDeviceSynchronize();
// NOTE(review): `b` is never initialized, so the gathered sums are garbage --
// presumably acceptable for a memory-latency benchmark, but confirm.
for (int i = 0; i < 10; i++)
torture2<<<10,1024>>>(a, b, c, n);
// Blocking copy synchronizes with the kernels before exit.
cudaMemcpy(&d, c, sizeof(int), cudaMemcpyDeviceToHost);
}
7,943 | // test cuda launch speed
#include<stdio.h>
#include<cstring>
// Empty kernel: exists solely to measure kernel-launch overhead.
__global__ void emptyKernel() {
}
// Launch-latency benchmark: times 100k empty-kernel launches, repeated 10x.
// Pass "sync" as the first argument to synchronize after every launch
// (round-trip latency) instead of measuring back-to-back enqueue rate.
int main(int argc, char *argv[]) {
bool sync = false;
if (argc >= 2) {
if (strcmp(argv[1], "sync") == 0) {
sync = true;
}
}
if (sync) {
puts("use cudaDeviceSynchronize");
}
else {
puts("no cudaDeviceSynchronize");
}
// 10 repetitions to observe warm-up effects.
for (int j=0;j<10;j++){
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
for (int i = 0; i < 100000; i++) {
emptyKernel<<<1,1>>>();
if (sync) cudaDeviceSynchronize();
}
// Drain the queue so the stop event covers all completed launches.
cudaDeviceSynchronize();
cudaEventRecord(event2, 0);
cudaEventSynchronize(event2);
float timeMs;
cudaEventElapsedTime(&timeMs, event1, event2);
printf("time %fms (%f launches per sec)\n", timeMs, 1e5*1000.0/timeMs);
cudaEventDestroy(event1);
cudaEventDestroy(event2);
}
}
|
7,944 | #include "filter.cuh"
#include <cstdint>
/**********************************************Filter Methods***************************************************/
// Host-side wrapper: forwards the launch configuration and arguments to the
// medKernel median-filter kernel. No error checking or synchronization is
// performed here; callers are responsible for both.
void launch_medKernel(const dim3& blocks, const dim3& threads, char *out, const char *in, size_t dimy, size_t dimx, size_t dimz, size_t pitch) {
medKernel<<<blocks, threads>>>(out, in, dimy, dimx, dimz, pitch);
}
// 3x3x3 median filter over a pitched volume: each thread computes the median
// of a 27-voxel neighborhood via the minmax15..minmax3 sorting-network chain,
// keeping three consecutive z-slices (with a one-pixel halo) in shared memory.
// `pitch` is the row pitch of both `in` and `out`; plane stride is pitch*dimy.
// Fixes vs. the original: the end-of-iteration __syncthreads() sat inside a
// divergent guard (undefined behavior / possible hang), and no barrier
// separated the initial median computation from the loop's first slice
// reload; both barriers are now executed by ALL threads of the block.
// NOTE(review): the initial window {z=0,1,2} is written to output plane 0
// while the loop writes plane i, and the loop replaces slice slots in fixed
// rotation while the minmax chain reads [0],[1],[2] in fixed order -- the
// median itself is order-insensitive, but the plane bookkeeping looks
// inconsistent; verify against a reference output.
__global__ void medKernel(char *out, const char *in, size_t dimy, size_t dimx, size_t dimz, size_t pitch) {
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Shared tile with a one-pixel halo on each side; third index holds three
    // consecutive z-slices.
    __shared__ char im[blockSizeRow + 2][blockSizeCol + 2][3];
    // ---- Load slices 0, 1, 2 (center pixels plus halo) ----
    if ((threadIdx.y + blockIdx.y * blockDim.y < dimy - 2) && (threadIdx.x + blockIdx.x * blockDim.x < dimx - 2)) {
        im[row + 1][col + 1][0] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
        im[row + 1][col + 1][1] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
        im[row + 1][col + 1][2] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        // Edge threads also load the halo columns/rows/corners.
        if (threadIdx.x == 0) {
            im[row + 1][col][0] = *(in + (blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
            im[row + 1][col][1] = *(in + (blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row + 1][col][2] = *(in + (blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if ((threadIdx.x == blockDim.x - 1) || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) {
            im[row + 1][col + 2][0] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
            im[row + 1][col + 2][1] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row + 1][col + 2][2] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if (threadIdx.y == 0) {
            im[row][col + 1][0] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch);
            im[row][col + 1][1] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row][col + 1][2] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if ((threadIdx.y == blockDim.y - 1) || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3)) {
            im[row + 2][col + 1][0] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
            im[row + 2][col + 1][1] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row + 2][col + 1][2] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if (threadIdx.x == 0 && threadIdx.y == 0) {
            im[row][col][0] = *(in + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch);
            im[row][col][1] = *(in + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row][col][2] = *(in + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if ((threadIdx.x == blockDim.x - 1 || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) && threadIdx.y == 0) {
            im[row][col + 2][0] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch);
            im[row][col + 2][1] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row][col + 2][2] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if (threadIdx.x == 0 && (threadIdx.y == blockDim.y - 1 || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3))) {
            im[row + 2][col][0] = *(in + (blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
            im[row + 2][col][1] = *(in + (blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row + 2][col][2] = *(in + (blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
        if ((threadIdx.x == blockDim.x - 1 || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) && (threadIdx.y == blockDim.y - 1 || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3))) {
            im[row + 2][col + 2][0] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch);
            im[row + 2][col + 2][1] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy);
            im[row + 2][col + 2][2] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * 2);
        }
    }
    // All loads complete before anyone reads the tile.
    __syncthreads();
    // ---- Median for the initial window {z=0,1,2} ----
    if ((threadIdx.y + blockIdx.y * blockDim.y < dimy - 2) && (threadIdx.x + blockIdx.x * blockDim.x < dimx - 2)) {
        char a0 = im[row][col][0];
        char a1 = im[row + 1][col][0];
        char a2 = im[row + 2][col][0];
        char a3 = im[row][col + 1][0];
        char a4 = im[row + 1][col + 1][0];
        char a5 = im[row + 2][col + 1][0];
        char a6 = im[row][col + 2][0];
        char a7 = im[row + 1][col + 2][0];
        char a8 = im[row + 2][col + 2][0];
        char a9 = im[row][col][1];
        char a10 = im[row + 1][col][1];
        char a11 = im[row + 2][col][1];
        char a12 = im[row][col + 1][1];
        char a13 = im[row + 1][col + 1][1];
        char a14 = im[row + 2][col + 1][1];
        // Progressively shrinking minmax network: feed one new value per step,
        // the median of all 27 inputs ends up in a13.
        minmax15(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row][col + 2][1];
        minmax14(&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 1][col + 2][1];
        minmax13(&a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 2][col + 2][1];
        minmax12(&a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row][col][2];
        minmax11(&a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 1][col][2];
        minmax10(&a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 2][col][2];
        minmax9(&a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row][col + 1][2];
        minmax8(&a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 1][col + 1][2];
        minmax7(&a8, &a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 2][col + 1][2];
        minmax6(&a9, &a10, &a11, &a12, &a13, &a14);
        a14 = im[row][col + 2][2];
        minmax5(&a10, &a11, &a12, &a13, &a14);
        a14 = im[row + 1][col + 2][2];
        minmax4(&a11, &a12, &a13, &a14);
        a14 = im[row + 2][col + 2][2];
        minmax3(&a12, &a13, &a14);
        *(out + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch) = a13;
    }
    // FIX (added): all reads of the initial window must finish before the
    // first loop iteration overwrites slice slot 0.
    __syncthreads();
    // ---- Slide the window along z, replacing one shared slice per step ----
    for (int i = 1; i < dimz - 1; i++) {
        int frame = (i - 1) % 3;
        if ((threadIdx.y + blockIdx.y * blockDim.y < dimy - 2) && (threadIdx.x + blockIdx.x * blockDim.x < dimx - 2)) {
            im[row + 1][col + 1][frame] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            if (threadIdx.x == 0) {
                im[row + 1][col][frame] = *(in + (blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if ((threadIdx.x == blockDim.x - 1) || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) {
                im[row + 1][col + 2][frame] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if (threadIdx.y == 0) {
                im[row][col + 1][frame] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if ((threadIdx.y == blockDim.y - 1) || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3)) {
                im[row + 2][col + 1][frame] = *(in + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if (threadIdx.x == 0 && threadIdx.y == 0) {
                im[row][col][frame] = *(in + (blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if ((threadIdx.x == blockDim.x - 1 || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) && threadIdx.y == 0) {
                im[row][col + 2][frame] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if (threadIdx.x == 0 && (threadIdx.y == blockDim.y - 1 || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3))) {
                im[row + 2][col][frame] = *(in + (blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
            if ((threadIdx.x == blockDim.x - 1 || (threadIdx.x + blockIdx.x * blockDim.x == dimx - 3)) && (threadIdx.y == blockDim.y - 1 || (threadIdx.y + blockIdx.y * blockDim.y == dimy - 3))) {
                im[row + 2][col + 2][frame] = *(in + (2 + threadIdx.x + blockIdx.x * blockDim.x) + (2 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch * dimy * (1 + i));
            }
        }
        // Loads for this window complete before anyone reads the tile.
        __syncthreads();
        if ((threadIdx.y + blockIdx.y * blockDim.y < dimy - 2) && (threadIdx.x + blockIdx.x * blockDim.x < dimx - 2)) {
            char a0 = im[row][col][0];
            char a1 = im[row + 1][col][0];
            char a2 = im[row + 2][col][0];
            char a3 = im[row][col + 1][0];
            char a4 = im[row + 1][col + 1][0];
            char a5 = im[row + 2][col + 1][0];
            char a6 = im[row][col + 2][0];
            char a7 = im[row + 1][col + 2][0];
            char a8 = im[row + 2][col + 2][0];
            char a9 = im[row][col][1];
            char a10 = im[row + 1][col][1];
            char a11 = im[row + 2][col][1];
            char a12 = im[row][col + 1][1];
            char a13 = im[row + 1][col + 1][1];
            char a14 = im[row + 2][col + 1][1];
            minmax15(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row][col + 2][1];
            minmax14(&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 1][col + 2][1];
            minmax13(&a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 2][col + 2][1];
            minmax12(&a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row][col][2];
            minmax11(&a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 1][col][2];
            minmax10(&a5, &a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 2][col][2];
            minmax9(&a6, &a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row][col + 1][2];
            minmax8(&a7, &a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 1][col + 1][2];
            minmax7(&a8, &a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 2][col + 1][2];
            minmax6(&a9, &a10, &a11, &a12, &a13, &a14);
            a14 = im[row][col + 2][2];
            minmax5(&a10, &a11, &a12, &a13, &a14);
            a14 = im[row + 1][col + 2][2];
            minmax4(&a11, &a12, &a13, &a14);
            a14 = im[row + 2][col + 2][2];
            minmax3(&a12, &a13, &a14);
            *(out + (1 + threadIdx.x + blockIdx.x * blockDim.x) + (1 + threadIdx.y + blockIdx.y * blockDim.y) * pitch + pitch*dimy*i) = a13;
        }
        // FIX (moved): this barrier was inside the divergent guard above,
        // which is undefined behavior when some threads skip it. All threads
        // must finish reading before the next iteration overwrites a slice.
        __syncthreads();
    }
}
//////////////////////////////////////////////////////////////////
//Row convolution
//////////////////////////////////////////////////////////////////
// Host-side wrapper: launches the row-convolution kernel on the default
// stream with the caller-supplied grid/block configuration. No error
// checking is done here; callers should follow up with cudaGetLastError().
void launch_convolutionRows(const dim3& blocks, const dim3& threads, char* output, const char* input, size_t imageW, size_t imageH, size_t pitch) {
convolutionRows<<<blocks, threads>>>(output, input, imageW, imageH, pitch);
}
// Horizontal (row-wise) Gaussian convolution over a pitched char image.
// Each thread filters PIX_PER_THREAD pixels spaced COLS_X apart; the block
// stages its pixels plus KERNEL_RADIUS halo columns in shared memory.
// Assumes blockDim == (COLS_X, ROWS_Y) and gaussK holds 2*KERNEL_RADIUS+1
// taps — TODO confirm against the launch site (not visible here).
// NOTE(review): the branch condition below depends on threadIdx.x, so threads
// of one block can split between the two branches, each of which contains a
// __syncthreads() — a barrier in divergent control flow is undefined
// behavior. Verify that launch geometry makes the branch block-uniform.
__global__ void convolutionRows(char* output, const char* input, size_t imageW, size_t imageH, size_t pitch) {
__shared__ float sData[ROWS_Y][(COLS_X*PIX_PER_THREAD) + (2 * KERNEL_RADIUS)];
const int x = blockIdx.x * (COLS_X*PIX_PER_THREAD) + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
//check to see if we are guaranteed to be within the image bounds
if ((y < imageH) && (x + (COLS_X * PIX_PER_THREAD) < imageW)) {
//load left overlap
// Only lane 0 of each row loads the KERNEL_RADIUS halo columns to the left;
// out-of-image pixels are zero-filled.
if (threadIdx.x == 0) {
for (int n = KERNEL_RADIUS; n >= 1; n--) {
sData[threadIdx.y][threadIdx.x + (-n + KERNEL_RADIUS)] = ((x - n) > 0) ? input[y * pitch + x - n] : 0;
}
}
//load PIX_PER_THREAD values
#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
sData[threadIdx.y][threadIdx.x + (COLS_X * i) + KERNEL_RADIUS] = input[y * pitch + x + (COLS_X * i)];
}
//load in right overlap
// NOTE(review): every thread in the row redundantly stores the same halo
// cells (last writer wins), and the guard tests "+ n < imageW" while the
// element actually read is at offset n - 1 — looks like an off-by-one in
// either the guard or the index; confirm against the column kernel.
for (int n = 1; n <= KERNEL_RADIUS; n++) {
sData[threadIdx.y][(COLS_X * PIX_PER_THREAD) + KERNEL_RADIUS + n - 1] = (blockIdx.x * (COLS_X*PIX_PER_THREAD) + (COLS_X * PIX_PER_THREAD) + n < imageW) ? input[y * pitch + blockIdx.x * (COLS_X*PIX_PER_THREAD) + (COLS_X * PIX_PER_THREAD) + (n - 1)] : 0;
}
__syncthreads();
//do the convolution
// Dot product of the Gaussian taps with the staged window; result is
// truncated to char on store.
//#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
float sum = 0;
//#pragma unroll
for (int n = -KERNEL_RADIUS; n <= KERNEL_RADIUS; n++) {
sum += gaussK[KERNEL_RADIUS - n] * (float)sData[threadIdx.y][threadIdx.x + i * COLS_X + n + KERNEL_RADIUS];
}
output[y * pitch + x + (COLS_X * i)] = sum;
}
//we are not guaranteed to be within the x image bounds
}
else if (y < imageH) {
// Right-edge tail path: same staging, but out-of-range pixels are
// zero-padded and out-of-range outputs are skipped.
//load left overlap (should evaluate to input[y*pitch+x-1] unless image is less than COLS_X*PIX_PER_THREAD wide)
if (threadIdx.x == 0) {
for (int n = KERNEL_RADIUS; n >= 1; n--) {
sData[threadIdx.y][threadIdx.x + (-n + KERNEL_RADIUS)] = ((x - n) > 0) ? input[y * pitch + x - n] : 0;
}
}
//load as many values as allowed fill the rest with zeros
//#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
if (x + (COLS_X * i) < imageW) {
sData[threadIdx.y][threadIdx.x + (COLS_X * i) + KERNEL_RADIUS] = input[y * pitch + x + (COLS_X * i)];
}
else {
//load in zeros for the rest of the shared memory
sData[threadIdx.y][threadIdx.x + (COLS_X * i) + KERNEL_RADIUS] = 0;
}
}
//load in right overlap
for (int n = 1; n <= KERNEL_RADIUS; n++) {
sData[threadIdx.y][(COLS_X * PIX_PER_THREAD) + KERNEL_RADIUS + n - 1] = (blockIdx.x * (COLS_X*PIX_PER_THREAD) + (COLS_X * PIX_PER_THREAD) + n < imageW) ? input[y * pitch + blockIdx.x * (COLS_X*PIX_PER_THREAD) + (COLS_X * PIX_PER_THREAD) + (n - 1)] : 0;
}
__syncthreads();
//do the convolution
//#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
float sum = 0;
if (x + (COLS_X * i) < imageW) {
//#pragma unroll
for (int n = -KERNEL_RADIUS; n <= KERNEL_RADIUS; n++) {
sum += gaussK[KERNEL_RADIUS - n] * (float)sData[threadIdx.y][threadIdx.x + i * COLS_X + n + KERNEL_RADIUS];
}
output[y * pitch + x + (COLS_X * i)] = sum;
}
else {
// Past the image edge: nothing further to write on this row segment.
break;
}
}
}
}
//////////////////////////////////////////////////////////////////
//Column convolution
//////////////////////////////////////////////////////////////////
// Host-side wrapper: launches the column-convolution kernel on the default
// stream. The result is written into slice zLoc of the 3-D surface rather
// than a linear buffer. No error checking is performed here.
void launch_convolutionCols(const dim3& blocks, const dim3& threads, cudaSurfaceObject_t surf, char* input, size_t imageW, size_t imageH, size_t pitch, size_t zLoc) {
convolutionCols<<<blocks, threads>>>(surf, input, imageW, imageH, pitch, zLoc);
}
// Vertical (column-wise) Gaussian convolution; transposed counterpart of
// convolutionRows. Each thread filters PIX_PER_THREAD pixels spaced COLS_Y
// apart in y; the block stages its pixels plus KERNEL_RADIUS halo rows in
// shared memory and writes results into slice zLoc of a 3-D surface.
// Assumes blockDim == (ROWS_X, COLS_Y) — TODO confirm at the launch site.
// NOTE(review): the branch condition depends on threadIdx.y, so a block can
// diverge between the two branches, each containing a __syncthreads() —
// undefined behavior unless launch geometry keeps the branch block-uniform.
__global__ void convolutionCols(cudaSurfaceObject_t surf, char* input, size_t imageW, size_t imageH, size_t pitch, size_t zLoc) {
__shared__ float sData[COLS_Y*PIX_PER_THREAD + 2 * KERNEL_RADIUS][ROWS_X];
//const uint32_t zLoc = z;// bscanLocations[z];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * (COLS_Y*PIX_PER_THREAD) + threadIdx.y;
//check to see if we are guaranteed to be within the image bounds
if ((x < imageW) && (y + (COLS_Y * PIX_PER_THREAD) < imageH)) {
//load top overlap
// Only row 0 of the block loads the KERNEL_RADIUS halo rows above;
// out-of-image pixels are zero-filled.
if (threadIdx.y == 0) {
for (int n = KERNEL_RADIUS; n >= 1; n--) {
sData[threadIdx.y + (-n + KERNEL_RADIUS)][threadIdx.x] = ((y - n) > 0) ? input[(y - n) * pitch + x] : 0;
}
}
//load PIX_PER_THREAD values
#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
sData[threadIdx.y + (COLS_Y * i) + KERNEL_RADIUS][threadIdx.x] = input[(y + (COLS_Y * i)) * pitch + x];
}
//load in bottom overlap
// NOTE(review): guard tests "+ n < imageH" while the row actually read is
// at offset n - 1 — same apparent off-by-one as the row kernel; confirm.
for (int n = 1; n <= KERNEL_RADIUS; n++) {
sData[(COLS_Y * PIX_PER_THREAD) + KERNEL_RADIUS + n - 1][threadIdx.x] = (blockIdx.y * (COLS_Y*PIX_PER_THREAD) + (COLS_Y * PIX_PER_THREAD) + n < imageH) ? input[(blockIdx.y * (COLS_Y*PIX_PER_THREAD) + (COLS_Y * PIX_PER_THREAD) + (n - 1)) * pitch + x] : 0;
}
__syncthreads();
//do the convolution
#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
float sum = 0;
#pragma unroll
for (int n = -KERNEL_RADIUS; n <= KERNEL_RADIUS; n++) {
sum += gaussK[KERNEL_RADIUS - n] * (float)sData[threadIdx.y + i * COLS_Y + n + KERNEL_RADIUS][threadIdx.x];
}
//output[(y + (COLS_Y * i)) * pitch + x] = sum;
//write out the the texture
//float tempVol = 255 * (sum - 40) / (110 - 40); //works for logged values
//if (tempVol < 0) {
// tempVol = 0.0;
//}
//if (tempVol > 255) {
// tempVol = 255.;
//}
////why does writing out tempvol not work?
//output[(y + (COLS_Y * i)) * pitch + x] = tempVol;
// Truncate the filtered value to a byte and store it in the surface slice.
surf3Dwrite((unsigned char)sum, surf, x, (y + (COLS_Y * i)), zLoc);
}
//we are not guaranteed to be within the x image bounds
}
else if (x < imageW) {
// Bottom-edge tail path: zero-pad out-of-range pixels and skip
// out-of-range writes.
//load left overlap (should evaluate to input[(y-1)*pitch+x] unless image is less than COLS_Y*PIX_PER_THREAD high)
if (threadIdx.y == 0) {
for (int n = KERNEL_RADIUS; n >= 1; n--) {
sData[threadIdx.y + (-n + KERNEL_RADIUS)][threadIdx.x] = ((y - n) > 0) ? input[(y - n) * pitch + x] : 0;
}
}
//load as many values as allowed fill the rest with zeros
#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
if (y + (COLS_Y * i) < imageH) {
sData[threadIdx.y + (COLS_Y * i) + KERNEL_RADIUS][threadIdx.x] = input[(y + (COLS_Y * i)) * pitch + x];
}
else {
//load in zeros for the rest of the shared memory
sData[threadIdx.y + (COLS_Y * i) + KERNEL_RADIUS][threadIdx.x] = 0;
}
}
//load in bottom overlap
for (int n = 1; n <= KERNEL_RADIUS; n++) {
sData[(COLS_Y * PIX_PER_THREAD) + KERNEL_RADIUS + n - 1][threadIdx.x] = (blockIdx.y * (COLS_Y*PIX_PER_THREAD) + (COLS_Y * PIX_PER_THREAD) + n < imageH) ? input[(blockIdx.y * (COLS_Y*PIX_PER_THREAD) + (COLS_Y * PIX_PER_THREAD) + (n - 1)) * pitch + x] : 0;
}
__syncthreads();
//do the convolution
#pragma unroll
for (int i = 0; i < PIX_PER_THREAD; i++) {
float sum = 0;
if (y + (COLS_Y * i) < imageH) {
#pragma unroll
for (int n = -KERNEL_RADIUS; n <= KERNEL_RADIUS; n++) {
sum += gaussK[KERNEL_RADIUS - n] * (float)sData[threadIdx.y + i * COLS_Y + n + KERNEL_RADIUS][threadIdx.x];
}
//write out to the texture
/* float tempVol = 255 * (sum - 40) / (110 - 40);
if (tempVol < 0) {
tempVol = 0.0;
}
if (tempVol > 255) {
tempVol = 255.;
}*/
//output[(y + (COLS_Y * i)) * pitch + x] = sum;
surf3Dwrite((unsigned char)sum, surf, x, (y + (COLS_Y * i)), zLoc);
}
else {
// Past the image bottom: nothing further to write in this column.
break;
}
}
}
}
|
7,945 | #include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//8
__device__ double C[2][2][2];
// Linearize a (a, b, c) coordinate of a 2x2x2 layout: a varies slowest
// (stride 4), b has stride 2, c has stride 1. Equivalent to 4*a + 2*b + c.
__device__ int index (int a, int b, int c){
    return c + 2 * (b + 2 * a);
}
// Copy the device-global 2x2x2 array C into the linear output buffer H,
// one element per thread.
// NOTE(review): the flattening in index() uses strides 4 and 2, which only
// matches a single 2x2x2 thread block (N == 2) — confirm the launch config.
__global__ void foo(double *H) {
int idx = index (threadIdx.x,threadIdx.y,threadIdx.z);
H[idx] = C[threadIdx.x][threadIdx.y][threadIdx.z];
}
|
7,946 | #include "includes.h"
/*****************************************************************************/
// nvcc -O1 -o bpsw bpsw.cu -lrt -lm
// Assertion to check for errors
/**
 * Lucas sequence step of a BPSW-style probable-prime test.
 * One thread per candidate: thread (bx, tx) tests nArray[bx*TILE_WIDTH + tx]
 * with discriminant dArray[...] and writes 1 into rArray[...] when the Lucas
 * U-value is 0 (probable prime), else 0.
 *
 * Fix: the bit length of t was computed with __clz(t), which implicitly
 * truncates the 64-bit t to 32 bits and yields a wrong length (and thus a
 * wrong result) whenever (n+1)/2 exceeds 32 bits. __clzll operates on the
 * full 64-bit value.
 */
__global__ void kernel_lucas(long* nArray, long* dArray, int* rArray, long len) {
    int bx = blockIdx.x; // block index
    int tx = threadIdx.x; // thread index within the block
    int i, length;
    long long d, n;
    long long q, q2, u, u2, uold, v, v2, t;
    // Global index of the candidate this thread is responsible for.
    long memIndex = bx*TILE_WIDTH + tx;
    if (memIndex < len) //out of bounds checking - some threads will be doing nothing
    {
        d = (long long) dArray[memIndex];
        n = (long long) nArray[memIndex];
        q = (1 - d) / 4;
        u = 0;
        v = 2;
        u2 = 1;
        v2 = 1;
        q2 = 2 * q;
        t = (n + 1) / 2; //theta
        // Bit length of t; __clzll handles the full 64-bit value
        // (e.g. bit length of 0b00010010 is 5).
        length = 64 - __clzll(t);
        // Process the bits of t from least to most significant,
        // doubling the (u2, v2) pair and conditionally combining.
        for (i = 0; i < length; i++)
        {
            u2 = (u2 * v2) % n;
            v2 = (v2 * v2 - q2) % n;
            if (t & 1) //current bit is set
            {
                uold = u;
                u = (u2 * v) + (u * v2);
                u = (u % 2 == 1) ? u + n : u; // make even before halving
                u = (u / 2) % n;
                v = (v2 * v) + (u2 * uold * d);
                v = (v % 2 == 1) ? v + n : v;
                v = (v / 2) % n;
            }
            q = (q*q) % n;
            q2 = q + q;
            t = t >> 1;
        }
    }
    // No shared memory is used, so this barrier is not strictly required;
    // kept to preserve the original execution shape.
    __syncthreads();
    if (memIndex < len)
        rArray[memIndex] = (u == 0);
}
7,947 | // ***************************************************************************
// Assignment #3
// Name: Yujin Yoshimura
// Parallel Programming Date: March 12, 2020
// ***************************************************************************
// References:
//
// NVIDIA CUDA Toolkit Documentation
// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications
//
// NVIDIA Developer Blog
// https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
// https://devblogs.nvidia.com/cuda-pro-tip-the-fast-way-to-query-device-properties/
//
// Stack Overflow
// https://stackoverflow.com/questions/22520209/programmatically-retrieve-maximum-number-of-blocks-per-multiprocessor
// ***************************************************************************
// This sequential program queries device properties.
//
// For Turing, use the script on the same directory to compile and run.
//
// For TACC Maverick 2, use the script to run this code:
// sbatch YujinYoshimura.script
//
// Otherwise, use TACC Maverick 2 command to compile:
// nvcc YujinYoshimura.cu -o YujinYoshimura_Exe
// TACC Maverick 2 command to run executable:
// ./YujinYoshimura_Exe
// ***************************************************************************
#include <cuda.h>
#include <stdio.h>
// ***************************************************************************
// Function Name: main
// Parameters: int, char**
// Return: int
// Description: Main function of the program.
// ***************************************************************************
int main(int argc, char **argv) {
    int nDevices, blocks, version;
    // A failed query (e.g. no driver present) previously left nDevices
    // uninitialized and the loop iterated over garbage; bail out instead.
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(err));
            return EXIT_FAILURE;
        }
        err = cudaDeviceGetAttribute(&version, cudaDevAttrComputeCapabilityMajor, i);
        if (err != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(err));
            return EXIT_FAILURE;
        }
        // Max resident blocks per SM by compute capability major version,
        // per "Feature Support per Compute Capability" in the CUDA guide.
        if (version < 3) {
            blocks = 8;
        } else if (version < 5) {
            blocks = 16;
        } else {
            blocks = 32;
        }
        printf("=============================================================\n");
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Size of shared memory per block: %zu\n", prop.sharedMemPerBlock);
        printf(" Number of registers per block: %d\n", prop.regsPerBlock);
        printf(" The corresponding warp size: %d\n", prop.warpSize);
        printf(" The maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
        printf(" The maximum number of threads that we can have\n");
        printf(" for a 3D layout: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf(" The maximum grid size: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf(" Max number of blocks per streaming multiprocessor: %d \n", blocks);
    }
    printf("=============================================================\n");
    return EXIT_SUCCESS;
}
|
7,948 | #include <stdio.h>
#include <cuda.h>
#define N 10
// Write the square of each thread's index into its slot of a.
// Expects a one-dimensional launch with at least as many threads as a has
// elements covered (one element per thread, no bounds guard).
__global__ void fun(int *a) {
    const int tid = threadIdx.x;
    a[tid] = tid * tid;
}
int main() {
    int a[N], *da;
    int i;
    // Check the allocation: on failure da is garbage and the kernel/copy
    // would fault silently.
    cudaError_t err = cudaMalloc(&da, N * sizeof(int));
    if (err != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    fun<<<1, N>>>(da);
    // cudaMemcpy synchronizes with the kernel and surfaces any launch error.
    err = cudaMemcpy(a, da, N * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("cudaMemcpy failed: %s\n", cudaGetErrorString(err));
        cudaFree(da);
        return 1;
    }
    for (i = 0; i < N; ++i)
        printf("%d\n", a[i]);
    // Release the device buffer (the original leaked it).
    cudaFree(da);
    return 0;
}
|
7,949 | #include <bits/stdc++.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/find.h>
#include <thrust/generate.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/adjacent_difference.h>
#define REP(i,s,n) for(int (i)=s; (i)<(int)(n);(i)++)
#define RIT(it,c) for(__typeof(c.begin()) it = c.begin();it!=c.end();it++)
#define ALL(x) x.begin(), x.end()
#define SZ(x) (int)(x).size()
#define MSET(m,v) memset(m,v,sizeof(m))
using namespace std;
typedef vector<int> vi;
typedef vector<long> vl;
typedef vector<bool> vb;
typedef vector<double> vd;
typedef pair<int,int> ii;
typedef pair<long, long> ll;
typedef unordered_set<int> ui;
// CPU baseline micro-benchmarks over N-element vectors; each method times
// one STL operation with clock() and prints the elapsed time plus the first
// few results for a sanity check. Counterpart of ThrustRunTime below.
class SerialRunTime{
int N; // number of elements processed by every benchmark
public:
SerialRunTime(int n):N(n){};
// Fill a vector with 0..N-1 and time the loop.
void seqRun(){
vector<int> Integers(N);
cout<<"\n==================================\n";
clock_t t_start = clock();
for(int i=0;i<N;++i) Integers[i] = i;
clock_t t_end = clock();
cout<<"Sequence Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
for(int i=0;i<10;++i) cout<<Integers[i]<<' ';
cout<<"\n==================================\n";
}
// Fill a vector with rand()%mod values and time std::generate.
void genRun(){
int mod = 1E6;
vector<int> Integers(N);
cout<<"\n==================================\n";
clock_t t_start = clock();
generate(Integers.begin(), Integers.end(), [&mod](){return rand()%mod;});
clock_t t_end = clock();
cout<<"Random Number Generation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
for(int i=0;i<10;++i) cout<<Integers[i]<<' ';
cout<<"\n==================================\n";
}
// Time a unary std::transform computing 2*x mod 1e6 over 0..N-1.
void unaryRun(){
int mod = 1E6;
vector<int> Integers(N),ans(N);
for(int i=0;i<N;++i) Integers[i] = i;
cout<<"\n==================================\n";
clock_t t_start = clock();
transform(Integers.begin(), Integers.end(), ans.begin(), [&mod](int x){return 2*x%mod;});
clock_t t_end = clock();
cout<<"Unary Operation transformation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
for(int i=0;i<10;++i) cout<<ans[i]<<' ';
cout<<"\n==================================\n";
}
// Time a binary std::transform: C[i] = A[i] % B[i] with A = 0..N-1, B = 5.
void binaryRun(){
vector<int> A(N),B(N),C(N);
for(int i=0;i<N;++i) A[i] = i,B[i] = 5;
cout<<"\n==================================\n";
clock_t t_start = clock();
transform(A.begin(), A.end(), B.begin(), C.begin(), modulus<int>());
clock_t t_end = clock();
cout<<"Binary Operation transformation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
for(int i=0;i<10;++i) cout<<C[i]<<' ';
cout<<"\n==================================\n";
}
// Time std::adjacent_difference over 0..N-1.
void diffRun(){
vector<int> A(N),B(N);
for(int i=0;i<N;++i) A[i] = i;
cout<<"\n==================================\n";
clock_t t_start = clock();
adjacent_difference(A.begin(), A.end(), B.begin());
clock_t t_end = clock();
cout<<"Adjacent difference Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
for(int i=0;i<10;++i) cout<<B[i]<<' ';
cout<<"\n==================================\n";
}
};
class SerialRunTimeTest{
SerialRunTime se;
public:
SerialRunTimeTest(int n):se(n){}
void run(){
se.seqRun();
se.genRun();
se.unaryRun();
se.binaryRun();
se.diffRun();
}
};
// GPU micro-benchmarks mirroring SerialRunTime, implemented with Thrust
// device algorithms. Each method times one device operation with clock()
// (wall-clock of the host call, which includes any implicit sync) and copies
// results back to print a sanity check.
class ThrustRunTime{
int N; // number of elements processed by every benchmark
public:
thrust::host_vector<int> A; // host-side staging buffer for result printing
thrust::device_vector<int> dA; // device-side output buffer reused by benchmarks
// Functor generating uniform random ints.
// NOTE(review): thrust::uniform_int_distribution(a, b) is inclusive, so
// g(l, u+1) spans [l, u+1] — looks like an off-by-one if [l, u) was meant.
// Also each copy of the functor carries its own default-seeded rng, so the
// generated sequence is deterministic; verify this is intended.
struct getRand{
private:
thrust::uniform_int_distribution<int> g;
thrust::minstd_rand rng;
public:
getRand(int l, int u):g(l, u+1){}
__host__ __device__
int operator ()(){ return g(rng);}
};
// Unary functor: doubles x modulo M.
struct Dop{
int M; // modulus applied after doubling
Dop(int m):M(m){}
__host__ __device__
int operator ()(int x){ return (2*x)%M;}
};
// Allocate the N-element host and device buffers up front.
ThrustRunTime(int n):N(n){
A.resize(N);
dA.resize(N);
}
// Time thrust::sequence filling dA with 0..N-1.
void seqRun(){
cout<<"\n==================================\n";
clock_t t_start = clock();
thrust::sequence(dA.begin(), dA.end());
clock_t t_end = clock();
cout<<"Sequence Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
thrust::copy(dA.begin(), dA.end(), A.begin());
for(int i=0;i<10;++i) cout<<A[i]<<' ';
cout<<"\n==================================\n";
}
// Time thrust::generate with the getRand functor on the device.
void genRun(){
int mod = 1E6;
getRand g(0, mod);
thrust::device_vector<int> dB(N);
cout<<"\n==================================\n";
clock_t t_start = clock();
thrust::generate(dB.begin(), dB.end(), g);
clock_t t_end = clock();
cout<<"Random Number Generation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
thrust::copy(dB.begin(), dB.end(), A.begin());
for(int i=0;i<N;i+=N/10) cout<<A[i]<<' ';
cout<<"\n==================================\n";
}
// Time a unary thrust::transform (2*x mod 1e6) over 0..N-1.
void unaryRun(){
int mod = 1E6;
Dop unary(mod);
thrust::device_vector<int> dB(N);
thrust::sequence(dB.begin(), dB.end());
cout<<"\n==================================\n";
clock_t t_start = clock();
thrust::transform(dB.begin(), dB.end(), dA.begin(), unary);
clock_t t_end = clock();
cout<<"Unary Operation transformation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
thrust::copy(dA.begin(), dA.end(), A.begin());
for(int i=0;i<10;++i) cout<<A[i]<<' ';
cout<<"\n==================================\n";
}
// Time a binary thrust::transform: dA[i] = dB[i] % dC[i] with dB = 0..N-1, dC = 5.
void binaryRun(){
thrust::device_vector<int> dB(N),dC(N);
thrust::sequence(dB.begin(), dB.end());
thrust::fill(dC.begin(), dC.end(), 5);
cout<<"\n==================================\n";
clock_t t_start = clock();
thrust::transform(dB.begin(), dB.end(), dC.begin(), dA.begin(), thrust::modulus<int>());
clock_t t_end = clock();
cout<<"Binary Operation transformation Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
thrust::copy(dA.begin(), dA.end(), A.begin());
for(int i=0;i<10;++i) cout<<A[i]<<' ';
cout<<"\n==================================\n";
}
// Time thrust::adjacent_difference over 0..N-1 on the device.
void diffRun(){
thrust::device_vector<int> dB(N);
thrust::sequence(dB.begin(), dB.end());
cout<<"\n==================================\n";
clock_t t_start = clock();
thrust::adjacent_difference(dB.begin(), dB.end(), dA.begin());
clock_t t_end = clock();
cout<<"Adjacent difference Time Usage: "<<double(t_end-t_start)/CLOCKS_PER_SEC<<" s\nCheck Answer:"<<endl;
thrust::copy(dA.begin(), dA.end(), A.begin());
for(int i=0;i<10;++i) cout<<A[i]<<' ';
cout<<"\n==================================\n";
}
};
// Driver that runs every ThrustRunTime benchmark in a fixed order.
class ThrustRunTimeTest{
ThrustRunTime bench; // the GPU benchmark suite, sized at construction
public:
ThrustRunTimeTest(int n):bench(n){}
// Execute all five device micro-benchmarks back to back.
void run(){
bench.seqRun();
bench.genRun();
bench.unaryRun();
bench.binaryRun();
bench.diffRun();
}
};
// Entry point: runs the CPU suite by default (or when the first argument
// starts with 'S'); any other argument selects the Thrust/GPU suite.
int main(int argc, char *argv[]){
    std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0);
    const int kElems = 1 << 28; // element count for every benchmark
    const bool useSerial = (argc <= 1 || argv[1][0] == 'S');
    if (useSerial) {
        SerialRunTimeTest test(kElems);
        test.run();
    } else {
        ThrustRunTimeTest test(kElems);
        test.run();
    }
    return 0;
}
|
7,950 |
#include <stdio.h>
#include "cuda_runtime.h"
// Number of blocks needed to cover nElem elements at blockSz threads per
// block (ceiling division).
int gridSize(int nElem, int blockSz) {
    int fullBlocks = nElem / blockSz;
    return (nElem % blockSz == 0) ? fullBlocks : fullBlocks + 1;
}
// Recompute the launch configuration for nElem elements at blockSz threads
// per block and report it. block and grid are taken by reference so the
// caller's dim3 objects actually receive the new configuration — the
// original pass-by-value signature silently discarded the update.
void gridResize(dim3& block, dim3& grid, int nElem, int blockSz){
    block.x = blockSz;
    grid.x = gridSize(nElem, block.x);
    printf("Blocks: %d, grids: %d\n", block.x, grid.x);
}
// Demo: compute and print launch configurations for a fixed element count
// across a sweep of halving block sizes.
int main() {
    // total number of data elements
    const int nElem = 1024;
    // initial launch configuration
    dim3 block(1024);
    dim3 grid(gridSize(nElem, block.x));
    printf("Initial block size: %d, grid size: %d\n", block.x, grid.x);
    // sweep over progressively smaller block sizes
    for (int blockSz = nElem; blockSz > 1; blockSz /= 2) {
        gridResize(block, grid, nElem, blockSz);
    }
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
7,951 | #include "includes.h"
// Zero-initialize a rows x columns int8 matrix stored with leading
// dimension ldm (column-major addressing: element (r, c) at r + c*ldm).
// One thread per element; threads outside the matrix exit early.
__global__ void InitializeMatrix_kernel( int8_t *matrix, int ldm, int rows, int columns) {
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns)
        return;
    const int offset = r + c * ldm;
    matrix[offset] = 0;
    // The two corner bands below re-write 0x0, which is the same value the
    // whole matrix already received; kept to mirror the original exactly.
    if (r >= rows - 2 && c < 1) {
        matrix[offset] = 0x0;
    }
    if (r < 1 && c >= columns - 2) {
        matrix[offset] = 0x0;
    }
}
7,952 | #include "includes.h"
// Scale out[s .. s+nv) in place by fact using a grid-stride loop, so any
// launch configuration covers the whole range.
__global__ void ker_gkylCartFieldScale(unsigned s, unsigned nv, double fact, double *out)
{
    const int stride = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x + s;
    while (idx < s + nv) {
        out[idx] *= fact;
        idx += stride;
    }
}
7,953 | // Ryan Jacoby
// Compiled on GNU/Linux with nvcc v10.2.89
#include<iostream>
__global__ void output();
// Launch one block of 32 threads that each print their coordinates, then
// wait for the device so the kernel's printf output is flushed.
int main() {
    output <<<1, 32>>>();
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // The original returned 1 unconditionally, which signals failure to the
    // shell; return the conventional 0 on success.
    return 0;
}
// Each thread prints its block and thread index (device-side printf;
// output order across threads is unspecified).
__global__
void output() {
    printf("Block: %d\tThread: %d\n", blockIdx.x, threadIdx.x);
}
7,954 | /**
* Inaki Urruta Sanchez
* Pedro Alexandre Simoes dos Reis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
/**
* Initialize matrix M with dimension dim with n in all matrix's entries
*/
// Fill every entry of the dim x dim matrix M with the constant value n.
void initWith(float* M, int dim, float n) {
    const int total = dim * dim;
    for (int idx = 0; idx < total; idx++) {
        M[idx] = n;
    }
}
/**
* Initialize matrix M with dimension dim with a random number between 0 and 9 in all matrix's entries
*/
// Fill the dim x dim matrix M with pseudo-random digits in [0, 9], drawn
// from rand() in row-major order (caller controls the seed via srand).
void init(float* M, int dim) {
    for (int r = 0; r < dim; r++) {
        for (int c = 0; c < dim; c++) {
            M[r * dim + c] = (float)(rand() % 10);
        }
    }
}
/**
* Multiplies matrix left by the matrix right, both with dimensions dim and stores the result in matrix res
* Operation is done in GPU
*/
// Tiled dim x dim matrix multiply: res = left * right, using BLOCK_SIZE x
// BLOCK_SIZE shared-memory tiles. Assumes blockDim == (BLOCK_SIZE,
// BLOCK_SIZE) and that gridDim.x covers ceil(dim/BLOCK_SIZE) tiles — TODO
// confirm against the launch site, since the tile loop runs over gridDim.x.
__global__
void matrixMul(float* left, float* right, float* res, int dim) {
int i, j, idx;
float temp = 0;
__shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
// Row i of matrix left
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
// Column j of matrix left
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
// Load left[i][j] to shared mem
// Note: idx equals row * dim + j here, so the guard and the load address
// are the same linear index; out-of-range tiles are padded with zeros.
idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
if (idx >= dim * dim) {
Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
} else {
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
}
// Load right[i][j] to shared mem
// Likewise idx equals i * dim + col for the right-hand tile.
idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
if (idx >= dim * dim) {
Right_shared_t[threadIdx.y][threadIdx.x] = 0;
} else {
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
}
// Synchronize before computation
__syncthreads();
// Accumulate one tile of res from tiles of left and right in shared mem
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
}
// Synchronize
// Second barrier keeps the next iteration from overwriting tiles that
// slower threads are still reading.
__syncthreads();
}
if ((row < dim) && (col < dim)) {
// Store accumulated value to res
res[row * dim + col] = temp;
}
}
/**
* Multiplies matrix A by matrix B, both with dimension dim X dim and stores the result in matrix C with dimension dim X dim
* Operation is done in CPU
*/
// Reference CPU implementation: C = A * B for dim x dim row-major matrices.
// Used to validate the GPU kernel's output.
__host__
void matrixMulCPU(float* A, float* B, float* C, int dim) {
    for (int row = 0; row < dim; row++) {
        for (int col = 0; col < dim; col++) {
            float acc = 0.0;
            for (int k = 0; k < dim; k++) {
                acc += A[row * dim + k] * B[k * dim + col];
            }
            C[row * dim + col] = acc;
        }
    }
}
/**
* Given two matrices A and B, both with dimensions dim X dim, prints in stdout if the result stored in matrix C with dimension dim X dim
* is the same as the result given in matrix C_cpu
*/
// Compare the GPU result C against the CPU reference C_cpu element-wise
// (tolerance 0.001) and print a verdict to stdout. A and B are accepted for
// signature compatibility but are not inspected.
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim) {
    (void)A;
    (void)B;
    for (int i = 0; i < dim; i++) {
        for (int j = 0; j < dim; j++) {
            float diff = C[i * dim + j] - C_cpu[i * dim + j];
            if (abs(diff) > 0.001) {
                printf("ERROR: Incorrect Results!\n");
                return;
            }
        }
    }
    printf("Everything is OK! :D\n");
}
/**
* Returns the current time in milliseconds
* Used to calculate elapsed time
*/
// Wall-clock timestamp in seconds with microsecond resolution, for
// computing elapsed intervals on the host.
double cpuTimer() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
}
// Multiply two random N x N matrices on the GPU and the CPU, time both,
// and verify the GPU result against the CPU reference.
int main(int argc, char** argv) {
    // Seed the host RNG used by init()
    srand(time(0));
    cudaError_t error;
    cudaDeviceProp prop;
    int numDevices = 0;
    error = cudaGetDeviceCount(&numDevices);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Sum available global memory across devices. size_t is required:
    // totalGlobalMem is typically several GB and overflowed the original
    // int accumulator.
    size_t totalMemory = 0;
    for (int i = 0; i < numDevices; i++) {
        error = cudaGetDeviceProperties(&prop, i);
        if (error != cudaSuccess) {
            printf("ERROR: %s\n", cudaGetErrorString(error));
            exit(EXIT_FAILURE);
        }
        totalMemory += prop.totalGlobalMem;
    }
    // Matrix size definition and calculation
    const int N = 10;
    size_t size = N * N * sizeof(float);
    // Four N x N float matrices (A, B, C, C_cpu) live in managed memory.
    // Compare bytes against bytes: the original compared a raw element
    // count (and only counted three matrices) against the byte total.
    size_t allMatrixBytes = 4 * size;
    if (allMatrixBytes > totalMemory) {
        printf("ERROR: not enough device memory for the matrices\n");
        exit(EXIT_FAILURE);
    }
    // Matrix allocation
    float *A, *B, *C, *C_cpu;
    error = cudaMallocManaged(&A, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&B, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaMallocManaged(&C_cpu, size);
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Matrix initialization with random digits
    init(A, N);
    init(B, N);
    // Cuda layout definition
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    // Time the GPU multiply (synchronize so the kernel is included)
    double start = cpuTimer();
    matrixMul<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N);
    cudaDeviceSynchronize();
    double stop = cpuTimer();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // cpuTimer() returns seconds; convert to milliseconds to match the
    // printed unit (the original printed seconds labeled "ms").
    float gpu_milliseconds = (stop - start) * 1000.0;
    printf("Matrix Multiplication @ GPU: %f ms\n", gpu_milliseconds);
    // Time the CPU reference multiply
    double begin = cpuTimer();
    matrixMulCPU(A, B, C_cpu, N);
    double end = cpuTimer();
    float cpu_milliseconds = (end - begin) * 1000.0;
    printf("Matrix Multiplication @ CPU: %f ms\n", cpu_milliseconds);
    checkResult(A, B, C, C_cpu, N);
    // Free memory
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(C_cpu);
    return 0;
}
|
7,955 | /*
Copyright 2020 Equinor ASA
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#define NBLOCKS 1024
#define NTHREADS 256
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved momemnts
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// q=0
f0 = dist[n];
f1 = dist[2*Np+n];
f2 = dist[1*Np+n];
f3 = dist[4*Np+n];
f4 = dist[3*Np+n];
f5 = dist[6*Np+n];
f6 = dist[5*Np+n];
f7 = dist[8*Np+n];
f8 = dist[7*Np+n];
f9 = dist[10*Np+n];
f10 = dist[9*Np+n];
f11 = dist[12*Np+n];
f12 = dist[11*Np+n];
f13 = dist[14*Np+n];
f14 = dist[13*Np+n];
f15 = dist[16*Np+n];
f16 = dist[15*Np+n];
f17 = dist[18*Np+n];
f18 = dist[17*Np+n];
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collison where body force has higher-order terms ----------------------------------------------------------//
// // q=0
// dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collison where body force has NO higher-order terms ----------------------------------------------------------//
// q=0
dist[n] = f0*(1.0-rlx)+ rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
//=========================================================================================
// D3Q19 greyscale (porous-medium) lattice-Boltzmann BGK collision kernel — AA "odd" step.
//
// For each lattice site n in [start, finish) this kernel:
//   1. Gathers the 18 non-rest distributions through `neighborList` (the AA-pattern
//      odd-timestep read: each direction q is fetched from the slot recorded in
//      neighborList[(q-1)*Np + n], instead of the direct layout used on even steps).
//   2. Computes density, pressure, and the velocity corrected for the porous-medium
//      drag using Guo's model (c0/c1 parameters; see Guo's PRE 66, 036304 (2002),
//      cited below for GeoFun).
//   3. Applies a BGK relaxation with a body force that has NO higher-order velocity
//      terms (the full higher-order-force variant is kept, commented out, for
//      reference), and scatters the post-collision values back through the nr*
//      indices — note each result is written to the OPPOSITE direction's slot
//      (e.g. dist[nr2] receives the q=1 update), which realizes the odd-step
//      streaming of the AA access pattern.
//   4. Stores the corrected velocity and pressure for the site.
//
// Arguments:
//   neighborList - per-site, per-direction gather indices (18*Np entries used here)
//   dist         - D3Q19 distributions, SoA layout: dist[q*Np + n]
//   start,finish - half-open range of site indices handled by this launch
//   Np           - number of lattice sites (array pitch)
//   rlx          - BGK relaxation rate for the collision
//   rlx_eff      - effective relaxation rate; defines mu_eff below
//   Gx,Gy,Gz     - user-specified body force (e.g. pressure gradient / gravity)
//   Poros,Perm   - per-voxel porosity and permeability fields
//   Velocity     - output velocity, SoA: [0..2]*Np + n
//   Pressure     - output pressure per site
//
// Launch configuration: 1-D grid/block; each thread strides over S = Np/NBLOCKS/NTHREADS + 1
// chunks so the full [start, finish) range is covered regardless of grid size.
// NBLOCKS/NTHREADS are compile-time macros defined elsewhere in this file.
//=========================================================================================
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double *Pressure){
int n;
// conserved moments
double rho,vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;
//double uu;
// non-conserved moments
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// gather indices for q=1..18; reused below as the scatter targets (opposite slots)
int nr1,nr2,nr3,nr4,nr5,nr6,nr7,nr8,nr9,nr10,nr11,nr12,nr13,nr14,nr15,nr16,nr17,nr18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
// Per-thread strided loop over sites so any grid size covers [start, finish).
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
// ---- Gather the 19 distributions (AA odd step: indirect reads via neighborList) ----
// q=0
f0 = dist[n];
// q=1
nr1 = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
f1 = dist[nr1]; // reading the f1 data into register fq
nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
f2 = dist[nr2]; // reading the f2 data into register fq
// q=3
nr3 = neighborList[n+2*Np]; // neighbor 4
f3 = dist[nr3];
// q = 4
nr4 = neighborList[n+3*Np]; // neighbor 3
f4 = dist[nr4];
// q=5
nr5 = neighborList[n+4*Np];
f5 = dist[nr5];
// q = 6
nr6 = neighborList[n+5*Np];
f6 = dist[nr6];
// q=7
nr7 = neighborList[n+6*Np];
f7 = dist[nr7];
// q = 8
nr8 = neighborList[n+7*Np];
f8 = dist[nr8];
// q=9
nr9 = neighborList[n+8*Np];
f9 = dist[nr9];
// q = 10
nr10 = neighborList[n+9*Np];
f10 = dist[nr10];
// q=11
nr11 = neighborList[n+10*Np];
f11 = dist[nr11];
// q=12
nr12 = neighborList[n+11*Np];
f12 = dist[nr12];
// q=13
nr13 = neighborList[n+12*Np];
f13 = dist[nr13];
// q=14
nr14 = neighborList[n+13*Np];
f14 = dist[nr14];
// q=15
nr15 = neighborList[n+14*Np];
f15 = dist[nr15];
// q=16
nr16 = neighborList[n+15*Np];
f16 = dist[nr16];
// q=17
//fq = dist[18*Np+n];
nr17 = neighborList[n+16*Np];
f17 = dist[nr17];
// q=18
nr18 = neighborList[n+17*Np];
f18 = dist[nr18];
// ---- Guo porous-medium model parameters for this voxel ----
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
// porosity==1.0 is used throughout as an exact flag marking apparent (open) pore
// nodes, where the porous-medium drag is switched off.
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
// ---- Macroscopic moments: density, pressure, and force-corrected velocity ----
rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17;
pressure = rho/porosity/3.0;
// Raw momentum plus half-step body-force correction (the 0.5*porosity*G terms).
vx = (f1-f2+f7-f8+f9-f10+f11-f12+f13-f14)/rho+0.5*porosity*Gx;
vy = (f3-f4+f7-f8-f9+f10+f15-f16+f17-f18)/rho+0.5*porosity*Gy;
vz = (f5-f6+f11-f12-f13+f14+f15-f16-f17+f18)/rho+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
// Implicit velocity correction of Guo's model: u = v/(c0+sqrt(c0^2+c1*|v|)).
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the body force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = -porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx;
Fy = -porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy;
Fz = -porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz;
// Open pore nodes: no drag, only the external force acts.
if (porosity==1.0){
Fx=Gx;
Fy=Gy;
Fz=Gz;
}
//------------------------ BGK collision where body force has higher-order terms ----------------------------------------------------------//
// NOTE(review): this commented-out variant keeps the full Guo forcing terms
// (the (6.*u)/porosity and (9.*(u+u))/porosity pieces). It is retained for
// reference; the active code below drops the higher-order force terms.
// // q=0
// dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// + 0.3333333333333333*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 1
// dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q=2
// dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3. + (6.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 3
// dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 4
// dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. + (6.*uy)/porosity) + Fz*(0. - (3.*uz)/porosity));
//
// // q = 5
// dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(3. + (6.*uz)/porosity));
//
// // q = 6
// dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
// +0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(0. - (3.*uy)/porosity) + Fz*(-3. + (6.*uz)/porosity));
//
// // q = 7
// dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 8
// dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uy))/porosity) + Fy*(-3. - (9.*(-ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 9
// dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3. - (3.*ux)/porosity + (9.*(ux - uy))/porosity) + Fy*(-3. - (9.*(ux - uy))/porosity - (3.*uy)/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 10
// dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uy))/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(-ux + uy))/porosity) +
// Fz*(0. - (3.*uz)/porosity));
//
// // q = 11
// dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(ux + uz))/porosity));
//
// // q = 12
// dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux - uz))/porosity) +
// Fz*(-3. - (9.*(-ux - uz))/porosity - (3.*uz)/porosity));
//
// // q = 13
// dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(3. - (3.*ux)/porosity + (9.*(ux - uz))/porosity) +
// Fz*(-3. - (9.*(ux - uz))/porosity - (3.*uz)/porosity));
//
// // q= 14
// dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(0. - (3.*uy)/porosity) + Fx*(-3. - (3.*ux)/porosity - (9.*(-ux + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-ux + uz))/porosity));
//
// // q = 15
// dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(uy + uz))/porosity));
//
// // q = 16
// dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy - uz))/porosity) +
// Fz*(-3. - (9.*(-uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 17
// dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(3. - (3.*uy)/porosity + (9.*(uy - uz))/porosity) +
// Fz*(-3. - (9.*(uy - uz))/porosity - (3.*uz)/porosity));
//
// // q = 18
// dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
// +0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(0. - (3.*ux)/porosity) + Fy*(-3. - (3.*uy)/porosity - (9.*(-uy + uz))/porosity) +
// Fz*(3. - (3.*uz)/porosity + (9.*(-uy + uz))/porosity));
//----------------------------------------------------------------------------------------------------------------------------------------//
//------------------------ BGK collision where body force has NO higher-order terms ----------------------------------------------------------//
// Post-collision values are scattered to the OPPOSITE direction's slot (dist[nr2]
// gets q=1, dist[nr1] gets q=2, ...) — this is the AA-pattern odd-step streaming.
// Weights: 1/3 (q=0), 1/18 = 0.0555... (face directions), 1/36 = 0.0277... (edges).
// q=0
dist[n] = f0*(1.0-rlx) + rlx*0.3333333333333333*rho*(1. - (1.5*(ux*ux + uy*uy + uz*uz))/porosity);
// q = 1
dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(3.));
// q=2
dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*ux + (4.5*ux*ux)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fx*(-3.));
// q = 3
dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(3.));
// q = 4
dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uy + (4.5*uy*uy)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fy*(-3.));
// q = 5
dist[nr6] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 + 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(3.));
// q = 6
dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1 - 3.*uz + (4.5*uz*uz)/porosity - (1.5*(ux*ux+ uy*uy + uz*uz))/porosity)
+0.05555555555555555*rho*(1. - 0.5*rlx)*(Fz*(-3.));
// q = 7
dist[nr8] = f7*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uy) + (4.5*(ux + uy)*(ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(3.));
// q = 8
dist[nr7] = f8*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uy) + (4.5*(-ux - uy)*(-ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(-3.));
// q = 9
dist[nr10] = f9*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uy) + (4.5*(ux - uy)*(ux - uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fy*(-3.));
// q = 10
dist[nr9] = f10*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uy) + (4.5*(-ux + uy)*(-ux + uy))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fy*(3.));
// q = 11
dist[nr12] = f11*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux + uz) + (4.5*(ux + uz)*(ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(3.));
// q = 12
dist[nr11] = f12*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux - uz) + (4.5*(-ux - uz)*(-ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(-3.));
// q = 13
dist[nr14] = f13*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(ux - uz) + (4.5*(ux - uz)*(ux - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(3.) + Fz*(-3.));
// q= 14
dist[nr13] = f14*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-ux + uz) + (4.5*(-ux + uz)*(-ux + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fx*(-3.) + Fz*(3.));
// q = 15
dist[nr16] = f15*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy + uz) + (4.5*(uy + uz)*(uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(3.));
// q = 16
dist[nr15] = f16*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy - uz) + (4.5*(-uy - uz)*(-uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(-3.));
// q = 17
dist[nr18] = f17*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(uy - uz) + (4.5*(uy - uz)*(uy - uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(3.) + Fz*(-3.));
// q = 18
dist[nr17] = f18*(1.0-rlx) + rlx*0.027777777777777776*rho*(1 + 3.*(-uy + uz) + (4.5*(-uy + uz)*(-uy + uz))/porosity - (1.5*(ux*ux + uy*uy + uz*uz))/porosity)
+0.027777777777777776*rho*(1. - 0.5*rlx)*(Fy*(-3.) + Fz*(3.));
//-------------------------------------------------------------------------------------------------------------------------------------------//
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity, double Den, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved momemnts
double jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
//........................................................................
// q=0
fq = dist[n];
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
pressure = fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// f2 = dist[10*Np+n];
fq = dist[1*Np+n];
pressure += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
pressure += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
pressure += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/Den+0.5*porosity*Gx;
vy = jy/Den+0.5*porosity*Gy;
vz = jz/Den+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=Den*Gx;
Fy=Den*Gy;
Fz=Den*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);
// //..............carry out relaxation process...............................................
// m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
// + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
// m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
// + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
// jx = jx + Fx;
// m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
// jy = jy + Fy;
// m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
// jz = jz + Fz;
// m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
// + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
// m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
// + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
// m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
// + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
// m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
// + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
// m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
// + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
// m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
// + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
// m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
// + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
// m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
// + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
// m16 = m16 + rlx_setB*( - m16);
// m17 = m17 + rlx_setB*( - m17);
// m18 = m18 + rlx_setB*( - m18);
// //.......................................................................................................
//-------------------- IMRT collison where body force has NO higher-order terms -------------//
//..............carry out relaxation process...............................................
m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// q=0
fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
//------------------------------------------------------------------------------
// AA-"odd" timestep D3Q19 greyscale LBM collision kernel, incompressible-MRT
// (IMRT) variant, with Guo-style porous-media drag (linear Darcy + nonlinear
// Forchheimer terms).
//
// Streaming pattern: on the odd timestep of the AA scheme, distributions are
// *read* through neighborList (pull), collided, and *written back* through
// neighborList into the opposite-direction slots (push) -- see the inverse
// transformation section below.  The companion AA-even kernel uses direct
// array offsets instead.
//
// Launch assumptions: 1-D grid; NBLOCKS and NTHREADS are compile-time launch
// constants defined elsewhere in this file -- TODO(review) confirm the actual
// launch configuration matches them, since S below is derived from these.
//
// Arguments:
//   neighborList - streaming neighbor indices, 18 slabs of Np (one per
//                  non-rest direction q=1..18)
//   dist         - D3Q19 distributions, 19 slabs of Np (structure-of-arrays)
//   start,finish - half-open site range [start,finish) handled by this launch
//   Np           - number of lattice sites (slab stride)
//   rlx          - primary MRT relaxation rate (rlx_setA)
//   rlx_eff      - relaxation rate defining the effective viscosity mu_eff
//   Gx,Gy,Gz     - user-specified body-force components
//   Poros,Perm   - per-voxel porosity and permeability fields
//   Velocity     - output velocity, 3 slabs of Np
//   Den          - constant density of the incompressible model
//   Pressure     - output pressure, Np entries
//------------------------------------------------------------------------------
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
		double *Poros,double *Perm, double *Velocity,double Den, double *Pressure){
	int n, nread;
	double vx,vy,vz,v_mag;       // raw (pre-drag) velocity and its magnitude
	double ux,uy,uz,u_mag;       // drag-corrected velocity (Guo's implicit scheme) and magnitude
	double pressure;//defined for this incompressible model
	// conserved moments
	double jx,jy,jz;
	// non-conserved moments (D3Q19 MRT basis; m1=energy, m2=energy^2, m4/m6/m8=heat flux,
	// m9..m15=stress-related, m16..m18=third-order)
	double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
	double fq;
	//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
	double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
	double porosity;
	double perm;//voxel permeability
	double c0, c1; //Guo's model parameters
	double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
	double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
	double rlx_setA = rlx;
	// second set of relaxation rates, tied to rlx_setA by s_B = 8(2-s_A)/(8-s_A)
	double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
	// mrt_V1..mrt_V12: fixed coefficients of the inverse D3Q19 moment transform
	// used in the "inverse transformation" section below
	const double mrt_V1=0.05263157894736842;
	const double mrt_V2=0.012531328320802;
	const double mrt_V3=0.04761904761904762;
	const double mrt_V4=0.004594820384294068;
	const double mrt_V5=0.01587301587301587;
	const double mrt_V6=0.0555555555555555555555555;
	const double mrt_V7=0.02777777777777778;
	const double mrt_V8=0.08333333333333333;
	const double mrt_V9=0.003341687552213868;
	const double mrt_V10=0.003968253968253968;
	const double mrt_V11=0.01388888888888889;
	const double mrt_V12=0.04166666666666666;

	// Each thread handles up to S sites in a blocked mapping over [start,finish).
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		//........Get 1-D index for this thread....................
		n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if ( n<finish ){
			//........................................................................
			//					READ THE DISTRIBUTIONS
			//		(read from opposite array due to previous swap operation)
			// The m* accumulators below are the hand-unrolled product M*f of the
			// D3Q19 moment matrix with the distribution vector; the exact sequence
			// of += / -= statements encodes the matrix rows.
			//........................................................................
			// q=0
			fq = dist[n];
			m1  = -30.0*fq;
			m2  = 12.0*fq;

			// q=1
			nread = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist)
			fq = dist[nread]; // reading the f1 data into register fq
			// NOTE: the pressure sum starts at q=1, so f0 is deliberately excluded;
			// this matches the incompressible-model rescaling applied below
			// (pressure = 0.5/porosity*(...)).
			pressure = fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jx = fq;
			m4 = -4.0*fq;
			m9 = 2.0*fq;
			m10 = -4.0*fq;

			// q=2
			nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
			fq = dist[nread];  // reading the f2 data into register fq
			pressure += fq;
			m1 -= 11.0*(fq);
			m2 -= 4.0*(fq);
			jx -= fq;
			m4 += 4.0*(fq);
			m9 += 2.0*(fq);
			m10 -= 4.0*(fq);

			// q=3
			nread = neighborList[n+2*Np]; // neighbor 4
			fq = dist[nread];
			pressure += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy = fq;
			m6 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 = fq;
			m12 = -2.0*fq;

			// q = 4
			nread = neighborList[n+3*Np]; // neighbor 3
			fq = dist[nread];
			pressure += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy -= fq;
			m6 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 += fq;
			m12 -= 2.0*fq;

			// q=5
			nread = neighborList[n+4*Np];
			fq = dist[nread];
			pressure += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz = fq;
			m8 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;

			// q = 6
			nread = neighborList[n+5*Np];
			fq = dist[nread];
			pressure += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz -= fq;
			m8 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;

			// q=7
			nread = neighborList[n+6*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy += fq;
			m6 += fq;
			m9  += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 = fq;
			m16 = fq;
			m17 = -fq;

			// q = 8
			nread = neighborList[n+7*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 += fq;
			m16 -= fq;
			m17 += fq;

			// q=9
			nread = neighborList[n+8*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 += fq;
			m17 += fq;

			// q = 10
			nread = neighborList[n+9*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy += fq;
			m6 += fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 -= fq;
			m17 -= fq;

			// q=11
			nread = neighborList[n+10*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 = fq;
			m16 -= fq;
			m18 = fq;

			// q=12
			nread = neighborList[n+11*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 += fq;
			m16 += fq;
			m18 -= fq;

			// q=13
			nread = neighborList[n+12*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 -= fq;
			m18 -= fq;

			// q=14
			nread = neighborList[n+13*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 += fq;
			m18 += fq;

			// q=15
			nread = neighborList[n+14*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 = fq;
			m17 += fq;
			m18 -= fq;

			// q=16
			nread = neighborList[n+15*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 += fq;
			m17 -= fq;
			m18 += fq;

			// q=17
			nread = neighborList[n+16*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 += fq;
			m18 += fq;

			// q=18
			nread = neighborList[n+17*Np];
			fq = dist[nread];
			pressure += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 -= fq;
			m18 -= fq;
			//---------------------------------------------------------------------//

			// Per-voxel greyscale parameters and Guo's implicit velocity correction:
			// u = v / (c0 + sqrt(c0^2 + c1*|v|)) solves the drag-coupled momentum
			// equation without iteration.
			porosity = Poros[n];
			perm = Perm[n];

			c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
			if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
			GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
			c1 = porosity*0.5*GeoFun/sqrt(perm);
			if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes

			vx = jx/Den+0.5*porosity*Gx;
			vy = jy/Den+0.5*porosity*Gy;
			vz = jz/Den+0.5*porosity*Gz;
			v_mag=sqrt(vx*vx+vy*vy+vz*vz);
			ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
			uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
			uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
			u_mag=sqrt(ux*ux+uy*uy+uz*uz);

			//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
			Fx = Den*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
			Fy = Den*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
			Fz = Den*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
			if (porosity==1.0){
				// open (pure-fluid) nodes: no porous drag, only the external force
				Fx=Den*Gx;
				Fy=Den*Gy;
				Fz=Den*Gz;
			}

			//Calculate pressure for Incompressible-MRT model
			pressure=0.5/porosity*(pressure-0.5*Den*u_mag*u_mag/porosity);

			// The commented-out variant below keeps the higher-order force terms in
			// the moment relaxation; the active code further down drops them.
			//			//..............carry out relaxation process...............................................
			//			m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1)
			//                    + (1-0.5*rlx_setA)*38*(Fx*ux+Fy*uy+Fz*uz)/porosity;
			//			m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2)
			//                    + (1-0.5*rlx_setA)*11*(-Fx*ux-Fy*uy-Fz*uz)/porosity;
			//            jx = jx + Fx;
			//			m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
			//                    + (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
			//			jy = jy + Fy;
			//			m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
			//                    + (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
			//			jz = jz + Fz;
			//			m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
			//                    + (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
			//			m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9)
			//                    + (1-0.5*rlx_setA)*(4*Fx*ux-2*Fy*uy-2*Fz*uz)/porosity;
			//			m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10)
			//                      + (1-0.5*rlx_setA)*(-2*Fx*ux+Fy*uy+Fz*uz)/porosity;
			//			m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11)
			//                      + (1-0.5*rlx_setA)*(2*Fy*uy-2*Fz*uz)/porosity;
			//			m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12)
			//                      + (1-0.5*rlx_setA)*(-Fy*uy+Fz*uz)/porosity;
			//			m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13)
			//                      + (1-0.5*rlx_setA)*(Fy*ux+Fx*uy)/porosity;
			//			m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14)
			//                      + (1-0.5*rlx_setA)*(Fz*uy+Fy*uz)/porosity;
			//			m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15)
			//                      + (1-0.5*rlx_setA)*(Fz*ux+Fx*uz)/porosity;
			//			m16 = m16 + rlx_setB*( - m16);
			//			m17 = m17 + rlx_setB*( - m17);
			//			m18 = m18 + rlx_setB*( - m18);
			//			//.......................................................................................................

			//-------------------- IMRT collison where body force has NO higher-order terms -------------//
			//..............carry out relaxation process...............................................
			m1 = m1 + rlx_setA*((-30*Den+19*Den*(ux*ux+uy*uy+uz*uz)/porosity + 57*pressure*porosity) - m1);
			m2 = m2 + rlx_setA*((12*Den - 5.5*Den*(ux*ux+uy*uy+uz*uz)/porosity-27*pressure*porosity) - m2);
			jx = jx + Fx;
			m4 = m4 + rlx_setB*((-0.6666666666666666*ux*Den) - m4)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
			jy = jy + Fy;
			m6 = m6 + rlx_setB*((-0.6666666666666666*uy*Den) - m6)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
			jz = jz + Fz;
			m8 = m8 + rlx_setB*((-0.6666666666666666*uz*Den) - m8)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
			m9 = m9 + rlx_setA*((Den*(2*ux*ux-uy*uy-uz*uz)/porosity) - m9);
			m10 = m10 + rlx_setA*(-0.5*Den*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
			m11 = m11 + rlx_setA*((Den*(uy*uy-uz*uz)/porosity) - m11);
			m12 = m12 + rlx_setA*(-0.5*(Den*(uy*uy-uz*uz)/porosity)- m12);
			m13 = m13 + rlx_setA*((Den*ux*uy/porosity) - m13);
			m14 = m14 + rlx_setA*((Den*uy*uz/porosity) - m14);
			m15 = m15 + rlx_setA*((Den*ux*uz/porosity) - m15);
			m16 = m16 + rlx_setB*( - m16);
			m17 = m17 + rlx_setB*( - m17);
			m18 = m18 + rlx_setB*( - m18);
			//.......................................................................................................

			//.................inverse transformation......................................................
			// AA-odd write phase: each post-collision f_q is pushed into the
			// neighbor slot that the *opposite* direction was read from (e.g. the
			// updated f1 goes to neighborList[n+Np], which was the f2 read index).
			// q=0
			fq = mrt_V1*Den-mrt_V2*m1+mrt_V3*m2;
			dist[n] = fq;

			// q = 1
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
			nread = neighborList[n+Np];
			dist[nread] = fq;

			// q=2
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
			nread = neighborList[n];
			dist[nread] = fq;

			// q = 3
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
			nread = neighborList[n+3*Np];
			dist[nread] = fq;

			// q = 4
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
			nread = neighborList[n+2*Np];
			dist[nread] = fq;

			// q = 5
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
			nread = neighborList[n+5*Np];
			dist[nread] = fq;

			// q = 6
			fq = mrt_V1*Den-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
			nread = neighborList[n+4*Np];
			dist[nread] = fq;

			// q = 7
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
			nread = neighborList[n+7*Np];
			dist[nread] = fq;

			// q = 8
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
			nread = neighborList[n+6*Np];
			dist[nread] = fq;

			// q = 9
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
			nread = neighborList[n+9*Np];
			dist[nread] = fq;

			// q = 10
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
			nread = neighborList[n+8*Np];
			dist[nread] = fq;

			// q = 11
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
			nread = neighborList[n+11*Np];
			dist[nread] = fq;

			// q = 12
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
			nread = neighborList[n+10*Np];
			dist[nread]= fq;

			// q = 13
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
			nread = neighborList[n+13*Np];
			dist[nread] = fq;

			// q= 14
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
			nread = neighborList[n+12*Np];
			dist[nread] = fq;

			// q = 15
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
			nread = neighborList[n+15*Np];
			dist[nread] = fq;

			// q = 16
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
			nread = neighborList[n+14*Np];
			dist[nread] = fq;

			// q = 17
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
			nread = neighborList[n+17*Np];
			dist[nread] = fq;

			// q = 18
			fq = mrt_V1*Den+mrt_V9*m1+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
			nread = neighborList[n+16*Np];
			dist[nread] = fq;
			//........................................................................

			//Update velocity on device
			Velocity[0*Np+n] = ux;
			Velocity[1*Np+n] = uy;
			Velocity[2*Np+n] = uz;
			//Update pressure on device
			Pressure[n] = pressure;
		}
	}
}
//------------------------------------------------------------------------------
// AA-"odd" timestep D3Q19 greyscale LBM collision kernel, standard
// (compressible) MRT variant, with Guo-style porous-media drag.
//
// Differences from the IMRT kernel above:
//   * the local density rho is accumulated from all 19 distributions
//     (including f0) and the pressure output is simply rho/3;
//   * the reference density rho0 (argument) is used for velocity/force scaling
//     instead of the constant Den;
//   * m10 and m12 relax toward zero here (the full equilibria are kept in
//     commented-out form next to them);
//   * the neighbor indices for q=1..14 are cached in nr1..nr14 on the read
//     side so the write phase can reuse them without re-reading neighborList
//     (q=15..18 still re-read through nread).
//
// Launch assumptions: 1-D grid; NBLOCKS/NTHREADS are compile-time launch
// constants defined elsewhere in this file -- TODO(review) confirm the launch
// configuration matches, since S below is derived from them.
//
// Arguments mirror the IMRT kernel: neighborList (18 slabs of Np), dist
// (19 slabs of Np), site range [start,finish), relaxation rates rlx/rlx_eff,
// body force (Gx,Gy,Gz), per-voxel Poros/Perm, outputs Velocity (3 slabs)
// and Pressure, and the reference density rho0.
//------------------------------------------------------------------------------
__global__ void dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
		double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){

	int n, nread;
	int nr1,nr2,nr3,nr4,nr5,nr6;      // cached neighbor indices, reused for the write phase
	int nr7,nr8,nr9,nr10;
	int nr11,nr12,nr13,nr14;
	double vx,vy,vz,v_mag;            // raw (pre-drag) velocity and magnitude
	double ux,uy,uz,u_mag;            // drag-corrected velocity (Guo's implicit scheme)
	double pressure;//defined for this incompressible model
	// conserved moments
	double rho,jx,jy,jz;
	// non-conserved moments (D3Q19 MRT basis)
	double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
	double fq;
	//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
	double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
	double porosity;
	double perm;//voxel permeability
	double c0, c1; //Guo's model parameters
	double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
	double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
	double rlx_setA = rlx;
	// second set of relaxation rates, tied to rlx_setA by s_B = 8(2-s_A)/(8-s_A)
	double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
	// mrt_V1..mrt_V12: fixed coefficients of the inverse D3Q19 moment transform
	const double mrt_V1=0.05263157894736842;
	const double mrt_V2=0.012531328320802;
	const double mrt_V3=0.04761904761904762;
	const double mrt_V4=0.004594820384294068;
	const double mrt_V5=0.01587301587301587;
	const double mrt_V6=0.0555555555555555555555555;
	const double mrt_V7=0.02777777777777778;
	const double mrt_V8=0.08333333333333333;
	const double mrt_V9=0.003341687552213868;
	const double mrt_V10=0.003968253968253968;
	const double mrt_V11=0.01388888888888889;
	const double mrt_V12=0.04166666666666666;

	// Each thread handles up to S sites in a blocked mapping over [start,finish).
	int S = Np/NBLOCKS/NTHREADS + 1;
	for (int s=0; s<S; s++){
		//........Get 1-D index for this thread....................
		n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
		if ( n<finish ){
			//........................................................................
			//					READ THE DISTRIBUTIONS
			//		(read from opposite array due to previous swap operation)
			// The m* accumulators are the hand-unrolled moment transform M*f; the
			// exact sequence of += / -= statements encodes the matrix rows.
			//........................................................................
			// q=0
			fq = dist[n];
			rho = fq;
			m1  = -30.0*fq;
			m2  = 12.0*fq;

			// q=1
			//nread = neighborList[n]; // neighbor 2
			//fq = dist[nread]; // reading the f1 data into register fq
			nr1 = neighborList[n];
			fq = dist[nr1]; // reading the f1 data into register fq
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jx = fq;
			m4 = -4.0*fq;
			m9 = 2.0*fq;
			m10 = -4.0*fq;

			// f2 = dist[10*Np+n];
			//nread = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
			//fq = dist[nread];  // reading the f2 data into register fq
			nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist)
			fq = dist[nr2];  // reading the f2 data into register fq
			rho += fq;
			m1 -= 11.0*(fq);
			m2 -= 4.0*(fq);
			jx -= fq;
			m4 += 4.0*(fq);
			m9 += 2.0*(fq);
			m10 -= 4.0*(fq);

			// q=3
			//nread = neighborList[n+2*Np]; // neighbor 4
			//fq = dist[nread];
			nr3 = neighborList[n+2*Np]; // neighbor 4
			fq = dist[nr3];
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy = fq;
			m6 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 = fq;
			m12 = -2.0*fq;

			// q = 4
			//nread = neighborList[n+3*Np]; // neighbor 3
			//fq = dist[nread];
			nr4 = neighborList[n+3*Np]; // neighbor 3
			fq = dist[nr4];
			rho+= fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jy -= fq;
			m6 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 += fq;
			m12 -= 2.0*fq;

			// q=5
			//nread = neighborList[n+4*Np];
			//fq = dist[nread];
			nr5 = neighborList[n+4*Np];
			fq = dist[nr5];
			rho += fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz = fq;
			m8 = -4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;

			// q = 6
			//nread = neighborList[n+5*Np];
			//fq = dist[nread];
			nr6 = neighborList[n+5*Np];
			fq = dist[nr6];
			rho+= fq;
			m1 -= 11.0*fq;
			m2 -= 4.0*fq;
			jz -= fq;
			m8 += 4.0*fq;
			m9 -= fq;
			m10 += 2.0*fq;
			m11 -= fq;
			m12 += 2.0*fq;

			// q=7
			//nread = neighborList[n+6*Np];
			//fq = dist[nread];
			nr7 = neighborList[n+6*Np];
			fq = dist[nr7];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy += fq;
			m6 += fq;
			m9  += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 = fq;
			m16 = fq;
			m17 = -fq;

			// q = 8
			//nread = neighborList[n+7*Np];
			//fq = dist[nread];
			nr8 = neighborList[n+7*Np];
			fq = dist[nr8];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 += fq;
			m16 -= fq;
			m17 += fq;

			// q=9
			//nread = neighborList[n+8*Np];
			//fq = dist[nread];
			nr9 = neighborList[n+8*Np];
			fq = dist[nr9];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jy -= fq;
			m6 -= fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 += fq;
			m17 += fq;

			// q = 10
			//nread = neighborList[n+9*Np];
			//fq = dist[nread];
			nr10 = neighborList[n+9*Np];
			fq = dist[nr10];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jy += fq;
			m6 += fq;
			m9 += fq;
			m10 += fq;
			m11 += fq;
			m12 += fq;
			m13 -= fq;
			m16 -= fq;
			m17 -= fq;

			// q=11
			//nread = neighborList[n+10*Np];
			//fq = dist[nread];
			nr11 = neighborList[n+10*Np];
			fq = dist[nr11];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 = fq;
			m16 -= fq;
			m18 = fq;

			// q=12
			//nread = neighborList[n+11*Np];
			//fq = dist[nread];
			nr12 = neighborList[n+11*Np];
			fq = dist[nr12];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 += fq;
			m16 += fq;
			m18 -= fq;

			// q=13
			//nread = neighborList[n+12*Np];
			//fq = dist[nread];
			nr13 = neighborList[n+12*Np];
			fq = dist[nr13];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx += fq;
			m4 += fq;
			jz -= fq;
			m8 -= fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 -= fq;
			m18 -= fq;

			// q=14
			//nread = neighborList[n+13*Np];
			//fq = dist[nread];
			nr14 = neighborList[n+13*Np];
			fq = dist[nr14];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jx -= fq;
			m4 -= fq;
			jz += fq;
			m8 += fq;
			m9 += fq;
			m10 += fq;
			m11 -= fq;
			m12 -= fq;
			m15 -= fq;
			m16 += fq;
			m18 += fq;

			// q=15
			nread = neighborList[n+14*Np];
			fq = dist[nread];
			//fq = dist[17*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 = fq;
			m17 += fq;
			m18 -= fq;

			// q=16
			nread = neighborList[n+15*Np];
			fq = dist[nread];
			//fq = dist[8*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 += fq;
			m17 -= fq;
			m18 += fq;

			// q=17
			//fq = dist[18*Np+n];
			nread = neighborList[n+16*Np];
			fq = dist[nread];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy += fq;
			m6 += fq;
			jz -= fq;
			m8 -= fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 += fq;
			m18 += fq;

			// q=18
			nread = neighborList[n+17*Np];
			fq = dist[nread];
			//fq = dist[9*Np+n];
			rho += fq;
			m1 += 8.0*fq;
			m2 += fq;
			jy -= fq;
			m6 -= fq;
			jz += fq;
			m8 += fq;
			m9 -= 2.0*fq;
			m10 -= 2.0*fq;
			m14 -= fq;
			m17 -= fq;
			m18 -= fq;
			//---------------------------------------------------------------------//

			// Per-voxel greyscale parameters and Guo's implicit velocity correction:
			// u = v / (c0 + sqrt(c0^2 + c1*|v|)).
			porosity = Poros[n];
			perm = Perm[n];

			c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
			if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
			GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
			c1 = porosity*0.5*GeoFun/sqrt(perm);
			if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes

			vx = jx/rho0+0.5*porosity*Gx;
			vy = jy/rho0+0.5*porosity*Gy;
			vz = jz/rho0+0.5*porosity*Gz;
			v_mag=sqrt(vx*vx+vy*vy+vz*vz);
			ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
			uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
			uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
			u_mag=sqrt(ux*ux+uy*uy+uz*uz);

			//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
			Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
			Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
			Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
			if (porosity==1.0){
				// open (pure-fluid) nodes: no porous drag, only the external force
				Fx=rho0*Gx;
				Fy=rho0*Gy;
				Fz=rho0*Gz;
			}

			//Calculate pressure for MRT model
			pressure=rho/3.f;

			//-------------------- MRT collison where body force has NO higher-order terms -------------//
			m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
			m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
			jx = jx + Fx;
			m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
			jy = jy + Fy;
			m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
			jz = jz + Fz;
			m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
					+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
			m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
			// m10 and m12 relax toward zero here; the full equilibria are preserved
			// in the commented-out alternatives.
			m10 = m10 + rlx_setA*( - m10);
			//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
			m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
			m12 = m12 + rlx_setA*( - m12);
			//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
			m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
			m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
			m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
			m16 = m16 + rlx_setB*( - m16);
			m17 = m17 + rlx_setB*( - m17);
			m18 = m18 + rlx_setB*( - m18);
			//.......................................................................................................

			//.................inverse transformation......................................................
			// AA-odd write phase: each post-collision f_q is pushed into the cached
			// neighbor index of the *opposite* direction (f1 -> nr2, f2 -> nr1, ...);
			// q=15..18 re-read neighborList since their indices were not cached.
			// q=0
			fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
			dist[n] = fq;

			// q = 1
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
			//nread = neighborList[n+Np];
			dist[nr2] = fq;

			// q=2
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
			//nread = neighborList[n];
			dist[nr1] = fq;

			// q = 3
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
			//nread = neighborList[n+3*Np];
			dist[nr4] = fq;

			// q = 4
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
			//nread = neighborList[n+2*Np];
			dist[nr3] = fq;

			// q = 5
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
			//nread = neighborList[n+5*Np];
			dist[nr6] = fq;

			// q = 6
			fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
			//nread = neighborList[n+4*Np];
			dist[nr5] = fq;

			// q = 7
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
			//nread = neighborList[n+7*Np];
			dist[nr8] = fq;

			// q = 8
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
					+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
			//nread = neighborList[n+6*Np];
			dist[nr7] = fq;

			// q = 9
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
			//nread = neighborList[n+9*Np];
			dist[nr10] = fq;

			// q = 10
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
					mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
			//nread = neighborList[n+8*Np];
			dist[nr9] = fq;

			// q = 11
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
			//nread = neighborList[n+11*Np];
			dist[nr12] = fq;

			// q = 12
			fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
					mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
			//nread = neighborList[n+10*Np];
			dist[nr11]= fq;

			// q = 13
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
			//nread = neighborList[n+13*Np];
			dist[nr14] = fq;

			// q= 14
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
					+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
					-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
			//nread = neighborList[n+12*Np];
			dist[nr13] = fq;

			// q = 15
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
					-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
			nread = neighborList[n+15*Np];
			dist[nread] = fq;

			// q = 16
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
					-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
			nread = neighborList[n+14*Np];
			dist[nread] = fq;

			// q = 17
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
					-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
			nread = neighborList[n+17*Np];
			dist[nread] = fq;

			// q = 18
			fq = mrt_V1*rho+mrt_V9*m1
					+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
					-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
			nread = neighborList[n+16*Np];
			dist[nread] = fq;
			//........................................................................

			//Update velocity on device
			Velocity[0*Np+n] = ux;
			Velocity[1*Np+n] = uy;
			Velocity[2*Np+n] = uz;
			//Update pressure on device
			Pressure[n] = pressure;
		}
	}
}
//--------------------------------------------------------------------------------------------------------
// D3Q19 greyscale MRT collision kernel, AA-even timestep (distributions are read from and
// written back to `dist` in place, using the slot layout left by the previous swap).
//
//   dist        : D3Q19 distributions in SoA layout, dist[q*Np + n]
//   start,finish: half-open range [start,finish) of lattice sites handled by this launch
//   rlx,rlx_eff : MRT relaxation rates; rlx_eff sets the effective kinematic viscosity
//   Gx,Gy,Gz    : user-specified body force per unit mass
//   Poros,Perm  : per-voxel porosity and permeability fields
//   Velocity    : output macroscopic velocity, SoA layout (3*Np); Pressure: output (Np)
//   rho0        : constant reference density of this incompressible model
//
// Drag closure follows Guo's greyscale model (PRE 66, 036304 (2002)): linear Darcy plus
// nonlinear Forchheimer terms, with porosity==1.0 voxels treated as open pore space.
//--------------------------------------------------------------------------------------------------------
__global__ void dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Gx, double Gy, double Gz,
double *Poros,double *Perm, double *Velocity,double rho0, double *Pressure){
int n;
double vx,vy,vz,v_mag;
double ux,uy,uz,u_mag;
double pressure;//defined for this incompressible model
// conserved moments
double rho,jx,jy,jz;
// non-conserved moments
double m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
double fq;
//double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double GeoFun;//geometric function from Guo's PRE 66, 036304 (2002)
double porosity;
double perm;//voxel permeability
double c0, c1; //Guo's model parameters
double mu_eff = (1.0/rlx_eff-0.5)/3.0;//kinematic viscosity
double Fx, Fy, Fz;//The total body force including Brinkman force and user-specified (Gx,Gy,Gz)
double rlx_setA = rlx;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
// Precomputed entries of the inverse MRT transformation matrix (fractions of small integers).
const double mrt_V1=0.05263157894736842;
const double mrt_V2=0.012531328320802;
const double mrt_V3=0.04761904761904762;
const double mrt_V4=0.004594820384294068;
const double mrt_V5=0.01587301587301587;
const double mrt_V6=0.0555555555555555555555555;
const double mrt_V7=0.02777777777777778;
const double mrt_V8=0.08333333333333333;
const double mrt_V9=0.003341687552213868;
const double mrt_V10=0.003968253968253968;
const double mrt_V11=0.01388888888888889;
const double mrt_V12=0.04166666666666666;
// Manual grid-stride: each thread handles S sites, strided by the whole launch.
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start;
if ( n<finish ){
//........................................................................
// READ THE DISTRIBUTIONS
// (read from opposite array due to previous swap operation)
// Moments (rho, j, m*) are accumulated on the fly while streaming in each fq.
//........................................................................
// q=0
fq = dist[n];
rho = fq;
m1 = -30.0*fq;
m2 = 12.0*fq;
// q=1
fq = dist[2*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jx = fq;
m4 = -4.0*fq;
m9 = 2.0*fq;
m10 = -4.0*fq;
// q=2
// f2 = dist[10*Np+n];
fq = dist[1*Np+n];
rho += fq;
m1 -= 11.0*(fq);
m2 -= 4.0*(fq);
jx -= fq;
m4 += 4.0*(fq);
m9 += 2.0*(fq);
m10 -= 4.0*(fq);
// q=3
fq = dist[4*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy = fq;
m6 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 = fq;
m12 = -2.0*fq;
// q = 4
fq = dist[3*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jy -= fq;
m6 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 += fq;
m12 -= 2.0*fq;
// q=5
fq = dist[6*Np+n];
rho += fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz = fq;
m8 = -4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q = 6
fq = dist[5*Np+n];
rho+= fq;
m1 -= 11.0*fq;
m2 -= 4.0*fq;
jz -= fq;
m8 += 4.0*fq;
m9 -= fq;
m10 += 2.0*fq;
m11 -= fq;
m12 += 2.0*fq;
// q=7
fq = dist[8*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 = fq;
m16 = fq;
m17 = -fq;
// q = 8
fq = dist[7*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 += fq;
m16 -= fq;
m17 += fq;
// q=9
fq = dist[10*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jy -= fq;
m6 -= fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 += fq;
m17 += fq;
// q = 10
fq = dist[9*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jy += fq;
m6 += fq;
m9 += fq;
m10 += fq;
m11 += fq;
m12 += fq;
m13 -= fq;
m16 -= fq;
m17 -= fq;
// q=11
fq = dist[12*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 = fq;
m16 -= fq;
m18 = fq;
// q=12
fq = dist[11*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 += fq;
m16 += fq;
m18 -= fq;
// q=13
fq = dist[14*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx += fq;
m4 += fq;
jz -= fq;
m8 -= fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 -= fq;
m18 -= fq;
// q=14
fq = dist[13*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jx -= fq;
m4 -= fq;
jz += fq;
m8 += fq;
m9 += fq;
m10 += fq;
m11 -= fq;
m12 -= fq;
m15 -= fq;
m16 += fq;
m18 += fq;
// q=15
fq = dist[16*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 = fq;
m17 += fq;
m18 -= fq;
// q=16
fq = dist[15*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 += fq;
m17 -= fq;
m18 += fq;
// q=17
fq = dist[18*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy += fq;
m6 += fq;
jz -= fq;
m8 -= fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 += fq;
m18 += fq;
// q=18
fq = dist[17*Np+n];
rho += fq;
m1 += 8.0*fq;
m2 += fq;
jy -= fq;
m6 -= fq;
jz += fq;
m8 += fq;
m9 -= 2.0*fq;
m10 -= 2.0*fq;
m14 -= fq;
m17 -= fq;
m18 -= fq;
//---------------------------------------------------------------------//
// Greyscale drag closure (Guo): c0, c1 parameterize the implicit velocity
// correction; GeoFun is the Ergun-type geometric factor.
// NOTE(review): exact float compares `porosity==1.0` assume open-pore voxels
// are tagged with exactly 1.0 upstream — confirm how Poros is filled.
porosity = Poros[n];
perm = Perm[n];
c0 = 0.5*(1.0+porosity*0.5*mu_eff/perm);
if (porosity==1.0) c0 = 0.5;//i.e. apparent pore nodes
GeoFun = 1.75/sqrt(150.0*porosity*porosity*porosity);
c1 = porosity*0.5*GeoFun/sqrt(perm);
if (porosity==1.0) c1 = 0.0;//i.e. apparent pore nodes
vx = jx/rho0+0.5*porosity*Gx;
vy = jy/rho0+0.5*porosity*Gy;
vz = jz/rho0+0.5*porosity*Gz;
v_mag=sqrt(vx*vx+vy*vy+vz*vz);
ux = vx/(c0+sqrt(c0*c0+c1*v_mag));
uy = vy/(c0+sqrt(c0*c0+c1*v_mag));
uz = vz/(c0+sqrt(c0*c0+c1*v_mag));
u_mag=sqrt(ux*ux+uy*uy+uz*uz);
//Update the total force to include linear (Darcy) and nonlinear (Forchheimer) drags due to the porous medium
Fx = rho0*(-porosity*mu_eff/perm*ux - porosity*GeoFun/sqrt(perm)*u_mag*ux + porosity*Gx);
Fy = rho0*(-porosity*mu_eff/perm*uy - porosity*GeoFun/sqrt(perm)*u_mag*uy + porosity*Gy);
Fz = rho0*(-porosity*mu_eff/perm*uz - porosity*GeoFun/sqrt(perm)*u_mag*uz + porosity*Gz);
if (porosity==1.0){
Fx=rho0*Gx;
Fy=rho0*Gy;
Fz=rho0*Gz;
}
//Calculate pressure for Incompressible-MRT model
pressure=rho/3.f;
//-------------------- IMRT collison where body force has NO higher-order terms -------------//
// Momentum moments relax with rlx_setB and pick up the half-force correction
// (1 - rlx_setB/2); energy/stress moments relax with rlx_setA toward equilibria
// scaled by rho0/porosity.
m1 = m1 + rlx_setA*((19*(ux*ux+uy*uy+uz*uz)*rho0/porosity - 11*rho) - m1);
m2 = m2 + rlx_setA*((3*rho - 5.5*(ux*ux+uy*uy+uz*uz)*rho0/porosity) - m2);
jx = jx + Fx;
m4 = m4 + rlx_setB*((-0.6666666666666666*ux*rho0)- m4)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fx);
jy = jy + Fy;
m6 = m6 + rlx_setB*((-0.6666666666666666*uy*rho0)- m6)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fy);
jz = jz + Fz;
m8 = m8 + rlx_setB*((-0.6666666666666666*uz*rho0)- m8)
+ (1-0.5*rlx_setB)*(-0.6666666666666666*Fz);
m9 = m9 + rlx_setA*(((2*ux*ux-uy*uy-uz*uz)*rho0/porosity) - m9);
m10 = m10 + rlx_setA*( - m10);
//m10 = m10 + rlx_setA*(-0.5*rho0*((2*ux*ux-uy*uy-uz*uz)/porosity)- m10);
m11 = m11 + rlx_setA*(((uy*uy-uz*uz)*rho0/porosity) - m11);
m12 = m12 + rlx_setA*( - m12);
//m12 = m12 + rlx_setA*(-0.5*(rho0*(uy*uy-uz*uz)/porosity)- m12);
m13 = m13 + rlx_setA*( (ux*uy*rho0/porosity) - m13);
m14 = m14 + rlx_setA*( (uy*uz*rho0/porosity) - m14);
m15 = m15 + rlx_setA*( (ux*uz*rho0/porosity) - m15);
m16 = m16 + rlx_setB*( - m16);
m17 = m17 + rlx_setB*( - m17);
m18 = m18 + rlx_setB*( - m18);
//.......................................................................................................
//.................inverse transformation......................................................
// Reconstruct the 19 post-collision distributions from the relaxed moments
// and store them back into the same slots that were read.
// q=0
fq = mrt_V1*rho-mrt_V2*m1+mrt_V3*m2;
dist[n] = fq;
// q = 1
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jx-m4)+mrt_V6*(m9-m10);
dist[1*Np+n] = fq;
// q=2
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m4-jx)+mrt_V6*(m9-m10);
dist[2*Np+n] = fq;
// q = 3
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jy-m6)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[3*Np+n] = fq;
// q = 4
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m6-jy)+mrt_V7*(m10-m9)+mrt_V8*(m11-m12);
dist[4*Np+n] = fq;
// q = 5
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(jz-m8)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[5*Np+n] = fq;
// q = 6
fq = mrt_V1*rho-mrt_V4*m1-mrt_V5*m2+0.1*(m8-jz)+mrt_V7*(m10-m9)+mrt_V8*(m12-m11);
dist[6*Np+n] = fq;
// q = 7
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx+jy)+0.025*(m4+m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12+0.25*m13+0.125*(m16-m17);
dist[7*Np+n] = fq;
// q = 8
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jy)-0.025*(m4+m6) +mrt_V7*m9+mrt_V11*m10+mrt_V8*m11
+mrt_V12*m12+0.25*m13+0.125*(m17-m16);
dist[8*Np+n] = fq;
// q = 9
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jx-jy)+0.025*(m4-m6)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13+0.125*(m16+m17);
dist[9*Np+n] = fq;
// q = 10
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2+0.1*(jy-jx)+0.025*(m6-m4)+
mrt_V7*m9+mrt_V11*m10+mrt_V8*m11+mrt_V12*m12-0.25*m13-0.125*(m16+m17);
dist[10*Np+n] = fq;
// q = 11
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx+jz)+0.025*(m4+m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12+0.25*m15+0.125*(m18-m16);
dist[11*Np+n] = fq;
// q = 12
fq = mrt_V1*rho+mrt_V9*m1+mrt_V10*m2-0.1*(jx+jz)-0.025*(m4+m8)+
mrt_V7*m9+mrt_V11*m10-mrt_V8*m11-mrt_V12*m12+0.25*m15+0.125*(m16-m18);
dist[12*Np+n] = fq;
// q = 13
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jx-jz)+0.025*(m4-m8)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15-0.125*(m16+m18);
dist[13*Np+n] = fq;
// q= 14
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jx)+0.025*(m8-m4)
+mrt_V7*m9+mrt_V11*m10-mrt_V8*m11
-mrt_V12*m12-0.25*m15+0.125*(m16+m18);
dist[14*Np+n] = fq;
// q = 15
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy+jz)+0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m17-m18);
dist[15*Np+n] = fq;
// q = 16
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2-0.1*(jy+jz)-0.025*(m6+m8)
-mrt_V6*m9-mrt_V7*m10+0.25*m14+0.125*(m18-m17);
dist[16*Np+n] = fq;
// q = 17
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jy-jz)+0.025*(m6-m8)
-mrt_V6*m9-mrt_V7*m10-0.25*m14+0.125*(m17+m18);
dist[17*Np+n] = fq;
// q = 18
fq = mrt_V1*rho+mrt_V9*m1
+mrt_V10*m2+0.1*(jz-jy)+0.025*(m8-m6)
-mrt_V6*m9-mrt_V7*m10-0.25*m14-0.125*(m17+m18);
dist[18*Np+n] = fq;
//........................................................................
//Update velocity on device
Velocity[0*Np+n] = ux;
Velocity[1*Np+n] = uy;
Velocity[2*Np+n] = uz;
//Update pressure on device
Pressure[n] = pressure;
}
}
}
__global__ void dvc_ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den)
{
// Initialize the D3Q19 distributions for the greyscale IMRT model:
// rest population carries the density offset, face (q=1..6) and edge
// (q=7..18) populations get their fixed lattice weights.
const double w_face = 0.055555555555555555;
const double w_edge = 0.0277777777777778;
int S = Np/NBLOCKS/NTHREADS + 1;
for (int s=0; s<S; s++){
//........Get 1-D index for this thread....................
int n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x;
if (n<Np){
dist[n] = Den - 0.6666666666666667;
for (int q=1; q<=6; q++) dist[q*Np+n] = w_face;
for (int q=7; q<=18; q++) dist[q*Np+n] = w_edge;
}
}
}
// Host-side launcher for the even-timestep greyscale kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the odd-timestep greyscale kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the even-timestep greyscale IMRT kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_IMRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale_IMRT<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_IMRT: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the odd-timestep greyscale IMRT kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_IMRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double Den,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale_IMRT<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,Den,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_IMRT: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the odd-timestep greyscale MRT kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAodd_Greyscale_MRT(int *neighborList, double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
dvc_ScaLBL_D3Q19_AAodd_Greyscale_MRT<<<NBLOCKS,NTHREADS >>>(neighborList,dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAodd_Greyscale_MRT: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the even-timestep greyscale MRT kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_AAeven_Greyscale_MRT(double *dist, int start, int finish, int Np, double rlx, double rlx_eff, double Fx, double Fy, double Fz,double *Poros,double *Perm, double *Velocity,double rho0,double *Pressure){
dvc_ScaLBL_D3Q19_AAeven_Greyscale_MRT<<<NBLOCKS,NTHREADS >>>(dist,start,finish,Np,rlx,rlx_eff,Fx,Fy,Fz,Poros,Perm,Velocity,rho0,Pressure);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_AAeven_Greyscale_MRT: %s \n",cudaGetErrorString(status));
}
}
// Host-side launcher for the greyscale IMRT initialization kernel; reports launch errors.
extern "C" void ScaLBL_D3Q19_GreyIMRT_Init(double *dist, int Np, double Den){
dvc_ScaLBL_D3Q19_GreyIMRT_Init<<<NBLOCKS,NTHREADS >>>(dist, Np, Den);
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess){
printf("CUDA error in ScaLBL_D3Q19_GreyIMRT_Init: %s \n",cudaGetErrorString(status));
}
}
|
7,956 | #include<stdio.h>
#define BUF_SIZE 20 * 1048576
// Count '\n' bytes per 256-byte chunk of `buf`; out[j] receives the count for
// chunk j (n chunks total). Requires blockDim.x == 256; `buf` must be padded so
// every chunk is fully readable.
__global__ void get_nl_count(char *buf, int n, int *out) {
    int tid = threadIdx.x;
    __shared__ volatile int cnt[256];
    for (int i = tid + blockDim.x * blockIdx.x, j = blockIdx.x; j < n; i += blockDim.x * gridDim.x, j += gridDim.x) {
        cnt[tid] = buf[i] == '\n';
        __syncthreads();
        if (tid < 128) cnt[tid] += cnt[tid+128];
        __syncthreads();
        if (tid < 64) cnt[tid] += cnt[tid+64];
        __syncthreads();
        if (tid < 32) {
            // Final warp-level reduction. BUG FIX: the original relied on legacy
            // implicit warp-synchronous execution; since Volta's independent
            // thread scheduling that is undefined behavior, so an explicit
            // __syncwarp() is required between dependent steps. All 32 lanes of
            // warp 0 reach these barriers, so the full default mask is correct.
            cnt[tid] += cnt[tid+32]; __syncwarp();
            cnt[tid] += cnt[tid+16]; __syncwarp();
            cnt[tid] += cnt[tid+8];  __syncwarp();
            cnt[tid] += cnt[tid+4];  __syncwarp();
            cnt[tid] += cnt[tid+2];  __syncwarp();
            cnt[tid] += cnt[tid+1];  __syncwarp();
            if (tid == 0) out[j] = cnt[0];
        }
        __syncthreads();
    }
}
// For each 256-byte chunk j (requires blockDim.x == 256), compute the byte
// offset of every '\n' and scatter it into `out`, ordered by global line number.
// pos[j] is the exclusive prefix count of newlines before chunk j (computed on
// the host); a block-local inclusive prefix sum over the newline flags then
// yields each newline's global 1-based line index, so out[line-1] = offset.
__global__ void get_line_pos(char *buf, int n, int *pos, int *out) {
    int tid = threadIdx.x;
    // Double-buffered scan arrays (Hillis-Steele needs a ping-pong buffer).
    __shared__ int cnt[256], cnt2[256];
    for (int i = tid + blockDim.x * blockIdx.x, j = blockIdx.x; j < n; i += blockDim.x * gridDim.x, j += gridDim.x) {
        int b = buf[i] == '\n';
        cnt[tid] = b;
        // Seed the scan with the running total from all previous chunks.
        if (tid == 0) cnt[0] += pos[j];
        // reduce sum
        __syncthreads();
        // Inclusive scan over 256 elements: two doubling steps per iteration,
        // 4 iterations cover strides 1,2,4,...,128.
        #pragma unroll
        for (int dd = 0; dd < 8; dd += 2) {
            cnt2[tid] = tid >= 1<<dd ? cnt[tid] + cnt[tid-(1<<dd)] : cnt[tid];
            __syncthreads();
            cnt[tid] = tid >= 2<<dd ? cnt2[tid] + cnt2[tid-(2<<dd)] : cnt2[tid];
            __syncthreads();
        }
        // cnt[tid] is now this thread's inclusive newline count -> global line no.
        if (b) out[cnt[tid]-1] = i;
    }
}
// Parse one "R G B" text line per entry: bmp[i] holds the byte offset of the
// newline preceding line i; on exit bmp[i] is packed as R | G<<8 | B<<16.
// Digits are accumulated base-10; any byte <= ' ' (except '\n') is a separator.
__global__ void parse(char *buf, int *bmp, int n) {
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    for (int i = gid; i < n - 1; i += blockDim.x * gridDim.x) {
        int p = bmp[i] + 1;
        int channel[3] = {0, 0, 0};
        for (int c = 0; c < 3; c++) {
            // Skip separator bytes, then read one decimal integer.
            while (buf[p] <= ' ' && buf[p] != '\n') { p += 1; }
            while (buf[p] >= '0' && buf[p] <= '9') {
                channel[c] = channel[c] * 10 + (buf[p] - '0');
                p += 1;
            }
        }
        bmp[i] = channel[0] | channel[1]<<8 | channel[2]<<16;
    }
}
// Accumulate per-channel 256-bin histograms of the packed RGB values in bmp.
// Each block builds shared-memory histograms, then every thread folds its own
// bin into a per-thread slot of `out` (layout: R, G, B channels, each
// 256*gridDim.x wide). Requires blockDim.x == 256 and `out` zero-initialized
// (or carrying partial sums from a previous launch).
__global__ void hist_kern(int *bmp, int n, int *out) {
    __shared__ int histR[256], histG[256], histB[256];
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    histR[threadIdx.x] = 0;
    histG[threadIdx.x] = 0;
    histB[threadIdx.x] = 0;
    __syncthreads();
    int stride = blockDim.x * gridDim.x;
    for (int i = gid; i < n; i += stride) {
        int rgb = bmp[i];
        atomicAdd(&histR[rgb & 255], 1);
        atomicAdd(&histG[(rgb >> 8) & 255], 1);
        atomicAdd(&histB[(rgb >> 16) & 255], 1);
    }
    __syncthreads();
    out[gid] += histR[threadIdx.x];
    out[gid + 256 * gridDim.x] += histG[threadIdx.x];
    out[gid + 256 * gridDim.x * 2] += histB[threadIdx.x];
}
// Read a PPM-like text file ("input"): a count followed by R G B triples, one
// per line. Streams the file through the GPU in BUF_SIZE chunks, locates line
// starts, parses the triples, and accumulates per-channel histograms which are
// written to "yyyyyy.out".
int main() {
    if (cudaSetDevice(0) != cudaSuccess) return 2;
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    printf("using GPU %s\n", prop.name);
    int smcount = prop.multiProcessorCount;
    cudaEvent_t e, e2;
    cudaEventCreate(&e);
    cudaEventCreate(&e2);
    char *gpu_buf, *buf;
    cudaHostAlloc(&buf, BUF_SIZE + 100, cudaHostAllocDefault);   // pinned for fast H2D
    cudaMalloc(&gpu_buf, BUF_SIZE + 100);
    int *gpu_what, *what;
    cudaMalloc(&gpu_what, sizeof(int) * 1048576);
    // hist_kern is launched with 4*smcount blocks of 256 threads and writes one
    // partial bin per thread, per channel.
    int *hist = new int[256*3*4*smcount], *gpu_hist;
    cudaMalloc(&gpu_hist, sizeof(int) * (256*3*4*smcount));
    cudaMemset(gpu_hist, 0, sizeof(int) * (256*3*4*smcount));
    int histogram_results[768] = {0};
    what = new int[1048576];
    FILE *inFile = fopen("input", "rb");
    FILE *outFile = fopen("yyyyyy.out", "wb");
    if (!inFile || !outFile) return 1;
    printf("open file\n");
    unsigned n = 0;
    if (fscanf(inFile, "%u", &n) != 1 || n%3 != 0) return 1;
    int *bmp = new int[n/3], *gpu_bmp;
    cudaMalloc(&gpu_bmp, sizeof(int) * BUF_SIZE);
    fgets(buf, BUF_SIZE-1, inFile);   // consume the rest of the header line
    int bytes_read, tail = 1;
    unsigned sum = 0;
    buf[0] = '\n';    // sentinel newline so the first data line has a preceding '\n'
    cudaEventRecord(e, 0);
    while ((bytes_read = fread(buf + tail, 1, BUF_SIZE - tail, inFile)) > 0) {
        printf("read %d bytes\n", bytes_read);
        int blocks = (tail + bytes_read-1) / 256 + 1;
        for (int i = bytes_read + tail; i < blocks*256; i++) buf[i] = 0;  // zero-pad last chunk
        cudaMemcpy(gpu_buf, buf, BUF_SIZE, cudaMemcpyHostToDevice);
        get_nl_count<<<8*smcount, 256>>>(gpu_buf, blocks, gpu_what);
        cudaMemcpy(what, gpu_what, sizeof(int) * blocks, cudaMemcpyDeviceToHost);
        unsigned sum2 = sum;
        for (int i = 0; i < blocks; i++) sum2 += what[i];
        // Turn per-chunk counts into an exclusive prefix sum for get_line_pos.
        for (int i = 1; i < blocks; i++) what[i] += what[i-1];
        for (int i = blocks-1; i > 0; i--) what[i] = what[i-1];
        what[0] = 0;
        cudaMemcpy(gpu_what, what, sizeof(int) * blocks, cudaMemcpyHostToDevice);
        get_line_pos<<<8*smcount, 256>>>(gpu_buf, blocks, gpu_what, gpu_bmp);
        parse<<<8*smcount, 256>>>(gpu_buf, gpu_bmp, sum2 - sum);
        hist_kern<<<4*smcount, 256>>>(gpu_bmp, sum2-1 - sum, gpu_hist);
        printf("sum = %u sum2 = %u\n", sum, sum2);
        sum = sum2-1;
        if (bytes_read + tail < BUF_SIZE) break;
        // Carry the trailing partial line over to the front of the next buffer.
        for (tail = 1; tail <= BUF_SIZE; tail++) {
            if (buf[BUF_SIZE - tail] == '\n') break;
        }
        if (tail > BUF_SIZE) tail = 0;
        for (int i = 0; i < tail; i++) buf[i] = buf[i + BUF_SIZE-tail];
    }
    cudaMemcpy(hist, gpu_hist, sizeof(int) * (256*3*4*smcount), cudaMemcpyDeviceToHost);
    for (int c = 0; c < 3; c++) {
        for (int i = 0; i < 4*smcount; i++) {
            // BUG FIX: the channel stride was hard-coded as 256*20, which only
            // matches hist_kern's layout (channel stride 256*gridDim.x with
            // gridDim.x = 4*smcount) on a GPU with exactly 5 SMs.
            for (int j = 0; j < 256; j++) histogram_results[j+c*256] += hist[j+i*256+c*256*4*smcount];
        }
    }
    int sum2 = 0;
    for (int i = 0; i < 768; i++) {
        if (i%256 == 0 && i != 0) fputc('\n', outFile);
        fprintf(outFile, "%d ", histogram_results[i]);
        sum2 += histogram_results[i];
    }
    cudaEventRecord(e2, 0);
    float t;
    cudaEventSynchronize(e);
    cudaEventSynchronize(e2);
    cudaEventElapsedTime(&t, e, e2);
    printf("%d %d %f %d\n", n, sum, t, sum2);
    fclose(inFile);
    fclose(outFile);
    // Release everything (the original leaked bmp/hist and the device buffers).
    delete[] what;
    delete[] hist;
    delete[] bmp;
    cudaFree(gpu_what);
    cudaFree(gpu_buf);
    cudaFree(gpu_bmp);
    cudaFree(gpu_hist);
    cudaFreeHost(buf);
    cudaEventDestroy(e);
    cudaEventDestroy(e2);
}
|
7,957 |
#include <cuda.h>
// Empty smoke-test kernel (verifies the toolchain can build and launch device
// code). The original declared an unused local, which only produced a warning.
__global__ void test() {
}
|
7,958 | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
/* Allocate a device buffer of `size` bytes, copy `host_var` into it, and return
 * the device pointer. Pair with cuda_download_var() to read back and free. */
void *cuda_upload_var(void *host_var, int size)
{
void *cuda_var;
/* BUG FIX: the allocation size was hard-coded to 4 bytes, so the following
 * memcpy overflowed the device buffer for any variable larger than 4 bytes. */
cudaMalloc(&cuda_var, size);
cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice);
return cuda_var;
}
/* Copy `size` bytes back from device buffer `cuda_var` into `host_var`, then
 * release the device buffer. Counterpart of cuda_upload_var(). */
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost);
cudaFree(cuda_var);
}
/* A 5-dimensional integer field stored as one flat buffer. */
typedef struct intfield5
{
int *m;               /* flat storage, size[0]*size[1]*size[2]*size[3]*size[4] ints */
int size[5];          /* extent of each dimension */
int is_device_field;  /* 1: m is device memory, 0: m is host memory */
} intfield5;
/* Copy the contents of field `src` into field `dst`, choosing the cudaMemcpy
 * direction from the two is_device_field flags. The byte count is taken from
 * dst's extents; flag values other than 0/1 perform no copy (as before). */
void memcpy_field_intfield5(intfield5 dst, intfield5 src)
{
size_t bytes = (sizeof(*dst.m))*dst.size[0]*dst.size[1]*dst.size[2]*dst.size[3]*dst.size[4];
if (dst.is_device_field == 0 && src.is_device_field == 0) {
cudaMemcpy(dst.m, src.m, bytes, cudaMemcpyHostToHost);
} else if (dst.is_device_field == 1 && src.is_device_field == 0) {
cudaMemcpy(dst.m, src.m, bytes, cudaMemcpyHostToDevice);
} else if (dst.is_device_field == 0 && src.is_device_field == 1) {
cudaMemcpy(dst.m, src.m, bytes, cudaMemcpyDeviceToHost);
} else if (dst.is_device_field == 1 && src.is_device_field == 1) {
cudaMemcpy(dst.m, src.m, bytes, cudaMemcpyDeviceToDevice);
}
}
/* Return the extent of `field` along dimension `index` (0..4). */
int size_intfield5(intfield5 field, int index)
{
int extent = field.size[index];
return extent;
}
/* Allocate a 5-D integer field in device memory with the given extents and
 * return it by value (is_device_field is set to 1). */
intfield5 alloc_device_field_intfield5(int size_0, int size_1, int size_2, int size_3, int size_4)
{
intfield5 field;
field.size[0] = size_0;
field.size[1] = size_1;
field.size[2] = size_2;
field.size[3] = size_3;
field.size[4] = size_4;
size_t bytes = (sizeof(*field.m))*size_0*size_1*size_2*size_3*size_4;
cudaMalloc((void**)&field.m, bytes);
field.is_device_field = 1;
return field;
}
/* Release the device storage of a field created by alloc_device_field_intfield5(). */
void free_device_field_intfield5(intfield5 field)
{
cudaFree(field.m);
}
/* A 5-component integer vector (here: 4 lattice coordinates plus a direction). */
typedef struct intmat5
{
int m[5];
} intmat5;
/* Adapted from: */
/* Z_2 lattice gauge simulation */
/* Michael Creutz <creutz@bnl.gov> */
/* http://thy.phy.bnl.gov/~creutz/z2.c */
/* the lattice is of dimensions SIZE**4 */
/* @todo Move field decl to main */
__constant__ const int SIZE = 10;
typedef intfield5 Links; /* Last index is link direction */
intfield5 link;
__constant__ const int RAND_DATA_COUNT = 128;
/* Poor man's random generator */
__constant__ const float rand_data[128] = {
0.765778,
0.380508,
0.976123,
0.047972,
0.027949,
0.493132,
0.145068,
0.937659,
0.688443,
0.317046,
0.803646,
0.917738,
0.513913,
0.363706,
0.137274,
0.666660,
0.250019,
0.622242,
0.021247,
0.406825,
0.707708,
0.856293,
0.947693,
0.207796,
0.362935,
0.902242,
0.427960,
0.704711,
0.613763,
0.660261,
0.378255,
0.654958,
0.936904,
0.683342,
0.891384,
0.299881,
0.064560,
0.300503,
0.572774,
0.132678,
0.132292,
0.438706,
0.594546,
0.837315,
0.180435,
0.215016,
0.726831,
0.767127,
0.556461,
0.860724,
0.132273,
0.288679,
0.001132,
0.946316,
0.740891,
0.502307,
0.189147,
0.609733,
0.716687,
0.098146,
0.650990,
0.476326,
0.958396,
0.458836,
0.834419,
0.876043,
0.820873,
0.433127,
0.800544,
0.939788,
0.741833,
0.905454,
0.796914,
0.567545,
0.054171,
0.333496,
0.247967,
0.880176,
0.760589,
0.769755,
0.011049,
0.361483,
0.829162,
0.228125,
0.572835,
0.854979,
0.070170,
0.759810,
0.022272,
0.477994,
0.014528,
0.991334,
0.314297,
0.940028,
0.235618,
0.840691,
0.882266,
0.840194,
0.985364,
0.713334,
0.697650,
0.090573,
0.262273,
0.534600,
0.761973,
0.146971,
0.667842,
0.069159,
0.102225,
0.982492,
0.933260,
0.441284,
0.149844,
0.039490,
0.520590,
0.071531,
0.141776,
0.701622,
0.213773,
0.717888,
0.621524,
0.285984,
0.442431,
0.471437,
0.197912,
0.314655,
0.496274,
0.896794
};
/* Advance lattice coordinate d of x by one site, wrapping periodically at SIZE.
 * Assumes the coordinate is already in [0, SIZE). */
__host__ __device__ void moveup(intmat5 *x, int d)
{
x->m[d] += 1;
if (x->m[d] >= SIZE) x->m[d] -= SIZE;
}
/* Retreat lattice coordinate d of x by one site, wrapping periodically at 0.
 * Assumes the coordinate is already in [0, SIZE). */
__host__ __device__ void movedown(intmat5 *x, int d)
{
x->m[d] -= 1;
if (x->m[d] < 0) x->m[d] += SIZE;
}
/* Cold start: set every link variable to +1 (fully ordered Z_2 configuration).
 * One thread per array element; the flat thread id is decoded with dimension 0
 * fastest, while storage uses dimension 0 slowest (stride s1*s2*s3*s4). */
__global__ void kernel_0(intfield5 link)
{
int s0 = link.size[0], s1 = link.size[1], s2 = link.size[2];
int s3 = link.size[3], s4 = link.size[4];
int gid = threadIdx.x + blockIdx.x*blockDim.x;
if (gid >= s0*s1*s2*s3*s4) {
return;
}
int i0 = gid % s0;
int i1 = (gid / s0) % s1;
int i2 = (gid / (s0*s1)) % s2;
int i3 = (gid / (s0*s1*s2)) % s3;
int i4 = (gid / (s0*s1*s2*s3)) % s4;
link.m[(((i0*s1 + i1)*s2 + i2)*s3 + i3)*s4 + i4] = 1;
}
/* Launch kernel_0 over every element of the global `link` field:
 * one thread per element, 128 threads per block. */
void coldstart()
{
int total = link.size[0]*link.size[1]*link.size[2]*link.size[3]*link.size[4];
dim3 grid(total/128 + 1, 1, 1);
dim3 block(128, 1, 1);
kernel_0<<<grid, block>>>(link);
}
/* Heat-bath update of one checkerboard sub-lattice. oddeven_phase in [0,8)
 * encodes (parity, link direction): only sites with matching site parity and
 * d == oddeven_phase/2 are updated, so concurrently updated links never share
 * a staple. Each selected link's 6 staples are summed, the link is set to +-1
 * with heat-bath probability exp(+-beta*staplesum), and the signed staple sum
 * is accumulated into *cuda_action. */
__global__ void kernel_1(intfield5 link, double beta, int iter, float *cuda_action, int oddeven_phase)
{
if (threadIdx.x + blockIdx.x*blockDim.x >= link.size[0]*link.size[1]*link.size[2]*link.size[3]*link.size[4]) {
return;
}
/* Decode the flat thread id into 4 site coordinates + direction (id.m[4]). */
intmat5 id;
id.m[1*0] = (threadIdx.x + blockIdx.x*blockDim.x) % link.size[0]/1;
id.m[1*1] = (threadIdx.x + blockIdx.x*blockDim.x) % (link.size[0]*link.size[1])/link.size[0];
id.m[1*2] = (threadIdx.x + blockIdx.x*blockDim.x) % (link.size[0]*link.size[1]*link.size[2])/(link.size[0]*link.size[1]);
id.m[1*3] = (threadIdx.x + blockIdx.x*blockDim.x) % (link.size[0]*link.size[1]*link.size[2]*link.size[3])/(link.size[0]*link.size[1]*link.size[2]);
id.m[1*4] = (threadIdx.x + blockIdx.x*blockDim.x) % (link.size[0]*link.size[1]*link.size[2]*link.size[3]*link.size[4])/(link.size[0]*link.size[1]*link.size[2]*link.size[3]);
/* Skip threads outside the current (parity, direction) sub-lattice. */
if ((id.m[1*0] + id.m[1*1] + id.m[1*2] + id.m[1*3]) % 2 == oddeven_phase % 2 || id.m[1*4] != oddeven_phase/2) {
return;
}
int dperp;
float staplesum = 0;
int staple;
float bplus;
float bminus;
int d = id.m[1*4];
/* Walk the two staples in each perpendicular plane via moveup/movedown;
 * the move sequence returns id to its starting site after each dperp. */
for (dperp = 0; dperp < 4; dperp += 1) {
if (dperp != d) {
movedown(&id, dperp);
int v1 = link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*dperp];
int v2 = link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*d];
staple = v1*v2;
moveup(&id, d);
staple *= link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*dperp];
moveup(&id, dperp);
staplesum += staple;
staple = link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*dperp];
moveup(&id, dperp);
movedown(&id, d);
staple *= link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*d];
movedown(&id, dperp);
staple *= link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*dperp];
staplesum += staple;
}
}
/* Heat-bath acceptance probability for the +1 orientation. */
bplus = exp(beta*staplesum);
bminus = 1/bplus;
bplus = bplus/(bplus + bminus);
/* "Poor man's" PRNG: index a fixed random table by position and iteration.
 * NOTE(review): the index uses id.m[0,1,3,4] but skips id.m[2] — looks like
 * a copy-paste slip; confirm against the original Creutz z2.c intent. */
int rand_ix = id.m[1*0] + id.m[1*1]*SIZE + id.m[1*3]*SIZE*SIZE + id.m[1*4]*SIZE*SIZE*SIZE + iter*SIZE*SIZE*SIZE*SIZE;
if (rand_data[rand_ix % RAND_DATA_COUNT] < bplus) {
link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*d] = 1;
atomicAdd(cuda_action, staplesum);
} else {
link.m[link.size[1]*link.size[2]*link.size[3]*link.size[4]*id.m[1*0] + link.size[2]*link.size[3]*link.size[4]*id.m[1*1] + link.size[3]*link.size[4]*id.m[1*2] + link.size[4]*id.m[1*3] + 1*d] = -1;
atomicAdd(cuda_action, -staplesum);
}
}
/* Perform one full sweep at inverse coupling `beta`: eight checkerboard phases
 * (2 parities x 4 directions) of kernel_1, accumulating the signed staple sum
 * on the device. Returns 1 - normalized action. */
double update(double beta, int iter)
{
float action = 0.000000;
float *d_action = (float*)cuda_upload_var(&action, sizeof(action));
int total = link.size[0]*link.size[1]*link.size[2]*link.size[3]*link.size[4];
dim3 grid(total/128 + 1, 1, 1);
dim3 block(128, 1, 1);
for (int phase = 0; phase < 8; ++phase) {
kernel_1<<<grid, block>>>(link, beta, iter, d_action, phase);
}
cuda_download_var(d_action, &action, sizeof(action));
/* Normalize: SIZE^4 sites, 4 directions, 6 staples per link. */
action /= SIZE*SIZE*SIZE*SIZE*4*6;
return 1.000000 - action;
}
/* Z_2 lattice gauge thermal cycle: anneal beta from 1 down to 0, then heat
 * back up to 1, printing "beta<TAB>action" for each step. */
int main()
{
link = alloc_device_field_intfield5(SIZE, SIZE, SIZE, SIZE, 4);
double dbeta = 0.010000;
coldstart();
int iter = 0;
for (double beta = 1; beta > 0.000000; beta -= dbeta) {
double action = update(beta, iter);
printf("%g\t%g\n", beta, action);
++iter;
}
printf("\n\n");
for (double beta = 0; beta < 1.000000; beta += dbeta) {
double action = update(beta, iter);
printf("%g\t%g\n", beta, action);
++iter;
}
free_device_field_intfield5(link);
return 0;
}
|
7,959 | // XXX XXX XXX CAREFUL WITH IMAGINARY TIME EVOLUTION: MUST BE tau=-it
//#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#include <time.h>
//#include <cuda.h> // XXX REALLY NECESSARY?
#define SHARED_SIZE_PRIMARY 28
#define SHARED_SIZE_SECONDARY 14
void evolve2d(float *ket, float t, float deltat, float *energias, int order, int Nx, int Ny, float Vx, float Vy, int pbcx, int pbcy, float sinkxax, float coskxax, float sinkyay, float coskyay);
void evolve2dO2(float *ket, float t, float deltat, float *energias, int Nx, int Ny, float Vx, float Vy, int pbcx, int pbcy, float kxax, float coskxax, float sinkyay, float kyay);
void H2d(float *ket, float deltat, int id, int Nx, int Ny, float Vx, float Vy, int pbcx, int pbcy, float sinkxax, float coskxax, float sinkyay, float coskyay);
__global__ void H2d_step(float *ket_in, float *ket_out, int dataketid, float deltat, float *prepared_energias, int pbcx, int pbcy, int Nx, int Ny, float Vx, float Vy, float sinkxax, float coskxax, float sinkyay, float coskyay);
__global__ void H2d_x(float *ket, float deltat, int id, int pbcx, int Nx, int Ny, float Vx, float sinkxax, float coskxax);
__global__ void H2d_y(float *ket, float deltat, int id, int pbcy, int Nx, int Ny, float Vy, float sinkyay, float coskyay);
__global__ void Hdiag2d(float *ket, float deltat, float *energias, int Nx, int Ny, float Vx, float Vy);
//__global__ void H2di_x(float *ket, float deltatau, int id, int pbcx, int Nx, int Ny, float Vx, float sinkxax, float coskxax);
//__global__ void H2di_y(float *ket, float deltatau, int id, int pbcy, int Nx, int Ny, float Vy, float sinkyay, float coskyay);
//__global__ void Hdiag2di(float *ket, float deltatau, float *energias, int Nx, int Ny, float Vx, float Vy);
// Definition of variables {{{
/*
int debug;
float pi;
float hbar;
float dNx;
float dNy;
float Lx;
float Ly;
//int pbcx;
//int pbcy;
float lambda;
float innerR;
float outerR;
float alpha;
float beta;
float dt;
float dtmax;
int order;
int gsteps;
int verbose;
int debut;
int ground;
int nodyn;
int pars;
char* filepars;
float cloakEnergy;
float tmax;
float mass0;
//float ax;
//float ay;
//float kx;
//float ky;
*/
//}}}
// Definition of functions { { {
// Dispatch the time evolution to the requested Trotter order.
// Only second order is currently implemented; other orders are a no-op.
void evolve2d(float *ket, float t, float deltat, float *energias, int order, int Nx, int Ny, float Vx, float Vy, int pbcx, int pbcy, float sinkxax, float coskxax, float sinkyay, float coskyay)
{
	if (order == 2) {
		evolve2dO2(ket, t, deltat, energias, Nx, Ny, Vx, Vy, pbcx, pbcy, sinkxax, coskxax, sinkyay, coskyay);
	}
	// if (order==4) evolve2dO4(ket, t, deltat, energias, Nx, Ny, Vx, Vy, sinkxax, coskxax, sinkyay, coskyay);
}
void evolve2dO2(float *ket, float t, float deltat, float *energias, int Nx, int Ny, float Vx, float Vy, int pbcx, int pbcy, float sinkxax, float coskxax, float sinkyay, float coskyay) {{{
// This function leads the real-time evolution in second order up to time t.
// float ket 1D array describing the ket always alternating real and imaginary part of the ket
// float t duration of the evolution
// float deltat incremental timestep
// float energias* Potential surface (e.g. external trapping potential) acting on the condensate
// int Nx Number of discretization points in x direction
// int Ny Number of discretization points in y direction
// float Vx Off-disgonal matrix terms in x direction
// float Vy Off-disgonal matrix terms in y direction
// float sinkxax sin(kx*ax) - for better performance
// float coskxax cos(kx*ax) - for better performance
// float sinkyay		sin(ky*ay) - for better performance
// float coskyay		cos(ky*ay) - for better performance
{
// Threadsize depends on local (cache) memory.
// My cards have only 16kB per block.
// Each floating point takes 4 bits x 2 for complex numbers
// Therefore, 32*32*8 = 8kB is already quite a lot.
// XXX TRY LARGER VALUES
// XXX 45x45*8 = 16200 < 16384
int threadsPerBlockX = SHARED_SIZE_PRIMARY;
int threadsPerBlockY = SHARED_SIZE_SECONDARY;
int overhead = 2;
int effectiveThreadsPerBlock = threadsPerBlockX - 2*overhead;
int blocksPerGridX = (Nx + effectiveThreadsPerBlock - 1) / effectiveThreadsPerBlock;
int blocksPerGridY = (Ny + effectiveThreadsPerBlock - 1) / effectiveThreadsPerBlock;
dim3 threadsPerBlock(threadsPerBlockX, threadsPerBlockY);
dim3 blocksPerGrid(blocksPerGridX, blocksPerGridY);
int Nbrofits = (int) (t/deltat);
float Remainingtime=t-((float)Nbrofits)*deltat;
//int *inputarray = (int*) malloc(blocksPerGridX*blocksPerGridY*sizeof(int));
//for (int i=0; i<blocksPerGridX*blocksPerGridY; i++) inputarray[i] = 0;
float *prepared_energias = (float*) malloc(2*Nx*Ny*sizeof(float));
for (int i=0; i<Nx*Ny; i++)
{
prepared_energias[2*i] = cos(-deltat*(energias[i]-2.0*Vx-2.0*Vy));
prepared_energias[2*i+1] = sin(-deltat*(energias[i]-2.0*Vx-2.0*Vy));
//prepared_energias[2*i] = 1.0;
//prepared_energias[2*i+1] = 0.0;
}
float *d_ket0;
float *d_ket1;
float *d_prepared_energias;
//int *d_inputarray;
cudaMalloc((void**)&d_ket0, 2*Nx*Ny*sizeof(float));
cudaMalloc((void**)&d_ket1, 2*Nx*Ny*sizeof(float));
cudaMalloc((void**)&d_prepared_energias, 2*Nx*Ny*sizeof(float));
//cudaMalloc((void**)&d_inputarray, blocksPerGridX*blocksPerGridY*sizeof(int));
cudaMemcpy(d_ket0, ket, 2*Nx*Ny*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ket1, ket, 2*Nx*Ny*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_prepared_energias, prepared_energias, 2*Nx*Ny*sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_inputarray, inputarray, blocksPerGridX*blocksPerGridY*sizeof(int), cudaMemcpyHostToDevice);
bool DoRemainder;
if (Remainingtime/deltat > 0.001) DoRemainder = true;
else DoRemainder = false;
float dt;
float dthalf;
dt = deltat;
dthalf = deltat/2.0;
int dataketid=0;
//printf("%f %d %d %d %d %f %f %f %f %f %f %f %f\n", dt, pbcx, pbcy, Nx, Ny, Vx, Vy, sinkxax, coskxax, sinkyay, coskyay);
for (int i=0; i<Nbrofits; i++)
//for (int i=0; i<1; i++)
{
H2d_step<<<blocksPerGrid, threadsPerBlock>>>(d_ket0, d_ket1, dataketid, dt, d_prepared_energias, pbcx, pbcy, Nx, Ny, Vx, Vy, sinkxax, coskxax, sinkyay, coskyay);
cudaThreadSynchronize();
dataketid=1-dataketid;
}
// if (DoRemainder)
// {
// dt = Remainingtime;
// H2d_step<<<blocksPerGrid, threadsPerBlock>>>(d_ket, dt, energias, pbcx, pbcy, Nx, Ny, Vx, Vy, sinkxax, coskxax, sinkyay, coskyay); // x-axis, odd
// }
if (dataketid == 0) cudaMemcpy(ket, d_ket0, 2*Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost);
else cudaMemcpy(ket, d_ket1, 2*Nx*Ny*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_ket0);
cudaFree(d_ket1);
cudaFree(d_prepared_energias);
} }}}
/*
 * H2d_step: one full symmetric (second-order Trotter) time step dt of the
 * 2D lattice Hamiltonian, entirely inside one kernel launch.
 *
 * The step is applied as a palindromic sequence of half-step pair rotations:
 *   x even pairs, x odd pairs, y even pairs, y odd pairs,
 *   full diagonal phase (from prepared_energias),
 *   y odd pairs, y even pairs, x odd pairs, x even pairs,
 * each hopping rotation using deltat/2 (costx/sintx, costy/sinty).
 *
 * Ping-pong buffers: reads from ket0 when dataketid==0 (else ket1) and
 * writes the result to the *other* buffer.  Each block stages a
 * SHARED_SIZE_PRIMARY x SHARED_SIZE_PRIMARY complex tile in shared memory,
 * including a 2-cell halo on every side; only the interior is written back.
 * Loads wrap periodically via the %Nx / %Ny index arithmetic (gpospbc*);
 * open boundaries are NOT implemented (see the commented-out block below).
 * Thread layout: threadIdx.x spans one tile axis, threadIdx.y covers the
 * other axis two cells at a time (SHARED_SIZE_SECONDARY is presumably
 * SHARED_SIZE_PRIMARY/2 — TODO confirm against the macro definitions).
 */
__global__ void H2d_step(float *ket0, float *ket1, int dataketid, float deltat, float* prepared_energias, int pbcx, int pbcy, int Nx, int Ny, float Vx, float Vy, float sinkxax, float coskxax, float sinkyay, float coskyay) {{{
{
// Two ket positions on each side need to be calculated for
// intermediate steps but cannot be returned as result
int overhead = 2;
// The number of threads to be returned from this block
// is the number of input threads minus the overhead on each side
// Note that the number of ket elements will be (twice=complex) the
// square of this number
//int effectiveSize = SHARED_SIZE_PRIMARY - 2*overhead;
// The local storage block has the same size as the tile handled by this
// block: SHARED_SIZE_PRIMARY^2 complex values, interleaved (re,im).
__shared__ float shared_ket[2*SHARED_SIZE_PRIMARY*SHARED_SIZE_PRIMARY];
// Global origin of this block's tile (shifted left/down by the halo).
int globalx0 = (SHARED_SIZE_PRIMARY-2*overhead)*blockIdx.x - overhead;
int globaly0 = (SHARED_SIZE_PRIMARY-2*overhead)*blockIdx.y - overhead;
// Unwrapped global index used for the write-back (guarded below so it is
// only used when it is in range); pbc0/pbc1 are the periodically wrapped
// indices of the two y-adjacent cells this thread loads.
int globalid = ((globalx0 + threadIdx.x))*Ny + ((globaly0+2*threadIdx.y));
int globalidpbc0 = ((Nx + globalx0 + threadIdx.x)%Nx)*Ny + ((Ny+globaly0+2*threadIdx.y)%Ny);
int globalidpbc1 = ((Nx + globalx0 + threadIdx.x)%Nx)*Ny + ((Ny+globaly0+2*threadIdx.y+1)%Ny);
int sharedid = threadIdx.x*SHARED_SIZE_PRIMARY + 2*threadIdx.y;
int spos = 2*(sharedid);
int gpos = 2*(globalid);
int gpospbc0 = 2*(globalidpbc0);
int gpospbc1 = 2*(globalidpbc1);
// --- stage 0: load two complex cells per thread from the current buffer ---
if ( dataketid == 0 )
{
shared_ket[spos] = ket0[gpospbc0];
shared_ket[spos+1] = ket0[gpospbc0+1];
shared_ket[spos+2] = ket0[gpospbc1];
shared_ket[spos+3] = ket0[gpospbc1+1];
} else {
shared_ket[spos] = ket1[gpospbc0];
shared_ket[spos+1] = ket1[gpospbc0+1];
shared_ket[spos+2] = ket1[gpospbc1];
shared_ket[spos+3] = ket1[gpospbc1+1];
}
// if (!(pbcx)) // XXX NOT IMPLEMENTED YET
// if (!(pbcy)) // XXX NOT IMPLEMENTED YET
// {
// shared_ket[spos] = 0.0;
// shared_ket[spos+1] = 0.0;
// shared_ket[spos+2] = 0.0;
// shared_ket[spos+3] = 0.0;
// }
__syncthreads();
int mx, my;
float temp1re, temp1im, temp2re, temp2im;
float ketre, ketim;
int cap, cap_p;
float ketre_p, ketim_p;
// Hopping rotations use the HALF step so the palindromic sequence below
// composes to one full dt (second-order splitting).
float deltathalf = deltat/2.0;
float costx=(float) cos(deltathalf*Vx);
float sintx=(float) sin(deltathalf*Vx);
float costy=(float) cos(deltathalf*Vy);
float sinty=(float) sin(deltathalf*Vy);
int rap;
float kre, kim, tmp, costmp, sintmp;
// --- stage 1: half-step on x "even" pairs (mx, mx+1), mx = 2*threadIdx.y ---
// (here threadIdx.x walks the y/column index of the tile)
my = threadIdx.x;
mx = 2*threadIdx.y;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*((mx+1)*SHARED_SIZE_PRIMARY+my);
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
// temp1=costx*ket(x,y) - i sintx ket(x+1,y) exp(i ax kx)
temp1re=costx*ketre+sintx*(ketim_p*coskxax+ketre_p*sinkxax);
temp1im=costx*ketim-sintx*(ketre_p*coskxax-ketim_p*sinkxax);
// temp2=- i sintx ket(x,y) exp(-i ax kx) + costx*ket(x+1,y);
temp2re=sintx*(-ketre*sinkxax+ketim*coskxax) + costx*ketre_p;
temp2im=sintx*(-ketim*sinkxax-ketre*coskxax) + costx*ketim_p;
//ket(x,y) = (costx-i*sintx)*temp1
shared_ket[cap]=costx*temp1re+sintx*temp1im;
shared_ket[cap+1]=costx*temp1im-sintx*temp1re;
//ket(x+1,y) = (costx-i*sintx)*temp2
shared_ket[cap_p]=costx*temp2re+sintx*temp2im;
shared_ket[cap_p+1]=costx*temp2im-sintx*temp2re;
__syncthreads();
// --- stage 2: half-step on x "odd" pairs, mx = 2*threadIdx.y+1 ---
// (last y-thread has no partner row inside the tile, hence the guard)
if (threadIdx.y < SHARED_SIZE_SECONDARY-1)
{
mx = 2*threadIdx.y+1;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*((mx+1)*SHARED_SIZE_PRIMARY+my);
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costx*ketre+sintx*(ketim_p*coskxax+ketre_p*sinkxax);
temp1im=costx*ketim-sintx*(ketre_p*coskxax-ketim_p*sinkxax);
temp2re=sintx*(-ketre*sinkxax+ketim*coskxax) + costx*ketre_p;
temp2im=sintx*(-ketim*sinkxax-ketre*coskxax) + costx*ketim_p;
shared_ket[cap]=costx*temp1re+sintx*temp1im;
shared_ket[cap+1]=costx*temp1im-sintx*temp1re;
shared_ket[cap_p]=costx*temp2re+sintx*temp2im;
shared_ket[cap_p+1]=costx*temp2im-sintx*temp2re;
}
__syncthreads();
// --- stage 3: half-step on y "even" pairs (my, my+1), my = 2*threadIdx.y ---
// (now threadIdx.x walks the x/row index of the tile)
mx = threadIdx.x;
my = 2*threadIdx.y;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*(mx*SHARED_SIZE_PRIMARY+(my+1));
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costy*ketre+sinty*(ketim_p*coskyay+ketre_p*sinkyay);
temp1im=costy*ketim-sinty*(ketre_p*coskyay-ketim_p*sinkyay);
temp2re=sinty*(-ketre*sinkyay+ketim*coskyay) + costy*ketre_p;
temp2im=sinty*(-ketim*sinkyay-ketre*coskyay) + costy*ketim_p;
shared_ket[cap]=costy*temp1re+sinty*temp1im;
shared_ket[cap+1]=costy*temp1im-sinty*temp1re;
shared_ket[cap_p]=costy*temp2re+sinty*temp2im;
shared_ket[cap_p+1]=costy*temp2im-sinty*temp2re;
__syncthreads();
// --- stage 4: half-step on y "odd" pairs, my = 2*threadIdx.y+1 ---
if (threadIdx.y < SHARED_SIZE_SECONDARY-1)
{
my = 2*threadIdx.y+1;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*(mx*SHARED_SIZE_PRIMARY+(my+1));
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costy*ketre+sinty*(ketim_p*coskyay+ketre_p*sinkyay);
temp1im=costy*ketim-sinty*(ketre_p*coskyay-ketim_p*sinkyay);
temp2re=sinty*(-ketre*sinkyay+ketim*coskyay) + costy*ketre_p;
temp2im=sinty*(-ketim*sinkyay-ketre*coskyay) + costy*ketim_p;
shared_ket[cap]=costy*temp1re+sinty*temp1im;
shared_ket[cap+1]=costy*temp1im-sinty*temp1re;
shared_ket[cap_p]=costy*temp2re+sinty*temp2im;
shared_ket[cap_p+1]=costy*temp2im-sinty*temp2re;
}
__syncthreads();
// --- stage 5 (center of the palindrome): full diagonal phase rotation ---
// prepared_energias holds per-site (cos, sin) of -dt*(E - 2Vx - 2Vy),
// indexed through the periodically wrapped global site index.
for (int id=0; id<2; id++)
{
my = 2*threadIdx.y+id;
rap=2*(((Nx+globalx0+mx)%Nx)*Ny+((globaly0+my+Ny)%Ny));
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
//tmp=-deltat*(energias[rap]-2.0*Vx-2.0*Vy);
//costmp=(float) cos(tmp);
//sintmp=(float) sin(tmp);
costmp=prepared_energias[rap];
sintmp=prepared_energias[rap+1];
kre = ketre*costmp - ketim*sintmp;
kim = ketim*costmp + ketre*sintmp;
shared_ket[cap] = kre;
shared_ket[cap+1] =kim;
}
__syncthreads();
// --- stages 4'/3'/2'/1': mirror of the first half (y odd, y even,
// x odd, x even), completing the symmetric second-order step ---
if (threadIdx.y < SHARED_SIZE_SECONDARY-1)
{
my = 2*threadIdx.y+1;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*(mx*SHARED_SIZE_PRIMARY+(my+1));
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costy*ketre+sinty*(ketim_p*coskyay+ketre_p*sinkyay);
temp1im=costy*ketim-sinty*(ketre_p*coskyay-ketim_p*sinkyay);
temp2re=sinty*(-ketre*sinkyay+ketim*coskyay) + costy*ketre_p;
temp2im=sinty*(-ketim*sinkyay-ketre*coskyay) + costy*ketim_p;
shared_ket[cap]=costy*temp1re+sinty*temp1im;
shared_ket[cap+1]=costy*temp1im-sinty*temp1re;
shared_ket[cap_p]=costy*temp2re+sinty*temp2im;
shared_ket[cap_p+1]=costy*temp2im-sinty*temp2re;
}
__syncthreads();
my = 2*threadIdx.y;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*(mx*SHARED_SIZE_PRIMARY+(my+1));
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costy*ketre+sinty*(ketim_p*coskyay+ketre_p*sinkyay);
temp1im=costy*ketim-sinty*(ketre_p*coskyay-ketim_p*sinkyay);
temp2re=sinty*(-ketre*sinkyay+ketim*coskyay) + costy*ketre_p;
temp2im=sinty*(-ketim*sinkyay-ketre*coskyay) + costy*ketim_p;
shared_ket[cap]=costy*temp1re+sinty*temp1im;
shared_ket[cap+1]=costy*temp1im-sinty*temp1re;
shared_ket[cap_p]=costy*temp2re+sinty*temp2im;
shared_ket[cap_p+1]=costy*temp2im-sinty*temp2re;
__syncthreads();
// back to x orientation: threadIdx.x walks columns again
my = threadIdx.x;
if (threadIdx.y < SHARED_SIZE_SECONDARY-1)
{
mx = 2*threadIdx.y+1;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*((mx+1)*SHARED_SIZE_PRIMARY+my);
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costx*ketre+sintx*(ketim_p*coskxax+ketre_p*sinkxax);
temp1im=costx*ketim-sintx*(ketre_p*coskxax-ketim_p*sinkxax);
temp2re=sintx*(-ketre*sinkxax+ketim*coskxax) + costx*ketre_p;
temp2im=sintx*(-ketim*sinkxax-ketre*coskxax) + costx*ketim_p;
shared_ket[cap]=costx*temp1re+sintx*temp1im;
shared_ket[cap+1]=costx*temp1im-sintx*temp1re;
shared_ket[cap_p]=costx*temp2re+sintx*temp2im;
shared_ket[cap_p+1]=costx*temp2im-sintx*temp2re;
}
__syncthreads();
mx = 2*threadIdx.y;
cap=2*(mx*SHARED_SIZE_PRIMARY+my);
cap_p=2*((mx+1)*SHARED_SIZE_PRIMARY+my);
ketre=shared_ket[cap];
ketim=shared_ket[cap+1];
ketre_p=shared_ket[cap_p];
ketim_p=shared_ket[cap_p+1];
temp1re=costx*ketre+sintx*(ketim_p*coskxax+ketre_p*sinkxax);
temp1im=costx*ketim-sintx*(ketre_p*coskxax-ketim_p*sinkxax);
temp2re=sintx*(-ketre*sinkxax+ketim*coskxax) + costx*ketre_p;
temp2im=sintx*(-ketim*sinkxax-ketre*coskxax) + costx*ketim_p;
shared_ket[cap]=costx*temp1re+sintx*temp1im;
shared_ket[cap+1]=costx*temp1im-sintx*temp1re;
shared_ket[cap_p]=costx*temp2re+sintx*temp2im;
shared_ket[cap_p+1]=costx*temp2im-sintx*temp2re;
__syncthreads();
// --- write-back: interior cells only (halo discarded), into the OTHER
// ping-pong buffer.  The first guard keeps the thread inside the tile
// interior (which also makes the unwrapped gpos non-negative); the second
// clips the last partial blocks at the lattice edge.
if (threadIdx.x>=overhead && 2*threadIdx.y>=overhead && threadIdx.x<SHARED_SIZE_PRIMARY-overhead && 2*threadIdx.y<SHARED_SIZE_PRIMARY-overhead-1)
{
if ( (globalx0+threadIdx.x < Nx) && (globaly0+2*threadIdx.y<Ny-1) )
{
if ( dataketid == 0 )
{
ket1[gpos] = shared_ket[spos];
ket1[gpos+1] = shared_ket[spos+1];
ket1[gpos+2] = shared_ket[spos+2];
ket1[gpos+3] = shared_ket[spos+3];
} else
{
ket0[gpos] = shared_ket[spos];
ket0[gpos+1] = shared_ket[spos+1];
ket0[gpos+2] = shared_ket[spos+2];
ket0[gpos+3] = shared_ket[spos+3];
}
}
}
} }}}
/* One checkerboard sweep of the x-direction hopping term over the full ket.
 *
 * The x lattice is decomposed into nearest-neighbour pairs:
 *   id == 1 : pairs (0,1), (2,3), ...                      ("even" bonds)
 *   id == 2 : pairs (1,2), (3,4), ... plus the wrap bond
 *             (Nx-1, 0) when pbcx is set                   ("odd" bonds)
 * Each thread owns one column my and rotates every pair in that column by
 * the 2x2 unitary exp(-i*deltat*Vx*sigma), including the Bloch phase
 * exp(+/- i kx ax) supplied via sinkxax/coskxax.
 *
 * ket is the interleaved (re,im) wavefunction of 2*Nx*Ny floats, row-major:
 * site (mx,my) lives at ket[2*(mx*Ny+my)].
 *
 * Fix vs. the previous revision: Nstart/Nfinal were left uninitialized for
 * any id other than 1 or 2; they now default to an empty sweep.
 */
__global__ void H2d_x(float *ket, float deltat, int id, int pbcx, int Nx, int Ny, float Vx, float sinkxax, float coskxax)
{
    int my = blockDim.x * blockIdx.x + threadIdx.x;
    /* Default to an empty sweep so an unexpected id cannot leave the loop
     * bounds indeterminate. */
    int Nstart = 0;
    int Nfinal = 0;
    if (id == 1) { Nstart = 0; Nfinal = Nx - 1; }
    if (id == 2) { Nstart = 1; Nfinal = Nx - 2; }
    float cost = (float) cos(deltat * Vx);
    float sint = (float) sin(deltat * Vx);
    float temp1re, temp1im, temp2re, temp2im;
    float ketre, ketim, ketre_p, ketim_p;
    int cap, cap_p;
    if (my < Ny)
    {
        for (int mx = Nstart; mx < Nfinal; mx += 2)
        {
            cap   = 2 * (mx * Ny + my);
            cap_p = 2 * ((mx + 1) * Ny + my);
            ketre   = ket[cap];
            ketim   = ket[cap + 1];
            ketre_p = ket[cap_p];
            ketim_p = ket[cap_p + 1];
            // temp1 = cost*ket(x,y) - i*sint*ket(x+1,y)*exp(+i kx ax)
            temp1re = cost*ketre + sint*(ketim_p*coskxax + ketre_p*sinkxax);
            temp1im = cost*ketim - sint*(ketre_p*coskxax - ketim_p*sinkxax);
            // temp2 = -i*sint*ket(x,y)*exp(-i kx ax) + cost*ket(x+1,y)
            temp2re = sint*(-ketre*sinkxax + ketim*coskxax) + cost*ketre_p;
            temp2im = sint*(-ketim*sinkxax - ketre*coskxax) + cost*ketim_p;
            // ket(x,y)   = (cost - i*sint)*temp1
            ket[cap]     = cost*temp1re + sint*temp1im;
            ket[cap + 1] = cost*temp1im - sint*temp1re;
            // ket(x+1,y) = (cost - i*sint)*temp2
            ket[cap_p]     = cost*temp2re + sint*temp2im;
            ket[cap_p + 1] = cost*temp2im - sint*temp2re;
        }
        // Wrap-around bond (Nx-1, 0): closes the odd sweep under periodic
        // boundary conditions.
        if (pbcx && id == 2)
        {
            cap   = 2 * ((Nx - 1) * Ny + my);
            cap_p = 2 * my;
            ketre   = ket[cap];
            ketim   = ket[cap + 1];
            ketre_p = ket[cap_p];
            ketim_p = ket[cap_p + 1];
            temp1re = cost*ketre + sint*(ketim_p*coskxax + ketre_p*sinkxax);
            temp1im = cost*ketim - sint*(ketre_p*coskxax - ketim_p*sinkxax);
            temp2re = sint*(-ketre*sinkxax + ketim*coskxax) + cost*ketre_p;
            temp2im = sint*(-ketim*sinkxax - ketre*coskxax) + cost*ketim_p;
            ket[cap]       = cost*temp1re + sint*temp1im;
            ket[cap + 1]   = cost*temp1im - sint*temp1re;
            ket[cap_p]     = cost*temp2re + sint*temp2im;
            ket[cap_p + 1] = cost*temp2im - sint*temp2re;
        }
    }
}
/* One checkerboard sweep of the y-direction hopping term over the full ket.
 *
 * Mirror of H2d_x along the other axis:
 *   id == 1 : pairs (0,1), (2,3), ...                      ("even" bonds)
 *   id == 2 : pairs (1,2), (3,4), ... plus the wrap bond
 *             (Ny-1, 0) when pbcy is set                   ("odd" bonds)
 * Each thread owns one row mx and rotates every y pair in that row by
 * exp(-i*deltat*Vy*sigma) with the Bloch phase exp(+/- i ky ay).
 *
 * ket layout: interleaved (re,im), site (mx,my) at ket[2*(mx*Ny+my)].
 *
 * Fix vs. the previous revision: Nstart/Nfinal were left uninitialized for
 * any id other than 1 or 2; they now default to an empty sweep.
 */
__global__ void H2d_y(float *ket, float deltat, int id, int pbcy, int Nx, int Ny, float Vy, float sinkyay, float coskyay)
{
    int mx = blockDim.x * blockIdx.x + threadIdx.x;
    /* Default to an empty sweep for unexpected id values. */
    int Nstart = 0;
    int Nfinal = 0;
    if (id == 1) { Nstart = 0; Nfinal = Ny - 1; }
    if (id == 2) { Nstart = 1; Nfinal = Ny - 2; }
    float cost = (float) cos(deltat * Vy);
    float sint = (float) sin(deltat * Vy);
    float temp1re, temp1im, temp2re, temp2im;
    float ketre, ketim, ketre_p, ketim_p;
    int cap, cap_p;
    if (mx < Nx)
    {
        for (int my = Nstart; my < Nfinal; my += 2)
        {
            cap   = 2 * (mx * Ny + my);
            cap_p = 2 * (mx * Ny + (my + 1));
            ketre   = ket[cap];
            ketim   = ket[cap + 1];
            ketre_p = ket[cap_p];
            ketim_p = ket[cap_p + 1];
            // temp1 = cost*ket(x,y) - i*sint*ket(x,y+1)*exp(+i ky ay)
            temp1re = cost*ketre + sint*(ketim_p*coskyay + ketre_p*sinkyay);
            temp1im = cost*ketim - sint*(ketre_p*coskyay - ketim_p*sinkyay);
            // temp2 = -i*sint*ket(x,y)*exp(-i ky ay) + cost*ket(x,y+1)
            temp2re = sint*(-ketre*sinkyay + ketim*coskyay) + cost*ketre_p;
            temp2im = sint*(-ketim*sinkyay - ketre*coskyay) + cost*ketim_p;
            // ket(x,y)   = (cost - i*sint)*temp1
            ket[cap]     = cost*temp1re + sint*temp1im;
            ket[cap + 1] = cost*temp1im - sint*temp1re;
            // ket(x,y+1) = (cost - i*sint)*temp2
            ket[cap_p]     = cost*temp2re + sint*temp2im;
            ket[cap_p + 1] = cost*temp2im - sint*temp2re;
        }
        // Wrap-around bond (Ny-1, 0) for periodic boundaries on the odd sweep.
        if (pbcy && id == 2)
        {
            cap   = 2 * (mx * Ny + Ny - 1);
            cap_p = 2 * (mx * Ny);
            ketre   = ket[cap];
            ketim   = ket[cap + 1];
            ketre_p = ket[cap_p];
            ketim_p = ket[cap_p + 1];
            temp1re = cost*ketre + sint*(ketim_p*coskyay + ketre_p*sinkyay);
            temp1im = cost*ketim - sint*(ketre_p*coskyay - ketim_p*sinkyay);
            temp2re = sint*(-ketre*sinkyay + ketim*coskyay) + cost*ketre_p;
            temp2im = sint*(-ketim*sinkyay - ketre*coskyay) + cost*ketim_p;
            ket[cap]       = cost*temp1re + sint*temp1im;
            ket[cap + 1]   = cost*temp1im - sint*temp1re;
            ket[cap_p]     = cost*temp2re + sint*temp2im;
            ket[cap_p + 1] = cost*temp2im - sint*temp2re;
        }
    }
}
/* Apply the diagonal (on-site) part of the Hamiltonian: each lattice site
 * (mx,my) is multiplied by the phase exp(-i*deltat*(E(mx,my) - 2Vx - 2Vy)).
 * One thread handles an entire x-row, sweeping all Ny sites in that row.
 * ket layout: interleaved (re,im), site (mx,my) at ket[2*(mx*Ny+my)]. */
__global__ void Hdiag2d(float *ket, float deltat, float *energias, int Nx, int Ny, float Vx, float Vy)
{
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    if (ix >= Nx) return;
    for (int iy = 0; iy < Ny; iy++)
    {
        int site = ix * Ny + iy;
        int re = 2 * site;
        // phase angle includes the -2Vx-2Vy hopping offset, as in the
        // splitting used elsewhere in this file
        float ang = -deltat * (energias[site] - 2.0 * Vx - 2.0 * Vy);
        float c = (float) cos(ang);
        float s = (float) sin(ang);
        float vre = ket[re];
        float vim = ket[re + 1];
        // complex multiply (vre + i*vim) * (c + i*s)
        ket[re]     = vre * c - vim * s;
        ket[re + 1] = vim * c + vre * s;
    }
}
// } } }
// commented main
/* Print the option summary shown on a bad command line. */
static void print_usage(void)
{
printf("Cloaking of matter waves in 2D. Wrong parameters.\n");
printf("Options: \n");
printf(" System:\n");
printf(" -nx -ny Points in X and Y direction\n");
printf(" -Lx -Ly Size in X and Y direction (system is from -L to +L)\n");
printf(" -lambda Boson-boson interaction strength\n");
printf(" -R1 -R2 Internal and external cloak radius\n");
printf(" -target target energy (factor from chemical potential)\n");
printf(" -alpha Intensity of perturbation at origin\n");
printf(" -beta 1/Radius^2 of perturbation at origin\n");
printf(" -pbcx -pbcy Periodic boundary conditions\n");
printf(" Wave packet:\n" );
printf(" -x0 -y0 Initial position\n");
printf(" -sx -sy Position dispersion (sigmas)\n");
printf(" -px0 -py0 Initial momentum\n");
printf(" Simulation:\n" );
printf(" -time Evolution time\n");
printf(" -dt Time step\n");
printf(" -order Algorithm order (default 2)\n");
printf(" Input/Output:\n" );
printf(" -file Output file base name\n");
printf(" -format Output file format (DT, TP)\n");
printf(" -pars Parameters file (all other input is ignored)\n");
printf(" -dir Input/Output directory\n");
printf(" -V Verbose\n");
printf(" -debug Debug\n");
}
/* Return the value string of option argv[*n] and advance *n past the option
 * and its value.  Exits with a message when the value is missing — the old
 * parser left n unchanged in that case and spun forever. */
static char *opt_value(int argc, char *argv[], int *n, int debug)
{
if (*n + 1 >= argc)
{
printf("ERROR: option %s requires an argument\n", argv[*n]);
exit(1);
}
if (debug) printf("%s found at position %i, argument is %s\n", argv[*n], *n, argv[*n + 1]);
*n += 2;
return argv[*n - 1];
}
/* Wave-packet propagation on a 2D lattice (GPU Suzuki-Trotter integrator).
 *
 * Parses the command line, builds a Gaussian initial wave packet and a
 * hard-disk potential perturbation at the origin, reports the chemical
 * potential, then writes Itot+1 snapshots of the evolving ket to the binary
 * file "GPU_Suzuki.ket" (header: iteration count, Nx, Ny as ints, followed
 * by interleaved re/im floats per snapshot).
 *
 * Fixes vs. the previous revision:
 *  - an option given without its value no longer hangs the parser;
 *  - the unknown-option message printed "$s" instead of "%s";
 *  - potential[] is now zeroed outside the perturbation disk (it was read
 *    uninitialized when copied into Et);
 *  - the chemical-potential loop no longer reads phi0 out of bounds through
 *    the bogus wrap indices mxm=Nx / mym=Ny (neighbours are always in range
 *    because the loop excludes the boundary rows/columns);
 *  - the output fopen() is checked.
 */
int main(int argc, char *argv[])
{
// ---- defaults (overridable from the command line) ----
int debug = 0;
float hbar = 1.0;            // units with hbar = 1
int Nx = 400;                // lattice points in x
int Ny = 400;                // lattice points in y
float dNx = (float) Nx;
float dNy = (float) Ny;
float Lx = 20.0;             // half-size: system spans [-Lx, +Lx]
float Ly = 20.0;
int pbcx = 1;                // periodic boundaries (1 = on)
int pbcy = 1;
float lambda = 0.0;          // parsed but currently unused
float innerR = 0.0;          // parsed but currently unused
float outerR = 4.0;          // parsed but currently unused
float alpha = 100.0;         // perturbation height at the origin
float beta = 20.0;           // perturbation disk radius (sites with r < beta)
float x0 = -12.0;            // initial packet centre (x)
float y0 = 0.0;              // parsed but currently unused
float sx = 3.0;              // packet width (x)
float sy = 3.0;              // parsed but currently unused
float px0 = 0.0;             // initial momentum
float py0 = 0.0;
float dt = 0.001;            // time step
int order = 2;               // integrator order
char *file = "test";         // parsed but currently unused
char *dir = "data";          // parsed but currently unused
int verbose = 0;
char *form = "DT";           // parsed but currently unused
int pars = 0;                // parsed but currently unused
float cloakEnergy = 0.0;     // parsed but currently unused
float mass0 = 0.5;           // effective mass used in the mu estimate
float tmax = 10.0;           // evolution time per output frame
char *filepars = 0;          // parsed but currently unused
float ax;                    // lattice spacing in x
float ay;                    // lattice spacing in y
// ---- command-line parsing ----
int n = 1;
while (n < argc)
{
if (strcmp(argv[n], "-pars") == 0) { filepars = opt_value(argc, argv, &n, debug); pars = 1; }
else if (strcmp(argv[n], "-nx") == 0) { Nx = atoi(opt_value(argc, argv, &n, debug)); dNx = (float) Nx; }
else if (strcmp(argv[n], "-ny") == 0) { Ny = atoi(opt_value(argc, argv, &n, debug)); dNy = (float) Ny; }
else if (strcmp(argv[n], "-Lx") == 0) Lx = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-Ly") == 0) Ly = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-lambda") == 0) lambda = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-R1") == 0) innerR = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-R2") == 0) outerR = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-target") == 0) cloakEnergy = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-alpha") == 0) alpha = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-beta") == 0) beta = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-x0") == 0) x0 = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-y0") == 0) y0 = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-sx") == 0) sx = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-sy") == 0) sy = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-px0") == 0) px0 = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-py0") == 0) py0 = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-dt") == 0) dt = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-order") == 0) order = atoi(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-time") == 0) tmax = (float) atof(opt_value(argc, argv, &n, debug));
else if (strcmp(argv[n], "-file") == 0) file = opt_value(argc, argv, &n, debug);
else if (strcmp(argv[n], "-format") == 0) form = opt_value(argc, argv, &n, debug);
else if (strcmp(argv[n], "-dir") == 0) dir = opt_value(argc, argv, &n, debug);
else if (strcmp(argv[n], "-V") == 0) { if (debug) printf("-verbose found at position %i\n", n); verbose = 1; n++; }
else if (strcmp(argv[n], "-pbcx") == 0) { if (debug) printf("-pbcx found at position %i\n", n); pbcx = 1; n++; }
else if (strcmp(argv[n], "-pbcy") == 0) { if (debug) printf("-pbcy found at position %i\n", n); pbcy = 1; n++; }
else if (strcmp(argv[n], "-debug") == 0) { debug = 1; printf("-debug found at position %i; all subsequent arguments are listed\n", n); n++; }
else
{
printf("ERROR: Wrong argument at position %i: %s\n", n, argv[n]);   /* was "$s" */
print_usage();
exit(1);
}
}
// ---- lattice spacings and hopping amplitudes ----
if (pbcx) ax = 2.0*Lx/dNx; else ax = 2.0*Lx/(dNx+1.0);
if (pbcy) ay = 2.0*Ly/dNy; else ay = 2.0*Ly/(dNy+1.0);
float Vx = -hbar*hbar/(2.0*ax*ax);
float Vy = -hbar*hbar/(2.0*ay*ay);
float kx = 0.0;   // Bloch momentum (rotx*pi/Lx); currently zero
float ky = 0.0;   // Bloch momentum (roty*pi/Ly); currently zero
// ---- memory allocation ----
if (debug) printf("Allocating memory\n");
int MatrixSize = Nx*Ny;
float *phi0 = (float*) malloc(2*MatrixSize*sizeof(float));
if (phi0 == 0) { printf("ERROR: memory of variable phi0 could not be allocated.\n"); exit(1); }
float *phit = (float*) malloc(2*MatrixSize*sizeof(float));
if (phit == 0) { printf("ERROR: memory of variable phit could not be allocated.\n"); exit(1); }
float *potential = (float*) malloc(MatrixSize*sizeof(float));
if (potential == 0) { printf("ERROR: memory of variable potential could not be allocated.\n"); exit(1); }
float *Et = (float*) malloc(MatrixSize*sizeof(float));
if (Et == 0) { printf("ERROR: memory of variable Et could not be allocated.\n"); exit(1); }
float *pert = (float*) malloc(MatrixSize*sizeof(float));   // reserved; never written — kept for layout compatibility
if (pert == 0) { printf("ERROR: memory of variable pert could not be allocated.\n"); exit(1); }
// ---- initial conditions: Gaussian packet (with periodic x images) and
// a hard-disk potential of height alpha for r < beta ----
int mx, my, carraypos, rarraypos;
float x, y, x2, y2, dx, dx2, sx2, exp_local, exp_xplus, exp_xminus, r2;
sx2 = sx*sx;
for (mx = 0; mx < Nx; mx++)
{
x = ((float)mx)*ax - Lx;
x2 = x*x;
dx = x - x0;
dx2 = dx*dx;
exp_local  = ((float) exp(-0.5*dx2/sx2));
exp_xminus = ((float) exp(-0.5*(dx-2.0*Lx)*(dx-2.0*Lx)/sx2));
exp_xplus  = ((float) exp(-0.5*(dx+2.0*Lx)*(dx+2.0*Lx)/sx2));
for (my = 0; my < Ny; my++)
{
y = ((float)my)*ay - Ly;
y2 = y*y;
rarraypos = mx*Ny + my;
carraypos = 2*rarraypos;
float phi0re = exp_local*((float) cos(px0*x + py0*y));
float phi0im = exp_local*((float) sin(px0*x + py0*y));
if (pbcx)
{
// add the two nearest periodic x images so the packet is smooth
// across the boundary
phi0re += exp_xminus*((float) cos(px0*(x-2.0*Lx) + py0*y));
phi0re += exp_xplus *((float) cos(px0*(x+2.0*Lx) + py0*y));
phi0im += exp_xminus*((float) sin(px0*(x-2.0*Lx) + py0*y));
phi0im += exp_xplus *((float) sin(px0*(x+2.0*Lx) + py0*y));
}
// (periodic y images were never implemented; the packet has py0=0 by default)
phi0[carraypos]   = phi0re;
phi0[carraypos+1] = phi0im;
r2 = x2 + y2;
// zero outside the disk (was left uninitialized before)
potential[rarraypos] = (r2 < beta*beta) ? alpha : 0.0f;
}
}
// ---- normalize the packet to unit norm ----
float rtemp = 0.0;
float phi0re, phi0re2, phi0im, phi0im2;
for (mx = 0; mx < Nx; mx++)
{
for (my = 0; my < Ny; my++)
{
carraypos = 2*(mx*Ny + my);
phi0re = phi0[carraypos];
phi0re2 = phi0re*phi0re;
phi0im = phi0[carraypos+1];
phi0im2 = phi0im*phi0im;
rtemp += ax*ay*(phi0re2 + phi0im2);
}
}
float sqr_rtemp = sqrt(rtemp);
for (mx = 0; mx < Nx; mx++)
{
for (my = 0; my < Ny; my++)
{
carraypos = 2*(mx*Ny + my);
phi0[carraypos]   *= 1.0/sqr_rtemp;
phi0[carraypos+1] *= 1.0/sqr_rtemp;
}
}
// ---- chemical potential <phi|H_hop|phi> over interior sites ----
// The loop excludes the boundary rows/columns, so all four neighbours are
// always in range; the old wrap assignments (mxm=Nx / mym=Ny) triggered at
// mx==1 / my==1 and read past the end of phi0 — removed.
float ctempre = 0.0;
float ctempim = 0.0;
if (verbose) printf("Computing chemical potential\n");
float prefact = ax*ay;   // loop-invariant integration weight
float cVx, cVy;
for (mx = 1; mx < (Nx-1); mx++)
{
for (my = 1; my < (Ny-1); my++)
{
int mxp = mx + 1;
int mxm = mx - 1;
int myp = my + 1;
int mym = my - 1;
carraypos = 2*(mx*Ny + my);
// discrete Laplacian (real part only) with constant mass mass0
cVx = Vx*(phi0[2*(mxp*Ny+my)]*mass0 + phi0[2*(mxm*Ny+my)]*mass0 - phi0[2*(mx*Ny+my)]*(mass0 + mass0));
cVy = Vy*(phi0[2*(mx*Ny+myp)]*mass0 + phi0[2*(mx*Ny+mym)]*mass0 - phi0[2*(mx*Ny+my)]*(mass0 + mass0));
phi0re = phi0[carraypos];
phi0im = phi0[carraypos+1];
ctempre += prefact*phi0re*(cVx + cVy);
ctempim += -prefact*phi0im*(cVx + cVy);
}
}
printf("mu = %f + %f*i; taking real part only\n", ctempre, ctempim);
float mu = ctempre;
printf("Chemical potential: %f\n", mu);
float p2 = (px0*px0 + py0*py0)/(2.0*mass0);
printf("p^2: %f\n", p2);
// NOTE(review): the original lattice-energy computation fed only an empty
// (fully commented-out) cloak-potential loop; it has been dropped.
printf("Lattice energy\n");
printf("Hoppings: Vx=%f, Vy=%f\n", Vx, Vy);
if (verbose) printf("Hoppings: Vx=%f, Vy=%f\n", Vx, Vy);
if (verbose) printf("Momenta: kx=%f, ky=%f\n", kx, ky);
if (verbose)
{
// diagnostic: re-check the norm (interior sites only, as before)
rtemp = 0.0;
for (mx = 1; mx < Nx; mx++)
{
for (my = 1; my < Ny; my++)
{
carraypos = 2*(mx*Ny + my);
phi0re = phi0[carraypos];
phi0im = phi0[carraypos+1];
phi0re2 = phi0re*phi0re;
phi0im2 = phi0im*phi0im;
rtemp += ax*ay*(phi0re2 + phi0im2);
}
}
printf("Initial state module: rtemp=%f\n", rtemp);
}
// ---- copy working buffers and run the real-time evolution ----
int m;
for (m = 0; m < 2*Nx*Ny; m++) phit[m] = phi0[m];
for (m = 0; m < Nx*Ny; m++) Et[m] = potential[m];
if (debug) printf("Saving potential [not implemented yet]\n");
if (debug) printf("Saving initial configuration [not implemented yet]\n");
printf("tmax %f\n", tmax);
printf("dt %f\n", dt);
printf("order %d\n", order);
float sinkxax = ((float) sin(kx*ax));
float coskxax = ((float) cos(kx*ax));
float sinkyay = ((float) sin(ky*ay));
float coskyay = ((float) cos(ky*ay));
FILE *fp = fopen("GPU_Suzuki.ket", "w+");
if (fp == 0)
{
printf("ERROR: could not open output file GPU_Suzuki.ket\n");
exit(1);
}
// file layout: frame count, Nx, Ny, then Itot+1 snapshots of 2*Nx*Ny floats
int Itot = 100;
int NbrOfIterations = Itot + 1;
fwrite(&NbrOfIterations, sizeof(int), 1, fp);
fwrite(&Nx, sizeof(int), 1, fp);
fwrite(&Ny, sizeof(int), 1, fp);
fwrite(phit, sizeof(float), 2*Nx*Ny, fp);
for (int q = 0; q < Itot; q++)
{
printf("%d\n", q);
// each frame evolves the ket by tmax in steps of dt
evolve2d(phit, tmax, dt, Et, order, Nx, Ny, Vx, Vy, pbcx, pbcy, sinkxax, coskxax, sinkyay, coskyay);
fwrite(phit, sizeof(float), 2*Nx*Ny, fp);
}
fclose(fp);
// ---- cleanup ----
if (debug) printf("Freeing allocated memory\n");
free(phi0);
free(phit);
free(potential);
free(Et);
free(pert);
if (debug) printf("All done\n");
return 0;
}
|
7,960 | template<long long i>
__global__ void add_kernel(float *a, float *b, float *c, int N) {
    // Elementwise c = a + b; one thread per element.  The template
    // parameter above only forces distinct instantiations -- it is unused.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;   // guard the grid tail
    c[gid] = a[gid] + b[gid];
}
template<long long l = 0, long long r = 1000>
void _add(float *a, float *b, float *c, int N) {
    // Recursively instantiates and launches add_kernel<i> for every i in the
    // inclusive range [l, r].
    // BUG FIX: the original split [l, r] into [l, m] and [m, r], which share
    // the midpoint, so every interior split point's kernel was launched more
    // than once.  Splitting into the disjoint halves [l, m] and [m+1, r]
    // launches each instantiation exactly once.
    if constexpr (l == r) {
        add_kernel<l><<<(N + 255) / 256, 256>>>(a, b, c, N);
    } else if constexpr (l + 1 == r) {
        add_kernel<l><<<(N + 255) / 256, 256>>>(a, b, c, N);
        add_kernel<r><<<(N + 255) / 256, 256>>>(a, b, c, N);
    } else {
        constexpr long long m = (l + r) / 2;
        _add<l, m>(a, b, c, N);
        _add<m + 1, r>(a, b, c, N);
    }
}
void add(float *a, float *b, float *c, int N) {
    // Entry point: dispatch through the recursive template ladder using its
    // full default range [0, 1000].
    _add<0, 1000>(a, b, c, N);
}
|
7,961 | /***
This program represents an example of CUDA Thrust-based implementation of dot-product.
***/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <iostream>
using namespace std;
struct multiply_functor{
    // Binary functor for thrust::transform: returns w * x * y.  With the
    // default w == 1 this is a plain elementwise product.
    float w;
    multiply_functor(float _w = 1): w(_w) {}
    __device__ float operator() (const float & lhs, const float & rhs){
        return w * lhs * rhs;
    }
};
/*
 * Dot product of two equal-length device vectors.
 * IMPROVEMENT: the original allocated a whole temporary device vector for
 * the elementwise products and then reduced it; thrust::inner_product fuses
 * the multiply and the sum into one pass with no intermediate storage.
 */
float dot_product(thrust::device_vector<float> &v,
                  thrust::device_vector<float> &w)
{
    return thrust::inner_product(v.begin(), v.end(), w.begin(), 0.0f);
}
int main(void)
{
    // v = {1, 2, 3}, built element by element on the device.
    thrust::device_vector<float> v(3);
    v[0] = 1.0f;
    v[1] = 2.0f;
    v[2] = 3.0f;
    // w = {1, 1, 1} via the fill constructor.
    thrust::device_vector<float> w(3, 1.0f);
    // Echo both vectors, then the dot product.
    for (size_t i = 0; i < v.size(); i++)
        cout << "v[" << i << "] == " << v[i] << endl;
    for (size_t i = 0; i < w.size(); i++)
        cout << "w[" << i << "] == " << w[i] << endl;
    cout << "dot_product(v, w) == " << dot_product(v, w) << endl;
    return 0;
}
|
7,962 | // a toy program to get device property
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
    // Enumerate every CUDA device and print a summary of its capabilities.
    int dev_count;
    cudaGetDeviceCount(&dev_count);
    printf("Number of CUDA devices: %d\n", dev_count);
    cudaDeviceProp prop;
    int i;
    for (i = 0; i < dev_count; i++){
        cudaGetDeviceProperties(&prop,i);
        printf("Name: %s\n", prop.name);
        printf("SM count: %d\n", prop.multiProcessorCount);
        /* BUG FIX: the original printed maxThreadsPerBlock under the label
         * "Max threads per SM"; report both limits under correct labels. */
        printf("Max threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
        printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
        printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
        printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
        printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
        printf("Number of threads in a warp: %d\n", prop.warpSize);
        /* Integer division is exact here; the old pow(10, -9) round-trip
         * through double was needless. */
        printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem / 1000000000ULL));
        printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock / 1000));
        printf("Total constant memory (KB): %d\n", (int)(prop.totalConstMem / 1000));
    }
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error!=cudaSuccess)
    {
        fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
        exit(-1);
    }
    return 0;
}
|
7,963 | #include <stdio.h>
#include "reduction.h"
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
// cuda thread synchronization
// Block-level tree reduction in shared memory (interleaved addressing).
// Launch contract: dynamic shared memory = blockDim.x * sizeof(float);
// blockDim.x must be a power of two (see the file header note).  Each block
// writes its partial sum to d_out[blockIdx.x].
__global__ void
reduction_kernel(float* d_out, float* d_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float s_data[];
// Stage one element per thread; pad the tail with 0 so a partial last
// block still sums correctly.
s_data[threadIdx.x] = (idx_x < size) ? d_in[idx_x] : 0.f;
__syncthreads();
// do reduction
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
// thread synchronous reduction
// to reduce the compute utilization, we can switch the operation
// if ( (idx_x % (stride * 2)) == 0 ) // 0.433 ms
if ( (idx_x & (stride * 2 - 1)) == 0 ) // 0.399 ms
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
// Barrier sits outside the if, so every thread reaches it each pass.
__syncthreads();
}
// Lane 0 publishes this block's partial sum.
if (threadIdx.x == 0)
d_out[blockIdx.x] = s_data[0];
}
/*
 * Host driver: repeatedly launches reduction_kernel until one value remains
 * in d_out[0].  d_in is left untouched.
 *
 * BUG FIX: the original reduced in place on d_out, so block b wrote its
 * partial sum to d_out[b] while another block could still be reading that
 * element as its input -- an inter-block data race.  We now ping-pong
 * between d_out and a scratch buffer so a pass never writes into the array
 * it is reading.
 */
void reduction(float *d_out, float *d_in, int n_threads, int size)
{
    cudaMemcpy(d_out, d_in, size * sizeof(float), cudaMemcpyDeviceToDevice);
    if (size <= 1)
        return;
    // Scratch buffer large enough for the first pass's partial sums.
    float *d_tmp = NULL;
    int first_blocks = (size + n_threads - 1) / n_threads;
    cudaMalloc((void**)&d_tmp, first_blocks * sizeof(float));
    float *src = d_out;
    float *dst = d_tmp;
    while (size > 1)
    {
        int n_blocks = (size + n_threads - 1) / n_threads;
        reduction_kernel<<< n_blocks, n_threads, n_threads * sizeof(float), 0 >>>(dst, src, size);
        size = n_blocks;
        float *swap = src; src = dst; dst = swap;
    }
    // After the final swap the result lives in src[0].
    if (src != d_out)
        cudaMemcpy(d_out, src, sizeof(float), cudaMemcpyDeviceToDevice);
    cudaFree(d_tmp);
}
7,964 | #include <chrono>
#include <iostream>
#include <cuda_runtime.h>
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code,
                      const char *file,
                      int line,
                      bool abort = true) {
    // Report any CUDA failure with its call site, then terminate.
    // NOTE(review): the `abort` flag is accepted but never consulted; the
    // exit(1) is unconditional, matching the original behavior.
    if (code == cudaSuccess) return;
    std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " "
              << line << std::endl;
    exit(1);
}
//Copy from other GPUs to this device
// Pulls slice i of this device's buffer from every other GPU i with
// synchronous cudaMemcpy, returning the wall-clock seconds elapsed.
// Sizes are in bytes; the /4 converts to float-element offsets
// (sizeof(float) == 4), so GPU i owns the i-th 1/num_gpus slice.
double normalMemCpyOther2This(float ** gpu_ptrs, int num_gpus, int bytes_to_transfer_each, int curr_gpu) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < num_gpus; i++) {
if (i != curr_gpu) {
CUDA_CHECK(cudaMemcpy(gpu_ptrs[curr_gpu] + (bytes_to_transfer_each/4/num_gpus)*i,
gpu_ptrs[i] + (bytes_to_transfer_each/4/num_gpus)*i,
bytes_to_transfer_each/num_gpus,
cudaMemcpyDefault));
}
// Drain both the peer and the current device each iteration so the
// timing covers completed transfers (note: also runs when i == curr_gpu).
cudaSetDevice(i);
cudaDeviceSynchronize();
cudaSetDevice(curr_gpu);
cudaDeviceSynchronize();
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> secs = end-start;
return secs.count();
}
//From this device to other GPUs (synchronous cudaMemcpy version)
// Pushes slice i of this device's buffer to every other GPU i with
// synchronous cudaMemcpy, returning the wall-clock seconds elapsed.
// Same slice layout as normalMemCpyOther2This (offsets are float elements).
double normalMemCpyThis2Other(float ** gpu_ptrs, int num_gpus, int bytes_to_transfer_each, int curr_gpu) {
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < num_gpus; i++) {
if (i != curr_gpu) {
CUDA_CHECK(cudaMemcpy(gpu_ptrs[i] + (bytes_to_transfer_each/4/num_gpus)*i,
gpu_ptrs[curr_gpu] + (bytes_to_transfer_each/4/num_gpus)*i,
bytes_to_transfer_each/num_gpus,
cudaMemcpyDefault));
}
// Drain both devices each iteration so the timing covers completion.
cudaSetDevice(i);
cudaDeviceSynchronize();
cudaSetDevice(curr_gpu);
cudaDeviceSynchronize();
}
std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> secs = end-start;
return secs.count();
}
//Copy from other GPUs to this device, async
// Async variant of normalMemCpyOther2This: one stream per peer so copies
// can be issued concurrently; returns wall-clock seconds for the sequence.
double normalMemCpyOther2ThisAsync(float ** gpu_ptrs, int num_gpus, int bytes_to_transfer_each, int curr_gpu) {
    cudaStream_t streams[num_gpus];
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamCreate(&streams[i]));
    }
    std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < num_gpus; i++) {
        if (i != curr_gpu) {
            // Slice i of this device's buffer receives slice i from GPU i
            // (/4 converts byte counts to float-element offsets).
            CUDA_CHECK(cudaMemcpyAsync(gpu_ptrs[curr_gpu] + (bytes_to_transfer_each/4/num_gpus)*i,
                            gpu_ptrs[i] + (bytes_to_transfer_each/4/num_gpus)*i,
                            bytes_to_transfer_each/num_gpus,
                            cudaMemcpyDefault, streams[i]));
        }
        cudaSetDevice(i);
        cudaDeviceSynchronize();
        cudaSetDevice(curr_gpu);
        cudaDeviceSynchronize();
    }
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamSynchronize(streams[i]));
    }
    std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> secs = end-start;
    // BUG FIX: the original leaked every stream it created.
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamDestroy(streams[i]));
    }
    return secs.count();
}
//From this device to other GPUS, async
// Async variant of normalMemCpyThis2Other: one stream per peer so copies
// can be issued concurrently; returns wall-clock seconds for the sequence.
double normalMemCpyThis2OtherAsync(float ** gpu_ptrs, int num_gpus, int bytes_to_transfer_each, int curr_gpu) {
    cudaStream_t streams[num_gpus];
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamCreate(&streams[i]));
    }
    std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < num_gpus; i++) {
        if (i != curr_gpu) {
            // Push this device's slice i out to GPU i.
            CUDA_CHECK(cudaMemcpyAsync(gpu_ptrs[i] + (bytes_to_transfer_each/4/num_gpus)*i,
                            gpu_ptrs[curr_gpu] + (bytes_to_transfer_each/4/num_gpus)*i,
                            bytes_to_transfer_each/num_gpus,
                            cudaMemcpyDefault, streams[i]));
        }
        cudaSetDevice(i);
        cudaDeviceSynchronize();
        cudaSetDevice(curr_gpu);
        cudaDeviceSynchronize();
    }
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamSynchronize(streams[i]));
    }
    std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> secs = end-start;
    // BUG FIX: the original leaked every stream it created.
    for (int i = 0; i < num_gpus; i++) {
        CUDA_CHECK(cudaStreamDestroy(streams[i]));
    }
    return secs.count();
}
// Benchmark driver: times all four copy strategies from every GPU's point
// of view, then enables peer access where possible and repeats.
int main(int argc, char * argv[]) {
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " num gpus total_model_memory" << std::endl;
        exit(1);
    }
    int num_gpus = atoi(argv[1]);
    int bytes_to_transfer_each = atoi(argv[2])*1024*1024;   // argv[2] is in MB
    float ** gpu_ptrs = new float*[num_gpus];
    //Initiate with random memory, we don't care what it is. Also init the streams
    for (int i = 0; i < num_gpus; i++) {
        cudaSetDevice(i);
        CUDA_CHECK(cudaMalloc(&gpu_ptrs[i], bytes_to_transfer_each));
    }
    auto other_this = normalMemCpyOther2This(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    auto this_other = normalMemCpyThis2Other(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    auto other_thisAsync = normalMemCpyOther2ThisAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    auto this_otherAsync = normalMemCpyThis2OtherAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    for (int i = 1; i < num_gpus; i++) {
        other_this += normalMemCpyOther2This(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        this_other += normalMemCpyThis2Other(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        other_thisAsync += normalMemCpyOther2ThisAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        this_otherAsync += normalMemCpyThis2OtherAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
    }
    std::cout << std::fixed << "Other to this took: " << other_this << " seconds." << std::endl;
    std::cout << std::fixed << "This to other took: " << this_other << " seconds." << std::endl;
    // BUG FIX: the async result lines printed the synchronous totals.
    std::cout << std::fixed << "Other to this Async took: " << other_thisAsync << " seconds." << std::endl;
    std::cout << std::fixed << "This to other Async took: " << this_otherAsync << " seconds." << std::endl;
    //Attempt to enable peer access
    for (int i = 0; i<num_gpus; i++) {
        for (int j = 0; j<num_gpus; j++) {
            if (i != j) {
                int result;
                CUDA_CHECK(cudaDeviceCanAccessPeer(&result, i, j));
                if (result) {
                    cudaSetDevice(i);
                    cudaDeviceEnablePeerAccess (j, 0);
                } else {
                    std::cout << std::fixed << "Peer access unavailable between devices: " << i << " and " << j << std::endl;
                }
            }
        }
    }
    //Redo the benchmarks, see if it is any different
    other_this = normalMemCpyOther2This(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    this_other = normalMemCpyThis2Other(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    other_thisAsync = normalMemCpyOther2ThisAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    this_otherAsync = normalMemCpyThis2OtherAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, 0);
    for (int i = 1; i < num_gpus; i++) {
        other_this += normalMemCpyOther2This(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        this_other += normalMemCpyThis2Other(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        other_thisAsync += normalMemCpyOther2ThisAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
        this_otherAsync += normalMemCpyThis2OtherAsync(gpu_ptrs, num_gpus, bytes_to_transfer_each, i);
    }
    std::cout << std::fixed << "Peer other to this took: " << other_this << " seconds." << std::endl;
    std::cout << std::fixed << "Peer this to other took: " << this_other << " seconds." << std::endl;
    // BUG FIX: same copy-paste error for the peer-enabled async lines.
    std::cout << std::fixed << "Peer other to this Async took: " << other_thisAsync << " seconds." << std::endl;
    std::cout << std::fixed << "Peer this to other Async took: " << this_otherAsync << " seconds." << std::endl;
    // BUG FIX: release the device buffers and the host pointer array.
    for (int i = 0; i < num_gpus; i++) {
        cudaSetDevice(i);
        CUDA_CHECK(cudaFree(gpu_ptrs[i]));
    }
    delete[] gpu_ptrs;
    return 0;
}
|
7,965 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Backprojection kernel: image[j*wdI + i] accumulates, over all projection
// angles, the sinogram value of the ray passing through pixel (i, j), using
// linear interpolation between adjacent rays.
// Launch contract: 2-D grid/blocks covering a wdI x wdI image; sinogram is
// laid out so sino[nangles*T + k] is ray T at angle k.
__global__ void anglesum(float *sino,
float *image,
int nangles,
int nrays,
int wdI,
float delta,
float ini)
{
int i, j, T, k;
float dt, th, dth, t, cost, sint, cumsum;
float x;
float y;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdI) && (j < wdI) ){
// World coordinates of this pixel.
x = ini + i * delta;
y = ini + j * delta;
cumsum = 0;
// Rays span [-1, 1]; angles span [0, PI).
dt = 2.0/(nrays-1);
dth = PI/(nangles);
for(k=0; k < nangles; k++)
{
th = k*dth;
cost = cos(th);
sint = sin(th);
// Signed distance of the pixel from the ray through the origin.
t = x*cost + y*sint;
T = (int) floor((t + 1)/dt);
if(T > -1 && T < nrays-1)
{
// Linear interpolation between rays T and T+1 at angle k.
cumsum = cumsum + (sino[nangles*(T+1) + k]-sino[nangles*T + k])*(t-(-1.0 + T*dt))/dt + sino[nangles*T + k];
}
}
// Riemann sum over the angle variable.
image[j*wdI + i] = (cumsum*dth);
}
}
// Reads a sinogram from a text file and backprojects it into an image,
// timing the GPU portion.  argv: <sino file> <image size> <nrays> <nangles>.
int main(int argc, char *argv[]) {
    int i, j;
    float delta, ini;
    // BUG FIX: validate the argument count and the input file before use.
    if (argc < 5) {
        fprintf(stderr, "usage: %s <sino file> <image size> <nrays> <nangles>\n", argv[0]);
        return 1;
    }
    int sizeImage = atoi(argv[2]);
    int nrays = atoi(argv[3]);
    int nangles = atoi(argv[4]);
    FILE *fp=fopen(argv[1], "r");
    if (fp == NULL) {
        fprintf(stderr, "cannot open %s\n", argv[1]);
        return 1;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    float *dev_i = NULL;
    float *dev_s = NULL;
    float *image;
    float *sino;
    // GRID SIZE: enough 16x16 blocks to cover the image.
    unsigned int gridp;
    gridp = (unsigned int) ceilf(((float)(sizeImage)/16));
    dim3 grid(gridp, gridp, 1);
    dim3 blocks(16, 16, 1);
    // MALLOC AND CUDA COPY
    image = (float *)malloc(sizeImage*sizeImage*sizeof(float));
    sino = (float *)malloc(nangles*nrays*sizeof(float));
    for (i = 0; i < nangles*nrays; i++)
        fscanf(fp, "%f", &sino[i]);
    fclose(fp); // BUG FIX: the input file was never closed
    cudaEventRecord(start);
    CUDA_CHECK_RETURN(cudaMalloc((void**) &dev_i, sizeof(float)*sizeImage*sizeImage));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&dev_s , nangles*nrays*sizeof(float) ) );
    CUDA_CHECK_RETURN(cudaMemcpy (dev_s , sino , nrays*nangles*sizeof(float) , cudaMemcpyHostToDevice));
    //////////////////////
    // BACK PROJECTION: pixels span [-sqrt(2)/2, sqrt(2)/2] in both axes.
    ini = -sqrt(2)/2;
    delta = (double) sqrt(2)/(sizeImage-1);
    anglesum<<<grid, blocks>>>(dev_s, dev_i, nangles, nrays, sizeImage, delta, ini);
    // BACK PROJECTION FINISHED
    // COPY RESULTS
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaMemcpy (image , dev_i , sizeImage*sizeImage*sizeof(float) , cudaMemcpyDeviceToHost) );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    fprintf(stderr, "%f ms\n", milliseconds);
    // Print the image row-flipped so row 0 of output is the top of the image.
    for(i=0; i< sizeImage; i++)
    {
        for(j=0; j< sizeImage; j++)
        {
            fprintf(stdout, "%f ", image[sizeImage*(sizeImage-1-i) + j]);
        }
        fprintf(stdout, "\n");
    }
    //////////////
    // FREE MEMORY
    free(sino);
    free(image);
    CUDA_CHECK_RETURN(cudaFree((void*) dev_s));
    CUDA_CHECK_RETURN(cudaFree((void*) dev_i));
    // BUG FIX: destroy the timing events before resetting the device.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    CUDA_CHECK_RETURN(cudaDeviceReset());
    return 0;
}
|
7,966 | __global__
// Nekbone-style local Laplacian apply: one element per block, N^3 nodes per
// element.  w = D^T (G . (D u)) with the derivative matrix in dt/d and the
// six symmetric geometric factors per node in g.
// Precondition: N*N*N <= 1024, since the per-element scratch arrays below
// are statically sized to 1024 entries.
void nekbone(double *w, double *u, double *g, double *d, double *dt, const int N) {
const int e_size = N * N * N;
const int e_offset = e_size * blockIdx.x;
__shared__ double ur[1024];
__shared__ double us[1024];
__shared__ double ut[1024];
__shared__ double ul[1024];
// Stage this element's u values into shared memory.
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
ul[it] = u[e_offset + it];
}
__syncthreads();
int i, j, k;
// Pass 1: directional derivatives (wr, ws, wt), then contract with the
// symmetric 3x3 geometric factor stored as 6 values per node.
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
j = it / N;
i = it - j * N;
k = j / N;
j -= k * N;
double wr = 0.0;
double ws = 0.0;
double wt = 0.0;
for (int n = 0; n < N; ++n) {
wr += dt[i * N + n] * ul[N * (j + k * N) + n];
ws += dt[j * N + n] * ul[N * (n + k * N) + i];
wt += dt[k * N + n] * ul[N * (j + n * N) + i];
}
int g_offset = 6 * (e_offset + it);
ur[it] = g[g_offset + 0] * wr + g[g_offset + 1] * ws + g[g_offset + 2] * wt;
us[it] = g[g_offset + 1] * wr + g[g_offset + 3] * ws + g[g_offset + 4] * wt;
ut[it] = g[g_offset + 2] * wr + g[g_offset + 4] * ws + g[g_offset + 5] * wt;
}
__syncthreads();
// Pass 2: apply the transposed derivative operator and write the result.
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
j = it / N;
i = it - j * N;
k = j / N;
j -= k * N;
double s = 0.0;
for (int n = 0; n < N; ++n) {
s += d[i * N + n] * ur[N * (j + N * k) + n] +
d[j * N + n] * us[N * (n + N * k) + i] +
d[k * N + n] * ut[N * (j + N * n) + i];
}
w[e_offset + it] = s;
}
}
|
7,967 | #include "CudaUtilities.cuh"
namespace Helix {
// Allocate `len` bytes of device memory; returns NULL on failure instead of
// leaving the pointer indeterminate.
void *fixedCudaMalloc(size_t len) {
    void *p;
    if (cudaMalloc(&p, len) == cudaSuccess) return p;
    return 0;
}
// Allocate device storage and copy `size` bytes from the host buffer `org`.
// Returns NULL if the allocation failed.
// BUG FIX: the original passed the result of a failed allocation (NULL)
// straight to cudaMemcpy; bail out early instead.
template<typename FN>
FN *cudaAlloCopy(FN *org, const size_t size) {
    void* mem = fixedCudaMalloc(size);
    if (mem == 0) return 0;
    FN *res = (FN *)mem;
    cudaMemcpy(res, org, size, cudaMemcpyHostToDevice);
    return res;
}
// Explicit instantiations for the vector types used elsewhere.
template float4 *cudaAlloCopy <float4>(float4 *, const size_t);
template float3 *cudaAlloCopy <float3>(float3 *, const size_t);
template double4 *cudaAlloCopy <double4>(double4 *, const size_t);
template double3 *cudaAlloCopy <double3>(double3 *, const size_t);
}
|
7,968 |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned int *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
    // Debug-only guard: in DEBUG/_DEBUG builds a failing CUDA call aborts
    // the program; release builds just pass the status through.
#if defined(DEBUG) || defined(_DEBUG)
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        exit(-1);
    }
#endif
    return result;
}
// GPU kernel and functions
__global__ void kernel(unsigned char *input,
unsigned int *output,
unsigned int numberOfPixels){
// int x = blockIdx.x*TILE_SIZE+threadIdx.x;
// int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int pixel = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel < numberOfPixels) {
atomicAdd(&(output[input[pixel]]), 1);
}
}
// Computes the 256-bin histogram of an 8-bit image on the GPU.
// outputHistogram must have room for 256 unsigned ints.  Uses the
// file-scope input_gpu/output_gpu device pointers.
void calculateHistogram(unsigned char *inputImageData,
unsigned int height,
unsigned int width,
unsigned int* outputHistogram) {
unsigned int numberOfPixels = height * width;
// Pad the launch geometry up to whole TILE_SIZE tiles.
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory.  output_gpu is zeroed because the kernel
// only ever increments; only bytes for height*width pixels are uploaded,
// the padded tail is never read (kernel guards on numberOfPixels).
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned int)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned int)));
checkCuda(cudaMemcpy(input_gpu, inputImageData, height*width*sizeof(char),cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, numberOfPixels);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU: only the 256 real histogram bins.
checkCuda(cudaMemcpy(outputHistogram,
output_gpu,
256 * sizeof(unsigned int),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
7,969 | #include <fstream>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <unistd.h>
#include <utility>
#include <vector>
#define BLOCK_N 2 // 16 on Alan's PC, 2 on remote
#define THREAD_N 96 // 2048 on Alan's PC, 96 on remote
// 2-D point; also reused as an accumulator when averaging trilaterated fixes.
struct Point {
double x;
double y;
};
// Measured distances from one sample to the three guard points a, b and c.
struct Dist {
double da;
double db;
double dc;
Dist(double a, double b, double c) : da(a), db(b), dc(c) {}
};
std::ostream& operator<<(std::ostream& stream, Point& p) {
    // Emit "x y" with no trailing newline; callers control line endings.
    stream << p.x << " " << p.y;
    return stream;
}
__device__ Point& operator+=(Point &a, const Point &b) {
    // Componentwise accumulate; returns the updated left operand.
    a.x = a.x + b.x;
    a.y = a.y + b.y;
    return a;
}
// NOTE(review): unlike a conventional operator/, this MUTATES its left
// operand and returns a reference to it; FindPoint relies on that in-place
// divide when averaging shared-memory points, so do not "fix" it to a
// by-value operator without auditing callers.
__device__ Point& operator/(Point &a, double d) {
a.x /= d;
a.y /= d;
return a;
}
// Classic 2-D trilateration: subtracting pairs of circle equations
// (centered at guards a, b, c with radii ref.da/db/dc) yields two linear
// equations A x + B y = C and D x + E y = F, solved by Cramer's rule.
// Returns (0, 0) when the system is singular (guards collinear/degenerate).
__device__ Point Trilaterate(const Point &a, const Point &b, const Point &c, const Dist &ref) {
double A = (-2 * a.x) + (2 * b.x),
B = (-2 * a.y) + (2 * b.y),
C = (ref.da * ref.da) - (ref.db * ref.db) - (a.x * a.x) + (b.x * b.x) - (a.y * a.y) + (b.y * b.y),
D = (-2 * b.x) + (2 * c.x),
E = (-2 * b.y) + (2 * c.y),
F = (ref.db * ref.db) - (ref.dc * ref.dc) - (b.x * b.x) + (c.x * c.x) - (b.y * b.y) + (c.y * c.y);
if (A * E == D * B || B * D == E * A) { // Don't divide by 0
return Point { 0, 0 };
}
// printf("P(%f, %f)\n", ((C * E) - (F * B)) / ((A * E) - (D * B)), ((C * D) - (F * A)) / ((B * D) - (E * A)));
return Point { ((C * E) - (F * B)) / ((A * E) - (D * B)), ((C * D) - (F * A)) / ((B * D) - (E * A)) };
}
// Kernel definition
__global__ void FindPoint(const Point a, const Point b, const Point c, const Dist *dst, size_t num, Point *pts) {
__shared__ Point t[THREAD_N];
// const int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
//if (i < num) {
// printf("BlockIdx=%d, ThreadIdx=%d, i=%d, dst[i]=(%f,%f,%f)\n",
// blockIdx.x, threadIdx.x, i, dst[i].da, dst[i].db, dst[i].dc);
int tid = threadIdx.x;
t[tid] = Trilaterate(a, b, c, dst[i]);//dst[i + vec_n * THREAD_N]);
// Have each thread calculate the trilateration of a dst[i]
__syncthreads();
// Avg every 4 points, store in pts
for (int fold = 2; fold > 0; fold /= 2) {
if (tid % 4 < fold) { // 4-point segment
t[tid] += t[tid + fold];
}
__syncthreads();
}
if (tid % 4 == 0) pts[i / 4] = t[tid] / 4;
//}
}
}
// Set up guard points
void setGuards(std::ifstream &ifs, Point &a, Point &b, Point &c) {
ifs >> a.x >> a.y
>> b.x >> b.y
>> c.x >> c.y;
}
/* The current CUDA API goes up to 3.0 SM support (Kepler). For newer
* architectures, like Maxwell and Pascal, we use this function that
* many industry professionals have agreed on to determine the number of
* cores. Until the API is updated to include the numbers for >3.0 SM, we
* will use this function to get the count.
*/
// FP32 CUDA cores per SM by compute capability (mirrors the
// _ConvertSMVer2Cores table in the CUDA samples' helper_cuda.h).
// Returns 0 (after printing an error) for unknown architectures.
int getSPcores(cudaDeviceProp devProp) {
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major) {
    case 2: // Fermi
        if (devProp.minor == 1) cores = mp * 48;
        else cores = mp * 32;
        break;
    case 3: // Kepler
        cores = mp * 192;
        break;
    case 5: // Maxwell
        cores = mp * 128;
        break;
    case 6: // Pascal
        if (devProp.minor == 1) cores = mp * 128;
        else if (devProp.minor == 0) cores = mp * 64;
        else std::cerr << "Unknown device type\n";
        break;
    case 7: // Volta (7.0/7.2) and Turing (7.5): 64 FP32 cores per SM
        cores = mp * 64;
        break;
    case 8: // Ampere: GA100 (8.0) has 64, GA10x (8.6+) have 128
        if (devProp.minor == 0) cores = mp * 64;
        else cores = mp * 128;
        break;
    default:
        std::cerr << "Unknown device type\n";
        break;
    }
    return cores;
}
// Driver: parses flags, sizes the launch from the device's SM/core counts
// (scaled by -U/-V), reads guard points and distance triples from the input
// file, trilaterates+averages them on the GPU, and writes the averaged
// points to the output file.
int main(int argc, char* argv[]) {
// Process flags
std::string inputFile, outputFile;
int flag;
double UFactor = 1.0, VFactor = 1.0;
opterr = 0;
while ((flag = getopt(argc, argv, "hi:o:U:V:")) != -1) {
switch(flag) {
case 'i':
inputFile = optarg;
break;
case 'o':
outputFile = optarg;
break;
case 'U':
UFactor = atof(optarg);
break;
case 'V':
VFactor = atof(optarg);
break;
case 'h':
std::cerr << "Usage: ./assign2 [-hioUV] <file-path>\n\n" <<
"Options:\n" <<
"-h\t\t Show usage string and exit\n" <<
"-i <file-path>\t Read input from provided file\n" <<
"-o <file-path>\t Write output to file\n" <<
"-U <factor for U>\t Block size = U (SMs) * factor for U\n" <<
"-V <factor for V>\t Thread size = V (Cores) * factor for V\n";
exit(-1);
case '?':
if (optopt == 'i' || optopt == 'o' || optopt == 'U' || optopt == 'V') {
std::cerr << "Option -" << (char)optopt << " requires an argument.\n";
} else if (isprint(optopt)) {
std::cerr << "Unknown option `-" << (char)optopt << "'.\n";
} else {
std::cerr << "Unknown option character `\\x" << (char)optopt << "'.\n";
}
exit(-1);
default:
exit(-1);
}
}
// Ensure filename was passed
if (inputFile.empty()) {
std::cerr << "Error: input filename required\n";
exit(-1);
}
if (outputFile.empty()) {
std::cerr << "Error: output filename required\n";
exit(-1);
}
// Use API to determine U and V values
// (U = SM count, V = cores per SM; the last enumerated device wins.)
int nDevices, U = -1, V = -1;
cudaError_t err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess) {
std::cerr << cudaGetErrorString(err) << "\n";
exit(-1);
} else {
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
U = prop.multiProcessorCount;
V = getSPcores(prop) / U; // Get number of cores per sm
}
}
// Check values for U and V
if (U == -1 || V == -1) {
std::cerr << "Error: Could not fetch information on number of multiprocessors or cores\n";
exit(-1);
}
// Open input file for reading
std::ifstream ifs;
ifs.open(inputFile, std::ios::in);
if (!ifs.is_open()) {
std::cerr << "Error: failed to open " << inputFile << "\n";
exit(-1);
}
// Set up guard points
Point a, b, c;
setGuards(ifs, a, b, c);
// Remaining lines: one distance triple per sample.
std::vector<Dist> data;
while (true) {
double da, db, dc;
ifs >> da >> db >> dc;
if (ifs.eof()) break;
data.push_back(Dist(da, db, dc));
//std::cerr << "Dist { " << da << " " << db << " " << dc << " }\n";
}
//std::cerr << "Size of dists: " << data.size() << "\n";
ifs.close();
// Device buffers: all triples in, one averaged point per 4 triples out.
Dist *dst;
Point *pts;
Point *res = new Point[data.size() / 4];
err = cudaMalloc((void **)&dst, data.size() * sizeof(Dist));
if (err != cudaSuccess) {
std::cerr << cudaGetErrorString(err) << "\n";
exit(-1);
}
err = cudaMalloc((void **)&pts, (data.size() / 4) * sizeof(Point));
if (err != cudaSuccess) {
std::cerr << cudaGetErrorString(err) << "\n";
exit(-1);
}
err = cudaMemcpy(dst, data.data(), data.size() * sizeof(Dist), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cerr << cudaGetErrorString(err) << "\n";
exit(-1);
}
// Pass in our U and V for this run
// std::cerr << U << ", " << UFactor << ", " << V << ", " << VFactor << "\n";
int numBlocks = U * UFactor;
int numThreads = V * VFactor;
// Start timer
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
FindPoint<<<numBlocks, numThreads>>>(a, b, c, dst, data.size(), pts);
// End timer
cudaEvent_t-unused-comment-free
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
std::cerr << "Elapsed time: " << elapsedTime << " ms\n";
// Blocking copy also ensures the kernel has finished before we read.
err = cudaMemcpy(res, pts, (data.size() / 4) * sizeof(Point), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
std::cerr << cudaGetErrorString(err) << "\n";
exit(-1);
}
std::ofstream ofs;
ofs.open(outputFile, std::ofstream::out | std::ofstream::trunc);
for (int i = 0; i < data.size() / 4; i++) {
ofs << res[i] << "\n";
}
ofs.close();
cudaFree(dst);
cudaFree(pts);
delete[] res;
return 0;
}
|
7,970 | #include <bits/stdint-uintn.h>
#include <stdio.h>
#include <sys/param.h>
// Per-frame render parameters passed by value to the kernels.
struct RenderSettings {
uint32_t *outputBuffer;   // host pixel buffer the result is copied into
int width;                // frame dimensions in pixels
int height;
double zoom;              // view window half-height is 2.0 / zoom
double xoffset;           // center of the view in the complex plane
double yoffset;
unsigned int iterations;  // escape-iteration cap
uint32_t *deviceBuffer;   // filled in by the wrappers before each launch
};
// Cached device pixel buffer, managed by initCUDA/freeCUDA.
uint32_t *deviceBuffer;
// 1 once deviceBuffer is allocated, 0 otherwise.
char cudaInitialized = 0;
// Double-precision mandelbrot kernel: each thread walks pixels in a
// grid-stride loop, iterates z = z^2 + c from z = c, and packs the escape
// count into a 32-bit color (bias in the top three bytes, low byte 0xFF).
__global__ void mandelbrotCalc(struct RenderSettings rs) {
int *deviceBuffer = (int *)rs.deviceBuffer;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double cReal, cImag, zReal, zImag, z2Real, z2Imag, zrzi;
int color;
int colorbias;
// View window: x spans 4/zoom scaled by aspect ratio, y spans 4/zoom.
double x1 = rs.xoffset - 2.0 / rs.zoom * rs.width / rs.height;
double x2 = rs.xoffset + 2.0 / rs.zoom * rs.width / rs.height;
double y1 = rs.yoffset + 2.0 / rs.zoom;
double pixel_pitch = (x2 - x1) / rs.width;
int x, y;
for (int w = index; w < rs.height * rs.width; w += stride) {
y = w / rs.width;
// (For y == 0, w % width == w, so both branches agree.)
if (y > 0) {
x = w % rs.width;
} else {
x = w;
}
cImag = y1 - pixel_pitch * y;
cReal = x1 + pixel_pitch * x;
zReal = cReal;
zImag = cImag;
color = 0x000000FF; // black as default for values that converge to 0
for (int i = 0; i < rs.iterations; i++) {
z2Real = zReal * zReal;
z2Imag = zImag * zImag;
zrzi = zReal * zImag;
zReal = cReal + z2Real - z2Imag;
zImag = zrzi + zrzi + cImag;
// Escape test: |z|^2 > 4 means divergence.
if (z2Real + z2Imag > 4.0f) {
colorbias = MIN(255, i * 510.0 / rs.iterations);
color = (color | (colorbias << 24) | (colorbias << 16) | colorbias << 8);
break;
}
}
deviceBuffer[w] = color;
}
}
// Single-precision variant of mandelbrotCalc.
// BUG FIX: the original mixed double literals (2.0, 510.0) and the
// double-typed RenderSettings fields directly into the "SP" math, so the
// whole computation was silently promoted to double.  Cast the settings to
// float once and use float literals so the iteration really runs in float.
__global__ void mandelbrotCalcSP(struct RenderSettings rs) {
    int *deviceBuffer = (int *)rs.deviceBuffer;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    float cReal, cImag, zReal, zImag, z2Real, z2Imag, zrzi;
    int color;
    int colorbias;
    float zoom = (float)rs.zoom;
    float xoffset = (float)rs.xoffset;
    float yoffset = (float)rs.yoffset;
    float aspect = (float)rs.width / rs.height;
    // View window: x spans 4/zoom scaled by aspect ratio, y spans 4/zoom.
    float x1 = xoffset - 2.0f / zoom * aspect;
    float x2 = xoffset + 2.0f / zoom * aspect;
    float y1 = yoffset + 2.0f / zoom;
    float pixel_pitch = (x2 - x1) / rs.width;
    int x, y;
    for (int w = index; w < rs.height * rs.width; w += stride) {
        y = w / rs.width;
        x = w % rs.width;   // w % width == w on row 0, so no branch needed
        cImag = y1 - pixel_pitch * y;
        cReal = x1 + pixel_pitch * x;
        zReal = cReal;
        zImag = cImag;
        color = 0x000000FF; // black as default for values that converge to 0
        for (int i = 0; i < rs.iterations; i++) {
            z2Real = zReal * zReal;
            z2Imag = zImag * zImag;
            zrzi = zReal * zImag;
            zReal = cReal + z2Real - z2Imag;
            zImag = zrzi + zrzi + cImag;
            // Escape test: |z|^2 > 4 means divergence.
            if (z2Real + z2Imag > 4.0f) {
                colorbias = MIN(255, (int)(i * 510.0f / rs.iterations));
                color = (color | (colorbias << 24) | (colorbias << 16) | colorbias << 8);
                break;
            }
        }
        deviceBuffer[w] = color;
    }
}
extern "C" void freeCUDA() {
    // Release the cached device pixel buffer, if any, and mark it gone.
    if (cudaInitialized != 1) return;
    cudaFree(deviceBuffer);
    cudaInitialized = 0;
}
extern "C" void initCUDA(struct RenderSettings rs) {
// allocates device buffer on first run
// destroys and re-allocates buffer if window dimensions change
// Dimensions of the buffer currently allocated (file-scope deviceBuffer).
static int width = 0;
static int height = 0;
if (cudaInitialized == 0) {
// 4 bytes per pixel: one packed 32-bit color.
cudaMalloc((void **)&deviceBuffer, rs.width * rs.height * 4);
width = rs.width;
height = rs.height;
cudaInitialized = 1;
} else {
if (rs.width != width || rs.height != height) {
// Size changed: free the old buffer, then recurse to allocate anew
// (the recursion takes the cudaInitialized == 0 branch).
freeCUDA();
initCUDA(rs);
}
}
}
extern "C" void mandelbrotCUDA(struct RenderSettings rs) {
    // Double-precision render: ensure the device buffer matches the frame
    // size, run the kernel, then copy the pixels back to the host buffer.
    initCUDA(rs);
    rs.deviceBuffer = deviceBuffer;
    mandelbrotCalc<<<2048, 1024>>>(rs);
    cudaDeviceSynchronize();
    cudaMemcpy(rs.outputBuffer, deviceBuffer, rs.width * rs.height * 4, cudaMemcpyDeviceToHost);
}
extern "C" void mandelbrotCUDAsp(struct RenderSettings rs) {
    // Single-precision render: same flow as mandelbrotCUDA but launching
    // the SP kernel.
    initCUDA(rs);
    rs.deviceBuffer = deviceBuffer;
    mandelbrotCalcSP<<<2048, 1024>>>(rs);
    cudaDeviceSynchronize();
    cudaMemcpy(rs.outputBuffer, deviceBuffer, rs.width * rs.height * 4, cudaMemcpyDeviceToHost);
}
|
7,971 | __global__ void PDH_kernel3(unsigned long long* d_histogram,
double* d_atom_x_list, double* d_atom_y_list, double * d_atom_z_list,
long long acnt, double res)//,
//int numBlocks, int blockSize)
{
// Pairwise-distance histogram with shared-memory tiling: each thread keeps
// its own atom (Lx,Ly,Lz) in registers, streams later blocks' atoms through
// shared memory, and finally handles intra-block pairs.  Bin = dist / res.
// NOTE(review): all three __syncthreads() below sit inside the outer
// if(cur_id < acnt) guard, so threads with cur_id >= acnt never reach the
// barriers.  When acnt is not a multiple of the launch width this is a
// divergent-barrier hazard -- confirm acnt/blockDim assumptions upstream.
// NOTE(review): h_pos is not clamped; the histogram must have a bin for the
// largest possible distance given `res`.
extern __shared__ double R[];
//the size of this should be 3*BLOCK_SIZE*sizeof(double), to house the three arrays in shared memory
//where t is a specific index into the 'atom' array
//
//the rth x array should be accessed by R[t + 3*BLOCK_SIZE]
//the rth y array should be accessed by R[t + BLOCK_SIZE + 3*BLOCK_SIZE]
//the rth z array should be accessed by R[t + BLOCK_SIZE*2 + 3*BLOCK_SIZE]
int cur_id = blockIdx.x * blockDim.x + threadIdx.x;
int i, j, h_pos;
//int i_id, j_id;
// int cur_id;
double Lx, Ly, Lz, Rt;//, Rx, Ry, Rz;
double dist;
if(cur_id < acnt)
{
// This thread's atom, kept in registers for the whole kernel.
Lx = d_atom_x_list[cur_id];
Ly = d_atom_y_list[cur_id];
Lz = d_atom_z_list[cur_id];
// Phase 1: pairs between this block's atoms and every LATER block's atoms
// (later-block tiles are staged through shared memory).
for(i = blockIdx.x +1; i < gridDim.x; i++)
{
cur_id = i * blockDim.x + threadIdx.x; //only valid threads may load into shared memory for block i
if(cur_id < acnt)
{
R[threadIdx.x] = d_atom_x_list[cur_id];
R[threadIdx.x + blockDim.x] = d_atom_y_list[cur_id];
R[threadIdx.x + blockDim.x*2] = d_atom_z_list[cur_id];
}
__syncthreads();
for(j = 0; j < blockDim.x; j++)
{
cur_id = i * blockDim.x + j; //now this prevents us from writing junk data for thread j
if(cur_id < acnt)
{
// Rx = R[j];
// Ry = R[j + blockDim.x];
// Rz = R[j + blockDim.x*2];
// dist = sqrt((Lx - Rx)*(Lx-Rx) + (Ly - Ry)*(Ly - Ry) + (Lz - Rz)*(Lz - Rz));
// Accumulate squared deltas one axis at a time.
dist = 0.0;
//Rx
Rt = Lx - R[j];
Rt *= Rt;
dist += Rt;
//Ry
Rt = Ly - R[j + blockDim.x];
Rt *= Rt;
dist += Rt;
//Rz
Rt = Lz - R[j + blockDim.x*2];
Rt *= Rt;
dist += Rt;
dist = sqrt(dist);
h_pos = (int)(dist/res);
atomicAdd(&d_histogram[h_pos], 1);
}
}
__syncthreads();
}
//now load the L values into R
// Phase 2: intra-block pairs; each thread pairs with higher-indexed
// threads only, so every pair is counted exactly once.
R[threadIdx.x] = Lx;
R[threadIdx.x + blockDim.x] = Ly;
R[threadIdx.x + blockDim.x*2] = Lz;
__syncthreads();
for(i = threadIdx.x+ 1; i < blockDim.x; i++)
{
cur_id = blockIdx.x * blockDim.x + i; //we only proceed with valid threads for each thread i
if(cur_id < acnt)
{
// Rx = R[i];
// Ry = R[i + blockDim.x];
// Rz = R[i + blockDim.x*2];
// dist = sqrt((Lx - Rx)*(Lx-Rx) + (Ly - Ry)*(Ly - Ry) + (Lz - Rz)*(Lz - Rz));
dist = 0.0;
//Rx
Rt = Lx - R[i];
Rt *= Rt;
dist += Rt;
//Ry
Rt = Ly - R[i + blockDim.x];
Rt *= Rt;
dist += Rt;
//Rz
Rt = Lz - R[i + blockDim.x*2];
Rt *= Rt;
dist += Rt;
dist = sqrt(dist);
h_pos = (int)(dist/res);
atomicAdd(&d_histogram[h_pos], 1);
}
}
}
}
// Copies a single float from *b to *a; the n parameter is unused.
// NOTE(review): the "7,972 | " fragment is a dataset-extraction artifact.
7,972 | __global__ void kernel(float *a, float *b, int n) {
*a = *b;
}
// Minimal launch demo: allocate two device buffers, copy one element on the
// device, synchronize, and clean up.
int main(void) {
    float *a, *b;
    cudaMalloc(&a, 10 * sizeof(float));
    cudaMalloc(&b, 10 * sizeof(float));
    // NOTE(review): b is never initialized, so the value the kernel copies is
    // indeterminate -- presumably this demo only exercises the launch path.
    kernel<<<1,1>>>(a,b,10);
    cudaDeviceSynchronize();
    // Fix: the original leaked both device allocations.
    cudaFree(a);
    cudaFree(b);
}
7,973 | #include <cuda.h>
#include <cuda_runtime.h>
// Round-trip a zeroed host buffer through device memory.
int main(){
    const unsigned int X = 1048576;            // 2^20 ints (4 MiB total)
    const unsigned int bytes = X * sizeof(int);
    int *hostArray = (int*) malloc(bytes);
    // Fix: guard against allocation failure before touching the buffer.
    if (hostArray == NULL)
        return 1;
    int *deviceArray;
    cudaMalloc((int**)&deviceArray, bytes);
    memset(hostArray, 0, bytes);
    // Host -> device, then device -> host.
    cudaMemcpy(deviceArray, hostArray, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(hostArray, deviceArray, bytes, cudaMemcpyDeviceToHost);
    cudaFree(deviceArray);
    free(hostArray);   // fix: the original leaked the host buffer
    return 0;
}
7,974 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#define ROW 773
#define COL 26
#define TRAIN_ROW 541
#define TEST_ROW 232
// define nodes
#define INPUT_NODES 26
#define HIDDEN_NODES 10
#define OUTPUT_NODES 1
#define ALPHA 0.1
// Activation Functions
// Logistic activation for device code: 1/(1+e^-x). The math runs in double
// via exp() and is narrowed to float on return, matching the host sigmoid().
__device__ float sigmoid_device(float x){
    const double denom = 1 + exp(-x);
    return 1 / denom;
}
// Host-side logistic activation 1/(1+e^-x), evaluated in double precision
// and narrowed to float on return.
float sigmoid(float x){
    const double denom = 1 + exp(-x);
    return 1 / denom;
}
// Derivative of the logistic function expressed via its output: if
// s = sigmoid(z) then ds/dz = s*(1-s). The argument here is the activation
// value, not the pre-activation.
__device__ float diff_Sigmoid(float x){
    const float one_minus_x = 1 - x;
    return x * one_minus_x;
}
// Forward pass, input -> hidden: layer1[h] = sigmoid(input . W1[h] + b1[h]).
// One thread per hidden node; weight1 is row-major [HIDDEN_NODES][INPUT_NODES].
__global__ void cuda_forward_1(float* input, float* weight1, float* layer1, float* bias_layer1){
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= HIDDEN_NODES)
        return;
    float acc = 0.0f;
    for (int in = 0; in < INPUT_NODES; ++in)
        acc += input[in] * weight1[node*INPUT_NODES + in];
    layer1[node] = sigmoid_device(acc + bias_layer1[node]);
}
// Forward pass, hidden -> output: layer2[o] = sigmoid(layer1 . W2[o] + b2[o]).
// One thread per output node; weight2 is row-major [OUTPUT_NODES][HIDDEN_NODES].
__global__ void cuda_forward_2(float* weight2, float* layer1, float* layer2, float* bias_layer2){
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= OUTPUT_NODES)
        return;
    float acc = 0.0f;
    for (int h = 0; h < HIDDEN_NODES; ++h)
        acc += layer1[h] * weight2[node*HIDDEN_NODES + h];
    layer2[node] = sigmoid_device(acc + bias_layer2[node]);
}
// Output-layer delta: d3 = prediction - label (gradient of 0.5*err^2 w.r.t.
// the output). One thread per output node.
__global__ void cuda_backprop_out(float* d3, float *layer2, float *label){
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node < OUTPUT_NODES)
        d3[node] = layer2[node] - label[node];
}
// Hidden-layer delta: back-propagate d3 through W2, then scale by the sigmoid
// derivative expressed via the activation: a*(1-a). One thread per hidden node.
__global__ void cuda_backprop_hidden(float* d2, float* layer1, float* weight2, float* d3){
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= HIDDEN_NODES)
        return;
    float back = 0.0f;
    for (int out = 0; out < OUTPUT_NODES; ++out)
        back += d3[out] * weight2[out*HIDDEN_NODES + node];
    d2[node] = back * (layer1[node] * (1 - layer1[node]));
}
// SGD step for the hidden->output weights: W2[o][h] -= a1[h] * d3[o] * ALPHA.
// One thread per output node updates that node's whole weight row.
__global__ void update_weight2(float* weight2, float* layer1, float* d3){
    const int out = blockIdx.x * blockDim.x + threadIdx.x;
    if (out >= OUTPUT_NODES)
        return;
    for (int h = 0; h < HIDDEN_NODES; ++h)
        weight2[out*HIDDEN_NODES + h] -= layer1[h]*d3[out]*ALPHA;
}
// SGD step for the input->hidden weights: W1[h][i] -= x[i] * d2[h] * ALPHA.
// One thread per hidden node updates that node's whole weight row.
__global__ void update_weight1(float* weight1, float* input, float* d2){
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= HIDDEN_NODES)
        return;
    for (int in = 0; in < INPUT_NODES; ++in)
        weight1[node*INPUT_NODES + in] -= input[in]*d2[node]*ALPHA;
}
// Run the trained network over the TEST_ROW x COL test matrix and write hard
// 0/1 predictions into pred_arr (thresholded at 0.5). weight1/weight2 are
// row-major [out][in] matrices; layer1/layer2 are scratch activation buffers.
// Note: biases are deliberately not applied here, matching the training-free
// CPU evaluation path of the original implementation.
void predict(float *input_matrix,
float *pred_arr,
float *weight1,
float *weight2,
float layer1[HIDDEN_NODES],
float layer2[OUTPUT_NODES])
{
float sample[COL];
for (int row = 0; row < TEST_ROW; row++) {
// Pull one test example out of the flattened matrix.
for (int col = 0; col < COL; col++)
sample[col] = input_matrix[row*COL + col];
// Hidden layer.
for (int h = 0; h < HIDDEN_NODES; h++) {
float z = 0.0f;
for (int in = 0; in < INPUT_NODES; in++)
z += sample[in]*weight1[h * INPUT_NODES + in];
layer1[h] = sigmoid(z);
}
// Output layer.
for (int out = 0; out < OUTPUT_NODES; out++) {
float z = 0.0f;
for (int h = 0; h < HIDDEN_NODES; h++)
z += layer1[h]*weight2[out * HIDDEN_NODES + h];
layer2[out] = sigmoid(z);
}
// Threshold each output at 0.5 (the last output wins, as in the original).
for (int out = 0; out < OUTPUT_NODES; out++)
pred_arr[row] = (layer2[out] > 0.5) ? 1 : 0;
}
}
// CPU reference implementation of the training loop: for p_epoch epochs,
// runs forward + backprop + SGD weight update over every training row.
// Always returns 0. NOTE(review): biases are not used anywhere in this CPU
// path, unlike the GPU kernels -- confirm whether that is intentional.
float train_nn(float *input_matrix,
float label[TRAIN_ROW],
float *weight1,
float *weight2,
float layer1[HIDDEN_NODES],
float layer2[OUTPUT_NODES],
int p_epoch)
{
//this will be each extracted input row
float input[COL];
for(int epoch=0; epoch < p_epoch; epoch++){
// iterate through input matrix row by row, extracting each row for training
for(int row = 0; row < TRAIN_ROW; row++){
for(int col = 0; col < COL; col++){
input[col] = input_matrix[row*COL + col];
}
//this is for one row instance of forward and backprop
// FORWARD PROPAGATION:
// hidden activations: layer1[i] = sigmoid(input . weight1[i])
for(int i = 0; i < HIDDEN_NODES; i++){
float act = 0.0;
for(int j = 0; j < INPUT_NODES; j++){
act += input[j]*weight1[i*INPUT_NODES + j];
}
layer1[i] = sigmoid(act);
}
// output activations: layer2[i] = sigmoid(layer1 . weight2[i])
for(int i = 0; i < OUTPUT_NODES; i++){
float act = 0.0;
for(int j = 0; j < HIDDEN_NODES; j++){
act += layer1[j]*weight2[i* HIDDEN_NODES + j];
}
layer2[i] = sigmoid(act);
}
// BACKPROPAGATION:
// calculate errors
float d3[OUTPUT_NODES];
for(int i = 0; i < OUTPUT_NODES; i++){
float error_output = layer2[i] - label[row];
d3[i] = error_output;
}
// hidden deltas: propagate d3 back through weight2 and scale by the
// sigmoid derivative expressed via the activation, a*(1-a)
float d2[HIDDEN_NODES];
for(int i = 0; i < HIDDEN_NODES; i++){
float error_hidden = 0.0;
for(int j = 0; j < OUTPUT_NODES; j++){
error_hidden += d3[j]*weight2[j*HIDDEN_NODES + i];
}
d2[i] = error_hidden * (layer1[i] * (1 - layer1[i]));
}
// update weights
for(int i = 0; i < OUTPUT_NODES; i++){
for(int j = 0; j < HIDDEN_NODES; j++){
weight2[i*HIDDEN_NODES + j] -= layer1[j]*d3[i]*ALPHA;
}
}
for(int i = 0; i < HIDDEN_NODES; i++){
for(int j = 0; j < INPUT_NODES; j++){
weight1[i*INPUT_NODES + j] -= input[j]*d2[i]*ALPHA;
}
}
}
}
return 0;
}
/* Read comma-separated floats from 'path' sequentially into 'dst'.
 * Fixes relative to the original inline code: the FILE* is now checked
 * (the original passed a NULL stream to fgets on a missing file) and is
 * closed afterwards (the original leaked all four file handles). */
static void load_csv_floats(const char *path, float *dst)
{
    FILE *fp = fopen(path, "r");
    if (fp == NULL) {
        fprintf(stderr, "Could not open %s\n", path);
        exit(1);
    }
    char line[1024];
    int count = 0;
    while (fgets(line, sizeof(line), fp)) {
        /* strtok mutates its argument, so tokenize a private copy. */
        char *tmp = strdup(line);
        for (char *tok = strtok(tmp, ","); tok != NULL; tok = strtok(NULL, ","))
            dst[count++] = (float)atof(tok);
        free(tmp);
    }
    fclose(fp);
}
/* Load the training/test matrices and label vectors from the four fixed CSV
 * files in the working directory. The caller provides buffers of
 * TRAIN_ROW*COL, TRAIN_ROW, TEST_ROW*COL and TEST_ROW floats respectively. */
void import_data(float *train_arr, float *train_y_arr, float *test_arr , float *test_y_arr){
    load_csv_floats("train_data.csv", train_arr);
    load_csv_floats("train_y.csv", train_y_arr);
    load_csv_floats("test_data.csv", test_arr);
    load_csv_floats("test_y.csv", test_y_arr);
}
/* Train the 26-10-1 network on the GPU one row at a time, then evaluate the
 * learned weights on the CPU test path.
 * Fixes relative to the original:
 *  - output/output_test were allocated with malloc(sizeof(TRAIN_ROW*sizeof(float))),
 *    i.e. sizeof(size_t) bytes, not an array of floats (heap overflow when
 *    predict() wrote TEST_ROW entries); output_test was unused and is removed;
 *  - kernels were launched as <<<dimBlock, dimGrid>>> with 2-D shapes whose
 *    y-dimension the kernels ignore, so many threads passed the index guard
 *    with the same ind_x and the weight-update kernels raced on the same
 *    weights; launches now use a simple 1-D <<<grid, block>>> that assigns
 *    exactly one thread per node;
 *  - bias_layer2_device was sized with HIDDEN_NODES instead of OUTPUT_NODES;
 *  - cudaFree() was called on the host-side pointer tables instead of the
 *    per-row device buffers, and several host allocations were leaked;
 *  - copies of uninitialized layer/delta buffers to the device are dropped
 *    (the kernels fully overwrite them before reading). */
int main(int argc, char *argv[]){
    float train_arr[TRAIN_ROW*COL];
    float train_y_arr[TRAIN_ROW*1];
    float test_arr[TEST_ROW*COL];
    float test_y_arr[TEST_ROW*1];
    float weight_layer1[HIDDEN_NODES*INPUT_NODES];
    float weight_layer2[OUTPUT_NODES*HIDDEN_NODES];
    float bias_layer1[HIDDEN_NODES];
    float bias_layer2[OUTPUT_NODES];
    float layer1[HIDDEN_NODES];
    float layer2[OUTPUT_NODES];
    float d3[OUTPUT_NODES];
    /* One device buffer per training row and per label. */
    float** train_arr_device = new float*[TRAIN_ROW];
    float** train_arr_y_device = new float*[TRAIN_ROW];
    float* weight1_device;
    float* weight2_device;
    float* layer1_device;
    float* layer2_device;
    float* d3_device;
    float* d2_device;
    float* bias_layer1_device;
    float* bias_layer2_device;
    cudaDeviceReset();
    float** train_final = new float*[TRAIN_ROW];
    float** train_y_final = new float*[TRAIN_ROW];
    float *output = (float *)malloc(TEST_ROW * sizeof(float)); /* per-row predictions */
    /* IMPORT TRAINING/TEST DATA */
    import_data(train_arr, train_y_arr, test_arr, test_y_arr);
    /* Re-shape the flat training data into per-row arrays for the device. */
    for (size_t i = 0; i < TRAIN_ROW; i++) {
        train_final[i] = new float[COL];
        train_y_final[i] = new float[1];
        for (size_t j = 0; j < COL; j++) {
            train_final[i][j] = train_arr[i*COL + j];
        }
        train_y_final[i][0] = train_y_arr[i];
    }
    /* Random initial weights and biases in (0, 1). */
    for(int i = 0; i < HIDDEN_NODES; i++){
        for(int j = 0; j < INPUT_NODES; j++){
            weight_layer1[i*INPUT_NODES + j] = ((double)rand())/((double)RAND_MAX);
        }
    }
    for(int i = 0; i < OUTPUT_NODES; i++){
        for(int j = 0; j < HIDDEN_NODES; j++){
            weight_layer2[i*HIDDEN_NODES + j] = ((double)rand())/((double)RAND_MAX);
        }
    }
    for(int i = 0; i < HIDDEN_NODES; i++){
        bias_layer1[i] = ((double)rand())/((double)RAND_MAX);
    }
    for(int i = 0; i < OUTPUT_NODES; i++){
        bias_layer2[i] = ((double)rand())/((double)RAND_MAX);
    }
    /* Device copies of the data set and parameters. */
    for (size_t i = 0; i < TRAIN_ROW; i++) {
        cudaMalloc(&train_arr_device[i], sizeof(float)*COL);
        cudaMemcpy(train_arr_device[i], train_final[i], sizeof(float)*COL, cudaMemcpyHostToDevice);
        cudaMalloc(&train_arr_y_device[i], sizeof(float)*1);
        cudaMemcpy(train_arr_y_device[i], train_y_final[i], sizeof(float)*1, cudaMemcpyHostToDevice);
    }
    cudaMalloc(&weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES);
    cudaMemcpy(weight1_device, weight_layer1, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyHostToDevice);
    cudaMalloc(&weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES);
    cudaMemcpy(weight2_device, weight_layer2, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyHostToDevice);
    cudaMalloc(&layer1_device, sizeof(float)*HIDDEN_NODES);
    cudaMalloc(&layer2_device, sizeof(float)*OUTPUT_NODES);
    cudaMalloc(&d3_device, sizeof(float)*OUTPUT_NODES);
    cudaMalloc(&d2_device, sizeof(float)*HIDDEN_NODES);
    cudaMalloc(&bias_layer1_device, sizeof(float)*HIDDEN_NODES);
    cudaMemcpy(bias_layer1_device, bias_layer1, sizeof(float)*HIDDEN_NODES, cudaMemcpyHostToDevice);
    cudaMalloc(&bias_layer2_device, sizeof(float)*OUTPUT_NODES);
    cudaMemcpy(bias_layer2_device, bias_layer2, sizeof(float)*OUTPUT_NODES, cudaMemcpyHostToDevice);
    /* 1-D launch shapes: one thread per hidden/output node. */
    const int threads = 32;
    const int hiddenBlocks = (HIDDEN_NODES + threads - 1) / threads;
    const int outputBlocks = (OUTPUT_NODES + threads - 1) / threads;
    int epoch = 400;
    printf(" TRAINING WITH %d EPOCHS:\n__________________________________________________________________________\n__________________________________________________________________________\n\n", epoch);
    cudaEvent_t beginLaunch, endLaunch;
    cudaEventCreate(&beginLaunch);
    cudaEventCreate(&endLaunch);
    cudaEventRecord(beginLaunch,0);
    float mse_total = 0.0f;
    float mse_old = 100000;
    float mse_abs = 10000;
    int max_epoch = 0;
    /* Train until the epoch-to-epoch MSE change is tiny or the cap is hit. */
    while(mse_abs > 0.0001 && max_epoch < epoch){
        mse_total = 0.0f;
        for (size_t j = 0; j < TRAIN_ROW; j++) {
            cuda_forward_1<<<hiddenBlocks, threads>>>(train_arr_device[j], weight1_device, layer1_device, bias_layer1_device);
            cuda_forward_2<<<outputBlocks, threads>>>(weight2_device, layer1_device, layer2_device, bias_layer2_device);
            cuda_backprop_out<<<outputBlocks, threads>>>(d3_device, layer2_device, train_arr_y_device[j]);
            /* d3 is needed on the host to accumulate the epoch MSE; this
             * blocking copy also synchronizes with the kernels above. */
            cudaMemcpy(d3, d3_device, sizeof(float)*OUTPUT_NODES, cudaMemcpyDeviceToHost);
            mse_total += 0.5f*d3[0]*d3[0];   /* squared term: always >= 0 */
            cuda_backprop_hidden<<<hiddenBlocks, threads>>>(d2_device, layer1_device, weight2_device, d3_device);
            update_weight2<<<outputBlocks, threads>>>(weight2_device, layer1_device, d3_device);
            update_weight1<<<hiddenBlocks, threads>>>(weight1_device, train_arr_device[j], d2_device);
        }
        printf("%f\n", mse_total);
        mse_abs = fabsf(mse_old - mse_total);
        mse_old = mse_total;
        max_epoch += 1;
        printf("MSE ABS DIFFERENCE FOR EPOCH: %f\n", mse_abs);
    }
    float mse_final = mse_total;
    cudaEventRecord(endLaunch,0);
    cudaEventSynchronize(endLaunch);
    /* Pull the trained parameters back for CPU evaluation. */
    cudaMemcpy(weight_layer1, weight1_device, sizeof(float)*HIDDEN_NODES*INPUT_NODES, cudaMemcpyDeviceToHost);
    cudaMemcpy(weight_layer2, weight2_device, sizeof(float)*OUTPUT_NODES*HIDDEN_NODES, cudaMemcpyDeviceToHost);
    float time_share = 0;
    cudaEventElapsedTime(&time_share, beginLaunch, endLaunch);
    printf("The time taken to train with %d epochs is: %fms\n", max_epoch, time_share);
    printf("MSE FINAL: %f\n", mse_final);
    /* CPU-side evaluation on the held-out test set. */
    predict(test_arr, output, weight_layer1, weight_layer2, layer1, layer2);
    int count_final=0;
    for(int i = 0; i < TEST_ROW; i++){
        if(output[i] == test_y_arr[i]){
            count_final +=1;
        }
    }
    float prediction = (float)count_final/TEST_ROW;
    printf("The final prediction accuracy is: %f \n", prediction);
    /* Cleanup: free each per-row device buffer, then the host tables. */
    free(output);
    for (size_t i = 0; i < TRAIN_ROW; i++) {
        cudaFree(train_arr_device[i]);
        cudaFree(train_arr_y_device[i]);
        delete[] train_final[i];
        delete[] train_y_final[i];
    }
    delete[] train_arr_device;
    delete[] train_arr_y_device;
    delete[] train_final;
    delete[] train_y_final;
    cudaFree(weight1_device);
    cudaFree(weight2_device);
    cudaFree(bias_layer1_device);
    cudaFree(bias_layer2_device);
    cudaFree(layer1_device);
    cudaFree(layer2_device);
    cudaFree(d3_device);
    cudaFree(d2_device);
    cudaEventDestroy(beginLaunch);
    cudaEventDestroy(endLaunch);
    return 0;
}
|
7,975 | #include "cuda.h" /* CUDA runtime API */
#include "cstdio"
#include "math.h"
#include <sys/time.h>
/*****************************************************************************/
/*** local definition ************/
/*****************************************************************************/
#define MAX_NUMBER (1000000000)
#define NEEDED_PRIME_NUM (5)
/* Make sure the following definition satisfy the condition:
** (CPU_CALC_END * CPU_CALC_END) > MAX_NUMBER
** CPU runs the sieve arithmetic for the range [2, CPU_CALC_END)
** and GPU runs the remain part [CPU_CALC_END, MAX_NUMBER] */
#define CPU_CALC_END (32000)
#define BLOCK_SIZE (512)
typedef struct
{
int smallPrime;
int largePrime;
int distance;
} primeInfo;
/*****************************************************************************/
/*** Static Databases/Variables *****/
/*****************************************************************************/
/* The biggest NEEDED_PRIME_NUM distances between continuous prime number in
** sorted list. The largest distance will be saved at the first one
** primeList[0]. Here, one more item is defined for simplify the calculation
** in loop. */
primeInfo primeList[NEEDED_PRIME_NUM+1];
/* The number of found prime number. Range: 0 ~ NEEDED_PRIME_NUM */
int foundPrimeNum;
/* Function insertNewDistance(int distance, int smallerPrime, int largePrime)
*******************************************************************************
* Function description: getPrimeCUDA() is used to insert the found new large
* distance.
*
* Inputs:
* distance: the new distance between smaller prime and larger prime
* smallerPrime: small prime
* largerPrime: large prime
*
* Output:
* Save the data to array primeList[] and update variable 'foundPrimeNum'
*
* Return:
* None
*
******************************************************************************/
/* Insert a newly-found prime gap into the globally sorted top list
 * (primeList, largest distance first), shifting smaller records down.
 * primeList has NEEDED_PRIME_NUM+1 slots so the shift never overruns; at
 * most NEEDED_PRIME_NUM records are retained (tracked by foundPrimeNum). */
void insertNewDistance(int distance, int smallPrime, int largePrime)
{
    int slot;
    /* Walk upward from the tail; stop at the first slot whose predecessor
     * holds a distance at least as large as the new one. */
    for (slot = foundPrimeNum; slot >= 0; slot--)
    {
        if ((0 == slot) || (distance <= primeList[slot - 1].distance))
        {
            primeList[slot].smallPrime = smallPrime;
            primeList[slot].largePrime = largePrime;
            primeList[slot].distance   = distance;
            break;
        }
        if (NEEDED_PRIME_NUM != slot)
        {
            /* Shift the smaller record one slot toward the tail. */
            primeList[slot] = primeList[slot - 1];
        }
    }
    /* Keep at most NEEDED_PRIME_NUM records. */
    if (foundPrimeNum < NEEDED_PRIME_NUM)
    {
        foundPrimeNum++;
    }
}
/* Function getPrimeCUDA()
*******************************************************************************
* Function description: getPrimeCUDA() is used to find out all the primes.
*
* Inputs:
* dev: the pointer to whole buffer in device [0, 1000000000]
* prm: the array where the prime numbers in [2, CPU_CALC_END] found were save.
* limit: the prime numbers found by CPU (cuda limit)
*
* Output:
* The result is saved in the memory through pointer dev
*
* Return:
* None
*
******************************************************************************/
/* Sieve the range [CPU_CALC_END, MAX_NUMBER) on the device using the primes
 * found by the CPU. Launched as a single block of BLOCK_SIZE threads; each
 * thread owns one contiguous slice of the range and crosses out every
 * multiple of every CPU prime inside its slice (dev[i]==0 means composite). */
__global__ void getPrimeCUDA(unsigned char* dev, int* prm, int limit)
{
    const int tid = threadIdx.x;
    const int span = (MAX_NUMBER - CPU_CALC_END) / BLOCK_SIZE;
    const int lo = CPU_CALC_END + span * tid;
    /* The last thread absorbs the remainder of the range. */
    const int hi = (tid == (BLOCK_SIZE - 1)) ? MAX_NUMBER : (lo + span);
    for (int k = 0; k < limit; k++)
    {
        const int p = prm[k];
        /* First multiple of p at or after lo. */
        int m = lo - (lo % p);
        if (m < lo)
        {
            m += p;
        }
        for (; m < hi; m += p)
        {
            dev[m] = 0;
        }
    }
}
/* Hybrid CPU/GPU prime sieve over [2, MAX_NUMBER): the CPU sieves
 * [2, CPU_CALC_END) and collects its primes; the GPU uses those primes to
 * sieve the remainder; both passes track the NEEDED_PRIME_NUM largest gaps.
 * Fixes relative to the original:
 *  - "gettimeofday(¤tTime, NULL)" restored to gettimeofday(&currentTime, ...)
 *    ("&curr" had been mangled through the HTML entity "&curren;");
 *  - the first printf passed CPU_CALC_END with no matching conversion;
 *  - the 1 GB sieve allocation is now checked;
 *  - both device allocations are now freed. */
int main()
{
    unsigned char* sieve;
    unsigned char* devA;
    /* Primes found by the CPU, to be passed to the GPU.
     * Length estimate: after removing multiples of 2 and 3 at most
     * 1 - 1/2 - 1/3 = 1/6 of the range remains. */
    int primeByCPU[CPU_CALC_END/6];
    int* devPrimes;
    int foundByCPU = 0;
    int i;
    int j;
    int totalSize;
    struct timeval startTime;     /* start of the measured region */
    struct timeval currentTime;   /* updated at each checkpoint */
    int recSmallDist = 0;         /* smallest of the recorded distances */
    int lastPrime = 2;            /* previous prime seen in the scan */
    int currDistance;
    /* Verify setting for searching primes between [2, MAX_NUMBER]. */
    if ((CPU_CALC_END * CPU_CALC_END) <= MAX_NUMBER)
    {
        printf("The CPU_CALC_END (%d) * CPU_CALC_END is too small.\n", CPU_CALC_END);
        j = 2;
        while ((j*j) < MAX_NUMBER)
        {
            j++;
        }
        printf("Please change definition of from (%d) to value no less than (%d).\n", CPU_CALC_END, j);
        return 0;
    }
    totalSize = sizeof(unsigned char)*MAX_NUMBER;
    sieve = (unsigned char*)malloc(totalSize);
    if (sieve == NULL)
    {
        printf("Failed to allocate the sieve buffer.\n");
        return 1;
    }
    /* allocate arrays on device */
    cudaMalloc((void **) &devA, totalSize);
    for (i=2; i<MAX_NUMBER; i++)
    {
        sieve[i]=1; /* assume prime until crossed out */
    }
    gettimeofday(&startTime, NULL);
    /* Classic Eratosthenes sieve on [2, CPU_CALC_END), tracking gap records. */
    for (i=2; i<CPU_CALC_END; i++)
    {
        if(0 == sieve[i])
        {
            continue;
        }
        for (j=i+i;j<CPU_CALC_END;j=j+i)
        {
            sieve[j]=0;
        }
        primeByCPU[foundByCPU++] = i;
        currDistance = i - lastPrime;
        /* The current distance is at least the smallest record: save it. */
        if (currDistance >= recSmallDist)
        {
            insertNewDistance(currDistance, lastPrime, i);
            recSmallDist = primeList[foundPrimeNum - 1].distance;
        }
        lastPrime = i;
    }
    gettimeofday(&currentTime, NULL);
    printf ("First time taken by CPU: %f seconds\n",
        (double) (currentTime.tv_usec - startTime.tv_usec) / 1000000 +
        (double) (currentTime.tv_sec - startTime.tv_sec));
    /* allocate arrays on device and copy inputs (synchronous) */
    cudaMalloc((void **) &devPrimes, foundByCPU*sizeof(int));
    cudaMemcpy(devA, sieve, totalSize, cudaMemcpyHostToDevice);
    cudaMemcpy(devPrimes, primeByCPU, foundByCPU*sizeof(int), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    /* execute kernel (asynchronous!) */
    getPrimeCUDA<<<1, BLOCK_SIZE>>>(devA, devPrimes, foundByCPU);
    /* retrieve results from device (synchronous) */
    cudaMemcpy(&sieve[CPU_CALC_END], &devA[CPU_CALC_END], totalSize-CPU_CALC_END, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    /* Scan the GPU-sieved range for primes and new gap records. */
    for (i=CPU_CALC_END; i<MAX_NUMBER; i++)
    {
        if(sieve[i]==0)
        {
            continue;
        }
        currDistance = i - lastPrime;
        if (currDistance >= recSmallDist)
        {
            insertNewDistance(currDistance, lastPrime, i);
            recSmallDist = primeList[foundPrimeNum - 1].distance;
        }
        lastPrime = i;
    }
    gettimeofday(&currentTime, NULL);
    printf("Largest prime number is %d. \n", lastPrime);
    printf("Now, print the 5 biggest distances between two continue prime numbers.\n");
    for(i=0;i<NEEDED_PRIME_NUM;i++)
    {
        printf("Between continue prime number (%d) and (%d), the distance is (%d). \n", primeList[i].smallPrime, primeList[i].largePrime, primeList[i].distance);
    }
    printf ("Total time taken by CPU: %f seconds\n",
        (double) (currentTime.tv_usec - startTime.tv_usec) / 1000000 +
        (double) (currentTime.tv_sec - startTime.tv_sec));
    free(sieve);
    cudaFree(devA);
    cudaFree(devPrimes);
    return 0;
}
|
7,976 | /** \file
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <time.h>
#include <sys/time.h>
#include <stdint.h>
#define BLOCKLEN 512
int nextPower(int);
void die(const char *);
void warn(const char *);
void read_from_file(int *, char *, int);
void write_to_file(int *, char *, int);
/**
* play - Plays the game for one step.
* First, counts the neighbors, taking into account boundary conditions
* Then, acts on the rules.
* Updates need to happen all together, so a temporary new array is allocated
*/
// One generation of the Game of Life. Launched with one block per board row;
// each block stages its row plus the rows above and below in dynamic shared
// memory (3*N ints, passed as the third launch argument), then every thread
// strides across the row computing the 8-neighbor sum with cyclic (toroidal)
// boundary conditions and writes the next state to d_new.
__global__ void play(int *X, int *d_new, int N){
//a block of threads works on a row
int glob_i = blockIdx.x; //which row
int temp_i = glob_i;
//shared memory size = 3 lines * N cells each
extern __shared__ int localArray[];
//read row above (i-1) and write to localArray[0][j]
if (glob_i == 0){
temp_i = N - 1; //cyclic boundary condition
}else{
temp_i = glob_i - 1; //simple case
}
// grid-stride over the row's cells (blockDim.x may be smaller than N)
for(int j = threadIdx.x; j < N; j+=blockDim.x){
if(j<N){
localArray[0*N + j] = X[(temp_i)*N + j];
}
}
//read own row (i) and write to localArray[1][j]
for(int j = threadIdx.x; j < N; j+=blockDim.x){
if(j<N){
localArray[1*N + j] = X[(glob_i)*N + j];
}
}
//read from row below (i+1) and write to localArray[2][j]
if(glob_i == N-1){
temp_i = 0; //cyclic boundary condition
}else{
temp_i = glob_i + 1; //simple case
}
for(int j = threadIdx.x; j < N; j+=blockDim.x){
if(j<N){
localArray[2*N + j] = X[(temp_i)*N + j];
}
}
//wait for shared memory to be "full"
// (every thread of the block reaches this barrier: the loops above have a
// uniform trip count per thread, so there is no divergent sync here)
__syncthreads();
//shared memory is now complete, we're ready to operate on it
int up, down, left, right;
for(int j = threadIdx.x; j < N; j+=blockDim.x){
// NOTE(review): the inner j<N tests are redundant -- the loop condition
// already guarantees them -- but are kept verbatim.
if (j < N){
int idx = N*glob_i + j;
up = 0;
down = 2;
//cyclic boundary conditions
left = j == 0 ? N - 1 : j - 1;
right = j == N-1 ? 0 : j + 1;
// sum of the 8 neighbors from the staged 3xN window
int sum =
localArray[N*up+left]+ //i-1, j-1
localArray[N*up+j]+ //i-1, j
localArray[N*up+right]+ //i-1, j+1
localArray[N*1+left]+ //i, j-1
localArray[N*1+right]+ //i, j+1
localArray[N*down+left]+ //i+1, j-1
localArray[N*down+j]+ //i+1, j
localArray[N*down+right]; //i+1, j+1
//act based on rules - write to global array
if(localArray[1*N + j] == 0 && sum == 3 ){
d_new[idx]=1; //born
}else if ( localArray[1*N + j] == 1 && (sum < 2 || sum>3 ) ){
d_new[idx]=0; //dies - loneliness or overpopulation
}else{
d_new[idx] = localArray[1*N + j]; //nothing changes
}
}
}
return;
}
/**
* main - plays the game of life for t steps according to the rules:
* - A dead(0) cell with exactly 3 living neighbors becomes alive (birth)
* - A dead(0) cell with any other number of neighbors stays dead (barren)
* - A live(1) cell with 0 or 1 living neighbors dies (loneliness)
* - A live(1) cell with 4 or more living neighbors dies (overpopulation)
* - A live(1) cell with 2 or 3 living neighbors stays alive (survival)
*/
/* Play the Game of Life for t generations on an N x N board read from a
 * binary file, timing the device work with CUDA events.
 * Fixes relative to the original: the device allocations are checked, and
 * the timing events are destroyed (they were leaked). */
int main(int argc, char **argv){
    //sanity check for input
    if(argc !=4){
        printf("Usage: %s filename size t, where:\n", argv[0]);
        printf("\tfilename is the input file \n");
        printf("\tsize is the grid side and \n");
        printf("\tt generations to play\n");
        die("Wrong arguments");
    }
    //declarations
    char *filename = argv[1];
    int N = atoi(argv[2]);
    int t = atoi(argv[3]);
    int gen = 0;
    int *table = (int *)malloc(N*N*sizeof(int));
    if (!table)
        die("Couldn't allocate memory to table");
    //CUDA - one block per row, BLOCKLEN threads striding across each row
    dim3 threadsPerBlock(BLOCKLEN, 1);
    dim3 numBlocks(N, 1);
    //CUDA - timing
    float gputime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //read input
    read_from_file(table, filename, N);
    //CUDA - copy to device (fix: allocations are now checked)
    int *d_table;
    int *d_new;
    if (cudaMalloc(&d_table, N*N*sizeof(int)) != cudaSuccess ||
        cudaMalloc(&d_new, N*N*sizeof(int)) != cudaSuccess)
        die("Couldn't allocate device memory");
    cudaEventRecord(start, 0);
    cudaMemcpy(d_table, table, N*N*sizeof(int), cudaMemcpyHostToDevice);
    //play game for t generations, ping-ponging between the two buffers
    for(gen=0; gen<t; gen++){
        if(gen%2==0){
            //3*N*sizeof(int): dynamic shared memory for the 3-row window
            play<<<numBlocks, threadsPerBlock, 3*N*sizeof(int)>>>(d_table /*data*/, d_new /*temp*/, N);
        }else{
            play<<<numBlocks, threadsPerBlock, 3*N*sizeof(int)>>>(d_new /*data*/, d_table /*temp*/, N);
        }
        cudaDeviceSynchronize(); //don't continue if kernel not done
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gputime, start, stop);
    printf("[%d]\t %g \n",gen, gputime/1000.0f);
    //after an odd number of generations the freshest board is in d_new
    if(t%2==1){
        cudaMemcpy(table, d_new, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    }else{
        cudaMemcpy(table, d_table, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    }
    //save output for later
    write_to_file(table, filename, N);
    free(table);
    cudaFree(d_new);
    cudaFree(d_table);
    //fix: the original leaked the timing events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
/**
* die - display an error and terminate.
* Used when some fatal error happens
* and continuing would mess things up.
*/
/**
 * die - report a fatal error and terminate with exit status 1.
 * Uses perror() when errno is set so the system error text is included;
 * otherwise prints a plain "Error:" line.
 */
void die(const char *message){
    if (errno)
        perror(message);
    else
        printf("Error: %s\n", message);
    exit(1);
}
/**
* warn - display a warning and continue
* used when something didn't go as expected
*/
/**
 * warn - report a non-fatal problem and continue.
 * Uses perror() when errno is set; otherwise prints a plain "Warning:" line.
 */
void warn(const char *message){
    if (errno)
        perror(message);
    else
        printf("Warning: %s\n", message);
}
/**
* read_from_file - read N*N integer values from an appropriate file.
* Saves the game's board into array X for use by other functions
* Warns or kills the program if something goes wrong
*/
/**
 * read_from_file - read N*N integer values from a binary file into X.
 * Dies on a missing file or an empty read; warns when fewer than N*N
 * elements were available.
 * Fix: the original called fread() BEFORE checking fp for NULL, so a
 * missing file dereferenced a null stream instead of reaching die().
 */
void read_from_file(int *X, char *filename, int N){
    FILE *fp = fopen(filename, "r+");
    if(!fp)
        die("Couldn't open file to read");
    int size = fread(X, sizeof(int), N*N, fp);
    if(!size)
        die("Couldn't read from file");
    if(N*N != size)
        warn("Expected to read different number of elements");
    printf("elements read: %d\n", size);
    fclose(fp);
    return;
}
/**
* write_to_file - write N*N integer values to a binary file.
* Saves game's board from array X to the file
* Names the file tableNxN_new.bin, so the input file is not overwritten
*/
/**
 * write_to_file - write N*N integer values to a binary file.
 * Writes to "cuda_tableNxN.bin" so the input file is not overwritten;
 * the filename parameter is accepted for interface compatibility but,
 * as in the original, is not used.
 * Dies when the file cannot be opened or nothing could be written; warns
 * on a short write.
 */
void write_to_file(int *X, char *filename, int N){
    char outname[100];
    sprintf(outname, "cuda_table%dx%d.bin", N, N);
    printf("writing to: %s\n", outname);
    FILE *fp = fopen(outname, "w+");
    if (fp == NULL)
        die("Couldn't open file to write");
    int written = fwrite(X, sizeof(int), N*N, fp);
    if (written == 0)
        die("Couldn't write to file");
    if (written != N*N)
        warn("Expected to write different number of elements");
    fclose(fp);
}
/**
 * nextPower - smallest power of two that is >= N (returns 1 for N <= 1).
 * NOTE(review): as in the original, N larger than the biggest int power of
 * two is not handled (the shift would overflow).
 */
int nextPower(int N){
    int p = 1;
    while (p < N)
        p <<= 1;
    return p;
}
|
// Assign each data point to its nearest centroid by Euclidean distance.
// One thread per point; data and centroids are row-major [id][dimension].
// Writes the winning cluster index into data_ca[tid].
__global__ void kmeans_cluster_assignment(float *data, int *data_ca,
                                          float *centroids, int numData,
                                          int numCluster, int numDimensions) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= numData)
    return;
  float best_dist = INFINITY;
  int best_cluster = -1;
  for (int c = 0; c < numCluster; ++c) {
    float acc = 0;
    for (int dim = 0; dim < numDimensions; ++dim) {
      float diff = data[tid * numDimensions + dim] -
                   centroids[c * numDimensions + dim];
      acc += diff * diff;
    }
    // sqrt is monotone; it is kept only to mirror the original computation.
    float d = sqrt(acc);
    if (d < best_dist) {
      best_dist = d;
      best_cluster = c;
    }
  }
  data_ca[tid] = best_cluster;
}
// Host-side launcher for kmeans_cluster_assignment; the caller chooses the
// launch split (grid_size * block_size should cover numData).
void kmeans_cluster_assignment_wrapper(int grid_size, int block_size,
float *data, int *data_ca,
float *centroids, int numData,
int numCluster, int numDimensions) {
kmeans_cluster_assignment<<<grid_size, block_size>>>(
data, data_ca, centroids, numData, numCluster, numDimensions);
}
// Accumulate per-cluster coordinate sums and member counts for one k-means
// iteration. Each block stages its points in dynamic shared memory
// (blockDim.x * numDimensions floats followed by blockDim.x ints); then the
// first numCluster*numDimensions threads of the block reduce over the staged
// points and publish block-level partials with atomics.
// Requires blockDim.x >= numCluster * numDimensions.
// Fixes relative to the original:
//  - "atomicAdd(¢roids[...])" restored to atomicAdd(&centroids[...]) --
//    "&cent" had been mangled through the HTML entity "&cent;";
//  - threads with tid >= numData no longer return before __syncthreads()
//    (a divergent barrier in the tail block, and those threads' share of the
//    reduction was silently dropped); they now stage a sentinel instead.
__global__ void kmeans_centroid_sum(float *data, int *data_ca, float *centroids,
                                    int *cluster_count, int numData,
                                    int numCluster, int numDimensions) {
  extern __shared__ float shm[];
  float *s_data = (float *)shm;
  int *s_ca = (int *)(s_data + blockDim.x * numDimensions);
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < numData) {
    for (int dim = 0; dim < numDimensions; ++dim) {
      s_data[threadIdx.x * numDimensions + dim] = data[tid * numDimensions + dim];
    }
    s_ca[threadIdx.x] = data_ca[tid];
  } else {
    s_ca[threadIdx.x] = -1;  // sentinel: matches no cluster, ignored below
  }
  __syncthreads();
  if (threadIdx.x < numCluster * numDimensions) {
    int cluster = threadIdx.x / numDimensions;
    int dim = threadIdx.x % numDimensions;
    float sum = 0.0;
    int count = 0;
    for (int i = 0; i < blockDim.x; ++i) {
      if (s_ca[i] == cluster) {
        ++count;
        sum += s_data[i * numDimensions + dim];
      }
    }
    atomicAdd(&centroids[cluster * numDimensions + dim], sum);
    if (dim == 0)
      atomicAdd(&cluster_count[cluster], count);
  }
}
// Host-side launcher for kmeans_centroid_sum. The dynamic shared-memory
// request matches the kernel's layout: block_size*numDimensions floats for
// the staged points plus block_size ints for the staged cluster assignments.
void kmeans_centroid_sum_wrapper(int grid_size, int block_size, float *data,
int *data_ca, float *centroids,
int *cluster_count, int numData,
int numCluster, int numDimensions) {
kmeans_centroid_sum<<<grid_size, block_size,
block_size *(sizeof(int) +
numDimensions * sizeof(float))>>>(
data, data_ca, centroids, cluster_count, numData, numCluster,
numDimensions);
}
// Divide each accumulated centroid coordinate by its cluster population.
// NOTE(review): only threadIdx.x is consulted, so this presumably expects a
// single block covering numCluster*numDimensions threads -- confirm at the
// call site. A cluster_count of zero divides by zero, as in the original.
__global__ void kmeans_centroid_update(float *centroids, int *cluster_count,
                                       int numCluster, int numDimensions) {
  const int slot = threadIdx.x;
  if (slot >= numCluster * numDimensions)
    return;
  const int cluster = slot / numDimensions;
  const int dim = slot % numDimensions;
  centroids[cluster * numDimensions + dim] /= cluster_count[cluster];
}
// Launches kmeans_centroid_update. (The "centriod" spelling is preserved:
// external callers link against this exact name.)
void kmeans_centriod_update_wrapper(int grid_size, int block_size,
                                    float *centroids, int *cluster_count,
                                    int numCluster, int numDimensions) {
  const dim3 grid(grid_size);
  const dim3 block(block_size);
  kmeans_centroid_update<<<grid, block>>>(centroids, cluster_count, numCluster,
                                          numDimensions);
}
|
7,978 | // Basic CUDA Program
// Author: alpha74
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void kernel();
// Prints from the host, launches a single-thread kernel that prints from the
// device, then exits.
// Fix: kernel launches are asynchronous and device-side printf output is only
// flushed at a synchronization point, so without cudaDeviceSynchronize() the
// process could exit before " Hello KERNEL" ever appeared.
int main()
{
    printf("\n Hello HOST ");
    kernel <<<1, 1>>> ();
    cudaDeviceSynchronize();   // wait for the kernel and flush its printf buffer
    printf("\n");
    return 0;
}
// Device-side hello: each launched thread prints once via device printf.
// Output only becomes visible after a host-side synchronization point.
__global__ void kernel()
{
    printf("\n Hello KERNEL" );
}
|
7,979 |
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <math.h>
/*
 * Allocate an m-by-n matrix of doubles as one contiguous block plus an array
 * of m row pointers into it. Returns NULL if either dimension is
 * non-positive or an allocation fails (nothing is leaked on failure).
 * On success the caller releases it with free(A[0]); free(A);.
 */
double **malloc_matrix(int m, int n){
    if (m <= 0 || n <= 0)
        return NULL;
    double **rows = (double **)malloc(m * sizeof(double *));
    if (rows == NULL)
        return NULL;
    double *block = (double *)malloc(m*n*sizeof(double));
    if (block == NULL) {
        free(rows);
        return NULL;
    }
    /* Point every row at its slice of the contiguous block. */
    for (int r = 0; r < m; r++)
        rows[r] = block + r * n;
    return rows;
}
/* Zero every entry of the m-by-n matrix A (row-pointer representation). */
void
init_data (int m, int n, double **A) {
    for (int row = 0; row < m; row++) {
        double *r = A[row];
        for (int col = 0; col < n; col++)
            r[col] = 0.0;
    }
}
/*
 * Fill the interior (1..N)x(1..N) of the (N+2)x(N+2) flat array A with a box
 * source term: cells whose indices fall in [i_inf,i_sup] x [j_inf,j_sup] get
 * 200*delta^2 (delta = 2/(N+1)); the rest of the interior gets 0. The border
 * rows/columns 0 and N+1 are left untouched.
 * NOTE(review): 2*(N+1)/3, (N+1)/6 and (N+1)/3 are evaluated in integer
 * arithmetic before being stored into doubles, so those box bounds are
 * truncated — kept exactly as in the original; confirm it is intentional.
 */
void init_f (int N, double *A){
    const double i_inf = 0.5*(N+1), i_sup = 2*(N+1)/3;
    const double j_inf = (N+1)/6, j_sup = (N+1)/3;
    const double delta = 2.0/(N+1);
    const double fdelta2 = 200*delta*delta;
    for (int i = 1; i <= N; i++)
        for (int j = 1; j <= N; j++) {
            int inside = i >= i_inf && i <= i_sup && j >= j_inf && j <= j_sup;
            A[i*(N+2)+j] = inside ? fdelta2 : 0.0;
        }
}
/*
 * Row-pointer variant of init_f: same box source term written through a
 * double** matrix instead of a flat array. Interior cells inside
 * [i_inf,i_sup] x [j_inf,j_sup] get 200*delta^2, the rest of the interior 0;
 * borders are untouched. The truncating integer divisions in the bounds are
 * kept identical to init_f.
 */
void init_f_2d (int N, double **A){
    const double i_inf = 0.5*(N+1), i_sup = 2*(N+1)/3;
    const double j_inf = (N+1)/6, j_sup = (N+1)/3;
    const double delta = 2.0/(N+1);
    const double fdelta2 = 200*delta*delta;
    for (int i = 1; i <= N; i++)
        for (int j = 1; j <= N; j++) {
            int inside = i >= i_inf && i <= i_sup && j >= j_inf && j <= j_sup;
            A[i][j] = inside ? fdelta2 : 0.0;
        }
}
|
7,980 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the process with a diagnostic if any preceding CUDA API call or
// kernel launch left a pending error; `message` identifies the call site.
void check_error (const char* message) {
  cudaError_t err = cudaGetLastError ();
  if (err == cudaSuccess)
    return;
  printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (err));
  exit(-1);
}
// curvi: one sweep of a curvilinear-grid finite-difference stencil that
// accumulates cross-derivative flux terms into the first component r1.
// Machine-generated/unrolled code: each thread owns one (i,k) column
// (k from blockIdx.x/threadIdx.x, i from blockIdx.z/threadIdx.z) and marches
// j across the interior, combining +/-2-offset differences (weight c2) and
// +/-1-offset differences (weight c1) of u1/u2/u3, weighted by the material
// fields mu/la, the metric arrays met1..met4 and the stretch factors
// strx/stry. Hard-coded for a 304^3 padded layout (N <= 304); the valid
// interior is 2..N-3 in every index.
// NOTE(review): the guard uses bitwise '&' on comparison results —
// functionally equivalent to '&&' here, but unidiomatic.
// NOTE(review): strx[i+2] (term _t_41_) and strx[i-2] (term _t_60_) break the
// otherwise-uniform strx[i] pattern, as do stry[j+2]/stry[j-2] and
// stry[j+1]/stry[j-1] further down — possibly produced on purpose by the
// generator, but worth checking against the reference kernel.
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
  //Determing the block's indices
  int blockdim_k= (int)(blockDim.x);
  int k0 = (int)(blockIdx.x)*(blockdim_k);
  int k = max (k0, 0) + (int)(threadIdx.x);
  int blockdim_i= (int)(blockDim.z);
  int i0 = (int)(blockIdx.z)*(blockdim_i);
  int i = max (i0, 0) + (int)(threadIdx.z);
  // Reinterpret the flat device buffers as 304x304x304 volumes.
  double (*u1)[304][304] = (double (*)[304][304])in_u1;
  double (*u2)[304][304] = (double (*)[304][304])in_u2;
  double (*u3)[304][304] = (double (*)[304][304])in_u3;
  double (*mu)[304][304] = (double (*)[304][304])in_mu;
  double (*la)[304][304] = (double (*)[304][304])in_la;
  double (*r1)[304][304] = (double (*)[304][304])in_r1;
  double (*met1)[304][304] = (double (*)[304][304])in_met1;
  double (*met2)[304][304] = (double (*)[304][304])in_met2;
  double (*met3)[304][304] = (double (*)[304][304])in_met3;
  double (*met4)[304][304] = (double (*)[304][304])in_met4;
  if (i>=2 & k>=2 & i<=N-3 & k<=N-3) {
    for (int j=2; j<=N-3; j++) {
      // --- i-derivative of k-derivatives: rows i+2 / i-2 (c2 weight) ---
      double _t_87_ = u1[i+2][j][k+2];
      double _t_8_ = u1[i+2][j][k+2];
      _t_87_ -= u1[i+2][j][k-2];
      double _t_27_ = u1[i+2][j][k-2];
      double _t_84_ = c2 * _t_87_;
      double _t_88_ = u1[i+2][j][k+1];
      double _t_47_ = u1[i+2][j][k+1];
      _t_88_ -= u1[i+2][j][k-1];
      double _t_66_ = u1[i+2][j][k-1];
      _t_84_ += c1 * _t_88_;
      double _t_86_ = 2.0 * mu[i+2][j][k];
      _t_86_ += la[i+2][j][k];
      double _t_85_ = _t_86_ * met2[i+2][j][k];
      double _t_83_ = _t_85_ * met1[i+2][j][k];
      double _t_283_ = la[i+2][j][k] * met1[i+2][j][k];
      double _t_82_ = _t_83_ * _t_84_;
      double _t_81_ = _t_82_ * strx[i];
      double _t_93_ = u2[i+2][j][k+2];
      double _t_13_ = u2[i+2][j][k+2];
      _t_93_ -= u2[i+2][j][k-2];
      double _t_32_ = u2[i+2][j][k-2];
      double _t_91_ = c2 * _t_93_;
      double _t_94_ = u2[i+2][j][k+1];
      double _t_52_ = u2[i+2][j][k+1];
      _t_94_ -= u2[i+2][j][k-1];
      double _t_71_ = u2[i+2][j][k-1];
      _t_91_ += c1 * _t_94_;
      double _t_92_ = la[i+2][j][k] * met3[i+2][j][k];
      double _t_90_ = _t_92_ * met1[i+2][j][k];
      double _t_89_ = _t_90_ * _t_91_;
      _t_81_ += _t_89_ * stry[j];
      double _t_98_ = u3[i+2][j][k+2];
      double _t_19_ = u3[i+2][j][k+2];
      _t_98_ -= u3[i+2][j][k-2];
      double _t_38_ = u3[i+2][j][k-2];
      double _t_96_ = c2 * _t_98_;
      double _t_99_ = u3[i+2][j][k+1];
      double _t_58_ = u3[i+2][j][k+1];
      _t_99_ -= u3[i+2][j][k-1];
      double _t_77_ = u3[i+2][j][k-1];
      _t_96_ += c1 * _t_99_;
      double _t_97_ = la[i+2][j][k] * met4[i+2][j][k];
      double _t_95_ = _t_97_ * met1[i+2][j][k];
      _t_81_ += _t_95_ * _t_96_;
      double _t_105_ = u1[i-2][j][k+2];
      _t_8_ -= u1[i-2][j][k+2];
      _t_105_ -= u1[i-2][j][k-2];
      _t_27_ -= u1[i-2][j][k-2];
      double _t_102_ = c2 * _t_105_;
      double _t_106_ = u1[i-2][j][k+1];
      _t_47_ -= u1[i-2][j][k+1];
      _t_106_ -= u1[i-2][j][k-1];
      _t_66_ -= u1[i-2][j][k-1];
      _t_102_ += c1 * _t_106_;
      double _t_104_ = 2.0 * mu[i-2][j][k];
      _t_104_ += la[i-2][j][k];
      double _t_103_ = _t_104_ * met2[i-2][j][k];
      double _t_101_ = _t_103_ * met1[i-2][j][k];
      double _t_288_ = la[i-2][j][k] * met1[i-2][j][k];
      double _t_100_ = _t_101_ * _t_102_;
      _t_81_ += _t_100_ * strx[i];
      double _t_111_ = u2[i-2][j][k+2];
      _t_13_ -= u2[i-2][j][k+2];
      _t_111_ -= u2[i-2][j][k-2];
      _t_32_ -= u2[i-2][j][k-2];
      double _t_109_ = c2 * _t_111_;
      double _t_112_ = u2[i-2][j][k+1];
      _t_52_ -= u2[i-2][j][k+1];
      _t_112_ -= u2[i-2][j][k-1];
      _t_71_ -= u2[i-2][j][k-1];
      _t_109_ += c1 * _t_112_;
      double _t_110_ = la[i-2][j][k] * met3[i-2][j][k];
      double _t_108_ = _t_110_ * met1[i-2][j][k];
      double _t_107_ = _t_108_ * _t_109_;
      _t_81_ += _t_107_ * stry[j];
      double _t_116_ = u3[i-2][j][k+2];
      _t_19_ -= u3[i-2][j][k+2];
      _t_116_ -= u3[i-2][j][k-2];
      _t_38_ -= u3[i-2][j][k-2];
      double _t_114_ = c2 * _t_116_;
      double _t_117_ = u3[i-2][j][k+1];
      _t_58_ -= u3[i-2][j][k+1];
      _t_117_ -= u3[i-2][j][k-1];
      _t_77_ -= u3[i-2][j][k-1];
      _t_114_ += c1 * _t_117_;
      double _t_115_ = la[i-2][j][k] * met4[i-2][j][k];
      double _t_113_ = _t_115_ * met1[i-2][j][k];
      _t_81_ += _t_113_ * _t_114_;
      double _t_80_ = c2 * _t_81_;
      // --- rows i+1 / i-1 (c1 weight) ---
      double _t_124_ = u1[i+1][j][k+2];
      double _t_9_ = u1[i+1][j][k+2];
      _t_124_ -= u1[i+1][j][k-2];
      double _t_28_ = u1[i+1][j][k-2];
      double _t_121_ = c2 * _t_124_;
      double _t_125_ = u1[i+1][j][k+1];
      double _t_48_ = u1[i+1][j][k+1];
      _t_125_ -= u1[i+1][j][k-1];
      double _t_67_ = u1[i+1][j][k-1];
      _t_121_ += c1 * _t_125_;
      double _t_123_ = 2.0 * mu[i+1][j][k];
      _t_123_ += la[i+1][j][k];
      double _t_122_ = _t_123_ * met2[i+1][j][k];
      double _t_120_ = _t_122_ * met1[i+1][j][k];
      double _t_294_ = la[i+1][j][k] * met1[i+1][j][k];
      double _t_119_ = _t_120_ * _t_121_;
      double _t_118_ = _t_119_ * strx[i];
      double _t_130_ = u2[i+1][j][k+2];
      double _t_14_ = u2[i+1][j][k+2];
      _t_130_ -= u2[i+1][j][k-2];
      double _t_33_ = u2[i+1][j][k-2];
      double _t_128_ = c2 * _t_130_;
      double _t_131_ = u2[i+1][j][k+1];
      double _t_53_ = u2[i+1][j][k+1];
      _t_131_ -= u2[i+1][j][k-1];
      double _t_72_ = u2[i+1][j][k-1];
      _t_128_ += c1 * _t_131_;
      double _t_129_ = la[i+1][j][k] * met3[i+1][j][k];
      double _t_127_ = _t_129_ * met1[i+1][j][k];
      double _t_126_ = _t_127_ * _t_128_;
      _t_118_ += _t_126_ * stry[j];
      double _t_135_ = u3[i+1][j][k+2];
      double _t_20_ = u3[i+1][j][k+2];
      _t_135_ -= u3[i+1][j][k-2];
      double _t_39_ = u3[i+1][j][k-2];
      double _t_133_ = c2 * _t_135_;
      double _t_136_ = u3[i+1][j][k+1];
      double _t_59_ = u3[i+1][j][k+1];
      _t_136_ -= u3[i+1][j][k-1];
      double _t_78_ = u3[i+1][j][k-1];
      _t_133_ += c1 * _t_136_;
      double _t_134_ = la[i+1][j][k] * met4[i+1][j][k];
      double _t_132_ = _t_134_ * met1[i+1][j][k];
      _t_118_ += _t_132_ * _t_133_;
      double _t_142_ = u1[i-1][j][k+2];
      _t_9_ -= u1[i-1][j][k+2];
      _t_142_ -= u1[i-1][j][k-2];
      _t_28_ -= u1[i-1][j][k-2];
      double _t_139_ = c2 * _t_142_;
      double _t_143_ = u1[i-1][j][k+1];
      _t_48_ -= u1[i-1][j][k+1];
      _t_143_ -= u1[i-1][j][k-1];
      _t_67_ -= u1[i-1][j][k-1];
      _t_139_ += c1 * _t_143_;
      double _t_141_ = 2.0 * mu[i-1][j][k];
      _t_141_ += la[i-1][j][k];
      double _t_140_ = _t_141_ * met2[i-1][j][k];
      double _t_138_ = _t_140_ * met1[i-1][j][k];
      double _t_299_ = la[i-1][j][k] * met1[i-1][j][k];
      double _t_137_ = _t_138_ * _t_139_;
      _t_118_ += _t_137_ * strx[i];
      double _t_148_ = u2[i-1][j][k+2];
      _t_14_ -= u2[i-1][j][k+2];
      _t_148_ -= u2[i-1][j][k-2];
      _t_33_ -= u2[i-1][j][k-2];
      double _t_146_ = c2 * _t_148_;
      double _t_149_ = u2[i-1][j][k+1];
      _t_53_ -= u2[i-1][j][k+1];
      _t_149_ -= u2[i-1][j][k-1];
      _t_72_ -= u2[i-1][j][k-1];
      _t_146_ += c1 * _t_149_;
      double _t_147_ = la[i-1][j][k] * met3[i-1][j][k];
      double _t_145_ = _t_147_ * met1[i-1][j][k];
      double _t_144_ = _t_145_ * _t_146_;
      _t_118_ += _t_144_ * stry[j];
      double _t_153_ = u3[i-1][j][k+2];
      _t_20_ -= u3[i-1][j][k+2];
      _t_153_ -= u3[i-1][j][k-2];
      _t_39_ -= u3[i-1][j][k-2];
      double _t_151_ = c2 * _t_153_;
      double _t_154_ = u3[i-1][j][k+1];
      _t_59_ -= u3[i-1][j][k+1];
      _t_154_ -= u3[i-1][j][k-1];
      _t_78_ -= u3[i-1][j][k-1];
      _t_151_ += c1 * _t_154_;
      double _t_152_ = la[i-1][j][k] * met4[i-1][j][k];
      double _t_150_ = _t_152_ * met1[i-1][j][k];
      _t_118_ += _t_150_ * _t_151_;
      _t_80_ += c1 * _t_118_;
      // First accumulation into the output value for (i,j,k).
      double r1ic0jc0kc0 = r1[i][j][k];
      r1ic0jc0kc0 += _t_80_ * stry[j];
      // --- k-derivative of i-derivatives: planes k+2/k-2 and k+1/k-1 ---
      double _t_17_ = c2 * _t_19_;
      double _v_0_ = c2 * _t_8_;
      double _v_3_ = c2 * _t_13_;
      double _v_9_ = c2 * _t_27_;
      double _v_12_ = c2 * _t_32_;
      double _v_15_ = c2 * _t_38_;
      double _v_25_ = c2 * _t_58_;
      double _v_19_ = c2 * _t_47_;
      double _v_22_ = c2 * _t_52_;
      double _v_28_ = c2 * _t_66_;
      double _v_31_ = c2 * _t_71_;
      double _v_34_ = c2 * _t_77_;
      _t_17_ += c1 * _t_20_;
      double _v_1_ = c1 * _t_9_;
      double _v_4_ = c1 * _t_14_;
      double _v_10_ = c1 * _t_28_;
      double _v_13_ = c1 * _t_33_;
      double _v_16_ = c1 * _t_39_;
      double _v_26_ = c1 * _t_59_;
      double _v_20_ = c1 * _t_48_;
      double _v_23_ = c1 * _t_53_;
      double _v_29_ = c1 * _t_67_;
      double _v_32_ = c1 * _t_72_;
      double _v_35_ = c1 * _t_78_;
      double _t_18_ = mu[i][j][k+2] * met4[i][j][k+2];
      double _t_16_ = _t_18_ * met1[i][j][k+2];
      double _t_15_ = _t_16_ * _t_17_;
      double _t_1_ = _t_15_ * stry[j];
      double _t_5_ = _v_0_;
      _t_5_ += _v_1_;
      double _t_7_ = 2.0 * mu[i][j][k+2];
      double _t_12_ = mu[i][j][k+2] * met3[i][j][k+2];
      _t_7_ += la[i][j][k+2];
      double _t_6_ = _t_7_ * met2[i][j][k+2];
      double _t_166_ = la[i][j][k+2] * met2[i][j][k+2];
      double _t_4_ = _t_6_ * met1[i][j][k+2];
      double _t_3_ = _t_4_ * _t_5_;
      double _t_2_ = _t_3_ * strx[i];
      _t_1_ += _t_2_ * stry[j];
      double _t_11_ = _v_3_;
      _t_11_ += _v_4_;
      double _t_10_ = _t_12_ * met1[i][j][k+2];
      _t_1_ += _t_10_ * _t_11_;
      double _t_24_ = _v_9_;
      _t_24_ += _v_10_;
      double _t_26_ = 2.0 * mu[i][j][k-2];
      _t_26_ += la[i][j][k-2];
      double _t_25_ = _t_26_ * met2[i][j][k-2];
      double _t_178_ = la[i][j][k-2] * met2[i][j][k-2];
      double _t_23_ = _t_25_ * met1[i][j][k-2];
      double _t_22_ = _t_23_ * _t_24_;
      double _t_21_ = _t_22_ * strx[i];
      _t_1_ += _t_21_ * stry[j];
      double _t_30_ = _v_12_;
      _t_30_ += _v_13_;
      double _t_31_ = mu[i][j][k-2] * met3[i][j][k-2];
      double _t_37_ = mu[i][j][k-2] * met4[i][j][k-2];
      double _t_29_ = _t_31_ * met1[i][j][k-2];
      _t_1_ += _t_29_ * _t_30_;
      double _t_36_ = _v_15_;
      _t_36_ += _v_16_;
      double _t_35_ = _t_37_ * met1[i][j][k-2];
      double _t_34_ = _t_35_ * _t_36_;
      _t_1_ += _t_34_ * stry[j];
      double _t_0_ = c2 * _t_1_;
      double _t_56_ = _v_25_;
      _t_56_ += _v_26_;
      double _t_57_ = mu[i][j][k+1] * met4[i][j][k+1];
      double _t_55_ = _t_57_ * met1[i][j][k+1];
      double _t_54_ = _t_55_ * _t_56_;
      double _t_40_ = _t_54_ * stry[j];
      double _t_44_ = _v_19_;
      _t_44_ += _v_20_;
      double _t_46_ = 2.0 * mu[i][j][k+1];
      double _t_51_ = mu[i][j][k+1] * met3[i][j][k+1];
      _t_46_ += la[i][j][k+1];
      double _t_45_ = _t_46_ * met2[i][j][k+1];
      double _t_191_ = la[i][j][k+1] * met2[i][j][k+1];
      double _t_43_ = _t_45_ * met1[i][j][k+1];
      double _t_42_ = _t_43_ * _t_44_;
      double _t_41_ = _t_42_ * strx[i+2];
      _t_40_ += _t_41_ * stry[j];
      double _t_50_ = _v_22_;
      _t_50_ += _v_23_;
      double _t_49_ = _t_51_ * met1[i][j][k+1];
      _t_40_ += _t_49_ * _t_50_;
      double _t_63_ = _v_28_;
      _t_63_ += _v_29_;
      double _t_65_ = 2.0 * mu[i][j][k-1];
      _t_65_ += la[i][j][k-1];
      double _t_64_ = _t_65_ * met2[i][j][k-1];
      double _t_203_ = la[i][j][k-1] * met2[i][j][k-1];
      double _t_62_ = _t_64_ * met1[i][j][k-1];
      double _t_61_ = _t_62_ * _t_63_;
      double _t_60_ = _t_61_ * strx[i-2];
      _t_40_ += _t_60_ * stry[j];
      double _t_69_ = _v_31_;
      _t_69_ += _v_32_;
      double _t_70_ = mu[i][j][k-1] * met3[i][j][k-1];
      double _t_76_ = mu[i][j][k-1] * met4[i][j][k-1];
      double _t_68_ = _t_70_ * met1[i][j][k-1];
      _t_40_ += _t_68_ * _t_69_;
      double _t_75_ = _v_34_;
      _t_75_ += _v_35_;
      double _t_74_ = _t_76_ * met1[i][j][k-1];
      double _t_73_ = _t_74_ * _t_75_;
      _t_40_ += _t_73_ * stry[j];
      _t_0_ += c1 * _t_40_;
      r1ic0jc0kc0 += _t_0_;
      double _t_281_ = _t_283_ * met1[i+2][j][k];
      double _t_286_ = _t_288_ * met1[i-2][j][k];
      double _t_292_ = _t_294_ * met1[i+1][j][k];
      double _t_297_ = _t_299_ * met1[i-1][j][k];
      // --- j-derivative of k-derivatives ---
      double _t_161_ = _t_12_;
      double _t_159_ = _t_161_ * met1[i][j][k+2];
      double _t_162_ = u1[i][j+2][k+2];
      double _t_213_ = u1[i][j+2][k+2];
      _t_162_ -= u1[i][j-2][k+2];
      double _t_225_ = u1[i][j-2][k+2];
      double _t_160_ = c2 * _t_162_;
      double _t_163_ = u1[i][j+1][k+2];
      double _t_238_ = u1[i][j+1][k+2];
      _t_163_ -= u1[i][j-1][k+2];
      double _t_250_ = u1[i][j-1][k+2];
      _t_160_ += c1 * _t_163_;
      double _t_158_ = _t_159_ * _t_160_;
      double _t_157_ = _t_158_ * stry[j+2];
      double _t_156_ = _t_157_ * strx[i];
      double _t_164_ = _t_166_ * met1[i][j][k+2];
      double _t_167_ = u2[i][j+2][k+2];
      double _t_218_ = u2[i][j+2][k+2];
      _t_167_ -= u2[i][j-2][k+2];
      double _t_230_ = u2[i][j-2][k+2];
      double _t_165_ = c2 * _t_167_;
      double _t_168_ = u2[i][j+1][k+2];
      double _t_243_ = u2[i][j+1][k+2];
      _t_168_ -= u2[i][j-1][k+2];
      double _t_255_ = u2[i][j-1][k+2];
      _t_165_ += c1 * _t_168_;
      _t_156_ += _t_164_ * _t_165_;
      double _t_173_ = _t_31_;
      double _t_171_ = _t_173_ * met1[i][j][k-2];
      double _t_174_ = u1[i][j+2][k-2];
      _t_213_ -= u1[i][j+2][k-2];
      _t_174_ -= u1[i][j-2][k-2];
      _t_225_ -= u1[i][j-2][k-2];
      double _t_172_ = c2 * _t_174_;
      double _t_175_ = u1[i][j+1][k-2];
      _t_238_ -= u1[i][j+1][k-2];
      _t_175_ -= u1[i][j-1][k-2];
      _t_250_ -= u1[i][j-1][k-2];
      _t_172_ += c1 * _t_175_;
      double _t_170_ = _t_171_ * _t_172_;
      double _t_169_ = _t_170_ * stry[j];
      _t_156_ += _t_169_ * strx[i];
      double _t_176_ = _t_178_ * met1[i][j][k-2];
      double _t_179_ = u2[i][j+2][k-2];
      _t_218_ -= u2[i][j+2][k-2];
      _t_179_ -= u2[i][j-2][k-2];
      _t_230_ -= u2[i][j-2][k-2];
      double _t_177_ = c2 * _t_179_;
      double _t_180_ = u2[i][j+1][k-2];
      _t_243_ -= u2[i][j+1][k-2];
      _t_180_ -= u2[i][j-1][k-2];
      _t_255_ -= u2[i][j-1][k-2];
      _t_177_ += c1 * _t_180_;
      _t_156_ += _t_176_ * _t_177_;
      double _t_155_ = c2 * _t_156_;
      double _t_186_ = _t_51_;
      double _t_184_ = _t_186_ * met1[i][j][k+1];
      double _t_187_ = u1[i][j+2][k+1];
      double _t_214_ = u1[i][j+2][k+1];
      _t_187_ -= u1[i][j-2][k+1];
      double _t_226_ = u1[i][j-2][k+1];
      double _t_185_ = c2 * _t_187_;
      double _t_188_ = u1[i][j+1][k+1];
      double _t_239_ = u1[i][j+1][k+1];
      _t_188_ -= u1[i][j-1][k+1];
      double _t_251_ = u1[i][j-1][k+1];
      _t_185_ += c1 * _t_188_;
      double _t_183_ = _t_184_ * _t_185_;
      double _t_182_ = _t_183_ * stry[j-2];
      double _t_181_ = _t_182_ * strx[i];
      double _t_189_ = _t_191_ * met1[i][j][k+1];
      double _t_192_ = u2[i][j+2][k+1];
      double _t_219_ = u2[i][j+2][k+1];
      _t_192_ -= u2[i][j-2][k+1];
      double _t_231_ = u2[i][j-2][k+1];
      double _t_190_ = c2 * _t_192_;
      double _t_193_ = u2[i][j+1][k+1];
      double _t_244_ = u2[i][j+1][k+1];
      _t_193_ -= u2[i][j-1][k+1];
      double _t_256_ = u2[i][j-1][k+1];
      _t_190_ += c1 * _t_193_;
      _t_181_ += _t_189_ * _t_190_;
      double _t_198_ = _t_70_;
      double _t_196_ = _t_198_ * met1[i][j][k-1];
      double _t_199_ = u1[i][j+2][k-1];
      _t_214_ -= u1[i][j+2][k-1];
      _t_199_ -= u1[i][j-2][k-1];
      _t_226_ -= u1[i][j-2][k-1];
      double _t_197_ = c2 * _t_199_;
      double _t_200_ = u1[i][j+1][k-1];
      _t_239_ -= u1[i][j+1][k-1];
      _t_200_ -= u1[i][j-1][k-1];
      _t_251_ -= u1[i][j-1][k-1];
      _t_197_ += c1 * _t_200_;
      double _t_195_ = _t_196_ * _t_197_;
      double _t_194_ = _t_195_ * stry[j];
      _t_181_ += _t_194_ * strx[i];
      double _t_201_ = _t_203_ * met1[i][j][k-1];
      double _t_204_ = u2[i][j+2][k-1];
      _t_219_ -= u2[i][j+2][k-1];
      _t_204_ -= u2[i][j-2][k-1];
      _t_231_ -= u2[i][j-2][k-1];
      double _t_202_ = c2 * _t_204_;
      double _t_205_ = u2[i][j+1][k-1];
      _t_244_ -= u2[i][j+1][k-1];
      _t_205_ -= u2[i][j-1][k-1];
      _t_256_ -= u2[i][j-1][k-1];
      _t_202_ += c1 * _t_205_;
      _t_181_ += _t_201_ * _t_202_;
      _t_155_ += c1 * _t_181_;
      r1ic0jc0kc0 += _t_155_;
      // --- k-derivative of j-derivatives ---
      double _t_211_ = c2 * _t_213_;
      double _v_105_ = c2 * _t_218_;
      double _v_108_ = c2 * _t_225_;
      double _v_111_ = c2 * _t_230_;
      double _v_115_ = c2 * _t_238_;
      double _v_118_ = c2 * _t_243_;
      double _v_121_ = c2 * _t_250_;
      double _v_124_ = c2 * _t_255_;
      _t_211_ += c1 * _t_214_;
      double _v_106_ = c1 * _t_219_;
      double _v_109_ = c1 * _t_226_;
      double _v_112_ = c1 * _t_231_;
      double _v_116_ = c1 * _t_239_;
      double _v_119_ = c1 * _t_244_;
      double _v_122_ = c1 * _t_251_;
      double _v_125_ = c1 * _t_256_;
      double _t_212_ = mu[i][j+2][k] * met3[i][j+2][k];
      double _t_210_ = _t_212_ * met1[i][j+2][k];
      double _t_209_ = _t_210_ * _t_211_;
      double _t_208_ = _t_209_ * stry[j+1];
      double _t_207_ = _t_208_ * strx[i];
      double _t_216_ = _v_105_;
      _t_216_ += _v_106_;
      double _t_217_ = mu[i][j+2][k] * met2[i][j+2][k];
      double _t_215_ = _t_217_ * met1[i][j+2][k];
      _t_207_ += _t_215_ * _t_216_;
      double _t_223_ = _v_108_;
      _t_223_ += _v_109_;
      double _t_224_ = mu[i][j-2][k] * met3[i][j-2][k];
      double _t_222_ = _t_224_ * met1[i][j-2][k];
      double _t_221_ = _t_222_ * _t_223_;
      double _t_220_ = _t_221_ * stry[j];
      _t_207_ += _t_220_ * strx[i];
      double _t_228_ = _v_111_;
      _t_228_ += _v_112_;
      double _t_229_ = mu[i][j-2][k] * met2[i][j-2][k];
      double _t_227_ = _t_229_ * met1[i][j-2][k];
      _t_207_ += _t_227_ * _t_228_;
      double _t_206_ = c2 * _t_207_;
      double _t_236_ = _v_115_;
      _t_236_ += _v_116_;
      double _t_237_ = mu[i][j+1][k] * met3[i][j+1][k];
      double _t_235_ = _t_237_ * met1[i][j+1][k];
      double _t_234_ = _t_235_ * _t_236_;
      double _t_233_ = _t_234_ * stry[j-1];
      double _t_232_ = _t_233_ * strx[i];
      double _t_241_ = _v_118_;
      _t_241_ += _v_119_;
      double _t_242_ = mu[i][j+1][k] * met2[i][j+1][k];
      double _t_240_ = _t_242_ * met1[i][j+1][k];
      _t_232_ += _t_240_ * _t_241_;
      double _t_248_ = _v_121_;
      _t_248_ += _v_122_;
      double _t_249_ = mu[i][j-1][k] * met3[i][j-1][k];
      double _t_247_ = _t_249_ * met1[i][j-1][k];
      double _t_246_ = _t_247_ * _t_248_;
      double _t_245_ = _t_246_ * stry[j];
      _t_232_ += _t_245_ * strx[i];
      double _t_253_ = _v_124_;
      _t_253_ += _v_125_;
      double _t_254_ = mu[i][j-1][k] * met2[i][j-1][k];
      double _t_252_ = _t_254_ * met1[i][j-1][k];
      _t_232_ += _t_252_ * _t_253_;
      _t_206_ += c1 * _t_232_;
      r1ic0jc0kc0 += _t_206_;
      // --- mixed i/j derivative of u2 ---
      double _t_261_ = mu[i][j+2][k] * met1[i][j+2][k];
      double _t_259_ = _t_261_ * met1[i][j+2][k];
      double _t_262_ = u2[i+2][j+2][k];
      _t_262_ -= u2[i-2][j+2][k];
      double _t_284_ = u2[i+2][j+2][k];
      _t_284_ -= u2[i+2][j-2][k];
      double _t_289_ = u2[i-2][j+2][k];
      _t_289_ -= u2[i-2][j-2][k];
      double _t_267_ = u2[i+2][j-2][k];
      _t_267_ -= u2[i-2][j-2][k];
      double _t_260_ = c2 * _t_262_;
      double _t_263_ = u2[i+1][j+2][k];
      _t_263_ -= u2[i-1][j+2][k];
      double _t_295_ = u2[i+1][j+2][k];
      _t_295_ -= u2[i+1][j-2][k];
      double _t_300_ = u2[i-1][j+2][k];
      _t_300_ -= u2[i-1][j-2][k];
      double _t_268_ = u2[i+1][j-2][k];
      _t_268_ -= u2[i-1][j-2][k];
      _t_260_ += c1 * _t_263_;
      double _t_258_ = _t_259_ * _t_260_;
      double _t_266_ = mu[i][j-2][k] * met1[i][j-2][k];
      double _t_264_ = _t_266_ * met1[i][j-2][k];
      double _t_265_ = c2 * _t_267_;
      double _v_142_ = c2 * _t_284_;
      double _v_145_ = c2 * _t_289_;
      double _v_149_ = c2 * _t_295_;
      double _v_152_ = c2 * _t_300_;
      _t_265_ += c1 * _t_268_;
      _t_258_ += _t_264_ * _t_265_;
      double _t_257_ = c2 * _t_258_;
      double _t_282_ = _v_142_;
      double _t_285_ = u2[i+2][j+1][k];
      _t_285_ -= u2[i+2][j-1][k];
      double _t_273_ = u2[i+2][j+1][k];
      _t_273_ -= u2[i-2][j+1][k];
      double _t_278_ = u2[i+2][j-1][k];
      _t_278_ -= u2[i-2][j-1][k];
      double _t_290_ = u2[i-2][j+1][k];
      _t_290_ -= u2[i-2][j-1][k];
      _t_282_ += c1 * _t_285_;
      double _t_280_ = _t_281_ * _t_282_;
      double _t_287_ = _v_145_;
      _t_287_ += c1 * _t_290_;
      _t_280_ += _t_286_ * _t_287_;
      _t_257_ += c2 * _t_280_;
      double _t_272_ = mu[i][j+1][k] * met1[i][j+1][k];
      double _t_270_ = _t_272_ * met1[i][j+1][k];
      double _t_271_ = c2 * _t_273_;
      double _v_138_ = c2 * _t_278_;
      double _t_274_ = u2[i+1][j+1][k];
      _t_274_ -= u2[i-1][j+1][k];
      double _t_296_ = u2[i+1][j+1][k];
      _t_296_ -= u2[i+1][j-1][k];
      double _t_301_ = u2[i-1][j+1][k];
      _t_301_ -= u2[i-1][j-1][k];
      double _t_279_ = u2[i+1][j-1][k];
      _t_279_ -= u2[i-1][j-1][k];
      _t_271_ += c1 * _t_274_;
      double _t_269_ = _t_270_ * _t_271_;
      double _t_277_ = mu[i][j-1][k] * met1[i][j-1][k];
      double _t_275_ = _t_277_ * met1[i][j-1][k];
      double _t_276_ = _v_138_;
      _t_276_ += c1 * _t_279_;
      double _v_150_ = c1 * _t_296_;
      double _v_153_ = c1 * _t_301_;
      _t_269_ += _t_275_ * _t_276_;
      _t_257_ += c1 * _t_269_;
      double _t_293_ = _v_149_;
      _t_293_ += _v_150_;
      double _t_291_ = _t_292_ * _t_293_;
      double _t_298_ = _v_152_;
      _t_298_ += _v_153_;
      _t_291_ += _t_297_ * _t_298_;
      _t_257_ += c1 * _t_291_;
      r1ic0jc0kc0 += _t_257_;
      // Single write-back per (i,j,k).
      r1[i][j][k] = r1ic0jc0kc0;
    }
  }
}
// Allocates device buffers, copies all inputs host->device, launches the
// curvi kernel over an N x N interior, and copies r1 back to the host.
// Improvements over the original:
//  - the kernel launch and the result copy are now error-checked;
//  - every device allocation is freed before returning (the original leaked
//    ten N^3 buffers plus strx/stry on every call).
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
  double *r1;
  cudaMalloc (&r1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for r1\n");
  cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u1;
  cudaMalloc (&u1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u1\n");
  cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u2;
  cudaMalloc (&u2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u2\n");
  cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u3;
  cudaMalloc (&u3, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u3\n");
  cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *mu;
  cudaMalloc (&mu, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *la;
  cudaMalloc (&la, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met1;
  cudaMalloc (&met1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met1\n");
  cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met2;
  cudaMalloc (&met2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met2\n");
  cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met3;
  cudaMalloc (&met3, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met3\n");
  cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met4;
  cudaMalloc (&met4, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met4\n");
  cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *strx;
  cudaMalloc (&strx, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, sizeof(double)*N);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
  // j is swept inside the kernel, so the grid covers only the (k, i) plane.
  dim3 blockconfig (16, 1, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), 1, ceil(N, blockconfig.z));
  curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  check_error ("curvi kernel launch failed\n");
  // Blocking copy: also synchronizes with the kernel before reading r1.
  cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy r1 back to host\n");
  cudaFree (r1);
  cudaFree (u1);
  cudaFree (u2);
  cudaFree (u3);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (met1);
  cudaFree (met2);
  cudaFree (met3);
  cudaFree (met4);
  cudaFree (strx);
  cudaFree (stry);
}
|
7,981 | #include "includes.h"
//----- include header files -----
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i].
// One thread per element, no bounds check — assumes the launch uses exactly
// `size` threads in a single block (see addWithCuda).
__global__ void addKernel(int *c, const int *a, const int *b) // addKernel device function
{ // addKernel function body
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
} |
7,982 | #include "includes.h"
// For each of the n columns of the n*n distance matrix, initialise the
// matching id column to 0..n-1 and partially selection-sort the column in
// place so its k smallest distances (with their ids) occupy the first k
// slots in ascending order. One thread per column.
__global__ void SortDistances(float* dist, int* idMat, int n, int k)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads past the last column have no work.
    if (col >= n)
        return;
    int* ids = &idMat[col * n];
    float* d = &dist[col * n];
    for (int i = 0; i < n; ++i)
        ids[i] = i;
    // Partial selection sort: only the first k positions must end up final.
    for (int i = 0; i < k; ++i)
    {
        int best = i;
        for (int j = i + 1; j < n; ++j)
        {
            if (d[j] < d[best])
                best = j;
        }
        float dSwap = d[best];
        d[best] = d[i];
        d[i] = dSwap;
        int idSwap = ids[best];
        ids[best] = ids[i];
        ids[i] = idSwap;
    }
} |
7,983 | #include <stdio.h>
//#ifdef __NVCC__
// Device hello-world. The printf is compiled only when targeting compute
// capability above 2.0 (device-side printf requires sm_20 or newer; the
// guard also keeps host-pass compilation of this TU quiet).
__global__ void hello(){
#if (__CUDA_ARCH__ > 200)
  printf("hello world !\n");
#endif
}
//#endif
// Launch two device threads that each print a greeting, then block until the
// device has finished (and its printf buffer has been flushed) before exit.
int main(int argc, char **argv){
  hello<<<1,2>>>();
  cudaDeviceSynchronize();
  return 0;
}
|
7,984 | //#ifdef SW2_BUILD_WITH_CUDA
typedef int ScoreType;
// CUDA global constants
__constant__ int partSeqSize, partsNumber, overlapLength, seqLibLength, queryLength;
__constant__ int gapOpen, gapExtension, maxScore, queryPartLength;
//global function
// One wavefront pass of a tiled Smith-Waterman matrix fill with affine gaps.
// Each block processes one part of the library sequence (partSeqSize columns
// with overlapLength columns of overlap between neighbouring parts); each
// thread owns one query position inside the current query part
// (queryStartPos + threadIdx.x) and the block sweeps the anti-diagonals.
// H/E/direction values flow between neighbouring threads through dynamic
// shared memory: three arrays of queryPartLength+1 entries each (sized by
// calculateMatrix_wrap). The *Up buffers carry state in from the previous
// query part, the *Rec buffers carry state out to the next one, and the
// *Max buffers track the running best score/origin per column.
// All sizing/penalty configuration comes from the __constant__ globals set
// by setConstants().
// NOTE(review): the local `maxScore` shadows the __constant__ `maxScore`,
// so the constant is never read here — confirm that is intended.
__global__ void calculateMatrix(const char * seqLib, ScoreType* queryProfile,
    ScoreType* g_HdataUp, ScoreType* g_HdataRec, ScoreType* g_HdataMax,
    ScoreType* g_FdataUp,
    ScoreType* g_directionsUp, ScoreType* g_directionsRec, ScoreType* g_directionsMax,
    int queryStartPos)
{
    //registers
    int patternPos = threadIdx.x;
    int globalPatternPos = queryStartPos + patternPos;
    int seqStartPos = blockIdx.x * (partSeqSize - overlapLength);
    int globalStartPos = blockIdx.x * (partSeqSize + 1);
    int seqPos = 0, globalPos = 0, diagNum = 0;
    ScoreType substScore = 0;
    ScoreType E = 0, E_left = 0, F = 0, F_up = 0, H = 0,
        H_left = 0, H_up = 0, H_upleft = 0, E_left_init = 0,
        H_left_init = 0, directionLeft = 0, directionUp = 0,
        directionUpLeft = 0, direction = 0, directionInit = 0,
        maxScore = 0;
    //dynamic allocation shared memory
    extern __shared__ ScoreType shared_H[];
    ScoreType* shared_E = (ScoreType*)&shared_H[queryPartLength + 1];
    ScoreType* shared_direction = (ScoreType*)&shared_E[queryPartLength + 1];
    shared_H[patternPos] = 0;
    shared_E[patternPos] = 0;
    shared_direction[patternPos] = 0;
    __syncthreads();
    // Skew the start so thread t begins t diagonals after thread 0.
    seqPos = seqStartPos - patternPos;
    globalPos = globalStartPos - patternPos;
    diagNum = (partSeqSize + queryPartLength - 1);
    for (int iteration = 0; iteration < diagNum; iteration++) {
        //check boundaries
        bool isActual = seqPos < seqLibLength && seqPos >= seqStartPos && seqPos < seqStartPos + partSeqSize && globalPatternPos < queryLength;
        if (isActual) {
            substScore = queryProfile[seqLib[seqPos] * queryLength + globalPatternPos];
            H_left = shared_H[patternPos];
            E_left = shared_E[patternPos];
            directionLeft = shared_direction[patternPos];
            // Thread 0 has no left neighbour in this part: pull the boundary
            // column from the previous part's *Up buffers instead.
            if (patternPos == 0) {
                H_left = g_HdataUp[globalPos];
                E_left = g_FdataUp[globalPos];
                directionLeft = g_directionsUp[globalPos];
                if (globalPos > 0) {
                    H_upleft = g_HdataUp[globalPos - 1];
                    directionUpLeft = g_directionsUp[globalPos - 1];
                }
                if (queryStartPos == 0 || iteration == 0) {
                    directionUpLeft = seqPos;
                }
            }
        }
        // Barrier is outside the isActual branch: every thread reaches it.
        __syncthreads();
        if (isActual) {
            E = max(E_left - gapExtension, H_left - gapOpen);
            F = max(F_up - gapExtension, H_up - gapOpen);
            // Compute H
            H = max(0, E);
            H = max(H, F);
            H = max(H, H_upleft + substScore);
            //chose direction (origin column of the best local alignment)
            if (H == 0) {
                direction = seqPos + 1;
            }
            else if (H == E) {
                direction = directionLeft;
            }
            else if (H == F) {
                direction = directionUp;
            }
            //(H == H_upleft + substScore)
            else {
                direction = directionUpLeft;
            }
            // Publish to slot patternPos+1 so the next diagonal's neighbour
            // (patternPos+1) reads it as its "left" value.
            shared_E[patternPos + 1] = E;
            shared_H[patternPos + 1] = H;
            shared_direction[patternPos + 1] = direction;
            H_upleft = H_left;
            H_up = H;
            F_up = F;
            directionUp = direction;
            directionUpLeft = directionLeft;
            //collect best result
            maxScore = max(H, g_HdataMax[globalPos]);
            if (maxScore == H) {
                g_HdataMax[globalPos] = maxScore;
                g_directionsMax[globalPos] = direction;
            }
            //if this last iteration then start prepare next
            if (patternPos == (queryPartLength - 1)) {
                g_HdataRec[globalPos] = H;
                g_FdataUp[globalPos] = E;
                g_directionsRec[globalPos] = direction;
            }
        }
        __syncthreads();
        seqPos++;// = seqStartPos + iteration - patternPos;
        globalPos++;// = globalStartPos + iteration - patternPos;
    }
}
// Host-side launcher for calculateMatrix. `threadNum` is the query-part
// length; the dynamic shared memory holds three ScoreType arrays of
// threadNum+1 entries (H, E, direction).
void calculateMatrix_wrap(int blockSize, int threadNum, const char * seqLib, ScoreType* queryProfile,
    ScoreType* g_HdataUp, ScoreType* g_HdataRec, ScoreType* g_HdataMax,
    ScoreType* g_FdataUp,
    ScoreType* g_directionsUp, ScoreType* g_directionsRec, ScoreType* g_directionsMax,
    int iteration)
{
    const size_t shmBytes = 3 * (threadNum + 1) * sizeof(ScoreType);
    calculateMatrix<<<blockSize, threadNum, shmBytes>>>(
        seqLib, queryProfile,
        g_HdataUp, g_HdataRec, g_HdataMax, g_FdataUp,
        g_directionsUp, g_directionsRec, g_directionsMax, iteration);
}
// Copies the host-side configuration into the kernel's __constant__
// variables. Two fixes vs. the original:
//  - cudaMemcpyToSymbol with the symbol *name as a string* was removed in
//    CUDA 5.0 and now fails with cudaErrorInvalidSymbol; the symbol itself
//    must be passed.
//  - the parameters previously shadowed the __constant__ symbols (which is
//    why strings were used); they are renamed so the symbols are reachable.
// Parameter types/order are unchanged, so all callers still compile.
void setConstants(int partSeqSizeVal, int partsNumberVal, int overlapLengthVal, int seqLibLengthVal, int queryLengthVal, int gapOpenVal, int gapExtensionVal, int maxScoreVal, int queryPartLengthVal) {
    cudaMemcpyToSymbol(partSeqSize, &partSeqSizeVal, sizeof(partSeqSizeVal));
    cudaMemcpyToSymbol(partsNumber, &partsNumberVal, sizeof(partsNumberVal));
    cudaMemcpyToSymbol(overlapLength, &overlapLengthVal, sizeof(overlapLengthVal));
    cudaMemcpyToSymbol(seqLibLength, &seqLibLengthVal, sizeof(seqLibLengthVal));
    cudaMemcpyToSymbol(queryLength, &queryLengthVal, sizeof(queryLengthVal));
    cudaMemcpyToSymbol(gapOpen, &gapOpenVal, sizeof(gapOpenVal));
    cudaMemcpyToSymbol(gapExtension, &gapExtensionVal, sizeof(gapExtensionVal));
    cudaMemcpyToSymbol(maxScore, &maxScoreVal, sizeof(maxScoreVal));
    cudaMemcpyToSymbol(queryPartLength, &queryPartLengthVal, sizeof(queryPartLengthVal));
}
//#endif //SW2_BUILD_WITH_CUDA
|
7,985 |
extern "C" {
// Image smoothing kernels
// Adaptive box smoothing, pass 1: for each pixel, grow a square radius until
// the box sum reaches Threshold (or the radius reaches MaxRad), record the
// chosen radius in BOX and accumulate 1/ksum normalization weights in NORM.
// Expects a 2-D launch covering an Lx-by-Ly image; dynamic shared memory
// holds one float per thread (the block's IMG tile, no halo).
// NOTE(review): s_IMG[stid] = IMG[gtid] is executed before the bounds check,
// so threads past the image edge read out of range — verify the launch never
// overshoots, or guard the load.
// NOTE(review): the __syncthreads() after "BOX[gtid] = ss" is inside the
// bounds `if`; if any thread of a block fails the bounds test the barrier is
// divergent, which is undefined behavior.
// NOTE(review): NORM[...] += from multiple threads whose boxes overlap is a
// data race — presumably needs atomicAdd; confirm against the host code.
// NOTE(review): `ss`/`qq` are floats used as loop bounds/offsets (truncated
// to int in `ii`/`jj`), and the shared-memory offset uses blockDim.y while
// stid is built from blockDim.x — only equivalent for square blocks.
__global__ void smoothingFilter(int Lx, int Ly, int Threshold, int MaxRad,
                float* IMG, float* BOX, float* NORM)
{
  // Indexing
  int tid = threadIdx.x;
  int tjd = threadIdx.y;
  int i = blockIdx.x * blockDim.x + tid;
  int j = blockIdx.y * blockDim.y + tjd;
  int stid = tjd * blockDim.x + tid;
  int gtid = j * Ly + i;
  // Smoothing params
  float qq = 1.0;
  float sum = 0.0;
  float ksum = 0.0;
  float ss = qq;
  // Shared memory
  extern __shared__ float s_IMG[];
  s_IMG[stid] = IMG[gtid];
  __syncthreads();
  // Compute all pixels except for image border
  if ( i >= 0 && i < Ly && j >= 0 && j < Lx )
  {
    // Continue until parameters are met
    while (sum < Threshold && qq < MaxRad)
    {
      ss = qq;
      sum = 0.0;
      ksum = 0.0;
      // Normal adaptive smoothing (w/o gaussian sum)
      for (int ii = -ss; ii < ss+1; ii++)
      {
        for (int jj = -ss; jj < ss+1; jj++)
        {
          if ( (i-ss >= 0) && (i+ss < Lx) && (j-ss >= 0) && (j+ss < Ly) )
          {
            // Compute within bounds of block dimensions
            if( tid-ss > 0 && tid+ss < blockDim.x && tjd-ss > 0 && tjd+ss < blockDim.y )
            {
              sum += s_IMG[stid + ii*blockDim.y + jj];
              ksum += 1.0;
            }
            // Compute block borders with global memory
            else
            {
              sum += IMG[gtid + ii*Ly + jj];
              ksum += 1.0;
            }
          }
        }
      }
      qq += 1;
    }
    BOX[gtid] = ss;
    __syncthreads();
    // Determine the normalization for each box
    for (int ii = -ss; ii < ss+1; ii++)
    {
      for (int jj = -ss; jj < ss+1; jj++)
      {
        if (ksum != 0)
        {
          NORM[gtid + ii*Ly + jj] += 1.0 / ksum;
        }
      }
    }
  }
  __syncthreads();
}
// Adaptive box smoothing, pass 2: divide each pixel of IMG by the
// normalization weight accumulated in NORM, skipping zero weights.
// NOTE(review): s_NORM[stid] = NORM[gtid] is unguarded — out-of-image
// threads read past the buffer before the bounds check.
// NOTE(review): `tid < blockDim.x` / `tjd < blockDim.y` are always true, so
// the global-memory fallback branch only runs for tid==0 or tjd==0; both
// branches divide by the same value, so the result is the same either way.
__global__ void normalizeFilter(int Lx, int Ly, float* IMG, float* NORM )
{
  // Indexing
  int tid = threadIdx.x;
  int tjd = threadIdx.y;
  int i = blockIdx.x * blockDim.x + tid;
  int j = blockIdx.y * blockDim.y + tjd;
  int stid = tjd * blockDim.x + tid;
  int gtid = j * Ly + i;
  // shared memory for IMG and NORM
  extern __shared__ float s_NORM[];
  s_NORM[stid] = NORM[gtid];
  __syncthreads();
  // Compute all pixels except for image border
  if ( i >= 0 && i < Ly && j >= 0 && j < Lx )
  {
    // Compute within bounds of block dimensions
    if( tid > 0 && tid < blockDim.x && tjd > 0 && tjd < blockDim.y )
    {
      if (s_NORM[stid] != 0)
      {
        IMG[gtid] /= s_NORM[stid];
      }
    }
    // Compute block borders with global memory
    else
    {
      if (NORM[gtid] != 0)
      {
        IMG[gtid] /= NORM[gtid];
      }
    }
  }
  __syncthreads();
}
// Final pass: box-average each pixel over the per-pixel radius selected by
// smoothingFilter (stored in BOX) and write the smoothed result to OUT.
// Same 2D launch / dynamic shared-memory contract as smoothingFilter.
__global__ void outFilter( int Lx, int Ly, float* IMG, float* BOX, float* OUT )
{
// Indexing
int tid = threadIdx.x;
int tjd = threadIdx.y;
int i = blockIdx.x * blockDim.x + tid;
int j = blockIdx.y * blockDim.y + tjd;
int stid = tjd * blockDim.x + tid;   // linear index within the block tile
int gtid = j * Ly + i;               // linear index into the full image
// Smoothing params
float ss = BOX[gtid];   // half-width chosen by the first pass
float sum = 0.0;        // accumulated neighborhood intensity
float ksum = 0.0;       // number of contributing pixels
extern __shared__ float s_IMG[];
s_IMG[stid] = IMG[gtid];
__syncthreads();
// Compute all pixels except for image border
if ( i >= 0 && i < Ly && j >= 0 && j < Lx )
{
for (int ii = -ss; ii < ss+1; ii++)
{
for (int jj = -ss; jj < ss+1; jj++)
{
if ( (i-ss >= 0) && (i+ss < Lx) && (j-ss >= 0) && (j+ss < Ly) )
{
// Compute within bounds of block dimensions
if( tid-ss > 0 && tid+ss < blockDim.x && tjd-ss > 0 && tjd+ss < blockDim.y )
{
// NOTE(review): row stride blockDim.y vs. stid built with blockDim.x --
// consistent only for square blocks (same concern as smoothingFilter).
sum += s_IMG[stid + ii*blockDim.y + jj];
ksum += 1.0;
}
// Compute block borders with global memory
else
{
sum += IMG[gtid + ii*Ly + jj];
ksum += 1.0;
}
}
}
}
}
// Border/out-of-range pixels keep ksum == 0 and leave OUT untouched.
if ( ksum != 0 )
{
OUT[gtid] = sum / ksum;
}
__syncthreads();
}
} |
7,986 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda.h>
//************variables globales***************
int dimx=1040, dimy=1388, tam_imag=1388*1040;
//**********KERNEL**************
// Running arg-max across a stack of variance images: wherever the current
// image (var) exceeds the best-so-far (max), record the image index k in
// top and update max. Image size is fixed at 1388x1040 pixels.
__global__ void kernel (float *max, float *var, int *top, int k){
const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
const int n_pixels = 1388 * 1040;   // hard-coded image dimensions
if (pixel >= n_pixels)
return;
if (var[pixel] > max[pixel]) {
max[pixel] = var[pixel];
top[pixel] = k;
}
}
float *leerMatrizVarianza(int d);
//*****************funcion main**********************
// Driver: streams N variance images through the arg-max kernel and writes
// the per-pixel winning image index ("topography") to Resultados/topo.
int main(int argc,char* argv[]){
//***************declaracion de variables**************
// fix: argv[2] was read unconditionally; guard the argument count.
if (argc < 3) {
fprintf(stderr, "uso: %s <arg> <N>\n", argv[0]);
return 1;
}
int N=atoi(argv[2]);
int i,k,temp;
int *top_d;
// fix: top_h was a ~5.5 MB variable-length stack array (stack-overflow
// risk and a non-standard VLA in C++); allocate it on the heap instead.
int *top_h=(int *)malloc(sizeof(int)*dimx*dimy);
cudaMalloc((void **)&top_d,sizeof(int)*dimx*dimy);
float *max_d, *var_d;
float *max_h, *var_h;
// fix: leerMatrizVarianza returns a freshly malloc'd buffer, so the
// original pre-mallocs of max_h/var_h leaked on assignment; removed.
cudaMalloc((void **)&max_d,sizeof(float)*dimx*dimy);
cudaMalloc((void **)&var_d,sizeof(float)*dimx*dimy);
float t;
clock_t tinicio, t_GPU;
tinicio=clock();
//***************calculo de la mayor varianza************
temp=1;
max_h=leerMatrizVarianza(temp);
for(i=0;i<dimx*dimy;i++)
top_h[i]=temp;
for(k=2;k<=N;k++){
printf("k=%d\n", k);
var_h=leerMatrizVarianza(k);
cudaMemcpy(max_d,max_h,sizeof(float)*dimx*dimy,cudaMemcpyHostToDevice);
cudaMemcpy(var_d,var_h,sizeof(float)*dimx*dimy,cudaMemcpyHostToDevice);
cudaMemcpy(top_d,top_h,sizeof(int)*dimx*dimy,cudaMemcpyHostToDevice);
kernel<<<6940,208>>>(max_d,var_d,top_d,k);
cudaMemcpy(top_h,top_d,sizeof(int)*dimx*dimy,cudaMemcpyDeviceToHost);
cudaMemcpy(max_h,max_d,sizeof(float)*dimx*dimy,cudaMemcpyDeviceToHost);
free(var_h);   // fix: each iteration leaked the previous variance buffer
}
cudaFree(max_d);
cudaFree(var_d);
cudaFree(top_d);
FILE *topo;
topo=fopen("Resultados/topo","w+");
if (topo == NULL) {   // fix: the original wrote through an unchecked FILE*
fprintf(stderr,"no se pudo abrir Resultados/topo\n");
return 1;
}
for(i=0;i<dimx*dimy;i++){
if(i%dimy==0 && i!=0)
fprintf(topo,"\n");
fprintf(topo,"%d ",top_h[i]);
}
fclose(topo);
free(max_h);
free(top_h);
t_GPU=clock();
t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
printf("tiempo de procesamiento de topografia: %6.3f s\n",t);
}//FIN funcion main()
//******************leerMatrizVarianza****************
// Reads the d-th variance matrix (dimx*dimy floats, whitespace-separated)
// from the file "VARIANZAS/<d>". Returns a malloc'd buffer the caller
// must free. Exits with an error if the file cannot be opened.
float* leerMatrizVarianza(int d){
int i;
// fix: the original declared char rutavar[]="VARIANZAS/" (exactly 11
// bytes) and then sprintf'ed into it using itself as a source operand --
// a buffer overflow AND undefined behavior (overlapping sprintf args).
char rutavar[64];
snprintf(rutavar,sizeof(rutavar),"VARIANZAS/%d",d);
FILE* archivo;
archivo=fopen(rutavar,"r") ;
if (archivo == NULL) {   // fix: fscanf on a NULL FILE* crashed before
fprintf(stderr,"no se pudo abrir %s\n",rutavar);
exit(1);
}
float *var;
var=(float *)malloc(sizeof(float)*dimx*dimy);
for(i=0;i<dimx*dimy;i++)
fscanf(archivo,"%f",&var[i]);
fclose(archivo);
return var;
}
7,987 | /*************************************************
** Accelereyes Training Day 1 **
** Vector Addition **
** **
** This program will add two vectors and store **
** the result in a third vector using the GPU **
*************************************************/
#include <iostream>
#include <vector>
#include "cuda.h"
// Elementwise vector addition: c[id] = a[id] + b[id], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the vector length (the caller below launches 10 x N/10
// threads for N = 1000); any larger grid would read/write out of bounds --
// confirm all call sites before reusing this kernel.
__global__ void add(int* a, int* b, int* c) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
c[id] = a[id] + b[id];
}
// Adds two constant vectors on the GPU and prints the result.
int main(void) {
using namespace std;
long N = 1000;
size_t size = N * sizeof(int);
// initialize device pointers and allocate memory on the GPU
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
// initialize data on host
vector<int> h_a(N, 1);
vector<int> h_b(N, 2);
vector<int> h_c(N);
// move host data to the GPU
cudaMemcpy(d_a, &h_a.front(), size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b.front(), size, cudaMemcpyHostToDevice);
// launch kernel; add() has no bounds guard, so blocks * (N/blocks) must
// equal N exactly (true here: 10 * 100 == 1000)
int blocks = 10;
add <<< blocks, N/blocks >>> (d_a, d_b, d_c);
// fix: check for launch-configuration errors, which kernel launches do
// not report directly
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
cerr << "kernel launch failed: " << cudaGetErrorString(err) << endl;
return 1;
}
// get the results from the GPU (this blocking copy also synchronizes
// with the kernel)
cudaMemcpy(&h_c.front(), d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; ++i) {
cout << h_c[i] << ", ";
}
// fix: the original leaked all three device buffers
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
7,988 | #include "includes.h"
// Scales each of the len elements of d_src by scale, writing into d_res.
// One thread per element; threads beyond len do nothing.
__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < len)
d_res[pos] = d_src[pos] * scale;
}
7,989 | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Asher Elmquist
// =============================================================================
//
// =============================================================================
#include <cuda.h>
#include "pointcloud.cuh"
namespace chrono {
namespace sensor {
// Converts 32bpp ARGB imgIn pixels to 8bpp Grayscale imgOut pixels
// Converts a lidar depth/intensity buffer into an XYZI point cloud.
// imgIn holds 2 floats per sample (range, intensity); imgOut receives 4
// floats per sample (x, y, z, intensity). One thread per sample.
__global__ void pointcloud_from_depth_kernel(float* imgIn, float* imgOut, int numPixels, LidarParams params) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < numPixels) {
int hIndex = index % params.horizontal_samples;
int vIndex = index / params.horizontal_samples;
// Beam angles, centered on the middle of each field of view.
// fix: the original divided by the double literal 2., silently promoting
// the arithmetic to double precision; keep everything in float.
float vAngle = (vIndex / (float)(params.vertical_samples)) * params.vFOV - params.vFOV / 2.f;
float hAngle = (hIndex / (float)(params.horizontal_samples)) * params.hFOV - params.hFOV / 2.f;
float range = imgIn[2 * index];
// Spherical -> Cartesian, using the single-precision math functions.
float proj_xy = range * cosf(vAngle);
float x = proj_xy * cosf(hAngle);
float y = proj_xy * sinf(hAngle);
float z = range * sinf(vAngle);
imgOut[4 * index] = x;
imgOut[4 * index + 1] = y;
imgOut[4 * index + 2] = z;
imgOut[4 * index + 3] = imgIn[2 * index + 1];   // pass intensity through
}
}
// Host-side launcher: treats the input as width*height samples and runs the
// depth-to-pointcloud conversion with 512-thread blocks.
void cuda_pointcloud_from_depth(void* bufDI, void* bufOut, int width, int height, LidarParams params) {
const int numPixels = width * height;
const int threadsPerBlock = 512;
const int numBlocks = (numPixels + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
pointcloud_from_depth_kernel<<<numBlocks, threadsPerBlock>>>(
static_cast<float*>(bufDI), static_cast<float*>(bufOut), numPixels, params);
}
} // namespace sensor
} // namespace chrono
|
7,990 | #include "includes.h"
/**
* Various matrix utils using cuda
**/
/**
* Kronecker product of two matrices kernel
* input :
* a : first matrix
* nax, nay : matrix a dimensions
* b: second matrix
* nbx, nby : matrix b dimensions
* results : kronecker product of a and b
**/
// Writes the identity sequence into d_dst: d_dst[i] = i for i in [0, N).
// One thread per element; excess threads exit immediately.
__global__ void fillUp(size_t * d_dst, size_t N){
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
d_dst[tid] = tid;
}
7,991 | #include "includes.h"
// In-place affine de-normalization on a strided buffer:
// audio[i+stride] = (audio[i+stride] - end_out[i]) / exp(end_out[i+stride]).
// NOTE(review): end_out appears to pack an offset in [0, stride) and a
// log-scale in [stride, 2*stride), with the transformed samples living in
// audio[stride ..] -- confirm against the producer's buffer layout.
__global__ void affine_transform(size_t sz, float_t* audio, float_t* end_out, size_t stride)
{
size_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < sz)
{
// Subtract the predicted offset, then divide by exp(log-scale).
audio[index+stride] = (audio[index+stride]-end_out[index])/expf(end_out[index+stride]);
}
}
7,992 | #include <stdio.h>
////////////////////////////////////////////////////////////////////////////////
// Debug kernel: prints its own name from the device. The int parameter is
// intentionally unnamed and unused.
__global__ void Kernel1( int )
{
printf( "Kernel1\n" );
}
////////////////////////////////////////////////////////////////////////////////
// Debug kernel: prints its own name from the device. The int parameter is
// intentionally unnamed and unused.
__global__ void Kernel2( int )
{
printf( "Kernel2\n" );
}
////////////////////////////////////////////////////////////////////////////////
|
7,993 | #include "includes.h"
// Gathers columns of `source` (height x width, column-major) into `target`
// (height x target_width) according to `indices`: target column c is source
// column indices[c]. Out-of-range selections produce NaN.
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// Grid-stride loop over all target elements.
for (unsigned int i = idx; i < target_width*height; i += numThreads) {
// Column selected for this element, then the row offset within it.
// NOTE(review): a negative indices[] value would make pos negative and
// slip past the upper-bound check below -- confirm indices are >= 0.
const int pos = height * (int)indices[i / height] + i % height;
// fix: the original produced NaN via 1.0/0.0 - 1.0/0.0 (inf - inf, in
// double); use an explicit single-precision quiet NaN instead.
target[i] = (pos < height * width) ? source[pos] : nanf("");
}
}
7,994 |
#ifndef TEST_CU
#define TEST_CU
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
extern "C"
{
void testVector();
}
// Elementwise vector addition: C = A + B for vectors of length N.
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= N)
return;
C[tid] = A[tid] + B[tid];
}
// Smoke test: adds two small constant vectors on the GPU, copies the result
// back, and verifies it on the host. Exposed with C linkage (see extern "C"
// declaration above).
void testVector() {
int N = 5;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
// Allocate vectors in device memory
// (fix: removed the dead, uninitialized `void* amp;` local)
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
// Initialize input vectors.
for( int i = 0; i < N; i++ )
{
h_A[i] = 2;
h_B[i] = 3;
}
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory; this blocking copy also
// synchronizes with the kernel, so h_C is valid afterwards.
cudaError_t err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err == cudaSuccess) {
// fix: verify on the HOST copy -- the original commented-out assert
// dereferenced device pointers from host code, which is invalid.
for( int i = 0; i < N; i++ )
assert( h_A[i] + h_B[i] == h_C[i] && "Cuda is not running or has problems" );
} else {
fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
}
fflush(stdout);   // fix: fflush(stdin) is undefined behavior; removed
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// fix: the original leaked all three host buffers
free(h_A);
free(h_B);
free(h_C);
}
#endif
|
7,995 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#define BLOCKS 1024
#define THREADS 1024
#define SIZE BLOCKS*THREADS*16
// Busy-work kernel: every thread repeatedly reads, scales and accumulates
// the single int at *memInt. Races between threads are inherent; this
// exists only to keep the GPU occupied for timing experiments.
__global__
void testgpu(int *memInt, int times){
for (int iter = 0; iter < times; ++iter)
*memInt += (*memInt) * iter;
}
// Timing experiment: prints wall-clock checkpoints around a long-running
// busy-work kernel launch.
int main(int argc, char *argv[]){
int *gpuInt;
dim3 block(1024, 1);
dim3 grid(1024, 1024);
printf("A %lf\n", clock() / (double) CLOCKS_PER_SEC);
// fix: check the allocation instead of passing a garbage pointer on
if (cudaMalloc( (void **) &gpuInt, sizeof(int)) != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed\n");
return 1;
}
printf("B %lf\n", clock() / (double) CLOCKS_PER_SEC);
// printf("Test 1\n");
// testgpu<<<grid, block>>>(gpuInt, 800000);
printf("C %lf\n", clock() / (double) CLOCKS_PER_SEC);
testgpu<<<8, 16>>>(gpuInt, 1024 * 1024 * 1024);
// fix: kernel launches are asynchronous -- without this sync the "D"
// checkpoint measured only the launch overhead, not the kernel runtime.
cudaDeviceSynchronize();
printf("D %lf\n", clock() / (double) CLOCKS_PER_SEC);
cudaFree(gpuInt);
printf("E %lf\n", clock() / (double) CLOCKS_PER_SEC);
return 0;
}
|
7,996 | #define W 500
#define H 500
#define TX 32
#define TY 32
// Saturates n into the displayable byte range [0, 255].
__device__
unsigned char clip(int n){
if (n < 0) return 0;
if (n > 255) return 255;
return (unsigned char)n;
}
// Fills d_out with a radial intensity pattern centered at pos: pixels fade
// from bright at the center toward black with distance. Expects a 2D launch
// covering the w x h image.
__global__
void distanceKernel(uchar4 *d_out, int w,int h, int2 pos){
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
const int i = r*w + c;
if ((c >= w)||(r >= h)) return;
// Euclidean distance from pos, truncated to int.
const int d = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - d);
d_out[i].x = intensity;
d_out[i].y = intensity;
d_out[i].z = 0;
// fix: the original assigned .z twice and never set the alpha channel,
// leaving d_out[i].w uninitialized; 255 was clearly intended for .w.
d_out[i].w = 255;
}
// Renders the distance pattern into a W x H image and copies it to the host.
int main(){
uchar4 *out = (uchar4 *)calloc(W*H, sizeof(uchar4));
uchar4 *d_out;
cudaMalloc(&d_out, W*H*sizeof(uchar4));
const int2 pos = {0, 0};
const dim3 blockSize(TX, TY);
const int bx = (W + TX - 1)/TX;
// fix: the vertical block count must come from the image HEIGHT; the
// original used W, which was harmless only because W == H.
const int by = (H + TY - 1)/TY;
const dim3 gridSize = dim3(bx, by);
distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
// Blocking copy; also synchronizes with the kernel above.
cudaMemcpy(out, d_out, W*H*sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
|
7,997 | /*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
// Block-level sum reduction (interleaved-pairs scheme): each block reduces
// blockDim.x elements of g_idata in shared memory and thread 0 writes the
// block's partial sum to g_odata[blockIdx.x]. Launch with blockDim.x *
// sizeof(float) bytes of dynamic shared memory.
extern "C"
__global__ void sum(float *g_idata,float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x; // thread index within the block
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // global index
// fix: guard the load instead of returning early. The original read
// g_idata[i] out of bounds for i >= n and then returned BEFORE the
// reduction loop, so those threads skipped the __syncthreads() barriers
// inside it -- undefined behavior under divergent barriers. Padding with
// zero keeps the sum unchanged while every thread reaches each barrier.
sdata[tid] = (i < n) ? g_idata[i] : 0.0f;
__syncthreads();
// do reduction in shared mem for one block
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) { // this thread owns the pair at this stride
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // all threads, every iteration
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
7,998 | #include "includes.h"
// Block-level sum reduction with 4x unrolling and dynamic shared memory.
// Each thread first sums up to four elements spaced blockDim.x apart, the
// block then reduces those partials in shared memory, and thread 0 writes
// the block total to g_odata[blockIdx.x]. Launch with blockDim.x *
// sizeof(int) bytes of dynamic shared memory; each block covers
// 4*blockDim.x input elements.
__global__ void reduceSmemUnrollDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4: each thread accumulates up to four bounds-checked loads
int tmpSum = 0;
if (idx < n)
{
int a1, a2, a3, a4;
a1 = a2 = a3 = a4 = 0;
a1 = g_idata[idx];
if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory; each step halves the active range.
// The barriers sit OUTSIDE the divergent ifs, so all threads reach them.
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp: the final 64 -> 1 steps run inside a single warp; the
// volatile pointer stops the compiler from caching shared-memory reads.
// NOTE(review): this relies on implicit warp-synchronous execution, which
// is not guaranteed under independent thread scheduling (Volta+) --
// confirm the target architecture or insert __syncwarp() between steps.
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
7,999 | #include <stdio.h>
#include <stdlib.h>
/* Elementwise sum of two length-n vectors: res[i] = vec1[i] + vec2[i]. */
void f(double *res, double *vec1, double *vec2, int n) {
for (int idx = 0; idx < n; ++idx)
res[idx] = vec1[idx] + vec2[idx];
}
// Reads a length n and two n-element vectors from stdin, adds them on the
// CPU, and reports the elapsed time (measured with CUDA events) on stderr.
int main() {
int i, n;
// fix: the original ignored the scanf result and malloc'd with garbage n
if (scanf("%d", &n) != 1 || n < 0) {
fprintf(stderr, "invalid vector length\n");
return 1;
}
double *res = (double *)malloc(sizeof(double) * n);
double *vec1 = (double *)malloc(sizeof(double) * n);
double *vec2 = (double *)malloc(sizeof(double) * n);
if (res == NULL || vec1 == NULL || vec2 == NULL) {
fprintf(stderr, "out of memory\n");
free(res); free(vec1); free(vec2);
return 1;
}
for(i = 0; i < n; i++)
scanf("%lf", &vec1[i]);
for(i = 0; i < n; i++)
scanf("%lf", &vec2[i]);
// NOTE(review): CUDA events time the GPU stream; since f() runs entirely
// on the CPU, this interval captures little beyond event overhead --
// confirm this is the intended measurement.
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
f(res, vec1, vec2, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
fprintf(stderr, "time = %f\n", time);
cudaEventDestroy(stop);
cudaEventDestroy(start);
// for(i = 0; i < n; i++)
//     printf("%f ", res[i]);
// printf("\n");
free(res);
free(vec1);
free(vec2);
return 0;
}
|
8,000 | #include "includes.h"
// Adds a per-row value to every element of that row of a pitched 2D matrix:
// dist[y][x] += vec[y]. One thread per element; a 2D launch is expected.
// NOTE(review): `pitch` is used as an ELEMENT stride (dist is indexed as
// yIndex*pitch+xIndex on a float*), not a byte pitch -- confirm callers.
__global__ void cuAddRNorm(float *dist, int width, int pitch, int height, float *vec){
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int xIndex = blockIdx.x * blockDim.x + tx;
unsigned int yIndex = blockIdx.y * blockDim.y + ty;
// Stage the rows' bias values this block needs in shared memory.
// NOTE(review): fixed at 16 entries, so blockDim.y must be <= 16 --
// confirm the launch configuration.
__shared__ float shared_vec[16];
// Only the first column of threads loads from global memory.
if (tx==0 && yIndex<height)
shared_vec[ty]=vec[yIndex];
__syncthreads();
if (xIndex<width && yIndex<height)
dist[yIndex*pitch+xIndex]+=shared_vec[ty];
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.