serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,401 | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Passing a struct to the kernel
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <iostream>
using namespace std;
// Plain-old-data agent record, copied between host and device by value.
struct MYSTRUCT {
    int id; // agent identifier
    // Construct an agent with the given identifier.
    MYSTRUCT(int _id) : id(_id) {}
};
// Kernel: decrement the id stored in the device-resident struct.
// Launched <<<1,1>>>, so no index math or guard is needed.
__global__ void changeID(MYSTRUCT *_struct)
{
    _struct->id -= 1;
}
// Demo driver: copy a struct to the device, decrement its id in a kernel,
// copy it back and print it. CUDA API calls are now checked — the original
// ignored every return code, so a failed malloc/copy printed stale data.
int main(void)
{
    cout<<"\n------------- assigning variables in host"<<endl;
    MYSTRUCT *dev_struct; // the device struct

    cout<<"\n------------- instantiating host id"<<endl;
    // send the ID into the agent struct constructor
    MYSTRUCT *mystruct = new MYSTRUCT(100);
    cout<<"** host mystruct->id: "<<mystruct->id<<endl;

    cout<<"\n------------- allocate memory to device"<<endl;
    cudaError_t err = cudaMalloc( (void**)&dev_struct, sizeof(MYSTRUCT) );
    if (err != cudaSuccess) { cout<<"cudaMalloc failed: "<<cudaGetErrorString(err)<<endl; delete mystruct; return 1; }

    cout<<"\n------------- copy mystruct to dev_struct"<<endl;
    err = cudaMemcpy( dev_struct, mystruct, sizeof(MYSTRUCT), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { cout<<"H2D copy failed: "<<cudaGetErrorString(err)<<endl; cudaFree(dev_struct); delete mystruct; return 1; }

    cout<<"\n------------- calling device kernel and change the id in the struct"<<endl;
    changeID<<<1,1>>>(dev_struct);
    // catch launch-configuration errors; the blocking memcpy below surfaces
    // any asynchronous execution error
    err = cudaGetLastError();
    if (err != cudaSuccess) { cout<<"kernel launch failed: "<<cudaGetErrorString(err)<<endl; cudaFree(dev_struct); delete mystruct; return 1; }

    cout<<"\n------------- copying memory from device to host and printing"<<endl;
    err = cudaMemcpy( mystruct, dev_struct, sizeof(MYSTRUCT), cudaMemcpyDeviceToHost );
    if (err != cudaSuccess) { cout<<"D2H copy failed: "<<cudaGetErrorString(err)<<endl; cudaFree(dev_struct); delete mystruct; return 1; }

    cout<<"\n------------- output changed results"<<endl;
    cout<<"** host mystruct->id: "<<mystruct->id<<endl;

    cout<<"\n------------- cleaning up"<<endl;
    delete mystruct;
    cudaFree(dev_struct);
    return 0;
}
|
13,402 | #include "includes.h"
// Grid-stride kernel: x[i] = sqrt(pi^i) for i in [0, n).
// Uses sqrtf/powf and a float literal so the arithmetic stays in single
// precision — the original pow/sqrt on a double literal silently promoted
// every element to double math.
__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
        x[i] = sqrtf(powf(3.14159f, (float)i));
    }
}
13,403 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
// Identity map: copy d_in[i] to d_out[i], one thread per element.
// Used purely to benchmark memory throughput for varying launch geometries.
__global__ void mapping_kernel(unsigned int * d_out, unsigned int * d_in, const unsigned int IN_SIZE)
{
    unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
    // out-of-bounds guard: the grid is rounded up past IN_SIZE
    if (myId >= IN_SIZE)
    {
        return;
    }
    // plain copy: the original's "1*x/1" was a no-op wrapper around it
    d_out[myId] = d_in[myId];
}
// Benchmark driver: sweep block sizes 32..1024 (step 32) and time `times`
// launches of the identity-map kernel over a 2^29-element array, writing the
// per-launch average (ms) to par_mapping.csv.
int main(int argc, char **argv)
{
    std::ofstream myfile;
    myfile.open ("par_mapping.csv");
    const unsigned int times = 10;
    const unsigned int IN_SIZE = 1<<29;
    const unsigned int IN_BYTES = sizeof(unsigned int) * IN_SIZE;
    const unsigned int OUT_SIZE = IN_SIZE;
    const unsigned int OUT_BYTES = IN_BYTES;
    for(unsigned int rounds = 1; rounds<33; rounds++)
    {
        const dim3 NUM_THREADS(32*rounds);
        // ceil-divide so the tail of the array is covered
        const dim3 NUM_BLOCKS(IN_SIZE/NUM_THREADS.x + ((IN_SIZE % NUM_THREADS.x)?1:0));
        // Host buffers are 2 GiB each — check the mallocs instead of
        // dereferencing NULL on low-memory machines (original did not).
        unsigned int * h_in = (unsigned int *)malloc(IN_BYTES);
        unsigned int * h_out = (unsigned int *)malloc(OUT_BYTES);
        if (h_in == NULL || h_out == NULL) {
            fprintf(stderr, "host allocation failed\n");
            free(h_in);
            free(h_out);
            myfile.close();
            return 1;
        }
        for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
        // Declare GPU memory pointers
        unsigned int * d_in;
        unsigned int * d_out;
        printf("\n@@@ROUND@@@: %u\n", rounds); // %u: rounds is unsigned
        // Allocate GPU memory and upload the input once per round
        cudaMalloc((void **) &d_in, IN_BYTES);
        cudaMalloc((void **) &d_out, OUT_BYTES);
        cudaMemcpy(d_in, h_in, IN_BYTES, cudaMemcpyHostToDevice);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        // run the kernel $times times and average
        for (unsigned int k = 0; k<times; k++)
        {
            mapping_kernel<<<NUM_BLOCKS, NUM_THREADS>>>(d_out, d_in, IN_SIZE);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime = .0f;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        elapsedTime = elapsedTime / ((float) times);
        printf(" time: %.5f\n", elapsedTime);
        // Copy back to HOST and fold a sanity checksum (every element is 1)
        cudaMemcpy(h_out, d_out, OUT_BYTES, cudaMemcpyDeviceToHost);
        long long sum = 0; // 2^29 ones: widen past int to be safe
        for(unsigned int i = 0; i<OUT_SIZE; i++){sum += h_out[i];}
        (void)sum; // diagnostic only; print if needed
        // destroy the per-round events — the original leaked 32 pairs
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        // free GPU memory allocation
        cudaFree(d_in);
        cudaFree(d_out);
        myfile << elapsedTime << ",";
        free(h_in);
        free(h_out);
    }
    myfile.close();
    return 0;
}
|
13,404 | #include "includes.h"
// In-place logistic sigmoid over a flat float array, supporting fully 3-D
// grid and block shapes (index is flattened from all six coordinates).
__global__ void sigmoid32(float* A, int size)
{
    // flatten 3-D grid/block coordinates into one linear element index
    int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
    int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
    if (idx >= size) {
        return;
    }
    // sigmoid(x) = 1/(1+e^-x); expf replaces the original powf(M_E, -x) —
    // same function, single-precision, and cheaper than a general power
    A[idx] = 1.0f / (1.0f + expf(-A[idx]));
}
13,405 | #include "includes.h"
#define W 4000
#define H 20530
// Stub kernel: presumably intended to reduce the W x H matrix into per-row
// or per-column means, but the body was never written.
// NOTE(review): empty on purpose or unfinished — confirm before calling.
__global__ void calcmean(float *matrix, float *mean){
}
13,406 | #define NUM_BLOCKS 8
#define THREADS_PER_BLOCK 64
// Demonstration kernel for debugger/memcheck tutorials: each thread loads two
// pointers from the table, combines the values, and stores into three slots.
// idx1 is only the block's base offset (blockIdx.x * blockDim.x) and idx2 is
// only threadIdx.x, so blocks deliberately alias the low table entries; the
// host also NULLs one entry on purpose. Do not "fix" the indexing without
// checking the lesson this demo is meant to teach.
__global__ void example(int **data) {
int value1, value2, value3, value4, value5;
int idx1, idx2, idx3;
idx1 = blockIdx.x * blockDim.x;
idx2 = threadIdx.x;
idx3 = idx1 + idx2;
value1 = *(data[idx1]);
value2 = *(data[idx2]);
value3 = value1 + value2;
value4 = value1 * value2;
value5 = value3 + value4;
*(data[idx3]) = value5;
*(data[idx1]) = value3;
*(data[idx2]) = value4;
// zeroing the local indices has no observable effect; kept from the original
idx1 = idx2 = idx3 = 0;
}
// Host driver for the memcheck demo: allocates one device int per thread,
// zeroes them, deliberately corrupts one table entry, and launches `example`
// so a debugging tool has a fault to report.
int main(int argc, char *argv[]) {
    int *host_data[NUM_BLOCKS*THREADS_PER_BLOCK];
    int **dev_data;
    const int zero = 0;
    /* Allocate an integer for each thread in each block */
    for (int block = 0; block < NUM_BLOCKS; block++) {
        for (int thread = 0; thread < THREADS_PER_BLOCK; thread++) {
            int idx = thread + block * THREADS_PER_BLOCK;
            cudaMalloc(&host_data[idx], sizeof(int));
            cudaMemcpy(host_data[idx], &zero, sizeof(int), cudaMemcpyHostToDevice);
        }
    }
    /* This inserts an error into block 3, thread 33 ON PURPOSE so that
       cuda-memcheck / compute-sanitizer has something to flag. */
    host_data[3*THREADS_PER_BLOCK + 33] = NULL;
    /* Copy the array of pointers to the device */
    cudaMalloc((void**)&dev_data, sizeof(host_data));
    cudaMemcpy(dev_data, host_data, sizeof(host_data), cudaMemcpyHostToDevice);
    /* Execute example */
    example <<< NUM_BLOCKS, THREADS_PER_BLOCK >>> (dev_data);
    /* cudaThreadSynchronize() has been deprecated for years;
       cudaDeviceSynchronize() is the supported replacement and also
       surfaces the (intended) kernel fault. */
    cudaDeviceSynchronize();
    return 0;
}
|
13,407 | #include "includes.h"
// Brute-force 1-nearest-neighbour: for each of the m k-dimensional search
// points, write the index of the closest of the n reference points (squared
// Euclidean distance) into indices. One thread per search point; launch with
// at least m threads total.
__global__ void kernel1(int k, int m, int n, float* searchPoints, float* referencePoints, int* indices)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < m) {
        int minIndex = 0;           // defined even if n == 0 (original left it uninitialized)
        float minSquareSum = -1.0f; // negative sentinel: "no candidate accepted yet"
        // Iterate over all reference points
        for (int nInd = 0; nInd < n; nInd++) {
            float squareSum = 0.0f;
            for (int kInd = 0; kInd < k; kInd++) {
                float diff = searchPoints[k * tid + kInd] - referencePoints[k * nInd + kInd];
                squareSum += (diff * diff);
            }
            if (minSquareSum < 0 || squareSum < minSquareSum) {
                minSquareSum = squareSum;
                minIndex = nInd;
            }
        }
        indices[tid] = minIndex;
    }
}
13,408 | #include <iostream>
#include <math.h>
#include <stdlib.h>
#include <assert.h>
// Kernel: elementwise y[i] = x[i] + y[i] over n floats.
// Grid-stride loop, so any launch geometry covers the whole array.
__global__
void add(int n, float *x, float *y)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int hop = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += hop) {
        y[i] = x[i] + y[i];
    }
}
// Row-major dense matrix: elements[i*width + j] is row i, column j.
// The same struct is used for host buffers and (via allocateMatrixToDevice)
// for device mirrors, so .elements may be a host OR device pointer.
typedef struct {
int width;
int height;
float * elements;
} Matrix;
// Allocate an uninitialized height x width matrix on the host.
Matrix initMatrix(int height, int width) {
    Matrix m;
    m.height = height;
    m.width = width;
    m.elements = (float*)malloc(sizeof(float) * width * height);
    return m;
}
// Fill A with pseudo-random values drawn from {0, 1, 2}.
void setRandom(Matrix A) {
    // the row-major index i*width+j walks 0..h*w-1 linearly, so a single
    // flat loop consumes rand() in exactly the same order as nested loops
    const int total = A.height * A.width;
    for (int idx = 0; idx < total; idx++) {
        A.elements[idx] = (float)(rand() % 3);
    }
}
// Print A row by row; each row begins on a fresh line.
void printMatrix(Matrix A){
    for (int i = 0; i < A.height; i++) {
        for (int j = 0; j < A.width; j++) {
            if (j == 0) {
                printf("\n");
            }
            printf(" %f ", A.elements[i*A.width + j]);
        }
    }
    printf("\n");
}
// Return element (row, column) of the row-major matrix A.
float cell(Matrix A, int row, int column) {
    const int flat = row * A.width + column;
    return A.elements[flat];
}
// Create a device-resident mirror of host matrix A: cudaMalloc a same-sized
// buffer and copy A's elements into it. The returned Matrix carries a DEVICE
// pointer in .elements — pass it to kernels only, never dereference on the
// host. API errors are printed but execution continues regardless.
Matrix allocateMatrixToDevice(Matrix A) {
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaError_t err = cudaMalloc(&d_A.elements, size);
printf("CUDA malloc Matrix : %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
printf("Copy Matrix to device: %s\n",cudaGetErrorString(err));
return d_A;
}
/* C = A * B, one thread per output element C(row, col); row-major storage. */
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
    float Cvalue = 0.0;
    /* calculate value for C(row, column) */
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    /* Bounds guard: the grid is rounded up, so edge threads must exit.
       The original used `>` which let row == A.height / col == B.width
       through, reading and writing one row/column out of bounds. */
    if (row >= A.height || col >= B.width) return;
    /* dot product of A's row with B's column (row-major indexing) */
    for (int e = 0; e < A.width; ++e) {
        int a = row * A.width + e; /* walk along the row of A */
        int b = e * B.width + col; /* walk down the column of B */
        Cvalue += (A.elements[a] * B.elements[b]);
    }
    C.elements[row * C.width + col] = Cvalue;
}
/* Multiply host matrices A and B, storing the product into host matrix C.
   Device mirrors are created, MatMulKernel runs on 16x16 blocks, and the
   result is copied back. All three device buffers are released. */
void matmul(Matrix A, Matrix B, Matrix C) {
    /* copy the matrices to the GPU */
    Matrix d_A = allocateMatrixToDevice(A);
    Matrix d_B = allocateMatrixToDevice(B);
    Matrix d_C = allocateMatrixToDevice(C);
    /* 2 dimensional blocks of 16 x 16 = 256 threads per block */
    dim3 dimBlock(16,16);
    /* grid sized from the product matrix C = (A.height x B.width),
       rounded up so partial tiles are covered (kernel bounds-checks) */
    dim3 dimGrid(
        ( (B.width + dimBlock.x - 1 ) / dimBlock.x),
        ( (A.height + dimBlock.y -1 ) / dimBlock.y)
    );
    /* launch a grid and run the kernel function */
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A,d_B,d_C);
    /* cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize;
       it also surfaces kernel errors before the copy-back */
    cudaError_t err = cudaDeviceSynchronize();
    err = cudaMemcpy(C.elements, d_C.elements, C.height * C.width * sizeof(float), cudaMemcpyDeviceToHost);
    (void)err; /* errors already reported by allocateMatrixToDevice pattern */
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements); /* was leaked in the original */
}
/* Demo driver: multiply two random 4x4 matrices on the GPU and spot-check
   C(0,0) against a host-computed dot product. Exact float equality in the
   assert is acceptable here because every input is a small integer (0..2),
   so all products and sums are exactly representable. */
int main(void)
{
Matrix A = initMatrix(4,4);
Matrix B = initMatrix(4,4);
Matrix C = initMatrix(4,4);
setRandom(A);
setRandom(B);
printMatrix(A);
printMatrix(B);
matmul(A,B,C);
printMatrix(C);
// host reference for one output element: row 0 of A dot column 0 of B
float c_0_0 = cell(A,0,0) * cell(B,0,0) + cell(A,0,1) * cell(B,1,0) + cell(A,0,2) * cell(B,2,0) + cell(A,0,3) * cell(B,3,0);
printf("%f\n", c_0_0);
assert(c_0_0 == cell(C,0,0));
}
|
13,409 | /*
Boggle Environment - Board and Tile Class
Miguel Aroca-Ouellette
05/14/2016
*/
#include <iostream>
#include <stdlib.h>
#include <string>
#include <cassert>
#include <time.h>
#include "boggle_env.cuh"
#define NUM_LETTERS 26
/*Constructor: store the board dimensions and allocate the width*height tile
  grid. Tiles are left uninitialized until genRandLetters()/setLetters().
  NOTE(review): the malloc result is unchecked. */
CUDA_CALLABLE_MEMBER
Board::Board(int _width, int _height)
{
width = _width;
height = _height;
grid = (Tile *)malloc(sizeof(Tile)*height*width);
}
/* Destructor: release the tile grid allocated by the constructors. */
CUDA_CALLABLE_MEMBER
Board::~Board()
{
free(grid);
}
/*Copy constructor: deep-copies the letters into a freshly allocated grid.
  Positions, used flags and adjacency wiring are rebuilt from scratch rather
  than copied, so the copy always starts "clean" (used == false, all
  adjacency slots available) regardless of the source board's search state.*/
CUDA_CALLABLE_MEMBER
Board::Board(const Board &obj)
{
//printf("Copy constructor allocating grid.\n");
width = obj.width;
height = obj.height;
grid = (Tile *)malloc(sizeof(Tile)*height*width);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
grid[x + y*width].letter = obj.grid[x + y*width].letter;
grid[x + y*width].x = x;
grid[x + y*width].y = y;
grid[x + y*width].used = false;
for (int i = 0; i < NUM_ADJ; i++)
{
grid[x + y*width].adj_available[i] = true;
grid[x + y*width].adj_list[i] = NULL;
}
// rebuild the neighbour pointer table for this copy's own grid
getAllAdj(&grid[x + y*width], grid[x + y*width].adj_list);
}
}
}
/*Populates the board with random letters ('a'..'z' via rand()) and resets
  each tile's position, used flag and adjacency wiring.
  NOTE(review): rand() is not seeded here — call srand() beforehand if a
  different board per run is wanted. */
void Board::genRandLetters()
{
//random seed
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
//get random number between 0 and 25 -> letters
char rand_char = 'a' + rand() % NUM_LETTERS;
grid[x + y*width].letter = rand_char;
grid[x + y*width].x = x;
grid[x + y*width].y = y;
grid[x + y*width].used = false;
for (int i = 0; i < NUM_ADJ; i++)
{
grid[x + y*width].adj_available[i] = true;
grid[x + y*width].adj_list[i] = NULL;
}
// wire up the neighbour pointer table for this tile
getAllAdj(&grid[x + y*width], grid[x + y*width].adj_list);
}
}
}
/*Set letters from a row-major string; its length must equal width*height.
  Resets each tile's position, used flag and adjacency wiring, mirroring
  genRandLetters(). */
void Board::setLetters(string letters)
{
    // cast avoids the signed/unsigned comparison warning; the board size
    // is non-negative by construction
    assert(letters.length() == (size_t)(width*height));
    for (int i = 0; i < (width*height); i++)
    {
        int x = i % width;
        int y = i / width;
        Tile *tile = &grid[x + y*width];
        tile->x = x;
        tile->y = y;
        tile->letter = letters[i];
        tile->used = false;
        // `k` replaces the original inner `i`, which shadowed the outer
        // loop variable and invited maintenance mistakes
        for (int k = 0; k < NUM_ADJ; k++)
        {
            tile->adj_available[k] = true;
            tile->adj_list[k] = NULL;
        }
        getAllAdj(tile, tile->adj_list);
    }
}
/* Copy the precomputed neighbour table into adj, masking out slots the
   caller has marked unavailable; masked/missing slots become NULL. */
void Board::getAdjList(Tile *center, Tile **adj)
{
    for (int i = 0; i < NUM_ADJ; i++)
    {
        Tile *candidate = center->adj_list[i];
        bool usable = (candidate != NULL) && center->adj_available[i];
        adj[i] = usable ? candidate : NULL;
    }
}
/*Get adjacent tiles which are NOT yet used. Origin is top left.
Inputs: center tile (its x,y coordinates are read).
adj must have room for up to 8 entries (NUM_ADJ).
Returns the number of entries written; used neighbours are skipped by
pushTile(all=false), so results are packed at the front of adj. */
int Board::getAdj(Tile* center, Tile **adj)
{
int size = 0;
int x = center->x;
int y = center->y;
if ((x > 0) && (y > 0)) //top left
pushTile(adj, x - 1, y - 1, &size, false);
if ((x > 0) && (y < (height - 1))) //bottom left
pushTile(adj, x - 1, y + 1, &size, false);
if ((x < (width - 1)) && (y > 0)) //top right
pushTile(adj, x + 1, y - 1, &size, false);
if ((x < (width - 1)) && (y < (height - 1))) //bottom right
pushTile(adj, x + 1, y + 1, &size, false);
if (y > 0) //top center
pushTile(adj, x, y - 1, &size, false);
if (y < (height - 1)) //bottom center
pushTile(adj, x, y + 1, &size, false);
if (x > 0) //left center
pushTile(adj, x - 1, y, &size, false);
if (x < (width - 1)) //right center
pushTile(adj, x + 1, y, &size, false);
return size;
}
/*Private helper for getAdj/getAllAdj: append tile (x,y) to target and bump
  *count, but only when the tile is unused or `all` forces inclusion. */
void Board::pushTile(Tile **target, int x, int y, int *count, bool all)
{
    Tile *candidate = &grid[x + y*width];
    if (all || !candidate->used)
    {
        target[*count] = candidate;
        (*count)++;
    }
}
/*Get ALL adjacent tiles regardless of used state. Origin is top left.
Inputs: center tile (its x,y coordinates are read).
adj must have room for up to 8 entries (NUM_ADJ).
Returns the number of entries written. Identical to getAdj except that
pushTile is called with all=true, so used tiles are included too. */
int Board::getAllAdj(Tile* center, Tile **adj)
{
int size = 0;
int x = center->x;
int y = center->y;
if ((x > 0) && (y > 0)) //top left
pushTile(adj, x - 1, y - 1, &size, true);
if ((x > 0) && (y < (height - 1))) //bottom left
pushTile(adj, x - 1, y + 1, &size, true);
if ((x < (width - 1)) && (y > 0)) //top right
pushTile(adj, x + 1, y - 1, &size, true);
if ((x < (width - 1)) && (y < (height - 1))) //bottom right
pushTile(adj, x + 1, y + 1, &size, true);
if (y > 0) //top center
pushTile(adj, x, y - 1, &size, true);
if (y < (height - 1)) //bottom center
pushTile(adj, x, y + 1, &size, true);
if (x > 0) //left center
pushTile(adj, x - 1, y, &size, true);
if (x < (width - 1)) //right center
pushTile(adj, x + 1, y, &size, true);
return size;
}
/* Print the board's letters, one row per line. */
void Board::printBoard()
{
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            printf("%c ", grid[col + row*width].letter);
        }
        printf("\n");
    }
}
/* Print the board showing only USED tiles as their letter; unused tiles
   print as a blank. (The previous comment claiming 1/0 output was stale.) */
void Board::printUsed()
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
printf("%c ", ((grid[x + y*width].used) ? grid[x + y*width].letter : ' '));
printf("\n");
}
}
/*Getter: tile at column x, row y of the row-major grid.*/
Tile* Board::getTile(int x, int y)
{
    const int flat = x + y*width;
    return grid + flat;
}
/*Fill all_tiles (capacity width*height) with pointers to every tile,
  in row-major order.*/
void Board::getAllTiles(Tile *all_tiles[])
{
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            all_tiles[row*width + col] = getTile(col, row);
        }
    }
}
/* Reset every tile to unused and every adjacency slot to available. */
void Board::resetBoard()
{
    const int total = height * width;
    for (int t = 0; t < total; t++)
    {
        grid[t].used = false;
        for (int i = 0; i < NUM_ADJ; i++)
        {
            grid[t].adj_available[i] = true;
        }
    }
}
/*Return true when `check` appears (by pointer identity) in list[0..size).*/
bool checkTileList(Tile *check, Tile **list, int size)
{
    int i = 0;
    while (i < size)
    {
        if (list[i] == check)
            return true;
        i++;
    }
    return false;
}
/* Return how many tiles on the board carry the letter c. */
int Board::getLetterCount(char c)
{
    // row/column order is irrelevant for a count, so scan linearly
    int count = 0;
    const int total = height * width;
    for (int idx = 0; idx < total; idx++)
    {
        if (grid[idx].letter == c)
            count++;
    }
    return count;
}
/*Collect pointers to every tile whose letter equals c, in grid order.
  Call getLetterCount() beforehand to size all_tiles.*/
void Board::getTilesByLetter(Tile *all_tiles[], char c)
{
    int found = 0;
    const int total = height * width;
    for (int idx = 0; idx < total; idx++)
    {
        if (grid[idx].letter == c)
        {
            all_tiles[found] = &grid[idx];
            found++;
        }
    }
}
13,410 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// Masked per-channel histogram: I holds c channels of h x w floats; counts
// land in hist (c * nbins bins, float so atomicAdd works pre-SM60 doubles).
// Pixels whose mask value is below EPS are skipped. A degenerate channel
// range (min == max) is widened by +-1 so the bin arithmetic stays finite.
// One thread per (channel, pixel) element; atomics resolve bin collisions.
__global__ void histogram_kernel( float *I, float *minI, float *maxI, float *mask, int nbins, int c, int h, int w, float *hist )
{
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
if (_id < c * size) {
// id = pixel within the image plane, dc = channel index
int id = _id % size, dc = _id / size;
if (mask[id] < EPS)
return ;
float val = I[_id];
float _minI = minI[dc];
float _maxI = maxI[dc];
if (_minI == _maxI) {
_minI -= 1;
_maxI += 1;
}
if (_minI <= val && val <= _maxI) {
// clamp to the last bin so val == _maxI does not overflow the range
int idx = MIN((val - _minI) / (_maxI - _minI) * nbins, nbins-1);
int index = dc * nbins + idx;
atomicAdd(&hist[index], 1.0f);
}
}
return ;
}
13,411 | #include "includes.h"
// Logistic sigmoid, e^x / (1 + e^x). The 1.0f literal keeps the sum in
// single precision — the original `1.` promoted it to double on every call.
__device__ float logit1(const float x) {
    return expf(x) / (1.0f + expf(x));
}
// Elementwise sigmoid: y[i] = logit1(x[i]) for every i < leng.
__global__ void logit(float* y, const float* x, int leng) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= leng) {
        return; // tail guard: the grid may overshoot leng
    }
    y[idx] = logit1(x[idx]);
}
13,412 | #include "includes.h"
// Launch configuration and path-range bookkeeping used by the host code.
const int NUMTHREADS = 1024;
int startNodeNumber;
int endNodeNumber;
// Singly linked list node holding one fixed-length path (up to 50 node ids).
typedef struct lList {
int path[50];
struct lList *next;
} lList;
// Repeated matrix product: folds matrix1 into matrix2 `paths` times; each
// thread owns one element of the count x count matrices (element index from
// the flat thread id).
// NOTE(review): __syncthreads() only synchronizes ONE block, yet every
// iteration reads all of matrix2 and then rewrites it — if count*count
// exceeds one block this is a cross-block read/write race. Confirm the
// launch geometry keeps the whole matrix in a single block, or stage each
// iteration's result in a separate buffer.
__global__ void GPUMultiplyMatrix(long *matrix1, long *matrix2, int paths, int count) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
int i;
while (paths > 0) {
long sum = 0;
int col = element % count;
int row = element / count;
for (i = 0; i < count; i++) {
sum += matrix1[count * i + col] * matrix2[row * count + i];
}
//Wait till all GPU cores are finished
__syncthreads();
matrix2[element] = sum;
paths--;
}
}
13,413 | #include "includes.h"
// Sub-pixel / sub-scale refinement of SIFT keypoint candidates: fits a 3-D
// quadratic to the difference-of-Gaussian samples around each extremum
// (g_Data1/g_Data2/g_Data3 are the three adjacent scale planes, w x h each,
// d_Ptrs[i] the flat pixel offset of candidate i) and writes refined x, y,
// scale, peak value and edge response into the column-major d_Sift table
// (row stride maxPts). Relies on POSBLK_SIZE and the constant-memory
// symbols d_ConstantA/d_ConstantB declared elsewhere in this file's scope.
__global__ void ComputePositions(float *g_Data1, float *g_Data2, float *g_Data3, int *d_Ptrs, float *d_Sift, int numPts, int maxPts, int w, int h)
{
int i = __mul24(blockIdx.x, POSBLK_SIZE) + threadIdx.x;
if (i>=numPts)
return;
int p = d_Ptrs[i];
//if (p<w+1 || p>=(w*h-w-1))
// printf("ComputePositions: read error\n");
// first/second derivatives from central differences in x, y and scale
float val[7];
val[0] = g_Data2[p];
val[1] = g_Data2[p-1];
val[2] = g_Data2[p+1];
float dx = 0.5f*(val[2] - val[1]);
float dxx = 2.0f*val[0] - val[1] - val[2];
val[3] = g_Data2[p-w];
val[4] = g_Data2[p+w];
float dy = 0.5f*(val[4] - val[3]);
float dyy = 2.0f*val[0] - val[3] - val[4];
val[5] = g_Data3[p];
val[6] = g_Data1[p];
float ds = 0.5f*(val[6] - val[5]);
float dss = 2.0f*val[0] - val[5] - val[6];
float dxy = 0.25f*
(g_Data2[p+w+1] + g_Data2[p-w-1] - g_Data2[p-w+1] - g_Data2[p+w-1]);
float dxs = 0.25f*
(g_Data3[p+1] + g_Data1[p-1] - g_Data1[p+1] - g_Data3[p-1]);
float dys = 0.25f*
(g_Data3[p+w] + g_Data1[p-w] - g_Data3[p-w] - g_Data1[p+w]);
// adjugate of the 3x3 Hessian; solve H * pd = -grad via the adjugate/det
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float det = idxx*dxx + idxy*dxy + idxs*dxs;
float idet = 1.0f / det;
float pdx = idet*
(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*
(idxy*dx + idyy*dy + idys*ds);
float pds = idet*
(idxs*dx + idys*dy + idss*ds);
// fall back to per-axis 1-D fits when the joint offset leaves the cell
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f){
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
d_Sift[i+0*maxPts] = (p%w) + pdx;
d_Sift[i+1*maxPts] = (p/w) + pdy;
d_Sift[i+2*maxPts] = d_ConstantA[0] * exp2f(pds*d_ConstantB[0]);
d_Sift[i+3*maxPts] = val[0] + dval;
// edge response: trace^2 / det of the 2x2 spatial Hessian
float tra = dxx + dyy;
det = dxx*dyy - dxy*dxy;
d_Sift[i+4*maxPts] = __fdividef(tra*tra, det);
}
13,414 | extern "C" {
// Upsample step for 3-channel byte images: output pixels with even (x,y)
// copy the source pixel at (x/2, y/2); odd pixels are zero-filled (no
// smoothing pass here — that would be the second half of a pyrUp).
// NOTE(review): xIndex walks rows (bounded by height) and yIndex walks
// columns (bounded by width) — reversed from the usual convention; aabhas
// is the OUTPUT row stride in bytes, colorWidthStep the input's. Confirm
// against the host-side launch code.
__global__ void pyrup_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + (3 * (yIndex));
const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2));
if(yIndex >=width || xIndex>=height)
{
return;
}
if(yIndex%2==0 &&xIndex%2==0)
{
d_out[color_tid]=d_in[color_tid1];
d_out[color_tid+1]=d_in[color_tid1+1];
d_out[color_tid+2]=d_in[color_tid1+2];
}
else
{
d_out[color_tid]=0;
d_out[color_tid+1]=0;//d_in[color_tid1+1];
d_out[color_tid+2]=0;//d_in[color_tid1+2];
}
}
// Upsample step for single-channel byte images: even (x,y) outputs copy the
// source pixel at (x/2, y/2); odd pixels are filled with 255 (unlike the RGB
// variant, which zero-fills — presumably a visual debugging aid; confirm).
// Same reversed x/y naming and stride roles as pyrup_rgb_kernel.
__global__ void pyrup_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + yIndex;
const int color_tid1= (xIndex/2)* colorWidthStep + yIndex/2;
if(yIndex >=width || xIndex>=height)
{
return;
}
if(yIndex%2==0 &&xIndex%2==0)
{
d_out[color_tid]=d_in[color_tid1];
//d_out[color_tid+1]=d_in[color_tid1+1];
//d_out[color_tid+2]=d_in[color_tid1+2];
}
else
{
d_out[color_tid]=255;
//d_out[color_tid+1]=0;//d_in[color_tid1+1];
//d_out[color_tid+2]=0;//d_in[color_tid1+2];
}
}
// Downsample for 3-channel byte images: output pixel (x,y) takes the input
// pixel at (2x, 2y) — simple decimation, no low-pass filter. height/width
// describe the OUTPUT image; same reversed x/y naming as the pyrup kernels.
__global__ void pyrdown_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + (3 * (yIndex));
const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex));
if(yIndex >=width || xIndex>=height)
{
return;
}
d_out[color_tid]=d_in[color_tid1];
d_out[color_tid+1]=d_in[color_tid1+1];
d_out[color_tid+2]=d_in[color_tid1+2];
}
// Downsample for single-channel byte images: output pixel (x,y) copies the
// input at (2x, 2y) — decimation only. height/width describe the OUTPUT;
// same reversed x/y naming and stride roles as the other kernels here.
__global__ void pyrdown_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + yIndex;
const int color_tid1= (2*xIndex)* colorWidthStep + 2*yIndex;
if(yIndex >=width || xIndex>=height)
{
return;
}
d_out[color_tid]=d_in[color_tid1];
//d_out[color_tid+1]=d_in[color_tid1+1];
//d_out[color_tid+2]=d_in[color_tid1+2];
}
} |
13,415 | #include <stdio.h>
// Allocator stress tool: perform <number_mallocs> cudaMalloc calls of
// <malloc_size_bytes> bytes each. Allocations are deliberately never freed
// (the point is to exercise the allocator); process exit reclaims them.
int main(int argc, char** argv) {
    int malloc_size_bytes, num_mallocs;
    // Both arguments are required. The original printed a usage string with
    // a %s but no argument (UB) and then fell through with BOTH variables
    // uninitialized whenever fewer than three argv entries were given.
    if (argc < 3) {
        printf("usage: %s <int malloc_size_bytes> <int number_mallocs>\n", argv[0]);
        return 1;
    }
    malloc_size_bytes = atoi(argv[1]);
    num_mallocs = atoi(argv[2]);
    int *dev_a;
    // requested size of each device allocation, in bytes
    int size = malloc_size_bytes;
    // loop over num_mallocs; dev_a is overwritten each time (leak intended)
    for (int i = 0; i < num_mallocs; i++) {
        cudaMalloc((void **)&dev_a, size);
    }
    return 0;
}
|
13,416 | #include <iostream>
#include <math.h>
// TODO: find proper alignment (__align__(x))
// Octree-style node: a payload value, indices of up to 8 children and the
// index of a "next" node — all indices into one flat node array, so the
// structure is pointer-free and safe to copy to the device.
struct node {
int value;
int children[8];
int next;
};
// Add a static array and a value pulled from the tree structure:
// b[i] += a[i] + nodes[0].value, using a block-stride loop over n elements.
__global__
void add(int n, float* a, node* nodes, float* b) {
    printf("hello world from the device!\n");
    const int first = threadIdx.x;
    const int hop = blockDim.x;
    const int childID = 0; // fixed test node; a real walk would descend children
    for (int i = first; i < n; i += hop) {
        b[i] += a[i] + nodes[childID].value;
    }
}
// Driver: build a toy flat "tree" of nodes plus two float arrays, run the
// add kernel, and report the largest |b[i]|. Fixes from review:
//  * a and b are now allocated BEFORE the init loop (the original wrote
//    through dangling pointers);
//  * the device node buffer is sized N * sizeof(node) (the original
//    multiplied sizeof(node) by sizeof(nodes));
//  * the kernel receives the device buffer nodes_d, not the host array;
//  * cudaFree is called on nodes_d, not on the stack array `nodes`.
int main(void) {
    // 1 + 8 + 8*8 + 8*8*8
    const int N = 585;
    //int N = 1<<20;

    // unified-memory buffers must exist before the host writes to them
    float *a, *b;
    cudaMallocManaged(&a, N * sizeof(float));
    cudaMallocManaged(&b, N * sizeof(float));
    // initialize a and b arrays on the host
    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // host-side node buffer
    node nodes[N];
    // calculate the required depth for our structure first
    int depth = 1;
    int x = N;
    while (x >= 8) {
        x -= x % 8;
        x /= 8;
        depth++;
    }
    int rootnodelength = 1;
    for (int d = 0; d < depth; d++) {
        rootnodelength *= 8;
    }
    // pad rootnodelength with unused fields
    rootnodelength += N % rootnodelength;
    rootnodelength += 1;
    for (int i = 0; i < N; i++) {
        // replace depth with current depth
        nodes[i].value = (100 * depth) + i;
        // for testing purposes ONLY: every child points at node 1
        for (int j = 0; j < 8; j++) {
            nodes[i].children[j] = 1;
        }
        // also set next temporarily
        nodes[i].next = 2;
    }
    printf("aaaaaaaaaaaaaaaa\n");

    // device-visible copy of the nodes, correctly sized as N records
    node *nodes_d;
    cudaMallocManaged((void**) &nodes_d, N * sizeof(node));
    cudaMemcpy(nodes_d, nodes, N * sizeof(node), cudaMemcpyHostToDevice);
    printf("AAAAAAAAAAAA\n");

    // pass the device buffer — the original passed the host array `nodes`
    add<<<1, 256>>>(N, a, nodes_d, b);
    // wait for gpu (blocks this thread until the kernel finishes)
    cudaDeviceSynchronize();
    printf("aaaaaabbbbbbbbbbbbbbbb");

    // report the largest magnitude in b (named "median" historically)
    float median = 0.0f;
    for (int i = 0; i < N; i++) {
        median = fmax(median, fabs(b[i]));
    }
    std::cout << "median : " << median << "\n";

    // free memory (nodes_d, not the stack array)
    cudaFree(a);
    cudaFree(b);
    cudaFree(nodes_d);
    return 0;
}
|
13,417 |
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
// Exercise device math overloads on a scratch array:
//   data[0] = data[1] ^ data[2] (pow), data[4]/data[5]/data[6] = min/max of
// the pair (the ::max spelling checks the global-namespace overload).
// data[3] is intentionally left untouched; the std::max line stays disabled
// because the host std:: overloads are not callable from device code.
__global__ void getValue(float *data) {
data[0] = pow(data[1], data[2]);
data[4] = min(data[1], data[2]);
data[5] = max(data[1], data[2]);
data[6] = ::max(data[1], data[2]);
// data[7] = std::max(data[1], data[2]);
}
// Driver-API smoke test: allocate pinned host memory and a device buffer,
// seed data[1]=3 and data[2]=4.5, run getValue on one warp, copy back and
// assert the pow/min/max results (3^4.5 ~= 140.296). Mixes driver-API memory
// management (cuMemAlloc/cuMemcpy*) with a runtime-API <<<>>> launch, which
// share the primary context. None of the cu* return codes are checked.
int main(int argc, char *argv[]) {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
float *hostFloats1;
cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats1;
cuMemAlloc(&deviceFloats1, N * sizeof(float));
hostFloats1[0] = 0;
hostFloats1[1] = 3;
hostFloats1[2] = 4.5f;
cuMemcpyHtoDAsync(
(CUdeviceptr)(((float *)deviceFloats1)),
hostFloats1,
N * sizeof(float),
stream
);
// cuStreamSynchronize(stream);
// launch on the same stream, so the H2D copy is ordered before it
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1));
cuMemcpyDtoHAsync(hostFloats1, deviceFloats1, N * sizeof(float), stream);
cuStreamSynchronize(stream);
// and check the values...
for(int i = 0; i < 7; i++) {
cout << "hostFloats1[" << i << "]=" << hostFloats1[i] << endl;
}
// cout << hostFloats1[0] << endl;
// cout << hostFloats1[1] << endl;
// cout << hostFloats1[2] << endl;
// cout << hostFloats1[4] << endl;
// cout << hostFloats1[5] << endl;
assert(std::abs(hostFloats1[0] - 140.296) < 0.01);
assert(hostFloats1[4] == 3);
assert(hostFloats1[5] == 4.5f);
cuMemFreeHost(hostFloats1);
cuMemFree(deviceFloats1);
cuStreamDestroy(stream);
return 0;
}
|
13,418 | #include "includes.h"
// Elementwise C = A + B for N x N int matrices flattened row-major.
// Nested 2-D grid-stride loops let any launch geometry cover the matrix.
// N is expected to be a compile-time constant coming from "includes.h" —
// confirm it matches the buffers' actual dimension.
__global__ void additionMatricesKernel(int* d_a, int* d_b, int* d_c) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
while (i < N) {
// j restarts for every row this thread handles
j = threadIdx.y + blockIdx.y * blockDim.y;
while (j < N) {
d_c[i * N + j] = d_a[i * N + j] + d_b[i * N + j];
j += blockDim.y * gridDim.y;
}
i += blockDim.x * gridDim.x;
}
}
13,419 | #include "includes.h"
using namespace std;
const int DIMBLOCKX=32;
//DEVICE
//HOST
// Sum one column of a tam x tam matrix per block (column = blockIdx.x) into
// array[blockIdx.x]. Each thread accumulates a contiguous slice of the
// column, the partial sums meet in shared memory, and thread 0 folds them
// serially after the barrier.
// NOTE(review): assumes blockDim.x <= DIMBLOCKX and that blockDim.x divides
// tam exactly — any remainder elements (tam % blockDim.x) are silently
// dropped. Confirm the launch parameters uphold both conditions.
__global__ void kernelSum_Column_Matrix(float* matrix, float* array, int tam){
__shared__ float shareMatrix[DIMBLOCKX];
float value=0;
int col=blockIdx.x;
int step= tam/blockDim.x;
int posIni= col*tam+threadIdx.x*step;
for(int i=0;i<step;i++){
value=value+matrix[posIni+i];
}
shareMatrix[threadIdx.x]=value;
__syncthreads();
if(threadIdx.x==0){
// serial fold by thread 0; fine for small blockDim, not a tree reduction
for(int j=1;j<blockDim.x;j++){
shareMatrix[0]=shareMatrix[0]+shareMatrix[j];
}
array[blockIdx.x]=shareMatrix[0];
}
}
13,420 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define MAXITER 1000000
#define TOLERANCE 0.000000001
#define NORM (N*N)
// One red/black half-sweep of a 5-point averaging stencil over the interior
// of an (N+2) x (N+2) grid (one-cell boundary ring). The parity of `it`
// against the row parity selects which checkerboard colour each row updates,
// so consecutive calls with it, it+1 touch disjoint cells. The absolute
// change of every updated cell is accumulated into *diff (atomicAdd) for the
// host's convergence test.
// NOTE(review): fabs on a float promotes to double math; fabsf would stay in
// single precision.
__global__ void diff_eq (float *A, int it, float *diff, int N) {
int j;
int i = (blockIdx.y * blockDim.y + threadIdx.y);
float old;
// pick this row's colour: each x-thread covers two columns, offset by parity
if((it%2) != (i%2)){
j = 2*(blockIdx.x * blockDim.x + threadIdx.x);
}else{
j = 2*(blockIdx.x * blockDim.x + threadIdx.x) + 1;
}
if((j >= N) || (i >= N)){
return;
}
old = *(A + (N+2)*(i+1) + (j+1));
*(A + (N+2)*(i+1) + (j+1)) = 0.2*(old)
+ 0.2*(*(A + (N+2)*(i+2) + (j+1)))
+ 0.2*(*(A + (N+2)*(i) + (j+1)))
+ 0.2*(*(A + (N+2)*(i+1) + (j+2)))
+ 0.2*(*(A + (N+2)*(i+1) + (j)));
atomicAdd(diff, fabs(*(A + (N+2)*(i+1) + (j+1)) - old));
return;
}
// Iterative solver driver: reads N, sets boundary conditions on an
// (N+2)x(N+2) unified-memory grid, then alternates red/black diff_eq sweeps
// until the accumulated change per cell drops below TOLERANCE or MAXITER
// iterations elapse. Prints the grid (small N), iteration count and GPU time.
int main(void){
    float *A, *diff, gpuTime;
    int i, j, N;
    int it = 0; /* was uninitialized: `it++` in the loop read garbage (UB) */
    cudaEvent_t start, end;
    printf ("Dimensione della matrice escluse le condizioni al contorno (la matrice completa sarà qundi (N+2)*(N+2)): ");
    scanf("%d", &N);
    /* unified memory: host init and device sweeps share one buffer */
    cudaMallocManaged (&A, sizeof(float) * (N+2) * (N+2));
    cudaMallocManaged (&diff, sizeof(float));
    cudaDeviceSynchronize ();
    /* interior starts at zero */
    for(i = 1; i < (N+1); i++){
        for(j = 1; j < (N+1); j++){
            *(A + (N+2)*i + j) = 0;
        }
    }
    /* left/right boundary columns */
    for(i = 0; i < (N+2); i++){
        *(A + (N+2)*i + 0) = i + 1;
        *(A + (N+2)*i + (N+1)) = i + 1;
    }
    /* top/bottom boundary rows */
    for(j = 0; j < (N+2); j++){
        *(A + (N+2)*0 + j) = 1;
        *(A + (N+2)*(N+1) + j) = N + 2;
    }
    /* x: 16 threads cover 32 columns (2 per thread); y: 32 rows per block */
    dim3 blocksPerGrid ((int)(N/32 + 1), (int)(N/32 + 1), 1);
    dim3 threadsPerBlock (16, 32, 1);
    cudaEventCreate (&start);
    cudaEventCreate (&end);
    cudaEventRecord (start);
    do{
        it++;
        *diff = 0;
        diff_eq <<< blocksPerGrid, threadsPerBlock>>> (A, it, diff, N);
        cudaDeviceSynchronize();
        it++;
        diff_eq <<< blocksPerGrid, threadsPerBlock>>> (A, it, diff, N);
        cudaDeviceSynchronize();
    }while (((*diff / NORM) > TOLERANCE) && ((it / 2) < MAXITER));
    cudaEventRecord (end);
    cudaDeviceSynchronize ();
    cudaEventElapsedTime (&gpuTime, start, end);
    if(N <= 19){
        for(i = 0; i < (N+2); i++){
            for(j = 0; j < (N+2); j++){
                printf("%6.3f ",*(A + (N+2)*i + j));
            }
            printf("\n");
        }
    }
    printf("\nIterazioni = %d\n", (int)(it / 2));
    printf ("Tempo impiegato: %.2f ms\n", gpuTime);
    cudaEventDestroy (start); /* events were leaked in the original */
    cudaEventDestroy (end);
    cudaFree(A);
    cudaFree(diff);
    return 0;
}
|
13,421 | #include <stdio.h>
// CMT-nek transport-property kernel: one thread per grid point of the
// nelt-element spectral mesh (nnel = total points; each element is
// lx1 x ly1 x lz1 = nxyz points, lxy = lx1*ly1, nlel = field stride).
// The body inlines three Fortran routines in sequence — nekasgn (gather
// coordinates/velocities/scalars for the point), cmtasgn (conserved
// variables and material properties), and uservp (the user's property
// model) — and finally writes viscosity, lambda, conductivity slot and
// artificial viscosity back into vdiff. The original porting questions in
// the inline comments are preserved; several locals (x,y,z,r,theta,ux...)
// are computed but unused after the uservp override, matching the Fortran.
// NOTE(review): sqrtf on a double r drops precision; also the iknd slot is
// assigned the INDEX iknd rather than a property value — both look like
// porting slips, confirm against the Fortran source.
__global__ void compute_transport_props_gpu_kernel(double *vx, double *vy, double *vz, double *u, int nelt, int nxyz,int nnel,int irpu, int irpv, int irpw, int iret, int irg, int toteq,int if3d,double *vtrans, int irho, double *phig, int lx1, int ly1, int lz1, int *lglel, double *xm1, double *ym1, double *zm1, double *t,int ldimt, int npscal, double *pr, double p0th, double *sii, double *siii, double *vdiff, int ifield,char *cb, int icv, int icp, double *csound, int imu,int ilam, double cpgref, double cvgref, double gmaref, double rgasref, int *gllel, double *res2, int iknd, int inus, int lxy,int nlel ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nnel){
int e = id/nxyz;
int ieg= lglel[e];
int k = (id / (lx1*ly1))%lz1;
int j = (id/lx1)%ly1;
int newi = id % lx1;
// nekasgn
double x = xm1[e*nxyz+k*lxy+j*lx1+newi];
double y = ym1[e*nxyz+k*lxy+j*lx1+newi];
double z = zm1[e*nxyz+k*lxy+j*lx1+newi];
double r = x*x+y*y;
double theta=0.0;
if (r>0.0){ r = sqrtf(r);}
if ( x != 0.0 || y!= 0.0){theta = atan2(y,x); }
double ux= vx[e*nxyz+k*lxy+j*lx1+newi];
double uy= vy[e*nxyz+k*lxy+j*lx1+newi];
double uz= vz[e*nxyz+k*lxy+j*lx1+newi];
double temp = t [ e*nxyz+k*lxy+j*lx1+newi ];
int ips;
double ps[10]; // ps is size of ldimt which is 3. Not sure npscal is also 3. Need to check with Dr.Tania
for (ips=0;ips<npscal;ips++){
ps[ips]=t[(ips+1)*nlel+e*nxyz+k*lxy+j*lx1+newi ]; // 5 th dimension of t is idlmt which is 3. Not sure how the nekasgn access ips+1. Need to check with Dr.Tania
}
double pa = pr [e*nxyz+k*lxy+j*lx1+newi];
double p0= p0th;
double si2 = sii[e*nxyz+k*lxy+j*lx1+newi];
double si3 = siii[e*nxyz+k*lxy+j*lx1+newi];
double udiff = vdiff[(ifield-1)*nlel+e*nxyz+k*lxy+j*lx1+newi];
double utrans = vtrans[(ifield-1)*nlel+e*nxyz+k*lxy+j*lx1+newi];
char cbu1 = cb[0];
char cbu2 = cb[1];
char cbu3 = cb[2];
//cmtasgn
int eqnum;
double varsic[10];
int e_offset = toteq*nxyz;
for (eqnum=0;eqnum<toteq;eqnum++){
varsic[eqnum] = u[e*e_offset+eqnum*nxyz+k*lxy+j*lx1+newi];
}
double phi = phig[e*nxyz+k*lxy+j*lx1+newi];
double rho = vtrans[(irho-1)*nlel +e*nxyz+k*lxy+j*lx1+newi];
double pres = pr[e*nxyz+k*lxy+j*lx1+newi];
double cv=0.0,cp=0.0;
if(rho!=0){
cv=vtrans[(icv-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]/rho;
cp=vtrans[(icp-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]/rho;
}
double asnd = csound [e*nxyz+k*lxy+j*lx1+newi];
double mu = vdiff[(imu-1)*nlel+e*nxyz+k*lxy+j*lx1+newi];
udiff = vdiff[(imu-1)*nlel+e*nxyz+k*lxy+j*lx1+newi];// this overrides the udiff in nekasgn (line 63 in this function). Need to check withDr.Tania
double lambda = vdiff[(ilam-1)*nlel+e*nxyz+k*lxy+j*lx1+newi];
// uservp
int uservpe = gllel[ieg];
mu=rho*res2[(uservpe-1)*nxyz+k*lxy+j*lx1+newi] ;//! finite c_E;
double nu_s=0.75*mu/rho;
mu=0.5*mu ;
lambda=0.0;
udiff=0.0; // uservp makes these to zero. vdiff get zeros. Check with Dr. Tania. adeesha
utrans=0.0;
vdiff[(imu-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]=mu; // vidff [ + id] is same as this. check later . adeesha
vdiff[(ilam-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]=lambda;
vdiff[(iknd-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]=iknd;
vdiff[(inus-1)*nlel +e*nxyz+k*lxy+j*lx1+newi]=nu_s;
}
}
// Host-side launcher for compute_transport_props_gpu_kernel.
// Called from Fortran (trailing underscore, pass-by-reference convention),
// so every scalar arrives as a pointer; d_* pointers must already reside in
// device memory. Launches one thread per GLL point (nnel = nxyz * nelt).
extern "C" void compute_transport_props_gpu_wrapper_(int *glbblockSize1,double *d_vx, double *d_vy, double *d_vz, double *d_u,int *nelt,int *irpu, int *irpv, int *irpw, int* iret, int *irg, int *toteq, int *if3d, double *d_vtrans, int *irho, double *d_phig, int *lx1, int *ly1, int *lz1, int *d_lglel, double *d_xm1, double *d_ym1, double *d_zm1, double *d_t,int *ldimt, int *npscal, double *d_pr, double *p0th, double *d_sii, double *d_siii, double *d_vdiff, int *ifield,char *d_cb, int *icv, int *icp, double *d_csound, int *imu,int *ilam, double *cpgref, double *cvgref, double *gmaref, double *rgasref, int *d_gllel, double *d_res2, int *iknd, int *inus,int *lelt){
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start compute_transport_props_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_transport_props_gpu_wrapper values nelt =%d,irpu=%d,irpv=%d,irpw=%d,iret=%d,irg=%d,toteq=%d,if3d=%d,irho=%d,lx1=%d,ly1=%d,lz1=%d,ldimt=%d,npscal=%d,p0th=%lf,ifield=%d,icv=%d,icp=%d,imu=%d,ilam=%d,cpgref=%lf,cvgref=%lf,gmaref=%lf,rgasref=%lf,iknd=%d,inus=%d,lelt=%d ,\n", nelt[0],irpu[0],irpv[0],irpw[0],iret[0],irg[0],toteq[0],if3d[0],irho[0],lx1[0],ly1[0],lz1[0],ldimt[0],npscal[0],p0th[0],ifield[0],icv[0],icp[0],imu[0],ilam[0],cpgref[0],cvgref[0],gmaref[0],rgasref[0],iknd[0],inus[0],lelt[0]);
int blockSize = glbblockSize1[0], gridSize;
int lxy = lx1[0]*ly1[0];
int nxyz= lxy*lz1[0];
int nnel=nxyz*nelt[0];
int nlel= nxyz*lelt[0];
// BUGFIX: integer ceiling division. The previous (int)ceil((float)nnel/blockSize)
// loses precision once nnel exceeds float's 24-bit integer range and can
// under-count blocks, silently leaving trailing points unprocessed.
gridSize = (nnel + blockSize - 1) / blockSize;
compute_transport_props_gpu_kernel<<<gridSize, blockSize>>>(d_vx, d_vy, d_vz, d_u, nelt[0], nxyz,nnel, irpu[0], irpv[0], irpw[0], iret[0],irg[0],toteq[0],if3d[0],d_vtrans, irho[0],d_phig ,lx1[0], ly1[0],lz1[0], d_lglel, d_xm1, d_ym1,d_zm1, d_t,ldimt[0], npscal[0], d_pr,p0th[0], d_sii,d_siii,d_vdiff, ifield[0],d_cb, icv[0], icp[0],d_csound,imu[0],ilam[0], cpgref[0], cvgref[0], gmaref[0], rgasref[0], d_gllel, d_res2, iknd[0], inus[0], lxy,nlel);
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End compute_transport_props_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}
// Builds the interface term flux[iuj] = -(U^- + U^+)/2 + U^- for every
// face-point/equation entry. nf and lf are kept for interface compatibility;
// only totthreads bounds the work. All i* offsets are 1-based slab indices.
__global__ void imqqtu_gpu_kernel(double *flux, int nf, int lf, int iuj, int ium, int iup,int totthreads){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= totthreads)
        return;
    double minus = flux[(ium - 1) + tid];  // U^- trace
    double plus  = flux[(iup - 1) + tid];  // U^+ trace
    // Same floating-point operation order as the original add3/cmult/add2
    // sequence, so results are bit-identical: ((minus+plus) * -0.5) + minus.
    double halfSum = (minus + plus) * -0.5;
    flux[(iuj - 1) + tid] = halfSum + minus;
}
// Host-side launcher for imqqtu_gpu_kernel (Fortran calling convention:
// trailing underscore, all scalars by pointer; d_flux already on device).
// Work size is one thread per face point per conserved equation.
extern "C" void imqqtu_gpu_wrapper_(int *glbblockSize2,double *d_flux,int *iuj,int *ium,int *iup,int *lx1, int *ly1, int *lz1, int *ldim, int *nelt, int *lelt, int *toteq){
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start imqqtu_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start imqqtu_gpu_wrapper values iuj =%d ,ium=%d,iup=%d,lx1=%d,ly1=%d,lz1=%d,ldim=%d,nelt=%d,lelt=%d,toteq=%d\n",iuj[0],ium[0],iup[0],lx1[0],ly1[0],lz1[0],ldim[0],nelt[0],lelt[0],toteq[0]);
int nf= lx1[0]*lz1[0]*2*ldim[0]*nelt[0];   // active face points
int lf= lx1[0]*lz1[0]*2*ldim[0]*lelt[0];   // allocated face points (slab stride)
int totthreads = nf*toteq[0];
int blockSize = glbblockSize2[0], gridSize;
// BUGFIX: integer ceiling division instead of (int)ceil((float)n/bs), which
// can under-count blocks for large n due to float precision.
gridSize = (totthreads + blockSize - 1) / blockSize;
imqqtu_gpu_kernel<<<gridSize, blockSize>>>(d_flux,nf,lf,iuj[0],ium[0],iup[0],totthreads);
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End imqqtu_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}
// Per-boundary-point Dirichlet work shared by all six element faces.
// (ix,iy,iz) are the volume GLL coordinates of the face point, l is the
// face-local point number used by the iu3 write, cb1 is the first character
// of the boundary-condition code ('W' wall or 'I' inflow/other).
// The nekasgn/cmtasgn sections below gather the full point state to mirror
// the Fortran user routines; many locals are currently unused but are kept
// for parity with the CPU path (verify with maintainers before pruning).
__device__ static void imqqtu_dirichlet_face_point(double *flux, int id, int e, int ix, int iy, int iz, int l, char cb1, int ifield, int ilam, int irho, int icv, int icp, int imu, double molmass, int iwp, int iwm, int iux, int iuy, int iuz, int iph, int ithm, int iu1, int iu2, int iu3, int iu4, int iu5, int icvf, int toteq, int lx1, int lxy, int nxyz, int lxz2ldimlelt, double *xm1, double *ym1, double *zm1, double *vx, double *vy, double *vz, double *t, double *pr, double *sii, double *siii, double *vdiff, double *vtrans, char *cb, double *u, double *phig, double *csound, int npscal, double p0th, int e_offset, int nlel, int nqq){
    int pt = e*nxyz + iz*lxy + iy*lx1 + ix;  // flat index of this GLL point
    // --- nekasgn equivalent -------------------------------------------------
    double x = xm1[pt];
    double y = ym1[pt];
    double z = zm1[pt];
    double r = x*x + y*y;
    double theta = 0.0;
    if (r > 0.0) { r = sqrtf(r); }  // NOTE(review): sqrtf on a double — confirm single precision is intended
    if (x != 0.0 || y != 0.0) { theta = atan2(y, x); }
    double ux = vx[pt];
    double uy = vy[pt];
    double uz = vz[pt];
    double temp = t[pt];
    double ps[10];  // passive scalars; NOTE(review): assumes npscal <= 10
    for (int ips = 0; ips < npscal; ips++) {
        ps[ips] = t[(ips+1)*nlel + pt];  // scalar ips lives in slab ips+1 of t
    }
    double pa = pr[pt];
    double p0 = p0th;
    double si2 = sii[pt];
    double si3 = siii[pt];
    double udiff = vtrans == 0 ? 0.0 : vdiff[(ifield-1)*nlel + pt];
    double utrans = vtrans[(ifield-1)*nlel + pt];
    char cbu1 = cb[0];
    char cbu2 = cb[1];
    char cbu3 = cb[2];
    // --- cmtasgn equivalent -------------------------------------------------
    double varsic[10];  // conserved variables; NOTE(review): assumes toteq <= 10
    for (int eqnum = 0; eqnum < toteq; eqnum++) {
        varsic[eqnum] = u[e*e_offset + eqnum*nxyz + iz*lxy + iy*lx1 + ix];
    }
    double phi = phig[pt];
    double rho = vtrans[(irho-1)*nlel + pt];
    double pres = pr[pt];
    double cv = 0.0, cp = 0.0;
    if (rho != 0) {  // guard against division by zero
        cv = vtrans[(icv-1)*nlel + pt] / rho;
        cp = vtrans[(icp-1)*nlel + pt] / rho;
    }
    double asnd = csound[pt];
    double mu = vdiff[(imu-1)*nlel + pt];
    udiff = vdiff[(imu-1)*nlel + pt];  // overrides the nekasgn value, as the Fortran does
    double lambda = vdiff[(ilam-1)*nlel + pt];
    // --- userbc equivalent --------------------------------------------------
    double molarmass = molmass;
    if (fabs(vdiff[(ilam-1)*nlel + pt]) > 0.0000000001) {
        // Viscous boundary state: prescribe velocity/temperature traces and
        // rebuild the conserved variables on the '+' side.
        flux[(iwp-1)+(iux-1)*lxz2ldimlelt+id] = ux;
        flux[(iwp-1)+(iuy-1)*lxz2ldimlelt+id] = uy;
        flux[(iwp-1)+(iuz-1)*lxz2ldimlelt+id] = uz;
        flux[(iwp-1)+(iph-1)*lxz2ldimlelt+id] = phi;
        flux[(iwp-1)+(ithm-1)*lxz2ldimlelt+id] = temp;
        flux[(iwp-1)+(iu1-1)*lxz2ldimlelt+id] = flux[(iwm-1)+(iu1-1)*lxz2ldimlelt+id];
        // BUGFIX: the face-4 copy of this line used (iwp-10) instead of
        // (iwp-1) — a typo that scattered momentum writes into the wrong slab.
        flux[(iwp-1)+(iu2-1)*lxz2ldimlelt+id] = flux[(iwp-1)+(iu1-1)*lxz2ldimlelt+id]*ux;
        // NOTE(review): the extra +l offset below appears in every face of the
        // original; it breaks the otherwise-uniform indexing and looks wrong —
        // preserved as-is, confirm with maintainers.
        flux[(iwp-1)+(iu3-1)*lxz2ldimlelt+id+l] = flux[(iwp-1)+(iu1-1)*lxz2ldimlelt+id]*uy;
        flux[(iwp-1)+(iu4-1)*lxz2ldimlelt+id] = flux[(iwp-1)+(iu1-1)*lxz2ldimlelt+id]*uz;
        if (cb1 == 'W') {
            // Wall: total energy from internal energy plus kinetic energy of
            // the prescribed momentum.
            flux[(iwp-1)+(iu5-1)*lxz2ldimlelt+id] = phi*flux[(iwm-1)+(icvf-1)*lxz2ldimlelt+id]*temp+0.5/flux[(iwp-1)+(iu1-1)*lxz2ldimlelt+id]*( (flux[(iwp-1)+(iu2-1)*lxz2ldimlelt+id]*flux[(iwp-1)+(iu2-1)*lxz2ldimlelt+id])+(flux[(iwp-1)+(iu3-1)*lxz2ldimlelt+id]*flux[(iwp-1)+(iu3-1)*lxz2ldimlelt+id])+(flux[(iwp-1)+(iu4-1)*lxz2ldimlelt+id]*flux[(iwp-1)+(iu4-1)*lxz2ldimlelt+id]));
        }
        else if (cb1 == 'I') {
            // Inflow: energy trace copied from the '-' side.
            flux[(iwp-1)+(iu5-1)*lxz2ldimlelt+id] = flux[(iwm-1)+(iu5-1)*lxz2ldimlelt+id];
        }
    }
    else {
        // Inviscid limit: '+' state is a plain copy of the '-' state.
        for (int m = 0; m < nqq; m++) {
            flux[(iwp-1)+m*lxz2ldimlelt+id] = flux[(iwm-1)+m*lxz2ldimlelt+id];
        }
    }
}
// Applies Dirichlet boundary states on all 'W'/'I' faces and forms the jump
// term flux[iuj] = U^- - U^+ for every conserved variable.
// One thread per face point (ntot = nelt * lx1*lz1*2*ldim); the flat id is
// decomposed into (face-local i1,i2, face number iface, element e).
// The original duplicated ~75 lines of per-point work once per face; that
// body now lives in imqqtu_dirichlet_face_point above (where the face-4
// (iwp-10) typo is fixed), and only the face -> (ix,iy,iz,l) mapping differs.
__global__ void imqqtu_dirichlet_gpu_kernel(double *flux, int ntot, int ifield, int ltot, int ilam, int irho, int icv, int icp, int imu, double molmass, int iwp, int iwm, int iuj, int iux,int iuy,int iuz, int iph, int ithm, int iu1, int iu2, int iu3, int iu4, int iu5, int icvf,int toteq,int lx1,int ly1,int lz1,int lxy, int lxz,int nxyz, int lxz2ldim, int lxz2ldimlelt, int a2ldim, char *cbc, int *lglel, double *xm1,double *ym1, double *zm1, double *vx, double *vy, double *vz, double *t, double *pr, double *sii, double *siii, double *vdiff, double *vtrans, char *cb, double *u, double *phig, double *pres, double *csound,int npscal,double p0th,int e_offset,int nlel,int nqq){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id < ntot) {
        int i1 = id % lx1;
        int i2 = (id/lx1)%lz1;
        int iface = (id/lxz)%a2ldim;  // face number 0..2*ldim-1
        int e = id/lxz2ldim;          // element number
        char cb1 = cbc[e*18+iface];   // first char of this face's BC code
        if (cb1 != 'E' && cb1 != 'P') {           // skip interior/periodic faces
            if (cb1 == 'W' || cb1 == 'I') {       // wall or inflow Dirichlet faces
                // Map face-local (i1,i2) to volume coordinates and face-local
                // point number l (Fortran faces 1..6 -> 0..5 here).
                int ix = 0, iy = 0, iz = 0, l = 0;
                int known = 1;
                switch (iface) {
                    case 0: iy = 0;       iz = i2; ix = i1; l = lx1*iz+ix; break;
                    case 1: ix = lx1-1;   iz = i2; iy = i1; l = lx1*iz+iy; break;
                    case 2: iy = ly1-1;   iz = i2; ix = i1; l = lx1*iz+ix; break;
                    case 3: ix = 0;       iz = i2; iy = i1; l = ly1*iz+iy; break;
                    case 4: iz = 0;       iy = i2; ix = i1; l = lx1*iy+ix; break;
                    case 5: iz = lz1-1;   iy = i2; ix = i1; l = lx1*iy+ix; break;
                    default: known = 0; break;  // unreachable: a2ldim <= 6
                }
                if (known) {
                    imqqtu_dirichlet_face_point(flux, id, e, ix, iy, iz, l, cb1, ifield, ilam, irho, icv, icp, imu, molmass, iwp, iwm, iux, iuy, iuz, iph, ithm, iu1, iu2, iu3, iu4, iu5, icvf, toteq, lx1, lxy, nxyz, lxz2ldimlelt, xm1, ym1, zm1, vx, vy, vz, t, pr, sii, siii, vdiff, vtrans, cb, u, phig, csound, npscal, p0th, e_offset, nlel, nqq);
                }
                // Jump term U^- - U^+ for all conserved variables (runs for
                // every W/I point, matching the original control flow).
                for (int ivar = 0; ivar < toteq; ivar++) {
                    flux[(iuj-1)+ivar*lxz2ldimlelt+id] = flux[(iwm-1)+ivar*lxz2ldimlelt+id]-flux[(iwp-1)+ivar*lxz2ldimlelt+id];
                }
            }
        }
    }
}
// Host-side launcher for imqqtu_dirichlet_gpu_kernel (Fortran calling
// convention: trailing underscore, all scalars by pointer; d_* pointers are
// already device memory). One thread per boundary face point.
extern "C" void imqqtu_dirichlet_gpu_wrapper_(int *glbblockSize2,double *d_flux, int *ifield, int *ilam, int *irho, int *icv, int *icp, int *imu, double *molmass, int *iwp, int *iwm, int *iuj, int *iux,int *iuy,int *iuz, int *iph, int *ithm, int *iu1, int *iu2, int *iu3, int *iu4, int *iu5, int *icvf,int *toteq,int *lx1,int *ly1,int *lz1, char *d_cbc, int *d_lglel, double *d_xm1,double *d_ym1, double *d_zm1, double *d_vx, double *d_vy, double *d_vz, double *d_t, double *d_pr, double *d_sii, double *d_siii, double *d_vdiff, double *d_vtrans, char *d_cb, double *d_u, double *d_phig, double *d_pres, double *d_csound, int *ldim, int *lelt, int *nelt,int *npscal, double *p0th,int *nqq ){
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start imqqtu_dirichlet_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start imqqtu_dirichlet_gpu_wrapper values ifield=%d,ilam=%d,irho=%d,icv=%d,icp=%d,imu=%d,molmass=%lf,iwp=%d,iwm=%d,iuj=%d,iux=%d,iuy=%d,iuz=%d,iph=%d,ithm=%d,iu1=%d,iu2=%d,iu3=%d,iu4=%d,iu5=%d,icvf=%d,toteq=%d,lx1=%d,ly1=%d,lz1=%d,ldim=%d,lelt=%d,nelt=%d,npscal=%d,p0th=%lf,nqq=%d,\n",ifield[0],ilam[0],irho[0],icv[0],icp[0],imu[0],molmass[0],iwp[0],iwm[0],iuj[0],iux[0],iuy[0],iuz[0],iph[0],ithm[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],icvf[0],toteq[0],lx1[0],ly1[0],lz1[0],ldim[0],lelt[0],nelt[0],npscal[0],p0th[0],nqq[0]);
// Derived sizes: faces per element = 2*ldim, points per face = lx1*lz1.
int lxz2ldim = lx1[0]*lz1[0]*2*ldim[0];
int nxyz = lx1[0]*ly1[0]*lz1[0];
int lxy = lx1[0]*ly1[0];
int lxz = lx1[0]*lz1[0];
int ntot = nelt[0]*lxz2ldim;          // active face points (work size)
int lxz2ldimlelt = lxz2ldim*lelt[0];  // allocated slab stride of d_flux
int ltot= lxz2ldim*lelt[0];
int e_offset=toteq[0]*nxyz;           // per-element stride of d_u
int nlel= nxyz*lelt[0];               // per-field slab stride of vdiff/vtrans/t
int a2ldim= 2*ldim[0];
int blockSize = glbblockSize2[0], gridSize;
// BUGFIX: integer ceiling division instead of (int)ceil((float)n/bs), which
// can under-count blocks for large n due to float precision.
gridSize = (ntot + blockSize - 1) / blockSize;
imqqtu_dirichlet_gpu_kernel<<<gridSize, blockSize>>>(d_flux, ntot, ifield[0], ltot,ilam[0],irho[0],icv[0],icp[0],imu[0],molmass[0],iwp[0],iwm[0],iuj[0],iux[0],iuy[0],iuz[0],iph[0],ithm[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],icvf[0],toteq[0],lx1[0],ly1[0],lz1[0],lxy,lxz,nxyz,lxz2ldim,lxz2ldimlelt,a2ldim,d_cbc, d_lglel,d_xm1,d_ym1, d_zm1, d_vx,d_vy,d_vz,d_t,d_pr,d_sii,d_siii,d_vdiff,d_vtrans, d_cb,d_u, d_phig,d_pres,d_csound,npscal[0],p0th[0],e_offset,nlel,nqq[0] );
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End imqqtu_dirichlet_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
}
|
13,422 | #include <stdio.h>
/*
 * Counts matching elements between two descending-sorted arrays via a
 * merge-style walk (multiset intersection size).
 *
 * BUGFIX: the original was recursive with one stack frame per element,
 * risking stack overflow for long inputs; rewritten as an equivalent loop.
 */
int measureSimilarity(int *a, int *b, int len1, int len2)
{
    int count = 0;
    while (len1 > 0 && len2 > 0) {
        if (a[0] == b[0]) {            /* match: advance both */
            count++;
            a++; b++;
            len1--; len2--;
        } else if (a[0] > b[0]) {      /* descending order: drop the larger head */
            a++;
            len1--;
        } else {
            b++;
            len2--;
        }
    }
    return count;
}
/*
 * Sums measureSimilarity over consecutive windows of `period` elements.
 *
 * BUGFIX: the original recursed with fixed-size `period` chunks and only
 * stopped at len == 0, so lengths that are not an exact multiple of `period`
 * skipped past zero, never terminated, and read out of bounds. The loop
 * below stops once either length is exhausted and clamps the final (partial)
 * window to the remaining elements. Behavior for exact multiples unchanged.
 */
int measurePeriodicSimilarity(int *a, int *b, int period, int len1, int len2)
{
    int total = 0;
    while (len1 > 0 && len2 > 0) {
        int n1 = (len1 < period) ? len1 : period;   /* clamp last window */
        int n2 = (len2 < period) ? len2 : period;
        total += measureSimilarity(a, b, n1, n2);
        a += period;
        b += period;
        len1 -= period;
        len2 -= period;
    }
    return total;
}
/*
int main()
{
int a[20] = {30, 25, 22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 15, 12};
int b[20] = {22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 12, 12, 12, 12};
int c = measureSimilarity(a,b, 20, 20);
printf("Similarity : %d\n", c);
int d[40] = {30, 25, 22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 15, 12, 30, 25, 22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 15, 12};
int e[40] = {22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 12, 12, 12, 12, 22, 20, 20, 20, 20, 20, 17, 17, 17, 17, 17, 17, 17, 17, 15, 15, 12, 12, 12, 12};
int g = measurePeriodicSimilarity(d,e, 20, 40, 40);
printf("Similarity : %d\n", g);
}
*/
|
13,423 | #include "includes.h"
// Element-wise masked copy: devOut[i] = A[i] where the mask B[i] is zero,
// and 0 where the mask is set (i.e. "subtract" the masked region from A).
// NOTE(review): there is no length parameter, so the launch configuration
// must cover exactly the array extent — confirm callers guarantee this.
__global__ void Mask_Subtract_Kernel( int* A, int* B, int* devOut)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    devOut[tid] = (B[tid] == 0) ? A[tid] : 0;
}
13,424 |
#include "sql_truncate.cuh"
using namespace std;
#define invalidQuery(query) {utils::invalidQuery(query); return;}
void sql_truncate::execute(std::string &query) {
utils::toLower(query);
tokenizer t(query);
string word;
t >> word;
if(word != "truncate")
invalidQuery(query);
t >> word;
if(!utils::tableExists(word))
invalidQuery(query);
Metadata metadata(word);
metadata.rowCount = 0;
metadata.commit();
//delete files
string dataFileName;
dataFileName = utils::getDataFileName(word);
remove(dataFileName.c_str());
std::ofstream fout = ofstream(dataFileName);
fout.close();
return;
}
|
13,425 | #include "includes.h"
// Transposes a row-major ny x nx matrix `in` into a row-major nx x ny matrix
// `out`. Each thread handles one full input row; the 2D launch is flattened
// into a single row index, so any grid/block shape covering ny threads works.
__global__ void transposeRow(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    // Flattened global thread id = index of the input row to copy.
    unsigned int row = iy * gridDim.x * blockDim.x + ix;
    if (row < ny)
    {
        int row_start = row * nx;
        int row_end = (row + 1) * nx;
        int col_index = row;  // element (row, 0) lands at out[0 * ny + row]
        for (int i = row_start; i < row_end; i++) {
            out[col_index] = in[i];
            // BUGFIX: the transposed matrix has ny columns per row, so
            // consecutive elements of one input row land ny apart in `out`.
            // The original advanced by nx, which is only correct for square
            // matrices and produces overlapping/garbled output otherwise.
            col_index += ny;
        }
    }
}
13,426 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <stdio.h>
#include <iostream>
using namespace std;
// Host-side dense integer matrix backed by a vector-of-vectors, with
// elements filled with random values in [0, 100) at construction.
// NOTE(review): nested host_vectors are host-only and cannot be handed to a
// kernel directly; callers flatten the data (e.g. via cudaMallocPitch).
class Matrix
{
private:
int row, col;  // matrix dimensions (rows x columns)
public:
thrust::host_vector<thrust::host_vector<int> > v;  // row-major element storage
Matrix();                          // empty 0x0 matrix
Matrix(const int r, const int c);  // r x c matrix of random values
int randNum() { return rand() % 100; };  // random element in [0, 100)
int rows() { return row; };
int cols() { return col; };
void show() const;  // print the matrix to stdout
~Matrix();
};
// Default-construct an empty 0x0 matrix.
// The original ran the random-fill loop here, but with row == col == 0 the
// loop body could never execute; the dead code is removed.
Matrix::Matrix()
{
    row = col = 0;
}
// Construct an r x c matrix whose entries are random values in [0, 100).
Matrix::Matrix(const int r, const int c)
{
    row = r, col = c;
    for (size_t i = 0; i < row; i++)
    {
        thrust::host_vector<int> rowVec;
        for (size_t j = 0; j < col; j++)
            rowVec.push_back(randNum());
        v.push_back(rowVec);
    }
}
void Matrix::show() const
{
for (size_t i = 0; i < row; i++)
{
for (size_t j = 0; j < col; j++)
{
cout << v[i][j] << ", ";
}
cout << endl;
}
}
Matrix::~Matrix()
{
}
// Multiply the N x M pitched matrix d_a by the M x N pitched matrix d_b
// into the N x N pitched result d_c. Expects a single block of N x N
// threads; N==4 and M==3 must match the static shared-tile sizes below.
//
// Fixes vs. the original:
//  * __syncthreads() was called inside divergent branches (undefined
//    behavior); the barrier now sits on the common path after staging.
//  * the dot-product loop iterated i over N (4), reading one past the
//    3-wide / 3-tall shared tiles; it now runs over the shared dim M.
//  * debug printf of every staged element removed.
__global__ void matrixMultiply(int *d_a, size_t pitch_a, int *d_b, size_t pitch_b, int *d_c, size_t pitch_c, const int N, const int M)
{
    __shared__ int input1Temp[4][3];   // tile of d_a (N x M)
    __shared__ int input2Temp[3][4];   // tile of d_b (M x N)
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;

    // Stage d_a: rows 0..N-1, cols 0..M-1.
    if (row < N && col < M)
    {
        int *src_a = (int *)((char *)d_a + row * pitch_a) + col;
        input1Temp[row][col] = *src_a;
    }
    // Stage d_b: rows 0..M-1, cols 0..N-1.
    if (row < M && col < N)
    {
        int *src_b = (int *)((char *)d_b + row * pitch_b) + col;
        input2Temp[row][col] = *src_b;
    }
    // Every thread reaches this barrier exactly once.
    __syncthreads();

    if (row < N && col < N)
    {
        int tmp = 0;
        for (int i = 0; i < M; i++)   // accumulate over the shared dimension
        {
            tmp += input1Temp[row][i] * input2Temp[i][col];
        }
        int *dst_c = (int *)((char *)d_c + row * pitch_c) + col;
        *dst_c = tmp;
    }
}
// Debug helper: print every element of a pitched rows x cols int matrix,
// one thread per element.
__global__ void showPitch(int *a, size_t pitch, int rows, int cols)
{
    const int r = blockDim.y * blockIdx.y + threadIdx.y;
    const int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (r >= rows || c >= cols)
        return;
    const int *cell = (const int *)((const char *)a + r * pitch) + c;
    printf("a[%d][%d]: %d", r, c, *cell);
}
// Drive a 4x3 * 3x4 pitched matrix multiply on the GPU and print the 4x4
// result.
//
// Fix vs. the original: the device->host cudaMemcpy2D used M*sizeof(int)
// as BOTH the host pitch and the copy width, but h_c rows are N ints
// apart and N ints wide -- every row landed misaligned and the last column
// was never copied. The print loop also only showed M of the N columns.
int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    const int N = 4;   // rows of A, cols of B, result dimension
    const int M = 3;   // cols of A, rows of B
    // Three streams so the three host->device copies can overlap.
    cudaStream_t stream_a, stream_b, stream_c;
    cudaStreamCreate(&stream_a); cudaStreamCreate(&stream_b); cudaStreamCreate(&stream_c);
    // Result matrix (N x N) on host and device.
    static int h_c[N][N];
    int *d_c;
    size_t pitch_c;
    cudaMallocPitch(&d_c, &pitch_c, N * sizeof(int), N);
    cudaMemcpy2DAsync(d_c, pitch_c, h_c, N * sizeof(int), N * sizeof(int), N, cudaMemcpyHostToDevice, stream_c);
    // Input A (N x M).
    int h_a[N][M] = { { 1,2,3 },{ 4,5,6 },{ 7,8,9 },{ 1,3,4 } };
    size_t pitch_a;
    int *d_a;
    cudaMallocPitch(&d_a, &pitch_a, M * sizeof(int), N);
    cudaMemcpy2DAsync(d_a, pitch_a, h_a, M * sizeof(int), M * sizeof(int), N, cudaMemcpyHostToDevice, stream_a);
    // Input B (M x N).
    int h_b[M][N] = { { 1,2,3,4 },{ 4,5,6,7 },{ 7,8,9,10 } };
    size_t pitch_b;
    int *d_b;
    cudaMallocPitch(&d_b, &pitch_b, N * sizeof(int), M);
    cudaMemcpy2DAsync(d_b, pitch_b, h_b, N * sizeof(int), N * sizeof(int), M, cudaMemcpyHostToDevice, stream_b);
    cudaStreamSynchronize(stream_a); cudaStreamSynchronize(stream_b); cudaStreamSynchronize(stream_c);
    cout << "------------------" << endl;
    dim3 blockSize(1);
    dim3 threadSize(N, N);   // one thread per result element
    cout << threadSize.x << endl;
    matrixMultiply <<<blockSize, threadSize>>>(d_a, pitch_a, d_b, pitch_b, d_c, pitch_c, N, M);
    cudaDeviceSynchronize();
    // Copy the full N x N result back: host pitch and width are N ints.
    cudaMemcpy2D(h_c, N * sizeof(int), d_c, pitch_c, N * sizeof(int), N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)   // all N columns of the result
        {
            cout << h_c[i][j] << ", ";
        }
        cout << endl;
    }
    system("pause");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaStreamDestroy(stream_a); cudaStreamDestroy(stream_b); cudaStreamDestroy(stream_c);
    return 0;
}
13,427 | #include "includes.h"
extern "C"
// Assign each image (one per thread) to its nearest cluster center by
// squared Euclidean distance over the 784 (flattened 28x28) features, and
// record the chosen center index in updates[gid].
//
// Fix vs. the original: the center loop was hard-coded to 100 iterations,
// silently ignoring the noClusters parameter; it now honors noClusters.
// NOTE(review): there is no bounds check on gid -- the launch must supply
// exactly one thread per image.
__global__ void updateCenters(float *centers, float *images, int *updates, int noClusters)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int imageSize = 784;
    float pImage[784];              // this thread's image, staged locally

    const int imagesOffset = gid * imageSize;
    for (int i = 0; i < imageSize; i++) {
        pImage[i] = images[imagesOffset + i];
    }

    // Scan all centers, tracking the smallest squared distance.
    float best = 100000000;         // sentinel larger than any real distance
    int bestCenter = -1;
    for (int c = 0; c < noClusters; c++) {
        float sum = 0;
        for (int i = 0; i < imageSize; i++) {
            const float diff = centers[c * imageSize + i] - pImage[i];
            sum += diff * diff;
        }
        if (sum < best) {
            best = sum;
            bestCenter = c;
        }
    }
    updates[gid] = bestCenter;
}
13,428 | #include "cuda_prefix_sum.cuh"
#include <cstdio>
namespace pplanner {
// Work-efficient (Blelloch) exclusive prefix sum over n elements of idata
// into odata, using the shared buffer tmp. Each participating thread owns
// two elements (ai = thid and bi = thid + n/2); ConflictFreeOffset pads
// shared-memory indices to sidestep bank conflicts.
// NOTE(review): the caller passes n == blockDim.x, so bi reaches up to
// 1.5*blockDim.x - 1 -- confirm idata/odata/tmp are sized for that, or
// that the launch uses n/2 threads per n elements as the classic scan does.
__device__
void DevicePrefixSum(int thid, int *idata, int *odata, int *tmp, int n) {
  int offset = 1;
  int ai = thid;
  int bi = thid + (n / 2);
  int bank_offset_a = ConflictFreeOffset(ai);
  int bank_offset_b = ConflictFreeOffset(bi);
  // Load both owned elements into padded shared memory.
  tmp[ai + bank_offset_a] = idata[ai];
  tmp[bi + bank_offset_b] = idata[bi];
  // Up-sweep (reduce): build partial sums in place up the tree.
  for (int d = n >> 1; d > 0; d >>= 1) {
    __syncthreads();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      ai += ConflictFreeOffset(ai);
      bi += ConflictFreeOffset(bi);
      tmp[bi] += tmp[ai];
    }
    offset *= 2;
  }
  // Clear the root so the down-sweep yields an exclusive scan.
  if (thid == 0) tmp[n - 1 + ConflictFreeOffset(n - 1)] = 0;
  // Down-sweep: propagate prefixes back down, swapping and accumulating.
  for (int d = 1; d < n; d *= 2) {
    offset >>= 1;
    __syncthreads();
    if (thid < d) {
      int ai = offset * (2 * thid + 1) - 1;
      int bi = offset * (2 * thid + 2) - 1;
      ai += ConflictFreeOffset(ai);
      bi += ConflictFreeOffset(bi);
      int t = tmp[ai];
      tmp[ai] = tmp[bi];
      tmp[bi] += t;
    }
  }
  __syncthreads();
  // Write both owned results back out.
  odata[ai] = tmp[ai + bank_offset_a];
  odata[bi] = tmp[bi + bank_offset_b];
}
// Per-block exclusive scan: each block runs a Blelloch scan over its own
// blockDim.x-element slice of idata into odata. Launch with dynamic shared
// memory sized for the slice plus bank-conflict padding.
__global__ void PrefixSum(int *idata, int *odata, int n) {
  extern __shared__ int tmp[];
  const int base = blockIdx.x * blockDim.x;
  DevicePrefixSum(threadIdx.x, idata + base, odata + base, tmp, blockDim.x);
}
// Add each block's scanned offset (idata[blockIdx.x]) to every element of
// that block's slice of odata, completing a multi-block prefix sum.
//
// Fix vs. the original: the write indexed odata[threadIdx.x], so every
// block updated the first blockDim.x elements instead of its own slice;
// the global index id is the correct target.
__global__ void AddBlockPrefixSum(int *idata, int *odata, int n) {
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  if (id < n) odata[id] += idata[blockIdx.x];
}
} // namespace pplanner
|
13,429 | #include "includes.h"
// Element-wise c = a + b, one element per thread. Single-block launch
// assumed (only threadIdx.x is used); no bounds check, so the launch must
// supply exactly one thread per element.
__global__ void addArrayGPU(int* a, int* b, int* c) {
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
13,430 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define DEBUG
typedef unsigned long ulong;
typedef unsigned short ushort;
// IDEA addition: sum modulo 2^16. Operands are long because decryption
// subkeys are stored negated (see IDEA_decryption_key_schedule); the cast
// wraps the remainder into [0, 65535].
__device__ ushort add(long a, long b)
{
    return (ushort)((a + b) % 65536l);
}
// IDEA multiplication: product modulo 65537, with 0 encoding 65536 so every
// operand lies in the multiplicative group of GF(65537).
__device__ ushort multiply(long a, long b)
{
    long ch, cl, c;
    if (a == 0) a = 65536l;   // 0 stands for 65536
    if (b == 0) b = 65536l;
    c = a * b;
    if (c) {
        // Reduce mod 65537 from 16-bit halves: 2^16 == -1 (mod 65537),
        // so a*b mod 65537 == low - high (wrapped into range).
        ch = (c >> 16) & 65535l;
        cl = c & 65535l;
        if (cl >= ch) return (ushort) (cl - ch);
        return (ushort) ((cl - ch + 65537l) & 65535l);
    }
    // c == 0 here presumably guards a 32-bit long, where 65536*65536
    // overflows to 0; 65536*65536 == 1 (mod 65537). TODO confirm.
    if (a == b) return 1;
    return 0;
}
// IDEA block cipher, one 64-bit block (four ushorts) per thread.
// X: input blocks, transformed in place; Y: output blocks;
// K: subkey schedule stored flat as K[round*6 + i] (9 groups, 52 used).
// Decryption reuses this routine with the inverted schedule L.
__global__ void IDEA_encryption(ushort *X, ushort *Y, long *K)
{
    ushort a, r, t0, t1, t2;
    //ushort i;
    int tid = threadIdx.x;
    // Eight full rounds.
    for (r = 0; r < 8; r++) {
        // Key-mix the four sub-blocks with the round's first four subkeys.
        X[tid * 4 + 0] = multiply(X[tid * 4 + 0], K[r*6+0]);
        X[tid * 4 + 3] = multiply(X[tid * 4 + 3], K[r*6+3]);
        X[tid * 4 + 1] = add(X[tid * 4 + 1], K[r*6+1]);
        X[tid * 4 + 2] = add(X[tid * 4 + 2], K[r*6+2]);
        // MA (multiply-add) structure mixing the two halves.
        t0 = multiply(K[r*6+4], X[tid * 4 + 0] ^ X[tid * 4 + 2]);
        t1 = multiply(K[r*6+5], add(t0, X[tid * 4 + 1] ^ X[tid * 4 + 3]));
        t2 = add(t0, t1);
        X[tid * 4 + 0] ^= t1;
        X[tid * 4 + 3] ^= t2;
        // Swap the middle sub-blocks (through the temporary a).
        a = X[tid * 4 + 1] ^ t2;
        X[tid * 4 + 1] = X[tid * 4 + 2] ^ t1;
        X[tid * 4 + 2] = a;
#ifdef DEBUG
        /*printf("%u ", r + 1);
        for (i = 0; i < 6; i++)
        printf("%hu ", (ushort) K[r*6+i]);
        printf("%hu %hu %hu %hu\n", X[0], X[1], X[2], X[3]);*/
#endif
    }
    // Output transform with the ninth subkey group (note 1/2 swap).
    Y[tid * 4 + 0] = multiply(X[tid * 4 + 0], K[8*6+0]);
    Y[tid * 4 + 3] = multiply(X[tid * 4 + 3], K[8*6+3]);
    Y[tid * 4 + 1] = add(X[tid * 4 + 2], K[8*6+1]);
    Y[tid * 4 + 2] = add(X[tid * 4 + 1], K[8*6+2]);
#ifdef DEBUG
    /*printf("9 ");
    for (i = 0; i < 6; i++)
    printf("%hu ", (ushort) K[8*6+i]);
    printf("%hu %hu %hu %hu\n", Y[0], Y[1], Y[2], Y[3]);*/
#endif
}
// Pack 16 MSB-first bits (one per array element) into a single ushort.
__device__ ushort bits_to_ushort(ushort *bits)
{
    ushort packed = bits[0];
    for (ushort pos = 1; pos < 16; pos++) {
        packed = (ushort)((packed << 1) + bits[pos]);
    }
    return packed;
}
// Expand `number` into 16 MSB-first bits at `bits`. Each of the first 16
// threads writes exactly one output bit, so the kernel must run with at
// least 16 threads, all passing the same `number` -- TODO confirm callers
// guarantee both.
__device__ void ushort_to_bits(ushort number, ushort *bits)
{
    ushort i, temp[16];
    int tid = threadIdx.x;
    // Every thread builds its own LSB-first copy in registers/local memory.
    for (i = 0; i < 16; i++) {
        temp[i] = (ushort) (number & 1);
        number >>= 1;
    }
    /*for (i = 0; i < 16; i++)
    bits[i] = temp[15 - i];*/
    // Thread tid mirrors one element so the output ends up MSB-first.
    if(tid<16)
        bits[tid] = temp[15-tid];
}
// One 25-bit cyclic left rotation of the 128-bit key bit-string, then
// harvest of the next batch of 16-bit subkeys into K (flat, K[round*6+i]).
// `index` (0..5) selects which subkey slots this pass fills; bits1 holds
// the current bit-string and bits2 receives the rotated one (callers
// alternate the two buffers). Work is spread one bit/subkey per thread,
// so the block needs at least 128 threads.
// NOTE(review): there is no __syncthreads() between the bit writes and the
// rotation/harvest reads below -- confirm the launch keeps this race-free.
__device__ void cyclic_left_shift(ushort index, ushort *bits1,
ushort *bits2, long *K)
{
    ushort i;
    int tid = threadIdx.x;
    // First pass only: expand the eight initial subkeys into 128 bits.
    if (index == 0) {
        for (i = 0; i < 6; i++)
            ushort_to_bits((ushort) K[0*6+i], bits1 + 16 * i);
        ushort_to_bits((ushort) K[1*6+0], bits1 + 96);
        ushort_to_bits((ushort) K[1*6+1], bits1 + 112);
    }
    /*i = 0;
    for (j = 25; j < 128; j++)
    bits2[i++] = bits1[j];
    for (j = 0; j < 25; j++)
    bits2[i++] = bits1[j];*/
    // Rotate left by 25: each thread moves one bit.
    if( tid < 103)
        bits2[tid] = bits1[tid + 25];
    else if(tid >= 103 && tid <128)
        bits2[tid] = bits1[tid - 103];
    // Harvest the subkey slots owned by this pass.
    switch (index) {
    case 0 :
        /*for (i = 2; i < 6; i++)
        K[1][i] = bits_to_ushort(bits2 + 16 * (i - 2));
        for (i = 0; i < 4; i++)
        K[2][i] = bits_to_ushort(bits2 + 64 + 16 * i);*/
        if(tid >=2 && tid < 6)
            K[1*6+tid] = bits_to_ushort(bits2 + 16 * (tid - 2 ));
        if(tid < 4)
            K[2*6+tid] = bits_to_ushort(bits2 + 64 + 16 * tid);
        break;
    case 1 :
        K[2*6+4] = bits_to_ushort(bits2);
        K[2*6+5] = bits_to_ushort(bits2 + 16);
        /*for (i = 0; i < 6; i++)
        K[3][i] = bits_to_ushort(bits2 + 32 + 16 * i);*/
        if(tid < 6)
            K[3*6+tid] = bits_to_ushort(bits2 + 32 + 16 * tid);
        break;
    case 2 :
        /*for (i = 0; i < 6; i++)
        K[4*6+i] = bits_to_ushort(bits2 + 16 * i);*/
        if(tid < 6)
            K[4*6+tid] = bits_to_ushort(bits2 + 16 * tid);
        K[5*6+0] = bits_to_ushort(bits2 + 96);
        K[5*6+1] = bits_to_ushort(bits2 + 112);
        break;
    case 3 :
        /*for (i = 2; i < 6; i++)
        K[5][i] = bits_to_ushort(bits2 + 16 * (i - 2));
        for (i = 0; i < 4; i++)
        K[6][i] = bits_to_ushort(bits2 + 64 + 16 * i);*/
        if(tid >=2 && tid < 6)
            K[5*6+tid] = bits_to_ushort(bits2 + 16 * (tid - 2));
        if(tid < 4)
            K[6*6+tid] = bits_to_ushort(bits2 + 64 + 16 * tid);
        break;
    case 4 :
        K[6*6+4] = bits_to_ushort(bits2);
        K[6*6+5] = bits_to_ushort(bits2 + 16);
        /*for (i = 0; i < 6; i++)
        K[7][i] = bits_to_ushort(bits2 + 32 + 16 * i);*/
        if(tid < 6)
            K[7*6+tid] = bits_to_ushort(bits2 + 32 + 16 * tid);
        break;
    case 5 :
        /*for (i = 0; i < 4; i++)
        K[8][i] = bits_to_ushort(bits2 + 16 * i);*/
        if(tid < 4)
            K[8*6+tid] = bits_to_ushort(bits2 + 16 * tid);
        break;
    }
}
// Expand the 128-bit user key into the encryption subkey schedule K
// (flat, K[round*6 + i]). Designed for a single block of 128 threads:
// each cyclic_left_shift pass uses one thread per bit/subkey.
// The tid == 32 guard just elects an arbitrary single lane for the scalar
// copy of key[6..7] (any one lane would do).
// NOTE(review): there is no __syncthreads() between the K writes here and
// the reads inside cyclic_left_shift -- confirm this is race-free.
__global__ void IDEA_encryption_key_schedule(ushort *key, long *K)
{
    ushort bits1[128], bits2[128];
    int tid = threadIdx.x;
    //for (i = 0; i < 6; i++) K[0][i] = key[i];
    if(tid < 6)
        K[0*6+tid] = key[tid];
    if(tid == 32)
        K[1*6+0] = key[6], K[1*6+1] = key[7];
    // Alternate bits1/bits2 as source/destination for the six rotations.
    cyclic_left_shift(0, bits1, bits2, K);
    cyclic_left_shift(1, bits2, bits1, K);
    cyclic_left_shift(2, bits1, bits2, K);
    cyclic_left_shift(3, bits2, bits1, K);
    cyclic_left_shift(4, bits1, bits2, K);
    cyclic_left_shift(5, bits2, bits1, K);
}
// Iterative extended Euclid: on return, *d = gcd(a, b) and a*x + b*y = d.
__device__ void extended_euclidean(long a, long b, long *x, long *y, long *d)
{
    if (b == 0) {
        // gcd(a, 0) = a with trivial coefficients.
        *d = a;
        *x = 1;
        *y = 0;
        return;
    }
    long x_prev = 1, x_curr = 0;
    long y_prev = 0, y_curr = 1;
    while (b > 0) {
        const long q = a / b;
        const long r = a - q * b;
        // Advance the coefficient recurrences alongside the remainders.
        *x = x_prev - q * x_curr;
        *y = y_prev - q * y_curr;
        a = b; b = r;
        x_prev = x_curr; x_curr = *x;
        y_prev = y_curr; y_curr = *y;
    }
    *d = a;
    *x = x_prev;
    *y = y_prev;
}
// Multiplicative inverse modulo 65537 under IDEA's convention: the encoded
// value 0 stands for 65536, whose inverse is itself (returned as 65536).
__device__ long inv(ushort ub)
{
    if (ub == 0) return 65536l;
    long x, y, d;
    extended_euclidean(65537l, (long)ub, &x, &y, &d);
    // y is the coefficient of ub; lift it into [0, 65536].
    return (y >= 0) ? (ushort) y : (ushort) (y + 65537l);
}
// Build the IDEA decryption schedule L from the encryption schedule K:
// round r takes the inverted/negated mix subkeys of round 8-r and the MA
// subkeys of round 7-r, with the two addition subkeys swapped for the
// middle rounds. Launch with at least 8 threads (one per middle round).
//
// Fix vs. the original: the final zeroing wrote L[8*6+6] (index 54, one
// past the 54-entry schedule); the intended slot is L[8*6+5].
__global__ void IDEA_decryption_key_schedule(long *K, long *L)
{
    ushort r8, r9;
    int tid = threadIdx.x;
    if(tid == 0){
        // Round 0: from K's output transform (round 8) and round 7's MA keys.
        L[0*6+0] = inv((ushort) K[8*6+0]);
        L[0*6+1] = - K[8*6+1];
        L[0*6+2] = - K[8*6+2];
        L[0*6+3] = inv((ushort) K[8*6+3]);
        L[0*6+4] = K[7*6+4];
        L[0*6+5] = K[7*6+5];
    }
    if(tid>0 && tid <8){
        // Middle rounds: note subkeys 1 and 2 swap places.
        r9 = (ushort) (8 - tid);
        r8 = (ushort) (7 - tid);
        L[tid*6+0] = inv((ushort) K[r9*6+0]);
        L[tid*6+1] = - K[r9*6+2];
        L[tid*6+2] = - K[r9*6+1];
        L[tid*6+3] = inv((ushort) K[r9*6+3]);
        L[tid*6+4] = K[r8*6+4];
        L[tid*6+5] = K[r8*6+5];
    }
    if(tid == 0){
        // Output transform: from K's round 0; MA slots 4-5 are unused.
        L[8*6+0] = inv((ushort) K[0*6+0]);
        L[8*6+1] = - K[0*6+1];
        L[8*6+2] = - K[0*6+2];
        L[8*6+3] = inv((ushort) K[0*6+3]);
        L[8*6+4] = L[8*6+5] = 0;   // was L[8*6+6]: out-of-bounds write
    }
}
/*
 * Encrypt a file with IDEA on the GPU block-by-block, write the ciphertext,
 * then decrypt it back and write the round-trip result. Each GPU thread
 * handles one 64-bit (4-ushort) block; tc blocks per launch.
 *
 * Fixes vs. the original:
 *  * dev_K / dev_L were allocated with cudaMallocPitch, but every kernel
 *    indexes them flat as K[round*6 + i]; they are now flat allocations of
 *    54 longs (9 groups x 6 subkeys), matching the kernels.
 *  * the K copy-back sized the transfer with sizeof(long *) instead of
 *    sizeof(long).
 *  * files, host buffers, and device buffers are now released.
 */
int main(int argc, char *argv[])
{
    int tc = 512;                 /* cipher blocks processed per launch */
    int count;
    long *K = NULL, *L = NULL;
    long *dev_K = NULL, *dev_L = NULL;
    ushort key[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    ushort *X, *Y;
    ushort *dev_X, *dev_Y;
    ushort *dev_key = NULL;
    FILE *in_file = fopen("/home/pratik/input.jpg", "r");
    FILE *out_file = fopen("/home/pratik/output.jpg", "w");
    FILE *mid_w = fopen("/home/pratik/encrypt.jpg", "w");
    if (in_file == NULL || out_file == NULL || mid_w == NULL)
    {
        printf("Error! Could not open file\n");
        exit(-1);
    }
    K = (long *)malloc(54 * sizeof(long));
    L = (long *)malloc(54 * sizeof(long));
    cudaMalloc((void **) &dev_key, 16);
    cudaMemcpy(dev_key, key, 16, cudaMemcpyHostToDevice);
    /* 9 x 6 subkeys, indexed flat as K[r*6+i] by all kernels. */
    cudaMalloc((void **) &dev_K, 54 * sizeof(long));
    cudaMalloc((void **) &dev_L, 54 * sizeof(long));
    IDEA_encryption_key_schedule<<<1,128>>>(dev_key, dev_K);
    cudaMemcpy(K, dev_K, 54 * sizeof(long), cudaMemcpyDeviceToHost);
    X = (ushort *)malloc(tc * 4 * sizeof(ushort));
    Y = (ushort *)malloc(tc * 4 * sizeof(ushort));
    cudaMalloc((void **) &dev_Y, tc * 4 * 2);
    cudaMalloc((void **) &dev_X, tc * 4 * 2);
    /* Encrypt full tc*4-ushort chunks; the short tail is passed through. */
    count = fread(X, 2, tc * 4, in_file);
    while (count == tc * 4) {
        cudaMemcpy(dev_X, X, tc * 8, cudaMemcpyHostToDevice);
        IDEA_encryption<<<1,tc>>>(dev_X, dev_Y, dev_K);
        cudaMemcpy(Y, dev_Y, tc * 8, cudaMemcpyDeviceToHost);
        fwrite(Y, 2, tc * 4, mid_w);
        count = fread(X, 2, tc * 4, in_file);
    }
    if (count < tc * 4 && count > 0) {
        fwrite(X, 2, count, mid_w);
    }
    fclose(mid_w);
    FILE *mid_file = fopen("/home/pratik/encrypt.jpg", "r");
    if (mid_file == NULL) {
        printf("Error! Could not open file\n");
        exit(-1);
    }
    /* Decryption = the same cipher run with the inverted key schedule. */
    IDEA_decryption_key_schedule<<<1,8>>>(dev_K, dev_L);
    count = fread(Y, 2, tc * 4, mid_file);
    while (count == tc * 4) {
        cudaMemcpy(dev_Y, Y, tc * 8, cudaMemcpyHostToDevice);
        IDEA_encryption<<<1,tc>>>(dev_Y, dev_X, dev_L);
        cudaMemcpy(X, dev_X, tc * 8, cudaMemcpyDeviceToHost);
        fwrite(X, 2, tc * 4, out_file);
        count = fread(Y, 2, tc * 4, mid_file);
    }
    if (count < tc * 4 && count > 0) {
        fwrite(Y, 2, count, out_file);
    }
    fclose(mid_file);
    fclose(in_file);
    fclose(out_file);
    free(K); free(L); free(X); free(Y);
    cudaFree(dev_key);
    cudaFree(dev_K); cudaFree(dev_L);
    cudaFree(dev_X); cudaFree(dev_Y);
    return 0;
}
|
13,431 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise c = a + b; single-block launch, one element per thread.
__global__ void add(int *a,int *b,int*c){
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Read n, build a[i]=i and b[i]=2i, add them on the GPU, print the sums.
//
// Fix vs. the original: scanf's result was ignored and n was unchecked,
// so bad input or n > 1000 wrote past the fixed 1000-element host arrays
// (and launched an oversized single block).
int main(void){
    int n,a[1000],b[1000],c[1000],i,size,*d_a,*d_b,*d_c;
    printf("Enter no. of elements:\n");
    if(scanf("%d",&n) != 1 || n < 1 || n > 1000){
        printf("Invalid element count (expected 1..1000)\n");
        return 1;
    }
    for(i=0;i<n;i++){
        a[i] = i;
        b[i] = i*2;
    }
    size = sizeof(int);
    cudaMalloc((void **)&d_a,size*n);
    cudaMalloc((void **)&d_b,size*n);
    cudaMalloc((void **)&d_c,size*n);
    cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size*n,cudaMemcpyHostToDevice);
    add <<<1,n>>> (d_a,d_b,d_c);   // one thread per element
    cudaMemcpy(c,d_c,size*n,cudaMemcpyDeviceToHost);
    for(i=0;i<n;i++)
        printf("%d\t",c[i]);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
13,432 | #include "snippet_extractor.cuh"
/**
 * @brief Replace the buffered samples, crossings, and frame offset.
 * @param samples New sample data (moved from; the caller's vector is left
 *                in a valid but unspecified state).
 * @param crossings Threshold-crossing flags (moved from).
 * @param frame_offset Frame offset associated with the new data.
 */
template<class T>
void
SnippetExtractor<T>::Update(std::vector<T> &samples,
std::vector<uint8_t> &crossings,
uint64_t frame_offset) {
samples_ = std::move(samples);
crossings_ = std::move(crossings);
frame_offset_ = frame_offset;
}
/**
 * @brief Extract snippets from the underlying data.
 *
 * Scans every active site for threshold crossings and, for each crossing,
 * copies a window of n_before + n_after + 1 frames from the site's nearest
 * neighbors into a Snippet, row-major (one row per neighbor channel).
 * Produces nothing unless enough frames are buffered and crossings_ and
 * samples_ have matching sizes.
 *
 * @return Vector of snippets.
 */
template<class T>
std::vector<Snippet> SnippetExtractor<T>::ExtractSnippets() {
std::vector<Snippet> snippets;
auto n_sites_snippet = params_.extract.n_sites;
auto n_frames_snippet = params_.extract.n_frames(probe_.sample_rate());
if (n_frames() >= n_frames_snippet && crossings_.size() == samples_.size()) {
auto n_before = params_.extract.n_frames_before(probe_.sample_rate());
auto n_after = params_.extract.n_frames_after(probe_.sample_rate());
std::vector<float> snippet_buf;   // reused scratch for each snippet
for (auto site_idx = 0; site_idx < probe_.n_active(); ++site_idx) {
auto chan_idx = probe_.chan_index(site_idx);
auto neighbors = probe_.NearestNeighbors(site_idx, n_sites_snippet);
// check all crossings on this channel; stay n_before/n_after away from
// the buffer edges so the full window is always available
for (auto frame = n_before; frame < n_frames() - n_after; ++frame) {
// samples/crossings are channel-interleaved: frame*n_total + channel
auto k = frame * probe_.n_total() + chan_idx;
if (!crossings_.at(k)) {
continue;
}
// found a crossing -- extract snippet in row-major order
for (auto &neighbor : neighbors) {
auto neighbor_chan_idx = probe_.chan_index(neighbor);
for (auto f = frame - n_before; f < frame + n_after + 1; ++f) {
k = f * probe_.n_total() + neighbor_chan_idx;
snippet_buf.push_back(samples_.at(k));
}
}
Snippet snippet(snippet_buf, n_frames_snippet);
snippet_buf.clear();
snippet.set_channel_ids(neighbors);
// TODO: extract spike time, not just frame offset
// e.g., frame_offset_ + k
snippet.set_frame_offset(frame_offset_);
snippets.push_back(snippet);
}
}
}
return snippets;
}
// Explicit template instantiations for the supported sample types.
template
class SnippetExtractor<short>;
template
class SnippetExtractor<float>;
13,433 | #include <stdio.h>
#include <cuda.h>
#define NUM_THREADS 1000000
#define BLOCK_DIM 1000
#define ARRAY_SIZE 10
#define USE_ATOMICS true
// Each thread increments slot (global id mod ARRAY_SIZE) WITHOUT atomics:
// concurrent read-modify-writes to the same slot race and lose updates.
__global__ void naiveAddKernel(float* d_arr)
{
    const int slot = (blockIdx.x * blockDim.x + threadIdx.x) % ARRAY_SIZE;
    d_arr[slot] += 1;
}
// Same slot mapping as naiveAddKernel, but the increment is atomic, so
// every contribution is counted.
__global__ void atomicAddKernel(float* d_arr)
{
    const int slot = (blockIdx.x * blockDim.x + threadIdx.x) % ARRAY_SIZE;
    atomicAdd(&d_arr[slot], 1);
}
// Time the naive vs. atomic increment kernels and print the counters.
//
// Fixes vs. the original:
//  * h_arr was never allocated -- the final cudaMemcpy wrote through an
//    uninitialized host pointer.
//  * cudaMemset was given &d_arr (address of the pointer) and only
//    sizeof(float) bytes; the whole device array is now zeroed.
//  * the stop event was never recorded, so the elapsed-time query waited
//    on an unrecorded event.
//  * h_arr came from malloc and must be released with free(), not cudaFree.
int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Initialize the array
    float *d_arr, *h_arr;
    h_arr = (float *)malloc(ARRAY_SIZE * sizeof(float));
    cudaMalloc((void **) &d_arr, ARRAY_SIZE * sizeof(float));
    cudaMemset(d_arr, 0, ARRAY_SIZE * sizeof(float));
    cudaEventRecord(start);
#if USE_ATOMICS
    atomicAddKernel<<<NUM_THREADS / BLOCK_DIM, BLOCK_DIM>>>(d_arr);
#else
    naiveAddKernel<<<NUM_THREADS / BLOCK_DIM, BLOCK_DIM>>>(d_arr);
#endif
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel execution time: %f ms\n", milliseconds);
    // Copy back the results
    cudaMemcpy(h_arr, d_arr, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("%f", h_arr[i]);
        printf ((i % 4 == 3)? "\n" : "\t");
    }
    free(h_arr);
    cudaFree(d_arr);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
// Tree-reduce each block's segment of d_in in place (d_in is clobbered);
// thread 0 then writes the block's sum to d_out[blockIdx.x].
// NOTE: the halving schedule presumably requires a power-of-two blockDim.x
// to cover every element -- confirm at the launch site.
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    const int lane = threadIdx.x;
    for (unsigned int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (lane < half) {
            d_in[gid] += d_in[gid + half];
        }
        __syncthreads();
    }
    // lane 0 holds the block total after the final pass
    if (lane == 0) {
        d_out[blockIdx.x] = d_in[gid];
    }
}
// Block-level tree reduction in shared memory; thread 0 writes the block's
// partial sum to d_out[blockIdx.x]. Launch with blockDim.x * sizeof(float)
// bytes of dynamic shared memory (the kernel call's third argument).
//
// Fix vs. the original: the reduction step indexed shared memory with the
// *global* id (sdata[myId]) and re-read global memory (d_in[myId + s]);
// the tree must combine shared partials via the *local* thread id.
__global__ void shared_reduce_kernel(float * d_out, float * d_in) {
    extern __shared__ float sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // load shared from global
    sdata[tid] = d_in[myId];
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // thread 0 outputs
    if (tid == 0) {
        d_out[blockIdx.x] = sdata[0];
    }
}
|
13,435 | #include <stdio.h>
// For the CUDA runtime library/routines (prefixed with "cuda_") - must include this file
#include <cuda_runtime.h>
/* CUDA Kernel Device code
 * Adds 10 to each of the first `length` elements of i, one per thread. */
__global__ void kernelTest(int* i, int length){
    const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if(tid < length)
        i[tid] += 10;
}
/* This is the main routine which declares and initializes the integer
 * vector, moves it to the device, launches the kernel, brings the result
 * back to the host and dumps it on the console.
 *
 * Fix vs. the original: the launch configuration was reversed
 * (kernelTest<<<threads,blocks>>>). The first chevron argument is the
 * grid (number of blocks), the second the block size; the reversed form
 * only produced correct output by accident of the chosen sizes. */
int main(){
    //allocate and fill the host vector with values 0..length-1
    int length = 100;
    int* i = (int*)malloc(length*sizeof(int));
    for(int x=0;x<length;x++)
        i[x] = x;
    //allocate the device vector and copy the host data over
    int* i_d;
    cudaMalloc((void**)&i_d,length*sizeof(int));
    cudaMemcpy(i_d, i, length*sizeof(int), cudaMemcpyHostToDevice);
    //enough 256-thread blocks to cover length elements
    dim3 threads; threads.x = 256;
    dim3 blocks; blocks.x = (length/threads.x) + 1;
    kernelTest<<<blocks,threads>>>(i_d,length);
    //copy the results back and print them
    cudaMemcpy(i, i_d, length*sizeof(int), cudaMemcpyDeviceToHost);
    for(int x=0;x<length;x++)
        printf("%d\t",i[x]);
    //free memory for both CPU and GPU variables/pointers
    free (i); cudaFree (i_d);
    system("pause");
    cudaDeviceReset();
    return 0;
}
|
13,436 | /*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#define sizeOfArray 1024*1024 * 15
constexpr int arraySizePerCall = 1024 * 1024;
static_assert (sizeOfArray% arraySizePerCall == 0, "stream size must divide evenly into the total size");
// Add one chunk of device_a and device_b into device_result: threads are
// offset by iteration * arraySizePerCall so launch `iteration` selects
// which slice is processed.
__global__ void arrayAddition(int *device_a, int *device_b, int *device_result, int iteration)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x
        + iteration * arraySizePerCall;
    if (idx < sizeOfArray)
        device_result[idx] = device_a[idx] + device_b[idx];
}
/* Check for safe return of all calls to the device */
// Stream-based chunked vector addition with host verification.
//
// Fixes vs. the original:
//  * cudaGetDeviceProperties was given the device COUNT returned by
//    cudaGetDeviceCount as a device index (always out of range); device 0
//    is queried instead.
//  * each launch used <<<sizeOfArray, 1, 1, stream>>>: 15M single-thread
//    blocks (plus a stray 1 byte of dynamic shared memory) re-adding the
//    whole remaining array every iteration; one thread per element of the
//    current chunk suffices.
//  * the ERROR printf had four format specifiers but three arguments.
//  * the events and the stream are now destroyed.
int main ( int argc, char **argv )
{
    cudaDeviceProp prop;
    int *host_a, *host_b, *host_result;
    int *device_a, *device_b, *device_result;
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    cudaGetDeviceProperties(&prop, 0);
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMalloc( ( void**)& device_a, sizeOfArray * sizeof ( *device_a ) );
    cudaMalloc( ( void**)& device_b, sizeOfArray * sizeof ( *device_b ) );
    cudaMalloc( ( void**)& device_result, sizeOfArray * sizeof ( *device_result ) );
    /* Pinned host memory so the async copies can truly overlap. */
    cudaHostAlloc((void **)&host_a, sizeOfArray*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&host_b, sizeOfArray*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&host_result, sizeOfArray*sizeof(int), cudaHostAllocDefault);
    for(int index = 0; index < sizeOfArray; index++)
    {
        host_a[index] = rand()%10;
        host_b[index] = rand()%10;
    }
    cudaEventRecord(start);
    cudaMemcpyAsync(device_a, host_a, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(device_b, host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, stream);
    /* One kernel + one copy-back per chunk, all on one stream so ordering
     * is preserved. */
    int totalIterations = sizeOfArray / arraySizePerCall;
    for (int iteration = 0; iteration < totalIterations; iteration++) {
        arrayAddition <<<arraySizePerCall / 256, 256, 0, stream >>> (device_a, device_b, device_result, iteration);
        int ptrOffset = iteration * arraySizePerCall;
        cudaMemcpyAsync(host_result + ptrOffset, device_result + ptrOffset, arraySizePerCall * sizeof(int), cudaMemcpyDeviceToHost, stream);
    }
    cudaStreamSynchronize(stream);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("*********** CDAC - Tech Workshop : hyPACK-2013 \n");
    printf("\n Size of array : %d \n", sizeOfArray);
    printf("\n Time taken: %3.1f ms \n", elapsedTime);
    for (int i = 0; i < sizeOfArray; i++) {
        if (host_a[i] + host_b[i] != host_result[i]) {
            printf("ERROR(%d): %d + %d = %d\n", i, host_a[i], host_b[i], host_result[i]);
        }
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    cudaFreeHost(host_result);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_result);
    return 0;
}
|
13,437 | /**
* jrc3_cuda_rho.cu
* block loading rho calculation. should be much faster
* system('nvcc -ptx -m 64 -arch sm_35 jrc3_cuda_rho.cu')
* i1 is multiple of chunk (16)
* J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
* 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
*/
#include <cuda_runtime.h>
// #include "cublas_v2.h"
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define NC 45 //max dimm
#define CHUNK 16
#define SINGLE_INF (3.402E+38) // equipvalent to NAN. consider -1 value
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* Step through one B at a time
*/
// Compute rho for CHUNK primary spikes per block: for each primary spike,
// the fraction of comparison spikes within the temporal window (dn_max in
// spike order) whose squared feature distance is within the cutoff
// (global dc2, or a per-spike cutoff when fDc_spk == 1).
// vnConst = { n1, n12, nC, dn_max, fDc_spk }; features are nC x n12,
// column-major (one column per spike). Launch with NTHREADS threads.
//
// Fix vs. the original: the per-spike cutoff vrDc1_ was computed from the
// shared feature tile mrFet1_ with no barrier after the tile was loaded by
// a different set of threads (tx < nC loaders vs. tx < CHUNK readers) --
// a shared-memory race when fDc_spk == 1. A __syncthreads() now separates
// the load from the read.
__global__ void jrc3_cuda_rho(float * vrRho1, const float * mrFet12, const int * viiSpk12_ord, const int * vnConst, const float dc2){
    int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; // base spike index for this block
    int tx = threadIdx.x;
    int i1_tx = i1+tx;
    int n1 = vnConst[0];        // number of primary spikes
    int n12 = vnConst[1];       // number of comparison spikes
    int nC = vnConst[2];        // feature dimensionality (<= NC)
    int dn_max = vnConst[3];    // max spike-order distance to compare
    int fDc_spk = vnConst[4];   // 1: per-spike cutoff, 0: global dc2
    __shared__ int viiSpk1_ord_[CHUNK];
    __shared__ float mrFet1_[NC][CHUNK];
    __shared__ int mnRho1_[NTHREADS][CHUNK];  // per-thread counts, reduced later
    __shared__ int mnComp1_[NTHREADS][CHUNK]; // number of comparisons made
    __shared__ float vrDc1_[CHUNK];           // per-spike cutoff (fDc_spk==1)
    // cache this block's CHUNK feature columns (thread tx loads dim tx)
    if (tx < nC){
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            int i1_c = i_c + i1;
            if (i1_c < n1){
                mrFet1_[tx][i_c] = mrFet12[tx + i1_c * nC];
            }else{
                mrFet1_[tx][i_c] = 0.0f;  // pad past the end with zeros
            }
        }
    }
    if (tx < CHUNK && i1_tx < n1) viiSpk1_ord_[tx] = viiSpk12_ord[i1_tx];
    for (int i_c = 0; i_c < CHUNK; ++i_c){
        mnRho1_[tx][i_c] = 0;
        mnComp1_[tx][i_c] = 0;
    }
    // FIX: barrier so the vrDc1_ computation below sees the completed tile.
    __syncthreads();
    // spike-specific distance cutoff: scaled squared feature norm
    if (tx < CHUNK && fDc_spk==1){
        vrDc1_[tx] = 0.0f;
        for (int iC = 0; iC < nC; ++iC){
            float temp_ = mrFet1_[iC][tx];
            vrDc1_[tx] += (temp_ * temp_);
        }
        vrDc1_[tx] *= dc2;
    }
    __syncthreads();
    // each thread strides over the comparison spikes
    for (int i12_tx = tx; i12_tx < n12; i12_tx += blockDim.x){
        int iiSpk12_ord_tx = viiSpk12_ord[i12_tx];
        // squared feature distance to each of the CHUNK cached spikes
        float vrDist_c[CHUNK];
        for (int i_c = 0; i_c < CHUNK; ++i_c) vrDist_c[i_c] = 0.0f;
        for (int iC = 0; iC < nC; ++iC){
            float fet12_tx = mrFet12[iC + i12_tx * nC];
            for (int i_c = 0; i_c < CHUNK; ++i_c){
                float temp = fet12_tx - mrFet1_[iC][i_c];
                vrDist_c[i_c] += temp * temp;
            }
        }
        // count comparisons within the temporal window; bump rho when close
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            int di_spk_tx = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx);
            if (di_spk_tx <= dn_max){
                ++mnComp1_[tx][i_c];
                if (fDc_spk==0){
                    if (vrDist_c[i_c] <= dc2) ++mnRho1_[tx][i_c];
                }else{
                    if (vrDist_c[i_c] < vrDc1_[i_c]) ++mnRho1_[tx][i_c];
                }
            }
        }
    }
    // reduce the per-thread counters; the first CHUNK threads own one spike each
    __syncthreads();
    if (tx < CHUNK){
        int nRho1 = 0;
        int nComp1 = 0;
        for (int tx1=0; tx1<blockDim.x; ++tx1){
            nRho1 += mnRho1_[tx1][tx];
            nComp1 += mnComp1_[tx1][tx];
        }
        if (i1_tx < n1){
            vrRho1[i1_tx] = (float)(((double)(nRho1)) / ((double)nComp1));
        }
    }
}
13,438 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#define NUM_ELEMENTS 1<<20
#define BLOCK_SIZE 1024
#define CUDA_ERROR_CHECK(func) { gpuAssert((func), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; exits with the error
// code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess) {
        return;
    }
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Interruptible block-sum reduction: each block sums its BLOCK_SIZE-element
// slice of input into output[blockIdx.x], records completion in
// executedBlocks, and skips work when the host-raised timeout flag is set
// or the block already ran in an earlier launch.
// Launch with BLOCK_SIZE * sizeof(int) bytes of dynamic shared memory.
//
// Fix vs. the original: the linear block id folded blockIdx.z in with
// gridDim.z (bx + gx*(by + gz*bz)); the stride for the z term must be
// gridDim.y. Harmless for 1D/2D grids, wrong for 3D ones.
__global__ void reduce(volatile bool *timeout, bool *executedBlocks, int *input, int *output) {
    __shared__ unsigned int block_timeout;
    /* Calculate block ID in grid */
    unsigned long long int bid = blockIdx.x + gridDim.x *
        (blockIdx.y + gridDim.y * blockIdx.z);
    /* One thread snapshots the host-visible timeout flag for the block */
    if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
        block_timeout = *timeout;
    }
    /* Return if block was previously executed (uniform per block) */
    if(executedBlocks[bid]){
        return;
    }
    /* Prevent any warps from proceeding until timeout is copied */
    __syncthreads();
    /* Return if block_timeout is true */
    if(block_timeout){
        return;
    }
    /* Mark block as executed */
    executedBlocks[bid] = true;
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = input[i];
    __syncthreads();
    /* Interleaved-addressing tree reduction within the block */
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0){
        output[blockIdx.x] = sdata[0];
    }
}
// Sum NUM_ELEMENTS ones with the interruptible reduce kernel, repeatedly
// relaunching until every block reports completion, then verify on host.
//
// Fix vs. the original: usleep takes an integer microsecond count, so
// usleep(0.001) truncated to usleep(0) and never actually slept before
// raising the timeout flag; 1000 us (1 ms) matches the presumably intended
// 0.001 seconds.
int main(){
    size_t elems = NUM_ELEMENTS;
    size_t grid_size = (size_t)(ceill((long double)elems/(long double)BLOCK_SIZE));
    size_t input_size = elems * sizeof(int);
    size_t output_size = grid_size * sizeof(int);
    int *deviceInput = NULL;
    int *deviceOutput = NULL;
    int *hostInput = NULL;
    int *hostOutput = NULL;
    hostInput = (int *)malloc(input_size);
    hostOutput = (int *)malloc(output_size);
    if(hostInput == NULL){
        fprintf(stderr, "Failed to allocate %zu bytes for input!\n", input_size);
        exit(EXIT_FAILURE);
    }
    if(hostOutput == NULL){
        fprintf(stderr, "Failed to allocate %zu bytes for output!\n", output_size);
        exit(EXIT_FAILURE);
    }
    CUDA_ERROR_CHECK(cudaMalloc((void **)&deviceInput, input_size));
    CUDA_ERROR_CHECK(cudaMalloc((void **)&deviceOutput, output_size));
    size_t i = 0;
    for(i = 0; i < elems; i++){
        hostInput[i] = 1;   // all ones -> expected sum == NUM_ELEMENTS
    }
    /* Managed allocations visible to host and device: the host flips
     * *timeout to interrupt the kernel between launches. */
    volatile bool *timeout = NULL;
    bool complete = false;
    bool *executedBlocks = NULL;
    cudaMallocManaged((void **)&timeout, sizeof(volatile bool), cudaMemAttachGlobal);
    cudaMallocManaged((void **)&executedBlocks, grid_size * sizeof(bool), cudaMemAttachGlobal);
    memset(executedBlocks, 0, grid_size * sizeof(bool));
    *timeout = false;
    size_t interrupt_count = 0;
    CUDA_ERROR_CHECK(cudaMemcpy(deviceInput, hostInput, input_size, cudaMemcpyHostToDevice));
    /* Relaunch until all blocks completed: let the kernel run briefly,
     * raise the timeout, sync, then check the completion bitmap. */
    while(!complete){
        reduce<<<grid_size, BLOCK_SIZE, BLOCK_SIZE*sizeof(int)>>>(timeout, executedBlocks, deviceInput, deviceOutput);
        CUDA_ERROR_CHECK(cudaPeekAtLastError());
        usleep(1000);   /* FIX: was usleep(0.001), which truncated to 0 */
        *timeout = true;
        CUDA_ERROR_CHECK(cudaDeviceSynchronize());
        /* Check if kernel is complete */
        size_t i = 0;
        for(i = 0; i < grid_size; i++){
            if(executedBlocks[i] == false){
                break;
            }
        }
        interrupt_count++;
        if(i == grid_size){
            complete = true;
        }else{
            *timeout = false;
        }
    }
    fprintf(stdout, "Interrupt count: %zu\n", interrupt_count);
    CUDA_ERROR_CHECK(cudaMemcpy(hostOutput, deviceOutput, output_size, cudaMemcpyDeviceToHost));
    /* Fold the per-block partial sums into hostOutput[0]. */
    for(i = 1; i < grid_size; i++){
        hostOutput[0] += hostOutput[i];
    }
    fprintf(stdout, "Result: ");
    if(hostOutput[0] == NUM_ELEMENTS){
        fprintf(stdout, "PASS\n");
    }else{
        fprintf(stderr, "FAIL\n");
    }
    fprintf(stdout, "Sum = %d\n", hostOutput[0]);
    free(hostInput);
    free(hostOutput);
    CUDA_ERROR_CHECK(cudaFree(deviceInput));
    CUDA_ERROR_CHECK(cudaFree(deviceOutput));
    CUDA_ERROR_CHECK(cudaFree(executedBlocks));
    CUDA_ERROR_CHECK(cudaFree((void *)timeout));
    CUDA_ERROR_CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
13,439 | #include <iostream>
#include <math.h>
#include <ctime>
#include <cstdlib>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
// Summary statistics for a sample of doubles.
struct stats {
    double mean;     // arithmetic mean
    double min;      // smallest element
    double max;      // largest element
    double stddev;   // population standard deviation (divides by n)
};
// Arithmetic mean of the n doubles in x (n must be > 0).
double cpu_get_mean(int n, double *x) {
    double total = 0;
    for (int idx = 0; idx < n; idx++)
        total += x[idx];
    return total / n;
}
// Population standard deviation (divides by n) of the n doubles in x,
// using Welford's numerically stable single-pass update.
double cpu_get_stddev(int n, double *x){
    double running_mean = x[0];
    double m2 = 0;   // sum of squared deviations, updated incrementally
    for (int i = 1; i < n; i++){
        const double d_before = x[i] - running_mean;
        running_mean += d_before / (i + 1);
        const double d_after = x[i] - running_mean;
        m2 += d_before * d_after;
    }
    return sqrt(m2 / n);
}
// Largest of the n doubles in x (n must be > 0).
double cpu_get_max(int n, double *x) {
    double best = x[0];
    for (int i = 1; i < n; i++) {
        if (best < x[i])
            best = x[i];
    }
    return best;
}
// Smallest of the n doubles in x (n must be > 0).
double cpu_get_min(int n, double *x) {
    double best = x[0];
    for (int i = 1; i < n; i++) {
        if (x[i] < best)
            best = x[i];
    }
    return best;
}
// CPU reference: fused min/mean/max/population-stddev over x[0..n) in a
// single pass (Welford's algorithm for the variance part).
stats cpu_get_all(int n, double *x){
    stats out;
    double running_mean = x[0];
    double lo = x[0];
    double hi = x[0];
    double sum_sq = 0; // running M2 (sum of squared deviations)
    for (int i = 1; i < n; i++){
        if (hi < x[i]) hi = x[i];
        if (x[i] < lo) lo = x[i];
        double d_before = x[i] - running_mean;
        running_mean += d_before/(i+1);
        double d_after = x[i] - running_mean;
        sum_sq += d_before * d_after;
    }
    out.mean = running_mean;
    out.min = lo;
    out.max = hi;
    out.stddev = sqrt(sum_sq/n);
    return out;
}
// Kernel: grid-stride partial max. Each thread reduces its stride-spaced
// slice of x[0..n) and writes the partial to results[global thread id];
// the host then reduces the gridDim.x*blockDim.x partials.
// Fix: added bounds guard — the unconditional seed read x[index] was out
// of bounds whenever the launch had more threads than elements. Excess
// threads now write nothing (their results slot stays untouched).
__global__ void get_gpu_max(int n, double *x, double *results) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) return; // oversized-launch guard
    int stride = blockDim.x * gridDim.x;
    double max = x[index];
    for (int i = index + stride; i < n; i += stride) {
        max = (max < x[i]) ? x[i] : max;
    }
    results[index] = max;
}
// Kernel: grid-stride partial min. Each thread reduces its stride-spaced
// slice of x[0..n) and writes the partial to results[global thread id];
// the host then reduces the gridDim.x*blockDim.x partials.
// Fix: added bounds guard — the unconditional seed read x[index] was out
// of bounds whenever the launch had more threads than elements.
__global__ void get_gpu_min(int n, double *x, double *results) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) return; // oversized-launch guard
    int stride = blockDim.x * gridDim.x;
    double min = x[index];
    for (int i = index + stride; i < n; i += stride) {
        min = (x[i] < min) ? x[i] : min;
    }
    results[index] = min;
}
// Kernel: grid-stride partial mean. Each thread keeps an incremental mean
// of its stride-spaced slice of x[0..n) and stores it in results[thread id].
// The host averages the partials directly, which is exact when n is a
// multiple of the total thread count (the caller enforces this).
// Fix: added bounds guard — the unconditional seed read x[index] was out
// of bounds whenever the launch had more threads than elements.
__global__ void get_gpu_mean(int n, double *x, double *results) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) return; // oversized-launch guard
    int stride = blockDim.x * gridDim.x;
    double mean = x[index];
    int count = 1;
    for (int i = index + stride; i < n; i += stride){
        count++;
        mean += (x[i] - mean)/count;
    }
    results[index] = mean;
}
// Kernel: grid-stride partial variance accumulator (Welford). Each thread
// writes its partial M2 (sum of squared deviations about its slice mean)
// to results[thread id] — NOT a standard deviation; the host sums the
// partials and takes sqrt(sum/n).
// Fix: added bounds guard — the unconditional seed read x[index] was out
// of bounds whenever the launch had more threads than elements.
__global__ void get_gpu_stddev(int n, double *x, double *results){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) return; // oversized-launch guard
    int stride = blockDim.x * gridDim.x;
    double mean = x[index];
    double m2 = 0;
    double delta;
    double delta2;
    int count = 1;
    for (int i = index + stride; i < n; i += stride){
        count++;
        delta = x[i] - mean;
        mean += delta/count;
        delta2 = x[i] - mean;
        m2 += delta * delta2;
    }
    results[index] = m2; // partial M2, not stddev
}
// Kernel: fused grid-stride partials — per-thread running mean, min, max
// and M2 over its stride-spaced slice of x[0..n). The .stddev field of
// each partial holds raw M2 (sum of squared deviations), NOT a stddev;
// the host merges partials pairwise (parallel-variance combine).
// Fix: added bounds guard — the unconditional seed read x[index] was out
// of bounds whenever the launch had more threads than elements.
__global__ void get_gpu_all(int n, double *x, stats *all_results){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) return; // oversized-launch guard
    int stride = blockDim.x * gridDim.x;
    double mean = x[index];
    double min = x[index];
    double max = x[index];
    double m2 = 0;
    double delta;
    double delta2;
    int count = 1;
    for (int i = index + stride; i < n; i += stride){
        max = (max < x[i]) ? x[i] : max;
        min = (x[i] < min) ? x[i] : min;
        count++;
        delta = x[i] - mean;
        mean += delta/count;
        delta2 = x[i] - mean;
        m2 += delta * delta2;
    }
    all_results[index].mean = mean;
    all_results[index].min = min;
    all_results[index].max = max;
    all_results[index].stddev = m2; // m2 not actually std dev
}
// Print the signed relative difference of y versus x, as a percent of x.
void print_diff(double x, double y){
    double pct = 100*(y - x)/x;
    cout << "Difference: " << pct << "%\n";
}
// Benchmark CPU vs GPU implementations of max/min/mean/stddev (separately
// and fused) over N random doubles in [0,1), for one launch configuration.
// Emits CSV rows on stdout: N,blocks,threads,stat,device,nanoseconds.
// GPU timings include the host-side reduction of the per-thread partials.
// NOTE(review): CUDA API return codes are not checked here.
void run_tests(int N_pre, int N_BLOCKS, int THREADS_PER_BLK) {
// We need N to be a multiple of N_THREADS
// (floor() is redundant on the integer division but harmless)
int N = N_BLOCKS * THREADS_PER_BLK * floor(N_pre / (THREADS_PER_BLK * N_BLOCKS));
/**
cout << "N = " << N << endl;
cout << "N_BLOCKS = " << N_BLOCKS << endl;
cout << "THREADS_PER_BLK = " << THREADS_PER_BLK << endl;
cout << "Allocating memory and initializing...";
**/
// Unified (managed) memory so both host loops and kernels touch the same x
double *x;
cudaMallocManaged(&x, N*sizeof(double));
srand(time(NULL));
for (int i = 0; i < N; i++) {
x[i] = ((double) rand()) / ((double) RAND_MAX);
}
// One partial-result slot per launched thread
double *results;
cudaMallocManaged(&results, N_BLOCKS*THREADS_PER_BLK*sizeof(double));
// use CPU to calculate max
auto start = std::chrono::high_resolution_clock::now();
double cpu_max = cpu_get_max(N, x);
auto end = std::chrono::high_resolution_clock::now();
auto dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
// cout << "CPU calculated max:" << fixed << cpu_max << endl;
// fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,max,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
// use GPU to calculate max
start = std::chrono::high_resolution_clock::now();
get_gpu_max<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results);
cudaDeviceSynchronize();
// host-side reduction of the per-thread partial maxima
double gpu_max = results[0];
for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) {
gpu_max = (gpu_max < results[i]) ? results[i] : gpu_max;
}
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "GPU calculated max:" << fixed << gpu_max << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,max,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
//print_diff(cpu_max, gpu_max);
//cout << endl;
// use CPU to calculate min
start = std::chrono::high_resolution_clock::now();
double cpu_min = cpu_get_min(N, x);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "CPU calculated min:" << fixed << cpu_min << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,min,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
// use GPU to calculate min
start = std::chrono::high_resolution_clock::now();
get_gpu_min<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results);
cudaDeviceSynchronize();
double gpu_min = results[0];
for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) {
gpu_min = (results[i] < gpu_min) ? results[i] : gpu_min;
}
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "GPU calculated min:" << fixed << gpu_min << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,min,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
//print_diff(cpu_min, gpu_min);
//cout << endl;
// use CPU to calculate mean
start = std::chrono::high_resolution_clock::now();
double cpu_mean = cpu_get_mean(N, x);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "CPU calculated mean:" << fixed << cpu_mean << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,avg,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
// use GPU to calculate mean
start = std::chrono::high_resolution_clock::now();
get_gpu_mean<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results);
cudaDeviceSynchronize();
// averaging the per-thread means is exact because every thread owns the
// same number of elements (N is a multiple of the thread count)
double gpu_mean_sum = 0;
for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) {
gpu_mean_sum += results[i];
}
double gpu_mean = gpu_mean_sum/(N_BLOCKS*THREADS_PER_BLK);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "GPU calculated mean:" << fixed << gpu_mean << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,avg,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
//print_diff(cpu_mean, gpu_mean);
//cout << endl;
// use CPU to calculate std dev
start = std::chrono::high_resolution_clock::now();
double cpu_stddev = cpu_get_stddev(N, x);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "CPU calculated std dev:" << fixed << cpu_stddev << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,dev,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
// use GPU to calculate std dev
start = std::chrono::high_resolution_clock::now();
get_gpu_stddev<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, results);
cudaDeviceSynchronize();
// the kernel returns partial M2 values; sum them and normalize here
double gpu_m2 = 0;
for (int i = 0; i < N_BLOCKS*THREADS_PER_BLK; i++) {
gpu_m2 += results[i];
}
double gpu_stddev = sqrt(gpu_m2/N);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
//cout << "GPU calculated std dev:" << fixed << gpu_stddev << endl;
//fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
fprintf(stdout,"%d,%d,%d,dev,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
/**
print_diff(cpu_stddev, gpu_stddev);
cout << endl;
**/
// use CPU to calculate all stats
start = std::chrono::high_resolution_clock::now();
stats my_stats = cpu_get_all(N, x);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
/**
cout << "Concurrent: CPU calculated max:" << fixed << my_stats.max << endl;
cout << "Concurrent: CPU calculated min:" << fixed << my_stats.min << endl;
cout << "Concurrent: CPU calculated mean:" << fixed << my_stats.mean << endl;
cout << "Concurrent: CPU calculated std dev:" << fixed << my_stats.stddev << endl;
fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
**/
fprintf(stdout,"%d,%d,%d,all,cpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
cudaFree(results);
// use GPU to calculate all stats
stats* all_results;
cudaMallocManaged(&all_results, N_BLOCKS*THREADS_PER_BLK*sizeof(stats));
// start the timer
start = std::chrono::high_resolution_clock::now();
// run calculations on the GPU
get_gpu_all<<<N_BLOCKS, THREADS_PER_BLK>>>(N, x, all_results);
// synchrnonize
cudaDeviceSynchronize();
// We now need to accumulate results from all threads
// (pairwise parallel-variance combine: merge each partial's mean/M2 into
// the running aggregate; n_a grows as partials are absorbed)
double m2 = all_results[0].stddev;
double mean = all_results[0].mean;
double delta;
double new_mean;
int n_a = N / (N_BLOCKS*THREADS_PER_BLK);
int n_b = n_a;
double max = all_results[0].max;
double min = all_results[0].min;
for (int i = 1; i < N_BLOCKS*THREADS_PER_BLK; i++) {
new_mean = all_results[i].mean;
delta = new_mean - mean;
// we update our running mean value
mean = (n_a*mean + n_b*new_mean)/(n_a + n_b);
m2 += all_results[i].stddev + delta * delta * n_a * n_b / (n_a + n_b);
n_a += n_b;
min = (all_results[i].min < min) ? all_results[i].min : min;
max = (all_results[i].max > max) ? all_results[i].max : max;
}
double stddev = sqrt(m2/N);
end = std::chrono::high_resolution_clock::now();
dur_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end-start);
/**
cout << "Concurrent: GPU calculated max:" << fixed << max << endl;
cout << "Concurrent: GPU calculated min:" << fixed << min << endl;
cout << "Concurrent: GPU calculated mean:" << fixed << mean << endl;
cout << "Concurrent: GPU calculated std dev:" << fixed << stddev << endl;
fprintf(stdout, "Elapsed time %lld ns\n", dur_ns.count());
**/
fprintf(stdout,"%d,%d,%d,all,gpu,%lld\n",N,N_BLOCKS,THREADS_PER_BLK,dur_ns.count());
// Free memory
cudaFree(x);
cudaFree(all_results);
}
// Sweep problem sizes and launch configurations, emitting one CSV row per
// measurement (see run_tests for the column layout).
int main(void) {
    // We want to display floats with max precision
    cout.precision(17);
    int sizes[] = {50000000,100000000,150000000,200000000};
    int threadCounts[] = {128, 256, 512, 1024};
    int blockCounts[] = {1, 4, 16, 28};
    for (int n : sizes) {
        for (int tpb : threadCounts) {
            for (int nb : blockCounts) {
                run_tests(n, nb, tpb);
            }
        }
    }
}
|
13,440 | #include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<cmath>
//#include<time.h>
//#include<string.h>
// Abort with file/line and the CUDA error string if a runtime call fails.
// Fix: removed the stray trailing '\' after the closing brace — the line
// continuation spliced the next source line into the macro definition.
#define CHECK(call){ \
const cudaError_t error = call; \
if (error!=cudaSuccess) { \
printf("Error:%s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Print an ny-row by nx-column integer matrix stored row-major in C,
// preceded by a "Matrix: (nx.ny)" header line.
void printMatrix(int *C, const int nx, const int ny) {
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    for (int row = 0; row < ny; row++) {
        for (int col = 0; col < nx; col++) {
            printf("%3d", C[row * nx + col]);
        }
        printf("\n");
    }
    printf("\n");
    return;
}
// Debug kernel: each thread prints its thread/block ids, its (ix,iy)
// matrix coordinate and the value it owns in A (row-major, nx columns).
// Assumes the grid exactly covers nx*ny elements (no bounds check).
// NOTE(review): idx is unsigned but printed with %2d; %u would be the
// matching conversion.
__global__ void printThreadIndex(int *A, const int nx, const int ny) {
int ix = threadIdx.x+blockIdx.x*blockDim.x;
int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
printf("thread_id(%d,%d) block_id(%d,%d) coordinate(%d %d) global index %2d ival %2d\n",
threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}
// Demo: initialize an 8x6 int matrix on the host, copy it to the device,
// and have each GPU thread print its indices plus the element it owns.
// Fix: the buffer holds ints, so size it with sizeof(int) — the original
// used sizeof(float), which only worked because the sizes happen to match.
int main(int argc, char **argv) {
    printf("%s Starting... \n", argv[0]);
    // Query and report the device in use
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // Set matrix dimensions
    int nx = 8;
    int ny = 6;
    int nxy = nx*ny;
    int nBytes = nxy*sizeof(int); // was sizeof(float): type mismatch with int buffer
    // Allocate host memory
    int *h_A;
    h_A = (int *)malloc(nBytes);
    // Initialize the host matrix with consecutive integers
    for (int i = 0; i<nxy; i++) {
        h_A[i] = i;
    }
    printMatrix(h_A, nx, ny);
    // Allocate device memory
    int *d_MatA;
    CHECK(cudaMalloc((void **)&d_MatA,nBytes));
    // Transfer data from host to device
    CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
    // Set up the execution configuration (grid exactly covers nx x ny)
    dim3 block(4, 2);
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
    // Launch the kernel
    printThreadIndex<<<grid, block>>>(d_MatA, nx, ny);
    CHECK(cudaDeviceSynchronize());
    // Release host and device memory
    CHECK(cudaFree(d_MatA));
    free(h_A);
    // Reset the device
    CHECK(cudaDeviceReset());
    return 0;
}
|
13,441 | #include "includes.h"
// Element-wise A[i] = exp(-Z[i]) for i in [0, size), using the fast
// device exponential intrinsic. One thread per element.
__global__ void expKernel(float* Z, float* A, int size){
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if(gid >= size) return; // tail guard for partial final block
    A[gid] = __expf(-Z[gid]);
}
13,442 | #include "includes.h"
// Tree-reduce the per-sample output-weight gradients across threadIdx.y
// (in global memory), apply one gradient-descent step to weights_out_d on
// the tiy==0 row, then clear the gradient buffer for the next batch.
// NOTE(review): OUTPUTS, H_HEIGHT, alpha_d and true_sample are defined
// elsewhere in the project; the `bit` parameter is unused here. Appears to
// assume a single block with blockDim.y <= 64 (first reduction step uses
// s=32) — TODO confirm against the launch site.
__global__ void update_wout(double * weights_out_d, double *weights_out_delta_d, int bit){
//__shared__ double weights_out_delta_ds[10 * 55];
int tix = threadIdx.x;
int tiy = threadIdx.y;
int offset = OUTPUTS * H_HEIGHT;
//weights_out_delta_ds[tiy*offset+tix] = weights_out_delta_d[tiy*offset+tix];
// halving tree reduction over the y dimension; the barrier is outside the
// conditional so all threads reach it each iteration
for(int s=32; s > 0; s>>=1){
//int index = 2 * s * tiy;
if(tiy < s && (tiy+s) < blockDim.y)
weights_out_delta_d[tiy*offset+tix] += weights_out_delta_d[(tiy+s)*offset+tix];
__syncthreads();
}
// row 0 holds the full sum; apply the (normalized) update
if(tiy == 0){
weights_out_d[tix] -= (alpha_d * weights_out_delta_d[tix] / (true_sample*55.0));
}
// barrier before zeroing so the update above reads the reduced values first
__syncthreads();
weights_out_delta_d[tiy*offset+tix] = 0.0;
}
13,443 | #include "includes.h"
#define THREADS_PER_BLOCK 256
#define CHUNK_SIZE 16
// flag if the prng has been seeded
int randNotSeeded = 1;
// In-place ascending insertion sort of the half-open range array[a..b);
// used as the per-chunk sorter for gpu_sort.
__device__ void insertionSort(int *array, int a, int b)
{
    for (int pos = a + 1; pos < b; pos++)
    {
        int key = array[pos];
        int slot = pos - 1;
        // Shift every element >= key one place right, then drop key in.
        while (slot >= a && array[slot] >= key)
        {
            array[slot + 1] = array[slot];
            slot--;
        }
        array[slot + 1] = key;
    }
}
// Each thread insertion-sorts one contiguous chunk [lo, hi) of d_array;
// the last chunk is truncated at size. Chunks past the end do nothing.
__global__ void gpu_sort(int *d_array, int size, int chunkSize)
{
    int lo = (threadIdx.x + blockDim.x * blockIdx.x) * chunkSize;
    if (lo < size)
    {
        int hi = lo + chunkSize;
        insertionSort(d_array, lo, hi > size ? size : hi);
    }
}
13,444 | #include <cuda.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "test_new"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 48
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 64
#define YDIM 64
#define ZDIM 8
#define TMAX 1000
#define STARTF 0
#define OBSTR1 4.f
#define OBSTX1 23.5f
#define OBSTY1 23.5f
#define OBSTZ1 32.5f
#define OBSTR2 32.f
#define OBSTX2 319.5f
#define OBSTY2 511.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 11.75f //minimum x coord of LR
#define XLRDIM 48 //number of nodes in x
#define LRY0 11.75f
#define YLRDIM 64
#define LRZ0 -0.25f
#define ZLRDIM 8
#define RE 20.f//2000.f//100.f;
#define UMAX 0.08f
#define SmagLES 0 //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define CS 0.1f
#define VELAV 0
#define START_VELAV 400000
#define START_VELFLUC 700000
// Geometry flag at continuous position (x,y,z): returns 10 inside the
// square obstacle centred at (OBSTX1,OBSTY1) with half-width OBSTR1,
// otherwise 0 (fluid).
// Fix: use fabsf instead of abs on float operands so the comparison can
// never be routed through the integer abs overload (which would truncate).
inline __device__ int ImageFcn(float x, float y, float z){
    int value = 0;
    // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
    //   value = 10;
    // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
    //   value = 10;
    if(fabsf(x-OBSTX1) < OBSTR1 && fabsf(y-OBSTY1) < OBSTR1)
        value = 10;
    return value;
}
// Geometry/boundary flag at lattice node (x,y,z): 10 inside the square
// obstacle, otherwise a boundary-condition code on the domain faces
// (y=0 -> 200, y=YDIM-1 -> 100, x=0 -> 26, x=XDIM-1 -> 25), 0 for fluid.
// Fix: use fabsf instead of abs — the operands promote to float (OBSTX1 is
// a float), so the integer abs overload must not be selected.
inline __device__ int ImageFcn(int x, int y, int z){
    int value = 0;
    //Cylinder
    // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
    //   value = 10;
    // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
    //   value = 10;
    if(fabsf(x-OBSTX1) < OBSTR1 && fabsf(y-OBSTY1) < OBSTR1)
        value = 10;
    //Lid Driven Cavity
    // if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
    //   value = 1;
    // else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
    //   return 1;
    // else if(x == 0)
    //   return 1;
    else if(y == 0)
        value = 200;//22;
    else if(y == YDIM-1)
        value = 100;
    else if(x == 0)
        value = 26;
    else if(x == XDIM-1)
        value = 25;
    return value;
}
// Parabolic (Poiseuille-style) profile across the channel: 0 at the walls,
// 1 at the centreline; x is the wall-normal coordinate.
inline __device__ float PoisProf (float x){
    float radius = (YDIM-1-1)*0.5f;
    float xi = 1.0f-(x-0.5f)/radius;
    return -1.0f*(xi*xi-1.0f);
}
// Compute *result = x - y in seconds as a double (classic GNU libc
// timeval-subtraction recipe). NOTE: *y is modified during normalisation.
// Returns 1 if the difference is negative, 0 otherwise.
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
    /* Normalise by borrowing/carrying whole seconds into y so that the
       microsecond difference ends up in a sane range. */
    if (x->tv_usec < y->tv_usec) {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }
    /* Combine the (now safely subtractable) parts into fractional seconds. */
    double secs  = (double)(x->tv_sec - y->tv_sec);
    double usecs = (double)(x->tv_usec - y->tv_usec);
    *result = usecs/1e6 + secs;
    /* Return 1 if result is negative. */
    return x->tv_sec < y->tv_sec;
}
// Clamping helper: a when a < b, otherwise b-1 (note: b-1, not b).
__device__ int dmin(int a, int b)
{
    return (a < b) ? a : b - 1;
}
// Clamping helper: a when a > -1, otherwise 0 (i.e. max(a, 0)).
__device__ int dmax(int a)
{
    return (a > -1) ? a : 0;
}
// Wrapping helper (_p presumably = periodic): a when a < b, else wrap to 0.
__device__ int dmin_p(int a, int b)
{
    return (a < b) ? a : 0;
}
// Wrapping helper (_p presumably = periodic): a when a > -1, else wrap to b-1.
__device__ int dmax_p(int a, int b)
{
    return (a > -1) ? a : b - 1;
}
// Standard trilinear interpolation of eight corner samples at fractional
// offsets (x,y,z) in [0,1]. Judging by the weights, corner names are
// v_{zyx}: the first digit is the z bit and the last digit is the x bit.
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
// Flat index of distribution direction f_num at interior node (x,y,z)
// (pitch = padded x extent, zInner = interior z slices). dmax/dmin clamp
// the result into [0, 19*pitch*YDIM*zInner - ...] to avoid out-of-bounds
// addressing (dmin clamps to bound-1).
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
// Same as f_mem but for the locally-refined (LR) grid, whose y extent is
// YLRDIM; index is clamped into the 19-direction LR buffer.
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
// Flat index of direction f_num at (x,y) in a single-slice (2D) transfer
// buffer for the full-resolution grid; clamped like f_mem.
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
// Flat index of direction f_num at (x,y) in a single-slice (2D) transfer
// buffer for the locally-refined grid; clamped like f_memLR.
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
// Equilibrium moments for the D3Q19 MRT model at density rho and
// velocity (u,v,w).
// NOTE(review): only meq[0..9] and meq[11] are assigned — entries 10 and
// 12..18 are left untouched, so callers must not read them; confirm this
// is intentional.
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);;
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
}
// Reconstruct the 19 D3Q19 distribution functions f from a full moment
// vector m (hard-coded inverse MRT moment transform). u,v,w are read from
// the velocity moments m[3], m[5], m[7]. The numeric coefficients are the
// same inverse-transform weights used in mrt_collide / North_Extrap.
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
// Single-site D3Q19 MRT collision: computes macroscopic u,v,w and rho from
// f, builds the non-equilibrium moments m (only the entries actually used
// are filled), then relaxes f in place; omega scales the shear-related
// moments (m[9], m[11], m[13..15]) while the others use fixed rates baked
// into the hard-coded inverse-transform coefficients.
// NOTE(review): rho is computed but unused below — presumably retained for
// debugging; confirm.
inline __device__ void mrt_collide(float* f, float omega)
{
float m[19];
float u,v,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(u*u+v*v+w*w));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]);
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] +7.53968254f*(u*u+v*v+w*w);
m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+u);
m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+v);
m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+w);
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*u*u-(v*v+w*w));
m[10] = 0.f;
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(v*v-w*w);
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ;
m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -u*v;
m[14] = f[11] +- f[13] + - f[16] + f[18] -v*w;
m[15] = f[10] + - f[12] +-f[15] + f[17] -u*w;
m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ;
m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18];
m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18];
f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]);
f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[10]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[11]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
f[12]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[13]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[14]-=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[15]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[16]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[17]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[18]-= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
}
// Boundary reconstruction at the north face: compute the raw moments of
// the current f, then rebuild f from those moments with the density forced
// to 1 (same inverse-transform coefficients as InvertMoments).
// NOTE(review): the rho parameter is immediately overwritten with 1.0f, so
// the caller's value is ignored — confirm this is intentional.
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
inline __device__ void South_Extrap(float* f, float v)
{
// Moment-based boundary treatment for a south (y-min) inlet node.
// On entry f[] holds the distributions copied from the interior neighbor;
// this routine re-imposes them through the D3Q19 MRT moment space with the
// tangential velocities u and w forced to zero and the prescribed normal
// velocity v entering via the +/-0.1f*v terms of the reconstruction.
// NOTE(review): the momentum moments m[3], m[5], m[7] are never written or
// read here, and rho is taken from the neighbor's distributions rather than
// recomputed with v — confirm this matches the intended extrapolation scheme.
float m[19];
float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
// Zeroth moment: density of the copied-in neighbor distributions.
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
// Non-conserved moments (rows of the D3Q19 MRT transform matrix M).
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
// Inverse moment transform: rebuild the post-BC distributions from
// (rho, u=0, v, w=0) and the extrapolated higher-order moments.
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
__device__ void xsymmetry_bot(float* f, int y, int z)
{
// Symmetry (free-slip mirror) boundary for the x-min face of the domain.
// The four (y,z) corner cases are handled explicitly, then the y-edges,
// and finally the distributions with a +x component are mirrored from
// their -x counterparts (f1<-f3, f5<-f6, f8<-f7, f10<-f12, f15<-f17).
// NOTE(review): the y==0 && z==ZDIM-1 corner assigns f[4]=f[2] while the
// y==0 && z==0 corner assigns f[2]=f[4]; the asymmetry between these two
// y==0 corners looks like a possible copy-paste slip — confirm intended.
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
// Mirror all +x-moving populations from their -x partners.
// NOTE(review): these overwrite some corner/edge assignments made above
// (e.g. f[5] set in the y==YDIM-1 edge case) — verify that is intentional.
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
__device__ void xsymmetry_top(float* f, int y, int z)
{
// Symmetry (free-slip mirror) boundary for the x-max face of the domain:
// mirror image of xsymmetry_bot. Corner cases first, then y-edges, then
// the -x-moving populations are mirrored from their +x counterparts
// (f3<-f1, f6<-f5, f7<-f8, f12<-f10, f17<-f15).
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
// Mirror all -x-moving populations from their +x partners.
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t)
{
// Fold one new velocity sample into the running (cumulative) averages
// uAv/vAv; sampling is assumed to have started at timestep START_VELAV.
float uInst = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float vInst = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
int nPrev = t-START_VELAV; // samples already folded into the average
uAv = (uAv*nPrev+uInst)/(nPrev+1);
vAv = (vAv*nPrev+vInst)/(nPrev+1);
}
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
// Running-average update for the locally-refined (LR) grid: each sample
// carries weight LRFACTOR and t advances in LRFACTOR increments.
float uInst = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float vInst = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float wPrev = t-START_VELAV; // accumulated weight of earlier samples
uAv = (uAv*wPrev+uInst*LRFACTOR)/(wPrev+LRFACTOR);
vAv = (vAv*wPrev+vInst*LRFACTOR)/(wPrev+LRFACTOR);
}
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
// Fold one squared-fluctuation sample (u-uAv)^2, (v-vAv)^2 into the
// running averages ufluc/vfluc; sampling started at START_VELFLUC.
float uInst = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float vInst = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float du2 = (uInst-uAv)*(uInst-uAv);
float dv2 = (vInst-vAv)*(vInst-vAv);
int nPrev = t-START_VELFLUC; // samples already folded in
ufluc = (ufluc*nPrev+du2)/(nPrev+1);
vfluc = (vfluc*nPrev+dv2)/(nPrev+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
// Squared-fluctuation running average on the locally-refined (LR) grid:
// each sample carries weight LRFACTOR, matching vel_avLR.
float uInst = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float vInst = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float du2 = (uInst-uAv)*(uInst-uAv);
float dv2 = (vInst-vAv)*(vInst-vAv);
float wPrev = t-START_VELFLUC; // accumulated weight of earlier samples
ufluc = (ufluc*wPrev+du2*LRFACTOR)/(wPrev+LRFACTOR);
vfluc = (vfluc*wPrev+dv2*LRFACTOR)/(wPrev+LRFACTOR);
}
// Initialize one GPU's slab of the distribution array `fout` to the MRT
// equilibrium for a uniform initial state (rho=1, u=0.01, v=UMAX, w=0),
// zero velocity inside solids (im==1) and at walls (im==10).
// Launch: one thread per lattice node of the slab; pitch is in elements.
// Fix: `w = 0.0` was a double literal in an otherwise all-float kernel.
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU_N, int level)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
if(level > 0){
// Refined grid: map local indices into the coarse-grid coordinate frame.
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
}
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f; // float literal (was `0.0`, a double) — keeps the kernel single precision
if(im == 10 || im == 1){
// Solid or wall node: start from rest.
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
// One slab-sized plane per distribution direction.
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
// Stream+collide for the TOP halo plane of this GPU's slab (global
// z = (GPU+1)*(zInner+2)-1). Streams in from the plane below (fA at
// zInner-1), from within the plane (hA), and from the neighbor GPU's
// ghost plane (temp), then applies bounce-back, inlet/outlet
// extrapolation, or symmetry, and finally MRT collision into hB.
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
// Geometry flag evaluated at this plane's global z coordinate.
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f[19];
// Pull (streaming) step: gather post-collision values from the upwind nodes.
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
// Upward-moving populations (9-13) come from the interior plane below.
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
// Downward-moving populations (14-18) come from the neighbor GPU's ghost plane.
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
// Solid node: full bounce-back (each direction swapped with its opposite).
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
if(im == 100)//north outlet
{
// Zeroth-order extrapolation: copy the neighbor row, then impose BC.
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,UMAX);
}
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
}
// Stream+collide for the BOTTOM halo plane of this GPU's slab (global
// z = GPU*(zInner+2)). Streams in from within the plane (gA), from the
// neighbor GPU's ghost plane below (temp), and from the interior plane
// above (fA at z=0), then applies bounce-back / extrapolation / symmetry
// and MRT collision into gB.
// Fix: `im` was evaluated at the TOP plane's z, (GPU+1)*(zInner+2)-1 —
// copy-pasted from update_top — while this kernel's own symmetry calls
// use GPU*(zInner+2); the geometry query now uses the bottom plane's z.
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
// Geometry flag at THIS (bottom) plane's global z coordinate.
int im = ImageFcn(x,y,GPU*(zInner+2));
float f[19];
// Pull (streaming) step.
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
// Upward-moving populations (9-13) come from the neighbor GPU's ghost plane.
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
// Downward-moving populations (14-18) come from the interior plane above.
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
// Solid node: full bounce-back.
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,UMAX);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
mrt_collide(f,omega);
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
}
// Stream+collide for the interior planes (z in [0, zInner)) of this GPU's
// slab. z==zInner-1 pulls its downward populations from the top halo h;
// z==0 pulls its upward populations from the bottom halo g; all other
// planes stream purely within fA. Also accumulates running velocity
// averages/fluctuations when VELAV is enabled.
// NOTE(review): when zInner==1 the z==zInner-1 branch is taken at z==0 and
// reads fA at z-1 == -1 — confirm zInner is always >= 2 at launch.
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float **velAv, float ** velFluc, int t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// Geometry flag at this node's global z (offset by the slab's halo planes).
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f[19];
// In-plane pull (streaming) for the planar directions 0-8.
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
// Solid node: full bounce-back (f[0] is not rewritten here).
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
South_Extrap(f,UMAX);
}
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
mrt_collide(f,omega);
if(VELAV == 1){
// Time-averaged velocity statistics; the +1 z-offset skips the
// bottom halo plane of the per-GPU velAv/velFluc arrays.
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv[0][x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv[1][x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,t);
velAv[0][x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv[1][x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv[0][x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv[1][x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc[0][x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc[1][x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc[0][x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc[1][x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
// for(int i = 0; i<19; i++)
// fB[f_mem(i ,x,y,z,pitch,zInner)] = 1.f ;
}
// Write one GPU's slab (bottom halo gin, interior fin, top halo hin) as a
// Tecplot POINT zone: coordinates, macroscopic u/v/w and rho recomputed
// from the distributions, plus the stored velocity averages/fluctuations.
// Note: fin/gin/hin are the host copies laid out with row width XDIM.
// Fix: the top-plane velAv[0]/velFluc[0] reads used (ZDIM-1)*XDIM*YDIM,
// which indexes past the per-GPU arrays (sized XDIM*YDIM*ZDIM/GPU_N);
// they now use (ZDIM/GPU_N-1) like the velAv[1]/velFluc[1] reads beside them.
void WriteResults(ostream &output, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
// Bottom halo plane (local k = 0).
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<endl;
}}
// Interior planes (local k = 1 .. ZDIM/GPU_N-2).
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<endl;
}}}
// Top halo plane (local k = ZDIM/GPU_N-1).
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl;
}}
}
// Dump the compile-time simulation parameters (domain sizes, obstacle
// geometry, Reynolds number, relaxation rates, refinement settings) plus
// the runtime omega/omegaLR and GPU count to the given stream. Called once
// for the .inputs file and once for stdout.
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinski LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
// Multi-GPU driver: allocates per-GPU distribution buffers (double-buffered
// A/B), runs the halo/interior time loop with peer-to-peer halo exchange,
// then copies results back and writes output.
// Fixes in this revision:
//  1. The velFluc D2H copy read from velAv_d instead of velFluc_d, so the
//     written fluctuation fields were silently duplicates of the averages.
//  2. The previous-GPU index for the g_temp halo exchange used
//     abs(n-1)%GPU_N, which is wrong for GPU_N>2 (n=0 gave 1, not GPU_N-1);
//     it now wraps with (n-1+GPU_N)%GPU_N.
//  3. cudaFree(f_d[n]) / g_d / h_d / F_d[n] passed HOST arrays of device
//     pointers to cudaFree; those invalid calls are removed (the individual
//     device allocations are already freed in the loops above).
int main(int argc, char *argv[])
{
int GPU_N;
cudaGetDeviceCount(&GPU_N);
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output;
ofstream outputForce;
ofstream outputInputs;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
//size_t memsize, memsize2;
// Round the row pitch up to the next power of two (in floats), then to bytes.
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
// Enable P2P so halo planes can be copied GPU-to-GPU directly.
for(int m = 0; m<GPU_N; m++)
if(m != n) cudaDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,GPU_N,0);
initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,GPU_N,0);
initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,GPU_N,0);
}
initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,GPU_N,0);
initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,GPU_N,0);
}//end Malloc and Initialize
//setup LR
int A = 0;
int B = 1;
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
size_t mem_avail, mem_total;
cudaMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
//time loop
for(int t = 0; t<TMAX; t++)
{
// Halo planes first (halo stream), then interior (inner stream) so the
// peer copies can overlap the interior update.
for(int n = 0; n<GPU_N; n++)
{
cudaSetDevice(n);
update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner);
update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n],velFluc_d[n],t);
}
// Exchange halo planes: this GPU's top ghost comes from the next GPU's
// bottom plane, and its bottom ghost from the previous GPU's top plane.
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_d[ (n+1)%GPU_N][B][0], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_d[(n-1+GPU_N)%GPU_N][B][0],(n-1+GPU_N)%GPU_N,pitch_e*YDIM*sizeof(float)*19,stream_halo[n]);
cudaDeviceSynchronize();
swap(A,B);
}
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
// Fixed: read from velFluc_d (was velAv_d, duplicating the averages).
cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
}
WriteResults(output,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
output<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
for(int i = 0; i<2; i++){
cudaFree(f_d[n][i]);
cudaFree(g_d[n][i]);
cudaFree(h_d[n][i]);
}
cudaFree(g_temp[n]);
cudaFree(h_temp[n]);
for(int i=0;i<3;i++)
cudaFree(F_d[n][i]);
}//end Memcpy and write results
return 0;
}
|
13,445 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <math.h>
#include <iostream>
/************************************************
For more examples visit:
https://github.com/thrust/thrust/tree/master/examples
************************************************/
int main(void)
{
    // Host-side vector holding four floats.
    thrust::host_vector<float> H(4);
    // Fill each slot with a log/cos combination of its index.
    for(int idx = 0; idx < H.size(); idx++)
    {
        H[idx] =log(idx+0.004)*cos(idx);
        // std::cout <<H[idx] << std::endl;
    }
    // Report the current length of H.
    std::cout << "H has size " << H.size() << std::endl;
    // Echo every element of H.
    for(int idx = 0; idx < H.size(); idx++)
    {
        std::cout << "H[" << idx << "] = " << H[idx] << std::endl;
    }
    // Shrink H down to two elements.
    H.resize(2);
    std::cout << "H now has size " << H.size() << std::endl;
    // Mirror the (resized) host vector onto the device.
    thrust::device_vector<float> D = H;
    // Device elements can be assigned through the [] proxy.
    for(int idx = 0; idx < D.size(); idx++)
    {
        D[idx] = H[idx]*cos(idx)*M_PI;
    }
    // Echo every element of D (each read is a device->host transfer).
    for(int idx = 0; idx < D.size(); idx++)
    {
        std::cout << "D[" << idx << "] = " << D[idx] << std::endl;
    }
    // H and D release their storage automatically on scope exit.
    return 0;
}
|
// Element-wise vector addition: result[i] = l[i] + r[i] for i in [0, N).
// Fix: the original used only threadIdx.x (ignoring blockIdx) and never
// checked N, so it was wrong for multi-block launches and wrote out of
// bounds when blockDim.x > N. A grid-stride loop handles any launch
// configuration and any N.
__global__
void vecAdd(float *l, float *r, float *result, size_t N) {
  size_t stride = (size_t)gridDim.x * blockDim.x;
  for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    result[i] = l[i] + r[i];
}
|
13,447 | extern "C" {
__device__ int Reflect(int size, int p)
{
    // Mirror an out-of-range coordinate back into [0, size):
    // -1 -> 0, -2 -> 1, size -> size-1, size+1 -> size-2, ...
    return (p < 0)      ? (-p - 1)
         : (p >= size)  ? (2*size - p - 1)
         :                p;
}
__global__ void convolution_kernel_single_channel(float* src, float* dst, int width, int height, float* kernel, int kernel_width)
{
    // One thread per output pixel; 2D grid over the image.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip threads that fall outside the image.
    if((col < width) && (row < height))
    {
        const int half = kernel_width / 2;
        float acc = 0.0;
        // Walk the kernel window; out-of-image taps are reflected back inside.
        for (int kj = 0; kj < kernel_width; kj++)
        {
            for (int ki = 0; ki < kernel_width; ki++)
            {
                const int sx = Reflect(width, col-(kj-half));
                const int sy = Reflect(height, row-(ki-half));
                acc += kernel[ki + kernel_width * kj] * src[sx + width * sy];
            }
        }
        dst[row * width + col] = acc;
    }
}
// 2D convolution over an interleaved float3 RGB image, one thread per
// pixel, with reflected borders (see Reflect).
// Fix: the green accumulator read src[flat_index].z (the blue channel)
// instead of .y, so green output was a duplicate of blue.
__global__ void convolution_rgb(float3* src, float3* dst, int width, int height, float* kernel, int kernel_width)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    // consider only valid pixel coordinates
    if((x < width) && (y < height))
    {
        // pixel index in the src array
        const int pixel_tid = y * width + x;
        int i, j, x_tmp, y_tmp, flat_index, flat_kernel_index;
        int k = kernel_width / 2;
        float sumr = 0.0,sumg=0.0,sumb=0.0;
        // Single flat loop over the kernel_width x kernel_width window.
        for (int n = 0; n < kernel_width*kernel_width; n++)
        {
            i = n % kernel_width;
            j = n / kernel_width;
            // Reflect out-of-image taps back inside the image.
            x_tmp = Reflect(width, x-(j-k));
            y_tmp = Reflect(height, y-(i-k));
            flat_index = x_tmp + width * y_tmp ;
            flat_kernel_index = i + kernel_width * j;
            sumr += kernel[flat_kernel_index] * src[flat_index].x;
            sumg += kernel[flat_kernel_index] * src[flat_index].y; // fixed: was .z
            sumb += kernel[flat_kernel_index] * src[flat_index].z;
        }
        dst[pixel_tid].x = sumr;
        dst[pixel_tid].y = sumg;
        dst[pixel_tid].z = sumb;
    }
}
} |
13,448 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <limits>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>
using namespace std;
// Element-wise map: d_out[tid] = (x / (x - 2.3))^3 for x = d_in[tid].
// One thread per element; threads with tid >= threads_num do nothing.
__global__ void assignmentKernel(float* d_in, float* d_out, int threads_num) {
const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < threads_num) {
float x = d_in[tid];
// pow evaluated in double precision, then narrowed to float — this
// mirrors the expression used by the CPU reference (cpuCalculation)
d_out[tid] = pow((x/(x-2.3)),3.0);
}
}
// timeval_subtract: store (t2 - t1) into result at microsecond resolution.
// Returns nonzero when the difference is negative (t2 precedes t1).
int timeval_subtract( struct timeval* result, struct timeval* t2,struct timeval* t1) {
const unsigned int usec_per_sec = 1000000;
long int delta = (t2->tv_usec + usec_per_sec * t2->tv_sec)
- (t1->tv_usec + usec_per_sec * t1->tv_sec);
result->tv_sec = delta / usec_per_sec;
result->tv_usec = delta % usec_per_sec;
return (delta < 0);
}
// Run assignmentKernel over num_threads elements of h_in; results are
// copied back into h_out. mem_size is the byte size of each array.
// Returns the kernel's wall-clock time in microseconds.
unsigned long int cudaCalculation(unsigned int num_threads, unsigned int mem_size, float* h_in, float* h_out) {
unsigned int block_size = 1024;
// ceil-divide so a partial block covers the tail elements
unsigned int num_blocks = ceil(((num_threads + (block_size -1)) / block_size));
// Allocate device memory
float* d_in;
cudaMalloc((void**) &d_in, mem_size);
float* d_out;
cudaMalloc((void**) &d_out, mem_size);
// Copy host memory to device
cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
// Start timing the CUDA run.
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// Execute the kernel
assignmentKernel<<<num_blocks, block_size>>>(d_in, d_out, num_threads);
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// supported call to wait for the kernel before stopping the timer.
cudaDeviceSynchronize();
// End timing the CUDA run,
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = t_diff.tv_sec*1e6+t_diff.tv_usec;
// Copy the result from device to host
cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);
// Free device memory.
cudaFree(d_in);
cudaFree(d_out);
return elapsed;
}
// Serial reference: h_out[i] = (x / (x - 2.3))^3 for x = h_in[i].
// Returns the wall-clock time of the loop in microseconds.
unsigned long int cpuCalculation(unsigned int array_length, float* h_in, float* h_out) {
struct timeval t0, t1;
gettimeofday(&t0, NULL);
// Perform serial calculations
for (unsigned int i = 0; i < array_length; i++) {
float x = h_in[i];
h_out[i] = pow((x/(x-2.3)),3.0);
}
gettimeofday(&t1, NULL);
// microsecond difference, computed inline (t1 >= t0, so non-negative)
long int delta = (t1.tv_usec + 1000000L * t1.tv_sec)
- (t0.tv_usec + 1000000L * t0.tv_sec);
return (unsigned long int)delta;
}
// Element-wise comparison of the CPU and GPU result arrays. A pair passes
// only when its absolute difference is strictly below float epsilon; the
// first failing element is printed together with its input value.
bool areSame(float* h_in, float* cpu_arr, float* cuda_arr, unsigned int array_length) {
const float eps = std::numeric_limits<float>::epsilon();
for (unsigned int i = 0; i < array_length; i++) {
const float diff = std::abs(cpu_arr[i] - cuda_arr[i]);
if (!(diff < eps)) {
printf("INVALID:\nInput: %f\nCPU: %.15f\nGPU: %.15f\n", h_in[i], cpu_arr[i], cuda_arr[i]);
return false;
}
}
return true;
}
// Entry point: computes (x/(x-2.3))^3 for 753,411 inputs on both the GPU
// and the CPU, reports both timings, and validates the results match.
int main(int argc, char** argv) {
unsigned int array_length = 753411;
unsigned int mem_size = array_length*sizeof(float);
// Allocate host memory
float* h_in = (float*) malloc(mem_size);
float* cuda_out = (float*) malloc(mem_size);
float* cpu_out = (float*) malloc(mem_size);
// Initialize the memory
for(unsigned int i=0 ; i < array_length ; ++i) {
h_in[i] = (float)i+1;
}
// Call the CUDA code.
unsigned long int cuda_elapsed = cudaCalculation(array_length, mem_size, h_in, cuda_out);
// %lu matches unsigned long; the previous %d mismatched the argument
// type, which is undefined behavior in printf
printf("CUDA took %lu microseconds (%.2fms)\n",cuda_elapsed,cuda_elapsed/1000.0);
// Call the CPU code.
unsigned long int cpu_elapsed = cpuCalculation(array_length, h_in, cpu_out);
printf("CPU took %lu microseconds (%.2fms)\n",cpu_elapsed,cpu_elapsed/1000.0);
if(areSame(h_in, cpu_out, cuda_out, array_length)) {
printf("VALID: CPU and GPU agreed on results.\n");
}
// print result
//for(unsigned int i=0; i<array_length; ++i) printf("%.6f\n", cuda_out[i]);
// Clean up memory
free(h_in);
free(cuda_out);
free(cpu_out);
return 0;
}
|
13,449 | #include "cuda.h"
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "iostream"
// atomicMin demo: every thread with id >= 5 folds its id into *min_num,
// so a launch that includes thread id 5 leaves min(*min_num, 5) behind.
// Threads 0-4 are deliberately excluded by the early return.
__global__ void test_min(int * min_num) {
int thread_id = threadIdx.x + blockDim.x * blockIdx.x;
if (thread_id < 5) {
return;
}
// NOTE(review): this read races with concurrent atomicMin updates from
// other threads — the printed value is whatever is stored at that instant.
printf("%d %d\n", *min_num, thread_id);
atomicMin(min_num, thread_id);
}
// Host driver: seeds *min_num with 220, runs test_min over 1000 single-
// thread blocks (ids 5..999 participate in the atomicMin), and prints the
// resulting minimum.
int main(int argc, char * argv[]) {
int * min_num;
int num = 220;
cudaMalloc((void **) &min_num, sizeof(int));
cudaMemcpy(min_num, &num, sizeof(int), cudaMemcpyHostToDevice);
test_min <<<1000, 1>>> (min_num);
// the blocking memcpy also synchronizes with the kernel above
cudaMemcpy(&num, min_num, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << num << std::endl;
// release the device allocation (previously leaked)
cudaFree(min_num);
return 0;
}
|
13,450 | #include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define SYMBOL_NUM 256
#define MAX_CODE_LEN (SYMBOL_NUM - 1)
#define GRID_X (1u << 12)
#define GRID_Y 1
#define BLOCK_X (1u << 10)
#define BLOCK_Y 1
#define RAW_BUFF_SIZE (GRID_X * GRID_Y * BLOCK_X * BLOCK_Y)
#define DEF_IDX \
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
#define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", \
cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while (0)
void prefixSumHost(
char* raw_data, size_t* code_len, size_t* len_ps, size_t raw_len);
__global__ void prefixSum(
char* raw_data, size_t* code_len, size_t* len_ps, size_t raw_len);
__global__ void genByteStream(
char* raw_data, bool* code, size_t* code_len, size_t* len_ps, bool* bytes,
size_t raw_len);
__global__ void compressByteStream(bool* bytes, uint8_t* bits, size_t bits_len);
// Driver: reads raw input (argv[1]) and a Huffman code table (argv[2]),
// builds per-symbol bit offsets with a host-side prefix sum, then expands
// the encoded bit stream and packs it into bytes on the GPU, reporting
// throughput.
int main(int argc, char** argv)
{
if (argc != 3) {
fprintf(stderr, "Usage: %s raw_data huffman_table\n", argv[0]);
exit(EXIT_FAILURE);
}
// Read raw data
// NOTE(review): fopen results are never checked for NULL — a bad path
// crashes inside fread/fscanf; verify inputs exist before running.
char* raw_data_h = (char*)calloc(RAW_BUFF_SIZE, 1);
size_t raw_len = 0;
{
printf("Reading raw data ... ");
FILE* raw_file = fopen(argv[1], "r");
size_t r;
while ((r = fread(
raw_data_h + raw_len, 1, RAW_BUFF_SIZE - raw_len, raw_file))
!= 0) {
raw_len += r;
}
fclose(raw_file);
printf("done. Read %zu bytes.\n", raw_len);
}
char* raw_data;
CUDA_SAFE_CALL(cudaMalloc((void**)&raw_data, raw_len));
CUDA_SAFE_CALL(cudaMemcpy(
raw_data, raw_data_h, raw_len, cudaMemcpyHostToDevice));
// Read Huffman table
// Table format: <symbol> <code length> <bit>*length, repeated until EOF.
bool* code_h = (bool*)calloc(MAX_CODE_LEN * SYMBOL_NUM, sizeof(bool));
size_t* code_len_h = (size_t*)calloc(SYMBOL_NUM, sizeof(size_t));
{
printf("Reading Huffman table ... ");
FILE* hufftable_file = fopen(argv[2], "r");
int c;
while (fscanf(hufftable_file, "%d", &c) != EOF) {
size_t len;
int _ = fscanf(hufftable_file, "%zu", &len);
code_len_h[c] = len;
for (size_t j = 0; j < len; j++) {
int b;
int _ = fscanf(hufftable_file, "%d", &b);
code_h[c * MAX_CODE_LEN + j] = b;
}
}
fclose(hufftable_file);
printf("done.\n");
}
bool* code;
CUDA_SAFE_CALL(cudaMalloc(
(void**)&code, MAX_CODE_LEN * SYMBOL_NUM * sizeof(bool)));
CUDA_SAFE_CALL(cudaMemcpy(
code, code_h, MAX_CODE_LEN * SYMBOL_NUM * sizeof(bool),
cudaMemcpyHostToDevice));
size_t* code_len;
CUDA_SAFE_CALL(cudaMalloc((void**)&code_len, SYMBOL_NUM * sizeof(size_t)));
CUDA_SAFE_CALL(cudaMemcpy(
code_len, code_len_h, SYMBOL_NUM * sizeof(size_t),
cudaMemcpyHostToDevice));
// free(code_h);
// Run on CUDA
dim3 grid(GRID_X, GRID_Y);
dim3 block(BLOCK_X, BLOCK_Y, 1);
struct timeval time_start, time_end;
gettimeofday(&time_start, NULL);
// Prefix sum of code length (host-side; the GPU scan below is disabled)
size_t* len_ps_h = (size_t*)calloc(raw_len, sizeof(size_t));
prefixSumHost(raw_data_h, code_len_h, len_ps_h, raw_len);
// prefixSum<<<grid, block>>>(raw_data, code_len, len_ps, raw_len);
CUDA_SAFE_CALL(cudaThreadSynchronize());
size_t* len_ps;
CUDA_SAFE_CALL(cudaMalloc((void**)&len_ps, raw_len * sizeof(size_t)));
CUDA_SAFE_CALL(cudaMemcpy(
len_ps, len_ps_h, raw_len * sizeof(size_t), cudaMemcpyHostToDevice));
/* {
size_t* len_ps_h = (size_t*)malloc(sizeof(size_t) * raw_len);
CUDA_SAFE_CALL(cudaMemcpy(
len_ps_h, len_ps, sizeof(size_t) * raw_len, cudaMemcpyDeviceToHost));
for (size_t i = 0; i < raw_len; i++)
printf("%zu\n", len_ps_h[i]);
} */
size_t len_ps_end;
CUDA_SAFE_CALL(cudaMemcpy(
&len_ps_end, &len_ps[raw_len - 1], sizeof(size_t),
cudaMemcpyDeviceToHost));
// NOTE(review): raw_data_h[raw_len - 1] is a plain char; bytes >= 0x80
// index code_len_h with a negative subscript where char is signed —
// confirm inputs are 7-bit ASCII or cast through unsigned char.
size_t bytes_len = len_ps_end + code_len_h[raw_data_h[raw_len - 1]];
printf("bytes len: %zu\n", bytes_len);
// free(code_len_h);
// free(raw_data_h);
if (bytes_len > RAW_BUFF_SIZE) {
fprintf(stderr, "Bytes stream overflowed\n");
exit(EXIT_FAILURE);
}
bool* bytes;
CUDA_SAFE_CALL(cudaMalloc((void**)&bytes, sizeof(bool) * bytes_len));
genByteStream<<<grid, block>>>(
raw_data, code, code_len, len_ps, bytes, raw_len);
CUDA_SAFE_CALL(cudaThreadSynchronize());
// CUDA_SAFE_CALL(cudaFree(len_ps));
// CUDA_SAFE_CALL(cudaFree(code_len));
// CUDA_SAFE_CALL(cudaFree(code));
// CUDA_SAFE_CALL(cudaFree(raw_data));
uint8_t* bits;
size_t bits_len = bytes_len / 8 + 1;
printf("bits len: %zu\n", bits_len);
CUDA_SAFE_CALL(cudaMalloc((void**)&bits, bits_len));
compressByteStream<<<grid, block>>>(bytes, bits, bits_len);
CUDA_SAFE_CALL(cudaThreadSynchronize());
// CUDA_SAFE_CALL(cudaFree(bits));
// CUDA_SAFE_CALL(cudaFree(bytes));
gettimeofday(&time_end, NULL);
double sec = (double)(time_end.tv_sec - time_start.tv_sec)
+ (double)(time_end.tv_usec - time_start.tv_usec) / 1e6;
printf("bytes: %zu sec: %lf bytes/sec: %lf\n",
raw_len, sec, raw_len / sec);
return 0;
}
// Exclusive prefix sum of per-symbol Huffman code lengths over raw_data:
// len_ps[i] = total encoded bit length of raw_data[0 .. i-1].
void prefixSumHost(
char* raw_data, size_t* code_len, size_t* len_ps, size_t raw_len)
{
// Empty input: nothing to write (previously len_ps[0] was written
// unconditionally, an out-of-bounds store for raw_len == 0).
if (raw_len == 0)
return;
len_ps[0] = 0;
for (size_t i = 1; i < raw_len; i++) {
// Cast through unsigned char: plain char may be signed, and bytes
// >= 0x80 would otherwise index code_len with a negative subscript.
len_ps[i] = len_ps[i - 1] + code_len[(unsigned char)raw_data[i - 1]];
}
}
// Work-efficient (Blelloch-style) exclusive scan of per-symbol code
// lengths, computed in place in len_ps.
// NOTE(review): currently unused — main calls prefixSumHost instead. The
// up-sweep/down-sweep only synchronizes with __syncthreads(), which is
// block-scoped, so this is only correct when the entire scan fits in one
// thread block; the tree arithmetic also appears to assume raw_len is a
// power of two. Confirm both before re-enabling.
__global__ void prefixSum(
char* raw_data, size_t* code_len, size_t* len_ps, size_t raw_len)
{
DEF_IDX;
// each thread loads the code lengths of two adjacent input bytes
if (2 * idx + 1 < raw_len) {
len_ps[2 * idx] = code_len[raw_data[2 * idx]];
len_ps[2 * idx + 1] = code_len[raw_data[2 * idx + 1]];
}
__syncthreads();
// build sum in place up the tree
size_t offset = 1;
for (size_t d = raw_len >> 1; d > 0; d >>= 1) {
__syncthreads();
if (idx < d) {
size_t ai = offset * (2 * idx + 1) - 1;
size_t bi = offset * (2 * idx + 2) - 1;
len_ps[bi] += len_ps[ai];
}
offset *= 2;
}
// clear the last element
if (idx == 0)
len_ps[raw_len - 1] = 0;
// traverse down tree & build scan
for (size_t d = 1; d < raw_len; d *= 2) {
offset >>= 1;
__syncthreads();
if (idx < d) {
size_t ai = offset * (2 * idx + 1) - 1;
size_t bi = offset * (2 * idx + 2) - 1;
size_t t = len_ps[ai];
len_ps[ai] = len_ps[bi];
len_ps[bi] += t;
}
}
}
// Expand each input byte into its Huffman code bits: thread idx writes
// code_len[symbol] bools starting at bit offset len_ps[idx].
__global__ void genByteStream(
char* raw_data, bool* code, size_t* code_len, size_t* len_ps, bool* bytes,
size_t raw_len)
{
DEF_IDX;
if (idx < raw_len) {
size_t start_pos = len_ps[idx];
// unsigned char: avoid a negative table index for bytes >= 0x80 on
// platforms where plain char is signed
unsigned char symbol = (unsigned char)raw_data[idx];
for (size_t i = 0, len = code_len[symbol]; i < len; i++) {
bytes[start_pos + i] = code[symbol * MAX_CODE_LEN + i];
}
}
}
// Pack 8 bools from `bytes` into one output byte, MSB first.
// NOTE(review): the last thread reads up to 8*bits_len-1 from `bytes`,
// which can run past the bool stream's length (it was allocated with
// bytes_len = 8*(bits_len-1)..8*bits_len-7 elements) — the trailing bits
// of the final byte come from unowned memory; confirm and pad upstream.
__global__ void compressByteStream(bool* bytes, uint8_t* bits, size_t bits_len)
{
DEF_IDX;
if (idx < bits_len) {
// accumulate into a register so the result does not depend on the
// previous (uninitialized, cudaMalloc'd) contents of bits[idx]
uint8_t packed = 0;
for (size_t i = 0; i < 8; i++) {
packed |= (uint8_t)bytes[8 * idx + i] << (7 - i);
}
bits[idx] = packed;
}
}
|
13,451 | /* ==================================================================
Programmer: Daniel Sawyer (danielsawyer@mail.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj1-danielsawyer.cu -o SDH in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
//MY INCLUDES
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
/*
	Euclidean distance between points ind1 and ind2 of the global atom_list.
*/
double p2p_distance(int ind1, int ind2) {
	double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
	double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
	double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
	return sqrt(dx*dx + dy*dy + dz*dz);
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/*
	Stop the clock started with gettimeofday(&startTime, ...) and report
	the elapsed wall-clock time of the CPU version; returns it in seconds.
*/
double report_running_time() {
	gettimeofday(&endTime, &Idunno);
	long sec_diff  = endTime.tv_sec  - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	if (usec_diff < 0) {	/* borrow one second */
		sec_diff -= 1;
		usec_diff += 1000000;
	}
	printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
	return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
//overloaded to show GPU time
/* Overload (the int argument only selects this variant): same timing
   report as above but labeled for the GPU version. */
double report_running_time(int blah) {
	gettimeofday(&endTime, &Idunno);
	long sec_diff  = endTime.tv_sec  - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	if (usec_diff < 0) {	/* borrow one second */
		sec_diff -= 1;
		usec_diff += 1000000;
	}
	printf("\nRunning time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
	return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
/*
	Print the global CPU histogram, five buckets per row, followed by the
	grand total of all distance counts as a sanity check.
*/
void output_histogram(){
	long long total_cnt = 0;
	for (int i = 0; i < num_buckets; i++) {
		if (i % 5 == 0)		/* start a new row every five buckets */
			printf("\n%02d: ", i);
		printf("%15lld ", histogram[i].d_cnt);
		total_cnt += histogram[i].d_cnt;
		if (i == num_buckets - 1)	/* last bucket: print the total */
			printf("\n T:%lld \n", total_cnt);
		else
			printf("| ");
	}
}
//overloaded taking 1 arg
/* Overload: print an arbitrary histogram (e.g. the GPU copy), five buckets
   per row, followed by its total distance count. */
void output_histogram(bucket* histogram1){
	long long total_cnt = 0;
	for (int i = 0; i < num_buckets; i++) {
		if (i % 5 == 0)		/* start a new row every five buckets */
			printf("\n%02d: ", i);
		printf("%15lld ", histogram1[i].d_cnt);
		total_cnt += histogram1[i].d_cnt;
		if (i == num_buckets - 1)	/* last bucket: print the total */
			printf("\n T:%lld \n", total_cnt);
		else
			printf("| ");
	}
}
//overloaded taking 2 args
/* Overload: print the per-bucket absolute differences of two histograms
   (five per row) plus the absolute difference of their totals; a perfect
   CPU/GPU match prints all zeros. */
void output_histogram(bucket* histogram1, bucket* histogram2){
int i;
long long total_cnt = 0, total_cnt2 = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
/* llabs, not abs: the difference is long long and plain abs() takes an
   int, silently truncating large counts */
printf("%15lld ", llabs(histogram1[i].d_cnt - histogram2[i].d_cnt));
total_cnt += histogram1[i].d_cnt;
total_cnt2 += histogram2[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", llabs(total_cnt - total_cnt2));
else printf("| ");
}
}
/*
brute-force SDH solution in a single CPU thread
*/
/*
	Brute-force SDH on a single CPU thread: for every unordered pair (i, j)
	of points, bin the pair's distance into the global histogram using
	bucket width PDH_res. Always returns 0.
*/
int PDH_baseline() {
	for (int i = 0; i < PDH_acnt; i++) {
		for (int j = i + 1; j < PDH_acnt; j++) {
			double dist = p2p_distance(i, j);
			int h_pos = (int)(dist / PDH_res);
			histogram[h_pos].d_cnt++;
		}
	}
	return 0;
}
//CUDA KERNEL FOR SDH
// GPU SDH kernel: one thread per point i. Each thread walks every j > i,
// computes the pairwise Euclidean distance, and atomically increments
// bucket floor(dist / d_PDH_res) of the device histogram.
// NOTE(review): work per thread is very unbalanced — thread 0 handles
// N-1 pairs while the last thread's loop body never executes (which also
// makes an explicit i < d_PDH_acnt guard unnecessary here).
__global__ void PDH_Cuda(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res) {
double dist;
int i, j, h_pos;
i = threadIdx.x + blockDim.x * blockIdx.x;
//if(i == 0) printf("\nTHE I VALUE = 0\n");
//if(i == 0) printf("\nwarpSize = %d\n", warpSize);
for(j = i+1; j < d_PDH_acnt; ++j) {
dist = sqrt( (d_atom_list[i].x_pos - d_atom_list[j].x_pos)*(d_atom_list[i].x_pos - d_atom_list[j].x_pos) +
(d_atom_list[i].y_pos - d_atom_list[j].y_pos)*(d_atom_list[i].y_pos - d_atom_list[j].y_pos) +
(d_atom_list[i].z_pos - d_atom_list[j].z_pos)*(d_atom_list[i].z_pos - d_atom_list[j].z_pos) );
h_pos = (int)(dist/d_PDH_res);
// 64-bit atomic increment; long long counter reinterpreted as the
// unsigned long long type atomicAdd supports
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
//d_histogram[h_pos].d_cnt++;
//__syncthreads();
}
}
// Allocate device buffers, copy the point list to the GPU, launch PDH_Cuda
// with warp-sized blocks (one thread per point), and copy the zero-
// initialized, kernel-filled histogram back into histogram2 (host memory).
void CudaPrep(bucket * histogram2) {
//sizes of atom and bucket arrays
int size_atom = sizeof(atom)*PDH_acnt;
int size_hist = sizeof(bucket)*num_buckets;
//grid and block sizes
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
//printf("\nWARP = %d\n", deviceProp.warpSize);
// block = one warp; grid = enough blocks to give every point a thread
dim3 threads(deviceProp.warpSize);
dim3 grid(ceil((float)PDH_acnt/threads.x));
//Device Vars
bucket *d_histogram;
atom *d_atom_list;
//Allocate device memory
cudaMalloc((void **) &d_histogram, size_hist);
cudaMalloc((void**) &d_atom_list, size_atom);
//Copy to device
cudaMemcpy(d_atom_list, atom_list, size_atom, cudaMemcpyHostToDevice);
cudaMemset(d_histogram, 0, size_hist);
//run cuda kernel
PDH_Cuda<<<grid,threads>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res);
//copy new gpu histogram back to host from device
// (the blocking memcpy doubles as synchronization with the kernel)
cudaMemcpy(histogram2, d_histogram, size_hist, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_histogram); cudaFree(d_atom_list);
}
// Driver: generates PDH_acnt uniform random 3D points inside the box,
// builds the spatial distance histogram on the CPU and on the GPU, and
// prints both histograms plus their per-bucket differences.
int main(int argc, char **argv)
{
int i;
// validate usage before touching argv (previously argv[1]/argv[2] were
// read unconditionally and a missing argument crashed the program)
if (argc < 3) {
printf("Usage: %s {#of_samples} {bucket_width}\n", argv[0]);
return 1;
}
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
// calloc, not malloc: PDH_baseline increments the bucket counters, so
// they must start at zero (malloc left them uninitialized)
histogram = (bucket *)calloc(num_buckets, sizeof(bucket));
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram();
// GPU version: histogram filled on the device by CudaPrep
bucket *histogram2 = (bucket*)malloc(sizeof(bucket)*num_buckets);
//start time
gettimeofday(&startTime, &Idunno);
//run on GPU
CudaPrep(histogram2);
//check runtime
report_running_time(1);
//print device histogram
output_histogram(histogram2);
//Difference between cpu and gpu
printf("\nCPU vs GPU Histogram Differences\n");
output_histogram(histogram, histogram2);
//Free memory (histogram2 was previously leaked).
free(histogram); free(atom_list); free(histogram2);
return 0;
}
|
13,452 | #include <stdio.h>
#include <cuda_runtime.h>
// Adds inc_value to every element of g_data, one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover the array exactly (main launches n/512 blocks of 512 threads with
// n a multiple of 512); any surplus threads would write out of bounds.
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
// Verify that all n elements of data equal the reference value x.
// Prints the first mismatch and returns false; returns true when every
// element matches (vacuously true for n == 0).
bool correct_output(int *data, const int n, const int x)
{
    int i = 0;
    while (i < n)
    {
        if (data[i] != x)
        {
            printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
            return false;
        }
        ++i;
    }
    return true;
}
// Allocates a 16M-int buffer in pinned host memory and on the device,
// increments every element by 26 on the GPU, and verifies the result.
// The exit status now reflects the verification outcome (was always 0).
int main()
{
int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory (pinned, enabling fast DMA transfers)
int *a = 0;
cudaMallocHost((void **)&a, nbytes);
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
cudaMalloc((void**)&d_a, nbytes);
cudaMemset(d_a, 0, nbytes);
// set kernel launch configuration; n is a multiple of 512, so the grid
// covers the array exactly (increment_kernel has no bounds guard)
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n/threads.x, 1);
// issue work to the GPU (the final blocking memcpy synchronizes)
cudaMemcpy(d_a,a,nbytes,cudaMemcpyHostToDevice);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost);
// check the output for correctness
bool bFinalResults = correct_output(a, n, value);
// release resources
cudaFreeHost(a);
cudaFree(d_a);
// propagate the verification result to the caller
exit(bFinalResults ? 0 : 1);
}
13,453 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// Rejection sampler for truncated normals: vals[i] ~ N(mu[i], sigma[i])
// restricted to [lo[i], hi[i]]. Each thread draws up to maxtries normal
// variates and keeps the first one that lands inside its interval.
// NOTE(review): if all maxtries draws are rejected, vals[idx] is left at
// the last (out-of-range) draw — callers should detect or re-run.
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int rng_a, int rng_b,
int rng_c,
int maxtries)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if (idx < n)
{
// Setup the RNG:
// per-thread seed rng_a + idx*rng_b with sequence rng_c keeps the
// random streams distinct across threads
curandState rng;
curand_init (rng_a+idx*rng_b, rng_c, 0, &rng);
// Sample:
int accept=0;
int numtries=0;
while (!accept && numtries < maxtries)
{
numtries ++;
vals[idx]=mu[idx]+sigma[idx]*curand_normal(&rng);
if (vals[idx]>=lo[idx] && vals[idx]<=hi[idx])
{ accept=1; }
else {}
}
}
return;
}
} // END extern "C" |
13,454 | /*--------------------------------------------------------------------------------------------------*/
/* */
/* Alberto Quesada Aranda */
/* Åbo Akademi University */
/* Advanced Computer Graphics and Graphics Hardware */
/* */
/* Two-Point angular correlation code */
/* Input: two list of galaxies */
/* Output: .txt file with the data for generate the histogram */
/* */
/* Base code taken from: https://github.com/djbard/ccogs/tree/master/angular_correlation */
/* */
/*--------------------------------------------------------------------------------------------------*/
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<unistd.h>
#include<cuda_runtime.h>
#include<time.h>
using namespace std;
#define SUBMATRIX_SIZE 16384 // parallel threads: 32 blocks * 512 threads/block = 16384 threads
#define DEFAULT_NBINS 256 // num of bins for the histogram
#define arcm_to_rad 1/3437.7468 // (1/60)*(pi/180), convert from arcm to rad
#define conv_angle 57.2957795; // 180/pi, convert from rad to degrees
// variables for calculate the execution time
static clock_t start_time;
static double elapsed;
/*------------------------------------------------------------------
Kernel to calculate angular distances
-------------------------------------------------------------------*/
// Kernel: histogram the angular separations (in degrees) between galaxies
// of list 0 (submatrix starting at xind) and list 1 (starting at yind).
// Each block accumulates into a shared-memory histogram, then thread 0
// writes the block's partial histogram to dev_hist[blockIdx.x*(nbins+2)].
// NOTE(review): shared_hist is statically sized DEFAULT_NBINS+2 while the
// loops run to nbins+2 — nbins must never exceed DEFAULT_NBINS.
__global__ void distance(volatile float *a0, volatile float *d0, volatile float *a1, volatile float *d1, int xind, int yind, int max_xind, int max_yind, volatile int *dev_hist, float hist_min, float hist_max, int nbins, float bin_width, bool two_different_files=1) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // idx is the thread id, it must range to 32blocks * 512threads/block = 16384 threads
idx += xind; // allow the thread to know which submatrix has to calculate
// printf("%d %d %d\n", blockIdx.x, threadIdx.x, blockDim.x);
// blockIdx.x [0-31]
// blockDim.x 512
// threadIdx.x [0-511]
__shared__ int shared_hist[DEFAULT_NBINS+2]; // shared vector for save the results within a block of threads
// initialize it only once in each block
if(threadIdx.x==0)
{
for (int i=0;i<nbins+2;i++)
shared_hist[i] = 0;
}
// before starting the calculations we need to be sure that the shared_hist is initialized; if not, we take the risk
// of loss calculations because we don't know the order in which the threads are executed and is possible that a thread
// performs the calculations and write the results in shared_hist before the thread 0 has initialized it.
__syncthreads();
// if NUM_GALAXIES0 % SUBMATRIX_SIZE != 0 in the last submatrix we will have more threads than needed calculations, therefore
// we won't perform calculations with those threads.
if (idx<max_xind) {
float dist, alpha1, delta1, a_diff;
float alpha0 = a0[idx];
float delta0 = d0[idx];
bool do_calc = 1;
int bin_index = 0;
// each kernel will calculate the angle between one galaxy of the first input data (idx) and all the galaxies within [yind-ymax]
int ymax = yind + SUBMATRIX_SIZE; // ymax will be the end of the submatrix that we are calculating of the second galaxies input
// we have to take care and if ymax > NUM_GALAXIES1, we stop the calculations bucle at that point
if (ymax>max_yind)
ymax = max_yind;
// we will perform the same calculation between input0[idx] and every input1[] (range [yind-ymax])
for(int i=yind; i<ymax; i++)
{
// if the two input files are different (DR case) we have to perform all N*N calculations
if (two_different_files) {
do_calc = 1;
}
// if the two input files are the same (DD, RR cases) we have to perform N*(N-1)/2 calculations
else {
if (xind != yind) {
do_calc = 1;
}
else {
if(idx > i)
do_calc=1;
else
do_calc=0;
}
}
if (do_calc)
{
alpha1 = a1[i];
delta1 = d1[i];
a_diff = alpha0 - alpha1;
// spherical law of cosines with alpha = right ascension and
// delta = declination: the cosine product must use the
// declinations (the previous code used cos(alpha0)*cos(alpha1),
// which is not the angular-separation formula)
dist = acos(sin(delta0)*sin(delta1) + cos(delta0)*cos(delta1)*cos(a_diff));
dist *= conv_angle; // convert from rad to degrees
// check in which bin we have to include the angle calculated
if(dist < hist_min) // underflow
bin_index = 0;
else if(dist >= hist_max) // overflow
bin_index = nbins + 1;
else {
bin_index = int((dist-hist_min)/bin_width) + 1;
}
// more than one thread could try to write its result in the shared_hist at the same time and in the same memory
// location. We need an atomic operation for prevent the loss of data
atomicAdd(&shared_hist[bin_index],1); // increment by one the corresponding histogram bin
}
}
}
// before copy the results of each block to the global histogram we have to be sure that all the threads within the block have
// ended its calculations
__syncthreads();
// only one thread (0) will write the results of the block to which it belongs to the global histogram
// for avoid the need of another atomic operation, our global histogram save the result of each block successively :
// [block[0], block[1] .... block [31]]
if(threadIdx.x==0)
{
for(int i=0;i<nbins+2;i++)
dev_hist[i+(blockIdx.x*(nbins+2))]=shared_hist[i];
}
}
/*------------------------------------------------------------------
Calculations and call to kernel
-------------------------------------------------------------------*/
/* Read both galaxy lists, run the distance kernel over every pair of
   SUBMATRIX_SIZE x SUBMATRIX_SIZE submatrices, accumulate the per-block
   partial histograms into hist_array, and write the binned histogram to
   outfile. Returns 0 on success, 1 if a GPU allocation failed. */
int calc(FILE *infile0, FILE *infile1, FILE *outfile, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, bool two_different_files){
//d_ means device -> GPU, h_ means host -> CPU
float *d_alpha0, *d_delta0, *d_alpha1, *d_delta1;
float *h_alpha0, *h_delta0, *h_alpha1, *h_delta1;
int NUM_GALAXIES0, NUM_GALAXIES1;
// reading the data of the input files
// first we read the number of galaxies of each file
// NOTE(review): fscanf return values are not checked — malformed input
// files leave these counts uninitialized.
fscanf(infile0, "%d", &NUM_GALAXIES0);
fscanf(infile1, "%d", &NUM_GALAXIES1);
// calculate the size of the array needed for save in memory all the galaxies
int size_of_galaxy_array0 = NUM_GALAXIES0 * sizeof(float);
int size_of_galaxy_array1 = NUM_GALAXIES1 * sizeof(float);
printf("SIZE 0 # GALAXIES: %d\n",NUM_GALAXIES0);
printf("SIZE 1 # GALAXIES: %d\n",NUM_GALAXIES1);
// allocate space for the galaxies data in global memory
h_alpha0 = (float*)malloc(size_of_galaxy_array0);
h_delta0 = (float*)malloc(size_of_galaxy_array0);
h_alpha1 = (float*)malloc(size_of_galaxy_array1);
h_delta1 = (float*)malloc(size_of_galaxy_array1);
float temp0, temp1;
// reading and saving the galaxies data in radians
for(int i=0; i<NUM_GALAXIES0; i++)
{
fscanf(infile0, "%f %f", &temp0, &temp1);
h_alpha0[i] = temp0 * arcm_to_rad;
h_delta0[i] = temp1 * arcm_to_rad;
}
for(int i=0; i<NUM_GALAXIES1; i++)
{
fscanf(infile1, "%f %f", &temp0, &temp1);
h_alpha1[i] = temp0 * arcm_to_rad;
h_delta1[i] = temp1 * arcm_to_rad;
}
// defining dimensions for the grid and block
dim3 grid, block;
grid.x = 8192/(DEFAULT_NBINS); // number of blocks = 32
block.x = SUBMATRIX_SIZE/grid.x; // number of threads/block = 512
// allocating the histograms
/* I will need 3 arrays for the histograms:
- hist : for each submatrix, save the results of each thread block seccuentially (global memory)
- dev_hist : the same as hist, but in GPU memory
- hist_array : save the global result of all the submatrix in global memory
*/
int *hist, *dev_hist;
int size_hist = grid.x * (nbins+2); // I use +2, one for underflow and other for overflow
int size_hist_bytes = size_hist*sizeof(int);
// allocating and initializing to 0 hist in global mem
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
// allocating and initializing to 0 dev_hist in GPU mem
cudaMalloc((void **) &dev_hist, (size_hist_bytes));
cudaMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
// allocating and initializing to 0 the array for the final histogram (the sum of each submatrix partial result)
int hist_array_size = (nbins+2) * sizeof(unsigned long);
hist_array = (unsigned long*)malloc(hist_array_size);
memset(hist_array,0,hist_array_size);
// allocating memory in GPU for save the galaxies data
cudaMalloc((void **) &d_alpha0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_delta0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_alpha1, size_of_galaxy_array1 );
cudaMalloc((void **) &d_delta1, size_of_galaxy_array1 );
// check to see if we allocated enough memory.
// NOTE(review): this only tests the pointers for NULL; cudaMalloc reports
// failure through its return code, which is not inspected here.
if (0==d_alpha0 || 0==d_delta0 || 0==d_alpha1 || 0==d_delta1 || 0==dev_hist)
{
printf("couldn't allocate enough memory in GPU\n");
return 1;
}
// initialize array to all 0's
/*cudaMemset(d_alpha0,0,size_of_galaxy_array0);
cudaMemset(d_delta0,0,size_of_galaxy_array0);
cudaMemset(d_alpha1,0,size_of_galaxy_array1);
cudaMemset(d_delta1,0,size_of_galaxy_array1);*/
// copy galaxies data to GPU
cudaMemcpy(d_alpha0, h_alpha0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_delta0, h_delta0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_alpha1, h_alpha1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
cudaMemcpy(d_delta1, h_delta1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
int x, y;
int num_submatrices_x = NUM_GALAXIES0 / SUBMATRIX_SIZE;
int num_submatrices_y = NUM_GALAXIES1 / SUBMATRIX_SIZE;
// if NUM_GALAXIES % SUBMATRIX_SIZE != 0, we will need one submatrix more (not the whole submatrix) for perform all the calculations
if (NUM_GALAXIES0%SUBMATRIX_SIZE != 0) {
num_submatrices_x += 1;
}
if (NUM_GALAXIES1%SUBMATRIX_SIZE != 0) {
num_submatrices_y += 1;
}
int bin_index = 0;
// explanation of the iterations in the documentation
for(int k = 0; k < num_submatrices_y; k++)
{
y = k*SUBMATRIX_SIZE;
int jmax = 0;
// if the two files are the same, then only loop over the upper half of the matrix of operations
if (two_different_files == 0)
jmax = k;
for(int j = jmax; j < num_submatrices_x; j++)
{
x = j*SUBMATRIX_SIZE;
// set the histogram to all zeros each time.
cudaMemset(dev_hist,0,size_hist_bytes);
// call to the kernel
distance<<<grid,block>>>(d_alpha0, d_delta0,d_alpha1, d_delta1, x, y, NUM_GALAXIES0, NUM_GALAXIES1, dev_hist, hist_lower_range, hist_upper_range, nbins, hist_bin_width, two_different_files);
// copy the results from GPU memory to global mem
// (the blocking memcpy also synchronizes with the kernel)
cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
// add together the results of each block in a single histogram
for(int m=0; m<size_hist; m++)
{
bin_index = m%(nbins+2); // range it to [0-258]
hist_array[bin_index] += hist[m];
}
}
}
unsigned long total = 0;
// write in the output file the range of the bin of the histogram and the number of galaxies included in that range
// start in k = 1 and finish before nbins+1 for avoid the over/underflow
float lo = hist_lower_range;
float hi = 0;
for(int k=1; k<nbins+1; k++)
{
hi = lo + hist_bin_width;
fprintf(outfile, "%.3e %.3e %lu \n",lo,hi,hist_array[k]);
total += hist_array[k];
lo = hi;
}
printf("total: %lu \n", total);
// close opened files
fclose(infile0);
fclose(infile1);
fclose(outfile);
// free global memory
// NOTE(review): hist_array is never freed; harmless for this one-shot
// program but a leak if calc() were called repeatedly.
free(h_alpha0);
free(h_delta0);
free(h_alpha1);
free(h_delta1);
free(hist);
// free GPU memory
cudaFree(d_alpha0);
cudaFree(d_delta0);
cudaFree(d_alpha1);
cudaFree(d_delta1);
cudaFree(dev_hist);
return 0;
}
/*------------------------------------------------------------------
MAIN
-------------------------------------------------------------------*/
// Entry point: expects two galaxy list files (prog file0 file1); computes
// the two-point angular correlation histogram and writes it to output.txt.
int main(int argc, char **argv) {
start_time = clock(); // start the timer
int nbins = DEFAULT_NBINS; // 256 bins --> 0-64º --> 64/0.25 = 256
float bin_width = 0.25;
float lower_range = 0.0000001;
float upper_range = nbins * bin_width; // 256 * 0.25 = 64
bool different_files = 1;
if (argc != 3) {
printf("\nMust pass two input files.\n");
exit(1);
}
// opening the input files and creating the output file
// (optind comes from <unistd.h>; since getopt is never called it is 1,
// i.e. the first non-program argument)
FILE *infile0, *infile1, *outfile;
infile0 = fopen(argv[optind],"r");
infile1 = fopen(argv[optind+1],"r");
outfile = fopen("output.txt", "w");
// fail early on unreadable/unwritable files instead of crashing in calc()
if (infile0 == NULL || infile1 == NULL || outfile == NULL) {
printf("\nCould not open an input file or create output.txt.\n");
exit(1);
}
// if the input files are the same (DD, RR) the calculations needed are different from the DR case
if (strcmp(argv[optind],argv[optind+1]) == 0) {
different_files = 0;
printf("Input files are the same!\n");
}
calc(infile0, infile1, outfile, nbins, lower_range, upper_range, bin_width, different_files);
// total time
elapsed = clock() - start_time;
elapsed = elapsed / CLOCKS_PER_SEC;
printf("Execution time: %f \n", elapsed);
return 0;
}
|
13,455 | #include <iostream>
#include <math.h>
#include <cstdio>
#include <cstdint>
#include <cuda_profiler_api.h>
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define ull unsigned long long
// Computes a 1-bit-per-pixel Mandelbrot escape mask for an x*y image.
// Each thread processes whole bytes (8 horizontal pixels) of the packed
// output via a grid-stride loop; a bit is set when the point ESCAPES,
// so set members remain 0. Host must zero `res` beforehand (it is |='d).
__global__
void mandelbrot(ull x, ull y, int steps, uint8_t *res)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// total number of output bytes; host asserts x is divisible by 8
ull size = y * x/8;
for (ull i = index; i < size; i += stride) {
for (int bytepos = 0; bytepos < 8; bytepos++) {
// map pixel (col,row) into the complex plane: real in [-2,1), imag in [-1,1)
float r0 = 3.0f * ((i*8+bytepos) % x) / x - 2.0f;
float c0 = 2.0f * ((i*8+bytepos) / x) / y - 1.0f;
float r = r0, c = c0;
// iterate z = z^2 + z0 until the (asymmetric) escape box is left or steps run out
for (int step = 0; step < steps && r < 2 && r > -3 && c < 2 && c > -2; step++) {
float r2 = r * r - c * c + r0;
float c2 = 2 * r * c + c0;
r = r2;
c = c2;
}
// escaped -> set the corresponding bit (MSB = leftmost pixel of the byte)
if (r > 2 || r < -3 || c > 2 || c < -2) {
res[i] |= 0x80 >> bytepos;
}
}
}
}
// Entry point: renders a SCALE-sized Mandelbrot bitmap on the GPU and
// writes the packed 1-bit-per-pixel image to the given output file.
// Usage: <prog> SCALE STEPS OUTFILE
int main(int argc, char **args)
{
    if (argc != 4) {
        std::cerr << "Bad params" << std::endl;
        return 1;
    }
    cudaError_t error;
    int SCALE = atoi(args[1]);
    int STEPS = atoi(args[2]);
    char *OUT = args[3];
    // 3:2 aspect ratio; X must be byte-aligned for 1-bit packing
    ull X = 3 * SCALE, Y = 2*SCALE;
    if (X % 8 != 0) {
        std::cerr << "Scale has to be divisible by 8\n";
        return 1;
    }
    std::cerr << "Scale: " << SCALE << " (X = " << X << ", Y = " << Y << ")\nSteps: " << STEPS << "\nOutput file: " << OUT << std::endl;
    uint8_t *res;
    error = cudaMalloc(&res, X*Y*sizeof(uint8_t) / 8);
    // check the full error range, not only cudaErrorMemoryAllocation:
    // any failure leaves `res` unusable
    if (!res || error != cudaSuccess) {
        std::cerr << "Unable to allocate " << X*Y*sizeof(uint8_t)/8 << " bytes of GPU memory" << std::endl;
        return 1;
    }
    uint8_t *dataHost = (uint8_t *)malloc(X * Y * sizeof(uint8_t)/8);
    if (!dataHost) {
        std::cerr << "Not enough RAM" << std::endl;
        cudaFree(res);
        return 1;
    }
    FILE *output = fopen(OUT, "wb");
    if (!output) { // fail early instead of calling fwrite on a NULL stream
        std::cerr << "Unable to open output file" << std::endl;
        free(dataHost);
        cudaFree(res);
        return 1;
    }
    std::cerr << "Writing" << sizeof(uint8_t) * X * Y /8 << " bytes "<< std::endl;
    cudaMemset(res, 0x0, X * Y * sizeof(uint8_t)/8);
    int blockSize = 256;
    // NOTE: sized for X*Y threads although only X*Y/8 byte items exist;
    // the kernel's bounds-checked stride loop makes this merely oversubscribed
    int numBlocks = (X*Y + blockSize - 1) / blockSize;
    mandelbrot<<<numBlocks, blockSize>>>(X, Y, STEPS, res);
    cudaDeviceSynchronize();
    cudaMemcpy(dataHost, res, X * Y * sizeof(uint8_t)/8, cudaMemcpyDeviceToHost);
    fwrite(dataHost, sizeof(uint8_t), X*Y/8, output);
    cudaFree(res);
    free(dataHost); // plug the host-memory leak
    std::cerr << "Closing" << std::endl;
    std::cout << "convert -size " << X << "x" << Y << " -depth 1 -define png:compression-strategy=3 gray:" << OUT << " res.png" << std::endl;
    fclose(output);
    cudaProfilerStop();
    return 0;
}
|
13,456 | // Copyright 2022 The IREE Authors
//
// Licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// This minimal example just has some publicly exported (__global__) kernels.
// It's possible with more build goo to include .cuh files and pull in any
// CUDA functions that do not involve host behavior (kernel launches/etc).
//
// NOTE: kernels must be exported with C naming (no C++ mangling) in order to
// match the names used in the IR declarations.
//
// NOTE: arguments are packed as a dense list of
// ([ordered bindings...], [push constants...]). If a binding is declared as
// read-only the kernel must not write to it as it may be shared by other
// invocations.
//
// NOTE: today all constants must be i32. If larger types are required there are
// packing rules that must line up with compiler expectations - passed i64
// values must be padded to natural 8-byte alignment, for example.
//
// NOTE: IREE ensures that all I/O buffers are legal to have the __restrict__
// keyword defined (no aliasing is induced that is potentially unsafe). It's
// still possible for users to do bad things but such is the case with native
// CUDA programming.
//
// NOTE: I/O buffer base pointers are likely to be nicely aligned (64B minimum
// but usually larger) but the pointers passed in may be offset by any value
// as they represent subranges of the underlying buffers. For example if the
// user slices out elements 3 and 4 out of a 4xf32 tensor then the base buffer
// pointer will be at +8B. In general if the input wasn't trying to be tricky
// (bitcasting/etc) then natural alignment is guaranteed (an f32 tensor will
// always have buffer pointers aligned to 4B).
// `ret = lhs * rhs`
//
// Conforms to ABI:
// #hal.pipeline.layout<push_constants = 1, sets = [
// <0, bindings = [
// <0, storage_buffer, ReadOnly>,
// <1, storage_buffer, ReadOnly>,
// <2, storage_buffer>
// ]>
// ]>
// workgroup_size = [64 : index, 1 : index, 1 : index]
// Elementwise `binding2 = binding0 * binding1` over `dim` floats.
// One thread per element; threads past `dim` do nothing.
extern "C" __global__ void simple_mul(const float* __restrict__ binding0,
                                      const float* __restrict__ binding1,
                                      float* __restrict__ binding2, int dim) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= dim) return;
  binding2[gid] = binding0[gid] * binding1[gid];
}
// `rhs *= lhs`
//
// Conforms to ABI:
// #hal.pipeline.layout<push_constants = 1, sets = [
// <0, bindings = [
// <0, storage_buffer, ReadOnly>,
// <1, storage_buffer>
// ]>
// ]>
// workgroup_size = [64 : index, 1 : index, 1 : index]
// In-place elementwise multiply: `binding1[i] *= binding0[i]` for i < dim.
// One thread per element; out-of-range threads exit immediately.
extern "C" __global__ void simple_mul_inplace(
    const float* __restrict__ binding0, float* __restrict__ binding1, int dim) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= dim) return;
  binding1[gid] *= binding0[gid];
}
|
13,457 | #include <stdio.h>
// Prints the per-block shared-memory size (bytes) of every CUDA device.
int main() {
  int nDevices = 0;
  // check the query: on failure nDevices stays 0 and we exit non-zero
  if (cudaGetDeviceCount(&nDevices) != cudaSuccess) {
    return 1;
  }
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, i) != cudaSuccess) {
      continue; // skip devices we cannot query
    }
    // sharedMemPerBlock is size_t: %zu is the portable specifier
    // (the original %lu breaks where size_t != unsigned long, e.g. Win64)
    printf("%zu\n", prop.sharedMemPerBlock);
  }
  return 0;
}
|
13,458 | #include "includes.h"
// Backward pass of the sigmoid activation, elementwise over an
// nRowsdZ x nColsdZ matrix: dZ = s * (1 - s) * dA with s = sigmoid(Z).
__global__ void BackwardSigmoid(float* Z, float* dA, int nRowsdZ, int nColsdZ, float *dZ)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < nRowsdZ * nColsdZ)
    {
        // use expf (float) rather than exp (double) to avoid silent
        // double-precision promotion, and evaluate the sigmoid only once
        float s = 1.0f / (1.0f + expf(-Z[index]));
        dZ[index] = s * (1.0f - s) * dA[index];
    }
}
13,459 |
/* compile with:
nvcc -fatbin -O2
-gencode=arch=compute_20,code=sm_20
-gencode=arch=compute_30,code=sm_30
-gencode=arch=compute_35,code=sm_35
-gencode=arch=compute_50,code=sm_50
-gencode=arch=compute_52,code=sm_52
-gencode=arch=compute_60,code=sm_60
-gencode=arch=compute_61,code=sm_61
-gencode=arch=compute_62,code=sm_62
-gencode=arch=compute_62,code=compute_62
"kernelSource/ccd.cu" -o "kernelBinaries/ccd.bin"
See Maxwell compatibility guide for more info:
http://docs.nvidia.com/cuda/maxwell-compatibility-guide/index.html#building-maxwell-compatible-apps-using-cuda-6-0
*/
// use same settings as CCDMinimizer on the java side
const int MaxIterations = 30;
const double ConvergenceThreshold = 0.001;
const double Tolerance = 1e-6;
const double OneDegree = M_PI/180.0;
const double InitialStep = OneDegree*0.25;
// Global forcefield parameters shared by every atom-pair energy term.
// Mirrors a host-side struct, hence the explicit 8-byte alignment.
typedef struct __align__(8) {
int numPairs;
int num14Pairs;
double coulombFactor;
double scaledCoulombFactor;
double solvCutoff2;
double internalSolvationEnergy;
bool useDistDepDielec;
bool useHEs;
bool useHVdw;
} ForcefieldArgs;
// sizeof = 48
// Per-degree-of-freedom (dihedral) parameters: offsets into the shared
// subset/rotated-index tables plus the coordinate range this DOF modifies.
typedef struct __align__(8) {
int subsetTableOffset;
int numPairs;
int num14Pairs;
int rotatedIndicesOffset;
int numRotatedIndices;
int firstModifiedCoord;
int lastModifiedCoord;
// 4 bytes space
double internalSolvationEnergy;
} DofArgs;
// sizeof = 48
// Bundle of pointers passed through pose()/poseAndCalcDofEnergy() so the
// per-DOF calls don't need long parameter lists.
typedef struct {
const double *coords;
const int *atomFlags;
const double *precomputed;
const ForcefieldArgs *ffargs;
const int *subsetTable;
const int *dihedralIndices;
const int *rotatedIndices;
const DofArgs *dofdargs;
double *modifiedCoords;
double *threadEnergies;
} DofPoseAndEnergyArgs;
// Result of one line search: step taken, minimizer location, and its energy.
typedef struct {
double step;
double xdstar;
double fxdstar;
} LinesearchOut;
// A dihedral value together with its allowed [min, max] range.
typedef struct __align__(8) {
double xd;
double xdmin;
double xdmax;
} XdAndBounds;
// sizeof = 24
// dayum, CUDA... no libraries for vector math? what gives??
// Minimal double2/double3 vector math. Mutating helpers modify their
// first argument in place; dot/length return values.
__device__ void set(double2 &v, double x, double y) {
v.x = x;
v.y = y;
}
__device__ void set(double3 &v, double x, double y, double z) {
v.x = x;
v.y = y;
v.z = z;
}
// a -= b, componentwise
__device__ void sub(double3 &a, double3 &b) {
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
// a += b, componentwise
__device__ void add(double3 &a, double3 &b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
__device__ double dot(double2 &a, double2 &b) {
return a.x*b.x + a.y*b.y;
}
__device__ double dot(double3 &a, double3 &b) {
return a.x*b.x + a.y*b.y + a.z*b.z;
}
// out = a x b (cross product)
__device__ void cross(double3 &out, double3 &a, double3 &b) {
out.x = a.y*b.z - a.z*b.y;
out.y = a.z*b.x - a.x*b.z;
out.z = a.x*b.y - a.y*b.x;
}
__device__ double lengthSq(double2 &v) {
return dot(v, v);
}
__device__ double lengthSq(double3 &v) {
return dot(v, v);
}
__device__ double length(double2 &v) {
return sqrt(lengthSq(v));
}
__device__ double length(double3 &v) {
return sqrt(lengthSq(v));
}
__device__ void negate(double3 &v) {
v.x = -v.x;
v.y = -v.y;
v.z = -v.z;
}
// v *= c (scalar)
__device__ void mult(double3 &v, double c) {
v.x *= c;
v.y *= c;
v.z *= c;
}
// v /= c (scalar); no guard against c == 0
__device__ void div(double3 &v, double c) {
v.x /= c;
v.y /= c;
v.z /= c;
}
__device__ void normalize(double2 &v) {
double l = length(v);
v.x /= l;
v.y /= l;
}
__device__ void normalize(double3 &v) {
double l = length(v);
v.x /= l;
v.y /= l;
v.z /= l;
}
// Rotate v by the matrix whose COLUMNS are x, y, z (v := M * v).
// Safe despite the self-reference: set()'s arguments are evaluated first.
__device__ void rotateVec(double3 &v, double3 &x, double3 &y, double3 &z) {
set(v,
v.x*x.x + v.y*y.x + v.z*z.x,
v.x*x.y + v.y*y.y + v.z*z.y,
v.x*x.z + v.y*y.z + v.z*z.z
);
}
// Rotate v by the transpose (= inverse, for orthonormal x,y,z) of that matrix.
__device__ void rotateVecInverse(double3 &v, double3 &x, double3 &y, double3 &z) {
set(v,
dot(v, x),
dot(v, y),
dot(v, z)
);
}
// Rotate v about the z axis by the angle with the given sine/cosine.
__device__ void rotateVecZ(double3 &v, double &sinTheta, double &cosTheta) {
double vx = v.x*cosTheta - v.y*sinTheta;
double vy = v.x*sinTheta + v.y*cosTheta;
v.x = vx;
v.y = vy;
}
// Read atom i's (x,y,z) from the flat coords array (3 doubles per atom).
__device__ double3 readCoord(double *coords, int i) {
int i3 = i*3;
return make_double3(coords[i3], coords[i3 + 1], coords[i3 + 2]);
}
// Write atom i's (x,y,z) back into the flat coords array.
__device__ void writeCoord(double *coords, int i, double3 &val) {
int i3 = i*3;
coords[i3] = val.x;
coords[i3 + 1] = val.y;
coords[i3 + 2] = val.z;
}
// Atom flags encode index and hydrogen-ness in one int:
// |flags| - 1 is the atom index, and the sign marks hydrogen (positive).
__device__ int getAtomIndex(int flags) {
return abs(flags) - 1;
}
__device__ bool isHydrogen(int flags) {
return flags > 0;
}
// Fetch one coordinate, preferring the (shared-memory) modifiedCoords
// overlay when coordIndex falls inside [firstModifiedCoord, lastModifiedCoord].
__device__ double getCoord(const double *coords, const double *modifiedCoords, int firstModifiedCoord, int lastModifiedCoord, int coordIndex) {
if (modifiedCoords != NULL && coordIndex >= firstModifiedCoord && coordIndex <= lastModifiedCoord) {
return modifiedCoords[coordIndex - firstModifiedCoord];
} else {
return coords[coordIndex];
}
}
// Compute the forcefield energy of atom pair i: electrostatics + van der
// Waals + (EEF1-style, presumably — NOTE(review): confirm) pairwise
// solvation. Per-pair parameters live in `precomputed` (9 doubles per
// pair); coords may be overlaid by `modifiedCoords` (see getCoord).
__device__ double calcPairEnergy(
const double *coords,
const int *atomFlags,
const double *precomputed,
const ForcefieldArgs *args,
const int i,
const bool is14Pair,
const double *modifiedCoords,
const int firstModifiedCoord,
const int lastModifiedCoord
) {
// start with zero energy
double energy = 0;
// read atom flags and calculate all the things that use the atom flags in this scope
bool bothHeavy;
double r2 = 0;
{
int atom1Flags, atom2Flags;
{
int i2 = i*2;
atom1Flags = atomFlags[i2];
atom2Flags = atomFlags[i2 + 1];
}
bothHeavy = !isHydrogen(atom1Flags) && !isHydrogen(atom2Flags);
// calculate the squared radius
int atom1Index3 = getAtomIndex(atom1Flags)*3;
int atom2Index3 = getAtomIndex(atom2Flags)*3;
double d;
d = getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom1Index3)
- getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom2Index3);
r2 += d*d;
d = getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom1Index3 + 1)
- getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom2Index3 + 1);
r2 += d*d;
d = getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom1Index3 + 2)
- getCoord(coords, modifiedCoords, firstModifiedCoord, lastModifiedCoord, atom2Index3 + 2);
r2 += d*d;
}
int i9 = i*9;
// calculate electrostatics (skipped for hydrogen pairs unless useHEs)
if (bothHeavy || args->useHEs) {
// 1-4 bonded pairs use a scaled Coulomb factor
double esEnergy = is14Pair ? args->scaledCoulombFactor : args->coulombFactor;
{
double charge = precomputed[i9 + 2];
esEnergy *= charge;
}
{
// distance-dependent dielectric divides by r^2 instead of r
esEnergy /= args->useDistDepDielec ? r2 : sqrt(r2);
}
energy += esEnergy;
}
// calculate vdw (Lennard-Jones 12-6 with precomputed Aij/Bij)
if (bothHeavy || args->useHVdw) {
double Aij, Bij;
{
Aij = precomputed[i9];
Bij = precomputed[i9 + 1];
}
// compute vdw
double r6 = r2*r2*r2;
double r12 = r6*r6;
energy += Aij/r12 - Bij/r6;
}
// calculate solvation (heavy pairs only, within the solvation cutoff)
if (bothHeavy && r2 < args->solvCutoff2) {
double r = sqrt(r2);
{
double lambda1 = precomputed[i9 + 3];
double radius1 = precomputed[i9 + 4];
double alpha1 = precomputed[i9 + 5];
double Xij = (r - radius1)/lambda1;
energy -= alpha1*exp(-Xij*Xij)/r2;
}
{
double lambda2 = precomputed[i9 + 6];
double radius2 = precomputed[i9 + 7];
double alpha2 = precomputed[i9 + 8];
double Xji = (r - radius2)/lambda2;
energy -= alpha2*exp(-Xji*Xji)/r2;
}
}
return energy;
}
// Block-wide tree reduction: sums each thread's `threadEnergy` into
// threadEnergies[0]. `threadEnergies` must be a shared buffer of at least
// blockDim.x doubles; all threads of the block must call this together
// (it contains __syncthreads barriers).
__device__ void blockSum(double threadEnergy, double *threadEnergies) {
// compute the energy sum in SIMD-style
// see url for a tutorial on GPU reductions:
// http://developer.amd.com/resources/articles-whitepapers/opencl-optimization-case-study-simple-reductions/
threadEnergies[threadIdx.x] = threadEnergy;
__syncthreads();
for (int offset = 1; offset < blockDim.x; offset <<= 1) {
// sum this level of the reduction tree
int mask = (offset << 1) - 1;
if ((threadIdx.x & mask) == 0) {
int pos = threadIdx.x + offset;
if (pos < blockDim.x) {
threadEnergies[threadIdx.x] += threadEnergies[pos];
}
}
// barrier is outside the divergent if, as required
__syncthreads();
}
}
// Total energy over ALL atom pairs: each thread strides over pairs, the
// partial sums are reduced block-wide, and the constant internal solvation
// term is added. Must be called by the whole block (uses blockSum).
// Pairs with index < num14Pairs are treated as 1-4 bonded pairs.
__device__ double calcFullEnergy(const double *coords, const int *atomFlags, const double *precomputed, const ForcefieldArgs *ffargs, double *threadEnergies) {
// add up the pairwise energies
double energy = 0;
for (int i = threadIdx.x; i < ffargs->numPairs; i += blockDim.x) {
energy += calcPairEnergy(
coords, atomFlags, precomputed, ffargs,
i, i < ffargs->num14Pairs,
NULL, 0, 0
);
}
blockSum(energy, threadEnergies);
return threadEnergies[0] + ffargs->internalSolvationEnergy;
}
// Set the dihedral angle defined by atoms a-b-c-d (args.dihedralIndices)
// to `dihedralRadians` by rotating the affected atoms (args.rotatedIndices)
// about the b->c axis, operating on args.modifiedCoords in place.
// Must be called by the whole block; one thread rotates each affected atom.
__device__ void pose(const DofPoseAndEnergyArgs &args, double dihedralRadians) {
double *coords = args.modifiedCoords;
// get the four atom positions: a, b, c, d
double3 a = readCoord(coords, args.dihedralIndices[0]);
double3 b = readCoord(coords, args.dihedralIndices[1]);
double3 c = readCoord(coords, args.dihedralIndices[2]);
double3 d = readCoord(coords, args.dihedralIndices[3]);
// translate so everything is centered on b
sub(a, b);
sub(c, b);
sub(d, b);
// build a right orthonormal matrix [rx,ry,rz] where z is bc and ba points along x
double3 rz = c;
normalize(rz);
// rx = component of a orthogonal to c (Gram-Schmidt), normalized
double3 rx = c;
mult(rx, dot(a, c)/dot(c, c));
negate(rx);
add(rx, a);
normalize(rx);
double3 ry;
cross(ry, rz, rx);
// use r^{-1} to rotate d into our axis-aligned space
rotateVecInverse(d, rx, ry, rz);
// look at the x,y coords of d to get the dihedral angle
double2 cossin = make_double2(d.x, d.y);
normalize(cossin);
double currentSin = cossin.y;
double currentCos = cossin.x;
// get the delta dihedral (angle-difference identities for sin/cos)
double newSin, newCos;
sincos(dihedralRadians, &newSin, &newCos);
double deltaSin = newSin*currentCos - newCos*currentSin;
double deltaCos = newCos*currentCos + newSin*currentSin;
// modify the atoms in parallel: into dihedral space, rotate about z, back out
if (threadIdx.x < args.dofdargs->numRotatedIndices) {
int index = args.rotatedIndices[threadIdx.x];
double3 p = readCoord(coords, index);
sub(p, b);
rotateVecInverse(p, rx, ry, rz);
rotateVecZ(p, deltaSin, deltaCos);
rotateVec(p, rx, ry, rz);
add(p, b);
writeCoord(coords, index, p);
}
__syncthreads();
}
// Copy this DOF's modified-coordinate window from Global memory into the
// Shared-memory overlay (cooperative, whole block).
__device__ void copyCoordsGtoS(const DofArgs *dofdargs, const double *coords, double *modifiedCoords) {
int numModifiedCoords = dofdargs->lastModifiedCoord - dofdargs->firstModifiedCoord + 1;
for (int i = threadIdx.x; i < numModifiedCoords; i += blockDim.x) {
modifiedCoords[i] = coords[dofdargs->firstModifiedCoord + i];
}
__syncthreads();
}
// Copy the Shared-memory overlay back into Global memory (cooperative).
__device__ void copyCoordsStoG(const DofArgs *dofdargs, double *coords, const double *modifiedCoords) {
int numModifiedCoords = dofdargs->lastModifiedCoord - dofdargs->firstModifiedCoord + 1;
for (int i = threadIdx.x; i < numModifiedCoords; i += blockDim.x) {
coords[dofdargs->firstModifiedCoord + i] = modifiedCoords[i];
}
__syncthreads();
}
// Set the dihedral to `dihedralRadians`, then evaluate only the energy
// terms affected by this DOF (the pair subset in args.subsetTable).
// Cooperative: whole block participates (pose + blockSum barriers).
__device__ double poseAndCalcDofEnergy(const DofPoseAndEnergyArgs &args, double dihedralRadians) {
// pose the coords
pose(args, dihedralRadians);
// add up the pairwise energies
double energy = 0;
for (int i = threadIdx.x; i < args.dofdargs->numPairs; i += blockDim.x) {
bool is14Pair = i < args.dofdargs->num14Pairs;
energy += calcPairEnergy(
args.coords, args.atomFlags, args.precomputed, args.ffargs,
args.subsetTable[i], is14Pair,
args.modifiedCoords, args.dofdargs->firstModifiedCoord, args.dofdargs->lastModifiedCoord
);
}
blockSum(energy, args.threadEnergies);
return args.threadEnergies[0] + args.dofdargs->internalSolvationEnergy;
}
// Relative convergence tolerance: Tolerance scaled by |f|, with a floor
// of Tolerance itself so tiny f values don't demand impossible precision.
__device__ double getTolerance(double f) {
    const double magnitude = fabs(f);
    return fmax(1.0, magnitude) * Tolerance;
}
// One line search along a single dihedral DOF, within [xdmin, xdmax]:
// fit a local quadratic through xd and its +-step neighbors, step to its
// minimum, then "surf" (grow or shrink the step) toward a better minimum,
// and finally probe +-1 degree to jump over energy walls. Leaves the
// coords posed at the returned xdstar. Cooperative: whole block must call.
__device__ LinesearchOut linesearch(const DofPoseAndEnergyArgs &args, const double xd, const double xdmin, const double xdmax, const double step) {
    // sample energy at the starting point
    double fxd = poseAndCalcDofEnergy(args, xd);
    // get the positive (p) neighbor
    double fxdp = INFINITY;
    {
        double xdp = xd + step;
        if (xdp <= xdmax) {
            fxdp = poseAndCalcDofEnergy(args, xdp);
        }
    }
    // get the negative (n) neighbor
    double fxdm = INFINITY;
    {
        double xdm = xd - step;
        if (xdm >= xdmin) {
            fxdm = poseAndCalcDofEnergy(args, xdm);
        }
    }
    // fit a quadratic to the objective function, locally:
    // q(x) = fx + a*(x - xd)^2 + b*(x - xd)
    // a*step^2 + b*step = fxp - fx
    // a*step^2 - b*step = fxm - fx
    // solve for a to determine the shape
    double xdstar = 0;
    {
        double shape = fxdp + fxdm - 2*fxd;
        const double ShapeEpsilon = 1e-12;
        // BUG FIX: the original tested `shape == NAN || shape == INFINITY`.
        // NaN never compares equal to anything (IEEE-754), so a NaN shape
        // slipped into the quadratic-step branch and poisoned xdstar.
        // isnan/isinf classify correctly (isinf also catches -INFINITY).
        if (shape < -ShapeEpsilon || isnan(shape) || isinf(shape)) {
            // negative shape means quadratic is concave down
            // infinite or nan a means we're hitting a constraint or impossible conformation
            // so just minimize over the endpoints of the interval
            if (fxdm < fxdp) {
                xdstar = xd - step;
            } else {
                xdstar = xd + step;
            }
        } else if (shape <= ShapeEpsilon) {
            // flat here, don't step
            xdstar = xd;
        } else {
            // positive shape means quadratic is concave up
            // step to the optimum
            xdstar = xd + (fxdm - fxdp)*step/2/shape;
        }
    }
    // clamp xdstar to the range
    if (xdstar < xdmin) {
        xdstar = xdmin;
    }
    if (xdstar > xdmax) {
        xdstar = xdmax;
    }
    double fxdstar = poseAndCalcDofEnergy(args, xdstar);
    // did we go downhill?
    if (fxdstar < fxd) {
        double fxdmin = NAN;
        double fxdmax = NAN;
        // surf along f locally to try to find better minimum
        double xdsurfHere = xdstar;
        double fxdsurfHere = fxdstar;
        while (true) {
            // take a step twice as far as we did last time
            double xdsurfNext = xd + 2*(xdsurfHere - xd);
            // did we step off the min?
            if (xdsurfNext < xdmin) {
                // if the min is better, go there instead
                if (isnan(fxdmin)) {
                    fxdmin = poseAndCalcDofEnergy(args, xdmin);
                }
                if (fxdmin < fxdsurfHere) {
                    xdsurfHere = xdmin;
                    fxdsurfHere = fxdmin;
                }
                break;
            // did we step off the max?
            } else if (xdsurfNext > xdmax) {
                // if the max is better, go there instead
                if (isnan(fxdmax)) {
                    fxdmax = poseAndCalcDofEnergy(args, xdmax);
                }
                if (fxdmax < fxdsurfHere) {
                    xdsurfHere = xdmax;
                    fxdsurfHere = fxdmax;
                }
                break;
            }
            double fxdsurfNext = poseAndCalcDofEnergy(args, xdsurfNext);
            // did we improve the min enough to keep surfing?
            if (fxdsurfNext < fxdsurfHere - getTolerance(fxdsurfHere)) {
                // yeah, keep going
                xdsurfHere = xdsurfNext;
                fxdsurfHere = fxdsurfNext;
            } else {
                // nope, stop surfing
                break;
            }
        }
        // update the minimum estimate so far
        xdstar = xdsurfHere;
        fxdstar = fxdsurfHere;
    // did we go significantly uphill?
    } else if (fxdstar > fxd + Tolerance) {
        // try to surf back downhill
        double xdsurfHere = xdstar;
        double fxdsurfHere = fxdstar;
        while (true) {
            // cut the step in half
            double xdsurfNext = xd + (xdsurfHere - xd)/2;
            double fxdsurfNext = poseAndCalcDofEnergy(args, xdsurfNext);
            // did we improve the min enough to keep surfing?
            if (fxdsurfNext < fxdsurfHere - getTolerance(fxdsurfHere)) {
                // yeah, keep going
                xdsurfHere = xdsurfNext;
                fxdsurfHere = fxdsurfNext;
            } else {
                // nope, stop surfing
                break;
            }
        }
        // did the quadratic step help at all?
        if (fxdstar < fxd) {
            // yeah, keep it!
        } else {
            // nope, the original spot was lower
            xdstar = xd;
            fxdstar = fxd;
        }
        // did surfing help at all?
        if (fxdsurfHere < fxdstar) {
            // yeah, use the surf spot
            xdstar = xdsurfHere;
            fxdstar = fxdsurfHere;
        }
    }
    // compute the step taken before wall jumping
    LinesearchOut out;
    out.step = xdstar - xd;
    // try to jump over walls arbitrarily
    // look in a 1-degree step for a better minimum
    // NOTE: skipping this can make minimization a bit faster,
    // but skipping this causes a noticeable rise in final energies too
    // it's best to keep doing it I think
    double xdm = xdstar - OneDegree;
    double xdp = xdstar + OneDegree;
    if (xdm >= xdmin) {
        fxdm = poseAndCalcDofEnergy(args, xdm);
        if (fxdm < fxdstar) {
            xdstar = xdm;
            fxdstar = fxdm;
        }
    }
    if (xdp <= xdmax) {
        fxdp = poseAndCalcDofEnergy(args, xdp);
        if (fxdp < fxdstar) {
            xdstar = xdp;
            fxdstar = fxdp;
        }
    }
    // one last pose
    pose(args, xdstar);
    // set outputs
    out.xdstar = xdstar;
    out.fxdstar = fxdstar;
    return out;
}
// Cooperative block-wide copy of `size` doubles from src to dest,
// strided across the threads of the block; ends with a barrier.
__device__ void copyx(const double *src, double *dest, int size) {
    int idx = threadIdx.x;
    while (idx < size) {
        dest[idx] = src[idx];
        idx += blockDim.x;
    }
    __syncthreads();
}
// Cyclic Coordinate Descent minimizer over `numDofs` dihedral DOFs.
// One block minimizes one conformation: each outer iteration line-searches
// every DOF in turn, accepts the combined move only if the full energy
// improves, and stops on convergence or MaxIterations.
// Dynamic shared memory layout (in order): blockDim.x thread energies,
// maxNumModifiedCoords overlay coords, then numDofs each of nextx,
// firstSteps, lastSteps. `out` receives [fx, x[0..numDofs)].
extern "C" __global__ void ccd(
double *coords,
const int *atomFlags,
const double *precomputed,
const ForcefieldArgs *ffargs,
const int *subsetTables,
const int *dihedralIndices,
const int *rotatedIndices,
const DofArgs *dofargs,
const int maxNumModifiedCoords,
const XdAndBounds *xAndBounds,
const int numDofs,
double *out // size is numDofs + 1
) {
// partition shared memory
extern __shared__ unsigned char shared[];
double *threadEnergies = (double *)shared;
double *modifiedCoords = threadEnergies + blockDim.x;
double *nextx = modifiedCoords + maxNumModifiedCoords;
double *firstSteps = nextx + numDofs;
double *lastSteps = firstSteps + numDofs;
// partition out memory
double *outfx = out;
double *outx = out + 1; // size is numDofs
// build the poseAndCalcDofEnergy() args
// at least, the parts that are independent of the dof
DofPoseAndEnergyArgs args = {
coords, atomFlags, precomputed, ffargs,
NULL, NULL, NULL, NULL,
modifiedCoords, threadEnergies
};
// make a copy of x (in parallel); herex lives in the output buffer
double *herex = outx;
for (int d = threadIdx.x; d < numDofs; d += blockDim.x) {
herex[d] = xAndBounds[d].xd;
}
__syncthreads();
// init the step sizes
for (int d = threadIdx.x; d < numDofs; d+= blockDim.x) {
firstSteps[d] = OneDegree;
lastSteps[d] = OneDegree;
}
__syncthreads();
// get the initial energy
double herefx = calcFullEnergy(coords, atomFlags, precomputed, ffargs, threadEnergies);
for (int iter=0; iter<MaxIterations; iter++) {
copyx(herex, nextx, numDofs);
// for each dimension...
for (int d=0; d<numDofs; d++) {
// get the dof info
args.dofdargs = dofargs + d;
args.subsetTable = subsetTables + args.dofdargs->subsetTableOffset;
args.dihedralIndices = dihedralIndices + d*4;
args.rotatedIndices = rotatedIndices + args.dofdargs->rotatedIndicesOffset;
// copy the coords we need to modify to shared mem
copyCoordsGtoS(args.dofdargs, coords, modifiedCoords);
double xd = nextx[d];
double xdmin = xAndBounds[d].xdmin;
double xdmax = xAndBounds[d].xdmax;
// get the step size, try to make it adaptive (based on historical steps if possible; else on step #)
double step;
{
double firstStep = firstSteps[d];
double lastStep = lastSteps[d];
if (fabs(lastStep) > Tolerance && fabs(firstStep) > Tolerance) {
step = InitialStep*fabs(lastStep/firstStep);
} else {
step = InitialStep/pow(iter + 1.0, 3.0);
}
// make sure the step isn't so big that the quadratic approximation is worthless
while (xdmax > xdmin && xd - step < xdmin && xd + step > xdmax) {
step /= 2;
}
}
// do line search
LinesearchOut lsout = linesearch(args, xd, xdmin, xdmax, step);
// update x and the step (single writer; barrier below publishes it)
if (threadIdx.x == 0) {
// update step tracking
if (iter == 0) {
firstSteps[d] = lsout.step;
}
lastSteps[d] = lsout.step;
// update nextxd
nextx[d] = lsout.xdstar;
}
__syncthreads();
// update the global protein pose
copyCoordsStoG(args.dofdargs, coords, modifiedCoords);
}
// evaluate the whole energy function
double nextfx = calcFullEnergy(coords, atomFlags, precomputed, ffargs, threadEnergies);
double improvement = herefx - nextfx;
if (improvement > 0) {
// take the step
copyx(nextx, herex, numDofs);
herefx = nextfx;
if (improvement < ConvergenceThreshold) {
break;
}
} else {
break;
}
}
// update outputs (herex already lives in outx, so only fx remains)
*outfx = herefx;
}
|
13,460 | #include <iostream>
#include <cmath>
#include <algorithm>
#include <fstream>
#define N 100
#define nrange 20
#define bkgd 3
#define CL 0.9
__global__ void kernel(double*, int*, double*);
__device__ double poissonP(double, double);
__device__ double factorial(double n);
// For each (mu, count) pair compute the likelihood ratio
// R = P(count | mu) / P(count | max(0, count - bkgd)), i.e. the observed
// hypothesis versus the best-fit signal (NOTE(review): looks like a
// Feldman-Cousins style construction — confirm against the host analysis).
// Launch layout: one block per mu value, one thread per candidate count.
__global__ void kernel(double* mu, int* n, double* R) {
    const int countIdx = threadIdx.x;
    const int muIdx = blockIdx.x;
    const int flatIdx = countIdx + muIdx * blockDim.x;
    __shared__ double ratios[nrange];
    ratios[countIdx] = poissonP(mu[muIdx], countIdx)/poissonP(max(0, countIdx - bkgd), countIdx);
    __syncthreads();
    n[flatIdx] = countIdx;
    R[flatIdx] = ratios[countIdx];
}
// Poisson probability of observing n counts given signal mu plus a
// fixed background of 3 (matches the `bkgd` macro): lam^n * e^-lam / n!.
__device__ double poissonP(double mu, double n) {
    const double lam = mu + 3.;
    return pow(lam, n) * exp(-lam) / factorial(n);
}
// n! computed iteratively in double precision; expects n to hold a small
// non-negative integer value (returns 1 for n == 0 or n == 1).
__device__ double factorial(double n) {
    double result = 1.;
    // start at 2: multiplying by 1 is a no-op, and the n==0 / n==1
    // cases fall out naturally (empty loop)
    for (int i = 2; i < n + 1; i++) {
        result *= (double)i;
    }
    return result;
}
// Scans mu over [0, 10) in N steps, computes the likelihood-ratio table
// on the GPU, and appends one CSV-ish line per mu value to ul.dat.
int main() {
    // host buffers: N mu values, and nrange (n, R) entries per mu
    double* mu = new double[N];
    double* R = new double[N*nrange];
    int* n = new int[N*nrange];
    double* dev_mu;
    double* dev_R;
    int* dev_n;
    cudaMalloc((void**)&dev_mu, N*sizeof(double));
    cudaMalloc((void**)&dev_R, N*nrange*sizeof(double));
    cudaMalloc((void**)&dev_n, N*nrange*sizeof(int));
    double muMax = 10;
    double muMin = 0;
    double step = (muMax - muMin)/N;
    for (int i = 0; i < N; i++) {
        mu[i] = muMin + (double)i * step;
    }
    cudaMemcpy(dev_mu, mu, N*sizeof(double), cudaMemcpyHostToDevice);
    kernel<<<N,nrange>>>(dev_mu, dev_n, dev_R);
    // BUG FIX: R holds doubles, but the original copied only
    // N*nrange*sizeof(int) bytes, silently truncating half the results
    cudaMemcpy(R, dev_R, N*nrange*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(n, dev_n, N*nrange*sizeof(int), cudaMemcpyDeviceToHost);
    std::ofstream ofs;
    ofs.open ("ul.dat", std::ofstream::out | std::ofstream::app);
    for (int i = 0; i < N; i++) {
        ofs << mu[i];
        for (int j = 0; j < nrange; j++) {
            ofs << "," << n[j + i*nrange] << "," << R[j + i * nrange];
        }
        ofs << std::endl;
    }
    ofs.close();
    cudaFree(dev_mu);
    cudaFree(dev_n);
    cudaFree(dev_R);
    // release host buffers (the original leaked all three new[] arrays)
    delete[] mu;
    delete[] R;
    delete[] n;
    return 0;
}
|
13,461 | //pass
//--blockDim=512 --gridDim=1 --no-inline
#include <cuda.h>
#include <assert.h>
#include <stdio.h>
#define N 2//512
// Each thread stores its own thread index into p[threadIdx.x].
__global__ void helloCUDA(volatile int* p)
{
    const int tid = threadIdx.x;
    p[tid] = tid;
}
|
13,462 | #include "includes.h"
#define N 100000000
// Elementwise c = a + b, indexed by blockIdx.x (i.e. intended to be
// launched with one thread per block and one block per element).
__global__ void vec_sum(float* a, float* b, float* c) {
    const int idx = blockIdx.x;
    if (idx >= N) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
13,463 | #include <iostream>
#include <cuda.h>
#define ITER 11
using namespace std;
// Bandwidth probe: writes each element's own index, d_a[i] = i.
__global__ void stream(double *d_a, const int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        d_a[idx] = idx;
    }
}
// Measures effective write bandwidth of the `stream` kernel. Problem size
// is argv[1] Mi elements (default 1); the first iteration is treated as
// warm-up and excluded from the average.
int main(int argc, char **argv)
{
    int i, N;
    double *a, *d_a;
    float msec, ave_msec, bandwidth;
    cudaEvent_t event[2];
    for (i = 0; i < 2; ++i) {
        cudaEventCreate(&(event[i]));
    }
    if (argc > 1) {
        N = atoi(argv[1]) * 1024 * 1024;
    }
    else {
        N = 1 * 1024 * 1024;
    }
    a = new double[N];
    cudaMalloc((void **)&d_a, sizeof(double) * N);
    ave_msec = 0;
    int GS, BS;
    BS = 512;
    GS = (N + BS - 1) / BS; // ceil-div grid size
    for (i = 0; i < ITER; ++i) {
        cudaEventRecord(event[0], 0);
        stream<<<GS, BS>>>(d_a, N);
        cudaEventRecord(event[1], 0);
        // wait on the stop event itself: cudaEventElapsedTime requires a
        // completed event, and cudaThreadSynchronize is deprecated
        cudaEventSynchronize(event[1]);
        cudaEventElapsedTime(&msec, event[0], event[1]);
        if (i > 0) { // skip the warm-up iteration
            ave_msec += msec;
        }
    }
    ave_msec /= (ITER - 1);
    bandwidth = (float)(N * sizeof(double)) / 1024 / 1024 / ave_msec ;
    cout << "StreamTest, " << N << ", " << bandwidth << ", " << ave_msec << endl;
    cudaFree(d_a);
    // `a` came from new[]: free() on it is undefined behavior
    delete[] a;
    for (i = 0; i < 2; ++i) {
        cudaEventDestroy(event[i]);
    }
    return 0;
}
|
13,464 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
// Fill a rows x cols matrix (flat, row-major) with random digits in [0, 10).
void init(float *matrix, const int cols, const int rows)
{
    const int total = rows * cols;
    for (int idx = 0; idx < total; ++idx)
    {
        matrix[idx] = rand() % 10;
    }
}
// Naive matrix product C = A * B, where A is m x k, B is k x n and
// C is m x n; all matrices are flat row-major, C must not alias A or B.
void multiply(float *A, float *B, float *C, const int m, const int n, const int k)
{
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            float acc = 0;
            for (int h = 0; h < k; ++h)
            {
                acc += A[row * k + h] * B[h * n + col];
            }
            C[row * n + col] = acc;
        }
    }
}
// Print a rows x cols matrix (flat, row-major) to stdout: tab-separated
// values, one row per line, followed by a blank line.
void print(float *matrix, const int cols, const int rows)
{
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            printf("%f\t", matrix[r * cols + c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Reads m, n, k from stdin, multiplies random m x k and k x n matrices on
// the CPU, and times the multiply with CUDA events.
int main()
{
    srand(time(0));
    int m = 0;
    int n = 0;
    int k = 0;
    printf("Enter: m, n and k...\n");
    // validate input: all three dimensions must parse and be positive
    // (the original used whatever garbage was left in m/n/k on failure)
    if (scanf("%d %d %d", &m, &n, &k) != 3 || m <= 0 || n <= 0 || k <= 0)
    {
        printf("Invalid dimensions.\n");
        return 1;
    }
    float *matrix_A = (float *) malloc(sizeof(float) * m * k);
    float *matrix_B = (float *) malloc(sizeof(float) * k * n);
    float *result = (float *) malloc(sizeof(float) * m * n);
    if (!matrix_A || !matrix_B || !result)
    {
        printf("Out of memory.\n");
        free(matrix_A);
        free(matrix_B);
        free(result);
        return 1;
    }
    // init/print take (cols, rows): A is m rows x k cols, B is k x n.
    // The original swapped these (harmless for init, which only uses the
    // product, but print rendered a wrongly-shaped grid when m != k).
    init(matrix_A, k, m);
    init(matrix_B, n, k);
    if (m < 5)
    {
        printf("\nMatrix A\n");
        print(matrix_A, k, m);
        printf("Matrix B\n");
        print(matrix_B, n, k);
    }
    printf("\nMatrices have been initialized.\n");
    float cpu_elapsed_time_ms = 0;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    multiply(matrix_A, matrix_B, result, m, n, k);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("CPU execution time: %f ms.\n\n", cpu_elapsed_time_ms);
    if (m < 5)
    {
        printf("Result matrix\n");
        print(result, n, m); // result is m rows x n cols
    }
    // release timing events (the original leaked them)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(matrix_A);
    free(matrix_B);
    free(result);
    return 0;
}
|
13,465 | #include "includes.h"
__global__ static void gaussdensity_direct_alt(int natoms, const float4 *xyzr, float gridspacing, unsigned int z, float *densitygrid) {
unsigned int xindex = (blockIdx.x * blockDim.x) * DUNROLLX + threadIdx.x;
unsigned int yindex = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int zindex = (blockIdx.z * blockDim.z) + threadIdx.z;
unsigned int outaddr =
((gridDim.x * blockDim.x) * DUNROLLX) * (gridDim.y * blockDim.y) * zindex +
((gridDim.x * blockDim.x) * DUNROLLX) * yindex + xindex;
zindex += z;
float coorx = gridspacing * xindex;
float coory = gridspacing * yindex;
float coorz = gridspacing * zindex;
float densityvalx1=0.0f;
float densityvalx2=0.0f;
#if DUNROLLX >= 4
float densityvalx3=0.0f;
float densityvalx4=0.0f;
#endif
#if DUNROLLX >= 8
float densityvalx5=0.0f;
float densityvalx6=0.0f;
float densityvalx7=0.0f;
float densityvalx8=0.0f;
#endif
float gridspacing_coalesce = gridspacing * DBLOCKSZX;
int atomid;
for (atomid=0; atomid<natoms; atomid++) {
float4 atom = xyzr[atomid];
float dy = coory - atom.y;
float dz = coorz - atom.z;
float dyz2 = dy*dy + dz*dz;
float dx1 = coorx - atom.x;
float r21 = (dx1*dx1 + dyz2) * atom.w;
densityvalx1 += exp2f(-r21);
float dx2 = dx1 + gridspacing_coalesce;
float r22 = (dx2*dx2 + dyz2) * atom.w;
densityvalx2 += exp2f(-r22);
#if DUNROLLX >= 4
float dx3 = dx2 + gridspacing_coalesce;
float r23 = (dx3*dx3 + dyz2) * atom.w;
densityvalx3 += exp2f(-r23);
float dx4 = dx3 + gridspacing_coalesce;
float r24 = (dx4*dx4 + dyz2) * atom.w;
densityvalx4 += exp2f(-r24);
#endif
#if DUNROLLX >= 8
float dx5 = dx4 + gridspacing_coalesce;
float r25 = (dx5*dx5 + dyz2) * atom.w;
densityvalx5 += exp2f(-r25);
float dx6 = dx5 + gridspacing_coalesce;
float r26 = (dx6*dx6 + dyz2) * atom.w;
densityvalx6 += exp2f(-r26);
float dx7 = dx6 + gridspacing_coalesce;
float r27 = (dx7*dx7 + dyz2) * atom.w;
densityvalx7 += exp2f(-r27);
float dx8 = dx7 + gridspacing_coalesce;
float r28 = (dx8*dx8 + dyz2) * atom.w;
densityvalx8 += exp2f(-r28);
#endif
}
densitygrid[outaddr ] += densityvalx1;
densitygrid[outaddr+1*DBLOCKSZX] += densityvalx2;
#if DUNROLLX >= 4
densitygrid[outaddr+2*DBLOCKSZX] += densityvalx3;
densitygrid[outaddr+3*DBLOCKSZX] += densityvalx4;
#endif
#if DUNROLLX >= 8
densitygrid[outaddr+4*DBLOCKSZX] += densityvalx5;
densitygrid[outaddr+5*DBLOCKSZX] += densityvalx6;
densitygrid[outaddr+6*DBLOCKSZX] += densityvalx7;
densitygrid[outaddr+7*DBLOCKSZX] += densityvalx8;
#endif
} |
13,466 | #include <iostream>
#include <string>
// Fill a[0..N) with pseudo-random values from rand().
void random_ints(int* a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = rand();
}
// c[i] = a[i] + b[i]. No bounds check: the launch grid must exactly
// cover the arrays (gridDim.x * blockDim.x <= length).
__global__ void add(int * a, int *b, int *c) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Print the first N elements of c to stdout with no separators
// (byte-for-byte the same output as before).
void printc(int* c, int N) {
    int i = 0;
    while (i < N) {
        std::cout << c[i];
        ++i;
    }
}
// Host driver: adds two random N-element int vectors on the GPU and prints
// the sums. Fix: CUDA allocations, the kernel launch, and the result copy
// are now checked, so failures are reported instead of silently printing
// garbage. N is a multiple of NUM_THREADS, so the guard-less kernel is safe.
int main(void) {
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int N = 2048*2048;          // divisible by NUM_THREADS: no ragged tail
    int NUM_THREADS = 512;
    int size = N*sizeof(int);

    cudaError_t err;
    if ((err = cudaMalloc((void **) &d_a, size)) != cudaSuccess ||
        (err = cudaMalloc((void **) &d_b, size)) != cudaSuccess ||
        (err = cudaMalloc((void **) &d_c, size)) != cudaSuccess) {
        std::cerr << "cudaMalloc: " << cudaGetErrorString(err) << "\n";
        return 1;
    }

    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    random_ints(b, N);

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    add<<<N/NUM_THREADS, NUM_THREADS>>>(d_a, d_b, d_c);
    if ((err = cudaGetLastError()) != cudaSuccess) {    // launch-config errors
        std::cerr << "kernel launch: " << cudaGetErrorString(err) << "\n";
        return 1;
    }

    // blocking copy: synchronises with the kernel before reading results
    if ((err = cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost)) != cudaSuccess) {
        std::cerr << "cudaMemcpy: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    printc(c, N);

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
13,467 | #include <stdio.h>
#include <math.h>
// Kernel addition on GPU
// Naive 2D correlation over pointer-to-pointer matrices, one thread per
// output pixel: m = threadIdx.y (row), n = threadIdx.x (col). Block indices
// are never read, so full coverage requires a single block of (W, H) threads.
// I: H x W image, F: K x K filter, filterPad: half-width used to centre the
// filter, out: H x W result. All three are arrays of row pointers (float**).
// Out-of-range filter taps are skipped, i.e. implicit zero padding.
__global__ void conv (float** I, float** F, int H, int W, int K, int filterPad, float** out)
{
int m = threadIdx.y; // First dim
int n = threadIdx.x; // Second dim
float convOut = 0.0f;
// Accumulate the K x K neighbourhood centred on (m, n).
for (int i = 0; i < K; i++)
{
for (int j = 0; j < K; j++)
{
// For correlation
int indI = m - filterPad + i;
int indJ = n - filterPad + j;
// For convolution
// int indI = m + filterPad - i;
// int indJ = n + filterPad - j;
if (indI < 0 || indI >= H || indJ < 0 || indJ >= W)
{
// tap falls outside the image: contributes nothing (zero pad)
}
else
{
convOut += I[indI][indJ] * F[i][j]; // Correlation
}
}
}
out[m][n] = convOut;
}
// Populate the H x W image I with a row-major ramp (value i*W + j) and the
// K x K filter F with column offsets (value j - filterPad).
void init (float** I, float** F, int H, int W, int K, int filterPad)
{
    // Image: each pixel holds its own flat index.
    for (int i = 0; i < H; i++)
        for (int j = 0; j < W; j++)
            I[i][j] = (float)(i * W + j);

    // Filter: constant down each column, -filterPad .. K-1-filterPad across.
    for (int i = 0; i < K; i++)
        for (int j = 0; j < K; j++)
            F[i][j] = (float)(j - filterPad);
}
// Main function on the host
// Main function on the host.
// Fixes over the original:
//  - The original cudaMalloc'd into the HOST row pointers I[i]/F[i]/out[i],
//    discarding the initialized host rows (leak), never copying the pixel
//    data to the device, and then dereferenced device pointers on the host
//    when printing (undefined behaviour / crash). We now keep separate
//    host-side tables of device row pointers and copy row data explicitly.
//  - The kernel reads rows from threadIdx.y, but the original launched
//    <<<H, W>>> (blockDim.y == 1), so only row 0 was ever computed; the
//    launch is now a single (W, H) block.
//  - All host and device allocations are released.
int main()
{
    int H = 5, W = 5, K = 3;
    int filterPad = K / 2;  // same value as the original (int)(floor(K) / 2.0f)

    printf ("Allocating memory on Host\n");
    float **I = (float **) malloc(H * sizeof(float *));
    for (int i = 0; i < H; i++)
        I[i] = (float*) malloc(sizeof(float) * W);
    float **F = (float **) malloc(K * sizeof(float *));
    for (int i = 0; i < K; i++)
        F[i] = (float*) malloc(sizeof(float) * K);
    float **out = (float **) malloc(H * sizeof(float *));
    for (int i = 0; i < H; i++)
        out[i] = (float*) malloc(sizeof(float) * W);
    printf ("Host memory allocated\n");

    printf ("Initializing array\n");
    init(I, F, H, W, K, filterPad); // Initialize the arrays
    printf ("Initialization complete\n");

    // Host-side tables holding DEVICE row pointers; these are what gets
    // copied into the device-side float** tables the kernel dereferences.
    float **rowI = (float **) malloc(H * sizeof(float *));
    float **rowF = (float **) malloc(K * sizeof(float *));
    float **rowO = (float **) malloc(H * sizeof(float *));

    cudaError_t err;
    printf ("Allocating device memory for I\n");
    for (int i = 0; i < H; i++)
    {
        err = cudaMalloc((void **) &rowI[i], sizeof(float) * W);
        if (err != cudaSuccess) { printf("Failure in allocating I\n"); exit(1); }
        err = cudaMemcpy(rowI[i], I[i], sizeof(float) * W, cudaMemcpyHostToDevice);
        if (err != cudaSuccess) { printf("Failure in populating I\n"); exit(1); }
    }
    float **dev_I;
    err = cudaMalloc((void **) &dev_I, sizeof(float *) * H);
    if (err != cudaSuccess) { printf("Failure in allocating I\n"); exit(1); }
    cudaMemcpy(dev_I, rowI, sizeof(float *) * H, cudaMemcpyHostToDevice);

    printf ("Allocating device memory for F\n");
    for (int i = 0; i < K; i++)
    {
        err = cudaMalloc((void **) &rowF[i], sizeof(float) * K);
        if (err != cudaSuccess) { printf("Failure in allocating F\n"); exit(1); }
        err = cudaMemcpy(rowF[i], F[i], sizeof(float) * K, cudaMemcpyHostToDevice);
        if (err != cudaSuccess) { printf("Failure in populating K\n"); exit(1); }
    }
    float **dev_F;
    err = cudaMalloc((void **) &dev_F, sizeof(float *) * K);
    if (err != cudaSuccess) { printf("Failure in allocating F\n"); exit(1); }
    cudaMemcpy(dev_F, rowF, sizeof(float *) * K, cudaMemcpyHostToDevice);

    printf ("Allocating device memory for output\n");
    for (int i = 0; i < H; i++)
    {
        err = cudaMalloc((void **) &rowO[i], sizeof(float) * W);
        if (err != cudaSuccess) { printf("Failure in allocating output array\n"); exit(1); }
    }
    float **dev_Out;
    err = cudaMalloc((void **) &dev_Out, sizeof(float *) * H);
    if (err != cudaSuccess) { printf("Failure in allocating output array\n"); exit(1); }
    cudaMemcpy(dev_Out, rowO, sizeof(float *) * H, cudaMemcpyHostToDevice);
    printf ("Device memory allocated\n");

    printf ("Performing convolution\n");
    dim3 block(W, H);   // kernel maps threadIdx.y -> row, threadIdx.x -> col
    conv <<< 1, block >>> (dev_I, dev_F, H, W, K, filterPad, dev_Out);
    err = cudaGetLastError();
    if (err != cudaSuccess) { printf("Kernel launch failure\n"); exit(1); }
    printf ("Kernel sucessfully executed!\n");

    printf ("Moving data back to host\n");
    for (int i = 0; i < H; i++)
        cudaMemcpy(out[i], rowO[i], sizeof(float) * W, cudaMemcpyDeviceToHost);
    printf ("Data moved to host\n");

    // Release device memory: row buffers first, then the pointer tables.
    for (int i = 0; i < H; i++) { cudaFree(rowI[i]); cudaFree(rowO[i]); }
    for (int i = 0; i < K; i++) cudaFree(rowF[i]);
    cudaFree(dev_I);
    cudaFree(dev_F);
    cudaFree(dev_Out);
    printf ("Device memory released\n");

    printf ("Convolution output:\n");
    for (int i = 0; i < H; i++)
    {
        for (int j = 0; j < W; j++)
            printf ("%.2f\t", out[i][j]);
        printf ("\n");
    }

    // Release host memory.
    for (int i = 0; i < H; i++) { free(I[i]); free(out[i]); }
    for (int i = 0; i < K; i++) free(F[i]);
    free(I); free(F); free(out);
    free(rowI); free(rowF); free(rowO);
    return 0;
}
13,468 | #include "includes.h"
// Absolute residual between a disparity-derived 3D point (Xd,Yd,Zd) and a
// model point (Xm,Ym,Zm) with surface normal (nx,ny,nz), after applying a
// small pose update (translation T0..T2 and what appears to be a linearised
// rotation R0..R2 via cross-product terms) to the model point. The result
// is scaled by fx*b/Zm^2, converting metric distance to disparity pixels.
// NOTE: the exact summation order matters for float reproducibility.
__device__ static float disp_absolute_residual(float Xd, float Yd, float Zd, float Xm, float Ym, float Zm, float nx, float ny, float nz, float T0, float T1, float T2, float R0, float R1, float R2, float fx, float b) {
float r = -Xd * nx + Xm * nx - Yd * ny + Ym * ny - Zd * nz + Zm * nz +
nx * T0 + ny * T1 + nz * T2 + Xm * ny * R2 - Xm * nz * R1 -
Ym * nx * R2 + Ym * nz * R0 + Zm * nx * R1 - Zm * ny * R0;
// weight to convert distance units to pixels
r *= fx * b / (Zm * Zm);
return fabsf(r);
}
// One thread per valid disparity/Z-buffer sample: reconstructs both the
// measured (disparity) and rendered (model) 3D point for the sample's pixel,
// evaluates the residual for the sample's segment pose update, and writes
// the weighted absolute value into d_abs_res at the segment's output offset.
// Launch: 1D grid, guarded by n_valid_disparity_Zbuffer.
__global__ void disp_absolute_residual_scalable_GPU( float *d_abs_res, const float *d_disparity_compact, const float4 *d_Zbuffer_normals_compact, const int *d_ind_disparity_Zbuffer, const unsigned int *d_valid_disparity_Zbuffer, float fx, float fy, float ox, float oy, float b, int n_cols, int n_valid_disparity_Zbuffer, const int *d_offset_ind, const int *d_segment_translation_table, float w_disp, const float *d_dTR) {
int ind = blockDim.x * blockIdx.x + threadIdx.x;
if (ind < n_valid_disparity_Zbuffer) {
// determine current segment
int segment = d_segment_translation_table[d_valid_disparity_Zbuffer[ind]];
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[ind];
float4 tmp = d_Zbuffer_normals_compact[ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates: flat pixel index -> normalised camera coordinates
int pixel_ind = d_ind_disparity_Zbuffer[ind];
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = __fdividef((x - ox), fx);
y = __fdividef((y - oy), fy);
// reconstruct 3D point from disparity
float Zd = -(fx * b) / disp; // arbitrary use of fx for now
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
// compute absolute residual (weighted by disparity vs flow importance)
int ind_out = ind + d_offset_ind[segment]; // segment-local output offset
int s6 = segment * 6; // six pose parameters (T0..T2, R0..R2) per segment
d_abs_res[ind_out] =
w_disp * disp_absolute_residual(Xd, Yd, Zd, Xm, Ym, Zm, nx, ny, nz,
d_dTR[s6], d_dTR[s6 + 1], d_dTR[s6 + 2],
d_dTR[s6 + 3], d_dTR[s6 + 4],
d_dTR[s6 + 5], fx, b);
}
}
13,469 | /*
* thread_block_test.cu
* Copyright (C) 2016 <@A0835-PC>
*
* Distributed under terms of the MIT license.
*/
#include <iostream>
#include <cstdlib>
int N = 21504;
// Number of blocks needed to cover `dim` work items with `threadPerBlock`
// threads per block (rounds up when the division leaves a remainder).
int blockPerGrid(const int dim, const int threadPerBlock)
{
    const int blocks = dim / threadPerBlock;
    return (dim % threadPerBlock == 0) ? blocks : blocks + 1;
}
// Device helper: store tid^2 at slot tid of a (caller supplies the index).
__device__ void initial_a(int *a, int tid)
{
    const int sq = tid * tid;
    a[tid] = sq;
}
// Device helper: store the negated thread index at this thread's slot of b.
__device__ void initial_b(int *b)
{
    const int tid = threadIdx.x;
    b[tid] = -tid;
}
// Fills a[tid] = tid^2, b[tid] = -tid and c[tid] = a[tid] + b[tid], one
// element per thread; thread 0 also reports the launch geometry through
// *bdim / *gdim.
// NOTE(review): there is no `tid < N` guard, so the launch must cover
// exactly the allocated length — blockPerGrid() rounds up, which writes
// past the end whenever N is not a multiple of the block size. Confirm N
// is always a multiple of 256 or add a length parameter.
__global__ void add(long long *a, long long *b, long long *c, int *bdim, int *gdim)
{
// int tid = blockIdx.x;
long long tid = threadIdx.x + blockIdx.x * blockDim.x;
// while (tid < N) {
a[tid] = tid * tid;
b[tid] = -tid;
c[tid] = a[tid] + b[tid];
// tid += blockDim.x * gridDim.x;
// }
if (tid == 0) {
*bdim = blockDim.x;
*gdim = gridDim.x;
}
}
// Abort with file/line context when a CUDA API call returned an error;
// report success otherwise (messages byte-identical to before).
inline void checkCudaError(cudaError_t error, const char *file, const int line)
{
    if (error == cudaSuccess) {
        std::cout << "cuda call success" << std::endl;
        return;
    }
    std::cerr << "CUDA CALL FAILED: " << file << "( " << line << " )- " <<
        cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
}
// Check the sticky CUDA error state (e.g. after a kernel launch) and abort
// with `msg` context on failure; prints a success line otherwise.
inline void checkCudaState(const char *msg, const char *file, const int line)
{
    const cudaError_t error = cudaGetLastError();
    if (error == cudaSuccess) {
        std::cout << "cuda state Success: " << msg << std::endl;
        return;
    }
    std::cerr << "---" << msg << " Error--" << std::endl;
    std::cerr << file << "( " << line << " )- " <<
        cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
}
#define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__);
#define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__);
// Dump a short description (name, compute capability, SM count) of CUDA
// device `id` to stdout.
void print_device(const int id)
{
    cudaDeviceProp props;
    CHECK_ERROR(cudaGetDeviceProperties(&props, id));
    std::cout << "---Property of currently device used---" << std::endl
              << "Device " << id << ": " << props.name << std::endl
              << "CUDA Capability: " << props.major << "." << props.minor
              << std::endl
              << "MultiProcessor count: " << props.multiProcessorCount << std::endl;
}
// Pick the CUDA device with the most multiprocessors (starting from the
// suggested `id`), make it current, and print its properties.
void setCudaDevice(int id)
{
    int numDevice = 0;
    CHECK_ERROR(cudaGetDeviceCount(&numDevice));
    std::cout << "Total CUDA device number: " << numDevice << std::endl;
    if (numDevice > 1) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, id);
        int bestCount = props.multiProcessorCount;
        // Scan the remaining devices for a higher SM count.
        for (int dev = 1; dev < numDevice; ++dev) {
            CHECK_ERROR(cudaGetDeviceProperties(&props, dev));
            if (props.multiProcessorCount > bestCount) {
                bestCount = props.multiProcessorCount;
                id = dev;
            }
        }
    }
    CHECK_ERROR(cudaSetDevice(id));
    print_device(id);
}
// Host driver: fills a/b/c on the device, copies c back, and prints every
// element plus the launch geometry.
// Fix: `c` was allocated with new[] but released with free() — undefined
// behaviour (mismatched allocator); it is now released with delete[].
int main(int argc, char **argv)
{
    int id = 0;
    setCudaDevice(id);
    if (argc > 1)
        N = atoi(argv[1]);

    long long *c = new long long[N];
    int bdim, gdim;
    long long *dev_a, *dev_b, *dev_c;
    int *dev_bdim, *dev_gdim;
    cudaMalloc(&dev_a, N * sizeof(long long));
    cudaMalloc(&dev_b, N * sizeof(long long));
    cudaMalloc(&dev_c, N * sizeof(long long));
    cudaMalloc(&dev_bdim, sizeof(int));
    cudaMalloc(&dev_gdim, sizeof(int));

    // add<<<N, 1>>>(dev_a, dev_b, dev_c);
    int threadPerBlock = 256;
    int blockSize = blockPerGrid(N, threadPerBlock);
    add<<<blockSize, threadPerBlock>>>(dev_a, dev_b, dev_c, dev_bdim, dev_gdim);

    // Blocking copies: synchronise with the kernel before reading results.
    cudaMemcpy(c, dev_c, N * sizeof(long long), cudaMemcpyDeviceToHost);
    cudaMemcpy(&bdim, dev_bdim, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&gdim, dev_gdim, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFree(dev_bdim);
    cudaFree(dev_gdim);

    for (int i = 0; i < N; ++i) {
        std::cout << i << ": " << c[i] << std::endl;
    }
    std::cout << "blockDim.x: " << bdim << std::endl;
    std::cout << "gridDim.x: " << gdim << std::endl;
    delete[] c;   // was free(c): freeing new[] storage is UB
    return 0;
}
|
13,470 | #include <stdio.h>
/* add a and b on the device.
* uses pointers because function will run on the device.
*/
/* Device-side scalar add: *res = *a + *b (intended for a 1x1 launch). */
__global__ void device_add(int *a, int *b, int *res)
{
    const int lhs = *a;
    const int rhs = *b;
    *res = lhs + rhs;
}
/* Host driver: computes 2 + 7 on the device and prints the result. */
int main(void)
{
    const int size = sizeof(int);   /* bytes for one integer */
    int a = 2, b = 7, res;          /* host copies */
    int *dev_a, *dev_b, *dev_res;   /* device copies */

    /* allocate device copies of a, b, res */
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_res, size);

    /* ship the inputs to the device */
    cudaMemcpy(dev_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &b, size, cudaMemcpyHostToDevice);

    /* one thread is enough for a scalar add */
    device_add<<<1, 1>>>(dev_a, dev_b, dev_res);

    /* blocking copy: also synchronises with the kernel */
    cudaMemcpy(&res, dev_res, size, cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);
    printf("result: %i\n", res);
    return 0;
}
|
13,471 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
/*
1. three arrays of size 2 ^ 22 randomly initialized.
2. cpu comparision for three arrays sum
3. gpu kernel to sum three arrays.
4. cuda error mechanism.
5. grid is 1D.
6. check with block size - 64, 128, 256, 512.
*/
#define CHECK_ERROR(value) { check_cuda_error((value), __FILE__, __LINE__); }
// Report a failed CUDA call (message, file, line) on stderr; optionally
// terminate the process using the error code as the exit status.
inline void check_cuda_error(cudaError_t error, const char * file, int line, bool abort = true)
{
    if (error == cudaSuccess)
        return;
    fprintf(stderr, "GPUAssert: %s %s %d \n", cudaGetErrorString(error), file, line);
    if (abort)
        exit(error);
}
// GPU device kernel.
// GPU kernel: element-wise d = a + b + c, one element per thread, guarded
// against the partial block at the end of the grid.
__global__ void sum_three_arrays_gpu(int * a, int * b, int * c, int * d, int size)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    d[gid] = a[gid] + b[gid] + c[gid];
}
// CPU implementation.
// Reference CPU implementation of the element-wise three-array sum.
void sum_three_arrays_cpu(int * a, int * b, int * c, int * d, int size)
{
    int i = 0;
    while (i < size) {
        d[i] = a[i] + b[i] + c[i];
        ++i;
    }
}
// True iff a and b agree element-wise over the first `size` entries.
bool compare_two_arrays(int * a, int * b, int size)
{
    for (int i = 0; i < size; i++) {
        if (a[i] != b[i])
            return false;   // first mismatch decides
    }
    return true;
}
// Host driver: sums three random int arrays on CPU and GPU, compares the
// results, and prints timings.
// Fixes over the original:
//  - `rand() && 0xff` used logical AND (always 0 or 1); now `rand() & 0xff`.
//  - all three init loops wrote h_a, leaving h_b and h_c uninitialized;
//    each array now gets its own values.
int main(int argc, char * argv[])
{
    int size = 2 << 22;
    int byte_size = size * sizeof(int);
    int block_size = 256;
    std::cout << "Experiment: Block size used is: " << block_size << std::endl;

    // Create Host arrays.
    int * h_a, *h_b, *h_c, *gpu_results, *cpu_results;
    h_a = (int *)malloc(byte_size);
    h_b = (int *)malloc(byte_size);
    h_c = (int *)malloc(byte_size);
    gpu_results = (int *)malloc(byte_size);
    cpu_results = (int *)malloc(byte_size);

    time_t t;
    srand((unsigned) time(&t));
    // Initialize each input array with random bytes (0..255).
    for (int i = 0; i < size; i++) {
        h_a[i] = rand() & 0xff;
    }
    for (int i = 0; i < size; i++) {
        h_b[i] = rand() & 0xff;
    }
    for (int i = 0; i < size; i++) {
        h_c[i] = rand() & 0xff;
    }
    memset(gpu_results, 0, byte_size);
    memset(cpu_results, 0, byte_size);

    // CPU reference result (timed).
    clock_t cpu_start, cpu_end;
    cpu_start = clock();
    sum_three_arrays_cpu(h_a, h_b, h_c, cpu_results, size);
    cpu_end = clock();

    // Allocate device memory.
    int * d_a, *d_b, *d_c, *d_results;
    CHECK_ERROR(cudaMalloc((int **)&d_a, byte_size));
    CHECK_ERROR(cudaMalloc((int **)&d_b, byte_size));
    CHECK_ERROR(cudaMalloc((int **)&d_c, byte_size));
    CHECK_ERROR(cudaMalloc((int **)&d_results, byte_size));

    // Move memory host to device (timed).
    clock_t htod_start, htod_end;
    htod_start = clock();
    CHECK_ERROR(cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice));
    CHECK_ERROR(cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice));
    CHECK_ERROR(cudaMemcpy(d_c, h_c, byte_size, cudaMemcpyHostToDevice));
    htod_end = clock();

    // Grid and block size for device execution (grid rounded up; the kernel
    // guards against the overhang).
    dim3 block(block_size);
    dim3 grid((size/block.x) + 1);

    // Device execution of the summation (timed).
    clock_t gpu_start, gpu_end;
    gpu_start = clock();
    sum_three_arrays_gpu<<<grid, block>>>(d_a, d_b, d_c, d_results, size);
    cudaDeviceSynchronize();
    gpu_end = clock();

    // Device to host (timed).
    clock_t dtoh_start, dtoh_end;
    dtoh_start = clock();
    CHECK_ERROR(cudaMemcpy(gpu_results, d_results, byte_size, cudaMemcpyDeviceToHost));
    dtoh_end = clock();

    // Compare the results.
    auto result = compare_two_arrays(gpu_results, cpu_results, size);
    if (result) {
        printf("Both the CPU and GPU results match. \n");
    } else {
        printf("Mismatch in CPU and GPU results.\n");
    }

    // Print execution times.
    printf("Sum array CPU execution time : %4.6f \n" , (double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC));
    printf("H to D mem transfer time : %4.6f \n" , (double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC));
    printf("Sum array GPU execution time : %4.6f \n" , (double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC));
    printf("D to H mem transfer time : %4.6f \n" , (double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC));
    printf("Sum array GPU total execution time: %4.6f \n" , (double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC));

    CHECK_ERROR(cudaFree(d_results));
    CHECK_ERROR(cudaFree(d_c));
    CHECK_ERROR(cudaFree(d_b));
    CHECK_ERROR(cudaFree(d_a));
    free(gpu_results);
    free(cpu_results);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaDeviceReset();
    return 0;
}
|
13,472 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
// Kernel: C[i] = PI * A[i], one element per thread.
// Fix: added the `i < n` guard — the host rounds the grid up to a multiple
// of 256 threads, so without the guard the overhang threads read and write
// out of bounds.
__global__
void vecFFTKernel(float* A, float* C, int n){
    // index of the element this thread handles
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        C[i] = PI * A[i];
}
// Host wrapper: allocates device buffers, copies A over, launches
// vecFFTKernel with enough 256-thread blocks to cover n, and copies the
// result back into C.
// Fix: the original computed the block count into `c` and then ignored it,
// recomputing the same expression in the launch; the value is now computed
// once and actually used.
__host__
void vecFFT(float* A,float* C, int n){
    int numBlocks = (int)ceil(n / 256.0);  // round up so every element is covered
    int size = n * sizeof(float);
    float *d_A, *d_C;
    // Allocate device memory for A and C.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_C, size);
    // Copy the input to device memory.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    // Scale on the device (y and z grid dimensions default to 1).
    vecFFTKernel<<< numBlocks, 256 >>>(d_A, d_C, n);
    // Blocking copy back: also synchronises with the kernel.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_A);
    cudaFree(d_C);
}
//Kernal function that runs in each thread
// Host driver: scales the ramp 0..n-1 by PI on the GPU and prints it.
int main(){
    const int n = 32; // must be a power of 2
    float *A = (float*)malloc(n * sizeof(float));
    float *C = (float*)malloc(n * sizeof(float));
    for (int i = 0; i < n; i++)
        A[i] = (float)i;
    vecFFT(A, C, n);
    for (int i = 0; i < n; i++)
        printf("%f ", C[i]);
    free(A);
    free(C);
    return 0;
}
|
13,473 | #include <stdio.h>
#include <stdlib.h>
// Grid-stride element-wise add: c[i] = a[i] + b[i] for i in [0, n).
// NOTE(review): the length n is hard-coded to 16 inside the kernel (the
// "Position 1" comment suggests it was a fill-in-later placeholder), so any
// caller with a different size must change this constant — confirm intent.
__global__ void add(int *a, int *b, int *c) {
// Position 1: To write Code here later
int n = 16;
int index = blockIdx.x * blockDim.x + threadIdx.x ;
int stride = gridDim.x * blockDim.x;
// each thread handles index, index+stride, ... until n is covered
for (int i = index; i < n; i+=stride)
c[i] = a[i] + b[i];
}
/* Host driver: adds two 16-element vectors of ones on the GPU and prints
 * each sum. Fixes over the original: host and device buffers are now
 * released (everything leaked before) and main returns a value. */
int main()
{
    int *a, *b, *c, *da, *db, *dc, N=16, i;
    a = (int*)malloc(sizeof(int)*N); /* host buffers */
    b = (int*)malloc(sizeof(int)*N);
    c = (int*)malloc(sizeof(int)*N);
    /* initialize both inputs to all ones */
    for (i = 0; i < N; i++) {
        a[i] = b[i] = 1;
    }
    cudaMalloc((void **)&da, sizeof(int)*N);
    cudaMalloc((void **)&db, sizeof(int)*N);
    cudaMalloc((void **)&dc, sizeof(int)*N);
    cudaMemcpy(da, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, sizeof(int)*N, cudaMemcpyHostToDevice);
    /* 2 blocks x 4 threads = 8 threads; the kernel's grid-stride loop
     * covers all 16 elements in two passes */
    dim3 dimGrid(N/8, 1, 1);
    dim3 dimBlock(N/4, 1, 1);
    add<<<dimGrid,dimBlock>>>(da, db, dc);
    cudaMemcpy(c, dc, sizeof(int)*N, cudaMemcpyDeviceToHost);
    for (i = 0; i < N; i++) {
        printf("a[%d] + b[%d] = %d\n", i, i, c[i]);
    }
    /* release device and host memory (the original leaked all of it) */
    cudaFree(da); cudaFree(db); cudaFree(dc);
    free(a); free(b); free(c);
    return 0;
}
13,474 | #include <stdio.h>
#include <cuComplex.h>
#define CUDA_KERNEL_LOOP_x(i,n) \
for(int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define CUDA_KERNEL_LOOP_y(j,m) \
for(int j = blockIdx.y * blockDim.y + threadIdx.y; \
j < (m); \
j += blockDim.y * gridDim.y)
// Escape-time iteration for c: iterates z := z^2 + c from z = 0 and returns
// the first step at which |z| exceeds 2, or 0 if c survives all `threshold`
// steps (treated as inside the set).
__device__ int mandelbrot(cuDoubleComplex c, int threshold)
{
    cuDoubleComplex z = make_cuDoubleComplex(0, 0);
    int step = 0;
    while (step < threshold)
    {
        z = cuCadd(cuCmul(z, z), c);
        if (cuCabs(z) > 2)
            return step;
        ++step;
    }
    return 0;
}
// Fill `atlas` (row-major, yn rows by xn columns) with escape counts for
// the rectangle [xmin,xmax) x [ymin,ymax). Both loops are grid-stride (see
// the CUDA_KERNEL_LOOP_x/y macros), so any 2D launch geometry covers the
// whole image.
__global__ void mandelbrot_set(double xmin, double xmax, double ymin, double ymax,
int xn, int yn, int threshold, int *atlas)
{
CUDA_KERNEL_LOOP_y(j, yn)
{
CUDA_KERNEL_LOOP_x(i, xn)
{
// map the pixel (i, j) to a point c in the complex plane
double cx = xmin + i * (xmax - xmin) / xn;
double cy = ymin + j * (ymax - ymin) / yn;
cuDoubleComplex c = make_cuDoubleComplex(cx, cy);
atlas[j * xn + i] = mandelbrot(c, threshold);
}
}
}
// Render a 1920x1080 Mandelbrot tile to MathPic.ppm as grayscale.
// Fixes over the original: the file open is checked, an all-zero atlas no
// longer divides by zero, the pixel byte uses unsigned char (values > 127
// overflowed signed char), and host/device buffers are released.
int main()
{
    const int width = 1920, height = 1080;
    const int npix = width * height;
    int *host_atlas = (int*)malloc(npix * sizeof(int));
    int *device_atlas = nullptr;
    cudaMalloc((void**) &device_atlas, npix * sizeof(int));

    dim3 d(16, 16, 1);   // used for both grid and block; kernel grid-strides
    mandelbrot_set<<<d, d>>>(-0.748768, -0.748718, 0.0650619375, 0.0650900625, width, height, 2048, device_atlas);
    // blocking copy: synchronises with the kernel
    cudaMemcpy(host_atlas, device_atlas, npix * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(device_atlas);

    FILE *fp = fopen("MathPic.ppm","wb");
    if (fp == nullptr)
    {
        free(host_atlas);
        return 1;
    }
    fprintf(fp, "P6\n%d %d\n255\n", width, height);
    // Find the brightest escape count for normalisation.
    int max = 0;
    for (int i = 0; i < npix; i++)
    {
        if (host_atlas[i] > max)
            max = host_atlas[i];
    }
    if (max == 0)
        max = 1;   // all-zero image: avoid dividing by zero below
    for (int i = 0; i < npix; i++)
    {
        unsigned char c = (unsigned char)(host_atlas[i] * 255 / max);
        fwrite(&c, 1, 1, fp);   // same byte for R, G and B -> grayscale
        fwrite(&c, 1, 1, fp);
        fwrite(&c, 1, 1, fp);
    }
    fclose(fp);
    free(host_atlas);
    return 0;
}
|
13,475 | /*************************************************************************
File : lcsCollectActiveParticlesForNewInterval.cu
Author : Mingcheng Chen
Last Update : September 3rd, 2012
**************************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 1024
// Per-particle preprocessing for stream compaction.
// exitCells values below -1 are an offset encoding that is first decoded
// back via -(v + 2); after decoding, -1 marks an inactive particle.
// scanArray receives 1 for active and 0 for inactive particles, ready for
// a prefix-sum pass (performed elsewhere).
// NOTE(review): the < -1 encoding presumably flags freshly-exited
// particles — confirm against the producer of exitCells.
__global__ void InitializeScanArrayKernel(int *exitCells, int *scanArray, int length) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID < length) {
// decode the marker: v < -1 represents cell -(v + 2)
if (exitCells[globalID] < -1) exitCells[globalID] = -(exitCells[globalID] + 2);
scanArray[globalID] = exitCells[globalID] == -1 ? 0 : 1;
}
}
// Scatter step of the compaction: every particle whose exit cell is not -1
// writes its own index into activeParticles at the slot given by
// scanArray (filled by a scan pass elsewhere).
__global__ void CollectActiveParticlesKernel(int *exitCells, int *scanArray, int *activeParticles, int length) {
    int globalID = blockDim.x * blockIdx.x + threadIdx.x;
    if (globalID >= length)
        return;
    if (exitCells[globalID] == -1)
        return;                      // inactive particle: nothing to emit
    activeParticles[scanArray[globalID]] = globalID;
}
extern "C"
// Host wrapper: launches InitializeScanArrayKernel over `length` elements.
// Fix: the original called cudaGetErrorString() and threw the string away,
// then exit(0) — errors were silent and the exit status claimed success.
// The message now goes to stderr and the process exits non-zero.
void InitializeScanArray(int *exitCells, int *scanArray, int length) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);   // ceil(length / BLOCK_SIZE)
    InitializeScanArrayKernel<<<dimGrid, dimBlock>>>(exitCells, scanArray, length);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "InitializeScanArray failed: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
extern "C"
// Host wrapper: launches CollectActiveParticlesKernel over `length` elements.
// Fix: the original discarded the cudaGetErrorString() result and exited
// with status 0 on error; the message is now printed and the exit status
// is non-zero.
void CollectActiveParticles(int *exitCells, int *scanArray, int *activeParticles, int length) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);   // ceil(length / BLOCK_SIZE)
    CollectActiveParticlesKernel<<<dimGrid, dimBlock>>>(exitCells, scanArray, activeParticles, length);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CollectActiveParticles failed: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
|
13,476 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (the file header says
// "Do not modify"): runs a fixed chain of float operations driven by the
// command-line-supplied arguments and prints the final accumulator.
// Left byte-identical on purpose — re-associating any of this float
// arithmetic would change the printed bits.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
if (comp <= (-0.0f * sinhf((+1.5104E-35f + var_3)))) {
float tmp_1 = (var_4 - var_5 * var_6 - var_7);
comp = tmp_1 - (+1.0667E-36f * var_8);
if (comp > var_9 - var_10 + (-1.1912E-37f - var_11)) {
comp = cosf((-1.5333E-37f * -0.0f));
comp += -1.2488E-41f * (-1.2186E-43f - (var_12 * (-1.2041E-42f - var_13 / -1.1994E-44f)));
comp += +1.4314E-43f / +1.2993E-28f + +1.5299E-15f - (var_14 - log10f((var_15 - +1.8415E-37f * -1.3874E-44f - +1.5091E35f)));
comp += -0.0f + -1.8204E-37f;
}
// var_1 iterations overwrite comp with the same value each time
for (int i=0; i < var_1; ++i) {
comp = var_16 - var_17 + (-1.4232E34f + (-1.3987E-44f * -0.0f * +0.0f));
}
for (int i=0; i < var_2; ++i) {
float tmp_2 = +1.1386E-35f + var_19 - +0.0f + var_20 / -1.2414E-37f * +1.5185E-43f;
comp = tmp_2 / (var_21 * (-1.4031E13f + var_22 / (var_23 + var_24)));
var_18[i] = (var_25 / (-1.9526E-24f / +1.8340E-4f));
comp += var_18[i] / (var_26 - (var_27 - -1.7063E-37f * +1.9946E-44f));
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array with every slot set to v.
 * Ownership passes to the caller (free with free()). */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
// Auto-generated driver: converts 28 positional command-line arguments into
// the kernel's parameters (argv[19] becomes a 10-element device-visible-? —
// actually host — float array) and launches a single-thread compute kernel.
// NOTE(review): argv is indexed up to [28] with no argc check, and tmp_19
// is a HOST malloc'd pointer passed to a __global__ kernel — both look like
// deliberate properties of the generated test; confirm before changing.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float* tmp_19 = initPointer( atof(argv[19]) );
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
// single-thread launch; cudaDeviceSynchronize flushes the kernel's printf
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
cudaDeviceSynchronize();
return 0;
}
|
13,477 | #include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define NANO 1000000000
#define PGSIZE 0x1000
#define SUBSIZE 1000
#define BLOCK 8
#define GRID 128
int size;
float *matrixA, *matrixB, *matrixBT, *matrixC_serial, *matrixC_cuda;
// Computes one element of C = A * B within the (x, y)-th n x n sub-tile;
// each thread handles element (i, j) of the full size x size matrices.
// Fixes: threads now also bail out when i or j reaches `size`, so a tile
// extending past the matrix edge cannot read or write out of bounds, and
// the accumulator uses a float literal (0.0f) instead of a double one.
__global__
void cudaMatMul(float *A_d, float *B_d, float *C_d, int x, int y, int n, int size)
{
    int i = n * x + blockIdx.x * blockDim.x + threadIdx.x;
    int j = n * y + blockIdx.y * blockDim.y + threadIdx.y;
    // outside this tile, or outside the matrix entirely: nothing to do
    if (i >= n * (x + 1) || j >= n * (y + 1) || i >= size || j >= size)
        return;
    float value = 0.0f;
    for (int k = 0; k < size; k++)
        value += A_d[i * size + k] * B_d[k * size + j];
    C_d[i * size + j] = value;
    return;
}
// Allocate a size x size float matrix as one flat, row-major buffer.
// NOTE(review): the extra malloc(PGSIZE) below is allocated and never
// freed — presumably a deliberate page-sized spacer between consecutive
// matrices (cache/TLB layout experiment?); confirm before removing it.
float *make_matrix(int size)
{
float *matrix = (float *) malloc(sizeof(float) * size * size);
if (matrix == NULL)
{
perror("malloc");
exit(0);
}
// page-sized spacer allocation (intentionally leaked; see note above)
if (malloc(PGSIZE) == NULL)
{
perror("malloc");
exit(0);
}
return matrix;
}
// Fill a size x size matrix with uniform random values from drand48().
void set_matrix(float *matrix, int size)
{
    for (int row = 0; row < size; row++)
        for (int col = 0; col < size; col++)
            matrix[row * size + col] = (float) drand48();
}
// Print a size x size matrix in MATLAB-style bracket syntax.
// NOTE(review): this takes double*, yet every matrix in this file is
// float* (see make_matrix); the commented-out calls in main() would not
// compile as written. It likely should take const float* — confirm
// before changing the signature.
void print_matrix(double *matrix, int size)
{
printf("[");
for (int i = 0; i < size; i++)
{
for (int j = 0; j < size; j++)
printf(" %f", matrix[i * size + j]);
printf(";");
}
printf(" ]\n");
}
// GPU matrix multiply: copies A and B to the device, launches one kernel
// per SUBSIZE x SUBSIZE tile of the output, and copies C back (the final
// blocking cudaMemcpy also synchronises with the kernels).
// NOTE(review): both tile loops run size / SUBSIZE times with integer
// truncation, so rows/columns beyond the last full 1000-wide tile are
// never computed (e.g. size = 1500 leaves rows 1000..1499 untouched, and
// size < 1000 computes nothing). The bounds presumably should round up —
// confirm, and pair any fix with an i/j < size guard in cudaMatMul.
void cuda_mmul(float *A, float *B, float *C, int size)
{
int mem_size = sizeof(float) * size * size;
float *A_d, *B_d, *C_d;
dim3 dimBlock(BLOCK, BLOCK);
dim3 dimGrid(GRID, GRID);
cudaMalloc((void **) &A_d, mem_size);
cudaMemcpy(A_d, A, mem_size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &B_d, mem_size);
cudaMemcpy(B_d, B, mem_size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &C_d, mem_size);
// one launch per output tile
for (int i = 0; i < size / SUBSIZE; i++)
{
for (int j = 0; j < size / SUBSIZE; j++)
cudaMatMul<<<dimGrid, dimBlock>>>(A_d, B_d, C_d, i, j, SUBSIZE, size);
}
cudaMemcpy(C, C_d, mem_size, cudaMemcpyDeviceToHost);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
// Single-threaded reference multiply: matrixC = matrixA * matrixB.
// Matrix dimensions come from the file-global `size`.
void serial_mmul(float *matrixA, float *matrixB, float *matrixC)
{
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            // accumulate the dot product of row `row` and column `col`
            // (float accumulator: same precision as the original's
            // in-place accumulation into matrixC)
            float acc = 0.0f;
            for (int k = 0; k < size; k++)
                acc += matrixA[row * size + k] * matrixB[k * size + col];
            matrixC[row * size + col] = acc;
        }
    }
}
// Host driver: times GPU vs. serial matrix multiply and verifies results.
// Fix: the original printed "Verification Success." unconditionally, even
// immediately after reporting element mismatches; success is now reported
// only when every element agreed.
int main(int argc, char **argv, char **envp)
{
    int opt;
    struct timespec tstart, tend;
    // -n N sets the matrix dimension ("p" is accepted but unused)
    while ((opt = getopt(argc, argv, "n:p:")) != -1)
    {
        switch (opt)
        {
        case 'n':
            size = atoi(optarg);
            break;
        case '?':
            printf("Usage: %s -n N\n", argv[0]);
            exit(0);
        }
    }
    if (size <= 0)
    {
        printf("Usage: %s -n N\n", argv[0]);
        exit(0);
    }
    matrixA = make_matrix(size);
    matrixB = make_matrix(size);
    matrixC_serial = make_matrix(size);
    matrixC_cuda = make_matrix(size);
    srand48(time(NULL));
    set_matrix(matrixA, size);
    set_matrix(matrixB, size);

    printf("Multi Thread Computation Start\n");
    if (clock_gettime(CLOCK_MONOTONIC, &tstart) == -1)
    {
        perror("clock_gettime");
        exit(0);
    }
    cuda_mmul(matrixA, matrixB, matrixC_cuda, size);
    if (clock_gettime(CLOCK_MONOTONIC, &tend) == -1)
    {
        perror("clock_gettime");
        exit(0);
    }
    long start_nsec = tstart.tv_sec * NANO + tstart.tv_nsec;
    long end_nsec = tend.tv_sec * NANO + tend.tv_nsec;
    double microsec = (end_nsec - start_nsec) / 1000.0;
    printf("Multi Thread Computation End: %.3f us.\n", microsec);

    printf("Single Thread Computation Start\n");
    if (clock_gettime(CLOCK_MONOTONIC, &tstart) == -1)
    {
        perror("clock_gettime");
        exit(0);
    }
    serial_mmul(matrixA, matrixB, matrixC_serial);
    if (clock_gettime(CLOCK_MONOTONIC, &tend) == -1)
    {
        perror("clock_gettime");
        exit(0);
    }
    start_nsec = tstart.tv_sec * NANO + tstart.tv_nsec;
    end_nsec = tend.tv_sec * NANO + tend.tv_nsec;
    microsec = (end_nsec - start_nsec) / 1000.0;
    printf("Single Thread Computation End: %.3f us.\n", microsec);

    // Element-wise comparison; remember whether anything mismatched.
    int ok = 1;
    for (int i = 0; i < size; i++)
    {
        for (int j = 0; j < size; j++)
        {
            if (fabs(matrixC_cuda[i * size + j] - matrixC_serial[i * size + j]) > 1e-3)
            {
                ok = 0;
                printf("Verification Fail.\n");
                printf("(%d, %d): %f - %f\n", i, j, matrixC_cuda[i * size + j], matrixC_serial[i * size + j]);
            }
        }
    }
    if (ok)
        printf("Verification Success.\n");
    // print_matrix(matrixA, size);
    // print_matrix(matrixB, size);
    // print_matrix(matrixC, size);
    return 0;
}
|
13,478 | // HEADERS
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
using namespace std;
// DEFINITIONS
#define NX 192 //was 201
#define PX 192 //was 224
#define NY 192 //was 201
#define PY 192 //was 224
#define NT 401
#define NS 640 //number of sensors
#define BLOCK_X 16
#define BLOCK_Y 16
__constant__ float hx = 0.001f;
__constant__ float hy = 0.001f; // pixel size
__constant__ float h = 0.001f;
/* __constant__ float T = 1.3333e-04f; // 0.2f / 1500.f; */
__constant__ float dt = 3.3333e-07f; // T / 400.f;
/* __constant__ float fre = 125000.f; */
__constant__ float omegac = 7.8540e+05f; // 2.f * pi * fre; // wavelength
__constant__ float tao = 4.0000e-06f; // pi / omegac;
__constant__ float tt = 8.1573e-06f; // sqrtf(6.f * logf(2.f)) * tao; // time delay
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography(int group_size, float target_epsilon, int max_iterations, int ti);
void Position_Transducers (int *&, int *&, int);
void IO_Files(float*, float*, float*, float*);
float norm(float*, int, int);
__global__ void field_setup(const float*, const float*, float*);
__global__ void propagation(int, int, int, int, const float*, float*, int);
__global__ void propagation_at_corners(float*);
__global__ void initial_signal(const float*, float*, float*, float*, float*, int);
__global__ void difference_signal(const float*, const float*, const float*, const float*, const float*, float*, float*, float*, float*, int);
__global__ void backpropagation1(float*, const float*, int);
__global__ void backpropagation2(float*, const float*, const float*, const float*, const float*, int);
__global__ void laplace(const float*, float*);
__global__ void laplace_corners(const float*, float*);
__global__ void update_differential(float*, const float*, const float*, const float*);
__global__ void update_field(float*, const float*, float*, const float*);
// Flattened 3D accessor: returns a reference to element (i, j, k) of a
// buffer laid out x-fastest with plane size nx * ny; the defaults index a
// single PX x PY slab.
template <typename T>
__host__ __device__
T& get(T* ptr, int i, int j = 0, int k = 0, int nx = PX, int ny = PY)
{
    const int offset = i + nx * (j + ny * k);
    return ptr[offset];
}
// MAIN PROGRAM
// MAIN PROGRAM: parses <group size> <epsilon> <max iterations>, runs the
// tomography solver, and reports wall-clock timing in clock ticks/seconds.
int main(int argc, char **argv)
{
    if (argc != 4) {
        cerr << "Usage: " << argv[0] << " <sensor group size> <target epsilon> <max iterations>\n\n";
        exit(1);
    }

    int group_size = stoi(argv[1]);
    float target_epsilon = stof(argv[2]);
    int max_iterations = stoi(argv[3]);
    if (max_iterations == -1)
        max_iterations = numeric_limits<int>::max();   // -1 means "no cap"

    // set floating-point precision on stdout and stderr
    cout << fixed << setprecision(10);
    cerr << fixed << setprecision(10);

    cout << "Ultrasonic Tomography Running:\n\n";

    int ti = clock();                 // start tick
    cout << "ti = " << ti << "\n";
    Ultrasonic_Tomography(group_size, target_epsilon, max_iterations, ti);
    int tf = clock();                 // end tick
    cout << "tf = " << tf << "\n"
         << "tt = " << tf - ti << "\n"
         << "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";

    return 0;
}
// Blocks needed to cover n items with `threads` threads per block.
// Fix: integer ceiling division replaces ceil(float(n) / threads) — the
// float round-trip loses precision once n exceeds 2^24 and can produce a
// grid one block short.
inline int grid_size(int n, int threads)
{
    return (n + threads - 1) / threads;
}
// FUNCTIONS DEFINITION
// Full pipeline: forward-simulates the "true" field fo to record sensor
// signals for each transducer group, then runs the iterative ART
// (Kaczmarz) reconstruction, back-propagating signal residuals until the
// relative error epsilon reaches target_epsilon, diverges, or
// max_iterations is hit.  ti is the caller's start clock() used for the
// per-iteration timing log.
void Ultrasonic_Tomography(int group_size, float target_epsilon, int max_iterations, int ti)
{
// Simulation Variables
float hx = 0.001f;
float hy = 0.001f;
int i = 0, j = 0, k = 0;
// host copies of the grid coordinates and the true field
float *x = new float[PX];
float *y = new float[PY];
float *fo = new float[PX * PY];
dim3 Block_Size(BLOCK_X, BLOCK_Y);
dim3 Grid_Size(grid_size(PX, BLOCK_X), grid_size(PY, BLOCK_Y));
// Variables of allocation
float *dev_x;
int size_x = PX * sizeof(float);
float *dev_y;
int size_y = PY * sizeof(float);
float *dev_fo;
int size_fo = PX * PY * sizeof(float);
// dev_u holds the full space-time wavefield (one plane per time step)
float *dev_u;
int size_u = PX * PY * NT * sizeof(float);
// recorded "true" signals along the four sensor lines, one plane per
// transducer group
float *dev_g_bottom;
float *dev_g_right;
float *dev_g_top;
float *dev_g_left;
int size_g = PX * NT * (NS / group_size) * sizeof(float);
cudaMalloc((void**) &dev_x, size_x);
cudaMalloc((void**) &dev_y, size_y);
cudaMalloc((void**) &dev_fo, size_fo);
cudaMalloc((void**) &dev_u, size_u);
cudaMalloc((void**) &dev_g_bottom, size_g);
cudaMalloc((void**) &dev_g_right, size_g);
cudaMalloc((void**) &dev_g_top, size_g);
cudaMalloc((void**) &dev_g_left, size_g);
// NOTE(review): cudaMemset's value parameter is an int byte value; the
// 0.f arguments convert to 0, which is only correct for zero-fill.
cudaMemset(dev_u, 0.f, size_u);
cudaMemset(dev_g_bottom, 0.f, size_g);
cudaMemset(dev_g_right, 0.f, size_g);
cudaMemset(dev_g_top, 0.f, size_g);
cudaMemset(dev_g_left, 0.f, size_g);
// Environment Initialization
// NOTE(review): only the first NX (resp. NY) of PX (resp. PY) entries
// are initialized; the padded tail is copied to the device
// uninitialized — confirm the kernels never read it.
for(i = 0; i < NX; i++)
{
x[i] = -0.1f + i * hx;
}
for(j = 0; j < NY; j++)
{
y[j] = -0.1f + j * hy;
}
cudaMemcpy(dev_x, x, size_x, cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, size_y, cudaMemcpyHostToDevice);
// build the ground-truth object field on the device and mirror it back
field_setup<<<Grid_Size, Block_Size>>>(dev_x, dev_y, dev_fo);
cudaMemcpy(fo, dev_fo, size_fo, cudaMemcpyDeviceToHost);
// Position of the transducers
// NOTE(review): ii/jj are allocated with malloc() inside
// Position_Transducers but released with delete[] at the end of this
// function — mismatched allocator pair (undefined behavior); align them.
int *ii, *jj;
Position_Transducers(ii, jj, NS);
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(PX, threads_propagation.x),
grid_size(PY, threads_propagation.y));
// Forward simulation: for each transducer group, time-step the wave
// field and record the pristine signals g_* used later as ART targets.
int p;
for(p = 0; p < NS; p += group_size)
{
cudaMemset(dev_u, 0.f, size_u);
// bounding box of the transducers fired in this group
int jp1 = jj[p];
int jp2 = jj[p + group_size - 1];
int ip1 = ii[p];
int ip2 = ii[p + group_size - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for(k = 1; k < NT - 1; k++)
{
propagation<<<grid_propagation, threads_propagation>>>(jp1, jp2, ip1, ip2, dev_fo, dev_u, k);
}
// Four corners
propagation_at_corners<<<NT, 1>>>(dev_u);
// one block per time step, one thread per sensor along a side
initial_signal<<<NT - 2, 159>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, p / group_size);
}
// Kaczmarz method
// propagation
// adjoint-pass buffers: rr_* hold signal residuals along the sensor
// lines, z is the back-propagated adjoint field, Lu the Laplacian of u
float *dev_rr_bottom;
int size_rr_bottom = PX * NT * sizeof(float);
float *dev_rr_right;
int size_rr_right = PX * NT * sizeof(float);
float *dev_rr_top;
int size_rr_top = PX * NT * sizeof(float);
float *dev_rr_left;
int size_rr_left = PX * NT * sizeof(float);
float *dev_z;
int size_z = PX * PY * (NT + 1) * sizeof(float);
float *dev_Lu;
int size_Lu = PX * PY * NT * sizeof(float);
// f is the current field estimate; df its per-iteration update
float *dev_f;
int size_f = PX * PY * sizeof(float);
float *dev_df;
int size_df = PX * PY * sizeof(float);
float *dev_f_minus_fo;
int size_f_minus_fo = PX * PY * sizeof(float);
// Allocation
cudaMalloc((void**) &dev_rr_bottom, size_rr_bottom);
cudaMalloc((void**) &dev_rr_right, size_rr_right);
cudaMalloc((void**) &dev_rr_top, size_rr_top);
cudaMalloc((void**) &dev_rr_left, size_rr_left);
cudaMalloc((void**) &dev_z, size_z);
cudaMalloc((void**) &dev_Lu, size_Lu);
cudaMalloc((void**) &dev_f, size_f);
cudaMalloc((void**) &dev_df, size_df);
cudaMalloc((void**) &dev_f_minus_fo, size_f_minus_fo);
cudaMemset(dev_rr_bottom, 0.f, size_rr_bottom);
cudaMemset(dev_rr_right, 0.f, size_rr_right);
cudaMemset(dev_rr_top, 0.f, size_rr_top);
cudaMemset(dev_rr_left, 0.f, size_rr_left);
cudaMemset(dev_f, 0.f, size_f);
cudaMemset(dev_Lu, 0.f, size_Lu);
float *f = new float[PX * PY];
float *f_minus_fo = new float[PX * PY];
// initialize epsilon values
float prev_epsilon = std::numeric_limits<float>::infinity();
float curr_epsilon = -std::numeric_limits<float>::infinity();
cerr << "writing convergence to 'art_convergence.txt'...\n"
<< "writing time to 'art_time.txt'...\n";
ofstream convergence_file("art_convergence.txt");
ofstream time_file("art_time.txt");
dim3 threads_backpropagation1(NX, 1, 1);
dim3 grid_backpropagation1(
grid_size(PX, threads_backpropagation1.x),
grid_size(PY, threads_backpropagation1.y));
dim3 threads_laplace(96, 2, 1);
dim3 grid_laplace(
grid_size(PX, threads_laplace.x),
grid_size(PY, threads_laplace.y),
grid_size(NT, threads_laplace.z));
dim3 threads_differential(96, 2, 1);
dim3 grid_differential(
grid_size(PX, threads_differential.x),
grid_size(PY, threads_differential.y),
grid_size(NT, threads_differential.z));
// Reconstruction loop: each iteration sweeps every transducer group,
// forward-simulating with the current estimate f, back-propagating the
// residual, and applying the resulting gradient update to f.
for(int iter = 0; iter < max_iterations; iter++)
{
cout << "\nIter: " << iter << "\n";
cudaMemset(dev_u, 0.f, size_u);
for(p = 0; p < NS; p += group_size)
{
int jp1 = jj[p];
int jp2 = jj[p + group_size - 1];
int ip1 = ii[p];
int ip2 = ii[p + group_size - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
// forward pass with the CURRENT estimate dev_f (not dev_fo)
for(k = 1; k < NT - 1; k++)
{
propagation<<<grid_propagation, threads_propagation>>>(jp1, jp2, ip1, ip2, dev_f, dev_u, k);
}
// Four corners
propagation_at_corners<<<NT, 1>>>(dev_u);
// residual = recorded true signal - current simulated signal
difference_signal<<<NT - 2, 159>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, p / group_size);
cudaMemset(dev_z, 0.f, size_z);
// adjoint pass runs backwards in time
for(k = NT - 2; k > 0; k--)
{
backpropagation1<<<grid_backpropagation1, threads_backpropagation1>>>(dev_z, dev_f, k);
backpropagation2<<<NX, 1>>>(dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k);
}
laplace<<<grid_laplace, threads_laplace>>>(dev_u, dev_Lu);
laplace_corners<<<NT, 1>>>(dev_u, dev_Lu);
cudaMemset(dev_df, 0.f, size_df);
update_differential<<<grid_differential, threads_differential>>>(dev_df, dev_z, dev_Lu, dev_f);
update_field<<<Grid_Size, Block_Size>>>(dev_f, dev_df, dev_f_minus_fo, dev_fo);
}
// relative error (percent) of the current estimate vs. the true field
cudaMemcpy(f_minus_fo, dev_f_minus_fo, size_f_minus_fo, cudaMemcpyDeviceToHost);
curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
cout << "epsilon = " << curr_epsilon << "\n";
// stop if reached target epsilon
if (curr_epsilon <= target_epsilon) {
break;
}
// stop if epsilon diverges
if (curr_epsilon > prev_epsilon ||
std::isnan(curr_epsilon)) {
break;
}
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
cout << endl;
// write final reconstruction and inputs to text files
cudaMemcpy(f, dev_f, size_f, cudaMemcpyDeviceToHost);
IO_Files(x, y, fo, f);
// Free Variables
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_fo);
cudaFree(dev_u);
cudaFree(dev_g_bottom);
cudaFree(dev_g_right);
cudaFree(dev_g_top);
cudaFree(dev_g_left);
cudaFree(dev_rr_bottom);
cudaFree(dev_rr_right);
cudaFree(dev_rr_top);
cudaFree(dev_rr_left);
cudaFree(dev_z);
cudaFree(dev_Lu);
cudaFree(dev_f);
cudaFree(dev_df);
cudaFree(dev_f_minus_fo);
delete [] x;
delete [] y;
delete [] fo;
// NOTE(review): see malloc/delete[] mismatch note above for ii/jj
delete [] ii;
delete [] jj;
delete [] f;
delete [] f_minus_fo;
cudaDeviceReset();
}
// Fills ii/jj with the (i, j) grid coordinates of `num` transducers laid
// out around the four sides of the square sensor ring (160 per side:
// top row, right column, bottom row, left column).
// Fixed: the arrays are now allocated with new[] so that the caller's
// delete[] matches (previously malloc() was paired with delete[], which
// is undefined behavior).  Loop bounds are additionally clamped to num
// so a smaller request cannot write out of range.
void Position_Transducers(int *&ii, int *&jj, int num)
{
    ii = new int[num];
    jj = new int[num];
    // top row: j fixed at 181, i sweeps right
    for (int p = 0; p < 160 && p < num; p++) {
        ii[p] = 21 + (p + 1);
        jj[p] = 181;
    }
    // right column: i fixed at 181, j sweeps down
    for (int p = 160; p < 320 && p < num; p++) {
        ii[p] = 181;
        jj[p] = 181 - ((p + 1) - 160);
    }
    // bottom row: j fixed at 21, i sweeps left
    for (int p = 320; p < 480 && p < num; p++) {
        ii[p] = 181 - ((p + 1) - 320);
        jj[p] = 21;
    }
    // left column: i fixed at 21, j sweeps up
    for (int p = 480; p < num; p++) {
        ii[p] = 21;
        jj[p] = 21 + ((p + 1) - 480);
    }
}
// Initializes the "true" object field fo on the simulation grid: a
// circular background region of contrast sc containing three small
// pellets of contrast sp placed on the circle of radius rc (two at
// +/-30 degrees, one at the bottom).  One thread per grid point.
// Fixed: single-precision cosf/sinf replace double cos/sin (the kernel
// is otherwise all-float), powf(x, 2) replaced by multiplication, and
// the large blocks of dead commented-out code were removed.
__global__ void field_setup(const float *x, const float *y, float *fo)
{
    // Map from threadIdx / blockIdx to pixel position
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;

    if ((i < NX) && (j < NY))
    {
        const float rc = 0.015f;                      // main circle radius
        const float rp = 0.005f;                      // pellet radius
        const float sc = 0.03f;                       // circle contrast
        const float sp = 0.05f;                       // pellet contrast
        const float deg2rad = 3.14159265f / 180.f;

        float xi = x[i];
        float yj = y[j];
        float value = 0.f;

        // background circle centered at the origin
        if (xi * xi + yj * yj <= rc * rc)
        {
            value = sc;
        }

        // two pellets at +/-30 degrees on the circle
        float px = rc * cosf(-30.f * deg2rad);
        float py = rc * sinf(30.f * deg2rad);
        float dx1 = xi - px, dy1 = yj - py;
        if (dx1 * dx1 + dy1 * dy1 <= rp * rp)
        {
            value = sp;
        }
        float dx2 = xi + px, dy2 = yj - py;
        if (dx2 * dx2 + dy2 * dy2 <= rp * rp)
        {
            value = sp;
        }

        // third pellet at the bottom of the circle
        float dy3 = yj + rc;
        if (xi * xi + dy3 * dy3 <= rp * rp)
        {
            value = sp;
        }

        get(fo, i, j) = value;
    }
}
// One explicit finite-difference time step of the acoustic wave
// equation: computes u(:, :, k+1) from the slices at k and k-1, where
// the local wave speed is derived from the field f.  Interior points use
// the standard 5-point stencil; grid points inside the transducer box
// [ip1,ip2]x[jp1,jp2] additionally inject a Gaussian-modulated cosine
// source pulse for the first 24 steps; edge points apply a one-sided
// absorbing boundary update.  One thread per grid point.
// NOTE(review): dt, hx, tt, omegac and tao are constants defined
// elsewhere in the file — confirm their values when reviewing stability.
__global__ void propagation(int jp1, int jp2, int ip1, int ip2, const float *f, float *u, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i < NX && j < NY) {
// local wave speed and the ratio r = v*dt/hx used by the stencil
float v = 1500.f * sqrtf(1.f + get(f, i, j));
float r = v * dt / hx;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(get(u, i+1, j, k) +
get(u, i-1, j, k) +
get(u, i, j-1, k) +
get(u, i, j+1, k)) +
s * get(u, i, j, k) -
get(u, i, j, k-1);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * dt - tt;
// add wave value
val +=
v * v * dt * dt *
cosf(omegac * t) *
expf(-(t * t) / (2.f * tao * tao));
}
}
// at boundary
else {
// index variables for different boundary cases
// (A is the first interior neighbor, B the second)
// TODO: need better names
int i_A, i_B, j_A, j_B;
// top boundary
if (j == 0)
{
i_A = i;
i_B = i;
j_A = j + 1;
j_B = j + 2;
}
// bottom boundary
else if (j == NY - 1)
{
i_A = i;
i_B = i;
j_A = j - 1;
j_B = j - 2;
}
// left boundary
else if (i == 0)
{
i_A = i + 1;
i_B = i + 2;
j_A = j;
j_B = j;
}
// right boundary
else
{
i_A = i - 1;
i_B = i - 2;
j_A = j;
j_B = j;
}
// one-sided absorbing update built from the two interior neighbors
val =
(2.f - 2.f * r - r * r) * get(u, i, j, k) +
2.f * r * (1.f + r) * get(u, i_A, j_A, k) -
r * r * get(u, i_B, j_B, k) +
(2.f * r - 1.f) * get(u, i, j, k-1) -
2.f * r * get(u, i_A, j_A, k-1);
}
get(u, i, j, k+1) = val;
}
}
// Fills the four grid corners of every time slice of u with the average
// of their two edge neighbors (the propagation stencil never writes
// corner points).  One thread per time step.
__global__ void propagation_at_corners(float *u)
{
    int t = threadIdx.x + blockIdx.x * blockDim.x;
    if (t >= NT)
        return;

    get(u, 0, 0, t)       = 0.5f * (get(u, 0, 1, t) + get(u, 1, 0, t));
    get(u, NX-1, 0, t)    = 0.5f * (get(u, NX-2, 0, t) + get(u, NX-1, 1, t));
    get(u, 0, NY-1, t)    = 0.5f * (get(u, 0, NY-2, t) + get(u, 1, NY-1, t));
    get(u, NX-1, NY-1, t) = 0.5f * (get(u, NX-2, NY-1, t) + get(u, NX-1, NY-2, t));
}
// Records the pristine forward-simulation signals for transducer group
// p: the traces of u along the four sensor lines (rows/columns 20 and
// 180) are stored into the g_* arrays at plane p.  Launch layout: one
// block per time step (blockIdx.x = k, actual time index k+2), one
// thread per sensor position along a side (threadIdx.x = i, sensors at
// offsets 21..179).
__global__ void initial_signal(const float *u, float *g_bottom, float *g_right, float *g_top, float *g_left, int p)
{
int i = threadIdx.x;
int k = blockIdx.x;
// store values at bottom sensor row of u
get(g_bottom, i+21, k+2, p, PX, NT) =
get(u, i+21, 180, k+2);
// store values at top sensor row of u
get(g_top, i+21, k+2, p, PX, NT) =
get(u, i+21, 20, k+2);
// store values at right sensor column of u
get(g_right, i+21, k+2, p, PX, NT) =
get(u, 180, i+21, k+2);
// store values at left sensor column of u
get(g_left, i+21, k+2, p, PX, NT) =
get(u, 20, i+21, k+2);
}
// Computes the ART residuals for transducer group p: for each sensor
// position and time step, rr_* = (recorded true signal g_*) - (current
// simulated signal along the matching sensor line of u).  Same launch
// layout as initial_signal: blockIdx.x = k (time index k+2),
// threadIdx.x = i (sensors at offsets 21..179).
__global__ void difference_signal(const float *u, const float *g_bottom, const float *g_right, const float *g_top, const float *g_left, float *rr_bottom, float *rr_right, float *rr_top, float *rr_left, int p)
{
int i = threadIdx.x;
int k = blockIdx.x;
// store difference at time k+2 of original signal
// and current signal at bottom sensor row
get(rr_bottom, i+21, k+2) =
get(g_bottom, i+21, k+2, p, PX, NT) -
get(u, i+21, 180, k+2);
// store difference at time k+2 of original signal
// and current signal at top sensor row
get(rr_top, i+21, k+2) =
get(g_top, i+21, k+2, p, PX, NT) -
get(u, i+21, 20, k+2);
// store difference at time k+2 of original signal
// and current signal at right sensor column
get(rr_right, i+21, k+2) =
get(g_right, i+21, k+2, p, PX, NT) -
get(u, 180, i+21, k+2);
// store difference at time k+2 of original signal
// and current signal at left sensor column
get(rr_left, i+21, k+2) =
get(g_left, i+21, k+2, p, PX, NT) -
get(u, 20, i+21, k+2);
}
// Adjoint (back-propagation) time step for interior grid points:
// computes z(:,:,k) from z at time steps k+1 and k+2, using a
// Laplacian-like stencil whose terms are weighted by (1 + f) at the
// neighbor positions.  Called with k decreasing from NT-2 to 1.
// NOTE(review): h and dt are constants defined elsewhere in the file.
__global__ void backpropagation1(float *z, const float *f, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1))
{
get(z, i, j, k) =
1500.f * 1500.f * (dt * dt) *
((1.f + get(f, i, j-1)) * get(z, i, j-1, k+1) +
(1.f + get(f, i, j+1)) * get(z, i, j+1, k+1) +
(1.f + get(f, i-1, j)) * get(z, i-1, j, k+1) +
(1.f + get(f, i+1, j)) * get(z, i+1, j, k+1) -
4.f * (1.f + get(f, i, j)) *
get(z, i, j, k+1)) / (h * h) +
2.f * get(z, i, j, k+1) -
get(z, i, j, k+2);
}
}
// Boundary handling for the adjoint field z at time step k: first
// injects the sensor residuals rr_* along the four sensor lines
// (rows/columns 20 and 180, sensor offsets 21..179), then applies
// replicated (zero-gradient) outer boundaries, and finally averages the
// four outer corners.  Launched with one thread per line index i.
__global__ void backpropagation2(float *z, const float *rr_bottom, const float *rr_right, const float *rr_top, const float *rr_left, int k)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
// sensor lines: propagate the residual inward with a scaled jump
if(i >= 21 && i < 180) {
get(z, i, 180, k) =
get(z, i, 179, k) +
get(rr_bottom, i, k) * h * 1000.f;
get(z, i, 20, k) =
get(z, i, 21, k) +
get(rr_top, i, k) * h * 1000.f;
get(z, 180, i, k) =
get(z, 179, i, k) +
get(rr_right, i, k) * h * 1000.f;
get(z, 20, i, k) =
get(z, 21, i, k) +
get(rr_left, i, k) * h * 1000.f;
}
// replicated outer boundary rows/columns
if (i >= 1 && i < (NX - 1)) {
get(z, i, 0, k) =
get(z, i, 1, k);
get(z, i, NY-1, k) =
get(z, i, NY-2, k);
get(z, 0, i, k) =
get(z, 1, i, k);
get(z, NX-1, i, k) =
get(z, NX-2, i, k);
}
// thread 0 alone fixes the four corners (average of edge neighbors)
else if (i == 0) {
get(z, 0, 0, k) =
(get(z, 1, 0, k) +
get(z, 0, 1, k)) / 2.f;
get(z, NX-1, 0, k) =
(get(z, NX-2, 0, k) +
get(z, NX-1, 1, k)) / 2.f;
get(z, 0, NY-1, k) =
(get(z, 1, NY-1, k) +
get(z, 0, NY-2, k)) / 2.f;
get(z, NX-1, NY-1, k) =
(get(z, NX-2, NY-1, k) +
get(z, NX-1, NY-2, k)) / 2.f;
}
}
// Computes Lu = discrete 5-point Laplacian of u for every time slice
// k+1 (k = 0 .. NT-2), replicating neighbor values at the grid edges
// (clamped indices).  One thread per (i, j, k) point.
__global__ void laplace(const float *u, float *Lu)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int k = threadIdx.z + blockIdx.z * blockDim.z;

    if (i >= NX || j >= NY || (k + 1) >= NT)
        return;

    // clamp neighbor indices so edge points reuse their own value
    int jm = (j > 0) ? j - 1 : j;
    int jp = (j < NY - 1) ? j + 1 : j;
    int im = (i > 0) ? i - 1 : i;
    int ip = (i < NX - 1) ? i + 1 : i;

    get(Lu, i, j, k+1) =
        (get(u, i, jm, k+1) +
         get(u, i, jp, k+1) +
         get(u, im, j, k+1) +
         get(u, ip, j, k+1) -
         4.f * get(u, i, j, k+1)) / (h * h);
}
// Fixes up the four corners of each Laplacian time slice by averaging
// the two adjacent edge values already produced by laplace().  One
// thread per time step.  (Parameter u is unused but kept for the
// existing call sites.)
__global__ void laplace_corners(const float *u, float *Lu)
{
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if ((k + 1) >= NT)
        return;

    get(Lu, 0, 0, k+1) =
        (get(Lu, 1, 0, k+1) + get(Lu, 0, 1, k+1)) / 2.f;
    get(Lu, NX-1, 0, k+1) =
        (get(Lu, NX-2, 0, k+1) + get(Lu, NX-1, 1, k+1)) / 2.f;
    get(Lu, 0, NY-1, k+1) =
        (get(Lu, 1, NY-1, k+1) + get(Lu, 0, NY-2, k+1)) / 2.f;
    get(Lu, NX-1, NY-1, k+1) =
        (get(Lu, NX-2, NY-1, k+1) + get(Lu, NX-1, NY-2, k+1)) / 2.f;
}
// Accumulates the ART update integrand over time into df:
// df(i,j) += z(i,j,k+1) * Lu(i,j,k+1) / (1 + f(i,j)).
// atomicAdd is required because every thread along the k axis targets
// the same (i, j) cell of df.
__global__ void update_differential(float *df, const float *z, const float *Lu, const float *f)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int k = threadIdx.z + blockIdx.z * blockDim.z;

    if (i >= NX || j >= NY || (k + 1) >= NT)
        return;

    float contribution =
        get(z, i, j, k+1) *
        get(Lu, i, j, k+1) /
        (1.f + get(f, i, j));
    atomicAdd(&get(df, i, j), contribution);
}
// Applies one gradient step to the field estimate f, scaled by 20000 and
// masked (multiplicatively, via alpha) to the interior region enclosed
// by the sensor ring; also records the error image f - fo used for the
// convergence norm.
__global__ void update_field(float *f, const float *df, float *f_minus_fo, const float *fo)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;

    if (i >= NX || j >= NY)
        return;

    // alpha = 1 inside the sensor ring, 0 outside; kept as a multiply
    // (not a branch) to match the original arithmetic exactly
    bool inside = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
    float alpha = inside ? 1.f : 0.f;

    get(f, i, j) += 20000.f * alpha * get(df, i, j);
    get(f_minus_fo, i, j) = get(f, i, j) - get(fo, i, j);
}
// Writes the grid coordinates and the true/reconstructed fields to text
// files: dev_x.txt and dev_y.txt (one value per line), dev_f0.txt and
// dev_f.txt (one grid row per line, values space-separated).
// Fixed: the y-array loop previously ran to NX instead of NY, writing
// the wrong number of entries whenever NX != NY (and reading past the
// initialized range).
void IO_Files(float *x, float *y, float *fo, float *f)
{
    ofstream x_file, y_file;
    ofstream fo_file;
    ofstream f_file;

    cerr << "writing x to 'dev_x.txt'...\n"
         << "writing y to 'dev_y.txt'...\n"
         << "writing f0 to 'dev_f0.txt'...\n"
         << "writing f to 'dev_f.txt'...\n\n";

    x_file.open("dev_x.txt");
    y_file.open("dev_y.txt");
    fo_file.open("dev_f0.txt");
    f_file.open("dev_f.txt");

    for (int i = 0; i < NX; i++) {
        x_file << x[i];
        x_file << "\n";
    }

    // y has NY entries (bug fix: loop bound was NX)
    for (int j = 0; j < NY; j++) {
        y_file << y[j];
        y_file << "\n";
    }

    for (int j = 0; j < NY; j++) {
        for (int i = 0; i < NX; i++) {
            fo_file << get(fo, i, j);
            fo_file << " ";
        }
        fo_file << "\n";
    }

    for (int j = 0; j < NY; j++) {
        for (int i = 0; i < NX; i++) {
            f_file << get(f, i, j);
            f_file << " ";
        }
        f_file << "\n";
    }

    x_file.close();
    y_file.close();
    fo_file.close();
    f_file.close();
}
// Euclidean (Frobenius) norm of the nx-by-ny field A.  Accumulation
// order (row-major, j outer) matches the original so the float result
// is bit-identical.
float norm(float *A, int nx, int ny)
{
    float acc = 0;
    for (int j = 0; j < ny; ++j) {
        for (int i = 0; i < nx; ++i) {
            float v = get(A, i, j);
            acc += v * v;
        }
    }
    return sqrtf(acc);
}
|
#include <stdlib.h>
#include <memory.h>
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>
#define uint8 unsigned char
#define uint32 unsigned long int
/****************************** MACROS ******************************/
#define MD5_BLOCK_SIZE 16 // MD5 outputs a 16 byte digest
/**************************** DATA TYPES ****************************/
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word, change to "long" for 16-bit machines
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[4];
} CUDA_MD5_CTX;
/****************************** MACROS ******************************/
#ifndef ROTLEFT
#define ROTLEFT(a,b) ((a << b) | (a >> (32-b)))
#endif
#define F(x,y,z) ((x & y) | (~x & z))
#define G(x,y,z) ((x & z) | (y & ~z))
#define H(x,y,z) (x ^ y ^ z)
#define I(x,y,z) (y ^ (x | ~z))
#define FF(a,b,c,d,m,s,t) { a += F(b,c,d) + m + t; \
a = b + ROTLEFT(a,s); }
#define GG(a,b,c,d,m,s,t) { a += G(b,c,d) + m + t; \
a = b + ROTLEFT(a,s); }
#define HH(a,b,c,d,m,s,t) { a += H(b,c,d) + m + t; \
a = b + ROTLEFT(a,s); }
#define II(a,b,c,d,m,s,t) { a += I(b,c,d) + m + t; \
a = b + ROTLEFT(a,s); }
/*********************** FUNCTION DEFINITIONS ***********************/
// Core MD5 compression function (RFC 1321): mixes one 64-byte block
// `data` into the running digest ctx->state using the four rounds
// F, G, H, I (16 steps each).  The per-step constants and rotation
// amounts are the fixed values from the specification.
__device__ __host__ inline void cuda_md5_transform(CUDA_MD5_CTX* ctx, const BYTE data[])
{
WORD a, b, c, d, m[16], i, j;
// MD5 specifies big endian byte order, but this implementation assumes a little
// endian byte order CPU. Reverse all the bytes upon input, and re-reverse them
// on output (in md5_final()).
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j]) + (data[j + 1] << 8) + (data[j + 2] << 16) + (data[j + 3] << 24);
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
// Round 1 (F)
FF(a, b, c, d, m[0], 7, 0xd76aa478);
FF(d, a, b, c, m[1], 12, 0xe8c7b756);
FF(c, d, a, b, m[2], 17, 0x242070db);
FF(b, c, d, a, m[3], 22, 0xc1bdceee);
FF(a, b, c, d, m[4], 7, 0xf57c0faf);
FF(d, a, b, c, m[5], 12, 0x4787c62a);
FF(c, d, a, b, m[6], 17, 0xa8304613);
FF(b, c, d, a, m[7], 22, 0xfd469501);
FF(a, b, c, d, m[8], 7, 0x698098d8);
FF(d, a, b, c, m[9], 12, 0x8b44f7af);
FF(c, d, a, b, m[10], 17, 0xffff5bb1);
FF(b, c, d, a, m[11], 22, 0x895cd7be);
FF(a, b, c, d, m[12], 7, 0x6b901122);
FF(d, a, b, c, m[13], 12, 0xfd987193);
FF(c, d, a, b, m[14], 17, 0xa679438e);
FF(b, c, d, a, m[15], 22, 0x49b40821);
// Round 2 (G)
GG(a, b, c, d, m[1], 5, 0xf61e2562);
GG(d, a, b, c, m[6], 9, 0xc040b340);
GG(c, d, a, b, m[11], 14, 0x265e5a51);
GG(b, c, d, a, m[0], 20, 0xe9b6c7aa);
GG(a, b, c, d, m[5], 5, 0xd62f105d);
GG(d, a, b, c, m[10], 9, 0x02441453);
GG(c, d, a, b, m[15], 14, 0xd8a1e681);
GG(b, c, d, a, m[4], 20, 0xe7d3fbc8);
GG(a, b, c, d, m[9], 5, 0x21e1cde6);
GG(d, a, b, c, m[14], 9, 0xc33707d6);
GG(c, d, a, b, m[3], 14, 0xf4d50d87);
GG(b, c, d, a, m[8], 20, 0x455a14ed);
GG(a, b, c, d, m[13], 5, 0xa9e3e905);
GG(d, a, b, c, m[2], 9, 0xfcefa3f8);
GG(c, d, a, b, m[7], 14, 0x676f02d9);
GG(b, c, d, a, m[12], 20, 0x8d2a4c8a);
// Round 3 (H)
HH(a, b, c, d, m[5], 4, 0xfffa3942);
HH(d, a, b, c, m[8], 11, 0x8771f681);
HH(c, d, a, b, m[11], 16, 0x6d9d6122);
HH(b, c, d, a, m[14], 23, 0xfde5380c);
HH(a, b, c, d, m[1], 4, 0xa4beea44);
HH(d, a, b, c, m[4], 11, 0x4bdecfa9);
HH(c, d, a, b, m[7], 16, 0xf6bb4b60);
HH(b, c, d, a, m[10], 23, 0xbebfbc70);
HH(a, b, c, d, m[13], 4, 0x289b7ec6);
HH(d, a, b, c, m[0], 11, 0xeaa127fa);
HH(c, d, a, b, m[3], 16, 0xd4ef3085);
HH(b, c, d, a, m[6], 23, 0x04881d05);
HH(a, b, c, d, m[9], 4, 0xd9d4d039);
HH(d, a, b, c, m[12], 11, 0xe6db99e5);
HH(c, d, a, b, m[15], 16, 0x1fa27cf8);
HH(b, c, d, a, m[2], 23, 0xc4ac5665);
// Round 4 (I)
II(a, b, c, d, m[0], 6, 0xf4292244);
II(d, a, b, c, m[7], 10, 0x432aff97);
II(c, d, a, b, m[14], 15, 0xab9423a7);
II(b, c, d, a, m[5], 21, 0xfc93a039);
II(a, b, c, d, m[12], 6, 0x655b59c3);
II(d, a, b, c, m[3], 10, 0x8f0ccc92);
II(c, d, a, b, m[10], 15, 0xffeff47d);
II(b, c, d, a, m[1], 21, 0x85845dd1);
II(a, b, c, d, m[8], 6, 0x6fa87e4f);
II(d, a, b, c, m[15], 10, 0xfe2ce6e0);
II(c, d, a, b, m[6], 15, 0xa3014314);
II(b, c, d, a, m[13], 21, 0x4e0811a1);
II(a, b, c, d, m[4], 6, 0xf7537e82);
II(d, a, b, c, m[11], 10, 0xbd3af235);
II(c, d, a, b, m[2], 15, 0x2ad7d2bb);
II(b, c, d, a, m[9], 21, 0xeb86d391);
// add this block's mix back into the chaining state
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
}
// Resets an MD5 context to the RFC 1321 initial chaining values with an
// empty message buffer.
__device__ __host__ inline void cuda_md5_init(CUDA_MD5_CTX* ctx)
{
    ctx->state[0] = 0x67452301;
    ctx->state[1] = 0xEFCDAB89;
    ctx->state[2] = 0x98BADCFE;
    ctx->state[3] = 0x10325476;
    ctx->bitlen = 0;
    ctx->datalen = 0;
}
// Streams `len` bytes of `data` into the context, running the
// compression function each time the 64-byte internal buffer fills.
__device__ __host__ inline void cuda_md5_update(CUDA_MD5_CTX* ctx, const BYTE data[], size_t len)
{
    for (size_t i = 0; i < len; ++i) {
        ctx->data[ctx->datalen++] = data[i];
        if (ctx->datalen == 64) {
            cuda_md5_transform(ctx, ctx->data);
            ctx->bitlen += 512;
            ctx->datalen = 0;
        }
    }
}
// Finalizes the digest: pads the remaining buffered bytes per RFC 1321
// (0x80 marker, zero fill, 64-bit little-endian message length),
// compresses the final block(s), and serializes the chaining state into
// the 16-byte output hash.
__device__ __host__ inline void cuda_md5_final(CUDA_MD5_CTX* ctx, BYTE hash[])
{
size_t i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
// the length field still fits in this block
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else if (ctx->datalen >= 56) {
// no room for the length field: pad out, compress, and start a
// fresh zeroed block for the length
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
cuda_md5_transform(ctx, ctx->data);
memset(ctx->data, 0, 56);
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[56] = ctx->bitlen;
ctx->data[57] = ctx->bitlen >> 8;
ctx->data[58] = ctx->bitlen >> 16;
ctx->data[59] = ctx->bitlen >> 24;
ctx->data[60] = ctx->bitlen >> 32;
ctx->data[61] = ctx->bitlen >> 40;
ctx->data[62] = ctx->bitlen >> 48;
ctx->data[63] = ctx->bitlen >> 56;
cuda_md5_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (i * 8)) & 0x000000ff;
}
}
// One-shot convenience wrapper: hashes `length` bytes of `msg` into the
// 16-byte digest `md5` (init + update + final).
__device__ __host__ inline void md5new(uint8* msg, uint8 length, uint8 md5[16]) {
    CUDA_MD5_CTX context;
    cuda_md5_init(&context);
    cuda_md5_update(&context, msg, length);
    cuda_md5_final(&context, md5);
}
/*
__global__ void kernel_md5_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch)
{
WORD thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread >= n_batch)
{
return;
}
BYTE* in = indata + thread * inlen;
BYTE* out = outdata + thread * MD5_BLOCK_SIZE;
CUDA_MD5_CTX ctx;
cuda_md5_init(&ctx);
cuda_md5_update(&ctx, in, inlen);
cuda_md5_final(&ctx, out);
}
extern "C"
{
void mcm_cuda_md5_hash_batch(BYTE* in, WORD inlen, BYTE* out, WORD n_batch)
{
BYTE* cuda_indata;
BYTE* cuda_outdata;
cudaMalloc(&cuda_indata, inlen * n_batch);
cudaMalloc(&cuda_outdata, MD5_BLOCK_SIZE * n_batch);
cudaMemcpy(cuda_indata, in, inlen * n_batch, cudaMemcpyHostToDevice);
WORD thread = 256;
WORD block = (n_batch + thread - 1) / thread;
kernel_md5_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch);
cudaMemcpy(out, cuda_outdata, MD5_BLOCK_SIZE * n_batch, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Error cuda md5 hash: %s \n", cudaGetErrorString(error));
}
cudaFree(cuda_indata);
cudaFree(cuda_outdata);
}
}
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define BLOCKSIZE 512
// Radial acceleration at radius r: scans the shell radii rp[] for the
// first shell enclosing r, takes the cumulative interior mass cm[i-1],
// and returns -M/r^2 plus the J2/r^3 correction term.
// Fixed: the search condition previously evaluated rp[i] BEFORE the
// bounds check (r > rp[i]) && (i < N), reading rp[N] out of bounds
// whenever r exceeds every shell radius; the conjunction order now
// short-circuits on i < N first.
__device__ float gpu_compute_acc(float r, float J2, float *rp, float *cm, int N)
{
    float r2 = r * r;
    int i = 0;
    // bounds check first so rp[i] is never read out of range
    while ((i < N) && (r > rp[i]))
        ++i;
    float M = 0.0f;
    if (i > 0)
        M = cm[i - 1];
    return -M / r2 + J2 / (r * r2);
}
// Classical 4th-order Runge-Kutta integrator: advances particle i's
// radius r[i] and radial velocity vr[i] from t = 0 to tend in fixed
// steps of dt, with acceleration from gpu_compute_acc.  One thread per
// particle; t is per-particle scratch for the local clock.
// NOTE(review): the 0.5/2.0/6.0 literals are doubles, promoting the
// stage arithmetic to double precision inside an otherwise float
// kernel — confirm whether 0.5f/2.0f/6.0f were intended.
// NOTE(review): there is no grid-stride loop, so particles beyond
// gridDim.x * blockDim.x threads are never integrated — verify the
// launch configuration always covers N.
__global__ void gpu_rk4(float *r, float *vr, float *J2, float *rp, float *cm, float *t, int N, float dt, float tend)
{
float kr[4], kv[4]; // RK stage slopes: kr for position, kv for velocity
float r0, v0, r1;
// float* t = (float *)malloc(N);
// float* t = new float[N]; // (float *)malloc(N);
int i;
i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N)
{
t[i] = 0;
while (t[i] < tend)
{
r0 = r[i];
v0 = vr[i];
// Step 1
r1 = r0;
kr[0] = v0 ;
kv[0] = gpu_compute_acc(r1, J2[i], rp, cm, N);
// Step 2
r1 = r0 + 0.5*dt*kr[0];
kr[1] = v0 + 0.5*dt*kv[0];
kv[1] = gpu_compute_acc(r1, J2[i], rp, cm, N);
// Step 3
r1 = r0 + 0.5*dt*kr[1];
kr[2] = v0 + 0.5*dt*kv[1];
kv[2] = gpu_compute_acc(r1, J2[i], rp, cm, N);
// Step 4
r1 = r0 + dt*kr[2];
kr[3] = v0 + dt*kv[2];
kv[3] = gpu_compute_acc(r1, J2[i], rp, cm, N);
// weighted RK4 combination of the four stages
r[i] = r0 + dt*(kr[0] + 2.0*kr[1] + 2.0*kr[2] + kr[3])/6.0;
vr[i] = v0 + dt*(kv[0] + 2.0*kv[1] + 2.0*kv[2] + kv[3])/6.0;
t[i] += dt;
}
}
}
// Host wrapper: copies the particle state (r, vr, J2) and the mass model
// (rp, cm) to the device, integrates every particle with gpu_rk4 until
// tend, and copies position/velocity back.  d_t is device scratch for
// the per-particle clock and is not read back.
extern "C" void rk4(float *r, float *vr, float *J2, float *rp, float *cm, int N, float dt, float tend)
{
    size_t bytes = sizeof(float) * N;
    float *d_r, *d_vr, *d_J2, *d_rp, *d_cm, *d_t;

    cudaMalloc(&d_r,  bytes);
    cudaMalloc(&d_vr, bytes);
    cudaMalloc(&d_J2, bytes);
    cudaMalloc(&d_rp, bytes);
    cudaMalloc(&d_cm, bytes);
    cudaMalloc(&d_t,  bytes);

    cudaMemcpy(d_r,  r,  bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vr, vr, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_J2, J2, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rp, rp, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_cm, cm, bytes, cudaMemcpyHostToDevice);

    gpu_rk4 <<< 256, BLOCKSIZE >>>(d_r, d_vr, d_J2, d_rp, d_cm, d_t, N, dt, tend);

    // blocking device-to-host copies also synchronize with the kernel
    cudaMemcpy(r,  d_r,  bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(vr, d_vr, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_r);
    cudaFree(d_vr);
    cudaFree(d_J2);
    cudaFree(d_rp);
    cudaFree(d_cm);
    cudaFree(d_t);
}
|
#include <stdio.h>
#define LOG_BANK_SIZE 5
#define BLOCK_SIZE 256
#define LOG_BLOCK_SIZE 8
#define BLOCK_NUM 8
#define IDX(n) (n + ((n) >> LOG_BANK_SIZE))
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(1); \
} \
} while (0)
// Smallest BLOCK_SIZE * 2^k that is >= n: starts at BLOCK_SIZE and
// doubles until n is covered (used to size power-of-two scan buffers).
int getNearPow2(int n) {
    int m;
    for (m = BLOCK_SIZE; m < n; m <<= 1) {
        // keep doubling
    }
    return m;
}
// Extracts bit `shift` of every input word into key[] as 0/1, using a
// grid-stride loop so any launch size covers all n elements.
__global__ void makeKey(uint* in, uint* key, int n, int shift) {
    int stride = gridDim.x * blockDim.x;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < n) {
        key[i] = (in[i] >> shift) & 1;
        i += stride;
    }
}
// Work-efficient (Blelchoch-style up-sweep/down-sweep) exclusive prefix
// scan over BLOCK_SIZE-element tiles of `in`, writing the scanned tile
// to `out` and the tile total to s[tile].  Each thread handles two
// elements; the IDX() padding spreads shared-memory accesses across
// banks.  Blocks persist: the outer while loop walks each block across
// multiple tiles until gn elements are covered.  Requires the dynamic
// shared allocation passed at launch to hold one padded tile.
__global__ void scan(uint* in, uint* out, uint* s, int gn) {
volatile extern __shared__ uint temp[];
int tid = threadIdx.x;
int shift = BLOCK_SIZE * blockIdx.x;
int n = BLOCK_SIZE;
while (shift < gn) {
int offset = 1;
// each thread loads two elements of the tile
int ai = tid;
int bi = tid + (n / 2);
temp[IDX(ai)] = in[ai + shift];
temp[IDX(bi)] = in[bi + shift];
// up-sweep (reduce) phase: build partial sums in place
for (int d = n >> 1; d > 0; d >>= 1) {
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
temp[IDX(bi)] += temp[IDX(ai)];
}
offset *= 2;
}
// record the tile total and clear the root for the exclusive scan
if (tid == 0) {
s[shift / BLOCK_SIZE] = temp[IDX(n - 1)];
temp[IDX(n - 1)] = 0;
}
// down-sweep phase: distribute the partial sums back down the tree
for (int d = 1; d < n; d <<= 1) {
offset >>= 1;
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
uint t = temp[IDX(ai)];
temp[IDX(ai)] = temp[IDX(bi)];
temp[IDX(bi)] += t;
}
}
__syncthreads();
// write back this thread's two scanned elements
out[ai + shift] = temp[IDX(ai)];
out[bi + shift] = temp[IDX(bi)];
shift += gridDim.x * BLOCK_SIZE;
}
}
// Adds the scanned per-tile totals back onto the tile-local scans:
// element idx receives the prefix of all tiles before its own
// (idx >> LOG_BLOCK_SIZE).  Grid-stride loop over n elements.
__global__ void sum(uint* in, uint* s, int n) {
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += stride) {
        in[idx] += s[idx >> LOG_BLOCK_SIZE];
    }
}
// Stable split step of the binary radix sort: given s = exclusive scan
// of the per-element bit keys, elements with bit `mask` clear scatter
// to position i - s[i] (their rank among zeros), and elements with the
// bit set scatter after `beg`, the total count of zeros.  beg is
// recovered from the last scan entry plus the last element's own bit.
__global__ void swap(uint* in, uint* out, uint* s, int n, uint mask) {
int beg = n - (s[n - 1] + !!(in[n - 1] & mask));
int offset = gridDim.x * blockDim.x;
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += offset) {
if ((in[i] & mask) == 0) {
out[i - s[i]] = in[i];
}
else {
out[beg + s[i]] = in[i];
}
}
}
uint** scanArr;
// Recursive large-array exclusive scan: scans dev_in tile-by-tile into
// dev_s, storing per-tile totals in scanArr[k]; when more than one tile
// exists, recursively scans those totals (consuming two scratch buffers
// per level, hence k + 2) and adds them back with sum().  scanArr is the
// globally pre-allocated scratch-buffer pool.
void largeScan(uint* dev_in, uint* dev_s, int n, int k) {
int sz = getNearPow2(n >> LOG_BLOCK_SIZE);
scan<<<BLOCK_NUM, BLOCK_SIZE / 2, sizeof(uint) * BLOCK_NUM * (BLOCK_SIZE + (BLOCK_SIZE >> LOG_BANK_SIZE)) >>> (dev_in, dev_s, scanArr[k], n);
if (n > BLOCK_SIZE) {
largeScan(scanArr[k], scanArr[k + 1], sz, k + 2);
sum<<<32, 32>>>(dev_s, scanArr[k + 1], n);
}
}
// One pass of binary radix sort on bit i: builds the 0/1 keys of dev_in
// into dev_out, exclusive-scans them into dev_s, then stable-partitions
// dev_in into dev_out by the bit value (zeros first).
void SortBit(uint* dev_in, uint* dev_out, uint* dev_s, int n, int i) {
makeKey<<<32, 32>>>(dev_in, dev_out, n, i);
largeScan(dev_out, dev_s, n, 0);
swap<<<32, 32>>>(dev_in, dev_out, dev_s, n, 1u << i);
}
// Reads a binary stream <n><n uints> from stdin, radix-sorts it on the
// GPU (one bit per pass, 32 passes ping-ponging between two device
// buffers), and writes the n sorted words to stdout.
// Fixed: both fread() return values were previously ignored, so a
// truncated or empty input left n (and the array contents) used
// uninitialized; malformed input now exits cleanly.
int main() {
    int n;
    // validate the header read; n <= 0 means nothing to sort
    if (fread(&n, sizeof(int), 1, stdin) != 1 || n <= 0) return 0;
    int sz = getNearPow2(n);
    uint* ar = (uint*)malloc(sz * sizeof(uint));
    // validate the payload read
    if (fread(ar, sizeof(uint), n, stdin) != (size_t)n) {
        free(ar);
        return 0;
    }
    // pad to a power of two with the maximum value so padding sorts last
    for (int i = n; i < sz; ++i) {
        ar[i] = 0u - 1;
    }
    // count recursion levels needed by largeScan and pre-allocate its
    // scratch buffers (two per level plus one terminal buffer)
    int tmp;
    int cnt = 0;
    for (tmp = sz; tmp > BLOCK_SIZE;) {
        cnt++;
        tmp = getNearPow2(tmp / BLOCK_SIZE);
    }
    scanArr = (uint**)malloc((2 * cnt + 1) * sizeof(uint*));
    tmp = sz;
    for (int i = 0; tmp > BLOCK_SIZE; i += 2) {
        tmp = getNearPow2(tmp / BLOCK_SIZE);
        CSC(cudaMalloc(&(scanArr[i]), sizeof(uint) * tmp));
        CSC(cudaMalloc(&(scanArr[i + 1]), sizeof(uint) * tmp));
    }
    CSC(cudaMalloc(&(scanArr[2 * cnt]), sizeof(uint) * BLOCK_SIZE));
    uint* dev_in;
    uint* dev_out;
    uint* dev_s;
    CSC(cudaMalloc(&dev_in, sizeof(uint) * sz));
    CSC(cudaMalloc(&dev_out, sizeof(uint) * sz));
    CSC(cudaMalloc(&dev_s, sizeof(uint) * sz));
    CSC(cudaMemcpy(dev_in, ar, sizeof(uint) * sz, cudaMemcpyHostToDevice));
    // 32 single-bit passes; each pair of passes returns the data to
    // dev_in, so the final result is in dev_in
    for (int i = 0; i < 32; i += 2) {
        SortBit(dev_in, dev_out, dev_s, sz, i);
        SortBit(dev_out, dev_in, dev_s, sz, i + 1);
    }
    CSC(cudaMemcpy(ar, dev_in, sizeof(uint) * n, cudaMemcpyDeviceToHost));
    for (int i = 0; i <= 2 * cnt; ++i) {
        CSC(cudaFree(scanArr[i]));
    }
    CSC(cudaFree(dev_in));
    CSC(cudaFree(dev_out));
    CSC(cudaFree(dev_s));
    free(scanArr);
    fwrite(ar, sizeof(uint), n, stdout);
    free(ar);
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
__global__ void dot( int *a, int *b, int *c, int Ns);
int* allocAndAssignMat(int size);
//===========================================
// Elementwise multiply-accumulate: c[i] += a[i] * b[i] for all i < Ns,
// using a grid-stride loop so the launch size need not match Ns.
__global__ void dot( int *a, int *b, int *c, int Ns){
    int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < Ns; i += stride) {
        c[i] += a[i] * b[i];
    }
}
// Allocates an int buffer of `size` elements and fills every slot with
// the constant 2 (test-fixture data for the dot-product benchmark).
// Caller owns the returned pointer (free()).
int* allocAndAssignMat(int size) {
    int* buf = (int*)malloc(size * sizeof(int));
    int idx = 0;
    while (idx < size) {
        buf[idx] = 2;
        ++idx;
    }
    return buf;
}
// Note: It is assumed that machine has 2 GPUs
// Multi-GPU elementwise product benchmark (assumes 2 GPUs): splits the
// N*N-element arrays across two devices, runs the dot kernel 10000
// times, gathers the partial results, and prints the accumulated sum
// and kernel time.
// Fixes vs. the original:
//  * Ns split only N of the N*N elements, so almost no data was processed;
//  * the device result buffers were never zeroed before the += kernel;
//  * the result was copied back via cudaMemcpy(c, dev_c, ...) — dev_c is
//    the HOST array of device pointers, not a device allocation;
//  * cudaFree(dev_a/b/c) freed those host array addresses instead of the
//    per-device allocations;
//  * the timing events were created on device 0 but recorded while
//    device 1 was current.
int main( void ) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int mySum = 0;
    const int N = 10000;
    const int total = N * N;
    int *a, *b, *c;

    // allocate and fill the host buffers
    a = allocAndAssignMat(total);
    b = allocAndAssignMat(total);
    c = (int*)malloc(total * sizeof(int));

    // There's 2 GPUs on this machine
    int *dev_a[2], *dev_b[2], *dev_c[2];
    // split ALL N*N elements, not just N of them
    const int Ns[2] = { total / 2, total - (total / 2) };

    // Allocate (and zero the accumulators) on each GPU
    for (int dev = 0; dev < 2; dev++) {
        cudaSetDevice(dev);
        cudaMalloc((void**)&dev_a[dev], Ns[dev] * sizeof(int));
        cudaMalloc((void**)&dev_b[dev], Ns[dev] * sizeof(int));
        cudaMalloc((void**)&dev_c[dev], Ns[dev] * sizeof(int));
        cudaMemset(dev_c[dev], 0, Ns[dev] * sizeof(int));
    }

    // Copy each device's slice of a and b
    for (int dev = 0, pos = 0; dev < 2; pos += Ns[dev], dev++) {
        cudaSetDevice(dev);
        cudaMemcpy(dev_a[dev], a + pos, Ns[dev] * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b[dev], b + pos, Ns[dev] * sizeof(int), cudaMemcpyHostToDevice);
    }

    // record events on the device they were created on
    cudaSetDevice(0);
    cudaEventRecord(start);
    for (int i = 0; i < 10000; ++i) {
        for (int dev = 0; dev < 2; dev++) {
            cudaSetDevice(dev);
            dot<<<(Ns[dev] + 255) / 256, 256>>>(dev_a[dev], dev_b[dev], dev_c[dev], Ns[dev]);
        }
    }

    // gather each device's partial result into the right slice of c
    for (int dev = 0, pos = 0; dev < 2; pos += Ns[dev], dev++) {
        cudaSetDevice(dev);
        cudaMemcpy(c + pos, dev_c[dev], Ns[dev] * sizeof(int), cudaMemcpyDeviceToHost);
    }
    cudaSetDevice(0);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    for (int i = 0; i < total; i++) {
        mySum += c[i];
    }

    printf("Size of N*N: %d \nResult: %d \nTime in kernel %f \n", total, mySum, milliseconds);

    // free the per-device allocations (not the host pointer arrays)
    for (int dev = 0; dev < 2; dev++) {
        cudaSetDevice(dev);
        cudaFree(dev_a[dev]);
        cudaFree(dev_b[dev]);
        cudaFree(dev_c[dev]);
    }

    // free the memory allocated on the CPU
    free(a);
    free(b);
    free(c);
    return 0;
}
|
13,483 | #include <stdio.h>
#define N 10000
// Writes each element's own index into c; the element-wise sum of a and
// b is deliberately left commented out, matching the original demo.
__global__ void add(int *a,int *b,int *c)
{
const int tid = blockIdx.x;
if(tid >= N)
return;
c[tid] = tid; // a[tid] + b[tid];
}
// Fills two host arrays, runs the add kernel one block per element, and
// prints the result array.
int main(void)
{
int i;
// The three arrays on the CPU
int a[N], b[N], c[N];
// Pointers that will be allocated on the GPU (device)
int *dev_a, *dev_b, *dev_c;
// Allocate memory on the device; bail out instead of continuing with
// invalid pointers when an allocation fails (the original only printed).
if(cudaMalloc(&dev_a, N*sizeof(int)) != cudaSuccess) {
printf("Error!");
return 1;
}
if(cudaMalloc(&dev_b, N*sizeof(int)) != cudaSuccess) {
printf("Error!");
cudaFree(dev_a);
return 1;
}
if(cudaMalloc(&dev_c, N*sizeof(int)) != cudaSuccess) {
printf("Error!");
cudaFree(dev_a);
cudaFree(dev_b);
return 1;
}
for(i = 0;i<N;i++)
{
a[i] = 1;
b[i] = i*i;
}
// Copy the input arrays to the device. (c is not copied up: it is
// uninitialized on the host and the kernel overwrites every element.)
cudaMemcpy(dev_a, a, N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int),cudaMemcpyHostToDevice);
add<<<N,1>>>(dev_a,dev_b,dev_c);
// Launch-configuration errors only surface through cudaGetLastError().
if(cudaGetLastError() != cudaSuccess)
printf("Error!");
// Copy the array back to the host
if(cudaMemcpy(c, dev_c, N*sizeof(int),cudaMemcpyDeviceToHost) != cudaSuccess)
printf("Error!");
for(i = 0; i<N;i++)
printf("%d\n",c[i]);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
13,484 | #define N 64
#define B 2
#define T 32
// Demonstrates __syncthreads() inside a conditional. It is safe here
// because the branch condition (blockIdx.x % 2) is uniform within a
// block: every thread of a given block takes the same path, so all of
// them reach the same barrier.
__global__ void dl(int* in)
{
// Global thread id, also used to select even/odd elements.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(blockIdx.x % 2 == 0)
{
// Even blocks increment even-indexed elements.
if(tid % 2 == 0)
in[tid]++;
// Fine because conditional synchronization will
// happen within a block (the condition is block-uniform).
__syncthreads();
}
else {
// Odd blocks decrement odd-indexed elements.
if(tid % 2 == 1)
in[tid]--;
__syncthreads();
}
} |
13,485 | #include "includes.h"
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i < len.
// One thread per element; the bounds check handles the ragged last block.
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = threadIdx.x+blockDim.x*blockIdx.x;
if(i<len) out[i] = in1[i] + in2[i];
} |
13,486 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "stdio.h"
#define N 10
// Single-thread variant: launched as <<<1,1>>>, one thread walks the
// whole N-element array sequentially.
__global__
void add11( int *a, int *b, int *c ) {
for (int idx = 0; idx < N; ++idx) {
c[idx] = a[idx] + b[idx];
}
}
// One-block-per-element variant: launched as <<<N,1>>>; each block's
// single thread handles the element selected by its block index.
__global__
void addn1( int *a, int *b, int *c ) {
const int elem = blockIdx.x;
c[elem] = a[elem] + b[elem];
}
// One-thread-per-element variant: launched as <<<1,N>>>; each thread of
// the single block handles the element selected by its thread index.
__global__
void add1n( int *a, int *b, int *c ) {
const int elem = threadIdx.x;
c[elem] = a[elem] + b[elem];
}
// Grid-stride variant: correct for any launch configuration; each
// thread starts at its global id and hops by the total thread count.
__global__
void add( int *a, int *b, int *c ) {
const int stride = blockDim.x * gridDim.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < N) {
c[i] = a[i] + b[i];
i += stride;
}
}
// Runs four equivalent vector-add kernels over managed memory and
// prints the result array after each launch.
int main( void ) {
    int *a, *b, *c;
    // Managed allocations are visible to host and device; a failure here
    // previously went unchecked and would surface as a crash later.
    if (cudaMallocManaged(&a, N*sizeof(int)) != cudaSuccess ||
        cudaMallocManaged(&b, N*sizeof(int)) != cudaSuccess ||
        cudaMallocManaged(&c, N*sizeof(int)) != cudaSuccess) {
        printf("cudaMallocManaged failed\n");
        return 1;
    }
    for (int i=0; i<N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    add11<<<1, 1>>>(a, b, c);
    // Launch-configuration errors only show up via cudaGetLastError().
    if (cudaGetLastError() != cudaSuccess) printf("launch failed\n");
    cudaDeviceSynchronize();
    printf("1, 1\n");
    for (int i=0; i<N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    add1n<<<1, N>>>(a, b, c);
    if (cudaGetLastError() != cudaSuccess) printf("launch failed\n");
    cudaDeviceSynchronize();
    printf("1, N\n");
    for (int i=0; i<N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    addn1<<<N, 1>>>(a, b, c);
    if (cudaGetLastError() != cudaSuccess) printf("launch failed\n");
    cudaDeviceSynchronize();
    printf("N, 1\n");
    for (int i=0; i<N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(a, b, c);
    if (cudaGetLastError() != cudaSuccess) printf("launch failed\n");
    cudaDeviceSynchronize();
    printf("<<<...>>>\n");
    for (int i=0; i<N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
|
13,487 | #include "includes.h"
// Counts exact 0.0 and 1.0 values in vec[0..n): data[0] accumulates the
// zero count, data[1] the one count, both via atomicAdd on floats.
// NOTE(review): the == comparisons only match bit-exact 0/1 — confirm
// callers never need a tolerance here.
__global__ void count_zero_one(float *vec, float *data, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( (xIndex < n) ){
if (vec[xIndex] == 0)
atomicAdd(data,1);
else if (vec[xIndex] == 1)
atomicAdd(data+1,1);
}
} |
13,488 | #include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
using namespace std;
void kernel_max_wrapper(unsigned long long *arr, unsigned long long *max, int *mtx, unsigned long long int N);
__global__ void find_maximum_kernel(unsigned long long *arr, unsigned long long *max, int *mtx, unsigned long long int N);
// Finds the maximum of N random values twice — with the CUDA reduction
// kernel and with a sequential host loop — and reports both timings.
int main()
{
// Declare arrays, mutex, and size
// default size is 20971520
unsigned long long int N = 20971520;
unsigned long long *seq_array, *cuda_array, *seq_max, *cuda_max;
int *mtx;
// Declare timers
float cuda_elapsed_time;
cudaEvent_t cuda_start, cuda_stop;
double seq_start, seq_stop, seq_elapsed_time;
cout << "Enter size of array: ";
cin >> N;
// allocate memory for seq (host side)
seq_array = (unsigned long long*)malloc(N*sizeof(unsigned long long));
seq_max = (unsigned long long*)malloc(sizeof(unsigned long long));
srand(time(0));
// set array of seq to random values
for(unsigned long long int i=0; i<N; i++){
seq_array[i] = ((unsigned long long)rand() /((unsigned long long) RAND_MAX / (10000000000000.0)));
}
// allocate memory for cuda (device side)
cudaMalloc((void**)&cuda_array, N*sizeof(unsigned long long));
cudaMalloc((void**)&cuda_max, sizeof(unsigned long long));
cudaMalloc((void**)&mtx, sizeof(int));
// set values of max and mtx to all 0 (mtx doubles as a spinlock flag)
cudaMemset(cuda_max, 0, sizeof(unsigned long long));
cudaMemset(mtx, 0, sizeof(int));
// set up timing variables
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_stop);
cudaMemcpy(cuda_array, seq_array, N*sizeof(unsigned long long), cudaMemcpyHostToDevice);
// copy from host to device
cudaEventRecord(cuda_start, 0);
// START CUDA
kernel_max_wrapper(cuda_array, cuda_max, mtx, N);
// copy from device to host
cudaEventRecord(cuda_stop, 0);
cudaEventSynchronize(cuda_stop);
cudaEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop);
cudaMemcpy(seq_max, cuda_max, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
// destroy timers
cudaEventDestroy(cuda_start);
cudaEventDestroy(cuda_stop);
cout << "----------------------------------------------------------" << endl;
cout << "Max: " << *seq_max << endl;
cout << "[CUDA] Elapsed time: " << cuda_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
cout << endl;
cout << "Starting sequential version." << endl;
seq_start = (double) clock();
*seq_max = 0;
for(unsigned long long int j = 0; j < N ; j++){
if(seq_array[j] > *seq_max){
*seq_max = seq_array[j];
}
}
seq_stop = (double) clock();
seq_elapsed_time = (double) (seq_stop - seq_start)/CLOCKS_PER_SEC;
seq_elapsed_time *= 1000.0;
cout << "----------------------------------------------------------" << endl;
cout << "Max: " << *seq_max << endl;
cout << "[SEQUENTIAL] Elapsed time: " << seq_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
// free and cuda free
free(seq_array);
free(seq_max);
cudaFree(cuda_array);
cudaFree(cuda_max);
cudaFree(mtx); // was leaked: the mutex allocation was never released
return 0;
}
// Host-side launcher for find_maximum_kernel.
// NOTE(review): gridSize is derived from 512*2048-element chunks while
// blockSize is 256; the kernel's strided loop keeps any configuration
// correct, but the mismatched constants look unintentional — confirm
// the intended occupancy.
void kernel_max_wrapper(unsigned long long *arr, unsigned long long *max, int *mtx, unsigned long long int N)
{
// 1 dimensional
dim3 gridSize = (N + 512 * 2048 - 1) / (512 * 2048);
dim3 blockSize = 256;
find_maximum_kernel<<< gridSize, blockSize >>>(arr, max, mtx, N);
}
// Grid-wide maximum of arr[0..N): each thread scans a strided slice,
// each block reduces its 256 partials in shared memory, and block
// winners are merged into *max under a global spinlock (mtx).
// Requires blockDim.x == 256 (size of the shared cache) and a
// power-of-two block size for the halving reduction.
__global__ void find_maximum_kernel(unsigned long long *arr, unsigned long long *max, int *mtx, unsigned long long int N)
{
long long index = threadIdx.x + blockIdx.x*blockDim.x;
long long span = gridDim.x*blockDim.x;
__shared__ unsigned long long cache[256];
// Per-thread maximum over this thread's strided elements (0 if none).
unsigned long long temp = 0;
for (unsigned long long int offset = 0; index + offset < N; offset += span) {
if (temp < arr[index+offset]) {
temp = arr[index+offset];
}
}
cache[threadIdx.x] = temp;
__syncthreads();
// cuda reduction: halve the active range each pass. The barrier sits
// outside the if, so every thread of the block reaches it.
for (unsigned long long int offset = blockDim.x/2; offset != 0; offset /= 2) {
if (threadIdx.x < offset) {
if (cache[threadIdx.x] < cache[threadIdx.x + offset]) {
cache[threadIdx.x] = cache[threadIdx.x + offset];
}
}
__syncthreads();
}
// atomic setting of max!
if(threadIdx.x == 0){
// lock mtx
// NOTE(review): an atomicCAS spinlock can livelock on pre-Volta GPUs
// and there is no __threadfence() before the unlock; a simple
// atomicMax on *max would avoid both concerns — verify before reuse.
while(atomicCAS(mtx, 0, 1) != 0);
if (*max < cache[0]) {
*max = cache[0];
}
// unlock mtx
atomicExch(mtx, 0);
}
}
|
13,489 | #include "includes.h"
// Linear-kernel matrix for prediction: out[i*objs + j] =
// 1 + <x_train row i, x row j>, one thread per (i, j) pair, where
// id encodes i = id/objs (training object) and j = id%objs (query).
__global__ static void calc_linear_kernel_predict(int objs,int coords,double* x,int objs_train,double* x_train,double* out){
int id=blockDim.x * blockIdx.x + threadIdx.x;
int i=id/objs;
int j=id%objs;
// Threads past the last training row fall through; j < objs holds by
// construction of the modulo.
if (i<objs_train){
double r=1.0;
for (int k=0;k<coords;k++){
r += x_train[coords*i+k] * x[coords*j+k];
}
out[id]=r;
}
} |
13,490 | #include <cuda.h>
#include <iostream>
#include <vector>
using namespace std;
// One level of a strided tree sum: each block gathers blockDim.x
// elements of v spaced `jump` apart into shared memory, reduces them,
// and writes the block total back to v[jump * blockFirstIndex].
// Requires a power-of-two blockDim.x and dynamic shared memory of
// blockDim.x * sizeof(float).
__global__
void sumReduction(float* v, int size, int jump)
{
// linear id
unsigned int t = threadIdx.x;
unsigned int t0 = blockIdx.x*blockDim.x;
unsigned int k = jump*(t0 + t);
// load vector into shared memory
extern __shared__ float vs[];
// Guard the gather: the last block can map threads past the end of v
// (ceil-division grid), and the strided index also overshoots once
// jump > 1. Loading 0 for those lanes leaves the sum unchanged; the
// original read v[k] unconditionally, out of bounds.
vs[t] = (k < (unsigned int)size) ? v[k] : 0.0f;
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if(t % (2*stride) == 0)
vs[t] += vs[t + stride];
}
// Thread 0 publishes the block total at the block's first slot, which
// is always < size because the block exists at all.
if (t == 0)
v[jump*t0] = vs[0];
}
// Prints GPU properties, then sums a vector of 1.0f values with the
// iterative sumReduction kernel and checks the result against `size`.
int main(int argc, char* argv[])
{
// Guard argv[1]: the original dereferenced it unconditionally and
// crashed when launched without an argument.
if (argc < 2) {
cerr << "Usage: " << argv[0] << " <vector size>" << endl;
return 1;
}
// Query GPU properties
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, 0);
cout << "---------------------------------------------" << endl;
cout << " GPU PROPERTIES " << endl;
cout << "---------------------------------------------" << endl;
cout << "Device Name: " << dev_prop.name << endl;
cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl;
cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl;
cout << "Number of SM: " << dev_prop.multiProcessorCount << endl;
cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl;
cout << "Registers per Block: " << dev_prop.regsPerBlock << endl;
cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl;
cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl;
cout << endl;
int size = atoi(argv[1]);
if (size <= 0) {
cerr << "Vector size must be a positive integer." << endl;
return 1;
}
// creating vector on host side
vector<float> vec(size, 1.0f);
// Copy vector on device side
float* d_vec;
cudaMalloc((void**)&d_vec, size*sizeof(float));
cudaMemcpy((void*)d_vec, (void*)vec.data(), size*sizeof(float), cudaMemcpyHostToDevice);
// call Kernel: each pass collapses blockDim partial sums into one slot
// spaced `jump` apart, until a single block remains.
int blockDim = 4;
int jump = 1;
int number_of_blocks = size;
do {
number_of_blocks = ceil(number_of_blocks/(float)blockDim);
sumReduction<<<number_of_blocks, blockDim, blockDim*sizeof(float)>>>(d_vec, size, jump);
jump *= 4; // matches blockDim: survivors are blockDim elements apart
} while (number_of_blocks != 1);
// Recover vector from device to host
cudaMemcpy((void*)vec.data(), (void*)d_vec, size*sizeof(float), cudaMemcpyDeviceToHost);
// Check results
if (fabs(vec[0] - size) > 0.0001f)
cout << "ERROR: something is not right." << endl;
// Finalize storage
cudaFree(d_vec);
cout << "Closing..." << endl;
return 0;
}
|
13,491 | #include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <math.h>
#include <iostream>
#include <iomanip>
// Naive pi integrand: evaluates 4/(1+x^2) at the midpoint of sample i,
// one thread per sample; excess threads simply exit.
__global__ void ingenuo(double *out, double step, long num_steps) {
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_steps) {
double x = (idx + 0.5) * step;
out[idx] = 4.0 / (1.0 + x * x);
}
}
// Chunked pi integrand: each thread accumulates sz consecutive samples
// of 4/(1+x^2) and writes one partial sum, shrinking the later
// reduction by a factor of sz.
__global__ void esperto(double *out, double step, long num_steps, long sz) {
long i = blockIdx.x * blockDim.x + threadIdx.x;
long start = i * sz;
// Threads whose whole chunk lies past the end must not touch out[]:
// the output vector only has num_steps/sz + 1 entries, and the
// original's unconditional out[i] store ran off its end for the
// trailing threads of the last block.
if (start >= num_steps) return;
long end = start + sz;
if (end > num_steps) end = num_steps;
double sum = 0.0;
// k is long (not int): indices approach num_steps, which may exceed
// the int range.
for (long k = start; k < end; k++) {
double val = (k + 0.5) * step;
sum += 4.0 / (1.0 + val * val);
}
out[i] = sum;
}
// Estimates pi = integral of 4/(1+x^2) on [0,1] by the midpoint rule,
// two ways, and prints both estimates.
int main() {
long num_steps = 1000000000;
double step = 1.0 / num_steps;
int nthreads = 1024;
// "ingenuo" (naive): one sample per thread, reduced with thrust.
thrust::device_vector<double> ingenuo_data(num_steps);
int nblocks = ceil(double(num_steps) / nthreads);
ingenuo<<<nblocks, nthreads>>>(thrust::raw_pointer_cast(ingenuo_data.data()), step, num_steps);
double pi = step * thrust::reduce(ingenuo_data.begin(), ingenuo_data.end(), 0.0, thrust::plus<double>());
std::cout << std::setprecision(13);
std::cout << pi << "\n";
// "esperto" (smart): each thread pre-accumulates a 4096-sample chunk,
// shrinking the reduction input by that factor.
int els_per_thread = 4096;
thrust::device_vector<double> esperto_data(num_steps/els_per_thread+1, 0);
int nblocks2 = ceil(double(num_steps)/(nthreads * els_per_thread));
esperto<<<nblocks2, nthreads>>>(thrust::raw_pointer_cast(esperto_data.data()), step, num_steps, els_per_thread);
double pi2 = step * thrust::reduce(esperto_data.begin(), esperto_data.end(), 0.0, thrust::plus<double>());
std::cout << std::setprecision(13);
std::cout << pi2 << "\n";
return 0;
} |
13,492 | /*
Template code for convolution. CS6023, IITM */
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define W 1024 // Input DIM
#define OW (W-4) // Output DIM
#define D 8 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 128 // Number of kernels
#define BLOCK_DIM_Z 8
#define BLOCK_DIM_Y 8
#define BLOCK_DIM_X 8
// Fill the W x W x D input volume with a deterministic pattern so runs
// are reproducible; the mod keeps every value in [0, 254].
void fillMatrix(unsigned char *matrix){
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
for(int row=0;row<W;row++)
for(int col=0;col<W;col++)
for(int ch=0;ch<D;ch++)
m[row][col][ch]=(row*col+col*ch+row*ch+row*2+col*3+ch*4)%255;
}
// Fill the N convolution kernels (each T x T x D) with deterministic
// fractional weights in (-1, 1) via fmod.
void fillKernel(float *kernel){
float (*t)[T][T][D]=(float (*)[T][T][D])kernel;
for(int n=0;n<N;n++)
for(int r=0;r<T;r++)
for(int c=0;c<T;c++)
for(int d=0;d<D;d++)
t[n][r][c][d]=fmod(-(n+1)*2.1+(r+1)*3.2-(c+1)*4.8+(d+1)*7.1,1.0);
}
// Writes the N x OW x OW output volume to "assignment4_out", one kernel
// per line, values space-separated with 4 decimal places.
void print_matrix_to_file(float *m){
const char *fname = "assignment4_out";
FILE *f = fopen(fname, "w");
// Writing through a NULL FILE* is undefined behavior; report and bail
// instead (the original used f unchecked).
if (f == NULL) {
fprintf(stderr, "Could not open %s for writing\n", fname);
return;
}
float (*mat)[OW][OW]=(float (*)[OW][OW])m;
for(unsigned i=0; i < N; i++) {
for(unsigned j=0; j < OW; j++)
for(unsigned k=0;k<OW;k++)
fprintf(f,"%4.4f ", mat[i][j][k]);
fprintf(f,"\n");
}
fclose(f);
}
// 3-D convolution of a W x W x D input with N kernels of size T x T x D,
// producing an N x OW x OW output. The x block dimension is reused both
// as the depth-chunk index for loading and as the kernel-id range for
// accumulation; partial products are merged in shared s_conv via atomics.
// NOTE(review): the halo-load loops run only (T-1)/2 iterations each and
// there is no __syncthreads() between the atomicAdd accumulation and the
// final s_conv read — both look like correctness hazards; verify against
// a reference implementation before modifying.
__global__ void convolution_3d(unsigned char *matrix,float* kernel,float *output){
__shared__ float s_matrix[BLOCK_DIM_Z+(T-1)][BLOCK_DIM_Y+(T-1)][D]; /* Shared memory for the matrix values */
__shared__ float s_conv[BLOCK_DIM_Z][BLOCK_DIM_Y][BLOCK_DIM_X];
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z;
int bx = blockIdx.x;
int by = blockIdx.y;
int bz = blockIdx.z;
int gid_x = blockDim.x*bx+tx;
int gid_y = blockDim.y*by+ty;
int gid_z = blockDim.z*bz+tz;
// Depth chunks each thread loads/accumulates (D split across x threads).
int num_tasks = D/BLOCK_DIM_X;
s_conv[tz][ty][tx] = 0;
// Stage the input tile (plus halo) into shared memory.
for(int k=0;k<(T-1)/2;k++){
for(int i=0;i<(T-1)/2;i++){
for(int j=0;j<num_tasks;j++){
if( ((ty+i*BLOCK_DIM_Y) < (BLOCK_DIM_Y+T-1)) && (tz+k*BLOCK_DIM_Z < BLOCK_DIM_Z+T-1) ){
s_matrix[tz+k*BLOCK_DIM_Z][ty+i*BLOCK_DIM_Y][tx+j*BLOCK_DIM_X] = matrix[(gid_z+k*BLOCK_DIM_Z)*W*D+(gid_y+i*BLOCK_DIM_Y)*D+tx+j*BLOCK_DIM_X];
}
}
}
}
__syncthreads();
/* Now perform the multiplication to find the convolution */
if(gid_z<OW && gid_y<OW){
// id iterates over this block's slice of the N kernels.
for(int id=bx*BLOCK_DIM_X;id<(bx+1)*BLOCK_DIM_X;id++){
if( id<N ){
float conv = 0;
for(int k=0;k<num_tasks;k++){
for(int i=-(T-1)/2;i<=(T-1)/2;i++){
for(int j=-(T-1)/2;j<=(T-1)/2;j++){
conv += s_matrix[tz+i+(T-1)/2][ty+j+(T-1)/2][tx+BLOCK_DIM_X*k] * kernel[id*T*T*D+(i+(T-1)/2)*T*D+(j+(T-1)/2)*D+tx+BLOCK_DIM_X*k];
}
}
}
// Threads sharing (tz, ty) accumulate their depth-chunk partials.
atomicAdd(&(s_conv[tz][ty][id-bx*BLOCK_DIM_X]),conv);
}
}
output[gid_x*OW*OW+gid_z*OW+gid_y] = s_conv[tz][ty][tx];
}
}
// Driver: builds host inputs, runs the 3-D convolution on the device,
// times it with CUDA events, and writes the result to a file.
int main()
{
// Host-side buffers.
unsigned char *matrix = (unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
float *kernel = (float*)malloc(sizeof(float)*T*T*D*N);
float *output = (float *)malloc(sizeof(float)*N*OW*OW);
fillMatrix(matrix);
fillKernel(kernel);
// Device-side buffers.
unsigned char *Dmatrix;
float *Dkernel;
float *Doutput;
cudaMalloc((void **)&Dmatrix, sizeof(unsigned char)*W*W*D);
cudaMalloc((void **)&Dkernel, sizeof(float)*N*T*T*D);
cudaMalloc((void **)&Doutput, sizeof(float)*N*OW*OW);
cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D, cudaMemcpyHostToDevice);
cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N, cudaMemcpyHostToDevice);
// Time the kernel with CUDA events.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start,0);
//Make your cuda kernel call
dim3 blockd(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
dim3 gridd((N+BLOCK_DIM_X-1)/BLOCK_DIM_X, (OW+BLOCK_DIM_Y-1)/(BLOCK_DIM_Y), (OW+BLOCK_DIM_Z-1)/BLOCK_DIM_Z);
convolution_3d<<<gridd, blockd>>>(Dmatrix, Dkernel, Doutput);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f\n",milliseconds);
cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW, cudaMemcpyDeviceToHost);
//Use print_matrix_to_file function only
print_matrix_to_file(output);
// Release device then host memory.
cudaFree(Dmatrix);
cudaFree(Dkernel);
cudaFree(Doutput);
free(matrix);
free(kernel);
free(output);
}
|
13,493 | #include <iostream>
#include <fstream>
#include <sstream>
#include <chrono>
#include <vector>
#include <cmath>
#include <dirent.h>
#include <cstring>
using namespace std;
#define NUM_GANGS_START 2
#define NUM_GANGS_END 512
/**
* In-place Iterative Fast Fourier Transformation with OpenACC support.
*/
template<int T>
// Iterative radix-2 FFT over n real inputs; T is the OpenACC gang count.
// Returns a caller-owned new[]'d buffer of interleaved complex values
// (r[2k] = Re, r[2k+1] = Im). Requires n to be a power of two.
float* fft(float* __restrict__ x, size_t n) {
size_t n2 = n * 2;
int s = log2(n);
float* r = new float[n2];
#pragma acc data copy(x[0:n]) create(r[0:n2]) copyout(r[0:n])
{
// Bit-reversal reordering
#pragma acc parallel loop
for (int i = 0; i <= n-1; i++) {
int j = i, k = 0;
#pragma acc loop seq
for (int l = 1; l <= s; l++) {
k = k * 2 + (j & 1);
j >>= 1;
}
r[k*2]= x[i];
r[k*2 + 1] = 0;
}
// Butterfly stages: stage i combines blocks of m = 2^i complex points.
#pragma acc loop seq
for (int i = 1; i <= s; i++) {
int m = 1 << i;
#pragma acc parallel loop num_gangs(T)
for (int j = 0; j < n; j += m) {
#pragma acc loop independent
for(int k = 0; k < m/2; k++) {
if (j + k + m / 2 < n) {
// t = twiddle factor, u = upper input, tr = t * lower input.
float t[2], u[2], tr[2];
t[0] = cos((2.0*M_PI*k)/(1.0*m));
t[1] = -sin((2.0*M_PI*k)/(1.0*m));
size_t ridx = (j + k) * 2;
u[0] = r[ridx];
u[1] = r[ridx + 1];
// ridx2 is m floats = m/2 complex slots ahead of ridx.
size_t ridx2 = ridx + m;
tr[0] = t[0] * r[ridx2] - t[1] * r[ridx2 + 1];
// NOTE(review): a complex multiply's imaginary part should be
// t[0]*r[ridx2+1] + t[1]*r[ridx2]; this line uses r[ridx2]
// twice, which looks like a transcription bug — verify the
// imaginary output against a reference FFT.
tr[1] = t[0] * r[ridx2] + t[1] * r[ridx2];
t[0] = tr[0];
t[1] = tr[1];
r[ridx] = u[0] + t[0];
r[ridx + 1] = u[1] + t[1];
r[ridx2] = u[0] - t[0];
r[ridx2 + 1] = u[1] - t[1];
// Whew. That was ugly. Eww.
}
}
}
}
}
return r;
}
/**
* Reads numeric data from a file.
*/
/**
 * Reads whitespace-separated floats from `filename` into `out`.
 * Logs to stderr (and leaves `out` untouched) if the file can't be opened.
 */
void read_file(const char* filename, vector<float>& out) {
    ifstream file;
    file.open(filename);
    if (file.is_open()) {
        // Extract until a read fails. The original looped on !eof() and
        // only pushed on success, which spins forever on a non-numeric
        // token: failbit is set, eof is never reached.
        float val;
        while (file >> val) {
            out.push_back(val);
        }
    } else {
        cerr << "Can't open file " << filename << " for reading." << endl;
    }
    file.close();
}
// Runs fft<num_gangs> four times over buffer, prints a CSV timing line,
// and writes the spectrum to "<filename>.out".
void compute(int num_gangs, float* buffer, size_t count, int sample_rate, const char* filename) {
    float time = 0;
    // fft() returns a freshly new[]'d buffer on every call; track the
    // latest one and release the previous so the 4-run loop does not
    // leak (the original also leaked its initial new float[1]).
    float* result = nullptr;
    for (int i = 0; i < 4; i++) {
        // Start the stopwatch
        auto start = chrono::high_resolution_clock::now();
        delete[] result;
        result = nullptr;
        // Run FFT algorithm with loaded data
        switch (num_gangs) {
        case 2: result = fft<2>(buffer, count); break;
        case 4: result = fft<4>(buffer, count); break;
        case 8: result = fft<8>(buffer, count); break;
        case 16: result = fft<16>(buffer, count); break;
        case 32: result = fft<32>(buffer, count); break;
        case 64: result = fft<64>(buffer, count); break;
        case 128: result = fft<128>(buffer, count); break;
        case 256: result = fft<256>(buffer, count); break;
        case 512: result = fft<512>(buffer, count); break;
        }
        // Log the elapsed time
        auto finish = chrono::high_resolution_clock::now();
        auto microseconds = chrono::duration_cast<std::chrono::microseconds>(finish-start);
        time += microseconds.count();
    }
    cout << count << "," << sample_rate << "," << num_gangs << "," << time/4 << endl;
    // Save the computed data. Skip when num_gangs matched no case: the
    // original then read count/2 values through a 1-element placeholder.
    if (result != nullptr) {
        char* outfilename = new char[512];
        strcpy(outfilename, filename);
        strcat(outfilename, ".out");
        ofstream outfile;
        outfile.open (outfilename);
        outfile.precision(4);
        outfile << "frequency, value" << endl;
        for (size_t i = 0; i < count / 2; i++) {
            outfile << i * ((float)sample_rate/count) << "," << result[i] << endl;
        }
        outfile.close();
        delete[] outfilename;   // was leaked
    }
    delete[] result;            // was leaked
}
// Loads one data file and benchmarks it across the num_gangs sweep.
// Rejects inputs whose sample count is not a power of two.
void compute_file(const char* folder, const char* filename,
        const char* sample_count, int sample_rate) {
    vector<float> buffer;
    // Read data file
    read_file(filename, buffer);
    int count = buffer.size();
    // Is power of 2? (x & (x-1)) == 0 exactly for powers of two.
    if (count & (count-1)) {
        cerr << "Input data sample count have to be power of two." << endl;
    } else {
        // Warm-up run to go through initialization; fft returns an owned
        // buffer, which the original leaked here.
        delete[] fft<1>(&buffer[0], count);
        // Run compute for various num_gangs
        for (int ng = NUM_GANGS_START; ng <= NUM_GANGS_END; ng <<= 1) {
            cout << folder << ",";
            compute(ng, &buffer[0], count, sample_rate, filename);
        }
    }
}
// Benchmarks every "*.dat" file in the given folder; file names are
// expected as "<samples>@<samplerate>.dat".
int main(int argc, char** argv) {
    srand (time(NULL));
    // Deal with program arguments
    if (argc < 2) {
        cerr << "Usage: " << argv[0] << " [input_folder]"; return 2;
    }
    // Compute all files in folder; opendir returns NULL on failure and
    // the original passed that straight into readdir/closedir.
    DIR* dirp = opendir(argv[1]);
    if (dirp == NULL) {
        cerr << "Can't open folder " << argv[1] << "." << endl; return 2;
    }
    struct dirent *epdf;
    while ((epdf = readdir(dirp)) != NULL) {
        size_t len = strlen(epdf->d_name);
        // len >= 4 guards the d_name[len-3] suffix read for short names.
        if (len >= 4 && strcmp(epdf->d_name,".") != 0 && strcmp(epdf->d_name,"..") != 0
                && strcmp(&epdf->d_name[len-3], "dat") == 0) {
            stringstream fname(epdf->d_name);
            string samples, sr;
            getline(fname, samples, '@');
            getline(fname, sr, '.');
            char* fold = new char[512];
            strcpy(fold, argv[1]);
            compute_file(argv[1], strcat(strcat(fold, "/"), epdf->d_name), samples.c_str(), atoi(sr.c_str()));
            delete[] fold;   // was leaked once per processed file
        }
    }
    closedir(dirp);
    return 0;
}
|
13,494 | #pragma once
#include "Vector3.cuh.cu"
#include "Ray.cuh.cu"
namespace RayTracing
{
class Material;

// Result of a ray/primitive intersection query.
struct HitRecord
{
    float t;
    // or barycentric alpha
    float u;
    // or barycentric beta
    float v;
    Vector3 normal;
    Point3 point;
    const Material *material = nullptr;

    // Store a surface normal that always faces against the incoming ray:
    // pointNormal is kept as-is when it opposes the ray direction,
    // otherwise it is flipped.
    __host__ __device__
    void SetNormal(const Ray &ray, const Vector3 &pointNormal)
    {
        if (pointNormal.Dot(ray.direction) < 0)
            normal = pointNormal;
        else
            normal = -pointNormal;
    }
};
} // namespace RayTracing
|
13,495 | #include "includes.h"
// Bins |segment[jj]| for each of seglength samples and histograms them:
// values in (low, high) map through the linear ramp
// ceil(slope * (high - |v|)); values >= high go to bin 0; values <= low
// go to the last bin. Writes each bin id to bins[] and bumps segCounter.
// NOTE(review): uses bitwise & as a logical AND (works for these 0/1
// comparison results) and assumes ceil(slope*(high-low)) < countlength —
// confirm slope is chosen so the ramp never overflows the histogram.
__device__ void MakeCountSegment(float *segment, int *bins, const int seglength, int *segCounter, const int countlength, const float low, const float high, const float slope)
{
int bin;
float temp;
for (int jj=0; jj<seglength; jj++){
temp = abs(segment[jj]);
if ( ( temp > low ) & ( temp < high ) ) {
bin = (int)ceil(slope*abs(high-temp));
}
else if (temp >= high) {
bin = 0;
}
else bin = countlength - 1;
bins[jj]=bin;
segCounter[bin] = segCounter[bin] + 1;
}
return;
}
// Splits vec into per-thread segments — longer "high" segments for the
// first threadsHigh threads, shorter "low" segments after the HighLength
// boundary — and has each thread bin its own segment into a private
// countlength-slot histogram (no atomics needed).
__global__ void make_and_count_seg(float *vec, int *bin, int *segcounter, const int length, const int countlength, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength, const float low, const float high, const float slope)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int startIndex, SegmentLength, startCountIndex;
// Each thread owns a disjoint slice of the histogram array.
startCountIndex = xIndex*countlength;
// NOTE(review): the boundary arithmetic between the high- and
// low-segment regions (the xIndex-threadsHigh offsets) is intricate;
// verify it against the host-side partitioning before changing it.
if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
SegmentLength = LowSegmentLength;
}
else {
startIndex = xIndex*HighSegmentLength;
SegmentLength = HighSegmentLength;
}
MakeCountSegment(vec+startIndex, bin+startIndex, SegmentLength, segcounter+startCountIndex, countlength, low, high, slope);
} |
13,496 | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#define GROUP 1
// Benchmarks cuFFT C2C forward+inverse transforms on prime-heavy sizes
// (the cases VkFFT handles with Bluestein's algorithm) in single
// precision, averaging num_runs timings per size and printing a final
// throughput score. Results go to stdout and, if file_output, to output.
void sample_7_benchmark_cuFFT_single_Bluestein(bool file_output, FILE* output, int device_id)
{
if (file_output)
fprintf(output, "7 - cuFFT FFT + iFFT C2C big prime benchmark in single precision (similar to VkFFT Bluestein)\n");
printf("7 - cuFFT FFT + iFFT C2C big prime benchmark in single precision (similar to VkFFT Bluestein)\n");
cudaSetDevice(device_id);
const int num_benchmark_samples = 54;
const int num_runs = 3;
// {x, y, z, rank}: rank selects the 1D/2D/3D plan below. The first
// (1024^2) entry is a warm-up excluded from the score via n > 0.
uint64_t benchmark_dimensions[num_benchmark_samples][4] = { {1024, 1024, 1, 2},
{17, 17, 1, 2},{19, 19, 1, 2},{23, 23, 1, 2}, {29, 29, 1, 2},{31, 31, 1, 2},{37, 37, 1, 2},{41, 41, 1, 2},{43, 43, 1, 2},{47, 47, 1, 2},{53, 53, 1, 2},{59, 59, 1, 2},{61, 61, 1, 2},{67, 67, 1, 2},{71, 71, 1, 2},{73, 73, 1, 2},{79, 79, 1, 2},{83, 83, 1, 2},{89, 89, 1, 2},{97, 97, 1, 2},
{17, 17, 17, 3},{19, 19, 19, 3},{23, 23, 23, 3}, {29, 29, 29, 3},{31, 31, 31, 3},{37, 37, 37, 3},{41, 41, 41, 3},{43, 43, 43, 3},{47, 47, 47, 3},{53, 53, 53, 3},{59, 59, 59, 3},{61, 61, 61, 3},{67, 67, 67, 3},{71, 71, 71, 3},{73, 73, 73, 3},{79, 79, 79, 3},{83, 83, 83, 3},{89, 89, 89, 3},{97, 97, 97, 3},
{179, 179, 1, 2},{283, 283, 1, 2},{419, 419, 1, 2}, {547, 547, 1, 2},{661, 661, 1, 2},{811, 811, 1, 2},{947, 947, 1, 2},{1087, 1087, 1, 2},{1229, 1229, 1, 2},{1381, 1381, 1, 2},{1523, 1523, 1, 2},{2909, 2909, 1, 2},{4241, 4241, 1, 2},{6841, 6841, 1, 2},{7727, 7727, 1, 2}
};
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
// One big random host buffer (2^27 complex values) reused for every size.
cufftComplex* inputC = (cufftComplex*)malloc((uint64_t)sizeof(cufftComplex)*pow(2, 27));
for (uint64_t i = 0; i < pow(2, 27); i++) {
inputC[i].x = 2 * ((float)rand()) / RAND_MAX - 1.0;
inputC[i].y = 2 * ((float)rand()) / RAND_MAX - 1.0;
}
for (int n = 0; n < num_benchmark_samples; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
cufftHandle planC2C;
cufftComplex* dataC;
uint64_t dims[3] = { benchmark_dimensions[n][0] , benchmark_dimensions[n][1] ,benchmark_dimensions[n][2] };
cudaMalloc((void**)&dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]);
cudaMemcpy(dataC, inputC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
// cuFFT plan dims are slowest-varying first, hence the reversal.
switch (benchmark_dimensions[n][3]) {
case 1:
cufftPlan1d(&planC2C, dims[0], CUFFT_C2C, 1);
break;
case 2:
cufftPlan2d(&planC2C, dims[1], dims[0], CUFFT_C2C);
break;
case 3:
cufftPlan3d(&planC2C, dims[2], dims[1], dims[0], CUFFT_C2C);
break;
}
float totTime = 0;
uint64_t cuBufferSize = sizeof(float) * 2 * dims[0] * dims[1] * dims[2];
// Scale iteration count inversely with buffer size, capped at 1000.
uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize;
if (num_iter == 0) num_iter = 1;
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < num_iter; i++) {
cufftExecC2C(planC2C, dataC, dataC, -1);
cufftExecC2C(planC2C, dataC, dataC, 1);
}
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter;
run_time[r][0] = totTime;
// Skip the warm-up sample (n == 0) when scoring.
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time;
}
}
cufftDestroy(planC2C);
cudaFree(dataC);
cudaDeviceSynchronize();
//cufftComplex* output_cuFFT = (cufftComplex*)(malloc(sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]));
//cudaMemcpy(output_cuFFT, dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
}
}
free(inputC);
benchmark_result[0] /= (num_benchmark_samples - 1);
if (file_output)
fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
}
|
13,497 | #include "includes.h"
// Column-wise squared L2 norm of a row-major matrix: for each column
// xIndex < width, norm[xIndex] = sum over rows of mat[row][col]^2.
// pitch is the row stride in elements; adjacent threads read adjacent
// columns, so the global loads coalesce.
__global__ void cuComputeNorm(float *mat, int width, int pitch, int height, float *norm){
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
float val, sum=0;
int i;
for (i=0;i<height;i++){
val = mat[i*pitch+xIndex];
sum += val*val;
}
norm[xIndex] = sum;
}
} |
13,498 | #include <stdio.h>
#include <stdlib.h>
#define SIZE 10
// Adds each thread's index to its element. The launch below hands this
// kernel a plain host pointer — that invalid access is the point of the
// demo.
__global__ void demo(int * p){
const int tx = threadIdx.x;
// Trying to access variables allocated on main memory in a kernel function is illegal.
p[tx] += tx;
}
// Deliberately-broken demo: passes a host malloc'd pointer to a kernel
// instead of cudaMalloc'ing device memory, to show why that fails.
int main(int argc , char **argv){
int * p;
// Host-only allocation — never mirrored to the device.
p=(int *)malloc(10*sizeof(int));
int i;
for(i=0;i<SIZE;i++){
p[i]=1;
}
dim3 dimGrid(1,1);
dim3 dimBlock(SIZE,1);
// The kernel dereferences the host pointer; on most setups the launch
// faults and p is left untouched, so the loop below prints the 1s.
// NOTE(review): no sync or cudaGetLastError() here, so the failure is
// otherwise invisible.
demo<<<dimGrid,dimBlock>>>(p);
for(i=0;i<SIZE;i++){
printf("p[%d]=%d\n",i,p[i]);
}
free(p);
return 0;
}
|
13,499 | #include "includes.h"
// Fills dest[0..count) with 1.0f. Strides by blockDim.x only, ignoring
// the grid: intended for a single-block launch (extra blocks would just
// repeat the identical writes).
__global__ void kSetOnes(float *dest, int count){
for (int i = threadIdx.x; i < count; i += blockDim.x) {
dest[i] = 1;
}
} |
13,500 | #include "includes.h"
// Combines a coarse integer offset with a zoomed-in refinement into the
// final sub-pixel offset for each of `size` elements:
// final = OSratio * zoomIn + init - (xoffset, yoffset).
__global__ void cuSubPixelOffset_kernel(const int2 *offsetInit, const int2 *offsetZoomIn, float2 *offsetFinal, const float OSratio, const float xoffset, const float yoffset, const int size)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx >= size) return;
offsetFinal[idx].x = OSratio*(offsetZoomIn[idx].x ) + offsetInit[idx].x - xoffset;
offsetFinal[idx].y = OSratio*(offsetZoomIn[idx].y ) + offsetInit[idx].y - yoffset;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.