serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
6,101 | #include <assert.h>
//#include <sys/time.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <algorithm>
using std::cout;
using std::endl;
using std::cerr;
#define DECLINE_HORIZONTAL 0.1
#define DECLINE_VERTICAL 0.1
#define STEPS 1000 /* number of time steps */
/* error check on hip macro */
#define CUDA_CHECK(command) \
{ \
cudaError_t status = command; \
if(status != cudaSuccess) \
{ \
cerr << "Error : Cuda reports " << cudaGetErrorString(status) << endl; \
heatCleanExit(-6); \
} \
}
/* Device(GPU) FUNCTIONS TO BE LAUNCHED AS KERNELS FROM HOST(CPU) */
/* This function is used to discover the local working area of each thread */
/* Each thread computes the [row,col] working window (inclusive bounds) it
   owns and stores it at its global thread id in the four output arrays.
   probCase 1: exactly two threads split the grid into west/east halves.
   probCase 2: a threadsPerRow x threadsPerCol rectangular tiling; the
   branch taken depends on where the tile sits (corner, edge, interior).
   Any other probCase leaves the arrays untouched. */
__global__ void heatDiscover(int * __restrict__ workRowS,
                             int * __restrict__ workRowE,
                             int * __restrict__ workColS,
                             int * __restrict__ workColE,
                             const int blockRows, const int blockCols,
                             const int threadsPerRow, const int threadsPerCol,
                             const int probCase,
                             const int gridRows, const int gridCols)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = threadsPerRow * threadsPerCol;
    if(probCase == 1)//two threads
    {
        workRowS[tid] = 1;
        workRowE[tid] = blockRows - 2;
        if(tid == 0)//west half
        {
            workColS[tid] = 1;
            workColE[tid] = blockCols - 1;
        }
        else//east half
        {
            workColS[tid] = blockCols;
            workColE[tid] = gridCols - 2;
        }
    }
    else if(probCase == 2)//rectangular tiling
    {
        if(tid == 0)//north-west corner
        {
            workRowS[tid] = 1;
            workRowE[tid] = blockRows - 1;
            workColS[tid] = 1;
            workColE[tid] = blockCols - 1;
        }
        else if(tid == threadsPerCol - 1)//north-east corner
        {
            workRowS[tid] = 1;
            workRowE[tid] = blockRows - 1;
            workColS[tid] = gridCols - blockCols;
            workColE[tid] = gridCols - 2;
        }
        else if(tid == total - threadsPerCol)//south-west corner
        {
            workRowS[tid] = gridRows - blockRows;
            workRowE[tid] = gridRows - 2;
            workColS[tid] = 1;
            workColE[tid] = blockCols - 1;
        }
        else if(tid == total - 1)//south-east corner
        {
            workRowS[tid] = gridRows - blockRows;
            workRowE[tid] = gridRows - 2;
            workColS[tid] = gridCols - blockCols;
            workColE[tid] = gridCols - 2;
        }
        else if(tid < threadsPerCol)//north edge
        {
            const int colS = tid * blockCols;
            workRowS[tid] = 1;
            workRowE[tid] = blockRows - 1;
            workColS[tid] = colS;
            workColE[tid] = colS + blockCols - 1;
        }
        else if((tid > total - threadsPerCol) && (tid < total - 1))//south edge
        {
            const int colS = (tid % threadsPerCol) * blockCols;
            workRowS[tid] = gridRows - blockRows;
            workRowE[tid] = gridRows - 2;
            workColS[tid] = colS;
            workColE[tid] = colS + blockCols - 1;
        }
        else if(tid % threadsPerCol == 0)//west edge
        {
            const int rowS = (tid / threadsPerCol) * blockRows;
            workRowS[tid] = rowS;
            workRowE[tid] = rowS + blockRows - 1;
            workColS[tid] = 1;
            workColE[tid] = blockCols - 1;
        }
        else if((tid + 1) % threadsPerCol == 0)//east edge
        {
            const int rowS = ((tid + 1 - threadsPerCol) / threadsPerCol) * blockRows;
            workRowS[tid] = rowS;
            workRowE[tid] = rowS + blockRows - 1;
            workColS[tid] = gridCols - blockCols;
            workColE[tid] = gridCols - 2;
        }
        else//interior tile: scan the row bands until this tid's band is found
        {
            int rowS = 0;
            for(int lo = threadsPerCol, hi = 2 * threadsPerCol - 1, band = 1;
                ; lo += threadsPerCol, hi += threadsPerCol, ++band)
            {
                if((tid > lo) && (tid < hi))
                {
                    rowS = band * blockRows;
                    break;
                }
            }
            const int colS = (tid % threadsPerCol) * blockCols;
            workRowS[tid] = rowS;
            workRowE[tid] = rowS + blockRows - 1;
            workColS[tid] = colS;
            workColE[tid] = colS + blockCols - 1;
        }
    }
}
/* This function updates the grid and is invoked on serial executions */
/* Single-thread variant: applies one explicit Euler step of the 2-D heat
   equation to every interior cell (the one-cell border is left untouched).
   Reads devOldHeatGrid, writes devNewHeatGrid. */
__global__ void heatUpdateSerial(const double * __restrict__ devOldHeatGrid,
                                 double * __restrict__ devNewHeatGrid,
                                 const int gridRows, const int gridCols)
{
    for(int i = 1; i < gridRows - 1; ++i)
    {
        const int row = i * gridCols;
        for(int j = 1; j < gridCols - 1; ++j)
        {
            const double centre = devOldHeatGrid[row + j];
            //second difference along the row (i) direction
            const double alongRows = devOldHeatGrid[row + gridCols + j] +
                                     devOldHeatGrid[row - gridCols + j] -
                                     (2 * centre);
            //second difference along the column (j) direction
            const double alongCols = devOldHeatGrid[row + j + 1] +
                                     devOldHeatGrid[row + j - 1] -
                                     (2 * centre);
            devNewHeatGrid[row + j] = centre +
                                      DECLINE_HORIZONTAL * alongRows +
                                      DECLINE_VERTICAL * alongCols;
        }
    }
}
/* This function updates the grid and is invoked on parallel executions */
/* Parallel variant: each thread applies one explicit Euler step to its own
   window, whose inclusive bounds were produced by heatDiscover. The bounds
   are fetched once into registers before the loops. */
__global__ void heatUpdateParallel(const double * __restrict__ devOldHeatGrid,
                                   double * __restrict__ devNewHeatGrid,
                                   const int * __restrict__ workRowS,
                                   const int * __restrict__ workRowE,
                                   const int * __restrict__ workColS,
                                   const int * __restrict__ workColE,
                                   const int gridCols)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    //cache this thread's window bounds in registers
    const int rowS = workRowS[tid];
    const int rowE = workRowE[tid];
    const int colS = workColS[tid];
    const int colE = workColE[tid];
    for(int i = rowS; i <= rowE; ++i)
    {
        const int row = i * gridCols;
        for(int j = colS; j <= colE; ++j)
        {
            const double centre = devOldHeatGrid[row + j];
            const double alongRows = devOldHeatGrid[row + gridCols + j] +
                                     devOldHeatGrid[row - gridCols + j] -
                                     (2 * centre);
            const double alongCols = devOldHeatGrid[row + j + 1] +
                                     devOldHeatGrid[row + j - 1] -
                                     (2 * centre);
            devNewHeatGrid[row + j] = centre +
                                      DECLINE_HORIZONTAL * alongRows +
                                      DECLINE_VERTICAL * alongCols;
        }
    }
}
//File-scope dynamic allocations: kept global so heatCleanExit() can release
//them from any error path (including from inside CUDA_CHECK).
double * heatGrid = nullptr;        //host copy of the temperature grid
double * devNewHeatGrid = nullptr;  //device grid written by each step
double * devOldHeatGrid = nullptr;  //device grid read by each step
int * workRowS = nullptr;           //host per-thread window bounds
int * workRowE = nullptr;
int * workColS = nullptr;
int * workColE = nullptr;
int * devWorkRowS = nullptr;        //device copies of the window bounds
int * devWorkRowE = nullptr;
int * devWorkColS = nullptr;
int * devWorkColE = nullptr;
/* HOST(CPU) FUNCTIONS */
/* This function intialises the temperature on the given grid with higher
temperatures at the centre, progressively lower ones until the sides
and 0s at the perimetre
*/
static inline void heatInit(double * heatGrid,
                            const int gridRows, const int gridCols)
{
    //zero on the whole perimeter, growing toward the centre in both axes
    for(int i = 0; i < gridRows; ++i)
    {
        const int rowWeight = i * (gridRows - i - 1);
        for(int j = 0; j < gridCols; ++j)
        {
            const int colWeight = j * (gridCols - j - 1);
            heatGrid[i*gridCols+j] = (double) (rowWeight * colWeight);
        }
    }
}
/* This function writes out the input grid to a .dat file in current path */
/* Writes the grid to "<rows>_<cols>_cuda_<tpb>_<bpg>_{Initial|Final}.dat"
   in the current directory; flag 0 selects "Initial", anything else "Final".
   Values are space-separated, one grid row per line, one decimal place.
   Returns 0 on success, -1 if the file could not be opened. */
static inline int heatWrite(const double * heatGrid, const int flag,
                            const int gridRows, const int gridCols,
                            const int threadsPerBlock, const int blocksPerGrid)
{
    char filePath[70] = "";
    //snprintf: sprintf could overflow the fixed 70-byte buffer when the
    //four integers are large
    snprintf(filePath, sizeof(filePath), "%d_%d_cuda_%d_%d_%s.dat",
             gridRows, gridCols, threadsPerBlock, blocksPerGrid,
             (flag == 0) ? "Initial" : "Final");
    FILE * fp = fopen(filePath, "w");
    if(fp == nullptr)
        return -1;
    for(int i = 0; i < gridRows; ++i)
    {
        for(int j = 0; j < gridCols; ++j)
        {
            //fabs() so -0.0 values print as 0.0
            fprintf(fp, "%-.1lf", fabs(heatGrid[i*gridCols+j]));
            if(j != (gridCols - 1))
                fprintf(fp, " ");
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
    return 0;//all ok
}
/* This function swaps between the 2 grids to avoid assignmenets */
/* Double-buffer flip: exchanges the two grid pointers instead of copying
   grid contents. */
static inline void heatSwap(double ** a, double ** b)
{
    std::swap(*a, *b);
}
/* This function cleans up memory to prevent leaks on any exit error */
/* Releases every live host/device allocation, then terminates the process
   with errorCode. Each pointer is nulled right after it is freed.
   Note: CUDA_CHECK re-enters this function if a cudaFree itself fails. */
static inline void heatCleanExit(const int errorCode)
{
    if(heatGrid != nullptr)    { free(heatGrid);    heatGrid = nullptr; }
    if(devOldHeatGrid != nullptr) { CUDA_CHECK(cudaFree(devOldHeatGrid)); devOldHeatGrid = nullptr; }
    if(devNewHeatGrid != nullptr) { CUDA_CHECK(cudaFree(devNewHeatGrid)); devNewHeatGrid = nullptr; }
    if(workRowS != nullptr)    { free(workRowS);    workRowS = nullptr; }
    if(devWorkRowS != nullptr) { CUDA_CHECK(cudaFree(devWorkRowS)); devWorkRowS = nullptr; }
    if(workRowE != nullptr)    { free(workRowE);    workRowE = nullptr; }
    if(devWorkRowE != nullptr) { CUDA_CHECK(cudaFree(devWorkRowE)); devWorkRowE = nullptr; }
    if(workColS != nullptr)    { free(workColS);    workColS = nullptr; }
    if(devWorkColS != nullptr) { CUDA_CHECK(cudaFree(devWorkColS)); devWorkColS = nullptr; }
    if(workColE != nullptr)    { free(workColE);    workColE = nullptr; }
    if(devWorkColE != nullptr) { CUDA_CHECK(cudaFree(devWorkColE)); devWorkColE = nullptr; }
    exit(errorCode);
}
/* Main program function */
/* Entry point. Usage: prog gridRows gridCols threadsPerBlock blocksPerGrid.
   Validates the request against device 0, initialises the grid, partitions
   it among the CUDA threads (serial special case for exactly one thread)
   and runs STEPS update steps, timing the loop.
   All exits (success or failure) go through heatCleanExit(). */
int main(int argc, char *argv[])
{
    //query device 0 and report its capabilities
    cudaDeviceProp deviceProp;
    CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, 0));
    cout << "GPU PROPERTIES\n";
    cout << "******************************************************************\n";
    cout << "Cuda Device prop succeeded" << endl;
    cout << "System minor " << deviceProp.minor << endl;
    cout << "System major " << deviceProp.major << endl;
    cout << "Agent Prop Name " << deviceProp.name << endl;
    cout << "Total Global Memory " << deviceProp.totalGlobalMem << " bytes\n";
    cout << "Shared Memory Per Block " << deviceProp.sharedMemPerBlock << " bytes\n";
    cout << "Registers per block " << deviceProp.regsPerBlock << endl;
    cout << "Warp size " << deviceProp.warpSize << endl;
    cout << "Max Threads Per Block " << deviceProp.maxThreadsPerBlock << endl;
    cout << "Max clock frequency of the multiProcessors " << deviceProp.clockRate << " kHz\n";
    cout << "Size of shared memory region " << deviceProp.totalConstMem << " bytes\n";
    cout << "Number of multi-processors (compute units) " << deviceProp.multiProcessorCount << endl;
    cout << "******************************************************************\n\n";
    //get properties to check on input data possible run scenarios
    const int devMaxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
    //NOTE(review): one warp per SM is a very conservative cap on the truly
    //concurrent threads of the device; kept as the intended upper bound
    const int devMaxConcurrentThreads = deviceProp.multiProcessorCount *
                                        deviceProp.warpSize;
    //get the command line input data and do initial checks
    if(argc != 5)
    {
        cerr << "Not enough input data, need 4\n";
        cerr << "Grid_Rows Grid_Collumns Threads_Per_Block Blocks_Per_Grid\n";
        cerr << "Aborting...\n";
        heatCleanExit(-1);
    }
    const int gridRows = atoi(argv[1]);
    const int gridCols = atoi(argv[2]);
    const int threadsPerBlock = atoi(argv[3]);
    const int blocksPerGrid = atoi(argv[4]);
    const int gridSize = gridRows * gridCols;
    const int totalThreads = threadsPerBlock * blocksPerGrid;
    if(gridRows < 0 || gridCols < 0 || threadsPerBlock < 1 || blocksPerGrid < 1)
    {
        cerr << "Invalid Input Data\n";
        cerr << "Grid Rows = " << gridRows << endl;
        cerr << "Grid Cols = " << gridCols << endl;
        cerr << "Threads Per Block = " << threadsPerBlock << endl;
        cerr << "Blocks Per Grid = " << blocksPerGrid << endl;
        cerr << "Aborting...\n";
        heatCleanExit(-1);
    }
    //do checks based on device(GPU) capabilities
    if(threadsPerBlock > devMaxThreadsPerBlock)
    {
        cerr << "Maximum threads per block exceeded for current device\n";
        cerr << "Aborting...\n";
        heatCleanExit(-2);
    }
    if(totalThreads > devMaxConcurrentThreads)
    {
        cerr << "Maximum concurrent threads exceeded for current device\n";
        cerr << "Aborting...\n";
        heatCleanExit(-2);
    }
    //allocate and zero the host grid (the halo stays 0.0 throughout)
    heatGrid = (double *) malloc(gridSize * sizeof(double));
    if(heatGrid == nullptr)
    {
        cerr << "Error, not enough memory...\nAborting...\n";
        heatCleanExit(1);
    }
    for(int i = 0; i < gridRows; ++i)
        for(int j = 0; j < gridCols; ++j)
            heatGrid[i*gridCols+j] = 0.0;
    //devNewHeatGrid starts all-zero: only its interior is ever written
    CUDA_CHECK(cudaMalloc((void **)&devNewHeatGrid, gridSize * sizeof(double)));
    CUDA_CHECK(cudaMemcpy(devNewHeatGrid, heatGrid, gridSize * sizeof(double),
                          cudaMemcpyHostToDevice));
    //devOldHeatGrid receives the actual initial temperatures
    heatInit(heatGrid, gridRows, gridCols);
    CUDA_CHECK(cudaMalloc((void **)&devOldHeatGrid, gridSize * sizeof(double)));
    CUDA_CHECK(cudaMemcpy(devOldHeatGrid, heatGrid, gridSize * sizeof(double),
                          cudaMemcpyHostToDevice));
    //write out the initial grid to the corresponding file (kept disabled)
    // if(heatWrite(heatGrid, 0, gridRows, gridCols,
    //              threadsPerBlock, blocksPerGrid) == -1)
    // {
    //     cerr << "Error, could not create the initial file...\nAborting...\n";
    //     heatCleanExit(2);
    // }
    //kernel geometry: purely 1-D blocks and grid
    dim3 cudaThreads(threadsPerBlock, 1, 1);
    dim3 cudaBlocks(blocksPerGrid, 1 ,1);
    if(totalThreads == 1)//serial execution
    {
        cout << "Serial execution with 1 cuda thread\n";
        clock_t begin = clock();
        for(int steps = 0; steps < STEPS; ++steps)
        {
            heatUpdateSerial<<<cudaBlocks, cudaThreads>>>(devOldHeatGrid, devNewHeatGrid,
                                                          gridRows, gridCols);
            CUDA_CHECK(cudaGetLastError());//catch launch-configuration errors
            //wait for device(GPU) to finish its work
            CUDA_CHECK(cudaDeviceSynchronize());
            heatSwap(&devNewHeatGrid, &devOldHeatGrid);//old = new
        }
        clock_t end = clock();
        //clock() ticks in CLOCKS_PER_SEC units: scale to milliseconds so the
        //value matches the printed "ms" label (was reported in seconds)
        double totalTime = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC;
        cout << "\nElapsed time was " << totalTime << " ms\n";
    }
    else//parallel execution
    {
        //initial check on number of threads
        if(totalThreads % 2 != 0)
        {
            cout << "Can't parition grid fairly with odd number of threads = "
                 << totalThreads
                 << "\nAborting...\n";
            heatCleanExit(4);
        }
        cout << "Parallel execution with ";
        cout << "Threads Per Block : " << threadsPerBlock << endl;
        cout << "Blocks Per Grid : " << blocksPerGrid << endl;
        cout << "Total Cuda Threads : " << totalThreads << endl;
        //host-side per-thread window bounds (filled on the device by
        //heatDiscover; kept for symmetry with the device arrays)
        workRowS = (int *) malloc(totalThreads * sizeof(int));
        if(workRowS == nullptr)
        {
            cerr << "Error, not enough memory...\nAborting...\n";
            heatCleanExit(5);
        }
        workRowE = (int *) malloc(totalThreads * sizeof(int));
        if(workRowE == nullptr)
        {
            cerr << "Error, not enough memory...\nAborting...\n";
            heatCleanExit(5);
        }
        workColS = (int *) malloc(totalThreads * sizeof(int));
        if(workColS == nullptr)
        {
            cerr << "Error, not enough memory...\nAborting...\n";
            heatCleanExit(5);
        }
        workColE = (int *) malloc(totalThreads * sizeof(int));
        if(workColE == nullptr)
        {
            cerr << "Error, not enough memory...\nAborting...\n";
            heatCleanExit(5);
        }
        int blockRows = 0;//rows of each thread's data block
        int blockCols = 0;//cols of each thread's data block
        int threadsPerRow = 0, threadsPerCol = 0;//vertical and horizontal distrib
        //classify problem cases based on total threads
        int cut = (int) sqrt(totalThreads);
        double cutF = sqrt(totalThreads);
        int probCase;
        if(totalThreads == 2)//case 1 : two threads, west/east halves
        {
            blockRows = gridRows;
            blockCols = gridCols / totalThreads;
            threadsPerRow = gridRows / blockRows;
            threadsPerCol = gridCols / blockCols;
            if((threadsPerRow * threadsPerCol) != totalThreads)
            {
                cout << "Grid partitioning results to remains...\nAborting...\n";
                heatCleanExit(6);
            }
            probCase = 1;
        }
        else if(cutF > (double) cut)//case 2.1 : non-square counts (6, 8, 10, ...)
        {
            if(gridSize % totalThreads != 0)//can't cut it without remains
            {
                cout << "Grid partitioning results to remains...\nAborting...\n";
                heatCleanExit(7);
            }
            const int localProbSize = gridSize / totalThreads;
            int spread = gridSize;
            //pick the factorisation of localProbSize whose sides divide the
            //grid evenly and are closest to square
            for(int i = gridRows; i > 0; --i)//priority to rows
            {
                for(int j = gridCols; j > 0; --j)
                {
                    if((i * j) == localProbSize)
                    {
                        if(gridRows % i != 0 || gridCols % j != 0)
                            continue;
                        if(abs(i - j) < spread)
                        {
                            spread = abs(i - j);
                            blockRows = i;
                            blockCols = j;
                        }
                    }
                }
            }
            if(blockRows == 0)//no admissible factorisation: avoid div-by-zero below
            {
                cout << "Grid partitioning results to remains...\nAborting...\n";
                heatCleanExit(7);
            }
            threadsPerRow = gridRows / blockRows;
            threadsPerCol = gridCols / blockCols;
            if((threadsPerRow * threadsPerCol) != totalThreads)
            {
                cout << "Grid partitioning results to remains...\nAborting...\n";
                heatCleanExit(7);
            }
            probCase = 2;
        }
        else//case 2.2 : perfect squares (4, 16, 36, ...)
        {
            threadsPerRow = cut;
            threadsPerCol = cut;
            if((gridRows % cut != 0) || (gridCols % cut != 0))//can't cut even blocks
            {
                cout << "Grid partitioning results to remains...\nAborting...\n";
                heatCleanExit(8);
            }
            blockRows = gridRows / cut;
            blockCols = gridCols / cut;
            probCase = 2;
        }
        printf("Grid can be partioned without remains...\n"
               "Rows per block : %d, Columns per block : %d\n"
               "Vertical threads : %d, Horizontal threads : %d\n\n",
               blockRows, blockCols, threadsPerRow, threadsPerCol);
        //allocate device(GPU) memory for the per-thread window bounds.
        //heatDiscover writes every slot, so no host->device transfer of the
        //(previously uninitialized) host arrays is needed beforehand.
        CUDA_CHECK(cudaMalloc((void **)&devWorkRowS, totalThreads * sizeof(int)));
        CUDA_CHECK(cudaMalloc((void **)&devWorkRowE, totalThreads * sizeof(int)));
        CUDA_CHECK(cudaMalloc((void **)&devWorkColS, totalThreads * sizeof(int)));
        CUDA_CHECK(cudaMalloc((void **)&devWorkColE, totalThreads * sizeof(int)));
        heatDiscover<<<cudaBlocks, cudaThreads>>>
            (devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
             blockRows, blockCols,
             threadsPerRow, threadsPerCol, probCase,
             gridRows, gridCols);
        CUDA_CHECK(cudaGetLastError());
        //wait for device(GPU) to finish its work
        CUDA_CHECK(cudaDeviceSynchronize());
        clock_t begin = clock();
        for(int steps = 0; steps < STEPS; ++steps)
        {
            heatUpdateParallel<<<cudaBlocks, cudaThreads>>>
                (devOldHeatGrid, devNewHeatGrid,
                 devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
                 gridCols);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
            heatSwap(&devNewHeatGrid, &devOldHeatGrid);//old = new
        }
        clock_t end = clock();
        //scale to milliseconds to match the printed label (was seconds)
        double totalTime = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC;
        cout << "\nElapsed time was " << totalTime << " ms\n";
    }
    //after an even number of swaps the newest data sits in devOldHeatGrid
    if(STEPS % 2 == 0)
    {
        CUDA_CHECK(cudaMemcpy(heatGrid, devOldHeatGrid, gridSize * sizeof(double),
                              cudaMemcpyDeviceToHost));
    }
    else
    {
        CUDA_CHECK(cudaMemcpy(heatGrid, devNewHeatGrid, gridSize * sizeof(double),
                              cudaMemcpyDeviceToHost));
    }
    //write out the final grid to the corresponding file (kept disabled)
    // if(heatWrite(heatGrid, 1, gridRows, gridCols,
    //              threadsPerBlock, blocksPerGrid) == -1)
    // {
    //     cerr << "Error, could not create the initial file...\nAborting...\n";
    //     heatCleanExit(3);
    // }
    //clear memory and exit
    heatCleanExit(0);
} |
6,102 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
/* Demo: fill a host vector with 100 random ints, sort it on the GPU with
   thrust, copy it back and print every element. Always returns 0. */
int main1()
{
    // serially generate the random input on the host
    thrust::host_vector<int> h_vec(100);
    std::generate(h_vec.begin(), h_vec.end(), rand);
    // mirror the data onto the device and sort it there
    thrust::device_vector<int> d_vec = h_vec;
    thrust::sort(d_vec.begin(), d_vec.end());
    // bring the sorted sequence back into the host buffer
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    // print h_vec
    for (int i = 0; i < h_vec.size(); i++)
        std::cout << "_vec[" << i << "] = " << h_vec[i] << std::endl;
    return 0;
}
|
6,103 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/random.h>
#include <iostream>
#include <cuda.h>
#include <cuda_fp16.h>
/* Accumulator for a fused |x|-sum / |x|-max / nonzero-count reduction. */
template <typename T>
struct asum_amax_type
{
T asum_val;   // running sum of absolute values
T amax_val;   // running maximum of absolute values
int nnz;      // number of non-zero elements seen so far
};
/* Associative combiner for two partial asum/amax/nnz accumulators; used as
   the reduction operator in thrust::transform_reduce. */
template <typename T>
struct asum_amax_binary_op
: public thrust::binary_function< asum_amax_type<T>, asum_amax_type<T>, asum_amax_type<T> >
{
__host__ __device__
asum_amax_type<T> operator()(const asum_amax_type<T>& x, const asum_amax_type<T>& y) const
{
asum_amax_type<T> result;
result.nnz = x.nnz + y.nnz;
result.asum_val = x.asum_val + y.asum_val;
result.amax_val = thrust::max(x.amax_val, y.amax_val);
return result;
}
};
/* Transform step: reinterprets one 32-bit word as a half2 pair, converts
   both lanes to float and emits that pair's abs statistics. Device-only
   (uses __half22float2).
   NOTE(review): the unary_function base advertises result type float, but
   operator() returns asum_amax_type<float>; thrust deduces the real type
   from operator(), yet the typedef is misleading — confirm/clean up. */
struct h2f_unary_op
: public thrust::unary_function<unsigned int, float>
{
__device__
asum_amax_type<float> operator()(const unsigned int& x) const
{
half2 val = *( (half2*) &x);   // reinterpret the word as two halfs
float2 fval = __half22float2(val);
fval.x = fabsf(fval.x);
fval.y = fabsf(fval.y);
asum_amax_type<float> result;
// count how many of the two lanes are non-zero
result.nnz = (fval.x == 0.f) ? 0 : 1;
result.nnz = (fval.y == 0.f) ? result.nnz : result.nnz + 1;
result.asum_val = fval.x+fval.y;
result.amax_val = thrust::max(fval.x, fval.y);
return result;
}
};
typedef struct float_pair{
    float aave;   // average of |x| over the non-zero elements
    float amax;   // maximum of |x|
} float_pair_t;
/* Computes the abs-average (over non-zero elements) and abs-max of N
   half-precision values on the device, reducing them two at a time as
   half2 words. N must be even (throws -1 otherwise); d_data should be
   4-byte aligned for the half2 reinterpretation. Prints alignment,
   timing and bandwidth diagnostics to stdout. */
extern "C"
float_pair_t half2_stats(half* d_data, int N){
    if((uintptr_t)(const void *)(d_data) % 4 == 0) std::cout<<"Aligned at 4Byte boundary"<<std::endl;
    else if( (uintptr_t)(const void *)(d_data) % 2 == 0) std::cout<<"Aligned at 2Byte boundary"<<std::endl;
    if(N%2 != 0){
        std::cerr<<"Odd sized tensors are not supported at the moment"<<std::endl;
        throw(-1);
    }
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //view the half buffer as N/2 32-bit words, each holding a half2 pair
    thrust::device_ptr<unsigned int> d_ptr = thrust::device_pointer_cast((unsigned int*)d_data);
    h2f_unary_op unary_op;
    asum_amax_binary_op<float> binary_op;
    asum_amax_type<float> init;
    init.amax_val = 0;
    init.nnz=0;
    init.asum_val = 0;
    asum_amax_type<float> result = thrust::transform_reduce(d_ptr, d_ptr+(N/2), unary_op, init, binary_op);
    float_pair_t return_result;
    //NOTE(review): if every element is zero, nnz == 0 and aave becomes a
    //0/0 NaN — confirm whether callers expect that or a 0.0 fallback.
    return_result.aave = result.asum_val/(float)result.nnz;
    return_result.amax = result.amax_val;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to reduce %f GB: %f ms \n", ((float)N*2)/(1024*1024*1024), time);
    printf("Bandwidth is: %f GB/s \n", ((float)N*2)/(time/1000)/(1024*1024*1024) );
    //destroy the events: they were previously leaked on every call
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return return_result;
}
|
6,104 | #include "PiecewiseConstant.cuh"
/* Converts an RGB sample to the scalar weight used for importance sampling.
   NOTE(review): the coefficients (1, 4.5906, 0.06007) do not match any
   standard luminance formula — confirm against the source they came from.
   The blue coefficient now carries an 'f' suffix: the bare double literal
   silently promoted the whole expression to double precision. */
inline CUDA_FUNC float rgb2y_xyz(const float3 &rgb)
{
    return rgb.x + 4.5906f * rgb.y + 0.06007f * rgb.z;
}
/* Builds a piecewise-constant 1-D distribution over n equal cells from the
   weights v: value[] keeps a copy of the weights, cdf[] the normalized
   cumulative distribution (n+1 entries), funInt the integral of the step
   function. A zero integral degenerates to the uniform CDF.
   Note: value/cdf are allocated with new[] here, so whatever releases them
   must use delete[]. */
CUDA_FUNC Distribution::Distribution(int num, float *v) : n(num)
{
    value = new float[n];
    memcpy(value, v, sizeof(float) * n);
    cdf = new float[n + 1];
    cdf[0] = 0.0f;
    //unnormalized running integral; each cell has width 1/n
    for (int i = 1; i <= n; i++)
        cdf[i] = cdf[i - 1] + value[i - 1] / n;
    funInt = cdf[n];
    //normalize, or fall back to the uniform CDF when the integral is zero
    for (int i = 1; i <= n; i++)
        cdf[i] = funInt == 0 ? (float) i / n : cdf[i] / funInt;
    cdf[n] = 1.0f;
}
/* Draws from the distribution given a uniform u in [0,1). Returns a
   continuous "index" in [0,n): the integer part selects the cell (also
   written to *offset), the fractional part interpolates within it.
   *pdf receives the selected cell's density value[r]/funInt.
   NOTE(review): the bisection updates r = mid - 1 on cdf[mid] > u, which
   is not the textbook upper-bound update (r = mid); verify cell selection
   near cdf boundaries against a reference implementation.
   NOTE(review): unlike PDF(), there is no funInt == 0 guard here. */
CUDA_FUNC float Distribution::sample(const float &u, float *pdf, int *offset) const
{
    int l = 0, r = n;
    int mid;
    while (r > l)
    {
        mid = l + (r - l) / 2;
        if (cdf[mid] > u)
            r = mid - 1;
        else l = mid + 1;
    }
    if (r == n)   //clamp: u fell into the last cell
        --r;
    *offset = r;
    *pdf = value[r] / funInt;
    //linear interpolation inside the cell; guard against a flat cdf segment
    return cdf[r + 1] == cdf[r] ? r : r + (u - cdf[r]) / (cdf[r + 1] - cdf[r]);
}
/* Density of a previously drawn continuous sample: the weight of the
   containing cell, normalized by the integral (or by n when the integral
   is zero, matching the uniform fallback in the constructor). */
CUDA_FUNC float Distribution::PDF(float sample) const
{
    return value[(int)sample] / (funInt == 0 ? n : funInt);
}
/* Replaces the host value[]/cdf[] buffers with device copies; the object
   then holds device pointers. Returns true only when both uploads
   succeeded. */
__host__ bool Distribution::load2Device()
{
    float *tmp_v = value;
    float *tmp_cdf = cdf;
    value = cdf = nullptr;
    cudaMalloc(&value, sizeof(float) * n);
    cudaMalloc(&cdf, sizeof(float) * (n + 1));
    auto error = cudaMemcpy(value, tmp_v, sizeof(float) * n, cudaMemcpyHostToDevice);
    bool ok = (error == cudaSuccess);   //previously the first copy's status was discarded
    error = cudaMemcpy(cdf, tmp_cdf, sizeof(float) * (n + 1), cudaMemcpyHostToDevice);
    ok = ok && (error == cudaSuccess);
    //the buffers were allocated with new[] in the constructor, so they must
    //be released with delete[] (the original free() was undefined behaviour)
    delete[] tmp_v;
    delete[] tmp_cdf;
    return ok;
}
/* Builds the 2-D (theta, phi) sampling distribution from a width x height
   image with 4 floats per pixel: one conditional Distribution per row plus
   the row marginal built from the per-row integrals.
   NOTE(review): condition_w/hmargin are raw malloc storage that is then
   written via assignment — no placement new; this relies on Distribution
   being safely assignable into uninitialized memory. Confirm. */
CUDA_FUNC Distribution2D::Distribution2D(float *img, int w, int h):width(w), height(h)
{
    funInt = 0.0f;
    float *v_tmp = (float*)malloc(sizeof(float)* height);
    float *condition_tmp = (float *)malloc(sizeof(float) * width);
    float tmp;
    condition_w = (Distribution*)malloc(sizeof(Distribution) * height);
    for (int i = 0; i < height; i++)
    {
        //P[Height]
        float sum = 0.0f;   //NOTE(review): never accumulated or read — dead
        //width phi[0, 2PI], height theta [0, PI]
        for (int j = 0; j < width; j++)
        {
            int idx = 4 * (i * width + j);   //4 floats per pixel (RGBA layout)
            tmp = rgb2y_xyz(make_float3(img[idx], img[idx + 1], img[idx + 2]));
            condition_tmp[j] = tmp;
            funInt += tmp;
        }
        //p(phi | theta)
        condition_w[i] = Distribution(width, condition_tmp);
        v_tmp[i] = condition_w[i].getfunInt();
    }
    funInt /= width * height;   //average weight over the whole image
    hmargin = (Distribution*)malloc(sizeof(Distribution));
    //p(theta)
    *hmargin = Distribution(height, v_tmp);
    free(v_tmp);
    free(condition_tmp);
}
/* Joint pdf of a (phi, theta) sample given as fractions in [0,1)^2.
   NOTE(review): components equal to 1.0 index one past the last cell —
   callers must guarantee the half-open range. */
CUDA_FUNC float Distribution2D::PDF(const float2 &sample) const
{
    //sample.x corresponding to phi
    int i1 = condition_w[0].getCount() * sample.x;
    //while sample.y ... to theta
    int i2 = hmargin -> getCount() * sample.y;
    return condition_w[i2].value[i1] / hmargin->getfunInt();
}
/* Draws a (phi, theta)-style pair: first the row (theta) from the marginal,
   then the column (phi) from that row's conditional. Returns continuous
   cell coordinates (x = column, y = row); *pdf is the product of the two
   1-D pdfs. Note v carries the row index into condition_w[v] before the
   second sample() call overwrites it with the column offset. */
CUDA_FUNC float2 Distribution2D::sample(const float2 &u, float *pdf) const
{
    float pdfs[2];
    int v;
    float d1 = hmargin->sample(u.y, pdfs + 1, &v);
    float d0 = condition_w[v].sample(u.x, pdfs, &v);
    *pdf = pdfs[0] * pdfs[1];
    return make_float2(d0, d1);
}
/* Moves the whole 2-D distribution (marginal + all row conditionals) to
   device memory; hmargin/condition_w then hold device pointers. Returns
   true only if every per-Distribution upload and allocation succeeded
   (per-row load2Device() results and the first cudaMalloc status were
   previously discarded). */
CUDA_FUNC bool Distribution2D::load2Device()
{
    bool err = hmargin->load2Device();
    for (int i = 0; i < height; i++)
        err = condition_w[i].load2Device() && err;
    Distribution *a = condition_w, *b = hmargin;
    auto error = cudaMalloc(&hmargin, sizeof(Distribution));
    bool ok = (error == cudaSuccess);
    error = cudaMalloc(&condition_w, height * sizeof(Distribution));
    ok = ok && (error == cudaSuccess);
    //shallow copies: the structs now contain device pointers, so copying
    //them bitwise to the device is the intended behaviour
    cudaMemcpy(hmargin, b, sizeof(Distribution), cudaMemcpyHostToDevice);
    cudaMemcpy(condition_w, a, height * sizeof(Distribution), cudaMemcpyHostToDevice);
    free(a);   //both arrays were malloc'd in the 2-D constructor
    free(b);
    return err && ok;
} |
6,105 | #include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
/* Prints basic hardware properties for every CUDA device in the system.
   NOTE(review): the cudaGetDeviceCount / cudaGetDeviceProperties return
   values are not checked — without a working driver, nDevices is left
   uninitialized. */
int main(int argc, char *argv[])
{
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        //x2 for DDR; clock is in kHz, hence the 1.0e6 divisor for GB/s
        printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Max Threads per block:%d\n",prop.maxThreadsPerBlock);
        printf(" Max Threads dimensions x:%d\n",prop.maxThreadsDim[0]);
        printf(" Max Threads dimensions y:%d\n",prop.maxThreadsDim[1]);
        printf(" Max Threads dimensions z:%d\n",prop.maxThreadsDim[2]);
        printf(" Max Grid size x:%d\n",prop.maxGridSize[0]);
        printf(" Max Grid size y:%d\n",prop.maxGridSize[1]);
        printf(" Max Grid size z:%d\n",prop.maxGridSize[2]);
    }
}
|
6,106 | #include <stdio.h>
#include <math.h>
#include <assert.h>
/* Each thread sums a contiguous run of `num` harmonic-series terms: the
   flattened 2-D thread index k accumulates 1/(k*num+1) + ... + 1/(k*num+num)
   into out[k]. The denominator is advanced by 1.0 in double rather than
   re-derived from the loop counter each iteration. */
__global__ void partial_sum(long num, double *out) {
    int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y, index = x + y*blockDim.x*gridDim.x;
    double sum = 0.0;
    double cur = index*num + 1;   //first denominator handled by this thread
    for (long i = 0; i < num; ++i) {
        sum += 1.0/cur;
        cur += 1.0;
    }
    out[index] = sum;
}
/* Single-thread kernel (launched <<<1,1>>>): folds the `num` per-thread
   partial sums together with the host-computed tail `start` into
   partials[num], so the final total is produced on the device. */
__global__ void add_harmonics(double start, double *partials, long num) {
    partials[num] = start;
    for (long i = 0; i < num; ++i) {
        partials[num] += partials[i];
    }
}
/* Approximates the Euler–Mascheroni constant: gamma ≈ H(terms) - ln(terms).
   The harmonic sum H(terms) is computed on the GPU (one contiguous run of
   terms per thread) plus a small host-side tail for the remainder that does
   not divide evenly among the threads. */
int main(int argc, char **argv) {
    if (argc < 2) {
        printf("usage:\n%s <N_ITERATIONS>\n", *argv);
        return -1;
    }
    dim3 block(32, 8);
    long threads_per_block = block.x * block.y, block_w = 6, block_h = 2, blocks = block_w * block_h, threads = threads_per_block*blocks;
    long terms = (long)strtod(argv[1], 0), iterations_per_thread = terms/threads, iterations_left = terms%threads;
    long bytes = (threads+1) * sizeof(double); // last elem is sum of all
    dim3 grid(block_w, block_h);
    double *partials, harmonics = 0.0;
    //device threads cover terms 1 .. threads*iterations_per_thread
    //(= terms - iterations_left); the host adds only the remaining tail.
    //Fixed off-by-one: the loop previously started at terms-iterations_left,
    //double-counting a term already summed on the device.
    for (long i = terms-iterations_left+1; i <= terms; ++i) {
        harmonics += 1.0/i;
    }
    cudaMalloc(&partials, bytes);
    partial_sum <<<grid, block>>> (iterations_per_thread, partials);
    cudaDeviceSynchronize();
    add_harmonics <<<1, 1>>> (harmonics, partials, threads); // we want to compute the sum of partial sums on the device
    cudaMemcpy(&harmonics, partials+threads, sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(partials);
    double gamma = harmonics - log(terms);
    printf("%.17f\n", gamma);
    return 0;
}
|
6,107 | #define __rose_lt(x,y) ((x)<(y)?(x):(y))
#define __rose_gt(x,y) ((x)>(y)?(x):(y))
#define D__(solventMol) D_[solventMol]
__global__ void Action_No_image_GPU(double *D_,double *maskCenter,double (*SolventMols_)[1024][3]);
//this is only used for cuda-chill
//heavy simplification
#define NsolventMolecules_ 1024
#define NsolventAtoms_ 1024
// struct MolDist {
// int mol; ///< Original solvent molecule number (starts from 1).
// double D; ///< Closest distance of solvent molecule to atoms in distanceMask.
// //AtomMask mask; ///< Original topology solvent molecule atom mask.
// double solventAtoms[NsolventAtoms_][3]; ///< Actual solvent atom #s to loop over.
// };
//using dist for no image
// and kernel for when we use solute molecule center
//extracting pulling out arrays out from struct
/* Host wrapper (cuda-chill generated): uploads the solute centre and the
   1024x1024x3 solvent coordinates, launches the per-(molecule,atom) kernel
   and copies back the 1024 per-molecule results into D_.
   The kernel writes D_[32*bx+tx] with bx,tx < 32, i.e. exactly 1024
   doubles — the original copied 3072 doubles back, overrunning the
   1024-element host array. Unused generated locals removed. */
void Action_NoImage_Center(double SolventMols_[1024][1024][3],double D_[1024],double maskCenter[3],double maxD)
{
    double *devI2Ptr;
    double *devI1Ptr;
    double *devO1Ptr;
    cudaMalloc(((void **)(&devO1Ptr)),1024 * sizeof(double ));
    cudaMalloc(((void **)(&devI1Ptr)),3 * sizeof(double ));
    cudaMemcpy(devI1Ptr,maskCenter,3 * sizeof(double ),cudaMemcpyHostToDevice);
    cudaMalloc(((void **)(&devI2Ptr)),3145728 * sizeof(double ));
    cudaMemcpy(devI2Ptr,SolventMols_,3145728 * sizeof(double ),cudaMemcpyHostToDevice);
    dim3 dimGrid0 = dim3(32,32);
    dim3 dimBlock0 = dim3(32,32);
    Action_No_image_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr,devI1Ptr,((double (*)[1024][3])devI2Ptr));
    //was 3072 doubles: that overran the 1024-element host array D_
    cudaMemcpy(D_,devO1Ptr,1024 * sizeof(double ),cudaMemcpyDeviceToHost);
    cudaFree(devO1Ptr);
    cudaFree(devI1Ptr);
    cudaFree(devI2Ptr);
}
/* Generated (cuda-chill) kernel: thread (tx,ty) of block (bx,by) handles
   molecule 32*bx+tx, atom ty; D_[32*bx+tx] keeps a running minimum.
   NOTE(review): maxD is read uninitialized as the sentinel stored into D_
   — in the hand-written original it was the caller-supplied maximum
   distance; confirm before relying on results.
   NOTE(review): Dist is the dot product of the centre with the atom
   position, not the squared distance shown in the commented-out reference
   code ("heavy simplification" per the file header). Unconditional racy
   writes to D_ across ty/by are inherent to this generated form. */
__global__ void Action_No_image_GPU(double *D_,double *maskCenter,double (*SolventMols_)[1024][3])
{
    int bx;
    bx = blockIdx.x;
    int by;
    by = blockIdx.y;
    int tx;
    tx = threadIdx.x;
    int ty;
    ty = threadIdx.y;
    double maxD;   //never assigned — see NOTE above
    double Dist;
    int t2;        //unused generated temporaries
    int t4;
    int t10;
    int t8;
    D_[32 * bx + tx] = maxD;
    //main dist2_noImage code
    //double *a1 = maskCenter.Dptr(); //center of solute molecule
    //double *a2 = frmIn.XYZ(*solvent_atom);
    //double *a1 = maskCenter; //center of solute molecule
    //double *a2 = SolventMols_[solventMol][solventAtom];
    //double x = a1[0] - a2[0];
    //double y = a1[1] - a2[1];
    //double z = a1[2] - a2[2];
    //Dist = (x*x + y*y + z*z);
    Dist = maskCenter[0] * SolventMols_[32 * bx + tx][ty][0] + maskCenter[1] * SolventMols_[32 * bx + tx][ty][1] + maskCenter[2] * SolventMols_[32 * bx + tx][ty][2];
    if (Dist + 1 <= D__(tx + 32 * bx))
        D_[32 * bx + tx] = Dist;
}
|
6,108 | #include "includes.h"
/* Final warp-stage shared-memory reduction. Intentionally stops after the
   +4 step, leaving FOUR partial sums in sdata[0..3] — the caller
   (ReduceRowMajor5) emits four outputs per block. `n` bounds the additions
   so out-of-range slots are never read.
   NOTE(review): relies on volatile + implicit warp-synchronous execution;
   under Volta+ independent thread scheduling this needs __syncwarp() or
   shuffle intrinsics — confirm the target architecture. */
__device__ void warpReduce(volatile int* sdata, int tid, int n) {
    if(tid + 32 < n)
        sdata[tid] += sdata[tid+32];
    if(tid + 16 < n)
        sdata[tid] += sdata[tid+16];
    if(tid + 8 < n)
        sdata[tid] += sdata[tid+8];
    if(tid + 4 < n)
        sdata[tid] += sdata[tid+4];
}
/* Block-level sum reduction: each block folds its blockDim.x elements in
   shared memory down to four partial sums and stores them at
   g_odata[blockIdx.x*4 .. +3] (the companion warpReduce deliberately stops
   at the +4 step). Requires blockDim.x to be a power of two >= 64 and
   dynamic shared memory of blockDim.x * sizeof(int).
   NOTE(review): warpReduce receives `size` (the global element count) as
   its bound, not the per-block count — verify intent for the last,
   partially-filled block. */
__global__ void ReduceRowMajor5(int *g_idata, int *g_odata, int size) {
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    sdata[tid] = 0;   //zero-pad so a partial tail block reduces cleanly
    if(i < size)
        sdata[tid] = g_idata[i];
    __syncthreads();
    //tree reduction down to 32 active slots; barrier outside the branch
    for(unsigned int s = blockDim.x/2; s >= 32; s/=2) {
        if(tid < s) {
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid < 32) {
        warpReduce(sdata, tid, size);
    }
    if(tid == 0) {
        g_odata[blockIdx.x*4] = sdata[0];
        g_odata[blockIdx.x*4+1] = sdata[1];
        g_odata[blockIdx.x*4+2] = sdata[2];
        g_odata[blockIdx.x*4+3] = sdata[3];
    }
} |
6,109 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point fuzz kernel (see file header: "Do not
   modify"): exercises transcendental intrinsics and nested control flow on
   the 21 scalar arguments, then prints the accumulated value from the
   device. Launched <<<1,1>>>; the printf is the only observable output. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
    for (int i=0; i < var_1; ++i) {
        if (comp > (var_2 * (var_3 - (+1.8532E-43f / +1.9626E14f * (+1.7886E-37f * var_4))))) {
            if (comp >= (var_5 * (var_6 - var_7))) {
                comp += (-0.0f - sinhf((-0.0f / var_9 + floorf(+1.5832E-42f))));
                float tmp_1 = -1.1252E-15f;
                float tmp_2 = -1.3006E-24f;
                comp += tmp_2 - tmp_1 - sinhf((-0.0f - (var_10 / var_11 + (var_12 / +1.1462E34f + var_13))));
                if (comp >= atanf(+1.0472E-42f)) {
                    float tmp_3 = -1.4667E36f;
                    comp += tmp_3 + (var_14 + (var_15 * var_16 * var_17));
                }
                for (int i=0; i < var_8; ++i) {
                    comp = log10f(+1.2622E-35f);
                }
                if (comp == atan2f(floorf(+1.8171E-8f), (-1.3632E-43f / var_18 - (+0.0f - +1.9495E-41f)))) {
                    comp = (-1.3993E25f / (+1.0080E-42f * var_19 + var_20));
                }
            }
        }
    }
    printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot initialized to v.
// Caller owns the returned memory (free()).
float* initPointer(float v) {
float *buf = (float*) malloc(sizeof(float) * 10);
for (int k = 0; k < 10; ++k)
buf[k] = v;
return buf;
}
// Parse 21 numeric CLI arguments, run the generated fuzz kernel once, and
// let its printf report the result.
int main(int argc, char** argv) {
/* Program variables */
// BUG FIX: the original dereferenced argv[1]..argv[21] unconditionally,
// segfaulting on any invocation with fewer than 21 arguments.
if (argc < 22) {
fprintf(stderr, "usage: %s <21 numeric arguments>\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
int tmp_9 = atoi(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
// Block until the kernel (and its printf) completes.
cudaDeviceSynchronize();
return 0;
}
|
6,110 | #include "includes.h"
// Hillis–Steele inclusive scan, independently within each block.
// result[i] = sum of input over this block's elements up to and including i.
// Requires dynamic shared memory of blockDim.x * sizeof(unsigned int), and a
// launch whose grid exactly covers the input (no bounds checks here).
__global__ void inclusive_scan(const unsigned int *input, unsigned int *result)
{
extern __shared__ unsigned int sdata[];
const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int lid = threadIdx.x;
// stage this thread's element into shared memory
unsigned int acc = input[gid];
sdata[lid] = acc;
__syncthreads();
// doubling-stride passes: each pass adds the partial `hop` slots to the left
for (unsigned int hop = 1; hop < blockDim.x; hop <<= 1)
{
if (lid >= hop)
acc += sdata[lid - hop];
// wait until every thread has read its partner before overwriting
__syncthreads();
sdata[lid] = acc;
__syncthreads();
}
result[gid] = sdata[lid];
}
6,111 | // CUDA programming
// Exercise n. 09
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#define BLOCKS 2
#define THREADS 2
// Prototypes
__global__ void square_matrix_transpose(int *d_X, int *d_Y, int N);
__host__ void ints(int *m, int N);
__host__ void print_matrix(int *A, int N);
int main(void)
{
int *A, *B; // host copies of A, B
int *d_A, *d_B; // device copies of A, B
int N = BLOCKS * THREADS; // matrix edge length
int size = N * N * sizeof(int);
// Allocate space for host copies of A, B
A = (int *)malloc(size);
B = (int *)malloc(size);
// Setup input values: A = 0, 1, ..., N*N-1
ints(A, N * N);
// Allocate space for device copies of A, B
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
// Copy inputs to device
// NOTE(review): copying the uninitialized B to the device is unnecessary —
// the transpose kernel writes every element of d_B; also none of the CUDA
// calls in this function are error-checked.
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
// Setup the execution configuration
dim3 dim_grid(BLOCKS, BLOCKS, 1); // size: BLOCKS x BLOCKS x 1
dim3 dim_block(THREADS, THREADS, 1); // size: THREADS x THREADS x 1
// Call the kernel on GPU
square_matrix_transpose<<< dim_grid, dim_block >>>(d_A, d_B, N);
// Copy result back to host (blocking memcpy also synchronizes with the kernel)
cudaMemcpy(B, d_B, size, cudaMemcpyDeviceToHost);
// Check the result by printing both matrices
print_matrix(A, N);
print_matrix(B, N);
// Cleanup
free(A);
free(B);
cudaFree(d_A);
cudaFree(d_B);
return(EXIT_SUCCESS);
}
// Transpose of a square matrix (on device)
// Transpose of a square matrix (on device): d_Y = transpose(d_X).
// One thread per element; expects a 2-D launch covering N x N.
__global__ void square_matrix_transpose(int *d_X, int *d_Y, int N)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Avoid accessing beyond the end of the matrices
if(row < N && col < N)
{
// BUG FIX: the original wrapped this in `for(k = 0; k < N; k++)`,
// issuing the same write N times per thread. Each thread moves exactly
// one element, so no loop is needed; the result is unchanged.
unsigned int pos = row * N + col;
unsigned int tr_pos = col * N + row;
d_Y[tr_pos] = d_X[pos];
}
}
// Initialisation
// Initialisation: fill m[0..N-1] with the ramp 0, 1, ..., N-1.
__host__ void ints(int *m, int N)
{
for (int idx = 0; idx < N; idx++)
m[idx] = idx;
}
// Print the elements of the matrix
// Print the N x N matrix A row by row, one tab-separated line per row,
// followed by a blank line. Indices in the labels are 1-based.
__host__ void print_matrix(int *A, int N)
{
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
printf("%d\t", A[r * N + c]);
printf("\n");
}
printf("\n");
}
|
6,112 | //Cuda hello world
#include<stdio.h>
#define N 10
#define THREADS_PER_BLOCK 1
#define BLOCK_SIZE THREADS_PER_BLOCK
// calculation of loss
// Loss-calculation kernel STUB: currently only prints each thread's
// thread/block ids; the actual error computation is commented out below.
// NOTE(review): parameters err, label and n are unused until the commented
// loop is restored (and `Y`/`output` in it are not defined anywhere visible).
__global__ void cal_loss(float *err, float *label, int n){
printf("threadIdx:(%d) blockIdx:(%d)\n "
, threadIdx.x, blockIdx.x);
const int pos = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
const int totalPos = blockDim.x * gridDim.x; // total launched threads
//for (int idx = N * pos / totalPos; idx < N * (pos+1) / totalPos; ++idx) {
// err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]); // calculation of error
//}
}
// initialize the label array
// One-hot label vector: slot 3 is set to 1, every other slot to 0.
void init_float(float * arr, int size){
for (int k = 0; k < size; ++k)
arr[k] = (k == 3) ? 1.0f : 0.0f;
}
// Allocate host/device buffers, upload a one-hot label vector and a zeroed
// error vector, and launch the (stub) loss kernel.
int main()
{
// host data
int size = 10 * sizeof(float);
float *label = (float*)malloc(size);
init_float(label, 10); // one-hot label at index 3
float *err = (float*)malloc(size);
// BUG FIX: the original re-malloc'd `label` here, leaking the buffer that
// init_float had just filled and uploading uninitialized memory instead.
// Also zero `err` so the device copy below is well-defined.
for (int i = 0; i < 10; ++i)
err[i] = 0.0f;
// copy data to device
float *d_label, *d_err;
cudaMalloc(&d_label, size);
cudaMalloc(&d_err, sizeof(float) * 10);
cudaMemcpy(d_label, label, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_err, err, size, cudaMemcpyHostToDevice);
cal_loss <<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_err, d_label, 10);// kernel function
//cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); // copy the result from GPU to CPU
free(label); free(err);
cudaFree(d_label); cudaFree(d_err);
return 0;
}
|
6,113 | #include <chrono>
#include <iostream>
#include <math.h>
typedef std::chrono::high_resolution_clock Clock;
#define NUM_THREADS_IN_BLOCK 256
// Fused multiply-add over arraySize elements: result = a * b + c.
// Grid-stride loop, so any launch configuration covers the whole array.
// (measured: ~195.58us GPU vs ~3015us CPU for the default problem size)
__global__ void daxpyGPU(int arraySize, float *a, float *b, float *c, float *result)
{
const int step = blockDim.x * gridDim.x;
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < arraySize; pos += step)
{
result[pos] = a[pos] * b[pos] + c[pos];
}
} // daxpyGPU()
//runtime 3015 microseconds
// CPU reference: element-wise result = a * x + y.
//runtime 3015 microseconds
void daxpyCPU(int arraySize, float *a, float *x, float *y, float *result)
{
for (int i = 0; i < arraySize; i++)
result[i] = a[i] * x[i] + y[i];
} // daxpyCPU()
// Compare CPU and GPU daxpy over 2^20 elements using unified memory, and
// report the max elementwise difference plus the CPU timing.
int main(void)
{
int arraySize = 1 << 20;
float *a, *x, *y;
float *cpuResult, *gpuResult;
cpuResult = new float[arraySize];
// Allocate unified memory, accessible from CPU or GPU.
cudaMallocManaged(&a, arraySize * sizeof(float));
cudaMallocManaged(&x, arraySize * sizeof(float));
cudaMallocManaged(&y, arraySize * sizeof(float));
cudaMallocManaged(&gpuResult, arraySize * sizeof(float));
// Initialize arrays on the host.
for(int index = 0; index < arraySize; index++)
{
a[index] = 5.0f;
x[index] = 10.0f;
y[index] = 20.0f;
}
int blockSize = NUM_THREADS_IN_BLOCK;
int numBlocks = (arraySize + blockSize - 1) / blockSize; // ceil-divide
auto start = Clock::now();
daxpyCPU(arraySize, a, x, y, cpuResult);
auto end = Clock::now();
daxpyGPU<<<numBlocks, blockSize>>>(arraySize, a, x, y, gpuResult);
// Wait for GPU to finish before accessing values on the host.
cudaDeviceSynchronize();
// Check for errors. All values should be 70.0f.
float maxError = 0.0f;
for (int index = 0; index < arraySize; index++)
{
maxError = fmax(maxError, fabs(cpuResult[index] - gpuResult[index]));
}
std::cout << "Max error: " << maxError << std::endl
<< "CPU time: "
<< std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
<< " microseconds." << std::endl;
// Free memory.
cudaFree(a);
cudaFree(x);
cudaFree(y);
cudaFree(gpuResult);
// BUG FIX: cpuResult was allocated with new[]; `delete(cpuResult)` is
// undefined behavior — array form is required.
delete[] cpuResult;
return 0;
} // main()
|
6,114 | #include "basic_conv.cuh"
#include "assert.h"
#include "real.h"
#include <iostream>
// Drive basic_conv with a constant signal A (all ones) and a ramp mask M.
void trial(){
// BUG FIX: the original wrote `10^5`, which in C++ is XOR (== 15), not
// ten-to-the-fifth; the intent was clearly a 1e5-sample input.
constexpr int asize = 100000;
constexpr int bsize = 1000;
// static storage duration: ~100k-element `real` arrays on the stack
// would risk overflowing it now that asize is the intended size.
static real A[asize];
for(int i = 0; i < asize; i++){
A[i] = 1;
}
static real M[bsize];
for (int i = 0; i < bsize; ++i){
M[i] = i;
}
static real P[asize]; // output buffer, filled by basic_conv
basic_conv(A, M, P, bsize, asize);
}
// Entry point: run the convolution trial once.
int main(){
trial();
return 0;
}
|
6,115 | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
// Device code
// Empty placeholder kernel: exists only so main() can query occupancy
// characteristics (block size, active blocks) for a trivial kernel.
__global__ void MyKernel()
{
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
}
// Host code
// Query the occupancy API for MyKernel and print the suggested launch
// configuration plus the resulting theoretical occupancy percentage.
int main()
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)MyKernel, 0);
printf("blockSize= %d\n", blockSize);
printf("minGridSize= %d\n", minGridSize);
int numBlocks; // Occupancy in terms of active blocks
// These variables are used to convert occupancy to warps
int device;
cudaDeviceProp prop;
int activeWarps;
int maxWarps;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, MyKernel, blockSize, 0);
activeWarps = numBlocks * blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
printf("numBlocks: %d\n", numBlocks);
printf("warpSize: %d\n", prop.warpSize);
printf("maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
printf("activeWarps: %d\n", activeWarps);
printf("maxWarps: %d\n", maxWarps);
// BUG FIX: a literal percent sign must be escaped as "%%" in a printf
// format string; the original lone "%" was undefined behavior.
printf("Occupancy: %f %%\n", (double)activeWarps / maxWarps * 100);
return 0;
}
|
6,116 | #include "includes.h"
extern "C"
{
}
// Exponential moving average update over an N x M parameter grid:
//   AVG = beta2*AVG + (1 - beta2)*PARAMS   (fused multiply-add)
//   EST = scale*AVG
// 2-D launch: x indexes columns (N), y indexes rows (M); out-of-range
// threads exit immediately.
__global__ void updateEst(int N, int M, float beta2, float scale, float *PARAMS, float *AVG, float *EST)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col >= N || row >= M)
return;
const int idx = row * N + col;
const float oneMinusBeta2 = __fsub_rn(1.0f, beta2);
AVG[idx] = __fmaf_rn(oneMinusBeta2, PARAMS[idx], __fmul_rn(beta2, AVG[idx]));
EST[idx] = __fmul_rn(scale, AVG[idx]);
}
6,117 | #include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#define N 1024*1024 //array size
// Memory-latency microbenchmark: times three batches of global loads from A
// with clock() and stores the cycle deltas in time[0..2]. The loaded values
// are written to B so the compiler cannot dead-code-eliminate the loads.
// NOTE(review): intended for a single-thread launch (<<<1,1>>>, see main);
// clock() deltas are per-SM cycle counts, not wall time, and the exact
// load/clock ordering is what is being measured — do not reorder.
__global__ void read_alloc_kernel1(int *A, int *B, int *time){
int x1, x2, x3, x4, x5, x6, x7, x8, x9;
int t0, t1, t2, t3, t4, t5;
t0 = clock();
// batch 1: four loads at 1K-element strides (cold)
x1 = A[64];
x2 = A[1088];
x3 = A[2144];
x4 = A[3168];
x1 ++;
x2 ++;
x3 ++;
x4 ++;
t1 = clock();
t2 = clock();
// batch 2: a single new address
x5 = A[4096];
t3 = clock();
// re-touch the batch-1 addresses (presumably cache-hit timing)
x7 = A[1088];
x8 = A[2144];
x9 = A[3168];
t4 = clock();
x6 = A[64];
t5 = clock();
// publish every loaded value so none of the loads can be optimized away
B[0] = x1;
B[1] = x2;
B[2] = x3;
B[3] = x4;
B[4] = x5;
B[5] = x6;
B[6] = x7;
B[7] = x8;
B[8] = x9;
time[0] = t1 - t0;
time[1] = t3 - t2;
time[2] = t5 - t4;
}
// Host driver for the latency microbenchmark: allocates buffers, zeroes B,
// runs the kernel with a single thread, and fetches the cycle counts.
int main(int argc, char **argv) {
int *A, *B, *A_gpu, *B_gpu;
int *time, *time_gpu;
int i;
A = (int *)malloc(sizeof(int) * N);
B = (int *)malloc(sizeof(int) * N);
time = (int *)malloc(sizeof(int) * N);
cudaMalloc((void **)&A_gpu, sizeof(int) * N);
cudaMalloc((void **)&B_gpu, sizeof(int) * N);
cudaMalloc((void **)&time_gpu, sizeof(int) * N);
for (i = 0; i < N; i++)
B[i] = 0;
cudaMemcpy(B_gpu, B, sizeof(int) * N, cudaMemcpyHostToDevice);
// NOTE(review): A_gpu is deliberately never written — the kernel measures
// read latencies, not the values read; confirm this is intentional.
// BUG FIX: cudaThreadSynchronize() has been deprecated for years; use the
// equivalent cudaDeviceSynchronize().
cudaDeviceSynchronize();
dim3 block(1); // single thread: the benchmark times dependent loads
dim3 grid(1);
read_alloc_kernel1<<< grid, block >>>(A_gpu, B_gpu, time_gpu);
cudaDeviceSynchronize();
cudaMemcpy(time, time_gpu, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
free(A);
free(B);
free(time);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(time_gpu);
return 0;
}
|
6,118 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h> /* time */
// includes CUDA
#include <cuda_runtime.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper functions for SDK examples
#define matrixType float // definition of matrixType variables
#define BLOCK_SIZE 16 //256 threads per block
#define SIZE 16 //size of square matrix
//functions' declarations & definitions
void MatrixMul(float *A, float *B, float *C, int size);
void MatrixPrint(float *C, int size);
void MatrixMul(float *, float *, float *, int);
//Definition of naive kernel to matrix multiplication
// Naive matrix multiply P = M * N for square Width x Width matrices.
// One thread computes one output element via a row-by-column dot product.
__global__ void MatrixMulKernel(float *M, float *N, float *P, int Width)
{
const int r = blockIdx.y * blockDim.y + threadIdx.y;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
// guard the rounded-up grid tail
if(r >= Width || c >= Width)
return;
float acc = 0;
for(int k = 0; k < Width; ++k)
acc += M[r*Width+k] * N[k*Width+c];
P[r*Width+c] = acc;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Multiply two random SIZE x SIZE matrices on the GPU, verify against a
// single-threaded CPU reference, and print both results.
int main(int argc, char **argv)
{
/* initialize random seed: */
srand (time(NULL));
//Check available memory
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
if(deviceProp.totalGlobalMem < SIZE * SIZE * sizeof(matrixType))
{
fprintf(stderr, "Data size too large\n");
exit(EXIT_FAILURE);
}
//Allocate memory (managed: visible to both host and device)
matrixType *matA;
matrixType *matB;
matrixType *matC;
matrixType *matC2 = (matrixType*)calloc(SIZE * SIZE, sizeof(matrixType));
cudaMallocManaged(&matA, SIZE * SIZE * sizeof(matrixType));
cudaMallocManaged(&matB, SIZE * SIZE * sizeof(matrixType));
cudaMallocManaged(&matC, SIZE * SIZE * sizeof(matrixType));
// Verify that allocations succeeded
if (matA == NULL || matB == NULL || matC == NULL || matC2 == NULL)
{
fprintf(stderr, "Failed to allocate matrices!\n");
exit(EXIT_FAILURE);
}
//Initialize matrices with uniform random values in [0, 1]
for(int i = 0; i < SIZE * SIZE; i++)
{
matA[i] = rand()/(float)RAND_MAX;
matB[i] = rand()/(float)RAND_MAX;
}
//Launch naive kernel.
// BUG FIX: the grid's y extent previously rounded with dimBlock.x while
// dividing by dimBlock.y (harmless only because the block is square).
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((SIZE + dimBlock.x - 1)/dimBlock.x, (SIZE + dimBlock.y - 1)/dimBlock.y);
MatrixMulKernel<<<dimGrid, dimBlock>>>(matA, matB, matC, SIZE);
// BUG FIX: `err` was never assigned after the launch, so the check below
// could never fire; fetch the launch status explicitly.
err = cudaGetLastError();
cudaDeviceSynchronize();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch Matrixmultiplication kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Check results on CPU
MatrixMul(matA, matB, matC2, SIZE);
MatrixPrint(matC, SIZE);
MatrixPrint(matC2, SIZE);
for(int i = 0; i < SIZE * SIZE; ++i){
if(fabs(matC[i] - matC2[i]) > 1e-4){
fprintf(stderr, "Result verification failed at element %d!\t%f|%f\n", i, matC[i], matC2[i]);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
cudaFree(matA);
cudaFree(matB);
cudaFree(matC);
free(matC2);
printf("Done\n");
return 0;
}
//Definition of signle threaded matrix multiplication function
// Single-threaded reference multiply. Accumulates A*B into C
// (C += A*B, so the caller must provide a zero-initialized C).
void MatrixMul(float *A, float *B, float *C, int size)
{
for(int i = 0; i < size; i++)
{
for(int j = 0; j < size; j++)
{
float acc = C[(i*size) + j];
for(int k = 0; k < size; k++)
acc += A[(i*size) + k] * B[j + (k*size)];
C[(i*size) + j] = acc;
}
}
}
//Declaration of function to extract matrix
// Print every element of the size x size matrix C with 1-based labels,
// one row per line.
void MatrixPrint(float *C, int size)
{
for(int r = 0; r < size; r++)
{
for(int c = 0; c < size; c++)
printf("M[%d;%d] = %f\t", r + 1, c + 1, C[(r*size) + c]);
printf("\n");
}
}
|
6,119 | #include "includes.h"
// One explicit time step of 3-D heat diffusion on an x*y*z grid stored flat
// (x fastest, then y rows, then z planes of x*y cells).
// Interior update: tNew = tOld + k*(sum of 6 face neighbors - 6*tOld).
// At corners/edges/faces the missing neighbors are replaced by the cell's
// own value (tOld[i]), i.e. zero-flux boundaries. Cells whose original
// value differs from the start temperature `st` are treated as fixed
// heaters and restored from tOrig at the end of the step.
// NOTE(review): some boundary predicates are asymmetric (e.g. `i - x < 0`
// for the front top edge, `i + x > x*y` for the front bottom) and rely on
// the earlier, more specific branches having already matched — verify
// before reordering or reusing any single predicate.
__global__ void gpuIt3(float *tNew,float *tOld,float *tOrig,int x,int y,int z,float k,float st) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < x*y*z){
if(i == 0){ // front upper left corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 0;
}
else if(i == x-1){ // front upper right corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .1;
}
else if(i == x*y-1){ // front lower right corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .2;
}
else if(i == x*y-x){ // front lower left corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .3;
}
else if(i == x*y*(z-1) ){ // back upper left corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .4;
}
else if(i == x*y*(z-1) + x-1){ // back upper right corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .5;
}
else if(i == x*y*z-1){ // back lower right corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .6;
}
else if(i == x*y*z - x){ // back lower left corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .7;
}
else if(i - x < 0){ // front top edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .8;
}
else if(i%x == x-1 && i<x*y){ // front right edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .9;
}
else if(i+x > x*y && i < (x*y)){ // front bottom edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1;
}
else if(i%x == 0 && i<x*y){ // front left edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 2;
}
else if(i > (x*y*z - x*y) && i < (x*y*z - (x-1)*y)){ // back top edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 3;
}
else if(i%x == x-1 && i > (x*y*(z-1))){ // back right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 4;
}
else if(i+x > x*y*z){ // back bottom edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 5;
}
else if(i%x == 0 && i > x*y*(z-1)){ // back left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 6;
}
// the corner sides going front to back
else if(i%(x*y) == 0){ // upper left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 7;
}
else if(i%(x*y) == x-1){ // upper right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 8;
}
else if(i%(x*y) == x*y-1){ // lower right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 9;
}
else if(i%(x*y) == x*y-x){ // lower left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 9.1;
}
// else ifs here are vague because other options already completed
else if(i < x*y){ // front face
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.1;
}
else if(i > x*y*(z-1)){ // back face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.2;
}
else if(i%(x*y) < x){ // top face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.3;
}
else if(i%(x*y) > x*(y-1)){ // bottom face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.4;
}
else if(i%(x) == x-1){ // right face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 1.5;
}
else if(i%(x) == 0){ // left face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.6;
}
else{ // all in the middle
// front back top bottom left right
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
}
//tNew[i] = i%(x*y);
// replace heaters: any cell whose original value differs from `st`
// is pinned to its original (heater) temperature
if(tOrig[i] != st){
tNew[i] = tOrig[i];
}
}
}
6,120 | #include "includes.h"
// For each of b batches (one block per batch) of m query rows with k
// candidates each: copy (idx,val) pairs whose value is inside `radius`;
// candidates at or beyond the radius are replaced by the row's first
// candidate (idx[row*k], val[row*k]).
__global__ void selection_k_radius_gpu(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
const int offset = blockIdx.x * m * k;
const int* idxIn = idx + offset;
const float* valIn = val + offset;
int* idxOut = idx_out + offset;
float* valOut = val_out + offset;
for (int row = threadIdx.x; row < m; row += blockDim.x) {
const int base = row * k;
for (int col = 0; col < k; ++col) {
const int e = base + col;
if (valIn[e] < radius) {
idxOut[e] = idxIn[e];
valOut[e] = valIn[e];
} else {
// outside the radius: fall back to the row's first candidate
idxOut[e] = idxIn[base];
valOut[e] = valIn[base];
}
}
}
}
6,121 | // Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678; //#of threads?
// Number of terms to use when approximating sine
static const int TERMS = 6; //# of blocks
// kernel function (CPU - Do not modify)
// kernel function (CPU reference - Do not modify semantics)
// Taylor-series sine for all N inputs:
//   output[i] = x - x^3/3! + x^5/5! - ...  (TERMS correction terms)
void sine_serial(float *input, float *output)
{
for (int idx = 0; idx < N; idx++) {
float x = input[idx];
float result = x;
float term = x * x * x; // x^3, first correction term's numerator
int divisor = 6; // 3!
int parity = -1; // alternating sign
for (int t = 1; t <= TERMS; t++)
{
result += parity * term / divisor;
term *= x * x; // advance numerator by x^2
divisor *= (2*t+2) * (2*t+3); // advance factorial denominator
parity = -parity;
}
output[idx] = result;
}
}
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
// GPU Taylor-series sine: one thread per element of input/output.
__global__ void paralellSine(float *input, float *output)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; //Proper indexing of elements.
// BUG FIX: main launches a rounded-up grid (12057*1024 > N), so the
// trailing threads must not read or write past the end of the arrays.
if (idx >= N)
return;
float value = input[idx];
float numer = input[idx] * input[idx] * input[idx]; // x^3
int denom = 6; // 3!
int sign = -1; // alternating series sign
for (int j=1; j<=TERMS; j++)
{
value += sign * numer/denom;
numer *= input[idx] * input[idx];
denom *= (2 * j + 2) * (2 * j + 3);
sign *= -1;
}
output[idx] = value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
// Returns the current wall-clock time in microseconds.
long long start_timer() {
struct timeval now;
gettimeofday(&now, NULL);
return now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed since the specified time
// Prints "<name>: <seconds> sec" for the interval since start_time and
// returns the elapsed microseconds.
long long stop_timer(long long start_time, std::string name) {
struct timeval now;
gettimeofday(&now, NULL);
long long end_time = now.tv_sec * 1000000 + now.tv_usec;
long long elapsed = end_time - start_time;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) elapsed) / (1000 * 1000) << " sec\n";
return elapsed;
}
// Synchronize with the device, then surface both deferred (execution) and
// immediate (launch) CUDA errors to stderr, tagged with `label`.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize(); left unchanged per the do-not-modify marker.
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
// Run the sine approximation on CPU and GPU over N inputs, time each phase,
// and compare the two result vectors.
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *d_input;
float *d_output;
int size = N * sizeof(float);
//Initialize data on CPU
float *h_input = (float*)malloc(N*sizeof(float));
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
float *h_cpu_result = (float*)malloc(N*sizeof(float));
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
long long GPU_Total_start = start_timer();
long long GPU_Malloc_Start = start_timer();
float *h_gpu_result = (float*)malloc(N*sizeof(float));
cudaMalloc((void **) &d_input, size);
cudaMalloc((void **) &d_output, size);
long long GPU_Malloc = stop_timer(GPU_Malloc_Start, "\nGPU Memory Allocation");
long long GPU_Memcpy_start = start_timer();
cudaMemcpy(d_input, h_input,size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output,h_gpu_result, size, cudaMemcpyHostToDevice);
long long GPU_Memcpy = stop_timer(GPU_Memcpy_start, "GPU Memory Copy to Device");
long long GPU_start_time = start_timer();
// Ceil-divide instead of the hard-coded "12057" so the grid still covers
// every element if N changes. NOTE(review): the rounded-up grid launches
// threads past N; the kernel must bounds-check its index.
int threadsPerBlock = 1024;
int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
paralellSine <<< numBlocks, threadsPerBlock >>> (d_input, d_output);
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
long long GPU_time = stop_timer(GPU_start_time, "GPU Kernel Run Time");
long long GPU_MemcpytoHost_start = start_timer();
cudaMemcpy(h_gpu_result, d_output, size, cudaMemcpyDeviceToHost);
long long GPU_MemcpytoHost = stop_timer(GPU_MemcpytoHost_start, "GPU Memory Copy to Host");
long long GPU_Time = stop_timer(GPU_Total_start, "GPU Total run time");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("\nResult comparison failed.\n");
else
printf("\nResult comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
cudaFree(d_input);
// BUG FIX: d_output was allocated but never freed (device memory leak).
cudaFree(d_output);
return 0;
}
|
6,122 | #include "includes.h"
#define FALSE 0
#define TRUE !FALSE
#define NUMTHREADS 16
#define THREADWORK 32
/* Column means of an nRows x nCols column-major matrix `a`.
 * Launch: 2-D blocks where x indexes columns and y strides down the rows of
 * one column; blockDim.x * blockDim.y must not exceed NUMTHREADS*NUMTHREADS.
 * BUG FIX: the original did `if(col >= nCols) return;` BEFORE the
 * __syncthreads() barriers. A barrier inside divergent control flow is
 * undefined behavior whenever nCols is not a multiple of blockDim.x.
 * All threads now reach every barrier; out-of-range columns contribute
 * zero partials and never write to `means`, so valid outputs are unchanged. */
__global__ void noNAsPmccMeans(int nRows, int nCols, float * a, float * means)
{
int
col = blockDim.x * blockIdx.x + threadIdx.x,
inOffset = col * nRows,
outOffset = threadIdx.x * blockDim.y,
j = outOffset + threadIdx.y;
float sum = 0.f;
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
// strided partial sum down this thread's column (zero for excess columns)
if(col < nCols)
for(int i = threadIdx.y; i < nRows; i += blockDim.y)
sum += a[inOffset + i];
threadSums[j] = sum;
__syncthreads();
// tree reduction along y within each column's slice of shared memory
for(int i = blockDim.y >> 1; i > 0; i >>= 1) {
if(threadIdx.y < i) {
threadSums[outOffset+threadIdx.y]
+= threadSums[outOffset+threadIdx.y + i];
}
__syncthreads();
}
if(threadIdx.y == 0 && col < nCols)
means[col] = threadSums[outOffset] / (float)nRows;
}
6,123 | #include <stdio.h>
#include <time.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <fstream>
void checkCUDAError(const char *msg);
#include <cuda_runtime.h>
using namespace std;
// --------------------INPUT DATA---------------------
const int Nx = 24, Ny = 120, Nz = 20; // Number of mass points
float maxtime = 60; // End time [sec]
const int Nstep = 1200; // Number of time steps
__device__ __constant__ float dt = 0.05; // maxtime / Nstep; // Time step size [sec]
float dtcpu=0.05;
const int xlength = (4 + 2 * Nx)*(2 + Ny)*(2 + Nz); // Solution array in x-direction
const int ylength = (2 + Nx)*(4 + 2 * Ny)*(2 + Nz); // Solution array in y-direction
const int zlength = (2 + Nx)*(2 + Ny)*(4 + 2 * Nz); // Solution array in z-direction
const int masslength = Nx * Ny * Nz;
const int kxlength = Nz * Ny * (Nx + 1);
const int kylength = Nz * Nx * (Ny + 1);
const int kzlength = Ny * Nx * (Nz + 1);
const int bxlength = kxlength;
const int bylength = kylength;
const int bzlength = kzlength;
//------------------------DEVICE FUNCTIONS----------------------------//
//x-displacement
// --- State accessors -------------------------------------------------------
// The solution arrays xold/yold/zold pack interleaved displacement/velocity
// pairs for each mass point (i,j,k) inside padded buffers (sizes xlength /
// ylength / zlength above); the long index expressions implement that
// layout. The first parameter `n` is unused by every accessor.
// NOTE(review): any change here must be verified against the xlength/
// ylength/zlength definitions and the matching writes in SolveKernel.
__device__ float fxx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j - 1)-1];
}
//x-velocity (the slot immediately after the x-displacement)
__device__ float fvx(int n, int i, int j, int k, float*xold)
{
return xold[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2*Nx + (i - 1)*(4 + 2*Nx) + 2 + (2*j)-1];
}
//y-displacement
__device__ float fyy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1];
}
//y-velocity (the slot immediately after the y-displacement)
__device__ float fvy(int n, int i, int j, int k, float*yold)
{
return yold[ (Nx + 2)*(4 + 2 * Ny) + (k - 1)*(Nx + 2)*(4 + 2 * Ny) + 4 +
2 * Ny + (j - 1)*(4 + 2 * Ny) + 2 + (2 * i)-1];
}
//z-displacement
__device__ float fzz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1];
}
//z-velocity (the slot immediately after the z-displacement)
__device__ float fvz(int n, int i, int j, int k, float*zold)
{
return zold[ (Nx + 2)*(4 + 2 * Nz) + (i - 1)*(Nx + 2)*(4 + 2 * Nz) + 4 +
2 * Nz + (j - 1)*(4 + 2 * Nz) + 2 + (2 * k)-1];
}
//mass
// --- Parameter accessors ---------------------------------------------------
// 1-based (i,j,k) lookups into the flat mass / stiffness / damping / force
// arrays (sizes masslength, kx/ky/kzlength, bx/by/bzlength above).
__device__ float fm(int i, int j, int k, float*m)
{
return m[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-stiffness: (Nx + 1) springs per row in the x direction
__device__ float fkx(int i, int j, int k, float*kx)
{
return kx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-stiffness: (Ny + 1) spring rows in the y direction
__device__ float fky(int i, int j, int k, float*ky)
{
return ky[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-stiffness: (Nz + 1) spring planes in the z direction
__device__ float fkz(int i, int j, int k, float*kz)
{
return kz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-damping (same layout as fkx)
__device__ float fbx(int i, int j, int k, float*bx)
{
return bx[(k - 1)*Ny*(Nx + 1) + (i - 1)*(Nx + 1) + j-1];
}
//y-damping (same layout as fky)
__device__ float fby(int i, int j, int k, float*by)
{
return by[(k - 1)*Nx*(Ny + 1) + (i - 1)*Nx + j-1];
}
//z-damping (same layout as fkz)
__device__ float fbz(int i, int j, int k, float*bz)
{
return bz[(k - 1)*Nx*Ny + (i - 1)*Nx + j-1];
}
//x-force (per mass point, same layout as fm)
__device__ float fFx(int i, int j, int k, float*Fx)
{
return Fx[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//y-force (per mass point, same layout as fm)
__device__ float fFy(int i, int j, int k, float*Fy)
{
return Fy[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//z-force (per mass point, same layout as fm)
__device__ float fFz(int i, int j, int k, float*Fz)
{
return Fz[(k - 1)*Ny*Nx + (i - 1)*Nx + j-1];
}
//x-acceleration
__device__ float ax(int i, int j, int k, float*Fx, float*xold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFx(i, j, k, Fx) - fby(i, j, k, by)*(-fvx(1, -1 + i, j, k, xold) + fvx(1, i, j, k, xold)) -
fbx(i, j, k, bx)*(-fvx(1, i, -1 + j, k, xold) + fvx(1, i, j, k, xold)) - fbz(i, j, k, bz)*(-fvx(1, i, j, -1 + k, xold) + fvx(1, i, j, k, xold)) +
fbz(i, j, 1 + k, bz)*(-fvx(1, i, j, k, xold) + fvx(1, i, j, 1 + k, xold)) +
fbx(i, 1 + j, k, bx)*(-fvx(1, i, j, k, xold) + fvx(1, i, 1 + j, k, xold)) +
fby(1 + i, j, k, by)*(-fvx(1, i, j, k, xold) + fvx(1, 1 + i, j, k, xold)) -
fky(i, j, k, ky)*(-fxx(1, -1 + i, j, k, xold) + fxx(1, i, j, k, xold)) - fkx(i, j, k, kx)*(-fxx(1, i, -1 + j, k, xold) + fxx(1, i, j, k, xold)) -
fkz(i, j, k, kz)*(-fxx(1, i, j, -1 + k, xold) + fxx(1, i, j, k, xold)) +
fkz(i, j, 1 + k, kz)*(-fxx(1, i, j, k, xold) + fxx(1, i, j, 1 + k, xold)) +
fkx(i, 1 + j, k, kx)*(-fxx(1, i, j, k, xold) + fxx(1, i, 1 + j, k, xold)) +
fky(1 + i, j, k, ky)*(-fxx(1, i, j, k, xold) + fxx(1, 1 + i, j, k, xold))) / fm(i, j, k, m);
}
//y-acceleration
__device__ float ay(int i, int j, int k, float*Fy, float*yold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFy(i, j, k, Fy) - fby(i, j, k, by)*(-fvy(1, -1 + i, j, k, yold) + fvy(1, i, j, k, yold)) -
fbx(i, j, k, bx)*(-fvy(1, i, -1 + j, k, yold) + fvy(1, i, j, k, yold)) - fbz(i, j, k, bz)*(-fvy(1, i, j, -1 + k, yold) + fvy(1, i, j, k, yold)) +
fbz(i, j, 1 + k, bz)*(-fvy(1, i, j, k, yold) + fvy(1, i, j, 1 + k, yold)) +
fbx(i, 1 + j, k, bx)*(-fvy(1, i, j, k, yold) + fvy(1, i, 1 + j, k, yold)) +
fby(1 + i, j, k, by)*(-fvy(1, i, j, k, yold) + fvy(1, 1 + i, j, k, yold)) -
fky(i, j, k, ky)*(-fyy(1, -1 + i, j, k, yold) + fyy(1, i, j, k, yold)) - fkx(i, j, k, kx)*(-fyy(1, i, -1 + j, k, yold) + fyy(1, i, j, k, yold)) -
fkz(i, j, k, kz)*(-fyy(1, i, j, -1 + k, yold) + fyy(1, i, j, k, yold)) +
fkz(i, j, 1 + k, kz)*(-fyy(1, i, j, k, yold) + fyy(1, i, j, 1 + k, yold)) +
fkx(i, 1 + j, k, kx)*(-fyy(1, i, j, k, yold) + fyy(1, i, 1 + j, k, yold)) +
fky(1 + i, j, k, ky)*(-fyy(1, i, j, k, yold) + fyy(1, 1 + i, j, k, yold))) / fm(i, j, k, m);
}
//z-acceleration
// Newton's 2nd law for lattice node (i,j,k):  a_z = (F_ext - damping - spring) / m.
// Structurally identical to ax()/ay() above, but operating on the z-state
// array `zold` via the fvz (velocity) and fzz (displacement) accessor macros.
// Six +/- pairs = couplings to the 6 face neighbours (i±1, j±1, k±1).
// NOTE(review): assumes interior (i,j,k) so all neighbour reads stay inside
// the padded arrays -- TODO confirm against the fzz/fvz padding layout.
__device__ float az(int i, int j, int k, float*Fz, float*zold, float*kx, float*ky, float*kz, float*bx, float*by, float*bz, float*m)
{
return (fFz(i, j, k, Fz) - fby(i, j, k, by)*(-fvz(1, -1 + i, j, k, zold) + fvz(1, i, j, k, zold)) -
fbx(i, j, k, bx)*(-fvz(1, i, -1 + j, k, zold) + fvz(1, i, j, k, zold)) - fbz(i, j, k, bz)*(-fvz(1, i, j, -1 + k, zold) + fvz(1, i, j, k, zold)) +
fbz(i, j, 1 + k, bz)*(-fvz(1, i, j, k, zold) + fvz(1, i, j, 1 + k, zold)) +
fbx(i, 1 + j, k, bx)*(-fvz(1, i, j, k, zold) + fvz(1, i, 1 + j, k, zold)) +
fby(1 + i, j, k, by)*(-fvz(1, i, j, k, zold) + fvz(1, 1 + i, j, k, zold)) -
fky(i, j, k, ky)*(-fzz(1, -1 + i, j, k, zold) + fzz(1, i, j, k, zold)) - fkx(i, j, k, kx)*(-fzz(1, i, -1 + j, k, zold) + fzz(1, i, j, k, zold)) -
fkz(i, j, k, kz)*(-fzz(1, i, j, -1 + k, zold) + fzz(1, i, j, k, zold)) +
fkz(i, j, 1 + k, kz)*(-fzz(1, i, j, k, zold) + fzz(1, i, j, 1 + k, zold)) +
fkx(i, 1 + j, k, kx)*(-fzz(1, i, j, k, zold) + fzz(1, i, 1 + j, k, zold)) +
fky(1 + i, j, k, ky)*(-fzz(1, i, j, k, zold) + fzz(1, 1 + i, j, k, zold))) / fm(i, j, k, m);
}
// One explicit-Euler time step for every interior lattice node.
// Launch: 3-D grid of (dimBlockX, dimBlockY, dimBlockZ) thread blocks, one
// thread per node.  i,j,k are 1-based interior coordinates (the +1 offsets
// skip the boundary layer).
// Each state array stores interleaved (position, velocity) pairs with
// padding, hence the long (4 + 2*N)-pitch offset expressions: the first
// store of each pair writes the updated position (pos += vel*dt), the
// second writes the updated velocity (vel += a*dt) using ax/ay/az above.
// NOTE(review): the write offsets must mirror the fxx/fvx/fyy/fvy/fzz/fvz
// accessor macros defined earlier -- TODO confirm the padding convention.
__global__ void SolveKernel(int dimBlockX,int dimBlockY,int dimBlockZ,float*xoldd,float*yoldd,float*zoldd,float*xnewd,float*ynewd,float*znewd,float*md,float*kxd,float*kyd,float*kzd,float*bxd,float*byd,float*bzd,float*Fxd,float*Fyd,float*Fzd)
{
// int tx=threadIdx.x;
// int ty=threadIdx.y;
int tx=blockIdx.x*dimBlockX+threadIdx.x;
int ty=blockIdx.y*dimBlockY+threadIdx.y;
int tz=blockIdx.z*dimBlockZ+threadIdx.z;
// 1-based interior node coordinates
int i=ty+1;
int j=tx+1;
int k=tz+1;
// x-state: position then velocity (interleaved at stride 2 within a row)
xnewd[ (Ny + 2)*(4 + 2*Nx) + (k - 1)*(Ny + 2)*(4 + 2*Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j - 1) - 1] = fxx(1, i, j, k, xoldd) + fvx(1, i, j, k, xoldd)*dt;
xnewd[ (Ny + 2)*(4 + 2 * Nx) + (k - 1)*(Ny + 2)*(4 + 2 * Nx) + 4 +
2 * Nx + (i - 1)*(4 + 2 * Nx) + 2 + (2 * j) - 1] = fvx(1, i, j, k, xoldd) + ax(i, j, k, Fxd, xoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
// y-state (note the transposed pitch: rows of length 4 + 2*Ny)
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i - 1)-1] = fyy(1, i, j, k, yoldd) + fvy(1, i, j, k, yoldd)*dt;
ynewd[ (Nx + 2)*(4 + 2*Ny) + (k - 1)*(Nx + 2)*(4 + 2*Ny) + 4 +
2*Ny + (j - 1)*(4 + 2*Ny) + 2 + (2*i)-1] = fvy(1, i, j, k, yoldd) + ay(i, j, k, Fyd, yoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
// z-state (rows of length 4 + 2*Nz, planes indexed by i)
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k - 1)-1] = fzz(1, i, j, k, zoldd) + fvz(1, i, j, k, zoldd)*dt;
znewd[ (Nx + 2)*(4 + 2*Nz) + (i - 1)*(Nx + 2)*(4 + 2*Nz) + 4 +
2*Nz + (j - 1)*(4 + 2*Nz) + 2 + (2*k)-1] = fvz(1, i, j, k, zoldd) + az(i, j, k, Fzd, zoldd, kxd, kyd, kzd, bxd, byd, bzd, md)*dt;
}
/* Uploads one float array of `count` elements to a freshly allocated device
 * buffer; reports any allocation/copy failure via checkCUDAError. */
static float* uploadArray(const float *host, int count, const char *what)
{
float *dev = NULL;
cudaMalloc((void**)&dev, count*sizeof(float));
cudaMemcpy(dev, host, count*sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError(what);
return dev;
}
/* Runs one explicit time step on the GPU: uploads the current state and all
 * system parameters, launches SolveKernel over an NBlockX x NBlockY x NBlockZ
 * grid, and downloads the updated state vectors into xnew/ynew/znew.
 *
 * NOTE(review): every call re-allocates and frees all 16 device buffers;
 * main() calls Solve() once per time step, so hoisting the allocations out
 * of the time loop would remove most of the overhead.
 * NOTE(review): Nx, Ny, Nz must be divisible by 4, 3 and 5 respectively or
 * part of the lattice is silently skipped -- TODO confirm grid sizes. */
void Solve(float*xold,float*yold,float*zold,float*xnew,float*ynew,float*znew,float*m,float*kx,float*ky,float*kz,float*bx,float*by,float*bz,float*Fx,float*Fy,float*Fz)
{
// Upload state, parameters and forces (error-checked, unlike the original).
float *xoldd = uploadArray(xold, xlength, "upload xold");
float *yoldd = uploadArray(yold, ylength, "upload yold");
float *zoldd = uploadArray(zold, zlength, "upload zold");
float *xnewd = uploadArray(xnew, xlength, "upload xnew");
float *ynewd = uploadArray(ynew, ylength, "upload ynew");
float *znewd = uploadArray(znew, zlength, "upload znew");
float *md  = uploadArray(m,  masslength, "upload m");
float *kxd = uploadArray(kx, kxlength, "upload kx");
float *kyd = uploadArray(ky, kylength, "upload ky");
float *kzd = uploadArray(kz, kzlength, "upload kz");
float *bxd = uploadArray(bx, bxlength, "upload bx");
float *byd = uploadArray(by, bylength, "upload by");
float *bzd = uploadArray(bz, bzlength, "upload bz");
float *Fxd = uploadArray(Fx, masslength, "upload Fx");
float *Fyd = uploadArray(Fy, masslength, "upload Fy");
float *Fzd = uploadArray(Fz, masslength, "upload Fz");
// Launch configuration: fixed 4x3x5 grid of blocks covering the lattice.
int NBlockX=4;
int NBlockY=3;
int NBlockZ=5;
int dimBlockX=Nx/NBlockX;
int dimBlockY=Ny/NBlockY;
int dimBlockZ=Nz/NBlockZ;
dim3 dimBlock(dimBlockX,dimBlockY,dimBlockZ);
dim3 dimGrid(NBlockX,NBlockY,NBlockZ);
SolveKernel<<<dimGrid,dimBlock>>>(dimBlockX,dimBlockY,dimBlockZ,xoldd,yoldd,zoldd,xnewd,ynewd,znewd,md,kxd,kyd,kzd,bxd,byd,bzd,Fxd,Fyd,Fzd);
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// modern equivalent, and also surfaces asynchronous kernel errors.
cudaDeviceSynchronize();
checkCUDAError("SolveKernel");
// Copy the updated state back to the host.
cudaMemcpy(xnew,xnewd,xlength*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(ynew,ynewd,ylength*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(znew,znewd,zlength*sizeof(float),cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
// Release all device buffers.
cudaFree(xoldd);
cudaFree(yoldd);
cudaFree(zoldd);
cudaFree(xnewd);
cudaFree(ynewd);
cudaFree(znewd);
cudaFree(md);
cudaFree(kxd);
cudaFree(kyd);
cudaFree(kzd);
cudaFree(bxd);
cudaFree(byd);
cudaFree(bzd);
cudaFree(Fxd);
cudaFree(Fyd);
cudaFree(Fzd);
}
/* Driver: simulates a 3-D mass-spring-damper lattice with explicit Euler
 * time stepping on the GPU.  Relies on file-level constants defined earlier
 * (Nx, Ny, Nz, Nstep, dtcpu, xlength, ylength, zlength, masslength,
 * kxlength, ...) and on Solve() above.
 * NOTE(review): `ofstream` is used unqualified -- assumes <fstream> and a
 * using-declaration appear earlier in the file; TODO confirm. */
int main(int argc,char* argv[])
{
float *xold,*yold,*zold,*xnew,*ynew,*znew,*m,*kx,*ky,*kz,*bx,*by,*bz,*Fx,*Fy,*Fz;
//----------------------------------INITIALIZATION START----------------------------------
// Solution vectors (interleaved position/velocity state, see SolveKernel)
xold=(float *)malloc(xlength*sizeof(float));
yold=(float *)malloc(ylength*sizeof(float));
zold=(float *)malloc(zlength*sizeof(float));
xnew=(float *)malloc(xlength*sizeof(float));
ynew=(float *)malloc(ylength*sizeof(float));
znew=(float *)malloc(zlength*sizeof(float));
// Mass vector
m=(float *)malloc(masslength*sizeof(float));
// Stiffness vectors
kx=(float *)malloc(kxlength*sizeof(float));
ky=(float *)malloc(kylength*sizeof(float));
kz=(float *)malloc(kzlength*sizeof(float));
// Damping vectors
bx=(float *)malloc(bxlength*sizeof(float));
by=(float *)malloc(bylength*sizeof(float));
bz=(float *)malloc(bzlength*sizeof(float));
// Force vectors
Fx=(float *)malloc(masslength*sizeof(float));
Fy=(float *)malloc(masslength*sizeof(float));
Fz=(float *)malloc(masslength*sizeof(float));
// Initial conditions: lattice at rest with zero displacement
for (int i = 0; i < xlength ; i++)
{
xold[i]=0.0f;
xnew[i]=0.0f;
}
for (int i = 0; i < ylength ; i++)
{
yold[i]=0.0f;
ynew[i]=0.0f;
}
for (int i = 0; i < zlength ; i++)
{
zold[i]=0.0f;
znew[i]=0.0f;
}
// Mass [kg] and forces (uniform unit mass, no external force initially)
for (int i = 0; i < masslength ; i++)
{
m[i]=1.0f;
Fx[i]=0.0f;
Fy[i]=0.0f;
Fz[i]=0.0f;
}
// Stiffness [N/m] and damping [N sec/m] in x-direction
for (int i = 0; i < kxlength ; i++)
{
kx[i]=0.2f;
bx[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in y-direction
for (int i = 0; i < kylength ; i++)
{
ky[i]=0.2f;
by[i]=0.05f;
}
// Stiffness [N/m] and damping [N sec/m] in z-direction
for (int i = 0; i < kzlength ; i++)
{
kz[i]=0.2f;
bz[i]=0.05f;
}
//----------------------------------INITIALIZATION END--------------------------------------
//-------------------------------BOUNDARY CONDITIONS START----------------------------------
// No connections with Top wall B.C.'s: zero the y-springs/dampers that
// would tie the top layer of nodes to the wall.
for (int i = 1; i <= Nx; i++)
{
for (int k = 1; k <= Nz; k++)
{
ky[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
by[i + Nx*Ny + (-1 + k)*Nx*(1 + Ny) - 1] = 0.0f;
}
}
//--------------------------------BOUNDARY CONDITIONS END-----------------------------------
//--------------------------------------SOLVER START-----------------------------------------
clock_t t;
t=clock();
// Time loop: apply a sinusoidal excitation to three fixed nodes, then take
// one GPU Euler step per iteration.
for (int n = 1; n <= Nstep-1; n++)
{
// Excitation
Fx[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu); // omega = 3 [rad/sec]
Fy[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (6 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (7 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fx[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fy[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Fz[(2 - 1)*Ny*Nx + (5 - 1)*Nx + 8 - 1] = sin(3 * n*dtcpu);
Solve(xold,yold,zold,xnew,ynew,znew,m,kx,ky,kz,bx,by,bz,Fx,Fy,Fz);
cudaThreadSynchronize();
// OLD=NEW: advance the state one step
for (int ix = 0; ix < xlength; ix++)
{
xold[ix] = xnew[ix];
}
for (int iy = 0; iy < ylength; iy++)
{
yold[iy] = ynew[iy];
}
for (int iz = 0; iz < zlength; iz++)
{
zold[iz] = znew[iz];
}
}
// Dump the final z-state to a text file for inspection.
ofstream fout("test.txt");
if (fout.is_open())
{
//file opened successfully so we are here
cout << "File Opened successfully!!!. Writing data from array to file" << endl;
for (int j = 0; j < zlength; j++)
{
fout << znew[j] << ' '; //writing ith character of array in the file
}
fout << '\n';
cout << "Array data successfully saved into the file test.txt" << endl;
}
else //file could not be opened
{
cout << "File could not be opened." << endl;
}
t=clock()-t;
printf("%f seconds\n",((float)t)/CLOCKS_PER_SEC);
// Spot-check one entry of each state vector.
printf("%f,%f,%f\n",xold[60],yold[60],zold[60]);
free(xold);
free(yold);
free(zold);
free(xnew);
free(ynew);
free(znew);
free(m);
free(kx);
free(ky);
free(kz);
free(bx);
free(by);
free(bz);
free(Fx);
free(Fy);
free(Fz);
return 0;
}
/* Aborts the program with a descriptive message if the CUDA runtime has a
 * pending error (e.g. from a failed kernel launch or memcpy).
 * `msg` labels the call site in the diagnostic. */
void checkCUDAError(const char *msg)
{
    // cudaGetLastError() both reads and clears the sticky error state.
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
6,124 | # include<stdio.h>
// Diagnostic kernel: every thread prints its full coordinate context
// (threadIdx, blockIdx, blockDim, gridDim).  Device-side printf output is
// buffered and only flushed at the next synchronizing call on the host.
__global__ void print_thread_ids() {
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d <-> blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d <-> blockDim.x: %d, blockDim.y: %d, blockDim.z: %d <-> gridDim.x: %d, gridDim.y: %d, gridDim.z: %d\n",
    threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
    // printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d\n", threadIdx.x, threadIdx.y, threadIdx.z);
}
/* Launches the diagnostic kernel over a 16x16 domain tiled into 8x8-thread
 * blocks (a 2x2 grid) and waits for all device-side printf output. */
int main() {
    const int nx = 16;
    const int ny = 16;
    dim3 threads(8, 8);
    dim3 blocks(nx / threads.x, ny / threads.y);
    print_thread_ids<<<blocks, threads>>>();
    // Block until the kernel finishes so its printf buffer is flushed.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
6,125 | #pragma once
#ifndef BLOCK_MATCHING_KERNEL
#define BLOCK_MATCHING_KERNEL
#define INDXs(s,i,j) ((s) * (i) + (j) + 0)
// Sum-of-squared-differences match score between the template block `bl`
// and the image window of `im` anchored at (oi, oj), sampling every
// `stride`-th pixel in both dimensions.  Lower is better.
//   im_step / bl_step : row pitches used by the INDXs macro
//   bl_cols / bl_rows : template extent in pixels
// Returns 0.0 if either pointer is NULL.
// NOTE(review): the score is divided by the FULL block area bl_cols*bl_rows
// even when stride > 1 samples only a fraction of the pixels -- scores are
// still comparable to each other, but not a true mean; TODO confirm intent.
__device__ double computeMatchKernel(unsigned char *im,
int im_step,
unsigned char *bl,
int bl_step,
int bl_cols,
int bl_rows,
int oi,
int oj,
int stride){
if (!im || !bl) return 0.0;
double nb = (bl_cols*bl_rows);
double x = 0;
for(int i = 0;i < bl_rows-stride+1;i+= stride){
for(int j = 0;j < bl_cols-stride+1;j+= stride){
unsigned char v1 = im[INDXs(im_step,oi+i,oj+j)];
unsigned char v2 = bl[INDXs(bl_step,i,j)];
// accumulate squared pixel difference (promoted to int, then double)
x += (v2-v1)*(v2-v1);
//im[INDXs(im_step,oi+i,oj+j)] = ABS(v2-v1);
}
}
x = x / nb;
// printf("%f\n",x);
return x;
}
// Best-match record produced by the block-matching reduction: the smallest
// SSD score found and the (row, column) offset where it occurred.
struct DataOut{
double minVal;      // best (lowest) match score seen so far
int coord_i_min;    // row offset of the best match
int coord_j_min;    // column offset of the best match
};
/* Block-matching search: each thread owns one row offset (its global id)
 * and scans all column offsets j in [0, jend-stride] with step `stride`,
 * keeping the best (lowest) SSD score.  A shared-memory tree reduction then
 * selects the block-wide best, written to result[blockIdx.x].
 * Requires blockDim.x * blockDim.y <= 1024 (size of tab_data_out).
 * NOTE(review): no guard keeps tid within the image height -- assumes the
 * grid is sized so every tid is a valid row offset; TODO confirm caller.
 */
__global__ void blockMatching_kernel(int jend,int stride,unsigned char* im, int im_step, unsigned char *bl, int bl_step,int bl_cols,int bl_rows, DataOut* result){
    __shared__ DataOut tab_data_out[1024];
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    DataOut best;
    best.minVal = 1000000000000000; // sentinel: larger than any real score
    best.coord_i_min = tid;
    best.coord_j_min = 0;
    for(int j = 0; j < jend-stride+1; j += stride){
        double x = computeMatchKernel(im, im_step,
                                      bl, bl_step, bl_cols, bl_rows,
                                      tid, j, stride);
        if(x < best.minVal){
            best.minVal = x;
            best.coord_i_min = tid;
            best.coord_j_min = j;
        }
    }
    // BUG FIX: shared memory must be indexed by the block-local thread id;
    // the original used the GLOBAL tid, which is out of bounds for every
    // block with blockIdx.x > 0.
    tab_data_out[threadIdx.x] = best;
    __syncthreads();
    // Min-reduction over the block.
    int nthreads = blockDim.x * blockDim.y;
    for (unsigned int s = 1; s < nthreads; s *= 2){
        int index = 2 * s * threadIdx.x;
        // BUG FIX: bounds-check index + s BEFORE reading tab_data_out[index+s]
        // (the original read it first and checked afterwards).
        if (index < nthreads && (index + s) < nthreads){
            if (tab_data_out[index].minVal > tab_data_out[index + s].minVal){
                tab_data_out[index] = tab_data_out[index + s];
            }
        }
        __syncthreads();
    }
    if(threadIdx.x == 0){
        result[blockIdx.x] = tab_data_out[0];
    }
}
#endif
|
6,126 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
//#include<stdio.h>
#include<assert.h>
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
extern "C" bool cudaInit_CUI(void);
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
extern "C" bool cudaDeInit_CUI(void);
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
extern "C" void cudaGetLastError_Sync_CUI(void);
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
/* Element-wise biased difference of two byte images:
 *   result = matrixA - matrixB + 128
 * The +128 bias recentres the signed difference into unsigned-byte range
 * (arithmetic wraps modulo 256, matching unsigned char semantics).
 * One thread per pixel; the guard handles the rounded-up grid. */
__global__ void HarrTransformLet_MatrixSub_thread(
unsigned char *matrixA,
unsigned char *matrixB,
int height,
int width,
unsigned char *result)
{
    // Global 2-D pixel coordinates of this thread.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip threads that fall outside the image.
    if (col < 0 || col >= width || row < 0 || row >= height)
        return;
    const int idx = row * width + col;
    result[idx] = matrixA[idx] - matrixB[idx] + 128;
}
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
/* Host wrapper: computes result = matrixA - matrixB + 128 element-wise on
 * the GPU.  Uploads both width*height byte images, launches a 16x16-thread
 * grid covering the image, and downloads the result.
 * FIX: every cudaMalloc/cudaMemcpy status is now checked (the original
 * assigned cudaStatus sixteen times and never looked at it); on failure the
 * remaining steps are skipped and whatever was allocated is freed. */
void HarrTransformLet_MatrixSub_gpu(
unsigned char *matrixA,
unsigned char *matrixB,
int height,
int width,
unsigned char *result)
{
unsigned char *matixaA_dev = NULL;
unsigned char *matrixB_dev = NULL;
unsigned char *result_dev = NULL;
cudaError_t cudaStatus = cudaSuccess;
const size_t bytes = (size_t)width * (size_t)height * sizeof(unsigned char);
assert(cudaInit_CUI()==true);
//////////////////////////////////////////////////////////////////////////////
cudaStatus = cudaMalloc((void**)&matixaA_dev, bytes);
if (cudaStatus == cudaSuccess)
    cudaStatus = cudaMalloc((void**)&matrixB_dev, bytes);
if (cudaStatus == cudaSuccess)
    cudaStatus = cudaMalloc((void**)&result_dev, bytes);
if (cudaStatus == cudaSuccess)
    cudaStatus = cudaMemcpy(matixaA_dev, matrixA, bytes, cudaMemcpyHostToDevice);
if (cudaStatus == cudaSuccess)
    cudaStatus = cudaMemcpy(matrixB_dev, matrixB, bytes, cudaMemcpyHostToDevice);
if (cudaStatus == cudaSuccess)
{
    // Round the grid up so every pixel is covered (kernel guards the edge).
    dim3 threadsPerBlock(16,16);
    dim3 numBlock((width+threadsPerBlock.x-1)/threadsPerBlock.x,(height+threadsPerBlock.y-1)/threadsPerBlock.y);
    HarrTransformLet_MatrixSub_thread<<<numBlock,threadsPerBlock>>>(
        matixaA_dev,
        matrixB_dev,
        height,
        width,
        result_dev);
    cudaGetLastError_Sync_CUI();  // project helper: synchronize + surface launch errors
    cudaStatus = cudaMemcpy(result, result_dev, bytes, cudaMemcpyDeviceToHost);
}
assert(cudaStatus == cudaSuccess);  // fail loudly on the first CUDA error
// cudaFree(NULL) is a safe no-op, so unconditional cleanup is fine.
cudaFree(matixaA_dev);
cudaFree(matrixB_dev);
cudaFree(result_dev);
////////////////////////////////////////////////////////////////////////////
assert(cudaDeInit_CUI()==true);
}
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------------------*/
/**
*
*
*
*/
/*------------------------------------------------------------------------------------------*/ |
6,127 | #include "includes.h"
/* Reduces `delta` row-wise: thread g sums the `numOfElements` contiguous
 * values of row g and stores the total in sumDelta[g].
 * Launch layout: 2-D grid of 1-D blocks; one thread per output slot, with
 * numOfCentroids * NUM_SUMS slots in total. */
__global__ void SumCentroids(float* delta, float* sumDelta, int numOfCentroids, int numOfElements)
{
    // Flatten the 2-D grid of 1-D blocks into one global thread index.
    const int gid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (gid >= numOfCentroids * NUM_SUMS)
        return;
    // Serially accumulate this thread's slice of delta.
    const float* row = delta + numOfElements * gid;
    float total = 0;
    for (int e = 0; e < numOfElements; e++)
        total += row[e];
    sumDelta[gid] = total;
}
6,128 | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#define BLOCK_SIZE 256
/* Per-element 3-D dot product: c[i] = a[i] . b[i].
 * One thread per element; the bounds check covers the partial last block. */
__global__ void dot(int numElements, const float3* a, const float3* b, float* c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numElements)
        return;
    const float3 u = a[idx];
    const float3 v = b[idx];
    c[idx] = u.x * v.x + u.y * v.y + u.z * v.z;
}
/* Fills two arrays of random float3 vectors, computes their element-wise
 * dot products on the GPU, and prints the first few results. */
int main()
{
    const int numElements = 10000;
    const size_t vecBytes = numElements * sizeof(float3);
    const size_t outBytes = numElements * sizeof(float);
    // Host buffers (calloc keeps them zeroed until filled below).
    float3* hostA = (float3*)calloc(numElements, sizeof(float3));
    float3* hostB = (float3*)calloc(numElements, sizeof(float3));
    float*  hostC = (float*)calloc(numElements, sizeof(float));
    // Fixed seed for reproducible input; each component uniform in [0, 1).
    // NOTE: rand() call order (a.x, a.y, a.z, b.x, b.y, b.z per element)
    // determines the exact values, so it is preserved.
    srand(1214134);
    for (int i = 0; i < numElements; i++)
    {
        hostA[i].x = float(rand())/float(RAND_MAX + 1.0);
        hostA[i].y = float(rand())/float(RAND_MAX + 1.0);
        hostA[i].z = float(rand())/float(RAND_MAX + 1.0);
        hostB[i].x = float(rand())/float(RAND_MAX + 1.0);
        hostB[i].y = float(rand())/float(RAND_MAX + 1.0);
        hostB[i].z = float(rand())/float(RAND_MAX + 1.0);
    }
    // Device buffers.
    float3* devA;
    float3* devB;
    float*  devC;
    cudaMalloc((void**)&devA, vecBytes);
    cudaMalloc((void**)&devB, vecBytes);
    cudaMalloc((void**)&devC, outBytes);
    cudaMemcpy(devA, hostA, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, hostB, vecBytes, cudaMemcpyHostToDevice);
    // Round the grid up to cover the tail (kernel bounds-checks).
    dot<<<numElements/BLOCK_SIZE + 1, BLOCK_SIZE>>>(numElements, devA, devB, devC);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(hostC, devC, outBytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < std::min(10, numElements); i++)
    {
        printf("%f*%f + %f*%f + %f*%f = %f\n", hostA[i].x, hostB[i].x, hostA[i].y, hostB[i].y, hostA[i].z, hostB[i].z, hostC[i]);
    }
    printf("...\n");
    free(hostA);
    free(hostB);
    free(hostC);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    return 0;
}
6,129 | #include <stdio.h>
#include <cuda_runtime.h>
#define threadsPerBlock 512
//Device code
/* Computes the local clustering coefficient of each node (one thread per
 * node) from a dense 0/1 adjacency matrix, then accumulates the block's
 * partial sum into globalSum[0] via a single atomicAdd per block.
 * The host divides the total by numElements to obtain the network average.
 * Launch: 1-D grid, threadsPerBlock threads per block. */
__global__ void calculateCCoeff(const int* AdjMatrix, int numElements, float* globalSum)
{
    __shared__ float local[threadsPerBlock];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // FIX: initialize every slot so thread 0 never reads uninitialized
    // shared memory (tail threads with i >= numElements skip the if below).
    local[threadIdx.x] = 0;
    if(i < numElements)
    {
        // BUG FIX: `degree` was uninitialized in the original
        // (`int degree, edgeCount = 0;` only initializes edgeCount),
        // so the coefficient was computed from garbage.
        int degree = 0, edgeCount = 0;
        float coeff = 0;
        for(int j=0; j<numElements; j++)
        {
            //If they are connected
            if(AdjMatrix[i*numElements + j] == 1)
            {
                degree++;
                for(int k=j+1; k<numElements; k++)
                {
                    // Count edges among i's neighbors (closed triangles).
                    edgeCount += (AdjMatrix[i*numElements + k] == 1 && AdjMatrix[j*numElements + k] == 1) ? 1 : 0;
                }
            }
        }
        // CC = 2E / (d(d-1)); undefined (taken as 0) for degree < 2.
        coeff = degree >= 2 ? (2.0f * edgeCount) / (degree * (degree - 1)) : 0;
        local[threadIdx.x] = coeff;
    }
    // All threads (including tail threads) reach this barrier.
    __syncthreads();
    if(threadIdx.x == 0) {
        // Thread 0 sums the block's coefficients; the guard skips slots
        // beyond the last valid node.
        float sum = 0;
        for( int i = 0; i < threadsPerBlock; i++ )
        {
            int currentIndex = blockIdx.x * blockDim.x + i;
            sum += currentIndex < numElements ? local[i] : 0;
        }
        atomicAdd(&globalSum[0], sum);
    }
}
/* Reads an edge list ("u v" per line), builds a dense adjacency matrix, and
 * computes the graph's average clustering coefficient on the GPU. */
int main(int argc, char* argv[]){
    if(argc < 2){
        printf("Argument format must be: ./compiledCode inputFile.txt\n");
        exit(1);
    }
    char* inputFile = argv[1];
    FILE* file = fopen(inputFile, "r");
    if(file == NULL){
        fprintf(stderr, "Could not open input file!\n");
        exit(1);
    }
    int u, v;
    int maxNode = 0;
    // Pass 1: find the largest node id to size the matrix.
    fscanf(file, "%d %d", &u, &v);
    while(!feof(file)){
        // BUG FIX: the original wrote `maxNode = u > maxNode ? u : 0;`,
        // resetting the running maximum to 0 on every non-improving edge,
        // so only a suffix of the file influenced the result.
        maxNode = u > maxNode ? u : maxNode;
        maxNode = v > maxNode ? v : maxNode;
        fscanf(file, "%d %d", &u, &v);
    }
    int n = maxNode + 1;
    printf("Total number of elements: %d\n", n);
    fclose(file);
    //Unsigned value of size of adj matrix
    size_t amSize = n * n * sizeof(int);
    // BUG FIX: use calloc, not malloc -- the kernel reads every cell, and
    // non-edge cells were previously uninitialized garbage.
    int* adjVector = (int*) calloc((size_t)n * n, sizeof(int));
    // Pass 2: mark both directions of each edge.
    file = fopen(inputFile, "r");
    if(file == NULL){
        fprintf(stderr, "Could not reopen input file!\n");
        free(adjVector);
        exit(1);
    }
    int index = 0;
    fscanf(file, "%d %d", &u, &v);
    while(!feof(file)){
        index = u*n + v;
        adjVector[index] = 1;
        index = v*n + u;
        adjVector[index] = 1;
        fscanf(file, "%d %d", &u, &v);
    }
    fclose(file);
    cudaError_t err = cudaSuccess;
    int* d_adjMatrix = NULL;
    err = cudaMalloc((void **)&d_adjMatrix, amSize);
    if (err != cudaSuccess){
        fprintf(stderr, "Could not allocate memory for Adjacency Matrix (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //Copy host matrix of size 'amSize' to GPU memory
    err = cudaMemcpy(d_adjMatrix, adjVector, amSize, cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        fprintf(stderr, "Could not copy Adjacency Matrix to device memory (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA device with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    float *globalCC = NULL;
    err = cudaMalloc((void **)&globalCC, sizeof(float));
    if (err != cudaSuccess){
        fprintf(stderr, "Could not allocate memory for global Clustering Coefficient variable (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaMemset(globalCC, 0, sizeof(float));
    //Calling device method "calculateCCoeff" from host
    calculateCCoeff<<<blocksPerGrid, threadsPerBlock>>>(d_adjMatrix, n, globalCC);
    err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "Could not call device code kernel (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float hostGlobalCC = 0;
    // Blocking copy: also synchronizes with the kernel.
    err = cudaMemcpy(&hostGlobalCC, globalCC, sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess){
        fprintf(stderr, "Could not copy global Clustering Coefficient variable back from device (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Average local coefficient over all nodes.
    hostGlobalCC = hostGlobalCC / n;
    printf("\n\nTotal Clustering Coefficient: %f\n\n", hostGlobalCC);
    err = cudaFree(d_adjMatrix);
    if (err != cudaSuccess){
        fprintf(stderr, "Error freeing Adjacency Matrix from device (error %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // FIX: globalCC was leaked in the original.
    cudaFree(globalCC);
    free(adjVector);
    err = cudaDeviceReset();
    if (err != cudaSuccess){
        fprintf(stderr, "Error resetting device (error %s)\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
6,130 | /* This program takes a matrix transpose using shared memory.
* It takes care of memory coalescence as both memory read and memory write are
* coalesced by accessing in colum major.
* It takes care of bank conflicts by padding the shared memory by 1 to get optimum performance.
* There is no thread divergence in the program.
* Each thread works on multiple elements, which in this case = 4.
* Restriction - Rows and Columns should be multiple of Tile Dimension
* Implemented in CUDA
*
*
*
* code by Anand Goyal. Dated: 12/13/2014
*/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define ROW 1024
#define COL 1024
#define TILE_DIM 32
#define BLOCK_ROWS 8
/* Tiled matrix transpose through shared memory.
 * Launch: grid of (COL/TILE_DIM, ROW/TILE_DIM) blocks of
 * (TILE_DIM, BLOCK_ROWS) threads; each thread moves TILE_DIM/BLOCK_ROWS
 * elements.  Both the global reads and the global writes are coalesced;
 * the transpose happens via swapped shared-memory indices. */
__global__ void transposeKernel(float *inData, float *outData)
{
    // +1 column of padding removes shared-memory bank conflicts on the
    // column-wise reads in the write-out phase.
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];

    const int width = gridDim.x * TILE_DIM;
    int col = blockIdx.x * TILE_DIM + threadIdx.x;
    int row = blockIdx.y * TILE_DIM + threadIdx.y;

    // Stage one TILE_DIM x TILE_DIM tile in shared memory (coalesced reads).
    for (int r = 0; r < TILE_DIM; r += BLOCK_ROWS)
        tile[threadIdx.y + r][threadIdx.x] = inData[(row + r) * width + col];
    __syncthreads();

    // Swap the block coordinates so writes target the transposed tile.
    col = blockIdx.y * TILE_DIM + threadIdx.x;
    row = blockIdx.x * TILE_DIM + threadIdx.y;

    // Write out with swapped thread indices (coalesced writes).
    for (int r = 0; r < TILE_DIM; r += BLOCK_ROWS)
        outData[(row + r) * width + col] = tile[threadIdx.x][threadIdx.y + r];
}
/* Generates a ROW x COL random matrix, transposes it on the GPU with the
 * shared-memory tiled kernel, and reports the elapsed time (H2D copy +
 * kernel + D2H copy, measured with CUDA events). */
int main()
{
    int numX = ROW, numY = COL;
    int size = numX * numY * sizeof(float);
    float *input, *output;
    float *d_input, *d_output;
    int i, j;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start, 0);
    cudaEventCreate(&stop, 0);
    // Restriction (see file header): ROW and COL must be multiples of TILE_DIM.
    dim3 numOfBlocks(numX/TILE_DIM, numY/TILE_DIM);
    dim3 numOfThreads(TILE_DIM, BLOCK_ROWS);
    input = (float *)malloc(size);
    output = (float *)malloc(size);
    cudaMalloc((void **)&d_input, size);
    cudaMalloc((void **)&d_output, size);
    /* Generating the input array */
    for (i = 0; i < numX; i++)
        for (j = 0; j < numY; j++)
            input[i*numY + j] = rand()%20 + 1;
    cudaEventRecord(start, 0);
    cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);
    transposeKernel<<<numOfBlocks, numOfThreads>>>(d_input, d_output);
    // FIX: surface launch-configuration errors (the original never checked).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(output, d_output, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time : %3.1f ms \n", elapsedTime);
    // FIX: destroy the timing events (leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_input);
    cudaFree(d_output);
    free(input);
    free(output);
    return 0;
}
6,131 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_OLD_H_
#define _REDUCE_KERNEL_OLD_H_
#include <stdio.h>
#include <cooperative_groups.h>
#define FULL_MASK 0xffffffff
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// Wrapper that exposes the kernel's dynamic shared memory (the unsized
// `extern __shared__` array) as a typed pointer.  Needed because extern
// unsized shared arrays of templated type cause linker errors; all
// instantiations alias the same int-typed buffer.
template<class T>
struct SharedMemory
{
    // Implicit conversion to a mutable T* view of the shared buffer.
    __device__ inline operator T *()
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
    // Const view of the same buffer.
    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
    __device__ inline operator double *()
    {
        extern __shared__ double __smem_d[];
        return (double *)__smem_d;
    }
    __device__ inline operator const double *() const
    {
        extern __shared__ double __smem_d[];
        return (double *)__smem_d;
    }
};
// True when x is a power of two (a power-of-two value has exactly one bit
// set, so clearing its lowest set bit yields zero).  Note: returns true for
// x == 0 as well, matching the original NVIDIA sample behavior.
// FIX: marked `inline` -- this header is guarded per translation unit only,
// so a non-inline definition violates the ODR when included from multiple
// .cu files.
inline bool isPow2(unsigned int x)
{
    return ((x&(x-1))==0);
}
// Version 0: per-block sum of -log(1+exp(x)) over g_idata (a logistic
// negative-log-likelihood transform) using interleaved addressing with a
// modulo test.  Slowest variant: highly divergent and modulo is expensive.
// Launch: 1-D grid, blockDim.x threads, blockDim.x*sizeof(T) dynamic shared
// memory; each block writes its partial sum to g_odata[blockIdx.x].
// NOTE(review): log(1+exp(x)) overflows for large x -- consider the stable
// form max(x,0) + log1p(exp(-|x|)); TODO confirm input range.
template <class T>
__global__ void
reduceRegr0(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem (tail threads contribute the additive identity 0)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? -log(1+exp(g_idata[i])) : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        // modulo arithmetic is slow!
        if ((tid % (2*s)) == 0)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
// Version 1: same transform and launch contract as reduceRegr0, but the
// strided index keeps active threads contiguous (no modulo, less
// divergence).  The 2*s*tid stride still causes shared-memory bank
// conflicts.  Requires blockDim.x to be a power of two.
template <class T>
__global__ void
reduceRegr1(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? -log(1+exp(g_idata[i])) : 0;
    cg::sync(cta);
    // do reduction in shared mem
    for (unsigned int s=1; s < blockDim.x; s *= 2)
    {
        // contiguous threads own strided pairs -> interleaved addressing
        int index = 2 * s * tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
// Version 2: same transform and launch contract as reduceRegr0, with
// sequential addressing (halving stride): no divergence within active
// warps and no bank conflicts.  Requires blockDim.x to be a power of two.
template <class T>
__global__ void
reduceRegr2(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // load shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? -log(1+exp(g_idata[i])) : 0;
    cg::sync(cta);
    // do reduction in shared mem: fold the upper half onto the lower half
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
// Version 3: like reduceRegr2, but each block covers 2*blockDim.x inputs --
// the first addition happens during the global load, halving the number of
// blocks needed.  Launch with gridDim.x = ceil(n / (2*blockDim.x)).
template <class T>
__global__ void
reduceRegr3(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    T mySum = (i < n) ? -log(1+exp(g_idata[i])) : 0;
    if (i + blockDim.x < n)
        mySum += -log(1+exp(g_idata[i+blockDim.x]));
    sdata[tid] = mySum;
    cg::sync(cta);
    // do reduction in shared mem (sequential addressing, as in version 2)
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = mySum + sdata[tid + s];
        }
        cg::sync(cta);
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version uses the warp shuffle operation if available to reduce
warp synchronization. When shuffle is not available the final warp's
worth of work is unrolled to reduce looping overhead.
See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
for additional information about using shuffle to perform a reduction
within a warp.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Version 4: like reduceRegr3 (2 inputs per thread on load), but the final
// warp's worth of work uses warp shuffles on SM30+ (or a fully unrolled
// shared-memory tail otherwise).  blockSize is the compile-time blockDim.x.
// Shared-memory requirement: at least 64*sizeof(T) bytes (see file comment).
template <class T, unsigned int blockSize>
__global__ void
reduceRegr4(T *g_idata, T *g_odata, unsigned int n)
{
    // Handle to thread block group
    cg::thread_block cta = cg::this_thread_block();
    T *sdata = SharedMemory<T>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    T mySum = (i < n) ? -log(1+exp(g_idata[i])) : 0;
    if (i + blockSize < n)
        mySum += -log(1+exp(g_idata[i+blockSize]));
    sdata[tid] = mySum;
    cg::sync(cta);
    // do reduction in shared mem down to the last warp (s > 32)
    for (unsigned int s=blockDim.x/2; s>32; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = mySum + sdata[tid + s];
        }
        cg::sync(cta);
    }
#if (__CUDA_ARCH__ >= 300 )
    if ( tid < 32 )
    {
        // final warp: reduce via register shuffles (no shared memory)
        cg::coalesced_group active = cg::coalesced_threads();
        // Fetch final intermediate sum from 2nd warp
        if (blockSize >= 64) mySum += sdata[tid + 32];
        // Reduce final warp using shuffle
        for (int offset = warpSize/2; offset > 0; offset /= 2)
        {
            mySum += active.shfl_down(mySum, offset);
        }
    }
#else
    // fully unroll reduction within a single warp
    if ((blockSize >= 64) && (tid < 32))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 32];
    }
    cg::sync(cta);
    if ((blockSize >= 32) && (tid < 16))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 16];
    }
    cg::sync(cta);
    if ((blockSize >= 16) && (tid < 8))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 8];
    }
    cg::sync(cta);
    if ((blockSize >= 8) && (tid < 4))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 4];
    }
    cg::sync(cta);
    if ((blockSize >= 4) && (tid < 2))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 2];
    }
    cg::sync(cta);
    if ((blockSize >= 2) && ( tid < 1))
    {
        sdata[tid] = mySum = mySum + sdata[tid + 1];
    }
    cg::sync(cta);
#endif
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version is completely unrolled, unless warp shuffle is available, then
shuffle is used within a loop. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time. When shuffle is available, it is used to reduce warp synchronization.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Completely unrolled block reduction of -log(1+exp(x)) over g_idata.
 blockSize must equal blockDim.x and be a power of 2 (the host launcher
 dispatches on `threads`, so the two match there).
 Needs a minimum of 64*sizeof(T) bytes of shared memory: allocate
 64*sizeof(T) when blockSize <= 32, else blockSize*sizeof(T).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize>
__global__ void
reduceRegr5(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
// Numerically stable softplus: log(1+exp(v)) = max(v,0) + log1p(exp(-|v|)).
// The naive form log(1+exp(v)) overflows to +inf for v >~ 709 (double)
// and loses precision for very negative v.
T mySum = 0;
if (i < n)
{
T v = g_idata[i];
mySum = -(fmax(v, (T)0) + log1p(exp(-fabs(v))));
}
// each thread also folds in a second element, blockSize slots away
if (i + blockSize < n)
{
T v2 = g_idata[i+blockSize];
mySum += -(fmax(v2, (T)0) + log1p(exp(-fabs(v2))));
}
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem (fully unrolled via the blockSize template arg)
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final warp reduced with shuffles -- no shared-memory round trips
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Grid-stride (Brent's Theorem) block reduction of -log(1+exp(x)) over
 g_idata: each thread accumulates many elements before the shared-memory
 tree. blockSize must equal blockDim.x and be a power of 2; nIsPow2 tells
 the compiler the tail-guard on the second read can be elided.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (64*sizeof(T)
 when blockSize <= 32, else blockSize*sizeof(T)).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduceRegr6(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread.
// Numerically stable softplus: log(1+exp(v)) = max(v,0) + log1p(exp(-|v|));
// the naive form overflows to +inf for v >~ 709 (double).
while (i < n)
{
T v = g_idata[i];
mySum += -(fmax(v, (T)0) + log1p(exp(-fabs(v))));
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
{
T v2 = g_idata[i+blockSize];
mySum += -(fmax(v2, (T)0) + log1p(exp(-fabs(v2))));
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem (fully unrolled via the blockSize template arg)
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final warp reduced with shuffles -- no shared-memory round trips
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
/*
 Baseline block sum reduction: interleaved addressing selected with a
 modulo test. Slow (expensive modulo, no whole-warp activity) but simple.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduce0(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Each thread stages one input element (or 0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? g_idata[pos] : 0;
    cg::sync(block);

    // Interleaved tree reduction: the modulo picks ever-sparser
    // subsets of active threads at each step.
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        if (lane % (2 * stride) == 0)
            shm[lane] += shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/*
 Block sum reduction with contiguous active threads. The interleaved
 shared-memory addressing it produces causes many bank conflicts.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Each thread stages one input element (or 0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? g_idata[pos] : 0;
    cg::sync(block);

    // Contiguous threads each own a strided slot pair; the slot
    // spacing doubles every step.
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        int slot = 2 * stride * lane;
        if (slot < blockDim.x)
            shm[slot] += shm[slot + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/*
 Block sum reduction with sequential addressing: active threads form a
 contiguous prefix, so there is no divergence within full warps and no
 shared-memory bank conflicts.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduce2(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Each thread stages one input element (or 0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? g_idata[pos] : 0;
    cg::sync(block);

    // Halve the active prefix each step; each survivor folds in its
    // partner `stride` slots away.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (lane < stride)
            shm[lane] += shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/*
 Block sum reduction using n/2 threads: the first addition happens during
 the global-memory load, with each thread folding two elements
 (blockDim.x apart) into one running value.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduce3(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Load two elements per thread, guarding both reads against n.
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    T acc = (pos < n) ? g_idata[pos] : 0;
    if (pos + blockDim.x < n)
        acc += g_idata[pos + blockDim.x];
    shm[lane] = acc;
    cg::sync(block);

    // Sequential-addressing tree; each survivor keeps a register copy
    // of its running sum as well as the shared-memory slot.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (lane < stride)
            shm[lane] = acc = acc + shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = acc;
}
/*
This version uses the warp shuffle operation if available to reduce
warp synchronization. When shuffle is not available the final warp's
worth of work is unrolled to reduce looping overhead.
See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
for additional information about using shuffle to perform a reduction
within a warp.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Block sum reduction that finishes the last 32 partials with warp shuffles
 (when __CUDA_ARCH__ >= 300) instead of shared memory.
 Assumes blockSize == blockDim.x -- TODO confirm: the host launcher
 reduceSum_d dispatches the template on `threads`, so they match there.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize>
__global__ void
reduce4(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
// second element of the pair lives blockSize slots away; guard against n
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// tree-reduce until only one warp's worth (32) of partials remains
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
cg::sync(cta);
}
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version is completely unrolled, unless warp shuffle is available, then
shuffle is used within a loop. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time. When shuffle is available, it is used to reduce warp synchronization.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Completely unrolled block sum reduction: the shared-memory tree is
 expanded at compile time via the blockSize template parameter, and the
 final warp uses shuffles when __CUDA_ARCH__ >= 300.
 blockSize must equal blockDim.x and be a power of 2 (host dispatch in
 reduceSum_d guarantees this).
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize>
__global__ void
reduce5(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
// second element of the pair lives blockSize slots away; guard against n
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// each `blockSize >=` test is resolved at compile time, so dead steps vanish
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Multi-element-per-thread block sum reduction (Brent's Theorem): each
 thread strides through g_idata accumulating pairs before the unrolled
 shared-memory tree runs. nIsPow2 lets the compiler drop the tail-guard
 on the second read when n is a power of 2.
 blockSize must equal blockDim.x and be a power of 2.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// each `blockSize >=` test is resolved at compile time, so dead steps vanish
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
 Baseline block reduction of log(x) over g_idata (interleaved addressing
 selected with a modulo test -- slow but simple).
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduceLog0(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Stage log of one input element per thread (0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? log(g_idata[pos]) : 0;
    cg::sync(block);

    // Interleaved tree reduction: the modulo picks ever-sparser
    // subsets of active threads at each step.
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        if (lane % (2 * stride) == 0)
            shm[lane] += shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/*
 Block reduction of log(x) with contiguous active threads; its interleaved
 shared-memory addressing causes many bank conflicts.
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduceLog1(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Stage log of one input element per thread (0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? log(g_idata[pos]) : 0;
    cg::sync(block);

    // Contiguous threads each own a strided slot pair; the slot
    // spacing doubles every step.
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        int slot = 2 * stride * lane;
        if (slot < blockDim.x)
            shm[slot] += shm[slot + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/*
 Block reduction of log(x) with sequential addressing: no divergence
 within full warps and no shared-memory bank conflicts.
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduceLog2(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Stage log of one input element per thread (0 past the end).
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    shm[lane] = (pos < n) ? log(g_idata[pos]) : 0;
    cg::sync(block);

    // Halve the active prefix each step; each survivor folds in its
    // partner `stride` slots away.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (lane < stride)
            shm[lane] += shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = shm[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/*
 Block reduction of log(x) using n/2 threads: the first addition happens
 during the global-memory load, with each thread folding log() of two
 elements (blockDim.x apart) into one running value.
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T>
__global__ void
reduceLog3(T *g_idata, T *g_odata, unsigned int n)
{
    // Cooperative-groups handle for block-wide barriers.
    cg::thread_block block = cg::this_thread_block();
    T *shm = SharedMemory<T>();

    // Load and transform two elements per thread, guarding both reads.
    unsigned int lane = threadIdx.x;
    unsigned int pos = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    T acc = (pos < n) ? log(g_idata[pos]) : 0;
    if (pos + blockDim.x < n)
        acc += log(g_idata[pos + blockDim.x]);
    shm[lane] = acc;
    cg::sync(block);

    // Sequential-addressing tree; each survivor keeps a register copy
    // of its running sum as well as the shared-memory slot.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (lane < stride)
            shm[lane] = acc = acc + shm[lane + stride];
        cg::sync(block);
    }

    // Thread 0 publishes the block's partial sum.
    if (lane == 0)
        g_odata[blockIdx.x] = acc;
}
/*
This version uses the warp shuffle operation if available to reduce
warp synchronization. When shuffle is not available the final warp's
worth of work is unrolled to reduce looping overhead.
See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
for additional information about using shuffle to perform a reduction
within a warp.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Block reduction of log(x) that finishes the last 32 partials with warp
 shuffles (when __CUDA_ARCH__ >= 300) instead of shared memory.
 Assumes blockSize == blockDim.x (host dispatch in reduceSum_d matches them).
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize>
__global__ void
reduceLog4(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? log(g_idata[i]) : 0;
// second element of the pair lives blockSize slots away; guard against n
if (i + blockSize < n)
mySum += log(g_idata[i+blockSize]);
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// tree-reduce until only one warp's worth (32) of partials remains
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
cg::sync(cta);
}
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version is completely unrolled, unless warp shuffle is available, then
shuffle is used within a loop. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time. When shuffle is available, it is used to reduce warp synchronization.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Completely unrolled block reduction of log(x): the shared-memory tree is
 expanded at compile time via the blockSize template parameter, and the
 final warp uses shuffles when __CUDA_ARCH__ >= 300.
 blockSize must equal blockDim.x and be a power of 2.
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize>
__global__ void
reduceLog5(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
T mySum = (i < n) ? log(g_idata[i]) : 0;
// second element of the pair lives blockSize slots away; guard against n
if (i + blockSize < n)
mySum += log(g_idata[i+blockSize]);
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// each `blockSize >=` test is resolved at compile time, so dead steps vanish
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
/*
 Multi-element-per-thread block reduction of log(x) (Brent's Theorem):
 each thread strides through g_idata accumulating pairs before the
 unrolled shared-memory tree runs. nIsPow2 lets the compiler drop the
 tail-guard on the second read when n is a power of 2.
 blockSize must equal blockDim.x and be a power of 2.
 NOTE(review): inputs are presumed > 0 so log() is finite -- confirm at
 the call site.
 Needs a minimum of 64*sizeof(T) bytes of shared memory (see comment above).
 Writes one partial sum per block into g_odata[blockIdx.x].
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduceLog6(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += log(g_idata[i]);
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += log(g_idata[i+blockSize]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem
// each `blockSize >=` test is resolved at compile time, so dead steps vanish
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
#if (__CUDA_ARCH__ >= 300 )
// final 32 partials reduced with shuffles -- no shared-memory traffic
if ( tid < 32 )
{
cg::coalesced_group active = cg::coalesced_threads();
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += active.shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
cg::sync(cta);
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
cg::sync(cta);
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
cg::sync(cta);
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
cg::sync(cta);
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
cg::sync(cta);
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
cg::sync(cta);
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
void reduceSum_d(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata,
int type) // 0=transform & addition, 1=addition, 2=log_addition
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(double) : threads * sizeof(double);
switch(type)
{
case 0:
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
reduceRegr0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceRegr1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceRegr2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 3:
reduceRegr3<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 1024:
reduceRegr4<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceRegr4<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceRegr4<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceRegr4<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceRegr4<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceRegr4<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceRegr4<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceRegr4<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceRegr4<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceRegr4<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceRegr4<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 5:
switch (threads)
{
case 1024:
reduceRegr5<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceRegr5<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceRegr5<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceRegr5<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceRegr5<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceRegr5<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceRegr5<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceRegr5<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceRegr5<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceRegr5<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceRegr5<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 1024:
reduceRegr6<double, 1024, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceRegr6<double, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceRegr6<double, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceRegr6<double, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceRegr6<double, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceRegr6<double, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceRegr6<double, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceRegr6<double, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceRegr6<double, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceRegr6<double, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceRegr6<double, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 1024:
reduceRegr6<double, 1024, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceRegr6<double, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceRegr6<double, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceRegr6<double, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceRegr6<double, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceRegr6<double, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceRegr6<double, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceRegr6<double, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceRegr6<double, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceRegr6<double, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceRegr6<double, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
break;
case 1:
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
reduce0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 3:
reduce3<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 1024:
reduce4<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduce4<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce4<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce4<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce4<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce4<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce4<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce4<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce4<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce4<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce4<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 5:
switch (threads)
{
case 1024:
reduce5<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduce5<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce5<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce5<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce5<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce5<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce5<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce5<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce5<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce5<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce5<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 1024:
reduce6<double, 1024, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduce6<double, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<double, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<double, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<double, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<double, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<double, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<double, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<double, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<double, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<double, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 1024:
reduce6<double, 1024, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduce6<double, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<double, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<double, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<double, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<double, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<double, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<double, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<double, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<double, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<double, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
break;
case 2:
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
reduceLog0<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceLog1<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceLog2<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 3:
reduceLog3<double><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 1024:
reduceLog4<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceLog4<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceLog4<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceLog4<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceLog4<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceLog4<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceLog4<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceLog4<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceLog4<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceLog4<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceLog4<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 5:
switch (threads)
{
case 1024:
reduceLog5<double, 1024><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceLog5<double, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceLog5<double, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceLog5<double, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceLog5<double, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceLog5<double, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceLog5<double, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceLog5<double, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceLog5<double, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceLog5<double, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceLog5<double, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 1024:
reduceLog6<double, 1024, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceLog6<double, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceLog6<double, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceLog6<double, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceLog6<double, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceLog6<double, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceLog6<double, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceLog6<double, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceLog6<double, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceLog6<double, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceLog6<double, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 1024:
reduceLog6<double, 1024, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 512:
reduceLog6<double, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduceLog6<double, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduceLog6<double, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduceLog6<double, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduceLog6<double, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduceLog6<double, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduceLog6<double, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduceLog6<double, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduceLog6<double, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduceLog6<double, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
break;
}
}
/* Launches one of the optimized sum-reduction kernels on float data.
 *
 * type selects the kernel family:
 *   0 -> reduceRegr*  (transform & addition)
 *   1 -> reduce*      (plain addition)
 *   2 -> reduceLog*   (multiplication, via logarithms)
 * whichKernel (0..6) selects the optimization level inside the family.
 * Kernels 4 and 5 are templated on the block size, and kernel 6 is
 * additionally templated on whether 'size' is a power of two; template
 * arguments must be compile-time constants, so the runtime 'threads'
 * value has to be mapped onto them with a switch.
 */
void reduceSum_f(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata,
int type) // 0=transform & addition, 1=addition, 2=mult
{
    dim3 blockDims(threads, 1, 1);
    dim3 gridDims(blocks, 1, 1);
    // With a single warp per block, reserve two warps' worth of shared
    // memory so the unrolled warp reduction never indexes out of bounds.
    int smemBytes = (threads <= 32) ? 2 * threads * sizeof(float)
                                    : threads * sizeof(float);

    /* The macros below stamp out the launch switches once per kernel
     * family instead of spelling out the ~100 nearly identical launch
     * statements by hand.  FAM is the family prefix (reduceRegr, reduce
     * or reduceLog); K is the kernel number pasted onto it. */

#define RSF_THREAD_SWITCH(FAM, K)                                                                              \
    switch (threads)                                                                                           \
    {                                                                                                          \
        case 1024: FAM##K<float, 1024><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case  512: FAM##K<float,  512><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case  256: FAM##K<float,  256><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case  128: FAM##K<float,  128><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case   64: FAM##K<float,   64><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case   32: FAM##K<float,   32><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case   16: FAM##K<float,   16><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case    8: FAM##K<float,    8><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case    4: FAM##K<float,    4><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case    2: FAM##K<float,    2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
        case    1: FAM##K<float,    1><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;   \
    }

#define RSF_POW2_SWITCH(FAM, IS_POW2)                                                                                  \
    switch (threads)                                                                                                   \
    {                                                                                                                  \
        case 1024: FAM##6<float, 1024, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case  512: FAM##6<float,  512, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case  256: FAM##6<float,  256, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case  128: FAM##6<float,  128, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case   64: FAM##6<float,   64, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case   32: FAM##6<float,   32, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case   16: FAM##6<float,   16, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case    8: FAM##6<float,    8, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case    4: FAM##6<float,    4, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case    2: FAM##6<float,    2, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
        case    1: FAM##6<float,    1, IS_POW2><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;  \
    }

#define RSF_FAMILY(FAM)                                                                                \
    switch (whichKernel)                                                                               \
    {                                                                                                  \
        case 0: FAM##0<float><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;    \
        case 1: FAM##1<float><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;    \
        case 2: FAM##2<float><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;    \
        case 3: FAM##3<float><<< gridDims, blockDims, smemBytes >>>(d_idata, d_odata, size); break;    \
        case 4: RSF_THREAD_SWITCH(FAM, 4) break;                                                       \
        case 5: RSF_THREAD_SWITCH(FAM, 5) break;                                                       \
        case 6:                                                                                        \
        default:                                                                                       \
            if (isPow2(size))                                                                          \
            {                                                                                          \
                RSF_POW2_SWITCH(FAM, true)                                                             \
            }                                                                                          \
            else                                                                                       \
            {                                                                                          \
                RSF_POW2_SWITCH(FAM, false)                                                            \
            }                                                                                          \
            break;                                                                                     \
    }

    switch (type)
    {
        case 0: RSF_FAMILY(reduceRegr) break; // transform & addition
        case 1: RSF_FAMILY(reduce)     break; // plain addition
        case 2: RSF_FAMILY(reduceLog)  break; // multiplication
        // any other 'type' intentionally launches nothing, as before
    }

#undef RSF_FAMILY
#undef RSF_POW2_SWITCH
#undef RSF_THREAD_SWITCH
}
#endif // #ifndef _REDUCE_KERNEL_H_
|
6,132 | #include<iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
// Inverts every colour component in place: c -> 1 - c.
// One thread per array element; threads past the end do nothing.
__global__ void kernel( float* r_gpu, float* g_gpu, float* b_gpu, int N) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    r_gpu[idx] = 1 - r_gpu[idx];
    g_gpu[idx] = 1 - g_gpu[idx];
    b_gpu[idx] = 1 - b_gpu[idx];
}
// Host-side counterpart of the GPU kernel: inverts one RGB triple in
// place (each component becomes 1 - component).
void CambiarColores(float* r, float* g, float* b) {
    float nr = 1 - *r;
    float ng = 1 - *g;
    float nb = 1 - *b;
    *r = nr;
    *g = ng;
    *b = nb;
}
/* Reads an image stored as "m n" followed by the R, G and B planes from
 * img.txt, inverts every colour component on the GPU, reports the kernel
 * time, and writes the result to img_salida.txt in the same layout.
 *
 * Fixes over the original: fopen/fscanf results are checked, the output
 * file is closed (it was never fclose'd, risking lost buffered data),
 * the timing events are destroyed, and the new[] arrays are released
 * with delete[] (plain `delete` on a new[] array is undefined behaviour). */
int main(int argc, char const *argv[]) {
    FILE *pFile = fopen("img.txt", "r");
    if (pFile == NULL) {
        // fail early instead of crashing inside fscanf on a NULL stream
        cout << "Could not open img.txt" << endl;
        return 1;
    }
    int n, m;
    if (fscanf(pFile, "%d %d", &m, &n) != 2 || n <= 0 || m <= 0) {
        cout << "Invalid header in img.txt" << endl;
        fclose(pFile);
        return 1;
    }
    int tamanio = n * m;  // pixels per channel
    int block_size = 256;
    int grid_size = (int) ceil((float) tamanio / block_size);
    float *r_gpu, *g_gpu, *b_gpu;
    cudaMalloc(&r_gpu, sizeof(float) * tamanio);
    cudaMalloc(&g_gpu, sizeof(float) * tamanio);
    cudaMalloc(&b_gpu, sizeof(float) * tamanio);
    float *r = new float[tamanio];
    float *g = new float[tamanio];
    float *b = new float[tamanio];
    // channels are stored one after another: all R, then all G, then all B
    for (int i = 0; i < tamanio; ++i) {
        fscanf(pFile, "%f", &r[i]);
    }
    for (int i = 0; i < tamanio; ++i) {
        fscanf(pFile, "%f", &g[i]);
    }
    for (int i = 0; i < tamanio; ++i) {
        fscanf(pFile, "%f", &b[i]);
    }
    fclose(pFile);
    cudaMemcpy(r_gpu, r, sizeof(float) * tamanio, cudaMemcpyHostToDevice);
    cudaMemcpy(g_gpu, g, sizeof(float) * tamanio, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, sizeof(float) * tamanio, cudaMemcpyHostToDevice);
    // time only the kernel, not the transfers
    cudaEvent_t ct1, ct2;
    float dt;
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    kernel<<<grid_size, block_size>>>(r_gpu, g_gpu, b_gpu, tamanio);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cout << "Tiempo GPU: " << dt << " [ms]" << endl;
    cudaEventDestroy(ct1);   // added: release timing events
    cudaEventDestroy(ct2);
    cudaMemcpy(r, r_gpu, sizeof(float) * tamanio, cudaMemcpyDeviceToHost);
    cudaMemcpy(g, g_gpu, sizeof(float) * tamanio, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, b_gpu, sizeof(float) * tamanio, cudaMemcpyDeviceToHost);
    cudaFree(r_gpu);
    cudaFree(g_gpu);
    cudaFree(b_gpu);
    FILE *pSalida = fopen("img_salida.txt", "w");
    if (pSalida == NULL) {
        cout << "Could not open img_salida.txt" << endl;
        delete[] r;
        delete[] g;
        delete[] b;
        return 1;
    }
    fprintf(pSalida, "%d %d\n", m, n);
    // same layout as the input: space-separated values, no trailing space,
    // newline after the R and G planes (and, as before, none after B)
    for (int i = 0; i < tamanio; ++i) {
        fprintf(pSalida, (i == tamanio - 1) ? "%f" : "%f ", r[i]);
    }
    fprintf(pSalida, "\n");
    for (int i = 0; i < tamanio; ++i) {
        fprintf(pSalida, (i == tamanio - 1) ? "%f" : "%f ", g[i]);
    }
    fprintf(pSalida, "\n");
    for (int i = 0; i < tamanio; ++i) {
        fprintf(pSalida, (i == tamanio - 1) ? "%f" : "%f ", b[i]);
    }
    fclose(pSalida);   // added: flush and close the output file
    delete[] r;        // fixed: new[] requires delete[]
    delete[] g;
    delete[] b;
    return 0;
}
6,133 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress-test kernel (the file header says
 * "Do not modify", so the code below is left exactly as generated and
 * only comments are added).  It chains float arithmetic — including
 * deliberate divisions by +/-0.0f and values near the float range limits —
 * through 'comp' and prints the final value with full precision. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
/* the incoming 'comp' argument is immediately overwritten */
comp = (-0.0f + +1.6332E-20f + +1.3849E36f);
comp += var_2 + (var_3 + -1.0825E-36f - -1.9374E-35f);
float tmp_1 = +1.5474E26f / var_4 * +0.0f - (var_5 - var_6);
/* var_7 / -0.0f is an intentional division by signed zero (inf/NaN paths) */
comp = tmp_1 * +1.8058E-37f + (var_7 / -0.0f);
if (comp < fmodf((var_8 * expf(+1.7210E-36f)), (var_9 * var_10 / var_11))) {
float tmp_2 = -0.0f;
comp = tmp_2 * var_12 / atan2f(var_13 - (+0.0f / +1.8699E12f * powf(var_14 / +0.0f - -1.9623E-35f, coshf((-1.3684E-36f * (var_15 / (var_16 / (-1.0025E36f + -1.6202E5f))))))), (var_17 + var_18 * -1.0397E36f * -0.0f / var_19));
}
/* var_1 controls the trip count of the accumulation loop */
for (int i=0; i < var_1; ++i) {
comp += (-1.5087E14f * (-1.8772E-35f + var_20 * var_21));
}
if (comp < (var_22 - var_23 + (var_24 / (var_25 * -1.0020E-36f)))) {
comp = (+1.0563E36f * +1.8716E-41f - coshf(ceilf((var_26 + -1.1669E-43f / (-1.2822E34f / +1.3133E-44f)))));
}
/* %.17g prints enough digits to round-trip any float/double exactly */
printf("%.17g\n", comp);
}
/* Allocates a heap array of 'count' floats (default 10, matching the
 * original fixed size) and initializes every element to v.
 * Returns NULL if the allocation fails; the caller owns the memory and
 * must release it with free(). */
float* initPointer(float v, int count = 10) {
    float *ret = (float*) malloc(sizeof(float) * count);
    if (ret == NULL)   // fixed: malloc can fail; don't write through NULL
        return NULL;
    for (int i = 0; i < count; ++i)
        ret[i] = v;
    return ret;
}
/* Parses the generated kernel's 27 scalar arguments from the command
 * line and runs it once on a single thread.
 * Fixes over the original: argc is validated before touching argv[1..27]
 * (reading past argc is undefined behaviour), and the result of
 * cudaDeviceSynchronize is checked so kernel faults are reported. */
int main(int argc, char** argv) {
    if (argc < 28) {
        fprintf(stderr, "usage: %s <27 numeric arguments>\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
6,134 | #include "includes.h"
__constant__ float *c_Kernel;
// Element-wise mean of the visible and infrared signals, written back
// into d_ip_v: d_ip_v[i] = (d_ip_v[i] + d_ip_ir[i]) / 2.
// One thread per sample; extra threads are guarded out.
__global__ void average(float *d_ip_v, float *d_ip_ir, int app_len) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= app_len)
        return;
    d_ip_v[idx] = (d_ip_v[idx] + d_ip_ir[idx]) / 2;
}
6,135 | #include "includes.h"
/* Recombines the step data element-wise: cCurr = cBar + cHalf, over a
 * row-major 2D grid that is nx elements wide (one thread per cell).
 *
 * NOTE(review): there is no bounds check on globalIdx/globalIdy, so this
 * assumes the launch grid exactly covers the domain; any excess thread
 * would write out of bounds.  The y-extent is not passed in, so a guard
 * cannot be added here — confirm the grid/block shape at the launch site.
 */
__global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed (row-major: row globalIdy, column globalIdx)
int index = globalIdy * nx + globalIdx;
// Recover the new data
cCurr[index] = cBar[index] + cHalf[index];
}
6,136 | #include <iostream>
using namespace std;
// Naive n x n integer matrix product: cd = ad * bd (row-major).
// Launched with an (n, n) grid of single-thread blocks, so each block
// computes exactly one output element: blockIdx.x = row, blockIdx.y = col.
__global__ void multiply(int *ad, int *bd, int *cd, int n)
{
    const int r = blockIdx.x;
    const int c = blockIdx.y;
    int acc = 0;
    for (int k = 0; k < n; k++)
        acc += ad[r * n + k] * bd[k * n + c];
    cd[r * n + c] = acc;
}
/* Reads a matrix size n, builds two n x n integer matrices with
 * a[i][j] = b[i][j] = i, multiplies them on the GPU (one single-thread
 * block per output element), and prints the input matrix, the product
 * and the elapsed kernel time.
 *
 * Fixes over the original: the host matrices are heap-allocated (the
 * variable-length stack arrays `int a[n*n]` are non-standard C++ and
 * overflow the stack for large n), the input is validated, and the
 * device buffers and timing events are released. */
int main()
{
    cout << "Enter the size" << endl;
    int n;
    cin >> n;
    if (!cin || n <= 0)
    {
        // garbage or non-positive input would produce an invalid grid
        cout << "Invalid size" << endl;
        return 1;
    }
    int *a = new int[n * n];
    int *b = new int[n * n];
    int *c = new int[n * n];
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            a[i * n + j] = i;
            b[i * n + j] = i;
        }
    }
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            cout << a[i * n + j] << " ";
        }
        cout << endl;
    }
    int size = n * n * sizeof(int);
    int *ad, *bd, *cd;
    cudaEvent_t start, end;
    cudaMalloc(&ad, size);
    cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
    cudaMalloc(&bd, size);
    cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice);
    cudaMalloc(&cd, size);
    dim3 grid(n, n, 1);    // one block per output element
    dim3 block(1, 1, 1);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    multiply<<<grid, block>>>(ad, bd, cd, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float time = 0;
    cudaEventElapsedTime(&time, start, end);
    cudaMemcpy(c, cd, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            cout << c[i * n + j] << " ";
        }
        cout << endl;
    }
    cout << "The time required is " << time << endl;
    // added: the original leaked the device buffers and timing events
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
6,137 | #include <stdio.h>
#include <time.h>
#include<math.h>
/* Read nb_rows * nb_cols whitespace-separated doubles from `file` into the
row-major buffer `mat`. A fscanf matching failure (return 0) aborts the
current row only; EOF (non-zero) does not trigger the break, mirroring the
original handling. */
void load_matrix_from_file(FILE * file, int nb_rows,int nb_cols, double* mat){
for(int r = 0; r < nb_rows; r++){
double *row = mat + r*nb_cols;
for(int c = 0; c < nb_cols; c++){
if (!fscanf(file, "%lf", row + c)){
break;
}
}
}
}
/* Same contract as load_matrix_from_file: fill a row-major nb_rows x nb_cols
buffer with doubles parsed from `file`; a parse failure skips the rest of
the current row. */
void load_matrix_2D_from_file(FILE * file, int nb_rows,int nb_cols, double* matrix){
for(int r = 0; r < nb_rows; r++){
for(int c = 0; c < nb_cols; c++){
double *slot = &matrix[r*nb_cols + c];
if (!fscanf(file, "%lf", slot)){
break;
}
}
}
}
/* Read n doubles from `file` into matrix[0..n); a fscanf matching failure
(return 0) stops the loop, EOF does not (same as the 2D loaders). */
void load_matrix_1D_from_file(FILE * file, int n, double* matrix){
int k = 0;
while(k < n && fscanf(file, "%lf", &matrix[k])){
k++;
}
}
// Accumulate the sum of (A[i] + B[i]) over all N*N elements into *result.
// One thread per (row, col) element; *result must be zeroed by the caller.
// atomicAdd on double requires compute capability 6.0+.
__global__ void kernel_gpu(int N, double *A, double *B, double *result){
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
if(row < N && col < N){
atomicAdd(result, A[row*N+col] + B[row*N+col]);
}
}
// Load two 1000x1000 matrices from text files, sum all elements of (A+B) on
// the GPU via atomicAdd, and print the timing plus the mean element value.
int main(){
int N=1000;
double *h_A, *h_B, *h_result;
double *d_A, *d_B, *d_result;
// host buffers
h_A = (double*)malloc(N*N*sizeof(double));
h_B = (double*)malloc(N*N*sizeof(double));
h_result = (double*)malloc(sizeof(double));
// device buffers
cudaMalloc(&d_A, N*N*sizeof(double));
cudaMalloc(&d_B, N*N*sizeof(double));
cudaMalloc(&d_result,sizeof(double));
// load matrices from file (FIX: fopen results were never checked)
FILE *file;
file=fopen("matrix_1000_1000_001.txt", "r");
if (!file) { printf("cannot open matrix_1000_1000_001.txt\n"); return 1; }
load_matrix_2D_from_file(file, N, N,h_A);
fclose(file);
FILE *file2;
file2=fopen("matrix_1000_1000_002.txt", "r");
if (!file2) { printf("cannot open matrix_1000_1000_002.txt\n"); return 1; }
load_matrix_2D_from_file(file2, N,N,h_B);
fclose(file2);
//====start log time
clock_t begin=clock();
cudaMemcpy(d_A, h_A, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, N*N*sizeof(double), cudaMemcpyHostToDevice);
// FIX: cudaMalloc does not initialize memory, so the atomicAdd
// accumulator previously started from garbage; zero it first.
cudaMemset(d_result, 0, sizeof(double));
//execute kernel
int nb_hyper=8;
dim3 nb_block(int(N/nb_hyper)+1,int(N/nb_hyper)+1,1);
dim3 nb_thread_per_block(nb_hyper, nb_hyper,1);
kernel_gpu<<<nb_block, nb_thread_per_block>>>(N, d_A, d_B, d_result);
//copy result from device to host (blocking copy also synchronizes the kernel)
cudaMemcpy(h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
//====end log time
clock_t end = clock();
double time_spent=(double) (end-begin)/CLOCKS_PER_SEC;
printf("Total time:%f\n",time_spent);
printf("result :%f\n",h_result[0]/(N*N));
// FIX: the original leaked all six allocations.
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_result);
free(h_A);
free(h_B);
free(h_result);
return 0;
}
|
6,138 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// Print a greeting from each of the first N threads in the grid.
__global__ void someKernel(int N)
{
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= N)
return;
printf("Hello from thread # %i (block #: %i)\n", tid, blockIdx.x);
}
// Host entry point: launch 2 blocks x 5 threads, cap the greeting at 10
// threads, and block until the device printf output has been flushed.
extern void cuda_doStuff(void)
{
const int blocks = 2;
const int threads = 5;
const int printLimit = 10;
someKernel<<<blocks, threads>>>(printLimit);
cudaDeviceSynchronize();
}
|
6,139 | #include <iostream>
#include <unistd.h>
#include <sys/time.h>
#define tile_width 32
// Naive square matrix multiply on global memory: p = m * n for width x width
// row-major matrices. One thread per output element, bounds-guarded so the
// grid may overshoot width.
__global__
void normal_square_matrix_mult_kernel(int *m, int *n, int *p, unsigned width){
unsigned c = threadIdx.x + (blockIdx.x*blockDim.x);
unsigned r = threadIdx.y + (blockIdx.y*blockDim.y);
if(r < width && c < width){
int acc = 0;
for(unsigned i = 0; i < width; ++i)
acc += m[(r*width)+i] * n[(i*width)+c];
p[(r*width)+c] = acc;
}
}
/* Tiled square-matrix multiply: p = m * n, all width x width and row-major.
Each block computes one tile_width x tile_width tile of p by staging
matching tiles of m and n in shared memory.
Preconditions (unguarded): blockDim == (tile_width, tile_width) and width
is an exact multiple of tile_width -- TODO confirm at the call sites
(main uses width=1024, block=32, which satisfies both). */
__global__
void tile_square_matrix_mult_kernel(int *m, int *n, int *p, unsigned width){
/* per-block staging tiles in shared memory */
__shared__ int mds[tile_width][tile_width];
__shared__ int nds[tile_width][tile_width];
unsigned bx = blockIdx.x;
unsigned by = blockIdx.y;
unsigned tx = threadIdx.x;
unsigned ty = threadIdx.y;
/* global row/column of the output element owned by this thread */
unsigned row = (by*tile_width)+ty;
unsigned col = (bx*tile_width)+tx;
int pvalue = 0;
unsigned ph, k;
/* march the tile pair across the shared (inner) dimension */
for(ph=0; ph<width/tile_width; ph++){
mds[ty][tx] = m[(row*width)+(ph*tile_width) + tx];
nds[ty][tx] = n[(((ph*tile_width)+ty)*width) + col];
__syncthreads(); /* both tiles fully loaded before any reads */
for(k=0; k<tile_width; k++){
pvalue += mds[ty][k]*nds[k][tx];
}
__syncthreads(); /* all reads done before the next iteration overwrites */
}
p[(row*width)+col] = pvalue;
}
/* Host wrapper: multiply two width x width int matrices on the GPU.
type 'n' selects the naive global-memory kernel, 't' the shared-memory
tiled kernel (any other type leaves p untouched except the final copy of
uninitialized device memory). `block` is the block edge length. */
void square_matrix_mult(int *m, int *n, int *p, unsigned width, unsigned block, char type){
unsigned size = width*width*sizeof(int);
int *d_m, *d_n, *d_p;
cudaMalloc((void **)&d_m, size);
cudaMalloc((void **)&d_n, size);
cudaMalloc((void **)&d_p, size);
cudaMemcpy(d_m, m, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_n, n, size, cudaMemcpyHostToDevice);
/* FIX: ceil(width/block) performed integer division first, so the ceil was
a no-op and the grid came up one block short whenever block did not
divide width; use integer ceil-division instead. */
unsigned blocks_per_side = (width + block - 1) / block;
dim3 dimGrid(blocks_per_side, blocks_per_side, 1);
dim3 dimBlock(block, block, 1);
switch(type){
case 'n':
normal_square_matrix_mult_kernel<<<dimGrid, dimBlock>>>(d_m, d_n, d_p, width);
break;
case 't':
tile_square_matrix_mult_kernel<<<dimGrid, dimBlock>>>(d_m, d_n, d_p, width);
break;
default:
break;
}
cudaMemcpy(p, d_p, size, cudaMemcpyDeviceToHost);
cudaFree(d_m);
cudaFree(d_n);
cudaFree(d_p);
}
// Benchmark driver: multiply two 1024x1024 matrices with both kernels,
// print each wall-clock time, and leave the results in h_p / h_r.
int main(int argc, char const *argv[]){
unsigned width = 1024;
unsigned block = 32;
unsigned ntotal = width*width;
int *h_m = new int[ntotal];
int *h_n = new int[ntotal];
int *h_p = new int[ntotal];
int *h_r = new int[ntotal];
unsigned i, j;
for(i=0; i<width; i++){
for(j=0; j<width; j++){
h_m[(i*width)+j] = j;
h_n[(i*width)+j] = j;
h_p[(i*width)+j] = 0;
h_r[(i*width)+j] = 0;
}
}
struct timeval ti, tf;
double time;
gettimeofday(&ti, NULL);
square_matrix_mult(h_m, h_n, h_p, width, block, 'n');
gettimeofday(&tf, NULL);
time = (tf.tv_sec - ti.tv_sec)*1000 + (tf.tv_usec - ti.tv_usec)/1000;
printf("[%ix%i] memoria global: %.8lf s\n", width, width, time/1000);
gettimeofday(&ti, NULL);
square_matrix_mult(h_m, h_n, h_r, width, block, 't');
gettimeofday(&tf, NULL);
time = (tf.tv_sec - ti.tv_sec)*1000 + (tf.tv_usec - ti.tv_usec)/1000;
printf("[%ix%i] memoria compartida: %.8lf s\n", width, width, time/1000);
// FIX: arrays allocated with new[] must be released with delete[];
// plain delete on a new[] pointer is undefined behavior.
delete[] h_m;
delete[] h_n;
delete[] h_p;
delete[] h_r;
return 0;
}
|
6,140 | #include <stdlib.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <pthread.h>
#include "cuda_runtime.h"
#include <stdio.h>
#include <tgmath.h>
#include <sys/time.h>
#include <assert.h>
extern "C" {
/* Keep only the samples whose lead-1 magnitude exceeds `threshold`.
Surviving values of all three leads are compacted into output1..output3,
the original sample index is recorded (as a float) in `samples`, and the
number of kept samples is stored through output_len. */
void threshold_ecg(float * output1,
float * output2,
float * output3,
float * samples,
int * output_len,
float * input1,
float * input2,
float * input3,
int input_len,
float threshold)
{
int kept = 0;
for (int i = 0; i < input_len; i++) {
float lead1 = input1[i];
/* keep when |lead1| > threshold */
if (lead1 < -threshold || lead1 > threshold) {
output1[kept] = lead1;
output2[kept] = input2[i];
output3[kept] = input3[i];
samples[kept] = i;
kept++;
}
}
* output_len = kept;
}
/* Wall-clock time in microseconds since the Unix epoch, as a double. */
double get_time(void) {
struct timeval now;
gettimeofday(&now, NULL);
return 1000000.0 * (double)now.tv_sec + (double)now.tv_usec;
}
/* Difference between two get_time() stamps, converted from us to ms. */
double elapsed_time(double start_time, double end_time) {
double delta_us = end_time - start_time;
return delta_us / 1000.0;
}
/* Turning-point compression: halve the signal while keeping local extrema.
For each input pair (input[2k], input[2k+1]) we keep input[2k] when it
forms a "turning point" relative to the previously kept sample (sign
change in the slope), otherwise input[2k+1]. output must hold
input_len/2 floats. */
void turning_point_compress(float * output,
float * input,
int input_len)
{
int out_len = input_len / 2;
output[0] = input[0];
for (int k = 1; k < out_len; k++) {
float a = input[2*k];
float b = input[2*k+1];
int is_turning = ((a - output[k-1]) * (b - a) < 0);
output[k] = is_turning ? a : b;
}
}
/* Per-thread work descriptor for tp_worker: one chunk of the
turning-point compression job. */
struct tp_arg {
float * output; /* destination for this chunk's compressed samples */
float * input; /* start of this chunk's raw samples */
int len; /* number of raw samples in the chunk */
};
/* pthread entry point: unpack a tp_arg and compress its chunk. */
void * tp_worker(void * _args) {
struct tp_arg * job = (struct tp_arg *) _args;
turning_point_compress(job->output, job->input, job->len);
pthread_exit(NULL);
}
/* Turning-point compression parallelized over 8 pthreads: the input is split
into 8 equal chunks and each chunk is compressed independently.
Assumes input_len is divisible by 8 (any remainder samples are silently
dropped) -- TODO confirm callers pad or size accordingly.
NOTE(review): each worker restarts the compression at its chunk boundary
(its first output is simply its first input), so results at the seams can
differ from the serial turning_point_compress -- presumably acceptable for
this application; verify if bit-exact output matters. */
void parallel_turning_point_compress(float * output,
float * input,
int input_len)
{
int num_threads = 8;
int tid;
struct tp_arg thread_args[num_threads];
pthread_t threads[num_threads];
pthread_attr_t th_attr;
pthread_attr_init(&th_attr);
/* explicitly joinable so the join loop below is portable */
pthread_attr_setdetachstate(&th_attr, PTHREAD_CREATE_JOINABLE);
int chunk_size = input_len / num_threads;
for (tid = 0; tid < num_threads; tid++) {
/* each chunk compresses 2:1, hence the /2 on the output offset */
(&thread_args[tid]) -> output = & output[chunk_size * tid / 2];
(&thread_args[tid]) -> input = & input[chunk_size * tid];
(&thread_args[tid]) -> len = chunk_size;
pthread_create(&threads[tid], &th_attr, tp_worker, (void *) & thread_args[tid]);
}
for (tid = 0; tid < num_threads; tid++) {
pthread_join(threads[tid], NULL);
}
pthread_attr_destroy(&th_attr);
}
/* Inclusive prefix sum over a device array via Thrust.
`in` and `out` are raw device pointers of length `len`. */
void inclusive_scan(int * out, int * in, int len) {
thrust::device_ptr<int> first = thrust::device_pointer_cast(in);
thrust::device_ptr<int> result = thrust::device_pointer_cast(out);
thrust::inclusive_scan(first, first + len, result);
}
/* Exclusive prefix sum over a device array via Thrust.
`in` and `out` are raw device pointers of length `len`. */
void exclusive_scan(int * out, int * in, int len) {
thrust::device_ptr<int> first = thrust::device_pointer_cast(in);
thrust::device_ptr<int> result = thrust::device_pointer_cast(out);
thrust::exclusive_scan(first, first + len, result);
}
/* Copy the single element ary[idx] from device memory into *last_val on the
host (blocking copy, so it also synchronizes preceding work in the
default stream). */
void device_index(int * ary, int * last_val, int idx) {
cudaMemcpy(last_val, & ary[idx], sizeof(int), cudaMemcpyDeviceToHost);
}
}
|
6,141 | //
// fast_transpose.cu
//
//
// Created by Laura Balasso on 13/05/2019.
//
#include <stdio.h>
#include <stdlib.h>
#define TILE_DIM 32
/* function that fills an array with random doubles */
/* Fill p[0..n) with pseudo-random doubles in [0, 100]. Uses rand(), so call
srand() first if reproducibility across runs matters. */
void random_doubles(double *p, int n) {
for(int k = 0; k < n; k++) {
p[k] = ( (double)rand() * 100 ) / (double)RAND_MAX ;
}
}
/* function that tests the equality between two martices */
/* Compare two N x N matrices element-wise; print an error at the first
mismatch, or a success message when all N*N entries agree exactly. */
void equality_test(double* M1, double* M2, int N){
long int k = 0;
while(k < N*N){
if(M1[k] != M2[k]){
printf("Error! the two methods produce different results. \n");
break;
}
k++;
}
if(k == N*N) printf("Correct result! \n");
}
/* kernel that implements the fast transpose */
/* Shared-memory (coalesced) transpose: M_out = M_in^T for a square matrix of
width gridDim.x * TILE_DIM. Launch with blockDim = (TILE_DIM, block_rows);
each thread handles TILE_DIM/block_rows elements of its block's tile. */
__global__ void fast_transpose(double * M_in , double * M_out, int block_rows){
/* FIX: +1 padding on the inner dimension removes shared-memory bank
conflicts on the column-wise reads in the write-back loop below
(standard trick from NVIDIA's transpose example). */
__shared__ double tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x; // global column of this thread
int y = blockIdx.y * TILE_DIM + threadIdx.y; // global row (first of several)
int width = gridDim.x * TILE_DIM; // full matrix width
/* stage one TILE_DIM x TILE_DIM sub-matrix in shared memory */
for (int j = 0; j < TILE_DIM; j += block_rows){
tile[threadIdx.y+j][threadIdx.x] = M_in[(y+j) * width + x];
}
__syncthreads(); // whole tile loaded before any transposed read
/* write the transposed tile to the mirrored block position */
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += block_rows){
M_out[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
/* kernel that implements a naive algorithm for matrix transpose */
/* Baseline transpose straight through global memory (strided writes);
same launch layout as fast_transpose. */
__global__ void naive_transpose(double * M_in, double * M_out, int block_rows)
{
int col = blockIdx.x * TILE_DIM + threadIdx.x;
int rowBase = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += block_rows){
int row = rowBase + j;
M_out[col*width + row] = M_in[row*width + col];
}
}
/* function that runs both the naive and the blocking kernels for a given number of threads per block and matrix size */
/* Run both transpose kernels on a random dimx x dimy matrix with the given
threads-per-block, print GPU time and effective bandwidth for each, and
cross-check the two results.
Assumes threads_per_block is a multiple of TILE_DIM and dimx/dimy are
multiples of TILE_DIM -- TODO confirm (main uses 8192 and 64/512/1024). */
void run_kernel(int threads_per_block, int dimx, int dimy){
/* allocate host matrices */
int num_bytes = dimx*dimy*sizeof(double);
double *h_in = (double*)malloc(num_bytes);
double *h_out_block = (double*)malloc(num_bytes);
double *h_out_naive = (double*)malloc(num_bytes);
/* allocate davice matrices */
double *d_in, *d_out_block, *d_out_naive ;
cudaMalloc(&d_in, num_bytes);
cudaMalloc(&d_out_block, num_bytes);
cudaMalloc(&d_out_naive, num_bytes);
/* fill input matrix with random floats */
random_doubles(h_in , dimx*dimy);
/* copy matrices in device memory */
/* NOTE(review): h_out_block / h_out_naive are uninitialized here, so these
two copies upload garbage that is fully overwritten by the kernels --
presumably harmless warm-up copies; could be dropped. */
cudaMemcpy( d_in, h_in, num_bytes, cudaMemcpyHostToDevice );
cudaMemcpy( d_out_block, h_out_block, num_bytes, cudaMemcpyHostToDevice );
cudaMemcpy( d_out_naive, h_out_naive, num_bytes, cudaMemcpyHostToDevice );
/* set threads and blocks grids */
int block_rows = threads_per_block/TILE_DIM;
dim3 grid, block;
block.x = TILE_DIM;
block.y = block_rows;
grid.x = dimx/TILE_DIM;
grid.y = dimy/TILE_DIM;
/* cuda events for timing */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
/* run blocking transpose kernel */
fast_transpose<<< grid, block >>>(d_in, d_out_block, block_rows);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
/* bandwidth: one read + one write per element, bytes -> GB, ms -> s */
float bw = 2 * dimx * dimy * sizeof(double) * 1e-6 / ms;
printf("Fast transpose: \t %lf \t %lf \n", ms, bw);
/* copy the result */
cudaMemcpy( h_out_block, d_out_block, num_bytes, cudaMemcpyDeviceToHost );
cudaEventRecord(start);
/* run naive transpose kernel */
naive_transpose<<< grid, block >>>(d_in, d_out_naive, block_rows) ;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms_naive = 0;
cudaEventElapsedTime(&ms_naive, start, stop);
float bw_naive = 2 * dimx*dimy * sizeof(double) * 1e-6 / ms_naive;
printf("Naive transpose: \t %lf \t %lf \n", ms_naive, bw_naive);
/* copy result */
cudaMemcpy( h_out_naive, d_out_naive, num_bytes, cudaMemcpyDeviceToHost) ;
/* both kernels must agree element-for-element (square case: dimx == dimy) */
equality_test(h_out_naive, h_out_block, dimx);
printf("\n ");
/* deallocate memory */
free(h_in); free(h_out_naive); free(h_out_block);
cudaFree(d_in);
cudaFree(d_out_naive);
cudaFree(d_out_block);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Benchmark driver: time both transpose kernels on an 8192x8192 matrix for
// three block-size configurations and print a small report table.
int main(int argc, char * argv[]){
const int dimx = 8192;
const int dimy = 8192;
printf(" \t \t \t GPU TIME (ms) BANDWIDTH (GB/s) ");
printf("\n\n");
const int threadCounts[3] = {64, 512, 1024};
const char *headings[3] = {
"64 threads per block: \n",
"512 threads per block:\n",
"1024 threads per block:\n"
};
for (int cfg = 0; cfg < 3; ++cfg) {
printf("%s", headings[cfg]);
run_kernel(threadCounts[cfg], dimx, dimy);
}
return 0;
}
|
6,142 | #include "stdio.h"
// Demo kernel: every thread prints its own threadIdx.x via device printf
// (output is flushed when the host synchronizes).
__global__ void cuda_hello(){
printf("Hello World! My thread ID is %d\n\n", threadIdx.x);
}
// Launch one block of 256 greeting threads; the synchronize both flushes the
// device printf buffer and surfaces any launch/execution error.
int main() {
cuda_hello<<<1,256>>>();
cudaError_t status = cudaDeviceSynchronize();
if (status != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(status));
return 0;
}
|
6,143 | #include <iostream>
#include <stdio.h>
#define checkCudaError(status) { \
if(status != cudaSuccess) { \
std::cout << "CUDA Error " << __FILE__ << ", " << __LINE__ \
<< ": " << cudaGetErrorString(status) << "\n"; \
exit(-1); \
} \
}
// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, size).
// One thread per element across a 1-D grid.
__global__ void vecAdd(int * a, int * b, int * c, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// FIX: the `size` parameter was previously ignored (no bounds guard), and
// a per-thread debug printf serialized every launch; both corrected.
if (idx < size)
c[idx] = a[idx] + b[idx];
}
// Smoke test for vecAdd: print device info, add a[i]=i and b[i]=i+1 on the
// GPU (16 blocks x 128 threads, one thread per element), and verify that
// every c[i] equals 2i+1.
int main() {
//checkCudaError(cudaSetDevice(1));
int device;
checkCudaError(cudaGetDevice(&device));
cudaDeviceProp prop;
checkCudaError(cudaGetDeviceProperties(&prop, device));
std::cout << "Device " << device << ": " << prop.name << "\n";
// NOTE(review): multiProcessorCount is the SM count, not the core count.
std::cout << "GPU Cores: " << prop.multiProcessorCount << "\n";
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << "\n";
const int GRID_SIZE = 16;
const int CTA_SIZE = 128;
const int size = GRID_SIZE * CTA_SIZE; // exactly one thread per element
int * a, * b, * c;
int * dev_a, * dev_b, * dev_c;
a = (int *) malloc (sizeof(int) * size);
b = (int *) malloc (sizeof(int) * size);
c = (int *) malloc (sizeof(int) * size);
if(!a || !b || !c) {
std::cout << "Error: out of memory\n";
exit(-1);
}
for(int i = 0; i < size; i++) {
a[i] = i;
b[i] = i+1;
}
memset(c, 0, sizeof(int) * size);
checkCudaError(cudaMalloc(&dev_a, sizeof(int) * size));
checkCudaError(cudaMalloc(&dev_b, sizeof(int) * size));
checkCudaError(cudaMalloc(&dev_c, sizeof(int) * size));
checkCudaError(cudaMemcpy(dev_a, a, sizeof(int) * size, cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(dev_b, b, sizeof(int) * size, cudaMemcpyHostToDevice));
checkCudaError(cudaMemset(dev_c, 0, sizeof(int) * size));
vecAdd<<<GRID_SIZE, CTA_SIZE>>>(dev_a, dev_b, dev_c, size);
// synchronize before the copy so execution errors are reported here
checkCudaError(cudaDeviceSynchronize());
checkCudaError(cudaMemcpy(c, dev_c, sizeof(int) * size, cudaMemcpyDeviceToHost));
for(int i = 0; i < size; i++) {
// std::cout << i << ": " << c[i] << "\n";
// expected: a[i] + b[i] == i + (i+1) == 2i+1
if(c[i] != i*2+1) {
std::cout << "Error: c[" << i << "] != " <<
i*2+1 << "\n";
exit(-1);
}
}
std::cout << "Pass\n";
}
6,144 | /*
* usage: nvcc ./stream_test.cu -o ./stream_legacy
* nvvp ./stream_legacy ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_legacy )
* ... versus ...
* nvcc --default-stream per-thread ./stream_test.cu -o ./stream_per-thread
* nvvp ./stream_per-thread ( or as root:
* nvvp -vm /usr/lib64/jvm/jre-1.8.0/bin/java ./stream_per-thread )
*/
const int N = 1 << 20;
// Worker kernel for the stream-overlap demo: x[i] = sqrt(3.14159^i) for
// i in [0, n), strided by the (single) block's thread count. Deliberately
// compute-heavy so overlap between streams is visible in the profiler.
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x;
for (int i = tid; i < n; i += blockDim.x) {
// FIX: the original used double-precision sqrt/pow for a float
// result, forcing the (much slower) double path on consumer GPUs.
x[i] = sqrtf(powf(3.14159f, (float)i));
}
}
// Stream-overlap demo: one worker kernel per non-default stream plus a dummy
// launch on the legacy default stream. With legacy default-stream semantics
// the dummy launch serializes everything; compiled with
// --default-stream per-thread the workers overlap (compare in nvvp).
int main()
{
const int num_streams = 8;
cudaStream_t streams[num_streams];
float *data[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&streams[i]);
cudaMalloc(&data[i], N * sizeof(float));
// launch one worker kernel per stream
kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
// launch a dummy kernel on the default stream
// (n == 0, so the null data pointer is never dereferenced)
kernel<<<1, 1>>>(0, 0);
}
// cudaDeviceReset waits for all work and releases streams/allocations.
cudaDeviceReset();
return 0;
}
|
6,145 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
//#define B_T
//#define DEBUG
#define L1 1024
#define L2 1024
#define L3 1024
#define TILE_WIDTH 32
/* ========== Multiple block, Multiple threads ========== */
/* ========== Tile multiplication ========== */
/* ========== Can change different matrix length and width ========== */
/* ========== B matrix doesn't transposed ========== */
/* ========== fixed block dimension as 32 * 32 ========== */
/* ========== Max array length: 1024 due to MaxThread per side is 1024 ========== */
__device__ float GetElement(float *matrix, int row, int col, int width);
__device__ void SetElement(float *matrix, int row, int col, int width, float value);
__device__ float *GetSubMatrix(float *matrix, int blockrow, int blockcol, int width);
__global__ void MatMulKernel(float *Ad, float *Bd, float *Cd);
void MatMul(float *A, float *B, float *C);
// End-to-end check for the tiled GPU matmul: build random L1xL2 and L2xL3
// matrices, compute the reference product on the CPU (timed), run MatMul on
// the GPU, and compare every element. Prints PASSED/FAILED.
int main(int argc, char *argv[])
{
int pass = 1;
// calloc so C and AxB start zeroed (the CPU loop accumulates with +=)
float *A = (float *)calloc(L1 * L2, sizeof(float));
float *B = (float *)calloc(L2 * L3, sizeof(float));
float *C = (float *)calloc(L1 * L3, sizeof(float));
float *AxB = (float *)calloc(L1 * L3, sizeof(float));
/* ========== Assign values to array A and B ========== */
// small integer values (0..29) keep float products exact for comparison
for (int i = 0; i < L1; ++i) {
for (int j = 0; j < L2; ++j) {
A[i * L2 + j] = rand() % 30;
}
}
for (int i = 0; i < L2; ++i) {
for (int j = 0; j < L3; ++j) {
B[i * L3 + j] = rand() % 30;
}
}
#ifdef DEBUG
printf("Matrix A:\n");
for (int i = 0; i < L1; i++) {
for (int j = 0; j < L2; j++) {
printf("%3.0f", A[i * L2 + j]);
}
printf("\n");
}
printf("Matrix B:\n");
for (int i = 0; i < L2; i++) {
for (int j = 0; j < L3; j++) {
printf("%3.0f", B[i * L3 + j]);
}
printf("\n");
}
#endif
/* ========== Calculate correct answers by CPU ========== */
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
// B_T path (transposed-B reference) is currently stubbed out
#ifdef B_T
for (int i = 0; i < L2; i++) {
for (int j = 0; j < L3; j++) {
//B_t[j * L2 + i] = B[i * L3 + j];
}
}
#endif
#ifdef DEBUG
// printf("Matrix B_t:\n");
// for (int i = 0; i < L3; i++) {
// for (int j = 0; j < L2; j++) {
// printf("%5.0f", B_t[i * L2 + j]);
// }
// printf("\n");
// }
#endif
for (int i = 0; i < L1; ++i) {
for (int j = 0; j < L3; ++j) {
for (int k = 0; k < L2; ++k) {
#ifdef B_T
//AxB[i * L3 + j] += A[i * L2 + k] * B_t[j * L2 + k];
#endif
#ifndef B_T
AxB[i * L3 + j] += A[i * L2 + k] * B[k * L3 + j];
#endif
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
#ifdef DEBUG
printf("Matrix AxB:\n");
for (int i = 0; i < L1; i++) {
for (int j = 0; j < L3; j++) {
printf("%5.0f", AxB[i * L3 + j]);
}
printf("\n");
}
#endif
/* ========== Calculate answers by GPU ========== */
MatMul((float *)A, (float *)B, (float *)C);
#ifdef DEBUG
printf("Matrix C:\n");
for (int i = 0; i < L1; i++) {
for (int j = 0; j < L3; j++) {
printf("%12.0f", C[i * L3 + j]);
}
printf("\n");
}
#endif
/* ========== Check if answers correct ========== */
// exact float comparison is intentional: inputs are small integers
for (int i = 0; i < L1; ++i) {
for (int j = 0; j < L3; ++j) {
if(AxB[i * L3 + j] != C[i * L3 + j]) {
printf("AxB[%d][%d] = %2.0f C[%d][%d] = %2.0f\n", i, j, AxB[i * L3 + j], i, j, C[i * L3 + j]);
pass = 0;
}
}
}
printf("Test %s\n", (pass)?"PASSED":"FAILED");
free(A);
free(B);
free(C);
free(AxB);
return 0;
}
// Get a matrix element
// Read matrix[row][col] from a row-major matrix with leading dimension width.
__device__ float GetElement(float *matrix, int row, int col, int width)
{
return matrix[row * width + col];
}
// Set a matrix element
// Write `value` to matrix[row][col] of a row-major matrix with leading
// dimension width.
__device__ void SetElement(float *matrix, int row, int col, int width, float value)
{
matrix[row * width + col] = value;
}
// Get the TILE_WIDTHxTILE_WIDTH sub-matrix matsub of matrix that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of matrix
// Pointer to the upper-left element of the (blockrow, blockcol)
// TILE_WIDTH-square sub-matrix of a row-major matrix whose leading
// dimension is width.
__device__ float *GetSubMatrix(float *matrix, int blockrow, int blockcol, int width)
{
return &matrix[(blockrow * TILE_WIDTH) * width + blockcol * TILE_WIDTH];
}
// Matrix multiplication kernel called by MatMul()
// Compute Cd = Ad * Bd (A: L1xL2, B: L2xL3, row-major), one output element
// per thread, iterating over TILE_WIDTH-wide slabs of the shared dimension.
// Works purely from global memory (no shared-memory staging).
__global__ void MatMulKernel(float *Ad, float *Bd, float *Cd)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
float *Cd_sub = GetSubMatrix(Cd, blockRow, blockCol, L3);
int row = threadIdx.y;
int col = threadIdx.x;
int iter = (L2 + TILE_WIDTH - 1) / TILE_WIDTH; // number of K slabs
int residue = L2 % TILE_WIDTH; // valid entries in the last slab (0 = full)
float *Ad_sub, *Bd_sub;
float Aelement, Belement;
// Only threads mapping to a real C element do any work.
if ((blockRow * blockDim.y + row) < L1 && (blockCol * blockDim.x + col) < L3) {
float Cvalue = 0;
for (int m = 0; m < iter; ++m) {
Ad_sub = GetSubMatrix(Ad, blockRow, m, L2);
Bd_sub = GetSubMatrix(Bd, m, blockCol, L3);
for (int k = 0; k < TILE_WIDTH; ++k) {
// skip the padding entries of a partial final slab
if (m == iter - 1 && k >= residue && residue != 0) break;
Aelement = GetElement(Ad_sub, row, k, L2);
Belement = GetElement(Bd_sub, k, col, L3);
Cvalue += Aelement * Belement;
}
// FIX: the original called __syncthreads() here, inside the
// divergent bounds-check branch -- undefined behavior whenever part
// of a block is out of range. No shared memory is used anywhere in
// this kernel, so the barrier was unnecessary and is removed.
}
SetElement(Cd_sub, row, col, L3, Cvalue);
}
}
/* ========== Matrix multiplication - Host code ========== */
// Host driver: copy A (L1xL2) and B (L2xL3) to the device, launch
// MatMulKernel on a grid of TILE_WIDTH-square blocks, print the kernel time,
// and copy the L1xL3 product back into C.
void MatMul(float *A, float *B, float *C)
{
size_t size_1 = L1 * L2 * sizeof(float);
size_t size_2 = L2 * L3 * sizeof(float);
size_t size_3 = L1 * L3 * sizeof(float);
float *Ad, *Bd, *Cd;
/* ========== Allocate and Load A, B to device memory ========== */
cudaMalloc((void **)&Ad, size_1);
cudaMemcpy(Ad, A, size_1, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Bd, size_2);
cudaMemcpy(Bd, B, size_2, cudaMemcpyHostToDevice);
/* ========== Allocate C on the device ========== */
cudaMalloc((void **)&Cd, size_3);
/* ========== Setup the execution configuration ========== */
// ceil-division so partial edge tiles get a block too
int GridDim_x = (L3 + TILE_WIDTH - 1) / TILE_WIDTH;
int GridDim_y = (L1 + TILE_WIDTH - 1) / TILE_WIDTH;
printf("%d, %d\n", GridDim_x, GridDim_y);
dim3 dimGrid(GridDim_x, GridDim_y);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
/* ========== Get start time event ========== */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* ========== Invoke kernel ========== */
MatMulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd);
cudaError_t cuda_err = cudaGetLastError();
if ( cudaSuccess != cuda_err ){
// FIX: the message used to say "before kernel call" although this
// check runs after the launch and reports launch errors.
printf("kernel launch: error = %s\n", cudaGetErrorString (cuda_err));
exit(1) ;
}
/* ========== Get stop time event ========== */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
/* ========== Compute execution time ========== */
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* ========== Read C from device memory ========== */
cudaMemcpy(C, Cd, size_3, cudaMemcpyDeviceToHost);
/* ========== Free device memory ========== */
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
}
6,146 | #include <cmath>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <cassert>
#include <chrono>
#include <random>
#include <string>
#include <ctime>
#include <algorithm>
#include <fcntl.h>
#include <unistd.h>
//constants
#define TRAINING_SET_SIZE 60000
#define TEST_SET_SIZE 10000
#define COLS 28
#define ROWS 28
#define IMG_MAGIC_NUM 0x00000803
#define LABEL_MAGIC_NUM 0x00000801
#define NUM_LABELS 60000
#define NUM_NEURONS 1024
#define EPOCH_SIZE 100
#define BATCH_SIZE 100
//All the integers in the files are stored in the MSB first (high endian) format
//Reverse the byte order of a 32-bit int in place (the MNIST IDX files store
//their header fields big-endian).
void toLittleEndian(int &num){
int swapped = ((num >> 24) & 0xFF)
| ((num >> 8) & 0xFF00)
| ((num << 8) & 0xFF0000)
| ((num << 24) & 0xFF000000);
num = swapped;
}
//Read an MNIST IDX image file: validate the magic number, then allocate and
//fill imgs as imgs[image][row][col] (caller owns all levels) with pixel
//values linearly rescaled from [0,255] to [-1,1]. Aborts via assert on any
//short read or bad magic.
void read_images(const std::string &file_name, float*** (&imgs)){
int fd;
fd = open(file_name.c_str(), O_RDONLY);
assert(fd >= 0);
int rv, magic_num, num_imgs, num_cols, num_rows;
rv = read(fd, &magic_num, 4);
assert(rv == 4);
//change endianess (header fields are big-endian)
toLittleEndian(magic_num);
assert(magic_num == 0x803);
rv = read(fd, &num_imgs, 4);
assert(rv == 4);
//change endianess
toLittleEndian(num_imgs);
rv = read(fd, &num_rows, 4);
assert(rv == 4);
//change endianness
toLittleEndian(num_rows);
rv = read(fd, &num_cols, 4);
assert(rv == 4);
//change endianness
toLittleEndian(num_cols);
imgs = new float**[num_imgs]();
for(int i = 0; i < num_imgs; i++){
imgs[i] = new float*[num_rows]();
//read whole image at once to minimize IO since that takes time
//NOTE(review): runtime-sized stack array is a compiler extension (VLA)
unsigned char tmp_img[num_rows][num_cols];
rv = read(fd, tmp_img, num_rows*num_cols);
assert(rv == num_rows*num_cols);
for(int r = 0; r < num_rows; r++){
imgs[i][r] = new float[num_cols]();
for(int c = 0; c < num_cols; c++){
//map 0..255 -> -1..1
imgs[i][r][c] = double(tmp_img[r][c])/127.5 - 1;
}
}
}
rv = close(fd);
assert(rv == 0);
}
//Read an MNIST IDX label file: validate the magic number, allocate `labels`
//(caller owns it), and fill it with num_labels one-byte class labels.
//Aborts via assert on any short read.
void read_labels(const std::string &file_name, unsigned char* (&labels)){
int fd;
fd = open(file_name.c_str(), O_RDONLY);
assert(fd >= 0);
int magic_num, num_labels;
int rv = read(fd, &magic_num, 4);
assert(rv == 4);
//change endianess (header fields are big-endian)
toLittleEndian(magic_num);
//FIX: the magic number was read but never verified (read_images checks
//its magic); 0x801 is the IDX label-file magic (LABEL_MAGIC_NUM).
assert(magic_num == 0x801);
rv = read(fd, &num_labels, 4);
assert(rv == 4);
//change endianess
toLittleEndian(num_labels);
labels = new unsigned char[num_labels]();
rv = read(fd, labels, num_labels);
//FIX: this read's result was previously ignored, so a truncated file
//silently produced zero-filled labels.
assert(rv == num_labels);
rv = close(fd);
assert(rv == 0);
}
//Allocate and initialize all trainable parameters (caller owns everything):
// ilw / ild : input-layer weights / gradients, [NUM_NEURONS][ROWS][COLS]
// fclw / fcld : fully-connected-layer weights / gradients,
// [NUM_NEURONS/EPOCH_SIZE][NUM_NEURONS]
//Weights are N(0,1) samples scaled by 1/sqrt(fan); gradients start at 0.
//The seed is fixed (8493) so runs are reproducible.
void generateWeights(float*** (&ilw), float*** (&ild), float** (&fclw), float** (&fcld)){
//unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
unsigned seed = 8493;
std::default_random_engine generator(seed);
std::normal_distribution<float> distribution;
ilw = new float**[NUM_NEURONS]();
ild = new float**[NUM_NEURONS]();
fclw = new float*[(int) NUM_NEURONS/EPOCH_SIZE]();
fcld = new float*[(int) NUM_NEURONS/EPOCH_SIZE]();
for(int n = 0; n < NUM_NEURONS; n++){
ilw[n] = new float*[ROWS]();
ild[n] = new float*[ROWS]();
for(int r = 0; r < ROWS; r++){
ilw[n][r] = new float[COLS]();
ild[n][r] = new float[COLS]();
for(int c = 0; c < COLS; c++){
//normal_distribution is unbounded; scale by 1/sqrt(N) to keep
//initial activations small
ilw[n][r][c] = distribution(generator) / sqrt(NUM_NEURONS);
//std::cout << distribution(generator) / sqrt(NUM_NEURONS) << std::endl;
//gradients start at 0
ild[n][r][c] = 0;
}
}
}
for(int i = 0; i < (int) NUM_NEURONS/EPOCH_SIZE; i++){
fclw[i] = new float[NUM_NEURONS]();
fcld[i] = new float[NUM_NEURONS]();
for (int n = 0; n < NUM_NEURONS; n++){
fclw[i][n] = distribution(generator) / sqrt((int) NUM_NEURONS/EPOCH_SIZE);
fcld[i][n] = 0;
}
}
}
//based on softmax from prof. Chiu's examples
//Numerically stable softmax over the (NUM_NEURONS/EPOCH_SIZE)-long vector
//`in`, using the identity softmax(x) == softmax(x - max(x)).
//Returns a newly allocated array; the caller owns it.
float* softmax(float *in){
const int len = (int) NUM_NEURONS/EPOCH_SIZE;
const float shift = *std::max_element(in, in + len);
float* out = new float[len];
float total = 0;
for(int i = 0; i < len; i++){
out[i] = std::exp(in[i] - shift);
total += out[i];
}
//normalize in place so the outputs sum to 1
std::transform(out, out + len, out, [total](float e) {return e/total;});
return out;
}
//Backprop through softmax: given the forward output `out` and the upstream
//gradient `us` (both NUM_NEURONS/EPOCH_SIZE long), returns the downstream
//gradient via the full softmax Jacobian. Newly allocated; caller owns it.
float* softmax_ds(float* out, float* us){
const int len = (int) NUM_NEURONS/EPOCH_SIZE;
float* grad = new float[len]();
for(int i = 0; i < len; i++){
for(int j = 0; j < len; j++){
//Jacobian entry: s_i(1-s_i) on the diagonal, -s_i*s_j off it
float jac = (i == j) ? (out[j]*(1 - out[i])) : (-out[i]*out[j]);
grad[i] += jac * us[j];
}
}
return grad;
}
// SGD step for a dense layer: w -= lr * grad, then clear the gradient.
// One thread per weight; the launch must cover exactly one thread per element.
__global__ void update_dense_weights(float *w1, float *ds1){
//current thread and node num
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// FIX: BATCH_SIZE/1000 was integer division (100/1000 == 0), which made
// the learning rate exactly zero, so weights were never updated.
w1[tid] -= (BATCH_SIZE / 1000.0f) * ds1[tid];
ds1[tid] = 0;
}
int main(int argc, char** argv){
if(argc != 3){
std::cerr << "Wrong number of inputs. Usage: ./parallelNN <images> <labels>" << std::endl;
exit(1);
}
//read training
static float ***training_images;
static unsigned char *training_labels;
read_images(std::string(argv[1]), training_images);
read_labels(std::string(argv[2]), training_labels);
float ***input_layer_w, ***input_layer_ds;
float **fully_connected_layer_w, **fully_connected_layer_ds;
//std::cout << input_layer_w[0][0][0] << std::endl;
generateWeights(input_layer_w, input_layer_ds, fully_connected_layer_w, fully_connected_layer_ds);
//First fully connected layer
float *first_layer = new float[NUM_NEURONS]();
float *first_layer_ds = new float[NUM_NEURONS]();
//Second fully connected layer
float *second_layer = new float[(int)NUM_NEURONS/EPOCH_SIZE]();
float *second_layer_ds = new float[NUM_NEURONS]();
//Softmax layer
float *soft_max_layer = new float[(int)NUM_NEURONS/EPOCH_SIZE]();
float *soft_max_layer_ds = new float[(int)NUM_NEURONS/EPOCH_SIZE]();
//Cross-entropy layer
float* cross_ent_layer = new float[(int)NUM_NEURONS/EPOCH_SIZE]();
//CUDA
float *dense_layer_w1, *dense_layer_ds1, *dense_layer_w2, *dense_layer_ds2;
//place them in contiguoys memory
float *hidden_layer_w1 = new float[NUM_NEURONS*ROWS*COLS]();
float *hidden_layer_ds1 = new float[NUM_NEURONS*ROWS*COLS]();
float *hidden_layer_w2 = new float[NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE)]();
float *hidden_layer_ds2 = new float[NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE)]();
cudaMalloc(&dense_layer_w1, NUM_NEURONS*ROWS*COLS*(sizeof(float)));
cudaMalloc(&dense_layer_ds1, NUM_NEURONS*ROWS*COLS*(sizeof(float)));
cudaMalloc(&dense_layer_w2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)));
cudaMalloc(&dense_layer_ds2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)));
//to generate random number for dropout
std::srand(std::time(0));
for(int e = 0; e < EPOCH_SIZE; e++){
//rounds
for(int j = 0; j < EPOCH_SIZE; j++){
//FORWARD
//initialize values
int correct = 0, total = 0;
//loop through images in batch
for(int i = 0; i < BATCH_SIZE; i++){
for(int k = 0; k < (int) NUM_NEURONS/EPOCH_SIZE; k++){
cross_ent_layer[k] = 0;
}
//current label and img displaced by i (the images already processed)
int current_label = (int) training_labels[EPOCH_SIZE*j + i];
float** current_image = training_images[EPOCH_SIZE*j + i];
for(int n = 0; n < NUM_NEURONS; n++){
float temp_result = 0;
//dropout rate of 0.4%
if(std::rand() % 1000 < 4){
first_layer[n] = 0;
} else{
for(int r = 0; r < ROWS; r++){
for(int c = 0; c < COLS; c++){
//calculate results of the first layer
temp_result += input_layer_w[n][r][c] * current_image[r][c];
}
}
//ReLU
if(temp_result < 0){
first_layer[n] = 0;
} else{
first_layer[n] = temp_result;
}
}
}
//std::cout << "1" << std::endl;
//std::cout << input_layer_w[0][0][0] << std::endl;
for(int k = 0; k < (int) NUM_NEURONS/EPOCH_SIZE; k++){
for(int n = 0; n < NUM_NEURONS; n++){
//second_layer weights are too large/small
second_layer[k] += fully_connected_layer_w[k][n] * first_layer[n];
}
}
soft_max_layer = softmax(second_layer);
if(std::distance(soft_max_layer, std::max_element(soft_max_layer, soft_max_layer+(int) NUM_NEURONS/EPOCH_SIZE)) == current_label){
correct++;
}
total++;
cross_ent_layer[current_label] = -1 / soft_max_layer[current_label];
//BACK-PROPAGATION
soft_max_layer_ds = softmax_ds(soft_max_layer, cross_ent_layer);
for(int k = 0; k < (int) NUM_NEURONS/EPOCH_SIZE; k++){
for(int n = 0; n < NUM_NEURONS; n++){
second_layer_ds[n] = 0;
}
for(int n = 0; n < NUM_NEURONS; n++){
fully_connected_layer_ds[k][n] += ((first_layer[n] * soft_max_layer_ds[k]) / BATCH_SIZE);
second_layer_ds[n] += fully_connected_layer_w[k][n] * soft_max_layer_ds[k];
}
}
for(int n = 0; n < NUM_NEURONS; n++){
for(int r = 0; r < ROWS; r++){
for(int c = 0; c < COLS; c++){
input_layer_ds[n][r][c] += (current_image[r][c] * second_layer_ds[n])/BATCH_SIZE;
}
}
}
//UPDATE WEIGHTS
//copy to contiguous array to copy to CUDA mem
for(int k = 0; k < (int) NUM_NEURONS/EPOCH_SIZE; k++){
for(int n = 0; n < NUM_NEURONS; n++){
hidden_layer_w2[k*NUM_NEURONS + n] = fully_connected_layer_w[k][n] ;
hidden_layer_ds2[k*NUM_NEURONS + n] = fully_connected_layer_ds[k][n];
}
}
cudaMemcpy(dense_layer_w2, hidden_layer_w2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dense_layer_ds2, hidden_layer_ds2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
update_dense_weights<<<(int)NUM_NEURONS/EPOCH_SIZE, NUM_NEURONS>>>(dense_layer_w2, dense_layer_ds2);
//copy back
cudaMemcpy(hidden_layer_w2, dense_layer_w2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(hidden_layer_ds2, dense_layer_ds2, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
for(int k = 0; k < (int) NUM_NEURONS/EPOCH_SIZE; k++){
for(int n = 0; n < NUM_NEURONS; n++){
fully_connected_layer_w[k][n] = hidden_layer_w2[k*NUM_NEURONS + n];
fully_connected_layer_ds[k][n] = hidden_layer_ds2[k*NUM_NEURONS + n];
}
}
/*----------------------*/
for(int n = 0; n < NUM_NEURONS; n++){
for(int r = 0; r < ROWS; r++){
for(int c = 0; c < COLS; c++){
hidden_layer_w1[n*ROWS*COLS + r*ROWS + c] = input_layer_w[n][r][c];
//std::cout << input_layer_ds[n][r][c] << std::endl;
//std::cout << input_layer_w[n][r][c] << std::endl;
hidden_layer_ds1[n*ROWS*COLS + r*ROWS + c] = input_layer_ds[n][r][c];
}
}
}
cudaMemcpy(dense_layer_w1, hidden_layer_w1, NUM_NEURONS*ROWS*COLS*(sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dense_layer_ds1, hidden_layer_ds1, NUM_NEURONS*ROWS*COLS*(sizeof(float)), cudaMemcpyHostToDevice);
update_dense_weights<<<ROWS*COLS, NUM_NEURONS>>>(dense_layer_w1, dense_layer_ds1);
//copy back
cudaMemcpy(hidden_layer_w1, dense_layer_w1, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(hidden_layer_ds1, dense_layer_ds1, NUM_NEURONS* ((int)NUM_NEURONS/EPOCH_SIZE) *(sizeof(float)), cudaMemcpyHostToDevice);
for(int n = 0; n < NUM_NEURONS; n++){
for(int r = 0; r < ROWS; r++){
for(int c = 0; c < COLS; c++){
input_layer_w[n][r][c] = hidden_layer_w1[n*ROWS*COLS + r*ROWS + c];
//std::cout << input_layer_ds[n][r][c] << std::endl;
//std::cout << input_layer_w[n][r][c] << std::endl;
input_layer_ds[n][r][c] = hidden_layer_ds1[n*ROWS*COLS + r*ROWS + c];
}
}
}
}
if(j % 100 == 0){
//std::cout << input_layer_w[0][0][0] << std::endl;
printf("Epoch %d: Round %d: accuracy=%f\n", e, j, correct/total);
}
}
}
return 0;
}
//
|
6,147 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Numerically stable log(exp(a) + exp(b)) for device code.
// Factors out the larger argument so exp() cannot overflow.
// Fix: the original wrote log(1.0+exp(b-a)) on floats, which silently
// promotes to double math in a float kernel; log1pf(expf(x)) stays in
// single precision and is also more accurate than log(1+x) when
// exp(b-a) is tiny.
__device__ float logsumexp(float a, float b)
{
    float hi = (a > b) ? a : b;
    float lo = (a > b) ? b : a;
    return hi + log1pf(expf(lo - hi));
}
/*
__global__ void felsensteinfast(const int alphabet, const int numcols, const int numnodes, const int startnode, const int* left, const int* right, const float* transprobs, const float* freqs, float* data, float* collogliks)
{
__shared__ float liks [8192];
__shared__ int indices[512];
__shared__ int revindices[512];
__shared__ int index = 0;
for(int i = 0 ; i < 512 ; i++)
{
indices[i] = -1;
revindices[i] = -1;
}
__syncthreads();
int col = blockIdx.x*blockDim.x + threadIdx.x; // column
int nodeindex = 0;
int lindex = 0;
int rindex = 0;
int leftindex = 0;
int rightindex = 0;
float m = 0.0;
float bsum = 0.0;
float csum = 0.0;
float v = 0.0;
int alphabetplus1 = alphabet+1;
if(col < numcols)
{
for(int node = startnode ; node < numnodes ; node += 1) // post-order tree traversal, calculate starting at tips and ending at root.
{
nodeindex = threadIdx.x*numnodes*alphabetplus1 + node*alphabetplus1;
lindex = left[node]; // left child
rindex = right[node]; // right child
leftindex = threadIdx.x*numnodes*alphabetplus1 + lindex*alphabetplus1;
rightindex = threadIdx.x*numnodes*alphabetplus1 + rindex*alphabetplus1;
m = 0.0;
int li = indices[leftindex];
if(li == -1)
{
for(int a = 0 ; a < alphabet ; a++)
{
liks[index*al
}
index++;
}
int ri = indices[rightindex];
for(int a = 0 ; a < alphabet ; a++)
{
bsum = 0.0;
csum = 0.0;
for(int d = 0 ; d < alphabet ; d++)
{
bsum += transprobs[lindex*alphabet*alphabet + a*alphabet + d]*liks[leftindex+d];
csum += transprobs[rindex*alphabet*alphabet + a*alphabet + d]*liks[rightindex+d];
}
v = bsum*csum;
liks[nodeindex+a] = v;
if(v > m)
{
m = v;
}
}
for(int a = 0 ; a < alphabet ; a++)
{
liks[nodeindex+a] /= m;
}
liks[nodeindex+alphabet] = log(m) + liks[leftindex+alphabet] + liks[rightindex+alphabet];
}
float logm = liks[threadIdx.x*numnodes*alphabetplus1 + (numnodes-1)*alphabetplus1 + alphabet];
collogliks[col] = 0.0;
for(int a = 0 ; a < alphabet ; a++)
{
collogliks[col] += freqs[a]*liks[threadIdx.x*numnodes*alphabetplus1 + (numnodes-1)*alphabetplus1 + a];
}
collogliks[col] = log(collogliks[col]) + logm;
}
}*/
// Initialise per-pair leaf likelihoods for a paired-state alphabet: state a
// decomposes as (a / 4, a % 4) over two 4-letter sub-alphabets, and the pair
// likelihood is the product of the two tip partials selected via x/y.
// Slot [alphabet] holds the running log scale factor, started at zero.
// NOTE(review): the a/4, a%4 decomposition presumes alphabet == 16 — confirm
// against callers.
__global__ void felsensteinleaves(const int alphabet, const int numcols, const int numpairs, const int* x, const int* y, float* data, float* res)
{
    const int pair = blockIdx.x * blockDim.x + threadIdx.x;
    if (pair >= numpairs)
    {
        return;
    }
    const int stride = alphabet + 1;
    const int xbase = x[pair] * 4;
    const int ybase = y[pair] * 4;
    for (int a = 0; a < alphabet; a++)
    {
        res[pair * stride + a] = data[xbase + (a / 4)] * data[ybase + (a % 4)];
    }
    res[pair * stride + alphabet] = 0.0f;
}
// 16-state specialisation of felsensteinleaves (row stride 17: 16 states
// plus one log-scale slot, zeroed here). Each thread fills the leaf row for
// one pair from the 4-letter tip partials selected via x/y.
// numcols is unused here but kept for signature parity with the generic
// kernel. Fix: removed the dead locals `startcol` (never used) and the
// function-scope loop counter.
__global__ void felsensteinleaves16(const int numcols, const int numpairs, const int* x, const int* y, float* data, float* res)
{
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread < numpairs)
    {
        int xindex = x[thread]*4;
        int yindex = y[thread]*4;
        #pragma unroll
        for(int a = 0 ; a < 16 ; a++)
        {
            res[thread*17+a] = data[xindex + (a / 4)]*data[yindex + (a % 4)];
        }
        res[thread*17+16] = 0.0;
    }
}
// Seed a log-weighted accumulator: dest[i] = logw + src[i] for each pair.
// Used before logsumexparr folds further weighted terms into dest.
__global__ void storearr(const int numpairs, const float logw, float* dest, float* src)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numpairs)
    {
        return;
    }
    dest[i] = logw + src[i];
}
// In-place log-space accumulation over pairs:
// dest[i] = logsumexp(logw1 + dest[i], logw2 + src[i]),
// i.e. add a weighted likelihood term to a running log-sum without ever
// leaving log space.
__global__ void logsumexparr(const int numpairs, const float logw1, float* dest, const float logw2, float* src)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numpairs)
    {
        return;
    }
    const float accumulated = logw1 + dest[i];
    const float incoming = logw2 + src[i];
    dest[i] = logsumexp(accumulated, incoming);
}
// One Felsenstein pruning step for the 16-state alphabet (row stride 17:
// 16 scaled conditional likelihoods plus one accumulated log scale factor
// in slot 16). For each pair, combines the left/right child rows (resolved
// through leftindices/rightindices) with the two branch transition matrices
// (16x16 blocks of transprobs selected by lindex/rindex), rescales the row
// by its maximum to avoid float underflow, and folds log(max) plus both
// children's log scales into slot 16.
// Bug fix: the original dereferenced leftindices[thread] and
// rightindices[thread] BEFORE the `thread < numpairs` bounds check — an
// out-of-bounds global read for tail threads whenever the grid overshoots
// numpairs. All per-thread lookups now happen inside the guard.
__global__ void felsensteinhelper16(const int numcols, const int numpairs, const int numnodes, const int node, const int lindex, const int rindex, const int* leftindices, const int* rightindices, const float* transprobs, const float* left, const float* right, float* res)
{
    int thread = blockIdx.x*blockDim.x + threadIdx.x;
    if (thread < numpairs)
    {
        int leftindex = leftindices[thread]*17;
        int rightindex = rightindices[thread]*17;
        int nodeindex = thread*17;
        int lefttransstride = lindex*16*16;
        int righttransstride = rindex*16*16;
        float m = 0.0;
        #pragma unroll
        for(int a = 0 ; a < 16 ; a++)
        {
            float bsum = 0.0;
            float csum = 0.0;
            int lefttrans = lefttransstride + a*16;
            int righttrans = righttransstride + a*16;
            #pragma unroll
            for(int d = 0 ; d < 16 ; d++)
            {
                bsum += transprobs[lefttrans+d]*left[leftindex+d];
                csum += transprobs[righttrans+d]*right[rightindex+d];
            }
            float v = bsum*csum; // conditional likelihood of state a
            res[nodeindex+a] = v;
            if(v > m)
            {
                m = v; // track row maximum for rescaling
            }
        }
        // Rescale by the row maximum to keep values in float range.
        #pragma unroll
        for(int a = 0 ; a < 16 ; a++)
        {
            res[nodeindex+a] /= m;
        }
        // Accumulate log scale: this node's log(max) plus both children's.
        res[nodeindex+16] = log(m) + left[leftindex+16] + right[rightindex+16];
    }
}
// Leaf initialisation for the rate-category variant: each thread handles one
// (rate category, pair) combination; the pair index repeats every numpairs
// threads while the category advances. Row stride is 17 (16 states plus a
// log-scale slot, zeroed here); the leaf row itself is identical for every
// category of the same pair.
__global__ void felsensteinleaves16paired(const int numcols, int numratecats, const int numpairs, const int* x, const int* y, float* data, float* res)
{
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread >= numratecats * numpairs)
    {
        return;
    }
    const int pair = thread % numpairs;
    const int xbase = x[pair] * 4;
    const int ybase = y[pair] * 4;
    float* row = res + thread * 17;
    #pragma unroll
    for (int a = 0; a < 16; a++)
    {
        row[a] = data[xbase + (a / 4)] * data[ybase + (a % 4)];
    }
    row[16] = 0.0f;
}
// Rate-category variant of felsensteinhelper16: one thread per
// (rate category, pair). Child rows live in per-category banks of
// leftnumpairs/rightnumpairs rows (stride 17), and the 16x16 transition
// matrices are selected per category and per branch (lindex/rindex) out of
// transprobs. As in the unpaired kernel, each output row is rescaled by its
// maximum and log(max) is accumulated into slot 16 together with both
// children's log scales.
__global__ void felsensteinhelper16paired(const int numcols, int numratecats, const int numpairs, const int leftnumpairs, const int rightnumpairs, const int numnodes, const int node, const int lindex, const int rindex, const int* leftindices, const int* rightindices, const float* transprobs, const float* left, const float* right, float* res)
{
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread >= numratecats * numpairs)
    {
        return;
    }
    const int ratecat = thread / numpairs;
    const int pair = thread % numpairs;
    const float* leftrow = left + ratecat * leftnumpairs * 17 + leftindices[pair] * 17;
    const float* rightrow = right + ratecat * rightnumpairs * 17 + rightindices[pair] * 17;
    const float* lefttransbase = transprobs + ratecat * numnodes * 16 * 16 + lindex * 16 * 16;
    const float* righttransbase = transprobs + ratecat * numnodes * 16 * 16 + rindex * 16 * 16;
    float* row = res + thread * 17;
    float rowmax = 0.0f;
    #pragma unroll
    for (int a = 0; a < 16; a++)
    {
        const float* ltrans = lefttransbase + a * 16;
        const float* rtrans = righttransbase + a * 16;
        float bsum = 0.0f;
        float csum = 0.0f;
        #pragma unroll
        for (int d = 0; d < 16; d++)
        {
            bsum += ltrans[d] * leftrow[d];
            csum += rtrans[d] * rightrow[d];
        }
        const float v = bsum * csum;
        row[a] = v;
        if (v > rowmax)
        {
            rowmax = v;
        }
    }
    // Rescale by the row maximum to avoid underflow deeper in the tree.
    #pragma unroll
    for (int a = 0; a < 16; a++)
    {
        row[a] /= rowmax;
    }
    row[16] = log(rowmax) + leftrow[16] + rightrow[16];
}
// Root-level reduction, rate-category variant: one thread per
// (category, pair). Dots the root's scaled state likelihoods with that
// category's equilibrium frequencies, takes the log, and adds back the
// accumulated log scale factor stored in slot [alphabet].
__global__ void sumfinalpaired(const int alphabet, const int numratecats, const int numpairs, float* freqs, float* rootliks, float* finallogliks)
{
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread >= numratecats * numpairs)
    {
        return;
    }
    const int stride = alphabet + 1;
    const float* row = rootliks + thread * stride;
    const float* catfreqs = freqs + (thread / numpairs) * alphabet;
    float total = 0.0f;
    for (int a = 0; a < alphabet; a++)
    {
        total += catfreqs[a] * row[a];
    }
    finallogliks[thread] = log(total) + row[alphabet];
}
// Collapse the per-rate-category pair log-likelihoods into one mixture
// log-likelihood per pair: logsumexp over categories of
// (log category weight + category log-likelihood).
__global__ void sumcats(const int numratecats, const int numpairs, float* logweights, float* logliks, float* logfinalliks)
{
    const int pair = blockIdx.x * blockDim.x + threadIdx.x;
    if (pair >= numpairs)
    {
        return;
    }
    float acc = logweights[0] + logliks[pair];
    for (int r = 1; r < numratecats; r++)
    {
        acc = logsumexp(acc, logweights[r] + logliks[r * numpairs + pair]);
    }
    logfinalliks[pair] = acc;
}
// One Felsenstein pruning step for a general alphabet, parallel over pairs.
// Buffer layout per pair is alphabet+1 floats: `alphabet` scaled conditional
// likelihoods followed by one accumulated log scaling factor.
// left/right are the child-node likelihood buffers (rows resolved through
// leftindices/rightindices), transprobs holds one alphabet x alphabet
// transition matrix per branch (selected by lindex/rindex). Each output row
// is rescaled by its maximum m to avoid float underflow, and log(m) is
// accumulated into the final slot together with both children's log scales.
// NOTE(review): unlike felsensteinhelper16, the index arrays here are only
// read inside the bounds check, so tail threads are safe.
__global__ void felsensteinhelper(const int alphabet, const int numcols, const int numpairs, const int numnodes, const int node, const int lindex, const int rindex, const int* leftindices, const int* rightindices, const float* transprobs, const float* left, const float* right, float* res)
{
int alphabetplus1 = alphabet+1; // row stride: states + log-scale slot
float m = 0.0; // running row maximum, used for rescaling
int thread = blockIdx.x*blockDim.x + threadIdx.x;
float bsum = 0.0;
float csum = 0.0;
int lefttrans = 0;
int righttrans = 0;
float v = 0.0;
int a = 0;
int d = 0;
int leftindex = 0;
int rightindex = 0;
int nodeindex = 0;
if (thread < numpairs)
{
// Resolve the child rows for this pair (indirected via the index arrays).
leftindex = leftindices[thread]*alphabetplus1;
rightindex = rightindices[thread]*alphabetplus1;
nodeindex = thread*alphabetplus1;
for(a = 0 ; a < alphabet ; a++)
{
bsum = 0.0;
csum = 0.0;
// Row a of the left/right branch transition matrices.
lefttrans = lindex*alphabet*alphabet + a*alphabet;
righttrans = rindex*alphabet*alphabet + a*alphabet;
for(d = 0 ; d < alphabet ; d++)
{
bsum += transprobs[lefttrans+d]*left[leftindex+d];
csum += transprobs[righttrans+d]*right[rightindex+d];
}
// Conditional likelihood of state a = product of the two child sums.
v = bsum*csum;
res[nodeindex+a] = v;
if(v > m)
{
m = v;
}
}
// Rescale the row by its maximum to keep values in float range.
for(a = 0 ; a < alphabet ; a++)
{
res[nodeindex+a] /= m;
}
// Fold log(m) into the running log scale, summed over both children.
res[nodeindex+alphabet] = log(m) + left[leftindex+alphabet] + right[rightindex+alphabet];
}
}
// Full Felsenstein pruning in a single kernel: each thread processes one
// (rate category, pair) combination from tips to root. A "pair" packs two
// alignment columns (col1, col2) into one paired-site state, state a
// decomposing as (a / 4, a % 4) over two 4-letter sub-alphabets
// (NOTE(review): this decomposition presumes alphabet == 16 — confirm).
// Leaves [0, numleaves) are initialised from the tip partials in `data`;
// internal nodes [numleaves, numnodes) are combined in array order, which
// the caller must guarantee is a valid post-order (children before
// parents). Per node, rows are rescaled by their maximum with log(max)
// accumulated in the extra slot; at the root (numnodes-1) the scaled
// likelihoods are dotted with the category's frequencies and the column
// log-likelihood written to collogliks[thread].
__global__ void felsensteindinucleotide(const int alphabet, const int numcols, const int numcategories, const int numpairs, const int numnodes, const int numleaves, const int* left, const int* right, const float* transprobs, const float* freqs, int* pairs, float* data, float* liks, float* collogliks)
{
int thread = blockIdx.x*blockDim.x + threadIdx.x;
int nodeindex = 0;
int lindex = 0;
int rindex = 0;
int leftindex = 0;
int rightindex = 0;
float m = 0.0; // per-node row maximum for rescaling
float bsum = 0.0;
float csum = 0.0;
float v = 0.0;
int alphabetplus1 = alphabet+1; // row stride: states + log-scale slot
int node = 0;
int a = 0;
int e = 0;
int f = 0;
if(thread < numpairs*numcategories)
{
int cat = thread / numpairs; // rate category of this thread
int col = thread % numpairs; // pair slot of this thread
int pair = pairs[col]; // packed (col1, col2) pair id
int col1 = pair / numcols;
int col2 = pair % numcols;
// Base offset of this (category, pair)'s node-likelihood bank.
int stride = cat*numpairs*numnodes*alphabetplus1 + col*numnodes*alphabetplus1;
// Leaf initialisation: product of the two columns' tip partials.
for(node = 0 ; node < numleaves ; node += 1)
{
nodeindex = stride + node*alphabetplus1;
for(a = 0 ; a < alphabet ; a++)
{
e = a / 4;
f = a % 4;
liks[nodeindex+a] = data[node*numcols*4 + col1*4 + e]*data[node*numcols*4 + col2*4+f];
}
liks[nodeindex+alphabet] = 0.0; // no scaling applied at the tips
}
// Per-category bank of branch transition matrices.
int stridetrans = cat*numnodes*alphabet*alphabet;
// Internal nodes, children before parents.
for(node = numleaves ; node < numnodes ; node += 1)
{
nodeindex = stride + node*alphabetplus1;
lindex = left[node];
rindex = right[node];
leftindex = stride + lindex*alphabetplus1;
rightindex = stride + rindex*alphabetplus1;
m = 0.0;
for(a = 0 ; a < alphabet ; a++)
{
bsum = 0.0;
csum = 0.0;
for(int d = 0 ; d < alphabet ; d++)
{
bsum += transprobs[stridetrans + lindex*alphabet*alphabet + a*alphabet + d]*liks[leftindex+d];
csum += transprobs[stridetrans + rindex*alphabet*alphabet + a*alphabet + d]*liks[rightindex+d];
}
v = bsum*csum;
liks[nodeindex+a] = v;
if(v > m)
{
m = v;
}
}
// Rescale the row to avoid float underflow deeper in the tree.
for(a = 0 ; a < alphabet ; a++)
{
liks[nodeindex+a] /= m;
}
liks[nodeindex+alphabet] = log(m) + liks[leftindex+alphabet] + liks[rightindex+alphabet];
}
// Root reduction: dot with category frequencies, restore the log scale.
float logm = liks[stride + (numnodes-1)*alphabetplus1 + alphabet];
collogliks[thread] = 0.0;
for(a = 0 ; a < alphabet ; a++)
{
collogliks[thread] += freqs[cat*alphabet+a]*liks[stride + (numnodes-1)*alphabetplus1 + a];
}
collogliks[thread] = log(collogliks[thread]) + logm;
}
}
// Root-level reduction for the single-category case: dot the root's scaled
// state likelihoods with the equilibrium frequencies, take the log, and add
// back the accumulated log scale factor stored in slot [alphabet].
__global__ void sumfinal(const int alphabet, const int numpairs, float* freqs, float* rootliks, float* finallogliks)
{
    const int pair = blockIdx.x * blockDim.x + threadIdx.x;
    if (pair >= numpairs)
    {
        return;
    }
    const int stride = alphabet + 1;
    const float* row = rootliks + pair * stride;
    float total = 0.0f;
    for (int a = 0; a < alphabet; a++)
    {
        total += freqs[a] * row[a];
    }
    finallogliks[pair] = log(total) + row[alphabet];
}
// Collapse per-category column log-likelihoods into a single mixture
// log-likelihood per column: logsumexp over categories of
// (log category prior + column log-likelihood).
__global__ void sumcategories(const int numcategories, const int numcols, const float* catlogprobs, const float* collogliks, float* finallogliks)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= numcols)
    {
        return;
    }
    float acc = catlogprobs[0] + collogliks[col];
    for (int cat = 1; cat < numcategories; cat++)
    {
        acc = logsumexp(acc, catlogprobs[cat] + collogliks[cat * numcols + col]);
    }
    finallogliks[col] = acc;
}
// Felsenstein pruning over a whole tree with one thread per column.
// liks holds, per column, numnodes rows of alphabet+1 floats (scaled
// conditional likelihoods plus an accumulated log scale factor). Nodes
// [startnode, numnodes) are processed in array order, which the caller must
// guarantee is a valid post-order (children before parents); rows below
// startnode are assumed pre-filled (tips). Each node's row is rescaled by
// its maximum to avoid underflow, and at the root (numnodes-1) the scaled
// likelihoods are dotted with freqs to produce collogliks[col].
__global__ void felsensteinfast(const int alphabet, const int numcols, const int numnodes, const int startnode, const int* left, const int* right, const float* transprobs, const float* freqs, float* liks, float* collogliks)
{
int col = blockIdx.x*blockDim.x + threadIdx.x; // column
int nodeindex = 0;
int lindex = 0;
int rindex = 0;
int leftindex = 0;
int rightindex = 0;
float m = 0.0; // per-node row maximum for rescaling
float bsum = 0.0;
float csum = 0.0;
float v = 0.0;
int alphabetplus1 = alphabet+1; // row stride: states + log-scale slot
if(col < numcols)
{
for(int node = startnode ; node < numnodes ; node += 1) // post-order tree traversal, calculate starting at tips and ending at root.
{
nodeindex = col*numnodes*alphabetplus1 + node*alphabetplus1;
lindex = left[node]; // left child
rindex = right[node]; // right child
leftindex = col*numnodes*alphabetplus1 + lindex*alphabetplus1;
rightindex = col*numnodes*alphabetplus1 + rindex*alphabetplus1;
m = 0.0;
for(int a = 0 ; a < alphabet ; a++)
{
bsum = 0.0;
csum = 0.0;
for(int d = 0 ; d < alphabet ; d++)
{
bsum += transprobs[lindex*alphabet*alphabet + a*alphabet + d]*liks[leftindex+d];
csum += transprobs[rindex*alphabet*alphabet + a*alphabet + d]*liks[rightindex+d];
}
// Conditional likelihood of state a = product of the two child sums.
v = bsum*csum;
liks[nodeindex+a] = v;
if(v > m)
{
m = v;
}
}
// Rescale the row by its maximum to keep values in float range.
for(int a = 0 ; a < alphabet ; a++)
{
liks[nodeindex+a] /= m;
}
// Accumulate log scale: this node's log(max) plus both children's.
liks[nodeindex+alphabet] = log(m) + liks[leftindex+alphabet] + liks[rightindex+alphabet];
}
// Root reduction: dot with equilibrium frequencies, restore the log scale.
float logm = liks[(col*numnodes*alphabetplus1) + (numnodes-1)*alphabetplus1 + alphabet];
collogliks[col] = 0.0;
for(int a = 0 ; a < alphabet ; a++)
{
collogliks[col] += freqs[a]*liks[(col*numnodes*alphabetplus1) + (numnodes-1)*alphabetplus1 + a];
}
collogliks[col] = log(collogliks[col]) + logm;
}
}
/*
__global__ void felsenstein(int numcols, int numnodes, int* left, int* right, float* logtransprobs, float* logfreqs, float* logliks, float* collogliks)
{
int col = blockIdx.x*blockDim.x + threadIdx.x; // column
if(col < numcols)
{
for(int node = 0 ; node < numnodes ; node += 1) // post-order tree traversal, calculate starting at tips and ending at root.
{
int lognodeindex = col*numnodes*alphabet + node*alphabet;
int lindex = left[node]; // left child
int rindex = right[node]; // right child
if(lindex == -1 && rindex == -1) // if 'node' is leaf node
{
}
else // if 'node' is internal node
{
int logleftindex = col*numnodes*alphabet + lindex*alphabet;
int logrightindex = col*numnodes*alphabet + rindex*alphabet;
for(int a = 0 ; a < alphabet ; a++)
{
float bsum = -1e10;
float csum = -1e10;
for(int d = 0 ; d < alphabet ; d++)
{
int transindex = node*alphabet*alphabet + a*alphabet + d;
float logtransprob = logtransprobs[transindex]; // transition probability
bsum = logsumexp(bsum, logtransprob+logliks[logleftindex+d]);
csum = logsumexp(csum, logtransprob+logliks[logrightindex+d]);
}
logliks[lognodeindex+a] = bsum+csum;
}
}
}
collogliks[col] = -1e10;
for(int a = 0 ; a < alphabet ; a++)
{
collogliks[col] = logsumexp(collogliks[col], logfreqs[a] + logliks[col*numnodes*alphabet + (numnodes-1)*alphabet + a]);
}
}
}
float randfloat()
{
return ((float)rand()/(float)(RAND_MAX)) * 1.0;
}
int main(void)
{
int code = 0;
int numcols = 10000;
int numnodes = 250;
int *left, *right, *d_left, *d_right;
left = (int*)malloc(numnodes*sizeof(int));
right = (int*)malloc(numnodes*sizeof(int));
for(int node = 0 ; node < numnodes ; node += 1)
{
//left[node] = rand() % numnodes;
//right[node] = rand() % numnodes;
}
code = cudaMalloc(&d_left, numnodes*sizeof(int));
printf("A %d\n", code);
code = cudaMalloc(&d_right, numnodes*sizeof(int));
printf("B %d\n", code);
code = cudaMemcpy(d_left, left, numnodes*sizeof(int), cudaMemcpyHostToDevice);
printf("C %d\n", code);
code = cudaMemcpy(d_right, right, numnodes*sizeof(int), cudaMemcpyHostToDevice);
printf("D %d\n", code);
float *logtransprobs, *d_logtransprobs;
logtransprobs = (float*)malloc(numnodes*alphabet*alphabet*sizeof(float));
for(int node = 0 ; node < numnodes ; node += 1)
{
for(int a = 0 ; a < alphabet ; a++)
{
float sum = 0.0;
for(int b = 0 ; b < alphabet ; b++)
{
logtransprobs[node*alphabet*alphabet+a*alphabet+b] = randfloat();
sum += logtransprobs[node*alphabet*alphabet+a*alphabet+b];
}
for(int b = 0 ; b < alphabet ; b++)
{
logtransprobs[node*alphabet*alphabet+a*alphabet+b] = log(logtransprobs[node*alphabet*alphabet+a*alphabet+b]/sum);
}
}
}
float *logfreqs, *d_logfreqs;
logfreqs = (float*)malloc(alphabet*sizeof(float));
for(int a = 0 ; a < alphabet ; a++)
{
logfreqs[a] = log(randfloat());
}
cudaMalloc(&d_logfreqs, alphabet*sizeof(float));
cudaMemcpy(d_logfreqs, logfreqs, alphabet*sizeof(float), cudaMemcpyHostToDevice);
code = cudaMalloc(&d_logtransprobs, numnodes*alphabet*alphabet*sizeof(float));
printf("E %d\n", code);
code = cudaMemcpy(d_logtransprobs, logtransprobs, numnodes*alphabet*alphabet*sizeof(float), cudaMemcpyHostToDevice);
printf("F %d\n", code);
float *logliks, *d_logliks;
logliks = (float*)malloc(numcols*numnodes*alphabet*sizeof(float));
for(int col = 0 ; col < numcols ; col += 1)
{
for(int node = 0 ; node < numnodes ; node += 1)
{
int lognodeindex = col*numnodes*alphabet + node*alphabet;
for(int a = 0 ; a < alphabet ; a += 1)
{
logliks[lognodeindex+a] = -1e10;
}
logliks[lognodeindex + (rand() % alphabet)] = 0.0;
}
}
cudaMalloc(&d_logliks, numcols*numnodes*alphabet*sizeof(float));
cudaMemcpy(d_logliks, logliks, numcols*numnodes*alphabet*sizeof(float), cudaMemcpyHostToDevice);
float *collogliks, *d_collogliks;
collogliks = (float*)malloc(numcols*sizeof(float));
for(int col = 0 ; col < numcols ; col++)
{
collogliks[col] = 20.0;
}
cudaMalloc(&d_collogliks, numcols*sizeof(float));
cudaMemcpy(d_collogliks, collogliks, numcols*sizeof(float), cudaMemcpyHostToDevice);
felsenstein<<<(numcols+255)/256, 256>>>(numcols, numnodes, d_left, d_right, d_logtransprobs, d_logfreqs, d_logliks, d_collogliks);
code = cudaMemcpy(collogliks, d_collogliks, numcols*sizeof(float), cudaMemcpyDeviceToHost);
printf("finished %d\n", code);
for(int col = 0 ; col < numcols ; col++)
{
printf("%d\t%lf\n",col,collogliks[col]);
}
cudaFree(d_left);
cudaFree(d_right);
cudaFree(d_logtransprobs);
cudaFree(d_logliks);
cudaFree(d_collogliks);
free(left);
free(right);
free(logtransprobs);
free(logliks);
free(collogliks);
return 0;
}*/
|
6,148 | /*
Course - "Разработка приложений на CUDA" (CUDA Application Development)
Task 1:
Выделить на GPU массив arr из 10^9 элементов типа
float и инициализировать его с помощью ядра следующим образом:
arr[i] = sin((i%360)*Pi/180). Скопировать массив в память центрального
процессора и посчитать ошибку err = sum_i(abs(sin((i%360)*Pi/180)
- arr[i]))/10^9. Провести исследование зависимости результата от использования
функций: sin, sinf, __sin. Объяснить результат. Проверить результат
при использовании массива типа double.
Written by Pavel Santaev
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
typedef double arrType;
// Fill a[0..len) with sin of (i % 360) degrees using the low-precision
// hardware intrinsic __sinf (the precision gap versus sin/sinf is exactly
// what this exercise measures — __sinf computes in single precision even
// when arrType is double).
// Fix: the loop counter was `unsigned int` while len is size_t; for the
// task's target of 10^9+ elements the index arithmetic could wrap. A
// size_t grid-stride loop is safe for any len and any launch configuration.
__global__ void calcSin(arrType * a, size_t len){
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < len; i += stride){
        double value = ((arrType)(i % 360)) * M_PI / 180;
        a[i] = __sinf(value);
    }
}
// Mean absolute error between arr[i] and the double-precision reference
// sin((i % 360) * pi / 180), averaged over len elements.
// Fixes: use fabs — the original called abs, which is the *integer*
// overload in C and in pre-C++11 headers and silently truncates each
// per-element error toward zero; index with size_t so lengths above
// INT_MAX do not overflow the int counter; guard len == 0 to avoid a
// 0/0 NaN.
double calcErr(arrType * arr, size_t len){
    if (len == 0){
        return 0.0;
    }
    double sum = 0;
    for (size_t i = 0; i < len; i++){
        sum += fabs(sin((i % 360) * M_PI / 180) - arr[i]);
    }
    return sum / len;
}
// Driver for the sin-precision exercise: allocate N elements on the GPU,
// fill them with calcSin, copy back, and report the mean absolute error
// versus double-precision sin.
// Fixes versus the original:
//  * cudaGetErrorString (char*) was printed with %d — undefined behavior;
//  * size_t values were printed with %d/%u — now %zu;
//  * deviceID was hard-coded to 1, which fails on single-GPU machines —
//    now falls back to device 0 when only one device exists;
//  * "Device %d" printed the literal 0 instead of the selected device;
//  * cudaMalloc/malloc results are checked and the kernel launch is
//    verified with cudaGetLastError.
int main(){
    size_t N = 1000 * 1000 * 100;
    size_t size = sizeof(arrType) * N;
    arrType * ptr;
    cudaError_t error;
    int deviceCount = 0;
    error = cudaGetDeviceCount(&deviceCount);
    if (cudaSuccess != error){
        printf("Error in cudaGetDeviceCount: %s\n", cudaGetErrorString(error));
        return 0;
    }
    printf("cudaGetDeviceCount = %x\n", deviceCount);
    // Prefer device 1 (historical choice of this exercise) but fall back to
    // device 0 so the program still runs on single-GPU machines.
    int deviceID = (deviceCount > 1) ? 1 : 0;
    cudaDeviceProp devProp;
    error = cudaGetDeviceProperties(&devProp, deviceID);
    if (cudaSuccess != error){
        printf("Error in cudaGetDeviceProperties: %s\n", cudaGetErrorString(error));
        return 0;
    }
    printf ( "Device %d\n", deviceID );
    printf ( "Compute capability : %d.%d\n", devProp.major, devProp.minor );
    printf ( "Name : %s\n", devProp.name );
    printf ( "Total Global Memory : %zu\n", devProp.totalGlobalMem );
    printf ( "Shared memory per block: %zu\n", devProp.sharedMemPerBlock );
    printf ( "Registers per block : %d\n", devProp.regsPerBlock );
    printf ( "Warp size : %d\n", devProp.warpSize );
    printf ( "Max threads per block : %d\n", devProp.maxThreadsPerBlock );
    printf ( "Total constant memory : %zu\n", devProp.totalConstMem );
    printf ( "Max Grid Size : %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
    printf ( "Max Threads Dim : %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
    cudaSetDevice(deviceID);
    printf("sizeof size_t %zu \n", sizeof(size_t));
    printf("sizeof type %zu \n", sizeof(arrType));
    printf("allocating %zu memory\n", size);
    error = cudaMalloc(&ptr, size);
    if (cudaSuccess != error){
        printf("Error in cudaMalloc: %s\n", cudaGetErrorString(error));
        return 0;
    }
    dim3 threads = dim3(devProp.maxThreadsPerBlock, 1);
    dim3 blocks = dim3(128, 1);
    calcSin<<<blocks, threads>>>(ptr, N);
    error = cudaGetLastError(); // launch-configuration errors surface here
    if (cudaSuccess != error){
        printf("Error launching calcSin: %s\n", cudaGetErrorString(error));
        cudaFree(ptr);
        return 0;
    }
    arrType * hostPtr = (arrType *)malloc(size);
    if (NULL == hostPtr){
        printf("Error: host allocation failed\n");
        cudaFree(ptr);
        return 0;
    }
    // Blocking copy also synchronizes with the kernel on the default stream.
    cudaMemcpy(hostPtr, ptr, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++){
        printf("%f ", hostPtr[i]);
    }
    printf("\nerror = %0.10f ", calcErr(hostPtr, N));
    cudaFree(ptr);
    free(hostPtr);
    printf("\nfinished\n");
    return 0;
}
|
6,149 | #ifndef picket_fence_cuda
#define picket_fence_cuda
#pragma once
#include <cuda_runtime.h>
#include <math.h>
//////// kernel version ///////////////////////////////////////////
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
// Computes the IR-band Rosseland mean opacity of the local gas from the
// Freedman et al. (2014) analytic fit and its published coefficients.
// Input:
//   Tin - local gas temperature [K]
//   Pin - local gas pressure [Pa] (converted below to dyne cm-2, the fit's unit)
//   met - local metallicity [M/H] (log10 relative to solar, solar [M/H] = 0.0)
// Output (by reference):
//   k_IR - IR band Rosseland mean opacity [m2 kg-1]
__device__ void kernel_k_Ross_Freedman(double Tin, double Pin, double met, double &k_IR) {
const double pi = atan((double)(1)) * 4;
const double onedivpi = 1.0 / pi;
// Coefficient parameters for the Freedman et al. (2014) table fit.
double c1 = 10.602;
double c2 = 2.882;
double c3 = 6.09e-15;
double c4 = 2.954;
double c5 = -2.526;
double c6 = 0.843;
double c7 = -5.490;
// The *_l coefficients apply for T <= 800 K, *_h above (c13 is shared).
double c8_l = -14.051, c8_h = 82.241;
double c9_l = 3.055, c9_h = -55.456;
double c10_l = 0.024, c10_h = 8.754;
double c11_l = 1.877, c11_h = 0.7048;
double c12_l = -0.445, c12_h = -0.0414;
double c13_l = 0.8321, c13_h = 0.8321;
// work variables
double k_lowP;
double k_hiP;
double T;
double P;
double Tl10;
double Pl10;
// start operations
T = Tin;
P = Pin * ((double)10.0); // Convert Pa to dyne cm-2 (the fit's pressure unit)
Tl10 = log10((double)(T));
Pl10 = log10((double)(P));
// Low-pressure expression of the fit (evaluated in log10 opacity space).
k_lowP = c1 * atan((double)(Tl10 - c2)) -
(c3 / (Pl10 + c4)) * exp((double)(pow((double)(Tl10 - c5), 2.0))) + c6 * met + c7;
// Undo the log10.
k_lowP = pow((double)(10.0), k_lowP);
// High-pressure expression; coefficient set switches at T = 800 K.
if (T <= 800.0)
{
k_hiP = c8_l + c9_l * Tl10 + c10_l * pow((double)(Tl10), 2.0) +
Pl10 * (c11_l + c12_l * Tl10) +
c13_l * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
else
{
k_hiP = c8_h + c9_h * Tl10 +
c10_h * pow((double)(Tl10), 2.0) + Pl10 * (c11_h + c12_h * Tl10) +
c13_h * met * (0.5 + onedivpi * atan((double)((Tl10 - ((double)2.5)) / (double)0.2)));
}
// Undo the log10.
k_hiP = pow((double)(10.0), k_hiP);
// Total Rosseland mean opacity; /10 converts cm2 g-1 to m2 kg-1.
k_IR = (k_lowP + k_hiP) / ((double)10.0);
// Clamp to avoid divergence of the fit for very large values.
if (k_IR > 1.0e10)
{
k_IR = 1.0e10;
}
}
// Dry convective adjustment for one column (id). Wherever a layer pair is
// statically unstable relative to the dry adiabat
// T_i = T_{i+1} * (p_i / p_{i+1})^kappa (within a small tolerance), the
// pair is relaxed onto that adiabat using a pressure-thickness-weighted
// mean temperature, which preserves the d_p-weighted sum of T across the
// pair. Downward then upward sweeps are repeated (at most itermax times)
// until no pair needs adjusting. The result is returned as a heating rate
// dT_conv = (T_adjusted - T) / t_step.
// Tl/pl: layer temperatures/pressures; pe: nlay1 = nlay+1 layer-edge
// pressures; Tl_cc__df_l, d_p__df_l: caller-provided per-column scratch.
__device__ void Ray_dry_adj(int id, int nlay, int nlay1, double t_step, double kappa,
double* Tl, double* pl,
double* pe, double*& dT_conv, double* Tl_cc__df_l, double* d_p__df_l) {
// constants & parameters
int itermax = 5; // maximum number of down+up sweep iterations
const double small = 1e-6; // instability tolerance
// work variables
int i, iter;
bool did_adj;
double pfact, Tbar;
double condi;
// Working copy of the temperatures and the layer pressure thicknesses.
for (i = 0; i < nlay; i++)
{
Tl_cc__df_l[id * nlay + i] = Tl[id * nlay + i];
d_p__df_l[id * nlay + i] = pe[id * nlay1 + i + 1] - pe[id * nlay1 + i];
}
for (iter = 0; iter < itermax; iter++)
{
did_adj = false;
// Downward pass
for (i = 0; i < nlay - 1; i++)
{
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
// Unstable: relax the pair onto the adiabat, conserving
// the d_p-weighted temperature sum.
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// Upward pass
for (i = nlay - 2; i > -1; i--) {
pfact = pow((double)(pl[id * nlay + i] / pl[id * nlay + i + 1]), kappa);
condi = (Tl_cc__df_l[id * nlay + i + 1] * pfact - small);
if (Tl_cc__df_l[id * nlay + i] < condi) {
Tbar = (d_p__df_l[id * nlay + i] * Tl_cc__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1] * Tl_cc__df_l[id * nlay + i + 1]) /
(d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]);
Tl_cc__df_l[id * nlay + i + 1] = (d_p__df_l[id * nlay + i] + d_p__df_l[id * nlay + i + 1]) * Tbar /
(d_p__df_l[id * nlay + i + 1] + pfact * d_p__df_l[id * nlay + i]);
Tl_cc__df_l[id * nlay + i] = Tl_cc__df_l[id * nlay + i + 1] * pfact;
did_adj = true;
}
}
// If no adjustment was required this iteration, the column is stable.
if (did_adj == false)
{
break;
}
}
// Change in temperature is Tl_cc - Tl, applied over one timestep as a rate.
for (i = 0; i < nlay; i++)
{
dT_conv[id * nlay + i] = (Tl_cc__df_l[id * nlay + i] - Tl[id * nlay + i]) / t_step;
}
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Interpolate y at xval between (x1, y1) and (x2, y2) linearly in
// log10-log10 space (i.e. power-law interpolation). All inputs must be
// positive; the result is written through yval.
__device__ void linear_log_interp(double xval, double x1, double x2, double y1, double y2, double& yval) {
    const double lx = log10((double)(xval));
    const double lx1 = log10((double)(x1));
    const double lx2 = log10((double)(x2));
    const double ly1 = log10((double)(y1));
    const double ly2 = log10((double)(y2));
    const double invspan = ((double)1.0) / (lx2 - lx1);
    yval = pow((double)(10.0), ((ly1 * (lx2 - lx) + ly2 * (lx - lx1)) * invspan));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Build the vertical optical-depth profile for one column (id) and one
// opacity channel by hydrostatic integration: dtau = kRoss * dP / grav,
// accumulated from the model top (tau = 0) down through nlev layers onto
// the nlev+1 layer edges of tau_struc_e.
// kRoss is laid out [column][channel][level] with 3 channels per column
// (NOTE(review): the factor 3 is hard-wired here — confirm against callers).
__device__ void tau_struct(int id, int nlev, double grav,
double* p_half, double* kRoss,
int channel, double* tau_struc_e) {
    const int nedge = nlev + 1;
    double tau_sum = 0.0;
    // Top edge: zero optical depth above the model top.
    tau_struc_e[id * nedge + 0] = tau_sum;
    // Integrate from top to bottom.
    for (int k = 0; k < nlev; k++)
    {
        // Pressure thickness of the layer between edges k and k+1.
        double delP = p_half[id * nedge + k + 1] - p_half[id * nedge + k];
        // Layer optical depth assuming hydrostatic equilibrium.
        double tau_lay = (kRoss[id * nlev * 3 + channel * nlev + k] * delP) / grav;
        tau_sum = tau_sum + tau_lay;
        tau_struc_e[id * nedge + k + 1] = tau_sum;
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Direct-beam shortwave flux for column id: Beer-Lambert attenuation of the
// incoming stellar flux (solar * mu) through the optical-depth profile
// solar_tau at zenith-angle cosine mu, evaluated on all nlay1 layer edges.
__device__ void sw_grey_down(int id, int nlay1, double solar,
double* solar_tau, double* sw_down__df_e, double mu) {
    const double beam = solar * mu; // flux incident along the beam
    for (int i = 0; i < nlay1; i++)
    {
        sw_down__df_e[id * nlay1 + i] = beam * exp((double)(-solar_tau[id * nlay1 + i] / mu));
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Two-stream longwave solver with a linear-in-tau source function
// (Olson & Kunasz 1987), integrated over a 2-point Gaussian quadrature
// in mu, for one column (thread `id`).
// Layer-sized work arrays (dtau, del, edel, e0i, e1i, Am, Bm) are [id][nlay];
// edge-sized arrays (be, tau_IRe, lw_up/down, lw_*_g) are [id][nlay1].
__device__ void lw_grey_updown_linear(int id, int nlay, int nlay1,
    double* be__df_e, double* tau_IRe__df_e,
    double* lw_up__df_e, double* lw_down__df_e,
    double* dtau__dff_l, double* del__dff_l,
    double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
    double* Am__dff_l, double* Bm__dff_l,
    double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
    const double pi = atan((double)(1)) * 4;
    const double twopi = 2.0 * pi;
    // Work variables and arrays
    int k, g;
    // Gauss quadrature nodes and weights (2-point on [0,1])
    const int gauss_ng = 2;
    double uarr[gauss_ng];
    double w[gauss_ng];
    uarr[0] = 0.21132487;
    uarr[1] = 0.78867513;
    w[0] = 0.5;
    w[1] = 0.5;
    // Optical thickness of each layer from the edge profile.
    for (k = 0; k < nlay; k++)
    {
        dtau__dff_l[id*nlay + k] = (tau_IRe__df_e[id*nlay1 + k + 1] - tau_IRe__df_e[id*nlay1 + k]);
    }
    // Zero the flux arrays
    for (k = 0; k < nlay1; k++)
    {
        lw_down__df_e[id*nlay1 + k] = 0.0;
        lw_up__df_e[id*nlay1 + k] = 0.0;
    }
    // Start loops to integrate in mu space
    for (g = 0; g < gauss_ng; g++)
    {
        // Olson & Kunasz (1987) interpolation coefficients per layer.
        for (k = 0; k < nlay; k++)
        {
            del__dff_l[id*nlay + k] = dtau__dff_l[id*nlay + k] / uarr[g];
            edel__dff_l[id*nlay + k] = exp(-del__dff_l[id*nlay + k]);
            e0i__dff_l[id*nlay + k] = 1.0 - edel__dff_l[id*nlay + k];
            e1i__dff_l[id*nlay + k] = del__dff_l[id*nlay + k] - e0i__dff_l[id*nlay + k];
            Am__dff_l[id*nlay + k] = e0i__dff_l[id*nlay + k] - e1i__dff_l[id*nlay + k] / del__dff_l[id*nlay + k]; // Am[k] = Gp[k], just indexed differently
            Bm__dff_l[id*nlay + k] = e1i__dff_l[id*nlay + k] / del__dff_l[id*nlay + k]; // Bm[k] = Bp[k], just indexed differently
        }
        // Perform downward loop first.
        // BUGFIX: the top boundary was previously written at plain index 0 and
        // the sweeps/accumulation indexed the per-quadrature working arrays
        // with stride `nlay` while the lower boundary used stride `nlay1`.
        // These arrays hold nlay1 edge values per column, so for id > 0 the
        // old indexing overlapped neighbouring columns and left element
        // [id][0] uninitialized. All accesses now use the [id][nlay1] layout.
        lw_down_g__dff_l[id*nlay1 + 0] = 0.0;
        for (k = 0; k < nlay; k++)
        {
            lw_down_g__dff_l[id*nlay1 + k + 1] = lw_down_g__dff_l[id*nlay1 + k] * edel__dff_l[id*nlay + k] + Am__dff_l[id*nlay + k] * be__df_e[id*nlay1 + k] + Bm__dff_l[id*nlay + k] * be__df_e[id*nlay1 + k + 1]; // TS intensity
        }
        // Perform upward loop.
        // Lower boundary condition: surface emits the local blackbody flux.
        lw_up_g__dff_l[id*nlay1 + nlay1 - 1] = be__df_e[id*nlay1 + nlay1 - 1];
        for (k = nlay - 1; k > -1; k--)
        {
            lw_up_g__dff_l[id*nlay1 + k] = lw_up_g__dff_l[id*nlay1 + k + 1] * edel__dff_l[id*nlay + k] +
                Bm__dff_l[id*nlay + k] * be__df_e[id*nlay1 + k] + Am__dff_l[id*nlay + k] * be__df_e[id*nlay1 + k + 1]; // TS intensity
        }
        // Sum up flux arrays with Gauss weights and points
        for (k = 0; k < nlay1; k++)
        {
            lw_down__df_e[id*nlay1 + k] = lw_down__df_e[id*nlay1 + k] + lw_down_g__dff_l[id*nlay1 + k] * w[g] * uarr[g];
            lw_up__df_e[id*nlay1 + k] = lw_up__df_e[id*nlay1 + k] + lw_up_g__dff_l[id*nlay1 + k] * w[g] * uarr[g];
        }
    }
    // Convert quadrature sums to hemispheric fluxes.
    for (k = 0; k < nlay1; k++)
    {
        lw_down__df_e[id*nlay1 + k] = twopi * lw_down__df_e[id*nlay1 + k];
        lw_up__df_e[id*nlay1 + k] = twopi * lw_up__df_e[id*nlay1 + k];
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Two-stream longwave solver with a parabolic (second-order) source function
// (Olson & Kunasz 1987), single column, 2-point Gauss quadrature in mu.
// All arrays are single-column here (no `id` stride), layer-sized work arrays
// hold nlay entries, edge-sized arrays nlay1 = nlay + 1.
__device__ void lw_grey_updown_poly(int nlay, int nlay1, double* be__df_e,
    double* tau_IRe__df_e, double* lw_up__df_e,
    double* lw_down__df_e, double* dtau__dff_l, double* del__dff_l,
    double* edel__dff_l, double* e0i__dff_l, double* e1i__dff_l,
    double* e2i__dff_l, double* Am__dff_l, double* Bm__dff_l,
    double* Gm__dff_l, double* lw_up_g__dff_l, double* lw_down_g__dff_l) {
    const double pi = atan((double)(1)) * 4;
    const double twopi = 2.0 * pi;
    // Work variables and arrays
    int k, g;
    // Gauss quadrature nodes and weights (2-point on [0,1])
    const int gauss_ng = 2;
    double uarr[gauss_ng];
    double w[gauss_ng];
    uarr[0] = 0.21132487;
    uarr[1] = 0.78867513;
    w[0] = 0.5;
    w[1] = 0.5;
    // Optical thickness of each layer from the edge profile.
    for (k = 0; k < nlay; k++)
    {
        dtau__dff_l[k] = (tau_IRe__df_e[k + 1] - tau_IRe__df_e[k]);
    }
    // Zero the flux arrays
    for (k = 0; k < nlay1; k++)
    {
        lw_up__df_e[k] = 0.0;
        lw_down__df_e[k] = 0.0;
    }
    // Start loops to integrate in mu space
    for (g = 0; g < gauss_ng; g++)
    {
        // Olson & Kunasz (1987) exponential moments per layer.
        for (k = 0; k < nlay; k++)
        {
            del__dff_l[k] = dtau__dff_l[k] / uarr[g];
            edel__dff_l[k] = exp((double)(-del__dff_l[k]));
            e0i__dff_l[k] = ((double)(1.0)) - edel__dff_l[k];
            e1i__dff_l[k] = del__dff_l[k] - e0i__dff_l[k];
            e2i__dff_l[k] = pow((double)(del__dff_l[k]), 2) - 2.0 * e1i__dff_l[k];
        }
        for (k = 0; k < nlay; k++) {
            // For boundary conditions assume linear interpolation at edges.
            // BUGFIX: this is a 0-based translation of Fortran's
            // `k == 1 .or. k == nlay`; the old test (k == 1 || k == nlay)
            // never matched the top layer and sent the bottom layer into the
            // quadratic branch, which reads del__dff_l[k + 1] out of bounds.
            if (k == 0 || k == nlay - 1)
            {
                Am__dff_l[k] = e0i__dff_l[k] - e1i__dff_l[k] / del__dff_l[k]; // Am[k] = Gp[k], just indexed differently
                Bm__dff_l[k] = e1i__dff_l[k] / del__dff_l[k]; // Bm[k] = Bp[k], just indexed differently
                Gm__dff_l[k] = 0.0;// Gm(k) = Ap(k)
            }
            else
            {
                Am__dff_l[k] = e0i__dff_l[k] + (e2i__dff_l[k] - (del__dff_l[k + 1] + 2.0 * del__dff_l[k]) * e1i__dff_l[k]) / (del__dff_l[k] * (del__dff_l[k + 1] + del__dff_l[k])); // Am[k] = Gp[k], just indexed differently
                Bm__dff_l[k] = ((del__dff_l[k + 1] + del__dff_l[k]) * e1i__dff_l[k] - e2i__dff_l[k]) / (del__dff_l[k] * del__dff_l[k + 1]); // Bm[k] = Bp[k], just indexed differently
                Gm__dff_l[k] = (e2i__dff_l[k] - del__dff_l[k] * e1i__dff_l[k]) / (del__dff_l[k + 1] * (del__dff_l[k + 1] + del__dff_l[k])); // Gm[k] = Ap[k], just indexed differently
            }
        }
        // Perform downward loop first.
        // Top boundary condition: no incoming downward intensity.
        lw_down_g__dff_l[0] = 0.0;
        lw_down_g__dff_l[1] = lw_down_g__dff_l[0] * edel__dff_l[0] + Am__dff_l[0] * be__df_e[0] + Bm__dff_l[0] * be__df_e[1];
        for (k = 1; k < nlay - 1; k++)
        {
            lw_down_g__dff_l[k + 1] = lw_down_g__dff_l[k] * edel__dff_l[k] + Am__dff_l[k] * be__df_e[k] + Bm__dff_l[k] * be__df_e[k + 1] +
                Gm__dff_l[k] * be__df_e[k - 1]; // TS intensity
        }
        lw_down_g__dff_l[nlay1 - 1] = lw_down_g__dff_l[nlay - 1] * edel__dff_l[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
        // Perform upward loop.
        // Lower boundary condition: surface emits the local blackbody flux.
        lw_up_g__dff_l[nlay1 - 1] = be__df_e[nlay1 - 1];
        lw_up_g__dff_l[nlay - 1] = lw_up_g__dff_l[nlay1 - 1] * edel__dff_l[nlay - 1] + Bm__dff_l[nlay - 1] * be__df_e[nlay - 1] + Am__dff_l[nlay - 1] * be__df_e[nlay1 - 1];
        for (k = nlay - 2; k > 0; k--)
        {
            lw_up_g__dff_l[k] = lw_up_g__dff_l[k + 1] * edel__dff_l[k] + Gm__dff_l[k] * be__df_e[k - 1] + Bm__dff_l[k] * be__df_e[k] + Am__dff_l[k] * be__df_e[k + 1]; // TS intensity
        }
        lw_up_g__dff_l[0] = lw_up_g__dff_l[1] * edel__dff_l[0] + Bm__dff_l[0] * be__df_e[0] + Am__dff_l[0] * be__df_e[1];
        // Sum up flux arrays with Gauss weights and points
        for (k = 0; k < nlay1; k++)
        {
            lw_down__df_e[k] = lw_down__df_e[k] + lw_down_g__dff_l[k] * w[g] * uarr[g];
            lw_up__df_e[k] = lw_up__df_e[k] + lw_up_g__dff_l[k] * w[g] * uarr[g];
        }
    }
    // Convert quadrature sums to hemispheric fluxes.
    for (k = 0; k < nlay1; k++)
    {
        lw_down__df_e[k] = twopi * lw_down__df_e[k];
        lw_up__df_e[k] = twopi * lw_up__df_e[k];
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Kitzmann-style two-stream, no-scattering radiative-transfer step for one
// column (thread `id`): interpolate edge temperatures, build per-band optical
// depths, accumulate shortwave and longwave fluxes, and write the net flux
// profile into net_F (bottom edge forced to the internal flux Fint).
// Layer arrays (Tl, pl, k_V_l, k_IR_l) hold nlay values per column; edge
// arrays (pe, Te, be, tau_*, flux arrays) hold nlay1 = nlay + 1 values.
// NOTE(review): several edge-sized arrays (pe, Te) are indexed with both
// `id * nlay` and `id * nlay1` strides below; these only coincide for id == 0
// and look like leftovers from a single-column version — verify against the
// allocations before trusting multi-column results.
// NOTE(review): the albedo parameter AB is unused, and sw_up stays zero.
__device__ void Kitzmann_TS_noscatt(int id, const int nlay, const int nlay1, double *Tl,
    double *pl, double *pe,
    double *k_V_l, double *k_IR_l,
    double *Beta_V, double *Beta, double *&net_F,
    double mu_s, double Finc, double Fint, double grav, double AB,
    double *tau_Ve__df_e, double *tau_IRe__df_e, double *Te__df_e, double *be__df_e, //Kitzman working variables
    double *sw_down__df_e, double *sw_down_b__df_e, double *sw_up__df_e,
    double *lw_down__df_e, double *lw_down_b__df_e,
    double *lw_up__df_e, double *lw_up_b__df_e,
    double *lw_net__df_e, double *sw_net__df_e,
    double *dtau__dff_l, double *del__dff_l, // lw_grey_updown_linear working variables
    double *edel__dff_l, double *e0i__dff_l, double *e1i__dff_l,
    double *Am__dff_l, double *Bm__dff_l,
    double *lw_up_g__dff_l, double *lw_down_g__dff_l) {
    const double pi = atan((double)(1)) * 4;
    const double twopi = 2.0 * pi;
    const double StBC = 5.670374419e-8;   // Stefan-Boltzmann constant [W m^-2 K^-4]
    // work variables
    double Finc_B;   // incident flux in the current visible band
    // Find temperature at layer edges through linear interpolation and extrapolation
    for (int i = 1; i < nlay; i++)
    {
        linear_log_interp(pe[id*nlay + i], pl[id * nlay + i - 1], pl[id * nlay + i], Tl[id * nlay + i - 1], Tl[id * nlay + i], Te__df_e[id * nlay + i]);
    }
    // Extrapolate to the top and bottom edges.
    Te__df_e[id * nlay + 0] = Tl[id * nlay + 0] + (pe[id * nlay + 0] - pe[id * nlay + 1]) /
        (pl[id * nlay + 0] - pe[id * nlay + 1]) * (Tl[id * nlay + 0] - Te__df_e[id * nlay + 1]);
    Te__df_e[id * nlay1 + nlay1 - 1] = Tl[id * nlay + nlay - 1] + (pe[id * nlay1 + nlay1 - 1] - pe[id * nlay + nlay - 1]) /
        (pl[id * nlay + nlay - 1] - pe[id * nlay + nlay - 1]) *
        (Tl[id * nlay + nlay - 1] - Te__df_e[id * nlay + nlay - 1]);
    // Shortwave fluxes
    for (int i = 0; i < nlay1; i++)
    {
        sw_down__df_e[id * nlay1 + i] = 0.0;
        sw_up__df_e[id * nlay1 + i] = 0.0;
    }
    // Three visible bands weighted by Beta_V.
    for (int channel = 0; channel < 3; channel++)
    {
        // Find the opacity structure
        tau_struct(id, nlay, grav, pe, k_V_l, channel, tau_Ve__df_e);
        // Incident flux in band
        Finc_B = Finc * Beta_V[id * 3 + channel];
        // Calculate sw flux
        // NOTE(review): sw_grey_down's second argument is its edge count
        // (named nlay1 in its signature) but receives nlay here, so the
        // bottom edge of sw_down_b is never written — confirm intended.
        sw_grey_down(id, nlay, Finc_B, tau_Ve__df_e, sw_down_b__df_e, mu_s);
        // Sum all bands
        for (int i = 0; i < nlay1; i++)
        {
            sw_down__df_e[id * nlay1 + i] = sw_down__df_e[id * nlay1 + i] + sw_down_b__df_e[id * nlay1 + i];
        }
    }
    // Long wave two-stream fluxes
    for (int i = 0; i < nlay1; i++)
    {
        lw_down__df_e[id * nlay1 + i] = 0.0;
        lw_up__df_e[id * nlay1 + i] = 0.0;
    }
    // Two IR bands weighted by Beta.
    for (int channel = 0; channel < 2; channel++)
    {
        // Find the opacity structure
        tau_struct(id,nlay, grav, pe, k_IR_l, channel, tau_IRe__df_e);
        // Blackbody fluxes (note divide by pi for correct units)
        for (int i = 0; i < nlay1; i++)
        {
            be__df_e[id * nlay1 + i] = StBC * pow((double)(Te__df_e[id * nlay1 + i]), ((double)4.0)) / pi * Beta[id * 2 + channel];
        }
        // Calculate lw flux
        lw_grey_updown_linear(id,nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
            dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
            Am__dff_l, Bm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
        //lw_grey_updown_poly(nlay, nlay1, be__df_e, tau_IRe__df_e, lw_up_b__df_e, lw_down_b__df_e,
        //dtau__dff_l, del__dff_l, edel__dff_l, e0i__dff_l, e1i__dff_l,
        // e2i__dff_l, Am__dff_l, Bm__dff_l, Gm__dff_l, lw_up_g__dff_l, lw_down_g__dff_l);
        // Sum all bands
        for (int i = 0; i < nlay1; i++)
        {
            lw_up__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] + lw_up_b__df_e[id * nlay1 + i];
            lw_down__df_e[id * nlay1 + i] = lw_down__df_e[id * nlay1 + i] + lw_down_b__df_e[id * nlay1 + i];
        }
    }
    // Net fluxes
    for (int i = 0; i < nlay1; i++)
    {
        lw_net__df_e[id * nlay1 + i] = lw_up__df_e[id * nlay1 + i] - lw_down__df_e[id * nlay1 + i];
        sw_net__df_e[id * nlay1 + i] = sw_up__df_e[id * nlay1 + i] - sw_down__df_e[id * nlay1 + i];
        net_F[id * nlay1 + i] = lw_net__df_e[id * nlay1 + i] + sw_net__df_e[id * nlay1 + i];
    }
    // Lower boundary: impose the internal heat flux.
    net_F[id * nlay1 + nlay1 - 1] = Fint;
}
#endif // picket_fence_cuda
|
6,150 | #include <math.h>
// Partition the rows of a CSR matrix across `worldSize` processes so each
// partition holds roughly the same number of nonzeros. For each partition p,
// rowsPP[p] receives its row count and nnzPP[p] its nonzero count.
void getRowsNnzPerProc(int *rowsPP, int *nnzPP, const int *global_n, const int *global_nnz, const int *row_Ptr)
{
    const int worldSize = 1;
    const double nnzIncre = (double) *global_nnz / (double) worldSize;
    double lookingFor = nnzIncre;   // running nonzero target for the next cut
    int startRow = 0;
    int partition = 0;
    for (int row = 0; row < *global_n; ++row) {
        if ((double) row_Ptr[row + 1] < lookingFor) {
            continue;   // target not reached yet
        }
        // Cut at whichever neighbouring row edge lands closest to the target.
        const int endRow =
            (fabs(lookingFor - row_Ptr[row + 1]) <= fabs(lookingFor - row_Ptr[row]))
                ? row
                : row - 1;
        rowsPP[partition] = endRow - startRow + 1;
        nnzPP[partition] = row_Ptr[endRow + 1] - row_Ptr[startRow];
        startRow = endRow + 1;
        ++partition;
        // Last partition absorbs everything up to the final nonzero.
        lookingFor = (partition < worldSize - 1)
                         ? lookingFor + nnzIncre
                         : (double) *global_nnz;
    }
} // end of getRowsPerProc //
|
6,151 | #include <stdio.h>
#include <stdlib.h>
// Check that `data` is the inclusive prefix sum of 0..length-1: consecutive
// elements must differ by exactly their index. Prints the first bad index
// and returns false on mismatch, true otherwise.
bool verify(int data[], int length)
{
    int i = 1;
    while (i < length)
    {
        if (data[i] - data[i - 1] != i)
        { printf("error %d\n", i); return false; }
        ++i;
    }
    return true;
}
#define DUMP(x) printf("%s %d\n", #x, props.x)
// Print a selection of cudaDeviceProp fields, one per line, via the DUMP
// macro defined above ("<field-name> <value>"), plus the device name.
void dumpCUDAProps(cudaDeviceProp & props)
{
    DUMP(canMapHostMemory);
    DUMP(clockRate);
    DUMP(computeMode);
    DUMP(deviceOverlap);
    DUMP(integrated);
    DUMP(kernelExecTimeoutEnabled);
    DUMP(major);
    DUMP(maxGridSize[0]);
    DUMP(maxGridSize[1]);
    DUMP(maxGridSize[2]);
    DUMP(maxThreadsDim[0]);
    DUMP(maxThreadsDim[1]);
    DUMP(maxThreadsDim[2]);
    DUMP(maxThreadsPerBlock);
    DUMP(memPitch);
    DUMP(minor);
    DUMP(multiProcessorCount);
    // name is a char array, so it cannot go through the int-formatting macro
    printf("name %s\n", props.name);
    DUMP(regsPerBlock);
    DUMP(sharedMemPerBlock);
    DUMP(textureAlignment);
    DUMP(totalConstMem);
    DUMP(totalGlobalMem);
    DUMP(warpSize);
}
#define BLOCK_SIZE 64
// Per-block inclusive scan of `in` into `out` (Hillis-Steele doubling).
// Each stride i: thread x adds in[x] into out[x + i] (writes stay inside the
// block because of the threadIdx.x + i < BLOCK_SIZE guard), then the partial
// results are mirrored back into `in` so the next stride reads a consistent
// snapshot. `in` is therefore clobbered and used as scratch.
__global__ void prefixsumblock(int *in, int *out, int length)
{
    int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    // Seed out with the input (tail threads past `length` leave out untouched;
    // the caller zeroes it beforehand with zarro).
    if (x < length)
        out[x] = in[x];
    __syncthreads();
    for ( int i = 1; i < BLOCK_SIZE; i <<= 1)
    {
        if (threadIdx.x + i < BLOCK_SIZE && x + i < length)
        {
            out[x + i] = in[x] + in[x + i];
        }
        __syncthreads();
        // Publish this stride's partials before the next doubling step.
        if (x < length)
            in[x] = out[x];
        __syncthreads();
    }
}
// Add each block's scanned offset (ends[blockIdx.x]) to every element the
// block owns, completing the multi-block prefix sum.
__global__ void correctsumends(int *ends, int *in, int *out)
{
    const int gid = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    out[gid] = in[gid] + ends[blockIdx.x];
}
// Collect per-block scan totals: thread x copies the last element of chunk
// x-1's scanned segment into out[x]; thread 0 gets the identity (0).
__global__ void gathersumends(int *in, int *out)
{
    const int x = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    out[x] = (x > 0) ? in[x * BLOCK_SIZE - 1] : 0;
}
// Zero-fill `length` ints, one element per thread, guarded for the ragged
// tail of the last block.
__global__ void zarro(int *data, int length)
{
    const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    if (idx < length)
    {
        data[idx] = 0;
    }
}
// Recursive device prefix sum over `length` ints from `in` into `out`:
// scan each BLOCK_SIZE chunk, gather each chunk's boundary value, recursively
// scan those, then add each chunk's offset back in.
// NOTE(review): `in` is clobbered (prefixsumblock uses it as scratch), and
// none of the cudaMalloc calls or kernel launches are error-checked.
void prefixsum(int* in, int *out, int length)
{
    int blocks = (length + BLOCK_SIZE - 1) / BLOCK_SIZE;   // ceil-div
    dim3 dimGrid(blocks, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    zarro<<<dimGrid, dimBlock>>>(out, length);
    prefixsumblock<<<dimGrid, dimBlock>>>(in, out, length);
    if (blocks > 1) {
        int *devEnds;      // per-chunk boundary values
        int *devTmpEnds;   // their prefix sum (per-chunk offsets)
        cudaMalloc((void**) &devEnds, blocks * sizeof(int));
        cudaMalloc((void**) &devTmpEnds, blocks * sizeof(int));
        int subblocks = (blocks + BLOCK_SIZE - 1) / BLOCK_SIZE;
        dim3 subgrid(subblocks, 1, 1);
        dim3 subblock(BLOCK_SIZE, 1, 1);
        gathersumends<<<subgrid, subblock>>>(out, devEnds);
        prefixsum(devEnds, devTmpEnds, blocks);
        cudaFree(devEnds);
        correctsumends<<<dimGrid, dimBlock>>>(devTmpEnds, in, out);
        cudaFree(devTmpEnds);
    }
}
// Run the GPU prefix sum over `data` in place (host array) and report the
// elapsed time, including the host<->device transfers, via CUDA events.
void cudasummer(int data[], int length)
{
    const size_t bytes = length * sizeof(int);
    int *devIn;
    int *devOut;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**) &devIn, bytes);
    cudaMalloc((void**) &devOut, bytes);
    cudaMemcpy(devIn, data, bytes, cudaMemcpyHostToDevice);
    prefixsum(devIn, devOut, length);
    cudaMemcpy(data, devOut, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devIn);
    cudaFree(devOut);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, start, stop);
    printf("Elapsed time %3fms\n", elapsedMs);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Enumerate every CUDA device on the system and dump its properties.
void devicesDump()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int device = 0; device < deviceCount; ++device) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, device);
        dumpCUDAProps(props);
    }
}
// Driver: build data[i] = i, prefix-sum it on the GPU, optionally print the
// result, and verify it against the closed form.
int main(int argc, char *argv[])
{
    // Element count from argv[1], default 500.
    int length;
    if (argc < 2) {
        length = 500;
    }
    else length = atoi(argv[1]);
    int *data = (int*) malloc(length * sizeof(int));
    // BUGFIX: guard against allocation failure instead of dereferencing NULL.
    if (data == NULL) {
        printf("allocation failure\n");
        return 1;
    }
    for (int i = 0; i < length; ++i) {
        data[i] = i; //rand();
    }
    devicesDump();
    cudasummer(data, length);
    if (length < 1000)
        for (int i = 0 ; i < length; ++i)
        {
            printf("%d\n", data[i]);
        }
    // BUGFIX: the verify() result was discarded and the buffer leaked;
    // propagate the check into the exit status and free the host memory.
    bool ok = verify(data, length);
    free(data);
    return ok ? 0 : 1;
}
|
6,152 | #include <iostream>
#include <cuda.h>
using namespace std;
// Tree reduction: sums one block's slice of g_idata into g_odata[blockIdx.x].
// Requires blockDim.x to be a power of two (the s >>= 1 halving assumes it)
// and dynamic shared memory of blockDim.x * sizeof(int).
__global__ void reduce_kernel(const int* g_idata, int* g_odata, unsigned int n) {
    extern __shared__ int shared_arr[];
    int *sdata = shared_arr;
    unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;   // global index
    unsigned int tidx = threadIdx.x;                          // index in block
    // Stage this thread's element into shared memory (0 past the end so the
    // tail contributes nothing).
    if(idx < n) {
        sdata[tidx] = g_idata[idx];
    }
    else
        sdata[tidx] = 0;
    __syncthreads();
    // Pairwise halving: stride s shrinks 2x per level; the barrier keeps the
    // whole block in lock-step between levels.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tidx < s) {
            sdata[tidx] += sdata[tidx + s];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block total.
    if (tidx == 0) g_odata[blockIdx.x] = sdata[0];
}
// Iteratively sum N ints on the GPU: each pass collapses every group of
// `threads_per_block` values into one partial sum until a single value
// remains, which is copied back to the host and returned.
// NOTE(review): for N == 1 the loop never runs and g_odata is read without
// being written — callers should pass N >= 2 (pre-existing behavior).
__host__ int reduce(const int* arr, unsigned int N, unsigned int threads_per_block) {
    int *g_idata, *g_odata;
    unsigned int m = threads_per_block;
    size_t shared_array_size = m * sizeof(int);
    int blockdim = (N + m - 1) / m;
    // BUGFIX: the result was held in `new int[1]` and never deleted (leak);
    // a plain local is all that is needed.
    int sum = 0;
    cudaMalloc((void **)&g_idata, N * sizeof(int));
    cudaMalloc((void **)&g_odata, blockdim * sizeof(int));
    cudaMemcpy(g_idata, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    unsigned int nexti = 0;
    for (unsigned int i = N; i > 1; i = (i + m - 1) / m) {
        nexti = (i + m - 1) / m;
        cudaMemset(g_odata, 0, blockdim * sizeof(int));
        // Launch only the blocks the current level needs, over the current
        // element count. (The old code relaunched the full first-level grid
        // over all N zero-padded elements every pass, which was correct only
        // because the tail stayed zeroed, and wasted work.)
        reduce_kernel<<< (int)nexti, m, shared_array_size >>>(g_idata, g_odata, i);
        cudaDeviceSynchronize();
        cudaMemset(g_idata, 0, N * sizeof(int));
        if (nexti != 0) {
            cudaMemcpy(g_idata, g_odata, nexti * sizeof(int), cudaMemcpyDeviceToDevice);
        }
    }
    cudaMemcpy(&sum, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(g_idata);
    cudaFree(g_odata);
    return sum;
}
|
6,153 |
namespace fastertransformer {
const unsigned int WARP_REDUCE_MASK = 0xffffffff;   // all 32 lanes participate
const float CUDA_FLOAT_INF_NEG = -100000000.f;      // "minus infinity" sentinel
const unsigned int WARP_SIZE = 32;
/* Butterfly (xor-shuffle) max reduction: every lane of the warp ends up
   holding the warp-wide maximum. */
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
  for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
    val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
  return val;
}
/* Calculate the maximum of all elements in a block: per-warp reduction, warp
   leaders stage results in shared memory, then a final warp-level reduction
   over the staged values. The 32-slot buffer bounds this to 32 warps
   (blockDim.x <= 1024). */
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
  static __shared__ T shared[32];   // one slot per warp
  int lane = threadIdx.x & 0x1f;    // lane index within the warp
  int wid = threadIdx.x >> 5;       // warp index within the block
  val = warpReduceMax<T>(val);
  if (lane == 0) shared[wid] = val;
  __syncthreads();
  // Lanes beyond the number of active warps contribute the -inf sentinel.
  val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
                                                 : CUDA_FLOAT_INF_NEG;
  val = warpReduceMax<T>(val);
  return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f;
  int wid = threadIdx.x >> 5;
  val = warpReduceMax(val);
  if (lane == 0) shared[wid] = val;
  __syncthreads();
  // we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
  val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
  // K should be 2, 4, 6, 8, 16 or 32
  // max-reduce down to groups of K, then min-reduce within each group, which
  // yields an upper bound on the K-th largest value.
  for (int mask = 16; mask >= K; mask >>= 1)
    val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
  for (int mask = (K >> 1); mask > 0; mask >>= 1)
    val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
  return val;
}
}
6,154 | #include <unistd.h>
#include <sys/stat.h>
#include <string.h>
// mkfifo(1) for ELKS: create a named pipe for each path argument.
// Exit status is 0 when all fifos were created, 1 if any creation failed.
int main(int argc, char **argv)
{
    unsigned short newmode;
    int i, er = 0;
    // Default fifo mode: rw for everyone, filtered by the caller's umask.
    newmode = 0666 & ~umask(0);
    for (i = 1; i < argc; i++)
    {
        // The first line below mith mkfifo is used in the GNU version but there is no mkfifo call in elks libc yet
        /*if (mkfifo (argv[i],newmode))*/
        if (mknod(argv[i], newmode | S_IFIFO, 0))
        {
            write(STDERR_FILENO,"mkfifo: cannot make fifo ",25);
            write(STDERR_FILENO,argv[i], strlen(argv[i]));
            write(STDERR_FILENO,"\n",1);
            // BUGFIX: `er &= 1` left er at 0 forever, so every failure was
            // reported as success; record the failure with |= instead.
            er |= 1;
        }
    }
    // BUGFIX: return the status instead of calling exit(), which was used
    // without declaring <stdlib.h>.
    return er;
}
|
6,155 | #include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s : %d,", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// CPU reference implementation: C[i] = A[i] + B[i] for all N elements.
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
    for (int i = 0; i < N; ++i) {
        C[i] = A[i] + B[i];
    }
}
// Compare two float arrays element-wise against a fixed absolute tolerance.
// Returns true when every pair differs by at most epsilon.
bool checkResult(float *A, float *B, int size) {
    double epsilon = 1.0E-8;
    for (int idx = 0; idx < size; idx++) {
        // BUGFIX: use fabs() instead of abs(); in C (and with C-style
        // headers) abs() is the integer overload, which truncates the float
        // difference toward zero and silently accepts errors below 1.0.
        if (fabs(A[idx] - B[idx]) > epsilon) {
            return false;
        }
        //printf("%d : %f %f\n", idx, A[idx], B[idx]);
    }
    return true;
}
// Element-wise vector add on the device: one element per thread.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (main launches with nElem = 2^24, a
// multiple of the 1024-thread block); out-of-range threads would read and
// write past the arrays.
__global__ void sumArraysOnDevice(float *A, float *B, float*C) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Fill `ip` with pseudo-random values in [0.0, 25.5], reseeding the RNG
// from the current wall-clock time on every call.
void initialData(float *ip, int size) {
    time_t now;
    srand((unsigned int) time(&now));
    int i = 0;
    while (i < size) {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
        ++i;
    }
}
// CPU-vs-GPU vector add over 2^24 floats with simple wall-clock timing.
// NOTE(review): the CHECK macro defined above is not applied to any CUDA
// call below, so allocation/copy/launch failures would go unnoticed.
int main(int argc, char **argv) {
    int nElem = 1 << 24;
    printf("Vector size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *h_C;   // host inputs and CPU reference result
    float *h_C1;              // GPU result copied back for comparison
    h_A = (float*)malloc(nBytes);
    h_B = (float*)malloc(nBytes);
    h_C = (float*)malloc(nBytes);
    h_C1 = (float*)malloc(nBytes);
    double iStart, iElaps;
    // Time the random initialization of both inputs.
    iStart = cpuSecond();
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    iElaps = cpuSecond() - iStart;
    printf("sumArrays Initial data, Time elapsed %f sec\n", iElaps);
    // CPU reference sum.
    iStart = cpuSecond();
    sumArraysOnHost(h_A, h_B, h_C, nElem);
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnCpu, Time elapsed %f sec\n", iElaps);
    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    // 1024 threads per block; nElem divides evenly, which the unguarded
    // kernel relies on.
    int iLen = 1024;
    dim3 block(iLen);
    dim3 grid((nElem + block.x -1) / block.x);
    iStart = cpuSecond();
    sumArraysOnDevice<<<grid, block>>>(d_A, d_B, d_C);
    // Kernel launches are asynchronous: synchronize before reading the timer.
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnGpu <<<%d, %d>>> Time elapsed %f sec\n", grid.x, block.x, iElaps);
    cudaMemcpy(h_C1, d_C, nBytes, cudaMemcpyDeviceToHost);
    if (!checkResult(h_C, h_C1, nElem)) {
        printf("Result is not identity!\n");
    } else {
        printf("Result is identity!\n");
    }
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C1);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
// Each thread sums inp[0 .. inp[threadIdx.x]-1] (a prefix bounded by its own
// element's value), then stores its sum to out[0].
__global__ void conv8(int *inp, int *out)
{
    // BUGFIX: `i` was used uninitialized as both the start index and the loop
    // counter, making the read range (and the sum) undefined; the prefix must
    // start at element 0.
    int i = 0;
    int sum = 0;
    do {
        sum += inp[i];
        i++;
    } while(i < inp[threadIdx.x]);
    __syncthreads();
    // NOTE(review): every thread writes its own sum to out[0]; which value
    // lands there is unspecified — confirm whether out[threadIdx.x] was
    // intended.
    out[0] = sum;
}
|
// One FTCS (forward-time centred-space) update on a 2-D grid with periodic
// boundaries: reads the old field components (Cxo, Cyo) and writes the new
// ones (Cxn, Cyn). The vertical neighbours are coupled through a rotation by
// angle Adx (cos/sin cross terms between the x and y components);
// diffu/diffd/diffl/diffr are per-cell neighbour weights and T2val is a
// per-cell decay term.
// Launch layout: x is threaded (threadIdx.x + blockDim.x*blockIdx.x),
// y is one row per block (blockIdx.y).
__global__ void ftcsKernel(float *Cxn, float *Cyn, float *Cxo, float *Cyo, float *diffu,float *diffd,float *diffl, float *diffr, float *T2val, float Adx, int dimX)
{
    int x = threadIdx.x + blockDim.x*blockIdx.x;// place in x dim
    int y = blockIdx.y; // place in y dim
    int ind = x+y*dimX; // current index in linear space
    // neighbour coordinates (wrapped below)
    int yp1 = y+1;
    int ym1 = y-1;
    int xp1 = x+1;
    int xm1 = x-1;
    // periodic bc
    if (y==0) ym1 = gridDim.y-1;
    if (y==gridDim.y-1) yp1 = 0;
    if (x==0) xm1 = dimX-1;
    if (x==dimX-1) xp1 = 0;
    // NOTE(review): the >= 0 and y-range checks are always true for this
    // launch layout; only x <= dimX-1 actually guards tail threads when dimX
    // is not a multiple of blockDim.x.
    if (x >= 0 && x <= (dimX-1) && y >= 0 && y <= (gridDim.y-1) )
    {
        Cxn[ind] = Cxo[ind] - T2val[ind]*Cxo[ind]
            + diffu[ind]*(cos(Adx)*Cxo[yp1*dimX+x] + sin(Adx)*Cyo[yp1*dimX+x] - Cxo[ind])
            + diffd[ind]*(cos(Adx)*Cxo[ym1*dimX+x] - sin(Adx)*Cyo[ym1*dimX+x] - Cxo[ind])
            + diffl[ind]*(Cxo[y*dimX+xp1] - Cxo[ind])
            + diffr[ind]*(Cxo[y*dimX+xm1] - Cxo[ind]);
        Cyn[ind] = Cyo[ind] - T2val[ind]*Cyo[ind]
            + diffu[ind]*(cos(Adx)*Cyo[yp1*dimX+x] - sin(Adx)*Cxo[yp1*dimX+x] - Cyo[ind])
            + diffd[ind]*(cos(Adx)*Cyo[ym1*dimX+x] + sin(Adx)*Cxo[ym1*dimX+x] - Cyo[ind])
            + diffl[ind]*(Cyo[y*dimX+xp1] - Cyo[ind])
            + diffr[ind]*(Cyo[y*dimX+xm1] - Cyo[ind]);
    }
}
|
6,158 | #include<stdio.h>
#include<cuda.h>
# define N 10000
// Element-wise vector add, one thread per element, guarded against the
// ragged tail beyond the N-element arrays.
__global__ void add( int * a, int *b, int *c)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
// Returns 1 when c[i] == a[i] + b[i] for all N elements, 0 otherwise.
int check(int *a, int *b, int *c)
{
    int ok = 1;
    for (int i = 0; i < N; i++)
    {
        if (c[i] != a[i] + b[i])
        {
            ok = 0;
            break;
        }
    }
    return ok;
}
// Driver: add two N-element vectors (filled with 1s and 2s) on the GPU and
// verify the result on the host.
int main()
{
    int *h_a, *h_b, *h_c;   // host inputs and result
    int *d_a, *d_b, *d_c;   // device mirrors
    // allocating memory on host
    h_a = (int *)malloc(N * sizeof(int));
    h_b = (int *)malloc(N * sizeof(int));
    h_c = (int *)malloc(N * sizeof(int));
    // Fill the inputs with fixed values so the expected element sum is 3.
    for(int i=0;i<N;i++)
    {
        h_a[i]=1;
        h_b[i]=2;
    }
    //assigning memory on the device
    // NOTE(review): cudaMalloc / cudaMemcpy / launch results are unchecked.
    cudaMalloc((void **)&d_a, N*sizeof(int));
    cudaMalloc((void **)&d_b, N*sizeof(int));
    cudaMalloc((void **)&d_c, N*sizeof(int));
    //copying elements from host to device
    cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice);
    // Launch enough 1024-thread blocks to cover N; the kernel guards y < N.
    add<<< N/1024 +1, 1024 >>>(d_a, d_b, d_c);
    // The blocking copy below also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    if(check(h_a, h_b, h_c))
        printf("Array sum is correct\n");
    else
        printf("Array sum is incorrect\n");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
}
|
6,159 | #include <stdio.h>
// One thread per element: bump each entry by 100 (single-block launch).
__global__ void modifyArray (int *modArray) {
    const int idx = threadIdx.x;
    modArray[idx] += 100;
}
// Round-trip demo: copy a 10-element array to the GPU, add 100 to every
// element (one thread per element), copy it back, and print before/after.
__host__ int main (void) {
    const int lenArray = 10;
    const size_t sizeArray = lenArray * sizeof(int);
    int *modArray = (int*) malloc( sizeArray );
    int *gpu_modArray;
    cudaMalloc( &gpu_modArray, sizeArray );
    printf("original values\n");
    for ( int i = 0; i < lenArray; i++ ) {
        modArray[i] = i + 1;
        printf("%d ", modArray[i]);
    }
    cudaMemcpy( gpu_modArray, modArray, sizeArray, cudaMemcpyHostToDevice );
    modifyArray <<< 1, lenArray >>> (gpu_modArray);
    cudaMemcpy( modArray, gpu_modArray, sizeArray, cudaMemcpyDeviceToHost );
    printf("\nfinal values\n");
    for ( int i = 0; i < lenArray; i++ )
        printf("%d ", modArray[i]);
    printf("\n");
    free( modArray );
    cudaFree( gpu_modArray );
    return 0;
}
|
6,160 | #include "includes.h"
// Grid-stride element-wise add over a 2-D thread block flattened to 1-D:
// c[i] = a[i] + b[i] for all `size` elements.
__global__ void addKernel(float *c, float *a, float *b, int size)
{
    // BUGFIX: the flattened in-block offset multiplied threadIdx.y by
    // threadIdx.x, which collapses whole rows onto the same element (and is 0
    // for the entire first row and column). The row-major flattening is
    // blockDim.x * threadIdx.y + threadIdx.x, matching the grid-stride
    // increment of gridDim.x * blockDim.x * blockDim.y below.
    int i = blockIdx.x * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x;
    while(i < size)
    {
        c[i] = a[i] + b[i];
        i += gridDim.x * blockDim.x * blockDim.y;
    }
}
6,161 | #include <iostream>
#include <array>
#include <fstream>
#include <vector>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_new.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
using host_buffer=thrust::host_vector<float>;
using device_buffer=thrust::device_vector<float>;
using host_mask=thrust::host_vector<int>;
using device_mask=thrust::device_vector<int>;
constexpr int args_padded_size=0;
constexpr int local_size=32;
namespace kernel
{
    // Row-major flattening of a 2-D index.
    __device__
    int to_1d(const uint2 &index, const int width)
    {
        return index.y*width+index.x;
    }
    // Row-major flattening of separate x/y coordinates.
    __device__
    int to_1d(const int x, int y, const int width)
    {
        return y*width+x;
    }
    // Relax one interior cell of the padded grid: where mask == 1 the cell
    // becomes the average of its four neighbours, otherwise it keeps its old
    // value. `offset` (0 or 1) staggers which of each thread's two candidate
    // columns is touched, so the even/odd kernels below update complementary
    // cell sets. delta receives |new - old| / 100, the host's convergence
    // measure (NOTE(review): the /100 scaling looks like a percent-style
    // normalization — confirm intent).
    __device__
    void block_solver(float *buffer, float *delta, int *mask, int *args, const int offset)
    {
        const int padded_size=args[args_padded_size];
        // +1 skips the padding border; 2*threadIdx.x+offset selects the cell.
        const int global_1d=to_1d(make_uint2(local_size*blockIdx.x+1+2*threadIdx.x+offset, local_size*blockIdx.y+1+threadIdx.y), padded_size);
        const float prev=buffer[global_1d];
        buffer[global_1d]=(1-mask[global_1d])*prev+mask[global_1d]*(
            buffer[global_1d-padded_size]+buffer[global_1d-1]+
            buffer[global_1d+padded_size]+buffer[global_1d+1]
        )/4;
        delta[global_1d]=fabsf(buffer[global_1d]-prev)/100;
    }
    // First half-sweep: offset follows the row parity (threadIdx.y % 2).
    __global__
    void even_solver(float *buffer, float *delta, int *mask, int *args)
    {
        block_solver(buffer, delta, mask, args, threadIdx.y%2);
    }
    // Second half-sweep: complementary offset (1 - threadIdx.y % 2).
    __global__
    void odd_solver(float *buffer, float *delta, int *mask, int *args)
    {
        block_solver(buffer, delta, mask, args, 1-threadIdx.y%2);
    }
}
// Surround an original_size x original_size grid with a one-cell border of
// `val`, returning the (original_size+2)^2 padded host buffer.
template <class T>
auto append_padding(const thrust::host_vector<T> &buffer, const T &val, const int original_size)
{
    const int padded_size = original_size + 2;
    thrust::host_vector<T> padded(padded_size * padded_size, val);
    for (int row = 1; row <= original_size; ++row)
    {
        for (int col = 1; col <= original_size; ++col)
        {
            padded[row * padded_size + col] = buffer[(row - 1) * original_size + col - 1];
        }
    }
    return padded;
}
// Strip the one-cell border from a padded (original_size+2)^2 host buffer,
// returning the interior as an original_size^2 std::vector.
template <class T>
auto remove_padding(const thrust::host_vector<T> &padded_buffer, const int original_size)
{
    const int padded_size = original_size + 2;
    std::vector<T> interior(original_size * original_size);
    for (int row = 1; row <= original_size; ++row)
    {
        for (int col = 1; col <= original_size; ++col)
        {
            interior[(row - 1) * original_size + col - 1] = padded_buffer[row * padded_size + col];
        }
    }
    return interior;
}
// Iterate the staggered relaxation kernels over the padded grid until the
// largest per-cell change drops below 1e-6, then return the padded result as
// a host buffer. h_buffer / h_mask are the unpadded global_size^2 field and
// update mask (mask == 0 pins boundary / fixed cells).
__host__
auto new_solver(const host_buffer &h_buffer, const host_mask &h_mask, const int global_size)
{
    const int padded_size=global_size+2;
    // args[0] carries the padded row stride to the kernels.
    host_mask h_args;
    h_args.push_back(padded_size);
    device_mask d_args=h_args;
    // Device copies with a one-cell zero border.
    device_buffer d_buffer=append_padding<float>(h_buffer, 0, global_size);
    device_mask d_mask=append_padding<int>(h_mask, 0, global_size);
    device_buffer d_delta(padded_size*padded_size, 0.f);
    float max_delta=0;
    const float min_delta=std::pow(10, -6);   // convergence threshold
    // Each block covers a local_size x local_size tile; each thread handles
    // two staggered cells, hence local_size/2 threads in x.
    const dim3 grid(global_size/local_size, global_size/local_size);
    const dim3 block(local_size/2, local_size);
    int ctr=0;
    try
    {
        do
        {
            // Two complementary half-sweeps per iteration.
            kernel::even_solver<<<grid, block>>>(
                thrust::raw_pointer_cast(d_buffer.data()),
                thrust::raw_pointer_cast(d_delta.data()),
                thrust::raw_pointer_cast(d_mask.data()),
                thrust::raw_pointer_cast(d_args.data())
            );
            kernel::odd_solver<<<grid, block>>>(
                thrust::raw_pointer_cast(d_buffer.data()),
                thrust::raw_pointer_cast(d_delta.data()),
                thrust::raw_pointer_cast(d_mask.data()),
                thrust::raw_pointer_cast(d_args.data())
            );
            // Device-side max of all per-cell changes (also synchronizes).
            max_delta=*thrust::max_element(thrust::device, d_delta.begin(), d_delta.end());
            if(ctr%100==0)
            {
                std::cout<<"loop:"<<ctr<<" max_delta="<<max_delta<<std::endl;
            }
            ++ctr;
        }
        while(min_delta<max_delta);
    }
    catch(thrust::system_error &e)
    {
        std::cerr<<"Exception:\n"<<e.what()<<std::endl;
    }
    // Copy the converged field back to the host (still padded).
    host_buffer new_h_buffer=d_buffer;
    return new_h_buffer;
}
// Host-side problem setup for the potential solver on an h x h grid: two
// fixed-value circles are stamped into the field, and the mask marks which
// cells the solver may update (1) versus pinned cells (0): the outer border
// and the circle interiors.
class cpu_potential_solver
{
public:
    cpu_potential_solver(const int h)
        : h_(h), buffer_(h, std::vector<float>(h, 0)), h_buffer_(h *h, 0), circles_({circle(0.25*h, 0.75*h, 100, 0.125*h), circle(0.875*h, 0.125*h, 20, 0.05*h)}), h_mask_(h *h, 1)
    {
        for(int y=0; y<h; ++y)
        {
            for(int x=0; x<h; ++x)
            {
                // Pin the outer border (value stays 0).
                if(x==0 || x==h-1 || y==0 || y==h-1)
                {
                    this->h_mask_[y*this->h_+x]=0;
                    continue;
                }
                // Stamp each circle's value and pin its interior.
                for(const auto &c:this->circles_)
                {
                    if(c.includes(x, y))
                    {
                        this->h_buffer_[y*this->h_+x]=c.v_;
                        this->buffer_[y][x]=c.v_;
                        this->h_mask_[y*this->h_+x]=0;
                        break;
                    }
                }
            }
        }
    }
    // Run the GPU relaxation; returns the padded (h+2)^2 result buffer.
    auto solve()
    {
        return new_solver(this->h_buffer_, this->h_mask_, this->h_);
    }
private:
    const int h_;                              // grid side length
    std::vector<std::vector<float>> buffer_;   // 2-D copy of the field (written in the ctor, never read here)
    host_buffer h_buffer_;                     // flattened field values
    host_mask h_mask_;                         // 1 = updatable, 0 = pinned
    // Disc of fixed value v_ centred at (x_, y_) with the given radius.
    class circle
    {
    public:
        circle(const float x, const float y, const int v, const float radius) noexcept
            : x_(x), y_(y), v_(v), radius_(radius)
        {}
        // True when (x, y) lies inside or on the circle.
        bool includes(const int x, const int y) const noexcept
        {
            return std::pow(x-this->x_, 2)+std::pow(y-this->y_, 2)<=std::pow(this->radius_, 2);
        }
        const float x_, y_;
        const int v_;
        const float radius_;
    };
    std::array<circle, 2> circles_;
};
// Entry point: solve the 512x512 potential problem and write the interior of
// the padded result to ./out.csv, emitting rows from y = h down to 1.
int main()
{
    constexpr int h=512;
    auto solver=cpu_potential_solver(h);
    // `result` is the padded (h+2) x (h+2) buffer; the loops below skip the
    // one-cell border.
    const auto result=solver.solve();
    std::ofstream os("./out.csv");
    for(int y=h; 1<=y; --y)
    {
        for(int x=1; x<=h; ++x)
        {
            os<<result[y*(h+2)+x];
            // Comma-separate all but the last column.
            if(x<h)
            {
                os<<",";
            }
        }
        os<<std::endl;
    }
    return 0;
}
6,162 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 512
#define MAX_ERR 1e-6
//__global__ void vector_add(float *out, float *a, float *b, int n) {
// int stride = 1;
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// 0 * 256 + 1 = 1 | BLOCK0 |
// 0 * 256 + 2 = 2
// 1 * 256 + 1 = 257 | BLOCK1 |
// 1 * 256 + 2 = 258
// out[tid] = a[tid] + b[tid];
//}
// Print the first three elements of the ADD result as a bracketed list.
void print_results(float *C){
    printf("ADDING\n");
    printf("[");
    int i = 0;
    while (i < 3) {
        printf("%f,",C[i]);
        ++i;
    }
    printf("]\n");
}
// Print the first three elements of the SUBTRACT result as a bracketed list.
void print_results_sub(float *C){
    printf("SUBSTRACTING\n");
    printf("[");
    int i = 0;
    while (i < 3) {
        printf("%f,",C[i]);
        ++i;
    }
    printf("]\n");
}
// Element-wise sum CUDA_C = CUDA_A + CUDA_B, one element per thread.
__global__ void vector_add(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: guard with the (previously unused) length parameter so extra
    // threads in a ragged launch cannot read/write out of bounds.
    if (tid < n) {
        CUDA_C[tid] = CUDA_A[tid] + CUDA_B[tid];
    }
}
// Element-wise difference CUDA_C = CUDA_A - CUDA_B, one element per thread.
__global__ void vector_sub(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: guard with the (previously unused) length parameter so extra
    // threads in a ragged launch cannot read/write out of bounds.
    if (tid < n) {
        CUDA_C[tid] = CUDA_A[tid] - CUDA_B[tid];
    }
}
// Element-wise products into CUDA_C, then thread 0 sums the first three
// products into *CUDA_K.
// NOTE(review): the reduction is hard-coded to 3 elements and ignores `n`;
// __syncthreads() only orders threads within one block, so this is valid
// only for the single-block <<<1,3>>> launch used in main.
__global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C,float *CUDA_K, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    CUDA_C[tid] = CUDA_A[tid] * CUDA_B[tid];
    // Only one kernel should apply the dot product
    __syncthreads();
    if(tid == 0){
        *CUDA_K = CUDA_C[tid] + CUDA_C[tid+1] + CUDA_C[tid+2];
    }
}
// Demo driver: element-wise add, subtract and (3-element) dot product of two
// vectors on the GPU. Device buffers are over-allocated to N floats even
// though only 3 elements are used.
int main(){
    float *C, *K;
    float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_K;
    // Allocate host memory
    float A[3]= {2.0,4.0,6.0};
    printf("A = {2.0,4.0,6.0}\n");
    float B[3]= {1.0,2.0,3.0};
    printf("B = {1.0,2.0,3.0}\n");
    C = (float*)malloc(sizeof(float) * N);
    K = (float*)malloc(sizeof(float));
    // Allocate device memory
    cudaMalloc((void**)&CUDA_A, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_B, sizeof(float) * N);
    // BUGFIX: CUDA_C was cudaMalloc'd twice, leaking the first allocation.
    cudaMalloc((void**)&CUDA_C, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_K, sizeof(float));
    // Transfer data from host to device memory.
    // BUGFIX: the old copies moved sizeof(float) * N bytes out of the
    // 3-element stack arrays A and B, reading far past their ends; only the
    // 3 real elements are copied now.
    cudaMemcpy(CUDA_A, A, sizeof(float) * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_B, B, sizeof(float) * 3, cudaMemcpyHostToDevice);
    // Executing kernel
    vector_add<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * 3, cudaMemcpyDeviceToHost);
    print_results(C);
    vector_sub<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * 3, cudaMemcpyDeviceToHost);
    print_results_sub(C);
    // NOTE(review): the dot-product's element products are printed with the
    // "ADDING" banner below — pre-existing labeling quirk, kept as-is.
    vector_dot_product<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_K, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * 3, cudaMemcpyDeviceToHost);
    print_results(C);
    //cudaMemcpy(K, CUDA_K, sizeof(float), cudaMemcpyDeviceToHost);
    //printf("Dot product result %f", *K);
    // Deallocate device memory
    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    // BUGFIX: CUDA_K and the host buffer K were never released.
    cudaFree(CUDA_K);
    // Deallocate host memory
    free(C);
    free(K);
    return 0;
}
|
6,163 | #include "kernel.cuh"
namespace gpu {
// One thread per element: c[i] = a[i] + b[i].
// Assumes a single block whose size equals the vector length.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Helper function that adds two integer vectors in parallel with CUDA.
// Copies a and b to the device, launches addKernel with one thread per
// element (single block), and copies the result back into c.
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on; change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two inputs, one output).
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy the input vectors from host memory to the GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread per element.
addKernel << <1, size >> > (dev_c, dev_a, dev_b);
// Check for any errors launching the kernel.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy the output vector from the GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// Shared cleanup path: cudaFree(0) is a harmless no-op for buffers
// that were never allocated.
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
} |
6,164 | extern "C"
// Build a bank of Gabor filters: one thread produces one
// (orientation, scale) kernel of size x size into 'kernels'.
// Launch layout: blockIdx.x = scale, threadIdx.x = orientation.
// kernels layout: [scale][orientation][size*size], row-major per filter.
__global__ void createKernels(
    float* kernels,
    int size,
    int nrOfOrientations,
    int nrOfScales,
    float sigma_min,
    int N)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int orientation = threadIdx.x;
    int scale = blockIdx.x;
    if (index < N)
    {
        int s2 = size / 2;          // half-width; size is expected to be odd
        const int nn = 11;          // sub-samples per pixel axis (nn x nn supersampling)
        const float gamma = 20.0f;  // extra divisor flattening the envelope along x
        // see: http://en.wikipedia.org/wiki/Gabor_filter
        // All literals carry the 'f' suffix: the originals (3.141592654,
        // 0.6, 5, ...) were double literals that silently promoted every
        // expression to double precision inside this float kernel.
        const float kPi = 3.141592654f;
        float alpha = (kPi * orientation) / nrOfOrientations;        // filter orientation (radians)
        float sigma_adjusted = 0.6f * sigma_min * scale + sigma_min; // sigma grows linearly with scale
        float s2d_min = 2.0f * sigma_adjusted * sigma_adjusted;      // 2*sigma^2 Gaussian denominator
        float lambda = 5.0f * sigma_adjusted;                        // sinusoid wavelength
        float totalSum = 0.0f; // accumulated response; kept for optional normalization
        for (int j = -s2; j <= s2; j++) {
            for (int i = -s2; i <= s2; i++) {
                // Supersample each pixel on an nn x nn sub-grid to reduce aliasing.
                float sum = 0.0f;
                for (int ii = 0; ii < nn; ii++) {
                    float xx = i - 0.5f + (1 + 2 * ii) / (2.0f * nn);
                    for (int jj = 0; jj < nn; jj++)
                    {
                        float yy = j - 0.5f + (1 + 2 * jj) / (2.0f * nn);
                        // Rotate the sample into the filter's frame.
                        float xx_ = yy * sinf(alpha) + xx * cosf(alpha);
                        float yy_ = yy * cosf(alpha) - xx * sinf(alpha);
                        // Gabor filter: oriented Gaussian envelope times a cosine carrier.
                        sum += expf(-(xx_ * xx_) / s2d_min / gamma - (yy_ * yy_) / s2d_min) * cosf(2.0f * kPi * yy_ / lambda);
                    }
                }
                kernels[(i + s2) + (j + s2) * size + size * size * orientation + size * size * nrOfOrientations * scale] = sum;
                totalSum += sum;
            }
        }
        // Normalization by totalSum is intentionally disabled (the original
        // held an empty loop with the division commented out); raw
        // responses are stored as-is.
    }
}
extern "C"
// Apply the Gabor bank at the listed positions: for each (x, y) in
// 'positions', correlate the size x size window of 'inimg' with every
// orientation of the given 'scale' and keep the strongest response in
// 'outimg'. Threads stride through the position list N_threads apart.
// NOTE(review): bestOrientation is computed but never stored — confirm
// whether the orientation map was meant to be an output.
__global__ void applyKernels(
float* kernels,
float* inimg,
float* outimg,
int* positions,
int size,
int nrOfOrientations,
int scale,
int dimsx,
int dimsy,
int N_threads,
int NN)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = index;
// Stride-N_threads loop over positions (caller passes the total thread
// count — presumably gridDim.x * blockDim.x; confirm at the launch site).
while (tid < NN)
{
int x = positions[2 * tid];
int y = positions[2 * tid + 1];
int s2 = size / 2;
int bestOrientation = 0;
float bestResponse = 0;
// Correlate the window around (x, y) with each orientation and track
// the maximum. No border guard: callers must keep positions at least
// s2 pixels inside the image.
for (int r = 0; r < nrOfOrientations; r++) {
float sum = 0;
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
sum += inimg[x + i + dimsx * (y + j)] * kernels[i + s2 + size * (j + s2) + size * size * r + size * size * nrOfOrientations * scale];
}
}
if (sum > bestResponse) {
bestResponse = sum;
bestOrientation = r;
}
}
// NOTE(review): non-atomic read-modify-write — if two entries in
// 'positions' name the same pixel this can race; confirm positions
// are unique per launch.
if (outimg[x + dimsx * y] < bestResponse)
{
outimg[x + dimsx * y] = bestResponse;
}
tid += N_threads;
}
}
6,165 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define HISTOGRAM_SIZE 64
#define TILE_WITDH 16
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double rtclock()
{
    struct timeval now;
    struct timezone tz;
    const int rc = gettimeofday(&now, &tz);
    if (rc != 0)
        printf("Error return from gettimeofday: %d", rc);
    return now.tv_sec + now.tv_usec * 1.0e-6;
}
// Load a binary ("P6") PPM image from disk.
// Exits the process with a diagnostic on any format or I/O error;
// otherwise returns a heap-allocated PPMImage (caller frees data + struct).
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    // Magic number: "P6" identifies binary RGB PPM.
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Skip '#' comment lines between header fields.
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
                filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    // Consume the single whitespace byte separating header and pixel data.
    while (fgetc(fp) != '\n')
        ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    // BUG FIX: the original re-checked 'img' here instead of the freshly
    // allocated pixel buffer, so a failed data allocation went undetected.
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}
// Accumulate a normalized 64-bin color histogram of d_image into hist.
// Each block builds a private histogram in shared memory, then merges it
// into the global hist with atomics. Expects TILE_WITDH x TILE_WITDH
// 2D thread blocks and pixel components already quantized to 0..3 by the
// host (Histogram() divides each channel by 64 before the launch).
__global__ void add_to_hist(PPMImage *d_image, float* hist) {
__shared__ float private_hist[HISTOGRAM_SIZE];
// Total pixel count, used to turn bin counts into frequencies.
float size = d_image->y*d_image->x*1.0;
// The first HISTOGRAM_SIZE threads (flattened 2D index) zero the shared bins.
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) private_hist[threadIdx.x * TILE_WITDH + threadIdx.y] = 0;
__syncthreads();
// Get col
int col = blockDim.x * blockIdx.x + threadIdx.x;
// Get row
int row = blockDim.y * blockIdx.y + threadIdx.y;
// Get index
int index = row * d_image->x + col;
if((row < d_image->y && col < d_image->x) && (index < d_image->x*d_image->y)) {
// Bin key = r*16 + g*4 + b with each channel in 0..3 (base-4 encoding).
atomicAdd(&(private_hist[16*d_image->data[index].red + 4 * d_image->data[index].green + d_image->data[index].blue]), 1);
}
__syncthreads();
// Merge this block's bins into the global histogram, pre-normalized.
if(threadIdx.x * TILE_WITDH + threadIdx.y < HISTOGRAM_SIZE) {
atomicAdd(&(hist[threadIdx.x * TILE_WITDH + threadIdx.y]), (private_hist[threadIdx.x * TILE_WITDH + threadIdx.y]/size));
}
}
// Host driver: quantize the image to 2 bits per channel, upload it, run
// add_to_hist, and copy the 64 normalized bins back into h.
// NOTE: this mutates image->data in place (quantization).
void Histogram(PPMImage *image, float *h) {
//Init variables;
int i;
unsigned int rows, cols, img_size;
PPMImage *d_image;
PPMPixel *d_pixels;
float *d_hist;
// CUDA TIMERS
/*
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float tbuffer, tenviar, tk, treceber, ttotal;
*/
// Get data
cols = image->x;
rows = image->y;
img_size = cols * rows;
// Preprocess: quantize each 8-bit channel down to 0..3 so the kernel can
// form a 6-bit histogram key.
for (i = 0; i < img_size; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
//cudaEventRecord(start);
// Alloc structure to devise
cudaMalloc((void **)&d_image, sizeof(PPMImage));
// Alloc image to devise
cudaMalloc((void **)&d_pixels, sizeof(PPMPixel) * img_size);
//alloc histogram to devise
cudaMalloc((void **)&d_hist, HISTOGRAM_SIZE*sizeof(float));
/*
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tbuffer, start, stop);
printf("Alloc Time: %f ", tbuffer);
*/
//cudaEventRecord(start);
// Copy the struct (its data pointer is a host pointer at this point),
// then the pixel array, then patch the struct's data field so it points
// at the device-side pixel buffer.
cudaMemcpy(d_image, image, sizeof(PPMImage), cudaMemcpyHostToDevice);
cudaMemcpy(d_pixels, image->data, sizeof(PPMPixel) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_image->data), &d_pixels, sizeof(PPMPixel *), cudaMemcpyHostToDevice);
// cpy histogram to devise
cudaMemcpy(d_hist, h, HISTOGRAM_SIZE*sizeof(float), cudaMemcpyHostToDevice);
/*
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tenviar, start, stop);
printf("copy Time: %f ", tenviar);
*/
// One TILE_WITDH x TILE_WITDH block per image tile.
//cudaEventRecord(start);
dim3 dimGrid(ceil((float)cols / TILE_WITDH), ceil((float)rows / TILE_WITDH), 1);
dim3 dimBlock(TILE_WITDH, TILE_WITDH, 1);
// Call function
add_to_hist<<<dimGrid, dimBlock>>>(d_image, d_hist);
/*
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tk, start, stop);
printf("CUDA KERNEL: %f\n", tk);
*/
//cudaEventRecord(start);
// Blocking copy back to the host (also synchronizes with the kernel).
cudaMemcpy(h, d_hist, HISTOGRAM_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
/*
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&treceber, start, stop);
printf("receber Time: %f ", treceber);
ttotal = tbuffer + tenviar + tk + treceber;
printf("Total: %f\n", ttotal);
*/
//Free memory
cudaFree(d_image);
cudaFree(d_pixels);
cudaFree(d_hist);
/*
cudaEventDestroy(start);
cudaEventDestroy(stop);
*/
}
// Entry point: read the PPM named on the command line, histogram it on the
// GPU, and print the 64 bins plus the elapsed wall time.
int main(int argc, char *argv[]) {
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
        // BUG FIX: the original fell through and dereferenced a missing
        // argv[1]; bail out instead.
        return 1;
    }
    double t_start, t_end;
    int i;
    char *filename = argv[1];
    PPMImage *image = readPPM(filename);
    float *h = (float*)malloc(sizeof(float) * 64);
    // Zero the 64 histogram bins.
    for(i=0; i < 64; i++) h[i] = 0.0;
    t_start = rtclock();
    Histogram(image, h);
    t_end = rtclock();
    for (i = 0; i < 64; i++){
        printf("%0.3f ", h[i]);
    }
    printf("\n");
    fprintf(stdout, "%0.6lf\n", t_end - t_start);
    free(h);
    // Release the image (previously leaked).
    free(image->data);
    free(image);
    return (0);
}
/*
ts = tempo_serial
tbuffer = tempo_GPU_criar_buffer
tenviar = tempo_GPU_offload_enviar
tk = tempo_kernel
treceber = tempo_GPU_offload_receber
ttotal = GPU_total
speedup = speedup (tempo_serial / GPU_total).
arqX | ts | tbuffer | tenviar | tk | treceber | GPU_total | speedup |
-------------------------------------------------------------------------------------------------
arq1 | 0.320540 s | 1.62240 ms | 0.80217 ms | 0.67052 ms | 0.02444 ms | 3.11955 ms | 102.751 |
-------------------------------------------------------------------------------------------------
arq2 | 0.585691 s | 1.29168 ms | 1.07040 ms | 1.83283 ms | 0.02112 ms | 4.21603 ms | 138.920 |
-------------------------------------------------------------------------------------------------
arq3 | 1.676812 s | 1.30262 ms | 3.99257 ms | 7.15699 ms | 0.02096 ms | 12.47315 ms | 134.433 |
-------------------------------------------------------------------------------------------------
*/
|
6,166 | #include "includes.h"
// Divide each centroid coordinate by its cluster population.
// Layout: centroids holds k rows of d doubles; counts has k entries.
// Launch: 2D grid where x indexes the coordinate and y the centroid.
__global__ void scale_centroids(int d, int k, int* counts, double* centroids) {
    const int dim = threadIdx.x + blockIdx.x * blockDim.x;      // coordinate index
    const int cluster = threadIdx.y + blockIdx.y * blockDim.y;  // centroid index
    if (dim >= d || cluster >= k) {
        return;
    }
    int population = counts[cluster];
    // Empty clusters keep their coordinates defined instead of dividing
    // by zero: treat them as population one (a no-op scale).
    if (population < 1) {
        population = 1;
    }
    const double scale = 1.0 / double(population);
    centroids[dim + d * cluster] *= scale;
}
6,167 | /*
Print Hello World
also print the block id and thread id within the block
*/
#include <stdio.h>
const int Nthread = 3;
const int Nblock = 2;
// Each launched thread reports its block and thread coordinates.
__global__ void hello(void){
    const int block = blockIdx.x;
    const int thread = threadIdx.x;
    printf("Hello world! block ID %d, thread ID %d\n", block, thread);
}
int main() {
    hello<<<Nblock,Nthread>>>();
    // BUG FIX: kernel launches are asynchronous — without this wait the
    // process can exit before the kernel runs and the device-side printf
    // buffer is flushed, producing no output.
    cudaDeviceSynchronize();
    return 0;
}
6,168 | #include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/transform.h>
#include <iostream>
// Binary functor computing factor * x + y (the classic SAXPY update),
// usable from both host and device code via thrust::transform.
template <typename T>
class saxpy : public thrust::binary_function<T, T, T>
{
private:
    T factor; // scale applied to the first operand

public :
    __host__ __device__
    saxpy(const T& factor) : factor(factor) {}

    __host__ __device__
    T operator()(const T& x, const T& y) const
    {
        return factor * x + y;
    }
};
int main()
{
const int N = 100000;
thrust::device_vector<int> x_dev(N);
thrust::device_vector<int> y_dev(N);
for(int i = 0; i < N; ++i)
x_dev[i] = i;
saxpy<int> func(1);
thrust::fill(y_dev.begin(), y_dev.end(), 2);
thrust::transform(x_dev.begin(), x_dev.end(), y_dev.begin(), y_dev.begin(), func);
thrust::copy(y_dev.begin(), y_dev.end(), std::ostream_iterator<int>(std::cout, " "));
return 0;
}
|
6,169 | #include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#define THREAD_SIZE 256
using namespace std;
// Fill an n x n matrix (row pitch lda, in elements) with pseudo-random
// values: a coarse term in [0, 1] plus a tiny refinement term.
// Deterministic for a fixed srand() seed.
void matgen(float* a, int lda, int n) {
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            // BUG FIX: the original divided by (RAND_MAX * RAND_MAX) as an
            // int product, which overflows signed int (undefined behavior).
            // Compute the denominator in floating point instead.
            a[i * lda + j] = (float)rand() / RAND_MAX +
                (float)rand() / ((float)RAND_MAX * (float)RAND_MAX);
        }
    }
}
// Reference CPU matrix multiply: c = a * b for n x n matrices with row
// pitches lda/ldb/ldc (in elements). Accumulates in double for accuracy.
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n) {
    // The original declared `int i, j, k;` here and then shadowed all
    // three in the loops below; the unused outer declarations are removed.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            double t = 0;
            for (int k = 0; k < n; ++k) {
                t += a[i * lda + k] * b[k * ldb + j];
            }
            c[i * ldc + j] = t;
        }
    }
}
// Report the maximum and summed relative error between matrices a and b
// (n x n, row pitches lda/ldb). Entries where b is zero are skipped.
// Note: despite the name, average_err is a running sum, never divided.
void compare_mat(const float* a, int lda, const float* b, int ldb, int n) {
    float max_err = 0;
    float average_err = 0;
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            const float ref = b[row * ldb + col];
            if (ref != 0) {
                const float err = fabs((a[row * lda + col] - ref) / ref);
                //printf("%.2f\n", max_err);
                if (err > max_err) max_err = err;
                average_err += err;
            }
        }
    }
    printf("max_err : %f, average_err: %.2f\n", max_err, average_err);
}
// One block per output row: the block stages row 'row' of A in dynamic
// shared memory, then each thread computes output columns of that row,
// striding by blockDim.x. The inner loop uses Kahan-style compensated
// summation (t = running sum, y = running compensation) to reduce
// floating-point accumulation error.
// Launch: <<<n, threads, sizeof(float) * n>>>; lda/ldb/ldc are row
// pitches in elements.
__global__ static void matMultCUDA(const float* a, size_t lda,
const float* b, size_t ldb, float* c, size_t ldc, int n) {
extern __shared__ float data[];  // one full row of A (n floats)
const int tid = threadIdx.x;
const int row = blockIdx.x;
int i, j;
// Cooperative load of row 'row' of A into shared memory.
for (i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
// All of the row must be loaded before any thread reads it below.
__syncthreads();
for (j = tid; j < n; j += blockDim.x) {
float t = 0;
float y = 0;
for (i = 0; i < n; ++i) {
float r;
// Compensated accumulation of data[i] * b[i][j]; do not reorder
// these statements — the correction depends on exact evaluation order.
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
// Run the full device matrix multiply c = a * b: pitched allocations,
// H2D copies, one kernel launch (one block per row), D2H copy, cleanup.
// Returns elapsed host clock ticks, including all transfer time.
clock_t matmultCUDA(const float* a, int lda,
    const float* b, int ldb, float*c, int ldc, int n) {
    float *ac, *bc, *cc;
    clock_t start, end;
    start = clock();
    // Pitched allocations keep each row aligned for coalesced access;
    // pitches are returned in bytes and converted to element counts below.
    size_t pitch_a, pitch_b, pitch_c;
    cudaMallocPitch((void**)&ac, &pitch_a, sizeof(float)* n, n);
    cudaMallocPitch((void**)&bc, &pitch_b, sizeof(float)* n, n);
    cudaMallocPitch((void**)&cc, &pitch_c, sizeof(float)* n, n);
    cudaMemcpy2D(ac, pitch_a, a, sizeof(float)* lda, sizeof(float)* n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(bc, pitch_b, b, sizeof(float)* ldb, sizeof(float)* n, n, cudaMemcpyHostToDevice);
    // One block per output row; dynamic shared memory holds one row of A.
    // (The original also computed an unused `blocks` count, removed here.)
    matMultCUDA<<<n, THREAD_SIZE, sizeof(float) * n >>>
        (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
    cudaMemcpy2D(c, sizeof(float)* n, cc, pitch_c, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);
    end = clock();
    return end - start;
}
// Benchmark driver: multiply two random 1000x1000 matrices on GPU and CPU,
// compare results, and report timing and effective GFLOPS.
int main(int argc, char** argv) {
    float *a, *b, *c, *d;
    const int n = 1000;
    a = (float*)malloc(sizeof(float)* n * n);
    b = (float*)malloc(sizeof(float)* n * n);
    c = (float*)malloc(sizeof(float)* n * n);  // GPU result
    d = (float*)malloc(sizeof(float)* n * n);  // CPU reference
    srand(10);
    matgen(a, n, n);
    matgen(b, n, n);
    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    matmult(a, n, b, n, d, n, n);
    compare_mat(c, n, d, n, n);
    double sec = (double)time / CLOCKS_PER_SEC;
    printf("Time used: %.2f (%.2lf GFLOATS)\n", sec, 2.0 * n * n * n / (sec * 1e9));
    // Release host buffers (previously leaked).
    free(a);
    free(b);
    free(c);
    free(d);
    system("pause"); // Windows-only pause; a harmless failed command elsewhere
    return 0;
}
6,170 | #include "includes.h"
// Broadcast realization 0's last-spike index across all realizations of
// each spike: element (spikeNo * noReal + r) is overwritten with element
// (spikeNo * noReal + 0). Layout is spike-major with noReal realizations
// per spike.
// NOTE(review): noSpikes is not defined in this file — presumably a macro
// or __constant__ from includes.h; confirm before modifying.
__global__ void initialSpikeIndCopyKernel( unsigned short* pLastSpikeInd, const unsigned int noReal)
{
unsigned int globalIndex = threadIdx.x+blockDim.x*blockIdx.x;
// Which spike this element belongs to (integer division by noReal).
unsigned int spikeNo = globalIndex / noReal;
if (globalIndex<noReal*noSpikes)
{
pLastSpikeInd[globalIndex] = pLastSpikeInd[spikeNo*noReal];
}
}
6,171 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
//Code written by Alan Fleming
//CONSTANTS
#define MATRIXSIZE 8
#define BLOCKSIZE 4
// Reference single-threaded multiply: P = M x N for width x width
// row-major matrices.
void mul_matrix_cpu(float *M, float *N, float *P, int width){
    for (int row = 0; row < width; ++row) {
        for (int col = 0; col < width; ++col) {
            float acc = 0;
            for (int idx = 0; idx < width; ++idx) {
                acc += M[row * width + idx] * N[idx * width + col];
            }
            P[row * width + col] = acc;
        }
    }
}
__global__ void mul_matrix_gpu(float *M, float *N, float *P, int width) {
//Assuming matrix is width x width
//Assuming tile size = blockdim.x
// Tiled matrix multiply P = M x N. Each block computes one output tile,
// staging matching tiles of M and N in shared memory.
// Preconditions (no bounds checks anywhere): width must be an exact
// multiple of blockDim.x, and blocks must be square BLOCKSIZE x BLOCKSIZE.
__shared__ float ds_M[BLOCKSIZE * BLOCKSIZE];
__shared__ float ds_N[BLOCKSIZE * BLOCKSIZE];
//Calculate row and collumn
int row = blockIdx.y * blockDim.x + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//initialize Pvalue
float Pvalue = 0;
// March the tile window across the shared dimension.
for (int i = 0; i < (width / blockDim.x); ++i) {
//copy global memory into shared memory
ds_M[threadIdx.y * blockDim.x + threadIdx.x] = M[row * width + i * blockDim.x + threadIdx.x];
ds_N[threadIdx.y * blockDim.x + threadIdx.x] = N[col + (i * blockDim.x + threadIdx.y) * width];
//ensure all data is copied
__syncthreads();
//Preform partial multiplications
for(int k = 0; k < blockDim.x; ++k) {
Pvalue += ds_M[threadIdx.y * blockDim.x + k] * ds_N[k * blockDim.x + threadIdx.x];
}
// Barrier before the next iteration overwrites the shared tiles.
__syncthreads();
}
//Load final product into output memory
P[row * width + col] = Pvalue;
}
// Recompute A x B on the CPU and check C against it element by element.
// Prints TEST PASSED/FAILED and returns the verdict.
bool verify(float *A, float *B, float *C, int width) {
    // Relative tolerance for float accumulation differences.
    const float tolerance = 1e-6;
    for(int i = 0; i < width; ++i){
        for(int k = 0; k < width; ++k) {
            float sum = 0;
            for(int j = 0; j < width; ++j) {
                sum += A[i * width + j] * B[j * width + k];
            }
            // BUG FIX: the original divided by the signed 'sum', which
            // flips the error's sign for negative references and divides
            // by zero when sum == 0. Use |sum| and fall back to absolute
            // error for a zero reference.
            float diff = fabs(sum - C[i * width + k]);
            float error = (sum != 0.0f) ? diff / fabs(sum) : diff;
            if(error > tolerance) {
                printf("TEST FAILED\n\n");
                return false;
            }
        }
    }
    printf("TEST PASSED\n\n");
    return true;
}
// Driver: multiply two MATRIXSIZE x MATRIXSIZE matrices on CPU then GPU,
// report both runtimes and the speedup, and verify the GPU result.
int main(int argc, char *argv[]){
    //allocate system memory for array
    float *a = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //first matrix
    float *b = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //second matrix
    float *c = (float *)malloc(sizeof(float) * MATRIXSIZE * MATRIXSIZE ); //resulting matrix
    // Deterministic pseudo-random fill (Lehmer-style recurrence).
    int init =1325;
    for (int i=0;i<MATRIXSIZE;i++){
        for (int j=0;j<MATRIXSIZE;j++){
            init= 3125 * init % 6553;
            a[i * MATRIXSIZE + j]= ( init -1000 ) % 6553;
            b[i * MATRIXSIZE + j]= init % 251;
        }
    }
    //get cpu start time
    clock_t t1 = clock();
    mul_matrix_cpu(a, b, c, MATRIXSIZE);
    //get cpu stop time
    clock_t t2 = clock();
    //calculate runtime in milliseconds
    float cpuTime = (float(t2 - t1)/CLOCKS_PER_SEC*1000);
    //allocate memory on gpu
    float *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)(&dev_a),MATRIXSIZE * MATRIXSIZE * sizeof(float));
    cudaMalloc((void **)(&dev_b),MATRIXSIZE * MATRIXSIZE * sizeof(float));
    cudaMalloc((void **)(&dev_c),MATRIXSIZE * MATRIXSIZE * sizeof(float));
    //copy matrices to gpu
    cudaMemcpy(dev_a,a, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyHostToDevice);
    //calculate dimensions for gpu
    dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
    dim3 dimGrid( ceil(double(MATRIXSIZE)/dimBlock.x), ceil(double(MATRIXSIZE) /dimBlock.y));
    //Set up cuda events for recording runtime
    cudaEvent_t start,stop;
    float gpuTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    // do some work on the GPU
    mul_matrix_gpu<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, MATRIXSIZE);
    //calculate runtime
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime,start,stop);
    //destroy cuda events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // BUG FIX: the original sized this copy with sizeof(int) for a float
    // buffer; use sizeof(float) so the byte count matches the data type.
    cudaMemcpy(c,dev_c, MATRIXSIZE * MATRIXSIZE * sizeof(float),cudaMemcpyDeviceToHost);
    //print results
    printf("CPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)gpuTime, double(cpuTime / gpuTime));
    //verify results
    verify(a,b,c, MATRIXSIZE);
    //free memory
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    //exit program
    return 0;
}
|
6,172 | #include "includes.h"
// Tiled matrix multiply C = A * B for square Width x Width matrices.
// Requires Width to be a multiple of BLOCK_SIZE and a launch of
// BLOCK_SIZE x BLOCK_SIZE thread blocks (no bounds checks).
__global__ void matrixMultiKernel(float *C, float *A, float *B, int Width) {
    const int BLOCK_SIZE = 16; // NOTE: This must be similar to line 338
    // block indexes
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // thread indexes
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Tile walk bounds: a runs along the block-row of A, b down the
    // block-column of B, advancing one tile per iteration.
    int a_begin = Width * BLOCK_SIZE * by;
    int a_end = a_begin + Width - 1;
    int a_step = BLOCK_SIZE;
    int b_begin = BLOCK_SIZE * bx;
    int b_step = BLOCK_SIZE * Width;
    float temp_c = 0;
    // loop throught the submatrices
    for (int a = a_begin, b = b_begin; a <= a_end;
        a += a_step, b += b_step) {
        // Stage one tile of each operand in shared memory.
        __shared__ float sub_a[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float sub_b[BLOCK_SIZE][BLOCK_SIZE];
        sub_a[ty][tx] = A[a + Width * ty + tx];
        // BUG FIX: the original loaded this tile from A as well; the
        // second operand's tile must come from B.
        sub_b[ty][tx] = B[b + Width * ty + tx];
        __syncthreads();
        // sub matrix multiplication
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            temp_c += sub_a[ty][k] * sub_b[k][tx];
        }
        // Barrier before the next iteration overwrites the shared tiles.
        __syncthreads();
    }
    int c = Width * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + Width * ty + tx] = temp_c;
}
6,173 | #include "includes.h"
// Tiled matrix multiply C = A * B with edge guards, so dimensions need
// not be multiples of TILE_WIDTH. Out-of-range tile elements are loaded
// as zero so they contribute nothing to the dot product.
// NOTE(review): TILE_WIDTH is not defined in this file — presumably a
// macro from includes.h matching the launch's block dimensions; confirm.
__global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by *TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//int Row = blockIdx.y*blockDim.y+threadIdx.y;
//int Col = blockIdx.x*blockDim.x+threadIdx.x;
float Cvalue = 0;
// Loop over the A and B tiles required to compute the C element
for (int t = 0; t < (numBRows-1)/TILE_WIDTH + 1; ++t)
{
if(Row < numARows && t*TILE_WIDTH+tx < numBRows)
{
// Collaborative loading of A
ds_A[ty][tx] = A[Row*numAColumns + t*TILE_WIDTH+tx];
}
else
{ // Control divergence at the edge
ds_A[ty][tx]= 0.0;
}
if ( t*TILE_WIDTH+ty < numBRows && Col < numBColumns)
{
// Collaborative loading of B if within range of matrix
ds_B[ty][tx] = B[(t*TILE_WIDTH+ty)*numBColumns + Col];
}
else
{
ds_B[ty][tx] = 0.0;
}
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i)
{
Cvalue += ds_A[ty][i] * ds_B[i][tx];
}
// Barrier before the next iteration overwrites the shared tiles.
__syncthreads();
}
// Only in-range threads write their element.
if ( Row < numARows && Col < numBColumns)
C[Row*numBColumns+Col] = Cvalue;
}
6,174 | #include "includes.h"
//----------------------------------------------------------------------------------------------------------------------
/// @file CudaSPHKernals.cu
/// @author Declan Russell
/// @date 08/03/2015
/// @version 1.0
//----------------------------------------------------------------------------------------------------------------------
#define pi 3.14159265359f
//----------------------------------------------------------------------------------------------------------------------
/// @brief Kernal designed to produce a has key based on the location of a particle
/// @brief Hash function taken from Teschner, M., Heidelberger, B., Mueller, M., Pomeranets, D. and Gross, M.
/// @brief (2003). Optimized spatial hashing for collision detection of deformable objects
/// @param d_hashArray - pointer to a buffer to output our hash keys
/// @param d_posArray - pointer to the buffer that holds our particle positions
/// @param numParticles - the number of particles in our buffer
/// @param resolution - the resolution of our hash table
/// @param _gridScaler - Scales our points to between 0-1.
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
/// @brief This kernal is designed to count the cell occpancy of a hash table
/// @param d_hashArray - pointer to hash table buffer
/// @param d_cellOccArray - output array of cell occupancy count
/// @param _hashTableSize - the size of our hash table
/// @param _numPoints - the number of particles in our hashed array
//----------------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------------
/// @brief This is our desity weighting kernal used in our navier stokes equations
/// @param _dst - the distance away of the neighbouring
/// @param _smoothingLength - the smoothing length of our simulation. Can be thought of a hash cell size.
/// @param _densKernCosnt - constant part of our kernal. Easier to calculate once on CPU and have loaded into device kernal.
/// @return return the weighting that our neighbouring particle has on our current particle
//----------------------------------------------------------------------------------------------------------------------
// Count how many hashed points land in each hash cell.
// d_hashArray[i] holds the cell key of point i; d_cellOccArray
// accumulates per-cell occupancy via atomic increments. Keys outside the
// table are ignored rather than written out of bounds.
__global__ void countCellOccKernal(unsigned int *d_hashArray, unsigned int *d_cellOccArray, int _hashTableSize, unsigned int _numPoints){
    const unsigned int point = threadIdx.x + blockIdx.x * blockDim.x;
    if (point >= _numPoints) {
        return;
    }
    const unsigned int cell = d_hashArray[point];
    if (cell < _hashTableSize) {
        atomicAdd(&d_cellOccArray[cell], 1);
    }
}
6,175 | #include <iostream>
#include <stdlib.h>
#include <cuda_runtime.h>
using namespace std;
// Copy a row-pointer matrix (mHeight rows of mWidth elements) into one
// contiguous row-major heap buffer. The caller owns (and must free())
// the returned buffer.
template<typename T> T* flatten(T** M, int mWidth,int mHeight){
    T* flat = (T*)malloc((mWidth * mHeight) * sizeof(T));
    for (int row = 0; row < mHeight; ++row) {
        memcpy(flat + row * mWidth, M[row], mWidth * sizeof(T));
    }
    return flat;
}
// Matrix-vector product t = M * v for a square row-major matrix
// (mvWidth == mHeight in the accompanying main). One thread computes one
// output element.
__global__ void mvmult(float* M, float* v, int mvWidth, int mHeight, float* t){
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Bounds guard added; t has mvWidth entries.
    if (row >= mvWidth) return;
    // BUG FIX: the original accumulated into an int (truncating float
    // products) and computed sum_i M[i][row] * v[row] — scaling column
    // 'row' by a single component — instead of the row dot product
    // sum_i M[row][i] * v[i]. (The demo's constant matrices masked this.)
    float result = 0.0f;
    #pragma unroll
    for (int i = 0; i < mvWidth; i++) {
        result += M[(row * mvWidth) + i] * v[i];
    }
    t[row] = result;
}
// Demo driver: build a 3x3 matrix of 3s and a vector of 4s, multiply on
// the GPU, and print the resulting vector.
int main(int argc, char** argv){
    int mvWidth = 3, mHeight = 3;
    //Host memory
    float **M, *Mf, *v, *t;
    //Device Memory
    float *M_d, *v_d, *t_d;
    v = (float*)malloc(mvWidth*sizeof(float));
    t = (float*)malloc(mvWidth*sizeof(float));
    M = (float**)malloc(mHeight*sizeof(float*));
    cudaMalloc(&M_d,mvWidth*mHeight*sizeof(float));
    cudaMalloc(&v_d,mvWidth*sizeof(float));
    cudaMalloc(&t_d,mvWidth*sizeof(float));
    for(int i = 0; i < mHeight; i++){
        M[i] = (float*)malloc(mvWidth*sizeof(float));
        for(int j = 0; j < mvWidth; j++){
            M[i][j] = 3;
        }
    }
    for(int i = 0; i < mvWidth; i++){
        v[i] = 4;
    }
    // Flatten the row-pointer matrix so it can be copied in one transfer.
    Mf = flatten(M,mvWidth,mHeight);
    cudaMemcpy(M_d,Mf,mvWidth*mHeight*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(v_d,v,mvWidth*sizeof(float),cudaMemcpyHostToDevice);
    mvmult<<<1,mvWidth>>>(M_d,v_d,mvWidth,mHeight,t_d);
    cudaMemcpy(t,t_d,mvWidth*sizeof(float),cudaMemcpyDeviceToHost);
    free(Mf);
    free(v);
    cudaFree(M_d);
    cudaFree(v_d);
    cudaFree(t_d);
    cout << "| " << t[0] << ' ' << t[1] << ' ' << t[2] << " |" << endl;
    // BUG FIX: the original freed t twice (once here and once after the
    // M loop), which is undefined behavior. Free each buffer exactly once.
    free(t);
    for(int i = 0; i < mHeight; i++){
        free(M[i]);
    }
    free(M);
    return EXIT_SUCCESS;
}
6,176 | // Liam Wynn, 3/23/2021, CUDA Learning
/*
* Demo taken from Kirk & Hwu's Programming Massively Parallel Processors, Third Edition.
*
* To compile do:
* nvcc vec_add.cu
*
* You may get an error about a lack of Microsoft Visual Studio or whatever. In that case
* do:
*
* nvcc -allow-unsupported-compiler vec_add.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Add the elements of h_A, h_B and put the result in h_C
void vecAdd(float *h_A, float *h_B, float *h_C, const int n);
// Prints a given vector of size n.
void vecPrint(float *v, const int n);
// Host driver: build two million-element vectors, add them via the GPU
// wrapper, and print the first ten entries of each.
int main() {
    const int N = 1000000;
    float *h_A, *h_B, *h_C;
    h_A = (float*)malloc(N * sizeof(float));
    h_B = (float*)malloc(N * sizeof(float));
    h_C = (float*)malloc(N * sizeof(float));
    int i;
    // A = [0, 1, 2, ...], B = all ones, C zeroed.
    for(i = 0; i < N; i++) {
        h_A[i] = i;
        h_B[i] = 1;
        h_C[i] = 0;
    }
    vecAdd(h_A, h_B, h_C, N);
    printf("h_A: "); vecPrint(h_A, 10);
    printf("h_B: "); vecPrint(h_B, 10);
    printf("h_C: "); vecPrint(h_C, 10);
    // Release host buffers (previously leaked) and return explicitly.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
// Each thread adds one pair of elements; threads past n do nothing.
__global__
void vecAddKernel(float *A, float *B, float *C, const int n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    C[idx] = A[idx] + B[idx];
}
// GPU-backed vector addition: h_C = h_A + h_B for n floats.
// Allocates device buffers, copies inputs, launches one thread per
// element, copies the result back, and frees the device memory.
// Exits the process with a diagnostic on any CUDA error.
void vecAdd(float *h_A, float *h_B, float *h_C, const int n) {
    float *d_A, *d_B, *d_C;
    const int size = sizeof(float) * n;
    cudaError_t err;
    err = cudaMalloc((void**)&d_A, size);
    if(err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&d_B, size);
    if(err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&d_C, size);
    if(err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Must copy the data from the host to the device
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Invoke the kernel: ceil(n / 256) blocks of 256 threads.
    vecAddKernel<<<(int)ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
    // Kernel launches do not return errors directly; check explicitly
    // (the original skipped this, so launch failures went unnoticed).
    err = cudaGetLastError();
    if(err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Print the first n entries of v, comma separated, ending with a newline.
void vecPrint(float *v, const int n) {
    for (int idx = 0; idx < n; ++idx) {
        printf("%f,", v[idx]);
    }
    printf("\n");
}
6,177 | #include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
// Mean-field annealing of a spin system, one replica per block.
// Outer loop: lower this replica's temperature by tempStep until it drops
// below zero. Inner loop: re-sweep all spins until no single update
// changes a spin by more than minDiff.
// unemptyCells is a sparse row layout: row spinId stores its length at
// [spinId * (size + 1)], followed by the column indices of that spin's
// non-zero couplings in mat.
// meanFieldElements provides 'size' floats of scratch per block.
__global__ void cudaKernelPull(float* mat, float* spins, int size, float* temp, float tempStep, float* meanFieldElements, bool* continueIteration, float minDiff, int* unemptyCells, float linearCoef) {
int blockId = blockIdx.x;
int thrId = threadIdx.x;
do {
// Lessen temperature (single writer per block)
if (thrId == 0)
temp[blockId] = temp[blockId] - tempStep;
// Stabilize
do {
__syncthreads();
// By default current iteration is the last one
if (thrId == 0)
continueIteration[blockId] = false;
for (int spinId = 0; spinId < size; ++spinId) {
__syncthreads();
// Transitional value assignment: each thread gathers a strided
// subset of this spin's coupling terms into the scratch buffer.
int wIndex = thrId;
while (wIndex < unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex + blockId * size] =
spins[unemptyCells[spinId * (size + 1) + wIndex + 1]
+ blockId * size]
* mat[spinId * size
+ unemptyCells[spinId * (size + 1)
+ wIndex + 1]];
// BEWARE: Matrix is symmetrical!
wIndex = wIndex + blockDim.x;
}
__syncthreads();
// Parallelized mean-field computation: tree reduction of the
// scratch terms into element 0.
long long offset = 1;
while (offset < unemptyCells[spinId * (size + 1)]) {
wIndex = thrId;
while ((wIndex * 2 + 1) * offset
< unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex * 2 * offset + blockId * size] +=
meanFieldElements[(wIndex * 2 + 1) * offset
+ blockId * size];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
__syncthreads();
// Mean-field calculation complete - write new spin and delta
if (thrId == 0) {
float meanField = meanFieldElements[blockId * size];
float old = spins[spinId + blockId * size];
if (temp[blockId] > 0) {
// Soft update: tanh response blended with the old spin value
// by linearCoef (a relaxation factor).
spins[spinId + blockId * size] = -1
* tanh(meanField / temp[blockId]) * linearCoef
+ spins[spinId + blockId * size]
* (1 - linearCoef);
} else if (meanField > 0)
// At or below zero temperature, snap to the hard sign.
spins[spinId + blockId * size] = -1;
else
spins[spinId + blockId * size] = 1;
if (minDiff < fabs(old - spins[spinId + blockId * size]))
continueIteration[blockId] = true; // Too big delta. One more iteration needed
}
__syncthreads();
}
} while (continueIteration[blockId]);
} while (temp[blockId] >= 0);
}
6,178 | #include "includes.h"
// Element-wise minimum in place: b1[i] = min(b1[i], b2[i]) for i < size.
// CUDASTDOFFSET is the project-wide flat global thread index macro.
//
// Fix: the original dereferenced b1[idx] and b2[idx] BEFORE the bounds
// check, so every tail thread past 'size' performed out-of-bounds global
// reads. All accesses are now guarded.
__global__ void kern_MinBuffers(float* b1, float* b2, int size)
{
	int idx = CUDASTDOFFSET;
	if( idx < size )
	{
		float value1 = b1[idx];
		float value2 = b2[idx];
		b1[idx] = (value1 < value2) ? value1 : value2;
	}
}
6,179 | #include "includes.h"
using namespace std;
// Exchange the two integers pointed to by a and b.
__device__ void swap(int *a, int *b) {
    const int held = *a;
    *a = *b;
    *b = held;
}
// One pass of odd-even transposition sort over d_arr[0..n-1].
// When isEven is true, thread t compares the pair starting at 2*t;
// otherwise the pair starting at 2*t+1. Out-of-range pairs are skipped.
__global__ void sort(int *d_arr, int n, bool isEven) {
    const int base = threadIdx.x * 2;
    const int i = isEven ? base : base + 1;
    if (i < n - 1 && d_arr[i] > d_arr[i + 1]) {
        swap(&d_arr[i], &d_arr[i + 1]);
    }
}
6,180 | /*
============================================================================
Name : SpikeSorting.cu
Author : John
Version :
Copyright :
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
/**
 * CUDA kernel that computes reciprocal values for a given vector.
 * One thread per element; threads whose index is past vectorSize do nothing.
 *
 * Fix: use the float literal 1.0f — the original 1.0 (double) forced a
 * double-precision divide plus conversions in a float kernel.
 */
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
	unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
	if (idx < vectorSize)
		data[idx] = 1.0f/data[idx];
}
/**
* Host function that copies the data and launches the work on GPU
*/
/**
 * Host function that copies the data to the device, computes reciprocals
 * on the GPU and returns a newly allocated host array with the result.
 * Caller owns the returned buffer (delete[]).
 *
 * Fix: kernel launches return no error code directly — added an explicit
 * cudaGetLastError() check so launch-configuration failures are not
 * silently ignored.
 */
float *gpuReciprocal(float *data, unsigned size)
{
	float *rc = new float[size];
	float *gpuData;

	CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size));
	CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice));

	static const int BLOCK_SIZE = 256;
	const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;  // ceil-div grid
	reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, size);
	CUDA_CHECK_RETURN(cudaGetLastError());  // surface launch errors

	CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost));
	CUDA_CHECK_RETURN(cudaFree(gpuData));
	return rc;
}
/**
 * CPU reference: returns a newly allocated array holding 1/data[i] for
 * each of the 'size' inputs. Caller owns the returned buffer (delete[]).
 *
 * Fix: 1.0f instead of 1.0 — avoids a needless double-precision divide
 * whose result was immediately truncated back to float.
 */
float *cpuReciprocal(float *data, unsigned size)
{
	float *rc = new float[size];
	for (unsigned cnt = 0; cnt < size; ++cnt) rc[cnt] = 1.0f/data[cnt];
	return rc;
}
// Fill data[0..size-1] with the sequence 0.5, 1.0, 1.5, ... ((i+1)/2).
void initialize(float *data, unsigned size)
{
	for (unsigned i = 0; i < size; ++i) {
		data[i] = .5*(i+1);
	}
}
// Driver: compute reciprocals of the same input on CPU and GPU and
// compare the two results via their sums.
int main(void)
{
	static const int WORK_SIZE = 65530;

	float *data = new float[WORK_SIZE];
	initialize (data, WORK_SIZE);

	float *recCpu = cpuReciprocal(data, WORK_SIZE);
	float *recGpu = gpuReciprocal(data, WORK_SIZE);

	float cpuSum = std::accumulate (recCpu, recCpu+WORK_SIZE, 0.0);
	float gpuSum = std::accumulate (recGpu, recGpu+WORK_SIZE, 0.0);

	/* Verify the results */
	std::cout<<"gpuSum = "<<gpuSum<< " cpuSum = " <<cpuSum<<std::endl;

	/* Free memory */
	delete[] recGpu;
	delete[] recCpu;
	delete[] data;
	return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
/**
 * Abort the application with a diagnostic if a CUDA runtime call failed.
 * Invoked through the CUDA_CHECK_RETURN macro, which supplies file, line
 * and the textual form of the checked statement.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
	if (err != cudaSuccess) {
		std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
		exit (1);
	}
}
|
6,181 | /*-----------------------------------------------------------*/
/* Block Sorting, Lossless Data Compression Library. */
/* Sort Transform (GPU version) */
/*-----------------------------------------------------------*/
/*--
This file is a part of bsc and/or libbsc, a program and a library for
lossless, block-sorting data compression.
Copyright (c) 2009-2021 Ilya Grebnov <ilya.grebnov@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please see the file LICENSE for full copyright information and file AUTHORS
for full list of contributors.
See also the bsc and libbsc web site:
http://libbsc.com/ for more information.
--*/
#if defined(LIBBSC_SORT_TRANSFORM_SUPPORT) && defined(LIBBSC_CUDA_SUPPORT)
#if defined(_MSC_VER)
#pragma warning(disable : 4267)
#endif
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include "st.cuh"
#include "../libbsc.h"
#include "../platform/platform.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cub/cub.cuh>
#ifdef LIBBSC_OPENMP
omp_lock_t st_cuda_lock;
cudaStream_t st_cuda_stream;
// OpenMP build: initialize the lock guarding the shared CUDA stream and
// reset the stream handle. Only done when the CUDA feature is requested.
int bsc_st_cuda_init(int features)
{
if (features & LIBBSC_FEATURE_CUDA)
{
omp_init_lock(&st_cuda_lock);
st_cuda_stream = NULL;
}
return LIBBSC_NO_ERROR;
}
#else
// Non-OpenMP build: no shared state to set up; streams are created per call.
int bsc_st_cuda_init(int features)
{
return LIBBSC_NO_ERROR;
}
#endif
#ifndef __CUDA_ARCH__
#define CUDA_DEVICE_ARCH 0
#else
#define CUDA_DEVICE_ARCH __CUDA_ARCH__
#endif
#define CUDA_DEVICE_PADDING 1024
#define CUDA_NUM_THREADS_IN_BLOCK 256
// Log a failed CUDA call (file/line, numeric code and error string) to
// stderr and propagate the first failure seen: returns 'result' when it
// indicates an error, otherwise passes the prior 'status' through.
cudaError_t bsc_cuda_safe_call(const char * filename, int line, cudaError_t result, cudaError_t status = cudaSuccess)
{
    if (result == cudaSuccess)
    {
        return status;
    }
    fprintf(stderr, "\n%s(%d): bsc_cuda_safe_call failed %d: '%s'.", filename, line, result, cudaGetErrorString(result));
    fflush(stderr);
    return result;
}
// Build one 64-bit sort key per input position for ST order 5/6/7.
// Each thread stages one byte of T_device in shared memory (plus a 7-byte
// tail loaded by the first 7 threads) and then packs 8 consecutive staged
// bytes into K_device[index] via __byte_perm, high bytes first.
// NOTE(review): T_device is indexed at [index - 1]; the caller appears to
// pass a pointer offset into a padded buffer — confirm against
// bsc_st_encode_cuda's CUDA_DEVICE_PADDING setup.
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK)
void bsc_st567_encode_cuda_presort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device)
{
__shared__ unsigned int staging[1 + CUDA_NUM_THREADS_IN_BLOCK + 6];
unsigned int * RESTRICT thread_staging = &staging[threadIdx.x];
{
int index = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK + threadIdx.x;
{
thread_staging[0 ] = T_device[index - 1 ];
if (threadIdx.x < 7) thread_staging[CUDA_NUM_THREADS_IN_BLOCK] = T_device[index - 1 + CUDA_NUM_THREADS_IN_BLOCK];
__syncthreads();
}
{
// Pack staged bytes 4..7 into 'lo' and 0..3 into 'hi', then combine.
unsigned int lo = __byte_perm(thread_staging[4], thread_staging[5], 0x0411) | __byte_perm(thread_staging[6], thread_staging[7], 0x1104);
unsigned int hi = __byte_perm(thread_staging[0], thread_staging[1], 0x0411) | __byte_perm(thread_staging[2], thread_staging[3], 0x1104);
K_device[index] = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
}
}
}
// Build one 64-bit sort key and one 8-bit payload per input position for
// ST order 8. Staging works as in the order-5/6/7 presort (one byte per
// thread plus an 8-byte tail); the key packs staged bytes 1..8 and the
// payload V_device[index] keeps staged byte 0 (the preceding symbol).
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK)
void bsc_st8_encode_cuda_presort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device, unsigned char * RESTRICT V_device)
{
__shared__ unsigned int staging[1 + CUDA_NUM_THREADS_IN_BLOCK + 7];
unsigned int * RESTRICT thread_staging = &staging[threadIdx.x];
{
int index = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK + threadIdx.x;
{
thread_staging[0 ] = T_device[index - 1 ];
if (threadIdx.x < 8) thread_staging[CUDA_NUM_THREADS_IN_BLOCK] = T_device[index - 1 + CUDA_NUM_THREADS_IN_BLOCK];
__syncthreads();
}
{
// Pack staged bytes 5..8 into 'lo' and 1..4 into 'hi'; byte 0 is the payload.
unsigned int lo = __byte_perm(thread_staging[5], thread_staging[6], 0x0411) | __byte_perm(thread_staging[7], thread_staging[8], 0x1104);
unsigned int hi = __byte_perm(thread_staging[1], thread_staging[2], 0x0411) | __byte_perm(thread_staging[3], thread_staging[4], 0x1104);
K_device[index] = (((unsigned long long)hi) << 32) | ((unsigned long long)lo); V_device[index] = thread_staging[0];
}
}
}
// After the radix sort: record (via atomicMin) the smallest sorted
// position whose key equals 'lookup', and emit the transform output —
// the top byte of each sorted key — back into T_device.
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK)
void bsc_st567_encode_cuda_postsort(unsigned char * RESTRICT T_device, unsigned long long * RESTRICT K_device, unsigned long long lookup, int * RESTRICT I_device)
{
    const int pos = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK + threadIdx.x;
    const unsigned long long key = K_device[pos];
    if (key == lookup)
    {
        atomicMin(I_device, pos);
    }
    T_device[pos] = (unsigned char)(key >> 56);
}
// After the radix sort: record (via atomicMin) the smallest sorted
// position whose key equals 'lookup'. The payload array already holds
// the transform output, so no byte extraction is needed here.
__global__ __launch_bounds__(CUDA_NUM_THREADS_IN_BLOCK)
void bsc_st8_encode_cuda_postsort(unsigned long long * RESTRICT K_device, unsigned long long lookup, int * RESTRICT I_device)
{
    const int pos = blockIdx.x * CUDA_NUM_THREADS_IN_BLOCK + threadIdx.x;
    if (K_device[pos] == lookup)
    {
        atomicMin(I_device, pos);
    }
}
// Sort-transform encode for orders 5..7 on the device.
// Pipeline: presort kernel builds 64-bit keys -> CUB radix sort on the
// relevant key bits -> postsort kernel writes output bytes and finds the
// index of the original rotation -> result copied back to T.
// Returns that index on success, or a LIBBSC_GPU_* error code.
// Error handling follows the bsc_cuda_safe_call protocol: 'status' carries
// the first failure forward and each nesting level frees what it allocated.
int bsc_st567_encode_cuda(unsigned char * T, unsigned char * T_device, int n, int num_blocks, int k, cudaStream_t st_cuda_stream)
{
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned long long * K_device = NULL;
unsigned long long * K_device_sorted = NULL;
// One allocation holds both radix-sort ping-pong buffers.
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMallocAsync((void **)&K_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned long long), st_cuda_stream)) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
cudaError_t status = cudaSuccess;
bsc_st567_encode_cuda_presort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK, 0, st_cuda_stream>>>(T_device, K_device);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
// Second half of the allocation, rounded to a padding boundary.
K_device_sorted = K_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
cub::DoubleBuffer<unsigned long long> d_keys(K_device, K_device_sorted);
void * d_temp_storage = NULL; size_t temp_storage_bytes = 0;
// First SortKeys call only queries the temp-storage size.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, n, (7 - k) * 8, 56, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMallocAsync(&d_temp_storage, temp_storage_bytes, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
// Sort only bits (7-k)*8 .. 55, i.e. the k context bytes.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, n, (7 - k) * 8, 56, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
K_device_sorted = d_keys.Current();
unsigned long long lookup;
{
// Key of the original rotation, built on the host from T.
unsigned int lo = (T[3 ] << 24) | (T[4] << 16) | (T[5] << 8) | T[6];
unsigned int hi = (T[n - 1] << 24) | (T[0] << 16) | (T[1] << 8) | T[2];
lookup = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
// Seed the atomicMin slot (just before T_device) with n.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T_device - sizeof(int), &n, sizeof(int), cudaMemcpyHostToDevice, st_cuda_stream), status);
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
bsc_st567_encode_cuda_postsort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK, 0, st_cuda_stream>>>(T_device, K_device_sorted, lookup, (int *)(T_device - sizeof(int)));
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
// Append the found index after the data, then copy data+index to host.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T_device + n, T_device - sizeof(int), sizeof(int), cudaMemcpyDeviceToDevice, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T, T_device, n + sizeof(int), cudaMemcpyDeviceToHost, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(st_cuda_stream), status);
}
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFreeAsync(d_temp_storage, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFreeAsync(K_device, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
index = *(int *)(T + n);
}
return index;
}
}
cudaFreeAsync(d_temp_storage, st_cuda_stream);
}
}
}
cudaFreeAsync(K_device, st_cuda_stream);
}
}
return index;
}
// Sort-transform encode for order 8 on the device.
// Like the order-5..7 path, but the output byte cannot live inside the
// 64-bit key, so a (key, value) pair sort is used: keys are the 8 context
// bytes, values are the preceding symbol. Returns the index of the
// original rotation on success, or a LIBBSC_GPU_* error code.
int bsc_st8_encode_cuda(unsigned char * T, unsigned char * T_device, int n, int num_blocks, cudaStream_t st_cuda_stream)
{
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned char * V_device = NULL;
unsigned char * V_device_sorted = NULL;
// Payload ping-pong buffers (one allocation for both halves).
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMallocAsync((void **)&V_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned char), st_cuda_stream)) == cudaSuccess)
{
unsigned long long * K_device = NULL;
unsigned long long * K_device_sorted = NULL;
// Key ping-pong buffers.
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaMallocAsync((void **)&K_device, 2 * (n + 2 * CUDA_DEVICE_PADDING) * sizeof(unsigned long long), st_cuda_stream)) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
cudaError_t status = cudaSuccess;
bsc_st8_encode_cuda_presort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK, 0, st_cuda_stream>>>(T_device, K_device, V_device);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
K_device_sorted = K_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
V_device_sorted = V_device + ((n + 2 * CUDA_DEVICE_PADDING) / CUDA_DEVICE_PADDING) * CUDA_DEVICE_PADDING;
cub::DoubleBuffer<unsigned long long> d_keys(K_device, K_device_sorted);
cub::DoubleBuffer<unsigned char> d_values(V_device, V_device_sorted);
void * d_temp_storage = NULL; size_t temp_storage_bytes = 0;
// First SortPairs call only queries the temp-storage size.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, n, 0, 64, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMallocAsync(&d_temp_storage, temp_storage_bytes, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
// Full 64-bit key sort, values follow their keys.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, n, 0, 64, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
K_device_sorted = d_keys.Current();
V_device_sorted = d_values.Current();
unsigned long long lookup;
{
// Key of the original rotation, built on the host from T.
unsigned int lo = (T[4] << 24) | (T[5] << 16) | (T[6] << 8) | T[7];
unsigned int hi = (T[0] << 24) | (T[1] << 16) | (T[2] << 8) | T[3];
lookup = (((unsigned long long)hi) << 32) | ((unsigned long long)lo);
// Seed the atomicMin slot (int-aligned, just past the payload) with n.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(V_device_sorted + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int), &n, sizeof(int), cudaMemcpyHostToDevice, st_cuda_stream), status);
}
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
bsc_st8_encode_cuda_postsort<<<num_blocks, CUDA_NUM_THREADS_IN_BLOCK, 0, st_cuda_stream>>>(K_device_sorted, lookup, (int *)(V_device_sorted + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int)));
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
// Copy the sorted payload plus the trailing index back to host.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T, V_device_sorted, n + 2 * sizeof(int), cudaMemcpyDeviceToHost, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaStreamSynchronize(st_cuda_stream), status);
}
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFreeAsync(d_temp_storage, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFreeAsync(K_device, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaFreeAsync(V_device, st_cuda_stream), status);
if (bsc_cuda_safe_call(__FILE__, __LINE__, status) == cudaSuccess)
{
index = *(int *)(T + ((n + sizeof(int) - 1) / sizeof(int)) * sizeof(int));
}
return index;
}
}
cudaFreeAsync(d_temp_storage, st_cuda_stream);
}
}
}
cudaFreeAsync(K_device, st_cuda_stream);
}
cudaFreeAsync(V_device, st_cuda_stream);
}
}
return index;
}
// Public entry point: run the GPU sort transform of order k (5..8) on T.
// Validates parameters, checks device capability (>= SM 3.5), builds a
// padded device copy of T extended circularly by CUDA_DEVICE_PADDING
// bytes on both sides, and dispatches to the order-specific encoder.
// Under OpenMP a single shared stream is serialized with a lock; otherwise
// a local stream is created and destroyed per call.
int bsc_st_encode_cuda(unsigned char * T, int n, int k, int features)
{
if ((T == NULL) || (n < 0)) return LIBBSC_BAD_PARAMETER;
if ((k < 5) || (k > 8)) return LIBBSC_BAD_PARAMETER;
if (n <= 1) return 0;
int num_blocks = 1;
{
cudaDeviceProp deviceProperties;
{
int deviceId; if (cudaGetDevice(&deviceId) != cudaSuccess || cudaGetDeviceProperties(&deviceProperties, deviceId) != cudaSuccess)
{
return LIBBSC_GPU_NOT_SUPPORTED;
}
}
if (deviceProperties.major * 10 + deviceProperties.minor < 35) return LIBBSC_GPU_NOT_SUPPORTED;
num_blocks = (n + CUDA_NUM_THREADS_IN_BLOCK - 1) / CUDA_NUM_THREADS_IN_BLOCK;
}
#ifdef LIBBSC_OPENMP
omp_set_lock(&st_cuda_lock);
#else
cudaStream_t st_cuda_stream = NULL;
#endif
// Lazily create the stream (shared global under OpenMP, local otherwise).
if (st_cuda_stream == NULL)
{
if (bsc_cuda_safe_call(__FILE__, __LINE__, cudaStreamCreate(&st_cuda_stream)) != cudaSuccess)
{
st_cuda_stream = NULL;
}
}
int index = LIBBSC_GPU_NOT_ENOUGH_MEMORY;
{
unsigned char * T_device = NULL;
if (st_cuda_stream != NULL && cudaMallocAsync((void **)&T_device, n + 2 * CUDA_DEVICE_PADDING, st_cuda_stream) == cudaSuccess)
{
index = LIBBSC_GPU_ERROR;
cudaError_t status = cudaSuccess;
// Copy T into the middle, then replicate head after the tail and
// tail before the head so kernels may read past either end.
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T_device + CUDA_DEVICE_PADDING , T , n , cudaMemcpyHostToDevice , st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T_device + CUDA_DEVICE_PADDING + n, T_device + CUDA_DEVICE_PADDING, CUDA_DEVICE_PADDING, cudaMemcpyDeviceToDevice, st_cuda_stream), status);
status = bsc_cuda_safe_call(__FILE__, __LINE__, cudaMemcpyAsync(T_device , T_device + n , CUDA_DEVICE_PADDING, cudaMemcpyDeviceToDevice, st_cuda_stream), status);
if (status == cudaSuccess)
{
if (k >= 5 && k <= 7) index = bsc_st567_encode_cuda(T, T_device + CUDA_DEVICE_PADDING, n, num_blocks, k, st_cuda_stream);
if (k == 8) index = bsc_st8_encode_cuda (T, T_device + CUDA_DEVICE_PADDING, n, num_blocks , st_cuda_stream);
}
cudaFreeAsync(T_device, st_cuda_stream);
}
}
#ifdef LIBBSC_OPENMP
omp_unset_lock(&st_cuda_lock);
#else
if (st_cuda_stream != NULL)
{
bsc_cuda_safe_call(__FILE__, __LINE__, cudaStreamDestroy(st_cuda_stream));
}
#endif
return index;
}
#endif
/*-----------------------------------------------------------*/
/* End st.cu */
/*-----------------------------------------------------------*/
|
6,182 | #include "includes.h"
/*
 * Spatial log-softmax over the class dimension.
 * Input layout:  B x H x W x C ("cuda layout"); output: B x C x H x W (torch layout).
 * Launch: one block per batch element (blockIdx.x); threads stride over
 * the H*W pixel positions.
 *
 * Fix: the original exponentiated raw inputs, so any logit above ~88
 * overflowed __expf to inf and produced NaN outputs. The per-pixel max is
 * now subtracted first — mathematically equivalent, numerically stable —
 * and log(1/sum * exp(x)) is computed directly as (x - max) - log(sum).
 * Also removed the unreachable `y >= height` break (index < height*width
 * already guarantees y < height).
 */
__global__ void cunn_SpatialLogSoftMax_updateOutput_kernel(float *output, float *input, int classSize, int height, int width)
{
  int batchIndex = blockIdx.x;
  int index = threadIdx.x;
  while (index < height*width) {
    int y = index / width;
    int x = index % width;
    // input starting index in cuda layout (B x H x W x C)
    int inputStartIndex =
      (height*width*classSize)*batchIndex +
      (width*classSize)*y +
      (classSize)*x;
    // max over classes for numerical stability
    float maxVal = input[inputStartIndex];
    for (int i = 1; i < classSize; i++) {
      maxVal = fmaxf(maxVal, input[inputStartIndex + i]);
    }
    float sum = 0.0f;
    for (int i = 0; i < classSize; i++) {
      sum += __expf(input[inputStartIndex + i] - maxVal);
    }
    float logSum = logf(sum);
    for (int i = 0; i < classSize; i++) {
      // output index in torch layout (B x C x H x W)
      int outputIndex =
        (classSize*height*width)*batchIndex +
        (height*width)*i +
        (width)*y +
        x;
      output[outputIndex] = (input[inputStartIndex + i] - maxVal) - logSum;
    }
    index += blockDim.x;
  }
}
6,183 | #include <iostream>
int testKernel();
namespace DOKTests { void buildAndPrint(); void testSlicing(); void testConversionToCSR(); void testConversionToELL(); }
namespace CSRTests { void spMVTest(); }
namespace ELLTests { void buildAndPrintMatrix(); void spMVTest(); }
namespace CusparseCSRTests { void cusparseTest(); void printMatrix(); void spMVTest(); }
namespace AbaqusReaderTests { void ReadMatrix(); }
namespace BenchmarkTests { void runBenchmark(); }
// Test driver: enable exactly one test at a time by (un)commenting.
int main()
{
	//testKernel();
	//DOKTests::testSlicing();
	//DOKTests::testConversionToCSR();
	DOKTests::testConversionToELL();
	//CSRTests::spMVTest();
	//ELLTests::buildAndPrintMatrix();
	//ELLTests::spMVTest();
	//CusparseCSRTests::cusparseTest();
	//CusparseCSRTests::spMVTest();
	//AbaqusReaderTests::ReadMatrix();
	//BenchmarkTests::runBenchmark();

	// Keep the console window open until the user types a character.
	std::cout << "\n\nPress any key to exit: ";
	char a;
	std::cin >> a;
	return 0;
}
6,184 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double rtclock()
{
	struct timeval now;
	struct timezone tz;
	int rc = gettimeofday(&now, &tz);
	if (rc != 0)
		printf("Error return from gettimeofday: %d", rc);
	return now.tv_sec + now.tv_usec * 1.0e-6;
}
/* Load a binary (P6) PPM file with 8-bit components; exits on any error.
 * Returns a heap-allocated PPMImage whose 'data' the caller must free.
 *
 * Fix: after allocating img->data the original re-checked `!img` (the
 * struct pointer, already known non-NULL) instead of `!img->data`, so a
 * failed pixel-buffer allocation went undetected until a crash. */
static PPMImage *readPPM(const char *filename) {
	char buff[16];
	PPMImage *img;
	FILE *fp;
	int c, rgb_comp_color;

	fp = fopen(filename, "rb");
	if (!fp) {
		fprintf(stderr, "Unable to open file '%s'\n", filename);
		exit(1);
	}

	/* Magic number must be "P6" (binary RGB). */
	if (!fgets(buff, sizeof(buff), fp)) {
		perror(filename);
		exit(1);
	}
	if (buff[0] != 'P' || buff[1] != '6') {
		fprintf(stderr, "Invalid image format (must be 'P6')\n");
		exit(1);
	}

	img = (PPMImage *) malloc(sizeof(PPMImage));
	if (!img) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}

	/* Skip '#' comment lines between the magic number and the size. */
	c = getc(fp);
	while (c == '#') {
		while (getc(fp) != '\n')
			;
		c = getc(fp);
	}
	ungetc(c, fp);

	if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
		fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
		exit(1);
	}
	if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
		fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
				filename);
		exit(1);
	}
	if (rgb_comp_color != RGB_COMPONENT_COLOR) {
		fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
		exit(1);
	}

	/* Consume the single whitespace byte before the pixel data. */
	while (fgetc(fp) != '\n')
		;

	img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
	if (!img->data) {  /* was `!img`: never caught a failed pixel allocation */
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}

	if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
		fprintf(stderr, "Error loading image '%s'\n", filename);
		exit(1);
	}

	fclose(fp);
	return img;
}
int n_block = 512;
int n_thread_block = 512;
// Build a 64-bin normalized color histogram of 'data' (n pixels whose
// channels have already been quantized to 0..3 by the host).
// Each block accumulates into a shared-memory copy, then merges it into
// the global 'hist' with atomicAdd. Bin index x enumerates the 4*4*4
// (red, green, blue) combinations in loop order.
// NOTE(review): initialization and the final merge use threads 0..63, so
// this assumes blockDim.x >= 64 — the host launches 512 threads per block.
__global__
void histogram_kernel(PPMPixel *data, float n, float *hist) {
__shared__ float hist_private[64];
if (threadIdx.x < 64)
hist_private[threadIdx.x] = 0; // Initialize the per-block private histogram
__syncthreads();
int i, j, k, l, x, count;
count = 0;
x = 0;
int begin = threadIdx.x + blockIdx.x * blockDim.x; // starting index of this thread
int stride = blockDim.x * gridDim.x; // stride is total number of threads
// For each of the 64 quantized colors, count matching pixels in this
// thread's strided slice, then add the normalized count to the shared bin.
for (j = 0; j <= 3; j++) {
for (k = 0; k <= 3; k++) {
for (l = 0; l <= 3; l++) {
for (i = begin; i < n; i += stride ) {
if (data[i].red == j && data[i].green == k && data[i].blue == l)
count++;
}
//printf("Bd: %d Bi: %03d Ti: %03d st: %d h: %.6f\n", blockDim.x, blockIdx.x, threadIdx.x, stride, ((float) count)/n);
atomicAdd(hist_private + x, ((float) count)/n);
count = 0;
x++;
}
}
}
__syncthreads();
if (threadIdx.x < 64)
atomicAdd(hist + threadIdx.x, (float) hist_private[threadIdx.x] ); // Merge the block histograms
}
/* Quantize the image to 64 colors and compute its normalized histogram
 * into h[0..63] on the GPU.
 *
 * Fix: d_h was never initialized on the device; histogram_kernel only
 * accumulates with atomicAdd, so the result contained whatever garbage
 * cudaMalloc returned. The buffer is now zeroed with cudaMemset. */
void Histogram(PPMImage *image, float *h) {
	float n = image->y * image->x;

	/* Quantize each channel to 2 bits (values 0..3) -> 64 possible colors. */
	for (int i = 0; i < n; i++) {
		image->data[i].red = floor((image->data[i].red * 4) / 256);
		image->data[i].blue = floor((image->data[i].blue * 4) / 256);
		image->data[i].green = floor((image->data[i].green * 4) / 256);
	}

	/* Device buffers: pixel array in, 64-bin histogram out. */
	PPMPixel *d_data;
	float *d_h;
	int size_hist = 64 * sizeof(float);
	cudaMalloc((void **)&d_data, n * sizeof(PPMPixel));
	cudaMalloc((void **)&d_h, size_hist);
	cudaMemcpy(d_data, image->data, n * sizeof(PPMPixel), cudaMemcpyHostToDevice);
	/* BUG FIX: the accumulator must start at zero. */
	cudaMemset(d_h, 0, size_hist);

	histogram_kernel <<< n_block, n_thread_block >>> (d_data, n, d_h);

	/* Copy result back to the host and release device memory. */
	cudaMemcpy(h, d_h, size_hist, cudaMemcpyDeviceToHost);
	cudaFree(d_data);
	cudaFree(d_h);
}
/* Read a PPM image named on the command line, compute its 64-bin GPU
 * histogram, print the bins and the elapsed time.
 *
 * Fixes: the original printed a usage complaint when argc != 2 but then
 * fell through and dereferenced a NULL argv[1]; it now exits. The image
 * pixel buffer and struct are also freed before returning. */
int main(int argc, char *argv[]) {
	if( argc != 2 ) {
		printf("Too many or no one arguments supplied.\n");
		return 1;  /* was falling through to a NULL argv[1] dereference */
	}

	double t_start, t_end;
	int i;
	char *filename = argv[1];

	PPMImage *image = readPPM(filename);
	float *h = (float*)malloc(sizeof(float) * 64);
	/* Start from an all-zero host histogram. */
	for(i=0; i < 64; i++) h[i] = 0.0;

	t_start = rtclock();
	Histogram(image, h);
	t_end = rtclock();

	for (i = 0; i < 64; i++){
		printf("%0.3f ", h[i]);
	}
	printf("\n");
	fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);

	free(h);
	free(image->data);  /* was leaked */
	free(image);
	return 0;
}
|
6,185 | #include "includes.h"
// HSL->RGB helper: map the hue offset vH (wrapped into [0,1]) to one RGB
// channel value from the intermediate levels v1 and v2.
__device__ float Hue_2_RGB_gpu( float v1, float v2, float vH )
{
    // Wrap hue into the unit interval.
    if ( vH < 0 ) vH += 1;
    if ( vH > 1 ) vH -= 1;

    float channel = v1;                                        // final sector
    if ( ( 6 * vH ) < 1 )
        channel = v1 + ( v2 - v1 ) * 6 * vH;                   // rising edge
    else if ( ( 2 * vH ) < 1 )
        channel = v2;                                          // plateau
    else if ( ( 3 * vH ) < 2 )
        channel = v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6; // falling edge
    return channel;
}
// Convert one HSL pixel per thread to 8-bit RGB.
// H and S are in [0,1]; L arrives as an 8-bit value and is rescaled.
// Threads whose index is past 'size' exit immediately.
__global__ void hsl2rgb_gpu_son(float * d_h , float * d_s ,unsigned char * d_l , unsigned char * d_r, unsigned char * d_g, unsigned char * d_b, int size)
{
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    if (idx >= size) return;

    const float H = d_h[idx];
    const float S = d_s[idx];
    const float L = d_l[idx]/255.0f;

    unsigned char r, g, b;
    if ( S == 0 )
    {
        // Achromatic: every channel equals the lightness.
        r = g = b = L * 255;
    }
    else
    {
        float var_2;
        if ( L < 0.5 )
            var_2 = L * ( 1 + S );
        else
            var_2 = ( L + S ) - ( S * L );
        const float var_1 = 2 * L - var_2;

        // Red, green and blue sample the hue a third of a turn apart.
        r = 255 * Hue_2_RGB_gpu( var_1, var_2, H + (1.0f/3.0f) );
        g = 255 * Hue_2_RGB_gpu( var_1, var_2, H );
        b = 255 * Hue_2_RGB_gpu( var_1, var_2, H - (1.0f/3.0f) );
    }

    d_r[idx] = r;
    d_g[idx] = g;
    d_b[idx] = b;
}
6,186 | #include <cstdio>
#include <stdio.h>
#define SIZE 256*1024*64
// Element copy kernel: a[i] = b[i] for the flat global thread index.
// NOTE(review): there is no bounds guard — the caller must launch a grid
// whose total thread count exactly covers (and does not exceed) the
// arrays, otherwise threads read/write out of bounds.
__global__ void input(int *a, int *b)
{
int i=blockIdx.x*blockDim.x + threadIdx.x;
a[i]=b[i];
}
/* Fill a SIZE-element host array with 2s, round-trip it through the GPU
 * via the guard-less 'input' copy kernel, and print a few elements.
 *
 * Fix: the grid was hard-coded to 9096 blocks x 512 threads (~4.66M
 * threads) while SIZE is 256*1024*64 = 16,777,216 — most of the array was
 * never copied. The grid is now sized to exactly one thread per element
 * (SIZE is a multiple of 512, so the guard-less kernel stays in bounds).
 * Host allocations are also checked. */
int main(void)
{
	int *arr;
	int *arr2;
	int *carr=0;
	int *carr2=0;

	arr= (int *)malloc(sizeof(int)*SIZE);
	arr2= (int *)malloc(sizeof(int)*SIZE);
	if (arr == NULL || arr2 == NULL)
	{
		printf("host allocation failed\n");
		return 1;
	}

	for(int i=0; i<SIZE; i++)
	{
		arr[i] = 2;
	}
	printf("%d %d",arr[SIZE-1],arr[SIZE-100]);

	cudaMalloc((void**)&carr2,sizeof(int)*SIZE);
	cudaMalloc((void**)&carr,sizeof(int)*SIZE);
	cudaMemcpy(carr,arr,sizeof(int)*SIZE,cudaMemcpyHostToDevice);

	/* One thread per element; was <<<9096,512>>>, covering only a prefix. */
	const int threads = 512;
	const int blocks = SIZE / threads;
	input<<<blocks,threads>>>(carr2,carr);

	cudaMemcpy(arr2,carr2,sizeof(int)*SIZE,cudaMemcpyDeviceToHost);
	printf("output : %d %d %d",arr2[0],arr2[10000],arr2[1000]);

	cudaFree(carr2);
	cudaFree(carr);
	free(arr2);
	free(arr);
	return 0;
}
|
6,187 |
/*
 * tsortSmall: sorts each 512-element tile of input0 into ascending order
 * with a fully unrolled bitonic sorting network, writing the sorted tile
 * back to the same position in result0.
 *
 * Launch contract (inferred from the indexing): one block of 256 threads
 * per tile of 512 ints, and at least 4096 bytes of dynamic shared memory.
 * Shared memory holds two ping-pong buffers of 512 ints: "A" at sbase and
 * "B" at sbase + 2048 bytes. Every compare-exchange pass reads one buffer
 * and writes the other, with __syncthreads() between passes.
 *
 * Index idiom: for compare distance d (a power of two), the expression
 * (tid + (tid & MASK)) selects the lower element of thread tid's pair and
 * XOR picks its partner; the big decimal masks are just the 32-bit
 * unsigned encodings of ~1, ~3, ~7, ~15, ~31, ~63, ~127, ~255.
 */
__global__ void tsortSmall(int *input0,int *result0){
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
extern __shared__ unsigned char sbase[];
// Load from global memory and sort adjacent pairs (merge size 2) -> A.
(( int *)sbase)[(tid<<1)] = min(input0[((bid*512)+(tid<<1))],input0[((bid*512)+((tid<<1)^1))]);
(( int *)sbase)[((tid<<1)^1)] = max(input0[((bid*512)+(tid<<1))],input0[((bid*512)+((tid<<1)^1))]);
__syncthreads();
// Merge size 4: XOR-3 half-cleaner (A -> B), then XOR-1 pass (B -> A).
(( int *)(sbase + 2048))[(tid+(tid&4294967294))] = min((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^3)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967294))^3)] = max((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^3)]);
__syncthreads();
(( int *)sbase)[(tid<<1)] = min((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
(( int *)sbase)[((tid<<1)^1)] = max((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
__syncthreads();
// Merge size 8: XOR-7 half-cleaner, then XOR-2 and XOR-1 passes.
(( int *)(sbase + 2048))[(tid+(tid&4294967292))] = min((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^7)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967292))^7)] = max((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^7)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967294))] = min((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
(( int *)sbase)[((tid+(tid&4294967294))^2)] = max((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid<<1)] = min((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
(( int *)(sbase + 2048))[((tid<<1)^1)] = max((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
__syncthreads();
// Merge size 16: XOR-15 half-cleaner, then XOR 4, 2, 1 passes.
(( int *)sbase)[(tid+(tid&4294967288))] = min((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^15)]);
(( int *)sbase)[((tid+(tid&4294967288))^15)] = max((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^15)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967292))] = min((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967292))^4)] = max((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967294))] = min((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
(( int *)sbase)[((tid+(tid&4294967294))^2)] = max((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid<<1)] = min((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
(( int *)(sbase + 2048))[((tid<<1)^1)] = max((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
__syncthreads();
// Merge size 32: XOR-31 half-cleaner, then XOR 8, 4, 2, 1 passes.
(( int *)sbase)[(tid+(tid&4294967280))] = min((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^31)]);
(( int *)sbase)[((tid+(tid&4294967280))^31)] = max((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^31)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967288))] = min((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967288))^8)] = max((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967292))] = min((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
(( int *)sbase)[((tid+(tid&4294967292))^4)] = max((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967294))] = min((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967294))^2)] = max((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)sbase)[(tid<<1)] = min((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
(( int *)sbase)[((tid<<1)^1)] = max((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
__syncthreads();
// Merge size 64: XOR-63 half-cleaner, then XOR 16, 8, 4, 2, 1 passes.
(( int *)(sbase + 2048))[(tid+(tid&4294967264))] = min((( int *)sbase)[(tid+(tid&4294967264))],(( int *)sbase)[((tid+(tid&4294967264))^63)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967264))^63)] = max((( int *)sbase)[(tid+(tid&4294967264))],(( int *)sbase)[((tid+(tid&4294967264))^63)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967280))] = min((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^16)]);
(( int *)sbase)[((tid+(tid&4294967280))^16)] = max((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^16)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967288))] = min((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967288))^8)] = max((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967292))] = min((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
(( int *)sbase)[((tid+(tid&4294967292))^4)] = max((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967294))] = min((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967294))^2)] = max((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)sbase)[(tid<<1)] = min((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
(( int *)sbase)[((tid<<1)^1)] = max((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
__syncthreads();
// Merge size 128: XOR-127 half-cleaner, then XOR 32, 16, 8, 4, 2, 1.
(( int *)(sbase + 2048))[(tid+(tid&4294967232))] = min((( int *)sbase)[(tid+(tid&4294967232))],(( int *)sbase)[((tid+(tid&4294967232))^127)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967232))^127)] = max((( int *)sbase)[(tid+(tid&4294967232))],(( int *)sbase)[((tid+(tid&4294967232))^127)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967264))] = min((( int *)(sbase+2048))[(tid+(tid&4294967264))],(( int *)(sbase+2048))[((tid+(tid&4294967264))^32)]);
(( int *)sbase)[((tid+(tid&4294967264))^32)] = max((( int *)(sbase+2048))[(tid+(tid&4294967264))],(( int *)(sbase+2048))[((tid+(tid&4294967264))^32)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967280))] = min((( int *)sbase)[(tid+(tid&4294967280))],(( int *)sbase)[((tid+(tid&4294967280))^16)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967280))^16)] = max((( int *)sbase)[(tid+(tid&4294967280))],(( int *)sbase)[((tid+(tid&4294967280))^16)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967288))] = min((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^8)]);
(( int *)sbase)[((tid+(tid&4294967288))^8)] = max((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^8)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967292))] = min((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967292))^4)] = max((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967294))] = min((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
(( int *)sbase)[((tid+(tid&4294967294))^2)] = max((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid<<1)] = min((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
(( int *)(sbase + 2048))[((tid<<1)^1)] = max((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
__syncthreads();
// Merge size 256: XOR-255 half-cleaner, then XOR 64, 32, 16, 8, 4, 2, 1.
(( int *)sbase)[(tid+(tid&4294967168))] = min((( int *)(sbase+2048))[(tid+(tid&4294967168))],(( int *)(sbase+2048))[((tid+(tid&4294967168))^255)]);
(( int *)sbase)[((tid+(tid&4294967168))^255)] = max((( int *)(sbase+2048))[(tid+(tid&4294967168))],(( int *)(sbase+2048))[((tid+(tid&4294967168))^255)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967232))] = min((( int *)sbase)[(tid+(tid&4294967232))],(( int *)sbase)[((tid+(tid&4294967232))^64)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967232))^64)] = max((( int *)sbase)[(tid+(tid&4294967232))],(( int *)sbase)[((tid+(tid&4294967232))^64)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967264))] = min((( int *)(sbase+2048))[(tid+(tid&4294967264))],(( int *)(sbase+2048))[((tid+(tid&4294967264))^32)]);
(( int *)sbase)[((tid+(tid&4294967264))^32)] = max((( int *)(sbase+2048))[(tid+(tid&4294967264))],(( int *)(sbase+2048))[((tid+(tid&4294967264))^32)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967280))] = min((( int *)sbase)[(tid+(tid&4294967280))],(( int *)sbase)[((tid+(tid&4294967280))^16)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967280))^16)] = max((( int *)sbase)[(tid+(tid&4294967280))],(( int *)sbase)[((tid+(tid&4294967280))^16)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967288))] = min((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^8)]);
(( int *)sbase)[((tid+(tid&4294967288))^8)] = max((( int *)(sbase+2048))[(tid+(tid&4294967288))],(( int *)(sbase+2048))[((tid+(tid&4294967288))^8)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967292))] = min((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967292))^4)] = max((( int *)sbase)[(tid+(tid&4294967292))],(( int *)sbase)[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967294))] = min((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
(( int *)sbase)[((tid+(tid&4294967294))^2)] = max((( int *)(sbase+2048))[(tid+(tid&4294967294))],(( int *)(sbase+2048))[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid<<1)] = min((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
(( int *)(sbase + 2048))[((tid<<1)^1)] = max((( int *)sbase)[(tid<<1)],(( int *)sbase)[((tid<<1)^1)]);
__syncthreads();
// Final merge, size 512: XOR-511 half-cleaner, then XOR 128 ... 1.
// After the last pass the fully sorted tile sits in buffer A.
(( int *)sbase)[(tid+(tid&4294967040))] = min((( int *)(sbase+2048))[(tid+(tid&4294967040))],(( int *)(sbase+2048))[((tid+(tid&4294967040))^511)]);
(( int *)sbase)[((tid+(tid&4294967040))^511)] = max((( int *)(sbase+2048))[(tid+(tid&4294967040))],(( int *)(sbase+2048))[((tid+(tid&4294967040))^511)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967168))] = min((( int *)sbase)[(tid+(tid&4294967168))],(( int *)sbase)[((tid+(tid&4294967168))^128)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967168))^128)] = max((( int *)sbase)[(tid+(tid&4294967168))],(( int *)sbase)[((tid+(tid&4294967168))^128)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967232))] = min((( int *)(sbase+2048))[(tid+(tid&4294967232))],(( int *)(sbase+2048))[((tid+(tid&4294967232))^64)]);
(( int *)sbase)[((tid+(tid&4294967232))^64)] = max((( int *)(sbase+2048))[(tid+(tid&4294967232))],(( int *)(sbase+2048))[((tid+(tid&4294967232))^64)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967264))] = min((( int *)sbase)[(tid+(tid&4294967264))],(( int *)sbase)[((tid+(tid&4294967264))^32)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967264))^32)] = max((( int *)sbase)[(tid+(tid&4294967264))],(( int *)sbase)[((tid+(tid&4294967264))^32)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967280))] = min((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^16)]);
(( int *)sbase)[((tid+(tid&4294967280))^16)] = max((( int *)(sbase+2048))[(tid+(tid&4294967280))],(( int *)(sbase+2048))[((tid+(tid&4294967280))^16)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967288))] = min((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967288))^8)] = max((( int *)sbase)[(tid+(tid&4294967288))],(( int *)sbase)[((tid+(tid&4294967288))^8)]);
__syncthreads();
(( int *)sbase)[(tid+(tid&4294967292))] = min((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
(( int *)sbase)[((tid+(tid&4294967292))^4)] = max((( int *)(sbase+2048))[(tid+(tid&4294967292))],(( int *)(sbase+2048))[((tid+(tid&4294967292))^4)]);
__syncthreads();
(( int *)(sbase + 2048))[(tid+(tid&4294967294))] = min((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
(( int *)(sbase + 2048))[((tid+(tid&4294967294))^2)] = max((( int *)sbase)[(tid+(tid&4294967294))],(( int *)sbase)[((tid+(tid&4294967294))^2)]);
__syncthreads();
(( int *)sbase)[(tid<<1)] = min((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
(( int *)sbase)[((tid<<1)^1)] = max((( int *)(sbase+2048))[(tid<<1)],(( int *)(sbase+2048))[((tid<<1)^1)]);
__syncthreads();
// Write the sorted tile back to global memory, two ints per thread.
result0[((bid*512)+tid)] = (( int *)sbase)[tid];
result0[((bid*512)+(tid+256))] = (( int *)sbase)[(tid+256)];
}
|
6,188 | #include <iostream>
/**
* @brief Perform general 1-D grid, 2-D block reduce, along X-direction.
*
* @details This device function implements the reduce algorithms. The grid is
* in 1-D X-direction, i.e. `gridDim.x >= 1`,`gridDim.y == 1` and
* `gridDim.z == 1`. The block is in 2-D X- and Y-direction, i.e.
* `blockDim.x >= 1` and `blockDim.y >= 1`, `blockDim.z == 1`. To achieve higher
* parallelization, the shared memory will have size
* `blockDim.x * blockDim.y * 2`. The input array will be *divided* into smaller
* parts and assigned to each thread block. That is, each block will be assigned
 * a *sub-array* with length at most `blockDim.x * blockDim.y * 2`.
*
* @tparam DataType The type of data, which is processed.
* @tparam Operation The operation type, it is related to the lambda
* function parameter.
*
* @param[in] d_inputArr The input array, type is determined by the
* template.
*
* @param[in] inputArrLength The length of the input array.
* @param[out] d_outputArr The output array, in device memory, type is
* determined by the template. The length of it must be `gridDim.x * blockDim.y`
*
* @param[in] oper The operation performed on two elements in the
* array. It should be a function or lambda expression defined in a
* `__global__` function, which has two reference parameters. The result on
* these two parameters will be stored in the first parameter, the second
* parameter should be set to identity.
*
* @param[in] identity The identity of the operation. (The same concept
* in group theory.)
*
*/
template < class DataType, class Operation >
__device__
void
g1b2_reduce_x(
    const DataType *const d_inputArr,
    const size_t inputArrLength,
    DataType *const d_outputArr,
    const Operation &oper,
    const DataType identity
) {
    // Dynamic shared memory: blockDim.x * blockDim.y * 2 elements.
    extern __shared__ DataType sdata[];
    // Each thread owns a "left" slot and a "right" slot `initStride` apart.
    int sdataAbsIdx = threadIdx.x * blockDim.y + threadIdx.y;
    int initStride = blockDim.x * blockDim.y;
    int inputArrAbsIdx = 2 * blockIdx.x * initStride + sdataAbsIdx;
    // Stage this block's sub-array into shared memory, padding slots that
    // fall past the end of the input with the identity so the reduction
    // below needs no bounds checks.
    if (inputArrAbsIdx >= inputArrLength) {
        // Neither thread-related element exists in the input array.
        sdata[sdataAbsIdx] = identity;
        sdata[sdataAbsIdx + initStride] = identity;
    }
    else if (inputArrAbsIdx + initStride >= inputArrLength) {
        // Only the left element exists in the input array.
        sdata[sdataAbsIdx] = d_inputArr[inputArrAbsIdx];
        sdata[sdataAbsIdx + initStride] = identity;
    }
    else {
        // Both thread-related elements exist in the input array.
        sdata[sdataAbsIdx] = d_inputArr[inputArrAbsIdx];
        sdata[sdataAbsIdx + initStride]
            = d_inputArr[inputArrAbsIdx + initStride];
    }
    // All shared-memory slots must be populated before reducing.
    __syncthreads();
    // Tree-reduce along X, initial stride blockDim.x. An odd stride is
    // split as (stride >> 1) + 1 so both halves stay covered; `oper`
    // resets its right operand to the identity, which prevents double
    // counting where the halves overlap.
    //
    // BUG FIX — two defects in the original stride update:
    //   1. `stride >> 1 + 1` parses as `stride >> 2` because `+` binds
    //      tighter than `>>`, silently skipping elements for odd strides.
    //   2. The intended `(stride >> 1) + 1` maps stride 1 back to 1, so
    //      the loop would never terminate; stride 1 must map to 0.
    for (unsigned int stride = static_cast<unsigned int>(blockDim.x);
         stride > 0;
         stride = (stride == 1u) ? 0u
                : (stride & 0x1u) ? (stride >> 1) + 1u
                                  : stride >> 1) {
        // Only the low-X half of the threads combine in this pass.
        if (threadIdx.x < stride)
            oper(sdata[sdataAbsIdx], sdata[sdataAbsIdx + stride * blockDim.y]);
        // Barrier sits outside the if: every thread must reach it.
        __syncthreads();
    }
    // After the last pass, the threadIdx.x == 0 column of shared memory
    // holds the per-row results; write those rows to the output array.
    if (0 == threadIdx.x)
        d_outputArr[blockIdx.x * blockDim.y + threadIdx.y] = sdata[sdataAbsIdx];
}
#define HIST_WIDTH 128
#define HIST_NUM 8
#define BLOCK_NUM 512
#define DATA_LENGTH (2 * HIST_WIDTH * HIST_NUM * BLOCK_NUM + HIST_WIDTH * 3)
#define RESULT_LENGTH HIST_WIDTH * (BLOCK_NUM + 1)
// Kernel wrapper: reduces d_inputArr by summation using g1b2_reduce_x.
// The output has one partial sum per (block, blockDim.y row).
__global__
void
add_oper(
    const int *const d_inputArr,
    const size_t inputArrLength,
    int *const d_outputArr
) {
    // Identity and lambda of the addition operation. The lambda resets its
    // right operand to the identity so overlapping reduce halves are not
    // double counted.
    const int identity = 0;
    auto oper = [](int &l, int &r) -> void { l += r; r = 0; };
    // BUG FIX: forward the kernel's own `inputArrLength` argument instead
    // of the hard-coded DATA_LENGTH macro, so the kernel honors whatever
    // length the caller passes (the existing caller passes DATA_LENGTH,
    // so behavior there is unchanged).
    g1b2_reduce_x(d_inputArr, inputArrLength, d_outputArr, oper, identity);
}
// Driver: fills a large array with idx % HIST_WIDTH, reduces it on the GPU
// with add_oper, and prints the last 512 partial sums.
int main() {
    // Shared memory size: two staging buffers of blockDim.x*blockDim.y ints.
    int sharedBytes = 2 * sizeof(int) * HIST_WIDTH * HIST_NUM;
    // Input array's size (in bytes).
    int dataBytes = sizeof(int) * DATA_LENGTH;
    // Result array's size (in bytes).
    int resultBytes = sizeof(int) * RESULT_LENGTH;
    // Host arrays. BUG FIX: these were automatic (stack) arrays, but
    // DATA_LENGTH is over a million ints (~4 MB), enough to overflow a
    // typical default stack; `static` moves them to static storage.
    static int h_inputArr[DATA_LENGTH];
    static int h_outputArr[RESULT_LENGTH];
    // Initialization of input array.
    for (int idx = 0; idx < DATA_LENGTH; ++idx)
        h_inputArr[idx] = idx % HIST_WIDTH;
    // Each block consumes 2 * HIST_NUM * HIST_WIDTH input elements, so
    // BLOCK_NUM + 1 blocks are needed to also cover the HIST_WIDTH * 3
    // tail. FIX: the original declared gridSize as BLOCK_NUM but launched
    // with a hard-coded BLOCK_NUM + 1; the launch count was the correct
    // one, so the named constant now matches it and is actually used.
    const dim3 gridSize(BLOCK_NUM + 1);
    const dim3 blockSize(HIST_NUM, HIST_WIDTH);
    // Allocating device memory
    int *d_inputArr, *d_outputArr;
    int ret = cudaMalloc(&d_inputArr, dataBytes);
    if (cudaSuccess != ret)
        std::cout << "Cannot allocate d_inputArr, ret: " << ret << std::endl;
    ret = cudaMalloc(&d_outputArr, resultBytes);
    if (cudaSuccess != ret)
        std::cout << "Cannot allocate d_outputArr, ret: " << ret << std::endl;
    // Copy host array to device
    ret = cudaMemcpy(d_inputArr, h_inputArr, dataBytes, cudaMemcpyHostToDevice);
    if (cudaSuccess != ret)
        std::cout << "Cannot memcpy to device, ret: " << ret << std::endl;
    add_oper<<< gridSize, blockSize, sharedBytes >>>(
        d_inputArr, DATA_LENGTH, d_outputArr);
    // Kernel launches return no status; query the launch error explicitly.
    ret = cudaGetLastError();
    if (cudaSuccess != ret)
        std::cout << "Kernel launch failed, ret: " << ret << std::endl;
    ret = cudaMemcpy(h_outputArr, d_outputArr, resultBytes,
                     cudaMemcpyDeviceToHost);
    if (cudaSuccess != ret)
        std::cout << "Cannot memcpy to host, ret: " << ret << std::endl;
    for (int idx = RESULT_LENGTH - 512; idx < RESULT_LENGTH; ++idx)
        std::cout << h_outputArr[idx] << " ";
    std::cout << std::endl;
    cudaFree(d_inputArr);
    cudaFree(d_outputArr);
    return 0;
}
|
6,189 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
// 0,0 0,1 0,2
// 1,0 1,1 1,2
// => 0, 1, 2, 3, 4, 5
// => numberOfColumns * currentRow + currentColumn
// C = A * B for row-major matrices: A is m x n, B is n x n, C is m x n.
// One thread computes one element of C. C must arrive zero-initialized,
// because the products are accumulated with +=.
__global__ void matrixmult(float* Cptr, float* Aptr, float* Bptr, int m, int n) {
	// Flat index of this thread's output element.
	int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
	if (outIdx >= n * m) return;          // guard the grid tail
	// Decompose the flat index into row/column of C.
	int row = outIdx / n;
	int col = outIdx % n;
	// Dot product of row `row` of A with column `col` of B.
	for (int j = 0; j < n; j++) {
		Cptr[outIdx] += Aptr[n * row + j] * Bptr[n * j + col];
	}
}
// Allocate an m x n row-major matrix filled with pseudo-random integer
// values 0..9 (stored as floats). The incoming pointer is ignored — the
// caller receives, and owns, a freshly new[]-allocated buffer.
float* createRandomMatrix(float *matrix, int m, int n) {
	matrix = new float[m * n];
	const int total = m * n;
	// Row-major fill order matches a flat left-to-right sweep.
	for (int idx = 0; idx < total; ++idx) {
		matrix[idx] = static_cast<float>(rand() % 10);
	}
	return matrix;
}
// Allocate an m x n row-major matrix with every entry set to 0.0f.
// The incoming pointer is ignored; the caller owns the returned buffer.
float* createEmptyMatrix(float* matrix, int m, int n) {
	// Value-initialization zero-fills the whole buffer.
	matrix = new float[m * n]();
	return matrix;
}
// Print an m x n row-major matrix to stdout: one row per line, entries
// separated by a trailing space, followed by a blank line.
void print(float* matrix, int m, int n) {
	for (int row = 0; row < m; row++) {
		for (int col = 0; col < n; col++)
			std::cout << matrix[n * row + col] << " ";
		std::cout << "\n";
	}
	std::cout << "\n";
}
// Release a matrix allocated with new[] by createRandomMatrix /
// createEmptyMatrix. Must not be used on malloc'd memory.
void deleteMatrix(float* matrix) {
delete[] matrix;
}
// Multiplies two random 1440x1440 matrices on the GPU and reports the
// kernel duration.
int main() {
	int m = 1440;
	int n = 1440;
	int block_size = 512;
	// Host matrices. BUG FIX: the original malloc'd m*n BYTES (missing
	// sizeof(float)) and then leaked those buffers, because the create*
	// helpers ignore the passed pointer and return their own new[]
	// allocation (which is what the delete[] calls below correctly free).
	// Starting from nullptr removes both the leak and the malloc/delete[]
	// mismatch.
	float* matrixA = createRandomMatrix(nullptr, m, n);
	float* matrixB = createRandomMatrix(nullptr, m, n);
	float* h_matrixC = createEmptyMatrix(nullptr, m, n);
	float* d_matrixA;
	float* d_matrixB;
	float* d_matrixC;
	// Allocate space for device copies in device memory
	cudaMalloc(&d_matrixA, (m * n) * sizeof(float));
	cudaMalloc(&d_matrixB, (m * n) * sizeof(float));
	cudaMalloc(&d_matrixC, (m * n) * sizeof(float));
	//print(matrixA, m, n);
	//print(matrixB, m, n);
	cudaMemcpy(d_matrixA, matrixA, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_matrixB, matrixB, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
	// C must start zeroed on the device: the kernel accumulates with +=.
	cudaMemcpy(d_matrixC, h_matrixC, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
	// Ceil-division so the grid covers all m*n output elements.
	int Blocks = ((n*m) + block_size - 1) / block_size;
	std::cout << "[+] Calculation started with " << (Blocks * block_size) << " Threads";
	auto start = high_resolution_clock::now();
	// Run Kernel on GPU
	matrixmult <<<Blocks, block_size >>> (d_matrixC, d_matrixA, d_matrixB, m, n);
	// Wait for GPU to finish
	cudaDeviceSynchronize();
	auto stop = high_resolution_clock::now();
	cudaMemcpy(h_matrixC, d_matrixC, (m * n) * sizeof(float), cudaMemcpyDeviceToHost);
	std::cout << "\n[+] Multithreaded calculation finished \n[+] Duration: " << duration<double>(stop - start).count() << " seconds";
	/*print(h_matrixC, m, n);*/
	// Free memory
	cudaFree(d_matrixA);
	cudaFree(d_matrixB);
	cudaFree(d_matrixC);
	delete[] matrixA;
	delete[] matrixB;
	delete[] h_matrixC;
}
6,190 | #include "includes.h"
// Finalize the fully-connected layer's weight gradient: divide each entry
// by the batch size and, unless lambda is numerically zero (< 1e-10 in
// magnitude), add the L2-regularization term lambda * w.
// Grid-stride loop: any launch configuration covers all `len` entries.
__global__ void g_FullConnectWgrad(float* wgrad, float* w, int len, float lambda, int batch)
{
    const int stride = blockDim.x * gridDim.x;
    for (int id = blockDim.x * blockIdx.x + threadIdx.x; id < len; id += stride)
    {
        float g = wgrad[id] / batch;
        if (fabs(lambda) >= 1e-10)
            g = g + lambda * w[id];
        wgrad[id] = g;
    }
}
6,191 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N (1<<24)
#define THREADS_PER_BLOCK 512
#define BLOCK_NUM (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK // 1<<15 block
void random_floats(float *x, int Num);
__global__ void kernel1(float *a, float *b, float *out, int n);
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n);
// Dot-product benchmark over N random floats: kernel1 produces per-block
// partial sums that the CPU then adds up; kernel2 finishes the reduction
// on the GPU with one atomicAdd per block. Each kernel is timed with a
// cudaEvent pair and both results plus timings are printed.
int main(void){
printf("N: %d, block num: %d\n", N, BLOCK_NUM);
// initialization
float *a, *b, *reduce, *sum;
a = (float *)malloc(N * sizeof(float));
random_floats(a, N);
b = (float *)malloc(N * sizeof(float));
random_floats(b, N);
reduce = (float *)malloc(BLOCK_NUM * sizeof(float));
sum = (float *)malloc(sizeof(float));
*sum = 0;
// cudaEvent initialization
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// create space on gpu side
float *da, *db, *dreduce, *dsum;
cudaMalloc((void **)&da, N * sizeof(float));
cudaMalloc((void **)&db, N * sizeof(float));
cudaMalloc((void **)&dreduce, BLOCK_NUM * sizeof(float));
cudaMalloc((void **)&dsum, sizeof(float));
// copy from cpu to gpu
cudaMemcpy(da, a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(db, b, N*sizeof(float), cudaMemcpyHostToDevice);
// dsum must start at zero: kernel2 accumulates into it with atomicAdd.
cudaMemcpy(dsum, sum, sizeof(float), cudaMemcpyHostToDevice);
// kernel1: shared memory + parallel reduction
cudaEventRecord(start);
kernel1<<<BLOCK_NUM, THREADS_PER_BLOCK>>>(da, db, dreduce, N);
cudaEventRecord(stop);
//// copy back to cpu
// (this blocking cudaMemcpy also waits for the kernel to finish)
cudaMemcpy(reduce, dreduce, BLOCK_NUM*sizeof(float), cudaMemcpyDeviceToHost);
//// add up all elements in reduce
*sum = 0;
for (int i = 0; i < BLOCK_NUM; i++)
*sum += reduce[i];
printf("result from Kernel1 with sum on CPU side: %f\n", *sum);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel 1 execution time: %f milliseconds\n\n", milliseconds);
// kernel2: shared memory + parallel reduction + atomic operation
// (the start/stop events are reused; re-recording overwrites them)
cudaEventRecord(start);
kernel2WithAtomicOp<<<BLOCK_NUM, THREADS_PER_BLOCK>>>(da, db, dsum, N);
cudaEventRecord(stop);
//// copy back to cpu
cudaMemcpy(sum, dsum, sizeof(float), cudaMemcpyDeviceToHost);
printf("result from Kernel2 with sum on GPU side: %f\n", *sum);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel 2 execution time: %f milliseconds\n\n", milliseconds);
// NOTE(review): CUDA API return codes are not checked anywhere here.
free(a); free(b); free(reduce); free(sum);
cudaFree(da); cudaFree(db); cudaFree(dreduce); cudaFree(dsum);
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
}
// Fill x[0..Num-1] with pseudo-random floats in [0, 1].
void random_floats(float *x, int Num)
{
	for (int idx = 0; idx < Num; ++idx)
		x[idx] = (float)rand() / RAND_MAX;
}
// Per-block partial dot product: each thread multiplies one a[i]*b[i] pair
// into shared memory, the block tree-reduces the products (interleaved
// addressing, stride doubling each pass), and thread 0 writes the block's
// partial sum to out[blockIdx.x]. The reduction loop assumes blockDim.x is
// a power of two (it is launched with THREADS_PER_BLOCK = 512).
__global__ void kernel1(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Zero first so threads past n contribute the additive identity.
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
// Barrier is outside the if: every thread must reach it each pass.
__syncthreads();
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
// Same block-level reduction as kernel1, but instead of writing per-block
// partials, thread 0 of each block folds its partial sum into the single
// scalar *out via atomicAdd — so *out must be zeroed before launch, and
// the final float result can vary slightly run-to-run with the order in
// which blocks' atomics land.
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Zero first so threads past n contribute the additive identity.
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
// Barrier is outside the if: every thread must reach it each pass.
__syncthreads();
}
if (tid == 0)
atomicAdd(out, sdata[0]);
}
|
6,192 | #include "includes.h"
// Re-bin each element of vec and keep per-bin population counters in sync.
// One thread per element. Values at or below intercept*1e-6 (in magnitude)
// land in the out-of-range bin `num_bins`, which is never counted.
__global__ void update_bins(float *vec, int *bin, int *bin_counters, const int num_bins, const int n, const float slope, const float intercept)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if ( xIndex < n ){
        int bin_new_val;
        float temp = abs(vec[xIndex]);
        if ( temp > (intercept *.000001) ){
            // Linear map of the magnitude onto a bin index (truncated to int).
            bin_new_val = slope * (intercept - temp);
        }
        else bin_new_val = num_bins;
        if ( bin[xIndex] != bin_new_val ){
            // The element moved: decrement its old bin's counter ...
            if (bin[xIndex] < num_bins)
                atomicAdd(bin_counters+bin[xIndex],-1);
            // ... and increment the NEW bin's counter. BUG FIX: the
            // original incremented bin_counters[bin[xIndex]] (the old bin)
            // here, so the counters drifted instead of tracking the move.
            if ( bin_new_val < num_bins )
                atomicAdd(bin_counters+bin_new_val,1);
            bin[xIndex] = bin_new_val;
        }
    }
}
6,193 | #include "includes.h"
// Per-thread Gram-Schmidt orthogonalization of one diagonal sub-block's
// eigenvectors. Each thread handles the sub-block whose size/offset it
// reads from blocksizes/blocknums, orthogonalizes up to `size` vectors
// into the Qi_gdof scratch area (skipping near-linearly-dependent ones
// whose residual norm falls below 0.05), and copies the survivors back
// into the big eigvec array.
// NOTE(review): despite the name, `blockNum` is a global thread id; there
// is no bounds check against the number of sub-blocks, so the launch must
// cover exactly that many threads — confirm at call sites.
__global__ void orthogonalize( float *eigvec, float *Qi_gdof, int cdof, int *blocksizes, int *blocknums, int largestblock ) {
int blockNum = blockIdx.x * blockDim.x + threadIdx.x;
// orthogonalize original eigenvectors against gdof
// number of evec that survive orthogonalization
// (starts at 6 — presumably reserving six modes, e.g. rigid-body; confirm)
int curr_evec = 6;
int size = blocksizes[blockNum];
int startatom = blocknums[blockNum] / 3;
for( int j = 0; j < size; j++ ) { // <-- vector we're orthogonalizing
// to match ProtoMol we only include size instead of size + cdof vectors
// Note: for every vector that is skipped due to a low norm,
// we add an additional vector to replace it, so we could actually
// use all size original eigenvectors
if( curr_evec == size ) {
break;
}
// orthogonalize original eigenvectors in order from smallest magnitude
// eigenvalue to biggest
// TMC The eigenvectors are sorted now
//int col = sortedPairs.at( j ).second;
// copy original vector to Qi_gdof -- updated in place
for( int l = 0; l < size; l++ ) {
//Qi_gdof[blockNum*6*largestblock+l*6+curr_evec] = eigvec[blocknums[blockNum]+l][j];
Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] = eigvec[( blocknums[blockNum] + l ) * largestblock + j];
}
// get dot products with previous vectors
for( int k = 0; k < curr_evec; k++ ) { // <-- vector orthog against
// dot product between original vector and previously
// orthogonalized vectors
double dot_prod = 0.0;
for( int l = 0; l < size; l++ ) {
//dot_prod += Qi_gdof[blockNum*6*largestblock+l*6+k] * eigvec[blocknums[blockNum]+l][j];
dot_prod += Qi_gdof[blockNum * 6 * largestblock + l * 6 + k] * eigvec[( blocknums[blockNum] + l ) * largestblock + j];
}
// subtract from current vector -- update in place
for( int l = 0; l < size; l++ ) {
Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] = Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] - Qi_gdof[blockNum * 6 * largestblock + l * 6 + k] * dot_prod;
}
}
//normalize residual vector
double norm = 0.0;
for( int l = 0; l < size; l++ ) {
norm += Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] * Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec];
}
// if norm less than 1/20th of original
// continue on to next vector
// we don't update curr_evec so this vector
// will be overwritten
if( norm < 0.05 ) {
continue;
}
// scale vector
norm = sqrt( norm );
for( int l = 0; l < size; l++ ) {
Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] = Qi_gdof[blockNum * 6 * largestblock + l * 6 + curr_evec] / norm;
}
curr_evec++;
}
// 4. Copy eigenpairs to big array
// This is necessary because we have to sort them, and determine
// the cutoff eigenvalue for everybody.
// we assume curr_evec <= size
for( int j = 0; j < curr_evec; j++ ) {
//eval[startatom + j] = di[col]; No longer necessary
// orthogonalized eigenvectors already sorted by eigenvalue
for( int k = 0; k < size; k++ ) {
//eigvec[startatom + k][startatom + j] = Qi_gdof[blockNum*6*largestblock+k*6+j];
eigvec[( startatom + k )*largestblock + ( startatom + j )] = Qi_gdof[blockNum * 6 * largestblock + k * 6 + j];
}
}
}
6,194 | #include "includes.h"
/** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <chrischoy@ai.stanford.edu> 12/23/2016
*/
// Includes
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
/**
* Computes the square root of the first line (width-th first element)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
/**
 * Extracts per-coordinate feature vectors from `data` using bilinear
 * interpolation, one coordinate per thread iteration (grid-stride loop).
 *
 * @param nthreads        total number of coordinates = batch * n_max_coord
 * @param data            feature maps laid out [batch][channels][height][width]
 * @param n_xy_coords     interleaved (x, y) sample positions, length 2*nthreads
 * @param extracted_data  output laid out [batch][channels][n_max_coord]
 * @param n_max_coord     coordinates per batch element
 * @param channels        number of feature channels
 * @param height          feature-map height
 * @param width           feature-map width
 */
__global__ void extract_with_interpolation( int nthreads, float *data, float *n_xy_coords, float *extracted_data, int n_max_coord, int channels, int height, int width) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nthreads; idx += stride) {
    const int batch = idx / n_max_coord;                 // which batch element this coordinate belongs to
    const int out_base = batch * n_max_coord * channels; // start of this element's output region
    const float sx = n_xy_coords[idx * 2];
    const float sy = n_xy_coords[idx * 2 + 1];
    // Integer corners of the interpolation cell.
    int xl = static_cast<int>(floor(sx));
    int xr = xl + 1;
    int yt = static_cast<int>(floor(sy));
    int yb = yt + 1;
    // Clamp all four corners into the valid image range.
    xl = xl <= 0 ? 0 : (xl >= (width - 1) ? (width - 1) : xl);
    yt = yt <= 0 ? 0 : (yt >= (height - 1) ? (height - 1) : yt);
    xr = xr <= 0 ? 0 : (xr >= (width - 1) ? (width - 1) : xr);
    yb = yb <= 0 ? 0 : (yb >= (height - 1) ? (height - 1) : yb);
    // Bilinear weights, computed from the clamped corners (as before).
    float wl = static_cast<float>(xr) - sx;
    float wr = sx - xl;
    float wt = static_cast<float>(yb) - sy;
    float wb = sy - yt;
    // Degenerate cells after clamping collapse to a single sample.
    if (xl == xr) { wl = 1; wr = 0; }
    if (yt == yb) { wt = 1; wb = 0; }
    for (int c = 0; c < channels; c++) {
      const int plane = (batch * channels + c) * height;  // row offset of channel c's plane
      extracted_data[out_base + idx % n_max_coord + n_max_coord * c] =
            wt * wl * data[(plane + yt) * width + xl]
          + wb * wl * data[(plane + yb) * width + xl]
          + wt * wr * data[(plane + yt) * width + xr]
          + wb * wr * data[(plane + yb) * width + xr];
    }
  }
}
6,195 | #include<cuda_runtime.h>
#include<stdio.h>
#include<iostream>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
using namespace std;
/// Binary functor for SAXPY: with scalar `a` fixed at construction,
/// maps (x, y) -> a * x + y. Callable on both host and device, so it
/// can be handed to thrust::transform.
struct saxpy_functor
{
    const float a;  // scalar coefficient, immutable after construction

    saxpy_functor(float scale) : a(scale) {}

    __host__ __device__
    float operator()(const float& xi, const float& yi) const
    {
        return a * xi + yi;
    }
};
/** In-place SAXPY on device vectors: Y <- a * X + Y.
 *  A single thrust::transform fuses the multiply and add into one pass. */
void saxpy_fast(float a , thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
    saxpy_functor op(a);
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), op);
}
// Demo driver: fills a vector with 0..N-1, negates the first half on the
// device, then applies SAXPY with a = 2 in place (Y <- 2*Y + Y = 3*Y),
// printing the vector before and after.
int main(){
    printf("Ready to test thrust\n");
    const int N = 10;
    thrust::host_vector<float> host_vals(N);
    for(int i = 0; i < N; i++)
        host_vals[i] = i;
    // Copy to the device (one H2D transfer).
    thrust::device_vector<float> dev_vals = host_vals;
    // Negate the first half element-by-element; each access is a tiny
    // host<->device round-trip, which is fine for a demo of this size.
    for(int i = 0; i < N / 2; i++)
        dev_vals[i] = dev_vals[i] * -1;
    for(int i = 0; i < N; i++){
        std::cout<<dev_vals[i]<<' ';
    }
    std::cout<<endl;
    saxpy_fast(2.0, dev_vals, dev_vals);
    for(int i = 0; i < N; i++){
        std::cout<<dev_vals[i]<<' ';
    }
    std::cout<<endl;
    return 0;
}
6,196 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <stdio.h>
#include <iostream>
#include <math.h>
using namespace std;
/**
 * Naive square matrix multiply: c = a * b for P x P row-major int
 * matrices. One thread computes one output element; threads that fall
 * outside the matrix exit without touching memory.
 */
__global__ void MulKernel(int *c, const int *a, const int *b, const int P)
{
	const int r = blockIdx.y * blockDim.y + threadIdx.y;
	const int q = blockIdx.x * blockDim.x + threadIdx.x;
	if (r >= P || q >= P)
		return;  // guard for partial tiles at the grid edge
	int acc = 0; // dot product of row r of a with column q of b
	for (int i = 0; i < P; i++)
		acc += a[r * P + i] * b[i * P + q];
	c[r * P + q] = acc;
}
/**
 * Driver: multiplies two random 16x16 integer matrices on the GPU and
 * prints a few result elements.
 *
 * Fixes over the previous revision: the host buffers allocated with
 * new[] are now released, and the kernel launch is checked with
 * cudaGetLastError() so configuration errors are no longer silent.
 */
int main()
{
	srand(time(NULL));
	int N = 16;        // matrix dimension (N x N)
	int SIZE = N*N;
	// Host buffers.
	int *h_a = new int[SIZE];
	int *h_b = new int[SIZE];
	int *h_c = new int[SIZE];
	for (int i = 0; i < SIZE; i++) {
		h_a[i] = rand() % 1000;
		h_b[i] = rand() % 1000;
	}
	cout << "First values " << h_a[0] << " " << h_b[0] << endl;
	// Device buffers.
	int *d_a, *d_b, *d_c;
	cudaMalloc(&d_a, sizeof(int)*SIZE);
	cudaMalloc(&d_b, sizeof(int)*SIZE);
	cudaMalloc(&d_c, sizeof(int)*SIZE);
	cout << "Second values " << h_a[0] << " " << h_b[0] << endl;
	cudaMemcpy(d_a, h_a, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
	cout << "Third values " << h_a[0] <<" "<< h_b[0] << endl;
	// One block of N x N threads covers the whole matrix (N=16 -> 256 threads).
	MulKernel <<<1, dim3(N,N) >>>(d_c, d_a, d_b, N);
	cudaError_t err = cudaGetLastError();  // catch launch-configuration errors
	if (err != cudaSuccess) {
		cout << "Kernel launch failed: " << cudaGetErrorString(err) << endl;
	}
	// Blocking copies also synchronize with the kernel.
	cudaMemcpy(h_c, d_c, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
	cudaMemcpy(h_a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
	cudaMemcpy(h_b, d_b, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
	for (int i = 0; i < 5; i++){
		cout << h_c[i] << "=" << h_a[i] << h_b[i] << endl;
	}
	cout << h_c[1] << endl;
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	// Release host buffers (previously leaked).
	delete[] h_a;
	delete[] h_b;
	delete[] h_c;
	return 0;
}
6,197 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: each thread prints its index within the block together
// with its block index (device printf is serialized; debugging only).
__global__ void print_my_index()
{
	const int thread_in_block = threadIdx.x;
	const int block_in_grid = blockIdx.x;
	printf("my id :%d , block_id :%d \n", thread_in_block, block_in_grid);
}
//int main()
//{
// printf("hello from main \n");
// print_my_index << <2, 10 >> > ();
// cudaDeviceSynchronize();
// return 0;
//} |
6,198 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__
void mul(float *d_A, float *d_B, float *d_C, int n);
void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n);
/**
 * Reads a matrix dimension n, builds two random n x n matrices, and runs
 * the GPU multiply (with CPU verification) in matMul().
 *
 * Fixes over the previous revision: the scanf result and the value of n
 * are validated (n was previously used uninitialized on input failure),
 * and the host matrices are freed before exit (previously leaked).
 */
int main()
{
    int n;
    int i, j;
    float **h_Mat1, **h_Mat2, **h_Mat3;
    printf("Enter the dimension of square matrix, n for n X n: ");
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "Invalid matrix dimension.\n");
        return EXIT_FAILURE;
    }
    /* Allocate the three n x n host matrices as arrays of row pointers. */
    h_Mat1 = (float **) malloc(n * sizeof(float *));
    h_Mat2 = (float **) malloc(n * sizeof(float *));
    h_Mat3 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i)
    {
        h_Mat1[i] = (float *) malloc(n * sizeof(float));
        h_Mat2[i] = (float *) malloc(n * sizeof(float));
        h_Mat3[i] = (float *) malloc(n * sizeof(float));
    }
    srand(time(0));
    for (i = 0; i < n; ++i)
    {
        for (j = 0; j < n; ++j)
        {
            h_Mat1[i][j] = rand() % 1000;
            h_Mat2[i][j] = rand() % 1000;
        }
    }
    matMul(h_Mat1, h_Mat2, h_Mat3, n);
    /* Release the host matrices (previously leaked). */
    for (i = 0; i < n; ++i)
    {
        free(h_Mat1[i]);
        free(h_Mat2[i]);
        free(h_Mat3[i]);
    }
    free(h_Mat1);
    free(h_Mat2);
    free(h_Mat3);
    return 0;
}
/* d_C = d_A * d_B for n x n row-major float matrices; one thread per
 * output element. Threads outside the matrix return immediately. */
__global__
void mul(float *d_A, float *d_B, float *d_C, int n)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)
    {
        return;
    }
    /* Accumulate the dot product in a register, then store once. */
    float acc = 0;
    for (int t = 0; t < n; ++t)
    {
        acc += d_A[row * n + t] * d_B[t * n + col];
    }
    d_C[row * n + col] = acc;
    return;
}
/**
 * Multiplies h_Mat1 * h_Mat2 on the GPU, verifies the result against a
 * CPU reference, and stores the product in h_Mat3.
 *
 * Fixes over the previous revision:
 *  - the kernel was launched as mul<<<block, grid>>> (grid and block
 *    swapped); for larger n the block dimension would exceed the
 *    per-block thread limit and the launch would fail. It is now
 *    mul<<<grid, block>>>.
 *  - the flattened host buffers h_A/h_B/h_C are now freed.
 */
void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n)
{
    int size = n * n * sizeof(float);
    int i, j, k;
    float *h_A, *h_B, *h_C;
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    cudaError_t err = cudaSuccess;
    /* Flatten the row-pointer matrices into contiguous buffers. */
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);
    for (i = 0; i < n; ++i)
    {
        for (j = 0; j < n; ++j)
        {
            h_A[i * n + j] = h_Mat1[i][j];
            h_B[i * n + j] = h_Mat2[i][j];
        }
    }
    err = cudaMalloc((void **) &d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **) &d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Launching CUDA mul kernel with (%d, %d, %d) blocks and (%d, %d, %d) threads per block.\n", (n + 15) / 16, (n + 15) / 16, 1, 16, 16, 1);
    dim3 grid((n + 15) / 16, (n + 15) / 16, 1);
    dim3 block(16, 16, 1);
    /* Execution configuration is <<<grid, block>>>; the previous code
     * passed them in the wrong order. */
    mul<<<grid, block>>>(d_A, d_B, d_C, n);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch mul kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    /* CPU reference multiply and element-wise comparison. */
    for (i = 0; i < n; ++i)
    {
        for (j = 0; j < n; ++j)
        {
            h_Mat3[i][j] = 0;
            for (k = 0; k < n; ++k)
            {
                h_Mat3[i][j] += h_Mat1[i][k] * h_Mat2[k][j];
            }
            if (fabs(h_C[i * n + j] - h_Mat3[i][j]) > 1e-5)
            {
                fprintf(stderr, "Result verification failed at element (%d, %d)!\n", i, j);
                exit(EXIT_FAILURE);
            }
            h_Mat3[i][j] = h_C[i * n + j];
        }
    }
    /* Release the flattened host buffers (previously leaked). */
    free(h_A);
    free(h_B);
    free(h_C);
    printf("TEST PASSED\n");
    return;
}
|
6,199 | #include "includes.h"
/* Converts an interleaved RGB image to single-channel grey using the
 * luminance weights 0.21 R + 0.71 G + 0.07 B. One thread per pixel;
 * threads outside the image do nothing. The weighted sum is truncated
 * by the implicit float -> unsigned char conversion, as before. */
__global__ void rgbToGreyKernel(int height,int width ,unsigned char *input_img, unsigned char *output_img)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (y < height && x < width)
	{
		const int pixel = y*width + x;
		const float red   = (float)input_img[3*pixel];
		const float green = (float)input_img[3*pixel+1];
		const float blue  = (float)input_img[3*pixel+2];
		output_img[pixel] = 0.21*red + 0.71*green + 0.07*blue;
	}
}
6,200 | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
//Good Reference: http://developer.download.nvidia.com/compute/cuda/3_2_prod/toolkit/docs/CUDA_C_Programming_Guide.pdf
//Resource for multiply: https://github.com/sashasyedin/matrix-multiplication-with-cuda
#include <cstdlib>
#include <iostream>
#include <time.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Host-side copy of the 12x12 linearized vehicle dynamics matrix A.
// NOTE(review): the device kernels below (fill2D_specific,
// predicted_vehicle_state) re-declare identical local copies of this
// table; this host global is not referenced by any visible code.
double Afull[12][12] = {{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9988, -0.0009, 0.0493, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 3.3325, 0.0000, -67.5899, 0.0000, -0.9998, -0.0175, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000, 67.5899, 0.0000, 0.0493, 0.0175, -0.9986, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000, -0.0009, 0.0494},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9998, 0.0175},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.0176, 1.0011},
{-0.0000, -0.0000, 0.0002, -1.7910, -28.7478, 0.0000, -0.0156, -0.0014, 0.0264, -1.0130, 1.1241, -0.4113},
{0.0000, 0.0000, 0.0001, 33.1424, -0.3921, -0.0000, 0.0079, -0.1015, 0.0165, -1.5512, -1.5106, -65.9068},
{-0.0000, -0.0000, -0.0033, -2.6711, 1.7629, -0.0000, -0.1352, 0.0043, -0.6177, 8.8359, 68.2588, 2.0707},
{0.0000, 0.0000, 0.0002, -0.7252, -0.6595, -0.0000, 0.0058, -0.0292, 0.0424, -7.1172, -1.6572, 0.1983},
{-0.0000, -0.0000, 0.0000, 0.0642, -0.7286, 0.0000, 0.0019, 0.0059, -0.0013, 0.0097, -1.5146, -0.0941},
{-0.0000, -0.0000, 0.0000, -0.8489, 0.0071, 0.0000, -0.0036, 0.0167, -0.0027, -0.1995, -0.0263, -0.6115}};
// Host-side copy of the 12x4 control-input matrix B (first six rows are
// zero). Same situation as Afull: duplicated inside the kernels and not
// referenced by any visible host code.
double Bfull[12][4] = {{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{-0.1577, -0.0046, 0.0529, 0.0601},
{0.0197, 0.0977, -0.0752, 0.0082},
{-0.1559, 0.0106, 0.0865, -0.8894},
{0.0307, 0.1185, -0.0356, 0.0067},
{0.0339, 0.0007, -0.0022, 0.0024},
{-0.0004, 0.0037, 0.0222, 0.0058}};
/**
 * Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
 *
 * Previously every launched thread iterated over the entire array, so
 * any launch with more than one thread raced on y[]. The grid-stride
 * loop assigns each element to exactly one thread and degenerates to
 * the original sequential loop for a <<<1,1>>> launch.
 */
__global__
void add(int n, float *x, float *y)
{
  int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    y[i] = x[i] + y[i];
}
/**
 * Sets every element of `matrix` (length `length`) to `num`.
 *
 * Previously each launched thread wrote the entire array (redundant
 * concurrent writes of the same value). The grid-stride loop gives each
 * element to one thread, producing the same contents with any launch
 * configuration.
 */
__global__
void fill1D(int length, double matrix[], double num) {
  int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += stride) {
    matrix[i] = num;
  }
}
// NOTE(review): despite its name, this kernel only *reads* each element
// of the pitched 2D allocation into a local that is never used -- it
// performs no writes and has no observable effect (the compiler is free
// to remove the loop entirely). Kept as-is pending clarification of the
// intended behavior.
__global__
void fill2D(double* devPtr, size_t pitch, int width, int height) {
// Pitched addressing: row r starts at devPtr + r * pitch bytes.
for (int r = 0; r < height; ++r) {
double * row = (double*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; ++c) {
double element = row[c];
// printf("%f", row[c]);
}
}
}
/**
 * Copies one of two hard-coded constant tables into a pitched 2D device
 * allocation: the 12x12 dynamics matrix when width == 12, otherwise the
 * 12x4 input matrix. Every launched thread performs the same full copy,
 * so the result is identical to a single-thread copy.
 */
__global__
void fill2D_specific(double* devPtr, size_t pitch, int width, int height) {
  double Afull[12][12] = {{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9988, -0.0009, 0.0493, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 3.3325, 0.0000, -67.5899, 0.0000, -0.9998, -0.0175, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000, 67.5899, 0.0000, 0.0493, 0.0175, -0.9986, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000, -0.0009, 0.0494},
  {0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9998, 0.0175},
  {0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.0176, 1.0011},
  {-0.0000, -0.0000, 0.0002, -1.7910, -28.7478, 0.0000, -0.0156, -0.0014, 0.0264, -1.0130, 1.1241, -0.4113},
  {0.0000, 0.0000, 0.0001, 33.1424, -0.3921, -0.0000, 0.0079, -0.1015, 0.0165, -1.5512, -1.5106, -65.9068},
  {-0.0000, -0.0000, -0.0033, -2.6711, 1.7629, -0.0000, -0.1352, 0.0043, -0.6177, 8.8359, 68.2588, 2.0707},
  {0.0000, 0.0000, 0.0002, -0.7252, -0.6595, -0.0000, 0.0058, -0.0292, 0.0424, -7.1172, -1.6572, 0.1983},
  {-0.0000, -0.0000, 0.0000, 0.0642, -0.7286, 0.0000, 0.0019, 0.0059, -0.0013, 0.0097, -1.5146, -0.0941},
  {-0.0000, -0.0000, 0.0000, -0.8489, 0.0071, 0.0000, -0.0036, 0.0167, -0.0027, -0.1995, -0.0263, -0.6115}};
  double Bfull[12][4] = {{0.0000, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000},
  {0.0000, 0.0000, 0.0000, 0.0000},
  {-0.1577, -0.0046, 0.0529, 0.0601},
  {0.0197, 0.0977, -0.0752, 0.0082},
  {-0.1559, 0.0106, 0.0865, -0.8894},
  {0.0307, 0.1185, -0.0356, 0.0067},
  {0.0339, 0.0007, -0.0022, 0.0024},
  {-0.0004, 0.0037, 0.0222, 0.0058}};
  // Pitched addressing: destination row r begins at devPtr + r * pitch
  // bytes. The source table is chosen per element by width.
  for (int r = 0; r < height; ++r) {
    double* dst = (double*)((char*)devPtr + r * pitch);
    for (int c = 0; c < width; ++c) {
      dst[c] = (width == 12) ? Afull[r][c] : Bfull[r][c];
    }
  }
}
// NOTE(review): like fill2D, this kernel only reads each element of the
// pitched 3D allocation into an unused local -- it writes nothing, so it
// does not actually initialize the memory the call sites appear to
// expect it to fill.
__global__ void fill3D(cudaPitchedPtr devPitchedPtr, int width, int height, int depth) {
// Pitched 3D layout: slice z starts at ptr + z * (pitch * height);
// row y within a slice starts at slice + y * pitch.
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
for (int z = 0; z < depth; ++z) {
char* slice = devPtr + z * slicePitch;
for (int y = 0; y < height; ++y) {
double* row = (double*)(slice + y * pitch);
for (int x = 0; x < width; ++x) {
double element = row[x];
}
}
}
}
// Writes factor * |src| of each z-slice of the pitched 3D tensor into the
// 2D array devPtr2.
// NOTE(review): every z iteration overwrites the same devPtr2 rows, so
// only the last slice (z == depth-1) survives. Callers pass depth values
// 1..4, which looks like an attempt to *select* a slice rather than loop
// over a range -- confirm intent.
// NOTE(review): devPtr2 rows are addressed with the 3D tensor's pitch,
// but devPtr2 was allocated separately with cudaMallocPitch and may have
// a different pitch -- TODO confirm the pitches match.
// NOTE(review): every launched thread executes the full copy; the writes
// are redundant (same values) rather than partitioned across threads.
__global__
void fill3Dto2D(cudaPitchedPtr devPitchedPtr, int width, int height, int depth, double* devPtr2, double factor) {
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
for (int z = 0; z < depth; ++z) {
char* slice = devPtr + z * slicePitch;
for (int y = 0; y < height; ++y) {
double* row = (double*)(slice + y * pitch);
double * row_devPtr2 = (double*)((char*)devPtr2 + y * pitch);
for (int x = 0; x < width; ++x) {
// double element = row[x];
row_devPtr2[x] = factor * abs(row[x]);
}
}
}
}
// For each element: ddel = 2 * del * factor * src - del, where src is the
// corresponding element of the 3D tensor, del/ddel are 2D arrays, and
// factor is a host-generated random scalar shared by all elements.
// NOTE(review): as in fill3Dto2D, each z iteration overwrites ddelPtr, so
// only the last slice's result survives; depth is passed as 1..4 by the
// visible caller -- confirm whether a single slice was intended.
// NOTE(review): delPtr/ddelPtr rows are addressed with the 3D tensor's
// pitch, not their own cudaMallocPitch pitch -- TODO confirm they match.
__global__
void func1(cudaPitchedPtr devPitchedPtr, int width, int height, int depth, double* delPtr, double* ddelPtr, double factor) {
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
for (int z = 0; z < depth; ++z) {
char* slice = devPtr + z * slicePitch;
for (int y = 0; y < height; ++y) {
double* row = (double*)(slice + y * pitch);
double * delPtr_row = (double*)((char*)delPtr + y * pitch);
double * ddelPtr_row = (double*)((char*)ddelPtr + y * pitch);
for (int x = 0; x < width; ++x) {
// double element = row[x];
// factor is the random number
ddelPtr_row[x] = 2 * delPtr_row[x] * factor * row[x] - delPtr_row[x];
}
}
}
}
// For each element: del = src + ddel, where src is the corresponding
// element of the 3D tensor and del/ddel are 2D arrays.
// NOTE(review): each z iteration overwrites delPtr, so only the last
// slice's contribution survives; same slice-selection question as func1.
// NOTE(review): delPtr/ddelPtr rows are addressed with the 3D tensor's
// pitch, not their own allocation pitch -- TODO confirm they match.
__global__
void func2(cudaPitchedPtr devPitchedPtr, int width, int height, int depth, double* delPtr, double* ddelPtr, double factor) {
char* devPtr = (char*)devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * height;
for (int z = 0; z < depth; ++z) {
char* slice = devPtr + z * slicePitch;
for (int y = 0; y < height; ++y) {
double* row = (double*)(slice + y * pitch);
double * delPtr_row = (double*)((char*)delPtr + y * pitch);
double * ddelPtr_row = (double*)((char*)ddelPtr + y * pitch);
for (int x = 0; x < width; ++x) {
// double element = row[x];
// factor is the random number
delPtr_row[x] = row[x] + ddelPtr_row[x];
}
}
}
}
// Matrix multiplication functions
// c = a * b
// a: m x n
// b: n x k
// c: m x k
/**
 * c += a * b, where a is m x n, b is n x k, and c is m x k (row-major).
 * One thread computes one element of c; the kernel accumulates into c,
 * so the caller must zero-initialize it.
 *
 * Fixes over the previous revision: the loop counter was declared
 * __shared__, making all threads of a block race on one counter; it is
 * now a per-thread local. A bounds guard also prevents out-of-range
 * writes when the launch configuration overshoots the matrix.
 */
__global__ void mmult_kernel(int m, int n, int k, const double * a, const double * b, double * c)
{
    int globx = blockIdx.x * blockDim.x + threadIdx.x;
    int globy = blockIdx.y * blockDim.y + threadIdx.y;
    if (globx >= m || globy >= k)
        return;
    // Accumulate the dot product in a register, then commit once.
    double acc = 0.0;
    for (int l = 0; l < n; l++)
        acc += a[globx * n + l] * b[l * k + globy];
    c[globx * k + globy] += acc;
}
/**
 * Host wrapper for mmult_kernel: accumulates a*b (a: m x n, b: n x k)
 * into the m x k matrix c on the device.
 *
 * Fixes over the previous revision: the computed grid was ignored and
 * the kernel was launched as <<<1, 13>>>, which covered only 13 of the
 * m*k output elements. The launch now uses one single-thread block per
 * output element, exactly spanning the result.
 */
void mmult_gpu(int m, int n, int k, const double * a, const double * b, double * c)
{
    dim3 dim_Grid(m, k);
    // One thread per output element: blockIdx alone indexes the result.
    mmult_kernel<<<dim_Grid, 1>>>(m, n, k, a, b, c);
}
// __device__
// void pvs_helper1(cudaPitchedPtr devPitchedPtr, int width, int height, int depth, double* Afull_ptr, double* Bfull_ptr, double* del_bi, double* del_pi, double* del_ci, int k, int l, int dt) {
// char* devPtr = (char*)devPitchedPtr.ptr;
// size_t pitch = devPitchedPtr.pitch;
// size_t slicePitch = pitch * height;
// for (int z = l; z < l + 1; ++z) {
// char* slice = devPtr + z * slicePitch;
// for (int y = 0; y < height; ++y) {
// double* row = (double*)(slice + y * pitch);
// for (int x = k; x < k + 1; ++x) {
// // double element = row[x];
// // states_p(:,k,l) = states_p(:,k-1,l) + dt*(Afull*states_p(:,k-1,l)+Bfull*0.5*([del_bi(1,k-1,l);del_ai(1,k-1,l);del_pi(1,k-1,l);del_ci(1,k-1,l)]+[del_bi(1,k,l);del_ai(1,k,l);del_pi(1,k,l);del_ci(1,k,l)]));
//
// // row[x] = row[x - 1] + dt *
//
// }
// }
// }
// }
// Integrates the 12-state linearized vehicle model along each of 100
// candidate trajectories using trapezoidal-input Euler steps of size dt,
// starting from time index j-1, and writes 6 derived output states
// (earth-frame velocities from the rotation of body velocities by the
// trim + perturbation Euler angles, plus the first three states).
//
// NOTE(review): every launched thread executes the full computation over
// all 100 slices -- work is not partitioned across threads; the writes
// are redundant but identical.
// NOTE(review): suspected bug in the A*x product below -- the inner loop
// multiplies Afull[m][indx1] by row[k-1], where `row` is fixed to state
// row m, instead of reading state row indx1. As written, temp1 equals
// (sum of A row m) * state_m[k-1], not the matrix-vector product the
// comment structure suggests. Confirm against the MATLAB original.
// NOTE(review): the del_* arrays are addressed with states_p's pitch,
// not the pitch they were allocated with -- TODO confirm they match.
// NOTE(review): loop bounds hard-code 100 slices, 12 states and 400 time
// steps; row10..row12 are computed but never read.
__global__
void predicted_vehicle_state(cudaPitchedPtr states_p, cudaPitchedPtr out_states_p, double* del_bi, double* del_ai, double* del_pi, double* del_ci, double dt, int j, double* Afull_ptr, double* Bfull_ptr, double trim_val[]) {
// Local copies of the linearized dynamics (A) and input (B) matrices;
// the Afull_ptr/Bfull_ptr parameters are not read.
double Afull[12][12] = {{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9988, -0.0009, 0.0493, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 3.3325, 0.0000, -67.5899, 0.0000, -0.9998, -0.0175, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000, 67.5899, 0.0000, 0.0493, 0.0175, -0.9986, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000, -0.0009, 0.0494},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9998, 0.0175},
{0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.0176, 1.0011},
{-0.0000, -0.0000, 0.0002, -1.7910, -28.7478, 0.0000, -0.0156, -0.0014, 0.0264, -1.0130, 1.1241, -0.4113},
{0.0000, 0.0000, 0.0001, 33.1424, -0.3921, -0.0000, 0.0079, -0.1015, 0.0165, -1.5512, -1.5106, -65.9068},
{-0.0000, -0.0000, -0.0033, -2.6711, 1.7629, -0.0000, -0.1352, 0.0043, -0.6177, 8.8359, 68.2588, 2.0707},
{0.0000, 0.0000, 0.0002, -0.7252, -0.6595, -0.0000, 0.0058, -0.0292, 0.0424, -7.1172, -1.6572, 0.1983},
{-0.0000, -0.0000, 0.0000, 0.0642, -0.7286, 0.0000, 0.0019, 0.0059, -0.0013, 0.0097, -1.5146, -0.0941},
{-0.0000, -0.0000, 0.0000, -0.8489, 0.0071, 0.0000, -0.0036, 0.0167, -0.0027, -0.1995, -0.0263, -0.6115}};
double Bfull[12][4] = {{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{0.0000, 0.0000, 0.0000, 0.0000},
{-0.1577, -0.0046, 0.0529, 0.0601},
{0.0197, 0.0977, -0.0752, 0.0082},
{-0.1559, 0.0106, 0.0865, -0.8894},
{0.0307, 0.1185, -0.0356, 0.0067},
{0.0339, 0.0007, -0.0022, 0.0024},
{-0.0004, 0.0037, 0.0222, 0.0058}};
// Trim (reference) values: body velocities u0/v0/w0 and Euler angles.
double u0 = trim_val[0];
double v0 = trim_val[1];
double w0 = trim_val[2];
double phi0 = trim_val[3];
double theta0 = trim_val[4];
double psi0 = trim_val[5];
// Pitched 3D addressing for the 12-row state tensor and 6-row output.
char* states_p_ptr = (char*) states_p.ptr;
size_t states_p_pitch = states_p.pitch;
size_t states_p_slicePitch = states_p_pitch * 12;
char* out_states_p_ptr = (char*) out_states_p.ptr;
size_t out_states_p_pitch = out_states_p.pitch;
size_t out_states_p_slicePitch = out_states_p_pitch * 6;
//depth
for (int l = 0; l < 100; ++l) {
char* states_p_slice = states_p_ptr + l * states_p_slicePitch;
char* out_states_p_slice = out_states_p_ptr + l * out_states_p_slicePitch;
// Individual state rows of slice l (rows 10-12 are never read below).
double* row1 = (double*)(states_p_slice + 0 * states_p_pitch);
double* row2 = (double*)(states_p_slice + 1 * states_p_pitch);
double* row3 = (double*)(states_p_slice + 2 * states_p_pitch);
double* row4 = (double*)(states_p_slice + 3 * states_p_pitch);
double* row5 = (double*)(states_p_slice + 4 * states_p_pitch);
double* row6 = (double*)(states_p_slice + 5 * states_p_pitch);
double* row7 = (double*)(states_p_slice + 6 * states_p_pitch);
double* row8 = (double*)(states_p_slice + 7 * states_p_pitch);
double* row9 = (double*)(states_p_slice + 8 * states_p_pitch);
double* row10 = (double*)(states_p_slice + 9 * states_p_pitch);
double* row11 = (double*)(states_p_slice + 10 * states_p_pitch);
double* row12 = (double*)(states_p_slice + 11 * states_p_pitch);
double* row_out1 = (double*)(out_states_p_slice + 0 * out_states_p_pitch);
double* row_out2 = (double*)(out_states_p_slice + 1 * out_states_p_pitch);
double* row_out3 = (double*)(out_states_p_slice + 2 * out_states_p_pitch);
double* row_out4 = (double*)(out_states_p_slice + 3 * out_states_p_pitch);
double* row_out5 = (double*)(out_states_p_slice + 4 * out_states_p_pitch);
double* row_out6 = (double*)(out_states_p_slice + 5 * out_states_p_pitch);
//height or row
for (int m = 0; m < 12; ++m) {
double* row = (double*)(states_p_slice + m * states_p_pitch);
// double* row_out = (double*)(out_states_p_slice + m * out_states_p_pitch);
// Control-perturbation rows; note the use of states_p_pitch here.
double * row_del_bi = (double*)((char*)del_bi + 0 * states_p_pitch);
double * row_del_ai = (double*)((char*)del_ai + 0 * states_p_pitch);
double * row_del_pi = (double*)((char*)del_pi + 0 * states_p_pitch);
double * row_del_ci = (double*)((char*)del_ci + 0 * states_p_pitch);
//width or column
for (int k = j - 1; k < 400; ++k) {
//Afull * states_p[k -1]
// NOTE(review): `row` is fixed to state m for every indx1 -- see
// the suspected-bug note in the header comment.
double temp1 = 0;
for(int indx1 = 0; indx1 < 12; indx1++) {
temp1 += Afull[m][indx1] * row[k - 1];
}
// Trapezoidal input term: B row m times the average of the
// controls at steps k-1 and k.
double temp2 = 0;
temp2 = Bfull[m][0] * 0.5 * (row_del_bi[k - 1] + row_del_bi[k])
+ Bfull[m][1] * 0.5 * (row_del_ai[k - 1] + row_del_ai[k])
+ Bfull[m][2] * 0.5 * (row_del_pi[k - 1] + row_del_pi[k])
+ Bfull[m][3] * 0.5 * (row_del_ci[k - 1] + row_del_ci[k]);
// Euler step for state m at time k.
row[k] = row[k - 1] + dt * (temp1 + temp2);
// Earth-frame velocities via the body-to-earth rotation built from
// trim + perturbation Euler angles (recomputed for every m).
row_out1[k] = cos(theta0+row5[k])*cos(psi0+row6[k])*(u0+row7[k]) + (sin(phi0+row4[k])*sin(theta0+row5[k])*cos(psi0+row6[k])
- cos(phi0+row4[k])*sin(psi0+row6[k]))*(v0+row8[k]) + (cos(phi0+row4[k])*sin(theta0+row5[k])*cos(psi0+row6[k])
+ sin(phi0+row4[k])*sin(psi0+row6[k]))*(w0+row9[k]);
row_out2[k] = -(cos(theta0+row5[k])*sin(psi0+row6[k])*(u0+row7[k]) + (sin(phi0+row4[k])*sin(theta0+row5[k])*sin(psi0+row6[k])
+ cos(phi0+row4[k])*cos(psi0+row6[k]))*(v0+row8[k]) + (cos(phi0+row4[k])*sin(theta0+row5[k])*sin(psi0+row6[k])
- sin(phi0+row4[k])*cos(psi0+row6[k]))*(w0+row9[k]));
row_out3[k] = -(-sin(theta0+row5[k])*(u0+row7[k]) + sin(phi0+row4[k])*cos(theta0+row5[k])*(v0+row8[k])
+ cos(phi0+row4[k])*cos(theta0+row5[k])*(w0+row9[k]));
row_out4[k] = row1[k];
row_out5[k] = row2[k];
row_out6[k] = row3[k];
// break;
}
}
}
}
// Computes, for each of the 100 candidate trajectories, the four path
// costs of the MATLAB formulas quoted inline, then either zeroes the
// trajectory's Ji entries (and bumps *n) when the final state falls
// outside the landing bands, or stores path cost + terminal cost.
//
// NOTE(review): pathcost[4][400][100] plus the eight 400-element arrays
// are per-thread locals (~1.3 MB each) -- this exceeds typical per-thread
// local-memory budgets and will fail or spill catastrophically.
// NOTE(review): us, xeq, xseq, xs, vs, ys, ws, zs and F are read below
// but never initialized -- their contents are indeterminate.
// NOTE(review): Ji was allocated with height 4 (see extent4 in main),
// but Ji_slicePitch multiplies by 6 -- likely should be * 4; verify.
// NOTE(review): suspected transcription mismatches vs. the quoted MATLAB:
// pc[1] uses row_del_bi and row_out1 where del_ai / out_states_p(2,...)
// are referenced; pc[2] squares its whole term twice; pc[3] uses 0.00025
// where the comment says 0.0025 and mixes row_out5/row_out6. Confirm
// against the original model before relying on these costs.
// NOTE(review): *n is incremented without atomics; with a multi-thread
// launch (and a host pointer passed from the visible caller) this is
// both racy and an invalid device dereference.
__global__
void helper1(cudaPitchedPtr Ji, cudaPitchedPtr out_states_p, double dt, int j, double* del_bi, double* del_ai,
double* del_pi, double* del_ci, double* X_optdel, double* U_opt, double* V_opt, double* Y_opt, double* W_opt, double* Z_opt,
int* n) {
// int n = 0; //Starts counter for trajectories with final position outside of specified band
// Reference trim values and initial offsets for the optimal trajectory.
double U0_opt = 67.5;
double V0_opt = 0;
double W0_opt = 0;
double X0_opt = -1100;
double Y0_opt = 550;
double Z0_opt = 100;
char* out_states_p_ptr = (char*) out_states_p.ptr;
size_t out_states_p_pitch = out_states_p.pitch;
size_t out_states_p_slicePitch = out_states_p_pitch * 6;
char* Ji_ptr = (char*) Ji.ptr;
size_t Ji_pitch = Ji.pitch;
size_t Ji_slicePitch = Ji_pitch * 6;
// Acceptance bands around the target landing state.
double band_u = 3;
double band_v = 3;
double band_w = 3;
double band_x = 5;
double band_y = 5;
double band_z = 5;
double us0 = 33.49;
//Calculates the performance index for each trajectory
//These should be fixed by Malloc
double pathcost[4][400][100];
double us[400];
double xeq[400];
double xseq[400];
double xs[400];
double vs[400];
double ys[400];
double ws[400];
double zs[400];
double F[400];
for (int l = 1; l < 100; l++) {
// Per-trajectory accumulators for the four cost components.
double pc[4] = {};
char* out_states_p_slice = out_states_p_ptr + l * out_states_p_slicePitch;
double* row_out1 = (double*)(out_states_p_slice + 0 * out_states_p_pitch);
double* row_out2 = (double*)(out_states_p_slice + 1 * out_states_p_pitch);
double* row_out3 = (double*)(out_states_p_slice + 2 * out_states_p_pitch);
double* row_out4 = (double*)(out_states_p_slice + 3 * out_states_p_pitch);
double* row_out5 = (double*)(out_states_p_slice + 4 * out_states_p_pitch);
double* row_out6 = (double*)(out_states_p_slice + 5 * out_states_p_pitch);
// NOTE(review): the del_* rows are addressed with out_states_p's
// pitch rather than their own allocation pitch -- TODO confirm.
double * row_del_bi = (double*)((char*)del_bi + 0 * out_states_p_pitch);
double * row_del_ai = (double*)((char*)del_ai + 0 * out_states_p_pitch);
double * row_del_pi = (double*)((char*)del_pi + 0 * out_states_p_pitch);
double * row_del_ci = (double*)((char*)del_ci + 0 * out_states_p_pitch);
char* Ji_slice = Ji_ptr + l * Ji_slicePitch;
for (int k = j - 1; k < 400; k++) {
// pc(1)=pc(1)+0.5*dt*(0.025*del_bi(1,k,l)^2+0.25*((out_states_p(1,j,l)-(U0_opt+U_opt(j)))^2)+(1e-06)*((out_states_p(4,j,l)-X_optdel(j))^2)); %Calculates path cost for each trajectory
pc[0] = pc[0] + 0.5 * dt * (0.025 * row_del_bi[k] * row_del_bi[k] + 0.25 * ((row_out1[j - 1]) - (U0_opt + U_opt[j - 1])) * ((row_out1[j - 1]) - (U0_opt + U_opt[j - 1])) + (0.000001) *(row_out4[j - 1] - X_optdel[j - 1]) * (row_out4[j - 1] - X_optdel[j - 1]));
// pc(2)=pc(2)+0.5*dt*(0.025*del_ai(1,k,l)^2+0.0167*((out_states_p(2,j,l)-(V0_opt+V_opt(j)))^2)+(2.5e-05)*((out_states_p(5,j,l)-Y_opt(j))^2)); %Calculates path cost for each trajectory
pc[1] = pc[1] + 0.5 * dt * (0.025 * row_del_ai[k] * row_del_bi[k] + 0.0167 * ((row_out2[j - 1]) - (V0_opt + V_opt[j - 1])) * ((row_out1[j - 1]) - (V0_opt + V_opt[j - 1])) + (0.000025) *(row_out5[j - 1] - Y_opt[j - 1]) * (row_out5[j - 1] - Y_opt[j - 1]));
// pc(3)=pc(3)+0.5*dt*(0.05*del_pi(1,k,l)^2); %Calculates path cost for each trajectory
pc[2] = pc[2] + 0.5 * dt * (0.05 * row_del_pi[k] * row_del_pi[k]) * (0.05 * row_del_pi[k] * row_del_pi[k]);
// pc(4)=pc(4)+0.5*dt*(0.025*del_ci(1,k,l)^2+0.25*((out_states_p(4,j,l)-(W0_opt+W_opt(j)))^2)+0.0025*((out_states_p(6,j,l)-Z_opt(j))^2)); %Calculates path cost for each trajectory
pc[3] = pc[3] + 0.5 * dt * (0.025 * row_del_ci[k] * row_del_ci[k] + 0.25 * ((row_out4[j - 1]) - (W0_opt + W_opt[j - 1])) * ((row_out4[j - 1]) - (W0_opt + W_opt[j - 1])) + (0.00025) *(row_out6[j - 1] - Z_opt[j - 1]) * (row_out5[j - 1] - Z_opt[j - 1]));
}
pathcost[0][j - 1][l] = pc[0];
pathcost[1][j - 1][l] = pc[1];
pathcost[2][j - 1][l] = pc[2];
pathcost[3][j - 1][l] = pc[3];
// Band test on the final (index 399) state; reads uninitialized
// arrays (us, xeq, xseq, xs, vs, ys, ws, zs) -- see header notes.
if (abs(row_out1[399]-(us0+us[399]))>band_u && abs(row_out4[399] + xeq[399]+X0_opt-(xseq[399]+xs[399]))>band_x &&
abs(row_out2[399]-vs[399])>band_v && abs(row_out5[399]+Y0_opt-ys[399])>band_y &&
abs(row_out3[399]-ws[j - 1])>band_w && abs(row_out6[399]+Z0_opt-zs[j - 1])>band_z) {
for (int indx = 0; indx < 4; indx++) {
double* row_Ji = (double*)(Ji_slice + indx * Ji_pitch);
row_Ji[j - 1] = 0;
}
*n = *n + 1;
} else {
double* row_Ji1 = (double*)(Ji_slice + 0 * Ji_pitch);
double* row_Ji2 = (double*)(Ji_slice + 1 * Ji_pitch);
double* row_Ji3 = (double*)(Ji_slice + 2 * Ji_pitch);
double* row_Ji4 = (double*)(Ji_slice + 3 * Ji_pitch);
// Total cost = sum of path costs + terminal penalty weighted by F.
row_Ji1[j - 1] = pathcost[0][j - 1][l] + pathcost[1][j - 1][l] + pathcost[2][j - 1][l] + pathcost[3][j - 1][l] + F[j - 1] *
((row_out1[399] - (us0 + us[399])) * (row_out1[399] - (us0 + us[399]))
+ (row_out4[399] + xeq[399] + X0_opt - xseq[399] - xs[399]) * (row_out4[399] + xeq[399] + X0_opt - xseq[399] - xs[399]));
row_Ji2[j - 1] = pathcost[0][j - 1][l] + pathcost[1][j - 1][l] + pathcost[2][j - 1][l] + pathcost[3][j - 1][l] + F[j - 1] *
(row_out2[399] - vs[399]) * (row_out2[399] - vs[399]) +
(row_out5[399] + Y0_opt - ys[399]) * (row_out5[399] + Y0_opt - ys[399]);
row_Ji3[j - 1] = pathcost[0][j - 1][l] + pathcost[1][j - 1][l] + pathcost[2][j - 1][l] + pathcost[3][j - 1][l];
row_Ji4[j - 1] = pathcost[0][j - 1][l] + pathcost[1][j - 1][l] + pathcost[2][j - 1][l] + pathcost[3][j - 1][l] + F[j - 1] *
(row_out3[399] - ws[j - 1]) * (row_out3[399] - ws[j - 1]) +
(row_out6[399] + Z0_opt - zs[j - 1]) * (row_out6[399] + Z0_opt - zs[j - 1]);
}
}
}
int main(void)
{
clock_t begin = clock();
//variables
double rb_inc = 0.1;
double ra_inc = 0.1;
double rp_inc = 0.1;
double rc_inc = 0.1;
//some variables to add values
double *rb, *ra, *rp, *rc;
cudaMallocManaged(&rb, 400 * sizeof(double));
cudaMallocManaged(&ra, 400 * sizeof(double));
cudaMallocManaged(&rp, 400 * sizeof(double));
cudaMallocManaged(&rc, 400 * sizeof(double));
fill1D<<<1, 256>>>(400, rb, 0.1);
cudaDeviceSynchronize();
fill1D<<<1, 256>>>(400, ra, 0.1);
cudaDeviceSynchronize();
fill1D<<<1, 256>>>(400, rp, 0.1);
cudaDeviceSynchronize();
fill1D<<<1, 256>>>(400, rc, 0.1);
cudaDeviceSynchronize();
// Host code
//width is columns and height is rows
int height = 400, width = 100, depth = 4;
//1D stuff
double* X_optdel;
size_t optdel_size = 400 * sizeof(double);
cudaMalloc(&X_optdel, optdel_size);
double* U_opt;
cudaMalloc(&U_opt, optdel_size);
double* V_opt;
cudaMalloc(&V_opt, optdel_size);
double* Y_opt;
cudaMalloc(&Y_opt, optdel_size);
double* W_opt;
cudaMalloc(&W_opt, optdel_size);
double* Z_opt;
cudaMalloc(&Z_opt, optdel_size);
double* cycles;
cudaMalloc(&cycles, optdel_size);
double* np;
cudaMalloc(&np, optdel_size);
//2D stuff
size_t pitch;
double* delbmax;
cudaMallocPitch(&delbmax, &pitch, width * sizeof(double), height);
// fill2D<<<100, 400000>>>(delbmax, pitch, width, height);
double* delamax;
cudaMallocPitch(&delamax, &pitch, width * sizeof(double), height);
double* delpmax;
// --- Pitched 2D device allocations (one width x height buffer per control channel) ---
// NOTE(review): every cudaMallocPitch below writes into the single shared `pitch`
// variable, so only the pitch of the most recent allocation is retained. The
// width*height buffers probably all receive the same pitch, but that is not
// guaranteed — keep one pitch per buffer or assert they match. Also note the
// return codes are unchecked even though a CUDA_CHECK macro exists at the top
// of this file.
cudaMallocPitch(&delpmax, &pitch, width * sizeof(double), height);
double* delcmax;
cudaMallocPitch(&delcmax, &pitch, width * sizeof(double), height);
double* ddelb;
cudaMallocPitch(&ddelb, &pitch, width * sizeof(double), height);
double* ddela;
cudaMallocPitch(&ddela, &pitch, width * sizeof(double), height);
double* ddelp;
cudaMallocPitch(&ddelp, &pitch, width * sizeof(double), height);
double* ddelc;
cudaMallocPitch(&ddelc, &pitch, width * sizeof(double), height);
double* del_bi;
cudaMallocPitch(&del_bi, &pitch, width * sizeof(double), height);
double* del_ai;
cudaMallocPitch(&del_ai, &pitch, width * sizeof(double), height);
double* del_pi;
cudaMallocPitch(&del_pi, &pitch, width * sizeof(double), height);
double* del_ci;
cudaMallocPitch(&del_ci, &pitch, width * sizeof(double), height);
// 12x12 state matrix and 12x4 input matrix for the linearized vehicle model.
// NOTE(review): <<<100, 40000>>> asks for 40000 threads per block, which is far
// above the hardware limit of 1024 threads/block — every launch with this
// configuration fails with cudaErrorInvalidConfiguration, and with no
// cudaGetLastError()/CUDA_CHECK after the launches the failure is silent.
// Likely <<<40000, 100>>> (or a computed ceil-div grid) was intended; this
// applies to every kernel launch in this block. TODO confirm and fix.
double* Afull_ptr;
cudaMallocPitch(&Afull_ptr, &pitch, 12 * sizeof(double), 12);
fill2D_specific<<<100, 40000>>>(Afull_ptr, pitch, 12, 12);
double* Bfull_ptr;
cudaMallocPitch(&Bfull_ptr, &pitch, 4 * sizeof(double), 12);
fill2D_specific<<<100, 40000>>>(Bfull_ptr, pitch, 4, 12);
//3D stuff
// 3D buffers: (400 doubles per row) x (channels) x (100 trajectories?).
// NOTE(review): the fill3D/fill3Dto2D/func1/func2 kernels receive logical
// dimensions but apparently not the pitched-pointer pitches for the 2D buffers
// above — verify the kernels index via the stored pitch and do not assume a
// packed row stride.
cudaExtent extent = make_cudaExtent(400 * sizeof(double), 4, 100);
cudaPitchedPtr del_con;
cudaMalloc3D(&del_con, extent);
fill3D<<<100, 40000>>>(del_con, 400, 4, 100);
cudaExtent extent2 = make_cudaExtent(400 * sizeof(double), 12, 100);
cudaPitchedPtr states_p;
cudaMalloc3D(&states_p, extent2);
fill3D<<<100, 40000>>>(states_p, 400, 12, 100);
cudaExtent extent3 = make_cudaExtent(400 * sizeof(double), 6, 100);
cudaPitchedPtr out_states_p;
cudaMalloc3D(&out_states_p, extent3);
fill3D<<<100, 40000>>>(out_states_p, 400, 6, 100);
cudaExtent extent4 = make_cudaExtent(400 * sizeof(double), 4, 100);
cudaPitchedPtr Ji;
cudaMalloc3D(&Ji, extent4);
fill3D<<<100, 40000>>>(Ji, 400, 4, 100);
//other stuff
// Trim condition of the vehicle (reference operating point).
// NOTE(review): trim_val is a host stack array; if predicted_vehicle_state
// dereferences it on the device that is an illegal device-side access. Copy it
// to device memory (or __constant__) before the launch.
double trim_val[] = {67.5077, -0.0585, 3.3319, -0.0175, 0.0493, 0};
// Outer loop over time steps / trajectory columns.
// NOTE(review): j starts at 2, skipping index 1 — presumably a 1-based
// (MATLAB-style) port where column 1 holds the initial condition; confirm.
for (int j = 2; j <= 400; j++) {
int n = 100; //Counter of trajectories with final x-position outside of specified band in relation to ship's landing position
int m = 1; //Counter for number of cycles in while loop
// Retry loop: widen the control-perturbation bounds (rb/ra/rp/rc) until
// enough trajectories land in-band, or 10 cycles elapse.
// NOTE(review): `n` is a host variable that is never written on the host
// inside this loop (see the &n comment below), so `n > 90` stays true and
// the loop always runs the full 10 cycles regardless of results.
while (n > 90 && m < 10) {
fill3Dto2D<<<100, 40000>>>(del_con, width, height, 1, delbmax, rb[j - 1]);
fill3Dto2D<<<100, 40000>>>(del_con, width, height, 2, delamax, ra[j - 1]);
fill3Dto2D<<<100, 40000>>>(del_con, width, height, 3, delpmax, rp[j - 1]);
fill3Dto2D<<<100, 40000>>>(del_con, width, height, 4, delcmax, rc[j - 1]);
// NOTE(review): srand(time(0)) has 1-second resolution, so the four
// re-seeds in one iteration (and usually across iterations) produce the
// same rand() value. The division by 32767.0 also assumes
// RAND_MAX == 32767, which is not portable (glibc uses 2147483647) —
// divide by (double)RAND_MAX and seed once, outside all loops.
srand(time(0));
func1<<<100, 40000>>>(del_con, width, height, 1, delbmax, ddelb, (double)rand() / 32767.0);
srand(time(0));
func1<<<100, 40000>>>(del_con, width, height, 2, delamax, ddela, (double)rand() / 32767.0);
srand(time(0));
func1<<<100, 40000>>>(del_con, width, height, 3, delpmax, ddelp, (double)rand() / 32767.0);
srand(time(0));
func1<<<100, 40000>>>(del_con, width, height, 4, delcmax, ddelc, (double)rand() / 32767.0);
func2<<<100, 40000>>>(del_con, width, height, 4, del_bi, ddelb, 1.0);
func2<<<100, 40000>>>(del_con, width, height, 4, del_ai, ddela, 1.0);
func2<<<100, 40000>>>(del_con, width, height, 4, del_pi, ddelp, 1.0);
func2<<<100, 40000>>>(del_con, width, height, 4, del_ci, ddelc, 1.0);
//Predicted vehicle state -------------------
// void predicted_vehicle_state(double* states_p, cudaPitchedPtr out_states_p, double* del_bi, double* del_ai, double* del_pi, double* del_ci, double dt, int j, double* Afull_ptr, double* Bfull_ptr, double trim_val[])
double dt = 0.1;
// Propagate states one step and accumulate per-trajectory cost into Ji.
predicted_vehicle_state<<<100, 40000>>>(states_p, out_states_p, del_bi, del_ai, del_pi, del_ci, dt, j, Afull_ptr, Bfull_ptr, trim_val);
// NOTE(review): &n is the address of a host stack variable; a device-side
// write through it is an illegal access, and the host copy of `n` is never
// refreshed. Use a device counter (e.g. atomicAdd) plus a cudaMemcpy back
// — followed by a synchronization — for the while-condition to work.
helper1<<<100, 40000>>>(Ji, out_states_p, dt, j, del_bi, del_ai,
del_pi, del_ci, X_optdel, U_opt, V_opt, Y_opt, W_opt, Z_opt,
&n);
// Widen the perturbation bounds for the next retry cycle.
rb[j - 1] = rb[j - 1] + rb_inc;
ra[j - 1] = ra[j - 1] + ra_inc;
rp[j - 1] = rp[j - 1] + rp_inc;
rc[j - 1] = rc[j - 1] + rc_inc;
m++;
// break;
}
//Calculation of Cost is done here
// cycles[j - 1] = m;
// np[j - 1] = 100 - n;
}
// --- Cleanup ----------------------------------------------------------------
cudaFree(U_opt);
cudaFree(V_opt);
cudaFree(W_opt);
cudaFree(X_optdel);
cudaFree(Y_opt);
cudaFree(Z_opt);
cudaFree(rb);
cudaFree(ra);
cudaFree(rp);
cudaFree(rc);
cudaFree(delbmax);
cudaFree(delamax);
cudaFree(delpmax);
cudaFree(delcmax);
cudaFree(ddelb);
cudaFree(ddela);
cudaFree(ddelp);
cudaFree(ddelc);
cudaFree(del_bi);
cudaFree(del_ai);
cudaFree(del_pi);
cudaFree(del_ci);
// NOTE(review): the cudaMalloc3D buffers (del_con, states_p, out_states_p,
// Ji) are never freed — a leak. cudaFree takes the raw pointer, i.e.
// cudaFree(del_con.ptr), not the cudaPitchedPtr struct itself.
// cudaFree(del_con);
// NOTE(review): kernel launches are asynchronous; without a
// cudaDeviceSynchronize() before clock(), this measures only launch/API
// overhead, not GPU execution time (cudaFree does force some sync, but
// explicit synchronization before timing is the reliable form).
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Path Integral took: %f seconds \n", time_spent);
return 0;
}
/* NOTE(review): removed non-code residue ("Subsets and Splits" dataset-viewer
   page text) that was accidentally appended after the closing brace — it is not
   C/CUDA and would break compilation. */