serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,401 | #include "includes.h"
__global__ void TgvSolveEtaMaskedKernel(float* mask, float alpha0, float alpha1, float* atensor, float *btensor, float* ctensor, float* etau, float* etav1, float* etav2, int width, int height, int stride)
{
    // One thread per pixel of a (width x height) image stored with row pitch `stride`.
    int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
    int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
    // BUG FIX: the guard must reject a thread when EITHER coordinate is out of
    // range (||). The original used &&, which only returned when both were out
    // of range, so threads with (ix >= width, iy < height) or vice versa read
    // and wrote out of bounds.
    if ((iy >= height) || (ix >= width)) return;
    int pos = ix + iy * stride;
    // Pixels masked out (mask == 0) are left untouched.
    if (mask[pos] == 0.0f) return;
    float a = atensor[pos];
    float b = btensor[pos];
    float c = ctensor[pos];
    // Preconditioner weights for the TGV primal-dual update, scaled by the
    // regularization parameters alpha0/alpha1.
    etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1);
    etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0;
    etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0;
}
12,402 | /*
*
* New York University
* GPUs Fall 2017
* Steven Adam & Michael Corso
* Quantifying the Relationship Between Occupancy and Performance
*
*
* Code Explanation:
*
* (1) The first function call is to initDeviceVars() which checks the system for CUDA devices and
* chooses the device with the highest Compute Capability (CC). Device-specific variables are then
* populated based on the CC and variables stored in the cudaDeviceProp struct. This information
* allows for grid and block dimensions to be constructed according to the specific device this code
* is running on.
*
* (2) The code then scales the kernel's parameters based on the device's specifications using the
* user-specified values provided in the program's arguments:
*
* (a) occupancyMethod:
*
* (0) Blocks per SM: Determines the maximum number of blocks assignable (IE [number of SMs] *
* [max blocks assignable to each SM]) and scales it based on the specified targetOccupancy.
* The number of threads per block is maxed.
*
* (1) Threads per Block: Determines the maximum number of threads assignable to a block (IE
* 1024 is common) and scales this based on the specified targetOccupancy. The number of blocks
* is equal to ([number of SMs] * [max blocks assignable to each SM]).
*
* (b) The work being performed by the threads is user-specified in the program's arguments:
*
* (0) doubleInt(): No memory accesses, simply multiplies its thread id by 2
*
* (1) memoryBound(): Memory-bound vector addition, 3 memory accesses, 1 floating-point addition
* CGMA = 1/3
*
* (2) computeBound(): Compute-bound vector math, 3 memory accesses, 90 floating-point operations
* CGMA = 90/3 = 30
*
* (c) targetOccupancy: An integer value of 1 - 100 which specifies the percentage of the maximum
* occupancy for this test.
*
* (d) problemSize: An integer value which specifies the amount of work to be performed by the kernel
*
*
*/
#include <cuda.h>
#include <iostream>
#include <iomanip>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <algorithm>
#include <vector>
// DEBUG/TEST
#define TESTING false
#define PRINTTIME false
//
void howToUse();
// OCCUPANCY FUNCTIONS
double test_BlocksPerGrid();
double test_ThreadsPerBlock();
// GPU SPEC FUNCTIONS
void initDeviceVars();
void getGPU();
void getMaxBlocksPerSM();
void getMaxWarpsPerSM();
// GPU FUNCTIONS
__global__
void doubleInt (int, int);
__global__
void memoryBound (float *, float *, float *, int, int);
__global__
void computeBound (float *, float *, float *, int, int);
// TEST VARIABLES
int problemSize, occupancyMethod, functionToUse;
double targetOccupancy;
// DEVICE VARIABLES
char * deviceName;
int maxThreadsPerBlock,
maxThreadsPerSM, maxBlocksPerSM, maxWarpsPerSM, numSMs,
maxThreadsPerGrid,
compCapMajor, compCapMinor;
int main(int argc, char * argv[]) {
    // Exactly four arguments are required; bail out with usage text otherwise.
    if (argc != 5) howToUse();

    // Query the CUDA device and populate the device-specific globals.
    initDeviceVars();

    // Dump the values derived from the device when debugging is enabled.
    if (TESTING) {
        printf("\nGPU Info:\n\t%-15s %s\n\t%-15s %d.%d\n\t%-15s %d\n\t%-15s %d\n\t%-15s %d\n\t%-15s %d\n",
            "Device ID", deviceName,
            "Compute C.", compCapMajor, compCapMinor,
            "Grid Size", maxThreadsPerGrid,
            "Block Size", maxThreadsPerBlock,
            "# SMs", numSMs,
            "# Warps", maxWarpsPerSM
        );
    }

    // Parse the user-specified test parameters.
    occupancyMethod = atoi(argv[1]);
    functionToUse = atoi(argv[2]);
    if (functionToUse > 2) howToUse();

    // Convert the occupancy percentage to a fraction, clamped to (0, 1].
    targetOccupancy = atoi(argv[3]) / 100.0;
    if (targetOccupancy > 1.0) targetOccupancy = 1.0;
    if (targetOccupancy == 0.0) targetOccupancy = 0.01;

    problemSize = atoi(argv[4]);

    // Dispatch to the selected occupancy experiment.
    switch (occupancyMethod) {
    case 0:
        // Scale the number of blocks that can run simultaneously.
        test_BlocksPerGrid();
        break;
    case 1:
        // Scale the number of threads per block.
        test_ThreadsPerBlock();
        break;
    default:
        printf("\nNot an acceptable occupancyMethod!\n");
        howToUse();
        break;
    }
    return 0;
}
// BLOCKS PER SM / TOTAL BLOCKS IN THE KERNEL (USES MAX NUMBER OF THREADS PER BLOCK)
double test_BlocksPerGrid() {
    // NUMBER OF BLOCKS: scale the device's simultaneous-block capacity by the
    // requested occupancy fraction.
    int totalBlocks = ((numSMs * maxBlocksPerSM) * targetOccupancy);
    if (totalBlocks < 1) totalBlocks = 1;
    // ATTEMPT TO DISTRIBUTE THREADS EVENLY
    // NOTE(review): numSMs / totalBlocks is integer division, so this product
    // is 0 whenever totalBlocks > numSMs and the 32-thread fallback below
    // applies — presumably intentional for this experiment, but confirm.
    int threadsPerBlock = (maxThreadsPerSM * (numSMs / totalBlocks));
    while (threadsPerBlock % 32 != 0) threadsPerBlock -= 1;
    if (threadsPerBlock > maxThreadsPerBlock) threadsPerBlock = maxThreadsPerBlock;
    if (threadsPerBlock < 1) threadsPerBlock = 32;
    // TOTAL NUMBER OF THREADS IN THE GRID
    int totalThreads = totalBlocks * threadsPerBlock;
    dim3 dimGrid(totalBlocks, 1, 1);
    dim3 dimBlock(threadsPerBlock, 1, 1);
    if (TESTING) printf("\ntest_MaxBlocksPerSM running with:\n\ttotalBlocks\t%d\t%d%%\n\tblockSize\t%d\t%.01f%%\n",
        totalBlocks, ((int) (targetOccupancy * 100)), threadsPerBlock, (((float) threadsPerBlock / (float) maxThreadsPerBlock) * 100));
    // ARRAYS FOR PERFORMING VECTOR MATH
    // BUG FIX: the device allocations and copies below were sized in elements
    // (problemSize bytes) instead of bytes (problemSize * sizeof(float)), so
    // the kernels accessed memory far beyond the allocations.
    size_t bytes = problemSize * sizeof(float);
    float * in1 = (float *) calloc((problemSize), sizeof(float));
    float * in2 = (float *) calloc((problemSize), sizeof(float));
    float * out = (float *) calloc((problemSize), sizeof(float));
    float * in1D; float * in2D; float * outD;
    for (int i = 0; i < problemSize; i++) {
        in1[i] = (i * 0.99);
        in2[i] = ((problemSize - i - 1) * 0.99);
        out[i] = -1;
    }
    cudaMalloc((void **) &in1D, bytes);
    cudaMemcpy(in1D, in1, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &in2D, bytes);
    cudaMemcpy(in2D, in2, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &outD, bytes);
    cudaMemcpy(outD, out, bytes, cudaMemcpyHostToDevice);
    // INITIALIZE TIMER BEFORE CALLING KERNEL
    clock_t start = clock();
    if (functionToUse == 0) {
        doubleInt<<<dimGrid, dimBlock>>>(problemSize, totalThreads);
    }
    else if (functionToUse == 1) {
        memoryBound<<<dimGrid, dimBlock>>>(in1D, in2D, outD, problemSize, totalThreads);
    }
    else if (functionToUse == 2) {
        computeBound<<<dimGrid, dimBlock>>>(in1D, in2D, outD, problemSize, totalThreads);
    }
    // SYNC DEVICE AND GET TIME TAKEN (kernel launches are asynchronous)
    cudaDeviceSynchronize();
    clock_t end = clock();
    double time_taken = ((double)(end - start)) / CLOCKS_PER_SEC;
    // CLEANUP
    free(in1); free(in2); free(out);
    cudaFree(in1D); cudaFree(in2D); cudaFree(outD);
    return time_taken;
}
// THREADS PER BLOCK (USES MAX NUMBER OF BLOCKS)
double test_ThreadsPerBlock() {
    // NUMBER OF THREADS PER BLOCK: scale the per-block limit by the requested
    // occupancy fraction, rounded down to a multiple of the warp size.
    int threadsPerBlock = (maxThreadsPerBlock * targetOccupancy);
    while (threadsPerBlock % 32 != 0) threadsPerBlock -= 1;
    if (threadsPerBlock < 1) threadsPerBlock = 1;
    // NUMBER OF BLOCKS: as many as keep every SM's thread capacity busy.
    int totalBlocks = (maxThreadsPerSM / threadsPerBlock) * numSMs;
    if (totalBlocks < 1) totalBlocks = 1;
    // TOTAL NUMBER OF THREADS IN THE GRID
    int totalThreads = totalBlocks * threadsPerBlock;
    dim3 dimGrid(totalBlocks, 1, 1);
    dim3 dimBlock(threadsPerBlock, 1, 1);
    if (TESTING) printf("\ntest_ThreadsPerBlock running with:\n\ttotalBlocks\t%d\t100%%\n\tblockSize\t%d\t%d%%\n",
        totalBlocks, threadsPerBlock, ((int) (targetOccupancy * 100)));
    // ARRAYS FOR PERFORMING VECTOR MATH
    // BUG FIX: the device allocations and copies below were sized in elements
    // (problemSize bytes) instead of bytes (problemSize * sizeof(float)), so
    // the kernels accessed memory far beyond the allocations.
    size_t bytes = problemSize * sizeof(float);
    float * in1 = (float *) calloc((problemSize), sizeof(float));
    float * in2 = (float *) calloc((problemSize), sizeof(float));
    float * out = (float *) calloc((problemSize), sizeof(float));
    float * in1D; float * in2D; float * outD;
    for (int i = 0; i < problemSize; i++) {
        in1[i] = (i * 0.99);
        in2[i] = ((problemSize - i - 1) * 0.99);
        out[i] = -1;
    }
    cudaMalloc((void **) &in1D, bytes);
    cudaMemcpy(in1D, in1, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &in2D, bytes);
    cudaMemcpy(in2D, in2, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &outD, bytes);
    cudaMemcpy(outD, out, bytes, cudaMemcpyHostToDevice);
    // INITIALIZE TIMER BEFORE CALLING KERNEL
    clock_t start = clock();
    if (functionToUse == 0) {
        doubleInt<<<dimGrid, dimBlock>>>(problemSize, totalThreads);
    }
    else if (functionToUse == 1) {
        memoryBound<<<dimGrid, dimBlock>>>(in1D, in2D, outD, problemSize, totalThreads);
    }
    else if (functionToUse == 2) {
        computeBound<<<dimGrid, dimBlock>>>(in1D, in2D, outD, problemSize, totalThreads);
    }
    // SYNC DEVICE AND GET TIME TAKEN (kernel launches are asynchronous)
    cudaDeviceSynchronize();
    clock_t end = clock();
    double time_taken = ((double)(end - start)) / CLOCKS_PER_SEC;
    // CLEANUP
    free(in1); free(in2); free(out);
    cudaFree(in1D); cudaFree(in2D); cudaFree(outD);
    return time_taken;
}
// SIMPLE FP MULTIPLICATION, NO MEMORY ACCESS
__global__
void doubleInt (int N, int totalThreads) {
    // Grid-stride loop: each thread doubles its (virtual) element index.
    // Nothing is stored, so the kernel generates no memory traffic; it exists
    // purely to occupy the SMs with arithmetic for the occupancy experiment.
    int val;
    for (int id = (blockIdx.x * blockDim.x) + threadIdx.x; id < N; id += totalThreads) {
        val = id;
        val *= 2;
    }
}
// MEMORY BOUND VECTOR
__global__
void memoryBound (float * in1, float * in2, float * out, int N, int totalThreads) {
    // Grid-stride vector add: two global loads + one global store per element
    // and a single FP add — deliberately memory-bound (CGMA = 1/3).
    for (int id = (blockIdx.x * blockDim.x) + threadIdx.x; id < N; id += totalThreads) {
        out[id] = in1[id] + in2[id];
    }
}
__global__
void computeBound (float * in1, float * in2, float * out, int N, int totalThreads) {
    // Grid-stride vector op with the same memory traffic as memoryBound
    // (3 accesses) but ~90 FP operations per element — compute-bound
    // (CGMA ~ 30). The constant expression is kept exactly as designed.
    for (int id = (blockIdx.x * blockDim.x) + threadIdx.x; id < N; id += totalThreads) {
        float acc = in1[id] + in2[id];
        for (int rep = 0; rep < 10; rep++)
            acc *= 3.14 * 2.718 / .57721 - 4.6692 + 1.61803 * 131.7 - 530.1874 / 51.9;
        out[id] = acc;
    }
}
// MAIN FUNCTION CALL TO GET DEVICE-SPECIFIC DATA
// MAIN FUNCTION CALL TO GET DEVICE-SPECIFIC DATA
// Populates every device-specific global: selects the GPU (getGPU), then
// derives the per-SM block and warp limits from its compute capability.
void initDeviceVars() {
getGPU();
getMaxBlocksPerSM();
getMaxWarpsPerSM();
}
// SET DEVICE WITH HIGHEST COMPUTE CAPABILITY
// SET DEVICE WITH HIGHEST COMPUTE CAPABILITY
void getGPU() {
    int dev_count, deviceToUse, maxCCmajor, maxCCminor;
    dev_count = deviceToUse = maxCCmajor = maxCCminor = 0;
    // GET NUMBER OF DEVICES
    // BUG FIX: dev_prop must outlive this function because the global
    // deviceName is set to point into dev_prop.name below. With the original
    // automatic (stack) variable, deviceName dangled as soon as getGPU()
    // returned; making the struct static keeps the pointer valid.
    static cudaDeviceProp dev_prop;
    cudaGetDeviceCount(&dev_count);
    if (dev_count < 1) {
        printf("No CUDA enabled devices on this system!\n");
        exit(1);
    }
    // WHICH DEVICE HAS HIGHEST COMPUTE CAPABILITY
    for (int i = 0; i < dev_count; i++) {
        cudaGetDeviceProperties(&dev_prop, i);
        if ((dev_prop.major > maxCCmajor) || ((dev_prop.major == maxCCmajor) && (dev_prop.minor > maxCCminor))) {
            deviceToUse = i;
            maxCCmajor = dev_prop.major;
            maxCCminor = dev_prop.minor;
        }
    }
    // SET DEVICE AND CACHE ITS PROPERTIES IN THE GLOBALS
    cudaGetDeviceProperties(&dev_prop, deviceToUse);
    cudaSetDevice(deviceToUse);
    deviceName = &dev_prop.name[0];
    compCapMajor = maxCCmajor;
    compCapMinor = maxCCminor;
    maxThreadsPerGrid = dev_prop.maxGridSize[0];
    numSMs = dev_prop.multiProcessorCount;
    maxThreadsPerBlock = dev_prop.maxThreadsPerBlock;
}
// GET MAX NUMBER OF BLOCKS ASSIGNABLE TO AN SM
// GET MAX NUMBER OF BLOCKS ASSIGNABLE TO AN SM
void getMaxBlocksPerSM() {
    // Per-architecture limits, keyed by the compute-capability major version.
    switch (compCapMajor) {
    case 2: maxBlocksPerSM = 8; break;
    case 3: maxBlocksPerSM = 16; break;
    case 5:
    case 6: maxBlocksPerSM = 32; break;
    default:
        // Unknown architecture: refuse to guess a limit.
        printf("\n No max blocks settings for Compute Capability %d.%d\n",
            compCapMajor, compCapMinor);
        exit(0);
    }
}
// GET MAX NUMBER OF WARPS AND THREADS THAT CAN RUN ON AN SM
// GET MAX NUMBER OF WARPS AND THREADS THAT CAN RUN ON AN SM
void getMaxWarpsPerSM() {
    // Per-architecture warp limits, keyed by compute capability.
    switch (compCapMajor) {
    case 2: maxWarpsPerSM = 48; break;
    case 3:
    case 5: maxWarpsPerSM = 64; break;
    case 6: maxWarpsPerSM = (compCapMinor == 2) ? 128 : 64; break;
    default:
        // Unknown architecture: refuse to guess a limit.
        printf("\n No max warp settings for Compute Capability %d.%d\n",
            compCapMajor, compCapMinor);
        exit(0);
    }
    // ASSIGN MAX THREADS PER SM (32 threads per warp)
    maxThreadsPerSM = (maxWarpsPerSM * 32);
}
// Print usage instructions to stderr and terminate with status 1.
// Called whenever the command-line arguments are missing or invalid.
void howToUse() {
fprintf( stderr, "\nUsage: './occupancy [occupancyMethod] [functionToUse] [targetOccupancy] [problemSize]'");
fprintf( stderr, "\n\tOccupancy Method:\n\t\t0: %% of max blocks that can run simultaneously\n\t\t1: %% of max threads per block");
fprintf( stderr, "\n\tFunction to Use:\n\t\t0: doubleInt\n\t\t1: memoryBound\n\t\t2: computeBound");
fprintf( stderr, "\n\n\tIE: './occupancy 0 0 75 100000' runs the kernel with doubleInt() and 75%% of max blocks simultaneously assignable to all SMs and a problem size of 100,000");
exit( 1 );
}
|
12,403 | // Program to encapsulate Poisson Solver
#include <stdio.h>
#include <string>
#include <string.h>
#include <iostream>
using namespace std;
#define CST_ME 9.109e-31 // electron mass (kg)
#define CST_E 1.602e-19 // electron charge (C)
#define CST_KB 1.381e-23 // boltzmann constant (m^2 kg s^-2 K^-1)
#define CST_EPSILON 8.854e-12 // free space electric permittivity (s^2 C^2 m^-3 kg^-1)
#define CHARGE_DEP_BLOCK_DIM 512 //block dimension for particle2grid kernel
#define JACOBI_BLOCK_DIM 128 //block dimension for jacobi_iteration kernel
#define CN_SCAN_BLOCK_DIM 64 //block dimension for Crank-Nicolson scan kernel
#define CN_MAP_BLOCK_DIM 64 //block dimension for Crank-Nicolson map kernel
extern __shared__ double sh_mem[];
/******************************************************************************
* Helper function to check cuda errors
******************************************************************************/
void cu_check(cudaError_t cuError, const string file, const int line)
{
	// Abort the simulation with a diagnostic when a CUDA API call reported
	// an error; a zero (success) code is a no-op.
	if (cuError != 0)
	{
		cout << "CUDA error found in file " << file << " at line " << line << ". (error code: " << cuError << ")" << endl;
		cout << "Exiting simulation" << endl;
		exit(1);
	}
}
/******************************************************************************
* Helper function to synchronize the threads
******************************************************************************/
void cu_sync_check(const string file, const int line)
{
	// Wait for all pending device work, then abort if any asynchronous
	// error (e.g. from a previous kernel launch) was recorded.
	cudaDeviceSynchronize();
	cudaError_t cuError = cudaGetLastError();
	if (cuError != 0)
	{
		cout << "CUDA error found in file " << file << " at line " << line << ". (error code: " << cuError << ")" << endl;
		cout << "Exiting simulation" << endl;
		exit(1);
	}
}
/******************************************************************************
* Helper functions for the global static variables
******************************************************************************/
double init_ds(void)
{
	// Spatial step of the mesh, lazily initialized to 0.1 on first call.
	static double ds = 0.0;
	if (ds == 0.0) ds = 0.1;
	return ds;
}
/**********************************************************/
double init_Dl(void)
{
	// Debye length, computed once from fixed plasma parameters and cached.
	static double Dl = 0.0;
	if (Dl == 0.0) {
		const double ne = 1.0e9;   // electron density
		const double Te = 1000.0;  // electron temperature
		Dl = sqrt(CST_EPSILON*CST_KB*Te/(ne*CST_E*CST_E));
	}
	return Dl;
}
/**********************************************************/
// Vacuum permittivity converted into the simulation's normalized unit system
// (Debye-length / electron-mass units). See the NOTE below: the computed
// value is currently discarded and the function returns 1.
double init_epsilon0(void)
{
// function variables
double Te;
const double Dl = init_Dl();
static double epsilon0 = 0.0;
// function body
if (epsilon0 == 0.0) {
Te = 1000.0;
epsilon0 = CST_EPSILON; // SI units
epsilon0 /= pow(Dl*sqrt(CST_ME/(CST_KB*Te)),2); // time units
epsilon0 /= CST_E*CST_E; // charge units
epsilon0 *= Dl*Dl*Dl; // length units
epsilon0 *= CST_ME; // mass units
}
// Translated from Spanish: "If epsilon0 is a double, the way these operations
// are carried out, it evaluates to inf" — hence the normalized value below.
// NOTE(review): the computed epsilon0 is discarded and this always returns 1;
// confirm that this normalization is intended.
//return epsilon0;
return 1;
}
/**********************************************************/
int init_nn(void)
{
	// Number of mesh nodes used by every solver in this file.
	static const int nn = 201;
	return nn;
}
/******************************************************************************
*
* Jacobi iteration of the Jacobi method, full version that calculates the
* maximum error of the solution, in order to test convergence of the method
*
******************************************************************************/
// One Jacobi relaxation sweep over the interior nodes [1, nn-2] of the 1D
// Poisson problem, plus a per-block max-error reduction written to g_error.
// Launch with (2*JACOBI_BLOCK_DIM+2)*sizeof(double) dynamic shared memory:
// a phi tile of JACOBI_BLOCK_DIM+2 doubles followed by JACOBI_BLOCK_DIM
// error slots.
__global__ void jacobi_iteration (int nn, double ds, double epsilon0, double *g_rho, double *g_phi, double *g_error)
{
/*----------------------------- function body -------------------------*/
// shared memory pointers (manually partitioned from the dynamic allocation)
double *sh_old_phi= (double *) sh_mem;
double *sh_error = (double *) &sh_old_phi[JACOBI_BLOCK_DIM+2]; // manually set up shared memory
// registers
double new_phi, dummy_rho;
int tid = (int) threadIdx.x;
// sh_tid/g_tid are shifted by 1: index 0 of the tile / of g_phi is the halo.
int sh_tid = (int) threadIdx.x + 1;
int g_tid = (int) (threadIdx.x + blockDim.x * blockIdx.x) + 1;
int bdim = (int) blockDim.x;
int bid = (int) blockIdx.x;
int gdim = (int) gridDim.x;
/*------------------------------ kernel body --------------------------*/
// load phi data from global to shared memory (interior nodes only)
if (g_tid < nn - 1) sh_old_phi[sh_tid] = g_phi[g_tid];
// load communication zones: the halo elements at both edges of the tile.
// The last block loads its right halo from the thread at the final interior
// node instead of from the last thread of the block.
if (bid < gdim-1) {
if (sh_tid == 1) sh_old_phi[sh_tid-1] = g_phi[g_tid-1];
if (sh_tid == bdim) sh_old_phi[sh_tid+1] = g_phi[g_tid+1];
} else {
if (sh_tid == 1) sh_old_phi[sh_tid-1] = g_phi[g_tid-1];
if (g_tid == nn-2) sh_old_phi[sh_tid+1] = g_phi[g_tid+1];
}
// load charge density data into registers, pre-scaled by ds^2/epsilon0
if (g_tid < nn - 1) {
dummy_rho = ds*ds*g_rho[g_tid]/epsilon0;
}
__syncthreads();
// update interior mesh points: phi_i = (rho_i*ds^2/eps0 + phi_{i-1} + phi_{i+1})/2
if (g_tid < nn - 1) {
new_phi = 0.5*(dummy_rho + sh_old_phi[sh_tid-1] + sh_old_phi[sh_tid+1]);
// store new values of phi in global memory
g_phi[g_tid] = new_phi;
// evaluate local errors (change of phi at this node)
sh_error[tid] = fabs(new_phi-sh_old_phi[sh_tid]);
}
__syncthreads();
// tree reduction for the maximum error in the current block; the guards
// keep threads past the last interior node out of the comparison
for (int stride = 1; stride < bdim; stride <<= 1) {
if ((tid%(stride*2) == 0) && (tid+stride < bdim) && (g_tid+stride < nn-1)) {
if (sh_error[tid]<sh_error[tid+stride]) sh_error[tid] = sh_error[tid+stride];
}
__syncthreads();
}
// store this block's maximum error in global memory
if (tid == 0) g_error[bid] = sh_error[tid];
return;
}
/******************************************************************************
*
* Jacobi iteration of the Jacobi method, version that does not calculate the
* maximum error
*
******************************************************************************/
// One Jacobi relaxation sweep over the interior nodes [1, nn-2], without the
// error reduction of jacobi_iteration. Launch with at least
// (JACOBI_BLOCK_DIM+2)*sizeof(double) dynamic shared memory for the phi tile.
__global__ void jacobi_iter_no_error (int nn, double ds, double epsilon0, double *g_rho, double *g_phi)
{
/*----------------------------- function body -------------------------*/
// shared memory pointers
double *sh_old_phi= (double *) sh_mem;
// registers
double new_phi, dummy_rho;
// sh_tid/g_tid are shifted by 1: index 0 of the tile / of g_phi is the halo.
int sh_tid = (int) threadIdx.x + 1;
int g_tid = (int) (threadIdx.x + blockDim.x * blockIdx.x) + 1;
int bdim = (int) blockDim.x;
int bid = (int) blockIdx.x;
int gdim = (int) gridDim.x;
/*------------------------------ kernel body --------------------------*/
// load phi data from global to shared memory (interior nodes only)
if (g_tid < nn - 1) sh_old_phi[sh_tid] = g_phi[g_tid];
// load communication zones: the halo elements at both edges of the tile.
// The last block loads its right halo from the final interior node.
if (bid < gdim-1) {
if (sh_tid == 1) sh_old_phi[sh_tid-1] = g_phi[g_tid-1];
if (sh_tid == bdim) sh_old_phi[sh_tid+1] = g_phi[g_tid+1];
} else {
if (sh_tid == 1) sh_old_phi[sh_tid-1] = g_phi[g_tid-1];
if (g_tid == nn-2) sh_old_phi[sh_tid+1] = g_phi[g_tid+1];
}
// load charge density data into registers, pre-scaled by ds^2/epsilon0
if (g_tid < nn - 1) {
dummy_rho = ds*ds*g_rho[g_tid]/epsilon0;
}
__syncthreads();
// update interior mesh points: phi_i = (rho_i*ds^2/eps0 + phi_{i-1} + phi_{i+1})/2
if (g_tid < nn - 1) {
new_phi = 0.5*(dummy_rho + sh_old_phi[sh_tid-1] + sh_old_phi[sh_tid+1]);
// store new values of phi in global memory
g_phi[g_tid] = new_phi;
}
return;
}
/******************************************************************************
*
* This function solves Poisson's equation by means of the Jacobi method in the
* GPU. Based in the work by Antonio Tejero-del-Caz for his PhD. Jacobi method
* is iterative, and the error has to be calculated every iteration until it
* reaches an acceptable value.
*
* Checking error takes around 9 times longer than no checking, so we could
* check every 9 iterations or more.
*
* In practice, as there is a minimum number of iterations according to theory,
* the minimum number of iterations is always enough to obtain an acceptable
* error.
*
******************************************************************************/
// Solve the 1D Poisson equation on d_phi (in place) with the Jacobi method.
// d_rho holds the charge density; iteration stops once the per-sweep maximum
// change of phi drops below max_error, after a theory-mandated minimum of
// 2*nn error-free sweeps.
void poisson_solver_jacobi(double max_error, double *d_rho, double *d_phi)
{
	/*--------------------------- function variables -----------------------*/
	// host memory pointers
	static const double ds = init_ds(); // spatial step
	static const int nn = init_nn(); // number of nodes
	static const double epsilon0 = init_epsilon0(); // electric permitivity of free space
	double *h_error;
	double t_error = max_error*10; // forces at least one error-checking sweep
	// Jacobi method is iterative; here we save the min number of iterations
	// according to theory so that the solution is valid. It turns out to be
	// enough to obtain the minimum error achievable with this method, so the
	// solver always performs the same number of iterations.
	int min_iteration = 2*nn;
	dim3 blockdim, griddim;
	size_t sh_mem_size;
	cudaError_t cuError;
	// device memory pointers
	double *d_error;
	/*----------------------------- function body -------------------------*/
	// set dimensions of grid of blocks and blocks of threads for jacobi kernel
	blockdim = JACOBI_BLOCK_DIM;
	griddim = (int) ((nn-2)/JACOBI_BLOCK_DIM) + 1;
	// shared memory for jacobi_iteration: a phi tile of JACOBI_BLOCK_DIM+2
	// doubles plus JACOBI_BLOCK_DIM error slots
	sh_mem_size = (2*JACOBI_BLOCK_DIM+2)*sizeof(double);
	// allocate host and device memory for the vector of per-block errors
	cuError = cudaMalloc((void **) &d_error, griddim.x*sizeof(double));
	cu_check(cuError, __FILE__, __LINE__);
	h_error = (double*) malloc(griddim.x*sizeof(double));
	int iter_count = 0;
	// execute jacobi iterations until solved;
	// no need to check the error until the minimum number of iterations is reached
	while(min_iteration>0) {
		cudaGetLastError(); // clear any sticky error before the launch
		// BUG FIX: this launch originally requested only sizeof(double) of
		// dynamic shared memory, but jacobi_iter_no_error indexes sh_mem as a
		// tile of JACOBI_BLOCK_DIM+2 doubles — every sweep read and wrote far
		// past the allocated shared memory.
		jacobi_iter_no_error<<<griddim, blockdim, (JACOBI_BLOCK_DIM+2)*sizeof(double)>>>(nn, ds, epsilon0, d_rho, d_phi);
		min_iteration--;
	}
	while (t_error>=max_error) {
		// execute at least one iteration checking errors
		cudaGetLastError();
		jacobi_iteration<<<griddim, blockdim, sh_mem_size>>>(nn, ds, epsilon0, d_rho, d_phi, d_error);
		cu_sync_check(__FILE__, __LINE__);
		// copy the per-block error vector from device to host memory
		cuError = cudaMemcpy(h_error, d_error, griddim.x*sizeof(double), cudaMemcpyDeviceToHost);
		cu_check(cuError, __FILE__, __LINE__);
		// evaluate the max error of the iteration across all blocks
		t_error = 0;
		for (int i = 0; i<griddim.x; i++)
		{
			if (h_error[i] > t_error) t_error = h_error[i];
		}
		iter_count++;
	}
	printf("iter_count = %i\n", iter_count);
	// free device and host memory
	cudaFree(d_error);
	free(h_error);
	return;
}
/*************************************************************************
*
* Here we find the modification to include the Crank-Nicolson method
* for Poisson's equation solver. These kernels are executed in the same
* order as they are declared.
*
* The Crank-Nicolson algorithm is basically an exact method that performs
* two sums over the elements of the input array, with coefficients that
* depend on the system of differential equations. As Poisson's equation
* is the same in all the iterations, the method does not change. Moreover,
* as the system has certain periodicities, the coefficients can be
* calculated before programming time and introduced in the sums.
*
* The sums over many elements are performed using the scan algorithm,
* modified with the coefficients for the method. The synchronization
* threads can be performed in the same kernel, but the synchronazation
* between the blocks required ending the kernels and running the next.
*
*************************************************************************/
__global__ void cn_map_rho(double* d_temp1, double* d_rho, double ds, double epsilon0, int max_idx) {
	// Map each interior charge-density sample onto the scan input:
	// d_temp1[i] = -(i+1) * ds^2 * rho[i] / epsilon0.
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx >= max_idx) return;
	d_temp1[idx] = -((double) idx+1)*ds*ds*d_rho[idx]/epsilon0;
}
/**********************************************************/
// Per-block inclusive forward (Hillis-Steele) scan of d_src into d_des.
// Launch with blockDim.x doubles of dynamic shared memory. The block totals
// are combined across blocks by cn_fw_glob_scan afterwards.
__global__ void cn_fw_part_scan(double *d_des, double *d_src, int max_idx) {
	double *sh_src = (double *) sh_mem;
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	bool in_range = (idx < max_idx);
	if (in_range) sh_src[threadIdx.x] = d_src[idx];
	// BUG FIX: the barriers below were originally inside `if (idx < max_idx)`.
	// In the last (partially full) block some threads skipped them, which is
	// undefined behavior for __syncthreads(); all threads now reach every barrier.
	__syncthreads();
	for (int stride = 1; stride < blockDim.x; stride = stride*2) {
		int k = threadIdx.x - stride;
		// BUG FIX: the original added sh_src[k] into sh_src[threadIdx.x] with
		// no barrier between the read and the write, racing with the thread
		// that owns element k. Read into a register first, sync, then write.
		double partial = 0.0;
		bool active = in_range && (k >= 0);
		if (active) partial = sh_src[k];
		__syncthreads();
		if (active) sh_src[threadIdx.x] += partial;
		__syncthreads();
	}
	if (in_range) d_des[idx] = sh_src[threadIdx.x];
	return;
}
/**********************************************************/
// Combine the per-block forward scans produced by cn_fw_part_scan into a
// global scan and apply the Crank-Nicolson coefficients. Runs as a separate
// kernel because inter-block synchronization requires ending the previous
// one. Launch with 2*sizeof(double) dynamic shared memory.
__global__ void cn_fw_glob_scan(double *d_des, double* d_src, int max_idx, double* d_phi) {
// It is necessary to synchronize all the blocks, which can only be done by
// ending the kernel and starting a new one
double *sh_acum = (double *) sh_mem;
double *sh_phi_0 = (double *) &sh_mem[1];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < max_idx) {
// thread 0 accumulates the totals (last element) of every preceding block
if (threadIdx.x == 0) {
double r_acum = 0.0;
for (int i = blockDim.x-1; i<blockIdx.x*blockDim.x; i+=blockDim.x) {
r_acum += d_src[i];
}
sh_acum[0] = r_acum;
}
// the last thread of the block (or of the data) caches the left boundary phi[0]
if ((threadIdx.x == blockDim.x-1)||(idx==max_idx-1)) {
sh_phi_0[0] = d_phi[0];
}
}
// barrier is outside the guard, so every thread (including those past the
// end of the data in the last block) reaches it
__syncthreads();
if (idx < max_idx) {
//d_des[idx] = sh_acum[0] + d_src[idx]; // this line would be a simple scan, no coefs
d_des[idx] = (sh_acum[0] + d_src[idx]- sh_phi_0[0])/((double) (idx+1)*(idx+2));
}
}
/**********************************************************/
// Per-block inclusive backward (right-to-left) scan of d_src into d_des;
// d_des may alias d_src. Launch with blockDim.x doubles of dynamic shared
// memory. The block totals are combined by cn_bw_glob_scan afterwards.
__global__ void cn_bw_part_scan(double *d_des, double *d_src, int max_idx) {
	double *sh_src = (double *) sh_mem;
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	bool in_range = (idx < max_idx);
	// Number of valid elements held by this block's tile.
	// BUG FIX: the original bounded the last block with max_idx % blockDim.x,
	// which is 0 (i.e. no accumulation at all) whenever max_idx is an exact
	// multiple of the block size.
	int valid = max_idx - blockIdx.x*blockDim.x;
	if (valid > blockDim.x) valid = blockDim.x;
	if (in_range) sh_src[threadIdx.x] = d_src[idx];
	// BUG FIX: barriers hoisted out of the `if (idx < max_idx)` guard — in the
	// original, threads past the end of the data in the last block skipped
	// __syncthreads(), which is undefined behavior.
	__syncthreads();
	for (int stride = 1; stride < blockDim.x; stride = stride*2) {
		int k = threadIdx.x + stride;
		// BUG FIX: read the partner element into a register and separate the
		// read from the write with a barrier; the original += raced with the
		// thread that owns element k.
		double partial = 0.0;
		bool active = in_range && (k < valid);
		if (active) partial = sh_src[k];
		__syncthreads();
		if (active) sh_src[threadIdx.x] += partial;
		__syncthreads();
	}
	if (in_range) d_des[idx] = sh_src[threadIdx.x];
}
/**********************************************************/
// Combine the per-block backward scans produced by cn_bw_part_scan into a
// global backward scan, fold in the right boundary value *d_phi_L, and apply
// the Crank-Nicolson coefficients. d_des must NOT alias d_src. Launch with
// 2*sizeof(double) dynamic shared memory.
__global__ void cn_bw_glob_scan(double *d_des, double* d_src, int max_idx, double* d_phi_L) {
	double *sh_acum= (double *) sh_mem;
	double *sh_phi_L = (double *) &sh_mem[1];
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	bool in_range = (idx < max_idx);
	if (in_range) {
		// thread 0 accumulates the totals (first element) of every following block
		if (threadIdx.x == 0) {
			double r_acum = 0.0;
			for (int i = (blockIdx.x+1)*blockDim.x; i<max_idx; i+=blockDim.x) {
				r_acum += d_src[i];
			}
			sh_acum[0] = r_acum;
		}
		// the last thread of the block (or of the data) caches the scaled right boundary
		if ((threadIdx.x == blockDim.x-1)||(idx==max_idx-1)) {
			sh_phi_L[0] = d_phi_L[0]/((double) max_idx+1); // max_idx+1 == nn-1
		}
	}
	// BUG FIX: this barrier was inside `if (idx < max_idx)`; threads past the
	// end of the data in the last block skipped it, which is undefined
	// behavior for __syncthreads(). All threads now reach it.
	__syncthreads();
	if (in_range) {
		d_des[idx] = (sh_phi_L[0]-sh_acum[0]-d_src[idx])*((double) idx+1);
	}
}
/**********************************************************/
// Solve the 1D Poisson equation exactly with the Crank-Nicolson scan
// pipeline: map rho -> forward partial/global scan -> backward partial/global
// scan, writing the interior solution into d_phi[1..nn-2]. The boundary
// values d_phi[0] and d_phi[nn-1] are read as Dirichlet conditions.
void poisson_solver_cn(double max_error, double *d_rho, double *d_phi) {
// It is necessary to synchronize all the blocks, which can only be done by
// ending the kernel and starting a new one
// max_error is conserved for compatibility with poisson_solver_jacobi
// In PIC code, just change the library, it has the same function poisson_solver
// global variables in host
int nn = init_nn();
double epsilon0 = init_epsilon0();
double ds = init_ds();
cudaError_t cuError;
// Allocate device scratch buffers for the nn-2 interior nodes
double* d_temp1;
cuError = cudaMalloc ((void **) &d_temp1, (nn-2)*sizeof(double));
cu_check(cuError, __FILE__, __LINE__);
double* d_temp2;
cuError = cudaMalloc ((void **) &d_temp2, (nn-2)*sizeof(double));
cu_check(cuError, __FILE__, __LINE__);
// Size of grids for each part of the algorithm (ceiling divisions)
int map_blocks_per_grid = (nn+CN_MAP_BLOCK_DIM-1)/CN_MAP_BLOCK_DIM;
int scan_blocks_per_grid = (nn-2+CN_SCAN_BLOCK_DIM-1)/CN_SCAN_BLOCK_DIM;
// Shared mem for scan part: one double per thread
size_t sh_mem_size;
sh_mem_size = (CN_SCAN_BLOCK_DIM)*sizeof(double);
// Obtain rho_1, stored in d_temp1 (input starts at the first interior node)
cn_map_rho<<<map_blocks_per_grid, CN_MAP_BLOCK_DIM>>>(d_temp1, &d_rho[1], ds, epsilon0, nn-2);
// Obtain D_2, stored in d_temp1. Modified to subtract d_phi[0], even if it is not part of scan
cn_fw_part_scan<<<scan_blocks_per_grid, CN_SCAN_BLOCK_DIM, sh_mem_size>>>(d_temp2, d_temp1, nn-2);
cn_fw_glob_scan<<<scan_blocks_per_grid, CN_SCAN_BLOCK_DIM, 2*sizeof(double)>>>(d_temp1, d_temp2, nn-2, d_phi);
// Obtain d_phi. Modified to add d_phi[nn-1]
cn_bw_part_scan<<<scan_blocks_per_grid, CN_SCAN_BLOCK_DIM, sh_mem_size>>>(d_temp2, d_temp1, nn-2);
cn_bw_glob_scan<<<scan_blocks_per_grid, CN_SCAN_BLOCK_DIM, 2*sizeof(double)>>>(&d_phi[1], d_temp2, nn-2, &d_phi[nn-1]);
cudaFree(d_temp1);
cudaFree(d_temp2);
}
/******************************************************************************
* main
******************************************************************************/
// Driver that initializes a linear phi profile and a uniform rho, runs one of
// the Poisson solvers on the device, and prints x / rho / phi per node.
int main(int argc, char** argv) {
printf("Initiating Poisson Solver Capsule to compare different algorithms\n\n");
cudaDeviceReset();
int nn = init_nn();
int nc = nn-1; // number of cells
double max_error = 1.0e-4;
cudaError_t cuError;
// Allocate host and device arrays of nn nodes
double* h_rho = (double*) malloc(nn*sizeof(double));
double* h_phi = (double*) malloc(nn*sizeof(double));
double* d_rho;
double* d_phi;
cuError = cudaMalloc ((void **) &d_rho, nn*sizeof(double));
cu_check(cuError, __FILE__, __LINE__);
cuError = cudaMalloc ((void **) &d_phi, nn*sizeof(double));
cu_check(cuError, __FILE__, __LINE__);
// Dirichlet boundary values and mesh extent
double phi_0 = 0.5;
double phi_L = 2.0;
double x_0 = 0.0;
double ds = init_ds();
double x_L = nn*ds;
// Initialize h_rho anyway, no influence in the calculation time (care with overflows);
// h_phi starts as the linear interpolation between the boundary values
for (int i=0; i<nn; i++) {
h_rho[i] = 1.0;
h_phi[i] = phi_0 + ((double) i)*(phi_L - phi_0)/((double) nc);
}
// Copy to device
cuError = cudaMemcpy(d_rho, h_rho, nn*sizeof(double), cudaMemcpyHostToDevice);
cu_check(cuError, __FILE__, __LINE__);
cuError = cudaMemcpy(d_phi, h_phi, nn*sizeof(double), cudaMemcpyHostToDevice);
cu_check(cuError, __FILE__, __LINE__);
// RUNS PoissonSolver once, uncomment to run
poisson_solver_cn(max_error, d_rho, d_phi);
// RUNS PoissonSolver many times, uncomment to run
// for (int i=0;i<100000;i++) poisson_solver_cn(max_error, d_rho, d_phi);
// RUNS PoissonSolver many times, uncomment to run
// for (int i=0;i<1000;i++) poisson_solver_jacobi(max_error, d_rho, d_phi);
// FROM WHERE we obtain the following measures
// 1e6 repetitions of poisson_solver_cn takes 108 seconds
// => each repetition takes 0.1ms (as expected)
// 1000 repetitions of jacobi with error calculation every iteration (original)
// take 37 seconds
// => each repetition takes 37 ms
// 10000 iterations of jacobi poisson_solver, modified for no error checking,
// take 42 seconds
// => each one takes 4.2 ms, great improvement!!
// CONCLUSION: Jacobi method without error checking is 9 times faster
// Crank-Nicolson method is 370 times faster (programming time is worth the effort)
// Bring d_phi (and d_rho, for printing) back to the host; cudaMemcpy blocks
// until the preceding kernels are done
cuError = cudaMemcpy(h_phi, d_phi, nn*sizeof(double), cudaMemcpyDeviceToHost);
cu_check(cuError, __FILE__, __LINE__);
cuError = cudaMemcpy(h_rho, d_rho, nn*sizeof(double), cudaMemcpyDeviceToHost);
cu_check(cuError, __FILE__, __LINE__);
// Print x, rho and phi for every node
for (int i=0; i<nn; i++) {
printf("%g\t%g\t%g\n",x_0 + ((double) i)*(x_L-x_0)/((double) nc), h_rho[i], h_phi[i]);
}
// Free memory
free(h_rho);
free(h_phi);
cudaFree (d_rho);
cudaFree (d_phi);
}
|
12,404 | /* MAD Filter on GPU
Version 1.0
Runs on single bin size
Input: filename
Number of samples to filter
Bin size
Threshold (multiple of sigma)
Option for filtering
Header size in bytes
Basic version using histogram method for median. 24/01/13
Replace with random numbers. Added 28/01/13
Finding mean and rms before and after filtering. Added 02/02/13
Copy back only flags file (bool)
Compile it with following line:
nvcc -Xptxas="-v" -o madfilter_small madfilter_small.cu -arch=sm_20
(Rohini Joshi, 2013 - rmjoshi.06@gmail.com)
*/
/* Modified the code to work with 8-bit unsigned data
Kaustubh Rajwade (Sept 2017 - Manchester)*/
#include<cuda.h>
#include<curand.h> // random num generation in cuda
#include<curand_kernel.h> // random num generation in cuda
#include<stdio.h>
#include<sys/time.h>
#include<iostream>
#include<math.h>
#include<string.h>
using std::cerr;
using std::cout;
using std::endl;
// Setting up device
// Selects the CUDA device whose name matches the desired GPU model.
// Returns true (and makes that device current) on a match, false otherwise.
bool SetGPU()
{
	int devicesCount;
	cudaGetDeviceCount(&devicesCount);
	char desiredDeviceName[1024];
	strcpy(desiredDeviceName, "GeForce GTX 1080 Ti");
	for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
	{
		cudaDeviceProp deviceProperties;
		cudaGetDeviceProperties(&deviceProperties, deviceIndex);
		// BUGFIX: `deviceProperties.name == desiredDeviceName` compared the
		// decayed char* pointers, which can never be equal; compare contents.
		if (strcmp(deviceProperties.name, desiredDeviceName) == 0)
		{
			// BUGFIX: select the device that actually matched, not index 1.
			cudaSetDevice(deviceIndex);
			return true;
		}
	}
	return false;
}
// __device__ --> is a dev fn to be run on GRID and can be called only from kernel or device fn
// Draws one standard-normal random value for thread `t` at sequence offset `i`.
// A fresh curand state is initialised per call (seed = thread id, sequence 0,
// offset = i) so each bin keeps an independent, reproducible stream.
__device__ float randomnumber(int t, int i){
	curandState state;
	curand_init(t, 0, i, &state); // t is the seed, i the offset into the sequence
	// Sample from N(0, 1).
	return curand_normal(&state);
}
/* MAD (Median Absolute Deviation) filter: one thread per bin of 8-bit samples.
 * Each thread histograms its bin (values 0..255), flags the extremities
 * (0 and 255), derives the median and the MAD from the histogram, then
 * replaces any sample with |sample - median| > thresh = mult*1.4826*mad
 * according to `op`:
 *   0 -> zero   1 -> median   2 -> random draw N(mean, 1.4826*mad)   3 -> thresh
 * Per-bin RMS before/after filtering goes to d_rms_b/d_rms_a, the MAD to
 * d_mad; d_flag[i] is 0 for filtered samples, 1 for kept ones. */
__global__ void madfilter( int *d_data, int binsize, int bins, int op, float *dev, int *not_flagged_data, bool *d_flag, float *d_rms_b, float *d_rms_a, float *d_mad, int mult, time_t currTime){
	// {0} initialised the whole array. blockDim = number of threads/block=32, tid indexes all threads in the grid
	// everything below runs for each thread through threadIdx.x and blockIdx.x
	int i, j=0, c=0,d,flag=0,odd=0,sum=0, sumsq=0,histdev[256] = {0},hist[256] = {0}, tid = threadIdx.x + blockIdx.x * blockDim.x;
	int lw = tid * (binsize); // the index in the original data array for each bin beginning
	int up = lw + (binsize); // same end
	float mean, med, mad, thresh;
	// variable j is to store effective size of bin (after flagging extremities)
	if (tid < bins){ // end crap is not accessed
		/* Flagging and generating histogram */
		for ( i=lw; i<up; i++){
			sum += d_data[i];
			sumsq += d_data[i]*d_data[i];
			// Flag extremities
			if((d_data[i]==0) || (d_data[i] == 255)){
				continue;
			}else{
				hist[d_data[i]] += 1;
				not_flagged_data[lw+j] = d_data[i];
				j+=1;
			}
		}
		/* Find RMS before filtering */
		mean = sum/(binsize);
		d_rms_b[tid] = sqrtf( sumsq/(binsize) - mean*mean );
		sum = 0;sumsq = 0;
		/* Find median. Two methods for even/odd sizes. Modify if data is 4 bit
		flag = 1/0 if median is floating point/int
		odd = 1/0 if data set is odd/even
		median can only be float if data set is even */
		if (j%2 == 0){
			d = j/2;
			for ( i=0; i<(256); i++){
				c = c + hist[i];
				if (c==d){
					med =(float)( (2*(i) + 1)*0.5 );
					flag = 1;
					break;
				}else if (c>d){
					med = i;
					break;
				}else
					continue;
			}
		}else{
			d = (j + 1)/2;
			odd = 1;
			c = 0;
			for ( i=0; i<(256); i++){
				c = c + hist[i];
				if (c >= d){
					med = i;
					break;
				}
			}
		}
		// MAD: histogram the absolute deviations from the median
		int s = 0, ii;
		if (flag == 0){
			for ( i=lw; i<lw+j; i++){
				dev[i] = fabs( not_flagged_data[i] - med );
				ii = (int)(ceil(dev[i]));
				histdev[ii] += 1;
			}
			/* two submethods for even/odd data sets */
			if (odd == 0){
				for ( i=0; i<(256); i++){
					s = s+histdev[i];
					if (s == d){
						mad = (float)( (2*(i) + 1)*0.5 );
						break;
					}else if (s > d ){
						mad = i;
						break;
					}else
						continue;
				}
			}else{
				for ( i=0; i<(256); i++){
					s = s + histdev[i];
					if(s >= d){
						mad = i;
						break;
					}
				}
			}
		}else{
			int p;
			for ( i=lw; i<lw+j; i++){
				dev[i] = (float)fabs( not_flagged_data[i] - med );
				p = (int) dev[i];
				histdev[p] += 1;
			}
			int s = 0;
			d = j/2;
			for ( i=0; i<(256); i++){
				s = s+histdev[i];
				if (s == d){
					mad = (float)( (2*(i) + 1)*0.5 + 0.5 );
					break;
				}else if (s > d){
					mad = (float)( i + 0.5 );
					break;
				}else
					continue;
			}
		}
		d_mad[tid] = mad;
		// 1.4826 scales MAD to an estimate of sigma for Gaussian data.
		thresh = mult*1.4826*mad;
		// Filtering: if abs(d-med) > thresh (or the sample is an extremity) replace it
		for( i=lw; i<up; i++){
			if ( (fabsf(d_data[i]-med) > thresh) || (d_data[i] == 0) || (d_data[i] == 255) ){
				if(op == 0){
					d_data[i] = 0;
				}else if(op == 1){
					d_data[i] = med;
				}else if(op == 2){
					d_data[i] = rint(mean + 1.4826*mad*randomnumber(currTime, i-lw));
				}else if(op == 3){
					// BUGFIX: this branch previously re-tested op == 1, so the
					// "-c" (replace-with-threshold) option was unreachable.
					d_data[i] = thresh;
				}d_flag[i] = 0;
			}
			else{
				d_flag[i] = 1;
			}
			sum += d_data[i];
			sumsq += d_data[i]*d_data[i];
		}
		/* Find RMS after filtering */
		mean = sum/(binsize);
		d_rms_a[tid] = sqrtf( sumsq/(binsize) - mean*mean );
	}
}
/* Host driver: reads 8-bit samples from a filterbank file, runs the MAD
 * filter on the GPU (one thread per bin), writes per-bin statistics to
 * "mad.dat" and the filtered samples to "<fname>_filtered".
 * Usage: madfilter fname nsamples binsize mult_thresh option headersize */
int main(int argc, char *argv[]){
	int i, k, mult_thresh, size, bsize, bins, headersize;
	int *h_data, *d_data, *not_flagged_data, op_int, num;
	float *h_rms_b, *h_rms_a, *d_rms_b, *d_rms_a, *h_mad, *d_mad, *dev;
	double time1, time2;
	FILE *fp;
	char *fname, *ffname,*op;
	struct timeval tim;
	float time_initial_host, time_initial_dev, time_kernel, time_copyback;
	bool *h_flag, *d_flag;
	// BUGFIX: validate argc BEFORE touching argv. The original parsed
	// argv[1..6] first and only then checked `argc <= 5`, so a short command
	// line read past the end of argv (argv[6] needs argc >= 7).
	if (argc < 7){
		system("./help_small.sh");
		exit(0);
	}
	//Define and create CUDA events start and stop for timing GPU activity
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// Parse input arguments
	fname = argv[1];               // input filename
	size = atoi( argv[2] );        // number of samples to filter
	bsize = atoi( argv[3] );       // bin size
	mult_thresh = atoi( argv[4] ); // multiple of MAD to use as threshold
	op = argv[5];                  // filtering option (what to replace RFI with)
	headersize = atoi(argv[6]);    // header size of filterbank file, bytes
	SetGPU();
	// Number of whole bins that can be filtered in the dataset
	bins = (int)size/bsize;
	// size is now made a multiple of the bin size
	size = bins*bsize;
	gettimeofday(&tim, NULL);
	time1 = tim.tv_sec + (tim.tv_usec/1000000.0);
	/* Allocate and store input on host */
	h_data = (int *)malloc(size*sizeof(int)); // actual data - will be read from SHM
	h_rms_b = (float *)malloc(bins*sizeof(float)); // RMS before filtering for each bin - for checking
	h_rms_a = (float *)malloc(bins*sizeof(float)); // RMS after filtering for each bin - for checking
	h_mad = (float *)malloc(bins*sizeof(float)); // MAD value for each bin - for checking
	h_flag = (bool *)malloc(size*sizeof(bool)); // Flags
	ffname = (char *)malloc(256*sizeof(char)); // name of the filtered output file
	sprintf(ffname, "%s_filtered", fname);
	// Store data in host memory from file
	fp = fopen(fname, "r");
	if (fp == NULL){
		// BUGFIX: the original printed the error and then used the NULL stream.
		printf("Error in opening input file\n");
		exit(1);
	}
	// Skipping the header
	fseek(fp,headersize,SEEK_SET);
	// Read 4-byte words and unpack four 8-bit samples from each with shifts.
	// (The original used ex = pow(256,k) — a float round-trip — for what is
	// plain integer bit manipulation.)
	for(i = 0; i < size/4; i++) {
		if (fread((void*) &num, sizeof(int), 1, fp) != 1)
			break; // short file: stop instead of reusing stale `num`
		for (k = 0; k < 4; k++) {
			h_data[4*i+k] = (num >> (8 * k)) & 255;
		}
	}
	fclose(fp);
	// As strcmp cannot be used in a kernel, convert the filtering option from char to integer here itself
	if(!strcmp(op, "-z")){
		op_int=0;
	}else if(!strcmp(op, "-m")){
		op_int=1;
	}else if(!strcmp(op, "-r")){
		op_int=2;
	}else if(!strcmp(op, "-c")){
		op_int=3;
	}else{
		// BUGFIX: the original left op_int uninitialized for unknown options.
		printf("Unknown filtering option %s, defaulting to -z\n", op);
		op_int=0;
	}
	gettimeofday(&tim, NULL);
	time2 = tim.tv_sec + (tim.tv_usec/1000000.0);
	time_initial_host = time2 - time1;
	/* Allocate i/o and store input on device */
	cudaEventRecord( start, 0 ); // Start CUDA timer
	cudaMalloc( (void **)&d_data, size*sizeof(int) );
	cudaMalloc( (void **)&d_rms_b, bins*sizeof(float) ); // dont need this
	cudaMalloc( (void **)&d_rms_a, bins*sizeof(float) ); // dont need this
	cudaMalloc( (void **)&d_mad, bins*sizeof(float) );
	cudaMalloc( (void **)&dev, bins*sizeof(float)*bsize );
	cudaMalloc( (void **)&not_flagged_data, bins*sizeof(int)*bsize );
	cudaMalloc( (void **)&d_flag, size*sizeof(bool) ); // actual flags, can be bool
	cudaMemcpy( d_data, h_data, size*sizeof(int), cudaMemcpyHostToDevice );
	cudaEventRecord( stop, 0 ); // Stop and store time elapsed
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time_initial_dev, start, stop);
	/* Setup grid and run kernel: one thread per bin */
	int blocks, threads = 32;
	blocks = (bins + threads - 1)/threads; // ceil(bins/threads) blocks so that 1 thread/bin
	printf("Grid dim [%d 1] Block dim [%d 1]\n", blocks, threads);
	cudaEventRecord(start, 0); // to start timing
	// Seeding using current time
	time_t currTime = time(NULL);
	// Launch is asynchronous; the event synchronize below waits for completion.
	madfilter<<<blocks, threads>>>( d_data, bsize, bins, op_int, dev, not_flagged_data, d_flag, d_rms_b, d_rms_a, d_mad, mult_thresh, currTime);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop); // makes sure gpu is done
	cudaEventElapsedTime( &time_kernel, start, stop);
	printf("Number of Bins = %d\n", bins);
	printf("Time for executing kernel = %f msec\n", time_kernel);
	/* Copy data back to host */
	cudaEventRecord(start, 0);
	cudaMemcpy( h_flag, d_flag, size*sizeof(bool), cudaMemcpyDeviceToHost );
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time_copyback, start, stop);
	cudaMemcpy( h_mad, d_mad, bins*sizeof(float), cudaMemcpyDeviceToHost );
	cudaMemcpy( h_data, d_data, size*sizeof(int), cudaMemcpyDeviceToHost );
	cudaMemcpy( h_rms_b, d_rms_b, bins*sizeof(float), cudaMemcpyDeviceToHost );
	cudaMemcpy( h_rms_a, d_rms_a, bins*sizeof(float), cudaMemcpyDeviceToHost );
	// Free memory on the device
	cudaFree(d_data);
	cudaFree(d_mad);
	cudaFree(d_rms_b);
	cudaFree(d_rms_a);
	cudaFree(dev);
	cudaFree(not_flagged_data);
	cudaFree(d_flag);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cerr << "Time to copyback = " << time_copyback << " ms" << endl;
	cerr << "Total time = " << time_copyback + time_kernel << " ms" << endl;
	// Write per-bin statistics to file
	fp = fopen("mad.dat", "w");
	if (fp == NULL){
		printf("Error in opening output file\n");
		exit(1);
	}
	for(i=0; i<bins; i++){
		fprintf(fp, "%f\t%f\t%f\n", h_rms_b[i], h_rms_a[i], h_mad[i]);
	}
	fclose(fp);
	fp = fopen(ffname, "wb");
	if (fp == NULL){
		printf("Error in opening output file\n");
		exit(1);
	}
	// Repack the filtered ints into unsigned chars
	unsigned char* h_data_u;
	h_data_u=(unsigned char *)malloc(sizeof(unsigned char)*size);
	for (i=0;i<size;i++){
		h_data_u[i] = (h_data[i] & (255));
	}
	// Writing out the filtered file
	fwrite(h_data_u, sizeof(unsigned char), size, fp);
	// Free all arrays on the host
	free(h_data_u);
	free(h_data);
	free(h_rms_a);
	free(h_rms_b);
	free(h_flag);
	free(h_mad);
	free(ffname);
	//Closing file
	fclose(fp);
	printf("Data copied back to host\n");
	return(0);
}
|
12,405 | #include<stdio.h>
#include<cuda.h>
#define BLOCKSIZE 1024
// Fills the matrix with its own flat index. Intended for a single-block
// launch with an N x N thread block (blockIdx is never consulted).
__global__ void initialize(unsigned* matrix, unsigned N) {
	const unsigned flat = threadIdx.y + threadIdx.x * blockDim.y;
	matrix[flat] = flat;
}
// Stub: intended to compute the square of `matrix` into `result`; the body
// has not been implemented yet, so launching it is a no-op.
__global__ void square_v1(unsigned* matrix, unsigned* result, unsigned N) {
}
// Allocates an N x N matrix on the device, fills it with its flat indices
// via the `initialize` kernel, copies it back and prints it.
int main(int nn, char *str[]) {
	// Usage guard: N is required (the original dereferenced str[1] unchecked).
	if (nn < 2) {
		fprintf(stderr, "usage: %s N\n", str[0]);
		return 1;
	}
	unsigned N = atoi(str[1]);
	unsigned *hmatrix, *matrix;
	// NOTE: an N x N block is capped at 1024 threads total, i.e. N <= 32 on
	// current hardware; larger N needs a multi-block launch.
	dim3 block(N, N, 1);
	cudaMalloc(&matrix, N*N*sizeof(unsigned));
	hmatrix = (unsigned*)malloc(N*N*sizeof(unsigned));
	initialize<<<1, block>>>(matrix, N);
	cudaMemcpy(hmatrix, matrix, N*N*sizeof(unsigned), cudaMemcpyDeviceToHost);
	for(unsigned i=0; i<N; i++) {
		for(unsigned j=0; j<N; j++) {
			printf("%4d ", hmatrix[i*N + j]);
		}
		printf("\n");
	}
	// Release both buffers (the original leaked them).
	cudaFree(matrix);
	free(hmatrix);
	return 0;
}
|
12,406 | /*
Erick Juarez
CPSC 479 Sec 1
HOMEWORK 5 - 3/25/20
tested using nvcc - CUDA compiler driver release 9.0, V9.0.176
*/
#include <stdio.h>
#include <cuda.h>
#define P1 16 // Size of array for problem 1
#define P2 1024 // Size of array for problems 2 & 3
#define P4 8000 // Size of array for problem 4
// device function to initialize array - used in problems 1 & 2
// Zeroes one array element per thread; expects a single block whose
// x-dimension covers the array.
__global__ void initialize(int *array){
	const unsigned idx = threadIdx.x;
	array[idx] = 0;
}
// device function will add values to array - used in problems 3 & 4
// Adds each thread's index to its own array slot (array[i] += i);
// expects a single block covering the array.
__global__ void add(int *array){
	const unsigned idx = threadIdx.x;
	array[idx] += idx;
}
// Main function
// Exercises the initialize/add kernels on arrays of 16, 1024 and 8000
// elements, verifying each result on the host.
// Fix vs. original: every kernel launch is now checked with
// cudaGetLastError(). Problem 4 asks for 8000 threads in ONE block, which
// exceeds the 1024-thread block limit — the launch fails silently and the
// original then compared garbage; now the launch error is reported.
int main(int argc, char * argv[]) {
	int *host_array; // host copy of array
	int *dev_array;  // device copy of array
	int byte_size;   // size in bytes of an array

	/////////////////////////////////////////////////////////////////////////
	// Problem 1 - initialize array of size 16 to all zeroes
	byte_size = P1 * sizeof(int);
	dim3 blockd(P1);
	cudaMalloc((void **) &dev_array, byte_size);
	initialize<<<1, blockd>>>(dev_array);
	if (cudaGetLastError() != cudaSuccess) {
		printf("Problem 1 - Elements not initialized properly!\n");
		cudaFree(dev_array);
		return 0;
	}
	host_array = (int *)malloc(byte_size);
	cudaMemcpy(host_array, dev_array, byte_size, cudaMemcpyDeviceToHost);
	// Verify all zero, then cleanup
	try {
		for(int c = 0; c < P1; c++){
			if(host_array[c] != 0) {throw "NON_ZERO_ELEM";}
		}
	} catch (...) {
		printf("Problem 1 - Elements not initialized properly!\n");
		cudaFree(dev_array);
		free(host_array);
		return 0;
	}
	printf("Problem 1 - Successfully initialized array with %d elements\n", P1);
	cudaFree(dev_array);
	free(host_array);
	/////////////////////////////////////////////////////////////////////////
	// Problem 2 - initialize array of size 1024 to all zeroes
	byte_size = P2 * sizeof(int);
	dim3 bd(P2); // 1024 threads: exactly the per-block limit
	cudaMalloc((void**) &dev_array, byte_size);
	initialize<<<1, bd>>>(dev_array);
	if (cudaGetLastError() != cudaSuccess) {
		printf("Problem 2 - Elements not initialized properly!\n");
		cudaFree(dev_array);
		return 0;
	}
	host_array = (int*)malloc(byte_size);
	cudaMemcpy(host_array, dev_array, byte_size, cudaMemcpyDeviceToHost);
	try {
		for(int c = 0; c < P2; c++){
			if(host_array[c] != 0) {throw "NON_ZERO_ELEM";}
		}
	} catch (...) {
		printf("Problem 2 - Elements not initialized properly!\n");
		cudaFree(dev_array);
		free(host_array);
		return 0;
	}
	printf("Problem 2 - Successfully initialized array with %d elements\n", P2);
	/////////////////////////////////////////////////////////////////////////
	// Problem 3 - add i to array[i], reusing the array from Problem 2
	add<<<1, bd>>>(dev_array);
	if (cudaGetLastError() != cudaSuccess) {
		printf("Problem 3 - Elements not added properly!\n");
		cudaFree(dev_array);
		free(host_array);
		return 0;
	}
	cudaMemcpy(host_array, dev_array, byte_size, cudaMemcpyDeviceToHost);
	try {
		for(int c = 0; c < P2; c++){
			if(host_array[c] != c) {throw "ADD_OP_MATCH";}
		}
	} catch (...) {
		printf("Problem 3 - Elements not added properly!\n");
		cudaFree(dev_array);
		free(host_array);
		return 0;
	}
	printf("Problem 3 - Successfully added i to array[i] with %d elements\n", P2);
	cudaFree(dev_array);
	free(host_array);
	/////////////////////////////////////////////////////////////////////////
	// Problem 4 - same as problem 3, but with 8000 elements. 8000 threads in
	// a single block exceeds the hardware limit; the launch-error check below
	// surfaces that instead of comparing uninitialized memory.
	byte_size = P4 * sizeof(int);
	dim3 gd(P4);
	cudaMalloc((void**) &dev_array, byte_size);
	initialize<<<1, gd>>>(dev_array);
	add<<<1, gd>>>(dev_array);
	if (cudaGetLastError() != cudaSuccess) {
		printf("Problem 4 - Elements not added properly!\n");
		cudaFree(dev_array);
		return 0;
	}
	host_array = (int*)malloc(byte_size);
	cudaMemcpy(host_array, dev_array, byte_size, cudaMemcpyDeviceToHost);
	try {
		for(int c = 0; c < P4; c++){
			if(host_array[c] != c) {throw "ADD_OP_MATCH";}
		}
	} catch (...) {
		printf("Problem 4 - Elements not added properly!\n");
		cudaFree(dev_array);
		free(host_array);
		return 0;
	}
	printf("Problem 4 - Successfully added i to array[i] with %d elements\n", P4);
	cudaFree(dev_array);
	free(host_array);
	return 0;
}
// Scales every element of a rows x columns x depth tensor: c = alpha * a.
// Launch with a 2D grid covering (columns, rows); each in-bounds thread
// handles one (row, col) cell across all depth slices.
__global__ void
mat_scale(float alpha, float *a, float *c, int rows, int columns, int depth)
{
	const int row = blockDim.y * blockIdx.y + threadIdx.y;
	const int col = blockDim.x * blockIdx.x + threadIdx.x;
	if (row >= rows || col >= columns)
		return; // grid overshoot guard
	const int base = (row * columns + col) * depth;
	for (int k = 0; k < depth; k++)
		c[base + k] = alpha * a[base + k];
}
|
12,408 | #include<iostream>
#include<string>
using namespace std;
// Road length in cells (tc) and number of simulation iterations (ts);
// both are set from argv in main and read by the helpers below.
int tc = 0;
int ts = 0;
// Builds a road of tc cells, each randomly 0 or 1. One extra slot is
// allocated (tc + 1) but left unset, matching the buffers used elsewhere.
__host__ int* MakeCarretera(){
	int *road = (int *)malloc((tc + 1) * sizeof(int));
	int cell = 0;
	while (cell < tc) {
		road[cell] = (rand() % 10) % 2 ;
		++cell;
	}
	return road;
}
// Prints the tc road cells on a single line, space-separated.
__host__ void MostrarCarretera(int* crt){
	int cell = 0;
	while (cell < tc) {
		cout << crt[cell] << " ";
		++cell;
	}
	cout << "\n";
}
// Adds 2 to one road cell per block; expects a grid with one block per
// cell (the cell is selected by blockIdx.x, not threadIdx.x).
__global__ void prueba(int *ctr,int n){
	const int cell = blockIdx.x;
	ctr[cell] += 2;
}
// Builds a random road, sends it to the GPU, adds 2 to every cell with the
// `prueba` kernel, and prints the road before and after.
// Fixes vs. original: (1) buffer sizes were computed while tc was still 0;
// (2) the HOST pointer was passed to the kernel instead of the device copy;
// (3) the result was shown by dereferencing the DEVICE pointer on the host;
// (4) the grid was <<<1, tc>>> although the kernel indexes by blockIdx.x.
int main(int argc, char *argv[]) {
	int *carretera;
	int *d_carretera;
	if (argc >= 3) {
		try{
			tc = stoi(argv[1]);// el tamaño de la carretera
			ts = stoi(argv[2]);// el numero de iteraciones
		}
		catch(const exception& e)
		{
			cout << "No se ingresaron variables, no se hara nada \n\t tc : tamaño de la carreter entero \n\t ts numero de iteraciones que va realizar la simulacion entero"<< '\n';
		}
	} else {
		cout << "No se ingresaron variables, no se hara nada \n\t tc : tamaño de la carreter entero \n\t ts numero de iteraciones que va realizar la simulacion entero"<< '\n';
	}
	if (tc != 0 && ts != 0) {
		// Sizes are computed AFTER parsing, so they use the real tc.
		int size = tc + 1;
		int sizeint = size * sizeof(int);
		carretera = MakeCarretera();
		MostrarCarretera(carretera);
		// Allocate the device copy of the road and upload it.
		cudaMalloc((void **) &d_carretera, sizeint);
		cudaMemcpy(d_carretera, carretera, sizeint, cudaMemcpyHostToDevice);
		// One block per cell (the kernel uses blockIdx.x); pass the DEVICE pointer.
		prueba<<<tc, 1>>>(d_carretera, tc);
		// Bring the result back and display the HOST copy.
		cudaMemcpy(carretera, d_carretera, sizeint, cudaMemcpyDeviceToHost);
		MostrarCarretera(carretera);
		// Release host and device buffers.
		free(carretera);
		cudaFree(d_carretera);
	}
	return 0;
}
|
12,409 | //each kernel process one node
// Graph-coloring step: one block per node listed in `large`. The block's
// threads cooperatively scan the node's adjacency list; the node receives
// `currentColor` only if it has no uncolored neighbour with id >= node.
// CSR inputs: offset[] row pointers, col_id[] neighbour ids.
__global__ void largeKernel(int *offset, int *col_id, int *large, int sizeLarge, int *color, int currentColor)
{
	// Shared vote flag: 1 = node may take the color, 0 = blocked.
	__shared__ bool set[1];
	if(blockIdx.x < sizeLarge)
	{
		set[0]=1;
		// BUGFIX: barrier between the flag initialisation and the scan below.
		// Without it a fast thread can clear set[0] in the loop before a
		// slower thread re-writes it to 1, losing the veto (data race).
		// Both enclosing conditions are uniform across the block, so this
		// barrier is reached by all threads.
		__syncthreads();
		int node = large[blockIdx.x];
		if(color[node]==0)
		{
			int neighLen = offset[node+1]-offset[node];
			// Strided scan of the adjacency list across the block's threads.
			for(int i = threadIdx.x; i<neighLen; i=i+blockDim.x)
			{
				int item = col_id[offset[node]+i];
				if(item >= node && color[item]==0)
					set[0]=0; // benign race: all writers store the same value
			}
			__syncthreads();
			// A single thread commits the decision.
			if(threadIdx.x == 0){
				if(set[0] == 1)
					color[node]=currentColor;
			}
		}
	}
}
|
12,410 | #include "includes.h"
#include "includes.h"
// Writes zero into the first `size` ints of _bufferPtr, one element per
// thread, guarded against the grid overshooting the buffer.
__global__ void fillIntZeroKernal(int *_bufferPtr,int size)
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= size)
		return;
	_bufferPtr[tid] = 0;
}
12,411 | /*******************************************
* vect_add.cu
* By: Thomas Kinch
* 4/11/18
* A basic add vector program using CUDA.
*******************************************/
#include <cuda.h>
#include <stdio.h>
#define n 512
// Element-wise vector add, one block per element: c[i] = a[i] + b[i]
// (the element is selected by blockIdx.x; each block runs one thread).
__global__ void add(float *d_a, float *d_b, float *d_c){
	const int idx = blockIdx.x;
	d_c[idx] = d_a[idx] + d_b[idx];
}
// Adds two n-element vectors on the GPU and prints the first 10 results.
// Fixes vs. original: (1) the launch was <<<size, 1>>> where size is in
// BYTES (n * sizeof(float)), so blocks n..4n-1 wrote past the end of d_c;
// (2) h_a/h_b were copied to the device before being initialized.
int main(){
	float *h_a, *h_b, *h_c; //Host variables
	//Malloc memory for host variables
	h_a = (float*)malloc(n * sizeof(float));
	h_b = (float*)malloc(n * sizeof(float));
	h_c = (float*)malloc(n * sizeof(float));
	//Fill the inputs BEFORE copying them to the device
	for(int i = 0; i < n; i++){
		h_a[i] = (float)i;
		h_b[i] = (float)(2 * i);
	}
	float *d_a, *d_b, *d_c; //Device
	int size = n * sizeof(float);
	//Malloc memory for device variables
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);
	//Memcpy - copy host values to device
	cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
	//Add the vectors: one block per ELEMENT (n blocks, not `size` blocks)
	add<<<n, 1>>>(d_a, d_b, d_c);
	//Copy device result to the host
	cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
	for(int i = 0; i < 10; i++){
		printf("h_c[%d] = %.1f\n", i, h_c[i]);
	}
	//Free memory
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	free(h_a);
	free(h_b);
	free(h_c);
	return 0;
}
|
12,412 | // Homework_5
// Problem_3
// create another kernel that adds i to array[i]
// RUN as:
// nvcc prob3.cu
// ./a.out
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
//Kernel function to initialize array
//Kernel: zeroes every element of arr using a grid-stride loop, so any
//grid/block configuration covers the whole array.
__global__
void initialize(int *arr, int size){
	const int start = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = gridDim.x * blockDim.x;
	for (int idx = start; idx < size; idx += step)
		arr[idx] = 0;
}
//Kernel function to add i to a[i]
//Kernel: adds each index to its own slot (arr[i] += i) with a grid-stride
//loop, so any grid/block configuration covers the whole array.
__global__
void add_I_Value(int *arr, int size){
	const int start = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = gridDim.x * blockDim.x;
	for (int idx = start; idx < size; idx += step)
		arr[idx] += idx;
}
//loop
//Prints the array space-separated, framed by a blank line on each side.
void print(int *ar, int size){
	printf("\n");
	for (int idx = 0; idx < size; ++idx)
		printf("%d ", ar[idx]);
	printf("\n");
}
// Zeroes a 1024-element managed array on the GPU, adds i to array[i],
// and prints the result from the host.
int main(void){
	printf("Homework#5\nProblem 3: create another kernel that adds i to array[i]\n---Successfully initiated---\n---Check the code---");
	// Problem size and launch geometry.
	const int size = 1024;
	const int gpuThread = 32;
	int *array;
	// Managed memory is visible to both host and device.
	cudaMallocManaged(&array, size * sizeof(int));
	const int sectors = (size + gpuThread - 1) / gpuThread; // ceil-div
	// Zero the array, then add each index to its slot.
	initialize<<<sectors, gpuThread>>>(array, size);
	add_I_Value<<<sectors, gpuThread>>>(array, size);
	// Wait for the GPU before touching managed memory on the host.
	cudaDeviceSynchronize();
	print(array, size);
	cudaFree(array);
	cudaDeviceReset();
	return 0;
}
|
12,413 | #include <cassert>
#include <cstddef>
#include <vector>
#include <memory>
#include <cmath>
#include <vector>
#include <iostream>
#include <cstdio>
#include <mutex>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
// The default num threads per 2D block.
const int TPB_X = 32;
const int TPB_Y = 32;
// Checks a cudaError_t and reports file/line on failure via util::OutputError.
#define CheckError(ans) \
  { neuralnetwork::cuda::util::OutputError((ans), __FILE__, __LINE__); }
namespace math {
// Lightweight non-owning-style view of a host matrix; Create/Release manage
// the backing buffer explicitly (no RAII — callers must pair them).
struct MatrixView {
  unsigned rows;
  unsigned cols;
  float *data; // row major order.
  // Allocates a rows x cols host buffer; rows and cols must be positive.
  static MatrixView Create(unsigned rows, unsigned cols) {
    assert(rows > 0 && cols > 0);
    MatrixView result;
    result.rows = rows;
    result.cols = cols;
    result.data = new float[rows * cols];
    return result;
  }
  // Frees the buffer and nulls the pointer; view must not already be released.
  static void Release(MatrixView &view) {
    assert(view.data != nullptr);
    delete[] view.data;
    view.data = nullptr;
  }
};
}
namespace neuralnetwork {
namespace cuda {
// Supported per-layer activation functions.
enum class LayerActivation { TANH, LOGISTIC, RELU, LEAKY_RELU, LINEAR, SOFTMAX };
// Device-resident weight matrix for one layer, allocated with cudaMallocPitch.
struct LayerWeights {
  unsigned inputSize; // this includes the bias. So it should be equal to prev layer size + 1
  unsigned layerSize;
  // Data pointers allocated with cudaMallocPitch. Logical size is (inputSize * layerSize)
  // num rows = layerSize, num cols = inputSize
  float *weights;
  // The pitch of the rows of the weights matrix in bytes.
  size_t pitch;
  // Pitched-row element access: row r (< layerSize), col c (< inputSize).
  __device__ float *Elem(unsigned r, unsigned c) {
    assert(r < layerSize && c < inputSize);
    return (float *)((char *)weights + r * pitch) + c;
  }
};
// Pool of curand states shared by device code; SampleUniform maps a thread
// index onto the pool with a modulo.
struct Random {
  curandState *d_state;
  unsigned numStates;
  __device__ void Initialise(unsigned threadIndex, unsigned seed) {
    curand_init(seed, threadIndex, 0, &d_state[threadIndex]);
  }
  __device__ float SampleUniform(unsigned threadIndex) {
    return curand_uniform(&d_state[threadIndex % numStates]);
  }
  static Random Create(unsigned numStates, unsigned seed);
  static void Cleanup(Random &rnd);
};
// One training batch on the device: inputs and target outputs, both pitched.
struct SamplesBatch {
  unsigned maxBatchSize; // number of rows allocated in memory.
  unsigned batchSize; // equal to the number of rows in the matrix.
  unsigned inputDim; // equal to the number of columns in the matrix.
  unsigned targetOutputDim;
  float *input; // matrix sized batchSize(rows) * sampleDim(cols)
  size_t ipitch;
  float *targetOutput; // matrix sized batchSize(rows) * sampleDim(cols)
  size_t opitch;
  __device__ float *InputElem(unsigned r, unsigned c) {
    assert(r < maxBatchSize && c < inputDim);
    return (float *)((char *)input + r * ipitch) + c;
  }
  __device__ float *TargetOutputElem(unsigned r, unsigned c) {
    assert(r < maxBatchSize && c < targetOutputDim);
    return (float *)((char *)targetOutput + r * opitch) + c;
  }
};
// Per-layer forward-pass results for a batch: activations and their
// derivatives, both pitched device matrices.
struct LayerBatchOutputs {
  unsigned maxBatchSize;
  unsigned batchSize;
  // layer size includes the bias term, so it will be equal to the number of nodes + 1
  unsigned layerSize;
  float *output; // matrix sized batchSize(rows) * layerSize(cols)
  size_t opitch;
  float *derivative; // matrix sized batchSize(rows) * layerSize(cols)
  size_t dpitch;
  __device__ float *OutputElem(unsigned r, unsigned c) {
    assert(r < maxBatchSize && c < layerSize);
    return (float *)((char *)output + r * opitch) + c;
  }
  __device__ float *DerivativeElem(unsigned r, unsigned c) {
    assert(r < maxBatchSize && c < layerSize);
    return (float *)((char *)derivative + r * dpitch) + c;
  }
};
// Per-layer backprop deltas for a batch (pitched device matrix).
struct LayerBatchDeltas {
  unsigned maxBatchSize;
  unsigned batchSize;
  unsigned layerSize;
  float *delta; // matrix sized batchSize(rows) * layerSize(cols)
  size_t pitch;
  __device__ float *Elem(unsigned r, unsigned c) {
    assert(r < maxBatchSize && c < layerSize);
    return (float *)((char *)delta + r * pitch) + c;
  }
};
// Host-side launch wrappers for the individual kernels; implementations live
// in the corresponding .cu translation units.
namespace GradientKernel {
void Apply(LayerBatchDeltas layerDeltas, LayerBatchOutputs layerOutputs, LayerWeights outGradient,
           cudaStream_t stream);
}
namespace ForwardPassKernel {
void Apply(LayerWeights layerWeights, LayerBatchOutputs input, LayerBatchOutputs output,
           LayerActivation activation, Random rnd, float nodeActivationRate, bool isOutputLayer,
           cudaStream_t stream);
}
namespace BackwardDeltaKernel {
void Apply(LayerBatchDeltas nextDelta, LayerWeights transposedWeights,
           LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta, cudaStream_t stream);
}
// Network topology and training hyper-parameters.
struct NetworkSpec {
  unsigned numInputs;
  unsigned numOutputs;
  std::vector<unsigned> hiddenLayers;
  float nodeActivationRate; // dropout keep-probability used in the forward pass
  unsigned maxBatchSize;
  LayerActivation hiddenActivation;
  LayerActivation outputActivation;
};
// Public facade over the CUDA training implementation (pimpl idiom).
class CudaNetwork {
public:
  CudaNetwork(const NetworkSpec &spec);
  ~CudaNetwork();
  void SetWeights(const std::vector<math::MatrixView> &weights);
  void GetWeights(std::vector<math::MatrixView> &outWeights);
  void Train(const math::MatrixView &batchInputs, const math::MatrixView &batchOutputs);
private:
  struct CudaNetworkImpl;
  std::unique_ptr<CudaNetworkImpl> impl;
};
// Allocation/teardown and debug-printing helpers for the device structures.
namespace util {
void OutputError(cudaError_t code, const char *file, int line);
void *AllocPushBuffer(size_t bufSize);
void FreePushBuffer(void *buf);
LayerWeights NewLayerWeights(unsigned inputSize, unsigned layerSize);
void DeleteLayerWeights(LayerWeights &lw);
SamplesBatch NewSamplesBatch(unsigned maxBatchSize, unsigned inputDim, unsigned targetOutputDim);
void DeleteSamplesBatch(SamplesBatch &sb);
LayerBatchOutputs NewLayerBatchOutputs(unsigned maxBatchSize, unsigned layerSize);
void DeleteLayerBatchOutputs(LayerBatchOutputs &lbo);
LayerBatchDeltas NewLayerBatchDeltas(unsigned maxBatchSize, unsigned layerSize);
void DeleteLayerBatchDeltas(LayerBatchDeltas &lbd);
void PrintMatrixView(math::MatrixView view);
void PrintLayerWeights(LayerWeights d_weights);
void PrintLayerOutputs(LayerBatchOutputs d_outputs);
void PrintLayerDeltas(LayerBatchDeltas d_deltas);
}
namespace TransposeKernel {
void Apply(LayerWeights layerWeights, LayerWeights transposedWeights, cudaStream_t stream);
}
namespace SoftmaxKernel {
void Apply(const LayerBatchOutputs &lastLayer, cudaStream_t stream);
}
}
}
using namespace neuralnetwork;
using namespace neuralnetwork::cuda;
// Applies the given activation function to a pre-activation value.
// LINEAR and SOFTMAX return the input unchanged here (softmax is normalised
// row-wise by a separate kernel).
__device__ float activationValue(float in, const LayerActivation activation) {
  switch (activation) {
  case LayerActivation::TANH:
    return tanhf(in);
  case LayerActivation::LOGISTIC:
    return 1.0f / (1.0f + expf(-in));
  case LayerActivation::RELU:
    return fmaxf(in, 0.0f);
  case LayerActivation::LEAKY_RELU:
    return fmaxf(in, 0.01f * in);
  case LayerActivation::LINEAR:
  case LayerActivation::SOFTMAX:
    return in;
  }
  assert(false); // unreachable for valid activation values
  return in;
}
// Derivative of the activation, given both the pre-activation (`in`) and the
// activated value (`out`) so each case can use whichever is cheaper.
__device__ float activationDerivative(float in, float out, const LayerActivation activation) {
  switch (activation) {
  case LayerActivation::TANH:
    return 1.0f - out * out;        // d/dx tanh = 1 - tanh^2
  case LayerActivation::LOGISTIC:
    return out * (1.0f - out);      // d/dx sigma = sigma(1 - sigma)
  case LayerActivation::RELU:
    return (in > 0.0f) ? 1.0f : 0.0f;
  case LayerActivation::LEAKY_RELU:
    return (in > 0.0f) ? 1.0f : 0.01f;
  case LayerActivation::LINEAR:
  case LayerActivation::SOFTMAX:
    return 1.0f;                    // softmax derivative handled in the loss
  }
  assert(false); // unreachable for valid activation values
  return 1.0f;
}
// Tiled matrix transpose: out = lw^T. Each block stages a tile of lw in
// shared memory, then writes it back transposed. `bufStride` is the row
// pitch (in floats) of the dynamically-sized shared buffer; it is presumably
// padded by the launcher to avoid bank conflicts — TODO confirm against the
// host-side Apply().
__global__ void transposeKernel(LayerWeights lw, LayerWeights out, unsigned bufStride) {
  extern __shared__ float buf[];
  unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
  // Stage the in-bounds part of this block's tile.
  if (x < lw.inputSize && y < lw.layerSize) {
    buf[threadIdx.x + threadIdx.y * bufStride] = *lw.Elem(y, x);
  }
  // Barrier between the tile write and the transposed read below; placed
  // outside the bounds checks so every thread reaches it.
  __syncthreads();
  x = blockIdx.y * blockDim.y + threadIdx.x; // transpose block offset
  y = blockIdx.x * blockDim.x + threadIdx.y;
  // Write the tile out with swapped thread roles, giving coalesced stores.
  if (x < out.inputSize && y < out.layerSize) {
    *(out.Elem(y, x)) = buf[threadIdx.y + threadIdx.x * bufStride];
  }
}
// Forward pass for one layer over a whole batch: computes
// out = activation(prevOutputs * lw^T) as a shared-memory tiled matrix
// multiply, with dropout applied to hidden layers. `spitch` is the row pitch
// (in floats) of each shared-memory chunk; the dynamic shared allocation must
// hold two chunks of spitch * blockDim.y floats.
__global__ void forwardPassKernel(LayerWeights lw, LayerBatchOutputs prevOutputs,
                                  LayerBatchOutputs out, const LayerActivation activation,
                                  Random rnd, const float nodeActivationRate, const bool isOutput,
                                  const unsigned spitch) {
  extern __shared__ float buf[]; // shared memory buffer
  const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
  const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
  // TODO: can implement a "fast path" and "slow path" versions of the below code and branch here.
  // Fast path can assume that the entire block will fall within the bounds of all of the matrices
  // and dispense with a whole bunch of the below checks.
  const int numChunks = (lw.inputSize + blockDim.x - 1) / blockDim.x;
  // buffer for holding the layer weight matrix chunk
  float *lwChunk = (float *) buf;
  // buffer for holding the prev outputs matrix chunk
  float *poChunk = (float *) &buf[spitch * blockDim.y];
  float sum = 0.0f;
  // Note: the weights chunk row is derived from blockIdx.x/threadIdx.y so the
  // tile is loaded cooperatively and read back transposed in the inner loop.
  const int lwRow = blockDim.x * blockIdx.x + threadIdx.y;
  const int poRow = row;
  const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
  const int lim = numChunks * blockDim.x;
  // March both chunk windows across the shared (inputSize) dimension.
  for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.x) {
    const int lwCol = chunkOffset + threadIdx.x;
    if (lwRow < lw.layerSize && lwCol < lw.inputSize) {
      lwChunk[chunkIndex] = *lw.Elem(lwRow, lwCol);
    }
    const int poCol = lwCol;
    if (poRow < prevOutputs.batchSize && poCol < prevOutputs.layerSize) {
      poChunk[chunkIndex] = *prevOutputs.OutputElem(poRow, poCol);
    }
    // Barrier after staging, before consuming; the loop bounds are uniform
    // across the block, so every thread reaches it.
    __syncthreads();
    // Only accumulate over the valid remainder of the final chunk.
    int chunkLim = min(blockDim.x, lw.inputSize - chunkOffset);
    for (int j = 0; j < chunkLim; j++) {
      sum += lwChunk[j + threadIdx.x * spitch] * poChunk[j + threadIdx.y * spitch];
    }
    // Barrier before the next iteration overwrites the chunks.
    __syncthreads();
  }
  // layerSize - 1: the last column is the bias term and is not computed here.
  if (row < out.batchSize && col < out.layerSize - 1) {
    float *outElem = out.OutputElem(row, col);
    float *dElem = out.DerivativeElem(row, col);
    // Dropout: hidden-layer nodes are kept with probability
    // nodeActivationRate; the output layer is never dropped.
    if (isOutput || rnd.SampleUniform(col + row * out.layerSize) < nodeActivationRate) {
      *outElem = activationValue(sum, activation);
      *dElem = activationDerivative(sum, *outElem, activation);
    } else {
      *outElem = 0.0f;
      *dElem = 0.0f;
    }
  }
}
// computes outDelta = tw * nextDelta (elemwisemul) layerOutput.derivatives
// Shared-memory tiled multiply of the transposed weights against the next
// layer's deltas, scaled element-wise by this layer's activation derivatives.
// `spitch` is the row pitch (in floats) of each shared chunk; the dynamic
// shared allocation must hold two chunks of spitch * blockDim.y floats.
__global__ void backwardDeltaKernel(LayerBatchDeltas nextDelta, LayerWeights tw,
                                    LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                    unsigned spitch) {
  extern __shared__ float buf[]; // shared memory buffer
  const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
  const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
  const int numChunks = (tw.inputSize + blockDim.x - 1) / blockDim.x;
  // buffer for holding the layer weight matrix chunk
  float *twChunk = (float *) buf;
  // buffer for holding the prev outputs matrix chunk
  float *ndChunk = (float *) &buf[spitch * blockDim.y];
  // Weights-chunk row derived from blockIdx.x/threadIdx.y: the tile is
  // staged cooperatively and read back transposed in the inner loop.
  const int twRow = blockDim.x * blockIdx.x + threadIdx.y;
  const int ndRow = row;
  const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
  const int lim = numChunks * blockDim.x;
  float sum = 0.0f;
  // March both chunk windows across the shared (inputSize) dimension.
  for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.x) {
    const int twCol = chunkOffset + threadIdx.x;
    if (twRow < tw.layerSize && twCol < tw.inputSize) {
      twChunk[chunkIndex] = *tw.Elem(twRow, twCol);
    }
    const int ndCol = twCol;
    if (ndRow < nextDelta.batchSize && ndCol < nextDelta.layerSize) {
      ndChunk[chunkIndex] = *nextDelta.Elem(ndRow, ndCol);
    }
    // Barrier after staging, before consuming; loop bounds are block-uniform.
    __syncthreads();
    // Only accumulate over the valid remainder of the final chunk.
    int chunkLim = min(blockDim.x, tw.inputSize - chunkOffset);
    for (int j = 0; j < chunkLim; j++) {
      sum += twChunk[j + threadIdx.x * spitch] * ndChunk[j + threadIdx.y * spitch];
    }
    // Barrier before the next iteration overwrites the chunks.
    __syncthreads();
  }
  if (row < outDelta.batchSize && col < outDelta.layerSize) {
    // Scale by this layer's activation derivative (chain rule).
    float od = *layerOutput.DerivativeElem(row, col);
    *outDelta.Elem(row, col) = sum * od;
  }
}
// Computes outGradient = (layerDeltas^T * layerOutputs) / batchSize, i.e. the
// batch-averaged weight gradient, with a tiled shared-memory multiply over the
// batch dimension.
// Grid/block layout: 2D grid of 2D blocks; (col, row) = (x, y) indexes outGradient.
// Shared memory: 2 * spitch * blockDim.y floats supplied by the host launcher.
__global__ void gradientKernel(LayerBatchDeltas layerDeltas, LayerBatchOutputs layerOutputs,
LayerWeights outGradient, unsigned spitch) {
extern __shared__ float buf[]; // shared memory buffer
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
// buffer for holding the layer-deltas matrix chunk
float *ldChunk = (float *) buf;
// buffer for holding the layer-outputs matrix chunk
float *loChunk = (float *) &buf[spitch * blockDim.y];
// Column of layerDeltas this thread stages (staging uses threadIdx.x).
const int ldCol = blockDim.y * blockIdx.y + threadIdx.x;
const int loCol = col;
// Chunks walk the batch dimension (ceil-div).
const int numChunks = (layerDeltas.batchSize + blockDim.y - 1) / blockDim.y;
const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
const int lim = numChunks * blockDim.y;
float sum = 0.0f;
for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.y) {
const int ldRow = chunkOffset + threadIdx.y;
if (ldRow < layerDeltas.batchSize && ldCol < layerDeltas.layerSize) {
ldChunk[chunkIndex] = *layerDeltas.Elem(ldRow, ldCol);
}
const int loRow = ldRow;
if (loRow < layerOutputs.batchSize && loCol < layerOutputs.layerSize) {
loChunk[chunkIndex] = *layerOutputs.OutputElem(loRow, loCol);
}
__syncthreads(); // both chunks staged before use
// NOTE(review): the clamp uses blockDim.x while the chunk advances by
// blockDim.y; this is only consistent when TPB_X == TPB_Y — confirm.
int chunkLim = min(blockDim.x, layerDeltas.batchSize - chunkOffset);
for (int j = 0; j < chunkLim; j++) {
sum += ldChunk[threadIdx.y + j * spitch] * loChunk[threadIdx.x + j * spitch];
}
__syncthreads(); // reads finished before the next iteration overwrites the chunks
}
if (row < outGradient.layerSize && col < outGradient.inputSize) {
// Average the accumulated product over the batch.
*outGradient.Elem(row, col) = sum / layerDeltas.batchSize;
}
}
// This softmax code assumes that the output layer is smaller than the maximum number of threads
// in a block. For ease of implementation, we assume this and do the whole thing in a single block.
// This allows easy synchronization and easy algorithm. Most problems wont have >1024 outputs.
// Separate blocks can do separate batch rows.
// Grid/block layout: one block per batch row (gridDim.x == batchSize); one thread
// per output element handled (blockDim.x <= layerSize, see assert).
// Shared memory: buf[0] is a scratch slot used to broadcast the max/sum; buf[1..]
// holds this batch row's output elements.
__global__ void softmaxKernel(LayerBatchOutputs outputs) {
extern __shared__ float buf[]; // shared memory buffer
const unsigned outIndex = threadIdx.x;
const unsigned batchIndex = blockIdx.x;
assert(blockDim.x <= outputs.layerSize && gridDim.x == outputs.batchSize);
// A single float to hold data to exchange between threads in this block.
float *sharedVar = (float *) &buf[0];
// Buffer to hold all of the output elements for this batch element.
float *outElems = (float *) &buf[1];
// 1. Copy the row for the current batch into shared memory.
float val = *(outputs.OutputElem(batchIndex, outIndex));
outElems[outIndex] = val;
__syncthreads();
// 2. Find the max element in the row, done by a single thread per block while all others wait.
// (Serial by design: simple, and output layers are assumed small.)
float maxValue;
if (outIndex == 0) {
maxValue = outElems[0];
for (unsigned i = 1; i < blockDim.x; i++) {
maxValue = fmaxf(maxValue, outElems[i]);
}
*sharedVar = maxValue;
}
__syncthreads();
maxValue = *sharedVar;
// 3. Calc the unnormalised exponent offset by the max value and write it to shared mem.
// Subtracting the max keeps expf from overflowing (standard numerical-stability trick).
val = expf(val - maxValue);
outElems[outIndex] = val;
__syncthreads();
// 4. Calculate the sum across the batch, done by a single thread per block.
float sum = 0.0f;
if (outIndex == 0) {
for (unsigned i = 0; i < blockDim.x; i++) {
sum += outElems[i];
}
*sharedVar = sum;
}
__syncthreads();
sum = *sharedVar;
// 5. Calc the normalised value for each output elem and write it out to global mem.
*(outputs.OutputElem(batchIndex, outIndex)) = val / sum;
}
// Launches transposeKernel over the whole weight matrix.
// Grid covers (inputSize x layerSize) with TPB_X x TPB_Y tiles; the shared-memory
// row pitch is padded by one element (stride = TPB_X + 1).
void TransposeKernel::Apply(LayerWeights layerWeights, LayerWeights transposedWeights,
                            cudaStream_t stream) {
  const dim3 block(TPB_X, TPB_Y, 1);
  const dim3 grid((layerWeights.inputSize + TPB_X - 1) / TPB_X,
                  (layerWeights.layerSize + TPB_Y - 1) / TPB_Y, 1);
  const unsigned stride = TPB_X + 1;
  const size_t sharedMemSize = stride * TPB_Y * sizeof(float);
  transposeKernel<<<grid, block, sharedMemSize, stream>>>(
      layerWeights, transposedWeights, stride);
}
// Launches forwardPassKernel for one layer of the network on the given stream.
// The last element of each output row is the bias term and is not computed by the
// kernel, hence the "- 1" in the grid-size calculation.
void ForwardPassKernel::Apply(LayerWeights layerWeights, LayerBatchOutputs input,
                              LayerBatchOutputs output, LayerActivation activation,
                              Random rnd, float nodeActivationRate, bool isOutputLayer,
                              cudaStream_t stream) {
  // Weight matrix must connect the input layer to the (non-bias) output layer.
  assert(layerWeights.inputSize == input.layerSize);
  assert(layerWeights.layerSize == output.layerSize - 1);
  const dim3 block(TPB_X, TPB_Y, 1);
  const dim3 grid((output.layerSize - 1 + TPB_X - 1) / TPB_X,
                  (output.batchSize + TPB_Y - 1) / TPB_Y, 1);
  const unsigned spitch = TPB_X + 1; // shared-memory row pitch, padded by one element
  const size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);
  forwardPassKernel<<<grid, block, sharedMemSize, stream>>>(
      layerWeights, input, output, activation, rnd, nodeActivationRate, isOutputLayer, spitch);
}
// Launches backwardDeltaKernel: back-propagates deltas one layer using the
// pre-transposed weight matrix.
void BackwardDeltaKernel::Apply(LayerBatchDeltas nextDelta, LayerWeights transposedWeights,
                                LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                cudaStream_t stream) {
  // TODO: handle bank conflicts. Do the same in the forward kernel.
  // Dimension sanity checks between the delta buffers and the transposed weights.
  assert(nextDelta.layerSize == transposedWeights.inputSize);
  assert(outDelta.layerSize == transposedWeights.layerSize - 1);
  assert(outDelta.layerSize == layerOutput.layerSize - 1);
  assert(nextDelta.batchSize == layerOutput.batchSize);
  assert(nextDelta.batchSize == outDelta.batchSize);
  const dim3 block(TPB_X, TPB_Y, 1);
  const dim3 grid((outDelta.layerSize + TPB_X - 1) / TPB_X,
                  (outDelta.batchSize + TPB_Y - 1) / TPB_Y, 1);
  const unsigned spitch = TPB_X + 1; // shared-memory row pitch, padded by one element
  const size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);
  backwardDeltaKernel<<<grid, block, sharedMemSize, stream>>>(
      nextDelta, transposedWeights, layerOutput, outDelta, spitch);
}
// Launches gradientKernel: computes the batch-averaged weight gradient for one layer.
void GradientKernel::Apply(LayerBatchDeltas layerDeltas, LayerBatchOutputs layerOutputs,
                           LayerWeights outGradient, cudaStream_t stream) {
  // The gradient matrix has one row per delta element and one column per output.
  assert(layerDeltas.batchSize == layerOutputs.batchSize);
  assert(layerDeltas.layerSize == outGradient.layerSize);
  assert(layerOutputs.layerSize == outGradient.inputSize);
  const dim3 block(TPB_X, TPB_Y, 1);
  const dim3 grid((outGradient.inputSize + TPB_X - 1) / TPB_X,
                  (outGradient.layerSize + TPB_Y - 1) / TPB_Y, 1);
  const unsigned spitch = TPB_X + 1; // shared-memory row pitch, padded by one element
  const size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);
  gradientKernel<<<grid, block, sharedMemSize, stream>>>(
      layerDeltas, layerOutputs, outGradient, spitch);
}
// Launches softmaxKernel: one block per batch row, one thread per non-bias output.
void SoftmaxKernel::Apply(const LayerBatchOutputs &lastLayer, cudaStream_t stream) {
  // One float per output element plus one scratch slot for broadcasting max/sum.
  const size_t sharedMemSize = (lastLayer.layerSize + 1) * sizeof(float);
  // We dont want to include the bias part of the output in the processing of the softmax.
  const int threadsPerBlock = lastLayer.layerSize - 1;
  const int blocksPerGrid = lastLayer.batchSize;
  softmaxKernel<<<blocksPerGrid, threadsPerBlock, sharedMemSize, stream>>>(lastLayer);
}
using namespace std;
// ADAM trainer parameters
// Exponential decay rates for the first (momentum) and second (RMS) moment estimates.
static constexpr float adamBeta1 = 0.9f;
static constexpr float adamBeta2 = 0.999f;
// NOTE(review): 10e-8 == 1e-7, while the ADAM paper's default epsilon is 1e-8 —
// confirm this value is intentional.
static constexpr float adamEpsilon = 10e-8;
static constexpr float adamLearnRate = 0.001f;
// Process-wide RNG shared by all network instances; created exactly once below.
static Random rnd;
static std::once_flag stateFlag;
// Initialises the shared RNG exactly once, safe under concurrent callers.
static void initialiseSharedState(void) {
std::call_once(stateFlag, [](){
rnd = Random::Create(2048, 1337);
});
}
// Fills a weight matrix with uniform random values in [-initRange, initRange].
// Grid/block layout: 2D grid of 2D blocks covering (inputSize x layerSize);
// one thread per weight.
__global__ void initialiseLayerWeights(LayerWeights layer, const float initRange, Random rnd) {
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
// Bounds check: the grid may overshoot the matrix.
if (row >= layer.layerSize || col >= layer.inputSize) {
return;
}
float *out = layer.Elem(row, col);
// SampleUniform is seeded per-element by the flat index so each weight differs.
*out = initRange * (rnd.SampleUniform(col + row * layer.inputSize) * 2.0f - 1.0f);
}
// Writes 1.0 into the last output element of every batch row; that element acts
// as the bias input for the next layer (see initialiseOutputs()).
// Grid/block layout: 1D grid, one thread per batch row.
__global__ void initialiseLayerOutputs(LayerBatchOutputs outputs) {
const unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
if (id >= outputs.maxBatchSize) {
return;
}
*(outputs.OutputElem(id, outputs.layerSize - 1)) = 1.0f;
}
// Zeroes the ADAM momentum and RMS accumulators; one thread per element.
// Grid/block layout: 2D grid of 2D blocks covering (inputSize x layerSize).
__global__ void initialiseAdamWeights(LayerWeights momentum, LayerWeights rms) {
// Both accumulators must have identical shape.
assert(momentum.inputSize == rms.inputSize);
assert(momentum.layerSize == rms.layerSize);
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= rms.layerSize || col >= rms.inputSize) {
return;
}
*momentum.Elem(row, col) = 0.0f;
*rms.Elem(row, col) = 0.0f;
}
// Computes the output-layer deltas: delta = networkOutput - targetOutput.
// (This is the combined gradient of a softmax/cross-entropy-style output — the
// exact loss is determined elsewhere; the kernel itself is just a subtraction.)
// Grid/block layout: 2D grid of 2D blocks; one thread per (batch row, output col).
__global__ void lastLayerDeltasKernel(LayerBatchOutputs networkOutput, SamplesBatch samples,
LayerBatchDeltas out) {
// networkOutput carries one extra (bias) element that the deltas do not.
assert(networkOutput.layerSize == samples.targetOutputDim + 1);
assert(out.layerSize == samples.targetOutputDim);
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= out.batchSize || col >= out.layerSize) {
return;
}
// TODO: check whether reading into shared mem, doing computation, then writing to global mem
// is faster. You never know.
*out.Elem(row, col) = *networkOutput.OutputElem(row, col) - *samples.TargetOutputElem(row, col);
}
// ADAM moment update, one thread per weight:
//   m <- beta1*m + (1-beta1)*g        (first moment / momentum)
//   r <- beta2*r + (1-beta2)*g^2      (second moment / RMS)
// Grid/block layout: 2D grid of 2D blocks covering the gradient matrix.
__global__ void updateMomentumAndRMS(LayerWeights gradient, LayerWeights momentum, LayerWeights rms,
const float beta1, const float beta2) {
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= gradient.layerSize || col >= gradient.inputSize) {
return;
}
float g = *gradient.Elem(row, col);
float m = *momentum.Elem(row, col);
float r = *rms.Elem(row, col);
*momentum.Elem(row, col) = m * beta1 + g * (1.0f - beta1);
*rms.Elem(row, col) = r * beta2 + g * g * (1.0f - beta2);
}
// ADAM weight update, one thread per weight: w -= lr * m_hat / sqrt(r_hat + eps).
// NOTE(review): the bias corrections divide by (1 - beta) rather than the standard
// (1 - beta^t) for timestep t — i.e. the t == 1 correction is applied on every
// step. Confirm this simplification is intended (fixing it needs a timestep param).
__global__ void updateWeightsWithAdam(LayerWeights weights, LayerWeights momentum, LayerWeights rms,
const float beta1, const float beta2,
const float lr, const float epsilon) {
const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= rms.layerSize || col >= rms.inputSize) {
return;
}
// Bias-corrected moment estimates (see NOTE above).
float mc = *momentum.Elem(row, col) / (1.0f - beta1);
float rc = *rms.Elem(row, col) / (1.0f - beta2);
*weights.Elem(row, col) -= lr * mc / sqrtf(rc + epsilon);
}
// Device-side implementation of CudaNetwork (pimpl). Owns every GPU buffer the
// network needs (weights, per-layer activations, deltas, gradients, ADAM state)
// and drives a full training step on the device; the host only uploads sample
// batches and downloads weights.
struct CudaNetwork::CudaNetworkImpl {
NetworkSpec networkSpec;
// Live weights used for training.
vector<LayerWeights> d_layerWeights;
// Device-side copy of the weights, refreshed after each Train() call, that
// GetWeights() reads from asynchronously.
vector<LayerWeights> d_layerWeightsBridge;
vector<LayerWeights> d_layerGradients;
// Per-layer activations; index 0 holds the batch inputs.
vector<LayerBatchOutputs> d_layerOutputs;
vector<LayerBatchDeltas> d_layerDeltas;
SamplesBatch d_samplesBatch;
// Scratch buffer reused for every layer's transposed weights during backprop.
LayerWeights d_transposeScratch;
// TODO: this stuff should go into a separate file. Trainer code/variables should be
// separate from network code.
vector<LayerWeights> d_adamMomentum;
vector<LayerWeights> d_adamRMS;
cudaStream_t uploadStream;
cudaStream_t computeStream;
// Allocates all device memory up front and initialises weights/outputs/ADAM state.
CudaNetworkImpl(const NetworkSpec &spec) : networkSpec(spec) {
// Softmax is only meaningful as an output activation.
assert(networkSpec.hiddenActivation != LayerActivation::SOFTMAX);
initialiseSharedState();
// Both streams are the default stream; kept as members so they can be split later.
uploadStream = 0;
computeStream = 0;
allocDeviceMemory();
initialiseWeights();
initialiseOutputs();
initialiseADAM();
}
// Frees every device buffer allocated by allocDeviceMemory().
~CudaNetworkImpl() {
for (auto& lw : d_layerWeights) { util::DeleteLayerWeights(lw); }
for (auto& lw : d_layerWeightsBridge) { util::DeleteLayerWeights(lw); }
for (auto& lg : d_layerGradients) { util::DeleteLayerWeights(lg); }
for (auto& lo : d_layerOutputs) { util::DeleteLayerBatchOutputs(lo); }
for (auto& ld : d_layerDeltas) { util::DeleteLayerBatchDeltas(ld); }
for (auto& am : d_adamMomentum) { util::DeleteLayerWeights(am); }
for (auto& am : d_adamRMS) { util::DeleteLayerWeights(am); }
util::DeleteSamplesBatch(d_samplesBatch);
util::DeleteLayerWeights(d_transposeScratch);
}
// Synchronously copies host weight matrices into the live device weights.
void SetWeights(const std::vector<math::MatrixView> &weights) {
assert(d_layerWeights.size() == weights.size());
for (unsigned i = 0; i < weights.size(); i++) {
assert(weights[i].rows == d_layerWeights[i].layerSize);
assert(weights[i].cols == d_layerWeights[i].inputSize);
// 2D copy handles the device-side row pitch.
cudaError_t err = cudaMemcpy2D(
d_layerWeights[i].weights, d_layerWeights[i].pitch,
weights[i].data, weights[i].cols * sizeof(float),
weights[i].cols * sizeof(float), weights[i].rows,
cudaMemcpyHostToDevice);
CheckError(err);
}
}
// Asynchronously copies the bridge weights to host memory on uploadStream.
// NOTE(review): no stream synchronisation here — the caller must ensure the copy
// has completed before reading outWeights.
void GetWeights(std::vector<math::MatrixView> &outWeights) {
assert(outWeights.size() == d_layerWeightsBridge.size());
for (unsigned i = 0; i < outWeights.size(); i++) {
assert(outWeights[i].rows == d_layerWeightsBridge[i].layerSize);
assert(outWeights[i].cols == d_layerWeightsBridge[i].inputSize);
cudaError_t err = cudaMemcpy2DAsync(
outWeights[i].data, outWeights[i].cols * sizeof(float), // dst
d_layerWeightsBridge[i].weights, d_layerWeightsBridge[i].pitch, // src
outWeights[i].cols * sizeof(float), outWeights[i].rows, // width, height
cudaMemcpyDeviceToHost, uploadStream);
CheckError(err);
}
}
// Runs one full training step (forward, backward, ADAM update) on one batch,
// then refreshes the bridge copy of the weights for GetWeights().
void Train(const math::MatrixView &batchInputs, const math::MatrixView &batchOutputs) {
uploadSamplesBatch(batchInputs, batchOutputs);
forwardPass();
backwardPass();
updateAdamParams();
updateWeights();
for (unsigned i = 0; i < d_layerWeights.size(); i++) {
cudaError_t err = cudaMemcpy2D(
d_layerWeightsBridge[i].weights, d_layerWeightsBridge[i].pitch,
d_layerWeights[i].weights, d_layerWeights[i].pitch,
d_layerWeights[i].inputSize * sizeof(float), d_layerWeights[i].layerSize,
cudaMemcpyDeviceToDevice);
CheckError(err);
}
}
private:
// Copies one batch of inputs and target outputs from host to device.
void uploadSamplesBatch(const math::MatrixView &batchInputs,
const math::MatrixView &batchOutputs) {
assert(batchInputs.rows == batchOutputs.rows);
assert(batchInputs.rows <= d_samplesBatch.maxBatchSize);
assert(batchInputs.cols == d_samplesBatch.inputDim);
assert(batchOutputs.cols == d_samplesBatch.targetOutputDim);
d_samplesBatch.batchSize = batchInputs.rows;
cudaError_t err = cudaMemcpy2D(
d_samplesBatch.input, d_samplesBatch.ipitch, // dst
batchInputs.data, batchInputs.cols * sizeof(float), // src
batchInputs.cols * sizeof(float), batchInputs.rows, // width, height
cudaMemcpyHostToDevice);
CheckError(err);
err = cudaMemcpy2D(
d_samplesBatch.targetOutput, d_samplesBatch.opitch, // dst
batchOutputs.data, batchOutputs.cols * sizeof(float), // src
batchOutputs.cols * sizeof(float), batchOutputs.rows, // width, height
cudaMemcpyHostToDevice);
CheckError(err);
}
// Propagates the current batch through every layer, applying softmax at the
// end if the spec asks for it.
void forwardPass(void) {
for (auto& lo : d_layerOutputs) {
lo.batchSize = d_samplesBatch.batchSize;
}
// copy the batch inputs into the first layer outputs.
cudaError_t err = cudaMemcpy2DAsync(
d_layerOutputs[0].output, d_layerOutputs[0].opitch, // dst
d_samplesBatch.input, d_samplesBatch.ipitch, // src
d_samplesBatch.inputDim * sizeof(float), d_samplesBatch.batchSize, // width, height
cudaMemcpyDeviceToDevice, computeStream);
CheckError(err);
for (unsigned i = 1; i < d_layerOutputs.size(); i++) {
// The final layer uses the output activation; all others use the hidden one.
LayerActivation activation = (i == d_layerOutputs.size() - 1) ?
networkSpec.outputActivation : networkSpec.hiddenActivation;
ForwardPassKernel::Apply(d_layerWeights[i-1], d_layerOutputs[i-1], d_layerOutputs[i],
activation, rnd, networkSpec.nodeActivationRate, i == (d_layerOutputs.size() - 1),
computeStream);
}
LayerBatchOutputs lastLayer = d_layerOutputs[d_layerOutputs.size() - 1];
if (networkSpec.outputActivation == LayerActivation::SOFTMAX) {
SoftmaxKernel::Apply(lastLayer, computeStream);
}
}
void backwardPass(void) {
generateLayerDeltas();
generateGradient();
}
// Computes deltas for every layer, starting from the output layer and working
// backwards through transposed weight matrices.
void generateLayerDeltas(void) {
for (auto& ld : d_layerDeltas) {
ld.batchSize = d_samplesBatch.batchSize;
}
LayerBatchDeltas lastLayerDeltas = d_layerDeltas[d_layerDeltas.size() - 1];
LayerBatchOutputs networkOutput = d_layerOutputs[d_layerOutputs.size() - 1];
int bpgX = (lastLayerDeltas.layerSize + TPB_X - 1) / TPB_X;
int bpgY = (lastLayerDeltas.batchSize + TPB_Y - 1) / TPB_Y;
lastLayerDeltasKernel<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1)>>>(
networkOutput, d_samplesBatch, lastLayerDeltas);
for (int i = d_layerDeltas.size() - 2; i >= 0; i--) {
// View the shared scratch buffer as the transpose of layer i+1's weights.
LayerWeights transposedWeights;
transposedWeights.inputSize = d_layerWeights[i + 1].layerSize;
transposedWeights.layerSize = d_layerWeights[i + 1].inputSize;
transposedWeights.weights = d_transposeScratch.weights;
transposedWeights.pitch = d_transposeScratch.pitch;
TransposeKernel::Apply(d_layerWeights[i + 1], transposedWeights, computeStream);
BackwardDeltaKernel::Apply(d_layerDeltas[i + 1], transposedWeights, d_layerOutputs[i+1],
d_layerDeltas[i], computeStream);
}
}
// Computes the gradient for every weight matrix from the deltas and activations.
void generateGradient(void) {
for (unsigned i = 0; i < d_layerWeights.size(); i++) {
GradientKernel::Apply(d_layerDeltas[i], d_layerOutputs[i], d_layerGradients[i], computeStream);
}
}
// Updates the ADAM first/second moment accumulators from the fresh gradients.
void updateAdamParams(void) {
for (unsigned i = 0; i < d_layerGradients.size(); i++) {
int bpgX = (d_layerGradients[i].inputSize + TPB_X - 1) / TPB_X;
int bpgY = (d_layerGradients[i].layerSize + TPB_Y - 1) / TPB_Y;
updateMomentumAndRMS<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1)>>>(
d_layerGradients[i], d_adamMomentum[i], d_adamRMS[i], adamBeta1, adamBeta2);
}
}
// Applies the ADAM update rule to every weight matrix.
void updateWeights(void) {
for (unsigned i = 0; i < d_layerWeights.size(); i++) {
int bpgX = (d_layerWeights[i].inputSize + TPB_X - 1) / TPB_X;
int bpgY = (d_layerWeights[i].layerSize + TPB_Y - 1) / TPB_Y;
updateWeightsWithAdam<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1)>>>(
d_layerWeights[i], d_adamMomentum[i], d_adamRMS[i],
adamBeta1, adamBeta2, adamLearnRate, adamEpsilon);
}
}
// Zeroes the ADAM accumulators on the device.
void initialiseADAM(void) {
assert(d_adamRMS.size() == d_adamMomentum.size());
for (unsigned i = 0; i < d_adamRMS.size(); i++) {
int bpgX = (d_adamRMS[i].inputSize + TPB_X - 1) / TPB_X;
int bpgY = (d_adamRMS[i].layerSize + TPB_Y - 1) / TPB_Y;
initialiseAdamWeights<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1)>>>(
d_adamMomentum[i], d_adamRMS[i]);
}
}
void initialiseOutputs(void) {
// We initialise the outputs array for each layer to have a 1.0 at the end so that it can
// be used as the bias input for the next layer.
for (auto& lo : d_layerOutputs) {
int bpgX = (lo.maxBatchSize + TPB_X - 1) / TPB_X;
initialiseLayerOutputs<<<bpgX, TPB_X>>>(lo);
}
}
// Random-initialises every weight matrix with range 1/sqrt(fan-in).
void initialiseWeights(void) {
for (auto& lw : d_layerWeights) {
// Blocks per grid in X and Y dimensions.
int bpgX = (lw.inputSize + TPB_X - 1) / TPB_X;
int bpgY = (lw.layerSize + TPB_Y - 1) / TPB_Y;
float initRange = 1.0f / sqrtf(lw.inputSize);
initialiseLayerWeights<<<dim3(bpgX, bpgY, 1), dim3(TPB_X, TPB_Y, 1)>>>(lw, initRange, rnd);
}
}
// Pre-allocated all of the device memory we will need. We should never have to malloc device
// memory after this function is called.
void allocDeviceMemory(void) {
// layerSizes = hidden layer sizes followed by the output layer size.
vector<unsigned> layerSizes(networkSpec.hiddenLayers.size() + 1);
for (unsigned i = 0; i < networkSpec.hiddenLayers.size(); i++) {
layerSizes[i] = networkSpec.hiddenLayers[i];
}
layerSizes[networkSpec.hiddenLayers.size()] = networkSpec.numOutputs;
// This is for the input layer
d_layerOutputs.push_back(
util::NewLayerBatchOutputs(networkSpec.maxBatchSize, networkSpec.numInputs + 1));
// Track the largest dimensions so the transpose scratch can hold any layer.
unsigned maxInputSize = 0;
unsigned maxLayerSize = 0;
for (unsigned i = 0; i < layerSizes.size(); i++) {
// +1 everywhere accounts for the bias element appended to each layer's output.
unsigned prevLayerSize = i == 0 ? networkSpec.numInputs : layerSizes[i-1];
maxInputSize = max(maxInputSize, prevLayerSize + 1);
maxLayerSize = max(maxLayerSize, layerSizes[i]);
d_layerWeights.push_back(util::NewLayerWeights(prevLayerSize + 1, layerSizes[i]));
d_layerWeightsBridge.push_back(util::NewLayerWeights(prevLayerSize + 1, layerSizes[i]));
d_layerGradients.push_back(util::NewLayerWeights(prevLayerSize + 1, layerSizes[i]));
d_layerOutputs.push_back(util::NewLayerBatchOutputs(networkSpec.maxBatchSize, layerSizes[i] + 1));
d_layerDeltas.push_back(util::NewLayerBatchDeltas(networkSpec.maxBatchSize, layerSizes[i]));
d_adamMomentum.push_back(util::NewLayerWeights(prevLayerSize + 1, layerSizes[i]));
d_adamRMS.push_back(util::NewLayerWeights(prevLayerSize + 1, layerSizes[i]));
}
d_samplesBatch =
util::NewSamplesBatch(networkSpec.maxBatchSize, networkSpec.numInputs, networkSpec.numOutputs);
d_transposeScratch = util::NewLayerWeights(maxLayerSize, maxInputSize);
}
};
// Public CudaNetwork API: thin pimpl forwarders to CudaNetworkImpl.
CudaNetwork::CudaNetwork(const NetworkSpec &spec) : impl(new CudaNetworkImpl(spec)) {}
CudaNetwork::~CudaNetwork() = default;
void CudaNetwork::SetWeights(const std::vector<math::MatrixView> &weights) {
impl->SetWeights(weights);
}
void CudaNetwork::GetWeights(std::vector<math::MatrixView> &outWeights) {
impl->GetWeights(outWeights);
}
void CudaNetwork::Train(const math::MatrixView &batchInputs, const math::MatrixView &batchOutputs) {
impl->Train(batchInputs, batchOutputs);
}
// Placeholder entry point: prints a greeting and exits successfully.
int main() {
    std::cout << "HI \n" << std::endl;
    return 0;
}
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gm29_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gm29_BLOCKS 512
#define gm29_THREADS 128
#define gm29_ARRAY_SECTIONS (gm29_BLOCKS*gm29_THREADS/32)
#define gm29_k 4
#define gm29_q 2
#define gm29_g 536870909U
#define gm29_halfg 268435456U
// Generator state: 32 independent GM29 orbits. xN holds the current value x_n and
// xP the previous value x_{n-1} for each orbit; 16-byte alignment enables the
// aligned SSE loads (movaps) in gm29_sse_generate_.
typedef struct{
unsigned xN[32] __attribute__ ((aligned(16))),
xP[32] __attribute__ ((aligned(16)));
} gm29_state;
// The SSE path uses the exact same state layout.
typedef gm29_state gm29_sse_state;
// SSE constants, 4 lanes each (g = gm29_g = 536870909 = 2^29 - 3):
//   536870911 = 2^29 - 1 (mask), 1073741818 = 2g, 536870908 = g - 1, 536870909 = g.
unsigned gm29_sse_Consts[16] __attribute__ ((aligned(16))) =
{536870911,536870911,536870911,536870911,1073741818,1073741818,1073741818,1073741818,
536870908,536870908,536870908,536870908,536870909,536870909,536870909,536870909};
// Host-only SSE implementation: advances all 32 orbits by one step and returns one
// packed 32-bit random word (one bit per orbit, bit set when the new value is in
// the upper half of [0, g)). Processes four orbits per XMM register, eight stanzas
// of four; the first four stanzas produce the low 16 result bits (pmovmskb into
// %0), the last four the high 16 bits (%1), combined by the final shll/addl.
// Constants: xmm7 = 2^29-1 mask, xmm6 = 2g, xmm4 = g-1 and 48(%4) = g, all loaded
// from gm29_sse_Consts. xN is updated in place and the old xN is saved into xP.
// NOTE(review): GCC-syntax x86 inline asm — not portable to non-x86 hosts.
__host__ unsigned int gm29_sse_generate_(gm29_sse_state* state){
unsigned output1; unsigned output2 __attribute__ ((unused));
asm volatile("movaps (%4),%%xmm7\n" \
"movaps 16(%4),%%xmm6\n" \
"movaps 32(%4),%%xmm4\n" \
"movaps (%2),%%xmm0\n" \
"movaps (%3),%%xmm5\n" \
"movaps %%xmm0,(%3)\n" \
"pslld $2,%%xmm0\n" \
"paddd %%xmm6,%%xmm0\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,(%2)\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%3),%%xmm5\n" \
"movaps %%xmm1,16(%3)\n" \
"pslld $2,%%xmm1\n" \
"paddd %%xmm6,%%xmm1\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,16(%2)\n" \
"movaps 32(%2),%%xmm2\n" \
"movaps 32(%3),%%xmm5\n" \
"movaps %%xmm2,32(%3)\n" \
"pslld $2,%%xmm2\n" \
"paddd %%xmm6,%%xmm2\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,32(%2)\n" \
"movaps 48(%2),%%xmm3\n" \
"movaps 48(%3),%%xmm5\n" \
"movaps %%xmm3,48(%3)\n" \
"pslld $2,%%xmm3\n" \
"paddd %%xmm6,%%xmm3\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,48(%2)\n" \
"psrld $28,%%xmm0\n" \
"psrld $28,%%xmm1\n" \
"psrld $28,%%xmm2\n" \
"psrld $28,%%xmm3\n" \
"packssdw %%xmm1,%%xmm0\n" \
"packssdw %%xmm3,%%xmm2\n" \
"packsswb %%xmm2,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"movaps 64(%2),%%xmm0\n" \
"movaps 64(%3),%%xmm5\n" \
"movaps %%xmm0,64(%3)\n" \
"pslld $2,%%xmm0\n" \
"paddd %%xmm6,%%xmm0\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"paddd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm0\n" \
"movaps %%xmm0,64(%2)\n" \
"movaps 80(%2),%%xmm1\n" \
"movaps 80(%3),%%xmm5\n" \
"movaps %%xmm1,80(%3)\n" \
"pslld $2,%%xmm1\n" \
"paddd %%xmm6,%%xmm1\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"paddd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm1\n" \
"movaps %%xmm1,80(%2)\n" \
"movaps 96(%2),%%xmm2\n" \
"movaps 96(%3),%%xmm5\n" \
"movaps %%xmm2,96(%3)\n" \
"pslld $2,%%xmm2\n" \
"paddd %%xmm6,%%xmm2\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"paddd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm2\n" \
"movaps %%xmm2,96(%2)\n" \
"movaps 112(%2),%%xmm3\n" \
"movaps 112(%3),%%xmm5\n" \
"movaps %%xmm3,112(%3)\n" \
"pslld $2,%%xmm3\n" \
"paddd %%xmm6,%%xmm3\n" \
"pslld $1,%%xmm5\n" \
"psubd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,%%xmm5\n" \
"psrld $29,%%xmm5\n" \
"pand %%xmm7,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"paddd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,%%xmm5\n" \
"pcmpgtd %%xmm4,%%xmm5\n" \
"pand 48(%4),%%xmm5\n" \
"psubd %%xmm5,%%xmm3\n" \
"movaps %%xmm3,112(%2)\n" \
"psrld $28,%%xmm0\n" \
"psrld $28,%%xmm1\n" \
"psrld $28,%%xmm2\n" \
"psrld $28,%%xmm3\n" \
"packssdw %%xmm1,%%xmm0\n" \
"packssdw %%xmm3,%%xmm2\n" \
"packsswb %%xmm2,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%1\n" \
"shll $16,%1\n" \
"addl %1,%0\n" \
"":"=&r"(output1),"=&r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm29_sse_Consts));
return output1;
}
// Copies the plain generator state into an SSE state (identical layout, so this
// is a straight element-by-element copy of both the current and previous values).
__device__ __host__ void gm29_get_sse_state_(gm29_state* state,gm29_sse_state* sse_state){
  for(int lane = 0; lane < 32; lane++){
    sse_state->xN[lane] = state->xN[lane];
    sse_state->xP[lane] = state->xP[lane];
  }
}
// One step of the GM29 recurrence: x_{n+1} = (k*x_n - q*x_{n-1}) mod g, computed
// as k*N + q*(g - P) so all intermediate values stay non-negative in unsigned math.
__device__ __host__ unsigned gm29_CNext(unsigned N,unsigned P){
  unsigned next = gm29_k * N;
  next += gm29_q * (gm29_g - P);
  return next % gm29_g;
}
// Computes (myk*N + myq*(g - P)) mod g, i.e. (myk*N - myq*P) mod g, with arbitrary
// coefficients, using 64-bit arithmetic so the products cannot overflow.
__device__ __host__ unsigned gm29_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){
unsigned long long NNN,PP,kk,qq,gg,rr; // returns (myk*N-myq*P) (mod gm29_g)
NNN=N; PP=P; kk=myk; qq=myq; gg=gm29_g;
rr=(kk*NNN+qq*(gg-PP));
// Fast reduction mod g = 2^29 - 3: subtract floor(rr / 2^29) * g, then apply one
// further correction step. NOTE(review): the result is assumed to land in [0, g)
// after two steps for the operand ranges used here — confirm.
NNN=rr>>29;
PP=rr-(NNN*gg);
PP-=((PP>>29)*gg);
return (unsigned)PP;
}
// Returns x_{2^n} of the recurrence starting from (x0, x1) by n doubling steps.
// Each step advances the value with the current coefficients and then squares the
// recurrence itself: (k, q) -> (k*k - 2q, q*q) (both computed via gm29_CNext2).
__device__ __host__ unsigned gm29_GetNextN(unsigned x0,unsigned x1,unsigned n){ //returns x_{2^n}
unsigned myk=gm29_k,myq=gm29_q,i,x=x1;
for(i=0;i<n;i++){
x=gm29_CNext2(x,x0,myk,myq);
myk=gm29_CNext2(myk,2,myk,myq);
myq=gm29_CNext2(myq,0,myq,0);
}
return x;
}
// Returns x_N for arbitrary N by binary decomposition: for each set bit of N,
// jump the pair (xp, xn) ahead by the corresponding power of two via gm29_GetNextN.
__device__ __host__ unsigned gm29_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N
unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
// Advance both elements of the pair; xnnew starts from xn and its successor.
xpnew=gm29_GetNextN(xp,xn,shift);
xnnew=gm29_GetNextN(xn,gm29_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp;
}
// Advances every one of the 32 orbits by "offset" positions in logarithmic time.
__device__ __host__ void gm29_skipahead_(gm29_state* state, unsigned long long offset){
  for(unsigned j = 0; j < 32; j++){
    // Compute both new values before touching the state, since each depends on
    // the original (xP, xN) pair.
    unsigned newP = gm29_GetNextAny(state->xP[j], state->xN[j], offset);
    unsigned newN = gm29_GetNextAny(state->xP[j], state->xN[j], offset + 1);
    state->xP[j] = newP;
    state->xN[j] = newN;
  }
}
// Seeds the 32 orbits from a fixed starting pair, spacing consecutive orbits
// 9007198285571818 steps apart along the same underlying sequence.
__device__ __host__ void gm29_init_(gm29_state* state){
  unsigned prev = 514932, curr = 127293;
  for(unsigned j = 0; j < 32; j++){
    unsigned xp = gm29_GetNextAny(prev, curr, 9007198285571818UL);
    unsigned xn = gm29_GetNextAny(prev, curr, 9007198285571819UL);
    state->xP[j] = xp;
    state->xN[j] = xn;
    // The next orbit continues from where this one was seeded.
    prev = xp;
    curr = xn;
  }
}
// Seeds the generator at the start of a short subsequence.
// Valid for 0 <= SequenceNumber < 10^8; each subsequence holds at most 8*10^7 values.
__device__ __host__ void gm29_init_short_sequence_(gm29_state* state,unsigned SequenceNumber){
  gm29_init_(state);
  gm29_skipahead_(state, 82927047ULL * (unsigned long long)SequenceNumber);
}
// Seeds the generator at the start of a medium subsequence.
// Valid for 0 <= SequenceNumber < 10^6; each subsequence holds at most 8*10^9 values.
__device__ __host__ void gm29_init_medium_sequence_(gm29_state* state,unsigned SequenceNumber){
  gm29_init_(state);
  gm29_skipahead_(state, 8799201913ULL * (unsigned long long)SequenceNumber);
}
// Seeds the generator at the start of a long subsequence.
// Valid for 0 <= SequenceNumber < 10^4; each subsequence holds at most 8*10^11 values.
__device__ __host__ void gm29_init_long_sequence_(gm29_state* state,unsigned SequenceNumber){
  gm29_init_(state);
  gm29_skipahead_(state, 828317697521ULL * (unsigned long long)SequenceNumber);
}
// Advances all 32 orbits by one step and packs one output bit per orbit into a
// 32-bit word (bit i set when orbit i's new value lies in the upper half of [0, g)).
__device__ __host__ unsigned int gm29_generate_(gm29_state* state){
  unsigned result = 0;
  for(unsigned i = 0; i < 32; i++){
    // x_{n+1} = (k*x_n - q*x_{n-1}) mod g, kept non-negative via g - P.
    unsigned next = (gm29_k*state->xN[i] + gm29_q*(gm29_g - state->xP[i])) % gm29_g;
    state->xP[i] = state->xN[i];
    state->xN[i] = next;
    if(next >= gm29_halfg) result |= (1u << i);
  }
  return result;
}
// Advances all 32 orbits by one step, packs one bit per orbit into a 32-bit word,
// and scales it to a uniform float in [0, 1).
// Fix: the scale constant now carries an 'f' suffix (2.3283064365386963e-10f is
// 2^-32 as float); the previous double literal silently promoted the multiply to
// double precision, which is costly in device code.
__device__ __host__ float gm29_generate_uniform_float_(gm29_state* state){
unsigned sum=0, i, temp,bit=1;
for(i=0;i<32;i++){
// x_{n+1} = (k*x_n - q*x_{n-1}) mod g, kept non-negative via g - P.
temp=(gm29_k*state->xN[i]+gm29_q*(gm29_g-state->xP[i]))%gm29_g;
state->xP[i]=state->xN[i]; state->xN[i]=temp;
// Bit i of the word is set when orbit i lands in the upper half of [0, g).
sum+= ((temp<gm29_halfg)?0:bit); bit*=2;
}
return ((float) sum) * 2.3283064365386963e-10f;
}
// Prints the full generator state (all 32 xN then all 32 xP values, reduced mod g)
// to stdout; the ternary format string emits the closing brace after the last item.
__host__ void gm29_print_state_(gm29_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");}
for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");}
}
// Prints the SSE generator state; identical format to gm29_print_state_ since the
// two state types share the same layout.
__host__ void gm29_print_sse_state_(gm29_sse_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");}
for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");}
}
// Generates (*length) packed 32-bit random words per generator group.
// Each group of s=32 consecutive threads forms one generator: thread "orbit"
// (0..31) advances one GM29 orbit per iteration and contributes one output bit;
// the bits are combined with a two-stage shared-memory reduction and lane 0
// writes the packed word into this group's section of "out".
// Grid/block layout: gm29_THREADS threads per block (a multiple of 32).
// Fixes: (1) a __syncthreads() was added at the end of the loop body — without it,
// threads with orbit != 0 could loop around and overwrite a[] while lane 0 was
// still reading a[threadIdx.x+16], a data race under Volta+ independent thread
// scheduling; (2) the orbit mask uses an unsigned shift, since 1<<31 overflows a
// signed int.
__global__ void gm29_kernel_generate_array(gm29_state* state, unsigned int* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
// Skip each orbit ahead to the start of this group's output section.
xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm29_halfg ? 0 : (1u<<orbit) );
__syncthreads();
// Two-stage tree reduction: fold groups of 4, then groups of 16.
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=sum; }
// Barrier before the next iteration overwrites a[] (see header note).
__syncthreads();
}
}
// Fills dev_out (a device pointer) with "length" random 32-bit words. The array is
// split into gm29_ARRAY_SECTIONS independent sections, one per 32-thread generator
// group; the per-section length is rounded up so the sections cover the whole array.
__host__ void gm29_generate_gpu_array_(gm29_state* state, unsigned int* dev_out, long length){
  long sectionLen = length / gm29_ARRAY_SECTIONS;
  if (sectionLen * gm29_ARRAY_SECTIONS < length) sectionLen++;
  gm29_state* dev_state;
  long* dev_length;
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_state, sizeof(gm29_state)));
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_length, sizeof(long)));
  gm29_CUDA_CALL(cudaMemcpy(dev_state, state, sizeof(gm29_state), cudaMemcpyHostToDevice));
  gm29_CUDA_CALL(cudaMemcpy(dev_length, &sectionLen, sizeof(long), cudaMemcpyHostToDevice));
  gm29_kernel_generate_array<<<gm29_BLOCKS, gm29_THREADS>>>(dev_state, dev_out, dev_length);
  gm29_CUDA_CALL(cudaGetLastError());
  gm29_CUDA_CALL(cudaFree(dev_state));
  gm29_CUDA_CALL(cudaFree(dev_length));
}
// Float variant of gm29_kernel_generate_array: same per-group bit-packing and
// shared-memory reduction, but each packed word is scaled to a uniform float in
// [0, 1) before being written out.
// Fixes: (1) a __syncthreads() was added at the end of the loop body — without it,
// threads with orbit != 0 could loop around and overwrite a[] while lane 0 was
// still reading a[threadIdx.x+16], a data race under Volta+ independent thread
// scheduling; (2) the orbit mask uses an unsigned shift (1<<31 overflows a signed
// int); (3) the scale constant 2.3283064365386963e-10f (2^-32) carries an 'f'
// suffix so the multiply stays in single precision on the device.
__global__ void gm29_kernel_generate_array_float(gm29_state* state, float* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
// Skip each orbit ahead to the start of this group's output section.
xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm29_halfg ? 0 : (1u<<orbit) );
__syncthreads();
// Two-stage tree reduction: fold groups of 4, then groups of 16.
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10f; }
// Barrier before the next iteration overwrites a[] (see header note).
__syncthreads();
}
}
// Generates `length` floats in [0,1) into the device buffer `dev_out`.
// Same sectioned layout as gm29_generate_gpu_array_: dev_out must hold
// gm29_ARRAY_SECTIONS * ceil(length / gm29_ARRAY_SECTIONS) values.
__host__ void gm29_generate_gpu_array_float_(gm29_state* state, float* dev_out, long length){
  gm29_state* dev_state;
  long* dev_length;
  // Per-section length, rounded up to cover all requested values.
  long sectionLen = (length + gm29_ARRAY_SECTIONS - 1) / gm29_ARRAY_SECTIONS;
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_state, sizeof(gm29_state)));
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_length, sizeof(long)));
  gm29_CUDA_CALL(cudaMemcpy(dev_state, state, sizeof(gm29_state), cudaMemcpyHostToDevice));
  gm29_CUDA_CALL(cudaMemcpy(dev_length, &sectionLen, sizeof(long), cudaMemcpyHostToDevice));
  gm29_kernel_generate_array_float<<<gm29_BLOCKS, gm29_THREADS>>>(dev_state, dev_out, dev_length);
  gm29_CUDA_CALL(cudaGetLastError());
  gm29_CUDA_CALL(cudaFree(dev_state));
  gm29_CUDA_CALL(cudaFree(dev_length));
}
// Kernel: fills `out` with doubles in [0,1).  Identical structure to the
// float variant above: each group of s=32 consecutive threads produces one
// section of *length values, thread `orbit` contributing bit `orbit` of every
// 32-bit draw; the bits are combined via the barrier-separated stages below.
// Expects blockDim.x == gm29_THREADS.
__global__ void gm29_kernel_generate_array_double(gm29_state* state, double* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32; // lane index: which bit of the draw this thread produces
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index (one per 32 threads)
offset = seqNum*(*length); // start of the section in the output array
// Skip this sequence's generator ahead to its private subsequence.
xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; // advance the two-term recurrence
a[threadIdx.x] = (temp < gm29_halfg ? 0 : (1<<orbit) ); // this thread's bit of the draw
__syncthreads();
// Staged combine: 32 one-bit words -> 8 four-lane sums -> 2 sixteen-lane sums.
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
// Lane 0 assembles the full 32-bit word and scales by 2^-32 to get [0,1).
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
}
}
// Generates `length` doubles in [0,1) into the device buffer `dev_out`.
// Same sectioned layout as the other gm29 wrappers: dev_out must hold
// gm29_ARRAY_SECTIONS * ceil(length / gm29_ARRAY_SECTIONS) values.
__host__ void gm29_generate_gpu_array_double_(gm29_state* state, double* dev_out, long length){
  gm29_state* dev_state;
  long* dev_length;
  // Per-section length, rounded up to cover all requested values.
  long sectionLen = (length + gm29_ARRAY_SECTIONS - 1) / gm29_ARRAY_SECTIONS;
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_state, sizeof(gm29_state)));
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_length, sizeof(long)));
  gm29_CUDA_CALL(cudaMemcpy(dev_state, state, sizeof(gm29_state), cudaMemcpyHostToDevice));
  gm29_CUDA_CALL(cudaMemcpy(dev_length, &sectionLen, sizeof(long), cudaMemcpyHostToDevice));
  gm29_kernel_generate_array_double<<<gm29_BLOCKS, gm29_THREADS>>>(dev_state, dev_out, dev_length);
  gm29_CUDA_CALL(cudaGetLastError());
  gm29_CUDA_CALL(cudaFree(dev_state));
  gm29_CUDA_CALL(cudaFree(dev_length));
}
// Host-side convenience wrapper: generates `length` unsigned ints and copies
// them back into the host array `out`.  The device output buffer is sized to
// the rounded-up section total; only the first `length` values are copied
// back.  The blocking cudaMemcpy at the end also acts as the synchronization
// point for the kernel launch.
__host__ void gm29_generate_array_(gm29_state* state, unsigned int* out, long length){
  gm29_state* dev_state;
  unsigned int* dev_out;
  long* dev_length;
  // Per-section length, rounded up to cover all requested values.
  long sectionLen = (length + gm29_ARRAY_SECTIONS - 1) / gm29_ARRAY_SECTIONS;
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_state, sizeof(gm29_state)));
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_out, sectionLen * gm29_ARRAY_SECTIONS * sizeof(unsigned int)));
  gm29_CUDA_CALL(cudaMalloc((void**)&dev_length, sizeof(long)));
  gm29_CUDA_CALL(cudaMemcpy(dev_state, state, sizeof(gm29_state), cudaMemcpyHostToDevice));
  gm29_CUDA_CALL(cudaMemcpy(dev_length, &sectionLen, sizeof(long), cudaMemcpyHostToDevice));
  gm29_kernel_generate_array<<<gm29_BLOCKS, gm29_THREADS>>>(dev_state, dev_out, dev_length);
  gm29_CUDA_CALL(cudaGetLastError());
  gm29_CUDA_CALL(cudaMemcpy(out, dev_out, length * sizeof(unsigned int), cudaMemcpyDeviceToHost));
  gm29_CUDA_CALL(cudaFree(dev_state));
  gm29_CUDA_CALL(cudaFree(dev_out));
  gm29_CUDA_CALL(cudaFree(dev_length));
}
|
12,415 | #include "includes.h"
// Adds `val` to every diagonal entry of the packed lower-triangular matrix
// `quadratic` (entry i of the diagonal lives at flat index (i+1)(i+2)/2 - 1)
// and adds val * prior_offset to linear[0].
//
// val defaults to 1.0; when max_count > 0 it is scaled up whenever
// *cur_tot_weight exceeds max_count.
//
// Fixes vs. the original:
//  * `max_count > 0.0f` compared an int32_t against a float literal; an
//    integer comparison expresses the intent directly.
//  * linear[0] was updated by thread 0 of EVERY block, so a launch with
//    gridDim.x > 1 applied the offset multiple times (the diagonal loop is
//    grid-strided, so multi-block launches are clearly intended).  The
//    update is now restricted to a single thread in the whole grid.
__global__ void update_linear_and_quadratic_terms_kernel( int32_t n, float prior_offset, float* cur_tot_weight, int32_t max_count, float* quadratic, float* linear) {
  float val = 1.0f;
  float cur_weight = *cur_tot_weight;
  if (max_count > 0) {
    // Scale factor > 1 only when the accumulated weight exceeds max_count.
    float new_scale = max((float)cur_weight, (float)max_count) / max_count;
    float prior_scale_change = new_scale - 1.0f;
    val += prior_scale_change;
  }
  // Grid-stride loop over the n diagonal entries of the packed triangle.
  for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    int32_t diag_idx = ((i + 1) * (i + 2) / 2) - 1;
    quadratic[diag_idx] += val;
  }
  // Exactly one thread in the grid applies the linear-term update.
  if (blockIdx.x == 0 && threadIdx.x == 0) {
    linear[0] += val * prior_offset;
  }
}
12,416 | //
// Created by igor on 26.03.2021.
//
#include "ColorC.cuh"
// Scales each RGB channel by f.  The products are converted back to
// unsigned char without clamping, so out-of-range results are not saturated.
__host__ __device__ ColorC operator* (const ColorC &c, float f) {
    unsigned char red   = (unsigned char)(c.r * f);
    unsigned char green = (unsigned char)(c.g * f);
    unsigned char blue  = (unsigned char)(c.b * f);
    return {red, green, blue};
}
// Component-wise sum of two colors.  The per-channel int sums are converted
// back to unsigned char, so sums >= 256 wrap modulo 256 (no clamping).
__host__ __device__ ColorC operator+ (const ColorC &l, const ColorC &r) {
    unsigned char red   = (unsigned char)(l.r + r.r);
    unsigned char green = (unsigned char)(l.g + r.g);
    unsigned char blue  = (unsigned char)(l.b + r.b);
    return {red, green, blue};
}
// Component-wise constructor: stores the given 8-bit channel values directly.
__host__ __device__ ColorC::ColorC(unsigned char r, unsigned char g, unsigned char b) : r(r), g(g), b(b) {}
// Converting constructor from the float color type: each channel is narrowed
// from float to unsigned char by truncation.  NOTE(review): assumes ColorF
// channels are already in the 0..255 range -- confirm ColorF's scale.
__host__ __device__ ColorC::ColorC(ColorF f): r(f.r), g(f.g), b(f.b) {
}
|
12,417 | #include <stdio.h>
#include <cuda.h>
/*
Tested blocks/threads:
256/256
256/1024
128/1024
512/1024
768/1024
*/
#define NUM_BLOCK 768
#define NUM_THREAD 1024
#define PI 3.14159265358979323846
/* Kernel function */
/* Kernel: midpoint-rule quadrature of 4/(1+x^2) over [0,1].
   Each thread accumulates its share of the terms into mypi[idx]: starting at
   its global index, it takes every (nthreads*nblocks)-th subinterval.  The
   host sums mypi[] afterwards and multiplies by the step width m.
   mypi must be zeroed before launch (the kernel only accumulates). */
__global__ void cal_pi(double *mypi, int iter,
                       double m, int nthreads, int nblocks) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = nthreads * nblocks;      /* total threads in the launch */
    for (int i = idx; i < iter; i += stride) {
        double ni = (i + 0.5) * m;        /* midpoint of subinterval i */
        mypi[idx] += 4.0 / (1.0 + ni * ni);
    }
}
/* Runs the pi quadrature three times with increasing iteration counts and
   prints each estimate.
   Fixes vs. the original:
    * `pi` was accumulated across runs (and already multiplied by the previous
      step width), so the 2nd and 3rd printed results were garbage; it is now
      a per-run local reset to 0.
    * h_pi/d_pi were re-allocated inside the loop but freed only once after
      it, leaking two buffers per extra run; the buffers have a fixed size,
      so they are now allocated once and reused. */
int main(void) {
    int iteArr[3] = { 24000000, 48000000, 94000000 };
    /* Setting up grid and block dimensions */
    dim3 dimGrid(NUM_BLOCK,1,1);
    dim3 dimBlock(NUM_THREAD,1,1);
    printf("REPORT # of blocks = %d, # of threads/block = %d\n", NUM_BLOCK, NUM_THREAD);
    /* One partial-sum slot per launched thread; allocated once, reused. */
    size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double);
    double *h_pi = (double *)malloc(size);
    double *d_pi;
    cudaMalloc((void **) &d_pi, size);
    for (int i = 0; i < 3; i++){
        int currentIter = iteArr[i];
        double step = 1.0 / currentIter;
        double pi = 0;                    /* reset for each run */
        /* Zero the per-thread partial sums. */
        cudaMemset(d_pi, 0, size);
        /* Run kernel */
        cal_pi <<<dimGrid, dimBlock>>> (d_pi, currentIter,
                step, NUM_THREAD, NUM_BLOCK);
        /* Blocking copy: also synchronizes with the kernel. */
        cudaMemcpy(h_pi, d_pi, size, cudaMemcpyDeviceToHost);
        /* Reduce the per-thread partial sums and apply the step width. */
        for( int j = 0; j < NUM_THREAD*NUM_BLOCK; j++)
            pi += h_pi[j];
        pi *= step;
        printf("\tMyPI = %20.18f \n",pi);
        printf("\tMyPI - PI = %20.18f \n",pi-PI);
    }
    printf("\tCheck nvprof for more time estimation.\n\n");
    /* Clean host and device buffers */
    free(h_pi);
    cudaFree(d_pi);
    return 0;
}
12,418 | #include "includes.h"
// Element-wise in-place addition: first[i] += second[i] for i in [0, sizeFirst).
// Grid-stride loop, so any launch configuration covers the whole array.
//
// Fix vs. the original: the loop index was a signed int compared against a
// size_t bound, which overflows (undefined behavior) for arrays larger than
// INT_MAX elements; the index and stride are now size_t.
__global__ void gpu_add(float* first, float* second, size_t sizeFirst)
{
    size_t i = (size_t)threadIdx.x + (size_t)blockIdx.x * blockDim.x;
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    for (; i < sizeFirst; i += stride) {
        first[i] = (first[i] + second[i]);
    }
}
12,419 | #include "includes.h"
// One thread per record: copies a variable-length chunk of bytes from
// d_source to d_dest and rewrites the record's offset to its new location.
//   d_Rin[pos] = (source offset, size in bytes)
//   d_sum[pos] = destination offset for record pos
//   startPos   = first record index handled by this launch
// After the copy, d_Rin[pos].x is updated to the destination offset.
__global__ void copyChunks_kernel(void *d_source, int startPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
    // Flatten the 2D grid and 2D block into a single record index
    // (same linearization as the original: block id * blockDim.x + thread id).
    const int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    const int threadInBlock = threadIdx.x + threadIdx.y * blockDim.x;
    const int pos = startPos + blockId * blockDim.x + threadInBlock;
    if (pos < rLen)
    {
        int2 rec = d_Rin[pos];
        const int writePos = d_sum[pos];
        const char *src = (const char*)d_source;
        char *dst = (char*)d_dest;
        // Byte-wise copy of this record's payload (rec.x = offset, rec.y = size).
        for (int i = 0; i < rec.y; i++)
        {
            dst[i + writePos] = src[i + rec.x];
        }
        // Record now points at its compacted position.
        rec.x = writePos;
        d_Rin[pos] = rec;
    }
}
12,420 | #include "includes.h"
#define BLOCK_SIZE 16
// Naive dense matrix multiply P = M * N for square Width x Width matrices in
// row-major layout.  One thread computes one output element; threads that
// fall outside the matrix (ragged edge blocks) do nothing.
__global__ void MatrixMulKernel(float *M, float *N, float *P, int Width)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if (Row >= Width || Col >= Width)
        return;                          // guard the grid's ragged edge
    float acc = 0;
    // Dot product of row `Row` of M with column `Col` of N.
    for (int k = 0; k < Width; ++k)
        acc += M[Row * Width + k] * N[k * Width + Col];
    P[Row * Width + Col] = acc;
}
12,421 | #include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#include <math.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
// Brute-force 3-nearest-neighbour search with squared Euclidean distances.
// input: xyz1: (b, n, 3) query points, xyz2: (b, m, 3) reference points
// output: dist: (b, n, 3) squared distances in ascending order,
//         idx: (b, n, 3) indices into this batch's xyz2
// Grid-stride loop: one iteration handles one query point and scans all m
// reference points, maintaining a running top-3 (best1 <= best2 <= best3).
__global__ void three_nn_gpu(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){
int total_idx = b * n;
CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
int cur_batch_idx = point_inds / n;
// Load the query point into registers once.
const float* cur_xyz1 = xyz1 + point_inds * 3;
const float cur_xyz1_x = cur_xyz1[0];
const float cur_xyz1_y = cur_xyz1[1];
const float cur_xyz1_z = cur_xyz1[2];
float cur_xyz2_x, cur_xyz2_y, cur_xyz2_z;
const float* cur_xyz2 = xyz2 + cur_batch_idx * m * 3;
float* cur_dist = dist + point_inds * 3;
int* cur_idx = idx + point_inds * 3;
// Sentinels larger than any real squared distance (1e40 exceeds float
// range, hence double; results are narrowed to float on output).
double best1 = 1e40;
double best2 = 1e40;
double best3 = 1e40;
double d;
// Default index 0 keeps the output well-defined even when m < 3.
int besti1 = 0;
int besti2 = 0;
int besti3 = 0;
for (int i = 0; i < m; i++){
// compare the distance to each xyz2 points
cur_xyz2_x = cur_xyz2[i * 3 + 0];
cur_xyz2_y = cur_xyz2[i * 3 + 1];
cur_xyz2_z = cur_xyz2[i * 3 + 2];
d = (cur_xyz2_x - cur_xyz1_x) * (cur_xyz2_x - cur_xyz1_x) + (cur_xyz2_y - cur_xyz1_y) * (cur_xyz2_y - cur_xyz1_y) + (cur_xyz2_z - cur_xyz1_z) * (cur_xyz2_z - cur_xyz1_z);
// Insertion into the sorted top-3: weaker entries shift down one slot.
if (d < best1){
best3=best2;
besti3=besti2;
best2=best1;
besti2=besti1;
best1=d;
besti1=i;
}
else if (d < best2){
best3=best2;
besti3=besti2;
best2=d;
besti2=i;
}
else if (d < best3){
best3=d;
besti3=i;
}
}
// Emit the three nearest squared distances and their indices.
cur_dist[0] = best1;
cur_dist[1] = best2;
cur_dist[2] = best3;
cur_idx[0] = besti1;
cur_idx[1] = besti2;
cur_idx[2] = besti3;
}
}
// Weighted interpolation of point features from three neighbours.
// input: points: (b, m, c), idx: (b, n, 3), weight: (b, n, 3)
// out: (b, n, c) -- out[b,p,ch] = sum_k weight[b,p,k] * points[b, idx[b,p,k], ch]
__global__ void three_interpolate_gpu(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){
  int total = b * n * c;
  CUDA_1D_KERNEL_LOOP(elem, total){
    const int batch = elem / (n * c);        // batch this element belongs to
    const int point = elem / c;              // flat (batch, point) index
    const int channel = elem % c;            // feature channel
    const float* batch_points = points + batch * m * c;
    const int* nbr = idx + point * 3;
    const float* w = weight + point * 3;
    // Blend the channel value of the three neighbour points.
    const float c1 = batch_points[nbr[0] * c + channel];
    const float c2 = batch_points[nbr[1] * c + channel];
    const float c3 = batch_points[nbr[2] * c + channel];
    out[elem] = c1 * w[0] + c2 * w[1] + c3 * w[2];
  }
}
// Backward pass of the 3-neighbour interpolation: scatter-adds each output
// gradient, scaled by its blend weight, onto the three source points it was
// interpolated from.  atomicAdd is required because several outputs can share
// the same source point.
// input: grad_out: [b, n, c], idx: [b, n, 3], weight: [b, n, 3]
// output (accumulated into): grad_points: [b, m, c]
__global__ void three_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){
  int total = b * n * c;
  CUDA_1D_KERNEL_LOOP(elem, total){
    const int batch = elem / (n * c);
    const int point = elem / c;
    const int channel = elem % c;
    float* batch_grad = grad_points + batch * m * c;
    const float g = grad_out[elem];
    const int* nbr = idx + point * 3;
    const float* w = weight + point * 3;
    atomicAdd(&batch_grad[nbr[0] * c + channel], g * w[0]);
    atomicAdd(&batch_grad[nbr[1] * c + channel], g * w[1]);
    atomicAdd(&batch_grad[nbr[2] * c + channel], g * w[2]);
  }
}
// Generalized k-neighbour weighted interpolation.
// input: points: (b, m, c), idx: (b, n, k), weight: (b, n, k)
// out: (b, n, c) -- out[b,p,ch] = sum_i weight[b,p,i] * points[b, idx[b,p,i], ch]
//
// Improvement vs. the original: the partial sum is accumulated in a register
// and stored once, instead of read-modify-writing out[point_inds] in global
// memory on every one of the k loop iterations.
__global__ void k_interpolate_gpu(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){
  int total_idx = b * n * c;
  CUDA_1D_KERNEL_LOOP(point_inds, total_idx){
    int cur_batch_inds = point_inds / (n * c);   // batch index
    int cur_point_inds = point_inds / c;         // flat (batch, point) index
    int cur_channel_inds = point_inds % c;       // feature channel
    const float* cur_points = points + cur_batch_inds * m * c;
    const int* cur_idx = idx + cur_point_inds * k;
    const float* cur_weight = weight + cur_point_inds * k;
    float acc = 0.0f;                            // register accumulator
    for (int i = 0; i < k; i++){
      acc += cur_weight[i] * cur_points[cur_idx[i] * c + cur_channel_inds];
    }
    out[point_inds] = acc;
  }
}
// Backward pass of the k-neighbour interpolation: scatter-adds each output
// gradient times its weight onto its k source points (atomicAdd, since source
// points are shared between outputs).
// input: grad_out: [b, n, c], idx: [b, n, k], weight: [b, n, k]
// output (accumulated into): grad_points: [b, m, c]
__global__ void k_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){
  int total = b * n * c;
  CUDA_1D_KERNEL_LOOP(elem, total){
    const int batch = elem / (n * c);
    const int point = elem / c;
    const int channel = elem % c;
    float* batch_grad = grad_points + batch * m * c;
    const float g = grad_out[elem];
    const int* nbr = idx + point * k;
    const float* w = weight + point * k;
    for (int i = 0; i < k; i++){
      atomicAdd(&batch_grad[nbr[i] * c + channel], g * w[i]);
    }
  }
}
// Host launcher for three_nn_gpu: for each of the b*n points in xyz1, finds
// the three nearest points in this batch's xyz2 (squared distances).
// Fixed launch shape (block_num blocks x threadsPerBlock threads); the kernel
// grid-strides, so any b/n/m is covered.
// NOTE(review): no cudaGetLastError()/sync after the launch -- errors surface
// at a later CUDA call.
void ThreeNNLauncher(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){
//std::cout << "beginning forwarding" << std::endl;
three_nn_gpu<<<block_num, threadsPerBlock>>>(b, n, m, xyz1, xyz2, dist, idx);
//std::cout << "Finishing forwarding" << std::endl;
}
// Host launcher for three_interpolate_gpu: blends 3 neighbour features per
// output element.  points: (b, m, c), idx/weight: (b, n, 3), out: (b, n, c).
void ThreeInterpolateLauncher(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){
three_interpolate_gpu<<<block_num, threadsPerBlock>>>(b, m, c, n, points, idx, weight, out);
}
// Host launcher for three_interpolate_grad_gpu: scatters grad_out back to the
// m source points via atomicAdd.  The kernel only accumulates, so grad_points
// is presumably zero-initialized by the caller -- verify at the call site.
void ThreeInterpolateGradLauncher(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){
// grad_out: [b, n, c]
// idx: [b, n, 3], weight: [b, n, 3], grad_points: [b, m, c]
three_interpolate_grad_gpu<<<block_num, threadsPerBlock>>>(b, n, c, m, grad_out, idx, weight, grad_points);
}
// Host launcher for k_interpolate_gpu: blends k neighbour features per output
// element.  points: (b, m, c), idx/weight: (b, n, k), out: (b, n, c).
void KInterpolateLauncher(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){
k_interpolate_gpu<<<block_num, threadsPerBlock>>>(b, m, c, n, k, points, idx, weight, out);
}
// Host launcher for k_interpolate_grad_gpu: scatters grad_out back to the m
// source points via atomicAdd.  The kernel only accumulates, so grad_points
// is presumably zero-initialized by the caller -- verify at the call site.
void KInterpolateGradLauncher(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){
// grad_out: [b, n, c]
// idx: [b, n, k], weight: [b, n, k], grad_points: [b, m, c]
k_interpolate_grad_gpu<<<block_num, threadsPerBlock>>>(b, n, c, m, k, grad_out, idx, weight, grad_points);
}
|
12,422 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
// Red channel: escape-time iteration for pixel (i, j).  The D() macro
// (also used by GR/BL below) maps a coordinate to roughly [-1, 1]; note both
// axes divide by W, so the mapping assumes a square W == H image (H unused).
// Iterates z -> z^2 + (0.36237 + 0.32i) up to 200 steps; returns
// log(iterations) * 256 truncated to unsigned short.
// NOTE(review): y is computed from the ALREADY-updated x here (GR/BL save
// X/Y first) -- looks unintentional, but it only changes the rendered image.
__device__ unsigned short RD(int i, int j, int W, int H) {
#define D(x) (x-W/2.)/(W/2.)
float x = D(i), y = D(j), X, Y, n = 0; while (n++<200 && (X = x*x) + (Y = y*y)<4) { x = X - Y + .36237; y = 2 * x*y + .32; }return log(n) * 256;
}
// Green channel: Julia-set escape time for c = (-0.7 + 0.27015i), up to 200
// iterations; returns log(iterations) * 128 truncated to unsigned short.
// Uses the D() pixel-mapping macro defined in RD() above.
__device__ unsigned short GR(int i, int j, int W, int H) {
float x = D(i), y = D(j), X, Y, n = 0; while (n++<200 && (x*x + y*y)<4) { X = x; Y = y; x = X*X - Y*Y + -.7; y = 2 * X*Y + .27015; }return log(n) * 128;
}
// Blue channel: Julia-set escape time for c = (0.36237 + 0.32i), up to 600
// iterations; returns log(iterations) * 128 truncated to unsigned short.
// Uses the D() pixel-mapping macro defined in RD() above.
__device__ unsigned short BL(int i, int j, int W, int H) {
float x = D(i), y = D(j), X, Y, n = 0; while (n++<600 && (x*x + y*y)<4) { X = x; Y = y; x = X*X - Y*Y + .36237; y = 2 * X*Y + .32; }return log(n) * 128;
}
// One block per column x (launched as <<<W, H>>>: gridDim.x == W, one thread
// per row y).  Writes normalized fractal channel values into R/G/B, each a
// W*H-element buffer.
//
// Fix vs. the original: the flat index was x*W + y, which is only correct
// when W == H; with H > W it wrote out of bounds of the W*H buffers, and
// with H < W it aliased pixels.  The dense row-major index for
// (x in [0,W), y in [0,H)) is x*H + y.
__global__ void RGBKernel(float* R, float* G, float* B, int W, int H)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int p = x * H + y;                 // flat pixel index
    R[p] = RD(x, y, W, H) / 255.0f;
    G[p] = GR(x, y, W, H) / 255.0f;
    B[p] = BL(x, y, W, H) / 255.0f;
}
// Renders the three fractal channels on the GPU (one block per column, one
// thread per row) and copies them into the host arrays R, G, B (W*H floats
// each).  Returns the first CUDA error encountered, cudaSuccess otherwise.
//
// Fix vs. the original: DevR/DevG/DevB are initialized to nullptr, so a
// failure in cudaSetDevice or an early cudaMalloc no longer passes garbage
// pointers to the cudaFree calls at the Error label (cudaFree(nullptr) is a
// safe no-op).
extern "C" cudaError_t cuadRGB(int W, int H, float* R, float* G, float* B)
{
    float* DevR = nullptr;
    float* DevG = nullptr;
    float* DevB = nullptr;
    auto cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Device buffers for the three channels.
    cudaStatus = cudaMalloc((void**)(&DevR), sizeof(float) * W * H);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)(&DevG), sizeof(float) * W * H);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)(&DevB), sizeof(float) * W * H);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // W blocks of H threads: one thread per pixel.
    RGBKernel<<<W, H>>> (DevR, DevG, DevB, W, H);
    cudaStatus = cudaGetLastError();          // catches launch-config errors
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();     // catches async execution errors
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy the three channels back to the host.
    cudaStatus = cudaMemcpy(R, DevR, sizeof(float) * W * H, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(G, DevG, sizeof(float) * W * H, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(B, DevB, sizeof(float) * W * H, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Shared success/failure exit: frees whatever was allocated.
    cudaFree(DevR);
    cudaFree(DevG);
    cudaFree(DevB);
    return cudaStatus;
}
12,423 |
#include <stdlib.h>
#include <stdio.h>
//
// these libraries are for CUDA RNG
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
// define number of cores to use
#define N 20
// define number of trials to use
#define M 10000000
// we will be using unsigned ints because we want a random positive number between 0 and 1
// kernel for generating random number then executing monte carlo method
// Kernel: each thread performs M Monte Carlo trials of the quarter-circle
// area estimate and accumulates its own hit count in d_area[global thread id].
//
// Fixes vs. the original:
//  * d_area[ind] is zeroed before accumulation (it was read-modify-written
//    starting from uninitialized device memory);
//  * the RNG is sequenced by the GLOBAL thread id (the original seeded with
//    threadIdx.x, which is 0 in every block under the <<<N, 1>>> launch, so
//    all threads produced the identical stream);
//  * the dead trailing loop -- which summed M mostly-unwritten d_area
//    elements into the BY-VALUE parameter `totarea`, discarded on kernel
//    exit -- is removed.  The parameter is kept so the signature and all
//    call sites are unchanged.
__global__ void mcarlo( float *d_area, int totarea ) {
    (void)totarea;  // by-value: any write here would be invisible to the host
    int ind = blockIdx.x * blockDim.x + threadIdx.x;
    curandState s;
    // Common seed, per-thread subsequence -> independent streams.
    curand_init(1234ULL, ind, 0, &s);
    d_area[ind] = 0.0f;
    for (int i = 0; i < M; ++i) {
        float x = curand_uniform(&s);
        float y = curand_uniform(&s);
        if (x*x + y*y <= 1.0f) {
            d_area[ind] += 1;
        }
    }
}
// main function
// Driver: launches N single-thread blocks of the Monte Carlo kernel, reduces
// the per-thread hit counts on the host, and prints the pi estimate.
//
// Fixes vs. the original:
//  * the result was "retrieved" with cudaMemcpy(&h_tot, &totarea, ...), i.e.
//    copying from the HOST variable's own address as if it were device
//    memory -- the kernel output was never read; we now copy d_area back and
//    reduce on the host;
//  * pi was declared int (integer truncation) and divided by N only,
//    ignoring the M trials per thread; the estimate is now
//    4 * hits / (N * M) in double precision;
//  * d_area is sized and zeroed for the N slots the launch actually writes.
int main( void ) {
    // Select device 0 if present.
    int deviceid = 0;
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    if (deviceid < devCount) cudaSetDevice(deviceid);
    else return(1);
    // One hit-count slot per launched thread (N blocks x 1 thread).
    float *d_area;
    cudaMalloc(&d_area, N * sizeof(float));
    cudaMemset(d_area, 0, N * sizeof(float));
    // Execute kernel to implement the Monte Carlo estimate.
    mcarlo<<<N , 1>>>( d_area, 0 );
    // Blocking copy of the per-thread hit counts (also syncs with the kernel).
    float h_area[N];
    cudaMemcpy(h_area, d_area, N * sizeof(float), cudaMemcpyDeviceToHost);
    double hits = 0.0;
    for (int i = 0; i < N; ++i) hits += h_area[i];
    // pi ~= 4 * (hits inside the quarter circle) / (total trials N*M).
    double pi = 4.0 * hits / ((double)N * (double)M);
    printf("\nPI is %f\n", pi);
    // Free device memory.
    cudaFree(d_area);
    return 0;
}
|
12,424 | # include <getopt.h>
# include <ctype.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <string.h>
# include <pmmintrin.h>
# include <time.h>
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void suma2D_CPU(float* A, float* B, int N, int V);
void getParams (int argc, char** argv, char* nValue, char* bValue, char* vValue);
int isInteger (char* input);
float pixelSum (float* image, int N);
void printImage (float* image, int N);
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Neighborhood-sum filter: B[p] = sum of A over the (2V+1) x (2V+1) window
// centered at pixel p, clipped at the image borders.  One thread per pixel;
// each thread accumulates into its own slot of a per-block shared buffer and
// writes the result to global memory once (no inter-thread sharing, so no
// __syncthreads() is needed).
//
// Fixes vs. the original:
//  * threads whose global pixel falls outside the N x N image now return
//    early -- the original wrote B (and read A) out of bounds whenever N was
//    not a multiple of the block edge;
//  * the local id is linearized with blockDim.x (the original used
//    blockDim.y, which only worked because blocks happen to be square).
__global__ void suma2D_SHMEM (float* A, float* B, int N, int V) {
    int offset, neighbour, mid_row, neigh_row, center_neigh;
    // Thread coordinates within the block.
    int local_i = threadIdx.x;
    int local_j = threadIdx.y;
    int local_id = local_i + local_j * blockDim.x;
    // Global pixel coordinates.
    int global_i = blockDim.x * blockIdx.x + local_i;
    int global_j = blockDim.y * blockIdx.y + local_j;
    // Guard: partial edge blocks must not touch pixels outside the image.
    // (Safe to return early: this kernel has no __syncthreads().)
    if (global_i >= N || global_j >= N) return;
    int global_id = global_i + global_j * N;
    // Per-block scratch sized for the maximum CUDA block (1024 threads).
    __shared__ float temp[1024];
    temp[local_id] = 0.0f;
    // Scan the flat-index band that covers the window's 2V+1 rows; the two
    // checks below discard positions outside the image or outside the window.
    for (offset = -V * (1 + N); offset <= V * (1 + N); offset++) {
        neighbour = global_id + offset;     // candidate neighbour, flat index
        neigh_row = neighbour / N;          // candidate's row
        mid_row = global_id / N;            // this pixel's row
        if ( (neighbour >= 0) && (neighbour < (N * N)) ) {
            // Pixel in the candidate's row sharing this pixel's column.
            center_neigh = global_id - (mid_row - neigh_row) * N;
            // Keep only candidates within +-V columns of that center.
            if ( (neighbour >= (center_neigh - V)) && (neighbour <= (center_neigh + V)) ) {
                temp[local_id] = temp[local_id] + A[neighbour];
            }
        }
    }
    // Publish the accumulated window sum to global memory.
    B[global_id] = temp[local_id];
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Driver: builds a random N x N image, runs the shared-memory neighborhood
// sum on the GPU and the sequential reference on the CPU, and reports both
// checksums and timings.
// NOTE(review): nValue/bValue/vValue are malloc'd with ONE byte each, but
// getParams strcpy's whole option strings into them -- heap overflow for any
// multi-character argument; they should be sized for the expected input.
// NOTE(review): the grid is N/Bs x N/Bs with integer division, so N is
// assumed to be a multiple of Bs; otherwise border pixels are not computed.
__host__ int main(int argc, char** argv) {
clock_t start_t, end_t; // wall-clock markers for the sequential CPU run
float sum_gpu, sum_seq, gpu_time, cpu_time; // final checksums and timings
char* nValue = (char*)malloc(sizeof(char));
char* bValue = (char*)malloc(sizeof(char));
char* vValue = (char*)malloc(sizeof(char));
// Parse -N (image edge), -B (block edge), -V (neighborhood radius).
getParams (argc, argv, nValue, bValue, vValue);
// Convert the string arguments to integers.
int N = atoi(nValue); // image edge length
int Bs = atoi(bValue); // thread-block edge length
int V = atoi(vValue); // neighborhood radius
dim3 gridSize = dim3(N / Bs, N / Bs); // 2D grid of (N/Bs)^2 blocks
dim3 blockSize = dim3(Bs, Bs); // square Bs x Bs blocks
float* h_a = (float*)malloc( (N * N) * sizeof(float)); // host input image
float* h_b = (float*)malloc( (N * N) * sizeof(float)); // host buffer for the GPU result
float* seq_b = (float*)malloc( (N * N) * sizeof(float));// host buffer for the CPU result
// Device-side input and output images.
float* d_a;
float* d_b;
// Fill the input with uniform random values in [0, 1].
for (int index = 0; index < (N * N); index++) {
h_a[index] = (float) rand() / RAND_MAX;
}
// CUDA events bracket all GPU-side work (allocs + copies + kernel).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Device allocations.
cudaMalloc((void**) &d_a, (N * N) * sizeof(float));
cudaMalloc((void**) &d_b, (N * N) * sizeof(float));
// Host -> device copies.  (Copying h_b sends uninitialized bytes; the
// kernel overwrites d_b, so this only wastes bandwidth.)
cudaMemcpy(d_a, h_a, (N * N) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, (N * N) * sizeof(float), cudaMemcpyHostToDevice);
// Launch the shared-memory kernel.
suma2D_SHMEM<<<gridSize, blockSize>>>(d_a, d_b, N, V);
// Device -> host copy of the result (blocks until the kernel finishes).
cudaMemcpy(h_b, d_b, (N * N) * sizeof(float), cudaMemcpyDeviceToHost);
// Stop the events and read the elapsed GPU time in milliseconds.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time, start, stop);
// Checksum of the GPU result.
sum_gpu = pixelSum (h_b, N);
// Report GPU time and checksum.
printf("Tiempo GPU: %f (ms)\n", gpu_time);
printf("Suma GPU: %f\n", sum_gpu);
// Time the sequential CPU reference.
start_t = clock();
suma2D_CPU (h_a, seq_b, N, V);
// Stop the clock and convert to milliseconds.
end_t = clock();
cpu_time = (float)(end_t - start_t) / CLOCKS_PER_SEC;
cpu_time *= 1000;
// Checksum of the CPU result.
sum_seq = pixelSum (seq_b, N);
// Report CPU time and checksum.
printf("Tiempo CPU: %f (ms)\n", cpu_time);
printf("Suma CPU: %f\n", sum_seq);
// Release the timing events.
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Release device and host memory.
cudaFree(d_a);
cudaFree(d_b);
free(h_a);
free(h_b);
free(seq_b);
// Release the argument buffers.
free(nValue);
free(bValue);
free(vValue);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sequential reference for the neighborhood sum: for every pixel, adds all
// pixels of the (2V+1) x (2V+1) window centered on it, clipped at the image
// borders.  A and B are flat N x N row-major arrays.
void suma2D_CPU(float* A, float* B, int N, int V) {
    // Flat-index span that covers the window's 2V+1 rows around a pixel.
    int span = V * (1 + N);
    for (int p = 0; p < (N * N); p++) {
        float acc = 0.0f;
        int row = p / N;
        for (int off = -span; off <= span; off++) {
            int nb = p + off;                       // candidate neighbour
            if (nb < 0 || nb >= N * N) continue;    // outside the image
            int nb_row = nb / N;
            // Pixel in the candidate's row that shares p's column.
            int row_center = p - (row - nb_row) * N;
            // Keep only candidates within +-V columns of that center.
            if (nb >= row_center - V && nb <= row_center + V)
                acc += A[nb];
        }
        B[p] = acc;
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// - INPUTS: - argc: Largo del arreglo de argumentos argv.
// - argv: Arreglo con los argumentos de entrada incluyendo en nombre del archivo.
// - nValue: Tamaño de la imagen de entrada
// - bValue: Tamaño de bloque de entrada
// - vValue: Radio de la vecindad
// - OUTPUTS: -
// Parses command-line options with getopt:
//   -N image edge length, -B block edge length, -V neighborhood radius.
// Each argument is validated as a non-negative integer string; on any
// invalid, unknown, or missing argument an error banner is printed and the
// process exits with EXIT_FAILURE.
// NOTE(review): optarg is strcpy'd into the caller-supplied buffers with no
// length check -- callers must size nValue/bValue/vValue for the longest
// expected argument (main() currently allocates 1 byte each; confirm).
void getParams (int argc, char** argv, char* nValue, char* bValue, char* vValue) {
int c;
while ( (c = getopt (argc, argv, "N:B:V:")) != -1) {
switch (c) {
case 'N':
// Image size.
strcpy(nValue, optarg);
if (!isInteger(nValue)) {
printf ("%s\n", "-------------------------------------------------------------------------");
printf (" => El argumento de -%c debe ser un ENTERO POSITIVO.\n", c);
printf (" => Programa abortado\n");
printf ("%s\n", "-------------------------------------------------------------------------");
exit(EXIT_FAILURE);
}
break;
case 'B':
// Block size.
strcpy(bValue, optarg);
if (!isInteger(bValue)) {
printf ("%s\n", "-------------------------------------------------------------------------");
printf (" => El argumento de -%c debe ser un ENTERO POSITIVO.\n", c);
printf (" => Programa abortado\n");
printf ("%s\n", "-------------------------------------------------------------------------");
exit(EXIT_FAILURE);
}
break;
case 'V':
// Neighborhood radius.
strcpy(vValue, optarg);
if (!isInteger(vValue)) {
printf ("%s\n", "-------------------------------------------------------------------------");
printf (" => El argumento de -%c debe ser un ENTERO POSITIVO.\n", c);
printf (" => Programa abortado\n");
printf ("%s\n", "-------------------------------------------------------------------------");
exit(EXIT_FAILURE);
}
break;
case '?':
// Known option missing its argument.
if ( (optopt == 'N') || (optopt == 'B') || (optopt == 'V') ) {
printf ("%s\n", "-------------------------------------------------------------------------");
printf (" => La opcion -%c requiere un argumento.\n", optopt);
printf (" => Programa abortado\n");
printf ("%s\n", "-------------------------------------------------------------------------");
exit(EXIT_FAILURE);
}
// Unknown but printable option character.
else if (isprint (optopt)) {
printf ("%s\n", "-------------------------------------------------------------------------");
printf (" => Opcion -%c desconocida.\n", optopt);
printf (" => Programa abortado\n");
printf ("%s\n", "-------------------------------------------------------------------------");
exit(EXIT_FAILURE);
}
// Unprintable unknown option: falls through to default (ignored).
default:
break;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// - INPUTS: - input: Cadena de caracteres a evaluar si corresponde a un numero entero positivo o no
// - OUTPUTS: Valor booleano 1 si es entero positivo, 0 en caso contrario
// - DESCRIPTION: Verifica si una cadena de caracteres de entrada posee en cada una de sus posiciones un caracter que es
// digito y es positivo
// Returns 1 iff `input` is a non-empty string consisting only of decimal
// digits (i.e. a non-negative integer literal), 0 otherwise.
//
// Fixes vs. the original:
//  * the empty string no longer passes (a loop that never ran used to fall
//    through to `return 1`); a NULL pointer is rejected as well;
//  * strlen() is hoisted out of the loop condition instead of being
//    re-evaluated every iteration;
//  * isdigit() receives an unsigned char (passing a negative char is
//    undefined behavior for bytes >= 0x80).
int isInteger (char* input) {
    if (input == NULL || input[0] == '\0')
        return 0;
    size_t len = strlen(input);
    for (size_t c = 0; c < len; c++) {
        // Any non-digit character disqualifies the whole string.
        if (!isdigit((unsigned char)input[c]))
            return 0;
    }
    return 1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// - DESCRIPTION: Determina la suma de los elementos de una imagen, de largo y ancho N.
// Sums every pixel of a square N x N image stored as a flat row-major array.
float pixelSum (float* image, int N) {
    const int count = N * N;
    float total = 0.0f;
    for (int i = 0; i < count; i++)
        total += image[i];
    return total;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// - DESCRIPTION: Mostrar una matriz por consola
// Prints an N x N image to stdout, one matrix row per line, each value
// followed by a space (same output format as before).
void printImage (float* image, int N) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            printf("%f ", image[row * N + col]);
        printf("\n");
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////// END ////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
12,425 | typedef unsigned char Rgb[3];
typedef float Vec2[2];
typedef float Vec3[3];
#include <stdio.h>
#define ww 16
#define hh 16
// 2D edge function: the cross product (c - a) x (b - a), spelled out in
// intermediate steps. Same operations in the same order as before, so the
// float result is bit-identical.
__device__ __host__ inline
float edgeFunction(const Vec2 &a, const Vec2 &b, const Vec2 &c)
{
    const float acx = c[0] - a[0];
    const float acy = c[1] - a[1];
    const float abx = b[0] - a[0];
    const float aby = b[1] - a[1];
    return acx * aby - acy * abx;
}
// Rasterizes num_triangles triangles into an RGB (3 bytes/pixel) framebuffer
// of size w x h. One thread per pixel; each thread loops over all triangles
// and, where the pixel center lies inside a triangle, writes the barycentric
// coordinates as an RGB color.
// Fix: the pixel bounds were hard-coded to 512x512 and the w/h parameters
// were ignored; since `index` is computed with `w`, a launch with w < 512
// wrote out of bounds. The guard now uses the actual image size, and the
// per-pixel values are hoisted out of the triangle loop.
__global__ void rasterize_triangle(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d,
const float * y0_d, const float * y1_d, const float * y2_d, const int w, const int h, const int num_triangles){
    int i = threadIdx.x + blockIdx.x * blockDim.x;   // column
    int j = threadIdx.y + blockIdx.y * blockDim.y;   // row
    if (i >= w || j >= h) return;                    // guard the rounded-up grid
    Vec2 p = {i + 0.5f, j + 0.5f};                   // pixel center
    int index = (i + j * w) * 3;                     // RGB triple for this pixel
    for (int k = 0; k < num_triangles; k++){
        Vec2 a = {x0_d[k], y0_d[k]};
        Vec2 b = {x1_d[k], y1_d[k]};
        Vec2 c = {x2_d[k], y2_d[k]};
        float area  = edgeFunction(a, b, c);
        float alpha = edgeFunction(b, c, p);
        float beta  = edgeFunction(c, a, p);
        float gamma = edgeFunction(a, b, p);
        // Inside test: all three edge functions non-negative.
        if (alpha >= 0 && beta >= 0 && gamma >= 0){
            // Normalized barycentric coordinates -> RGB.
            framebuffer_d[index]     = (unsigned char)((alpha / area) * 255);
            framebuffer_d[index + 1] = (unsigned char)((beta  / area) * 255);
            framebuffer_d[index + 2] = (unsigned char)((gamma / area) * 255);
        }
    }
}
// Host launcher for rasterize_triangle: one 32x32 thread block per image
// tile, grid rounded up to cover the full image.
// Fix: the grid was sized for a hard-coded 512x512 image; it now uses the
// caller-supplied w/h so the kernel covers exactly the requested framebuffer.
void basicTriRast(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d,
const float * y0_d, const float * y1_d, const float * y2_d, const int w, const int h, const int num_triangles){
    const unsigned int BLOCK_SIZE = 32;
    dim3 ThreadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 BlocksPerGrid((w + BLOCK_SIZE - 1) / BLOCK_SIZE,
                       (h + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    rasterize_triangle<<<BlocksPerGrid, ThreadsPerBlock>>>(framebuffer_d, x0_d, x1_d, x2_d, y0_d, y1_d, y2_d, w, h, num_triangles);
}
12,426 | #include<stdio.h>
__global__ void sumKernel(double *d_a, double *d_b, double *d_c, int n);
double *h_a, *h_b, *h_c;
const int N = 1000;
const int M = sizeof(double) * N;
// Demo driver: computes h_c[i] = h_a[i] + h_b[i] (= 2*i) for N doubles on
// the GPU and prints every resulting sum.
int main(){
// Host allocations; M = N * sizeof(double).
h_a = (double*)malloc(M);
h_b = (double*)malloc(M);
h_c = (double*)malloc(M);
// Fill a and b with 0..N-1 (a is filled in reverse order; same contents).
for(int i = N - 1; i >= 0; i--)
h_a[i] = i;
for(int i = 0; i < N; i++)
h_b[i] = i;
double *d_a, *d_b, *d_c;
// Device allocations and input upload.
// NOTE(review): none of the CUDA calls below are error-checked.
cudaMalloc((void**)&d_a, M);
cudaMalloc((void**)&d_b, M);
cudaMalloc((void**)&d_c, M);
cudaMemcpy(d_a, h_a, M, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, M, cudaMemcpyHostToDevice);
const int block_size = 64;
// Grid is rounded UP (N/64 + 1 blocks), so there are more threads than
// elements; the kernel must bounds-check against n.
const int grid_size = N / block_size + 1;
sumKernel<<<grid_size, block_size>>>(d_a, d_b, d_c, N);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy(h_c, d_c, M, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%lf + %lf = %lf\n",h_a[i], h_b[i], h_c[i]);
// Release host and device buffers.
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
// Element-wise sum: d_c[i] = d_a[i] + d_b[i] for i in [0, n).
// Fix: added the bounds guard. The launch rounds the grid up
// (N / block_size + 1 blocks => 1024 threads for N = 1000), so without the
// guard the trailing threads read and wrote past the end of the arrays.
__global__ void sumKernel(double *d_a, double *d_b, double *d_c, int n){
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < n)
        d_c[index] = d_a[index] + d_b[index];
}
12,427 | #include "includes.h"
// Copies gradOutput into top_grad at exactly the positions where the mask
// value (truncated to int) equals mask_index; other positions are untouched.
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask, float *top_grad, const int mask_index){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the rounded-up grid and apply the mask in one condition.
    if (idx < n && ((int) mask[idx]) == mask_index)
        top_grad[idx] = gradOutput[idx];
}
12,428 | // Program by Arthur Alves Araujo Ferreira - All rights reserved
// ITESM ID: A01022593
// nvcc -o test matrix_mult_tiling.cu -std=c++11
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <chrono>
#include <cuda_runtime.h>
#define TILE_SIZE 32
using namespace std;
// Function that multiplies 2 matrices using gpu tiling
// Shared-memory tiled matrix multiply: C += A * B for row-major n x n
// doubles; one TILE_SIZE x TILE_SIZE thread block per output tile.
// Fix: the result was stored transposed (C[col*n+row]); both the CPU
// reference and matrixMultiplyGPU store row-major C[row*n+col], so the
// comparison in main could never match for a non-symmetric product.
// Also uses 0.0 (double) instead of 0.0f for the zero padding.
__global__ void matrixMultiplyGPUTiling(double *A, double *B, double *C, const int n) {
    __shared__ double tileA[TILE_SIZE * TILE_SIZE];
    __shared__ double tileB[TILE_SIZE * TILE_SIZE];
    unsigned int x = threadIdx.x;
    unsigned int y = threadIdx.y;
    unsigned int col = x + blockIdx.x * blockDim.x;
    unsigned int row = y + blockIdx.y * blockDim.y;
    double sum = 0.0;
    // Walk the tiles along the shared dimension. Out-of-range elements are
    // zero-padded so every thread reaches both __syncthreads() barriers.
    for (int i = 0; i < (n + TILE_SIZE - 1) / TILE_SIZE; i++) {
        tileA[y * TILE_SIZE + x] = (row < n && i * TILE_SIZE + x < n)
            ? A[row * n + i * TILE_SIZE + x] : 0.0;
        tileB[y * TILE_SIZE + x] = (col < n && i * TILE_SIZE + y < n)
            ? B[(i * TILE_SIZE + y) * n + col] : 0.0;
        __syncthreads();                  // tiles fully loaded before use
        for (int k = 0; k < TILE_SIZE; k++)
            sum += tileA[y * TILE_SIZE + k] * tileB[k * TILE_SIZE + x];
        __syncthreads();                  // done reading before next overwrite
    }
    if (col < n && row < n)
        C[row * n + col] += sum;          // row-major, consistent with the CPU path
}
// Function that multiplies 2 matrices using gpu
// Naive matrix multiply: C += A * B for row-major n x n doubles, one thread
// per output element (assumes C was zeroed by the caller).
__global__ void matrixMultiplyGPU(double *A, double *B, double *C, const int n) {
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= n || row >= n)
        return;                           // guard the rounded-up grid
    for (int k = 0; k < n; k++)
        C[row * n + col] += A[row * n + k] * B[k * n + col];
}
// Function that multiplies 2 matrices on cpu
// CPU reference: C += A * B for row-major n x n doubles (assumes C was
// zero-initialized by the caller).
void matrixMultiply(double *A, double *B, double *C, const int n) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            double *out = &C[row * n + col];
            for (int k = 0; k < n; k++)
                *out += A[row * n + k] * B[k * n + col];
        }
    }
}
// Function that runs through a matrix and prints it
// Prints an n x n matrix, one row per line, values separated by spaces.
// Fix: the old check (i != 0 && i % n == 0) emitted each newline one element
// too late — after the first element of the NEXT row — and never terminated
// the final row. (i + 1) % n == 0 breaks exactly at the end of each row.
void printMatrix(double *matrix, const int n) {
    int size = n * n;
    for (int i = 0; i < size; i++) {
        std::cout << matrix[i] << " ";
        if ((i + 1) % n == 0)             // end of a row
            std::cout << std::endl;
    }
    return;
}
// Funtion that compares two matrices and returns boolean
// Exact element-wise equality check for two n x n matrices; stops at the
// first mismatch.
bool matrixCompare(double *m_A, double *m_B, const int n) {
    const int size = n * n;
    for (int i = 0; i < size; i++)
        if (m_A[i] != m_B[i])
            return false;
    return true;
}
// Benchmark driver: times an n x n double GEMM on the CPU, the naive GPU
// kernel, and the tiled GPU kernel, and verifies both GPU results against
// the CPU reference.
// Fixes: (1) the input matrices were only initialized for i < n instead of
// i < n*n, leaving most of A and B uninitialized; (2) `bytes` used
// sizeof(double *) instead of sizeof(double); (3) d_C was not re-zeroed
// before the tiled kernel even though both kernels accumulate (+=) into C.
int main(int argc, char *argv[]) {
    // Set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    // Matrix information (element type is double, not double*).
    int n = 1000;
    int bytes = n*n * sizeof(double);
    // Host matrices
    double *h_A = (double *)malloc(bytes);
    double *h_B = (double *)malloc(bytes);
    // Results
    double *hostRef = (double *)malloc(bytes);
    double *gpuRef = (double *)malloc(bytes);
    // Fill result matrices w zeros (both multiply paths accumulate).
    memset(hostRef, 0, bytes);
    memset(gpuRef, 0, bytes);
    // Fill input matrices with random nums between 1 and 10 — all n*n
    // elements, not just the first row's worth.
    for (int i = 0; i < n*n; i++) {
        h_A[i] = 1 + static_cast <double> (rand()) / (static_cast <double> (RAND_MAX/9));
        h_B[i] = 1 + static_cast <double> (rand()) / (static_cast <double> (RAND_MAX/9));
    }
    // Set up device buffers and upload inputs.
    double *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, bytes);
    cudaMalloc((void **)&d_B, bytes);
    cudaMalloc((void **)&d_C, bytes);
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytes);
    dim3 block(TILE_SIZE, TILE_SIZE);
    dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
    cout <<"grid.x "<<grid.x<<" grid.y "<<grid.y<<" block.x "<<block.x<<" block.y "<<block.y<< endl;
    // Multiply and time CPU
    auto start = std::chrono::high_resolution_clock::now();
    matrixMultiply(h_A, h_B, hostRef, n);
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<float, std::milli> duration_ms = end - start;
    double totalTime = duration_ms.count();
    cout << "Time for multiplying on cpu: " << totalTime << endl;
    // Multiply matrices in gpu
    start = std::chrono::high_resolution_clock::now();
    matrixMultiplyGPU<<<grid, block>>>(d_A, d_B, d_C, n);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    cudaMemcpy(gpuRef, d_C, bytes, cudaMemcpyDeviceToHost);
    cout << (matrixCompare(hostRef, gpuRef, n) ? "Correctly multiplied both matrices (comparing GPU and CPU)" : "Incorrect GPU multiplication") << endl;
    duration_ms = end - start;
    totalTime = duration_ms.count();
    cout << "Time for multiplying on gpu: " << totalTime << endl;
    // Multiply matrices in gpu with tiling. Both kernels accumulate into C,
    // so d_C must be zeroed again or the tiled run starts from the previous
    // product.
    cudaMemset(d_C, 0, bytes);
    start = std::chrono::high_resolution_clock::now();
    matrixMultiplyGPUTiling<<<grid, block>>>(d_A, d_B, d_C, n);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    cudaMemcpy(gpuRef, d_C, bytes, cudaMemcpyDeviceToHost);
    cout << (matrixCompare(hostRef, gpuRef, n) ? "Correctly multiplied both matrices (comparing GPU with tiling and CPU)" : "Incorrect GPU tiling multiplication") << endl;
    duration_ms = end - start;
    totalTime = duration_ms.count();
    cout << "Time for multiplying on gpu with tiling: " << totalTime << endl;
    // Free memory that was allocated for matrixes
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    cudaDeviceReset();
    return 0;
}
|
12,429 | #include "includes.h"
// Stream-compaction scatter: for every element whose flag is set, writes its
// global index into unique_index at the slot given by the inclusive prefix
// sum of the flags (1-based, hence the -1).
__global__ void unique_index_kernel(const char* flag, const int* flag_inc_sum, int* unique_index, int num_elems) {
    const int stride = blockDim.x * gridDim.x;
    // Grid-stride loop: any launch configuration covers all elements.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elems; i += stride) {
        if (flag[i] == 1)
            unique_index[flag_inc_sum[i] - 1] = i;
    }
}
12,430 | #include "includes.h"
// Fused multiply-add over arrays: d[i] = a[i] * b[i] + c[i] for i in [0, N).
__global__ void custom_kernel(float *a, float *b, float *c, float *d, int N) {
    const int stride = blockDim.x * gridDim.x;
    // Grid-stride loop: any grid size covers all N elements.
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += stride)
        d[i] = a[i] * b[i] + c[i];
}
12,431 | #include "includes.h"
// For every cell marked as a winner (winner[i] == 1.0f), emits the four
// vertices of the bottom face of an axis-aligned cube centered at that
// cell's point, with side length 1.2 * cubeSize, into vertexData starting
// at vertexOffset.
// NOTE(review): every winning thread writes the SAME vertexData range
// (vertexOffset .. vertexOffset+11) — if more than one cell can win, this
// is a data race and only one winner's cube survives. Confirm that exactly
// one winner is expected per launch, or that vertexOffset is meant to be
// per-thread.
__global__ void WinnersKernel( float *winner, float *vertexData, int vertexOffset, float *pointsCoordinates, float cubeSize, int maxCells )
{
// Global thread id for a 2D grid of 1D blocks.
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
if(winner[threadId] == 1.00f)
{
// Cell center (xyz triplets in pointsCoordinates).
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
// Cube is 20% larger than the cell's cube size.
float side = 1.2f * cubeSize;
float halfSize = 0.50f * side;
// bottom side: 4 vertices (12 floats), counter-clockwise around -y face.
vertexData[vertexOffset] = x - halfSize;
vertexData[vertexOffset + 1] = y - halfSize;
vertexData[vertexOffset + 2] = z - halfSize;
vertexData[vertexOffset + 3] = x - halfSize;
vertexData[vertexOffset + 4] = y - halfSize;
vertexData[vertexOffset + 5] = z + halfSize;
vertexData[vertexOffset + 6] = x + halfSize;
vertexData[vertexOffset + 7] = y - halfSize;
vertexData[vertexOffset + 8] = z + halfSize;
vertexData[vertexOffset + 9] = x + halfSize;
vertexData[vertexOffset + 10] = y - halfSize;
vertexData[vertexOffset + 11] = z - halfSize;
}
}
}
12,432 | #include <iostream>
#include <cuda.h>
using namespace std;
// Naive matrix multiply: C = A * B for row-major n x n floats, one thread
// per output element (flattened 1D launch).
// Fix: C[idx] was zeroed BEFORE the bounds check, so threads of the last
// partial block wrote past the end of C. The accumulation now happens in a
// register as well, avoiding a global read-modify-write per iteration
// (same addition order, so results are unchanged).
__global__ void matmul_kernel(const float* A, const float* B, float* C, size_t n) {
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n*n) {
        unsigned int row_idx = idx/n;
        unsigned int col_idx = idx%n;
        float acc = 0.0f;
        for (unsigned int k = 0; k < n; k++)
            acc += A[n*row_idx + k] * B[n*k + col_idx];
        C[idx] = acc;
    }
}
// Host wrapper: launches matmul_kernel with enough blocks to cover all
// n*n output elements, then blocks until the kernel finishes.
void matmul(const float* A, const float* B, float* C, size_t n, unsigned int threads_per_block) {
    const size_t total = n * n;
    const size_t blocks = (total + threads_per_block - 1) / threads_per_block;
    matmul_kernel<<<blocks, threads_per_block>>>(A, B, C, n);
    cudaDeviceSynchronize();
}
|
12,433 | #pragma once
#ifndef __SHUFFLE_KERNELS_CUH
#define __SHUFFLE_KERNELS_CUH
#include <cstddef>
#include <cuda.h>
#include <iostream>
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#ifndef CUDA_CALL
#define CUDA_CALL(x) do { auto y = (x); if(y != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
std::cout << "Error is: " << cudaGetErrorString(y) << std::endl; \
exit(EXIT_FAILURE);}} while(0)
#endif
typedef unsigned int uint;
// Initializes one curand state per thread. Every thread uses the same
// clock64() seed; streams are distinguished by the sequence number, which
// is the global thread id. No offset.
__global__ void setup_kernel(curandState *state)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(clock64(), tid, 0, &state[tid]);
}
// One Fisher-Yates step: swaps element idx_2 of this thread's private
// LENGTH-element slice of `vect` with a uniformly chosen element in
// [idx_2, LENGTH-1]. Called once per idx_2 by the shuffle kernel.
template <typename T>
__device__ void shuffle_vector(T* __restrict vect, const std::size_t LENGTH, const std::size_t idx_2, curandState *state)
{
std::size_t id = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread owns the contiguous slice starting at id * LENGTH.
T* my_v = &vect[id * LENGTH];
float rand_num = curand_uniform(&state[id]);
// random index for swap in range [idx_2, last]
// simply picking a number in range [0, last] will
// create bias in the shuffled list
// The 0.999999 fudge maps the (0,1] sample onto integer indices
// idx_2 .. LENGTH-1 after truncation — presumably relies on
// curand_uniform excluding 0; TODO confirm edge behavior at exactly 1.0.
rand_num *= ((LENGTH - 1) - idx_2) + 0.999999;
rand_num += idx_2;
std::size_t idx_1 = (std::size_t)truncf(rand_num);
// swap current element and random element
T temp = my_v[idx_1];
my_v[idx_1] = my_v[idx_2];
my_v[idx_2] = temp;
}
// Replicates the first LENGTH-element vector into the N-1 slots that follow
// it, yielding N contiguous identical copies. One thread per source element.
template <typename T>
__global__ void duplicate_n(T* __restrict vect, const std::size_t LENGTH, const std::size_t N)
{
    const std::size_t src = threadIdx.x + blockIdx.x * blockDim.x;
    if (src >= LENGTH)
        return;                           // guard the rounded-up grid
    for (std::size_t copy = 1; copy < N; copy++)
        vect[copy * LENGTH + src] = vect[src];
}
// Fisher-Yates shuffle: each thread shuffles its own LENGTH-element slice of
// `vect` in place, drawing from its per-thread curand state.
// NOTE(review): there is no guard on the global thread id versus the number
// of vectors stored in `vect` — the launch configuration must supply exactly
// one thread per vector; confirm at the call sites.
template <typename T>
__global__ void shuffle(T* __restrict vect, const std::size_t LENGTH, curandState* state)
{
for (std::size_t i = 0; i < LENGTH; i++)
{
shuffle_vector(vect, LENGTH, i, state);
}
}
// Host wrapper: launches the int instantiation of shuffle<> on `stream`.
// One thread per vector is expected (see shuffle's note on bounds).
void shuffle_wrapper(uint n_blocks, uint n_threads, cudaStream_t& stream, int* __restrict vect, const std::size_t LENGTH, curandState* state)
{
shuffle<int><<<n_blocks, n_threads, 0, stream>>>(vect, LENGTH, state);
}
// Host wrapper: launches the int instantiation of duplicate_n<> on `stream`
// to produce N contiguous copies of the first LENGTH-element vector.
void duplicate_n_wrapper(uint n_blocks, uint n_threads, cudaStream_t& stream, int* __restrict vect, const std::size_t LENGTH, const std::size_t N)
{
duplicate_n<int><<<n_blocks, n_threads, 0, stream>>>(vect, LENGTH, N);
}
// Host wrapper: seeds one curandState per launched thread on `stream`.
// `state` must hold at least n_blocks * n_threads elements.
void setup_kernel_wrapper(uint n_blocks, uint n_threads, cudaStream_t& stream, curandState *state)
{
setup_kernel<<<n_blocks, n_threads, 0, stream>>>(state);
}
#endif |
12,434 | #include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <cuda_runtime.h>
#include <time.h>
#include "device_launch_parameters.h"
#include "cuda.h"
using namespace std;
#define Nn 4096
#define Ni 25088
#define BATCH_SIZE 16
#define BLOCK_SIZE 32
#define BlockSize2D 16
#define VTYPE float
/*
* synapse (w) is (Nn x Ni)^T
* neuron_i (x) is (BATCH_SIZE x Ni)
* neuron_n (y) is (BATCH_SIZE x Nn)
*
* y = Xw^T
*/
// Fills a buffer with `count` uniform random values in [0, 1].
static void fill_random(VTYPE* dst, int count) {
    for (int i = 0; i < count; i++)
        dst[i] = rand() / (VTYPE)RAND_MAX;
}

// Randomizes the layer's buffers. The fill order (output, inputs, weights)
// is preserved so the rand() stream produces the same values as before.
// NOTE(review): only the first Nn entries of h_neuron_n are filled here even
// though main allocates Nn * BATCH_SIZE — the CPU path later overwrites it
// in full, so this appears harmless; confirm if reused elsewhere.
void init_layer(VTYPE* h_neuron_i, VTYPE* h_neuron_n, VTYPE* synapse) {
    fill_random(h_neuron_n, Nn);
    fill_random(h_neuron_i, Ni * BATCH_SIZE);
    fill_random(synapse, Ni * Nn);
}
// Naive fully-connected forward pass:
// neuron_n[row][col] = sum_i neuron_i[row][i] * synapse[i][col],
// one thread per output element. Weights are stored Ni x Nn (row = input).
__launch_bounds__(1024,2)
__global__ void d_MatMul_simple1(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* synapse) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= Nn || row >= BATCH_SIZE)
        return;                           // guard the rounded-up grid
    VTYPE acc = 0.0f;
    #pragma unroll
    for (int i = 0; i < Ni; i++)
        acc += d_neuron_i[row * Ni + i] * synapse[col + Nn * i];
    d_neuron_n[row * Nn + col] = acc;
}
// Half-tiled fully-connected forward pass: only the activations are staged
// through shared memory (BlockSize2D x BlockSize2D tile); the weights are
// read directly from global memory each iteration.
// NOTE(review): inside the inner product, d_synapse[... + col] is read
// without a col < Nn guard — this is safe only because Nn (4096) is a
// multiple of BlockSize2D (16), so every launched col is in range. Confirm
// before changing the sizes.
__global__ void d_MatMul_simple2(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE neuron_i[BlockSize2D][BlockSize2D];
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < Ni; i += BlockSize2D) {
// Stage one tile of activations; zero-pad out-of-range slots so every
// thread still reaches both barriers.
if (row < BATCH_SIZE && i + threadIdx.x < Ni) {
neuron_i[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i + threadIdx.x];
}
else {
neuron_i[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron_i[threadIdx.y][j] * d_synapse[(j + i)* Nn + col];
}
__syncthreads();
}
// Write only in-range outputs.
if (col < Nn && row < BATCH_SIZE) {
d_neuron_n[row * Nn + col] = temp;
}
}
// Fully tiled fully-connected forward pass: both the activations and the
// weights are staged through BlockSize2D x BlockSize2D shared-memory tiles.
// Unlike d_MatMul_simple2, every tile load here is bounds-checked, so this
// version works for sizes not divisible by the tile width.
__global__ void d_MatMul_simple3(const VTYPE* d_neuron_i, VTYPE* d_neuron_n, const VTYPE* d_synapse) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ VTYPE synapse[BlockSize2D][BlockSize2D];
__shared__ VTYPE neuron[BlockSize2D][BlockSize2D];
// MxK = MxN * NxK
VTYPE temp = 0.0f;
#pragma unroll
for (int i = 0; i < (Ni - 1) / BlockSize2D + 1; i++) {
// Load one activation tile, zero-padding out-of-range slots so all
// threads reach the barriers.
if (row < BATCH_SIZE && i * BlockSize2D + threadIdx.x < Ni) {
neuron[threadIdx.y][threadIdx.x] = d_neuron_i[row * Ni + i * BlockSize2D + threadIdx.x];
}
else {
neuron[threadIdx.y][threadIdx.x] = 0.0f;
}
// Load one weight tile, also zero-padded.
if (i * BlockSize2D + threadIdx.y < Ni && col < Nn) {
synapse[threadIdx.y][threadIdx.x] = d_synapse[(i * BlockSize2D + threadIdx.y) * Nn + col];
}
else {
synapse[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BlockSize2D; j++) {
temp += neuron[threadIdx.y][j] * synapse[j][threadIdx.x];
}
__syncthreads();
}
if (row < BATCH_SIZE && col < Nn) {
d_neuron_n[row * Nn + col] = temp;
}
}
// Scratch kernel (never launched in main): scales each element of
// d_neuron_i by 1.1.
// NOTE(review): d_synapse is unused and there is no bounds guard on idx —
// this looks like leftover debugging code; add a size parameter and guard
// before reusing it.
__global__ void d_test(VTYPE* d_synapse, VTYPE* d_neuron_i) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_neuron_i[idx] *= 1.1f;
}
// Element-wise comparison of two BATCH_SIZE x Nn result buffers with an
// absolute tolerance of 1e-2. Prints every mismatching element and returns
// false if any mismatch was found.
bool compare(VTYPE* neuron1, VTYPE* neuron2) {
    bool match = true;
    for (int b = 0; b < BATCH_SIZE; b++) {
        for (int i = 0; i < Nn; i++) {
            const int idx = b * Nn + i;
            if (fabs(neuron1[idx] - neuron2[idx]) > 1e-2) {
                match = false;
                printf("At index (%d, %d) \t Host result: %lf \t Device result: %lf \n", b, i, neuron1[idx], neuron2[idx]);
            }
        }
    }
    return match;
}
// Benchmark driver: runs three GPU GEMM variants (naive, activation-tiled,
// fully tiled) for a Ni -> Nn fully-connected layer over a BATCH_SIZE batch,
// verifies each against a timed CPU reference, and prints sample outputs.
// Fix: the status messages after kernels 2 and 3 both said
// "MatMul_simple1:" — they now name the kernel that was actually launched.
int main()
{
    // Initialize arrays on host
    VTYPE* h_neuron_i = (VTYPE*)malloc(Ni * BATCH_SIZE *sizeof(VTYPE));
    VTYPE* h_neuron_n1 = (VTYPE*)malloc(Nn * BATCH_SIZE *sizeof(VTYPE));
    VTYPE* h_synapse = (VTYPE*)malloc(Nn * Ni * sizeof(VTYPE));
    VTYPE* h_neuron_n2 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
    VTYPE* h_neuron_n3 = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
    VTYPE* h_neuron_n = (VTYPE*)malloc(Nn * BATCH_SIZE * sizeof(VTYPE));
    init_layer(h_neuron_i, h_neuron_n, h_synapse);
    // Allocate memory on device
    VTYPE* d_neuron_i = NULL;
    VTYPE* d_neuron_n1 = NULL;
    VTYPE* d_neuron_n2 = NULL;
    VTYPE* d_neuron_n3 = NULL;
    VTYPE* d_synapse = NULL;
    VTYPE* test_var = NULL;
    cudaMalloc((void**)&d_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE));
    cudaMalloc((void**)&d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE));
    cudaMalloc((void**)&d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE));
    cudaMalloc((void**)&d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE));
    cudaMalloc((void**)&d_synapse, Nn * Ni * sizeof(VTYPE));
    cudaMalloc((void**)&test_var, sizeof(VTYPE));
    // Copy arrays from host to device
    cudaMemcpy(d_neuron_i, h_neuron_i, Ni * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyHostToDevice);
    cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
    cudaMemcpy(d_synapse, h_synapse, Nn * Ni * sizeof(VTYPE), cudaMemcpyHostToDevice);
    cout << "Copy from Host to Device: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Define kernel launch parameters: one thread per output element,
    // 16x16 blocks over an Nn x BATCH_SIZE output.
    dim3 ThreadsPerBlock2D = dim3(BlockSize2D, BlockSize2D);
    dim3 BlocksPerGrid2D = dim3((Nn + BlockSize2D - 1) / BlockSize2D, (BATCH_SIZE + BlockSize2D - 1) / BlockSize2D);
    // Launch kernel #1 (naive, no shared memory).
    d_MatMul_simple1<<<BlocksPerGrid2D, ThreadsPerBlock2D>>>(d_neuron_i, d_neuron_n1, d_synapse);
    cout << "MatMul_simple1: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Copy results from device back to host (blocking, so it also syncs).
    cudaMemcpy(h_neuron_n1, d_neuron_n1, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
    cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Launch kernel #2 (activations tiled in shared memory).
    d_MatMul_simple2<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n2, d_synapse);
    cout << "MatMul_simple2: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Copy results from device back to host
    cudaMemcpy(h_neuron_n2, d_neuron_n2, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
    cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Launch kernel #3 (both operands tiled in shared memory).
    d_MatMul_simple3<<<BlocksPerGrid2D, ThreadsPerBlock2D >>>(d_neuron_i, d_neuron_n3, d_synapse);
    cout << "MatMul_simple3: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Copy results from device back to host
    cudaMemcpy(h_neuron_n3, d_neuron_n3, Nn * BATCH_SIZE * sizeof(VTYPE), cudaMemcpyDeviceToHost);
    cout << "Copy from Device to Host: " << cudaGetErrorString(cudaGetLastError()) << endl;
    // Run and time the CPU reference implementation.
    clock_t begin = clock();
    for (int k = 0; k < BATCH_SIZE; k++) {
        for (int i = 0; i < Nn; i++) {
            VTYPE temp = 0.0f;
            for (int j = 0; j < Ni; j++) {
                temp += h_neuron_i[k * Ni + j] * h_synapse[i + Nn * j];
            }
            h_neuron_n[k * Nn + i] = temp;
        }
        /*
        * h_neuron_i 16 x 25088
        * h_synapse 4096 x 25088
        * h_neuron_n 16 x 4096
        */
    }
    double elapsed = ((double)clock() - (double)begin) / (double)CLOCKS_PER_SEC;
    printf("Took CPU %lf seconds to run\n", elapsed);
    // Compare host and device results
    if (compare(h_neuron_n, h_neuron_n1)) {
        printf("1 Passed!\n");
    }
    if (compare(h_neuron_n, h_neuron_n2)) {
        printf("2 Passed!\n");
    }
    if (compare(h_neuron_n, h_neuron_n3)) {
        printf("3 Passed!\n");
    }
    // Spot-check a few output values from each path.
    cout << "Host output[0][6:9]: ";
    for (int i = 6; i < 9; i++) {
        printf("%lf, ", h_neuron_n[i]);
    }
    cout << endl;
    cout << "Kernel1 output[0][6:9]: ";
    for (int i = 6; i < 9; i++) {
        printf("%lf, ", h_neuron_n1[i]);
    }
    cout << endl;
    cout << "Kernel2 output[0][6:9]: ";
    for (int i = 6; i < 9; i++) {
        printf("%lf, ", h_neuron_n2[i]);
    }
    cout << endl;
    cout << "Kernel3 output[0][6:9]: ";
    for (int i = 6; i < 9; i++) {
        printf("%lf, ", h_neuron_n3[i]);
    }
    cout << endl;
    // Free up memory
    cudaFree(d_neuron_i);
    cudaFree(d_neuron_n1);
    cudaFree(d_neuron_n2);
    cudaFree(d_neuron_n3);
    cudaFree(d_synapse);
    cudaFree(test_var);
    free(h_neuron_i);
    free(h_neuron_n);
    free(h_synapse);
    free(h_neuron_n1);
    free(h_neuron_n2);
    free(h_neuron_n3);
    cout << "done\n";
    return 0;
}
|
12,435 | /*! example.cu
*
* Example to compute Jacobi iterations for specific size of grid discretization.
*
* \author Matthew McGonagle
*/
#include <iostream>
#include "jacobi.cuh"
#include <cuda_runtime.h>
#include <cmath>
#include <string>
//! Harmonic test function used for both boundary values and the exact
//! solution: Re((z - 0.5 - 0.5i)^5) with z = x + iy, i.e.
//! u^5 - 10 u^3 v^2 + 5 u v^4 for u = x - 0.5, v = y - 0.5.
__host__
float getHarmonic(float x, float y)
{
    const float u = x - 0.5;
    const float v = y - 0.5;
    return pow(u, 5) - 10 * pow(u, 3) * pow(v, 2) + 5 * pow(u, 1) * pow(v, 4);
}
/*! Compute order of N^2 Jacobi iterations for harmonic solution on xy unit square for boundary values where
* we divide the square into an NxN grid; save the results to file.
*
* The number N is sent to the executable as a string and as the first and only parameter. The default value
* is 20 if no parameter is given. Also we require N > 1.
*/
// Driver: solves the Laplace equation on the unit square with Jacobi
// iterations on an N x N grid (N from argv[1], default 20), compares the
// result against the exact harmonic solution, and writes values/errors to
// the data/ directory.
int main(int argc, char * argv[])
{
// First get the dimensions from command line arguments.
int N;
if(argc < 2) // default vale for no parameters.
N = 20;
else {
N = std::stoi(argv[1]);
if( N < 2) // Use default for not good values of N.
N = 20;
}
int nIterations = 3 * N * N, // For good convergence the number of iterations is of the same order as gridsize.
dimensions[2] = {N, N}, // The dimensions of the grid to approximate PDE (not the CUDA execution grid).
nThreads = N / 10 + 1, // Number of CUDA threads per CUDA block dimension.
memSize = dimensions[0] * dimensions[1] * sizeof(float);
const float lowerLeft[2] = {0, 0}, // Lower left coordinate of rectangular domain.
upperRight[2] = {1, 1}; // Upper right coordinate of rectangular domain.
//! We use flat arrays, because CUDA uses flat arrays.
float * values, * trueValues, * in, * out, * errors, * relErrors;
const dim3 blockSize( nThreads , nThreads), // The size of CUDA block of threads.
gridSize( (dimensions[0] + nThreads - 1) / nThreads, (dimensions[1] + nThreads - 1) / nThreads);
/* The number of blocks in CUDA execution grid; make sure there is atleast enough
* threads to have one for each point in our differential equation discretization grid.
* There be extra threads that are unnecessary.
*/
std::cout << "Making initial values and true values" << std::endl;
// Initial values includes boundary values.
values = makeInitialValues( dimensions, lowerLeft, upperRight, & getHarmonic );
// Find the true values of harmonic function using the boundary values function.
trueValues = makeTrueValues( dimensions, lowerLeft, upperRight, & getHarmonic );
std::cout << "Before Average Error = "
<< getAverageError(values, trueValues, dimensions) //dimensions[0], dimensions[1])
<< std::endl;
// Need to copy values from host to CUDA device.
std::cout << "Copying to Device" << std::endl;
try
{
copyToDevice(values, dimensions, &in, &out);
}
catch( ... )
{
// NOTE(review): execution continues after this catch — if copyToDevice
// threw, `in`/`out` may be invalid when the kernel loop below runs.
// Consider returning a nonzero exit code here instead.
std::cout << "Exception happened while copying to device" << std::endl;
}
// At end of loop, output is inside pointer *in (because of the final swap).
std::cout << "Doing Jacobi Iterations" << std::endl;
for( int i = 0; i < nIterations; i++)
{
// Call CUDA device kernel to a Jacobi iteration.
doJacobiIteration<<< gridSize, blockSize >>>(dimensions[0], dimensions[1], in, out);
cudaDeviceSynchronize();
if(cudaGetLastError() != cudaSuccess)
{
std::cout << "Error Launching Kernel" << std::endl;
return 1;
}
// Ping-pong the buffers: the freshly written `out` becomes next `in`.
std::swap(in, out);
}
// Get the result from the CUDA device.
std::cout << "Copying result to values" << std::endl;
if(cudaMemcpy( values, in, memSize, cudaMemcpyDeviceToHost ) != cudaSuccess)
{
std::cout << "There was a problem retrieving the result from the device" << std::endl;
return 1;
}
// Now compute errors and save important data to file.
std::cout << "Copying to file 'values.dat'" << std::endl;
saveToFile( values, dimensions, lowerLeft, upperRight, "data/values.dat");
std::cout << "Now getting errors" << std::endl;
errors = getErrors(values, trueValues, dimensions);
saveToFile( errors, dimensions, lowerLeft, upperRight, "data/errors.dat");
std::cout << "After Average Error = "
<< getAverageError(values, trueValues, dimensions)
<< std::endl;
std::cout << "Now getting relative errors" << std::endl;
relErrors = getRelativeErrors(errors, trueValues, dimensions);
saveToFile( relErrors, dimensions, lowerLeft, upperRight, "data/log10RelErrors.dat");
// Clean up memory.
cudaFree(in); // First clean up on CUDA device.
cudaFree(out);
delete[] values; // Now clean up on host.
delete[] errors;
delete[] relErrors;
delete[] trueValues;
return 0;
}
|
12,436 |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
/* Pass-through CUDA error check: in DEBUG/_DEBUG builds, prints the error
 * string to stderr and exits on any failure; in release builds it simply
 * forwards the result unchanged. */
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
// 3x3 Sobel edge detector with a fixed 128 threshold: writes a binarized
// (0/255) edge map. Output is written transposed (indexed [x][y]) relative
// to the input (indexed [y][x]), matching the host wrapper transpose_img.
// Fix: process interior pixels only — the old guard (x < width, y < height)
// still read input[y-1][x-1] .. input[y+1][x+1] at the borders, running off
// the start and end of the buffer. Border output pixels stay 0 (the host
// memsets output_gpu before the launch).
__global__ void kernel(unsigned char *input,
                       unsigned char *output,
                       unsigned int height,
                       unsigned int width){
    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    // Skip the one-pixel border so every 3x3 neighborhood is in range.
    if (x < 1 || y < 1 || x >= (int)width - 1 || y >= (int)height - 1)
        return;
    // http://homepages.inf.ed.ac.uk/rbf/HIPR2/sobel.htm
    const int gX[3][3] = {{-1,0,1},{-2,0,2},{-1,0,1}};
    const int gY[3][3] = {{-1,-2,-1},{0,0,0},{1,2,1}};
    double x_sum = 0.0;
    double y_sum = 0.0;
    for (int j = -1; j < 2; j++) {
        for (int i = -1; i < 2; i++) {
            int index = width * (j + y) + i + x;
            x_sum += input[index] * gX[j+1][i+1];
            y_sum += input[index] * gY[j+1][i+1];
        }
    }
    double magnitude = sqrt(x_sum * x_sum + y_sum * y_sum);
    // Binarize: gradient magnitudes of 128 or more become edge pixels.
    output[x*height+y] = (magnitude < 128) ? 0 : 255;
}
// Host wrapper: uploads a height x width grayscale image, runs the Sobel
// kernel over it, and copies the (transposed) edge map back into out_mat.
// NOTE(review): the device buffers are padded up to a multiple of TILE_SIZE
// in each dimension, but the H2D copy only fills height*width bytes, so the
// padding of input_gpu is uninitialized — the kernel's bounds check must
// keep threads out of it. Also `sizeof(char)` vs `sizeof(unsigned char)` is
// the same size but inconsistent with the buffer type.
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
// Grid dimensions rounded up to cover the whole image.
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory (output zeroed so untouched border/padding
// pixels read back as 0).
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
12,437 | #include <algorithm>
#include <cassert>
#include <cfloat>
#include <cub/cub.cuh>
#include <curand.h>
#include <iomanip>
#include <iostream>
#include <limits>
#include <math.h>
#include <stdio.h>
#include <string>
#include <tuple>
#include <vector>
#define CUDA_CHECK(callstr) {cudaError_t error_code = callstr; if (error_code != cudaSuccess) { std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; assert(0); } }
#define CURAND_CHECK(callstr) {curandStatus_t error_code = callstr; if (error_code != CURAND_STATUS_SUCCESS) { std::cerr << "cuRAND error " << error_code << " at " << __FILE__ << ":" << __LINE__; assert(0); } }
const int MAX_K=5;
enum SOFTMAX_TYPE
{
SOFTMAX_TYPE_NAIVE,
SOFTMAX_TYPE_SAFE,
SOFTMAX_TYPE_ONLINE
};
enum SOFTMAX_TOPK_TYPE
{
SOFTMAX_TOPK_TYPE_TOPK_ONLY,
SOFTMAX_TOPK_TYPE_SAFE_UNFUSED,
SOFTMAX_TOPK_TYPE_SAFE_FUSED,
SOFTMAX_TOPK_TYPE_ONLINE_FUSED
};
// Human-readable label for a SOFTMAX_TYPE value; asserts on unknown values
// and returns "" only when assertions are disabled.
std::string getSoftmaxTypeName(SOFTMAX_TYPE t)
{
    switch (t)
    {
        case SOFTMAX_TYPE_NAIVE:  return "Naive Softmax";
        case SOFTMAX_TYPE_SAFE:   return "Safe Softmax";
        case SOFTMAX_TYPE_ONLINE: return "Online Softmax";
        default:
            assert(0);
            break;
    }
    return "";
}
// Human-readable label for a SOFTMAX_TOPK_TYPE value; asserts on unknown
// values and returns "" only when assertions are disabled.
std::string getSoftmaxTopkTypeName(SOFTMAX_TOPK_TYPE t)
{
    switch (t)
    {
        case SOFTMAX_TOPK_TYPE_TOPK_ONLY:    return "TopK";
        case SOFTMAX_TOPK_TYPE_SAFE_UNFUSED: return "Safe Softmax + TopK unfused";
        case SOFTMAX_TOPK_TYPE_SAFE_FUSED:   return "Safe Softmax + TopK fused";
        case SOFTMAX_TOPK_TYPE_ONLINE_FUSED: return "Online Softmax + TopK fused";
        default:
            assert(0);
            break;
    }
    return "";
}
// Naive softmax: y[i] = exp(x[i]) / sum_j exp(x[j]), with no max subtraction,
// so large inputs can overflow exp. One thread block handles one row vector of
// length V; launch with grid = number of vectors, block = THREADBLOCK_SIZE.
template<int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void naive_softmax(
const float * __restrict x,
float * __restrict y,
int V)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition x and y to data for the current vector
x += vector_id * V;
y += vector_id * V;
typedef cub::BlockReduce<float, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ float d_total_inverse;
// Each thread accumulates a partial sum of exponentials over a strided slice.
float d_partial = 0.0F;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
d_partial += __expf(x[elem_id]);
// Block-wide reduction; the result is only valid in thread 0, which
// publishes the reciprocal through shared memory.
float d = BlockReduce(temp_storage).Sum(d_partial);
if (thread_id == 0)
d_total_inverse = __fdividef(1.0F, d);
__syncthreads();
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
y[elem_id] = __expf(x[elem_id]) * d_total_inverse;
}
// Binary maximum functor, used as the reduction operator for cub::BlockReduce.
__device__ __forceinline__ float max_op(float lhs, float rhs)
{
return fmaxf(lhs, rhs);
}
// Numerically safe softmax: subtracts the row maximum before exponentiating.
// Three passes over the data (max, sum, normalize). One block per row vector.
template<int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void safe_softmax(
const float * __restrict x,
float * __restrict y,
int V)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition x and y to data for the current vector
x += vector_id * V;
y += vector_id * V;
typedef cub::BlockReduce<float, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ float m_total;
__shared__ float d_total_inverse;
// Pass 1: block-wide maximum of the row.
float m_partial = -FLT_MAX;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
m_partial = max_op(m_partial, x[elem_id]);
float m = BlockReduce(temp_storage).Reduce(m_partial, max_op);
if (thread_id == 0)
m_total = m;
__syncthreads();
// Pass 2: block-wide sum of exp(x - max).
float d_partial = 0.0F;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
d_partial += __expf(x[elem_id] - m_total);
float d = BlockReduce(temp_storage).Sum(d_partial);
if (thread_id == 0)
d_total_inverse = __fdividef(1.0F, d);
__syncthreads();
// Pass 3: normalize.
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
y[elem_id] = __expf(x[elem_id] - m_total) * d_total_inverse;
}
// Running (max, denominator) pair for the online softmax recurrence:
// m is the running maximum, d the running sum of exp(x - m).
// 8-byte aligned so it can be moved as a single 64-bit value.
struct __align__(8) MD
{
float m;
float d;
};
// Associative combine for two MD partials: keep the larger maximum and
// rescale the smaller side's denominator by exp(m_small - m_big) before
// adding, so both denominators are expressed relative to the same maximum.
__device__ __forceinline__ MD reduce_md_op(MD a, MD b)
{
bool a_bigger = (a.m > b.m);
MD bigger_m = a_bigger ? a : b;
MD smaller_m = a_bigger ? b : a;
MD res;
res.d = bigger_m.d + smaller_m.d * __expf(smaller_m.m - bigger_m.m);
res.m = bigger_m.m;
return res;
}
// Online softmax: computes the row maximum and the exp-sum in a SINGLE pass
// using the MD recurrence, then normalizes in a second pass. One block per
// row vector of length V.
template<int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void online_softmax(
const float * __restrict x,
float * __restrict y,
int V)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition x and y to data for the current vector
x += vector_id * V;
y += vector_id * V;
typedef cub::BlockReduce<MD, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ MD md_total;
// Identity element of the MD reduction: max = -inf, denominator = 0.
MD md_partial;
md_partial.m = -FLT_MAX;
md_partial.d = 0.0F;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
{
// A single element contributes (m = x, d = 1) == exp(x - x).
MD new_elem;
new_elem.m = x[elem_id];
new_elem.d = 1.0F;
md_partial = reduce_md_op(md_partial, new_elem);
}
MD md = BlockReduce(temp_storage).Reduce(md_partial, reduce_md_op);
if (thread_id == 0)
md_total = md;
__syncthreads();
float d_total_inverse = __fdividef(1.0F, md_total.d);
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
y[elem_id] = __expf(x[elem_id] - md_total.m) * d_total_inverse;
}
template<int MAX_K>
struct TopK
{
int p[MAX_K];
float u[MAX_K];
__device__ __forceinline__ void insert(float elem, int elem_id)
{
if (elem > u[MAX_K-1])
{
u[MAX_K-1] = elem;
p[MAX_K-1] = elem_id;
}
for(int k = MAX_K - 2; k >= 0; --k)
{
if (u[k+1] > u[k])
{
float u2 = u[k];
int p2 = p[k];
u[k] = u[k+1];
p[k] = p[k+1];
u[k+1] = u2;
p[k+1] = p2;
}
}
}
};
// Merges two top-K partials by inserting every candidate from b into a copy
// of a; used as the reduction operator for cub::BlockReduce.
template<int MAX_K>
__device__ __forceinline__ TopK<MAX_K> reduce_topk_op(const TopK<MAX_K>& a, const TopK<MAX_K>& b)
{
TopK<MAX_K> merged = a;
for(int k = 0; k < MAX_K; ++k)
merged.insert(b.u[k], b.p[k]);
return merged;
}
// Finds the K largest values (and their indices) of each row vector y of
// length V. One block per vector; writes K indices to z and K values to v.
// K must be <= MAX_K (compile-time capacity of the accumulator).
template<int MAX_K, int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void topk(
const float * __restrict y,
int * __restrict z,
float * __restrict v,
int V,
int K)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition y to data for the current vector
y += vector_id * V;
typedef cub::BlockReduce<TopK<MAX_K>, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
// Per-thread partial: indices -1, values -FLT_MAX ("empty").
TopK<MAX_K> partial;
for(int i = 0; i < MAX_K; ++i)
partial.p[i] = -1;
for(int i = 0; i < MAX_K; ++i)
partial.u[i] = -FLT_MAX;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
{
float elem = y[elem_id];
partial.insert(elem, elem_id);
}
TopK<MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<MAX_K>);
// Thread 0 holds the block-wide result and writes the first K entries.
if (thread_id == 0)
{
z += vector_id * K;
v += vector_id * K;
for(int i = 0; i < MAX_K; ++i)
{
if (i < K)
{
z[i] = total.p[i];
v[i] = total.u[i];
}
}
}
}
// Combined reduction state for fused safe-softmax + top-K:
// d is the partial sum of exp(x - max), topk the partial top-K candidates.
template<int MAX_K>
struct TopKD
{
float d;
TopK<MAX_K> topk;
};
// Combine for TopKD partials: denominators add, top-K sets merge.
template<int MAX_K>
__device__ __forceinline__ TopKD<MAX_K> reduce_topk_d_op(const TopKD<MAX_K>& a, const TopKD<MAX_K>& b)
{
TopKD<MAX_K> res;
res.d = a.d + b.d;
res.topk = reduce_topk_op(a.topk, b.topk);
return res;
}
// Fused safe-softmax + top-K: computes the row max (pass 1), then in a single
// second pass accumulates both the softmax denominator and the top-K raw
// values; only the K winners are actually normalized. One block per vector.
template<int MAX_K, int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void safe_softmax_topk(
const float * __restrict x,
int * __restrict z,
float * __restrict v,
int V,
int K)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition y to data for the current vector
x += vector_id * V;
typedef cub::BlockReduce<float, THREADBLOCK_SIZE> MaxValBlockReduce;
typedef cub::BlockReduce<TopKD<MAX_K>, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_temp_storage;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ float m_total;
// Pass 1: block-wide maximum.
float m_partial = -FLT_MAX;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
m_partial = max_op(m_partial, x[elem_id]);
float m = MaxValBlockReduce(max_val_temp_storage).Reduce(m_partial, max_op);
if (thread_id == 0)
m_total = m;
__syncthreads();
// Pass 2: jointly accumulate exp-sum and top-K candidates.
TopKD<MAX_K> partial;
for(int i = 0; i < MAX_K; ++i)
partial.topk.p[i] = -1;
for(int i = 0; i < MAX_K; ++i)
partial.topk.u[i] = -FLT_MAX;
partial.d = 0.0F;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
{
float elem = x[elem_id];
partial.d += __expf(elem - m_total);
partial.topk.insert(elem, elem_id);
}
TopKD<MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_d_op<MAX_K>);
if (thread_id == 0)
{
z += vector_id * K;
v += vector_id * K;
float d_total_inverse = __fdividef(1.0F, total.d);
// Normalize only the K winning values on output.
for(int i = 0; i < MAX_K; ++i)
{
float val = __expf(total.topk.u[i] - m_total) * d_total_inverse;
if (i < K)
{
z[i] = total.topk.p[i];
v[i] = val;
}
}
}
}
// Combined reduction state for fused online-softmax + top-K:
// md is the running (max, denominator) pair, topk the partial top-K set.
template<int MAX_K>
struct TopKMD
{
MD md;
TopK<MAX_K> topk;
};
// Combine for TopKMD partials: MD parts merge via the online-softmax
// recurrence, top-K sets merge via insertion.
template<int MAX_K>
__device__ __forceinline__ TopKMD<MAX_K> reduce_topk_md_op(const TopKMD<MAX_K>& a, const TopKMD<MAX_K>& b)
{
TopKMD<MAX_K> res;
res.md = reduce_md_op(a.md, b.md);
res.topk = reduce_topk_op(a.topk, b.topk);
return res;
}
// Fused online-softmax + top-K: a SINGLE pass over the data accumulates the
// running max, the softmax denominator, and the top-K raw values together;
// only the K winners are normalized at the end. One block per vector.
template<int MAX_K, int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void online_softmax_topk(
const float * __restrict x,
int * __restrict z,
float * __restrict v,
int V,
int K)
{
int thread_id = threadIdx.x;
int vector_id = blockIdx.x;
// reposition y to data for the current vector
x += vector_id * V;
typedef cub::BlockReduce<TopKMD<MAX_K>, THREADBLOCK_SIZE> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
// Identity: empty top-K set, max = -inf, denominator = 0.
TopKMD<MAX_K> partial;
for(int i = 0; i < MAX_K; ++i)
partial.topk.p[i] = -1;
for(int i = 0; i < MAX_K; ++i)
partial.topk.u[i] = -FLT_MAX;
partial.md.m = -FLT_MAX;
partial.md.d = 0.0F;
for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
{
float elem = x[elem_id];
MD new_elem{elem, 1.0F};
partial.md = reduce_md_op(partial.md, new_elem);
partial.topk.insert(elem, elem_id);
}
TopKMD<MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op<MAX_K>);
if (thread_id == 0)
{
z += vector_id * K;
v += vector_id * K;
float d_total_inverse = __fdividef(1.0F, total.md.d);
for(int i = 0; i < MAX_K; ++i)
{
float val = __expf(total.topk.u[i] - total.md.m) * d_total_inverse;
if (i < K)
{
z[i] = total.topk.p[i];
v[i] = val;
}
}
}
}
// Fills `count` floats of device memory at x with uniform random values in
// (0, 1]. Uses a fixed seed so every run (and every variant comparison) sees
// identical input data.
void fill_random_values(float * x, int count)
{
curandGenerator_t gen;
CURAND_CHECK(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL));
CURAND_CHECK(curandGenerateUniform(gen, x, count));
CURAND_CHECK(curandDestroyGenerator(gen));
}
// Runs the selected softmax variant on batch_size random row vectors of
// length V and returns the results copied back to the host.
// Fix: the original leaked both device buffers (no cudaFree) and never
// checked the kernel launch for errors.
std::vector<float> run_softmax(int V, int batch_size, SOFTMAX_TYPE t)
{
float * x;
float * y;
CUDA_CHECK(cudaMalloc(&x, (size_t)V * batch_size * sizeof(float)));
fill_random_values(x, V * batch_size);
CUDA_CHECK(cudaMalloc(&y, (size_t)V * batch_size * sizeof(float)));
switch (t)
{
case SOFTMAX_TYPE_NAIVE:
naive_softmax<256><<<batch_size,256>>>(x, y, V);
break;
case SOFTMAX_TYPE_SAFE:
safe_softmax<256><<<batch_size,256>>>(x, y, V);
break;
case SOFTMAX_TYPE_ONLINE:
online_softmax<256><<<batch_size,256>>>(x, y, V);
break;
default:
assert(0);
}
// Catch launch-configuration errors; the blocking cudaMemcpy below
// synchronizes and surfaces any asynchronous execution errors.
CUDA_CHECK(cudaGetLastError());
std::vector<float> res(V * batch_size);
CUDA_CHECK(cudaMemcpy(&res[0], y, V * batch_size * sizeof(float), cudaMemcpyDeviceToHost));
// Release the device buffers (previously leaked).
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
return res;
}
// Runs two softmax variants on identical random input (fill_random_values is
// deterministically seeded) and prints the max and average element-wise
// absolute difference between their outputs.
void compare_softmax_results(int V, int batch_size, SOFTMAX_TYPE t1, SOFTMAX_TYPE t2)
{
std::vector<float> lhs = run_softmax(V, batch_size, t1);
std::vector<float> rhs = run_softmax(V, batch_size, t2);
float max_diff = 0.0F;
// Accumulate the total in double to limit rounding error over many elements.
double total_diff = 0.0;
for(size_t i = 0; i < lhs.size(); ++i)
{
float diff = fabs(lhs[i] - rhs[i]);
if (diff > max_diff)
max_diff = diff;
total_diff += diff;
}
std::cout << "Comparing " << getSoftmaxTypeName(t1) << " and " << getSoftmaxTypeName(t2)
<< ": Max diff = " << max_diff << ", Avg diff = " << (float)(total_diff / lhs.size()) << std::endl;
}
// Returns runtime, in seconds
// Times `run_iterations` launches of the selected softmax variant on random
// data using CUDA events, and returns the average per-launch time. The block
// size is chosen so each thread iterates its strided loop at least ~8 times.
float benchmark_softmax(int V, int batch_size, SOFTMAX_TYPE t, int run_iterations)
{
float * x;
float * y;
CUDA_CHECK(cudaMalloc(&x, (size_t)V * batch_size * sizeof(float)));
fill_random_values(x, V * batch_size);
CUDA_CHECK(cudaMalloc(&y, (size_t)V * batch_size * sizeof(float)));
// Heuristic to have at least 8 iterations of the loop
int max_threadblock_size = V / 8;
cudaEvent_t start, stop;
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
CUDA_CHECK(cudaEventRecord(start, 0));
for(int i = 0; i < run_iterations; ++i)
{
// Dispatch on variant, then pick the largest power-of-two block size
// (32..256) satisfying the iteration heuristic above.
switch (t)
{
case SOFTMAX_TYPE_NAIVE:
if (max_threadblock_size >= 256)
naive_softmax<256><<<batch_size,256>>>(x, y, V);
else if (max_threadblock_size >= 128)
naive_softmax<128><<<batch_size,128>>>(x, y, V);
else if (max_threadblock_size >= 64)
naive_softmax<64><<<batch_size,64>>>(x, y, V);
else
naive_softmax<32><<<batch_size,32>>>(x, y, V);
break;
case SOFTMAX_TYPE_SAFE:
if (max_threadblock_size >= 256)
safe_softmax<256><<<batch_size,256>>>(x, y, V);
else if (max_threadblock_size >= 128)
safe_softmax<128><<<batch_size,128>>>(x, y, V);
else if (max_threadblock_size >= 64)
safe_softmax<64><<<batch_size,64>>>(x, y, V);
else
safe_softmax<32><<<batch_size,32>>>(x, y, V);
break;
case SOFTMAX_TYPE_ONLINE:
if (max_threadblock_size >= 256)
online_softmax<256><<<batch_size,256>>>(x, y, V);
else if (max_threadblock_size >= 128)
online_softmax<128><<<batch_size,128>>>(x, y, V);
else if (max_threadblock_size >= 64)
online_softmax<64><<<batch_size,64>>>(x, y, V);
else
online_softmax<32><<<batch_size,32>>>(x, y, V);
break;
default:
assert(0);
}
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
float elapsedTime;
// cudaEventElapsedTime reports milliseconds; convert to seconds below.
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK(cudaEventDestroy(start));
CUDA_CHECK(cudaEventDestroy(stop));
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
return elapsedTime / run_iterations * 0.001F;
}
// Returns runtime, in seconds
// Times `run_iterations` launches of the selected softmax+top-K variant and
// returns the average per-launch time (for the unfused variant this covers
// both kernels). The block size is chosen so each thread iterates its strided
// loop at least ~16 times.
float benchmark_softmax_topk(int V, int K, int batch_size, SOFTMAX_TOPK_TYPE t, int run_iterations)
{
assert(K<=MAX_K);
float * x;
float * y;
int * z;
float * v;
CUDA_CHECK(cudaMalloc(&x, (size_t)V * batch_size * sizeof(float)));
fill_random_values(x, V * batch_size);
// y doubles as the softmax output buffer (unfused path) and as standalone
// random input for the TopK-only path, so it is pre-filled too.
CUDA_CHECK(cudaMalloc(&y, (size_t)V * batch_size * sizeof(float)));
fill_random_values(y, V * batch_size);
CUDA_CHECK(cudaMalloc(&z, (size_t)K * batch_size * sizeof(int)));
CUDA_CHECK(cudaMalloc(&v, (size_t)K * batch_size * sizeof(float)));
// Heuristic to have at least 16 iterations of the loop
int max_threadblock_size = V / 16;
cudaEvent_t start, stop;
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
CUDA_CHECK(cudaEventRecord(start, 0));
for(int i = 0; i < run_iterations; ++i)
{
switch (t)
{
case SOFTMAX_TOPK_TYPE_TOPK_ONLY:
if (max_threadblock_size >= 256)
topk<MAX_K,256><<<batch_size,256>>>(y, z, v, V, K);
else if (max_threadblock_size >= 128)
topk<MAX_K,128><<<batch_size,128>>>(y, z, v, V, K);
else if (max_threadblock_size >= 64)
topk<MAX_K,64><<<batch_size,64>>>(y, z, v, V, K);
else
topk<MAX_K,32><<<batch_size,32>>>(y, z, v, V, K);
break;
case SOFTMAX_TOPK_TYPE_SAFE_UNFUSED:
// Two back-to-back launches: softmax into y, then top-K over y.
if (max_threadblock_size >= 256)
{
safe_softmax<256><<<batch_size,256>>>(x, y, V);
topk<MAX_K,256><<<batch_size,256>>>(y, z, v, V, K);
}
else if (max_threadblock_size >= 128)
{
safe_softmax<128><<<batch_size,128>>>(x, y, V);
topk<MAX_K,128><<<batch_size,128>>>(y, z, v, V, K);
}
else if (max_threadblock_size >= 64)
{
safe_softmax<64><<<batch_size,64>>>(x, y, V);
topk<MAX_K,64><<<batch_size,64>>>(y, z, v, V, K);
}
else
{
safe_softmax<32><<<batch_size,32>>>(x, y, V);
topk<MAX_K,32><<<batch_size,32>>>(y, z, v, V, K);
}
break;
case SOFTMAX_TOPK_TYPE_SAFE_FUSED:
if (max_threadblock_size >= 256)
safe_softmax_topk<MAX_K,256><<<batch_size,256>>>(x, z, v, V, K);
else if (max_threadblock_size >= 128)
safe_softmax_topk<MAX_K,128><<<batch_size,128>>>(x, z, v, V, K);
else if (max_threadblock_size >= 64)
safe_softmax_topk<MAX_K,64><<<batch_size,64>>>(x, z, v, V, K);
else
safe_softmax_topk<MAX_K,32><<<batch_size,32>>>(x, z, v, V, K);
break;
case SOFTMAX_TOPK_TYPE_ONLINE_FUSED:
if (max_threadblock_size >= 256)
online_softmax_topk<MAX_K,256><<<batch_size,256>>>(x, z, v, V, K);
else if (max_threadblock_size >= 128)
online_softmax_topk<MAX_K,128><<<batch_size,128>>>(x, z, v, V, K);
else if (max_threadblock_size >= 64)
online_softmax_topk<MAX_K,64><<<batch_size,64>>>(x, z, v, V, K);
else
online_softmax_topk<MAX_K,32><<<batch_size,32>>>(x, z, v, V, K);
break;
default:
assert(0);
}
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
float elapsedTime;
// Milliseconds from cudaEventElapsedTime; converted to seconds on return.
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
CUDA_CHECK(cudaEventDestroy(start));
CUDA_CHECK(cudaEventDestroy(stop));
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
CUDA_CHECK(cudaFree(z));
CUDA_CHECK(cudaFree(v));
return elapsedTime / run_iterations * 0.001F;
}
// Runs the selected top-K (or fused softmax+top-K) variant on random input
// and returns (input data, top-K indices, top-K values) copied to the host.
// The input is returned so callers can compute a CPU reference from it.
std::tuple<std::vector<float>,std::vector<int>,std::vector<float>> run_topk(int V, int K, int batch_size, SOFTMAX_TOPK_TYPE t)
{
assert(K<=MAX_K);
float * y;
int * z;
float * v;
CUDA_CHECK(cudaMalloc(&y, (size_t)V * batch_size * sizeof(float)));
fill_random_values(y, V * batch_size);
CUDA_CHECK(cudaMalloc(&z, (size_t)K * batch_size * sizeof(int)));
CUDA_CHECK(cudaMalloc(&v, (size_t)K * batch_size * sizeof(float)));
switch (t)
{
case SOFTMAX_TOPK_TYPE_TOPK_ONLY:
topk<MAX_K,256><<<batch_size,256>>>(y, z, v, V, K);
break;
case SOFTMAX_TOPK_TYPE_SAFE_FUSED:
safe_softmax_topk<MAX_K,256><<<batch_size,256>>>(y, z, v, V, K);
break;
case SOFTMAX_TOPK_TYPE_ONLINE_FUSED:
online_softmax_topk<MAX_K,256><<<batch_size,256>>>(y, z, v, V, K);
break;
default:
assert(0);
}
// The blocking copies below synchronize with the kernel launch.
std::vector<float> yh(V * batch_size);
std::vector<int> zh(K * batch_size);
std::vector<float> vh(K * batch_size);
CUDA_CHECK(cudaMemcpy(&yh[0], y, (size_t)V * batch_size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(&zh[0], z, (size_t)K * batch_size * sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(&vh[0], v, (size_t)K * batch_size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(y));
CUDA_CHECK(cudaFree(z));
CUDA_CHECK(cudaFree(v));
return std::make_tuple(yh, zh, vh);
}
// Validates GPU top-K output against a CPU reference: for each vector,
// partially sorts (value desc, index asc for ties) and requires exact
// index/value agreement, printing every mismatch and a final count.
void compare_topk_results(int V, int K, int batch_size, SOFTMAX_TOPK_TYPE t)
{
std::vector<float> yh;
std::vector<int> zh;
std::vector<float> vh;
std::tie(yh, zh, vh) = run_topk(V, K, batch_size, t);
auto y = yh.begin();
auto z = zh.begin();
auto v = vh.begin();
int mismatches = 0;
// Iterators advance by one vector's worth of data per batch element.
for(int i = 0; i < batch_size; ++i, y += V, z += K, v += K)
{
std::vector<std::pair<float,int>> elemsWithIndices;
for(int j = 0; j < V; ++j)
elemsWithIndices.push_back(std::make_pair(*(y+j), j));
// Only the first K entries need to be ordered.
std::partial_sort(elemsWithIndices.begin(), elemsWithIndices.begin() + K, elemsWithIndices.end(),
[] (const std::pair<float,int>& a, const std::pair<float,int>& b) { if (a.first > b.first) return true; if (a.first < b.first) return false; return a.second < b.second; });
for(int j = 0; j < K; ++j)
{
if ((*(z+j) != elemsWithIndices[j].second) || (*(v+j) != elemsWithIndices[j].first))
{
std::cout << getSoftmaxTopkTypeName(t) << " mismatch for vector " << i << ", reference (" << elemsWithIndices[j].second << "," << elemsWithIndices[j].first
<< "), GPU (" << *(z+j) << "," << *(v+j) << ")" << std::endl;
++mismatches;
}
}
}
std::cout << getSoftmaxTopkTypeName(t) << ": " << mismatches << " mismatches" << std::endl;
}
// Validates fused softmax+top-K GPU output against a CPU reference: computes
// a safe softmax on the host, partially sorts it (value desc, index asc for
// ties), requires exact index agreement, and reports max/avg value error.
void compare_softmax_topk_results(int V, int K, int batch_size, SOFTMAX_TOPK_TYPE t)
{
std::vector<float> xh;
std::vector<int> zh;
std::vector<float> vh;
std::tie(xh, zh, vh) = run_topk(V, K, batch_size, t);
auto x = xh.begin();
auto z = zh.begin();
auto v = vh.begin();
int mismatches = 0;
float max_diff = 0.0F;
double total_diff = 0.0F;
for(int i = 0; i < batch_size; ++i, x += V, z += K, v += K)
{
// Compute reference softmax
// Fix: the running maximum must start at -FLT_MAX, not 0; initializing at
// 0 produced a wrong reference whenever every element was negative.
float m = -FLT_MAX;
for(int j = 0; j < V; ++j)
m = std::max(m, *(x+j));
float d = 0.0F;
for(int j = 0; j < V; ++j)
d += expf(*(x+j) - m);
for(int j = 0; j < V; ++j)
*(x+j) = expf(*(x+j) - m) / d;
std::vector<std::pair<float,int>> elemsWithIndices;
for(int j = 0; j < V; ++j)
elemsWithIndices.push_back(std::make_pair(*(x+j), j));
// Only the first K entries need to be ordered.
std::partial_sort(elemsWithIndices.begin(), elemsWithIndices.begin() + K, elemsWithIndices.end(),
[] (const std::pair<float,int>& a, const std::pair<float,int>& b) { if (a.first > b.first) return true; if (a.first < b.first) return false; return a.second < b.second; });
for(int j = 0; j < K; ++j)
{
// Values are compared with a tolerance (accumulated diff); indices must
// match exactly.
float diff = fabs(*(v+j) - elemsWithIndices[j].first);
max_diff = std::max(max_diff, diff);
total_diff += diff;
if (*(z+j) != elemsWithIndices[j].second)
{
std::cout << getSoftmaxTopkTypeName(t) << " mismatch for vector " << i << ", reference (" << elemsWithIndices[j].second << "," << elemsWithIndices[j].first
<< "), GPU (" << *(z+j) << "," << *(v+j) << ")" << std::endl;
++mismatches;
}
}
}
std::cout << getSoftmaxTopkTypeName(t) << ": " << mismatches << " mismatches, comparing to CPU reference implementation: Max diff = " << max_diff << ", Avg diff = " << (float)(total_diff / (batch_size * K)) << std::endl;
}
// Prints a throughput table (elements/second) for every softmax and
// softmax+top-K variant, doubling V from start_V up to end_V. Iteration
// counts are scaled inversely with V (anchored at the geometric mean of the
// V range) so each cell does comparable total work, floored at
// min_run_iteration.
void run_benchmark(int batch_size, int start_V, int K, int end_V, int average_run_iterations, int min_run_iteration)
{
std::cout << "Batch size = " << batch_size << std::endl;
std::cout << std::setw(12) << "V";
std::cout << std::setw(20) << "NaiveSoftmax";
std::cout << std::setw(20) << "SafeSoftmax";
std::cout << std::setw(20) << "OnlineSoftmax";
std::cout << std::setw(20) << "TopK";
std::cout << std::setw(30) << "SafeSoftmaxUnfusedTopK";
std::cout << std::setw(30) << "SafeSoftmaxFusedTopK";
std::cout << std::setw(30) << "OnlineSoftmaxFusedTopK";
std::cout << std::endl;
// Geometric mean of the V range; used to normalize per-V iteration counts.
float average_V = sqrtf(static_cast<float>(end_V)*static_cast<float>(start_V));
for(int V = start_V; V < end_V; V *= 2)
{
int run_iterations = std::max(static_cast<int>(static_cast<float>(average_run_iterations) * average_V / static_cast<float>(V)), min_run_iteration);
std::cout << std::setw(12) << V;
{
float runtime = benchmark_softmax(V, batch_size, SOFTMAX_TYPE_NAIVE, run_iterations);
std::cout << std::setw(20) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax(V, batch_size, SOFTMAX_TYPE_SAFE, run_iterations);
std::cout << std::setw(20) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax(V, batch_size, SOFTMAX_TYPE_ONLINE, run_iterations);
std::cout << std::setw(20) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax_topk(V, K, batch_size, SOFTMAX_TOPK_TYPE_TOPK_ONLY, run_iterations);
std::cout << std::setw(20) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax_topk(V, K, batch_size, SOFTMAX_TOPK_TYPE_SAFE_UNFUSED, run_iterations);
std::cout << std::setw(30) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax_topk(V, K, batch_size, SOFTMAX_TOPK_TYPE_SAFE_FUSED, run_iterations);
std::cout << std::setw(30) << (V * batch_size / runtime);
}
{
float runtime = benchmark_softmax_topk(V, K, batch_size, SOFTMAX_TOPK_TYPE_ONLINE_FUSED, run_iterations);
std::cout << std::setw(30) << (V * batch_size / runtime);
}
std::cout << std::endl;
}
}
// Entry point: first cross-checks all kernel variants for correctness on a
// small problem, then benchmarks them at a large and a small batch size.
int main(int argc, char *argv[])
{
std::cout << "Softmax correctness check:" << std::endl;
compare_softmax_results(300, 100, SOFTMAX_TYPE_NAIVE, SOFTMAX_TYPE_SAFE);
compare_softmax_results(300, 100, SOFTMAX_TYPE_NAIVE, SOFTMAX_TYPE_ONLINE);
std::cout << "TopK correctness check:" << std::endl;
compare_topk_results(300, MAX_K, 100, SOFTMAX_TOPK_TYPE_TOPK_ONLY);
std::cout << "Softmax+TopK correctness check:" << std::endl;
compare_softmax_topk_results(300, MAX_K, 100, SOFTMAX_TOPK_TYPE_SAFE_FUSED);
compare_softmax_topk_results(300, MAX_K, 100, SOFTMAX_TOPK_TYPE_ONLINE_FUSED);
int large_batch_size = 4000;
int small_batch_size = 10;
size_t max_V = 10000000;
// Odd start (63) presumably chosen to exercise non-multiple-of-warp sizes.
int start_V = 63;
int device_id;
CUDA_CHECK(cudaGetDevice(&device_id));
cudaDeviceProp device_prop;
CUDA_CHECK(cudaGetDeviceProperties(&device_prop, device_id));
// Cap V so that the three float buffers (x, y and headroom) fit within
// ~90% of device memory at the given batch size.
int large_batch_end_V = std::min(static_cast<size_t>(device_prop.totalGlobalMem * 0.9F) / (sizeof(float) * 3 * large_batch_size), max_V);
int small_batch_end_V = std::min(static_cast<size_t>(device_prop.totalGlobalMem * 0.9F) / (sizeof(float) * 3 * small_batch_size), max_V);
std::cout << "Softmax benchmark:" << std::endl;
run_benchmark(large_batch_size, start_V, MAX_K, large_batch_end_V, 100, 10);
run_benchmark(small_batch_size, start_V, MAX_K, small_batch_end_V, 4000, 800);
return 0;
}
|
12,438 | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads beyond n exit via the guard.
extern "C"
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n)
return;
c[idx] = a[idx] + b[idx];
}
12,439 | #include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <math.h>
using namespace std;
// Converts an RGBA image to greyscale using ITU-R BT.601 luma weights
// (0.299 R + 0.587 G + 0.114 B); alpha is forced to 255. One thread per
// pixel on a 2D grid; out-of-range threads do nothing.
__global__ void grey(const uchar4* input, uchar4* output, size_t num_rows, size_t num_cols) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
// NOTE: idx is computed before the bounds check but only used inside it.
int idx = y * num_cols + x;
if(x < num_cols && y < num_rows) {
unsigned char val = 0.299 * input[idx].x + 0.587 * input[idx].y + 0.114 * input[idx].z;
output[idx] = make_uchar4(val, val, val, 255);
}
}
// Converts the device image d_in (num_rows x num_cols RGBA pixels) to
// greyscale and returns a newly allocated HOST buffer with the result.
// The caller owns the returned buffer and must delete[] it.
// NOTE(review): no CUDA error checking here, matching the file's style.
uchar4* greyscale(uchar4 *d_in, const size_t num_rows, const size_t num_cols)
{
uchar4 *d_out;
cudaMalloc((void **) &d_out, num_rows * num_cols * sizeof(uchar4));
const dim3 block_size(16, 16, 1);
// Fix: use ceiling division for the grid. The original "n/16 + 1" launched
// a superfluous row/column of blocks whenever a dimension was an exact
// multiple of 16 (harmless due to the kernel guard, but wasteful).
const dim3 grid_size((num_cols + block_size.x - 1) / block_size.x,
(num_rows + block_size.y - 1) / block_size.y, 1);
grey<<<grid_size, block_size>>>(d_in, d_out, num_rows, num_cols);
uchar4* h_out = new uchar4[num_rows * num_cols];
// cudaMemcpy blocks, so it also synchronizes with the kernel above.
cudaMemcpy(h_out, d_out, num_rows * num_cols * sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaFree(d_out);
return h_out;
}
|
12,440 | #include <cassert>
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <float.h>
#include "raytraceCuda.cuh"
// flags as to whether or not reflection and refraction are included in the
// raytracing
#define REFLECTION 1
#define REFRACTION 1
#define SINGLETHREADMODE 0
#define RECURSIONDEPTH 3
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location and, by default, terminates
// the process with the error code. Invoked through the gpuErrChk macro.
inline void gpuAssert(cudaError_t code,
const char *file,
int line,
bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
exit(code);
}
}
// A point light source: world-space position, RGB color, and the attenuation
// coefficient k used when shading.
struct Point_Light
{
double position[3]; //3-vector
double color[3]; //3-vector
double attenuation_k;
};
// Surface material parameters for Phong-style shading plus refraction and
// reflection coefficients.
struct Material
{
double diffuse[3]; //3-vector
double ambient[3]; //3-vector
double specular[3]; //3-vector
double shine; // Phong shininess exponent
double snell; // index-of-refraction ratio used by refractedRay
double opacity;
double reflectivity;
};
// A superquadric scene object: exponents e and n, its material, and the
// forward transform (scale/rotate/translate) alongside the precomputed
// inverse transform (unScale/unRotate/unTranslate) used to bring rays into
// object space. Matrices are stored row-major as flat 9-element arrays.
struct Object
{
double e;
double n;
Material mat;
double scale[9]; //3x3-matrix
double unScale[9]; //3x3-matrix
double rotate[9]; //3x3-matrix
double unRotate[9]; //3x3-matrix
double translate[3]; //3-vector
double unTranslate[3]; //3-vector
};
/********** Helper Functions **************************************************/
/* Stores the component-wise (Hadamard) product of 3-vectors a and b into c. */
__device__
void cProduct(double *a, double *b, double *c)
{
for (int i = 0; i < 3; ++i)
c[i] = a[i] * b[i];
}
/* Stores the component-wise minimum of 3-vectors a and b into out. */
__device__
void cWiseMin(double *a, double *b, double *out)
{
out[0] = min(a[0], b[0]);
out[1] = min(a[1], b[1]);
out[2] = min(a[2], b[2]);
}
/* Sign function: -1 for negative input, 1 for positive, 0 for zero. */
__device__
int sign(double s)
{
// Branchless form; both comparisons are false for 0 (and NaN), giving 0.
return (s > 0) - (s < 0);
}
/* Returns the Euclidean (L2) norm of the given 3-vector. */
__device__
double d_norm(double *vec)
{
return sqrt((vec[0] * vec[0]) + (vec[1] * vec[1]) + (vec[2] * vec[2]));
}
/* Normalizes the given 3-vector in place.
 * NOTE(review): no guard against a zero-length vector (division by 0). */
__device__
void d_normalize(double *vec)
{
double n = d_norm(vec);
vec[0] = vec[0] / (double) n;
vec[1] = vec[1] / (double) n;
vec[2] = vec[2] / (double) n;
}
/* Returns the dot product of the given 3-vectors. */
__device__
double d_dot(double *a, double *b)
{
return (a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2]);
}
/* Implicit Superquadric function:
 *   isq(v) = (x^(2/e) + y^(2/e))^(e/n) + z^(2/n) - 1
 * Negative inside the surface, zero on it, positive outside. */
// vec is a 3-vector; e and n point to the superquadric exponents.
__device__
double isq(double *vec, double *e, double *n)
{
// Test for n = 0 now to prevent divide-by-zero errors.
// Fix: the original compared the POINTERS (n == 0, e == 0) to null instead
// of dereferencing, so these guards never fired for valid pointers and a
// zero exponent still divided by zero below.
if (*n == 0)
return FLT_MAX;
double zTerm = pow(pow(vec[2], 2.0), 1.0 / (double) *n);
// Test for e = 0 now to prevent divide-by-zero errors.
if (*e == 0)
return zTerm;
double xTerm = pow(pow(vec[0], 2.0), 1.0 / (double) *e);
double yTerm = pow(pow(vec[1], 2.0), 1.0 / (double) *e);
double xyTerm = pow(xTerm + yTerm, *e / (double) *n);
return xyTerm + zTerm - 1.0;
}
/* Ray Equation: c = a*t + b (direction a scaled by parameter t, plus
 * origin b). */
// a and b are both 3-vectors
__device__
void findRay(double *a, double *b, double *c, double t)
{
c[0] = (a[0] * t) + b[0];
c[1] = (a[1] * t) + b[1];
c[2] = (a[2] * t) + b[2];
}
/* Apply the Inverse Transform to ray direction a: newA = unScale * (unRotate * a).
 * Directions ignore translation, unlike origins (see newb). */
// unScale and unRotate are 3x3 matrices. a and newA are 3-vectors
__device__
void newa(double *unScale, double *unRotate, double *a, double *newA)
{
// unRotate * a
double a0 = (unRotate[0] * a[0]) + (unRotate[1] * a[1]) + (unRotate[2] * a[2]);
double a1 = (unRotate[3] * a[0]) + (unRotate[4] * a[1]) + (unRotate[5] * a[2]);
double a2 = (unRotate[6] * a[0]) + (unRotate[7] * a[1]) + (unRotate[8] * a[2]);
// unScale * (unRotate * a)
newA[0] = (unScale[0] * a0) + (unScale[1] * a1) + (unScale[2] * a2);
newA[1] = (unScale[3] * a0) + (unScale[4] * a1) + (unScale[5] * a2);
newA[2] = (unScale[6] * a0) + (unScale[7] * a1) + (unScale[8] * a2);
}
/* Apply the Inverse Transform to ray origin b:
 * newB = unScale * (unRotate * (b + unTranslate)). */
// unScale and unRotate are 3x3 matrices. unTranslate, b, and newB are 3-vectors
__device__
void newb(double *unScale, double *unRotate, double *unTranslate, double *b,
double *newB)
{
// b + unTranslate
double b0 = b[0] + unTranslate[0];
double b1 = b[1] + unTranslate[1];
double b2 = b[2] + unTranslate[2];
// unRotate * (b + unTranslate)
newB[0] = (unRotate[0] * b0) + (unRotate[1] * b1) + (unRotate[2] * b2);
newB[1] = (unRotate[3] * b0) + (unRotate[4] * b1) + (unRotate[5] * b2);
newB[2] = (unRotate[6] * b0) + (unRotate[7] * b1) + (unRotate[8] * b2);
b0 = newB[0];
b1 = newB[1];
b2 = newB[2];
// unScale * (unRotate * (b + unTranslate))
newB[0] = (unScale[0] * b0) + (unScale[1] * b1) + (unScale[2] * b2);
newB[1] = (unScale[3] * b0) + (unScale[4] * b1) + (unScale[5] * b2);
newB[2] = (unScale[6] * b0) + (unScale[7] * b1) + (unScale[8] * b2);
}
/* Finds the scalar coefficients of the quadratic equation with the two given
 * vectors. If positiveb is true then the returned coeffs will all be multiplied
 * by -1 if b is negative, to ensure that b is positive. */
// a, b, and c are 3-vectors
// Quadratic is c[0]*t^2 + c[1]*t + c[2] = 0. The -3 offset presumably comes
// from the object's bounding-sphere radius; TODO confirm against the caller.
__device__
void findCoeffs(double *a, double *b, double *c, bool positiveb)
{
c[0] = d_dot(a, a);
c[1] = 2 * d_dot(a, b);
c[2] = d_dot(b, b) - 3;
// Flip all signs so the linear coefficient is non-negative (keeps the
// root formulas in findRoots numerically well-behaved).
if (positiveb && c[1] < 0){
c[0] *= -1;
c[1] *= -1;
c[2] *= -1;
}
}
/* Finds the roots of the quadratic with the coefficients specified by the input
 * Vector3d. If one of the roots is complex then FLT_MAX is returned instead. */
// coeffs is a 3-vector, roots is a 2-vector
__device__
void findRoots(double *coeffs, double *roots)
{
// Discriminant b^2 - 4ac; negative means complex roots.
double interior = pow(coeffs[1], 2) - (4 * coeffs[0] * coeffs[2]);
if (interior < 0)
{
roots[0] = FLT_MAX;
roots[1] = FLT_MAX;
}
else
{
roots[0] = (-coeffs[1] - sqrt(interior)) / (double) (2 * coeffs[0]);
// Second root via the "citardauq" form 2c / (-b - sqrt(disc)), which
// avoids cancellation when b and sqrt(disc) are close in magnitude.
roots[1] = (2 * coeffs[2]) / (double) (-coeffs[1] - sqrt(interior));
}
}
/* Gradient of the isq function (partial derivatives with respect to x, y, z).
 * Degenerate exponents (n = 0, or e = 0) produce FLT_MAX components instead
 * of dividing by zero. */
// vec and grad are 3-vectors
__device__
void isqGradient(double *vec, double *grad, double e, double n)
{
double xval = 0.0, yval = 0.0, zval = 0.0;
// Check for n = 0 to prevent divide-by-zero errors
if (n == 0)
{
xval = yval = zval = FLT_MAX;
}
// Check for e = 0 to prevent divide-by-zero errors
else if (e == 0)
{
xval = yval = FLT_MAX;
zval = (2 * vec[2] * pow(pow(vec[2], 2), ((double) 1 / n) - 1)) / (double) n;
}
else
{
// General case: chain rule through (x^(2/e) + y^(2/e))^(e/n) + z^(2/n).
double xterm = pow(pow(vec[0], 2.0), (double) 1 / e);
double yterm = pow(pow(vec[1], 2.0), (double) 1 / e);
double xyterm = pow(xterm + yterm, ((double) e / n) - 1);
double x2term = (2 * vec[0] * pow(pow(vec[0], 2.0), ((double) 1 / e) - 1));
double y2term = (2 * vec[1] * pow(pow(vec[1], 2.0), ((double) 1 / e) - 1));
xval = x2term * xyterm / (double) n;
yval = y2term * xyterm / (double) n;
zval = (2 * vec[2] * pow(pow(vec[2], 2.0), ((double) 1 / n) - 1)) / (double) n;
}
grad[0] = xval;
grad[1] = yval;
grad[2] = zval;
}
/* Directional derivative of the isq function along ray direction a:
 * grad(isq)(vec) . a. Used by Newton's method in updateRule. */
// vec and a are 3-vectors
__device__
double gPrime(double *vec, double *a, double e, double n)
{
double tmp[3];
isqGradient(vec, &tmp[0], e, n);
double val = d_dot(a, &tmp[0]);
return val;
}
/* Uses Newton's method to find the t value at which a ray hits the superquadric.
 * If the ray actually misses the superquadric then FLT_MAX is returned instead.*/
// a and b are 3-vectors (ray direction and origin); t is the starting guess;
// epsilon is the convergence tolerance on the implicit-function value.
__device__
double updateRule(double *a, double *b, double *e, double *n, double t, double epsilon)
{
double vec[3];
findRay(a, b, &vec[0], t);
double gP = gPrime(&vec[0], a, *e, *n);
double gPPrevious = gP;
double g = 0.0;
double tnew = t, told = t;
bool stopPoint = false;
while (!stopPoint)
{
told = tnew;
// Evaluate the implicit function and its directional derivative at the
// current point on the ray.
findRay(a, b, &vec[0], told);
gP = gPrime(&vec[0], a, *e, *n);
g = isq(&vec[0], e, n);
if ((g - epsilon) <= 0)
{
// Converged: we are on (or inside) the surface within tolerance.
stopPoint = true;
}
else if (sign(gP) != sign(gPPrevious) || gP == 0)
{
// Derivative changed sign (passed the closest approach) or is zero:
// the ray misses the surface.
stopPoint = true;
tnew = FLT_MAX;
}
else
{
// Standard Newton step: t <- t - g/g'.
tnew = told - (g / gP);
gPPrevious = gP;
}
}
return tnew;
}
/* Unit normal vector at a point on the superquadric: evaluates the isq
 * gradient at the intersection point (ray vec1*tt + vec2), rotates it by r
 * into world space, and normalizes it. */
// r is a 3x3 matrix
// vec1, vec2, and un are 3-vectors; un is both scratch and output.
__device__
void unitNormal(double *r, double *vec1, double *vec2, double *un, double tt, double e, double n)
{
findRay(vec1, vec2, un, tt);
// In-place: un goes from intersection point to object-space gradient.
isqGradient(un, un, e, n);
double un0 = un[0];
double un1 = un[1];
double un2 = un[2];
// r * un (rotate normal into world space).
un[0] = (r[0] * un0) + (r[1] * un1) + (r[2] * un2);
un[1] = (r[3] * un0) + (r[4] * un1) + (r[5] * un2);
un[2] = (r[6] * un0) + (r[7] * un1) + (r[8] * un2);
d_normalize(un);
}
// Returns the angle between two vectors, in radians, via
// acos(a.b / (|a| |b|)).
// Both a and b are 3-vectors.
__device__
double vectorAngle(double *a, double *b)
{
double d = d_dot(a, b);
double mag = d_norm(a) * d_norm(b);
return acos(d / (double) mag);
}
// Calculates the refracted ray from an input ray and normal and a snell ratio
// If there is total internal reflection, then a vector of FLT_MAX is returned
// instead.
// a, n, and ref are 3-vectors
__device__
void refractedRay(double *a, double *n, double *ref, double snell)
{
double tmp = d_dot(n, a);
n[0] *= -1;
n[1] *= -1;
n[2] *= -1;
double cos1 = d_dot(n, a);
if (cos1 < 0)
{
cos1 = tmp;
}
else {
n[0] *= -1;
n[1] *= -1;
n[2] *= -1;
}
double radicand = 1 - (pow(snell, 2) * (1 - pow(cos1,2)));
if (radicand < 0)
{
ref[0] = FLT_MAX;
ref[1] = FLT_MAX;
ref[2] = FLT_MAX;
}
else
{
double cos2 = sqrt(radicand);
ref[0] = (snell * a[0]) + (((snell * cos1) - cos2) * n[0]);
ref[1] = (snell * a[1]) + (((snell * cos1) - cos2) * n[1]);
ref[2] = (snell * a[2]) + (((snell * cos1) - cos2) * n[2]);
}
}
/* debugging purposes */
__device__
void print_objects(Object *p_objects, int numObjects)
{
for (int i = 0; i < numObjects; i++) {
Object *o = &p_objects[i];
printf("\nObject %d\n", i);
printf("e: %f\t n: %f\n", o->e, o->n);
printf("scale: [%f, %f, %f] unScale: [%f, %f, %f]\n", o->scale[0],
o->scale[1], o->scale[2], o->unScale[0], o->unScale[1], o->unScale[2]);
printf(" [%f, %f, %f] [%f, %f, %f]\n", o->scale[3],
o->scale[4], o->scale[5], o->unScale[3], o->unScale[4], o->unScale[5]);
printf(" [%f, %f, %f] [%f, %f, %f]\n", o->scale[6],
o->scale[7], o->scale[8], o->unScale[6], o->unScale[7], o->unScale[8]);
printf("rotate: [%f, %f, %f] unRotate: [%f, %f, %f]\n", o->rotate[0],
o->rotate[1], o->rotate[2], o->unRotate[0], o->unRotate[1], o->unRotate[2]);
printf(" [%f, %f, %f] [%f, %f, %f]\n", o->rotate[3],
o->rotate[4], o->rotate[5], o->unRotate[3], o->unRotate[4], o->unRotate[5]);
printf(" [%f, %f, %f] [%f, %f, %f]\n", o->rotate[6],
o->rotate[7], o->rotate[8], o->unRotate[6], o->unRotate[7], o->unRotate[8]);
printf("translate: (%f, %f, %f) unTranslate: (%f, %f, %f)\n",
o->translate[0], o->translate[1], o->translate[2], o->unTranslate[0],
o->unTranslate[1], o->unTranslate[2]);
printf("Material-\n");
printf("Diffuse: (%f, %f, %f)\n", o->mat.diffuse[0], o->mat.diffuse[1],
o->mat.diffuse[2]);
printf("Ambient: (%f, %f, %f)\n", o->mat.ambient[0], o->mat.ambient[1],
o->mat.ambient[2]);
printf("Specular: (%f, %f, %f)\n", o->mat.specular[0], o->mat.specular[1],
o->mat.specular[2]);
printf("shine: %f\t snell: %f\t opacity: %f\n", o->mat.shine, o->mat.snell, o->mat.opacity);
}
}
/* Debugging helper: dumps every light's position, color, and attenuation
 * factor via device printf.  Call from a single thread only. */
__device__
void print_lights(Point_Light *p_lights, int numLights)
{
    Point_Light *cur = p_lights;
    for (int idx = 0; idx < numLights; idx++, cur++) {
        printf("\nLight %d\n", idx);
        printf("Position: (%f, %f, %f)\n", cur->position[0], cur->position[1], cur->position[2]);
        printf("Color: (%f, %f, %f)\n", cur->color[0], cur->color[1], cur->color[2]);
        printf("Attenuation Factor: %f\n", cur->attenuation_k);
    }
}
/********** Actual Raytracing Functions ***************************************/
__device__
// Phong-style shading with shadow rays plus recursive reflection/refraction.
// n is the normal. e is the eye. ind is the index of the object we're lighting.
//   point      - surface point being shaded
//   mat        - material of the hit object
//   epsilon    - convergence threshold for the Newton intersection solver
//   generation - remaining recursion depth; 0 disables all light loops
//   res        - output RGB, componentwise-clamped to <= 1
//   lightDoubles - per-pixel scratch buffer (32 doubles, allocated by the
//                  host wrapper) aliased into several named slices below and
//                  reused by recursive calls to limit device stack growth
void lighting(double *point, double *n, double *e, Material *mat,
              Point_Light *l, int numLights,
              Object *objects, int numObjects,
              double epsilon, int ind, int generation, double *res,
              double *lightDoubles)
{
    double diffuseSum[3] = {0.0, 0.0, 0.0};
    double specularSum[3] = {0.0, 0.0, 0.0};
    double refractedLight[3] = {0.0, 0.0, 0.0};
    double reflectedLight[3] = {0.0, 0.0, 0.0};
    double* dif = &mat->diffuse[0];
    double* spec = &mat->specular[0];
    double shine = mat->shine;
    // Scratch slices carved out of lightDoubles (see header comment).
    double *newA = &lightDoubles[0];
    double *newB = &lightDoubles[3];
    double *coeffs = &lightDoubles[6];
    double *roots = &lightDoubles[30];
    // Get the unit direction from the point to the camera
    double eDirection[3];
    for (int i = 0; i < 3; i++)
        eDirection[i] = e[i] - point[i];
    d_normalize(&eDirection[0]);
    for (int i = 0; i < numLights && generation > 0; i++)
    {
        // Retrieve the light's postion, color, and attenuation factor
        double attenuation = l[i].attenuation_k;
        // Get the unit direction and the distance between the light and the
        // point
        double lDirection[3];
        lDirection[0] = l[i].position[0] - point[0];
        lDirection[1] = l[i].position[1] - point[1];
        lDirection[2] = l[i].position[2] - point[2];
        double lightDist = d_norm(&lDirection[0]);
        d_normalize(&lDirection[0]);
        // Check to see that the light isn't blocked before considering it
        // further.
        // The i > 0 condition is present to prevent the program from blocking
        // anything from the eyelight, for the obvious reason that anything we
        // can see will be illuminated by the eyelight.
        bool useLight = true;
        for (int k = 0; k < numObjects && useLight && i > 0; k++)
        {
            if (k != ind)
            {
                // Find the ray equation transformations
                newa(&objects[k].unScale[0], &objects[k].unRotate[0],
                     &lDirection[0], &newA[0]);
                newb(&objects[k].unScale[0], &objects[k].unRotate[0],
                     &objects[k].unTranslate[0], point, &newB[0]);
                // Find the quadratic equation coefficients
                findCoeffs(&newA[0], &newB[0], &coeffs[0], true);
                // Using the coefficients, find the roots
                findRoots(&coeffs[0], &roots[0]);
                // Check to see if the roots are FLT_MAX - if they are then the
                // ray missed the superquadric. If they haven't missed then we
                // can continue with the calculations.
                if (roots[0] != FLT_MAX)
                {
                    // Use the update rule to find tfinal
                    double tini = min(roots[0], roots[1]);
                    double tfinal = updateRule(&newA[0], &newB[0], &objects[k].e,
                                               &objects[k].n, tini, epsilon);
                    /* Check to see if tfinal is FLT_MAX - if it is then the ray
                     * missed the superquadric. Additionally, if tfinal is
                     * negative then either the ray has started inside the
                     * object or is pointing away from the object; in both cases
                     * the ray has "missed". Also check to see if the object is
                     * farther away than the light - if it is then it isn't
                     * actually blocking the light. */
                    double ray[3];
                    findRay(&lDirection[0], point, &ray[0], tfinal);
                    double objDist = d_norm(&ray[0]);
                    if (tfinal != FLT_MAX && tfinal >= 0 && objDist < lightDist)
                        useLight = false;
                }
            }
        }
        if (useLight)
        {
            // Find tthe attenuation term
            double atten = 1 / (double) (1 + (attenuation * pow(lightDist, 2)));
            // Add the attenuation factor to the light's color
            // Add the diffuse factor to the diffuse sum
            double nDotl = d_dot(n, &lDirection[0]);
            //Vector3d lDiffuse = lC * atten * ((0 < nDotl) ? nDotl : 0);
            //diffuseSum = diffuseSum + lDiffuse;
            if (0 < nDotl) {
                diffuseSum[0] += l[i].color[0] * atten * nDotl;
                diffuseSum[1] += l[i].color[1] * atten * nDotl;
                diffuseSum[2] += l[i].color[2] * atten * nDotl;
            }
            // Add the specular factor to the specular sum
            // (Blinn-Phong style half-vector between eye and light directions)
            double dirDif[3];
            dirDif[0] = eDirection[0] + lDirection[0];
            dirDif[1] = eDirection[1] + lDirection[1];
            dirDif[2] = eDirection[2] + lDirection[2];
            d_normalize(&dirDif[0]);
            double nDotDir = d_dot(n, &dirDif[0]);
            //Vector3d lSpecular = lC * atten *
            //    pow(((0 < nDotDir && 0 < nDotl) ? nDotDir : 0), shine);
            //specularSum = specularSum + lSpecular;
            if (0 < nDotDir && 0 < nDotl) {
                specularSum[0] += l[i].color[0] * atten * pow(nDotDir, shine);
                specularSum[1] += l[i].color[1] * atten * pow(nDotDir, shine);
                specularSum[2] += l[i].color[2] * atten * pow(nDotDir, shine);
            }
        }
    }
    /* Find the light contribution from reflection */
    // Find the reflected ray
#if REFLECTION
    double ttrueFinal = 0.0;
    int finalObj = 0;
    bool hitObject = false;
    double *finalNewA = &lightDoubles[12];
    double *finalNewB = &lightDoubles[15];
    // If the object's reflectivity is 0 then just don't bother
    if (objects[ind].mat.reflectivity != 0) {
        double eDotN = d_dot(n, &eDirection[0]);
        double *reflected = &lightDoubles[9];
        // Mirror the eye direction about the normal.
        reflected[0] = (2 * n[0] * eDotN) - eDirection[0];
        reflected[1] = (2 * n[1] * eDotN) - eDirection[1];
        reflected[2] = (2 * n[2] * eDotN) - eDirection[2];
        d_normalize(&reflected[0]);
        // Find the nearest object hit by the reflected ray.
        for (int k = 0; k < numObjects && generation > 0 ; k++)
        {
            if (k != ind)
            {
                // Find the ray equation transformations
                newa(objects[k].unScale, objects[k].unRotate, &reflected[0],
                     &newA[0]);
                newb(objects[k].unScale, objects[k].unRotate,
                     objects[k].unTranslate, point, &newB[0]);
                // Find the quadratic equation coefficients
                findCoeffs(&newA[0], &newB[0], &coeffs[0], true);
                // Using the coefficients, find the roots
                findRoots(&coeffs[0], &roots[0]);
                // Check to see if the roots are FLT_MAX - if they are then the
                // ray missed the superquadric. If they haven't missed then we
                // can continue with the calculations.
                if (roots[0] != FLT_MAX)
                {
                    // Use the update rule to find tfinal
                    double tini = min(roots[0], roots[1]);
                    double tfinal = updateRule(&newA[0], &newB[0], &objects[k].e,
                                               &objects[k].n, tini, epsilon);
                    /* Check to see if tfinal is FLT_MAX - if it is then the ray
                     * missed the superquadric. Additionally, if tfinal is negative
                     * then either the ray has started inside the object or is
                     * pointing away from the object; in both cases the ray has
                     * "missed". */
                    if (tfinal != FLT_MAX && tfinal >= 0)
                    {
                        if(hitObject && tfinal < ttrueFinal)
                        {
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                        else if (!hitObject)
                        {
                            hitObject = true;
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                    }
                }
            }
        }
        if (hitObject)
        {
            // Shade the hit point recursively and scale by reflectivity.
            double intersectR[3];
            double intersectRNormal[3];
            findRay(&reflected[0], point, &intersectR[0], ttrueFinal);
            unitNormal(objects[finalObj].rotate, &finalNewA[0], &finalNewB[0],
                       &intersectRNormal[0], ttrueFinal, objects[finalObj].e,
                       objects[finalObj].n);
            lighting(&intersectR[0], &intersectRNormal[0], e,
                     &objects[finalObj].mat,
                     l, numLights, objects, numObjects, epsilon,
                     finalObj, generation-1, &reflectedLight[0], lightDoubles);
            // Multiply by the object's reflectivity
            reflectedLight[0] *= objects[ind].mat.reflectivity;
            reflectedLight[1] *= objects[ind].mat.reflectivity;
            reflectedLight[2] *= objects[ind].mat.reflectivity;
        }
    }
#endif
#if REFRACTION
    /* Find the refraction contribution. */
    // If the object's opacity is zero then just don't bother
    if (objects[ind].mat.opacity != 0) {
        // Change the eye-direction vector so that it points at the surface instead
        // of at the eye
        eDirection[0] *= -1;
        eDirection[1] *= -1;
        eDirection[2] *= -1;
        // Find the refracted ray
        double *refracted1 = &lightDoubles[9];
        refractedRay(&eDirection[0], n, &refracted1[0], objects[ind].mat.snell);
        d_normalize(&refracted1[0]);
        ttrueFinal = 0.0;
        finalObj = 0;
        hitObject = false;
        // Nearest-object search along the refracted ray (same pattern as the
        // reflection search above).
        for (int k = 0; k < numObjects && generation > 0; k++)
        {
            if (k != ind)
            {
                // Find the ray equation transformations
                newa(objects[k].unScale, objects[k].unRotate, &refracted1[0], &newA[0]);
                newb(objects[k].unScale, objects[k].unRotate,
                     objects[k].unTranslate, point, &newB[0]);
                // Find the quadratic equation coefficients
                findCoeffs(&newA[0], &newB[0], &coeffs[0], true);
                // Using the coefficients, find the roots
                findRoots(&coeffs[0], &roots[0]);
                // Check to see if the roots are FLT_MAX - if they are then the
                // ray missed the superquadric. If they haven't missed then we
                // can continue with the calculations.
                if (roots[0] != FLT_MAX)
                {
                    // Use the update rule to find tfinal
                    double tini = min(roots[0], roots[1]);
                    double tfinal = updateRule(&newA[0], &newB[0], &objects[k].e,
                                               &objects[k].n, tini, epsilon);
                    /* Check to see if tfinal is FLT_MAX - if it is then the ray
                     * missed the superquadric. Additionally, if tfinal is negative
                     * then either the ray has started inside the object or is
                     * pointing away from the object; in both cases the ray has
                     * "missed". */
                    if (tfinal != FLT_MAX && tfinal >= 0)
                    {
                        if(hitObject && tfinal < ttrueFinal)
                        {
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                        else if (!hitObject)
                        {
                            hitObject = true;
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                    }
                }
            }
        }
        if (hitObject)
        {
            double intersectR[3];
            double intersectRNormal[3];
            findRay(&refracted1[0], point, &intersectR[0], ttrueFinal);
            unitNormal(objects[finalObj].rotate, &finalNewA[0], &finalNewB[0],
                       &intersectRNormal[0], ttrueFinal, objects[finalObj].e,
                       objects[finalObj].n);
            lighting(&intersectR[0], &intersectRNormal[0], e,
                     &objects[finalObj].mat,
                     l, numLights, objects, numObjects, epsilon,
                     finalObj, generation-1, &refractedLight[0], lightDoubles);
            refractedLight[0] *= objects[ind].mat.opacity;
            refractedLight[1] *= objects[ind].mat.opacity;
            refractedLight[2] *= objects[ind].mat.opacity;
        }
        else
        {
            // No other object hit: trace the ray through this object — find
            // where it exits, refract again at the exit surface, then look for
            // hits beyond the object.
            double *refA = &lightDoubles[18];
            double *refB = &lightDoubles[21];
            double *refCoeffs = &lightDoubles[24];
            double *refRoots = &lightDoubles[27];
            newa(objects[ind].unScale, objects[ind].unRotate, &refracted1[0], &refA[0]);
            newb(objects[ind].unScale, objects[ind].unRotate,
                 objects[ind].unTranslate, point, &refB[0]);
            findCoeffs(&refA[0], &refB[0], &refCoeffs[0], true);
            findRoots(&refCoeffs[0], &refRoots[0]);
            // max() picks the far root, i.e. the exit point of the object.
            double tini = max(refRoots[0], refRoots[1]);
            double tfinalRef = updateRule(&refA[0], &refB[0], &objects[ind].e,
                                          &objects[ind].n, tini, epsilon);
            bool isRefracted = true;
            double outNormal[3];
            // NOTE: these alias refCoeffs/refRoots above, which are dead by now.
            double *outPoint = &lightDoubles[24];
            double *outRay = &lightDoubles[27];
            if (isRefracted) // always true here; kept to preserve control flow
            {
                findRay(&refracted1[0], point, &outPoint[0], tfinalRef);
                unitNormal(objects[ind].rotate, &refA[0], &refB[0], &outNormal[0], tfinalRef,
                           objects[ind].e, objects[ind].n);
                // Inverse snell ratio: leaving the medium.
                refractedRay(&refracted1[0], &outNormal[0], &outRay[0],
                             (double) 1 / objects[ind].mat.snell);
                // If the point has total internal reflection, then don't bother
                // with the rest of the refraction calculations.
                if(outRay[0] == FLT_MAX)
                    isRefracted = false;
            }
            // Now that we've found where the ray exits, check to see if it hits any
            // objects; if it does, find the color contribution from that object
            ttrueFinal = 0.0;
            finalObj = 0;
            hitObject = false;
            for (int k = 0; k < numObjects && generation > 0 && isRefracted; k++)
            {
                if (k != ind)
                {
                    // Find the ray equation transformations
                    newa(objects[k].unScale, objects[k].unRotate,
                         &outRay[0], &newA[0]);
                    newb(objects[k].unScale, objects[k].unRotate,
                         objects[k].unTranslate, &outPoint[0], &newB[0]);
                    // Find the quadratic equation coefficients
                    findCoeffs(&newA[0], &newB[0], &coeffs[0], true);
                    // Using the coefficients, find the roots
                    findRoots(&coeffs[0], &roots[0]);
                    // Check to see if the roots are FLT_MAX - if they are then the
                    // ray missed the superquadric. If they haven't missed then we
                    // can continue with the calculations.
                    if (roots[0] != FLT_MAX)
                    {
                        // Use the update rule to find tfinal
                        double tini = min(roots[0], roots[1]);
                        double tfinal = updateRule(&newA[0], &newB[0], &objects[k].e,
                                                   &objects[k].n, tini, epsilon);
                        /* Check to see if tfinal is FLT_MAX - if it is then the ray
                         * missed the superquadric. Additionally, if tfinal is negative
                         * then either the ray has started inside the object or is
                         * pointing away from the object; in both cases the ray has
                         * "missed". */
                        if (tfinal != FLT_MAX && tfinal >= 0)
                        {
                            if(hitObject && tfinal < ttrueFinal)
                            {
                                ttrueFinal = tfinal;
                                finalObj = k;
                                finalNewA[0] = newA[0];
                                finalNewA[1] = newA[1];
                                finalNewA[2] = newA[2];
                                finalNewB[0] = newB[0];
                                finalNewB[1] = newB[1];
                                finalNewB[2] = newB[2];
                            }
                            else if (!hitObject)
                            {
                                hitObject = true;
                                ttrueFinal = tfinal;
                                finalObj = k;
                                finalNewA[0] = newA[0];
                                finalNewA[1] = newA[1];
                                finalNewA[2] = newA[2];
                                finalNewB[0] = newB[0];
                                finalNewB[1] = newB[1];
                                finalNewB[2] = newB[2];
                            }
                        }
                    }
                }
            }
            if (hitObject)
            {
                double intersectR[3];
                double intersectRNormal[3];
                findRay(&outRay[0], &outPoint[0], &intersectR[0], ttrueFinal);
                unitNormal(objects[finalObj].rotate, &finalNewA[0], &finalNewB[0],
                           &intersectRNormal[0], ttrueFinal, objects[finalObj].e,
                           objects[finalObj].n);
                lighting(&intersectR[0], &intersectRNormal[0], e,
                         &objects[finalObj].mat,
                         l, numLights, objects, numObjects, epsilon,
                         finalObj, generation - 1, &refractedLight[0], lightDoubles);
                refractedLight[0] *= objects[ind].mat.opacity;
                refractedLight[1] *= objects[ind].mat.opacity;
                refractedLight[2] *= objects[ind].mat.opacity;
            }
        }
    }
#endif
    // Combine all contributions, modulate diffuse/specular by the material,
    // and clamp each channel to 1.  minVec/maxVec reuse scratch that is no
    // longer needed at this point.
    double *minVec = &lightDoubles[0];
    double *maxVec = &lightDoubles[3];
    minVec[0] = 1;
    minVec[1] = 1;
    minVec[2] = 1;
    cProduct(&diffuseSum[0], dif, &diffuseSum[0]);
    cProduct(&specularSum[0], spec, &specularSum[0]);
    maxVec[0] = diffuseSum[0] + specularSum[0] + reflectedLight[0] + refractedLight[0];
    maxVec[1] = diffuseSum[1] + specularSum[1] + reflectedLight[1] + refractedLight[1];
    maxVec[2] = diffuseSum[2] + specularSum[2] + reflectedLight[2] + refractedLight[2];
    cWiseMin(&minVec[0], &maxVec[0], res);
}
__global__
// Raytraces the scene into grid (3 doubles per pixel, row-major).
// Each thread walks pixels in a grid-stride pattern over both axes, so any
// launch configuration covers the whole Nx x Ny image.  rayDoubles supplies
// 26 scratch doubles per pixel and lightDoubles 32 per pixel (consumed by
// lighting()).  With SINGLETHREADMODE set, thread (0,0) renders every pixel
// serially (debug mode) — note the preprocessor branches below share a body
// and intentionally have unbalanced braces within each branch.
void raytraceKernel(double *grid, Object *objects, Point_Light *lightsPPM,
                    double *data, double *bgColor, double *e1, double *e2,
                    double *e3, double *lookFrom, double *rayDoubles,
                    double *lightDoubles,
                    int Nx, int Ny, bool antiAliased)
{
    /* data[0] = numObjects
     * data[1] = numLights
     * data[2] = filmX
     * data[3] = filmY
     * data[4] = epsilon
     * data[5] = filmDepth
     */
    // Parallize by screen pixel
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    // Debugging
    /*if (i == 0 && j == 0) {
    print_objects(objects, data[0]);
    print_lights(lightsPPM, data[1]);
    }
    __syncthreads();*/
#if SINGLETHREADMODE
    if (i == 0 && j == 0) {
    for(i = 0; i < Nx; i++)
    {
    for(j = 0; j < Ny; j++)
#else
    while (i < Nx)
    {
    // Reset j for each new column this thread processes.
    j = threadIdx.y + blockDim.y * blockIdx.y;
    while (j < Ny)
#endif
    {
        /* Do all of this within the while loop to prevent threads with i's
         * and j's outside of the image boundaris from accessing rayDoubles.
         */
        // Film-plane cell size per pixel.
        double dx = data[2] / (double) Nx;
        double dy = data[3] / (double) Ny;
        double ttrueFinal = 0.0;
        int finalObj = 0;
        bool hitObject = false;
        // Per-pixel scratch slices (26 doubles per pixel).
        int rayInd = (j * Nx + i) * 26;
        double *finalNewA = &rayDoubles[rayInd];
        double *finalNewB = &rayDoubles[rayInd + 3];
        double *pointA = &rayDoubles[rayInd + 6];
        double *newA = &rayDoubles[rayInd + 9];
        double *newB = &rayDoubles[rayInd + 12];
        double *coeffs = &rayDoubles[rayInd + 15];
        double *intersect = &rayDoubles[rayInd + 18];
        double *intersectNormal = &rayDoubles[rayInd + 21];
        double *roots = &rayDoubles[rayInd + 24];
        double *lDoubles = &lightDoubles[(j * Nx + i) * 32];
        // The positions are subtracted by a Nx/2 or Ny/2 term to center
        // the film plane
        double px = (i * dx) - (data[2] / (double) 2);
        double py = (j * dy) - (data[3] / (double) 2);
        // Start from the background color; overwritten only on a hit.
        double pxColor[] = {bgColor[0], bgColor[1], bgColor[2]};
        if (!antiAliased)
        {
            // Transform point A into film coordinates
            pointA[0] = (data[5] * e3[0]) + (px * e1[0]) + (py * e2[0]);
            pointA[1] = (data[5] * e3[1]) + (px * e1[1]) + (py * e2[1]);
            pointA[2] = (data[5] * e3[2]) + (px * e1[2]) + (py * e2[2]);
            hitObject = false;
            finalObj = 0, ttrueFinal = 0;
            // Closest-hit search over all objects.
            for (int k = 0; k < data[0]; k++)
            {
                // Find the ray equation transformations
                newa(objects[k].unScale, objects[k].unRotate, pointA, newA);
                newb(objects[k].unScale, objects[k].unRotate,
                     objects[k].unTranslate, lookFrom, newB);
                // Find the quadratic equation coefficients
                findCoeffs(newA, newB, coeffs, true);
                // Using the coefficients, find the roots
                findRoots(coeffs, roots);
                // Check to see if the roots are FLT_MAX - if they are then the
                // ray missed the superquadric. If they haven't missed then we
                // can continue with the calculations.
                if (roots[0] != FLT_MAX)
                {
                    // Use the update rule to find tfinal
                    double tini = min(roots[0], roots[1]);
                    double tfinal = updateRule(newA, newB, &objects[k].e,
                                               &objects[k].n, tini, data[4]);
                    /* Check to see if tfinal is FLT_MAX - if it is then the ray
                     * missed the superquadric. Additionally, if tfinal is negative
                     * then either the ray has started inside the object or is
                     * pointing away from the object; in both cases the ray has
                     * "missed". */
                    if (tfinal != FLT_MAX && tfinal >= 0)
                    {
                        if(hitObject && tfinal < ttrueFinal)
                        {
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                        else if (!hitObject)
                        {
                            hitObject = true;
                            ttrueFinal = tfinal;
                            finalObj = k;
                            finalNewA[0] = newA[0];
                            finalNewA[1] = newA[1];
                            finalNewA[2] = newA[2];
                            finalNewB[0] = newB[0];
                            finalNewB[1] = newB[1];
                            finalNewB[2] = newB[2];
                        }
                    }
                }
            }
            if(hitObject)
            {
                // Shade the closest hit.
                findRay(pointA, lookFrom, intersect, ttrueFinal);
                unitNormal(objects[finalObj].rotate, finalNewA, finalNewB,
                           intersectNormal, ttrueFinal, objects[finalObj].e,
                           objects[finalObj].n);
                lighting(intersect, intersectNormal, lookFrom,
                         &objects[finalObj].mat,
                         lightsPPM, data[1], objects, data[0], data[4],
                         finalObj, RECURSIONDEPTH,
                         &pxColor[0], lDoubles);
            }
        }
        else
        {
            // 3x3 weighted supersampling over half-pixel offsets.
            double denom = 3 + (2 / sqrt((double) 2));
            double pxCoeffs[] = {(1 / (2 * sqrt((double) 2))) / denom,
                                 (1 / (double) 2) / denom,
                                 (1 / (2 * sqrt((double) 2))) / denom,
                                 (1 / (double) 2) / denom,
                                 1 / denom,
                                 (1 / (double) 2) / denom,
                                 (1 / (2 * sqrt((double) 2))) / denom,
                                 (1 / (double) 2) / denom,
                                 (1 / (2 * sqrt((double) 2))) / denom};
            int counter = 0;
            for (int g = -1; g <= 1; g++)
            {
                for (int h = -1; h <= 1; h++)
                {
                    double thisPx = px + (g * (dx / (double) 2));
                    double thisPy = py + (h * (dy / (double) 2));
                    // Transform point A into film Coordinates
                    pointA[0] = (data[5] * e3[0]) + (thisPx * e1[0]) + (thisPy * e2[0]);
                    pointA[1] = (data[5] * e3[1]) + (thisPx * e1[1]) + (thisPy * e2[1]);
                    pointA[2] = (data[5] * e3[2]) + (thisPx * e1[2]) + (thisPy * e2[2]);
                    hitObject = false;
                    finalObj = 0, ttrueFinal = 0;
                    for (int k = 0; k < data[0]; k++)
                    {
                        // Find the ray equation transformations
                        newa(objects[k].unScale, objects[k].unRotate,
                             pointA, newA);
                        newb(objects[k].unScale, objects[k].unRotate,
                             objects[k].unTranslate, lookFrom, newB);
                        // Find the quadratic equation coefficients
                        findCoeffs(newA, newB, coeffs, true);
                        // Using the coefficients, find the roots
                        findRoots(coeffs, roots);
                        // Check to see if the roots are FLT_MAX - if they are then the
                        // ray missed the superquadric. If they haven't missed then we
                        // can continue with the calculations.
                        if (roots[0] != FLT_MAX)
                        {
                            // Use the update rule to find tfinal
                            double tini = min(roots[0], roots[1]);
                            double tfinal = updateRule(newA, newB, &objects[k].e,
                                                       &objects[k].n, tini, data[4]);
                            /* Check to see if tfinal is FLT_MAX - if it is then the ray
                             * missed the superquadric. Additionally, if tfinal is negative
                             * then either the ray has started inside the object or is
                             * pointing away from the object; in both cases the ray has
                             * "missed". */
                            if (tfinal != FLT_MAX && tfinal >= 0)
                            {
                                if(hitObject && tfinal < ttrueFinal)
                                {
                                    ttrueFinal = tfinal;
                                    finalObj = k;
                                    finalNewA[0] = newA[0];
                                    finalNewA[1] = newA[1];
                                    finalNewA[2] = newA[2];
                                    finalNewB[0] = newB[0];
                                    finalNewB[1] = newB[1];
                                    finalNewB[2] = newB[2];
                                }
                                else if (!hitObject)
                                {
                                    hitObject = true;
                                    ttrueFinal = tfinal;
                                    finalObj = k;
                                    finalNewA[0] = newA[0];
                                    finalNewA[1] = newA[1];
                                    finalNewA[2] = newA[2];
                                    finalNewB[0] = newB[0];
                                    finalNewB[1] = newB[1];
                                    finalNewB[2] = newB[2];
                                }
                            }
                        }
                    }
                    if(hitObject)
                    {
                        findRay(pointA, lookFrom, intersect, ttrueFinal);
                        unitNormal(objects[finalObj].rotate, finalNewA,
                                   finalNewB, intersectNormal, ttrueFinal,
                                   objects[finalObj].e, objects[finalObj].n);
                        double color[] = {0, 0, 0};
                        lighting(intersect, intersectNormal, lookFrom,
                                 &objects[finalObj].mat,
                                 lightsPPM, data[1], objects, data[0],
                                 data[4],
                                 finalObj, RECURSIONDEPTH,
                                 &color[0], lDoubles);
                        // Accumulate the weighted sample into the pixel.
                        pxColor[0] += color[0] * pxCoeffs[counter];
                        pxColor[1] += color[1] * pxCoeffs[counter];
                        pxColor[2] += color[2] * pxCoeffs[counter];
                    }
                    counter++;
                }
            }
        }
        // Write the finished pixel (3 doubles, row-major).
        int index = (j * Nx + i) * 3;
        grid[index] = pxColor[0];
        grid[index + 1] = pxColor[1];
        grid[index + 2] = pxColor[2];
#if SINGLETHREADMODE
    }
    }
    }
}
#else
    j += blockDim.y * gridDim.y;
    }
    i += blockDim.x * gridDim.x;
    }
}
#endif
// Host wrapper: configures the launch, sizes the device stack for the
// recursive lighting() calls, allocates the per-pixel scratch buffers, and
// runs raytraceKernel synchronously.
//   grid .. lookFrom - device pointers prepared by the caller
//   Nx, Ny           - image dimensions in pixels
//   antiAliased      - enables 3x3 supersampling in the kernel
//   blockPower       - block edge is 2^blockPower threads (square blocks)
void callRaytraceKernel(double *grid, Object *objects, Point_Light *lightsPPM,
                        double *data, double *bgColor, double *e1, double *e2,
                        double *e3, double *lookFrom, int Nx, int Ny,
                        bool antiAliased, int blockPower)
{
    // Integer shift instead of pow(): pow(2, p) goes through double and can
    // round just below the intended power before the int truncation.
    int blockSize = 1 << blockPower;
    dim3 blocks;
    blocks.x = blockSize;
    blocks.y = blockSize;
    // Floor division is fine here: the kernel uses grid-stride loops, so a
    // smaller grid still covers every pixel.
    int gx = (Nx / blockSize);
    int gy = (Ny / blockSize);
    if (gx < 1) gx = 1;
    if (gy < 1) gy = 1;
    dim3 gridSize;
    gridSize.x = gx;
    gridSize.y = gy;
    // Mostly debug info, but possibly interesting
    int numThreads = (blockSize * gx) * (blockSize * gy);
    printf("Image size: %d x %d (%d Pixels)\n", Nx, Ny, Nx * Ny);
    printf("Total number of threads: %d\n", numThreads);
    float factor = numThreads / (float) (1024 * 1024);
    size_t deviceLimit;
    gpuErrChk(cudaDeviceGetLimit(&deviceLimit, cudaLimitStackSize));
    printf("Original Device stack size: %d\n", (int) deviceLimit);
    printf("Total Device stack memory: %0.2f MB\n", (int) deviceLimit * factor);
    // Recursion needs a larger per-thread stack than the default.
    // (Also relevant for images larger than 400 x 400 or so.)
    gpuErrChk(cudaDeviceSetLimit(cudaLimitStackSize, 4096));
    gpuErrChk(cudaDeviceGetLimit(&deviceLimit, cudaLimitStackSize));
    printf("New Device stack size: %d\n", (int) deviceLimit);
    printf("Total Device stack memory: %0.2f MB\n", (int) deviceLimit * factor);
    // Per-pixel scratch: 26 doubles for the primary ray, 32 for lighting().
    double *rayDoubles;
    gpuErrChk(cudaMalloc(&rayDoubles, sizeof(double) * Nx * Ny * 26));
    gpuErrChk(cudaMemset(rayDoubles, 0, sizeof(double) * Nx * Ny * 26));
    double *lightDoubles;
    gpuErrChk(cudaMalloc(&lightDoubles, sizeof(double) * Nx * Ny * 32));
    gpuErrChk(cudaMemset(lightDoubles, 0, sizeof(double) * Nx * Ny * 32));
    raytraceKernel<<<gridSize, blocks>>>(grid, objects, lightsPPM, data,
                                         bgColor, e1, e2, e3, lookFrom,
                                         rayDoubles, lightDoubles, Nx, Ny,
                                         antiAliased);
    gpuErrChk(cudaPeekAtLastError());      // launch-config errors
    gpuErrChk(cudaDeviceSynchronize());    // execution errors
    gpuErrChk(cudaFree(rayDoubles));
    gpuErrChk(cudaFree(lightDoubles));
}
|
12,441 | /******************************************************************************/
/* CUDA Sample Program (Vector add) monotone-RK 2014.08.21 */
/******************************************************************************/
#include <stdio.h>
#include <vector>
// Elementwise vector add: c[i] = a[i] + b[i].
// Launched with one block of exactly `num` threads, so threadIdx.x alone
// indexes the arrays.
__global__
void vecadd(float *a, float *b, float *c) {
  const int i = threadIdx.x;
  c[i] = a[i] + b[i];
}
// Sample driver: adds two 16-element vectors on the GPU and prints the result.
// Adds basic error checking (the original ignored every CUDA return code, so
// a failed allocation or launch produced all-zero output silently).
int main(int argc, char *argv[]) {
  const int num = 16;
  std::vector<float> a(num, 1.0);
  std::vector<float> b(num, 2.0);
  std::vector<float> c(num, 0.0);
  float *d_a;
  float *d_b;
  float *d_c;
  if (cudaMalloc(&d_a, num * sizeof(float)) != cudaSuccess ||
      cudaMalloc(&d_b, num * sizeof(float)) != cudaSuccess ||
      cudaMalloc(&d_c, num * sizeof(float)) != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed\n");
    return 1;
  }
  cudaMemcpy(d_a, &a[0], num*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, &b[0], num*sizeof(float), cudaMemcpyHostToDevice);
  dim3 grid_size = dim3(1, 1, 1);   // determine the number of blocks
  dim3 block_size = dim3(num, 1, 1); // determine the number of threads
  vecadd<<<grid_size, block_size>>>(d_a, d_b, d_c);
  cudaError_t err = cudaGetLastError();  // catches bad launch configurations
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  // cudaMemcpy is a synchronizing call, so the kernel has finished by the
  // time the copy returns.
  cudaMemcpy(&c[0], d_c, num*sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  for (int i=0; i<num; i++) printf("c[%2d]: %f\n", i, c[i]);
  return 0;
}
12,442 | #include "includes.h"
// Applies a 20-tap 1D stencil: dst[idx] = sum of src[idx-10 .. idx+9]
// weighted by stencilWeight[0..19].
// NOTE(review): the guard only checks idx < size, but the loop reads up to
// src[idx + 9] — an out-of-bounds read for idx > size - 10 unless src is
// padded by the caller; confirm the allocation includes a halo.
// NOTE(review): the tap range -10..9 is asymmetric (i < 10, not i <= 10);
// if stencilWeight has 21 entries the last tap is silently dropped — verify
// the intended window size.
__global__ void stencilReadOnly2(float *src, float *dst, int size, float* stencilWeight)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Offset so the leftmost tap (idx - 10) stays at index >= 1.
    idx += 11;
    if (idx >= size)
        return;
    float out = 0;
    #pragma unroll
    for(int i = -10;i < 10; i++)
    {
        out += src[idx+i] * stencilWeight[i+10];
    }
    dst[idx] = out;
}
12,443 | #include "includes.h"
// Adds a bias-like source vector onto dest, element by element.
// NOTE(review): the commented-out line suggests the intended source index was
// index % bias_dim; using src[threadIdx.x] instead is only equivalent when
// blockDim.x == bias_dim — confirm the launch configuration guarantees that,
// otherwise the wrong bias element is added past the first block row.
__global__ void dense_add_conv(size_t sz, float_t* src, float_t* dest, size_t bias_dim)
{
    size_t index = blockIdx.x*blockDim.x + threadIdx.x;
    // size_t src_index = index%bias_dim;
    if(index < sz)
    {
        dest[index] += src[threadIdx.x];
    }
}
12,444 | #include "includes.h"
/*
Detected 1 CUDA Capable device(s)
Device 0: "GeForce GT 320M"
CUDA Driver Version / Runtime Version 5.0 / 5.0
CUDA Capability Major/Minor version number: 1.2
Total amount of global memory: 1024 MBytes (1073741824 bytes)
( 3) Multiprocessors x ( 8) CUDA Cores/MP: 24 CUDA Cores
GPU Clock rate: 1100 MHz (1.10 GHz)
Memory Clock rate: 790 Mhz
Memory Bus Width: 128-bit
Max Texture Dimension Size (x,y,z) 1D=(8192), 2D=(65536,32768), 3D=(2048,2048,2048)
Max Layered Texture Size (dim) x layers 1D=(8192) x 512, 2D=(8192,8192) x 512
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 16384 bytes
Total number of registers available per block: 16384
Warp size: 32
Maximum number of threads per multiprocessor: 1024
Maximum number of threads per block: 512
Maximum sizes of each dimension of a block: 512 x 512 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 1
Maximum memory pitch: 2147483647 bytes
Texture alignment: 256 bytes
Concurrent copy and kernel execution: Yes with 1 copy engine(s)
Run time limit on kernels: Yes
Integrated GPU sharing Host Memory: No
Support host page-locked memory mapping: Yes
Alignment requirement for Surfaces: Yes
Device has ECC support: Disabled
CUDA Device Driver Mode (TCC or WDDM): WDDM (Windows Display Driver Model)
Device supports Unified Addressing (UVA): No
Device PCI Bus ID / PCI location ID: 2 / 0
Compute Mode:
< Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >
deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.0, CUDA Runtime Version = 5.0, NumDevs = 1, Device0 = GeForce GT 320M
*/
// Step 1 of a letter-frequency count: each thread tallies characters into its
// own slice of shared memory, the block reduces the per-thread tallies, and
// the block total is atomically added to the global counters d_freq[0..25].
// Layout: 27 ints per thread (26 counters + 1 pad slot; the stride of 27
// presumably staggers bank accesses — original comment was garbled/encoded).
// Assumes blockDim.x == 128 (3456 = 27 * 128, and the reduction starts at 64).
// Assumes d_dat contains only 'a'..'z' (see the disabled range check below).
__global__ void freqencyStep1(char *d_dat,int len, int *d_freq)
{// Step one: accumulate into shared memory per thread, then fold the block
 // total into global memory.  (An alternative layout would store the slices
 // column-wise and reduce with only 26 threads; original comments describing
 // it were mojibake and are paraphrased here.)
    __shared__ int sfreq[3456];//27*128  -- one 27-int slice per thread
    for(int i=threadIdx.x ;i< 3456;i += blockDim.x)
        sfreq[i] = 0;// clear the counters first
    __syncthreads();
    int *myfreq = &sfreq[27*threadIdx.x];
    int gridsize = blockDim.x * gridDim.x;
    // Grid-stride tally into this thread's private slice.
    for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
        //if((d_dat[i]>='a')&&(d_dat[i]<='z'))// enable if input may contain non a-z bytes
        myfreq[d_dat[i]-'a']++;
    __syncthreads();/// each thread has now tallied into its own slice
    /// Tree reduction: fold the upper half of slices into the lower half.
    for(int roll = 64;roll>=1; roll>>=1)
    {
        if(threadIdx.x <roll)
        {
            for(int i=0;i<26;i++)
                myfreq[i] += sfreq[27*(threadIdx.x+roll)+i];
        }
        __syncthreads();
    }
#if 0
    // Disabled: manually unrolled version of the reduction loop above.
    if(threadIdx.x<64)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+64)+i];
    }
    __syncthreads();
    if(threadIdx.x<32)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+32)+i];
    }
    __syncthreads();
    if(threadIdx.x<16)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+16)+i];
    }
    if(threadIdx.x< 8)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+ 8)+i];
    }
    if(threadIdx.x< 4)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+ 4)+i];
    }
    if(threadIdx.x< 2)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x+ 2)+i];
    }
    if(threadIdx.x == 0)
    {
        for(int i=0;i<26;i++)
            myfreq[i] += sfreq[27*(threadIdx.x )+i];
    }
#endif
    __syncthreads();
    // Block total now lives in thread 0's slice (sfreq[0..25]); fold it into
    // global memory.  32-bit global atomics require compute capability 1.1+.
    if(threadIdx.x<26)
        atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
12,445 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <thrust/for_each.h>
typedef unsigned int uint;
typedef unsigned char uchar;
// Aborts the process with a diagnostic (file, line, CUDA error string) when a
// CUDA API call did not return cudaSuccess.
void checkCudaError(cudaError_t error, const char* const filename, const int linenum)
{
    if (error == cudaSuccess)
        return;
    fprintf(stderr, "File %s, line %d, CUDA error: %s\n", filename, linenum, cudaGetErrorString(error));
    exit(-1);
}
#define CHECK_CUDA_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
///////////////////////////////////////////////////////
// CPU methods
// Reference 256-bin histogram on the CPU.  Increments hist[src[i]] for each
// of the N input bytes; hist must be pre-zeroed by the caller.
void histOnCPU(const uchar* const src, uint* hist, const int N)
{
    for (int i = 0; i < N; ++i)
        ++hist[src[i]];
}
// True when the first N bins of both histograms are byte-identical.
bool checkCorrectness(const uint* hist1, const uint* hist2, const int N)
{
    return memcmp(hist1, hist2, N * sizeof(uint)) == 0;
}
/////////////////////////////////////////////////////
// Naive histogram: one input byte per thread, one global atomic per byte.
__global__ void histKernel_1(const uchar* src, uint* hist, int N)
{
    const int gid = blockIdx.x*blockDim.x+threadIdx.x;
    if (gid < N)
        atomicAdd(&hist[src[gid]], 1);
}
// once read 32x4 = 128 byte
__global__ void histKernel_2(const uchar* src, uint* hist, int N)
{
int index = (blockIdx.x*blockDim.x+threadIdx.x)*4;
if(index >= N)
return;
uchar val[4];
val[0] = src[index];
val[1] = src[index+1];
val[2] = src[index+2];
val[3] = src[index+3];
atomicAdd(&hist[val[0]], 1);
atomicAdd(&hist[val[1]], 1);
atomicAdd(&hist[val[2]], 1);
atomicAdd(&hist[val[3]], 1);
}
//using shared memory
// Shared-memory histogram: each block builds a private 256-bin histogram and
// flushes it to the global one with a single atomicAdd per bin.
// Requires blockDim.x == 256 (one thread per bin for init and flush).
// FIXES vs. the original:
//  - the __shared__ array is declared inside the kernel (function scope),
//    where CUDA requires it, instead of at file scope;
//  - no early `return` before the barriers: threads past the end of src must
//    still reach every __syncthreads() (returning early was UB) and must
//    still zero/flush their bin;
//  - the shared histogram is zeroed BEFORE any thread accumulates into it;
//  - the tail is guarded when N is not a multiple of 4.
__global__ void histKernel_3(const uchar* src, uint* hist, int N)
{
    __shared__ uint histTmp[256];
    const int index = (blockIdx.x * blockDim.x + threadIdx.x) * 4;
    histTmp[threadIdx.x] = 0;
    __syncthreads();                       // bins zeroed before accumulation
    if (index < N) {
        for (int k = 0; k < 4 && index + k < N; ++k)
            atomicAdd(&histTmp[src[index + k]], 1);
    }
    __syncthreads();                       // block histogram complete
    atomicAdd(&hist[threadIdx.x], histTmp[threadIdx.x]);
}
// Computes the 256-bin histogram of src[0..N) on the GPU into `hist`, then
// validates it against a CPU reference and prints "Correct"/"Error".
// FIXES vs. the original: the histCPU buffer was leaked, the kernel launch
// was never checked for errors, and the reference memset used a literal
// 256*sizeof(int) instead of nByteHist.
void computeHist(const uchar* src, uint* hist, int N)
{
    const int threadPerBlock = 256;
    const int nByteSrc = sizeof(uchar) * N;
    const int nByteHist = sizeof(uint) * 256;
    uchar* dev_src;
    uint* dev_hist;
    CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_src, nByteSrc));
    CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_hist, nByteHist));
    CHECK_CUDA_ERROR(cudaMemcpy(dev_src, src, nByteSrc, cudaMemcpyHostToDevice));
    CHECK_CUDA_ERROR(cudaMemset(dev_hist, 0, nByteHist));
    // Alternative kernels kept for experimentation:
    // histKernel_1<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
    // histKernel_3<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
    histKernel_2<<<(N+4*threadPerBlock-1)/(4*threadPerBlock), threadPerBlock>>>(dev_src, dev_hist, N);
    CHECK_CUDA_ERROR(cudaGetLastError());   // catch launch-configuration errors
    CHECK_CUDA_ERROR(cudaMemcpy(hist, dev_hist, nByteHist, cudaMemcpyDeviceToHost));
    CHECK_CUDA_ERROR(cudaFree(dev_src));
    CHECK_CUDA_ERROR(cudaFree(dev_hist));
    uint* histCPU = (uint*)malloc(nByteHist);
    memset(histCPU, 0, nByteHist);
    histOnCPU(src, histCPU, N);
    if (checkCorrectness(hist, histCPU, 256))
        printf("Correct\n");
    else
        printf("Error\n");
    free(histCPU);   // was leaked in the original
}
// Seeds libc rand() with the current time and fills src[0..N) with
// pseudo-random byte values 0..255.
void randomFillArray(uchar* src, int N)
{
    srand(time(NULL));
    for (int idx = 0; idx < N; ++idx)
        src[idx] = (uchar)(rand() % 256);
}
// Driver: builds a random 256-byte input, histograms it on the GPU
// (computeHist also validates against the CPU), then releases the buffers.
int main()
{
    const int N = 256;
    uchar* src  = (uchar*)malloc(sizeof(uchar) * N);
    uint*  hist = (uint*)malloc(256 * sizeof(uint));
    randomFillArray(src, N);
    computeHist(src, hist, N);
    free(src);
    free(hist);
    return 0;
}
|
12,446 | #include <stdio.h>
#include <cuda_runtime.h>
// Debug kernel: every thread prints its own thread and block coordinates.
// Device-side printf output is flushed at the next host synchronization.
__global__ void checkIdx() {
    printf("threadIdx (%d,%d,%d), gridIdx (%d,%d,%d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z);
}
// Prints the launch geometry for nElem elements, then launches the checkIdx
// debug kernel.
// FIX: the execution configuration is <<<grid, block>>>; the original passed
// <<<dimBlock, dimGrid>>>. With nElem = 15 both happen to be (4,1,1), so the
// bug was latent, but any other nElem would launch the wrong shape. A
// cudaDeviceSynchronize() is added so all device printf output is flushed
// before the device is reset.
int main(){
    int nElem = 15;
    dim3 dimBlock(4,1,1);
    dim3 dimGrid( (nElem + dimBlock.x - 1)/dimBlock.x, 1, 1);
    printf("blockdim = (%d, %d, %d)\n", dimBlock.x, dimBlock.y, dimBlock.z);
    printf("griddim = (%d, %d, %d)\n", dimGrid.x, dimGrid.y, dimGrid.z);
    checkIdx<<<dimGrid, dimBlock>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
12,447 | #include "includes.h"
// Element-wise tanh backward pass over a rows x cols matrix:
// pre_grad *= (1 - y*y), where `output` is assumed to hold the saved forward
// activation y. One thread per element on a 2D launch; out-of-range threads
// do nothing.
__global__ void tanh_grad(float *pre_grad, float *output, int rows, int cols) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < rows && col < cols) {
        const int at = row * cols + col;
        const float y = output[at];
        pre_grad[at] *= 1 - y * y;
    }
}
12,448 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 24
// Tiled matrix multiply devP = devM * devN for square width x width matrices.
// Each block computes one BLOCK_SIZE x BLOCK_SIZE tile of devP via
// shared-memory staging. FIXES vs. the original:
//  - widths that are not a multiple of BLOCK_SIZE are handled (partial tiles
//    are zero-padded and the final store is guarded) -- the original silently
//    dropped the remainder (e.g. width 1600 with BLOCK_SIZE 24);
//  - one barrier per tile phase instead of one per inner-product step (the
//    per-step __syncthreads() inside the accumulate loop was redundant and
//    serialized the whole block 24x per tile).
__global__ void square_dgemm(float* devM, float* devN, float* devP, int width)
{
    __shared__ float A[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float B[BLOCK_SIZE][BLOCK_SIZE];
    const int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    const int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    float sum = 0.0f;
    const int numTiles = (width + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (int t = 0; t < numTiles; ++t) {
        const int aCol = t * BLOCK_SIZE + threadIdx.x;
        const int bRow = t * BLOCK_SIZE + threadIdx.y;
        // Zero-pad out-of-range elements so partial tiles contribute nothing.
        A[threadIdx.y][threadIdx.x] = (row < width && aCol < width) ? devM[row * width + aCol] : 0.0f;
        B[threadIdx.y][threadIdx.x] = (bRow < width && col < width) ? devN[bRow * width + col] : 0.0f;
        __syncthreads();                        // both tiles fully loaded
        for (int j = 0; j < BLOCK_SIZE; ++j)
            sum += A[threadIdx.y][j] * B[j][threadIdx.x];
        __syncthreads();                        // done reading before next load
    }
    if (row < width && col < width)
        devP[row * width + col] = sum;
}
// Returns seconds elapsed since the first call to timer() in this process
// (the first call itself returns an interval close to zero). Uses static
// state, so all callers share the same epoch.
double timer()
{
    static bool initialized = false;
    static struct timeval start_event;
    struct timeval now;
    if (!initialized)
    {
        gettimeofday(&start_event, NULL);
        initialized = true;
    }
    gettimeofday(&now, NULL);
    return (now.tv_sec - start_event.tv_sec)
         + 1.0e-6 * (now.tv_usec - start_event.tv_usec);
}
// Fills p[0..n) with uniform random values in [-1, 1) drawn from drand48().
void fill( float *p, int n){
    int idx = 0;
    while (idx < n) {
        p[idx] = 2 * (float) drand48() - 1;
        ++idx;
    }
}
// Returns false iff any of the n*k entries of C is NaN (x != x holds only
// for NaN). Note: despite the name, this does NOT compare against a
// reference result -- it is a sanity check for NaN contamination only.
bool check( float *C, int n, int k) {
    const int total = n * k;
    for (int flat = 0; flat < total; ++flat) {
        const float v = C[flat];
        if (v != v)
            return false;
    }
    return true;
}
// Driver: fills three n x n matrices, runs the tiled GEMM kernel repeatedly,
// and reports copy time, compute time and GFLOP/s.
// NOTE(review): n = 1600 is NOT a multiple of BLOCK_SIZE (24); the 66x66 grid
// covers only a 1584x1584 sub-matrix and the kernel's width/BLOCK_SIZE tile
// loop drops the remainder, so the result is incomplete -- confirm intent.
// NOTE(review): the timing loop re-assigns time_cpu on EVERY iteration
// (time_cpu = timer() - time_cpu), and the asynchronous launches are never
// followed by a synchronize before reading the clock, so the printed times
// and GFLOP/s figures are not reliable kernel-execution measurements.
int main( int argc, char **argv )
{
int n = 1600;
int m = 1600;
int k = 1600;
float *A, *B, *C;
// Mem aloc
A = (float *)malloc( n * n * sizeof(float) );
B = (float *)malloc( n * n * sizeof(float) );
C = (float *)malloc( n * n * sizeof(float) );
float *A_cuda, *B_cuda, *C_cuda;
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
// One BLOCK_SIZE x BLOCK_SIZE block per output tile (integer division:
// remainder rows/columns get no block -- see note above).
dim3 dimGrid((n / BLOCK_SIZE), (n / BLOCK_SIZE));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
cudaMalloc((void **) &A_cuda, sizeof(float) * n * m);
cudaMalloc((void **) &B_cuda, sizeof(float) * k * n);
cudaMalloc((void **) &C_cuda, sizeof(float) * k * m);
fill(A, n * n);
fill(B, n * n);
fill(C, n * n);
// Timer: copy time
double time_cpu = -1.0;
double time_total = timer();
cudaMemcpy(A_cuda, A, sizeof(float) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(B_cuda, B, sizeof(float) * m * n, cudaMemcpyHostToDevice);
double time_copy = timer() - time_total;
time_total = timer() - time_total;
// Timer: CPU time; Gflops -- doubles `fresh` until the measured time
// exceeds 0.1 s (but see the time_cpu note above).
double Gigaflops = 0.0, Gigaflops_noCopy = 0.0;
for (int fresh = 1; time_cpu < 0.1; fresh *= 2){
square_dgemm<<<dimGrid,dimBlock>>>(A_cuda, B_cuda, C_cuda, n);
time_cpu = timer();
for(int i = 0; i < fresh; i++){
square_dgemm<<<dimGrid,dimBlock>>>(A_cuda, B_cuda, C_cuda, n);
time_cpu = timer() - time_cpu;
}
Gigaflops_noCopy = (2e-9 * n * n * n * fresh) / (time_cpu);
}
cudaMemcpy(C, C_cuda, sizeof(float) * m * k, cudaMemcpyDeviceToHost);
time_total += time_cpu;
Gigaflops = (Gigaflops_noCopy * time_cpu) / (time_total);
cudaThreadSynchronize();
cudaEventSynchronize(stop_event);
// Info
printf("Total CPU time is %f s\n", time_total);
printf("GPU CPU time is %f s\n", time_cpu);
printf("Copy time is %f s\n", time_copy);
printf("Total GPU Gigaflops is %f \n", Gigaflops);
printf("No copy GPU Gigaflops is %f \n", Gigaflops_noCopy);
// Check -- NaN scan only; not a correctness comparison (see check()).
bool check_matrix = check(C, m, k);
if (!check_matrix){
printf("Wrong\n");
}
cudaFree( A_cuda );
cudaFree( B_cuda );
cudaFree( C_cuda );
free(A);
free(B);
free(C);
return 0;
}
12,449 | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
// GPUVerify-style test kernel (see the //pass / --blockDim / --gridDim
// pragmas above): each thread reads its neighbours at idx+1 and idx+2 and
// folds them into A[idx].
// NOTE(review): A[idx+1] and A[idx+2] read past the end of A for the last
// threads unless the buffer is over-allocated by two elements -- presumably
// intentional for this verification case; confirm against the harness.
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int alpha = A[idx + 1];
if (idx >= 0)
{
int temp2 = A[idx + 2];
A[idx] += temp2;
}
A[idx] += alpha;
}
12,450 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>
/*
Aim : To benchmark the GPU in terms of FLOPS and IOPS with.
Description : This program finds the FLOPS and IOPS of GPU.
The gpuIntFun and gpuFloatFun are the functions which perform the operation on GPU environment.
The host variables are first declared and initialized. Same number of variables are allocated memory on GPU,
and data is copied fro host to device variables.
The result variable is the copied back to host.
Contributor : Vivek Pabani (A20332117)
*/
#define LIMIT 500
// Element-wise integer add c = a + b. One thread per element; the host sizes
// the buffers as blocks * threads and launches exactly that many threads, so
// no bounds guard is applied here.
__global__ void gpuIntFun(int *a, int *b, int *c)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = a[gid] + b[gid];
}
// Element-wise float add c = a + b (same layout contract as gpuIntFun).
__global__ void gpuFloatFun(float *a, float *b, float *c)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = a[gid] + b[gid];
}
/*
 * Benchmarks integer (IOPS) and floating-point (FLOPS) add throughput by
 * timing LIMIT repeated element-wise kernels sized from the device limits.
 * FIXES vs. the original:
 *  - the per-block thread count is clamped to maxThreadsPerBlock (the old
 *    code launched maxThreadsPerMultiProcessor threads per block, e.g. 2048,
 *    which exceeds the launch limit so every launch failed silently);
 *  - a cudaDeviceSynchronize() before the stop timestamp so the loop measures
 *    kernel execution rather than asynchronous launch overhead;
 *  - the float result copy used sizeof(int) (same byte count, wrong type);
 *  - host buffers are freed and cudaGetDeviceCount is checked.
 */
int main(void)
{
int numberOfThreads;
int blocks, threads;
int i=0;
int choice=0;
double time_s;
long start_time,end_time;
struct timeval start,stop;
struct cudaDeviceProp gpuDetails;
int numberOfDevices, device=0;
cudaError_t cudaResultCode = cudaGetDeviceCount(&numberOfDevices);
if(cudaResultCode != cudaSuccess)
{
printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(cudaResultCode));
return 1;
}
//Inspect every device; keep the launch geometry of the last usable one.
for (device = 0; device < numberOfDevices; ++device) {
cudaGetDeviceProperties(&gpuDetails, device);
//To set device variables to be used for thread creation.
if (gpuDetails.major != 9999)
{
blocks=gpuDetails.multiProcessorCount;
threads=gpuDetails.maxThreadsPerMultiProcessor;
//FIX: clamp to a legal per-block thread count while keeping the same total.
if (threads > gpuDetails.maxThreadsPerBlock)
{
blocks = blocks * (threads / gpuDetails.maxThreadsPerBlock);
threads = gpuDetails.maxThreadsPerBlock;
}
numberOfThreads=blocks * threads;
}
}
printf("Processor Count %d\n",blocks);
printf("Thread per Processor %d\n",threads);
printf("Total Threads %d\n",numberOfThreads);
/*
choice
0 - IOPS
1 - FLOPS
*/
for(choice=0; choice<2; ++choice)
{
if(choice == 0)
{
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;
//assign memory to host variables.
host_a = (int*) malloc(numberOfThreads*sizeof(int));
host_b = (int*) malloc(numberOfThreads*sizeof(int));
host_c = (int*) malloc(numberOfThreads*sizeof(int));
//Initialize host variables.
for(i=0;i<numberOfThreads;++i)
{
host_a[i] = (i*25)+25;
host_b[i] = (i*36)+36;
}
//assign memory to device variables
cudaMalloc((void**)&dev_a, numberOfThreads*sizeof(int));
cudaMalloc((void**)&dev_b, numberOfThreads*sizeof(int));
cudaMalloc((void**)&dev_c, numberOfThreads*sizeof(int));
// copy variables to device memory
cudaMemcpy(dev_a,host_a,numberOfThreads*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,host_b,numberOfThreads*sizeof(int),cudaMemcpyHostToDevice);
//Calculation Time Starts
gettimeofday(&start,NULL);
start_time=start.tv_sec*1000000 + start.tv_usec;
for (i=0; i<LIMIT; ++i)
{
gpuIntFun<<<blocks,threads>>>(dev_a,dev_b,dev_c);
}
cudaDeviceSynchronize(); //FIX: launches are async; wait before stopping the clock
gettimeofday(&stop,NULL);
//Calculation Time Ends
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
cudaMemcpy(host_c,dev_c,numberOfThreads*sizeof(int),cudaMemcpyDeviceToHost);
time_s=end_time-start_time;
printf("\nTime taken: %lf",time_s);
printf("\nIOPS: %lf",(double)(LIMIT*numberOfThreads*1.024*0.1048576)/(time_s*2.0));
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
free(host_a);
free(host_b);
free(host_c);
}
else if(choice == 1)
{
float *host_a, *host_b, *host_c;
float *dev_a, *dev_b, *dev_c;
host_a = (float*) malloc(numberOfThreads*sizeof(float));
host_b = (float*) malloc(numberOfThreads*sizeof(float));
host_c = (float*) malloc(numberOfThreads*sizeof(float));
for(i=0;i<numberOfThreads;++i)
{
host_a[i] = (i*25.5)+25.5;
host_b[i] = (i*36.6)+36.6;
}
cudaMalloc((void**)&dev_a, numberOfThreads*sizeof(float));
cudaMalloc((void**)&dev_b, numberOfThreads*sizeof(float));
cudaMalloc((void**)&dev_c, numberOfThreads*sizeof(float));
cudaMemcpy(dev_a,host_a,numberOfThreads*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,host_b,numberOfThreads*sizeof(float),cudaMemcpyHostToDevice);
gettimeofday(&start,NULL);
start_time=start.tv_sec*1000000 + start.tv_usec;
for (i=0; i<LIMIT; ++i)
{
gpuFloatFun<<<blocks,threads>>>(dev_a,dev_b,dev_c);
}
cudaDeviceSynchronize(); //FIX: see integer branch
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
cudaMemcpy(host_c,dev_c,numberOfThreads*sizeof(float),cudaMemcpyDeviceToHost); //FIX: was sizeof(int)
time_s=end_time-start_time;
printf("\nTime taken: %lf",time_s);
printf("\nGFLOPS: %lf\n",(double)(LIMIT*numberOfThreads*1.024*0.1048576)/(time_s*2.0));
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
free(host_a);
free(host_b);
free(host_c);
}
}
return 0;
}
12,451 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
// Element-wise matrix add: one block per row, one thread per column.
__global__ void addmat(int *a, int *b, int *c)
{
    const int col  = threadIdx.x;
    const int row  = blockIdx.x;
    const int cols = blockDim.x;
    const int at   = row * cols + col;
    c[at] = a[at] + b[at];
}
// One thread per row: thread `row` adds all n elements of its row.
__global__ void addrow (int *A, int *B, int *C,int n) {
    const int row = threadIdx.x;
    printf("idx = %d\n", row);
    for (int col = 0; col < n; ++col) {
        const int at = col + n * row;
        C[at] = A[at] + B[at];
    }
}
// One thread per column: thread `col` adds all m elements of its column
// (blockDim.x is the number of columns).
__global__ void addcol(int *A, int *B, int *C,int m) {
    const int col   = threadIdx.x;
    const int ncols = blockDim.x;
    printf("idx = %d\n", col);
    for (int row = 0; row < m; ++row) {
        const int at = row * ncols + col;
        C[at] = A[at] + B[at];
    }
}
// Adds two 4 x 2 matrices three ways (per-row, per-column and per-element
// kernels) and prints each result.
// FIXES vs. the original: d_c2 was never cudaFree'd and the host buffers
// c/c1/c2 were leaked.
int main(void)
{
int a[8]={1,2,3,4,5,6,1,2},b[8]={1,2,3,4,5,6,1,2},*c,*c1,*c2,m=4,n=2,i,j;
int *d_a,*d_b,*d_c,*d_c1,*d_c2;
int size=sizeof(int)*m*n;
c=(int*)malloc(m*n*sizeof(int));
c1=(int*)malloc(m*n*sizeof(int));
c2=(int*)malloc(m*n*sizeof(int));
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void**)&d_c,size);
cudaMalloc((void**)&d_c1,size);
cudaMalloc((void**)&d_c2,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
// One thread per row (m threads), each adding n elements.
addrow<<<1, m>>>(d_a, d_b, d_c,n);
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
printf("Result matrix using computation using each row is:\n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
printf("%d\t",c[i*n+j]);
printf("\n");
}
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
// One thread per column (n threads), each adding m elements.
addcol<<<1,n>>>(d_a,d_b,d_c2,m);
cudaMemcpy(c2,d_c2,size,cudaMemcpyDeviceToHost);
printf("Result matrix using computation using each column is:\n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
printf("%d\t",c2[i*n+j]);
printf("\n");
}
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
// One thread per element: m blocks of n threads.
addmat<<<m,n>>>(d_a,d_b,d_c1);
cudaMemcpy(c1,d_c1,size,cudaMemcpyDeviceToHost);
printf("Result matrix using computation using each element is:\n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
printf("%d\t",c1[i*n+j]);
printf("\n");
}
getchar();
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_c1);
cudaFree(d_c2); // was leaked
free(c);
free(c1);
free(c2);
return 0;
}
|
12,452 |
// Reads the cell at (x+dx, y+dy) with toroidal wrap-around on both axes
// (adding domain_x/domain_y before the modulo keeps negative offsets valid).
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y){
    const unsigned int wx = (unsigned int)(x + dx + domain_x) % domain_x;
    const unsigned int wy = (unsigned int)(y + dy + domain_y) % domain_y;
    return source_domain[wy * domain_x + wx];
}
// Writes `val` into the cell at (x+dx, y+dy), wrapping the same way.
__device__ void write_cell(int * source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y, int val){
    const unsigned int wx = (unsigned int)(x + dx + domain_x) % domain_x;
    const unsigned int wy = (unsigned int)(y + dy + domain_y) % domain_y;
    source_domain[wy * domain_x + wx] = val;
}
// 1 cell per thread, 1 cell per word kernel
// 1 cell per thread, 1 cell per word kernel (two-colour Game of Life:
// 0 = dead, 1 = red, 2 = blue; toroidal domain).
// FIX: the original wrote the new state back into source_domain while other
// threads were still reading their neighbours from it -- a read/write race --
// and left dest_domain entirely unused. The result is now written to
// dest_domain, as the kernel signature intends. The unused shared-memory
// staging (written, synced, never read) was removed.
__global__ void life_kernel(int * source_domain, int * dest_domain, int domain_x, int domain_y) {
    const int tx = blockIdx.x * (blockDim.x-2) + threadIdx.x-1;
    const int ty = blockIdx.y * (blockDim.y-2) + threadIdx.y-1;
    const int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);
    // Count red (1) and blue (2) cells among the 8 neighbours.
    int num_red = 0;
    int num_blue = 0;
    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            if (dx == 0 && dy == 0) continue;
            const int cell = read_cell(source_domain, tx, ty, dx, dy, domain_x, domain_y);
            num_red  += (cell == 1);
            num_blue += (cell == 2);
        }
    }
    const int tot = num_red + num_blue;
    int newVal = myself;                         // survive by default
    if (myself == 0 && tot == 3)                 // birth: majority colour
        newVal = (num_blue > num_red) ? 2 : 1;
    else if (myself != 0 && (tot > 3 || tot < 2))// death: over/under-population
        newVal = 0;
    write_cell(dest_domain, tx, ty, 0, 0, domain_x, domain_y, newVal);
}
|
12,453 | #include "includes.h"
// Updates psink in place: psink += w*(sink - source + div - label*iCC).
// CUDASTDOFFSET is a project-provided global-thread-index macro (includes.h).
// FIX: the original loaded psink/sink/source/div/label BEFORE the idx < size
// bounds check, so threads past the end of the buffers performed
// out-of-bounds reads; all memory accesses are now inside the guard.
__global__ void kern_PushUpSourceFlows(float* psink, float* sink, float* source, float* div, float* label, float w, float iCC, int size)
{
    int idx = CUDASTDOFFSET;
    if( idx < size )
    {
        psink[idx] = psink[idx] + w*(sink[idx] - source[idx] + div[idx] - label[idx] * iCC);
    }
}
12,454 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
/*
Some applications need the host to pick a GPU with particular capabilities.
Fill in the relevant fields of a cudaDeviceProp structure and let
cudaChooseDevice select the closest matching device. Devices of compute
capability >= 1.3 (major = 1, minor = 3) support double precision, so
requesting major 1 / minor 3 yields a double-precision-capable device,
which is then made current with cudaSetDevice.
*/
// Prints the current device ID, then asks the runtime for a device matching
// compute capability 1.3 (double-precision capable) and makes it current.
int main(void) {
int device;
cudaDeviceProp device_property;
cudaGetDevice(&device);
printf("ID of device:%d\n", device);
// Zero the descriptor, then set only the fields we care about; unset fields
// are ignored by cudaChooseDevice.
memset(&device_property, 0, sizeof(cudaDeviceProp));
device_property.major = 1;
device_property.minor = 3;
// Selects the device that best matches the requested properties.
cudaChooseDevice(&device, &device_property);
printf("ID of device which supports double precision is:%d\n", device);
cudaSetDevice(device);
}
12,455 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Element-wise vector add pd = md + nd, guarded against the grid tail.
__global__ void arradd(const int *md, const int *nd, int *pd, int size){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < size)
        pd[gid] = md[gid] + nd[gid];
}
// Adds two 200-element int vectors on the GPU and prints every sum.
int main(){
int num = 200;
size_t size = 200*sizeof(int);
int i = 0;
int *m, *n, *p;
//Allocate host buffers.
m = (int*)malloc(size);
n = (int*)malloc(size);
p = (int*)malloc(size);
for(i=0; i< num; i++)
m[i]=i, n[i]=i/2, p[i]=0;
// Allocate device buffers and copy the inputs over.
int *md, *nd, *pd;
cudaMalloc((void **)&md, size);
// (destination, source, number of bytes, direction)
cudaMemcpy(md, m, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&nd, size);
cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&pd, size);
// pd is written by the kernel; no host-to-device copy is needed for it.
int blocksize = 1024;
int gridsize = (int)ceil((float)num/blocksize);  // ceil-div: 1 block for num=200
arradd<<<gridsize, blocksize>>>(md, nd, pd, num);
// Blocking copy: also synchronizes with the kernel before p is read below.
cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost);
cudaFree(md);
cudaFree(nd);
cudaFree(pd);
for(i=0; i < num; i++)
printf("%d + %d = %d\n", m[i], n[i], p[i]);
free(m);
free(n);
free(p);
// Reset the device and exit.
cudaError_t err = cudaDeviceReset();
if( err != cudaSuccess){
printf("Failed to deinitialize the device error %s \n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
12,456 | /***************************************************************************/
/* Name: cmf3DCut_ovl_kernels.cu
Authors:
Martin Rajchl mrajchl@imaging.robarts.ca
Jing Yuan cn.yuanjing@googlemail.com
Description: Fast Max-Flow Implementation for multilayered min-cut
Inputs
(C_t, para, alpha): C_t - the capacities of n flows
para 0,1 - rows, cols
para 2 - n: the label number
para 3 - the total number of iterations
para 4 - the error criterion
para 5 - cc for ALM
para 6 - steps for each iteration
alpha - the vector of penalty parameters with n-1 elements
Outputs
(u, erriter, num): u - the computed labeling result
erriter - the error at each iteration
num - the iteration on stop
For more details, see the report:
Egil Bae, Jing Yuan, Xue-Cheng Tai and Yuri Boykov
"A FAST CONTINUOUS MAX-FLOW APPROACH TO NON-CONVEX
MULTILABELING PROBLEMS"
Submitted to Math. Comp. (AMS Journals)
*/
/***************************************************************************/
#include <stdio.h>
#define SQR(x) (x)*(x)
#define MAX(a,b) ( a > b ? a : b )
#define MIN(a,b) ( a <= b ? a : b )
#define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 )
#define ABS(x) ( (x) > 0.0 ? x : -(x) )
#define X(iy,ix) (ix)*iNy + iy
#define Xe(iy,ix) (ix)*iNye+ (iy)
#define SQRTgpu sqrt
// Continuous max-flow solver kernels (see file header). All four kernels use
// one thread per voxel of an iNx x iNy x iNz volume, flat-indexed, and only
// operate on interior voxels: the guard skips the last x column, last y row
// and last z slice. Each label layer `id` is offset by id*(iNx*iNy*iNz).
//
// updateP1: builds the gradient-descent residual
//   gk = div - (pt[id] - pt[id+1] + u/cc)
// for the iLab-1 inter-layer flow fields.
__global__ void updateP1(float *gk, float *u, float *pt, float *div,
float *bx, float *by, float *bz,
float cc,
int iNx, int iNy, int iNz, int iLab){
int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
if( ( (idxVolume%iNx) != (iNx-1) ) &&
( (idxVolume/(iNx*iNy)) < (iNz-1) ) &&
( ((idxVolume/iNx)%iNy) != (iNy-1))
){
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
gk[idx] = div[idx] -
( pt[idx] - pt[idx+(iNx*iNy*iNz)] + u[idx]/cc );
}
}
}
// updateP: gradient-descent step on the spatial flows: each component is
// advanced by steps * (forward difference of gk) at the +x/+y/+z neighbours.
__global__ void updateP(float *bx, float *by, float *bz,
float steps,
float *gk,
int iNx, int iNy, int iNz, int iLab){
int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
if( ( (idxVolume%iNx) != (iNx-1) ) &&
( (idxVolume/(iNx*iNy)) < (iNz-1) ) &&
( ((idxVolume/iNx)%iNy) != (iNy-1))
){
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
float currVal = gk[idx];
bx[idx+1] = steps * ( gk[idx+1] - currVal ) + bx[idx+1];
by[idx+iNx] = steps * ( gk[idx+iNx] - currVal ) + by[idx+iNx];
bz[idx+(iNx*iNy)] = steps * (gk[idx+(iNx*iNy)] - currVal) + bz[idx+(iNx*iNy)];
}
}
}
// projStep1alpha: computes the per-voxel projection scale for the penalty
// constraint |b| <= lambda[id]: gk = lambda/|b| when the flow magnitude
// exceeds lambda, else 1 (no shrinking).
__global__ void projStep1alpha(float *bx, float *by, float *bz,
float *lambda,
float *gk,
int iNx, int iNy, int iNz, int iLab){
int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
if( ( (idxVolume%iNx) != (iNx-1) ) &&
( (idxVolume/(iNx*iNy)) < (iNz-1) ) &&
( ((idxVolume/iNx)%iNy) != (iNy-1))
){
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
// Magnitude estimate averaging both face values of each component.
float fpt = SQRTgpu((SQR(bx[idx]) + SQR(bx[idx+1]) +
SQR(by[idx]) + SQR(by[idx+iNx]) +
SQR(bz[idx]) + SQR(bz[idx+(iNx*iNy)]) ) * 0.5f );
gk[idx] = (fpt > lambda[id]) ? lambda[id] / fpt : 1.0f;
}
}
}
// projStep2Total: applies the projection by scaling each face flow with the
// average of the scale factors of its two adjacent voxels.
__global__ void projStep2Total(float *bx, float *by, float *bz,
float *gk,
int iNx, int iNy, int iNz, int iLab){
int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
if( ( (idxVolume%iNx) != (iNx-1) ) &&
( (idxVolume/(iNx*iNy)) < (iNz-1) ) &&
( ((idxVolume/iNx)%iNy) != (iNy-1))
){
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
float gkVal = gk[idx];
bx[idx+1] = ( gk[idx+1] + gkVal ) * 0.5f * bx[idx+1];
by[idx+iNx] = ( gk[idx+iNx] + gkVal ) * 0.5f * by[idx+iNx];
bz[idx+(iNx*iNy)] = ( gk[idx+(iNx*iNy)] + gkVal ) * 0.5f * bz[idx+(iNx*iNy)];
}
}
}
// updatePstMult: per interior voxel (same guard as the kernels above)
//  1) recomputes the divergence of the spatial flows for each layer,
//  2) updates the source/sink flows pt per layer, capped by the capacities
//     Ct (first/last layers are one-sided; interior layers average both
//     neighbouring constraints),
//  3) applies the multiplier (label) update to u and accumulates the
//     absolute change into FPS[idxVolume] for the convergence test.
// NOTE(review): FPS[idxVolume] is written by every thread, including edge
// threads that fail the interior guard -- those entries stay 0.
__global__ void updatePstMult(float *gk, float *u,
float *bx, float *by, float *bz,
float *div, float *pt,
float *Ct,
float *FPS,
float cc,
int iNx, int iNy, int iNz, int iLab){
int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
float fpt = 0.0f;
FPS[idxVolume] = 0.0f;
if( ( (idxVolume%iNx) != (iNx-1) ) &&
( (idxVolume/(iNx*iNy)) < (iNz-1) ) &&
( ((idxVolume/iNx)%iNy) != (iNy-1))
){
// (1) divergence per flow layer (forward-difference stencil).
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
div[idx] = bx[idx+1] - bx[idx] + by[idx+iNx] - by[idx]
+ bz[idx+(iNx*iNy)] - bz[idx];
}
// (2) capacity-constrained flow update per label layer.
for(int id = 0; id < iLab; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
int idx1 = idx - (iNx*iNy*iNz);
if(id == 0){
fpt = div[idx] + pt[idx+(iNx*iNy*iNz)] - u[idx]/cc + 1.0f/cc;
pt[idx] = MIN(fpt, Ct[idx]);
}
else if (id == iLab-1){
fpt = - div[idx1] + pt[idx1] + u[idx1]/cc;
pt[idx] = MIN(fpt, Ct[idx]);
}
else{
fpt = - div[idx1] + pt[idx1] + u[idx1]/cc;
fpt = fpt + div[idx] + pt[idx+(iNx*iNy*iNz)] - u[idx]/cc;
fpt = fpt/2.0f;
pt[idx] = MIN(fpt, Ct[idx]);
}
}
// (3) multiplier update and error accumulation.
for(int id = 0; id < iLab-1; id++){
int idx = idxVolume + id*(iNx*iNy*iNz);
float fpsVal = cc*( div[idx] + pt[idx+(iNx*iNy*iNz)] - pt[idx]);
u[idx] -= fpsVal;
FPS[idxVolume] += ABS(fpsVal);
}
}
}
// One pairwise-reduction step over errorBuffer: element at `base` absorbs the
// element blockSize positions above it; active elements are spaced
// 2*blockSize apart, and out-of-range partners contribute 0.
__global__ void errorAccumulation(float* errorBuffer, unsigned int blockSize, unsigned int arraySize){
    const int base    = (blockSize + blockSize) * (blockIdx.x * blockDim.x + threadIdx.x);
    const int partner = base + blockSize;
    const float lower = (base    < arraySize) ? errorBuffer[base]    : 0.0f;
    const float upper = (partner < arraySize) ? errorBuffer[partner] : 0.0f;
    __syncthreads();
    if (base < arraySize)
        errorBuffer[base] = lower + upper;
}
// NOT FUNCTIONAL YET!
// Replicates interior values onto the x = iNx-1, y = iNy-1 and z >= iNz-1
// boundary faces for every label layer (Neumann-style boundary copy).
// FIX: the original's 2nd and 3rd conditions had operator-precedence bugs --
// `! expr != (iNy-1)` parses as `(!expr) != (iNy-1)` and `! expr < (iNz-1)`
// as `(!expr) < (iNz-1)` -- so they selected the wrong threads (the file
// marked this kernel "NOT FUNCTIONAL YET"). The negations now apply to the
// whole comparison, mirroring the first condition.
__global__ void resolveBoundaryCondtions(float *u, int iNx, int iNy, int iNz, int iLab){
    int idxVolume = blockIdx.x * blockDim.x + threadIdx.x;
    if ((idxVolume % iNx) == (iNx - 1)) {            // last x column
        for (int id = 0; id < iLab; id++) {
            int idx = idxVolume + id*(iNx*iNy*iNz);
            u[idx] = u[idx-1];
        }
    }
    if (((idxVolume / iNx) % iNy) == (iNy - 1)) {    // last y row
        for (int id = 0; id < iLab; id++) {
            int idx = idxVolume + id*(iNx*iNy*iNz);
            u[idx] = u[idx-iNx];
        }
    }
    if ((idxVolume / (iNx*iNy)) >= (iNz - 1)) {      // last z slice
        for (int id = 0; id < iLab; id++) {
            int idx = idxVolume + id*(iNx*iNy*iNz);
            u[idx] = u[idx-(iNx*iNy)];
        }
    }
}
|
12,457 | #include<bits/stdc++.h>
using namespace std;
const int THREADS_PER_BLOCK = 1024;
const int BLOCKS = 50;
const int MAXN = 16;
const int INF = 1e9;
const int MIN_EDGE_WEIGHT = 1;
const int MAX_EDGE_WEIGHT = 10;
long long factorial[MAXN+1];
__managed__ int block_optimal_values[BLOCKS];
// __managed__ int block_optimal_paths[BLOCKS][MAXN+1];
__managed__ int block_optimal_permutation[BLOCKS];
/////////////////// Host Functions ///////////////////
// Uniform random integer in the inclusive range [l, r], using libc rand().
__host__ int random(int l, int r) {
    return l + rand() % (r - l + 1);
}
// Fills the global factorial[] table: factorial[i] = i! for i = 0..MAXN.
__host__ void precompute_factorial() {
    factorial[0] = 1;
    for (int v = 1; v <= MAXN; ++v)
        factorial[v] = factorial[v-1] * v;
}
// Builds a random symmetric N x N weight matrix with a zero diagonal.
__host__ void assign_edge_weights(int* matrix, int N) {
    for (int r = 0 ; r < N ; r++) {
        for (int c = r+1 ; c < N ; c++) {
            const int w = random(MIN_EDGE_WEIGHT, MAX_EDGE_WEIGHT);
            matrix[r*N + c] = w;
            matrix[c*N + r] = w;
        }
        matrix[r*N + r] = 0;
    }
}
// Prints the N x N matrix row by row.
__host__ void print_matrix(int* matrix, int N) {
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++)
            cout << matrix[r*N + c] << " ";
        printf("\n");
    }
}
// Exchanges two ints in place (device-side helper for the permutation code).
__device__ void swap(int &a, int &b) {
    const int t = a;
    a = b;
    b = t;
}
// Iterative n!; valid up to n = 20 before signed 64-bit overflow.
__host__ __device__ long long fact(int n) {
    long long result = 1;
    for (int v = 2; v <= n; ++v)
        result *= v;
    return result;
}
// In-place next lexicographic permutation of arr[0..n) (std::next_permutation
// equivalent). Returns false when arr is already the last permutation
// (descending order), leaving it unchanged in that case.
__device__ bool nxt_permutation(int *arr, int n) {
bool nxt_permutation_possible = false;
// Find the rightmost position fi with arr[fi] < arr[fi+1] (the pivot).
int fi = -1;
for(int i=n-2;i>=0;i--) {
if(arr[i+1] > arr[i]) {
nxt_permutation_possible = true;
fi = i;
break;
}
}
if(!nxt_permutation_possible)return false;
// Find the smallest element right of the pivot that is still > arr[fi].
int next_greater_ele = arr[fi+1], next_greater_ele_ind = fi+1;
for(int i=fi+2;i<n;i++) {
if(arr[i] > arr[fi] && arr[i] < next_greater_ele) {
next_greater_ele = arr[i];
next_greater_ele_ind = i;
}
}
swap(arr[fi],arr[next_greater_ele_ind]);
// Reverse the (descending) suffix after the pivot to make it ascending.
int li = fi+1, ri = n-1;
while(li < ri) {
swap(arr[li],arr[ri]);
li++;
ri--;
}
return true;
}
// Sums the edge weights along the tour arr[0] -> arr[1] -> ... ->
// arr[arrsize-1] using the n x n adjacency matrix (symmetric, so indexing
// [to][from] equals [from][to]).
__device__ int find_path_cost(int* matrix, int* arr, int arrsize, int n) {
    int total = 0;
    for (int step = 1; step < arrsize; ++step)
        total += matrix[arr[step] * n + arr[step - 1]];
    return total;
}
/////////////////// Global Functions ///////////////////
//Input array should be sorted
// Rewrites arr[0..arrsize) -- which must be sorted ascending -- into its n-th
// lexicographic permutation (1-indexed). Returns false when n > arrsize!.
// FIX: the original allocated the scratch buffer with new[] but released it
// with free(), which is undefined behaviour; a fixed-size stack buffer is
// used instead (arrsize <= MAXN, as the original's taken[MAXN] already
// assumed). This also avoids device-heap allocation in kernels.
__host__ __device__ bool nth_permutation(int *arr, int arrsize, long long n) {
    if (n > fact(arrsize)) return false;
    bool taken[MAXN];
    int picked[MAXN];
    for (int i = 0; i < arrsize; i++) taken[i] = false;
    for (int i = 0; i < arrsize; i++) {
        // Determine cn, the 1-based rank (among unused elements) of the
        // element that belongs at position i for permutation number n.
        int cn = 1;
        long long cval = fact(arrsize - 1 - i);
        while (cval < n) {
            cn++;
            cval = (long long)cn * cval;
            cval = (long long)cval / (cn - 1);
        }
        long long pval = cval * (cn - 1) / cn;   // permutations skipped by ranks < cn
        n -= pval;
        // Take the cn-th still-unused element.
        for (int j = 0; j < arrsize; j++) {
            if (!taken[j]) {
                cn--;
                if (cn == 0) {
                    picked[i] = arr[j];
                    taken[j] = true;
                    break;
                }
            }
        }
    }
    for (int i = 0; i < arrsize; i++) arr[i] = picked[i];
    return true;
}
// Brute-force TSP: the (N-1)! permutations of cities 1..N-1 (city 0 fixed as
// start/end) are split evenly across BLOCKS*THREADS_PER_BLOCK threads; each
// thread scans its contiguous permutation range, keeping its best cost and
// the 1-indexed permutation number that achieved it. Thread 0 of each block
// then reduces the per-thread results into the __managed__ block_optimal_*
// arrays read by the host.
// NOTE(review): thread_optimal_permutation is a shared int[] but start_perm
// is a long long -- permutation numbers are silently narrowed for larger N;
// confirm N stays small enough that (N-1)! fits in int.
__global__ void tsp_cuda(int* matrix, int* path, long long* factorials, int N) {
__shared__ int thread_optimal_values[THREADS_PER_BLOCK];
// __shared__ int* thread_optimal_paths[THREADS_PER_BLOCK];
__shared__ int thread_optimal_permutation[THREADS_PER_BLOCK];
int thread = threadIdx.x + blockIdx.x * blockDim.x;
thread_optimal_values[threadIdx.x] = INF;
// thread_optimal_paths[threadIdx.x] = new int[N+1];
long long iter_per_thread = factorials[N-1] / (BLOCKS * THREADS_PER_BLOCK);
// Local copy of the movable cities (path[0] == 0 is the fixed start).
int arr[MAXN-1];
for (int i = 1; i < N; i++) arr[i-1] = path[i];
long long start_perm = (thread * iter_per_thread) + 1;
thread_optimal_permutation[threadIdx.x] = start_perm;
nth_permutation(arr, N-1, start_perm);
// Last thread of all handles the permutations not entirely divisible by the total threads in all blocks
if (thread == (BLOCKS * THREADS_PER_BLOCK) - 1) {
iter_per_thread += factorials[N-1] % (BLOCKS * THREADS_PER_BLOCK);
}
long long iter = 0;
do {
// Build the closed tour 0 -> arr[...] -> 0 and cost it.
int temp_path[MAXN+1];
temp_path[0] = 0;
for (int i = 1; i < N; i++) temp_path[i] = arr[i-1];
temp_path[N] = 0;
int val = find_path_cost(matrix, temp_path, N+1, N);
if(val < thread_optimal_values[threadIdx.x])
{
thread_optimal_values[threadIdx.x] = val;
// for (int i = 0; i < N+1; i++) thread_optimal_paths[threadIdx.x][i] = temp_path[i];
thread_optimal_permutation[threadIdx.x] = start_perm + iter;
}
iter++;
nxt_permutation(arr, N-1);
} while (iter < iter_per_thread);
__syncthreads();
// Block-level reduction by thread 0 (safe: all writes precede the barrier).
if (threadIdx.x == 0) {
int optimal_cost = INF;
for (int i = 0; i < THREADS_PER_BLOCK; i++) {
if (thread_optimal_values[i] < optimal_cost) {
optimal_cost = thread_optimal_values[i];
block_optimal_values[blockIdx.x] = thread_optimal_values[i];
// for (int j = 0; j < N+1; j++) {
// block_optimal_paths[blockIdx.x][j] = thread_optimal_paths[i][j];
// }
block_optimal_permutation[blockIdx.x] = thread_optimal_permutation[i];
}
}
}
}
//////////////////////////////////////////////////////////////
// Host driver: builds a random N-city instance (N from argv[1]), launches the
// brute-force kernel, reduces the per-block results, reconstructs the winning
// tour from its permutation number, and prints timing, path and cost.
// NOTE(review): argv[1] is used unchecked -- running without an argument is
// undefined; stoi also throws on non-numeric input.
int main(int argc, char **argv) {
const int N = stoi(argv[1]);
precompute_factorial();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* matrix = new int[N*N];
int path[N+1];
path[0] = 0;
path[N] = 0;
for (int i = 1; i < N; i++) path[i] = i;
assign_edge_weights(matrix, N);
// print_matrix(matrix, N);
for (int i = 0; i < BLOCKS; i++){
block_optimal_values[i] = INF;
}
int *dev_matrix, *dev_path;
long long *dev_factorial;
int mat_size = N*N*sizeof(int);
int path_size = (N+1)*sizeof(int);
int factorial_size = (MAXN+1)*sizeof(long long);
cudaMalloc((void **)&dev_matrix, mat_size);
cudaMalloc((void **)&dev_path, path_size);
cudaMalloc((void **)&dev_factorial, factorial_size);
cudaEventRecord(start);
// Copy inputs from host to device
cudaMemcpy(dev_matrix, matrix, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_path, path, path_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_factorial, factorial, factorial_size, cudaMemcpyHostToDevice);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
// Launch the TSP kernel
tsp_cuda<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_matrix, dev_path, dev_factorial, N);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
// Reduce the __managed__ per-block results on the host (valid after sync).
int optimal_cost = INF;
long long optimal_permutation;
for (int i = 0; i < BLOCKS; i++) {
if (block_optimal_values[i] < optimal_cost) {
optimal_cost = block_optimal_values[i];
// for (int j = 0; j < N+1; j++) {
// path[j] = block_optimal_paths[i][j];
// }
optimal_permutation = block_optimal_permutation[i];
}
}
// Rebuild the winning tour from its permutation number.
int arr[MAXN-1];
for (int i = 1; i < N; i++) arr[i-1] = path[i];
nth_permutation(arr, N-1, optimal_permutation);
for (int i = 1; i < N; i++) path[i] = arr[i-1];
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f\n", milliseconds*0.001);
// printing the minimum cost path
printf("Minimum Cost Path: ");
for (int i = 0; i < N+1; i++) {
printf("%d ", path[i]);
}
printf("\n");
// recompute the tour cost on the host as a sanity check
int cost = 0;
for(int i=1; i<N+1; i++) {
cost += matrix[path[i]*N + path[i-1]];
}
printf("Path cost: %d \n", cost);
// printing the run-time
// printf("Time taken: %f s\n", milliseconds*0.001);
cudaFree(dev_matrix);
cudaFree(dev_path);
cudaFree(dev_factorial);
}
|
12,458 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <time.h>
#include <limits.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define ORBITSPERDAY 20.0
#define SIMULATIONTIMEDAYS 1
#define STEPTIME 50.0
#define SECONDSPERDAY 86400.0
#define PI 3.141592653589793
#define RADIUSOFORBIT 7000.0
#define INCLINATION 30.0
// Host-side satellite-simulation state. NOTE(review): these globals appear
// to mirror the *cuda counterparts passed to masterkernel below; the exact
// update protocol is outside this view -- confirm against the full file.
double batterycharge = 100.00;      // remaining battery charge (percent)
int num_images = 0;                 // images captured
double avg_charge = 0.0;            // running average of battery charge
int num_images_comp = 0;            // images compressed
int num_images_trans = 0;           // images transmitted
int num_beacon_trans = 0;           // beacon packets transmitted
double sun_time = 0.0;              // accumulated sun-exposure metric
int gps_access = 0;                 // GPS position-update count
int batteryfails = 0;               // operations skipped for low battery
int adcsfails = 0;                  // ADCS failure count
double average_comp_ratio = 0.0;    // accumulated compression ratio
// One simulated timestep of the satellite, launched as <<<2, 4>>>: each
// (blockIdx.x, threadIdx.x) pair runs one hard-coded subsystem task.
//   block 0: tid 0 GPS propagation, tid 1 sun sensing, tid 2 imaging, tid 3 compression
//   block 1: tid 0 image transmit,  tid 1 beacon transmit, tid 2 ADCS check, tid 3 charging
// NOTE(review): all eight tasks read/modify *batterychargecuda concurrently with
// no atomics or ordering, so the energy accounting is racy — confirm this is
// intended as a loose simulation rather than an exact budget.
__global__ void masterkernel(int* imgsizecuda, double* pos, int* suncuda, uint16_t* imgcuda,
                             uint8_t* compimgcuda, double* batterychargecuda,
                             int* batteryfailscuda, double* avg_chargecuda, int* adcsfailscuda,
                             int* gps_accesscuda, int* num_imagescuda, int* num_images_transcuda,
                             int* num_images_compcuda, double* average_comp_ratiocuda,
                             int* num_beacon_transcuda, double* sun_timecuda,
                             double SECONDSPERORBIT, double inc, double sec)
{
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    switch(bid)
    {
    case 0:{
        switch(tid)
        {
        case 0:{ // GPS: propagate the circular orbit to the current second
            if(*batterychargecuda < 30.00)
            {
                (*batteryfailscuda)++;
                return;
            }
            double theta = sec * (360.0 / SECONDSPERORBIT); // angle along the orbit, degrees
            double phi = inc;
            double range = sqrt(pow(pos[0], 2) + pow(pos[1], 2) + pow(pos[2], 2));
            double xaxis = range * cos(theta * (PI / 180.0));
            double yaxis = range * sin(theta * (PI / 180.0)) * cos(phi * (PI / 180.0));
            double zaxis = range * sin(theta * (PI / 180.0)) * sin(phi * (PI / 180.0));
            pos[0] = xaxis;
            pos[1] = yaxis;
            pos[2] = zaxis;
            // BUG FIX: was `(gps_accesscuda)++`, which advanced the *pointer*
            // and never incremented the access counter.
            (*gps_accesscuda)++;
            *batterychargecuda -= 12.0;
            break;
        }
        case 1:{ // Sun sensing: only during the sunlit half of the orbit
            if(*batterychargecuda < 10.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            if(sec > (SECONDSPERORBIT / 2))
            {
                return; // eclipse half: sensors see nothing
            }
            int ind1 = 0;
            int ind2 = 3;
            int ind3 = 4;
            suncuda[ind1] = suncuda[ind2] = suncuda[ind3] = 1;
            // Weighted exposure: end faces (0,5) count 0.1, side faces 0.2 each.
            *sun_timecuda += 0.1 * ((suncuda[0] ? 1 : 0) + (suncuda[5] ? 1 : 0)) + 0.2 * ((suncuda[1] ? 1 : 0) + (suncuda[2] ? 1 : 0) + (suncuda[3] ? 1 : 0) + (suncuda[4] ? 1 : 0));
            *batterychargecuda -= 6.0;
            break;
        }
        case 2:{ // Imaging: fill the 512x640 frame with a constant test value
            if(*batterychargecuda < 40.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            int i = 0;
            for(i = 0;i < 512;i++)
            {
                int j = 0;
                for(j = 0;j < 640;j++)
                {
                    imgcuda[i * 640 + j] = 5524;
                }
            }
            (*num_imagescuda)++;
            *batterychargecuda -= 25.0;
            break;
        }
        case 3:{ // Compression: 2:1 fold of the 16-bit image into 8-bit bytes
            if(*batterychargecuda < 6.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            int i = 0, mainindex = 0;
            for(i = 0;i < (512 * 640);i += 2)
            {
                // NOTE(review): both "bytes" are derived from imgcuda[i]; the
                // i+1 element skipped by the stride-2 loop is never read.
                // Confirm whether `second` was meant to use imgcuda[i + 1].
                uint8_t first = (imgcuda[i] & 0x0000FFFF);
                uint8_t second = (((uint16_t)(imgcuda[i] >> 8)) & 0x0000FFFF);
                compimgcuda[mainindex++] = first + second;
            }
            *batterychargecuda -= 5.0;
            (*num_images_compcuda)++;
            *imgsizecuda = mainindex;
            *average_comp_ratiocuda += (*imgsizecuda) > 0.0 ? ((512.0 * 640.0 * 2) / ((double)(*imgsizecuda))) : 0;
            break;
        }
        }
        break;
    }
    case 1:{
        switch(tid)
        {
        case 0:{ // Image transmit: "send" (zero) the compressed buffer 3 times
            if(*batterychargecuda < 25.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            int i;
            for(i = 0;i < 3;i++)
            {
                int j;
                for(j = 0;j < *imgsizecuda;j++)
                {
                    compimgcuda[j] = 0;
                }
            }
            *batterychargecuda -= 15.0;
            // BUG FIX: this is the transmit task but the original incremented
            // num_images_compcuda (already counted by the compression task),
            // leaving the transmitted-images counter permanently at zero.
            (*num_images_transcuda)++;
            break;
        }
        case 1:{ // Beacon: pack position + sun flags + charge into 38 bytes, 60x
            if(*batterychargecuda < 20.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            int i;
            uint8_t becaon[38];
            union DBL
            {
                double d;
                char c[sizeof(double)];
            }dbl;
            for(i = 0;i < 60;i++)
            {
                int mainindex = 0;
                int j;
                for(j = 0;j < 3;j++)
                {
                    dbl.d = pos[j]; // serialize each coordinate byte-wise
                    int k = 0;
                    for(k = 0;k < sizeof(double);k++)
                    {
                        becaon[mainindex++] = dbl.c[k];
                    }
                }
                for(j = 0;j < 6;j++)
                {
                    becaon[mainindex++] = (suncuda[j] ? 1 : 0);
                }
                dbl.d = *batterychargecuda;
                for(j = 0;j < sizeof(double);j++)
                {
                    becaon[mainindex++] = dbl.c[j];
                }
                for(j = 0;j < 38;j++)
                {
                    becaon[j] = 0; // clear after "sending"
                }
            }
            (*num_beacon_transcuda)++;
            *batterychargecuda -= 10.0;
            break;
        }
        case 2:{ // ADCS: recover inclination from position; count disagreements
            if(*batterychargecuda < 6.0)
            {
                (*batteryfailscuda)++;
                return;
            }
            double range = sqrt(pow(pos[0], 2) + pow(pos[1], 2) + pow(pos[2], 2));
            double theta = acos(pos[0] / range) * (180.0 / PI);
            double phi = acos(pos[1] / (range * sin(theta * (PI / 180.0)))) * (180.0 / PI);
            phi = (phi + (asin(pos[2] / (range * sin(theta * (PI / 180.0)))) * (180.0 / PI))) / 2;
            if(((int)(inc)) != ((int)(phi)))
            {
                (*adcsfailscuda)++;
            }
            *batterychargecuda -= 5.0;
            break;
        }
        case 3:{ // Charging: sunlit half only, capped at 100%
            if(sec <= (SECONDSPERORBIT / 2))
            {
                *batterychargecuda = min(100.0, *batterychargecuda + 60.0);
                *avg_chargecuda += *batterychargecuda;
            }
            break;
        }
        }
        break;
    }
    }
}
// Host driver: allocate/copy all simulation state to the device, run one
// masterkernel launch per 50 s timestep over SIMULATIONTIMEDAYS days, then
// copy everything back and print summary statistics.
// NOTE(review): the device buffers allocated below are never cudaFree'd and
// no launch errors are checked — acceptable for a one-shot simulation but
// worth confirming.
int main()
{
printf("****Orbit Simulator****\n\n");
printf("Geocentric circular orbit\n");
printf("Radius of orbit: %lfkm\n", RADIUSOFORBIT);
printf("Height of orbit: %lfkm\n", RADIUSOFORBIT - 6400.0);
printf("Inclination of orbit: %lfdeg\n", INCLINATION);
printf("Number of orbits per day: %lf\n", ORBITSPERDAY);
printf("Temporal length of each orbit: %lfsec\n", SECONDSPERDAY / ORBITSPERDAY);
printf("Tangential orbital velocity: %lfkm/sec\n\n", (2 * PI * RADIUSOFORBIT) / (SECONDSPERDAY / ORBITSPERDAY));
// Initial state: satellite on the +x axis, blank image buffers, dark sensors.
double Position[3] = {RADIUSOFORBIT, 0.0, 0.0};
uint16_t image[512][640] = {0};
uint8_t compressedimage[512 * 640] = {0};
int SunSensorVal[6] = {0};
int days = 0;
int *suncuda, *imgsizecuda, *num_imagescuda, *num_images_compcuda, *num_images_transcuda, *num_beacon_transcuda, *batteryfailscuda, *adcsfailscuda, *gps_accesscuda;
uint8_t* compimgcuda;
uint16_t* imgcuda;
int compressedsize = 0.0;
double *avg_chargecuda, *sun_timecuda, *batterychargecuda, *poscuda, *average_comp_ratiocuda;
// Device mirrors of every piece of host state used by the kernel.
cudaMalloc(&poscuda, sizeof(double) * 3);
cudaMalloc(&suncuda, sizeof(int) * 6);
cudaMalloc(&imgcuda, sizeof(uint16_t) * 512 * 640);
cudaMalloc(&compimgcuda, sizeof(uint8_t) * 512 * 640);
cudaMalloc(&num_images_compcuda, sizeof(int));
cudaMalloc(&num_imagescuda, sizeof(int));
cudaMalloc(&num_images_transcuda, sizeof(int));
cudaMalloc(&gps_accesscuda, sizeof(int));
cudaMalloc(&num_beacon_transcuda, sizeof(int));
cudaMalloc(&batteryfailscuda, sizeof(int));
cudaMalloc(&imgsizecuda, sizeof(int));
cudaMalloc(&adcsfailscuda, sizeof(int));
cudaMalloc(&avg_chargecuda, sizeof(double));
cudaMalloc(&average_comp_ratiocuda, sizeof(double));
cudaMalloc(&batterychargecuda, sizeof(double));
cudaMalloc(&sun_timecuda, sizeof(double));
cudaMemcpy(poscuda, Position, sizeof(double) * 3, cudaMemcpyHostToDevice);
cudaMemcpy(avg_chargecuda, &avg_charge, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(average_comp_ratiocuda, &average_comp_ratio, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(sun_timecuda, &sun_time, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(suncuda, SunSensorVal, sizeof(int) * 6, cudaMemcpyHostToDevice);
cudaMemcpy(batterychargecuda, &batterycharge, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(batteryfailscuda, &batteryfails, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(imgsizecuda, &compressedsize, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(adcsfailscuda, &adcsfails, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(imgcuda, image, sizeof(uint16_t) * 512 * 640, cudaMemcpyHostToDevice);
cudaMemcpy(compimgcuda, compressedimage, sizeof(uint8_t) * 512 * 640, cudaMemcpyHostToDevice);
cudaMemcpy(gps_accesscuda, &gps_access, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(num_imagescuda, &num_images, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(num_beacon_transcuda, &num_beacon_trans, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(num_images_transcuda, &num_images_trans, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(num_images_compcuda, &num_images_comp, sizeof(int), cudaMemcpyHostToDevice);
clock_t begin = clock();
// Nested time loop: day -> orbit -> 50 s step; one 2x4 kernel launch per step.
for(days = 1;days <= SIMULATIONTIMEDAYS;days++)
{
printf("%s%d\n", "Day: ", days);
double orbits = 0;
for(orbits = 0.0;orbits < ORBITSPERDAY;orbits++)
{
printf("%s%lf\n", "Orbit: ", orbits);
double seconds = 0.0;
double SECONDSPERORBIT = SECONDSPERDAY / ORBITSPERDAY;
for(seconds = 0.0;seconds <= SECONDSPERORBIT;seconds += STEPTIME)
{
masterkernel<<<2, 4>>>(imgsizecuda, poscuda, suncuda, imgcuda,\
compimgcuda, batterychargecuda, batteryfailscuda, avg_chargecuda,\
adcsfailscuda, gps_accesscuda, num_imagescuda, num_images_transcuda,\
num_images_compcuda, average_comp_ratiocuda, num_beacon_transcuda,\
sun_timecuda, SECONDSPERORBIT, INCLINATION, seconds);
}
}
}
clock_t end = clock();
// Copy every counter/buffer back; these blocking memcpys also act as the
// synchronization point for all queued kernel launches.
cudaMemcpy(Position, poscuda, sizeof(double) * 3, cudaMemcpyDeviceToHost);
cudaMemcpy(&avg_charge, avg_chargecuda, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&average_comp_ratio, average_comp_ratiocuda, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&sun_time, sun_timecuda, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(SunSensorVal, suncuda, sizeof(int) * 6, cudaMemcpyDeviceToHost);
cudaMemcpy(&batterycharge, batterychargecuda, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&batteryfails, batteryfailscuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&adcsfails, adcsfailscuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&compressedsize, imgsizecuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(image, imgcuda, sizeof(uint16_t) * 512 * 640, cudaMemcpyDeviceToHost);
cudaMemcpy(compressedimage, compimgcuda, sizeof(uint8_t) * 512 * 640, cudaMemcpyDeviceToHost);
cudaMemcpy(&gps_access, gps_accesscuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&num_images, num_imagescuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&num_beacon_trans, num_beacon_transcuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&num_images_trans, num_images_transcuda, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&num_images_comp, num_images_compcuda, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nRelative sun time: %lf\n", sun_time);
printf("Number of GPS Access: %d\n", gps_access);
printf("Number of images clicked: %d\n", num_images);
printf("Number of images compressed: %d\n", num_images_comp);
printf("Number of images transmitted: %d\n", num_images_trans);
printf("Average compression ratio: %lf\n", average_comp_ratio / ((double)(num_images_comp)));
printf("Number of beacon transmissions: %d\n", num_beacon_trans);
printf("Number of ADCS failures: %d\n", adcsfails);
printf("Average battery charge: %lf\n", avg_charge / ((double)(SIMULATIONTIMEDAYS * SECONDSPERDAY)));
printf("Number of battery failures: %d\n", batteryfails);
printf("Run time: %lf\n", ((double)(end - begin)) / CLOCKS_PER_SEC);
}
|
12,459 | /***********************************************************
#
# \file parallel_bit_count.cu
# \author Sudnya Diamos <mailsudnya@gmail.co>
# \date Sunday March 5, 2017
# \brief A program to parallely count the number of 1s in a super big bit array
***********************************************************/
#include <string>
#include <iostream>
#include <vector>
#include <sstream>
#include <stdexcept>
#include <cuda.h>
constexpr int block_size = 1024;
// Count the 1-bits of d_in[0..N) in parallel; each block writes its partial
// count to ctaScratch[blockIdx.x]. Requires blockDim.x == block_size.
__global__ void parallelBitCount(int* ctaScratch, const int *d_in, const int N)
{
    int maxThreads = blockDim.x * gridDim.x;
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myOnes = 0;
    // Grid-stride loop: each thread pops the bits of every maxThreads-th word.
    for (int idx = myId; idx < N; idx += maxThreads) {
        int temp = d_in[idx];
        int totalWord = 8 * sizeof(int);
        for (int i = 0; i < totalWord; ++i)
        {
            myOnes += (temp & 1);
            temp = temp >> 1;
        }
    }
    // Tree-reduce the per-thread counts in shared memory.
    __shared__ int bitCounter[block_size];
    bitCounter[threadIdx.x] = myOnes;
    __syncthreads();
    // BUG FIX: the loop must include groupStep == block_size. With the
    // original `groupStep < block_size` bound the last merge never ran, so
    // bitCounter[0] and bitCounter[block_size/2] were never combined and the
    // result undercounted whenever the upper half of the block held any ones.
    for (int groupStep = 2; groupStep <= block_size; groupStep *= 2)
    {
        int myTemp = bitCounter[threadIdx.x];
        int neighborTemp = 0;
        if (threadIdx.x % groupStep == 0)
        {
            neighborTemp = bitCounter[threadIdx.x + groupStep / 2];
        }
        __syncthreads(); // all reads complete before any write below
        myTemp += neighborTemp;
        if (threadIdx.x % groupStep == 0)
        {
            bitCounter[threadIdx.x] = myTemp;
        }
        __syncthreads();
    }
    int finalCtaCount = bitCounter[0];
    if (threadIdx.x == 0)
    {
        ctaScratch[blockIdx.x] = finalCtaCount;
    }
}
// Fold the per-block partial counts into scratch[0]; intended for a
// single-thread <<<1,1>>> launch.
__global__ void serialReduce(int* scratch, int len)
{
    int total = 0;
    int idx = 0;
    while (idx < len)
    {
        total += scratch[idx];
        ++idx;
    }
    scratch[0] = total;
}
// Translate any non-success CUDA status into a C++ exception carrying the
// runtime's error string.
static void check(cudaError_t status)
{
    if (cudaSuccess == status)
    {
        return;
    }
    throw std::runtime_error(cudaGetErrorString(status));
}
// Build an alternating 0/1 host array, count its set bits on the GPU, and
// print the total. Exits via std::runtime_error (from check) on any CUDA error.
int main(void)
{
    // host and device pointers for our bit array
    int *aHost, *aDevice;
    // size of bit array
    const int N = 100;
    size_t arraySize = N * sizeof(int);
    //alloc on host
    aHost = (int *)malloc(arraySize);
    //alloc on device - always void**
    check(cudaMalloc((void **) &aDevice, arraySize));
    // Initialize host array: alternating 0/1, so the expected count is N/2.
    for (int i=0; i<N; i++)
    {
        aHost[i] = (int)i%2; //TODO: make this random?!
    }
    // copy to device
    check(cudaMemcpy(aDevice, aHost, arraySize, cudaMemcpyHostToDevice));
    // count 1 bits on device
    int n_blocks = (N + block_size - 1)/block_size;
    int *ctaScratch;
    check(cudaMalloc((void **) &ctaScratch, n_blocks*sizeof(int)));
    parallelBitCount <<< n_blocks, block_size >>> (ctaScratch, aDevice, N);
    // BUG FIX: launch-configuration errors never surface through the launch
    // statement itself; query them explicitly before synchronizing.
    check(cudaGetLastError());
    check(cudaDeviceSynchronize());
    serialReduce <<< 1, 1>>> (ctaScratch, n_blocks);
    check(cudaGetLastError());
    check(cudaDeviceSynchronize());
    // bring results back to host
    int finalAnswer;
    check(cudaMemcpy(&finalAnswer, ctaScratch, sizeof(int), cudaMemcpyDeviceToHost));
    // Print results
    std::cout << "Final answer " << finalAnswer << "\n";
    // Cleanup
    free(aHost);
    check(cudaFree(aDevice));
    // BUG FIX: ctaScratch was never freed.
    check(cudaFree(ctaScratch));
}
|
12,460 | #include <cstdio>
// Print the ordinal of every CUDA device on one line; return -1 when the
// device count cannot even be queried.
int main(void) {
    int deviceCount;
    cudaError_t status = cudaGetDeviceCount(&deviceCount);
    if (status != cudaSuccess) {
        return -1;
    }
    for (int id = 0; id < deviceCount; ++id) {
        printf("%d ", id);
    }
    printf("\n");
}
12,461 | #include "includes.h"
// Squared Euclidean distance between object `tid` and cluster `clusterId`.
// Objects and clusters are laid out with a hard-coded stride of 3 floats per
// point, while the loop runs over numCoords coordinates.
// NOTE(review): the stride 3 and the numCoords bound only agree when
// numCoords == 3; for any other numCoords adjacent points overlap — confirm
// this kernel is 3-D-only. numObjs/numClusters are unused here.
__device__ inline static float euclid_distance(int numCoords, int numObjs, int numClusters, int tid, int clusterId, float *objects, float *clusters )
{
float ans=0.0;
for (int i = 0; i < numCoords; i++) {
ans += (objects[3*tid+i] - clusters[i + clusterId*3]) *
(objects[3*tid+i] - clusters[i + clusterId*3]);
}
return(ans);
}
// k-means assignment step: for each object, find the nearest cluster centroid
// (squared Euclidean distance) and record membership changes.
// Dynamic shared memory layout: [numClusters*3 floats of centroids |
// blockDim.x*3 floats of per-thread object staging]; the launcher must size
// the allocation accordingly.
// NOTE(review): like euclid_distance, the shared staging uses a hard-coded
// stride of 3 floats per point — confirm numCoords == 3 is an invariant.
__global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, float *deviceClusters, int *membership, int *changedmembership )
{
extern __shared__ float sharedMem[];
float *sh_Clusters = sharedMem;
float *sh_Objects = (float*)&sh_Clusters[numClusters * 3];
// Every thread redundantly copies the full centroid table (correct, since
// all threads write identical values, but each element is written
// blockDim.x times).
for(int i = 0; i < numCoords * numClusters; i++) {
sh_Clusters[i] = deviceClusters[i];
}
__syncthreads();
unsigned int tid = threadIdx.x;
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
// Grid-stride loop over objects.
while (objectId < numObjs) {
int index, i;
float dist, min_dist;
// Stage this object's coordinates into shared memory.
for(int i = 0; i < numCoords; i++) {
sh_Objects[3*tid+i] = objects[3*objectId+i];
}
index = 0;
min_dist = euclid_distance(numCoords, numObjs, numClusters, tid,
0, sh_Objects, sh_Clusters);
// Linear scan over the remaining clusters for the minimum distance.
for (i=1; i<numClusters; i++) {
dist = euclid_distance(numCoords, numObjs, numClusters, tid,
i, sh_Objects, sh_Clusters);
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
if (membership[objectId] != index)
{
changedmembership[objectId] = 1;
membership[objectId] = index;
}
objectId += blockDim.x * gridDim.x;
}
}
12,462 |
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
using namespace std;
// Abort with a file:line diagnostic when a CUDA API call returned an error.
// Intended to be wrapped by a macro that supplies __FILE__/__LINE__; compiles
// to a no-op unless CUDA_CHECK_ERROR is defined.
// NOTE(review): the double-underscore prefix is reserved for the
// implementation in C++ — consider renaming if this header is ever reused.
inline void __cudaSafeCall( cudaError err,
const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
if ( cudaSuccess != err )
{
fprintf( stderr,
"cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
} while ( 0 );
#pragma warning( pop )
#endif
// CUDA_CHECK_ERROR
return;
}//end function
// Check for a pending launch error, then synchronize the device and check for
// asynchronous execution errors; abort with a file:line diagnostic on either.
// Compiles to a no-op unless CUDA_CHECK_ERROR is defined (the sync makes this
// expensive, so it is debug-only by design).
inline void __cudaCheckError( const char *file, const int line ) {
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err )
{
fprintf( stderr,
"cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// FIX: cudaThreadSynchronize() has been deprecated for years;
// cudaDeviceSynchronize() is the drop-in replacement. Also removed the
// redundant nested `if ( cudaSuccess != err )` that duplicated the
// enclosing condition.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr,
"cudaCheckError() failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
} while ( 0 );
// More careful checking. However, this will affect performance. // Comment if not needed
#pragma warning( pop )
#endif // CUDA_CHECK_ERROR
return;
}
// Seed the C PRNG with `seed`, then return a heap array of `size` ints drawn
// uniformly from [0, 999999]. Caller owns the returned array (delete[]).
int * makeRandArray( const int size, const int seed ) {
    srand( seed );
    int *values = new int[ size ];
    for ( int idx = 0; idx < size; ++idx ) {
        values[ idx ] = std::rand() % 1000000;
    }
    return values;
}
// Placeholder kernel: intentionally empty — the assignment's "YOUR CODE HERE"
// section in main() is where the real implementation is meant to go.
__global__ void matavgKernel( ) {
}
// Assignment skeleton: parse [count] [seed] (a third print flag is parsed only
// in the commented-out block, so printSorted stays false), generate the random
// array, and time the still-empty GPU section with CUDA events.
int main( int argc, char* argv[] ) {
int * array; // the poitner to the array of rands
int size, seed; // values for the size of the array
bool printSorted = false;
// and the seed for generating
// random numbers
// check the command line args
if( argc < 3 ){
std::cerr << "usage: "
<< argv[0]
<< " [amount of random nums to generate] [seed value for rand]" << " [1 to print sorted array, 0 otherwise]"
<< std::endl;
exit( -1 ); }
// convert cstrings to ints
{
std::stringstream ss1( argv[1] );
ss1 >> size;
} {
std::stringstream ss1( argv[2] );
ss1 >> seed; }
/*
{
int sortPrint;
std::stringstream ss1( argv[2] );
ss1 >> sortPrint;
if( sortPrint == 1 )
printSorted = true;
}
*/
// get the random numbers
array = makeRandArray( size, seed );
// Event pair brackets the (currently empty) GPU work for wall-time reporting.
cudaEvent_t startTotal, stopTotal; float timeTotal; cudaEventCreate(&startTotal); cudaEventCreate(&stopTotal); cudaEventRecord( startTotal, 0 );
/////////////////////////////////////////////////////////////////////
/////////////////////// YOUR CODE HERE ///////////////////////
/////////////////////////////////////////////////////////////////////
/***********************************
*
Stop and destroy the cuda timer
**********************************/
cudaEventRecord( stopTotal, 0 );
cudaEventSynchronize( stopTotal );
cudaEventElapsedTime( &timeTotal, startTotal, stopTotal );
cudaEventDestroy( startTotal );
cudaEventDestroy( stopTotal );
/***********************************
end of cuda timer destruction
**********************************/
// cudaEventElapsedTime reports milliseconds, hence the /1000 below.
std::cerr << "Total time in seconds: "
<< timeTotal / 1000.0 << std::endl;
if( printSorted ){
///////////////////////////////////////////////
/// Your code to print the sorted array here //
///////////////////////////////////////////////
} }
|
12,463 | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
#define CHECK(res) if (res!=cudaSuccess){exit(-1);} //check if success
const int height=10; //the kinds of attributes(>=)
const int width=100; //the kinds of datas(>=)
const int kinds=30; //the kinds of types(>=)
const int bit_size=width*height*sizeof(int ); //the size of bitat[][]
const int index_size=width*sizeof(int );
unsigned int bit[height][width];
unsigned int bitat[width][height]; //add 0-Fill data
int key[height][kinds];
int offset[height][kinds];
int index_bit[width];
int index_long[1];
int attr_size; //the kinds of attributes(=)
int attr_total; //the kinds of datas/31 (=)
unsigned int bin_31=0x80000000;
FILE *fp;
char str[33];
cudaError_t res;
// Render the 32-bit two's-complement pattern of `num` into strr as a
// NUL-terminated string of '0'/'1' characters (MSB first). strr must hold at
// least 33 bytes. The bin2 parameter is unused; kept for interface
// compatibility with existing callers.
void my_itoa(int num,char *strr,int bin2)
{
    const int lowBit = 0x00000001;
    for (int pos = 31; pos >= 0; --pos)
    {
        strr[pos] = (num & lowBit) ? '1' : '0';
        num >>= 1;
    }
    strr[32] = '\0';
}
void get_attr_size() //get attr_size
{
    // Count '[' characters in outputm.txt; every attribute contributes two
    // bracketed lists (keys + offsets), hence the final divide by two.
    fp=fopen("outputm.txt","r");
    // BUG FIX: fgetc() returns int. Storing it in a char either truncates a
    // 0xFF data byte into EOF (signed char, early stop) or makes the != EOF
    // comparison never true (unsigned char, infinite loop at end of file).
    int c;
    attr_size=0;
    while((c=fgetc(fp))!=EOF)
    {
        if(c=='[')
            attr_size++;
    }
    attr_size=attr_size/2;
    fclose(fp);
}
void get_bitmap() //get bitmap,key and offset from file
{
    // Parse outputm.txt: runs of integers fill bit[i][*]; each "[...][...]"
    // pair fills key[i][*] then offset[i][*] and advances to the next row.
    fp=fopen("outputm.txt","r");
    int i,j,k,offs;
    // BUG FIX: fgetc() returns int; a char here breaks the EOF comparison
    // exactly as in get_attr_size (early stop or infinite loop).
    int init;
    i=0;j=0;k=0;
    fscanf(fp,"%d",&bit[i][j]);j++;
    while((init=fgetc(fp))!=EOF)
    {
        if(init=='[')
        {
            // First bracketed list: the keys for attribute row i.
            fscanf(fp,"%d",&offs);
            while(fgetc(fp)!=']')
            {
                key[i][k]=offs;k++;
                fscanf(fp,"%d",&offs);
            }
            key[i][k]=offs;
            // Skip ahead to the second bracketed list: the offsets.
            while(fgetc(fp)!='[');k=0;
            fscanf(fp,"%d",&offs);
            while(fgetc(fp)!=']')
            {
                offset[i][k]=offs;k++;
                fscanf(fp,"%d",&offs);
            }
            offset[i][k]=offs;
            i++;j=0;k=0;
        }
        else{
            fscanf(fp,"%d",&bit[i][j]);
            j++;
        }
    }
    // BUG FIX: the stream was left open (resource leak).
    fclose(fp);
}
// Compute attr_total: the expanded word count of attribute row 0's bitmap.
// Words <= bin_31 (MSB clear) are 0-Fill run lengths and expand to that many
// words; literal words (MSB set) count as one.
void get_total()
{
int i,tsize,tlie;
attr_total=0;
tsize=key[0][0];  // number of stored words for attribute 0, value 0
tlie=offset[0][0]; // starting column of that word run in bit[0][*]
for(i=0;i<tsize;i++)
{
attr_total++;
// 0-Fill word: its value is a run length, so add (length - 1) more.
if(bit[0][tlie+i]<=bin_31)
attr_total=attr_total+bit[0][tlie+i]-1;
}
printf("attr_total:%d\n",attr_total);
}
// Interactively read one chosen value per attribute from stdin, then expand
// the selected compressed bitmap columns into bitat[][] (one row per expanded
// word, one column per attribute). Unselected attributes (-1) stay all-ones so
// they do not constrain the later AND in kernel_index_bitmap.
void get_attr()
{
int i,j,k,attr;
int size[height]; // per-attribute word count of the chosen bitmap
int lie[height];  // per-attribute starting column in bit[i][*]
int local;
index_long[0]=0;
// Default every used cell to all-ones (neutral element for bitwise AND).
for(i=0;i<attr_total;i++)
{
for(j=0;j<attr_size;j++)
bitat[i][j]=0xffffffff;
}
for(i=0;i<attr_size;i++)
{
printf("Please input the attribute you choose(if not,input -1):\n");
scanf("%d",&attr);
if(attr==-1)
{
size[i]=0;
lie[i]=0;
}
else{
size[i]=key[i][attr]; //find key and offset
lie[i]=offset[i][attr];
}
}
// Decompress: literal words (> bin_31) copy through; 0-Fill words expand to
// that many zero words.
for(i=0;i<attr_size;i++)
{
local=-1;
for(j=0;j<size[i];j++)
{
local+=1;
if(bit[i][lie[i]+j]>bin_31) //not 0-Fill
{
bitat[local][i]=bit[i][lie[i]+j];
}
else //0-Fill
{
for(k=0;k<bit[i][lie[i]+j];k++)
{
bitat[local+k][i]=0;
}
local=local+bit[i][lie[i]+j]-1;
}
}
}
}
// Device-side twin of my_itoa: render num's 32-bit pattern into strr as a
// NUL-terminated '0'/'1' string, MSB first. strr must hold 33 bytes.
__device__ void d_itoa(int num,char *strr)
{
    const int lowBit = 0x00000001;
    for (int pos = 31; pos >= 0; --pos)
    {
        strr[pos] = (num & lowBit) ? '1' : '0';
        num >>= 1;
    }
    strr[32] = '\0';
}
// AND the selected per-attribute bitmap words together and emit the matching
// record ids into dindex_bit; dindex_long[0] is an atomic cursor into it.
// Each thread handles dmul consecutive 31-bit words. The printf calls are
// debug tracing.
// NOTE(review): dbit is declared unsigned int** but is actually the flat
// bitat[][] array copied byte-wise from the host; the double cast below just
// reinterprets it as a flat int array — confirm the int/unsigned mix is
// intentional.
__global__ void kernel_index_bitmap(unsigned int **dbit,int *dindex_bit,int *dindex_long,int dtotal,int dsize,int dheight,int dmul)
{
int i,j,k,addr;
char strr[33];
unsigned int num;
int idx=threadIdx.x+blockIdx.x*blockDim.x;
int idy;
int *add=(int *)((int *)dbit); //the address of the bitat[][]
for(i=0;i<dmul;i++)
{
idy=dmul*idx+i; // word index this thread processes in this iteration
num=0xffffffff; //num=32 bits of '1'
if(idy<dtotal)
{
// AND the word of every selected attribute (row stride is dheight).
for(j=0;j<dsize;j++)
{
num&=add[idy*dheight+j];
printf("(%d,%d):%d\n",idy,idy*dheight+j,add[idy*dheight+j]);
}
printf("num:(%d,%d):%d\n",idx,idy*dheight+j,num);
d_itoa(num,strr);
printf("%d:%s\n",idy,strr);
// Scan bit positions 1..31 (position 0 is the literal-word flag, not
// data); each set bit maps to record id idy*31 + j.
for(j=1;j<32;j++)
{
if(strr[j]=='1')
{
addr=idy*31+j;
printf("attr:%d\n",addr);
k=atomicAdd(&(dindex_long[0]),1); // reserve an output slot
dindex_bit[k]=addr;
printf("%d:%d\n",k,dindex_bit[k]);
}
}
}
}
}
// Allocate device buffers, copy the expanded bitmap over, launch
// kernel_index_bitmap, and copy the matching record ids back for printing.
// Uses the file-global CHECK macro (exit on any CUDA error).
void cuda_malloc_cpy()
{
    int i,j,mul;
    int thread_size=3;
    int block_size=1;
    // Words each thread must process: ceil(attr_total / total threads).
    mul=(attr_total+(thread_size*block_size-1))/(thread_size*block_size);//distribution of number of tasks
    printf("mul:%d\n",mul);
    int *dindex_bit;
    int *dindex_long;
    unsigned int **dbit;
    int a[width][height];//test
    for(i=0;i<width;i++)
    {
        for(j=0;j<height;j++)
        {
            a[i][j]=0;
        }
    }
    res=cudaMalloc((void **)&dindex_bit,index_size);CHECK(res);printf("\n[0] \n");
    res=cudaMalloc((void **)&dindex_long,sizeof(int ));CHECK(res);printf("[1] \n");
    res=cudaMalloc((void **)&dbit,bit_size);CHECK(res);printf("[2] \n");
    res=cudaMemcpy(dbit,bitat,bit_size,cudaMemcpyHostToDevice);CHECK(res);printf("[3] \n");
    res=cudaMemcpy(dindex_long,index_long,sizeof(int ),cudaMemcpyHostToDevice);CHECK(res);printf("[4] \n");
    dim3 threads(thread_size,1);
    dim3 blocks(block_size,1);
    kernel_index_bitmap<<<blocks,threads>>>(dbit,dindex_bit,dindex_long,attr_total,attr_size,height,mul);
    // BUG FIX: surface launch-configuration errors instead of silently
    // copying back garbage.
    res=cudaGetLastError();CHECK(res);
    printf("---------------T_T-------------\n");
    res=cudaMemcpy(index_bit,dindex_bit,index_size,cudaMemcpyDeviceToHost);CHECK(res);printf("[5] \n");
    res=cudaMemcpy(index_long,dindex_long,sizeof(int ),cudaMemcpyDeviceToHost);CHECK(res);printf("[6] \n");
    res=cudaMemcpy(a,dbit,bit_size,cudaMemcpyDeviceToHost);CHECK(res);printf("[7] \n");
    printf("long:%d\n",index_long[0]);
    for(i=0;i<index_long[0];i++)
        printf("%d,",index_bit[i]);
    printf("\n");
    for(i=0;i<attr_total;i++)
    {
        for(j=0;j<attr_size;j++)
        {
            printf("%d,",a[i][j]);
        }
        printf("\n");
    }
    cudaFree(dbit);
    // BUG FIX: dindex_bit and dindex_long were leaked on every call.
    cudaFree(dindex_bit);
    cudaFree(dindex_long);
}
// Pipeline driver: measure the file, parse it, size the expanded bitmap,
// collect the user's attribute selections, then run the GPU intersection.
int main()
{
get_attr_size();
get_bitmap();
get_total();
get_attr();
cuda_malloc_cpy();
return 0;
}
|
12,464 | #include "includes.h"
// Element-wise copy: thread i moves WHAT[i] into WHERE[i].
// NOTE(review): no bounds guard — the launch configuration must cover exactly
// the length of both arrays.
__global__ void Replace(float *WHAT , float *WHERE)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    WHERE[gid] = WHAT[gid];
}
12,465 | #include <cstdio>
#define N 16
#define THREADS_PER_BLOCK 8
using namespace std;
// Fill dA so that each element records threadIdx.x + blockIdx.x for the
// thread that wrote it (a launch-geometry demo, not an addition of inputs).
__global__ void add(int *dA) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int tagged = threadIdx.x + blockIdx.x;
    dA[gid] = tagged;
}
// Launch the demo kernel over N ints, copy the result back, and print it
// space-separated (no trailing space).
int main() {
    int *dA;
    int size = N * sizeof(int);
    cudaMalloc((void **)&dA, size);
    int *hA;
    // BUG FIX: was `new int[size]` — `size` is a byte count, so that
    // allocated 4x too many ints; N elements is what the copy below fills.
    hA = new int[N];
    add<<<2, THREADS_PER_BLOCK>>>(dA);
    cudaDeviceSynchronize();
    cudaMemcpy(hA, dA, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        if (i == N - 1)
            printf("%d", hA[i]);
        else
            printf("%d ", hA[i]);
    // BUG FIX: memory from new[] must be released with delete[], not free()
    // (mixing them is undefined behavior).
    delete[] hA;
    cudaFree(dA);
    return 0;
}
|
12,466 | #include "includes.h"
cudaEvent_t start, stop;
// Horizontal Sobel-style gradient: each thread convolves the 3x3 x_kernel
// against channel around linear index `index` and writes the result to
// x_gradient[index]. One thread per pixel; image_width is the row stride.
__global__ void cudaComputeXGradient(int* x_gradient, unsigned char* channel, int image_width, int image_height) {
    int x_kernel[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } };
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original only rejected index == 0, but the largest offset
    // read below is index + 2*image_width + 1, so threads in the last two
    // rows (and any excess threads from the rounded-up grid) read past the
    // end of `channel`. Guard both ends of the window.
    if (index < 1 || index + 2 * image_width + 1 >= image_width * image_height) {
        return;
    }
    x_gradient[index] =
    x_kernel[0][0] * channel[index - 1] +
    x_kernel[1][0] * channel[index] +
    x_kernel[2][0] * channel[index + 1] +
    x_kernel[0][1] * channel[index + image_width - 1] +
    x_kernel[1][1] * channel[index + image_width] +
    x_kernel[2][1] * channel[index + image_width + 1] +
    x_kernel[0][2] * channel[index + 2 * image_width - 1] +
    x_kernel[1][2] * channel[index + 2 * image_width] +
    x_kernel[2][2] * channel[index + 2 * image_width + 1];
    return;
}
12,467 | /**
* Copyright 2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**Modified, updated and re-oragnized some part of codes
* by Dr. Yingfeng Yu, CUEB, School of Finance,
* last modified Jul,4th,2016
* Only for education purpose
*/
#include "cuda.h"
#include "stdio.h"
#include "stdlib.h"
const int OPT_N = 6400000;
const int NUM_ITERATIONS = 10;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
// Draw a uniform float in [low, high] by linearly blending the endpoints
// with t = rand()/RAND_MAX.
float RandFloat(float low, float high)
{
    const float t = (float)rand() / (float)RAND_MAX;
    const float blended = (1.0f - t) * low + t * high;
    return blended;
}
__device__ inline float cndGPUv1(float d)
{//written by Dr.Yingfeng Yu
    // Standard normal CDF via the CUDA math-library intrinsic.
    return normcdff(d);
}
// Standard normal CDF via a rational polynomial approximation (the constants
// match the classic Hastings/Abramowitz–Stegun form used in NVIDIA's
// BlackScholes sample). Computes the upper-tail value for |d| and reflects
// for d > 0.
__device__ inline float cndGPUv2(float d)
{//Nvidia's code
const float A1 = 0.31938153f;
const float A2 = -0.356563782f;
const float A3 = 1.781477937f;
const float A4 = -1.821255978f;
const float A5 = 1.330274429f;
const float RSQRT2PI = 0.39894228040143267793994605993438f;
float K, cnd;
K = __fdividef(1.0f, (1.0f + 0.2316419f * fabsf(d)));
// Density times the 5th-order polynomial in K approximates the tail mass.
cnd = RSQRT2PI * __expf(- 0.5f * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
if (d > 0)
cnd = 1.0f - cnd; // symmetry: Phi(d) = 1 - Phi(-d)
return cnd;
}
__device__ inline float pndGPU(float d)
{//written by Dr.Yingfeng Yu
    // Standard normal density: exp(-d^2/2) / sqrt(2*pi), using rsqrtf for
    // the 1/sqrt factor and the fast __expf intrinsic.
    const float PI = 3.141592653589793238462643f;
    float density = rsqrtf(2.0f * PI) * __expf(-0.5f * d * d);
    return density;
}
// Black-Scholes pricing for one European option: writes the call and put
// prices plus the call delta and gamma for the given market parameters.
__device__ inline void BlackScholesBodyGPU(
float &CallResult,
float &PutResult,
float &DeltaCall, //new added by yyf
float &Gamma,//new added by yyf
float S, //Stock price
float X, //Option strike
float T, //Option years
float R, //Riskless rate
float V //Volatility rate
)
{
float sqrtT, expRT;
float d1, d2,CNDD1, CNDD2;
sqrtT = __fdividef(1.0F, rsqrtf(T)); // sqrt(T) via reciprocal rsqrt
d1 = __fdividef(__logf(S / X) + (R + 0.5f * V * V) * T, V * sqrtT);
d2 = d1 - V * sqrtT;
CNDD1 = cndGPUv1(d1);// it is better to use my version
CNDD2 = cndGPUv1(d2);
//Calculate Call and Put simultaneously
expRT = __expf(- R * T);
CallResult = S * CNDD1 - X * expRT * CNDD2;
PutResult = X * expRT * (1.0f - CNDD2) - S * (1.0f - CNDD1);
// BUG FIX: the call delta is N(d1) (the CDF), not the density n(d1).
// The original stored pndGPU(d1), which is the PDF that belongs only in
// the gamma formula below.
DeltaCall = CNDD1;
Gamma = __fdividef(pndGPU(d1),V*S*sqrtT); // gamma = n(d1)/(S*sigma*sqrt(T))
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU: one thread per option.
////////////////////////////////////////////////////////////////////////////////
__launch_bounds__(128)
__global__ void BlackScholesGPU(
float * d_CallResult,
float * d_PutResult,
float * d_DeltaCall,
float * d_Gamma,
float * d_StockPrice,
float * d_OptionStrike,
float * d_OptionYears,
float Riskfree,
float Volatility,
int optN
)
{
const int opt = blockDim.x * blockIdx.x + threadIdx.x;
// BUG FIX: optN was passed in but never used, so any launch where
// gridDim*blockDim exceeds optN read and wrote past the arrays. Guard the
// grid tail.
if (opt >= optN)
return;
float callResult, putResult, deltaCall, gamma;//add yyf
BlackScholesBodyGPU(
callResult,
putResult,
deltaCall,
gamma,
d_StockPrice[opt],
d_OptionStrike[opt],
d_OptionYears[opt],
Riskfree,
Volatility);
d_CallResult[opt] = callResult;
d_PutResult[opt] = putResult;
d_DeltaCall[opt] = deltaCall;
d_Gamma[opt] = gamma;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Host driver: generate OPT_N random option contracts, run the pricing kernel
// NUM_ITERATIONS times, copy the results back, and print the first and last
// NN rows alongside their inputs.
// NOTE(review): no CUDA error checking anywhere — a failed malloc or launch
// would surface only as garbage output.
int main(int argc, char **argv)
{
printf("[%s] - Starting...\n", argv[0]);
float
*h_CallResult,
*h_PutResult,
*h_DeltaCall,
*h_Gamma,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
*d_DeltaCall,
*d_Gamma,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
int i;
printf("Initializing data...\n");
h_CallResult = (float *)malloc(OPT_SZ);
h_PutResult = (float *)malloc(OPT_SZ);
h_DeltaCall = (float *)malloc(OPT_SZ);
h_Gamma = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
cudaMalloc( (void **) &d_CallResult, OPT_SZ);
cudaMalloc( (void **) &d_PutResult, OPT_SZ);
cudaMalloc( (void **) &d_DeltaCall, OPT_SZ);
cudaMalloc( (void **) &d_Gamma, OPT_SZ);
cudaMalloc( (void **) &d_StockPrice, OPT_SZ);
cudaMalloc( (void **) &d_OptionStrike, OPT_SZ);
cudaMalloc( (void **) &d_OptionYears, OPT_SZ);
printf("...generating input data in CPU mem.\n");
// Fixed seed for reproducible contracts across runs.
srand(5347);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice);
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
cudaDeviceSynchronize();
// Repeated launches over the same inputs (benchmark-style; results are
// overwritten each iteration).
for (i = 0; i < NUM_ITERATIONS; i++)
{
printf("Now executing Black-Scholes GPU kernel (%i -th)...\n", i);
BlackScholesGPU<<<DIV_UP(OPT_N, 128), 128/*480, 128*/>>>(
d_CallResult,
d_PutResult,
d_DeltaCall, //new added by yyf
d_Gamma,//new added by yyf
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
}
cudaDeviceSynchronize();
cudaMemcpy(h_CallResult, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost);
cudaMemcpy(h_PutResult, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost);
cudaMemcpy(h_DeltaCall, d_DeltaCall, OPT_SZ, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Gamma, d_Gamma, OPT_SZ, cudaMemcpyDeviceToHost);
int NN=20;
printf("===============================================Basic Info===============================================\n");
printf("\t\t\tTotal Num.of Options=%d,\n\t\t\tRisk-free rate Rf=%f,\n\t\t\tVolatility(Sigma)=%f.\n",OPT_N,RISKFREE,VOLATILITY);
printf("\t\t\tCopyright belongs to Nvidia, modified by Dr. Yingfeng Yu. \n\t\t\tFor education purpose only.\n" );
printf("=====================CUDA Results========================================||=====BSM's other Info========\n");
printf("The index |\tCall Price |\t Put Price |\tCall Delta|\tGamma\t || (S,K,T)\n");
for (i = 0; i < NN; i++)
{
printf("[%d]\t\t %f\t %f\t %f\t %f||(%f,%f,%f)\n",i+1,
h_CallResult[i],h_PutResult[i],
h_DeltaCall[i],h_Gamma[i],
h_StockPrice[i],h_OptionStrike[i],h_OptionYears[i]);
}
printf(".\n");
printf(".\n");
printf(".\n");
for (i = OPT_N-NN; i < OPT_N; i++)
{
printf("[%d]\t %f\t %f\t %f\t %f||(%f,%f,%f)\n",i+1,
h_CallResult[i],h_PutResult[i],
h_DeltaCall[i],h_Gamma[i],
h_StockPrice[i],h_OptionStrike[i],h_OptionYears[i]);
}
printf("...releasing GPU memory.\n");
cudaFree(d_OptionYears);
cudaFree(d_OptionStrike);
cudaFree(d_StockPrice);
cudaFree(d_PutResult);
cudaFree(d_CallResult);
cudaFree(d_DeltaCall);
cudaFree(d_Gamma);
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResult);
free(h_CallResult);
free(h_DeltaCall);
free(h_Gamma);
printf("All testing, .... done.\n");
cudaDeviceReset();
}
|
12,468 | #include "includes.h"
// Advance n particle positions by one timestep: p += dt * v, one particle per
// thread, with a tail guard for grids that overshoot n.
__global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) {
    const unsigned long particle = blockIdx.x * blockDim.x + threadIdx.x;
    if (particle >= n) {
        return;
    }
    x[particle] += dt * vx[particle];
    y[particle] += dt * vy[particle];
    z[particle] += dt * vz[particle];
}
12,469 | #include "includes.h"
// Tiled matrix transpose: a TILE_DIM x TILE_DIM tile is staged through shared
// memory so both the global read and the global write are along contiguous
// rows. Each thread handles TILE_DIM/BLOCK_ROWS rows of its tile.
// NOTE(review): TILE_DIM and BLOCK_ROWS come from includes.h; there is no
// bounds checking, so width and height must be multiples of TILE_DIM, and the
// un-padded tile may incur shared-memory bank conflicts on the transposed
// read — confirm against the launcher.
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
// Swap the block coordinates for the output so the write is row-contiguous
// in the transposed matrix.
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads(); // whole tile staged before any transposed read
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
12,470 | #include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
#define N ((int)1e7)
#define CEIL(a, b) ((a-1)/b +1)
// Sum d_a[0..N) into *sum: shared-memory tree reduction per block, then one
// atomicAdd per block. Requires blockDim.x == 1024.
__global__ void reduce(int *d_a, int *sum) {
    __shared__ int data[1024];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the grid is CEIL(N,1024) blocks, so the last block's threads
    // had i >= N and read past the end of d_a. Pad the tail with the
    // additive identity instead.
    data[threadIdx.x] = (i < N) ? d_a[i] : 0;
    __syncthreads();
    for(int step=1; step<1024; step*=2) {
        int threadID = 2*step*threadIdx.x;
        if(threadID + step < 1024)
            data[threadID] += data[threadID + step];
        __syncthreads(); // barrier is outside the if: all threads reach it
    }
    if(threadIdx.x == 0)
        atomicAdd(sum, data[0]);
}
// Benchmark driver: sum N random bits on the device (timed with CUDA events)
// and on the host (timed with clock()), then compare the two sums.
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize(), and h_a/h_sum device buffers rely on process exit
// for final cleanup of the host allocations.
int main() {
int *h_a, *h_sum;
int *d_a, *d_sum;
clock_t tim;
h_a = new int[N];
h_sum = new int;
*h_sum = 0;
printf("\nValue of N : %d\n", N);
srand(time(0));
// Random 0/1 inputs, so the expected sum is ~N/2.
for(int i=0; i<N; i++)
h_a[i] = rand()%2;
cudaMalloc((void**)&d_a, N*sizeof(int));
cudaMalloc((void**)&d_sum, sizeof(int));
cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_sum, h_sum, sizeof(int), cudaMemcpyHostToDevice);
// Device timer
cudaEvent_t start, stop;
float tims;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Kernel call
reduce<<<CEIL(N, 1024), 1024>>>(d_a, d_sum);
cudaThreadSynchronize();
// End timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tims, start, stop);
printf("\nDevice Time : %0.2lf ms\n", tims);
cudaMemcpy(h_sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_sum);
// Host reference sum for validation/timing comparison.
int sum = 0;
tim = clock();
for(int i=0; i<N; i++)
sum += h_a[i];
tim = clock() - tim;
printf("Host Time : %0.2lf ms\n", tim*1.0/CLOCKS_PER_SEC*1000.0);
printf("\nDevice sum : %d\nHost sum : %d\n\n", *h_sum, sum);
delete[] h_a;
delete h_sum;
}
12,471 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__
__global__
void sumTriangle(float *M, float *V, int N)
{
// V[j] = sum of the upper-triangle entries of column j (rows 0..j).
// Thread N-1 then serially folds V[0..N-1] into the grand total V[N].
// Single-block kernel; assumes blockDim.x == N.
const int col = threadIdx.x;
float acc = 0;
for (int row = 0; row <= col; ++row)
acc += M[row * N + col];
V[col] = acc;
__syncthreads();   // all column sums visible before the serial fold
if (col == N - 1)
{
float total = 0.0;
for (int k = 0; k < N; ++k)
total += V[k];
V[N] = total;
}
}
__global__
void sumTriangle2(float *M, float *V, int N)
{
// Variant of sumTriangle where only even-indexed rows contribute to the
// per-column sums. Single-block kernel; assumes blockDim.x == N.
const int col = threadIdx.x;
float acc = 0.0;
for (int row = 0; row <= col; row += 2)   // even rows only
acc += M[row * N + col];
V[col] = acc;
__syncthreads();   // column sums published before the serial fold
if (col == N - 1)
{
float total = 0;
for (int k = 0; k < N; ++k)
total += V[k];
V[N] = total;
}
}
__global__
void sumTriangle3(float *M, float *V, int N)
{
// V[j] = sum of column j of the upper triangle of M (rows 0..j), then a
// tree reduction over V[0..N-1] leaves the grand total in V[N].
// Single-block kernel; assumes blockDim.x == N.
int j = threadIdx.x;
int i;
float sum = 0;
for (i = 0; i <= j; ++i)
{
sum += M[i * N + j];
}
V[j] = sum;
__syncthreads();
int s;
for (s = 1; s < N; s *= 2)
{
if (j % (2 * s) == 0 && j + s < N)
{
V[j] += V[j + s];
}
__syncthreads();
}
// Fix: only one thread publishes the total. Previously every thread
// stored V[N] = V[0] — a racy multi-write (same value, but still a race).
if (j == 0)
{
V[N] = V[0];
}
}
int main()
{
// Build a random N x N matrix, run the sumTriangle3 kernel to compute
// per-column upper-triangle sums plus their overall total, and verify
// the total against a serial CPU computation.
int N = 11;
int size_M = N * N;
int size_V = N + 1;
float *M, *V;
M = (float *) malloc(sizeof(float) * size_M);
V = (float *) malloc(sizeof(float) * size_V);
int i, j;
srand(time(0));
V[0] = 0;
for (i = 0; i < N; ++i)
{
V[i + 1] = 0;
for (j = 0; j < N; ++j)
{
M[i * N + j] = rand() % 10000;
}
}
cudaError_t err = cudaSuccess;
float *d_M = NULL, *d_V = NULL;
err = cudaMalloc((void **) &d_M, size_M * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Error allocating device vector d_M (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **) &d_V, size_V * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Error allocating device vector d_V (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_M, M, size_M * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Error copying vector M from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_V, V, size_V * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Error copying vector V from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Single block of N threads — the kernel requires blockDim.x == N.
dim3 grid(1, 1, 1);
dim3 block(N, 1, 1);
printf("Lauching cuda kernel sumTriangle with blocks: (%d, %d, %d) and threads: (%d, %d, %d).\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
sumTriangle3<<<grid, block>>>(d_M, d_V, N);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Error launching kernel sumTriangle (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(V, d_V, sizeof(float) * size_V, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Error copying vector V from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// CPU reference: recompute column sums and the grand total.
float sum_all = 0;
float sum;
for (j = 0; j < N; ++j)
{
sum = 0;
for (i = 0; i <= j; i += 1)
{
sum += M[i * N + j];
}
sum_all += sum;
}
if (fabs(sum_all - V[N]) > 1e-5)
{
fprintf(stderr, "Error in kernel's computation - kernel gives incorrect result for overall sum.\n");
exit(EXIT_FAILURE);
}
printf("TEST PASSED.\n");
// Fix: release device and host memory (everything was leaked before).
cudaFree(d_M);
cudaFree(d_V);
free(M);
free(V);
return 0;
}
12,472 | #include "includes.h"
__global__ void pw_copy_cr_cu_z(const double *zin, double *dout, const int n) {
// Gather every second double from zin (the even-indexed elements —
// presumably the real parts of interleaved complex values) into dout.
// 2-D grid of 1-D blocks, flattened to a single element index.
const int idx = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
if (idx >= n)
return;
dout[idx] = zin[2 * idx];
}
12,473 | #include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#define N 1024
struct nodeAOS {
int a;
double b;
char c;
} *allnodesAOS;
struct nodeSOA {
int *a;
double *b;
char *c;
} allnodesSOA;
__global__ void dkernelaos(struct nodeAOS *allnodesAOS) {
// Initialize one Array-of-Structures record per thread (AoS half of the
// AoS-vs-SoA bandwidth comparison driven by main()).
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: guard the tail — ceil(N/BLOCKSIZE) blocks overshoot N whenever N
// is not a multiple of BLOCKSIZE, which previously wrote out of bounds.
if (id >= N)
return;
allnodesAOS[id].a = id;
allnodesAOS[id].b = 0.0;
allnodesAOS[id].c = 'c';
}
__global__ void dkernelsoa(int *a, double *b, char *c) {
// Initialize one element of each Structure-of-Arrays component per
// thread (SoA half of the bandwidth comparison).
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
// Fix: tail guard — the grid overshoots N when N % BLOCKSIZE != 0.
if (id >= N)
return;
a[id] = id;
b[id] = 0.0;
c[id] = 'd';
}
double rtclock() {
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
struct timeval tv;
struct timezone tz;
int rc = gettimeofday(&tv, &tz);
if (rc != 0) printf("Error return from gettimeofday: %d", rc);
return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
void printtime(const char *str, double starttime, double endtime) {
// Print "<label><elapsed> seconds" for a [starttime, endtime] interval.
const double elapsed = endtime - starttime;
printf("%s%3f seconds\n", str, elapsed);
}
#define BLOCKSIZE 1024
int main(int nn, char *str[]) {
// Time device-side initialization of N records stored as an Array of
// Structures vs a Structure of Arrays.
cudaMalloc(&allnodesAOS, N * sizeof(struct nodeAOS));
cudaMalloc(&allnodesSOA.a, N * sizeof(int));
cudaMalloc(&allnodesSOA.b, N * sizeof(double));
cudaMalloc(&allnodesSOA.c, N * sizeof(char));
unsigned nblocks = ceil((float)N / BLOCKSIZE);
double starttime = rtclock();
dkernelaos<<<nblocks, BLOCKSIZE>>>(allnodesAOS);
// Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
cudaDeviceSynchronize();
double endtime = rtclock();
printtime("AoS time: ", starttime, endtime);
starttime = rtclock();
dkernelsoa<<<nblocks, BLOCKSIZE>>>(allnodesSOA.a, allnodesSOA.b, allnodesSOA.c);
cudaDeviceSynchronize();
endtime = rtclock();
printtime("SoA time: ", starttime, endtime);
// Fix: release the device allocations (previously leaked).
cudaFree(allnodesAOS);
cudaFree(allnodesSOA.a);
cudaFree(allnodesSOA.b);
cudaFree(allnodesSOA.c);
return 0;
}
|
12,474 | #include "includes.h"
__global__ void createCosineMatrix(float* matrix, int xsize){
// Fill an xsize x xsize cosine-basis matrix, one column per thread:
// column 0 gets the constant 1/sqrt(xsize), column k>0 gets
// sqrt(2/xsize) * cos(PI*(2*i+1)*k / (2*xsize)) for each row i.
// (Looks like a DCT-II basis — TODO confirm intended orientation.)
int threadGlobalID = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// Fix: guard against launches with more threads than columns — the
// original wrote out of bounds whenever threadGlobalID >= xsize.
if (threadGlobalID >= xsize)
return;
int i;
for (i = 0; i < xsize; i++){
if (threadGlobalID == 0)
matrix[threadGlobalID + i * xsize] = 1 / sqrt((float)xsize);
else
matrix[threadGlobalID + i * xsize] = (sqrt((float)2 / xsize) * cos((PI * (2 * i + 1) * threadGlobalID) / (2 * xsize)));
}
}
12,475 | #include "includes.h"
#define NTHREADS 512
// Updates the column norms by subtracting the Hadamard-square of the
// Householder vector.
//
// N.B.: Overflow incurred in computing the square should already have
// been detected in the original norm construction.
// Builds a normalized Householder vector in `output` from the column in
// `input`: copies input[1..rows), reflects input[0] by +/- the column norm,
// and scales the whole vector by beta = sqrt(2 / ||v||^2).
// Single-block kernel; blockDim.x is assumed to be NTHREADS (power of two).
__global__ void makeHVector(int rows, float * input, float * output)
{
int
i, j;
float
elt, sum;
__shared__ float
beta, sums[NTHREADS];
// Fix: the original returned early for threadIdx.x >= rows, so those
// threads never reached the __syncthreads() calls below — a divergent
// barrier, which is undefined behaviour. Instead, all threads take part
// in every barrier; threads with no data just contribute a zero partial.
sum = 0.f;
if(threadIdx.x < rows) {
for(i = threadIdx.x ; i < rows; i += NTHREADS) {
// element 0 is handled separately by thread 0 after the reduction
if((threadIdx.x == 0) && (i == 0))
continue;
elt = input[i];
output[i] = elt;
sum += elt * elt;
}
}
sums[threadIdx.x] = sum;
__syncthreads();
// Tree-reduce the partial sums of squares into sums[0].
for(i = blockDim.x >> 1; i > 0 ; i >>= 1) {
j = i+threadIdx.x;
if((threadIdx.x < i) && (j < rows))
sums[threadIdx.x] += sums[j];
__syncthreads();
}
if(threadIdx.x == 0) {
elt = input[0];
float norm = sqrtf(elt * elt + sums[0]);
// Reflect away from the sign of input[0] to avoid cancellation.
if(elt > 0)
elt += norm;
else
elt -= norm;
output[0] = elt;
norm = elt * elt + sums[0];
beta = sqrtf(2.f / norm);
}
__syncthreads();   // beta published to the whole block
for(i = threadIdx.x; i < rows; i += NTHREADS)
output[i] *= beta;
}
12,476 | #include <cuda_runtime_api.h>
#include <stdint.h>
__global__ void gaussian_kl_loss_fwd_kernel(
    const float *mean,
    uint32_t batch_sz,
    const float *target_mean,
    float var,
    float target_var,
    float *loss)
{
  // Per-sample KL divergence between two Gaussians sharing scalar
  // variances: 0.5*((target_var + (mu-t)^2)/var - log(target_var)
  //                 + log(var) - 1). One thread per batch element.
  uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx >= batch_sz)
    return;
  float diff = mean[idx] - target_mean[idx];
  loss[idx] = 0.5f * ((target_var + diff * diff) / var - logf(target_var) + logf(var) - 1.0f);
}
extern "C" void neuralops_cuda_gaussian_kl_loss_fwd(
    const float *mean,
    uint32_t batch_sz,
    const float *target_mean,
    float var,
    float target_var,
    float *loss,
    cudaStream_t stream)
{
  // Launch the forward KL kernel on `stream`: 1024-thread blocks,
  // ceil(batch_sz / 1024) blocks.
  const uint32_t threads = 1024;
  const uint32_t blocks = (batch_sz + threads - 1) / threads;
  gaussian_kl_loss_fwd_kernel<<<blocks, threads, 0, stream>>>(
      mean, batch_sz, target_mean, var, target_var, loss);
}
|
12,477 | #include "includes.h"
__global__ void set_kernel(int* dst, int const value, int const count)
{
// Fill dst[0..count) with `value`, one element per thread.
int const i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < count)
dst[i] = value;
}
12,478 |
// Kernel Average with Depth
// In-place box average with wrap-around: each thread averages the cells
// that are not -1 inside a (2*depth+1)^2 window centred on (tidX, tidY),
// treating the grid as a torus, and writes the mean back into its own cell.
//
// NOTE(review): envData is read (neighbours) and written (own cell) in the
// same pass with no double buffering, so results depend on scheduling —
// AVERAGE_DEPTH_1D_V2 below writes to a separate `result` buffer instead;
// confirm which behaviour is intended.
// NOTE(review): the linear index uses envSizeX as the stride for the x
// coordinate (envSizeX * l + k) — this is only consistent for square
// environments or a deliberately transposed layout; TODO confirm against
// the host-side data layout.
extern "C"
__global__ void AVERAGE_DEPTH_1D(int envSizeX, int envSizeY, float* envData, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float moyenne = 0;
int nbNombre = 0;
if(tidX < envSizeX && tidY < envSizeY){
for(int l = tidX - depth; l <= tidX + depth; l++){
// Window column left of the grid: wrap to the right edge.
if(l < 0){
int ltemp = l;
ltemp += envSizeX;
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * ltemp + k] != -1){
moyenne += envData[envSizeX * ltemp + k];
nbNombre++;
}
}
}
}
// Window column right of the grid: wrap to the left edge.
else if(l > envSizeX - 1){
int ltemp = l;
ltemp -= envSizeX;
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * ltemp + k] != -1){
moyenne += envData[envSizeX * ltemp + k];
nbNombre++;
}
}
}
}
// Window column inside the grid: only the y coordinate may wrap.
else{
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * l + ktemp] != -1){
moyenne += envData[envSizeX * l + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * l + ktemp] != -1){
moyenne += envData[envSizeX * l + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * l + k] != -1){
moyenne += envData[envSizeX * l + k];
nbNombre++;
}
}
}
}
}
// Only write when at least one valid (non -1) neighbour was seen.
if(nbNombre != 0){
envData[envSizeX * tidX + tidY] = moyenne / nbNombre;
}
}
// Reached by all threads (outside the bounds check); no shared memory is
// used here, so this barrier has no observable effect.
__syncthreads();
}
//Converting 2D coordinates into one 1D coordinate
__device__ int getFOV(int x, int y,int width){
// Flatten (x, y) into a row-major 1-D index with row stride `width`.
return x + y * width;
}
//Normalize coordinates for infinite world
__device__ int normeFOV(int x, int width){
// Wrap a coordinate onto [0, width) for the infinite (toroidal) world.
// Only corrects an overshoot of less than one full width on either side.
if(x < 0)
return x + width;
if(x >= width)
return x - width;
return x;
}
//Average Kernel
// Box average on a toroidal grid, double-buffered: the mean of all non -1
// cells in the (2*depth+1)^2 window around (tidX, tidY) is written to
// `result`, leaving envData untouched (fixes the in-place race of V1).
//
// NOTE(review): getFOV is called with envSizeY as the row stride while the
// x coordinate is wrapped with envSizeX — consistent only when the env is
// square or deliberately stored transposed; TODO confirm the host layout.
extern "C"
__global__ void AVERAGE_DEPTH_1D_V2(int envSizeX, int envSizeY, float* envData, float* result, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float moyenne = 0;
float nbNombre = 0;
if(tidX < envSizeX && tidY < envSizeY){
// Window bounds (inclusive); coordinates are wrapped per sample.
int borneInfX = tidX - depth;
int borneSupX = tidX + depth;
int borneInfY = tidY - depth;
int borneSupY = tidY + depth;
for(int i = borneInfX; i <= borneSupX; i++){
for(int j = borneInfY; j <= borneSupY; j++){
float valeur = envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)];
// -1 marks an empty/invalid cell and is excluded from the mean.
if(valeur != -1){
moyenne += valeur;
nbNombre++;
}
}
}
if(nbNombre != 0){
result[envSizeY * tidX + tidY] = moyenne / nbNombre;
}
}
}
//Heat Diffusion Kernel
// Heat-diffusion step on a toroidal grid: each cell moves 12.5% of the way
// toward the mean of its window, via new = old + 0.125*(neighbourSum - 8*old).
extern "C"
__global__ void HEAT_DEPTH_1D_V2(int envSizeX, int envSizeY, float* envData, float* result, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
if(tidX < envSizeX && tidY < envSizeY){
// Fix: the centre-cell load was performed before the bounds check, so
// threads past the grid edge could read out of range (normeFOV only
// wraps by a single period). Load it inside the guard instead.
float actualHeat = envData[getFOV(normeFOV(tidX,envSizeX),normeFOV(tidY,envSizeY),envSizeY)];
float heat = 0;
int borneInfX = tidX - depth;
int borneSupX = tidX + depth;
int borneInfY = tidY - depth;
int borneSupY = tidY + depth;
for(int i = borneInfX; i <= borneSupX; i++){
for(int j = borneInfY; j <= borneSupY; j++){
heat += envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)];
}
}
heat -= actualHeat;   // exclude the centre from the neighbour sum
// NOTE(review): writes envData in place while neighbours may still be
// reading it, and `result` is unused — presumably should mirror the
// double-buffered pattern of the other V2 kernels; TODO confirm.
envData[envSizeY * tidY + tidX] = (actualHeat + 0.125f * (heat - 8 * actualHeat));
}
}
//Number Neighbors Kernel
// Count the cells equal to 1.0f in the (2*depth+1)^2 toroidal window around
// (tidX, tidY), excluding the centre cell, and store the count in `result`.
extern "C"
__global__ void NUMBER_NEIGHBORS_ALIVE(int envSizeX, int envSizeY, float* envData, float* result, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float temp = 0.0f;
if(tidX < envSizeX && tidY < envSizeY){
int borneInfX = tidX - depth;
int borneSupX = tidX + depth;
int borneInfY = tidY - depth;
int borneSupY = tidY + depth;
for(int i = borneInfX; i <= borneSupX; i++){
for(int j = borneInfY; j <= borneSupY; j++){
if(!(i == tidX && j == tidY)){
if(envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)] == 1.0f){
temp++;
}
}
}
}
// Fix: removed a __syncthreads() that sat inside this divergent branch —
// out-of-range threads never reached it, which is undefined behaviour.
// No shared memory is used, so the barrier was also unnecessary.
result[envSizeY * tidY + tidX] = temp;
}
}
//State Computation Kernel
// Net neighbour state: +1 for each neighbouring cell equal to 1.0f and -1
// for each equal to -1.0f inside the toroidal window (centre excluded);
// the signed total is written to `result`.
extern "C"
__global__ void STATE_COMPUTATION(int envSizeX, int envSizeY, float* envData, float* result, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float temp = 0.0f;
if(tidX < envSizeX && tidY < envSizeY){
int borneInfX = tidX - depth;
int borneSupX = tidX + depth;
int borneInfY = tidY - depth;
int borneSupY = tidY + depth;
for(int i = borneInfX; i <= borneSupX; i++){
for(int j = borneInfY; j <= borneSupY; j++){
if(!(i == tidX && j == tidY)){
if(envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)] == -1.0f){
temp--;
}
if(envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)] == 1.0f){
temp++;
}
}
}
}
// Fix: removed a __syncthreads() inside this divergent branch (threads
// outside the env never reached it — undefined behaviour). No shared
// memory is used, so no barrier is needed here.
result[envSizeY * tidY + tidX] = temp;
}
}
//Here Computation Kernel
// Count neighbouring cells equal to 1.0f in the toroidal window (centre
// excluded) and store the count in `result` (same logic as
// NUMBER_NEIGHBORS_ALIVE).
extern "C"
__global__ void HERE_COMPUTATION(int envSizeX, int envSizeY, float* envData, float* result, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float temp = 0.0f;
if(tidX < envSizeX && tidY < envSizeY){
int borneInfX = tidX - depth;
int borneSupX = tidX + depth;
int borneInfY = tidY - depth;
int borneSupY = tidY + depth;
for(int i = borneInfX; i <= borneSupX; i++){
for(int j = borneInfY; j <= borneSupY; j++){
if(!(i == tidX && j == tidY)){
if(envData[getFOV(normeFOV(i,envSizeX),normeFOV(j,envSizeY),envSizeY)] == 1.0f){
temp++;
}
}
}
}
// Fix: dropped the __syncthreads() that sat inside this divergent branch
// (undefined behaviour; also pointless — no shared memory is used).
result[envSizeY * tidY + tidX] = temp;
}
}
|
12,479 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do\
{\
printf("Usage: %s <# of elements> <random seed> \n%s\n", argv[0], s);\
exit(-1);\
}while(0);
__global__ void xor_piece(int *arr, int *step, int num)
{
// One level of the XOR reduction tree: element i = t * (*step) absorbs
// element i + (*step)/2. *step lives in device memory so the host can
// double it between launches without a copy (see double_step).
int t = (blockDim.x * blockIdx.x + threadIdx.x);
int s = *step;
// Fix: the original guarded against overflow with a float division,
// ((float)num / t < *step), which divides by zero for thread 0 and can
// misjudge large indices due to float rounding. Doing the index
// arithmetic in 64 bits gives an exact guard with the same intent.
long long i = (long long)t * s;
if ((i >= num) || ((i + s / 2) >= num))
return;
arr[i] ^= arr[i + s / 2];
}
__global__ void double_step(int* step)
{
// Single-thread kernel: double the device-side stride so it stays in
// lock-step with the host's copy between xor_piece launches.
*step = *step * 2;
}
int main(int argc, char **argv)
{
// XOR-reduce `num` random ints on the GPU using log2(num) rounds of the
// xor_piece kernel; prints transfer+compute timings and the result.
struct timeval start, end, t_start, t_end;
unsigned long i;
int *host_mem;
int *gpu_mem;
int *host_step;
int *gpu_step;
int *answer;
unsigned long num; /*Default value of num from MACRO*/
int blocks, seed;
long parsed;
if(argc != 3)
USAGE_EXIT("Not enough parameters");
// Fix: parsing with atoi straight into an unsigned long made the
// "<= 0" check useless (negative input wrapped to a huge count).
// Validate the signed value first.
parsed = atol(argv[1]);
if(parsed <= 0)
USAGE_EXIT("Invalid number of elements");
num = (unsigned long)parsed;
seed = atoi(argv[2]); /*Update after checking*/
if(seed <= 0)
USAGE_EXIT("Invalid number of elements");
/* Allocate host (CPU) memory and initialize*/
host_mem = (int*)malloc(num * sizeof(int));
srand(seed);
for(i=0; i<num; ++i){
host_mem[i] = random();
}
answer = (int*)malloc(sizeof(int));
host_step = (int*)malloc(sizeof(int));
*host_step = 2;
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_mem, num * sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMalloc(&gpu_step, sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, host_mem, num * sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
cudaMemcpy(gpu_step, host_step, sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
// Each thread handles one stride of 2 elements; 1024 threads per block.
blocks = num / 2048;
if(num % 2048)
++blocks;
while((*host_step / 2) <= num)
{
xor_piece<<<blocks, 1024>>>(gpu_mem, gpu_step, num);
CUDA_ERROR_EXIT("kernel invocation");
double_step<<<1, 1>>>(gpu_step);
CUDA_ERROR_EXIT("kernel invocation");
*host_step *= 2;
}
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(answer, gpu_mem, sizeof(int) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs. Processsing = %ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
// Fix: gpu_step was leaked before.
cudaFree(gpu_step);
/*Print the answer*/
printf("Result = %d\n", *answer);
free(host_mem);
free(host_step);
free(answer);
}
|
12,480 | extern "C"
__global__
void contrastiveDivergence(float* positive, float* negative, float* weights, float learningRate, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
weights[i] = weights[i] + (positive[i] - negative[i]) * learningRate;
}
} |
12,481 | #include <iostream>
__global__ void add(int* devVal, int addEnd){
// Accumulate addEnd into *devVal and report the new value from the device.
*devVal += addEnd;
printf("Value after kernel is %d\n", *devVal);
}
int main(void){
// Managed-memory demo across two GPUs: run `add` on device 0, copy the
// value to device 1 with cudaMemcpyPeer, then run `add` there.
int* p0;
int* p1;
int currentDev;
size_t size = sizeof(int);
cudaSetDevice(0);
cudaMallocManaged(&p0, size);
*p0 = 2;
// Fix: "&currentDev" had been mojibake-mangled to "¤tDev" (an HTML
// entity for '&'), which does not compile.
cudaGetDevice(&currentDev);
printf("Calling add on device: %d\n", currentDev);
add<<<1,1>>>(p0, 2);
cudaDeviceSynchronize();
cudaSetDevice(1);
cudaMallocManaged(&p1, size);
cudaError_t memErr = cudaMemcpyPeer(p1, 1, p0, 0, size);
// Fix: the peer-copy status was captured but never checked.
if (memErr != cudaSuccess)
printf("cudaMemcpyPeer failed: %s\n", cudaGetErrorString(memErr));
cudaGetDevice(&currentDev);
printf("Calling add on device: %d\n", currentDev);
add<<<1,1>>>(p1, 3);
cudaDeviceSynchronize();
printf("The final value is %d\n", *p1);
// Fix: release both managed allocations (previously leaked); the unused
// local h1 was removed as well.
cudaFree(p1);
cudaSetDevice(0);
cudaFree(p0);
}
12,482 | extern "C" {
__global__
void gaussian_blur(const float* const inputChannel,
                   float* const outputChannel,
                   int numRows, int numCols, const float* const filter, const int filterWidth)
{
    // Convolve one image channel with a filterWidth x filterWidth kernel,
    // clamping sample positions at the image border (replicate padding).
    // One thread per output pixel; filterWidth is assumed odd.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ( col >= numCols || row >= numRows )
    {
        return;
    }
    const int halo = filterWidth / 2;
    float acc = 0.f;
    for (int dr = -halo; dr <= halo; ++dr)
    {
        // Clamp the sampled row into [0, numRows).
        const int r = min(max(row + dr, 0), numRows - 1);
        for (int dc = -halo; dc <= halo; ++dc)
        {
            // Clamp the sampled column into [0, numCols).
            const int c = min(max(col + dc, 0), numCols - 1);
            const float w = filter[(dr + halo) * filterWidth + (dc + halo)];
            acc += inputChannel[r * numCols + c] * w;
        }
    }
    outputChannel[row * numCols + col] = acc;
}
} |
12,483 | #include "includes.h"
__global__ void histo_equalization_kernel ( unsigned char *buffer, long size, int *histo, unsigned char *output ) {
// Map every input byte through the device-side lookup table dev_lut
// (presumably the equalization LUT built elsewhere — confirm), saturating
// at 255. Grid-stride loop; `histo` is unused in this kernel.
int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x;
for (; idx < size; idx += stride) {
output[idx] = (dev_lut[buffer[idx]] > 255)
? 255
: (unsigned char) dev_lut[buffer[idx]];
}
}
// Tiled matrix multiply: a[i][j] = sum_k b[i][k] * c[k][j], with each thread
// computing two output elements (columns j = 2*blockIdx.y and j+1) and the
// c-columns staged through shared memory in bx-wide chunks.
//
// Assumptions (unchecked): p is a multiple of blockDim.x; blockDim.x <= 512
// (size of the shared buffers); the grid exactly covers n and m, so no
// bounds guards are needed. pitch_* are element strides, column-major-ish
// layout where row index is the fastest-varying — TODO confirm against the
// host-side layout.
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int bx = blockDim.x;
int i = blockIdx.x*bx + tx;
int j = blockIdx.y*2;
// Shared staging buffers for the two c-columns handled by this block.
__shared__ float cb0[512], cb1[512];
float sum0 = 0.0, sum1 = 0.0;
for( int ks = 0; ks < p; ks += bx ){
// Each thread stages one element of each column chunk.
cb0[tx] = c[ks+tx+pitch_c*j];
cb1[tx] = c[ks+tx+pitch_c*(j+1)];
__syncthreads();
for( int k = ks; k < ks+bx; ++k ){
float rb = b[i+pitch_b*k];
sum0 += rb * cb0[k-ks];
sum1 += rb * cb1[k-ks];
}
// Barrier before the next chunk overwrites cb0/cb1.
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+pitch_a*(j+1)] = sum1;
}
|
12,485 | #include "includes.h"
// PageRank-style rank propagation over a CSR-like reverse-edge structure:
// node `dest`'s new value is (1 - DECAY) plus DECAY-damped contributions
// from each in-neighbour `src`.
//
// NOTE(review): contributions are MULTIPLIED by out_degrees[src]; given the
// kernel name ("inv"), out_degrees presumably stores 1/out-degree rather
// than the raw degree — TODO confirm where the array is built.
// destination_offsets must have node_count+1 entries (CSR offsets).
__global__ void compute_inv(const int* destination_offsets, const int* source_indices, const float* out_degrees, const int node_count, const float* input, float *output)
{
int dest = blockDim.x*blockIdx.x + threadIdx.x;
if (dest<node_count)
{
// In-edge range for this destination node.
int srcStart = destination_offsets[dest];
int srcEnd = destination_offsets[dest + 1];
int in_degree = srcEnd - srcStart;
float rank = 0;
if (in_degree>0)
{
for (int srcIdx = srcStart; srcIdx<srcEnd; ++srcIdx)
{
int src = source_indices[srcIdx];
float contrib = ((input[src] * DECAY) * out_degrees[src]);
rank = rank + contrib;
}
}
// Teleport/base term added once per node.
output[dest] = rank + (1 - DECAY);
}
}
12,486 |
#include <iostream>
#ifdef _WIN32
# define IMPORT __declspec(dllimport)
#else
# define IMPORT
#endif
int static_cuda11_func(int);
IMPORT int shared_cuda11_func(int);
void test_functions()
{
    // Exercise one statically-linked and one shared-library entry point.
    const int probe = 42;
    static_cuda11_func(probe);
    shared_cuda11_func(probe);
}
int main(int argc, char** argv)
{
    // Link/smoke test only: call both library functions and exit cleanly.
    (void)argc;
    (void)argv;
    test_functions();
    return 0;
}
|
12,487 | // memory page is managed by OS
// "unpageable" means cuda directly accesses physical RAM
// to access pageable memory allocated by like malloc()
// pageable memory -> pinned memory -> device memory
// if we directly operate on pinned memory
// then we save transfer from pageable memory to pinned memory
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void HandleError(cudaError_t err, const char *file, int line )
{
    // Abort with source location on any CUDA failure (used via the
    // HANDLE_ERROR macro, which supplies __FILE__/__LINE__).
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit( EXIT_FAILURE );
}
int main()
{
// Measure host<->device copy bandwidth for a 1 GiB buffer, first from
// ordinary pageable memory (malloc) and then from pinned memory
// (cudaMallocHost), printing GB/s for each direction.
unsigned int N = 256 * 1024 * 1024;
float *d = NULL;
HANDLE_ERROR( cudaMalloc((void**)&d, N * sizeof(float)) );
cudaEvent_t startEvent, stopEvent;
HANDLE_ERROR( cudaEventCreate(&startEvent) );
HANDLE_ERROR( cudaEventCreate(&stopEvent) );
float *h_a = NULL, *h_b = NULL;
float time = 0;
// test on pageable memory
// (the buffers are deliberately left uninitialized — only transfer
// speed matters here, not the payload)
h_a = (float *) malloc(N * sizeof(float));
h_b = (float *) malloc(N * sizeof(float));
HANDLE_ERROR( cudaEventRecord(startEvent, 0) );
HANDLE_ERROR( cudaMemcpy(d, h_a, N * sizeof(float), cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
HANDLE_ERROR( cudaEventElapsedTime(&time, startEvent, stopEvent) );
// bytes * (1000 ms/s) / (GiB * elapsed-ms) => GB/s
printf("Pageable, host to device, %.3fGB/s\n",
N * sizeof(float) * 1000.0 / (1024 * 1024 * 1024 * time));
HANDLE_ERROR( cudaEventRecord(startEvent, 0) );
HANDLE_ERROR( cudaMemcpy(h_b, d, N * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
HANDLE_ERROR( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("Pageable, device to host, %.3fGB/s\n",
N * sizeof(float) * 1000.0 / (1024 * 1024 * 1024 * time));
free(h_a);
free(h_b);
h_a = NULL;
h_b = NULL;
// test on pinned memory
// (cudaMallocHost gives page-locked memory the DMA engine can use
// directly, skipping the staging copy through a pinned bounce buffer)
HANDLE_ERROR( cudaMallocHost((void**)&h_a, N * sizeof(float)) );
HANDLE_ERROR( cudaMallocHost((void**)&h_b, N * sizeof(float)) );
HANDLE_ERROR( cudaEventRecord(startEvent, 0) );
HANDLE_ERROR( cudaMemcpy(d, h_a, N * sizeof(float), cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
HANDLE_ERROR( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("Pinned, host to device, %.3fGB/s\n",
N * sizeof(float) * 1000.0 / (1024 * 1024 * 1024 * time));
HANDLE_ERROR( cudaEventRecord(startEvent, 0) );
HANDLE_ERROR( cudaMemcpy(h_b, d, N * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stopEvent, 0) );
HANDLE_ERROR( cudaEventSynchronize(stopEvent) );
HANDLE_ERROR( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("Pinned, device to host, %.3fGB/s\n",
N * sizeof(float) * 1000.0 / (1024 * 1024 * 1024 * time));
cudaFreeHost(h_a);
cudaFreeHost(h_b);
// clean up
cudaFree(d);
HANDLE_ERROR( cudaEventDestroy(startEvent) );
HANDLE_ERROR( cudaEventDestroy(stopEvent) );
return 0;
}
|
12,488 | #include <cuda_runtime.h>
#include <bits/stdc++.h>
#define BLOCKS 32768*2
#define THREADS 256
#define SIZE BLOCKS*THREADS
using namespace std;
__host__ void printArr(int *arr, int size);
__host__ void randomArrGenerator(int *arr, int size);
__host__ void checkSorted(int *arr, int size);
__device__ void swapCu(int &a, int &b)
{
    // Exchange two ints on the device (no std::swap in device code).
    const int old_a = a;
    a = b;
    b = old_a;
}
//bitonic sort on GPU
//bitonic sort on GPU
__global__ void bitonicSortCu(int *arr, int i, int j, int size)
{
    // One compare-exchange step of the bitonic network: run width i,
    // compare distance j. Thread k acts only when it sits in the lower
    // half of its 2j-wide group; the run's parity sets the direction.
    const int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= size || k % (j << 1) >= j)
        return;
    const bool descending = (k / i) % 2;
    const bool outOfOrder = descending ? (arr[k] < arr[k + j])
                                       : (arr[k] > arr[k + j]);
    if (outOfOrder)
        swapCu(arr[k], arr[k + j]);
}
void bitonicSortParallel(int *arr, int size)
{
    // Drive the bitonic network: the outer loop doubles the sorted-run
    // width, the inner loop halves the compare distance. All launches go
    // to the default stream, so they execute in order without explicit
    // synchronization. size must be a power of two.
    for (int width = 2; width <= size; width <<= 1)
        for (int dist = width >> 1; dist >= 1; dist >>= 1)
            bitonicSortCu<<<BLOCKS, THREADS>>>(arr, width, dist, size);
}
int main(int argc, char const *argv[])
{
    // Generate SIZE random ints, bitonic-sort them on the GPU, and verify.
    int *d_arr;
    int *arr = new int[SIZE];
    randomArrGenerator(arr, SIZE);
    cudaMalloc(&d_arr, sizeof(int)*SIZE);
    cudaDeviceSynchronize();
    //start timer here
    // NOTE: arr is pageable, so these Async copies behave like staged
    // synchronous copies on the default stream.
    cudaMemcpyAsync(d_arr, arr, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    bitonicSortParallel(d_arr, SIZE);
    cudaMemcpyAsync(arr, d_arr, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    //end timer here
    checkSorted(arr, SIZE);
    // Fix: release the device and host buffers (both were leaked before).
    cudaFree(d_arr);
    delete[] arr;
    return 0;
}
//Auxilliary CPU functions
__host__ void checkSorted(int *arr, int size)
{
    // Verify ascending order and report the outcome on stdout.
    bool ok = true;
    for (int i = 1; i < size && ok; i++)
        ok = (arr[i] >= arr[i - 1]);
    if (!ok)
    {
        cout << "sorting unsuccessful\n";
        return;
    }
    cout << "sorting successful\n";
}
__host__ void randomArrGenerator(int *arr, int size)
{
    // Fill with pseudo-random values in [0, 1000). rand() is not seeded
    // here, so repeated runs produce the same sequence.
    for (int i = 0; i < size; ++i)
        arr[i] = rand() % 1000;
}
__host__ void printArr(int *arr, int size)
{
    // Print the array space-separated on a single line.
    for (int i = 0; i < size; ++i)
        cout << arr[i] << " ";
    cout << endl;
}
__host__ void swap(int &a, int &b)
{
    // CPU-side int exchange (mirror of the device-side swapCu).
    const int old_b = b;
    b = a;
    a = old_b;
}
//bitonic sort on CPU
//bitonic sort on CPU
__host__ void bitonicSort(int *arr, int size)
{
    // Sequential reference bitonic sort; size must be a power of two.
    if (size <= 1)
        return;
    for (int width = 2; width <= size; width <<= 1)
    {
        for (int dist = width >> 1; dist >= 1; dist >>= 1)
        {
            for (int k = 0; k < size; ++k)
            {
                // Only the lower half of each 2*dist group compares.
                if (k % (dist << 1) >= dist)
                    continue;
                const bool descending = (k / width) % 2;
                if ((descending && arr[k] < arr[k + dist]) ||
                    (!descending && arr[k] > arr[k + dist]))
                    swap(arr[k], arr[k + dist]);
            }
        }
    }
}
|
12,489 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define N 1024
//making my own strcpy and and str cat because screw cuda, not giving access to libraries :(
// Device-side strcpy (no libc on the GPU): copies src into dest, including
// the terminating NUL, and returns dest.
__device__ char* nStrCpy(char *dest, const char *src) {
int i =0;
do {
// Fix: the original copied src[1] on every iteration, so dest was
// filled with a single repeated character instead of the string.
dest[i] = src[i];
} while (src[i++] != 0);
return dest;
}
// Device-side strcat: appends src (with its NUL) at dest's terminator and
// returns dest.
__device__ char* nStrcat(char *dest, const char *src){
int i =0;
while (dest[i] != 0) i++;
// Fix: the original appended at dest+1 instead of dest+i, overwriting
// the head of dest rather than extending its tail.
nStrCpy(dest+i, src);
return dest;
}
//this makes a single password, recursivly adding 2 characters to password every time and removing one from site
__device__ void makePassword(char *square, char* site, int position, int direction, int size, char* password) {
//x position and y position within square as square is a linear array
int x = position%size;
int y = position/size;
int firstCharP = 0;
int secCharP = 0;
//if direction is vertical
if (direction ==0) {
//check every character in the current vertical line
for (int i =0; i < size; i++) {
//position of new character
int newPosition = (i*size) + x;
//found a match
if (site[0] == square[newPosition]) {
//goes up
if (newPosition < position) {
//first character for password
firstCharP = newPosition - size;
//if below first line go to bottom
if(firstCharP < 0)
firstCharP += (size * size);
//second character for password
secCharP = firstCharP - size;
if(secCharP < 0)
secCharP += (size*size);
//goes down
} else {
firstCharP = newPosition + size;
// if below last line, loop to top
if (firstCharP >= (size*size))
firstCharP -= (size*size);
secCharP = firstCharP + size;
if(secCharP >= (size*size))
secCharP -= (size*size);
}
}
}
//switch to horizontal directiuon for next 2 characters
direction = 1;
//if direction is horizontal
} else {
for (int i =0; i < size; i++) {
int newPosition = (y*size)+i;
if (site[0] == square[newPosition]) {
//new position to the left of previous, should never be the same
if (newPosition < position) {
firstCharP = newPosition -1;
//if previous row, wrap around to right side instead
if ((firstCharP/size) < y || firstCharP == -1)
firstCharP += size;
secCharP = firstCharP -1;
if ((secCharP/size) < y || secCharP == -1)
secCharP += size;
//new position to right of previous
} else {
//if on next row wrap to front
firstCharP = newPosition +1;
if ((firstCharP/size) > y)
firstCharP -= size;
secCharP = firstCharP +1;
if ((secCharP/size) > y)
secCharP -= size;
}
}
}
//switch to vertical direction for next couple of characters
direction = 0;
}
//go to next character in site name
site++;
//if more of the password is neeeded
if (site[0] != '\n') {
//set the next couple of characters
password[0] = square[firstCharP];
password[1] = square[secCharP];
//increase pointer to start the next part of password without overwrting previous characters
password++;
password++;
//mor parts of the password!
makePassword(square, site, secCharP, direction, size, password);
} else {
//set the last two characters of the password.
password[0] = square[firstCharP];
password[1] = square[secCharP];
}
}
//get the starting poisition of the password within the gride, i.e. start at top row, and travel through the domain name
__device__ int getStartPosition(char *square, char *site, int size) {
int position =0;
//find the atarting position within the first row
for (int i =0; i < size; i++) {
if (square[i] == site[0])
position = i;
}
//direction 0 is going down, as it starts
int direction = 0;
//doing 6 characters only, because apparently I hate make modularized code the first time
for (int i =1; i < 6; i++) {
//x and y position within a linear array
int x = position%size;
int y = position/size;
//check all characters in row/colums
for (int j = 0; j < size; j++) {
//vertical directions
if (direction ==0) {
//it found the next character!
if (site[i] == square[(j * size) + x ]) {
position = (j * size) +x;
direction = 1;
break;
}
//horizontal direction
} else {
//it found the nest character!
if (site[i] == square[(y * size) + j]) {
position = (y* size) + j;
direction = 0;
break;
}
}
}
}
//return the starting poistion... because that's the point of this function ... dur
return position;
}
//make a random password
__global__ void randomWords(char *square, char *passwords, int size, int *c, int amount) {
//that id though
int tid = blockIdx.x*blockDim.x+threadIdx.x;
//cuda random intitalizers
curandState_t state;
curand_init(tid, 1, 2, &state);
//make a certain number of passwords per core
for (int a = 0; a < amount; a++) {
//starting position for this password
int tidNum = ((tid * amount) + a) *24;
passwords[(tidNum)] = square[(curand(&state) % size)];
//7 characters for the site, 6 and a \n
char site[7];
site[0] = passwords[tidNum];
site[6] = '\n';
//make 6 random characters
for (int i=1; i < 6; i++) {
//make sure 2 characters do not repeat
do {
passwords[i + (tidNum)] = square[(curand(&state) % size)];
} while (passwords[(i-1) +(tidNum)] == passwords[i + (tidNum)]);
//set random character
site[i] = passwords[i + (tidNum)];
}
// add that ' -> ' Miller wanted
passwords[7 + (tidNum)] = ' ';
passwords[8 + (tidNum)] = '-';
passwords[9 + (tidNum)] = '>';
passwords[10 + (tidNum)] = ' ';
//lets get that starting position
int position = getStartPosition(square, site, size);
//stored the startingposition within c for debuggin puroposes
//I left this in here becuase it could be useful if I ever come back to this project
c[(tid * amount)+ a] = position;
//create the password object
char *password;
password = (char *)malloc(sizeof(char) *13);
//generate that password finally
makePassword(square, site, position, 1, size, password);
//save the password in the passwords array that the main program can access
for(int i = 0; i < 12; i++) {
passwords[11 + i + (tidNum)] = password[i];
}
}
}
/* Driver: reads a 15x15 latin square from grid15.txt, launches the password
 * generator on the requested CUDA device, and writes the records to
 * output15.txt. */
int main(int argc, char ** argv)
{
    // launch geometry
    dim3 gridsize, blocksize;
    /* Fix: the original dereferenced argv[1] without checking argc. */
    if (argc < 2) {
        printf("usage: %s <cuda-device-number>\n", argv[0]);
        return 1;
    }
    int device = atoi(argv[1]);
    cudaSetDevice(device);
    // size of the grid
    int size = 15;
    // Request at least `total` passwords, rounded up to a multiple of the
    // N threads so every thread produces the same number.
    int total = 500;
    int amount = total / N;
    amount++;
    total = amount * N;
    // grid file to be read
    char file[] = "grid15.txt";
    // host-side staging copy of the grid
    char grid[size][size];
    char *square;
    cudaMallocManaged((void**)&square, size * size * sizeof(char));
    // unified-memory output buffer: 24 bytes per record
    char *passwords;
    printf("total: %d\n", total);
    cudaMallocManaged((void**)&passwords, sizeof(char) * 24 * total);
    /* Fix: check fopen before reading — the original crashed on a missing
     * grid file. */
    FILE *file1 = fopen(file, "r");
    if (file1 == NULL) {
        printf("error opening %s\n", file);
        return 1;
    }
    // copy each character from the grid file into the staging grid
    for (int i=0; i < size; i++) {
        for (int j=0; j < size; j++) {
            fscanf(file1, "%c\n", &grid[i][j]);
        }
    }
    // flatten the grid into the unified-memory array the kernel reads
    for(int i=0; i < size;i++) {
        for(int j=0; j < size; j++) {
            square[size * i + j] = grid[i][j];
        }
    }
    fclose(file1);
    // starting positions of each password (kept for debugging)
    int *c;
    cudaMallocManaged( (void**)&c, N * amount * sizeof(int));
    // 16 threads per block; N total threads
    blocksize.x = 16;
    gridsize.x = N/blocksize.x;
    randomWords<<<gridsize.x, blocksize.x>>>(square, passwords, size, c, amount);
    cudaDeviceSynchronize();
    // output file
    FILE * f = fopen("output15.txt", "w");
    if (f == NULL) {
        printf("error opening output.txt\n");
        exit(1);
    }
    /* Fix: the original malloc'd an unused `output` buffer on every
     * iteration of this loop and never freed it — removed. Each record is
     * 24 bytes; the first 23 are printable, the 24th is padding. */
    for (int i=0; i<total; i++)
    {
        for (int j=0; j<23;j++) {
            fprintf(f, "%c", passwords[j + (i * 24)]);
        }
        fprintf(f, "\n");
    }
    // release device memory and close the file
    fclose(f);
    cudaFree( c );
    cudaFree( square );
    cudaFree( passwords );
    return 0;
}
|
12,490 |
namespace GPU {
// Correlations are clamped below this bound so the Fisher transform below
// stays finite.
#define CUT_THR 0.9999999
// Two-sided p-value for a correlation coefficient r at the given sample
// size, via Fisher's z-transformation. NaN correlations are treated as 0.
__device__ double calcPValue(double r, int sampleSize)
{
r = isnan(r) ? 0.0 : fmin(CUT_THR, fabs(r));
// atanh(r) written as 0.5*log1p(2r/(1-r)) for numerical stability near r=1;
// scaled by sqrt(n-3), the standard error factor of Fisher's z.
double absz = sqrt(sampleSize - 3.0) * 0.5 * log1p(2.0 * r / (1.0 - r));
return 2.0 * (1.0 - normcdf(absz));
}
// p-value of the first-order partial correlation of edge (i,j) given k.
__device__ double pValL1(double x1, double x2, double x3, int sampleSize)
{
// with edge i, j given k values are:
// x1: edge i, j
// x2: edge i, k
// x3: edge j, k
double r = (x1 - x2 * x3) / sqrt((1.0 - x3 * x3) * (1.0 - x2 * x2));
return calcPValue(r, sampleSize);
}
}
12,491 | #include <cuda.h>
#include <iostream>
using namespace std;
/* example for device function usage
*/
/* Device helper: every thread of the block increments its own element of
 * `blockData`. The caller must point `blockData` at this block's slice. */
__device__ void addOne_block(double *blockData) {
    blockData[threadIdx.x] += 1.0;
}
/* Kernel: adds one to every element of data[0..n).
 * Fix: the original ignored `n` entirely, so any launch whose grid covered
 * more than n elements wrote out of bounds. Each thread now guards its
 * global index before delegating to the device helper. */
__global__ void addOne(int n, double *data) {
    int b = blockIdx.x;
    int i = b * blockDim.x + threadIdx.x;   // this thread's global element
    if (i < n) {
        // each block passes its slice of `data` to the helper
        addOne_block(data + b*blockDim.x);
    }
}
/* Driver: fills an array 0..n-1 on the host, increments it on the GPU, and
 * prints the last element (expected n). */
int main() {
    int n = 2048;
    double *data = (double*) malloc(n * sizeof(double));
    for (int i=0; i<n; i++) {
        data[i] = (double)i;
    }
    double *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(double));
    cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
    /* Fix: the original hard-coded 32 blocks, which only matches n == 2048;
     * derive the grid from n with a ceiling division instead. */
    int threadsPerBlock = 64;
    int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    addOne <<< blocks, threadsPerBlock >>> (n, data_dev);
    cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
    cudaFree(data_dev);
    cout << "data[n-1] = " << data[n-1] << endl;
    free(data);
}
|
12,492 | // #include<iostream>
#include<fstream>
#include<math.h>
#include<vector>
#include <chrono>
#include <iomanip>
using namespace std::chrono;
using namespace std;
// One explicit Euler step of the 2-D heat equation (5-point stencil),
// flattened to 1-D with row stride n:
//   T = T_old + r * (N + S + E + W - 4*centre)
// Fix: `r` was declared `int`, but the host computes r = nu*dt/delta^2 as a
// double (typically < 1) and passes that double here — the implicit
// conversion truncated it to 0, so the field never evolved. The parameter
// is now a double; existing call sites are unchanged.
// NOTE(review): the guard `j*n+k>n && j*n+k<=n*n` does not exclude the last
// row/column, so the (j+1)/(k+1) neighbour reads rely on the host's
// (n+1)*(n+1) over-allocation — confirm before shrinking that allocation.
// (The original's trailing __syncthreads() was a no-op at kernel end and
// has been dropped.)
__global__ void Calculate(double *T,double *T_old,double r,int n)
{
    int j = blockDim.x*(blockIdx.x) + threadIdx.x;
    int k = blockDim.y*(blockIdx.y) + threadIdx.y;
    if(j*n+k>n && j*n+k<=n*n)
    {
        *(T+j*n+k)=*(T_old+j*n+k)+r*( *(T_old+(j+1)*n+k)+*(T_old+j*n+k+1)+*(T_old+(j-1)*n+k)+*(T_old+j*n+k-1)- 4* *(T_old+j*n+k));
    }
}
/* Driver: reads simulation parameters from input.dat, initializes the field
 * (hot square patch on a unit background), time-steps on the GPU with
 * double buffering, and writes int.dat / soln.dat. */
int main(){
    int n,ntime;
    double delta, sigma,nu,dom_len,dt,r;
    fstream fin;
    fin.open("input.dat",ios::in);
    fin>>n>>sigma>>nu>>dom_len>>ntime;
    fin.close();
    delta=dom_len/(n-1);
    dt=(sigma*pow(delta,2))/nu;         // CFL-limited time step
    auto start = high_resolution_clock::now();
    double *T = (double *)malloc((n+1) * (n+1 )* sizeof(double));
    double *x = (double *)malloc((n+1) *( n+1) * sizeof(double));
    double *y = (double *)malloc((n+1) * (n+1) * sizeof(double));
    double *T_old = (double *)malloc((n+1) * (n+1) * sizeof(double));
    // boundary coordinates
    for(int i=1;i<n+1;++i)
    {
        *(x+n+i)=0.0;
        *(x+n*n+i)=2.0;
        *(y+n*i+1)=0.0;
        *(y+n+n*i)=2.0;
    }
    // interior coordinates, built up row/column by row/column
    for(int i=2;i<n;++i)
    {
        for(int j=1;j<n+1;++j)
        {
            *(x+i*n+j)=*(x+(i-1)*n+j)+delta;
            *(y+j*n+i)=*(y+(j)*n+i-1)+delta;
        }
    }
    // initial condition: T = 2 inside [0.5,1.5]^2, T = 1 elsewhere
    for(int i=1;i<n+1;++i)
    {
        for(int j=1;j<n+1;++j)
        {
            if(*(x+i*n+j)<=1.5 && *(x+i*n+j)>=0.5 && *(y+i*n+j)<=1.5 && *(y+i*n+j) >=0.5)
            {
                *(T+i*n+j)=2.0;
            }
            else{
                *(T+i*n+j)=1.0;
            }
        }
    }
    // dump the initial field
    fstream foutw;
    foutw.open("int.dat",ios::out);
    for(int i=1;i<n+1;++i)
    {
        for(int j=1;j<n+1;++j)
        {
            foutw<<*(x+i*n+j)<<" "<<*(y+i*n+j)<<" "<<*(T+i*n+j)<<"\n";
        }
    }
    foutw.close();
    r=(nu*dt)/pow(delta,2);
    /* Fix: `temp` is only a swap helper. The original cudaMalloc'd it too;
     * the first swap overwrote that pointer (leaking the allocation) and the
     * final cudaFree(temp) then double-freed one of the live buffers. */
    double *temp;
    double *dev_T;
    double *dev_t_old;
    cudaMalloc(&dev_T,(n+1)*(n+1)*sizeof(double));
    cudaMalloc(&dev_t_old,(n+1)*(n+1)*sizeof(double));
    cudaMemcpy(dev_T, T, (n+1)*(n+1)*sizeof(double), cudaMemcpyHostToDevice);
    for(int i=1;i<=ntime;++i)
    {
        // ping-pong the buffers: last step's output becomes this step's input
        temp=dev_T;
        dev_T=dev_t_old;
        dev_t_old=temp;
        dim3 a(32,8);
        dim3 b(n/a.x,n/a.y);
        Calculate<<<b,a>>>(dev_T,dev_t_old,r,n);
        cudaDeviceSynchronize();
        cudaError_t error = cudaGetLastError();
        if (error != cudaSuccess) {
            fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
        }
    }
    cudaMemcpy(T, dev_t_old, (n+1)*(n+1)*sizeof(double), cudaMemcpyDeviceToHost);
    /* Fix: cudaFree takes the device pointer itself; the original passed the
     * ADDRESS of the pointer (cudaFree(&dev_T)), which frees nothing. */
    cudaFree(dev_T);
    cudaFree(dev_t_old);
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<milliseconds>(stop - start);
    printf("simulation completed\n");
    /* Fix: duration.count() is a long long; printing it with %d was
     * undefined behaviour. */
    printf("Time_Taken : %lld\n",(long long)duration.count());
    fstream fout;
    fout.open("soln.dat",ios::out);
    for(int i=1;i<n+1;++i)
    {
        for(int j=1;j<n+1;++j)
        {
            fout<<std::scientific<<*(x+i*n+j)<<" "<<*(y+i*n+j)<<" "<<*(T+i*n+j)<<"\n";
        }
    }
    fout.close();
    /* Fix: this memory came from malloc(), so it must be released with
     * free(); `delete x,y,T,T_old,temp` was undefined behaviour and — via
     * the comma operator — only ever deleted x anyway. */
    free(x);
    free(y);
    free(T);
    free(T_old);
    return 0;
}
12,493 | /*********************************************************************************
*FileName: Stencil
*Author: Glinttsd
*Version: 1.0
*Date: 2020.10.23
*Description: GPUм٣һƾIJijһԪص
* RADIUSΧڵԪӣ洢һ
*Others: 鳤N̫ʾĿǿshare memoryĺʹá̹߳
* Ӧ÷ֶblockм㣬ʾֻһblock
**********************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
#define N 16 // NӦ̫(1-255)һblockл̫߳
#define RADIUS 3
// Fill a[0..N-1] with the values 1..N.
void init_vec(int* a)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = idx + 1;
}
// Print the N elements of b, one per line.
void func_print(int* b)
{
    for (int idx = 0; idx < N; ++idx)
        printf("%d\n", b[idx]);
}
/* 1-D stencil of radius RADIUS over `in`, staged through shared memory.
 * Assumes a single block with blockDim.x == N; out-of-range neighbours are
 * treated as zero via the halo cells. */
__global__ void stencil_kernel(int *in, int *out)
{
    int ID_local = threadIdx.x;                         // index within the block
    int ID_global = blockIdx.x * blockDim.x + threadIdx.x; // global element index
    __shared__ int share_in[N + 2 * RADIUS];            // tile + left/right halos
    /* Zero both halos.
     * Fix: the original wrote share_in[(N + 2*RADIUS) - ID_local], which for
     * ID_local == 0 is index N+2*RADIUS — one PAST the end of the array —
     * and it never zeroed index N+RADIUS, leaving part of the right halo
     * uninitialized shared memory. */
    if (ID_local < RADIUS)
    {
        share_in[ID_local] = 0;                 // left halo
        share_in[N + RADIUS + ID_local] = 0;    // right halo
    }
    // each thread loads its own element into the tile body
    share_in[ID_local + RADIUS] = in[ID_global];
    __syncthreads();    // tile must be complete before any thread reads it
    // sum the 2*RADIUS+1 neighbourhood
    int value = 0;
    for (int offset = -RADIUS; offset < RADIUS + 1; offset++)
    {
        value += share_in[ID_local + RADIUS + offset];
    }
    out[ID_global] = value;
}
/* Driver: runs the single-block stencil over N elements and prints the
 * result. */
int main()
{
    // host buffers
    int* a = (int*)malloc(sizeof(int) * N);
    int* b = (int*)malloc(sizeof(int) * N);
    // device buffers
    int* dev_a, *dev_b;
    cudaMalloc((void**)&dev_a, sizeof(int) * N);
    cudaMalloc((void**)&dev_b, sizeof(int) * N);
    // a[i] = i + 1
    init_vec(a);
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    // single block of N threads — the kernel assumes blockDim.x == N
    stencil_kernel <<<1, N>>> (dev_a, dev_b);
    cudaMemcpy(b, dev_b, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    func_print(b);
    /* Fix: the original leaked both host buffers. */
    free(a);
    free(b);
    return 0;
}
12,494 | #include<cstdio>
#include "vector_types.h"
extern "C" {
// Grid-stride kernel: for every t < listSize writes
// list[t + listSize] = list[t] + elements[i], i.e. the second half of the
// buffer receives the first half shifted by a single element value.
__global__ void cuAdd(int* list, int* elements, int i, int listSize){
    int stride = blockDim.x * gridDim.x;
    for (int t = blockIdx.x * blockDim.x + threadIdx.x; t < listSize; t += stride) {
        list[t + listSize] = list[t] + elements[i];
    }
}
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d);
// Refines the merge partition tree one level: each worker reads a parent
// range descriptor H[medianId] = (a,b,c,d) — a range (a,b) in prevList and
// (c,d) in the companion buffer at offset `size` — splits it at the
// balanced point (e,f) returned by findMedian, and writes the two child
// descriptors at 2*medianId and 2*medianId+1 (implicit binary-heap layout).
__global__ void cuPartition(int j, int* prevList, int4* H, int size){
int* newList = prevList+size;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// level j of the tree holds 2^(j-1) descriptors
int threadCounts = 1 << (j-1);
while (tid < threadCounts){
int medianId = tid + threadCounts;
int a = H[medianId].x;
int b = H[medianId].y;
int c = H[medianId].z;
int d = H[medianId].w;
int2 ef = findMedian(prevList, a, b, newList, c, d);
// left child: everything below the split point
H[2*medianId].x = a;
H[2*medianId].y = ef.x;
H[2*medianId].z = c;
H[2*medianId].w = ef.y;
// right child: everything at/above the split point
H[2*medianId + 1].x = ef.x;
H[2*medianId + 1].y = b;
H[2*medianId + 1].z = ef.y;
H[2*medianId + 1].w = d;
tid += blockDim.x * gridDim.x;
}
}
__device__ void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result);
// Parallel merge: each of `threads` workers merges one descriptor
// H[medianId] = (a,b,c,d) — range (a,b) of `lists` with range (c,d) of the
// companion buffer at offset listSize — into `result`, using the partition
// boundaries previously produced by cuPartition.
__global__ void cuMergeIncreasing(int* lists, int4* H, int listSize, int threads, int* result){
int* newList = lists + listSize;
// workers are numbered 1..threads to match the heap level's index range
int tid = blockIdx.x * blockDim.x + threadIdx.x + 1;
while(tid <= threads){
// leaf descriptors of the level start at index threads-1 in the heap
int medianId = tid + threads - 1;
int4 localFetch = H[medianId];
int a = localFetch.x;
int b = localFetch.y;
int c = localFetch.z;
int d = localFetch.w;
mergeInc(lists, a, b, newList, c, d, result);
tid += blockDim.x * gridDim.x;
}
}
// Prunes the chunk grid for the pair-sum search: splits listA and listB into
// k chunks each, records every (chunk-row, chunk-col) pair whose boundary
// sums bracket M into pickedBlocks, and raises *found immediately when a
// boundary pair hits M exactly.
__global__ void cuPrune(int* listA, int sizeA, int* listB, int sizeB, int* found, int2* pickedBlocks, int* pickedBlocksCounter, int M){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
// ceiling-divided chunk sizes
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
for(int j = 0; j < k ; j++){
if(*found) return;
int x = listA[tid * chunkA] + listB[(j+1) * chunkB - 1]; // NOTE(review): can index past the end when chunkA or chunkB does not divide the list evenly
int y = listA[(tid+1) * chunkA - 1] + listB[j * chunkB]; // NOTE(review): same out-of-bounds risk here
if (x == M || y == M) atomicExch(found, 1);
else if(x < M && y > M){
// reserve a slot and record this candidate chunk pair
int pos = atomicAdd(pickedBlocksCounter, 1);
pickedBlocks[pos].x = tid;
pickedBlocks[pos].y = j;
}
}
}
// Two-pointer scan over one chunk pair: returns true iff some
// listA[a] + listB[b] equals M. Advances in listA while the sum is too
// small, otherwise in listB (assumes the chunks are ordered so this walk
// covers all candidate pairs — same contract as the original).
__device__ bool searchSteep(int* listA, int chunkSizeA, int* listB, int chunkSizeB, int M){
    int ia = 0;
    int ib = 0;
    while (ia < chunkSizeA && ib < chunkSizeB) {
        int s = listA[ia] + listB[ib];
        if (s == M)
            return true;
        if (s < M)
            ++ia;
        else
            ++ib;
    }
    return false;
}
// For each candidate chunk pair selected by cuPrune, runs the linear
// two-pointer search for a pair summing to M and raises *found on success.
__global__ void cuSearch(int* listA, int sizeA, int* listB, int sizeB, int2* pickedBlocks, int* noPickedBlocks, int* found, int M){
int thid = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockDim.x*gridDim.x;
// chunk sizes must match the ones cuPrune computed (same k)
int chunkA = (sizeA + k - 1)/ (k);
int chunkB = (sizeB + k - 1)/ (k);
while(thid < *noPickedBlocks){
if(*found) return;   // another worker already succeeded
int2 idsOfFragmentToCheck = pickedBlocks[thid];
int* shiftedListA = listA + idsOfFragmentToCheck.x * chunkA;
int* shiftedListB = listB + idsOfFragmentToCheck.y * chunkB;
// NOTE(review): the tail-size test compares `thid` (the worker index)
// rather than the picked block's own coordinate, and `size % chunk` is 0
// when the chunk divides the size evenly — both look wrong; verify.
int _sizeA = thid != k-1 ? chunkA : sizeA % chunkA;
int _sizeB = thid != k-1 ? chunkB : sizeB % chunkB;
bool f = searchSteep(shiftedListA, _sizeA, shiftedListB, _sizeB, M);
if(f) *found = true;
thid += k;
}
}
// In-place array reversal: thread t swaps tab[t] with its mirror element;
// only the first size/2 threads do work.
__global__ void cuReverse(int* tab, int size){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < size/2) {
        int mirror = size - tid - 1;
        int held = tab[tid];
        tab[tid] = tab[mirror];
        tab[mirror] = held;
    }
}
// Lower-bound binary search on the ascending range tab[l..r): returns the
// first index whose value is >= `value`, or r if there is none.
__device__ int binsearchInc(int* tab, int l, int r, int value){
    int lo = l;
    int hi = r;
    while (lo < hi) {
        int mid = (lo + hi) / 2;
        if (tab[mid] < value)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}
// Computes a balanced split (aMiddle, bMiddle) of the two sorted ranges
// tabA[a..b) and tabB[c..d): bisects the LARGER range at its midpoint and
// binary-searches the matching position in the other range, so elements
// below the split in both ranges can be merged independently of those above.
__device__ int2 findMedian(int* tabA, int a, int b, int* tabB, int c, int d){
int aMiddle, bMiddle, otherBegin, otherEnd, otherValue;
int* otherTab;
if(b-a > d-c){
// A is larger: pivot on A's midpoint, search B for it
aMiddle = (b + a) / 2;
otherTab = tabB;
otherBegin = c;
otherEnd = d;
otherValue = tabA[aMiddle];
//bMiddle = binsearchInc(tabB, c, d, tabA[aMiddle]);
} else{
// B is larger (or equal): pivot on B's midpoint, search A for it
bMiddle = (c + d) / 2;
otherTab = tabA;
otherBegin = a;
otherEnd = b;
otherValue = tabB[bMiddle];
//aMiddle = binsearchInc(tabA, a, b, tabB[bMiddle]);
}
// lower-bound position of the pivot value in the other range
int theOtherMiddle = binsearchInc(otherTab, otherBegin, otherEnd, otherValue);
if(b-a > d-c){
bMiddle = theOtherMiddle;
} else{
aMiddle = theOtherMiddle;
}
int2 result;
result.x = aMiddle;
result.y = bMiddle;
return result;
}
// Sequential two-way merge of the ascending ranges listA[beginA..endA) and
// listB[beginB..endB) into result, starting at offset beginA+beginB (so
// independently-partitioned segments land in disjoint output slices).
__device__ inline void mergeInc(int* listA, int beginA, int endA, int* listB, int beginB, int endB, int* result){
    int out = beginA + beginB;
    int ia = beginA;
    int ib = beginB;
    // take the smaller head element while both ranges are non-empty
    while (ia < endA && ib < endB) {
        result[out++] = (listA[ia] < listB[ib]) ? listA[ia++] : listB[ib++];
    }
    // drain whichever range is left
    while (ia < endA)
        result[out++] = listA[ia++];
    while (ib < endB)
        result[out++] = listB[ib++];
}
}
|
12,495 | /* kernel routine starts with keyword __global__ */
#include <stdio.h>
#define BLOCK_SIZE 512
/* Element-wise vector add: C[i] = A[i] + B[i], one thread per element.
 * NOTE(review): there is no bounds check, so the launch must cover exactly
 * the allocated length (gridDim.x * BLOCK_SIZE threads over buffers of at
 * least that many elements) — confirm against the caller. */
__global__ void vecadd(float* A, float* B, float* C)
{
int i = blockIdx.x * BLOCK_SIZE + threadIdx.x; // threadIdx is a CUDA built-in variable
C[i] = A[i] + B[i];
}
/* Driver: adds two all-ones vectors of length n (default 1024, or argv[1])
 * on the GPU. */
int main(int argc, char * argv[])
{
    float *host_A, *host_B, *host_C;
    float *dev_A, *dev_B, *dev_C;
    int i, n;
    if (argc == 1) n = 1024;
    else n = atoi(argv[1]);
    /* Fix: the original launched n/BLOCK_SIZE blocks, which silently drops
     * the tail elements (or launches an empty grid) whenever n is not a
     * multiple of BLOCK_SIZE. Round the grid up, and pad the device buffers
     * to a whole number of blocks so the unguarded kernel never indexes out
     * of bounds. */
    int nblocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    size_t padded_bytes = (size_t)nblocks * BLOCK_SIZE * sizeof(float);
    /* 1. allocate host memory */
    host_A = (float*)malloc( n*sizeof(float) );
    host_B = (float*)malloc( n*sizeof(float) );
    host_C = (float*)malloc( n*sizeof(float) );
    /* 2. allocate GPU memory, padded to a whole number of blocks */
    cudaMalloc( &dev_A, padded_bytes );
    cudaMalloc( &dev_B, padded_bytes );
    cudaMalloc( &dev_C, padded_bytes );
    /* initialize array A and B */
    for( int i = 0; i < n; ++i ) {
        host_A[i] = (float) 1.0;
        host_B[i] = (float) 1.0;
    }
    /* 3. copy data (host_A and host_B) to GPU */
    cudaMemcpy( dev_A, host_A, n*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_B, host_B, n*sizeof(float), cudaMemcpyHostToDevice );
    /* 4. one thread per element; the padding threads only touch padding */
    vecadd<<<nblocks, BLOCK_SIZE>>>( dev_A, dev_B, dev_C );
    /* transfer results from GPU (dev_C) to CPU (host_C) */
    cudaMemcpy( host_C, dev_C, n*sizeof(float), cudaMemcpyDeviceToHost );
#ifdef CHECK
    {
        FILE *fd;
        fd = fopen("tmp333", "w");
        for (i=0;i<n; i++) {
            fprintf(fd, "%f\n", host_C[i]);
        }
        fclose(fd);   /* fix: the original leaked this FILE handle */
    }
#endif
    /* free host and GPU memory */
    free(host_A);
    free(host_B);
    free(host_C);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    return( 0 );
}
|
12,496 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <float.h>
#ifndef M_PI
# define M_PI 3.14159265358979323846
#endif
# define N 192
__device__ int count;
// Block-level tree reduction over the first `dummy` entries of
// partial_sum[]; the total is left in partial_sum[0]. Thread 0 first pads an
// odd count to even (via the device-global `count`), and inside the loop
// folds in the stray element whenever the active width `i` is odd.
// NOTE(review): thread 0's extra fold-in runs between barriers while the
// other threads are idle, so the sequencing appears safe, but the pattern is
// fragile — run compute-sanitizer racecheck before restructuring.
__device__ void sum(double *partial_sum, int dummy) {
if(threadIdx.x == 0) {
count = dummy;
if(count %2 != 0) {
// pad an odd element count with a trailing zero
count++;
partial_sum[count-1] = 0;
}
}
__syncthreads();
// halve the active width each round, adding the upper half onto the lower
for(int i = count/2; i > 0; i = i/2) {
if(threadIdx.x < i)
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + i];
__syncthreads();
if(threadIdx.x == 0) {
if(i%2 != 0 && i != 1) {
// odd width: fold the last element into slot 0 and shrink i
partial_sum[0] += partial_sum[--i];
}
}
__syncthreads();
}
__syncthreads();
return;
}
// Fill the coordinate arrays with an m x m interior grid of the unit square:
// point (i,j) sits at ((i+1)h, (j+1)h) with spacing h = 1/(m+1).
void init_grid_points(double * x, double * y, int m)
{
    double h = (double)1/(m + 1);
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < m; ++col)
        {
            int idx = row*m + col;
            x[idx] = (row + 1)*h;
            y[idx] = (col + 1)*h;
        }
    }
}
// Synthesize the observed data vector f at the m*m grid points: a Gaussian
// bump centred at (0.25, 0.25), a linear trend 0.2x + 0.1y, and uniform
// noise in [-0.01, 0.01]. Uses one rand() call per point, in index order.
void init_observed_data_vector(double * f, double * x, double * y, int m)
{
    // length scales tied to the grid resolution
    double l[2] = {(double)2/m, (double)2/m};
    int n = m * m;
    for (int i = 0; i < n; i ++)
    {
        double noise = 0.02 * ((double)rand() / (double)RAND_MAX - 0.5);
        double d = pow((x[i] - 0.25)/l[0], 2) + pow((y[i] - 0.25)/l[1],2);
        double bump = 1.0/sqrt(2.0*M_PI) * exp(-d/2);
        double trend = x[i] * 0.2 + y[i] * 0.1;
        f[i] = noise + bump + trend;
    }
}
// Write a uniformly random permutation of 0..n-1 into r using a descending
// Fisher-Yates shuffle (one rand() call per position, from n-1 down to 0).
void randperm(int * r, int n){
    for (int k = 0; k < n; ++k)
        r[k] = k;
    for (int k = n - 1; k >= 0; --k){
        int pick = rand() % (k + 1);
        int held = r[k];
        r[k] = r[pick];
        r[pick] = held;
    }
}
// Randomly partition the indices 0..ntest+ntrain-1: the first ntest entries
// of a shuffled permutation become the test set, the rest the training set.
void init_data_set_indices(int * itest, int * itrain, int ntest, int ntrain){
    int n = ntest + ntrain;
    int * perm = (int *) malloc(n * sizeof(int));
    randperm(perm, n);
    for (int k = 0; k < ntest; ++k)
        itest[k] = perm[k];
    for (int k = 0; k < ntrain; ++k)
        itrain[k] = perm[ntest + k];
    free(perm);
}
// Form A = t*I + K in place: add t to each diagonal entry of the n x n
// matrix A (which holds K on entry).
void compute_A(double * A, double t, int n)//tI + K
{
    for (int d = 0; d < n; ++d)
        A[d*n + d] += t;
}
// Device version of compute_A: the N threads of the (single) block stride
// over the diagonal of the n x n matrix A, adding t to each entry.
__device__ void compute_A_gpu(double * A, double t, int n)//tI + K
{
//Compute A = tI+K
for (int i = threadIdx.x; i < n; i += N)
{
A[i*n + i] += t;
}
}
void compute_k(double * k, double * x, double * y, double * rstar, int n)
{
int i;
double d;
for (i = 0; i < n; i ++)
{
d = pow(rstar[0]-x[i], 2) + pow(rstar[1]-y[i], 2);
k[i] = exp(-d);
}
}
// In-place Doolittle LU factorization without pivoting: after the call the
// strict lower triangle of A holds the elimination multipliers (L with an
// implicit unit diagonal) and the upper triangle holds U.
void compute_LU_factors(double * A, int n)
{
    for (int col = 0; col < n - 1; ++col)
    {
        for (int row = col + 1; row < n; ++row)
        {
            double factor = A[row*n + col] / A[col*n + col];
            // eliminate this row's entry in `col` from the trailing block
            for (int j = col + 1; j < n; ++j)
            {
                A[row*n + j] = A[row*n + j] - factor * A[col*n + j];
            }
            A[row*n + col] = factor;   // store the multiplier in L's slot
        }
    }
}
// Solve A z = f[itrain] given the in-place LU factors produced by
// compute_LU_factors (unit-diagonal L in the strict lower triangle, U in the
// upper triangle): forward-substitute L y = f, then back-substitute U z = y.
// Fix: the forward pass previously divided by A[i*n+i] as well — but that
// slot holds U's diagonal, not L's (Doolittle L has an implicit UNIT
// diagonal), so every solution was wrong. Verified against a hand-computed
// 2x2 system; the division belongs only in the backward pass.
void solve_triangular_systems(double * z, double * A, double * f, int * itrain, int n)
{
    int i, j;
    double m;
    //Solve Az = f by LUz = f
    //1. Solve Ly = f for y (L has a unit diagonal: no division)
    for (i = 0; i < n; i ++)
    {
        m = 0;
        for (j = 0; j < i; j ++)
        {
            m += A[i*n + j] * z[j];
        }
        z[i] = f[itrain[i]] - m;
    }
    //2. Solve Uz = y for z
    for (i = n - 1; i >= 0; i --)
    {
        m = 0;
        for (j = i + 1; j < n; j ++)
        {
            m += A[i*n + j] * z[j];
        }
        z[i] = (z[i]-m)/A[i*n + i];
    }
}
// Predicted value at a single query point: the dot product k' * z of the
// query kernel vector with the regression weights.
double compute_fstar(double * k, double * z, int n)
{
    double dot = 0.0;
    for (int idx = 0; idx < n; ++idx)
        dot += k[idx] * z[idx];
    return dot;
}
// Predicted values at all ntest test points: the matrix-vector product of
// the (ntest x ntrain) cross-kernel matrix k with the weights z.
void compute_ftest(double * ftest, double * k, double * z, int ntrain, int ntest)
{
    for (int row = 0; row < ntest; ++row)
    {
        double acc = 0.0;
        for (int col = 0; col < ntrain; ++col)
        {
            acc += k[row * ntrain + col] * z[col];
        }
        ftest[row] = acc;
    }
}
// Dense n x n Gaussian kernel matrix over the grid points, with anisotropic
// length scales l[0], l[1]:
//   K[i][j] = (1/sqrt(2*pi)) * exp(-(((xi-xj)/l0)^2 + ((yi-yj)/l1)^2)/2)
void compute_kernel(double * K, double * x, double * y, double * l, int n)
{
    for (int row = 0; row < n; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            double d = pow((x[row] - x[col])/l[0], 2) + pow((y[row] - y[col])/l[1],2);
            K[row*n + col] = 1.0/sqrt(2.0*M_PI) * exp(-d/2);
        }
    }
}
// Device version of compute_kernel: the block's N threads stride over the
// flattened n x n index space, compute only the lower triangle (i >= j),
// and mirror each entry to keep K symmetric.
__device__ void compute_kernel_gpu(double * K, double * x, double * y, double * l, int n)
{
for (int m = threadIdx.x; m < n*n; m += N )
{
int i = m / n;
int j = m % n;
// skip the strict upper triangle; it is filled by the mirror write below
if (i < j){
continue;
}
double d = pow((x[i] - x[j])/l[0], 2) + pow((y[i] - y[j])/l[1],2);
K[i*n + j] = 1.0/sqrt(2.0*M_PI) * exp(-d/2);
K[j*n + i] = K[i*n + j];
}
return;
}
// Gather the n1 x n2 submatrix K0[i1[:], i2[:]] of the n x n matrix K0
// into the dense output K.
void extract_K(double * K0, double * K, int * i1, int * i2, int n, int n1, int n2){
    for (int r = 0; r < n1; ++r)
    {
        double * dst = K + r * n2;
        double * srcRow = K0 + i1[r] * n;
        for (int c = 0; c < n2; ++c)
        {
            dst[c] = srcRow[i2[c]];
        }
    }
}
// Device version of extract_K: the block's N threads stride over the
// flattened n1 x n2 gather of K0[i1[:], i2[:]] into K.
__device__ void extract_K_gpu(double * K0, double * K, int * i1, int * i2, int n, int n1, int n2){
for (int m = threadIdx.x; m < n1*n2; m += N )
{
int i = m / n2;
int j = m % n2;
K[i * n2 + j] = K0[i1[i] * n + i2[j]];
}
return;
}
// Debug helper: print n doubles, space-separated, followed by a newline.
__device__ void print_array(double * array, int n)
{
    for (int idx = 0; idx < n; ++idx)
    {
        printf("%.4f ", array[idx]);
    }
    printf("\n");
}
// Debug helper: print an m x n row-major matrix, one row per line.
__device__ void print_matrix(double * matrix, int m, int n)
{
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            printf("%.4f ", matrix[row*n + col]);
        }
        printf("\n");
    }
}
// Host (CPU) Gaussian-process regression: builds the full kernel matrix,
// extracts the train/train and test/train sub-blocks, factors
// A = t*I + K_train via LU, solves for the weights z, and writes the
// predictions into ftest.
// NOTE(review): solve_triangular_systems divides by the diagonal during the
// forward (L) substitution even though compute_LU_factors produces a
// unit-diagonal L — verify the solver before trusting these predictions.
void GPR(double * ftest, double * x, double * y, double * f, int * itest, int * itrain, double t, double * l, int n, int ntest)
{
int ntrain = n - ntest;
double * K0;   // full n x n kernel matrix
double * LU;   // ntrain x ntrain train/train block, factored in place
double * kT;   // ntest x ntrain test/train cross block
double * z;    // regression weights
// Allocate host memory
K0 = (double *) malloc(n * n * sizeof(double));
LU = (double *) malloc(ntrain * ntrain * sizeof(double));
kT = (double *) malloc(ntest * ntrain * sizeof(double));
z = (double *) malloc(ntrain * sizeof(double));
printf("CPU\n");
compute_kernel(K0, x, y, l, n);
extract_K(K0, LU, itrain, itrain, n, ntrain, ntrain);
compute_A(LU, t, ntrain);//tI + K
compute_LU_factors(LU, ntrain);
extract_K(K0, kT, itest, itrain, n, ntest, ntrain);
solve_triangular_systems(z, LU, f, itrain, ntrain);
compute_ftest(ftest, kT, z, ntrain, ntest);
free(K0);
free(LU);
free(kT);
free(z);
}
// Single-block GPU Gaussian-process regression. The N threads of the one
// launched block cooperate on every phase: kernel matrix, sub-block
// extraction, in-place LU (no pivoting), the two triangular solves (row dot
// products reduced through the dynamic shared buffer partial_sum via sum()),
// and finally the test predictions. Scratch matrices live on the device
// heap; thread 0 allocates and frees them, with barriers publishing the
// pointers to the rest of the block.
// NOTE(review): the device malloc results are never checked — large n will
// silently exhaust the default device heap.
// NOTE(review): the forward (L) solve divides by A[i*n+i] even though the
// Doolittle factorization above leaves a unit-diagonal L — compare with the
// CPU path before trusting the numbers.
__global__ void GPR_gpu(double * ftest, double * x, double * y, double * f, int * itest, int * itrain, double t, double * l, int n, int ntest)
{
extern __shared__ double partial_sum[];
__shared__ double * K0;
__shared__ double * A;
__shared__ double * kT;
__shared__ double * z;
int ntrain = n - ntest;
// thread 0 allocates the scratch matrices; the barrier publishes them
if (threadIdx.x == 0) {
K0 = (double *) malloc(n * n * sizeof(double));
A = (double *) malloc(ntrain * ntrain * sizeof(double));
kT = (double *) malloc(ntest * ntrain * sizeof(double));
}
__syncthreads();
compute_kernel_gpu(K0, x, y, l, n);
__syncthreads();
extract_K_gpu(K0, A, itrain, itrain, n, ntrain, ntrain);
__syncthreads();
extract_K_gpu(K0, kT, itest, itrain, n, ntest, ntrain);
__syncthreads();
// the full kernel matrix is no longer needed; allocate the weights
if (threadIdx.x == 0) {
free(K0);
z = (double *) malloc(ntrain * sizeof(double));
}
__syncthreads();
compute_A_gpu(A, t, ntrain); //tI + K
// from here on, n is reused as the order of the training system
n = ntrain;
__syncthreads();
// compute LU factors (in place, no pivoting)
for (int k = 0; k < n - 1; k ++)
{
// column of multipliers, one row per thread (strided)
for (int i = k + 1 + threadIdx.x; i < n; i += N)
{
A[i*n + k] = A[i*n + k] / A[k*n + k];
}
__syncthreads();
// rank-1 update of the trailing (n-k-1)^2 block
for (int m = threadIdx.x; m < (n - k - 1)*(n - k - 1); m += N )
{
int i = k + 1 + m / (n - k - 1);
int j = k + 1 + m % (n - k - 1);
A[i*n + j] -= A[i*n + k] * A[k*n + j];
}
__syncthreads();
}
__syncthreads();
//Solve Az = f by LUz = f
// 1. Solve Ly = f for y
for (int i = 0; i < n; i ++)
{
// partial dot product A[i, 0..i) . z, reduced across the block
partial_sum[threadIdx.x] = 0;
for (int j = threadIdx.x; j < i; j += N)
{
partial_sum[threadIdx.x] += A[i*n + j] * z[j];
}
__syncthreads();
sum (partial_sum, (N<i)?N:i);
if (threadIdx.x == 0){
z[i] = (f[itrain[i]] - partial_sum[0])/A[i*n + i];
}
__syncthreads();
}
__syncthreads();
//2. Solve Uz = y for z
for (int i = n - 1; i >= 0; i --)
{
// partial dot product A[i, i+1..n) . z, reduced across the block
partial_sum[threadIdx.x] = 0;
for (int j = i + 1 + threadIdx.x; j < n; j += N)
{
partial_sum[threadIdx.x] += A[i*n + j] * z[j];
}
__syncthreads();
sum(partial_sum, (N < (n-1-i))? N:(n-1-i));
if(threadIdx.x == 0) {
z[i] = (z[i]-partial_sum[0])/A[i*n + i];
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
free(A);
// cudaFree(A);
}
__syncthreads();
// compute ftest: one reduced dot product kT[i,:] . z per test point
for (int i = 0; i < ntest; i ++)
{
partial_sum[threadIdx.x] = 0;
for (int j = threadIdx.x; j < ntrain; j += N){
partial_sum[threadIdx.x] += kT[i * ntrain + j] * z[j];
}
__syncthreads();
sum(partial_sum, (N < ntrain)? N : ntrain);
if(threadIdx.x == 0) {
ftest[i] = partial_sum[0];
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
free(kT);
free(z);
}
__syncthreads();
return;
}
// Mean squared error between the observed values f[itest[i]] and the
// predictions ftest[i] over the ntest test points.
double compute_MSE(double * f, int * itest, double * ftest, int ntest) // compute the mean square error
{
    double acc = 0.0;
    for (int idx = 0; idx < ntest; ++idx){
        double diff = f[itest[idx]] - ftest[idx];
        acc += diff * diff;
    }
    return acc / ntest;
}
// Driver: builds an m x m synthetic data set (m from argv[1]), holds out
// ~10% of the points as a test set, then grid-searches 20x20 length-scale
// pairs (l1,l2) with the single-block GPU regression kernel, reporting the
// pair with the lowest test MSE. Only the first launch is timed.
int main(int argc, char** argv)
{
// Host Data
double * hGx; // host grid x-coordinate array
double * hGy; // host grid y-coordinate array
double * hf;// host observed data vector f
int * hitest; // Indices of test points (randomly chosen)
int * hitrain; //Indices of training points
// Device Data
double * dx;
double * dy;
double * dl;
double * df;
double * dftest;
int * ditest;
int * ditrain;
// Grid size m, grid points n, size of test data and training data,
int m = 4, n, ntest, ntrain;
// Coordinate of hyper-parameter l(l1, l2)
double l[2], bestL[2];
// predicted value of test
double * ftest;
// Timing variables
cudaEvent_t start, stop; // GPU timing variables
float total_time;
// Other variables
// double fstar;
double Lparam[20];
double MSE[20][20];
int size;
double minMSE = DBL_MAX;
// Timing initializations
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Check input
if (argc > 1){
m = atoi(argv[1]);
}else{
printf("Please indicate grid size m\n");
return -1;
}
// Allocate host coordinate arrays
n = m * m;
size = n * sizeof(double);
hGx = (double *) malloc(size);
hGy = (double *) malloc(size);
hf = (double *) malloc(size);
// hold out roughly 10% of the points (rounded up) as the test set
ntest = (n + 9) / 10;
ntrain = n - ntest;
printf("testing data: %d, training data: %d\n", ntest, ntrain);
size = sizeof(int);
hitest = (int *) malloc(ntest * size);
hitrain = (int *) malloc(ntrain * size);
size = sizeof(double);
ftest = (double *) malloc(ntest * size);
// candidate length scales, spaced relative to the grid resolution
for (int i = 0; i < 20; i++){
Lparam[i] = (i + 1) * 0.5/ m;
}
init_grid_points(hGx, hGy, m);
srand(time(0));
init_observed_data_vector(hf, hGx, hGy, m);
init_data_set_indices(hitest, hitrain, ntest, ntrain);
printf("Number of threads %d\n", N);
// device allocations and one-time uploads of the fixed inputs
cudaMalloc(&dx, n * sizeof(double));
cudaMalloc(&dy, n * sizeof(double));
cudaMalloc(&dl, 2 * sizeof(double));
cudaMalloc(&dftest, ntest * sizeof(double));
cudaMalloc(&df, n * sizeof(double));
cudaMalloc(&ditest, ntest * sizeof(int));
cudaMalloc(&ditrain, ntrain * sizeof(int));
cudaMemcpy(dx, hGx, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dy, hGy, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(df, hf, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(ditest, hitest, ntest * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(ditrain, hitrain, ntrain * sizeof(int), cudaMemcpyHostToDevice);
double t = 0.5;// Parameter t
// printf("20*20 values of Parameter L[2]\n");
// grid search over the 20 x 20 hyper-parameter combinations
for (int il1 = 0; il1 < 20; il1 ++){
l[0] = Lparam[il1];
for (int il2 = 0; il2 < 20; il2 ++){
l[1] = Lparam[il2];
// printf("(%d,%d)\t",il1, il2);
// GPR(ftest, hGx, hGy, hf, hitest, hitrain, t, l, n, ntest);
cudaMemcpy(dl, l, 2 * sizeof(double), cudaMemcpyHostToDevice);
// time only the very first launch; later launches run untimed
if(il1 == 0 && il2 == 0){
cudaEventRecord(start, 0);
GPR_gpu<<<1, N, N * sizeof(double)>>>(dftest, dx, dy, df, ditest, ditrain, t, dl, n, ntest);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time, start, stop);
printf("One round time = %f ms\n", total_time);
}else{
GPR_gpu<<<1, N, N * sizeof(double)>>>(dftest, dx, dy, df, ditest, ditrain, t, dl, n, ntest);
}
// blocking copy also synchronizes with the kernel above
cudaMemcpy(ftest, dftest, ntest * sizeof(double), cudaMemcpyDeviceToHost);
// print_array(ftest, ntest);
MSE[il1][il2] = compute_MSE(hf, hitest, ftest, ntest);
printf("\rFinished (l1,l2) = %f, %f, mse = %e", Lparam[il1], Lparam[il2], MSE[il1][il2]);
// track the best hyper-parameter pair seen so far
if (MSE[il1][il2] < minMSE){
bestL[0] = l[0];
bestL[1] = l[1];
minMSE = MSE[il1][il2];
}
printf("\t progress: %d/400", il1*20 + il2+1);
}
}
printf("\nBest (l1,l2) = %f, %f, mse = %e\n", bestL[0], bestL[1], minMSE);
// release host and device resources
free(hGx);
free(hGy);
free(hf);
free(hitest);
free(hitrain);
free(ftest);
cudaFree(dx);
cudaFree(dy);
cudaFree(dl);
cudaFree(df);
cudaFree(dftest);
cudaFree(ditest);
cudaFree(ditrain);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
12,497 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the program with `message` if the most recent CUDA API call or
// kernel launch recorded an error.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_7_;
double _t_4_;
double _t_5_;
double _t_2_;
double _t_0_;
double _t_10_;
double _t_11_;
double _t_16_;
double _t_17_;
double _t_15_;
double _t_26_;
double _t_23_;
double _t_24_;
double _t_21_;
double _t_29_;
double _t_30_;
double _t_35_;
double _t_36_;
double _t_34_;
double _t_46_;
double _t_43_;
double _t_44_;
double _t_41_;
double _t_49_;
double _t_50_;
double _t_55_;
double _t_56_;
double _t_54_;
double _t_65_;
double _t_62_;
double _t_63_;
double _t_60_;
double _t_68_;
double _t_69_;
double _t_74_;
double _t_75_;
double _t_73_;
double r1ic0jc0kc0 = r1[i][j][k];
double _t_86_;
double _t_83_;
double _t_84_;
double _t_81_;
double _t_90_;
double _t_91_;
double _t_95_;
double _t_96_;
double _t_104_;
double _t_101_;
double _t_102_;
double _t_108_;
double _t_109_;
double _t_113_;
double _t_114_;
double _t_79_;
double _t_123_;
double _t_120_;
double _t_121_;
double _t_118_;
double _t_127_;
double _t_128_;
double _t_132_;
double _t_133_;
double _t_141_;
double _t_138_;
double _t_139_;
double _t_145_;
double _t_146_;
double _t_150_;
double _t_151_;
double _t_159_;
double _t_160_;
double _t_157_;
double _t_155_;
double _t_164_;
double _t_165_;
double _t_171_;
double _t_172_;
double _t_169_;
double _t_176_;
double _t_177_;
double _t_184_;
double _t_185_;
double _t_182_;
double _t_189_;
double _t_190_;
double _t_196_;
double _t_197_;
double _t_194_;
double _t_201_;
double _t_202_;
_t_7_ = 2.0 * mu[i][j][k+2];
_t_7_ += la[i][j][k+2];
_t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
_t_5_ = c2 * u1[i+2][j][k+2];
_t_5_ -= c2 * u1[i-2][j][k+2];
_t_5_ += c1 * u1[i+1][j][k+2];
_t_5_ -= c1 * u1[i-1][j][k+2];
_t_2_ = strx[i] * _t_4_ * _t_5_;
_t_0_ = c2 * _t_2_ * stry[j];
_t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_11_ = c2 * u2[i+2][j][k+2];
_t_11_ -= c2 * u2[i-2][j][k+2];
_t_11_ += c1 * u2[i+1][j][k+2];
_t_11_ -= c1 * u2[i-1][j][k+2];
_t_0_ += c2 * _t_10_ * _t_11_;
_t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
_t_17_ = c2 * u3[i+2][j][k+2];
_t_17_ -= c2 * u3[i-2][j][k+2];
_t_17_ += c1 * u3[i+1][j][k+2];
_t_17_ -= c1 * u3[i-1][j][k+2];
_t_15_ = _t_16_ * _t_17_;
_t_0_ += c2 * _t_15_ * stry[j];
_t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
_t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
_t_24_ = c2 * u1[i+2][j][k-2];
_t_24_ -= c2 * u1[i-2][j][k-2];
_t_24_ += c1 * u1[i+1][j][k-2];
_t_24_ -= c1 * u1[i-1][j][k-2];
_t_21_ = strx[i] * _t_23_ * _t_24_;
_t_0_ += c2 * _t_21_ * stry[j];
_t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_30_ = c2 * u2[i+2][j][k-2];
_t_30_ -= c2 * u2[i-2][j][k-2];
_t_30_ += c1 * u2[i+1][j][k-2];
_t_30_ -= c1 * u2[i-1][j][k-2];
_t_0_ += c2 * _t_29_ * _t_30_;
_t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
_t_36_ = c2 * u3[i+2][j][k-2];
_t_36_ -= c2 * u3[i-2][j][k-2];
_t_36_ += c1 * u3[i+1][j][k-2];
_t_36_ -= c1 * u3[i-1][j][k-2];
_t_34_ = _t_35_ * _t_36_;
_t_0_ += c2 * _t_34_ * stry[j];
_t_46_ = 2.0 * mu[i][j][k+1];
_t_46_ += la[i][j][k+1];
_t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
_t_44_ = c2 * u1[i+2][j][k+1];
_t_44_ -= c2 * u1[i-2][j][k+1];
_t_44_ += c1 * u1[i+1][j][k+1];
_t_44_ -= c1 * u1[i-1][j][k+1];
_t_41_ = strx[i+2] * _t_43_ * _t_44_;
_t_0_ += c1 * _t_41_ * stry[j];
_t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_50_ = c2 * u2[i+2][j][k+1];
_t_50_ -= c2 * u2[i-2][j][k+1];
_t_50_ += c1 * u2[i+1][j][k+1];
_t_50_ -= c1 * u2[i-1][j][k+1];
_t_0_ += c1 * _t_49_ * _t_50_;
_t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
_t_56_ = c2 * u3[i+2][j][k+1];
_t_56_ -= c2 * u3[i-2][j][k+1];
_t_56_ += c1 * u3[i+1][j][k+1];
_t_56_ -= c1 * u3[i-1][j][k+1];
_t_54_ = _t_55_ * _t_56_;
_t_0_ += c1 * _t_54_ * stry[j];
_t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
_t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
_t_63_ = c2 * u1[i+2][j][k-1];
_t_63_ -= c2 * u1[i-2][j][k-1];
_t_63_ += c1 * u1[i+1][j][k-1];
_t_63_ -= c1 * u1[i-1][j][k-1];
_t_60_ = strx[i-2] * _t_62_ * _t_63_;
_t_0_ += c1 * _t_60_ * stry[j];
_t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_69_ = c2 * u2[i+2][j][k-1];
_t_69_ -= c2 * u2[i-2][j][k-1];
_t_69_ += c1 * u2[i+1][j][k-1];
_t_69_ -= c1 * u2[i-1][j][k-1];
_t_0_ += c1 * _t_68_ * _t_69_;
_t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
_t_75_ = c2 * u3[i+2][j][k-1];
_t_75_ -= c2 * u3[i-2][j][k-1];
_t_75_ += c1 * u3[i+1][j][k-1];
_t_75_ -= c1 * u3[i-1][j][k-1];
_t_73_ = _t_74_ * _t_75_;
_t_0_ += c1 * _t_73_ * stry[j];
r1ic0jc0kc0 += _t_0_;
_t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
_t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
_t_84_ = c2 * u1[i+2][j][k+2];
_t_84_ -= c2 * u1[i+2][j][k-2];
_t_84_ += c1 * u1[i+2][j][k+1];
_t_84_ -= c1 * u1[i+2][j][k-1];
_t_81_ = strx[i] * _t_83_ * _t_84_;
_t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
_t_91_ = c2 * u2[i+2][j][k+2];
_t_91_ -= c2 * u2[i+2][j][k-2];
_t_91_ += c1 * u2[i+2][j][k+1];
_t_91_ -= c1 * u2[i+2][j][k-1];
_t_81_ += stry[j] * _t_90_ * _t_91_;
_t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
_t_96_ = c2 * u3[i+2][j][k+2];
_t_96_ -= c2 * u3[i+2][j][k-2];
_t_96_ += c1 * u3[i+2][j][k+1];
_t_96_ -= c1 * u3[i+2][j][k-1];
_t_81_ += _t_95_ * _t_96_;
_t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
_t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
_t_102_ = c2 * u1[i-2][j][k+2];
_t_102_ -= c2 * u1[i-2][j][k-2];
_t_102_ += c1 * u1[i-2][j][k+1];
_t_102_ -= c1 * u1[i-2][j][k-1];
_t_81_ += strx[i] * _t_101_ * _t_102_;
_t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
_t_109_ = c2 * u2[i-2][j][k+2];
_t_109_ -= c2 * u2[i-2][j][k-2];
_t_109_ += c1 * u2[i-2][j][k+1];
_t_109_ -= c1 * u2[i-2][j][k-1];
_t_81_ += stry[j] * _t_108_ * _t_109_;
_t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
_t_114_ = c2 * u3[i-2][j][k+2];
_t_114_ -= c2 * u3[i-2][j][k-2];
_t_114_ += c1 * u3[i-2][j][k+1];
_t_114_ -= c1 * u3[i-2][j][k-1];
_t_81_ += _t_113_ * _t_114_;
_t_79_ = stry[j] * c2 * _t_81_;
_t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
_t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
_t_121_ = c2 * u1[i+1][j][k+2];
_t_121_ -= c2 * u1[i+1][j][k-2];
_t_121_ += c1 * u1[i+1][j][k+1];
_t_121_ -= c1 * u1[i+1][j][k-1];
_t_118_ = strx[i] * _t_120_ * _t_121_;
_t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
_t_128_ = c2 * u2[i+1][j][k+2];
_t_128_ -= c2 * u2[i+1][j][k-2];
_t_128_ += c1 * u2[i+1][j][k+1];
_t_128_ -= c1 * u2[i+1][j][k-1];
_t_118_ += stry[j] * _t_127_ * _t_128_;
_t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
_t_133_ = c2 * u3[i+1][j][k+2];
_t_133_ -= c2 * u3[i+1][j][k-2];
_t_133_ += c1 * u3[i+1][j][k+1];
_t_133_ -= c1 * u3[i+1][j][k-1];
_t_118_ += _t_132_ * _t_133_;
_t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
_t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
_t_139_ = c2 * u1[i-1][j][k+2];
_t_139_ -= c2 * u1[i-1][j][k-2];
_t_139_ += c1 * u1[i-1][j][k+1];
_t_139_ -= c1 * u1[i-1][j][k-1];
_t_118_ += strx[i] * _t_138_ * _t_139_;
_t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
_t_146_ = c2 * u2[i-1][j][k+2];
_t_146_ -= c2 * u2[i-1][j][k-2];
_t_146_ += c1 * u2[i-1][j][k+1];
_t_146_ -= c1 * u2[i-1][j][k-1];
_t_118_ += stry[j] * _t_145_ * _t_146_;
_t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
_t_151_ = c2 * u3[i-1][j][k+2];
_t_151_ -= c2 * u3[i-1][j][k-2];
_t_151_ += c1 * u3[i-1][j][k+1];
_t_151_ -= c1 * u3[i-1][j][k-1];
_t_118_ += _t_150_ * _t_151_;
_t_79_ += stry[j] * c1 * _t_118_;
r1ic0jc0kc0 += _t_79_;
_t_159_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_160_ = c2 * u1[i][j+2][k+2];
_t_160_ -= c2 * u1[i][j-2][k+2];
_t_160_ += c1 * u1[i][j+1][k+2];
_t_160_ -= c1 * u1[i][j-1][k+2];
_t_157_ = stry[j+2] * _t_159_ * _t_160_;
_t_155_ = c2 * _t_157_ * strx[i];
_t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
_t_165_ = c2 * u2[i][j+2][k+2];
_t_165_ -= c2 * u2[i][j-2][k+2];
_t_165_ += c1 * u2[i][j+1][k+2];
_t_165_ -= c1 * u2[i][j-1][k+2];
_t_155_ += c2 * _t_164_ * _t_165_;
_t_171_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_172_ = c2 * u1[i][j+2][k-2];
_t_172_ -= c2 * u1[i][j-2][k-2];
_t_172_ += c1 * u1[i][j+1][k-2];
_t_172_ -= c1 * u1[i][j-1][k-2];
_t_169_ = stry[j] * _t_171_ * _t_172_;
_t_155_ += c2 * _t_169_ * strx[i];
_t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
_t_177_ = c2 * u2[i][j+2][k-2];
_t_177_ -= c2 * u2[i][j-2][k-2];
_t_177_ += c1 * u2[i][j+1][k-2];
_t_177_ -= c1 * u2[i][j-1][k-2];
_t_155_ += c2 * _t_176_ * _t_177_;
_t_184_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_185_ = c2 * u1[i][j+2][k+1];
_t_185_ -= c2 * u1[i][j-2][k+1];
_t_185_ += c1 * u1[i][j+1][k+1];
_t_185_ -= c1 * u1[i][j-1][k+1];
_t_182_ = stry[j-2] * _t_184_ * _t_185_;
_t_155_ += c1 * _t_182_ * strx[i];
_t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
_t_190_ = c2 * u2[i][j+2][k+1];
_t_190_ -= c2 * u2[i][j-2][k+1];
_t_190_ += c1 * u2[i][j+1][k+1];
_t_190_ -= c1 * u2[i][j-1][k+1];
_t_155_ += c1 * _t_189_ * _t_190_;
_t_196_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_197_ = c2 * u1[i][j+2][k-1];
_t_197_ -= c2 * u1[i][j-2][k-1];
_t_197_ += c1 * u1[i][j+1][k-1];
_t_197_ -= c1 * u1[i][j-1][k-1];
_t_194_ = stry[j] * _t_196_ * _t_197_;
_t_155_ += c1 * _t_194_ * strx[i];
_t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
_t_202_ = c2 * u2[i][j+2][k-1];
_t_202_ -= c2 * u2[i][j-2][k-1];
_t_202_ += c1 * u2[i][j+1][k-1];
_t_202_ -= c1 * u2[i][j-1][k-1];
_t_155_ += c1 * _t_201_ * _t_202_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
// Allocate one device buffer of `count` doubles, report allocation failure via
// the project's check_error(), and upload the host data into it.
static double *alloc_and_upload (const double *hsrc, size_t count, const char *errmsg) {
  double *dptr;
  cudaMalloc (&dptr, sizeof(double)*count);
  check_error (errmsg);
  cudaMemcpy (dptr, hsrc, sizeof(double)*count, cudaMemcpyHostToDevice);
  return dptr;
}
// Host wrapper for the curvi kernel: uploads all N^3 fields and the two 1-D
// stretching arrays, launches curvi over an N x N (j,k) grid, and copies r1
// back to the host.
// Fixes vs. original: (1) every device allocation is freed before returning
// (the original leaked all 12 buffers), (2) the launch is followed by an
// error check, (3) the N*N*N byte count is computed in size_t to avoid int
// overflow for large N.
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
  size_t vol = (size_t)N*N*N;   // elements per 3-D field
  double *r1   = alloc_and_upload (h_r1,   vol, "Failed to allocate device memory for r1\n");
  double *u1   = alloc_and_upload (h_u1,   vol, "Failed to allocate device memory for u1\n");
  double *u2   = alloc_and_upload (h_u2,   vol, "Failed to allocate device memory for u2\n");
  double *u3   = alloc_and_upload (h_u3,   vol, "Failed to allocate device memory for u3\n");
  double *mu   = alloc_and_upload (h_mu,   vol, "Failed to allocate device memory for mu\n");
  double *la   = alloc_and_upload (h_la,   vol, "Failed to allocate device memory for la\n");
  double *met1 = alloc_and_upload (h_met1, vol, "Failed to allocate device memory for met1\n");
  double *met2 = alloc_and_upload (h_met2, vol, "Failed to allocate device memory for met2\n");
  double *met3 = alloc_and_upload (h_met3, vol, "Failed to allocate device memory for met3\n");
  double *met4 = alloc_and_upload (h_met4, vol, "Failed to allocate device memory for met4\n");
  double *strx = alloc_and_upload (h_strx, (size_t)N, "Failed to allocate device memory for strx\n");
  double *stry = alloc_and_upload (h_stry, (size_t)N, "Failed to allocate device memory for stry\n");
  // 2-D launch over (j,k); the kernel loops over i internally.
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  check_error ("Failed to launch curvi kernel\n");
  // Blocking copy also synchronizes with the kernel before reading r1.
  cudaMemcpy (h_r1, r1, sizeof(double)*vol, cudaMemcpyDeviceToHost);
  // Release all device memory (the original version leaked every buffer).
  cudaFree (r1);
  cudaFree (u1);
  cudaFree (u2);
  cudaFree (u3);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (met1);
  cudaFree (met2);
  cudaFree (met3);
  cudaFree (met4);
  cudaFree (strx);
  cudaFree (stry);
}
|
12,498 | #include <stdio.h>
__global__
void testKernel(int val){
    // Each thread prints one line identifying its flattened block and thread
    // index within the launch (the `val` parameter is unused, as before).
    int blk = blockIdx.y*gridDim.x + blockIdx.x;
    int layerOffset = threadIdx.y*blockDim.x + threadIdx.x;
    int thr = threadIdx.z*blockDim.x*blockDim.y + layerOffset;
    double l2n_d = 1.9;  // constant value shown in the output
    printf("[%d, %d]:\t\tValue is:%g\n", blk, thr, l2n_d);
}
int main(){
    // Launch a 2x2 grid of 2x2x2 blocks; every thread prints its indices.
    dim3 dimGrid(2, 2);
    dim3 dimBlock(2, 2, 2);
    testKernel<<<dimGrid, dimBlock>>>(10);
    // Launch-configuration errors are only reported via cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Synchronize so device-side printf output is flushed before exit, and
    // surface any asynchronous execution error.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;  // original returned EXIT_SUCCESS (== 0) without including <stdlib.h>
}
|
12,499 | #include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdlib.h>
#include "device_launch_parameters.h"
#include <thrust/scan.h>
// Bases and moduli for polynomial hashing of query-vertex neighbourhoods.
// Only BASE1/MOD1 are active; the BASE2/MOD2 (hash2) paths are commented out
// in the kernels below.  MOD1 also bounds the index range of d_hash1.
const int BASE1 = 10000 + 7;
const int BASE2 = 100000 + 3;
const int MOD1 = 1000000 + 3;
const int MOD2 = 1000000 + 37;
// One thread per query vertex.  For each vertex that has no id yet
// (d_qvid==0) and all of whose tree-edge neighbours already have ids, fold
// the own label and the neighbour ids into a polynomial hash (mod MOD1) and
// mark that bucket in d_hash1.  Also clears *d_over so the host knows this
// pass made progress.  Vertices with an unlabeled tree neighbour bail out
// and are retried in a later pass.
// NOTE(review): several threads may store 1 into the same d_hash1 slot
// concurrently; all writers store the same value, so this looks benign —
// confirm intended.
__global__ void findhash(int *d_qvert,char *d_qverlabel,int *d_qverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_qtree,int *d_hash1,int *d_hash2)
{
int i;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
if(d_qvid[ver]!=0)
return;
// CSR adjacency: edges of ver live in [d_qvert[ver], l).
int l=d_qvert[ver+1];
int hash1=d_qverlabel[ver],hash2=1;
int flag=0;
for(i=d_qvert[ver];i<l;i++)
{
int m=d_qelist[i];
bool treeedge=d_qtree[i];
if(treeedge){
int tt=d_qvid[m];
if(tt==0)
return;
flag=1;
// Accumulate neighbour id; mod MOD1 keeps hash1 inside d_hash1's range.
hash1=(hash1*1L*BASE1)*tt % MOD1;
// hash2=(hash2*1L*BASE2)*tt % MOD2;
}
}
//if(flag==0)
// return;
//if(flag==1)
{
*d_over=false;
d_hash1[hash1]=1;
//d_hash2[hash2]=1;
}
}
// One thread per query vertex: give id 1 to every still-unlabeled vertex
// (d_qvid==0) that is incident to no tree edge at all.  Vertices touching a
// tree edge are left for the hashing passes.
__global__ void setdeg1(int *d_qvert,int *d_qverc,int *d_qvid,bool *d_qtree)
{
    int blockLinear = blockIdx.x*gridDim.y + blockIdx.y;
    int ver = threadIdx.x*blockDim.y + threadIdx.y
            + blockDim.x*blockDim.y*blockLinear;
    if (ver >= d_qverc[0] || d_qvid[ver] != 0)
        return;
    int first = d_qvert[ver];
    int last  = d_qvert[ver+1];      // CSR edge range of this vertex
    for (int e = first; e < last; e++)
        if (d_qtree[e])
            return;                  // has a tree edge: skip
    d_qvid[ver] = 1;
}
/*__global__ void alignhash(bool *d_hash1,bool *d_hash2)
{
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=1000038)
return ;
if(d_hash1[ver] || d_hash2[ver]){
d_hash1=true;
}
}*/
// One thread per query vertex.  Recomputes the same neighbourhood hash as
// findhash for vertices whose tree neighbours all have ids, then uses
// d_loc[hash] (built on the host from the marked hash buckets) to assign the
// vertex its id; d_qidtov records the inverse id->vertex mapping.  Also
// records ver as qparent of each tree neighbour.
// NOTE(review): qparent[m]=ver sets ver as the parent of every tree
// neighbour m — confirm the intended parent direction against the traversal
// order used by the host.
__global__ void puttoid(int *d_qvert,char *d_qverlabel,int *d_qverc,int *d_qvid,int *d_qelist,bool *d_qtree,int *d_loc,int * d_qidtov,int *qparent)
{
int i;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
if(d_qvid[ver]!=0)
return;
int l=d_qvert[ver+1];
// Same hash recurrence as findhash — must stay in sync with it.
int hash1=d_qverlabel[ver],hash2=1;
int flag=0;
for(i=d_qvert[ver];i<l;i++)
{
int m=d_qelist[i];
bool treeedge=d_qtree[i];
if(treeedge){
int tt=d_qvid[m];
if(tt==0)
return;
flag=1;
hash1=(hash1*1L*BASE1)*tt % MOD1;
// hash2=(hash2*1L*BASE2)*tt % MOD2;
}
}
// Second pass: record parent links along tree edges.
for(i=d_qvert[ver];i<l;i++){
int m=d_qelist[i];
bool treeedge=d_qtree[i];
if(treeedge){
qparent[m]=ver;
}
}
//printf("%d %d %d \n",ver,flag,d_loc[hash1]);
// if(flag==0)
// return;
// d_loc maps the hash bucket to the compacted id chosen by the host.
int id=d_loc[hash1];
d_qvid[ver]=id;
d_qidtov[id]=ver;
}
// Recursive backtracking check: can the query edges [qfrom,qto) of query
// vertex `ver` be injectively matched onto the data edges [dfrom,dto), where
// a query edge whose endpoint has id k may only map to a data endpoint l
// with d_dcvslist[k][l] set?  Query endpoints whose id is >= d_qvid[ver] are
// skipped (treated as trivially matchable).  `check` marks data edges
// already consumed along the current recursion path; `i` is the 1-based
// index of the query edge currently being placed.
// NOTE(review): d_dcvslist is declared int** but indexed as a boolean
// candidate matrix — presumably 0/1 entries; confirm.
__device__ bool chechall(int ver,bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,int ** d_dcvslist){
//int ql=qfrom-qto;
int ql=qto-qfrom;
int j,k,l;
//d_dcvslist[2][ql]=true;
if(i==ql){
// Base case: last query edge — succeed if it is skippable or any free
// data edge admits it.
k=d_qelist[i+qfrom-1];
k=d_qvid[k];
if(k>=d_qvid[ver])
return true;
for(j=dfrom;j<dto;j++){
l=d_delist[j];
if(check[j])
continue;
//if(ver==0)
// printf("a%da",l);
if(!d_dcvslist[k][l])
continue;
return true;
}
}
else{
int res=false;
k=d_qelist[i+qfrom-1];
k=d_qvid[k];
// Skippable endpoint: move on to the next query edge without consuming
// a data edge.
if(k>=d_qvid[ver])
return chechall(ver,check,i+1,dfrom,dto,d_delist,d_qelist,d_qvid,qfrom,qto,d_dcvslist);
for(j=dfrom;j<dto;j++){
l=d_delist[j];
if(check[j])
continue;
if(!d_dcvslist[k][l])
continue;
// Tentatively consume data edge j, recurse, and backtrack on failure.
check[j]=true;
res|=chechall(ver,check,i+1,dfrom,dto,d_delist,d_qelist,d_qvid,qfrom,qto,d_dcvslist);
if(res==true)
return true;
check[j]=false;
}
}
// No assignment of the remaining query edges worked.
return false;
}
// One thread per data vertex: decide whether data vertex dver is a candidate
// match for query vertex `ver` (same label, at least as many edges, and the
// neighbourhood check chechall succeeds), and if so set the candidate flag
// d_dcvslist[d_qvid[ver]][dver].
// NOTE(review): all threads share the single `temp` scratch array and
// chechall mutates it (check[j]=true/false) — this looks like a data race
// unless the launch guarantees only one thread reaches chechall at a time;
// confirm (the commented-out per-thread `checked` allocations suggest this
// was known).
__global__ void findcvs(bool *temp,int ver,int *d_dvert,char * d_dverlabel,int *d_dverc,int *d_delist,int *d_qvert,char *d_qverlabel,int *d_qelist,int *d_qvid,int ** d_dcvslist )
{
//int i;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int dver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(dver>=d_dverc[0])
return ;
// Labels must match for a candidate pairing.
if(d_dverlabel[dver]!=d_qverlabel[ver])
return;
// The data vertex needs at least as many incident edges as the query vertex.
int ql=d_qvert[ver+1]-d_qvert[ver];
int dl=d_dvert[dver+1]-d_dvert[dver];
if(ql>dl)
return;
// if(dver!=1 && ver==0)
// return;
// printf("%d\n",dver);
//bool *checked=(bool*)malloc(sizeof(bool)*d_dverc[0]);
//bool *checked=new bool[d_dverc[0]];
//memset(checked,false,sizeof(bool)*d_dverc[0]);
//chechall(bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,bool ** d_dcvslist)
if(chechall(ver,temp,1,d_dvert[dver],d_dvert[dver+1],d_delist,d_qelist,d_qvid,d_qvert[ver],d_qvert[ver+1],d_dcvslist))
d_dcvslist[d_qvid[ver]][dver]=true;
//free(checked);
}
// One thread per data vertex: scatter each flagged vertex into its compacted
// slot.  d_loc holds an exclusive prefix sum of the candidate flags, so a
// vertex was flagged exactly when d_loc[dver] != d_loc[dver+1], and
// d_loc[dver] is its position in the compacted list.
__global__ void puttolist(int *d_dverc,int *d_loc,int * d_dcvslist )
{
    int lane  = threadIdx.x*blockDim.y + threadIdx.y;
    int block = blockIdx.x*gridDim.y + blockIdx.y;
    int dver  = lane + blockDim.x*blockDim.y*block;
    if (dver >= d_dverc[0])
        return;
    int slot = d_loc[dver];
    if (slot != d_loc[dver+1])
        d_dcvslist[slot] = dver;
}
// One thread per query vertex.  Given a full query->data vertex mapping in
// qdmap, verify that every query edge of this vertex is realized in the data
// graph.  A missing TREE edge invalidates the whole mapping
// (found[0]=found[1]=-1); a missing non-tree edge counts it in found[0] and
// zeroes found[1].
// NOTE(review): the found[] updates are plain (non-atomic) writes from many
// threads; found[0]++ in particular can lose counts under concurrency —
// confirm whether only the -1 / 0 flags are actually consumed by the host.
__global__ void checkperm(int *found,int * qdmap,int * d_qverc,int * d_qelist,int * d_qvert,int * d_dvert,int *d_delist,bool *d_qtree){
int i;
//found[0]=false;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
int n,p,j,k,flag=0;
//for(ver=0;ver<d_qverc[0];ver++){
int l=d_qvert[ver+1];
int dver=qdmap[ver];
n=d_dvert[dver+1];
for(i=d_qvert[ver];i<l;i++)
{
flag=0;
j=d_qelist[i];
//if(!d_qtree[i])
// continue;
p=d_dvert[dver];
k=qdmap[j];
// Linear scan of dver's data edges for the mapped neighbour k.
for(;p<n;p++){
if(k==d_delist[p]){
flag=1;
break;
}
}
if(!flag){
//*found=false;
if(d_qtree[i]){
found[0]=found[1]=-1;
return;
}
else
found[1]=0,found[0]++;
}
}
//}
}
// Global state shared by the host driver and kernels.
// Naming convention: h_* = host memory, d_* = device memory.
int * qdmap;                                   // per-thread query->data mapping scratch (device)
int *d_qverc,*d_dverc;                          // vertex counts of query / data graph
int *d_qvid,*d_qidtov,*h_qidtov,*h_qvid;        // vertex id assignment and its inverse
int *d_qvert,*d_qelist,*d_dvert,*d_delist;//,*d_dvelist,*d_qvelist;
bool *d_qtree,*d_over;                          // tree-edge flags; per-pass progress flag
int *d_qdmap;
bool h_over;
bool *h_qtree;
int *d_size_cvs,*h_size_cvs,ansi=0,treeansi=0;  // candidate-set sizes; answer counters
long long int * h_anslist,*d_anslist;
long long int * h_treeanslist,*d_treeanslist;
int *h_treeremlist,*d_treeremlist;
/*__global__ void processoperation(int type,int a,int b,int nans,long long int *anslist,int *qverc,int *dverc,int *qvert,int *qelist,int *dvert,int *delist,int **cvsverlist,int *size_cvs,int *qvid){
int ansi=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ansi>=nans)
return ;
long long int indexperm=anslist[ansi];
if(indexperm==-1)
return;
//int *d_qdmap=new int[d_qverc[0]];//&d_mapans[d_qverc[0]*threadId];//new int[d_qverc[0]];
int mapvera,mapverb=-1;
int *aedges=NULL,till,i;
//printf("%d\n ",indexperm);
//anslist[ansi]=-1;
for(i=0;i<qverc[0];i++){
int j=qvid[i];
// printf("j%d %d %d %dj ",i,j,size_cvs[j],mapvera);
if(type==0 && i==a)
mapvera=cvsverlist[j][indexperm%size_cvs[j]],aedges=&delist[dvert[mapvera]],till=dvert[mapvera+1];
else if(type==0 && i==b)
mapverb=cvsverlist[j][indexperm%size_cvs[j]];
else if(type==1 && cvsverlist[j][indexperm%size_cvs[j]]==a)
mapvera=i,aedges=&qelist[qvert[mapvera]],till=qvert[mapvera+1];
else if(type==1 && cvsverlist[j][indexperm%size_cvs[j]]==b)
mapverb=i;
indexperm/=size_cvs[j];
}
bool flag=false;
// anslist[ansi]=-1;
if(aedges==NULL || mapverb==-1 || indexperm>0)
return;
// printf("j%d %dj",aedges[0],mapverb);
for(i=0;i<till;i++){
if(aedges[i]==mapverb){
if(type==1){
anslist[ansi]=-1;
break;
}
flag=true;
break;
}
}
if(!flag && type==0)
anslist[ansi]=-1;
}
__global__ void processqdnontree(int type,int a,int b,int ntans,long long int *tanslist,int *tremlist,int *qverc,int *dverc,int *qvert,int *qelist,int *dvert,int *delist,int **cvsverlist,int *size_cvs,int *qvid){
int ansi=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ansi>=ntans)
return ;
long long int indexperm=tanslist[ansi];
if(tremlist[ansi]==0)
return;
int mapvera,mapverb=-1;
int *aedges=NULL,till,i;
for(i=0;i<qverc[0];i++){
int j=qvid[i];
if(i==a)
mapvera=cvsverlist[j][indexperm%size_cvs[j]],aedges=&delist[dvert[mapvera]],till=dvert[mapvera+1];
else if(i==b)
mapverb=cvsverlist[j][indexperm%size_cvs[j]];
indexperm/=size_cvs[j];
}
bool flag=false;
// anslist[ansi]=-1;
if(aedges==NULL || mapverb==-1 || indexperm>0)
return;
// printf("j%d %dj",aedges[0],mapverb);
for(i=0;i<till;i++){
if(aedges[i]==mapverb){
flag=true;
break;
}
}
if(!flag)
atomicDec((unsigned int *)&tremlist[ansi],tremlist[ansi]);
}
__device__ void process(int id,int type,int a,int b,int *qverc,int *dverc,int *qvert,int *qelist,int *dvert,int *delist,char *qverlabel,char *dverlabel,int **cvslist,int *qvid,int ** qaddnodes,int *locks,int *tempcheck,int *parent){
int i,j;
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=qverc[0])
return ;
//__syncthreads();
//v=id;
//for(v!=-1;v=parent[v]){
if(atomicCAS(locks[ver],0,0xFFFFFFFF)!=0)
return;
for(i=qverv[v];i<qver[v+1];i++){
if(qelist[i]!=-1 )
while(atomicCAS(locks[qelist[i]],ver,0xFFFFFFFF)!=ver || atomicCAS(locks[qelist[i]],0,0xFFFFFFFF)!=0 );
}
dim3 dblocks((sqrtf(dverc[0])/16 )+ 1,(sqrtf(dverc[0])/16)+1);
dim3 dthreads(16,16);
findaddcvslist<<<dblocks,dthreads>>>(tempcheck,v,dvert,dverlabel,dverc,delist,qvert,qverlabel,qelist,qvid,cvslist,qaddnodes);
atomicExch(locks[v],0);
for(i=qver[v];i<qver[v+1];i++){
if(qelist[i]!=-1 )
atomicExch(locks[qelist[i]],0);
}
//}
}
__global__ void findaddcvslist(bool *temp,int ver,int *d_dvert,char * d_dverlabel,int *d_dverc,int *d_delist,int *d_qvert,char *d_qverlabel,int *d_qelist,int *d_qvid,int ** d_dcvslist,int **d_addcvslist )
{
//int i;
int dver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(dver>=d_dverc[0])
return ;
if(d_dcvslist[dver])
return;
if(d_dverlabel[dver]!=d_qverlabel[ver])
return;
int ql=d_qvert[ver+1]-d_qvert[ver];
int dl=d_dvert[dver+1]-d_dvert[dver];
if(ql>dl)
return;
//bool *checked=(bool*)malloc(sizeof(bool)*d_dverc[0]);
//bool *checked=new bool[d_dverc[0]];
//memset(checked,false,sizeof(bool)*d_dverc[0]);
//chechall(bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,bool ** d_dcvslist)
if(chechall(ver,temp,1,d_dvert[dver],d_dvert[dver+1],d_delist,d_qelist,d_qvid,d_qvert[ver],d_qvert[ver+1],d_dcvslist))
d_addcvslist[ver][dver]=true;
//free(checked);
}
__global__ void findaddcvs(bool *temp,int ver,int *d_dvert,char * d_dverlabel,int *d_dverc,int *d_delist,int *d_qvert,char *d_qverlabel,int *d_qelist,int *d_qvid,int ** d_dcvslist,int **d_addcvslist )
{
//int i;
int dver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(dver>=d_dverc[0])
return ;
if(d_dcvslist[dver])
return;
if(d_dverlabel[dver]!=d_qverlabel[ver])
return;
int ql=d_qvert[ver+1]-d_qvert[ver];
int dl=d_dvert[dver+1]-d_dvert[dver];
if(ql>dl)
return;
//bool *checked=(bool*)malloc(sizeof(bool)*d_dverc[0]);
//bool *checked=new bool[d_dverc[0]];
//memset(checked,false,sizeof(bool)*d_dverc[0]);
//chechall(bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,bool ** d_dcvslist)
if(chechall(ver,temp,1,d_dvert[dver],d_dvert[dver+1],d_delist,d_qelist,d_qvid,d_qvert[ver],d_qvert[ver+1],d_dcvslist))
d_addcvslist[qvid[ver]][dver]=true;
//free(checked);
}*//*
__global__ void doquery(int *nquery,int * type,int * vera,int * verb,int *ntans,long long int * tanslist,int *tremlist,int *nans,long long int * anslist,int ** cvsmatrix,int **cvslist,int *qverc,int *dverc,int *qvert,int *qelist,int *dvert,int *delist,int *size_cvs,int *qvid,bool *qtree,int ** cvsaddlist,int **qaddnodes,int *locks,int *tempcheck,int *parent){
int i;
int qi=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(qi>=nquery[0])
return ;
//nans[0]=1;
dim3 blocks((sqrtf(nans[0])/16 )+ 1,(sqrtf(nans[0])/16)+1);
dim3 threads(16,16);
if(type[qi]==0){
int a=vera[qi];
int b=verb[qi];
for(i=dvert[a];i<dvert[a+1];i++){
if(delist[i]==b)
delist[i]=-1;
}
for(i=dvert[b];i<dvert[b+1];i++){
if(delist[i]==a)
delist[i]=-1;
}
processoperation<<<blocks,threads>>>(type[qi],vera[qi],verb[qi],nans[0],anslist,qverc,dverc,qvert,qelist,dvert,delist,cvslist,size_cvs,qvid);
}
else if(type[qi]==1){
processoperation<<<blocks,threads>>>(type[qi],vera[qi],verb[qi],nans[0],anslist,qverc,dverc,qvert,qelist,dvert,delist,cvslist,size_cvs,qvid);
}
else if(type[qi]==2){
dim3 ntblocks((sqrtf(ntans[0])/16 )+ 1,(sqrtf(ntans[0])/16)+1);
int a=vera[qi];
int b=verb[qi];
for(i=qvert[a];i<qvert[a+1];i++){
if(qelist[i]==b)
qelist[i]=-1;
}
for(i=qvert[b];i<qvert[b+1];i++){
if(qelist[i]==a)
qelist[i]=-1;
}
int l=qvert[vera[qi]+1],i;
bool flag=false,istree=false;
for(i=qvert[vera[qi]];i<l;i++){
if(qelist[i]==verb[qi]){
flag=true;
if(qtree[i])
istree=true;
break;
}
}
if(!flag)
return;
if(!istree){
processqdnontree<<<ntblocks,threads>>>(type[qi],vera[qi],verb[qi],ntans[0],tanslist,tremlist,qverc,dverc,qvert,qelist,dvert,delist,cvslist,size_cvs,qvid);
}
else{
int v=a;
while(v!=-1){
locks[v]=v;
v=parent[v];
}
//locks[ver]=ver;
//processqdtree(qi+1,type[qi],vera[qi],verb[qi],qverc,dverc,qvert,qelist,dvert,delist,cvsmatrix,qvid,qaddnodes,locks,tempcheck,parent);
}
}
else{
}
}*/
//parms[0]=max thread size
//parms[1]=from qvertex
//parms[2]=to qvertex
//parms[3]=till now size
//parms[4]=number of candidate permutations processed in this batch
// One thread per candidate permutation in the current batch.  Decodes a
// mixed-radix index (radices = candidate-set sizes of the query vertices) —
// optionally prefixed by a previously validated partial permutation taken
// from `tillnow` — into a query->data vertex mapping in this thread's slice
// of qdmap, then rejects it if any two query vertices map to the same data
// vertex or if any query edge among the mapped vertices is absent from the
// data graph.  d_found[threadId+1] ends up 1 for a surviving permutation and
// 0 otherwise (the host prefix-sums this to compact survivors).
// parms layout: [0]=batch base index, [1]=last query vertex of the validated
// prefix, [2]=last query vertex mapped in this pass, [3]=count of validated
// prefixes (0 on the first pass), [4]=batch size.
__global__ void cperm(long long int *parms,int *d_found,int * qdmap,long long int *tillnow,int * d_qverc,int * d_qelist,int * d_qvert,int * d_dvert,int *d_delist,bool *d_qtree,int *d_size_cvs,int **d_cvsverlist,int *d_qvid){
int i;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int threadId=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
long long int indexperm=threadId+parms[0];
// Divide out every radix; a nonzero remainder means this index is beyond
// the total permutation count, so the thread has no work.
if(parms[3]!=0)
indexperm/=parms[3];
for(i=parms[1]+1;i<=parms[2];i++){
int j=d_qvid[i];
indexperm/=d_size_cvs[j];
}
if(indexperm)
return;
if(threadId>=parms[4])
return;
indexperm=threadId+parms[0];
// if(indexperm!=3409 && parms[3]!=0)
// return;
// Optimistically mark as valid; rejected below if any test fails.
d_found[threadId+1]=1;
// int *found=&d_found[2*threadId];
// Each thread owns a qverc-sized slice of qdmap as its mapping scratch.
int *d_qdmap=&qdmap[d_qverc[0]*threadId];//new int[d_qverc[0]];
// Decode the validated prefix (vertices 0..parms[1]) from tillnow.
if(parms[3]){
indexperm=tillnow[indexperm%parms[3]];
for(i=parms[1];i>=0;i--){
int j=d_qvid[i];
d_qdmap[i]=d_cvsverlist[j][indexperm%d_size_cvs[j]];
//if(parms[3]!=0)
// printf("%d ",d_qdmap[i]);
indexperm/=d_size_cvs[j];
}
}
indexperm=threadId+parms[0];
if(parms[3]!=0)
indexperm/=parms[3];
// Decode the new vertices parms[1]+1..parms[2] from the batch index.
for(i=parms[1]+1;i<=parms[2];i++){
int j=d_qvid[i];
d_qdmap[i]=d_cvsverlist[j][indexperm%d_size_cvs[j]];
// if(parms[3]!=0)
// printf("%d ",d_qdmap[i]);
indexperm/=d_size_cvs[j];
}
// Injectivity: no two query vertices may map to the same data vertex.
for(i=0;i<=parms[2];i++){
int j;
for(j=i+1;j<=parms[2];j++){
if(d_qdmap[i]==d_qdmap[j]){
d_found[threadId+1]=0;
return;
}
}
}
// Edge preservation: every query edge whose endpoints are both mapped must
// exist between the mapped data vertices.
int n,p,j,k,flag=0,ver;
for(ver=0;ver<=parms[2];ver++){
int l=d_qvert[ver+1];
int dver=d_qdmap[ver];
n=d_dvert[dver+1];
for(i=d_qvert[ver];i<l;i++)
{
flag=0;
j=d_qelist[i];
if(j>parms[2])
continue;
k=d_qdmap[j];
//if(!d_qtree[i])
// continue;
p=d_dvert[dver];
for(;p<n;p++){
if(k==d_delist[p]){
flag=1;
break;
}
}
if(!flag){
// if(parms[3]!=0)
// printf("iNOK ");
d_found[threadId+1]=0;
return;
/*if(d_qtree[i]){
found[0]=found[1]=-1;
return;
}
else
found[1]=0,found[0]++;*/
}
}
}
}
// Companion to cperm: after the host has exclusive-scanned the d_found
// flags, each surviving thread (detected by found[tid+1] != found[tid+2])
// re-encodes its permutation as a single mixed-radix integer and stores it
// in `next` at its compacted position found[threadId+1].  Uses the same
// parms layout and the same out-of-range guard as cperm.
__global__ void puttoanswer(long long int *parms,int *d_qvid,int *d_size_cvs,int *found,long long int *till,long long int *next){
int i;
// Flattened global thread id over a 2-D grid of 2-D blocks.
int threadId=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
long long int indexperm=threadId+parms[0];
// Same range check as in cperm: skip indices past the permutation count.
if(parms[3]!=0)
indexperm/=parms[3];
for(i=parms[1]+1;i<=parms[2];i++){
int j=d_qvid[i];
indexperm/=d_size_cvs[j];
}
if(indexperm)
return;
if(threadId>=parms[4])
return;
// Equal adjacent scan values mean this thread's permutation was rejected.
if(found[threadId+1]==found[threadId+2])
return;
// if(parms[3]!=0)
// printf("Thread%d",threadId);
long long int Id=threadId+parms[0];
indexperm=0;
// Start from the stored prefix index, then append the new digits.
if(parms[3]!=0){
indexperm=till[Id%parms[3]];
Id/=parms[3];
}
for(i=parms[1]+1;i<=parms[2];i++){
int j=d_qvid[i];
indexperm*=d_size_cvs[j];
indexperm+=Id%d_size_cvs[j];
Id/=d_size_cvs[j];
}
// found[threadId+1] is this survivor's compacted output slot.
next[found[threadId+1]]=indexperm;
//printf("a%llda",next[0]);
}
// Upper bound on permutation-batch work per kernel sweep (divided by the
// query size in callforallperm to size each batch).
#define maxthreadsize 10000000
// Host/device copies of the graph CSR arrays and the permutation pipeline
// buffers (h_* = host, d_* = device).
int *h_qvert,*h_qelist,*h_dvert,*h_delist;//,*h_dvelist,*h_qvelist;
char *h_qverlabel,*d_qverlabel,*h_dverlabel,*d_dverlabel;
int **h_cvslist,**d_cvslist,**h_tem;            // candidate-set matrices
int **d_cvsverlist,**d_temverlist;              // compacted candidate lists
long long int *h_parms,*d_parms;                // parms[0..4] passed to cperm/puttoanswer
long long int *d_tillnow,*d_next;               // double-buffered survivor indices
int *d_found,*h_found;                          // per-thread valid flags / scan buffer
// Host-side recursive driver: extends the set of mapped query vertices one
// at a time (vertex i-1 on each call), multiplying the permutation space by
// that vertex's candidate-set size.  Whenever the space exceeds the per-batch
// budget (or on the final call, i==qver+1), it sweeps the whole space in
// batches through cperm + exclusive_scan + puttoanswer, compacting the
// surviving permutation indices into d_tillnow (double-buffered with d_next)
// before recursing further.
// Parameters: i = 1-based index of the next query vertex to map, till = last
// vertex covered by the validated prefix, tillnowsize = number of surviving
// prefixes in d_tillnow, qver = total query vertex count, mapid = size of
// the not-yet-filtered permutation space.
// NOTE(review): `ansi=mapid;` truncates a long long into the int counter
// ansi, and the kernel launches/memcpys here are unchecked — confirm
// acceptable for expected input sizes.
void callforallperm(int i,int till,int tillnowsize,int qver,long long int mapid){
int j,k,l;
// Candidate-set id of the vertex mapped by the previous level.
l=h_qvid[i-1];
//printf("mm%lld\n",mapid);
//printf("i%d %di",i,l);
if(i==qver+1){
// Final filtering pass over the complete mappings.
long long int ansc=0,fix=maxthreadsize/qver/10;
dim3 blocks((sqrt(fix)/16 )+ 1,(sqrt(fix)/16)+1);
dim3 threads(16,16);
for(int ii=0;(ii)*fix<mapid;ii++){
h_parms[0]=ii*fix;
h_parms[1]=till;
h_parms[2]=i-2;
h_parms[3]=tillnowsize;
h_parms[4]=fix;
cudaMemset(d_found,0,sizeof(int)*(fix+2));
cudaMemcpy(d_parms, h_parms, sizeof(long long int)*5, cudaMemcpyHostToDevice) ;
// printf("aaa%d %d %d %daaa",h_parms[1],h_parms[2],h_parms[3],fix);
cperm<<<blocks,threads>>>(d_parms,d_found,d_qdmap,d_tillnow,d_qverc,d_qelist,d_qvert,d_dvert,d_delist,d_qtree,d_size_cvs,d_cvsverlist,d_qvid);
// Copy flags back, prefix-sum on the host (carrying ansc across
// batches), and push the scan back for puttoanswer's compaction.
cudaMemcpy(h_found, d_found, sizeof(int)*(fix+2), cudaMemcpyDeviceToHost) ;
h_found[0]=ansc;
thrust::exclusive_scan(h_found,h_found+fix+2,h_found);
ansc=h_found[fix+1];
cudaMemcpy(d_found, h_found, sizeof(int)*(fix+2), cudaMemcpyHostToDevice) ;
puttoanswer<<<blocks,threads>>>(d_parms,d_qvid,d_size_cvs,d_found,d_tillnow,d_next);
// printf("bb%lldbb\n",ansc);
}
mapid=tillnowsize=ansc;
till=i-2;
// Swap the double buffers: the survivors become the new prefix list.
long long int * te=d_next;
d_next=d_tillnow;
d_tillnow=te;
if(mapid==0)
return;
ansi=mapid;
}
else{
// Intermediate pruning: filter before the space outgrows the budget.
if(mapid*h_size_cvs[l]>maxthreadsize/qver && i>2){
long long int ansc=0,fix=maxthreadsize/qver;
dim3 blocks((sqrt(fix)/16 )+ 1,(sqrt(fix)/16)+1);
dim3 threads(16,16);
for(int ii=0;(ii)*fix<mapid;ii++){
h_parms[0]=ii*fix;
h_parms[1]=till;
h_parms[2]=i-2;
h_parms[3]=tillnowsize;
h_parms[4]=fix;
cudaMemset(d_found,0,sizeof(int)*(fix+2));
cudaMemcpy(d_parms, h_parms, sizeof(long long int)*5, cudaMemcpyHostToDevice) ;
// printf("aaa%d %d %d %daaa",h_parms[1],h_parms[2],h_parms[3],fix);
cperm<<<blocks,threads>>>(d_parms,d_found,d_qdmap,d_tillnow,d_qverc,d_qelist,d_qvert,d_dvert,d_delist,d_qtree,d_size_cvs,d_cvsverlist,d_qvid);
cudaMemcpy(h_found, d_found, sizeof(int)*(fix+2), cudaMemcpyDeviceToHost) ;
h_found[0]=ansc;
thrust::exclusive_scan(h_found,h_found+fix+2,h_found);
ansc=h_found[fix+1];
cudaMemcpy(d_found, h_found, sizeof(int)*(fix+2), cudaMemcpyHostToDevice) ;
puttoanswer<<<blocks,threads>>>(d_parms,d_qvid,d_size_cvs,d_found,d_tillnow,d_next);
// printf("bb%lldbb\n",ansc);
// break;
}
mapid=tillnowsize=ansc;
till=i-2;
long long int * te=d_next;
d_next=d_tillnow;
d_tillnow=te;
cudaMemcpy(h_found, d_tillnow+ansc-2, sizeof(long long int)*(4), cudaMemcpyDeviceToHost) ;
// printf("zz%lld %lldzz\n",h_found[0],h_found[2]);
if(mapid==0)
return;
// if(i==10)
// return;
}
// Extend by the next vertex's candidate set and recurse.
callforallperm(i+1,till,tillnowsize,qver,mapid*h_size_cvs[l]);
/*for(j=0;j<dmax;j++){
//printf("%d %d %d\n",j,check[j],cvslist[l][j]);
if(cvslist[l][j] && !check[j]){
//ansi++;
check[j]=true;
qdmap[i-1]=j;
//mapid+=l*h_size_cvs[l];
callforallperm(check,cvslist,i+1,max,dmax,mapid*h_size_cvs[l] +j );
check[j]=false;
}
}*/
}
}
int main(int argc, char **argv)
{
// Host driver. Reads a query graph and a data graph from stdin (CSR-style
// vertex-offset/edge arrays plus one label character per vertex), mirrors
// them onto the GPU, iteratively assigns canonical ids to query vertices
// (findhash / puttoid loop), computes per-query-vertex candidate vertex
// sets ("cvs", via findcvs), and finally enumerates candidate mappings
// through the recursive callforallperm() driver defined earlier in this
// file. NOTE(review): relies on many file-scope host/device globals
// (h_qvert, d_qvid, maxthreadsize, ansi, ...) declared outside this block.
int deviceId = 4; // NOTE(review): hard-coded GPU index; fails on machines with <5 devices — confirm
cudaSetDevice(deviceId);
int h_qverc,h_dverc; // query / data vertex counts
int *d_hash1,*d_hash2; // device hash-bucket arrays used during id assignment
int i,j;
// ---- read query graph: offsets, labels, adjacency list, tree-edge flags ----
scanf("%d",&h_qverc);
h_qvert=(int *)malloc(sizeof(int)*(h_qverc+1));
h_qvid=(int *)malloc(sizeof(int)*(h_qverc+1));
h_qidtov=(int *)malloc(sizeof(int)*(h_qverc+1));
h_tem=(int **)malloc(sizeof(int*)*(h_qverc+1));
h_cvslist=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
scanf("%d ",&h_qvert[i]); // CSR offsets; h_qvert[h_qverc] == total edge count
}
h_qverlabel=(char *)malloc(sizeof(char)*(h_qverc+1));
for(i=0;i<h_qverc;i++){
scanf("%c ",&h_qverlabel[i]);
printf("i%ci ",h_qverlabel[i]); // echo labels (debug output)
}
printf("\n");
h_qelist=(int *)malloc(sizeof(int)*h_qvert[h_qverc]);
for(i=0;i<h_qvert[h_qverc];i++)
scanf("%d",&h_qelist[i]);
// per-edge boolean: read as 0/1 ints, stored as tree-edge flags
h_qtree=(bool *)malloc(sizeof(bool)*h_qvert[h_qverc]);
for(i=0;i<h_qvert[h_qverc];i++){
scanf("%d",&j);
if(j==1)
h_qtree[i]=true;
else
h_qtree[i]=false;
}
// ---- read data graph: offsets, labels, adjacency list ----
scanf("%d",&h_dverc);
h_dvert=(int *)malloc(sizeof(int)*(h_dverc+1));
for(i=0;i<=h_dverc;i++){
scanf("%d ",&h_dvert[i]);
}
h_dverlabel=(char *)malloc(sizeof(int)*(h_dverc+1)); // NOTE(review): sizeof(int) over-allocates a char buffer; harmless, but sizeof(char) was likely intended
for(i=0;i<h_dverc;i++){
scanf("%c ",&h_dverlabel[i]);
}
for(i=0;i<=h_qverc;i++)
h_cvslist[i]=(int *)malloc(sizeof(int)*(h_dverc+1)); // candidate-set membership flags per query vertex
h_delist=(int *)malloc(sizeof(int)*h_dvert[h_dverc]);
for(i=0;i<h_dvert[h_dverc];i++)
scanf("%d",&h_delist[i]);
printf("Start processing\n");
// ---- device mirrors of the query graph + hash scratch buffers ----
// NOTE(review): no return-code checks on any cudaMalloc/cudaMemcpy below;
// an early failure will surface only as garbage results later.
cudaMalloc(&d_qverc,sizeof(int));
cudaMalloc(&d_over,sizeof(bool));
cudaMalloc(&d_qvert,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_qverlabel,sizeof(char)*(h_qverc+1));
cudaMalloc(&d_qidtov,sizeof(int)*(h_qverc+1));
//cudaMalloc(&d_loc,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_qelist,sizeof(int)*h_qvert[h_qverc]);
cudaMalloc(&d_qtree,sizeof(bool)*h_qvert[h_qverc]);
cudaMalloc(&d_hash1,sizeof(int)*1000038); // 1000038-slot hash table (magic size, presumably a prime-ish bucket count — confirm)
cudaMalloc(&d_hash2,sizeof(int)*1000038);
cudaMalloc(&d_qvid,sizeof(int)*(h_qverc+1));
cudaMemcpy(d_qverc,&h_qverc,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_qvert,h_qvert,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_qverlabel,h_qverlabel,sizeof(char)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_qelist,h_qelist,sizeof(int)*h_qvert[h_qverc],cudaMemcpyHostToDevice);
cudaMemcpy(d_qtree,h_qtree,sizeof(bool)*h_qvert[h_qverc],cudaMemcpyHostToDevice);
cudaMemset(d_hash1,0,sizeof(int)*1000038);
//cudaMemset(d_hash2,0,sizeof(int)*1000038);
//cudaMemset(d_loc,0,sizeof(int)*(h_qverc+1));
int *qparent;
cudaMalloc(&qparent,sizeof(int)*(h_qverc+1));
cudaMemset(qparent,-1,sizeof(int)*(h_qverc+1)); // -1 byte pattern == all-ones int == -1 sentinel
cudaMemset(d_qidtov,-1,sizeof(int)*(h_qverc+1));
cudaMemset(d_qvid,0,sizeof(int)*(h_qverc+1));
int *h_hash1=(int *)malloc(sizeof(int)*1000038);
int *h_hash2=(int *)malloc(sizeof(int)*1000038);
// 2-D launch shaped so blocks*threads covers h_qverc work items (16x16 threads per block)
dim3 blocks((sqrt(h_qverc)/16 )+ 1,(sqrt(h_qverc)/16)+1);
dim3 threads(16,16);
//int *d_qvert,int *d_dverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_hash1,bool *d_hash2)
h_over=true;
//h_qvid[1]=1;
//h_qvid[3]=1;
//cudaMemcpy(d_qvid,h_qvid,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
//printf("qt%d %dqt\n",h_qtree[0],h_qtree[1]);
//setdeg1<<<blocks,threads>>>(d_qvert,d_qverc,d_qvid,d_qtree);
h_over=false;
int maxval=1; // running id counter; hash slots scanned into [maxval, ...) each round
// ---- fixed-point loop: hash query vertices, exclusive-scan the hash table on
// the host to assign compact ids, write ids back, repeat until no change ----
while(!h_over)
{
h_over=true;
cudaMemcpy(d_over, &h_over, sizeof(bool), cudaMemcpyHostToDevice) ;
cudaMemset(d_hash1,0,sizeof(int)*1000038);
findhash <<<blocks,threads>>> (d_qvert,d_qverlabel,d_qverc,d_qvid,d_qelist,d_over,d_qtree,d_hash1,d_hash2);
//(int *d_qvert,int *d_dverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_hash0,bool *d_qtree,bool *d_hash2)
cudaError_t err = cudaGetLastError();
if(err!=cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
printf("Not Ok");
}
cudaMemcpy(h_hash1,d_hash1,sizeof(int)*1000038,cudaMemcpyDeviceToHost);
h_hash1[0]+=maxval; // bias the scan so new ids start after those already assigned
thrust::exclusive_scan(h_hash1,h_hash1+1000038,h_hash1);
maxval=h_hash1[1000037];
cudaMemcpy(d_hash1,h_hash1,sizeof(int)*1000038,cudaMemcpyHostToDevice);
puttoid<<<blocks,threads>>>(d_qvert,d_qverlabel,d_qverc,d_qvid,d_qelist,d_qtree,d_hash1,d_qidtov,qparent);
/// cudaMemcpy(h_hash2,d_hash2,sizeof(bool)*1000038,cudaMemcpyDeviceToHost);
cudaMemcpy(&h_over, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ; // kernel clears d_over when something changed
//printf("over flag:%d ",h_over);
/*for(i=0;i<h_qverc;i++){
//if()
printf("%d ",h_qvid[i]);
// if(h_hash2[i])
// printf("h2 %d ",i);
// if(h_hash1[i] || h_hash2[i])
// printf("\n");
}
printf("\n");*/
printf("Step %d\n",maxval);
}
// NOTE(review): only h_qverc ints are copied but the print loop below reads
// index h_qverc too — h_qvid[h_qverc] is uninitialized host memory. Confirm
// whether the copy should be (h_qverc+1) ints.
cudaMemcpy(h_qvid,d_qvid,sizeof(int)*h_qverc,cudaMemcpyDeviceToHost);
cudaMemcpy(h_qidtov,d_qidtov,sizeof(int)*(h_qverc+1),cudaMemcpyDeviceToHost);
for(i=0;i<=h_qverc;i++){
printf("%d ",h_qidtov[i]);
}
printf("\n");
for(i=0;i<=h_qverc;i++){
printf("%d ",h_qvid[i]);
}
printf("\n");
cudaFree(d_qtree); // NOTE(review): d_qtree is freed here but callforallperm (above) and the commented query section still reference it — verify lifetime
cudaFree(d_hash1);
cudaFree(d_hash2);
free(h_hash1);
free(h_hash2);
free(h_qtree);
// ---- device-side candidate-set storage: array of per-query-vertex int buffers ----
cudaMalloc(&d_cvslist,sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&h_tem[i],sizeof(int)*(h_dverc+1)); // h_tem holds DEVICE pointers despite the h_ prefix
cudaMemset(h_tem[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemset(h_tem[1],0,sizeof(int)*(h_dverc+1)); // redundant: already zeroed in the loop above
cudaMemcpy(d_cvslist,h_tem,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
// ---- device mirrors of the data graph ----
cudaMalloc(&d_dvert,sizeof(int)*(h_dverc+1));
cudaMalloc(&d_dverlabel,sizeof(char)*(h_dverc+1));
cudaMalloc(&d_dverc,sizeof(int));
cudaMalloc(&d_delist,sizeof(int)*h_dvert[h_dverc]);
cudaMemcpy(d_dverc,&h_dverc,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_dvert,h_dvert,sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_dverlabel,h_dverlabel,sizeof(char)*(h_dverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_delist,h_delist,sizeof(int)*h_dvert[h_dverc],cudaMemcpyHostToDevice);
dim3 dblocks((sqrt(h_dverc)/16 )+ 1,(sqrt(h_dverc)/16)+1);
dim3 dthreads(16,16);
// NOTE(review): byte-wise memset writes 0x01010101 into each int — nonzero,
// so presumably only truthiness of h_cvslist entries matters; confirm.
memset(h_cvslist[1],1,sizeof(int)*(h_dverc+1));
h_size_cvs=(int *)malloc(sizeof(int)*(h_qverc+1)); // candidate-set cardinalities per query vertex id
memset(h_size_cvs,0,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_size_cvs,sizeof(int)*(h_qverc+1));
cudaMemset(d_size_cvs,0,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_cvsverlist,sizeof(int*)*(h_qverc+1)); // compacted candidate vertex lists (one device buffer per query id)
d_temverlist=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&d_temverlist[i],sizeof(int)*(h_dverc+1));
cudaMemset(d_temverlist[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemcpy(d_cvsverlist,d_temverlist,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
long long int totalthreads=1;
int *h_temploc;
h_temploc=(int *)malloc(sizeof(int)*(h_dverc+1));
for(i=0;i<h_dverc;i++)
h_temploc[i]=i; // seed: query id 1 is a candidate for every data vertex
cudaMemcpy(d_temverlist[1],h_temploc,sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
h_size_cvs[1]=h_dverc;
bool *d_tempcheck;
cudaMalloc(&d_tempcheck,sizeof(bool)*(h_dvert[h_dverc]+1));
printf("Starting cvs find\n");
// ---- per query id: filter candidates on device, scan membership flags on
// the host to get sizes + positions, then compact into d_temverlist[i] ----
for(i=0;i<=h_qverc;i++)
{
if(h_qidtov[i]!=-1) // skip ids with no corresponding query vertex
{
cudaMemset(d_tempcheck,false,sizeof(bool)*(h_dvert[h_dverc]+1));
//findcvs(int ver,int *d_dvert,int *d_dverc,int *d_delist,int *d_qvert,int *d_qelist,int *d_qvid,bool ** d_dcvslist )
findcvs<<<dblocks,dthreads>>>(d_tempcheck,h_qidtov[i],d_dvert,d_dverlabel,d_dverc,d_delist,d_qvert,d_qverlabel,d_qelist,d_qvid,d_cvslist);
printf("id %d \n",i);
cudaMemcpy(h_cvslist[i],h_tem[i],sizeof(int)*(h_dverc+1),cudaMemcpyDeviceToHost);
//printf("%d ",h_qidtov[i]);
thrust::exclusive_scan(h_cvslist[i],h_cvslist[i]+h_dverc+1,h_temploc); // scan 0/1 flags -> compaction offsets
h_size_cvs[i]=h_temploc[h_dverc];
cudaMemcpy(h_tem[0],h_temploc,sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice); // h_tem[0] reused as scratch offsets buffer
puttolist<<<dblocks,dthreads>>>(d_dverc,h_tem[0],d_temverlist[i]);
for(j=0;j<h_dverc;j++)
if(h_cvslist[i][j])
printf("%d ",j);
//printf("\n");
// cudaMemcpy(h_cvslist[i],d_temverlist[i],sizeof(int)*(h_dverc+1),cudaMemcpyDeviceToHost);
//cudaMemcpy(h_temploc,h_tem[i],sizeof(int)*(h_size_cvs[i]),cudaMemcpyDeviceToHost);
//printf("On list");
//for(j=0;j<h_size_cvs[i];j++)
// printf("%d ",h_temploc[j]);
//printf("\n");
printf("size %d\n",h_size_cvs[i]);
}
}
cudaMemcpy(d_size_cvs,h_size_cvs,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
//cudaMemcpy(h_delist,d_delist,sizeof(int)*(h_dvert[h_dverc]),cudaMemcpyDeviceToHost);
// for(j=0;j<h_dvert[h_dverc];j++)
// printf("%d ",h_delist[j]);
//cudaFree(d_tempcheck);
free(h_temploc);
// ---- buffers for the recursive permutation/enumeration phase ----
bool * check=(bool *)malloc(sizeof(bool)*(h_dverc+1));
memset(check,false,sizeof(bool)*(h_dverc+1));
qdmap=(int *)malloc(sizeof(int)*(h_qverc+1));
cudaMalloc(&d_qdmap,sizeof(int)*(h_qverc+1));
h_anslist=(long long int *)malloc(sizeof(long long int)*1000001);
cudaMalloc(&d_anslist,sizeof(long long int)*(1000001));
h_treeanslist=(long long int*)malloc(sizeof(long long int)*1000001);
cudaMalloc(&d_treeanslist,sizeof(long long int)*(1000001));
h_treeremlist=(int *)malloc(sizeof(int)*1000001);
cudaMalloc(&d_treeremlist,sizeof(int)*(1000001));
ansi=0;
h_parms=(long long int *)malloc(sizeof(long long int)*5);
cudaMalloc(&d_parms,sizeof(long long int)*5);
cudaMalloc(&d_tillnow,sizeof(long long int)*maxthreadsize); // current partial-mapping frontier
cudaMalloc(&d_next,sizeof(long long int)*maxthreadsize); // next frontier (ping-ponged with d_tillnow)
cudaMalloc(&d_qdmap,sizeof(int)*maxthreadsize); // NOTE(review): d_qdmap re-malloc'd here, leaking the (h_qverc+1)-sized allocation above — confirm which size is intended
h_found=(int *)malloc(sizeof(int)*maxthreadsize);
cudaMalloc(&d_found,sizeof(int)*maxthreadsize);
callforallperm(1,-1,0,h_qverc,1); // kick off the recursive enumeration defined earlier in this file
printf("Final:%d\n",ansi);
//answers found
/*int * d_ansi,*d_treeansi;
cudaMalloc(&d_ansi,sizeof(int));
cudaMemcpy(d_ansi,&ansi,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_anslist,h_anslist,sizeof(long long int)*(ansi),cudaMemcpyHostToDevice);
cudaMalloc(&d_treeansi,sizeof(int));
cudaMemcpy(d_treeansi,&treeansi,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_treeanslist,h_treeanslist,sizeof(long long int)*(treeansi),cudaMemcpyHostToDevice);
cudaMemcpy(d_treeremlist,h_treeremlist,sizeof(int)*(treeansi),cudaMemcpyHostToDevice);
int nqueries,*d_nqueries;
int *h_vera,*h_verb;
int *d_vera,*d_verb;
int *h_type,*d_type;
scanf("%d",&nqueries);
h_vera=(int*) malloc(sizeof(int)*nqueries);
h_verb=(int*) malloc(sizeof(int)*nqueries);
h_type=(int *) malloc(sizeof(int)*nqueries);
cudaMalloc(&d_vera,sizeof(int)*nqueries);
cudaMalloc(&d_verb,sizeof(int)*nqueries);
cudaMalloc(&d_type,sizeof(int)*nqueries);
int *h_qvertadd,*h_qelistadd,*h_dvertadd,*h_delistadd;//,*h_dvelist,*h_qvelist;
int *d_qvertadd,*d_qelistadd,*d_dvertadd,*d_delistadd;//,*h_dvelist,*h_qvelist;
h_qvertadd=(int *)malloc(sizeof(int)*(h_qverc+1));
h_dvertadd=(int *)malloc(sizeof(int)*(h_dverc+1));
memset(h_qvertadd,0,sizeof(int)*(h_qverc+1));
memset(h_dvertadd,0,sizeof(int)*(h_dverc+1));
map<int,vector<int>> qaddlist,daddlist;
for(i=0;i<nqueries;i++){
scanf("%d%d%d",&h_type[i],&h_vera[i],&h_verb[i]);
if(h_type[i]==2)
h_qvertadd[h_vera[i]]++,qaddlist[h_vera].push_back(verb);
else if(h_type[i]==0)
h_dvertadd[h_vera[i]]++,daddlist[h_vera].push_back(verb);
}
thrust::exclusive_scan(h_qvertadd,h_qvertadd+h_qverc+1,h_qvertadd);
thrust::exclusive_scan(h_dvertadd,h_dvertadd+h_dverc+1,h_dvertadd);
h_qelistadd=(int *)malloc(sizeof(int)*(h_qvertadd[h_qverc]+1));
h_delistadd=(int *)malloc(sizeof(int)*(h_dvertadd[h_dverc]+1));
cudaMalloc(&d_qvertadd,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_dvertadd,sizeof(int)*(h_dverc+1));
cudaMalloc(&d_qelistadd,sizeof(int)*(h_qvertadd[h_qverc]+1));
cudaMalloc(&d_delistadd,sizeof(int)*(h_dvertadd[h_dverc]+1));
cudaMemcpy(d_qvertadd,h_qvertadd,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_dvertadd,h_dvertadd,sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
for(i=0;i<h_qverc;i++)
for(j=0;j<qaddlist[i].size();j++)
h_qelistadd[h_qvertadd[i+j]]=qaddlist[i][j];
for(i=0;i<h_dverc;i++)
for(j=0;j<daddlist[i].size();j++)
h_delistadd[h_dvertadd[i+j]]=daddlist[i][j];
cudaMemcpy(d_qelistadd,h_qelistadd,sizeof(int)*(h_qvertadd[h_qverc]+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_delistadd,h_delistadd,sizeof(int)*(h_dvertadd[h_dverc]+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_vera,h_vera,sizeof(int)*nqueries,cudaMemcpyHostToDevice);
cudaMemcpy(d_verb,h_verb,sizeof(int)*nqueries,cudaMemcpyHostToDevice);
cudaMemcpy(d_type,h_type,sizeof(int)*nqueries,cudaMemcpyHostToDevice);
cudaMalloc(&d_nqueries,sizeof(int));
cudaMemcpy(d_nqueries,&nqueries,sizeof(int),cudaMemcpyHostToDevice);
dim3 qblocks((sqrt(nqueries)/16 )+ 1,(sqrt(nqueries)/16)+1);
dim3 qthreads(16,16);
int **cvsaddlist,**qaddnodes,**h_cvsaddlist,**h_qaddnodes;
int *locks;
cudaMalloc(&qaddnodes,sizeof(int*)*(h_qverc+1));
h_qaddnodes=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&h_qaddnodes[i],sizeof(int)*(h_dverc+1));
cudaMemset(h_qaddnodes[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemcpy(qaddnodes,h_qaddnodes,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMalloc(&cvsaddlist,sizeof(int*)*(h_qverc+1));
h_cvsaddlist=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&h_cvsaddlist[i],sizeof(int)*(h_dverc+1));
cudaMemset(h_cvsaddlist[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemcpy(cvsaddlist,h_cvsaddlist,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMalloc(&locks,sizeof(int)*(h_qverc+1));
cudaMemset(locks,0,sizeof(int)*(h_qverc+1));
doquery<<<qblocks,qthreads>>>(d_nqueries,d_type,d_vera,d_verb,d_treeansi,d_treeanslist,d_treeremlist,d_ansi,d_anslist,d_cvslist,d_cvsverlist,d_qverc,d_dverc,d_qvert,d_qelist,d_dvert,d_delist,d_size_cvs,d_qvid,d_qtree,cvsaddlist,qaddnodes,locks,d_tempcheck,qparent);
dohard<<<blocks,threads>>>(d_cvslist,d_cvsverlist,d_qverc,d_dverc,d_qvert,d_qelist,d_dvert,d_delist,d_size_cvs,d_qvid,d_qtree,cvsaddlist,qaddnodes,locks,d_qvertadd,d_qelistadd,d_dvertadd,d_delistadd);
cudaMemcpy(h_anslist,d_anslist,sizeof(long long int)*(ansi),cudaMemcpyDeviceToHost);
for(i=0;i<ansi;i++)
if(h_anslist[i]==-1)
printf(" %d ",i);
*/cudaFree(d_over);
// ---- teardown (device buffers; several host buffers deliberately left to the OS) ----
cudaFree(d_qverc);
cudaFree(d_qvert);
cudaFree(d_qelist);
cudaFree(d_qvid);
cudaFree(d_qidtov);
cudaFree(d_dvert);
cudaFree(d_delist);
cudaFree(d_dverc);
cudaFree(d_cvslist);
cudaFree(d_cvsverlist);
cudaFree(d_size_cvs);
cudaFree(d_anslist);
/*free(h_qvid);
free(h_qvert);
//free(h_qelist);
free(h_qidtov);
free(h_cvslist);
free(h_dvert);
free(h_delist);*/
}
|
12,500 | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}

// Strided AXPBY: for each logical element i in [0, n),
//   y[offset_y + i*stride_y] = alpha * x[offset_x + i*stride_x]
//                            + beta  * y[offset_y + i*stride_y].
// One thread per element; launch with a 1-D grid whose total thread
// count is at least n — threads past the tail return immediately.
__global__ void vector_axpby (const int n, const REAL alpha, const REAL* x, const int offset_x, const int stride_x, const REAL beta, REAL* y, int offset_y, int stride_y) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n) {
        return;  // grid-tail guard
    }
    const int iy = offset_y + idx * stride_y;
    const REAL xv = x[offset_x + idx * stride_x];
    y[iy] = alpha * xv + beta * y[iy];
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.