text stringlengths 2.5k 6.39M | kind stringclasses 3 values |
|---|---|
//////////////////////////////
//////////////////////////////
//////////////////////////////
//Helper functions for leaf-nodes
//Accumulate one particle's mass and mass-weighted position into the
//double-single (float2) running sums that form a node's monopole moment.
__device__ void compute_monopole(float2 &mass, float2 &posx,
                                 float2 &posy, float2 &posz,
                                 float4 pos)
{
  const float w = pos.w; //particle mass
  mass = ds_accumulate(mass, w);
  posx = ds_accumulate(posx, w * pos.x);
  posy = ds_accumulate(posy, w * pos.y);
  posz = ds_accumulate(posz, w * pos.z);
}
//Accumulate one particle's mass-weighted second moments into the
//double-single (float2) quadrupole sums.
//NOTE(review): despite the names, oct_q13 accumulates pos.y*pos.z and
//oct_q23 accumulates pos.z*pos.x. compute_scaling later swaps the stored
//Q1.y/Q1.z components, so the downstream pairing is consistent -- confirm
//this ordering is intentional before renaming anything.
__device__ void compute_quadropole(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33,
float2 &oct_q12, float2 &oct_q13, float2 &oct_q23,
float4 pos)
{
oct_q11 = ds_accumulate(oct_q11, pos.w * pos.x*pos.x);
oct_q22 = ds_accumulate(oct_q22, pos.w * pos.y*pos.y);
oct_q33 = ds_accumulate(oct_q33, pos.w * pos.z*pos.z);
oct_q12 = ds_accumulate(oct_q12, pos.w * pos.x*pos.y);
oct_q13 = ds_accumulate(oct_q13, pos.w * pos.y*pos.z);
oct_q23 = ds_accumulate(oct_q23, pos.w * pos.z*pos.x);
}
//Grow the axis-aligned bounding box [r_min, r_max] so it contains pos.
__device__ void compute_bounds(float3 &r_min, float3 &r_max,
                               float4 pos)
{
  r_min.x = fminf(pos.x, r_min.x);
  r_max.x = fmaxf(pos.x, r_max.x);
  r_min.y = fminf(pos.y, r_min.y);
  r_max.y = fmaxf(pos.y, r_max.y);
  r_min.z = fminf(pos.z, r_min.z);
  r_max.z = fmaxf(pos.z, r_max.z);
}
//Non-leaf node helper functions
//Fold a child node's already-computed quadrupole sums (diagonal terms packed
//in Q0.xyz, off-diagonal terms in Q1.xyz) into the parent's double-single
//accumulators.
__device__ void compute_quadropole_node(float2 &oct_q11, float2 &oct_q22, float2 &oct_q33,
                                        float2 &oct_q12, float2 &oct_q13, float2 &oct_q23,
                                        float4 Q0, float4 Q1)
{
  //Diagonal terms come from Q0
  oct_q11 = ds_accumulate(oct_q11, Q0.x);
  oct_q22 = ds_accumulate(oct_q22, Q0.y);
  oct_q33 = ds_accumulate(oct_q33, Q0.z);
  //Off-diagonal terms come from Q1
  oct_q12 = ds_accumulate(oct_q12, Q1.x);
  oct_q13 = ds_accumulate(oct_q13, Q1.y);
  oct_q23 = ds_accumulate(oct_q23, Q1.z);
}
//Merge a child node's bounding box into the running parent bounds.
__device__ void compute_bounds_node(float3 &r_min, float3 &r_max,
                                    float4 node_min, float4 node_max)
{
  r_min.x = fminf(node_min.x, r_min.x);
  r_max.x = fmaxf(node_max.x, r_max.x);
  r_min.y = fminf(node_min.y, r_min.y);
  r_max.y = fmaxf(node_max.y, r_max.y);
  r_min.z = fminf(node_min.z, r_min.z);
  r_max.z = fmaxf(node_max.z, r_max.z);
}
//Computes monopole, quadrupole, bounds and max softening for every leaf node
//(one thread per leaf) and reduces per-block global domain bounds into
//lowerBounds/upperBounds.
//Launch: 1D thread blocks with blockDim.x <= 128 (see shared-memory layout).
extern "C" __global__ void compute_leaf(const int n_leafs,
                                        uint *leafsIdxs,
                                        uint2 *node_bodies,
                                        real4 *body_pos,
                                        real4 *multipole,
                                        real4 *nodeLowerBounds,
                                        real4 *nodeUpperBounds,
                                        float3 *lowerBounds,
                                        float3 *upperBounds,
                                        real4 *body_vel) {
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  const uint id  = bid * blockDim.x + tid;

  //Shared scratch for the min/max reduction: entries [0,128) hold minima,
  //[128,256) hold maxima -> requires blockDim.x <= 128
  volatile __shared__ float3 shmem[256];
  volatile float3 *sh_rmin = (float3*)&shmem[  0];
  volatile float3 *sh_rmax = (float3*)&shmem[128];

  //Neutral bounds: threads without a leaf keep these so they do not affect
  //the reduction below
  float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
  float3 r_max = (float3){-1e10f, -1e10f, -1e10f};

  //BUGFIX: the original code returned early for id >= n_leafs, so those
  //threads never reached the __syncthreads() barriers below while other
  //threads of the same block did -- undefined behaviour. All threads now
  //fall through to the reduction; idle ones contribute neutral bounds.
  if (id < n_leafs)
  {
    //Leaf nodes are intermixed with non-leafs in node_bodies, so translate
    //the dense leaf index into a node id first
    int nodeID = leafsIdxs[id];

    const uint2 bij        = node_bodies[nodeID];
    const uint  firstChild = bij.x & ILEVELMASK;
    const uint  lastChild  = bij.y; //TODO maybe have to increase it by 1

    //Double-single accumulators for the monopole and quadrupole moments
    float2 mass, posx, posy, posz;
    mass = posx = posy = posz = (float2){0.0f, 0.0f};
    float2 oct_q11, oct_q22, oct_q33;
    float2 oct_q12, oct_q13, oct_q23;
    oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f};
    oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f};

    float maxEps = 0.0f; //largest softening of any body in this leaf

    //Loop over the bodies belonging to this leaf
    //(unrolling increases register usage; kept as a plain loop)
    for (int i = firstChild; i < (int)lastChild; i++)
    {
      float4 p = body_pos[i];
      maxEps = fmaxf(body_vel[i].w, maxEps); //softening stored in velocity.w
      compute_monopole(mass, posx, posy, posz, p);
      compute_quadropole(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, p);
      compute_bounds(r_min, r_max, p);
    }

    //Collapse the double-single sums and normalise the centre of mass
    float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x,
                  ds_regularise(posz).x, ds_regularise(mass).x};
    float im = 1.0f / mon.w;
    mon.x *= im;
    mon.y *= im;
    mon.z *= im;

    float4 Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x,
                         ds_regularise(oct_q33).x, maxEps};
    float4 Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x,
                         ds_regularise(oct_q23).x, 0.0f};

    //Store the node properties
    multipole[3*nodeID + 0] = mon; //Monopole
    multipole[3*nodeID + 1] = Q0;  //Quadropole
    multipole[3*nodeID + 2] = Q1;  //Quadropole
    nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f};
    nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 1.0f}; //w == 1 marks a leaf
  }

  //Global domain boundaries via block-wide reduction; every thread deposits
  //its (possibly neutral) bounds
  sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
  sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
  __syncthreads();

  if (blockDim.x >= 128)
  {
    if (tid < 64) { sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax); }
  }
  __syncthreads();

  //NOTE(review): this warp-level phase relies on volatile shared memory and
  //implicit warp synchrony; on Volta+ (independent thread scheduling) it
  //would need __syncwarp() between steps -- confirm target architecture.
  if (tid < 32)
  {
    sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax);
    sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax);
    sh_MinMax(tid, tid +  8, &r_min, &r_max, sh_rmin, sh_rmax);
    sh_MinMax(tid, tid +  4, &r_min, &r_max, sh_rmin, sh_rmax);
    sh_MinMax(tid, tid +  2, &r_min, &r_max, sh_rmin, sh_rmax);
    sh_MinMax(tid, tid +  1, &r_min, &r_max, sh_rmin, sh_rmax);
  }
  __syncthreads();

  //Thread 0 publishes the per-block result
  if (tid == 0)
  {
    //Compiler doesn't allow: volatile float3 = float3
    lowerBounds[bid].x = sh_rmin[0].x; lowerBounds[bid].y = sh_rmin[0].y; lowerBounds[bid].z = sh_rmin[0].z;
    upperBounds[bid].x = sh_rmax[0].x; upperBounds[bid].y = sh_rmax[0].y; upperBounds[bid].z = sh_rmax[0].z;
  }
}
//Function goes level by level (starting from deepest) and computes
//the properties of the non-leaf nodes
//Computes multipole moments and bounds for the non-leaf nodes of one tree
//level by combining the already-computed properties of their children.
//Processed level by level, starting from the deepest.
extern "C" __global__ void compute_non_leaf(const int curLevel, //Level for which we calc
                                            uint *leafsIdxs, //Conversion of ids
                                            uint *node_level_list, //Contains the start nodes of each lvl
                                            uint *n_children, //Reference from node to first child and number of childs
                                            real4 *multipole,
                                            real4 *nodeLowerBounds,
                                            real4 *nodeUpperBounds){
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int idx = bid * (blockDim.x * blockDim.y) + tid;

  const int startNode = node_level_list[curLevel - 1];
  const int endNode   = node_level_list[curLevel];
  if (idx >= (endNode - startNode)) return;

  const int nodeID = leafsIdxs[idx + startNode];

  //Decode child info: low 28 bits = first child id, high 4 bits = count
  const uint childInfo  = n_children[nodeID];
  const uint firstChild = childInfo & 0x0FFFFFFF; //TODO make this a named define?
  const uint nChildren  = (childInfo & 0xF0000000) >> 28; //TODO make this a named define?

  //Double-single accumulators
  float2 mass, posx, posy, posz;
  mass = posx = posy = posz = (float2){0.0f, 0.0f};
  float2 oct_q11, oct_q22, oct_q33;
  float2 oct_q12, oct_q13, oct_q23;
  oct_q11 = oct_q22 = oct_q33 = (float2){0.0f, 0.0f};
  oct_q12 = oct_q13 = oct_q23 = (float2){0.0f, 0.0f};

  float3 r_min = (float3){+1e10f, +1e10f, +1e10f};
  float3 r_max = (float3){-1e10f, -1e10f, -1e10f};
  float maxEps = 0;

  //Fold in every child (1 to 8 of them)
  for (int child = firstChild; child < firstChild + nChildren; child++)
  {
    float4 childMon = multipole[3*child + 0];
    float4 childQ0  = multipole[3*child + 1];
    float4 childQ1  = multipole[3*child + 2];
    maxEps = fmaxf(childQ0.w, maxEps); //Q0.w carries the child's max softening
    compute_monopole(mass, posx, posy, posz, childMon);
    compute_quadropole_node(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23,
                            childQ0, childQ1);
    compute_bounds_node(r_min, r_max, nodeLowerBounds[child], nodeUpperBounds[child]);
  }

  //Save the bounds; w == 0 marks a non-leaf node
  nodeLowerBounds[nodeID] = (float4){r_min.x, r_min.y, r_min.z, 0.0f};
  nodeUpperBounds[nodeID] = (float4){r_max.x, r_max.y, r_max.z, 0.0f};

  //Collapse the double-single sums and normalise by total mass
  float4 mon = {ds_regularise(posx).x, ds_regularise(posy).x,
                ds_regularise(posz).x, ds_regularise(mass).x};
  const float invMass = 1.0f / mon.w;
  mon.x *= invMass;
  mon.y *= invMass;
  mon.z *= invMass;

  float4 Q0 = (float4){ds_regularise(oct_q11).x, ds_regularise(oct_q22).x,
                       ds_regularise(oct_q33).x, maxEps};
  float4 Q1 = (float4){ds_regularise(oct_q12).x, ds_regularise(oct_q13).x,
                       ds_regularise(oct_q23).x, 0.0f};

  multipole[3*nodeID + 0] = mon; //Monopole
  multipole[3*nodeID + 1] = Q0;  //Quadropole1
  multipole[3*nodeID + 2] = Q1;  //Quadropole2
}
//For every tree node: converts the raw quadrupole sums into mass-centred
//moments, derives the geometric box centre/half-size, and stores the squared
//cell-opening criterion. The criterion is stored negated for leaf nodes
//(compute_leaf marked leaves with nodeUpperBounds.w == 1).
extern "C" __global__ void compute_scaling(const int node_count,
                                           real4 corner,
                                           real4 *multipole,
                                           real4 *nodeLowerBounds,
                                           real4 *nodeUpperBounds,
                                           uint *n_children,
                                           uint4 *node_data,
                                           float theta,
                                           real4 *boxSizeInfo,
                                           real4 *boxCenterInfo){
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int idx = bid * (blockDim.x * blockDim.y) + tid;
  if (idx >= node_count) return; //one thread per node

  float4 mon = multipole[3*idx + 0]; //Monopole (com.x, com.y, com.z, mass)
  float4 Q0  = multipole[3*idx + 1]; //Quadropole diagonal terms
  float4 Q1  = multipole[3*idx + 2]; //Quadropole off-diagonal terms

  //Turn the raw mass-weighted second moments into centre-of-mass moments
  const float im = 1.0f / mon.w;
  Q0.x = Q0.x*im - mon.x*mon.x;
  Q0.y = Q0.y*im - mon.y*mon.y;
  Q0.z = Q0.z*im - mon.z*mon.z;
  Q1.x = Q1.x*im - mon.x*mon.y;
  Q1.y = Q1.y*im - mon.y*mon.z;
  Q1.z = Q1.z*im - mon.x*mon.z;

  //Swap the y and z components before storing
  real temp = Q1.y;
  Q1.y = Q1.z;
  Q1.z = temp;
  multipole[3*idx + 1] = Q0; //Quadropole1
  multipole[3*idx + 2] = Q1; //Quadropole2

  const float4 r_min = nodeLowerBounds[idx];
  const float4 r_max = nodeUpperBounds[idx];

  //Geometric centre and half-size of the node's bounding box
  float3 boxCenter;
  boxCenter.x = 0.5*(r_min.x + r_max.x);
  boxCenter.y = 0.5*(r_min.y + r_max.y);
  boxCenter.z = 0.5*(r_min.z + r_max.z);
  float3 boxSize;
  boxSize.x = fmaxf(fabs(boxCenter.x - r_min.x), fabs(boxCenter.x - r_max.x));
  boxSize.y = fmaxf(fabs(boxCenter.y - r_min.y), fabs(boxCenter.y - r_max.y));
  boxSize.z = fmaxf(fabs(boxCenter.z - r_min.z), fabs(boxCenter.z - r_max.z));

  //Distance between the centre of the box and the centre of mass
  float3 s3 = (float3){boxCenter.x - mon.x, boxCenter.y - mon.y, boxCenter.z - mon.z};
  double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z));

  //Full edge length of the box (times 2, boxSize holds half-distances)
  float l = 2*fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z));

  //Store the box geometry; w packs the node's child reference bits
  boxSizeInfo[idx].x = boxSize.x;
  boxSizeInfo[idx].y = boxSize.y;
  boxSizeInfo[idx].z = boxSize.z;
  boxSizeInfo[idx].w = __int_as_float(n_children[idx]);
  boxCenterInfo[idx].x = boxCenter.x;
  boxCenterInfo[idx].y = boxCenter.y;
  boxCenterInfo[idx].z = boxCenter.z;

#ifdef IMPBH
  //Improved Barnes-Hut: opening distance includes the com-to-centre offset
  float cellOp = (l/theta) + s;
#else
  //Minimum distance method
  float cellOp = (l/theta);
#endif
  cellOp = cellOp*cellOp; //traversal compares against squared distances

  //Mark leaf nodes with a negative opening criterion
  if (r_max.w > 0)
  {
    cellOp = -cellOp; //This is a leaf node
  }
  boxCenterInfo[idx].w = cellOp;
}
//Modify the references to the fist body and the number of bodys
//for the leafs
//Also copy the node_data to the group data
//Rewrites the child references of leaf nodes so they point at particle data
//instead of child nodes, then copies node data of every group node into the
//group arrays.
extern "C" __global__ void copyNodeDataToGroupData(const int n_groups,
                                                   const int n_nodes,
                                                   uint4 *node_data,
                                                   uint4 *group_data,
                                                   uint2 *node_bodies,
                                                   int *group_list,
                                                   real4 *boxCenterInfo,
                                                   real4 *boxSizeInfo,
                                                   real4 *groupCenterInfo,
                                                   real4 *groupSizeInfo){
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int idx = bid * (blockDim.x * blockDim.y) + tid;
  if (idx >= n_nodes) return;

  //Leaf nodes are flagged by a non-positive opening criterion
  //(see compute_scaling)
  const float crit = boxCenterInfo[idx].w;
  const bool isLeaf = (crit <= 0);

  //For leaves: replace the child reference in boxSizeInfo.w by the first
  //particle index with (particle count - 1) packed above LEAFBIT
  if (isLeaf)
  {
    const uint2 bodies = node_bodies[idx];
    const uint first = bodies.x & ILEVELMASK;
    const uint count = bodies.y - first;
    boxSizeInfo[idx].w = __int_as_float(first | ((count - 1) << LEAFBIT));
  }

  //The first n_groups threads also fill in the group data
  if (idx >= n_groups) return;

  const int nodeID = group_list[idx];
  real4 sizeInfo = boxSizeInfo[nodeID];
  const uint2 bodies = node_bodies[nodeID];
  const int first = bodies.x & ILEVELMASK;
  const int count = bodies.y - first;
  sizeInfo.w = __int_as_float(first | (count - 1) << CRITBIT);
  groupSizeInfo[idx]   = sizeInfo;
  groupCenterInfo[idx] = boxCenterInfo[nodeID];
}
namespace amgx
{
template<class TConfig> class NBinormalizationScaler;
/**********************************************************************
* HOST FUNCTIONS
*********************************************************************/
// Host reference: beta = B*y and gamma = B^T*x in one CSR pass, where
// B = A.^2 (elementwise square). gamma is cleared first, then squared
// entries are scattered onto their columns while each row sum goes to beta.
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
                          VectorType *x, VectorType *y, VectorType *beta, VectorType *gamma)
{
    // clear the gamma accumulator
    for (int r = 0; r < rows; r++) { gamma[r] = 0.; }

    for (int r = 0; r < rows; r++)
    {
        VectorType rowSum = 0.;
        for (int nz = offsets[r]; nz < offsets[r + 1]; nz++)
        {
            const int c = indices[nz];
            const VectorType a = vals[nz];
            rowSum += (a * a) * y[c];
            gamma[c] += (a * a) * x[r];
        }
        beta[r] = rowSum;
    }
}
// compute Gamma on its own
// Host reference: gamma = B^T*x with B = A.^2 (CSR input).
// gamma is cleared internally before the scatter pass.
template <typename IndexType, typename MatrixType, typename VectorType>
void computeGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
                      VectorType *x, VectorType *gamma)
{
    for (int r = 0; r < rows; r++) { gamma[r] = 0.; }

    for (int r = 0; r < rows; r++)
    {
        const VectorType xr = x[r];
        for (int nz = offsets[r]; nz < offsets[r + 1]; nz++)
        {
            const VectorType a = vals[nz];
            gamma[indices[nz]] += (a * a) * xr;
        }
    }
}
// compute Beta on its own
// Host reference: beta = B*y with B = A.^2 (CSR input); one row sum per row.
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
                     VectorType *y, VectorType *beta)
{
    for (int r = 0; r < rows; r++)
    {
        VectorType rowSum = 0.;
        for (int nz = offsets[r]; nz < offsets[r + 1]; nz++)
        {
            const VectorType a = vals[nz];
            rowSum += (a * a) * y[indices[nz]];
        }
        beta[r] = rowSum;
    }
}
// Host reference scaling: A_ij *= sqrt(|x_i|) * sqrt(|y_j|), i.e. apply
// diag(f) * A * diag(g) in place on a CSR matrix.
template <typename IndexType, typename MatrixType, typename VectorType>
void scaleMatrixHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *values,
                     VectorType *x, VectorType *y)
{
    for (int r = 0; r < rows; r++)
    {
        const VectorType fi = sqrt(fabs(x[r])); // row factor, hoisted out of the inner loop
        for (int nz = offsets[r]; nz < offsets[r + 1]; nz++)
        {
            const VectorType gj = sqrt(fabs(y[indices[nz]])); // column factor
            values[nz] *= fi * gj;
        }
    }
}
/**********************************************************************
* DEVICE FUNCTIONS
*********************************************************************/
// these warp reductions should be able to be replaced with amgx:: functions
// Sum-reduce `val` across a segment of `warpSize` lanes using shuffle-down;
// after the last step lane 0 of the segment holds the segment total (other
// lanes hold partial sums).
// NOTE(review): the template parameter `warpSize` shadows CUDA's built-in
// warpSize variable; here it is the reduction width (a power of two), not
// necessarily 32 -- consider renaming to avoid confusion.
template <int warpSize, typename T>
__device__ __inline__ T warpReduceSum(T val)
{
if (warpSize > 16) { val += utils::shfl_down(val, 16, warpSize); }
utils::syncwarp();
if (warpSize > 8) { val += utils::shfl_down(val, 8, warpSize); }
utils::syncwarp();
if (warpSize > 4) { val += utils::shfl_down(val, 4, warpSize); }
utils::syncwarp();
if (warpSize > 2) { val += utils::shfl_down(val, 2, warpSize); }
utils::syncwarp();
if (warpSize > 1) { val += utils::shfl_down(val, 1, warpSize); }
return val;
}
// Tree-reduce `warpSize` consecutive shared-memory slots; lane 0's slot ends
// up holding the total (other slots hold partials), which is returned.
// FIX: inserted utils::syncwarp() between the steps, consistent with
// warpReduceSum above -- `volatile` alone does not order lane execution
// under independent thread scheduling (Volta+), so the unsynchronized
// version races between the read of vals[lane_id + k] and the write of
// vals[lane_id] in the next step.
template <int warpSize, typename T>
__device__ T warpReduceSumShared(volatile T *vals, const int lane_id)
{
    if (warpSize > 16) { vals[lane_id] += vals[lane_id + 16]; }
    utils::syncwarp();
    if (warpSize > 8) { vals[lane_id] += vals[lane_id + 8]; }
    utils::syncwarp();
    if (warpSize > 4) { vals[lane_id] += vals[lane_id + 4]; }
    utils::syncwarp();
    if (warpSize > 2) { vals[lane_id] += vals[lane_id + 2]; }
    utils::syncwarp();
    if (warpSize > 1) { vals[lane_id] += vals[lane_id + 1]; }
    return vals[lane_id];
}
// compute beta = B*y, gamma = C*x (B = A.^2, C = B^T)
// compute beta = B*y, gamma = C*x (B = A.^2, C = B^T)
// A "vector" of VectorSize threads cooperates on each row: beta[i] gets a
// per-row shuffle reduction, gamma is scattered with atomics because one
// column is touched by many rows. gamma must be zero on entry.
// NOTE(review): the CTASize template parameter is unused here (VectorsPerCTA
// is passed in directly); the sibling kernels derive it as CTASize/VectorSize.
template <int CTASize, int VectorSize, int VectorsPerCTA, typename IndexType, typename MatrixValue, typename VectorValue>
__global__
void computeBetaGammaDevice(IndexType rows, IndexType *offsets, IndexType *indices, MatrixValue *values,
VectorValue *x, VectorValue *y, VectorValue *beta, VectorValue *gamma)
{
const int vectors_per_block = VectorsPerCTA;
const int vector_id = threadIdx.x / VectorSize;
const int lane_id = threadIdx.x % VectorSize;
// grid-stride over rows, one row per vector
for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x)
{
// load start + end pointers (lanes 0 and 1 fetch one offset each)
int row_tmp;
if (lane_id < 2)
{
row_tmp = offsets[i + lane_id];
}
// distribute to all other threads in warp
// NOTE(review): the source-lane index vector_id * VectorSize can exceed 31
// for the upper vectors of a warp -- presumably utils::shfl wraps the lane
// modulo warpSize; confirm against its implementation.
int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask());
int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask());
VectorValue bi(0.); // this lane's partial of the row sum
// strided sweep over the row; utils::any keeps all lanes in the loop
// until every lane has passed row_end
for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize)
{
int col = -1;
VectorValue val(0.);
if (jj < row_end)
{
col = indices[jj];
val = values[jj];
bi += (val * val) * y[col];
utils::atomic_add(&gamma[col], (val * val) * x[i]);
}
}
// reduce over bi: lane 0 of each vector receives the row total
VectorValue bi_s = warpReduceSum<VectorSize>(bi);
if (lane_id == 0)
{
beta[i] = bi_s;
}
}
}
// compute gamma = B^T*x (B = A.^2)
// compute gamma = B^T*x (B = A.^2)
// One vector of VectorSize threads per row; squared entries weighted by x[i]
// are scattered onto their columns with atomics, so the caller must zero
// gamma beforehand (see the thrust::fill in the device setup()).
template <int CTASize, int VectorSize, typename IndexType, typename MatrixValue, typename VectorValue>
__global__
void computeGammaDevice(int rows, IndexType *offsets, IndexType *indices, MatrixValue *values,
VectorValue *x, VectorValue *gamma)
{
const int vectors_per_block = CTASize / VectorSize;
const int vector_id = threadIdx.x / VectorSize;
const int lane_id = threadIdx.x % VectorSize;
// grid-stride over rows, one row per vector
for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x)
{
// load start + end pointers (lanes 0 and 1 fetch one offset each)
int row_tmp;
if (lane_id < 2)
{
row_tmp = offsets[i + lane_id];
}
// distribute to all other threads in warp
int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask());
int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask());
// strided sweep over the row; utils::any keeps lanes converged until done
for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize)
{
int col = -1;
VectorValue val = 0.;
if (jj < row_end)
{
col = indices[jj];
val = values[jj];
utils::atomic_add(&gamma[col], (val * val) * x[i]);
}
}
}
}
// compute beta = B*y (B = A.^2)
// compute beta = B*y (B = A.^2)
// One vector of VectorSize threads per row; each lane accumulates a partial
// sum of squared entries times y[col], then a shuffle reduction leaves the
// row total on lane 0, which writes beta[i].
template <int CTASize, int VectorSize, typename IndexType, typename MatrixValue, typename VectorValue>
__global__
void computeBetaDevice(int rows, IndexType *offsets, IndexType *indices, MatrixValue *values,
VectorValue *y, VectorValue *beta)
{
const int vectors_per_block = CTASize / VectorSize;
const int vector_id = threadIdx.x / VectorSize;
const int lane_id = threadIdx.x % VectorSize;
// grid-stride over rows, one row per vector
for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x)
{
// load start + end pointers (lanes 0 and 1 fetch one offset each)
int row_tmp;
if (lane_id < 2)
{
row_tmp = offsets[i + lane_id];
}
// distribute to all other threads in warp
int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask());
int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask());
VectorValue bi = 0.; // this lane's partial of the row sum
// strided sweep over the row; utils::any keeps lanes converged until done
for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize)
{
int col = -1;
VectorValue val = 0.;
if (jj < row_end)
{
col = indices[jj];
val = values[jj];
bi += (val * val) * y[col];
}
}
// reduce over bi: lane 0 of each vector receives the row total
VectorValue bi_s = warpReduceSum<VectorSize>(bi);
if (lane_id == 0)
{
beta[i] = bi_s;
}
}
}
// Elementwise x[i] = sum1 / beta[i], falling back to 1 when beta[i] is
// (close to) zero to avoid division by zero.
template <typename ValueType>
__global__
void setOneOverVector(int N, ValueType *x, ValueType sum1, ValueType *beta)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    {
        const ValueType b = beta[i];
        x[i] = isNotCloseToZero(b) ? (sum1 / b) : (ValueType)1.;
    }
}
// Unary functor returning x*x; used with thrust::transform_reduce when
// computing the matrix normalisation coefficient.
// FIX: dropped the inheritance from unary_function<T, T> -- std::unary_function
// was deprecated in C++11 and removed in C++17, and its typedefs are not
// required by thrust algorithms.
template<typename T>
struct square_value
{
    __host__ __device__ T operator()(const T &x) const
    {
        return x * x;
    }
};
// functor to generate stddev of vectors
template <typename T>
struct std_f
{
std_f(T x) : v(x) {};
T v;
__host__ __device__
T operator()(const T &x1, const T &x2) const
{
return (x1 * x2 - v) * (x1 * x2 - v);
}
};
// scaled the matrix using diag(F)*A*diag(G), f = sqrt(fabs(x)), g = sqrt(fabs(y))
template <amgx::ScaleDirection direction, typename IndexType, typename MatrixType, typename VectorType>
__global__
void scaleMatrixDevice(int rows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *x, VectorType *y)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
VectorType fi = sqrt(fabs(x[i]));
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType gj = sqrt(fabs(y[j]));
// scale matrix value in place
if (direction == amgx::SCALE)
{
values[jj] *= fi * gj;
}
else
{
values[jj] /= fi * gj;
}
}
}
}
// Accumulates the sum of squared entries of each row into rownorms[i] and of
// each column into colnorms[j] (atomically, since columns are shared across
// rows). Both output arrays must be zero-initialised by the caller; this
// kernel only adds. Used by the commented-out diagnostics in scaleMatrix().
template <typename IndexType, typename MatrixType, typename VectorType>
__global__
void getColRowNorms(int rows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *rownorms, VectorType *colnorms)
{
// grid-stride loop, one thread per row
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType curval = values[jj] * values[jj];
rownorms[i] += curval;
utils::atomic_add(colnorms + j, curval);
}
}
}
// vector constant scale operand
template <typename T>
struct vmul_scale_const
{
T _alpha;
vmul_scale_const(T alpha): _alpha(alpha) {};
__host__ __device__
T operator()(const T &vec) const
{
return vec * _alpha;
}
};
// vector scale operand
template <typename T>
struct vmul_scale
{
vmul_scale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec * sqrt(fabs(alpha)));
}
};
// vector unscale operand
template <typename T>
struct vmul_unscale
{
vmul_unscale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec / sqrt(fabs(alpha)));
}
};
// Setup on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_d &A)
{
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 50;
const ValueTypeB tolerance = 1e-10;
int rows = A.get_num_rows(), cols = A.get_num_cols();
// temporary vectors
VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0);
// perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma)
computeBetaGammaDevice<256, 8, 32> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
x.raw(), y.raw(), beta.raw(), gamma.raw());
ValueTypeB sum1 = cols, sum2 = rows, std1, std2;
// calculate initial std1 and std2
thrust::device_ptr<ValueTypeB> x_ptr(x.raw()), y_ptr(y.raw()), beta_ptr(beta.raw()), gamma_ptr(gamma.raw());
std1 = sqrt(thrust::inner_product(x_ptr, x_ptr + rows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(sum1)) / rows) / sum1;
std2 = sqrt(thrust::inner_product(y_ptr, y_ptr + cols, gamma_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(sum2)) / cols) / sum2;
ValueTypeB std = sqrt(std1 * std1 + std2 * std2);
for (int t = 0; t < max_iters; t++)
{
if (std < tolerance) { break; } // finished
// x = sum1 ./ beta
setOneOverVector <<< 4096, 256>>>(rows, x.raw(), sum1, beta.raw());
// gamma = C*x := B'*x
thrust::fill(gamma.begin(), gamma.end(), ValueTypeB(0.));
computeGammaDevice<256, 8> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw());
// gamma = 1 ./ beta
setOneOverVector <<< 4096, 256>>>(cols, y.raw(), sum2, gamma.raw());
// beta = B*y
computeBetaDevice<256, 8> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw());
//ValueTypeB std_old = std;
std = sqrt(thrust::inner_product(x_ptr, x_ptr + rows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(sum1)) / rows) / sum1;
// print it #, current error, convergence rate
//printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old);
}
//Save scaling vectors for later user, setup complete
left_scale = VVector(beta);
right_scale = VVector(gamma);
this->scaled_before = false;
}
// Matrix Scaling on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_d &A, ScaleDirection scaleOrUnscale)
{
if (left_scale.size() != A.get_num_rows())
{
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
int nrows = A.get_num_rows();
if (scaleOrUnscale == amgx::SCALE)
{
/*VVector rownorms(nrows, 0.0);
VVector colnorms(nrows, 0.0);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
ValueTypeB row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
ValueTypeB row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
ValueTypeB col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
ValueTypeB col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Original Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
// A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y))
// A_ij = f_i * A_ij * g_j
scaleMatrixDevice<amgx::SCALE> <<< 4096, 256>>>(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw());
cudaCheckError();
if (!scaled_before)
{
this->norm_coef = sqrt(thrust::transform_reduce(A.values.begin(), A.values.begin() + A.get_num_nz() * A.get_block_size(), square_value<ValueTypeB>(), 0., thrust::plus<ValueTypeB>()) / A.get_num_rows());
cudaCheckError();
thrust::transform(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.begin(), vmul_scale_const<ValueTypeB>(1. / this->norm_coef) );
thrust::transform(left_scale.begin(), left_scale.end(), left_scale.begin(), vmul_scale_const<ValueTypeB>(sqrt(1. / this->norm_coef)) );
thrust::transform(right_scale.begin(), right_scale.end(), right_scale.begin(), vmul_scale_const<ValueTypeB>(sqrt(1. / this->norm_coef)) );
cudaCheckError();
/*thrust::fill(rownorms.begin(), rownorms.end(), 0.);
thrust::fill(colnorms.begin(), colnorms.end(), 0.);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Scaled Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
}
this->scaled_before = true;
}
else
{
scaleMatrixDevice<amgx::UNSCALE> <<< 4096, 256>>>(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw());
cudaCheckError();
}
}
// Setup on Host
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_h &A)
{
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 10;
const ValueTypeB tolerance = 1e-10;
int rows = A.get_num_rows(), cols = A.get_num_cols();
// temporary vectors
VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0);
// perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma)
computeBetaGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
x.raw(), y.raw(), beta.raw(), gamma.raw());
double std1 = 0., std2 = 0., sum1 = cols, sum2 = rows;
// calculate initial std1 and std2
for (int i = 0; i < rows; i++)
{
std1 += pow(x[i] * beta[i] - sum1, 2.0);
}
std1 = sqrt(std1 / rows) / sum1;
for (int i = 0; i < cols; i++)
{
std2 += pow(y[i] * gamma[i] - sum2, 2.0);
}
std2 = sqrt(std2 / cols) / sum2;
//printf("std1: %lg, std2: %lg\n",std1, std2);
double std_initial = sqrt((std1 * std1) + (std2 * std2));
double std = std_initial;
for (int t = 0; t < max_iters; t++)
{
if (std < tolerance) { break; } // finished
// x = sum1 ./ beta
for (int i = 0; i < rows; i++) { x[i] = ( isNotCloseToZero(beta[i]) ? sum1 / beta[i] : sum1 / epsilon(beta[i]) ); }
// gamma = C*x
computeGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw());
// gamma = 1 ./ beta
for (int i = 0; i < cols; i++) { y[i] = ( isNotCloseToZero(gamma[i]) ? sum2 / gamma[i] : sum2 / epsilon(gamma[i]) ); }
// beta = B*y
computeBetaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw());
//ValueTypeB std_old = std;
std = 0.;
for (int i = 0; i < rows; i++)
{
std += pow(x[i] * beta[i] - sum1, 2.0);
}
std = sqrt(std / rows) / sum1;
// print it #, current error, convergence rate
//printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old);
}
//Save scaling vectors for later user, setup complete
left_scale = VVector(beta);
right_scale = VVector(gamma);
}
// Apply the binormalization scaling computed in setup() to the matrix A
// (host implementation). Requires a prior successful setup(A) on a
// non-distributed matrix.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_h &A, ScaleDirection scaleOrUnscale)
{
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// left_scale is sized in setup(); a size mismatch means setup(A) never ran
if (left_scale.size() != A.get_num_rows())
{
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
// A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y))
// A_ij = f_i * A_ij * g_j
// NOTE(review): scaleOrUnscale is ignored here -- scaling is applied
// unconditionally; confirm whether host-side unscaling is handled elsewhere.
scaleMatrixHost(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw());
}
// Device-side vector scaling: multiply v element-wise by the stored left or
// right scaling vector (SCALE), or by its inverse (UNSCALE).
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
    // Pick the scaling vector that applies to this side of the system.
    VVector *scale_vector = (leftOrRight == amgx::LEFT) ? &this->left_scale : &this->right_scale;
    if (scaleOrUnscale == amgx::SCALE)
    {
        thrust::transform(v.begin(), v.end(), scale_vector->begin(), v.begin(),
                          vmul_scale<ValueTypeB>());
    }
    else
    {
        thrust::transform(v.begin(), v.end(), scale_vector->begin(), v.begin(),
                          vmul_unscale<ValueTypeB>());
    }
}
// Host-side vector scaling is not implemented for this scaler.
// The previous error text ("4x4 block size not supported") was copy-pasted
// from another scaler and misled users; the error code is kept unchanged so
// existing callers that test for it still work.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void NBinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
    FatalError("Binormalization vector scaling not supported on host", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
/****************************************
* Explicit instantiations
*
* Emit the scaler classes for every build configuration enabled via
* AMGX_FORALL_BUILDS so their definitions above are compiled in this TU.
***************************************/
#define AMGX_CASE_LINE(CASE) template class NBinormalizationScaler_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class NBinormalizationScaler<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
// Back-project a raw uint16 depth image (millimeters) into metric x/y/z
// channels via a pinhole model with inverse focal length invF and principal
// point at the image center. One thread per pixel; 2D launch covering w x h.
// Pixels with no depth, depth >= 4m, or within a BOARDER_SIZE margin are set
// to NaN so downstream filters skip them. If xyz != NULL the point is also
// written into an interleaved stride-4 buffer (4th component left untouched).
template<typename T>
__global__ void depth2xyz(uint16_t* d, T* x, T* y,
T* z, T invF, int w, int h, T *xyz)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
T dd = T(d[id])*0.001; // raw depth is in mm; convert to meters
// have a buffer of nan pixels around the border to prohibit
// the filters to do bad stuff at the corners
if ((0.0<dd)&&(dd<4.0)&&( BOARDER_SIZE<idx && idx<w-BOARDER_SIZE
&& BOARDER_SIZE<idy && idy<h-BOARDER_SIZE)){
// in combination with the normal computation this gives the right normals
x[id] = dd*(T(idx)-(w-1.)*0.5)*invF;
y[id] = dd*(T(idy)-(h-1.)*0.5)*invF;
z[id] = dd;
}else{
// 0.0/0.0 deliberately produces NaN as the "invalid pixel" marker
x[id] = 0.0/0.0;
y[id] = 0.0/0.0;
z[id] = 0.0/0.0;
}
if (xyz != NULL){
xyz[id*4] = x[id];
xyz[id*4+1] = y[id];
xyz[id*4+2] = z[id];
}
}
}
// Host wrapper: launch depth2xyz<float> over a w x h image with 16x16 blocks.
void depth2xyzGPU(uint16_t* d, float* x, float* y, float* z,
    float invF, int w, int h, float *xyz=NULL)
{
  const dim3 threads(16,16,1);
  const int bx = w/16 + (w%16>0?1:0);  // ceil(w/16)
  const int by = h/16 + (h%16>0?1:0);  // ceil(h/16)
  const dim3 blocks(bx, by, 1);
  printf("depth2xyzGPU %d x %d",w,h);
  depth2xyz<float><<<blocks, threads>>>(d,x,y,z,invF,w,h,xyz);
  getLastCudaError("depth2xyzGPU() execution failed\n");
}
// Host wrapper: launch depth2xyz<double> over a w x h image with 16x16 blocks.
void depth2xyzGPU(uint16_t* d, double* x, double* y, double* z,
    double invF, int w, int h, double *xyz=NULL)
{
  const dim3 threads(16,16,1);
  const int bx = w/16 + (w%16>0?1:0);  // ceil(w/16)
  const int by = h/16 + (h%16>0?1:0);  // ceil(h/16)
  const dim3 blocks(bx, by, 1);
  printf("depth2xyzGPU %d x %d",w,h);
  depth2xyz<double><<<blocks, threads>>>(d,x,y,z,invF,w,h,xyz);
  getLastCudaError("depth2xyzGPU() execution failed\n");
}
// Convert uint16 depth (millimeters) to floating-point meters and fill a
// per-pixel validity mask. Output buffers may use a row stride (outStep)
// different from the input width w. Zero depth is treated as "no data":
// the value is written as 0.0 and haveData as 0 (the NaN marker used by
// depth2xyz was intentionally abandoned here -- see TODO below).
template<typename T>
__global__ void depth2float(uint16_t* d, T* d_float, uint8_t* haveData, int w, int h, int outStep)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const uint32_t id = idx+w*idy;
const uint32_t idOut = idx+outStep*idy;
if(idx<w && idy<h)
{
T dd = T(d[id])*0.001; // raw depth is in mm; convert to meters
if ((dd>0.0f)){
d_float[idOut] = dd;
haveData[idOut] = 1;
}else{
d_float[idOut] = 0.0; //TODO broke the nan trick! 0.0/0.0;
haveData[idOut] = 0;
}
}
}
// Host wrapper: launch depth2float<double> over a w x h image (16x16 blocks).
void depth2floatGPU(uint16_t* d, double* d_float, uint8_t* haveData,int w, int h, int outStep)
{
  // A negative outStep means "tightly packed": use the image width as stride.
  if (outStep < 0) outStep = w;
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  depth2float<double><<<blocks, threads>>>(d,d_float,haveData,w,h,outStep);
  getLastCudaError("depth2floatGPU() execution failed\n");
}
// Host wrapper: launch depth2float<float> over a w x h image (16x16 blocks).
void depth2floatGPU(uint16_t* d, float* d_float,uint8_t* haveData, int w, int h, int outStep)
{
  // A negative outStep means "tightly packed": use the image width as stride.
  if (outStep < 0) outStep = w;
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  depth2float<float><<<blocks, threads>>>(d,d_float,haveData,w,h,outStep);
  getLastCudaError("depth2floatGPU() execution failed\n");
}
//#define SQRT2 1.4142135623730951
// In-place bilateral-style smoothing of a float depth image using the Kinect
// noise model of Nguyen, Izadi, Lovell (3DIMPVT 2012). Each valid interior
// pixel is replaced by a weighted average of its 8 neighbors; neighbors whose
// depth differs by more than `thresh` are gated out entirely.
__global__ void depthFilter(float* d,
int w, int h)
{
const float thresh = 0.2; // 0.2 m depth gate (comment previously said 5cm)
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
// squared lateral pixel distances of the 8 neighbors (2 = diagonal, 1 = axis);
// initialization requires at least 8 threads per block (launch uses 16x16)
__shared__ float duSq[8];
if(tid==0) duSq[0] = 2.;
if(tid==1) duSq[1] = 1.;
if(tid==2) duSq[2] = 2.;
if(tid==3) duSq[3] = 1.;
if(tid==4) duSq[4] = 2.;
if(tid==5) duSq[5] = 1.;
if(tid==6) duSq[6] = 2.;
if(tid==7) duSq[7] = 1.;
__syncthreads(); // make sure that ys have been cached
// filtering according to noise model from file:///home/jstraub/Downloads/Nguyen2012-ModelingKinectSensorNoise.pdf
if(1<idx && idx<w-1 && 1<idy && idy<h-1)
{
float dd = d[id];
if ((dd>0.0f))
{
float invSigSqL = 1.0f/0.5822699462742343; //for theta=30deg //0.8f + 0.035f*theta/(PI*0.5f -theta);
float invSigSqZ = 1.0f/(0.0012f + 0.0019f*(dd-0.4f)*(dd-0.4f));
invSigSqZ = invSigSqZ*invSigSqZ;
// 8-neighborhood, clockwise starting at the upper-left pixel
float ds[8];
ds[0] = d[idx-1+w*(idy-1)];
ds[1] = d[idx  +w*(idy-1)];
ds[2] = d[idx+1+w*(idy-1)];
ds[3] = d[idx+1+w*idy];
ds[4] = d[idx+1+w*(idy+1)];
ds[5] = d[idx  +w*(idy+1)];
ds[6] = d[idx-1+w*(idy+1)];
ds[7] = d[idx-1+w*idy];
float wSum = 0.0f;
float dwSum = 0.0f;
#pragma unroll
for(int32_t i=0; i<8; ++i)
{
float dz = fabs(ds[i]-dd);
float wi = dz < thresh ? expf(-0.5f*(duSq[i]*invSigSqL + dz*dz*invSigSqZ)) : 0.0f;
wSum += wi;
dwSum += wi*ds[i];
}
// NOTE(review): if every neighbor is gated out, wSum == 0 and this writes
// NaN into the depth image -- confirm downstream code treats NaN as invalid.
d[id] = dwSum/wSum;
}
}
}
// Host wrapper: run the noise-model depth filter over a w x h image in place.
void depthFilterGPU(float* d, int w, int h)
{
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  depthFilter<<<blocks, threads>>>(d, w, h);
  getLastCudaError("depthFilterGPU() execution failed\n");
}
// Back-project an already-metric floating-point depth image into x/y/z
// channels (same layout and NaN/border conventions as depth2xyz, but no
// mm->m conversion and no 4m cutoff).
// NOTE(review): the principal point here is (w/2, h/2) with integer pixel
// offsets, whereas depth2xyz uses ((w-1)/2, (h-1)/2) -- a half-pixel
// inconsistency between the two kernels; confirm which convention is intended.
template<typename T>
__global__ void depth2xyzFloat(T* d, T* x, T* y,
T* z, T invF, int w, int h, T *xyz)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
T dd = d[id]; // depth is already metric here (no unit conversion)
// have a buffer of nan pixels around the border to prohibit
// the filters to do bad stuff at the corners
if ((dd>0.0f)&&( BOARDER_SIZE<idx && idx<w-BOARDER_SIZE
&& BOARDER_SIZE<idy && idy<h-BOARDER_SIZE)){
// in combination with the normal computation this gives the right normals
x[id] = dd*T(idx-w/2)*invF;
y[id] = dd*T(idy-h/2)*invF;
z[id] = dd;
}else{
x[id] = 0.0f/0.0f;
y[id] = 0.0f/0.0f;
z[id] = 0.0f/0.0f;
}
if (xyz != NULL){
xyz[id*4] = x[id];
xyz[id*4+1] = y[id];
xyz[id*4+2] = z[id];
}
}
}
// Host wrapper: launch depth2xyzFloat<float> over a w x h image.
void depth2xyzFloatGPU(float* d, float* x, float* y, float* z,
    float invF, int w, int h, float *xyz=NULL)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  depth2xyzFloat<float><<<blocks, threads>>>(d,x,y,z,invF,w,h,xyz);
  // was "depth2xyzGPU()": the wrong function name made CUDA errors
  // impossible to attribute to this wrapper
  getLastCudaError("depth2xyzFloatGPU() execution failed\n");
}
// Host wrapper: launch depth2xyzFloat<double> over a w x h image.
void depth2xyzFloatGPU(double* d, double* x, double* y, double* z,
    double invF, int w, int h, double *xyz=NULL)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  depth2xyzFloat<double><<<blocks, threads>>>(d,x,y,z,invF,w,h,xyz);
  // was "depth2xyzGPU()": the wrong function name made CUDA errors
  // impossible to attribute to this wrapper
  getLastCudaError("depth2xyzFloatGPU() execution failed\n");
}
// Sign of a: -1 for negative values, +1 otherwise (zero maps to +1).
inline __device__ float signf(float a)
{
    return (a < 0.0f) ? -1.0f : 1.0f;
}
// Absolute value of a float.
inline __device__ float absf(float a)
{
    if (a < 0.0f)
        return -a;
    return a;
}
/*
 * derivatives2normals takes pointers to all derivatives and fills in d_n
 * d_n is a w*h*3 array for all three normal components (x,y,z)
 *
 * PCL-layout variant: normals are written with stride X_STEP at offset
 * X_OFFSET, and the 4th component is set to 1.0f per PCL convention.
 * The normal is the cross product of the normalized u/v tangents, oriented
 * (via signf of the dot product with the view ray through the point) to
 * point away from the sensor, and normalized by folding 1/|n| into sgn.
 */
template<typename T>
__global__ void derivatives2normalsPcl(T* d_x, T* d_y, T* d_z,
T* d_xu, T* d_yu, T* d_zu,
T* d_xv, T* d_yv, T* d_zv,
T* d_n, int w, int h)
{
const uint32_t idx = threadIdx.x + blockIdx.x*blockDim.x;
const uint32_t idy = threadIdx.y + blockIdx.y*blockDim.y;
const uint32_t id = idx+w*idy;
if(idx<w && idy<h)
{
// in combination with the depth to xyz computation this gives the right normals
T xu=d_xu[id];
T yu=d_yu[id];
T zu=d_zu[id];
T xv=d_xv[id];
T yv=d_yv[id];
T zv=d_zv[id];
// normalize both tangent vectors
T invLenu = 1.0f/sqrtf(xu*xu + yu*yu + zu*zu);
xu *= invLenu;
yu *= invLenu;
zu *= invLenu;
T invLenv = 1.0f/sqrtf(xv*xv + yv*yv + zv*zv);
xv *= invLenv;
yv *= invLenv;
zv *= invLenv;
// n = tangent_u x tangent_v
T nx = yu*zv - yv*zu;
T ny = xv*zu - xu*zv;
T nz = xu*yv - xv*yu;
T lenn = sqrtf(nx*nx + ny*ny + nz*nz);
// sgn combines orientation (toward/away from sensor) and 1/|n|
T sgn = signf(d_x[id]*nx + d_y[id]*ny + d_z[id]*nz)/lenn;
// normals are pointing away from where the kinect sensor is
// ie. if pointed at the ceiling the normals will be (0,0,1)
// the coordinate system is aligned with the image coordinates:
// z points outward to the front
// x to the right (when standing upright on the foot and looking from behind)
// y down (when standing upright on the foot and looking from behind)
// if (absf(ny)<0.01f || absf(nx)<0.01f)
//{
// nx=0.0f/0.0f;
// ny=0.0f/0.0f;
// nz=0.0f/0.0f;
//}
// the 4th component is always 1.0f - due to PCL conventions!
d_n[id*X_STEP+X_OFFSET] = nx*sgn;
d_n[id*X_STEP+X_OFFSET+1] = ny*sgn;
d_n[id*X_STEP+X_OFFSET+2] = nz*sgn;
d_n[id*X_STEP+X_OFFSET+3] = 1.0f;
// f!=f only true for nans
//d_nGood[id] = ((nx!=nx) | (ny!=ny) | (nz!=nz))?0:1;
}
}
// Host wrapper: launch derivatives2normalsPcl (float) over a w x h image.
void derivatives2normalsPclGPU(float* d_x, float* d_y, float* d_z,
    float* d_xu, float* d_yu, float* d_zu,
    float* d_xv, float* d_yv, float* d_zv,
    float* d_n, int w, int h)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  derivatives2normalsPcl<<<blocks, threads>>>(d_x,d_y,d_z,
      d_xu,d_yu,d_zu,
      d_xv,d_yv,d_zv,
      d_n,w,h);
  // was "derivatives2normalsGPU()": error text named the wrong wrapper
  getLastCudaError("derivatives2normalsPclGPU() execution failed\n");
}
// Host wrapper: launch derivatives2normalsPcl (double) over a w x h image.
void derivatives2normalsPclGPU(double* d_x, double* d_y, double* d_z,
    double* d_xu, double* d_yu, double* d_zu,
    double* d_xv, double* d_yv, double* d_zv,
    double* d_n, int w, int h)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  derivatives2normalsPcl<<<blocks, threads>>>(d_x,d_y,d_z,
      d_xu,d_yu,d_zu,
      d_xv,d_yv,d_zv,
      d_n,w,h);
  // was "derivatives2normalsGPU()": error text named the wrong wrapper
  getLastCudaError("derivatives2normalsPclGPU() execution failed\n");
}
// Repack a stride-3 xyz image into a stride-8 PCL PointXYZRGB layout:
// components 0-2 = position (cast to float), 3 = 1.0 (PCL padding),
// 4-6 = RGB zeroed, 7 = 1.0.
template<typename T>
__global__ void xyzImg2PointCloudXYZRGB(T* xyzImg, float* pclXYZRGB, int32_t w,
int32_t h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
pclXYZRGB[id*8] = (float) xyzImg[id*3];
pclXYZRGB[id*8+1] = (float) xyzImg[id*3+1];
pclXYZRGB[id*8+2] = (float) xyzImg[id*3+2];
pclXYZRGB[id*8+3] = 1.0f;
pclXYZRGB[id*8+4] = 0.0f;
pclXYZRGB[id*8+5] = 0.0f;
pclXYZRGB[id*8+6] = 0.0f;
pclXYZRGB[id*8+7] = 1.0f;
}
}
// Host wrapper (double). NOTE: the kernel launch below is commented out, so
// this function currently only prints the launch configuration and copies
// nothing -- the conversion is effectively disabled.
void xyzImg2PointCloudXYZRGB(double* d_xyzImg, float* d_pclXYZRGB, int32_t w,
int32_t h)
{
dim3 threads(16,16,1);
dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
printf("%d %d %d\n",blocks.x,blocks.y,blocks.z);
printf("%d %d %d\n",threads.x,threads.y,threads.z);
// xyzImg2PointCloudXYZRGB<double><<<blocks, threads>>>(d_xyzImg,d_pclXYZRGB,w,h);
// getLastCudaError("xyzImg2PointCloudXYZRGB() execution failed\n");
}
// Host wrapper (float). NOTE: the kernel launch below is commented out, so
// this function currently only prints the launch configuration and copies
// nothing -- the conversion is effectively disabled.
void xyzImg2PointCloudXYZRGB(float* d_xyzImg, float* d_pclXYZRGB, int32_t w,
int32_t h)
{
dim3 threads(16,16,1);
dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
printf("%d %d %d\n",blocks.x,blocks.y,blocks.z);
printf("%d %d %d\n",threads.x,threads.y,threads.z);
// xyzImg2PointCloudXYZRGB<float><<<blocks, threads>>>(d_xyzImg,d_pclXYZRGB,w,h);
// getLastCudaError("xyzImg2PointCloudXYZRGB() execution failed\n");
}
// Compute per-pixel surface normals from xyz tangent derivatives (stride-3
// output, no PCL padding). If any input component is NaN the normal is set
// to NaN and d_haveData[id]=0; otherwise n = normalize(tu x tv) oriented to
// point away from the sensor. d_haveData may be NULL (mask skipped).
template<typename T>
__global__ void derivatives2normals(T* d_x, T* d_y, T* d_z,
T* d_xu, T* d_yu, T* d_zu,
T* d_xv, T* d_yv, T* d_zv,
T* d_n, uint8_t* d_haveData, int w, int h)
{
const uint32_t idx = threadIdx.x + blockIdx.x*blockDim.x;
const uint32_t idy = threadIdx.y + blockIdx.y*blockDim.y;
const uint32_t id = idx+w*idy;
if(idx<w && idy<h)
{
// in combination with the depth to xyz computation this gives the right normals
T xu=d_xu[id];
T yu=d_yu[id];
T zu=d_zu[id];
T xv=d_xv[id];
T yv=d_yv[id];
T zv=d_zv[id];
T* d_ni = d_n+id*3;
// x != x is the standard NaN test: propagate invalid pixels
if (xu!=xu || yu!=yu || zu!=zu ||
xv!=xv || yv!=yv || zv!=zv ||
d_x[id]!=d_x[id] || d_y[id]!=d_y[id] || d_z[id]!=d_z[id])
{
if (d_haveData) d_haveData[id] = 0;
d_ni[0] = 0.0/0.0;
d_ni[1] = 0.0/0.0;
d_ni[2] = 0.0/0.0;
}else{
// normalize both tangents, cross them, orient & normalize the result
T invLenu = 1.0f/sqrtf(xu*xu + yu*yu + zu*zu);
xu *= invLenu;
yu *= invLenu;
zu *= invLenu;
T invLenv = 1.0f/sqrtf(xv*xv + yv*yv + zv*zv);
xv *= invLenv;
yv *= invLenv;
zv *= invLenv;
T nx = yu*zv - yv*zu;
T ny = xv*zu - xu*zv;
T nz = xu*yv - xv*yu;
T lenn = sqrtf(nx*nx + ny*ny + nz*nz);
T sgn = signf(d_x[id]*nx + d_y[id]*ny + d_z[id]*nz)/lenn;
// normals are pointing away from where the kinect sensor is
// ie. if pointed at the ceiling the normals will be (0,0,1)
// the coordinate system is aligned with the image coordinates:
// z points outward to the front
// x to the right (when standing upright on the foot and looking from behind)
// y down (when standing upright on the foot and looking from behind)
// if (absf(ny)<0.01f || absf(nx)<0.01f)
//{
// nx=0.0f/0.0f;
// ny=0.0f/0.0f;
// nz=0.0f/0.0f;
//}
//
// STUPID ME
// if(idy > 440)
// { // stupid fix for weird artifact
// nx = 0./0.;
// ny = 0./0.;
// nz = 0./0.;
// }
// the 4th component is always 1.0f - due to PCL conventions!
d_ni[0] = nx*sgn;
d_ni[1] = ny*sgn;
d_ni[2] = nz*sgn;
if(d_haveData)
d_haveData[id] = (sgn!=sgn || (nx!=nx) || (ny!=ny) || (nz!=nz))?0:1;
// f!=f only true for nans
}
// if(idy >= 450 && d_haveData[id] > 0)
// printf("%d,%d: %d n= %f %f %f p= %f %f %f\n",
// idx,idy,d_haveData[id],
// d_ni[0],d_ni[1],d_ni[2],
// d_x[id],d_y[id],d_z[id] );
}
}
// Host wrapper: launch the xyz-derivative normal kernel (float).
void derivatives2normalsGPU(float* d_x, float* d_y, float* d_z,
    float* d_xu, float* d_yu, float* d_zu,
    float* d_xv, float* d_yv, float* d_zv,
    float* d_n, uint8_t* d_haveData, int w, int h)
{
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  derivatives2normals<<<blocks, threads>>>(d_x, d_y, d_z,
                                           d_xu, d_yu, d_zu,
                                           d_xv, d_yv, d_zv,
                                           d_n, d_haveData, w, h);
  getLastCudaError("derivatives2normalsGPU() execution failed\n");
}
// Host wrapper: launch the xyz-derivative normal kernel (double).
void derivatives2normalsGPU(double* d_x, double* d_y, double* d_z,
    double* d_xu, double* d_yu, double* d_zu,
    double* d_xv, double* d_yv, double* d_zv,
    double* d_n, uint8_t* d_haveData, int w, int h)
{
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  derivatives2normals<<<blocks, threads>>>(d_x, d_y, d_z,
                                           d_xu, d_yu, d_zu,
                                           d_xv, d_yv, d_zv,
                                           d_n, d_haveData, w, h);
  getLastCudaError("derivatives2normalsGPU() execution failed\n");
}
// derivative of depth image to surface normals
// Computes normals directly from a depth image and its u/v derivatives by
// analytically back-projecting the tangents through the pinhole model
// (principal point at the image center, inverse focal length invF).
// Output d_n has stride 3; d_haveData (may be NULL) gets the validity mask.
// NOTE(review): the NaN short-circuit below only triggers when d_haveData is
// non-NULL; with d_haveData == NULL a NaN depth silently flows into the
// computation instead -- confirm whether that asymmetry is intended.
template<typename T>
__global__ void derivatives2normals(T* d_z, T* d_zu, T* d_zv, T* d_n, uint8_t*
d_haveData, T invF, int w, int h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
// in combination with the depth to xyz computation this gives the right normals
const T du = (T(idx)-(w-1.)*0.5);
const T dv = (T(idy)-(h-1.)*0.5);
const T z = d_z[id];
T zu = d_zu[id];
T zv = d_zv[id];
T* d_ni = d_n+id*3;
if (d_haveData && (z != z || zu!=zu || zv!=zv))
{
d_haveData[id] = 0;
d_ni[0] = 0.0/0.0;
d_ni[1] = 0.0/0.0;
d_ni[2] = 0.0/0.0;
}else{
// // TODO I cannot find the bug - seems instable numerically somehow?
// analytic tangents of the back-projected surface in u and v direction
T xu = invF *(zu*du + z);
T yu = invF *(zu*dv);
T xv = invF*zv*du;
T yv = invF*(zv*dv + z);
T invLenu = 1.0f/sqrtf(xu*xu + yu*yu + zu*zu);
xu *= invLenu;
yu *= invLenu;
zu *= invLenu;
T invLenv = 1.0f/sqrtf(xv*xv + yv*yv + zv*zv);
xv *= invLenv;
yv *= invLenv;
zv *= invLenv;
T nx = yu*zv - yv*zu;
T ny = xv*zu - xu*zv;
T nz = xu*yv - xv*yu;
T lenn = sqrtf(nx*nx + ny*ny + nz*nz);
// no sign flip here (unlike the xyz variant): only normalization
T sgn = 1./lenn;
// T zInvF = z*invF;
//// T nx = zu*zInvF;
//// T ny = zv*zInvF;
//// T nz = (du*zu+dv*zv+z)*zInvF*invF;
//// T zInvF = z*invF;
//// T nz = -zu/invF; // zInvF is going to be taken care of by normalizer anyway
//// T ny = -zv/invF;
//// T nx = -(du*zu+dv*zv+z);//*invF;
//
// T nx = zu*zInvF; // zInvF is going to be taken care of by normalizer anyway
// T ny = zv*zInvF;
// T nz = -(du*zu+dv*zv+z)*zInvF*invF;
// T lenn = sqrtf(nx*nx + ny*ny + nz*nz);
// if(idx==100 && idy==100) printf("%f %f %f |.|=%f; %f %f %f %f %f %f ;%f %f %f; %f %f \n",
// nx,ny,nz,lenn, zu,zv,du,dv,z,invF,
// du*zu,dv*zv,z,zu/invF, zv/invF);
// T sgn = 1./lenn;
//
// if(idx==100 && idy==100) printf("%f %f %f",nx*sgn,ny*sgn,nz*sgn);
// T sgn = 1.;
// T sgn = signf(d_x[id]*nx + d_y[id]*ny + d_z[id]*nz)/lenn;
// normals are pointing away from where the kinect sensor is
// ie. if pointed at the ceiling the normals will be (0,0,1)
// the coordinate system is aligned with the image coordinates:
// z points outward to the front
// x to the right (when standing upright on the foot and looking from behind)
// y down (when standing upright on the foot and looking from behind)
// if (absf(ny)<0.01f || absf(nx)<0.01f)
//{
// nx=0.0f/0.0f;
// ny=0.0f/0.0f;
// nz=0.0f/0.0f;
//}
d_ni[0] = nx*sgn;
d_ni[1] = ny*sgn;
d_ni[2] = nz*sgn;
if(d_haveData)
d_haveData[id] = (sgn!=sgn || (nx!=nx) || (ny!=ny) || (nz!=nz))?0:1;
// f!=f only true for nans
}
}
}
// Host wrapper: launch the depth-derivative normal kernel (float).
void derivatives2normalsGPU(float* d_z, float* d_zu, float* d_zv, float* d_n,
    uint8_t* d_haveData, float invF, int w, int h)
{
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  derivatives2normals<<<blocks, threads>>>(d_z, d_zu, d_zv, d_n, d_haveData,
                                           invF, w, h);
  getLastCudaError("derivatives2normalsGPU() execution failed\n");
}
// Host wrapper: launch the depth-derivative normal kernel (double).
// Removed the stray printfs of the launch configuration that the float
// overload does not have -- they spammed stdout on every call.
void derivatives2normalsGPU(double* d_z, double* d_zu, double* d_zv, double*
    d_n, uint8_t* d_haveData, double invF, int w, int h)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  derivatives2normals<<<blocks, threads>>>(d_z, d_zu, d_zv,
      d_n,d_haveData,invF,w,h);
  getLastCudaError("derivatives2normalsGPU() execution failed\n");
}
/*
 * derivatives2normals takes pointers to all derivatives and fills in d_n
 * d_n is a w*h*3 array for all three normal components (x,y,z)
 *
 * "Cleaner" variant (PCL layout): additionally rejects pixels whose tangent
 * vectors are longer than 0.04 (i.e. invLen < 1/0.04), marking them NaN --
 * large derivatives indicate a depth discontinuity rather than a surface.
 */
__global__ void derivatives2normalsCleaner(float* d_x, float* d_y, float* d_z,
float* d_xu, float* d_yu, float* d_zu,
float* d_xv, float* d_yv, float* d_zv,
float* d_n, int w, int h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
// in combination with the depth to xyz computation this gives the right normals
float xu=d_xu[id];
float yu=d_yu[id];
float zu=d_zu[id];
float xv=d_xv[id];
float yv=d_yv[id];
float zv=d_zv[id];
float invLenu = 1.0f/sqrtf(xu*xu + yu*yu + zu*zu);
xu *= invLenu;
yu *= invLenu;
zu *= invLenu;
float invLenv = 1.0f/sqrtf(xv*xv + yv*yv + zv*zv);
xv *= invLenv;
yv *= invLenv;
zv *= invLenv;
float nx = 0.;
float ny = 0.;
float nz = 0.;
float sgn = 1.;
// tangent longer than 0.04 => treat as discontinuity, emit NaN normal
if (invLenu < 1./0.04 || invLenv < 1./0.04 )
{
nx=0.0f/0.0f;
ny=0.0f/0.0f;
nz=0.0f/0.0f;
} else {
nx = yu*zv - yv*zu;
ny = xv*zu - xu*zv;
nz = xu*yv - xv*yu;
float lenn = sqrtf(nx*nx + ny*ny + nz*nz);
sgn = signf(d_x[id]*nx + d_y[id]*ny + d_z[id]*nz)/lenn;
// normals are pointing away from where the kinect sensor is
// ie. if pointed at the ceiling the normals will be (0,0,1)
// the coordinate system is aligned with the image coordinates:
// z points outward to the front
// x to the right (when standing upright on the foot and looking from behind)
// y down (when standing upright on the foot and looking from behind)
}
// the 4th component is always 1.0f - due to PCL conventions!
d_n[id*X_STEP+X_OFFSET] = nx*sgn;
d_n[id*X_STEP+X_OFFSET+1] = ny*sgn;
d_n[id*X_STEP+X_OFFSET+2] = nz*sgn;
d_n[id*X_STEP+X_OFFSET+3] = 1.0f;
// f!=f only true for nans
//d_nGood[id] = ((nx!=nx) | (ny!=ny) | (nz!=nz))?0:1;
}
}
// Host wrapper: launch derivatives2normalsCleaner over a w x h image.
void derivatives2normalsCleanerGPU(float* d_x, float* d_y, float* d_z,
    float* d_xu, float* d_yu, float* d_zu,
    float* d_xv, float* d_yv, float* d_zv,
    float* d_n, int w, int h)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  derivatives2normalsCleaner<<<blocks, threads>>>(d_x,d_y,d_z,
      d_xu,d_yu,d_zu,
      d_xv,d_yv,d_zv,
      d_n,w,h);
  // was "derivatives2normalsGPU()": error text named the wrong wrapper
  getLastCudaError("derivatives2normalsCleanerGPU() execution failed\n");
}
// Square of a float.
__device__ inline float square(float a)
{
    return a * a;
}
// Per-pixel weights from the axial Kinect noise model of Nguyen, Izadi,
// Lovell, "Modeling Kinect Sensor Noise for Improved 3D Reconstruction and
// Tracking" (3DIMPVT 2012):
//   sigma_z(z) = 0.0012 + 0.0019 * (z - 0.4)^2   [meters]
// weight = 1 / sigma_z. theta and invF are currently unused (the lateral
// noise term is disabled; see the commented-out variant below).
__global__ void weightsFromCov(float* z, float* weights,
    float theta, float invF, int w, int h)
{
  // according to ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6375037
  // weights are the inverse of the determinant of the covariance of the noise ellipse
  const int idx = threadIdx.x + blockIdx.x*blockDim.x;
  const int idy = threadIdx.y + blockIdx.y*blockDim.y;
  const int id = idx+w*idy;
  if(idx<w && idy<h)
  {
    float z_i = z[id];
    //float ang = theta/180.0f*M_PI;
    // FIX: coefficient was 0.019f, a typo inconsistent with both the paper
    // and the identical model used in depthFilter() above (0.0019f).
    float sigma_z = 0.0012f + 0.0019f * square(z_i-0.4f);
    //float sigma_l = (0.8f + 0.035f*ang/(M_PI*0.5f-ang))*z_i*invF;
    weights[id] = 1.0f/sigma_z;
    //weights[id] = 1.0f/(square(sigma_z)+2.0f*square(sigma_l));
  }
}
// Host wrapper: launch weightsFromCov over a w x h depth image.
void weightsFromCovGPU(float* z, float* weights, float theta, float invF, int
    w, int h)
{
  dim3 threads(16,16,1);
  dim3 blocks(w/16 + (w%16>0?1:0),h/16 + (h%16>0?1:0),1);
  weightsFromCov<<<blocks, threads>>>(z,weights,theta,invF,w,h);
  // was "depth2xyzGPU()": error text named an unrelated function
  getLastCudaError("weightsFromCovGPU() execution failed\n");
}
// Per-pixel weights proportional to the surface area each pixel observes
// at its measured depth: area ~ z^2 / f^2, and f is constant so it is
// dropped from the weight.
__global__ void weightsFromArea(float* z, float* weights, int w, int h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if(idx<w && idy<h)
{
// weight proportional to area that the pixel i observes at distance z_i
// the area = z_i^2/f^2 but f is constant so we dont need divide by it.
weights[id] = square(z[id]);
}
}
// Host wrapper: launch weightsFromArea over a w x h depth image.
void weightsFromAreaGPU(float* z, float* weights, int w, int h)
{
  const dim3 threads(16,16,1);
  const dim3 blocks(w/16 + (w%16>0?1:0), h/16 + (h%16>0?1:0), 1);
  weightsFromArea<<<blocks, threads>>>(z, weights, w, h);
  getLastCudaError("weightsFromAreaGPU() execution failed\n");
}
// rotate point cloud
// In-place rotation of N points stored with stride STEP: p <- M*p, where M
// is the 3x3 matrix d_R read in column-major order (R[0..2] = first column).
// d_R is staged into shared memory once per block; the staging requires
// blockDim.x >= 9 (the launch wrappers use 256 threads).
template<typename T, int32_t STEP>
__global__ void rotatePc_kernel(T* pc, T* d_R, int N)
{
const int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ T R[9];
if(threadIdx.x<9) R[threadIdx.x] = d_R[threadIdx.x];
__syncthreads();
if(id<N)
{
T* pc_i = pc+id*STEP;
T pc_[3];
pc_[0] = pc_i[0];
pc_[1] = pc_i[1];
pc_[2] = pc_i[2];
T pp[3];
// column-major mat-vec: pp = M * pc_
pp[0] = R[0]*pc_[0] + R[3]*pc_[1] + R[6]*pc_[2];
pp[1] = R[1]*pc_[0] + R[4]*pc_[1] + R[7]*pc_[2];
pp[2] = R[2]*pc_[0] + R[5]*pc_[1] + R[8]*pc_[2];
// pc_i[0] = pc_[0];
// pc_i[1] = pc_[1];
// pc_i[2] = pc_[2];
pc_i[0] = pp[0];
pc_i[1] = pp[1];
pc_i[2] = pp[2];
}
}
// Host wrapper: rotate N stride-`step` float points in place by d_R.
// Supported strides: 3 (packed image layout) and 8 (PCL layout).
void rotatePcGPU(float* d_pc, float* d_R, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      rotatePc_kernel<float,3><<<blocks, threads>>>(d_pc,d_R,N);
      break;
    case 8: // pcl
      rotatePc_kernel<float,8><<<blocks, threads>>>(d_pc,d_R,N);
      break;
    default:
      assert(false);
  }
  getLastCudaError("rotatePc_kernel() execution failed\n");
}
// Host wrapper: rotate N stride-`step` double points in place by d_R.
// Supported strides: 3 (packed image layout) and 8 (PCL layout).
void rotatePcGPU(double* d_pc, double* d_R, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      rotatePc_kernel<double,3><<<blocks, threads>>>(d_pc,d_R,N);
      break;
    case 8: // pcl
      rotatePc_kernel<double,8><<<blocks, threads>>>(d_pc,d_R,N);
      break;
    default:
      assert(false);
  }
  checkCudaErrors(cudaDeviceSynchronize());
}
// Gather: out[id] = in[ind[id]] for the first three components of each
// STEP-strided record (components beyond the third are not copied).
template<typename T, int32_t STEP>
__global__ void copyShuffle_kernel(T* in, T* out, uint32_t* ind, int N)
{
    const int id = threadIdx.x + blockIdx.x*blockDim.x;
    if (id >= N) return;
    const T* src = in + ind[id]*STEP;
    T* dst = out + id*STEP;
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
}
// Host wrapper: gather-shuffle N stride-`step` float records (step 3 or 8).
void copyShuffleGPU(float* in, float* out, uint32_t* ind, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      copyShuffle_kernel<float,3><<<blocks, threads>>>(in,out,ind,N);
      break;
    case 8: // pcl
      copyShuffle_kernel<float,8><<<blocks, threads>>>(in,out,ind,N);
      break;
    default:
      assert(false);
  }
  checkCudaErrors(cudaDeviceSynchronize());
}
// Host wrapper: gather-shuffle N stride-`step` double records (step 3 or 8).
void copyShuffleGPU(double* in, double* out, uint32_t* ind, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      copyShuffle_kernel<double,3><<<blocks, threads>>>(in,out,ind,N);
      break;
    case 8: // pcl
      copyShuffle_kernel<double,8><<<blocks, threads>>>(in,out,ind,N);
      break;
    default:
      assert(false);
  }
  getLastCudaError("copyShuffle_kernel() execution failed\n");
}
// Scatter (inverse shuffle): out[ind[id]] = in[id] for the first three
// components of each STEP-strided record. ind must be a permutation or
// concurrent writes to the same target would race.
template<typename T, int32_t STEP>
__global__ void copyShuffleInv_kernel(T* in, T* out, uint32_t* ind, int N)
{
    const int id = threadIdx.x + blockIdx.x*blockDim.x;
    if (id >= N) return;
    const T* src = in + id*STEP;
    T* dst = out + ind[id]*STEP;
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
}
// Host wrapper: scatter-shuffle N stride-`step` float records (step 3 or 8).
void copyShuffleInvGPU(float* in, float* out, uint32_t* ind, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      copyShuffleInv_kernel<float,3><<<blocks, threads>>>(in,out,ind,N);
      break;
    case 8: // pcl
      copyShuffleInv_kernel<float,8><<<blocks, threads>>>(in,out,ind,N);
      break;
    default:
      assert(false);
  }
  checkCudaErrors(cudaDeviceSynchronize());
}
// Host wrapper: scatter-shuffle N stride-`step` double records (step 3 or 8).
void copyShuffleInvGPU(double* in, double* out, uint32_t* ind, int32_t N, int32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 3: // image layout
      copyShuffleInv_kernel<double,3><<<blocks, threads>>>(in,out,ind,N);
      break;
    case 8: // pcl
      copyShuffleInv_kernel<double,8><<<blocks, threads>>>(in,out,ind,N);
      break;
    default:
      assert(false);
  }
  checkCudaErrors(cudaDeviceSynchronize());
}
// Fill a validity mask for N STEP-strided records: the x != x comparison is
// the standard NaN test, so haveData[id] = 0 when the record's first
// component is NaN and 1 otherwise.
template<typename T, uint32_t STEP>
__global__ void haveData_kernel(T* d_x, uint8_t* d_haveData, int32_t N)
{
const uint32_t id = threadIdx.x + blockIdx.x*blockDim.x;
if(id < N)
{
if (d_x[id*STEP] != d_x[id*STEP])
{
d_haveData[id] = 0;
}else{
d_haveData[id] = 1;
}
}
}
// Host wrapper: compute the validity mask for N float records of the given
// stride (1 = vector, 3 = image, 8 = pcl layout).
void haveDataGpu(float* d_x, uint8_t* d_haveData, int32_t N, uint32_t step)
{
  const dim3 threads(256,1,1);
  const dim3 blocks(N/256 + (N%256>0?1:0), 1, 1);
  switch (step)
  {
    case 1: // vector layout
      haveData_kernel<float,1><<<blocks, threads>>>(d_x,d_haveData,N);
      break;
    case 3: // image layout
      haveData_kernel<float,3><<<blocks, threads>>>(d_x,d_haveData,N);
      break;
    case 8: // pcl
      haveData_kernel<float,8><<<blocks, threads>>>(d_x,d_haveData,N);
      break;
    default:
      assert(false);
  }
  checkCudaErrors(cudaDeviceSynchronize());
}
// Host wrapper: compute the validity mask for N double records of the given
// stride (1 = vector, 3 = image, 8 = pcl layout).
void haveDataGpu(double* d_x, uint8_t* d_haveData, int32_t N, uint32_t step)
{
  dim3 threads(256,1,1);
  dim3 blocks(N/256 + (N%256>0?1:0),1,1);
  if(step == 1) // vector layout
    // FIX: previously launched haveData_kernel<double,3> here (copy-paste
    // bug), which read every third element instead of a contiguous vector.
    haveData_kernel<double,1><<<blocks, threads>>>(d_x,d_haveData,N);
  else if(step == 3) // image layout
    haveData_kernel<double,3><<<blocks, threads>>>(d_x,d_haveData,N);
  else if(step == 8) // pcl
    haveData_kernel<double,8><<<blocks, threads>>>(d_x,d_haveData,N);
  else
    assert(false);
  checkCudaErrors(cudaDeviceSynchronize());
}
#include "deviceCode.h"
#include "helpers.h"
#include <optix_device.h>
#include <owl/common/math/random.h>
extern "C" __constant__ LaunchParams optixLaunchParams;
typedef owl::common::LCG<4> Random;
// Per-ray data passed through OptiX trace calls; used for both radiance and
// shadow rays (only the relevant sub-struct is read for each ray type).
struct PRD {
// per-ray RNG for light sampling
Random rng;
// ray parameter of the accepted hit (set by the closest-hit program)
float t_hit;
// geometric and shading normals at the hit
vec3f gn, sn;
// texture coordinate at the hit (parallelogram parameters in x,y)
vec3f texCoord;
struct {
// accumulated shaded color for this radiance ray
vec3f result;
// ray-tree importance used to terminate low-contribution reflections
float importance;
// current recursion depth of the reflection tree
int depth;
} radiance;
struct {
// 1 = fully lit, 0 = fully occluded (set by the any-hit program)
vec3f attenuation;
} shadow;
// recursion limit for the reflection tree
int max_depth;
};
// ---------------------------------------------------------
// Parallelogram
// ---------------------------------------------------------
// Bounds program: compute the AABB of one parallelogram primitive from its
// anchor and edge vectors. Degenerate primitives (zero or infinite area)
// get an inverted box so the BVH builder discards them.
OPTIX_BOUNDS_PROGRAM(Parallelogram)(const void *geomData,
box3f &primBounds,
const int primID)
{
const ParallelogramGeomData &self = *(const ParallelogramGeomData*)geomData;
// v1 and v2 are scaled by 1./length^2. Rescale back to normal for the bounds computation.
const vec3f tv1 = self.v1 / dot( self.v1, self.v1 );
const vec3f tv2 = self.v2 / dot( self.v2, self.v2 );
// the four corners of the parallelogram
const vec3f p00 = self.anchor;
const vec3f p01 = self.anchor + tv1;
const vec3f p10 = self.anchor + tv2;
const vec3f p11 = self.anchor + tv1 + tv2;
const float area = length(cross(tv1, tv2));
if(area > 0.0f && !isinf(area)) {
primBounds.lower = fminf( fminf( p00, p01 ), fminf( p10, p11 ) );
primBounds.upper = fmaxf( fmaxf( p00, p01 ), fmaxf( p10, p11 ) );
} else {
// inverted (empty) box marks the primitive as invalid
primBounds.lower = vec3f( 1e20f);
primBounds.upper = vec3f(-1e20f);
}
}
// Intersection program: ray/plane intersection in object space, then a
// containment test in the parallelogram's parameter space. Because v1/v2
// are pre-scaled by 1/length^2, dot(v1, vi) and dot(v2, vi) are the edge
// parameters directly in [0,1] for points inside the parallelogram.
// The two parameters are passed to the hit programs bit-cast into the
// 32-bit intersection attribute registers.
OPTIX_INTERSECT_PROGRAM(Parallelogram)()
{
const auto &self
= owl::getProgramData<ParallelogramGeomData>();
RadianceRay ray;
ray.origin = optixGetObjectRayOrigin();
ray.direction = optixGetObjectRayDirection();
ray.tmin = optixGetRayTmin();
ray.tmax = optixGetRayTmax();
vec3f n(self.plane);
float dt = dot(ray.direction, n );
// for rays parallel to the plane dt == 0 and t becomes +/-inf,
// which the (tmin, tmax) range test below rejects
float t = (self.plane.w - dot(n, ray.origin))/dt;
if( t > ray.tmin && t < ray.tmax ) {
vec3f p = ray.origin + ray.direction * t;
vec3f vi = p - self.anchor;
float a1 = dot(self.v1, vi);
if(a1 >= 0 && a1 <= 1){
float a2 = dot(self.v2, vi);
if(a2 >= 0 && a2 <= 1){
if( optixReportIntersection(t,0,*(unsigned*)&a1,*(unsigned*)&a2) ) {}
}
}
}
}
// Phong shading for the parallelogram closest-hit program: ambient term,
// per-light jittered (soft) shadows, diffuse + specular contributions, and
// an optional recursive reflection ray gated by importance and max depth.
// Writes the final color into prd.radiance.result.
static
__device__ void phongShade( vec3f p_Kd,
vec3f p_Ka,
vec3f p_Ks,
vec3f p_normal,
float p_phong_exp,
vec3f p_reflectivity )
{
const auto &self
= owl::getProgramData<ParallelogramGeomData>();
PRD &prd = owl::getPRD<PRD>();
RadianceRay ray;
ray.origin = optixGetWorldRayOrigin();
ray.direction = optixGetWorldRayDirection();
ray.tmin = optixGetRayTmin();
ray.tmax = optixGetRayTmax();
vec3f hit_point = ray.origin + prd.t_hit * ray.direction;
// ambient contribution
vec3f result = p_Ka * optixLaunchParams.ambient_light_color;
// compute direct lighting
unsigned int num_lights = optixLaunchParams.numLights;
for(int i = 0; i < num_lights; ++i) {
// set jittered light direction: sample a disk of radius 5 around the
// light position to get soft shadows
BasicLight light = optixLaunchParams.lights[i];
vec3f L = light.pos - hit_point;
vec2f sample = square_to_disk(vec2f(prd.rng(),prd.rng()));
vec3f U, V, W;
create_onb(L, U, V, W);
L += 5.0f * (sample.x * U + sample.y * V);
float Ldist = length(L);
L = (1.0f / Ldist) * L;
float nDl = dot( p_normal, L);
// cast shadow ray (only for front-facing lights)
PRD shadow_prd;
shadow_prd.shadow.attenuation = vec3f(1.f);
if(nDl > 0) {
ShadowRay shadow_ray(hit_point,L,optixLaunchParams.scene_epsilon,Ldist);
owl::traceRay(/*accel to trace against*/optixLaunchParams.world,
/*the ray to trace*/shadow_ray,
/*prd*/shadow_prd);
}
// If not completely shadowed, light the hit point
// NOTE(review): when nDl <= 0 no shadow ray is traced, attenuation stays
// 1, and the line below adds a *negative* diffuse term -- confirm whether
// back-facing lights should be skipped instead.
if(fmaxf(shadow_prd.shadow.attenuation) > 0) {
vec3f Lc = light.color * shadow_prd.shadow.attenuation;
result += p_Kd * nDl * Lc;
// Blinn-Phong specular using the half vector
vec3f H = normalize(L - ray.direction);
float nDh = dot( p_normal, H );
if(nDh > 0) {
float power = pow(nDh, p_phong_exp);
result += p_Ks * power * Lc;
}
}
}
if( fmaxf( p_reflectivity ) > 0 ) {
// ray tree attenuation: scale importance by the reflectivity's luminance
PRD new_prd;
vec3f ntsc_luminance = {0.30, 0.59, 0.11};
new_prd.radiance.importance = prd.radiance.importance * dot( p_reflectivity, ntsc_luminance );
new_prd.radiance.depth = prd.radiance.depth + 1;
// reflection ray, only while contribution and depth budgets allow
if( new_prd.radiance.importance >= 0.01f && new_prd.radiance.depth <= prd.max_depth) {
vec3f R = reflect( ray.direction, p_normal );
RadianceRay refl_ray(hit_point,R,optixLaunchParams.scene_epsilon,1e30f);
owl::traceRay(/*accel to trace against*/optixLaunchParams.world,
/*the ray to trace*/refl_ray,
/*prd*/new_prd,
/*only CH*/OPTIX_RAY_FLAG_DISABLE_ANYHIT);
result += p_reflectivity * new_prd.radiance.result;
}
}
// pass the color back up the tree
prd.radiance.result = result;
}
// Closest-hit program for the parallelogram: reconstruct the hit from the
// intersection attributes, sample the Ka/Kd/Ks textures at the surface
// parameterisation, then hand off to the shared Phong shader.
OPTIX_CLOSEST_HIT_PROGRAM(Parallelogram)()
{
    const auto &self
        = owl::getProgramData<ParallelogramGeomData>();
    PRD &prd = owl::getPRD<PRD>();
    RadianceRay ray;
    ray.origin = optixGetWorldRayOrigin();
    ray.direction = optixGetWorldRayDirection();
    ray.tmin = optixGetRayTmin();
    ray.tmax = optixGetRayTmax();
    // The intersection program packed the two surface parameters as float
    // bit patterns into the 32-bit attribute registers; reinterpret them.
    unsigned attr0 = optixGetAttribute_0();
    unsigned attr1 = optixGetAttribute_1();
    float a1 = *(float*)&attr0;
    float a2 = *(float*)&attr1;
    prd.t_hit = optixGetRayTmax();  // in closest-hit, tmax is the hit distance
    prd.sn = prd.gn = vec3f(self.plane);  // flat primitive: shading normal == geometric normal
    prd.texCoord = vec3f(a1,a2,0);
    vec3f uvw = prd.texCoord; // testing
    float4 sampKa, sampKd, sampKs;
    tex2D(&sampKa,self.ka_map,uvw.x,uvw.y);
    tex2D(&sampKd,self.kd_map,uvw.x,uvw.y);
    tex2D(&sampKs,self.ks_map,uvw.x,uvw.y);
    // Modulate the material constants by the texture samples.
    vec3f ka = self.material.Ka * vec3f( sampKa );
    vec3f kd = self.material.Kd * vec3f( sampKd );
    vec3f ks = self.material.Ks * vec3f( sampKs );
    vec3f world_shading_normal = normalize((vec3f)optixTransformNormalFromObjectToWorldSpace(prd.sn));
    vec3f world_geometric_normal = normalize((vec3f)optixTransformNormalFromObjectToWorldSpace(prd.gn));
    // Flip the normal towards the incoming ray so both faces shade correctly.
    vec3f ffnormal = faceforward( world_shading_normal, -ray.direction, world_geometric_normal );
    phongShade( kd, ka, ks, ffnormal, self.material.phong_exp, self.material.reflectivity );
}
// Any-hit program for shadow rays hitting the parallelogram. The surface is
// opaque: any hit fully blocks the light, so zero the attenuation and end
// traversal immediately.
OPTIX_ANY_HIT_PROGRAM(Parallelogram)()
{
    owl::getPRD<PRD>().shadow.attenuation = vec3f(0.f);
    optixTerminateRay();
}
// ---------------------------------------------------------
// PoolBalls
// ---------------------------------------------------------
// Bounds program for a pool-ball sphere: the AABB is center +/- radius.
// A non-positive or infinite radius yields an "inside-out" box so the BVH
// builder effectively culls the primitive.
OPTIX_BOUNDS_PROGRAM(PoolBall)(const void *geomData,
                               box3f &primBounds,
                               const int primID)
{
    const PoolBallsGeomData &self = *(const PoolBallsGeomData*)geomData;
    const vec3f c( self.center[primID] );
    const vec3f r( self.radius );
    const bool valid = (r.x > 0.0f) && !isinf(r.x);
    primBounds.lower = valid ? (c - r) : vec3f( 1e20f);
    primBounds.upper = valid ? (c + r) : vec3f(-1e20f);
}
// Intersection program: ray/sphere test in object space.
// NOTE(review): the quadratic is written as t^2 + 2bt + c (no a = dot(D,D)
// term), which assumes the object-space ray direction is unit length —
// same assumption as the original code; confirm against the ray setup.
OPTIX_INTERSECT_PROGRAM(PoolBall)()
{
    const int primID = optixGetPrimitiveIndex();
    const auto &self
        = owl::getProgramData<PoolBallsGeomData>();
    RadianceRay ray;
    ray.origin = optixGetObjectRayOrigin();
    ray.direction = optixGetObjectRayDirection();
    ray.tmin = optixGetRayTmin();
    ray.tmax = optixGetRayTmax();
    const vec3f oc = ray.origin - vec3f(self.center[primID]);
    const float r  = self.radius;
    const float b  = dot(oc, ray.direction);
    const float c  = dot(oc, oc) - r*r;
    const float disc = b*b - c;
    if (disc <= 0.0f)
        return;   // ray misses the sphere (tangential grazes ignored, as before)
    const float sdisc = sqrtf(disc);
    // Report the near root first; only offer the far root when the near one
    // was rejected (e.g. it lies outside the [tmin,tmax] interval).
    if (!optixReportIntersection(-b - sdisc, 0))
        optixReportIntersection(-b + sdisc, 0);
}
// Closest-hit program for a pool ball: recompute the object-space normal,
// look up the decal texture through spherical coordinates (rotated by the
// per-ball orientation), then accumulate ambient, per-light diffuse/specular
// and a jittered glossy reflection weighted by a Schlick Fresnel term.
OPTIX_CLOSEST_HIT_PROGRAM(PoolBall)()
{
    const int primID = optixGetPrimitiveIndex();
    const auto &self
        = owl::getProgramData<PoolBallsGeomData>();
    PRD &prd = owl::getPRD<PRD>();
    RadianceRay ray;
    ray.origin = optixGetWorldRayOrigin();
    ray.direction = optixGetWorldRayDirection();
    ray.tmin = optixGetRayTmin();
    ray.tmax = optixGetRayTmax();
    vec3f center = self.center[primID];
    vec3f O = ray.origin - center;
    vec3f D = ray.direction;
    float radius = self.radius;
    linear3f rotation = self.rotation[primID];
    prd.t_hit = optixGetRayTmax();  // in closest-hit, tmax is the hit distance
    // unit surface normal of the sphere at the hit point
    prd.sn = prd.gn = (O + prd.t_hit*D)/radius;
    // rotate the normal into the ball's own frame before the polar mapping
    // so the decal texture follows the ball's orientation
    vec3f polar;
    polar.x = dot(rotation.vx, prd.gn);
    polar.y = dot(rotation.vy, prd.gn);
    polar.z = dot(rotation.vz, prd.gn);
    polar = cart_to_pol(polar);
    // longitude/latitude mapped to [0,1] texture space
    prd.texCoord = vec3f( polar.x*0.5f*M_1_PIf, (polar.y+M_PI_2f)*M_1_PIf, polar.z/radius );
    // intersection vectors
    const vec3f hit = ray.origin + prd.t_hit * ray.direction; // hitpoint
    const vec3f N = normalize((vec3f)optixTransformNormalFromObjectToWorldSpace(prd.sn)); // normal
    const vec3f I = ray.direction; // incident direction
    vec3f R = reflect(I, N); // reflection direction
    float depth = prd.radiance.depth;
    float reflection = fresnel_schlick(-dot(N, I), self.material.fresnel_exponent, self.material.fresnel_minimum, self.material.fresnel_maximum);
    // we need not clamp this subtraction because after fresnel_schlick,
    // reflection is guaranteed to be <= fresnel_maximum
    float oneMinusFresnel = self.material.fresnel_maximum - reflection;
    // ambient (balls without a decal map fall back to a white sample)
    float4 samp;
    if (self.kd_map[primID]) {
        tex2D(&samp, self.kd_map[primID], prd.texCoord.x, prd.texCoord.y);
    } else {
        samp.x=samp.y=samp.z=1.f;
    }
    vec3f kd = self.material.Kd[primID] * vec3f( samp );
    vec3f result = oneMinusFresnel * self.material.Ka * optixLaunchParams.ambient_light_color * kd;
    // direct lighting: no shadow rays here; each light weighted equally
    for (unsigned i=0; i<optixLaunchParams.numLights; ++i)
    {
        vec3f L = normalize(optixLaunchParams.lights[i].pos - hit);
        // diffuse
        vec3f diffuse = 1.0f/optixLaunchParams.numLights * ( max(dot(N, L), 0.0f) * optixLaunchParams.lights[i].color );
        result += oneMinusFresnel * diffuse * kd;
        // specular (Phong lobe around the mirror direction)
        result += powf(max(dot(R, L), 0.0f) , self.material.exponent) * self.material.Ks;
    }
    // reflection
    // if (depth < min(self.material.reflection_max_depth, prd.max_depth))
    if (depth < self.material.reflection_max_depth)
    {
        // phong lobe jittering (glossy reflection; exponent 4096 ~ near-mirror)
        vec3f U, V, W;
        create_onb(R, U, V, W);
        R = sample_phong_lobe(vec2f(prd.rng(), prd.rng()), 4096.0f, U, V, W);
        // avoid directions below surface
        if (dot(R, N) < 0.01f)
            R = W;
        // shoot reflection ray only if its importance exceeds the cutoff;
        // otherwise substitute the constant cutoff color
        vec3f ntsc_luminance = {0.30f, 0.59f, 0.11f};
        float importance = prd.radiance.importance * reflection * dot( self.material.reflection_color, ntsc_luminance );
        vec3f color = self.material.cutoff_color;
        if ( importance > self.material.importance_cutoff ) {
            PRD new_prd;
            new_prd.t_hit = 1e20f;
            new_prd.radiance.depth = depth+1;
            new_prd.radiance.importance = importance;
            RadianceRay refl_ray(hit,R,optixLaunchParams.scene_epsilon,1e30f);
            owl::traceRay(/*accel to trace against*/optixLaunchParams.world,
                          /*the ray to trace*/refl_ray,
                          /*prd*/new_prd,
                          /*only CH*/OPTIX_RAY_FLAG_DISABLE_ANYHIT);
            color = new_prd.radiance.result;
        }
        result += reflection * self.material.reflection_color * color;
    }
    prd.radiance.result = result;
}
// Any-hit program for shadow rays hitting a pool ball. The ball is opaque:
// a single hit fully blocks the light, so zero the attenuation and stop.
OPTIX_ANY_HIT_PROGRAM(PoolBall)()
{
    owl::getPRD<PRD>().shadow.attenuation = vec3f(0.f);
    optixTerminateRay();
}
// Ray-generation program: progressive renderer with one jittered primary ray
// per pixel per launch. The running radiance sum lives in lp.accumBuffer and
// is averaged over (accumID+1) frames only for display.
OPTIX_RAYGEN_PROGRAM(simpleRayGen)()
{
    const RayGenData &self = owl::getProgramData<RayGenData>();
    const auto &lp = optixLaunchParams;
    const vec2i launchIndex = owl::getLaunchIndex();
    const int pixelID = launchIndex.x+self.fbSize.x*launchIndex.y;
    // per-pixel, per-frame RNG seed so jitter patterns differ every frame
    Random rng(pixelID,lp.accumID);
    // jittered sub-pixel position in [0,1)^2 screen space
    const vec2f screen = (vec2f(launchIndex)+vec2f(rng(),rng())) / vec2f(self.fbSize);
    RadianceRay ray;
    ray.origin
        = self.camera.pos;
    ray.direction
        = normalize(self.camera.dir_00
                    + screen.u * self.camera.dir_du
                    + screen.v * self.camera.dir_dv);
    // depth of field: fix the focal-plane target, perturb the origin on the
    // lens disk, then re-aim the ray at the target
    vec3f ray_target = ray.origin + self.camera.focal_scale * ray.direction;
    // lens sampling
    vec2f sample = square_to_disk(make_float2(rng(), rng()));
    ray.origin = ray.origin + self.camera.aperture_radius * ( sample.x * normalize( self.camera.dir_du ) + sample.y * normalize( self.camera.dir_dv ) );
    ray.direction = normalize(ray_target - ray.origin);
    //ray.time = 0.5f;
    vec4f accumColor = 0.f;
    PRD prd;
    prd.t_hit = 1e20f;
    prd.radiance.importance = 1.f;
    // NOTE(review): prd.radiance.depth is not set here — presumably PRD's
    // default initialisation zeroes it; confirm against the PRD definition.
    owl::traceRay(/*accel to trace against*/self.world,
                  /*the ray to trace*/ray,
                  /*prd*/prd,
                  /*only CH*/OPTIX_RAY_FLAG_DISABLE_ANYHIT);
    accumColor += vec4f(prd.radiance.result,1.f);
    // add previous frames' sum, store the new sum, then average for display
    if (lp.accumID > 0)
        accumColor += vec4f(lp.accumBuffer[pixelID]);
    lp.accumBuffer[pixelID] = accumColor;
    accumColor *= (1.f/(lp.accumID+1));
    self.fbPtr[pixelID]
        = owl::make_rgba(vec3f(accumColor));
}
// Miss program: rays that escape the scene return the constant background
// color stored in the miss program's data record.
OPTIX_MISS_PROGRAM(miss)()
{
    PRD &prd = owl::getPRD<PRD>();
    prd.radiance.result = owl::getProgramData<MissProgData>().bg_color;
}
#include "error.h"
#include "f5c.h"
#include "f5cmisc.cuh"
#include "f5cmisc.h"
/* Initialise the CUDA device and pre-allocate all host/device buffers used by
 * the GPU alignment path.
 * - selects the device given by --cuda-dev-id and reports what we run on
 * - zeroes the per-run timing/statistics and load/memory-balance state
 * - allocates per-batch host staging arrays sized by the -K batch size
 * - allocates the fixed-size device arrays and uploads the pore model
 * - unless CUDA_DYNAMIC_MALLOC is defined, sizes and pre-allocates the large
 *   data/scratch arrays from the free device memory (or the user-supplied
 *   --cuda-mem-frac fraction of it)
 */
void init_cuda(core_t* core){
    cuda_exists();
    int32_t cuda_device_num = core->opt.cuda_dev_id;
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, cuda_device_num);
    CUDA_CHK();
    cudaSetDevice(cuda_device_num);
    CUDA_CHK();
    int cuda_device_num_current=-1;
    cudaGetDevice(&cuda_device_num_current);
    CUDA_CHK();
    STDERR("Running on %s (device id %d)",prop.name, cuda_device_num_current);
    //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER);
    //fprintf(stderr,"AVG_EVENTS_PER_KMER %f\n",AVG_EVENTS_PER_KMER_GPU_THRESH);
    //fprintf(stderr,"readfac %f\n",core->opt.cuda_max_readlen);
    // bugfix: the second operand previously re-tested AVG_EVENTS_PER_KMER;
    // both tunables used by the GPU path must be positive
    assert(AVG_EVENTS_PER_KMER>0 && AVG_EVENTS_PER_KMER_GPU_THRESH>0);
    core->cuda = (cuda_data_t*)malloc(sizeof(cuda_data_t));
    MALLOC_CHK(core->cuda);
    // zero the timing accumulators and the balance-advisor state machines
    core->align_kernel_time=0;
    core->align_pre_kernel_time=0;
    core->align_core_kernel_time=0;
    core->align_post_kernel_time=0;
    core->align_cuda_malloc=0;
    core->extra_load_cpu=0;
    core->align_cuda_memcpy=0;
    core->align_cuda_postprocess=0;
    core->align_cuda_preprocess=0;
    core->previous_mem = -1;
    core->previous_count_mem = 0;
    core->previous_load = -1;
    core->previous_count_load = 0;
    int32_t n_bam_rec = core->opt.batch_size;
    //cpu arrays (host-side staging, one slot per read in a batch)
    core->cuda->read_ptr_host = (ptr_t*)malloc(sizeof(ptr_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->read_ptr_host);
    core->cuda->n_events_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->n_events_host);
    core->cuda->event_ptr_host = (ptr_t*)malloc(sizeof(ptr_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->event_ptr_host);
    core->cuda->read_len_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->read_len_host);
    core->cuda->scalings_host = (scalings_t*)malloc(sizeof(scalings_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->scalings_host);
    core->cuda->n_event_align_pairs_host = (int32_t*)malloc(sizeof(int32_t) * n_bam_rec);
    MALLOC_CHK(core->cuda->n_event_align_pairs_host);
    //cuda arrays (fixed-size, one slot per read in a batch)
    if(core->opt.verbosity>1) print_size("read_ptr array",n_bam_rec * sizeof(ptr_t));
    cudaMalloc((void**)&(core->cuda->read_ptr), n_bam_rec * sizeof(ptr_t));
    CUDA_CHK();
    if(core->opt.verbosity>1) print_size("read_lens",n_bam_rec * sizeof(int32_t));
    cudaMalloc((void**)&(core->cuda->read_len), n_bam_rec * sizeof(int32_t));
    CUDA_CHK();
    //n_events
    if(core->opt.verbosity>1) print_size("n_events",n_bam_rec * sizeof(int32_t));
    cudaMalloc((void**)&(core->cuda->n_events), n_bam_rec * sizeof(int32_t));
    CUDA_CHK();
    //event ptr
    if(core->opt.verbosity>1) print_size("event ptr",n_bam_rec * sizeof(ptr_t));
    cudaMalloc((void**)&(core->cuda->event_ptr), n_bam_rec * sizeof(ptr_t));
    CUDA_CHK();
    //scalings : already linear
    if(core->opt.verbosity>1) print_size("Scalings",n_bam_rec * sizeof(scalings_t));
    cudaMalloc((void**)&(core->cuda->scalings), n_bam_rec * sizeof(scalings_t));
    CUDA_CHK();
    cudaMalloc((void**)&(core->cuda->model),
               MAX_NUM_KMER * sizeof(model_t));
    CUDA_CHK();
    if(core->opt.verbosity>1) print_size("n_event_align_pairs",n_bam_rec * sizeof(int32_t));
    cudaMalloc((void**)&(core->cuda->n_event_align_pairs), n_bam_rec * sizeof(int32_t));
    CUDA_CHK();
    //model : upload once; it does not change across batches
    cudaMemcpy(core->cuda->model, core->model, MAX_NUM_KMER * sizeof(model_t),
               cudaMemcpyHostToDevice);
    CUDA_CHK();
#ifndef CUDA_DYNAMIC_MALLOC
    // //dynamic arrays
    //compute the maximum number of bases the GPU arrays can hold
    uint64_t free_mem = 0;
    if(prop.integrated==1){ //in tegra free mem should be sought differently
        free_mem=tegra_freemem(cuda_device_num);
    }
    else{
        free_mem=cuda_freemem(cuda_device_num);
    }
    // bytes required per base across all the per-base/per-event arrays below
    double factor = 1 * sizeof(char) + //read_capacity
                    AVG_EVENTS_PER_KMER * sizeof(event_t) + //event_table_capacity
                    1 * sizeof(model_t) + //model_kmer_cache_capacity
                    (AVG_EVENTS_PER_KMER * 2) * sizeof(AlignedPair) + //event_align_pairs_capacity
                    (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(float) + //bands_capacity
                    (AVG_EVENTS_PER_KMER + 1) * ALN_BANDWIDTH * sizeof(uint8_t) + //trace_capacity
                    (AVG_EVENTS_PER_KMER + 1) * sizeof(EventKmerPair) ; //band_lower_left_capacity
    uint64_t sum_read_len = 0;
    //if unset by user (or set to weird values by user)
    if(core->opt.cuda_mem_frac>=1.0f || core->opt.cuda_mem_frac<=0.0f){
        if(prop.integrated==1){ //for tegra we have to reserve some space for RAM
            sum_read_len= floor(free_mem*TEGRA_MEM_FACTOR/factor);
        }
        else{
            sum_read_len= floor(free_mem*MEM_FACTOR/factor);
        }
    }
    else{
        sum_read_len= floor(free_mem*(core->opt.cuda_mem_frac)/factor);
    }
    core->cuda->max_sum_read_len = sum_read_len;
    uint64_t sum_n_events = floor(sum_read_len * AVG_EVENTS_PER_KMER);
    core->cuda->max_sum_n_events = sum_n_events;
    uint64_t read_capacity = sum_read_len * sizeof(char);
    uint64_t event_table_capacity = sum_n_events * sizeof(event_t);
    uint64_t model_kmer_cache_capacity= sum_read_len * sizeof(model_t);
    uint64_t event_align_pairs_capacity= sum_n_events * 2 * sizeof(AlignedPair);
    uint64_t bands_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(float) ;
    uint64_t trace_capacity = (sum_n_events + sum_read_len) * ALN_BANDWIDTH * sizeof(uint8_t) ;
    uint64_t band_lower_left_capacity = (sum_n_events + sum_read_len) * sizeof(EventKmerPair);
    // sanity check: the planned allocations must fit in the probed free memory
    assert(read_capacity + event_table_capacity + model_kmer_cache_capacity + event_align_pairs_capacity
           + bands_capacity + trace_capacity + band_lower_left_capacity <= free_mem);
    if(core->opt.verbosity>1) print_size("read_capacity",read_capacity);
    if(core->opt.verbosity>1) print_size("event_table_capacity",event_table_capacity);
    if(core->opt.verbosity>1) print_size("model_kmer_cache_capacity",model_kmer_cache_capacity);
    if(core->opt.verbosity>1) print_size("event_align_pairs_capacity",event_align_pairs_capacity);
    if(core->opt.verbosity>1) print_size("bands_capacity",bands_capacity);
    if(core->opt.verbosity>1) print_size("trace_capacity",trace_capacity);
    if(core->opt.verbosity>1) print_size("band_lower_left_capacity",band_lower_left_capacity);
    //input arrays
    cudaMalloc((void**)&(core->cuda->read), read_capacity); //with null char
    CUDA_CHK();
    cudaMalloc((void**)&(core->cuda->event_table), event_table_capacity);
    CUDA_CHK();
    cudaMalloc((void**)&(core->cuda->model_kmer_cache), model_kmer_cache_capacity);
    CUDA_CHK();
    /**allocate output arrays for cuda**/
    cudaMalloc((void**)&(core->cuda->event_align_pairs),event_align_pairs_capacity); //todo : need better huristic
    CUDA_CHK();
    //scratch arrays
    cudaMalloc((void**)&(core->cuda->bands), bands_capacity);
    CUDA_CHK();
    cudaMalloc((void**)&(core->cuda->trace), trace_capacity);
    CUDA_CHK();
    cudaMalloc((void**)&(core->cuda->band_lower_left), band_lower_left_capacity);
    CUDA_CHK();
    STDERR("Max GPU capacity %.1fM bases",core->cuda->max_sum_read_len/(1000.0*1000.0));
    // advise the user if the -B batch size is far from the GPU capacity
    int64_t num_bases_gap = core->cuda->max_sum_read_len - core->opt.batch_size_bases;
    if(num_bases_gap> 0.25*core->cuda->max_sum_read_len){
        INFO("Your GPU can accommodate upto %.1fM bases. You may increase -B option (currently %.1fM) for better performance!",
             core->cuda->max_sum_read_len/(1000.0*1000.0), core->opt.batch_size_bases/((1000.0*1000.0)));
    }
    else if(num_bases_gap< -0.25*core->cuda->max_sum_read_len){
        INFO("Your GPU can accommodate only %.1fM bases. You may decrease -B option (currently %.1fM) for better performance!",
             core->cuda->max_sum_read_len/(1000.0*1000.0), core->opt.batch_size_bases/((1000.0*1000.0)));
    }
#endif
    return;
}
// Release every buffer allocated by init_cuda(): host staging arrays first,
// then the fixed device arrays, then (when pre-allocation is enabled) the
// large data/scratch arrays, and finally the cuda_data_t struct itself.
void free_cuda(core_t* core){
    cuda_data_t *cd = core->cuda;
    // host-side staging arrays
    free(cd->event_ptr_host);
    free(cd->n_events_host);
    free(cd->read_ptr_host);
    free(cd->read_len_host);
    free(cd->scalings_host);
    free(cd->n_event_align_pairs_host);
    // fixed-size device arrays
    cudaFree(cd->read_ptr);
    cudaFree(cd->read_len);
    cudaFree(cd->n_events);
    cudaFree(cd->event_ptr);
    cudaFree(cd->model); //constant memory
    cudaFree(cd->scalings);
    cudaFree(cd->n_event_align_pairs);
#ifndef CUDA_DYNAMIC_MALLOC
    // pre-allocated data/scratch arrays (only exist without dynamic malloc)
    cudaFree(cd->read);
    cudaFree(cd->event_table);
    cudaFree(cd->model_kmer_cache);
    cudaFree(cd->event_align_pairs);
    cudaFree(cd->bands);
    cudaFree(cd->trace);
    cudaFree(cd->band_lower_left);
#endif
    free(cd);
}
#ifdef CPU_GPU_PROC
#ifdef WORK_STEAL
// Work stealing: find a sibling thread with more than STEAL_THRESH_CUDA
// indices left in its range and atomically claim one index from it.
// Returns the stolen index, or -1 when there is nothing worth stealing.
static inline int32_t steal_work(pthread_arg_t* all_args, int32_t n_threads)
{
    int32_t victim = -1;
    for (int32_t t = 0; t < n_threads; ++t) {
        if (all_args[t].endi - all_args[t].starti > STEAL_THRESH_CUDA) {
            victim = t;
            break;
        }
    }
    if (victim < 0) {
        return -1;  // every thread is close to finishing its own range
    }
    // atomically advance the victim's start pointer to claim one index
    int32_t k = __sync_fetch_and_add(&(all_args[victim].starti), 1);
    // the victim may have drained its range between the scan and the claim
    return (k >= all_args[victim].endi) ? -1 : k;
}
#endif
// Worker-thread body for CPU-side alignment of the reads that were not sent
// to the GPU. Processes indices [starti,endi) of args->ultra_long_reads; with
// WORK_STEAL it claims indices atomically and then steals leftovers from
// sibling threads (scheme adapted from kthread).
void* pthread_cusingle(void* voidargs) {
    double realtime1 = realtime();
    int32_t i,j;
    pthread_arg_t* args = (pthread_arg_t*)voidargs;
    db_t* db = args->db;
    core_t* core = args->core;
#ifndef WORK_STEAL
    // static partitioning: walk the pre-assigned range
    for (i = args->starti; i < args->endi; i++) {
        j=args->ultra_long_reads[i];
        args->func(core,db,j);
    }
#else
    pthread_arg_t* all_args = (pthread_arg_t*)(args->all_pthread_args);
    //adapted from kthread
    // dynamic partitioning: claim one index at a time from our own range
    // (starti is advanced atomically because siblings may steal from it)
    for (;;) {
        i = __sync_fetch_and_add(&args->starti, 1);
        if (i >= args->endi) {
            break;
        }
        j=args->ultra_long_reads[i];
        if(core->opt.verbosity>2) fprintf(stderr, "[%s::%.3fsec] Thread (%d-%d) : read %d events %ld assigned\n", __func__,
                realtime() - realtime1, args->starti,args->endi, db->read_len[j], db->et[j].n );
        args->func(core,db,j);
        if(core->opt.verbosity>2) fprintf(stderr, "[%s::%.3fsec] Thread (%d-%d) : read %d events %ld done\n", __func__,
                realtime() - realtime1, args->starti,args->endi, db->read_len[j], db->et[j].n );
    }
    // own range exhausted: steal indices from busy siblings until all done
    while ((i = steal_work(all_args,core->opt.num_thread)) >= 0){
        j=args->ultra_long_reads[i];
        if(core->opt.verbosity>2) fprintf(stderr, "[%s::%.3fsec] Thread (%d-%d) : stolen read %d events %ld assigned\n", __func__,
                realtime() - realtime1, args->starti,args->endi, db->read_len[j], db->et[j].n );
        args->func(core,db,j);
        if(core->opt.verbosity>2) fprintf(stderr, "[%s::%.3fsec] Thread (%d-%d) : stolen read %d events %ld done\n", __func__,
                realtime() - realtime1, args->starti,args->endi, db->read_len[j], db->et[j].n );
    }
#endif
    if(core->opt.verbosity>2) fprintf(stderr, "[%s::%.3fsec] Thread (%d-%d) done\n", __func__,
            realtime() - realtime1, args->starti,args->endi );
    //fprintf(stderr,"Thread %d done\n",(myargs->position)/THREADS);
    pthread_exit(0);
}
// Fan `func` out over core->opt.num_thread worker threads: partition
// [0, n_ultra_long_reads) into contiguous chunks of ceil(n/num_thread),
// launch pthread_cusingle on each chunk, and join all workers before
// returning.
void pthread_cudb(core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t n_ultra_long_reads,void (*func)(core_t*,db_t*,int)){
    int32_t num_thread = core->opt.num_thread;
    pthread_t tids[num_thread];
    pthread_arg_t pt_args[num_thread];
    //todo : check for higher num of threads than the data
    //current works but many threads are created despite
    int32_t step = (n_ultra_long_reads + num_thread - 1) / num_thread;
    int32_t start = 0;
    // fill one argument struct per worker
    for (int32_t t = 0; t < num_thread; t++) {
        pt_args[t].core = core;
        pt_args[t].db = db;
        pt_args[t].starti = start;
        start += step;
        // clamp the last chunk(s) to the end of the index array
        pt_args[t].endi = (start > n_ultra_long_reads) ? n_ultra_long_reads : start;
        pt_args[t].func = func;
        pt_args[t].ultra_long_reads = ultra_long_reads;
#ifdef WORK_STEAL
        // every worker needs to see every other worker's range to steal
        pt_args[t].all_pthread_args = (void *)pt_args;
#endif
    }
    // spawn the workers
    for (int32_t t = 0; t < num_thread; t++) {
        int ret = pthread_create(&tids[t], NULL, pthread_cusingle,
                                 (void*)(&pt_args[t]));
        NEG_CHK(ret);
    }
    // wait for all of them to finish
    for (int32_t t = 0; t < num_thread; t++) {
        int ret = pthread_join(tids[t], NULL);
        NEG_CHK(ret);
    }
}
// Thread entry point that aligns the CPU-assigned reads (indices listed in
// args->ultra_long_reads, count in args->endi). Runs in-line when only one
// thread is configured, otherwise fans out via pthread_cudb. The elapsed
// wall-clock time is reported back through args->ret1.
void* align_cudb(void* voidargs){
    double realtime1 = realtime();
    pthread_arg_t* args = (pthread_arg_t*)voidargs;
    db_t* db = args->db;
    core_t* core = args->core;
    int32_t* reads = args->ultra_long_reads;
    int32_t n_reads = args->endi;
    if (core->opt.num_thread == 1) {
        // single-threaded: no point spawning workers
        for (int j = 0; j < n_reads; j++) {
            align_single(core, db, reads[j]);
        }
    } else {
        pthread_cudb(core, db, reads, n_reads, align_single);
    }
    args->ret1 = realtime() - realtime1;
    if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] %d reads processed on cpu\n", __func__,
            realtime() - realtime1, n_reads);
    return NULL;
}
// Launch align_cudb on a fresh thread so CPU alignment overlaps with the GPU
// batch. *pt_args_ptr must be NULL on entry; the allocated argument struct is
// stored there and is freed later by align_cudb_async_join().
pthread_t align_cudb_async(pthread_arg_t **pt_args_ptr,core_t* core, db_t* db, int32_t* ultra_long_reads, int32_t n_ultra_long_reads) {
    assert(*pt_args_ptr==NULL);
    *pt_args_ptr = (pthread_arg_t *)malloc(sizeof(pthread_arg_t));
    pthread_arg_t *args = *pt_args_ptr;
    MALLOC_CHK(args);
    args->core = core;
    args->db = db;
    args->starti = 0;
    args->endi = n_ultra_long_reads;    // align_cudb reads the count from endi
    args->ultra_long_reads = ultra_long_reads;
    pthread_t tid;
    int ret = pthread_create(&tid, NULL, align_cudb, (void*)args);
    NEG_CHK(ret);
    return tid;
}
// Join the CPU-alignment thread started by align_cudb_async(), harvest the
// elapsed CPU time it recorded in ret1, and free its argument struct.
double align_cudb_async_join(pthread_arg_t *pt_args, pthread_t tid) {
    int ret = pthread_join(tid, NULL);
    NEG_CHK(ret);
    assert(pt_args);
    double time_cpu = pt_args->ret1;
    free(pt_args);
    return time_cpu;
}
//check if we have run out of space in the pre-allocated gpu arrays
// Check whether read i still fits in the pre-allocated GPU arrays given the
// bases (sum_read_len) and events (sum_n_events) already committed to this
// batch. With CUDA_DYNAMIC_MALLOC capacity is never a constraint.
static inline int8_t if_gpu_mem_free(core_t* core, db_t* db, int32_t i,int64_t sum_read_len,int64_t sum_n_events){
#ifdef CUDA_DYNAMIC_MALLOC
    return 1;
#else
    // read length is stored with its NUL terminator, hence the +1
    int8_t read_fits  = (sum_read_len + (db->read_len[i] + 1)) <= (int64_t)core->cuda->max_sum_read_len;
    int8_t event_fits = (sum_n_events + db->et[i].n) <= (core->cuda->max_sum_n_events);
    return (read_fits && event_fits) ? 1 : 0;
#endif
}
//if a suitable candidate to be run on GPU
//ultra-long reads as well as the reads with too many average events per base
//are done of CPU
// Decide whether read i is a suitable GPU candidate. Bad reads (no raw
// samples) never qualify; otherwise the read must be shorter than
// cuda_max_readlen times the batch's mean read length AND not overly
// segmented (events per base below the GPU threshold). Everything else is
// handled on the CPU.
static inline int8_t if_on_gpu(core_t* core, db_t* db, int32_t i){
    if (db->f5[i]->nsample <= 0) {
        return 0;   // bad read
    }
    float max_len = core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec;
    float events_per_base = (db->et[i].n)/(float)(db->read_len[i]);
    return (db->read_len[i] < max_len && events_per_base < AVG_EVENTS_PER_KMER_GPU_THRESH) ? 1 : 0;
}
#define LB_T1_DEC_K 0
#define LB_T2_INC_MAX_LF 1
#define LB_T3_INC_MAX_EPK 2
#define LB_T4_DEC_ULTRA_INC_T_CPU 3
#define LB_T5_DEC_MAX_LF_EPK 4
// Emit a CPU/GPU load-balance tuning hint, but only when the same imbalance
// state has been observed on more than 3 consecutive batches; a state change
// resets the streak counter. State persists in core->previous_load /
// core->previous_count_load.
static inline void load_balance_advisor(core_t* core, int32_t state){
    if(core->previous_load==state){
        core->previous_count_load++;
        if(core->previous_count_load>3){
            switch (core->previous_load) {
                case LB_T1_DEC_K : INFO("%s","CPU got too much work. Try decreasing -K. See http://bit.ly/f5cperf"); break;
                case LB_T2_INC_MAX_LF : INFO("%s","CPU got too much work. Try increasing --cuda-max-lf. See http://bit.ly/f5cperf"); break;
                case LB_T3_INC_MAX_EPK : INFO("%s", "CPU got too much work. Try increasing --cuda-max-epk. See http://bit.ly/f5cperf"); break;
                // bugfix: message previously read "CPU is too weaker than GPUa"
                case LB_T4_DEC_ULTRA_INC_T_CPU : INFO("%s", "CPU got too much work. Try --skip-ultra, decreasing --ultra-thresh or increasing -t. Else, the CPU is much weaker than the GPU and just ignore. See http://bit.ly/f5cperf"); break;
                // bugfix: message previously read "CPU is too powerful than GPU"
                case LB_T5_DEC_MAX_LF_EPK : INFO("%s", "GPU got too much work. Try increasing --ultra-thresh, decreasing --cuda-max-lf, decreasing --cuda-max-epk. Else, the CPU is much more powerful than the GPU and just ignore. See http://bit.ly/f5cperf"); break;
                default :
                    break;
            }
        }
    }
    else{
        core->previous_load=state;
        core->previous_count_load=0;
    }
}
// Compare CPU vs GPU processing time for the finished batch and (through
// load_balance_advisor) nag the user with a tuning hint when one side is
// persistently the bottleneck. The stat_n_* counters classify WHY reads
// landed on the CPU (GPU memory full / too long / too many events).
void load_balance(core_t *core, db_t *db, double cpu_process_time,double gpu_process_time,
        int32_t stat_n_gpu_mem_out, int32_t stat_n_too_many_events, int32_t stat_n_ultra_long_reads,
        float read_array_usage, float event_array_usage){
    fprintf(stderr,"[%s] Processing time : CPU %.1f sec, GPU %.1f sec\n",__func__,cpu_process_time,gpu_process_time);
    // normalised imbalance in [-1,1]: positive means the CPU took longer
    double factor = (cpu_process_time-gpu_process_time)/(cpu_process_time+gpu_process_time);
    if (core->opt.verbosity>1) fprintf(stderr,"[%s] factor %f\n",__func__,factor);
    float thresh_factor=0.3;   // imbalance beyond 30% triggers advice
    float thresh_reads=0.1;    // a cause counts if >10% of batch reads hit it
    //float thresh=0.3;
    //cpu-gpu load balance
    if(factor>thresh_factor){ //cpu too much time
        if (core->opt.verbosity>1) fprintf(stderr,"[%s] CPU too much time\n",__func__);
        // identify the dominant reason reads ended up on the CPU
        if(stat_n_gpu_mem_out > db->n_bam_rec * thresh_reads ||
           stat_n_ultra_long_reads> db->n_bam_rec * thresh_reads ||
           stat_n_too_many_events > db->n_bam_rec * thresh_reads){
            if(stat_n_gpu_mem_out > db->n_bam_rec * thresh_reads){ //gpu run out of memory
                load_balance_advisor(core,LB_T1_DEC_K);
                if (core->opt.verbosity>1) INFO("%s", "CPU did most work. If this message repeats, consider decreasing -K or -B");
            }
            else{
                if(stat_n_ultra_long_reads> db->n_bam_rec * thresh_reads){ //ultra long reads
                    load_balance_advisor(core,LB_T2_INC_MAX_LF);
                    if (core->opt.verbosity>1) INFO("%s","CPU got too many very long reads to process. If this message repeats, consider increasing --cuda-max-lf");
                }
                else{
                    if(stat_n_too_many_events > db->n_bam_rec * thresh_reads){//reads with too many events
                        load_balance_advisor(core,LB_T3_INC_MAX_EPK);
                        if (core->opt.verbosity>1) INFO("%s","CPU got too many over segmented reads to process. If this message repeats, consider increasing --cuda-max-epk");
                    }
                    else{
                        // unreachable: the outer condition guarantees one of
                        // the three counters exceeded the threshold
                        if (core->opt.verbosity>1) INFO("%s", "Impossible exception\n");
                    }
                }
            }
        }
        else{
            // CPU slow despite no dominant overflow cause: likely a weak CPU
            load_balance_advisor(core,LB_T4_DEC_ULTRA_INC_T_CPU);
            if (core->opt.verbosity>1) INFO("%s", "CPU took too much time. If this message repeats, consider using --skip-ultra or decreasing --ultra-thresh or increasing number of CPU threads. If you tried all that means your CPU is not powerful enough to match the GPU and just ignore.");
        }
    }
    else if(factor<-thresh_factor){ //gpu too much time
        load_balance_advisor(core,LB_T5_DEC_MAX_LF_EPK);
        if (core->opt.verbosity>1) INFO("%s", "GPU got too much work. If this message repeats, consider increasing --ultra-thresh or decreasing --cuda-max-lf or decreasing --cuda-max-epk. If you tried all that means your GPU is not powerful enough to match the CPU and just ignore.");
    }
    else{
        if (core->opt.verbosity>1) fprintf(stderr,"[%s] No load balancing required\n",__func__);
    }
}
#define MEM_S1_EPK_INC_MAX_DEC_AVG 0
#define MEM_S2_EPK_DEC_MAX_INC_AVG 1
#define MEM_S3_INC_B 2
#define MEM_S4_INC_K 3
// Emit a GPU-memory tuning hint, but only after the same under-utilisation
// state has persisted for more than 3 consecutive batches. State lives in
// core->previous_mem / core->previous_count_mem.
static inline void memory_balance_advisor(core_t* core, int32_t state){
    // a different state than last batch resets the streak counter
    if(core->previous_mem!=state){
        core->previous_mem=state;
        core->previous_count_mem=0;
        return;
    }
    core->previous_count_mem++;
    if(core->previous_count_mem<=3){
        return; // not persistent enough to bother the user yet
    }
    switch (core->previous_mem) {
        case MEM_S1_EPK_INC_MAX_DEC_AVG : INFO("%s","GPU event arrays under-utilised. Try increasing --max-epk or (decreasing --avg-epk). See http://bit.ly/f5cperf"); break;
        case MEM_S2_EPK_DEC_MAX_INC_AVG : INFO("%s", "GPU read arrays under-utilised. Try decreasing --max-epk or (increasing --avg-epk). See http://bit.ly/f5cperf"); break;
        case MEM_S3_INC_B : INFO("%s","GPU arrays under-utilised. Try increasing -B. See http://bit.ly/f5cperf"); break;
        case MEM_S4_INC_K : INFO("%s","GPU arrays under-utilised. Try increasing -K. See http://bit.ly/f5cperf"); break;
        default :
            break;
    }
}
// Inspect how full the pre-allocated GPU read/event arrays were for this
// batch and (through memory_balance_advisor) suggest -K/-B/--max-epk/
// --avg-epk adjustments when one array type is persistently under-utilised.
void memory_balance(core_t *core, db_t *db, double cpu_process_time,double gpu_process_time,
        int32_t stat_n_gpu_mem_out, int32_t stat_n_too_many_events, int32_t stat_n_ultra_long_reads,
        float read_array_usage, float event_array_usage){
    //float thresh_factor=0.3;
    //float thresh_reads=0.1;
    float thresh=0.3;   // 30% imbalance/under-utilisation threshold
    //memory usage isssues
    //read arrays > 70%
    if(read_array_usage>100-thresh*100){
        //event arrays > 70%
        if(event_array_usage>100-thresh*100){
            // both array families well used: nothing to do
            if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU array usage good\n",__func__);
        }
        else{
            //read arrays-event arrays > 30%
            if(read_array_usage-event_array_usage>thresh*100){
                // reads full but events sparse: too much event capacity reserved
                memory_balance_advisor(core,MEM_S1_EPK_INC_MAX_DEC_AVG);
                if (core->opt.verbosity>1) INFO("%s", "GPU event arrays under-utilised. If this message repeats, consider increasing --max-epk or (decreasing --avg-epk)");
            }
            else{
                if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU array usage alright\n",__func__);
            }
        }
    }
    else{
        //event arrays > 70%
        if(event_array_usage>100-thresh*100){
            //event arrays-read arrays > 30%
            if(event_array_usage-read_array_usage>thresh*100){
                // events full but reads sparse: too much read capacity reserved
                memory_balance_advisor(core,MEM_S2_EPK_DEC_MAX_INC_AVG);
                if (core->opt.verbosity>1) INFO("%s", "GPU read arrays under-utilised. If this message repeats, consider decreasing --max-epk or (increasing --avg-epk)");
            }
            else{
                if (core->opt.verbosity>1) fprintf(stderr,"[%s] GPU array usage alright\n",__func__);
            }
        }
        else{
            // both array families under-utilised: the batch itself was small;
            // figure out which batch limit (-K reads or -B bases) capped it
            //db->n_bam_rec is n, core->opt.batch_size is K
            //db->sum_bases is b, core->opt.batch_size_bases B
            // n<K
            if(db->n_bam_rec < core->opt.batch_size){
                //b<B
                if(db->sum_bases < core->opt.batch_size_bases){
                    // neither limit reached: simply ran out of input
                    if (core->opt.verbosity>1) fprintf(stderr,"[%s] Probably the last batch\n",__func__);
                }
                else{
                    memory_balance_advisor(core,MEM_S3_INC_B);
                    if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --max-bases (-B option)");
                }
            }
            else{
                //b<B
                if(db->sum_bases < core->opt.batch_size_bases){
                    memory_balance_advisor(core,MEM_S4_INC_K);
                    if (core->opt.verbosity>1) INFO("%s", "GPU arrays are not fully utilised. If this message repeats, consider increasing the --batchsize (-K option)");
                }
                else{
                    // both limits reached yet arrays under-used: unexpected
                    if (core->opt.verbosity>1) INFO("%s", "Unhandled exception\n");
                }
            }
        }
    }
}
void align_cuda(core_t* core, db_t* db) {
int32_t i,j;
int32_t n_bam_rec = db->n_bam_rec;
int32_t n_bam_rec_cuda;
double realtime1;
int32_t n_ultra_long_reads=0;
int32_t stat_n_ultra_long_reads=0; //number of ultralong reads processed on CPU
int32_t stat_n_too_many_events=0; //number of reads with high avg events per base that are processed on CPU
int32_t stat_n_gpu_mem_out=0; //number of reads run on CPU due to the GPU memory running out
int32_t sum_bases_cpu=0; //The total sum of bases run on GPU
int32_t ultra_long_reads[n_bam_rec]; //not only ultra-long reads, but also ones with large number of average events per base
//cpu temp pointers
ptr_t* read_ptr_host;
int32_t* n_events_host;
ptr_t* event_ptr_host;
event_t* event_table_host;
AlignedPair* event_align_pairs_host;
int32_t* read_len_host;
scalings_t* scalings_host;
int32_t* n_event_align_pairs_host;
char* read_host;
/**cuda pointers*/
char* read; //flattened reads sequences
ptr_t* read_ptr; //index pointer for flattedned "reads"
int32_t* read_len;
int64_t sum_read_len;
int32_t* n_events;
event_t* event_table;
ptr_t* event_ptr;
int64_t sum_n_events;
scalings_t* scalings;
AlignedPair* event_align_pairs;
int32_t* n_event_align_pairs;
float *bands;
uint8_t *trace;
EventKmerPair* band_lower_left;
model_t* model_kmer_cache;
model_t* model;
realtime1 = realtime();
int32_t cuda_device_num = core->opt.cuda_dev_id;
cudaSetDevice(cuda_device_num);
CUDA_CHK();
read_ptr_host = core->cuda->read_ptr_host;
sum_read_len = 0;
sum_n_events = 0;
//read sequences : needflattening
for (i = 0,j=0; i < n_bam_rec; i++) {
if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){
read_ptr_host[j] = sum_read_len;
sum_read_len += (db->read_len[i] + 1); //with null term
sum_n_events += db->et[i].n;
j++;
}
else{
if (db->f5[i]->nsample>0 && (db->et[i].n)/(float)(db->read_len[i]) < AVG_EVENTS_PER_KMER_MAX){
ultra_long_reads[n_ultra_long_reads]=i;
n_ultra_long_reads++;
sum_bases_cpu += db->read_len[i];
if(db->read_len[i]>=(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)){
stat_n_ultra_long_reads++;
if(core->opt.verbosity>2)STDERR("readlen>=%.0fkbases\t%d",(core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec)/1000,db->read_len[i]);
}
else if ((db->et[i].n)/(float)(db->read_len[i]) >= AVG_EVENTS_PER_KMER_GPU_THRESH){
stat_n_too_many_events++;
}
else{
stat_n_gpu_mem_out++;
}
}
else{ //either bad read or too many avg events per base, even for the CPU
db->n_event_align_pairs[i]=0;
}
}
}
n_bam_rec_cuda = j;
//can start processing on the ultra long reads on the CPU
pthread_arg_t *tmparg=NULL;
pthread_t tid = align_cudb_async(&tmparg,core, db, ultra_long_reads, n_ultra_long_reads);
double realtime_process_start=realtime();
read_len_host = core->cuda->read_len_host;
scalings_host = core->cuda->scalings_host;
n_event_align_pairs_host = core->cuda->n_event_align_pairs_host;
//form the temporary flattened array on host
read_host = (char*)malloc(sizeof(char) * sum_read_len);
MALLOC_CHK(read_host);
sum_read_len = 0;
sum_n_events = 0;
for (i = 0,j=0; i < n_bam_rec; i++) {
if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){
ptr_t idx = read_ptr_host[j];
strcpy(&read_host[idx], db->read[i]);
read_len_host[j]=db->read_len[i];
scalings_host[j]=db->scalings[i];
j++;
sum_read_len += (db->read_len[i] + 1); //with null term
sum_n_events += db->et[i].n;
}
}
//now the events : need flattening
//num events : need flattening
//get the total size and create the pointers
n_events_host = core->cuda->n_events_host;
event_ptr_host = core->cuda->event_ptr_host;
sum_read_len = 0;
sum_n_events = 0;
for (i = 0,j=0; i < n_bam_rec; i++) {
if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){
n_events_host[j] = db->et[i].n;
event_ptr_host[j] = sum_n_events;
sum_n_events += db->et[i].n;
j++;
sum_read_len += (db->read_len[i] + 1); //with null term
}
}
//event table flatten
//form the temporary flattened array on host
event_table_host =
(event_t*)malloc(sizeof(event_t) * sum_n_events);
MALLOC_CHK(event_table_host);
sum_read_len = 0;
sum_n_events = 0;
for (i = 0,j=0; i < n_bam_rec; i++) {
if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){
ptr_t idx = event_ptr_host[j];
memcpy(&event_table_host[idx], db->et[i].event,
sizeof(event_t) * db->et[i].n);
j++;
sum_read_len += (db->read_len[i] + 1); //with null term
sum_n_events += db->et[i].n;
}
}
event_align_pairs_host =
(AlignedPair*)malloc(2 * sum_n_events * sizeof(AlignedPair));
MALLOC_CHK(event_align_pairs_host);
core->align_cuda_preprocess += (realtime() - realtime1);
/** Start GPU mallocs**/
realtime1 = realtime();
read_ptr =core->cuda->read_ptr;
read_len=core->cuda->read_len;
n_events=core->cuda->n_events;
event_ptr=core->cuda->event_ptr;
scalings=core->cuda->scalings;
model = core->cuda->model;
n_event_align_pairs=core->cuda->n_event_align_pairs;
#ifndef CUDA_DYNAMIC_MALLOC
assert(sum_read_len <= (int64_t)core->cuda->max_sum_read_len);
assert(sum_n_events <= (int64_t)(core->cuda->max_sum_n_events));
//fprintf(stderr,"%d %d\n", sum_read_len,sum_n_events);
if(core->opt.verbosity>1) STDERR("%.2f %% of GPU read arrays and %.2f %% of GPU event arrays were utilised",
sum_read_len/(float)(core->cuda->max_sum_read_len)*100 ,
sum_n_events/(float)(core->cuda->max_sum_n_events)*100);
read=(core->cuda->read);
event_table=(core->cuda->event_table);
model_kmer_cache=(core->cuda->model_kmer_cache);
event_align_pairs=(core->cuda->event_align_pairs);
bands=(core->cuda->bands);
trace=(core->cuda->trace);
band_lower_left=(core->cuda->band_lower_left);
cudaMemset(trace,0,sizeof(uint8_t) * (sum_n_events + sum_read_len) * ALN_BANDWIDTH); //initialise the trace array to 0
CUDA_CHK();
#else
if(core->opt.verbosity>1) print_size("read array",sum_read_len * sizeof(char));
cudaMalloc((void**)&read, sum_read_len * sizeof(char)); //with null char
CUDA_CHK();
if(core->opt.verbosity>1) print_size("event table",sum_n_events * sizeof(event_t));
cudaMalloc((void**)&event_table, sum_n_events * sizeof(event_t));
CUDA_CHK();
if(core->opt.verbosity>1) print_size("model kmer cache",sum_read_len * sizeof(model_t));
cudaMalloc((void**)&model_kmer_cache, sum_read_len * sizeof(model_t));
CUDA_CHK();
/**allocate output arrays for cuda**/
if(core->opt.verbosity>1) print_size("event align pairs",2 * sum_n_events *sizeof(AlignedPair));
cudaMalloc((void**)&event_align_pairs,
2 * sum_n_events *
sizeof(AlignedPair)); //todo : need better huristic
CUDA_CHK();
//scratch arrays
size_t sum_n_bands = sum_n_events + sum_read_len; //todo : can be optimised
if(core->opt.verbosity>1) print_size("bands",sizeof(float) * sum_n_bands * ALN_BANDWIDTH);
cudaMalloc((void**)&bands,sizeof(float) * sum_n_bands * ALN_BANDWIDTH);
CUDA_CHK();
if(core->opt.verbosity>1) print_size("trace",sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH);
cudaMalloc((void**)&trace, sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH);
CUDA_CHK();
cudaMemset(trace,0,sizeof(uint8_t) * sum_n_bands * ALN_BANDWIDTH); //initialise the trace array to 0
CUDA_CHK();
if(core->opt.verbosity>1) print_size("band_lower_left",sizeof(EventKmerPair)* sum_n_bands);
cudaMalloc((void**)&band_lower_left, sizeof(EventKmerPair)* sum_n_bands);
CUDA_CHK();
#endif
core->align_cuda_malloc += (realtime() - realtime1);
/* cuda mem copys*/
realtime1 =realtime();
cudaMemcpy(read_ptr, read_ptr_host, n_bam_rec_cuda * sizeof(ptr_t),
cudaMemcpyHostToDevice);
CUDA_CHK();
cudaMemcpy(read, read_host, sum_read_len * sizeof(char),
cudaMemcpyHostToDevice);
CUDA_CHK();
//read length : already linear hence direct copy
cudaMemcpy(read_len, read_len_host, n_bam_rec_cuda * sizeof(int32_t),
cudaMemcpyHostToDevice);
CUDA_CHK();
cudaMemcpy(n_events, n_events_host, n_bam_rec_cuda * sizeof(int32_t),
cudaMemcpyHostToDevice);
CUDA_CHK();
cudaMemcpy(event_ptr, event_ptr_host, n_bam_rec_cuda * sizeof(ptr_t),
cudaMemcpyHostToDevice);
CUDA_CHK();
cudaMemcpy(event_table, event_table_host, sizeof(event_t) * sum_n_events,
cudaMemcpyHostToDevice);
CUDA_CHK();
//can be interleaved
cudaMemcpy(scalings, scalings_host, sizeof(scalings_t) * n_bam_rec_cuda,
cudaMemcpyHostToDevice);
CUDA_CHK();
core->align_cuda_memcpy += (realtime() - realtime1);
uint32_t kmer_size = core->kmer_size;
realtime1 = realtime();
if(n_bam_rec_cuda>0){
/*pre kernel*/
assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH);
dim3 gridpre(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS);
dim3 blockpre(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS);
if(core->opt.verbosity>1) STDERR("grid %d,%d, block %d,%d",gridpre.x,gridpre.y, blockpre.x,blockpre.y);
align_kernel_pre_2d<<<gridpre, blockpre>>>(read,
read_len, read_ptr,
n_events, event_ptr, model, kmer_size, n_bam_rec_cuda, model_kmer_cache, bands,trace,band_lower_left);
cudaDeviceSynchronize();CUDA_CHK();
if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-pre kernel done\n", __func__,
realtime() - realtime1);
}
core->align_kernel_time += (realtime() - realtime1);
core->align_pre_kernel_time += (realtime() - realtime1);
realtime1 = realtime();
/* core kernel*/
if(n_bam_rec_cuda>0){
assert(BLOCK_LEN_BANDWIDTH>=ALN_BANDWIDTH);
dim3 grid1(1,(n_bam_rec_cuda + BLOCK_LEN_READS - 1) / BLOCK_LEN_READS);
dim3 block1(BLOCK_LEN_BANDWIDTH,BLOCK_LEN_READS);
align_kernel_core_2d_shm<<<grid1, block1>>>(read_len, read_ptr, event_table, n_events,
event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache, kmer_size, bands,trace,band_lower_left );
cudaDeviceSynchronize();CUDA_CHK();
if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-core kernel done\n", __func__,
realtime() - realtime1);
}
core->align_kernel_time += (realtime() - realtime1);
core->align_core_kernel_time += (realtime() - realtime1);
realtime1 = realtime();
/*post kernel*/
if(n_bam_rec_cuda>0){
int32_t BLOCK_LEN = core->opt.cuda_block_size;
dim3 gridpost((n_bam_rec_cuda + BLOCK_LEN - 1) / BLOCK_LEN);
dim3 blockpost(BLOCK_LEN);
#ifndef WARP_HACK
align_kernel_post<<<gridpost, blockpost>>>(event_align_pairs, n_event_align_pairs,
read_len, read_ptr, event_table, n_events,
event_ptr,scalings, n_bam_rec_cuda, model_kmer_cache, kmer_size, bands,trace,band_lower_left );
#else
assert(BLOCK_LEN>=32);
dim3 grid1post((n_bam_rec_cuda + (BLOCK_LEN/32) - 1) / (BLOCK_LEN/32));
if(core->opt.verbosity>1) STDERR("grid new %d",grid1post.x);
align_kernel_post<<<grid1post, blockpost>>>(event_align_pairs, n_event_align_pairs,
read_len, read_ptr, event_table, n_events,
event_ptr, scalings, n_bam_rec_cuda, model_kmer_cache, kmer_size, bands,trace,band_lower_left );
#endif
cudaDeviceSynchronize();CUDA_CHK();
if(core->opt.verbosity>1) fprintf(stderr, "[%s::%.3fsec] align-post kernel done\n", __func__,
realtime() - realtime1);
}
core->align_kernel_time += (realtime() - realtime1);
core->align_post_kernel_time += (realtime() - realtime1);
//fprintf(stderr,"readlen %d,n_events %d\n",db->read_len[i],n_event_align_pairs);
#ifdef CUDA_DEBUG
cudaDeviceSynchronize();
CUDA_CHK();
#endif
/** copyback ans**/
realtime1 = realtime();
cudaMemcpy(n_event_align_pairs_host, n_event_align_pairs,
n_bam_rec_cuda * sizeof(int32_t), cudaMemcpyDeviceToHost);
CUDA_CHK();
cudaMemcpy(event_align_pairs_host, event_align_pairs,
2 * sum_n_events * sizeof(AlignedPair), cudaMemcpyDeviceToHost);
CUDA_CHK();
core->align_cuda_memcpy += (realtime() - realtime1);
realtime1 = realtime();
#ifdef CUDA_DYNAMIC_MALLOC
cudaFree(read); //with null char
cudaFree(event_table);
cudaFree(event_align_pairs);
cudaFree(bands);
cudaFree(trace);
cudaFree(band_lower_left);
cudaFree(model_kmer_cache);
#endif
core->align_cuda_malloc += (realtime() - realtime1);
/** post work**/
realtime1 = realtime();
//copy back
sum_read_len = 0;
sum_n_events = 0;
for (i = 0,j=0; i < n_bam_rec; i++) {
if(if_on_gpu(core, db, i) && if_gpu_mem_free(core, db, i,sum_read_len,sum_n_events)){
ptr_t idx = event_ptr_host[j];
db->n_event_align_pairs[i]=n_event_align_pairs_host[j];
#ifdef REVERSAL_ON_CPU
int c;
int end = db->n_event_align_pairs[i] - 1;
AlignedPair* out_2= db->event_align_pairs[i];
AlignedPair* in_2= &event_align_pairs_host[idx * 2];
for (c = 0; c < db->n_event_align_pairs[i] ; c++) {
out_2[c].ref_pos = in_2[end].ref_pos;
out_2[c].read_pos = in_2[end].read_pos;
end--;
}
#else
memcpy(db->event_align_pairs[i], &event_align_pairs_host[idx * 2],
sizeof(AlignedPair) * db->n_event_align_pairs[i]);
#endif
j++;
sum_read_len += (db->read_len[i] + 1); //with null term
sum_n_events += db->et[i].n;
}
}
//free the temp arrays on host
free(read_host);
free(event_table_host);
free(event_align_pairs_host);
core->align_cuda_postprocess += (realtime() - realtime1);
double gpu_process_time = realtime()-realtime_process_start;
realtime1 = realtime();
double cpu_process_time = align_cudb_async_join(tmparg,tid);
core->extra_load_cpu += (realtime() - realtime1);
if(core->opt.verbosity>1) {
fprintf(stderr, "[%s::%.3fsec] CPU extra processing done (>=%.0fkbases:%d|>=%.1fevents:%d|gpu_mem_out:%d)\n",
__func__,realtime() - realtime1,((core->opt.cuda_max_readlen * db->sum_bases/(float)db->n_bam_rec))/1000,
stat_n_ultra_long_reads, AVG_EVENTS_PER_KMER_GPU_THRESH,stat_n_too_many_events, stat_n_gpu_mem_out);
}
STDERR("Load : CPU %d entries (%.1fM bases), GPU %d entries (%.1fM bases)",
n_bam_rec-n_bam_rec_cuda, (float)sum_bases_cpu/(1000*1000),n_bam_rec_cuda, (float)sum_read_len/(1000*1000));
load_balance(core,db,cpu_process_time,gpu_process_time,stat_n_gpu_mem_out,stat_n_too_many_events, stat_n_ultra_long_reads,
sum_read_len/(float)(core->cuda->max_sum_read_len)*100 ,
sum_n_events/(float)(core->cuda->max_sum_n_events)*100);
memory_balance(core,db,cpu_process_time,gpu_process_time,stat_n_gpu_mem_out,stat_n_too_many_events, stat_n_ultra_long_reads,
sum_read_len/(float)(core->cuda->max_sum_read_len)*100 ,
sum_n_events/(float)(core->cuda->max_sum_n_events)*100);
}
#endif
/******************************************************************************
 * Test of BlockAdjacentDifference utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <cub/block/block_adjacent_difference.cuh>
#include <cub/util_allocator.cuh>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/mismatch.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/shuffle.h>
#include <thrust/sort.h>
#include <thrust/tabulate.h>
#include <limits>
#include <memory>
#include <typeinfo>
#include "test_util.h"
using namespace cub;
/**
* \brief Generates integer sequence \f$S_n=i(i-1)/2\f$.
*
* The adjacent difference of this sequence produce consecutive numbers:
* \f[
* p = \frac{i(i - 1)}{2} \\
* n = \frac{(i + 1) i}{2} \\
* n - p = i \\
* \frac{(i + 1) i}{2} - \frac{i (i - 1)}{2} = i \\
* (i + 1) i - i (i - 1) = 2 i \\
* (i + 1) - (i - 1) = 2 \\
* 2 = 2
* \f]
*/
// Functor producing S(i) = i*(i-1)/2, shifted by a fixed offset. As derived
// in the comment above, the adjacent difference of this sequence is the
// identity sequence 0, 1, 2, ...
template <typename DestT>
struct TestSequenceGenerator
{
  std::size_t offset;

  TestSequenceGenerator(std::size_t offset = 0)
      : offset(offset)
  {}

  // Evaluate the sequence at position `index + offset`.
  template <typename SourceT>
  __device__ __host__ DestT operator()(SourceT index) const
  {
    const SourceT i = index + static_cast<SourceT>(offset);
    return static_cast<DestT>(i * (i - SourceT(1)) / SourceT(2));
  }
};
// Two-field user-defined type used to exercise BlockAdjacentDifference with
// a non-arithmetic element type.
struct CustomType
{
  unsigned int key;   // first component; differenced independently of value
  unsigned int value; // second component

  // Zero-initialize both fields.
  __device__ __host__ CustomType()
      : key(0)
      , value(0)
  {}

  // Construct from explicit field values.
  __device__ __host__ CustomType(unsigned int key, unsigned int value)
      : key(key)
      , value(value)
  {}
};
// Two CustomType instances are equal iff both fields match.
__device__ __host__ bool operator==(const CustomType& lhs,
                                    const CustomType& rhs)
{
  if (lhs.key != rhs.key)
  {
    return false;
  }
  return lhs.value == rhs.value;
}
// Inequality: true when either field differs.
__device__ __host__ bool operator!=(const CustomType& lhs,
                                    const CustomType& rhs)
{
  return lhs.key != rhs.key || lhs.value != rhs.value;
}
// Component-wise difference; unsigned wrap-around is intentional and relied
// upon by the tests below.
__device__ __host__ CustomType operator-(const CustomType& lhs,
                                         const CustomType& rhs)
{
  CustomType result(lhs.key - rhs.key, lhs.value - rhs.value);
  return result;
}
// Binary difference functor handed to BlockAdjacentDifference; forwards to
// the element type's operator-.
struct CustomDifference
{
  // Take the operands by const reference: the functor never mutates them,
  // and const references additionally accept temporaries and const data
  // (the original non-const references did not). The call operator is const
  // for the same reason.
  template <typename DataType>
  __device__ DataType operator()(const DataType &lhs, const DataType &rhs) const
  {
    return lhs - rhs;
  }
};
// Single-block kernel driving SubtractLeftPartialTile /
// SubtractRightPartialTile: only the first `valid_items` elements take part
// in the difference; the remainder must pass through unchanged.
// Expects a <<<1, ThreadsInBlock>>> launch with
// ThreadsInBlock * ItemsPerThread elements in `input` and `output`.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void LastTileTestKernel(const DataType *input,
                                   DataType *output,
                                   unsigned int valid_items)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];
  DataType thread_result[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = input[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    BlockAdjacentDifferenceT(temp_storage).SubtractLeftPartialTile(
      thread_data,
      thread_result,
      CustomDifference(),
      valid_items);
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage).SubtractRightPartialTile(
      thread_data,
      thread_result,
      CustomDifference(),
      valid_items);
  }

  // Write results out-of-place so the input remains intact for verification.
  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    output[thread_offset + item] = thread_result[item];
  }
}
// Single-block kernel driving SubtractLeft / SubtractRight with a
// neighbouring-tile boundary value, emulating a tile in the middle of a
// larger range: the first (resp. last) item is differenced against
// `neighbour_tile_value` instead of being copied through.
// Expects a <<<1, ThreadsInBlock>>> launch.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void MiddleTileTestKernel(const DataType *input,
                                     DataType *output,
                                     DataType neighbour_tile_value)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];
  DataType thread_result[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = input[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractLeft(thread_data,
                    thread_result,
                    CustomDifference(),
                    neighbour_tile_value);
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractRight(thread_data,
                     thread_result,
                     CustomDifference(),
                     neighbour_tile_value);
  }

  // Write results out-of-place so the input remains intact for verification.
  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    output[thread_offset + item] = thread_result[item];
  }
}
// In-place variant of MiddleTileTestKernel: the same register array is
// passed as both source and destination of the difference, checking the
// aliased-arguments overload of SubtractLeft / SubtractRight.
// Expects a <<<1, ThreadsInBlock>>> launch.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void MiddleTileInplaceTestKernel(const DataType *input,
                                            DataType *output,
                                            DataType neighbour_tile_value)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = input[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    // Note: thread_data is deliberately both input and output.
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractLeft(thread_data,
                    thread_data,
                    CustomDifference(),
                    neighbour_tile_value);
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractRight(thread_data,
                     thread_data,
                     CustomDifference(),
                     neighbour_tile_value);
  }

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    output[thread_offset + item] = thread_data[item];
  }
}
// Full-tile kernel driving the plain SubtractLeft / SubtractRight overloads
// (no boundary value, no partial tile); results are written back over the
// input array `data`. Expects a <<<1, ThreadsInBlock>>> launch.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void TestKernel(DataType *data)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];
  DataType thread_result[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = data[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractLeft(thread_data, thread_result, CustomDifference());
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractRight(thread_data, thread_result, CustomDifference());
  }

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    data[thread_offset + item] = thread_result[item];
  }
}
// In-place variant of LastTileTestKernel: the register array is passed as
// both source and destination of SubtractLeftPartialTile /
// SubtractRightPartialTile. Expects a <<<1, ThreadsInBlock>>> launch.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void LastTileTestInplaceKernel(const DataType *input,
                                          DataType *output,
                                          unsigned int valid_items)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = input[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    // Note: thread_data is deliberately both input and output.
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractLeftPartialTile(thread_data,
                               thread_data,
                               CustomDifference(),
                               valid_items);
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractRightPartialTile(thread_data,
                                thread_data,
                                CustomDifference(),
                                valid_items);
  }

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    output[thread_offset + item] = thread_data[item];
  }
}
// In-place variant of TestKernel: the register array is passed as both
// source and destination of the full-tile SubtractLeft / SubtractRight.
// Expects a <<<1, ThreadsInBlock>>> launch.
template <typename DataType,
          unsigned int ThreadsInBlock,
          unsigned int ItemsPerThread,
          bool ReadLeft = false>
__global__ void TestInplaceKernel(DataType *data)
{
  using BlockAdjacentDifferenceT =
    cub::BlockAdjacentDifference<DataType, ThreadsInBlock>;

  // Shared scratch required by the CUB block-wide collective.
  __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage;

  DataType thread_data[ItemsPerThread];

  // Blocked arrangement: each thread owns a contiguous run of items.
  const unsigned int thread_offset = threadIdx.x * ItemsPerThread;

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    thread_data[item] = data[thread_offset + item];
  }
  __syncthreads();

  if (ReadLeft)
  {
    // Note: thread_data is deliberately both input and output.
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractLeft(thread_data,
                    thread_data,
                    CustomDifference());
  }
  else
  {
    BlockAdjacentDifferenceT(temp_storage)
      .SubtractRight(thread_data,
                     thread_data,
                     CustomDifference());
  }

  for (unsigned int item = 0; item < ItemsPerThread; item++)
  {
    data[thread_offset + item] = thread_data[item];
  }
}
// Host launcher for LastTileTestKernel; synchronises so that both launch
// and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void LastTileTest(const DataType *input,
                  DataType *output,
                  unsigned int valid_items)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  LastTileTestKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(input, output, valid_items);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Host launcher for the full-tile TestKernel; synchronises so that both
// launch and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void Test(DataType *data)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  TestKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(data);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Host launcher for MiddleTileTestKernel; synchronises so that both launch
// and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void MiddleTileTest(const DataType *input,
                    DataType *output,
                    DataType neighbour_tile_value)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  MiddleTileTestKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(input, output, neighbour_tile_value);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Host launcher for LastTileTestInplaceKernel; synchronises so that both
// launch and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void LastTileInplaceTest(const DataType *input,
                         DataType *output,
                         unsigned int valid_items)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  LastTileTestInplaceKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(input, output, valid_items);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Host launcher for the full-tile TestInplaceKernel; synchronises so that
// both launch and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void InplaceTest(DataType *data)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  TestInplaceKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(data);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Host launcher for MiddleTileInplaceTestKernel; synchronises so that both
// launch and execution errors are reported here via CubDebugExit.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock,
          bool ReadLeft = false>
void MiddleTileInplaceTest(const DataType *input,
                           DataType *output,
                           DataType neighbour_tile_value)
{
  const dim3 grid(1);
  const dim3 block(ThreadsInBlock);

  MiddleTileInplaceTestKernel<DataType, ThreadsInBlock, ItemsPerThread, ReadLeft>
    <<<grid, block>>>(input, output, neighbour_tile_value);

  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());
}
// Return true iff the range [first_begin, first_end) matches the range
// starting at second_begin element-for-element.
template <typename FirstIteratorT,
          typename SecondOperatorT>
bool CheckResult(FirstIteratorT first_begin,
                 FirstIteratorT first_end,
                 SecondOperatorT second_begin)
{
  const auto mismatch_pair =
    thrust::mismatch(first_begin, first_end, second_begin);
  return mismatch_pair.first == first_end;
}
// Verify the partial-tile (SubtractLeft/RightPartialTile) paths: only the
// first `num_items` of the tile are differenced; the tail must be copied
// through unchanged. Uses the S(i)=i(i-1)/2 sequence whose left-difference
// is 0, 1, 2, ... and whose right-difference is 0, -1, -2, ...
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock>
void TestLastTile(bool inplace,
                  unsigned int num_items,
                  thrust::device_vector<DataType> &d_input)
{
  thrust::tabulate(d_input.begin(),
                   d_input.end(),
                   TestSequenceGenerator<DataType>{});
  thrust::device_vector<DataType> d_output(d_input.size());

  constexpr bool read_left = true;
  constexpr bool read_right = false;

  DataType *d_input_ptr = thrust::raw_pointer_cast(d_input.data());
  DataType *d_output_ptr = thrust::raw_pointer_cast(d_output.data());

  // --- Left-looking partial-tile difference ---
  if (inplace)
  {
    LastTileInplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_input_ptr,
      d_output_ptr,
      num_items);
  }
  else
  {
    LastTileTest<DataType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_input_ptr,
      d_output_ptr,
      num_items);
  }

  {
    using CountingIteratorT =
      typename thrust::counting_iterator<DataType,
                                         thrust::use_default,
                                         std::size_t,
                                         std::size_t>;

    // First element has no left neighbour and is copied through.
    AssertEquals(d_output.front(), d_input.front());
    // Interior differences of S(i) are exactly 0, 1, 2, ...
    AssertTrue(CheckResult(d_output.begin() + 1,
                           d_output.begin() + num_items,
                           CountingIteratorT(DataType{0})));
    // Elements beyond num_items must be untouched.
    AssertTrue(CheckResult(d_output.begin() + num_items,
                           d_output.end(),
                           d_input.begin() + num_items));
  }

  // --- Right-looking partial-tile difference (fresh input) ---
  thrust::tabulate(d_input.begin(),
                   d_input.end(),
                   TestSequenceGenerator<DataType>{});

  if (inplace)
  {
    LastTileInplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_input_ptr,
      d_output_ptr,
      num_items);
  }
  else
  {
    LastTileTest<DataType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_input_ptr,
      d_output_ptr,
      num_items);
  }

  {
    // Right differences of S(i) are 0, -1, -2, ... (unsigned wrap-around).
    thrust::device_vector<DataType> reference(num_items);
    thrust::sequence(reference.begin(),
                     reference.end(),
                     static_cast<DataType>(0),
                     static_cast<DataType>(-1));

    AssertTrue(CheckResult(d_output.begin(),
                           d_output.begin() + num_items - 1,
                           reference.begin()));
    // The last valid item and everything after it are copied through.
    AssertTrue(CheckResult(d_output.begin() + num_items - 1,
                           d_output.end(),
                           d_input.begin() + num_items - 1));
  }
}
// Verify the boundary-value (middle-tile) SubtractLeft/SubtractRight paths:
// the tile is treated as sitting between two neighbouring tiles, so even
// the first (resp. last) element is differenced — against the supplied
// neighbour value rather than being copied through.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock>
void TestMiddleTile(bool inplace,
                    thrust::device_vector<DataType> &d_input)
{
  // Shift the sequence by one so that S(1) = 0 becomes the left neighbour.
  thrust::tabulate(d_input.begin(),
                   d_input.end(),
                   TestSequenceGenerator<DataType>{std::size_t{1}});
  thrust::device_vector<DataType> d_output(d_input.size());

  constexpr bool read_left = true;
  constexpr bool read_right = false;

  DataType *d_input_ptr = thrust::raw_pointer_cast(d_input.data());
  DataType *d_output_ptr = thrust::raw_pointer_cast(d_output.data());

  const DataType left_tile_last_value{0};
  // First element of the (virtual) tile to the right of this one.
  const DataType right_tile_first_value{
    TestSequenceGenerator<DataType>{}(d_input.size())
  };

  // --- Left-looking difference with a left-neighbour boundary value ---
  if (inplace)
  {
    MiddleTileInplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_input_ptr,
      d_output_ptr,
      left_tile_last_value);
  }
  else
  {
    MiddleTileTest<DataType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_input_ptr,
      d_output_ptr,
      left_tile_last_value);
  }

  {
    using CountingIteratorT =
      typename thrust::counting_iterator<DataType,
                                         thrust::use_default,
                                         std::size_t,
                                         std::size_t>;

    // With the boundary value, every output element is 0, 1, 2, ...
    AssertTrue(CheckResult(d_output.begin(),
                           d_output.end(),
                           CountingIteratorT(DataType{0})));
  }

  // --- Right-looking difference with a right-neighbour boundary value ---
  thrust::tabulate(d_input.begin(),
                   d_input.end(),
                   TestSequenceGenerator<DataType>{});

  if (inplace)
  {
    MiddleTileInplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_input_ptr,
      d_output_ptr,
      right_tile_first_value);
  }
  else
  {
    MiddleTileTest<DataType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_input_ptr,
      d_output_ptr,
      right_tile_first_value);
  }

  {
    // Every output element is 0, -1, -2, ... (unsigned wrap-around).
    thrust::device_vector<DataType> reference(d_input.size());
    thrust::sequence(reference.begin(),
                     reference.end(),
                     static_cast<DataType>(0),
                     static_cast<DataType>(-1));

    AssertTrue(CheckResult(d_output.begin(),
                           d_output.end(),
                           reference.begin()));
  }
}
// Maps an integer index to CustomType{idx + offset, idx + offset}; used to
// tabulate CustomType sequences on the device.
struct IntToCustomType
{
  unsigned int offset;

  IntToCustomType()
      : offset(0)
  {}

  explicit IntToCustomType(unsigned int offset)
      : offset(offset)
  {}

  __device__ __host__ CustomType operator()(unsigned int idx) const
  {
    const unsigned int shifted = idx + offset;
    return CustomType{shifted, shifted};
  }
};
// Verify the plain full-tile SubtractLeft/SubtractRight paths (no boundary
// value): the element without a neighbour is copied through unchanged.
// Results are written back over d_data by the kernels.
template <typename DataType,
          unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock>
void TestFullTile(bool inplace,
                  thrust::device_vector<DataType> &d_data)
{
  thrust::tabulate(d_data.begin(),
                   d_data.end(),
                   TestSequenceGenerator<DataType>{});

  constexpr bool read_left = true;
  constexpr bool read_right = false;

  DataType *d_data_ptr = thrust::raw_pointer_cast(d_data.data());

  // --- Left-looking difference ---
  if (inplace)
  {
    InplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_data_ptr);
  }
  else
  {
    Test<DataType, ItemsPerThread, ThreadsInBlock, read_left>(d_data_ptr);
  }

  {
    using CountingIteratorT =
      typename thrust::counting_iterator<DataType,
                                         thrust::use_default,
                                         std::size_t,
                                         std::size_t>;

    // First element has no left neighbour and keeps its original value.
    AssertEquals(d_data.front(), TestSequenceGenerator<DataType>{}(0));
    // Remaining differences of S(i) are exactly 0, 1, 2, ...
    AssertTrue(CheckResult(d_data.begin() + 1,
                           d_data.end(),
                           CountingIteratorT(DataType{0})));
  }

  // --- Right-looking difference (fresh input) ---
  thrust::tabulate(d_data.begin(),
                   d_data.end(),
                   TestSequenceGenerator<DataType>{});

  if (inplace)
  {
    InplaceTest<DataType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_data_ptr);
  }
  else
  {
    Test<DataType, ItemsPerThread, ThreadsInBlock, read_right>(d_data_ptr);
  }

  {
    // Right differences of S(i) are 0, -1, -2, ... (unsigned wrap-around).
    thrust::device_vector<DataType> reference(d_data.size());
    thrust::sequence(reference.begin(),
                     reference.end(),
                     static_cast<DataType>(0),
                     static_cast<DataType>(-1));

    AssertTrue(CheckResult(d_data.begin(),
                           d_data.end() - 1,
                           reference.begin()));
    // Last element has no right neighbour and keeps its original value.
    AssertEquals(d_data.back(),
                 TestSequenceGenerator<DataType>{}(d_data.size() - 1));
  }
}
// Verify the full-tile paths with a user-defined element type. Tabulating
// CustomType{i+1, i+1} makes every left-difference (and the pass-through
// first element) equal to {1, 1}; tabulating CustomType{i, i} makes every
// right-difference equal to {-1, -1} under unsigned wrap-around.
template <unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock>
void TestCustomType(bool inplace,
                    thrust::device_vector<CustomType> &d_data)
{
  // Offset of 1 so that the pass-through first element is also {1, 1}.
  thrust::tabulate(d_data.begin(), d_data.end(), IntToCustomType{1});
  CustomType *d_data_ptr = thrust::raw_pointer_cast(d_data.data());

  constexpr bool read_left = true;
  constexpr bool read_right = false;

  // --- Left-looking difference ---
  if (inplace)
  {
    InplaceTest<CustomType, ItemsPerThread, ThreadsInBlock, read_left>(
      d_data_ptr);
  }
  else
  {
    Test<CustomType, ItemsPerThread, ThreadsInBlock, read_left>(d_data_ptr);
  }

  {
    // Every element of the result must equal {1, 1}.
    const std::size_t expected_count = d_data.size();
    const std::size_t actual_count =
      thrust::count(d_data.begin(), d_data.end(), CustomType{1, 1});
    AssertEquals(expected_count, actual_count);
  }

  // --- Right-looking difference (fresh input, no offset) ---
  thrust::tabulate(d_data.begin(), d_data.end(), IntToCustomType{});

  if (inplace)
  {
    InplaceTest<CustomType, ItemsPerThread, ThreadsInBlock, read_right>(
      d_data_ptr);
  }
  else
  {
    Test<CustomType, ItemsPerThread, ThreadsInBlock, read_right>(d_data_ptr);
  }

  {
    // All but the pass-through last element must equal {-1, -1} (wrapped).
    const auto unsigned_minus_one = static_cast<unsigned int>(-1);
    const std::size_t expected_count = d_data.size() - 1;
    const std::size_t actual_count =
      thrust::count(d_data.begin(),
                    d_data.end() - 1,
                    CustomType{unsigned_minus_one, unsigned_minus_one});
    AssertEquals(expected_count, actual_count);
  }
}
// Run every tile-shape variant for one value type / launch configuration:
// partial tiles of decreasing size, the full tile, and the middle tile.
template <
  typename ValueType,
  unsigned int ItemsPerThread,
  unsigned int ThreadsInBlock>
void Test(bool inplace)
{
  constexpr int tile_size = ItemsPerThread * ThreadsInBlock;
  thrust::device_vector<ValueType> d_values(tile_size);

  // Halve the number of valid items each round to cover many partial tiles.
  unsigned int num_items = tile_size;
  while (num_items > 1)
  {
    TestLastTile<ValueType, ItemsPerThread, ThreadsInBlock>(inplace,
                                                            num_items,
                                                            d_values);
    num_items /= 2;
  }

  TestFullTile<ValueType, ItemsPerThread, ThreadsInBlock>(inplace, d_values);
  TestMiddleTile<ValueType, ItemsPerThread, ThreadsInBlock>(inplace, d_values);
}
// Allocate a tile-sized CustomType buffer and forward to the vector overload.
template <unsigned int ItemsPerThread,
          unsigned int ThreadsInBlock>
void TestCustomType(bool inplace)
{
  constexpr int tile_size = ItemsPerThread * ThreadsInBlock;

  thrust::device_vector<CustomType> d_data(tile_size);
  TestCustomType<ItemsPerThread, ThreadsInBlock>(inplace, d_data);
}
// Instantiate the test suite over the supported unsigned integer types.
template <unsigned int ItemsPerThread, unsigned int ThreadsPerBlock>
void Test(bool inplace)
{
  Test<std::uint8_t, ItemsPerThread, ThreadsPerBlock>(inplace);
  Test<std::uint16_t, ItemsPerThread, ThreadsPerBlock>(inplace);
  Test<std::uint32_t, ItemsPerThread, ThreadsPerBlock>(inplace);
  Test<std::uint64_t, ItemsPerThread, ThreadsPerBlock>(inplace);
}
// Run the type matrix for a warp-sized block and a larger block.
template <unsigned int ItemsPerThread>
void Test(bool inplace)
{
  Test<ItemsPerThread, 32>(inplace);
  Test<ItemsPerThread, 256>(inplace);
}
// Run both the out-of-place and the in-place (aliased in/out array) variants.
template <unsigned int ItemsPerThread>
void Test()
{
  Test<ItemsPerThread>(false);
  Test<ItemsPerThread>(true);
}
// Entry point: exercise several ItemsPerThread configurations across all
// tile shapes, value types, and in-place/out-of-place variants.
int main(int argc, char** argv)
{
  CommandLineArgs args(argc, argv);

  // Initialize device
  CubDebugExit(args.DeviceInit());

  Test<1>();
  Test<2>();
  Test<10>();
  Test<15>();

  // More of a compilation check
  TestCustomType<5, 256>(true);

  return 0;
}
// Minimal per-atom data needed to compute forces on the device
// (a stripped-down copy of the host-side atom record).
typedef struct atom_t {
    double pos[3] = {0,0,0}; // Cartesian position
    double eps=0;            // Lennard-Jones epsilon (mixed pairwise in the kernel)
    double sig=0;            // Lennard-Jones sigma (mixed pairwise in the kernel)
    double charge=0;         // partial charge for electrostatics
    double f[3] = {0,0,0};   // accumulated force (written with atomicAdd)
    int molid=0;             // parent molecule id; same-molecule pairs are skipped
    int frozen=0;            // nonzero = frozen; frozen-frozen pairs are skipped
    double u[3] = {0,0,0};   // dipole
    double polar=0;          // polarizability
} d_atom;
// All-pairs force kernel: one thread per atom i (1-D grid with >= N threads).
// Depending on pformD it accumulates, into atom_list[*].f via atomicAdd:
//   pform 0,1,2 : Lorentz-Berthelot mixed Lennard-Jones forces
//   pform 1,2   : Ewald electrostatics (real space + optional k-space)
//   pform 2     : damped point-dipole polarization forces
// Minimum-image displacements are built from the row-major 3x3 cell matrix
// `basis` and from `reciprocal_basis`.
__global__
void calculateForceKernel(
d_atom *__restrict__ atom_list,
const int N,
const double cutoffD,
const double *__restrict__ basis,
const double *__restrict__ reciprocal_basis,
const int pformD,
const double ewald_alpha,
const int kmax,
const int kspace,
const double polar_damp)
{
// only run for real atoms (no ghost threads)
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < N) {
// local copy of "my" atom; pair partners are read through atom_list[j]
const d_atom anchoratom = atom_list[i];
const int pform = pformD;
const double damp = polar_damp;
const double alpha = ewald_alpha;
const double cutoff = cutoffD;
double rimg, rsq; // minimum-image distance and its square
const double sqrtPI=sqrt(M_PI);
double d[3], di[3], img[3], dimg[3],r,r2,ri,ri2;
int q,j,n;
double sig,eps,r6,s6,u[3]= {0,0,0};
double af[3] = {0,0,0}; // accumulated forces for anchoratom
double holder,chargeprod; // for ES force
// ------------------------------------------------------------------
// Lennard-Jones (runs for every pform)
// ------------------------------------------------------------------
if (pform == 0 || pform == 1 || pform == 2) {
for (j=i+1; j<N; j++) { // each i-j pair handled by exactly one thread
if (anchoratom.molid == atom_list[j].molid) continue; // skip same molecule
if (anchoratom.frozen && atom_list[j].frozen) continue; // skip frozens
// LB mixing (arithmetic mean for sigma, geometric mean for epsilon)
sig = anchoratom.sig;
if (sig != atom_list[j].sig) sig = 0.5*(sig+atom_list[j].sig);
eps = anchoratom.eps;
if (eps != atom_list[j].eps) eps = sqrt(eps * atom_list[j].eps);
if (sig == 0 || eps == 0) continue;
// get R (nearest image): fractional image counts, then back to Cartesian
for (n=0; n<3; n++) d[n] = anchoratom.pos[n] - atom_list[j].pos[n];
for (n=0; n<3; n++) {
img[n]=0;
for (q=0; q<3; q++) {
img[n] += reciprocal_basis[n*3+q]*d[q];
}
img[n] = rint(img[n]);
}
for (n=0; n<3; n++) {
di[n] = 0;
for (q=0; q<3; q++) {
di[n] += basis[n*3+q]*img[q];
}
di[n] = d[n] - di[n];
}
r2=0;
ri2=0;
for (n=0; n<3; n++) {
r2 += d[n]*d[n];
ri2 += di[n]*di[n];
}
r = sqrt(r2);
ri = sqrt(ri2);
if (ri != ri) { // NaN guard: fall back to the raw displacement
rimg=r;
rsq=r2;
for (n=0; n<3; n++) dimg[n] = d[n];
} else {
rimg=ri;
rsq=ri2;
for (n=0; n<3; n++) dimg[n] = di[n];
}
// distance is now rimg
if (rimg <= cutoff) {
r6 = rsq*rsq*rsq;
s6 = sig*sig;
s6 *= s6 * s6;
// F = 24 eps (2 s^12/r^13 - s^6/r^7) * (dimg/r); Newton's 3rd law for j
for (n=0; n<3; n++) {
holder = 24.0*dimg[n]*eps*(2*(s6*s6)/(r6*r6*rsq) - s6/(r6*rsq));
atomicAdd(&(atom_list[j].f[n]), -holder);
af[n] += holder;
}
}
} // end pair j
// finally add the accumulated forces (stored on register) to the anchor atom
for (n=0; n<3; n++)
atomicAdd(&(atom_list[i].f[n]), af[n]);
} // end if LJ
// ==============================================================================
// Now handle electrostatics (Ewald: real space + optional reciprocal space)
// ==============================================================================
if (pform == 1 || pform == 2) {
for (n=0; n<3; n++) af[n]=0; // reset register-stored force for anchoratom.
double invV;
int l[3], p, q;
double k[3], k_sq, fourPI = 4.0*M_PI;
// inverse cell volume = 1/det(basis), cofactor expansion along first column
invV = basis[0] * (basis[4]*basis[8] - basis[7]*basis[5] );
invV += basis[3] * (basis[7]*basis[2] - basis[1]*basis[8] );
// BUGFIX: this cofactor read basis[1]*basis[5] - basis[5]*basis[2];
// the correct minor is basis[1]*basis[5] - basis[4]*basis[2].
// (The typo only affects triclinic cells: for diagonal cells basis[6]=0.)
invV += basis[6] * (basis[1]*basis[5] - basis[4]*basis[2] );
invV = 1.0/invV;
for (j=0; j<N; j++) {
if (anchoratom.frozen && atom_list[j].frozen) continue; // don't do frozen pairs
if (anchoratom.charge == 0 || atom_list[j].charge == 0) continue; // skip 0-force
if (i==j) continue; // don't do atom with itself
// get R (nearest image)
for (n=0; n<3; n++) d[n] = anchoratom.pos[n] - atom_list[j].pos[n];
for (n=0; n<3; n++) {
img[n]=0;
for (q=0; q<3; q++) {
img[n] += reciprocal_basis[n*3+q]*d[q];
}
img[n] = rint(img[n]);
}
for (n=0; n<3; n++) {
di[n] = 0;
for (q=0; q<3; q++) {
di[n] += basis[n*3+q]*img[q];
}
}
for (n=0; n<3; n++) di[n] = d[n] - di[n];
r2=0;
ri2=0;
for (n=0; n<3; n++) {
r2 += d[n]*d[n];
ri2 += di[n]*di[n];
}
r = sqrt(r2);
ri = sqrt(ri2);
if (ri != ri) { // NaN guard, as in the LJ section
rimg=r;
rsq=r2;
for (n=0; n<3; n++) dimg[n] = d[n];
} else {
rimg=ri;
rsq=ri2;
for (n=0; n<3; n++) dimg[n] = di[n];
}
// real-space (erfc-screened Coulomb)
if (rimg <= cutoff && (anchoratom.molid < atom_list[j].molid)) { // non-duplicated pairs, not intramolecular, not beyond cutoff
chargeprod = anchoratom.charge * atom_list[j].charge;
for (n=0; n<3; n++) u[n] = dimg[n]/rimg; // unit vector i->j
for (n=0; n<3; n++) {
holder = -((-2.0*chargeprod*alpha*exp(-alpha*alpha*rsq))/(sqrtPI*rimg) - (chargeprod*erfc(alpha*rimg)/rsq))*u[n];
af[n] += holder;
atomicAdd(&(atom_list[j].f[n]), -holder);
}
}
// k-space (reciprocal-lattice sum over a half sphere, doubled below)
if (kspace && (anchoratom.molid < atom_list[j].molid)) {
chargeprod = anchoratom.charge * atom_list[j].charge;
for (n=0; n<3; n++) {
// half-Ewald sphere enumeration: l[0]>=0, first nonzero index positive
for (l[0] = 0; l[0] <= kmax; l[0]++) {
for (l[1] = (!l[0] ? 0 : -kmax); l[1] <= kmax; l[1]++) {
for (l[2] = ((!l[0] && !l[1]) ? 1 : -kmax); l[2] <= kmax; l[2]++) {
// skip if norm is out of sphere
if (l[0]*l[0] + l[1]*l[1] + l[2]*l[2] > kmax*kmax) continue;
/* get reciprocal lattice vectors
(note the transposed access [3*q+p] vs the real-space [n*3+q]) */
for (p=0; p<3; p++) {
for (q=0, k[p] = 0; q < 3; q++) {
k[p] += 2.0*M_PI*reciprocal_basis[3*q+p] * l[q];
}
}
k_sq = k[0]*k[0] + k[1]*k[1] + k[2]*k[2];
holder = chargeprod * invV * fourPI * k[n] *
exp(-k_sq/(4*alpha*alpha))*
sin(k[0]*dimg[0] + k[1]*dimg[1] + k[2]*dimg[2])/k_sq * 2; // times 2 b/c half-Ewald sphere
af[n] += holder;
atomicAdd(&(atom_list[j].f[n]), -holder);
} // end for l[2], n
} // end for l[1], m
} // end for l[0], l
} // end 3d
}
} // end pair loop j
// finally add ES contribution to anchor-atom
for (n=0; n<3; n++) atomicAdd(&(atom_list[i].f[n]), af[n]);
} // end ES component
// ============================================================
// Polarization (charge-dipole and dipole-dipole, damped)
// ============================================================
if (pform == 2) {
double common_factor, r, rinv, r2, r2inv, r3, r3inv, r5inv, r7inv;
double x2,y2,z2,x,y,z;
double udotu, ujdotr, uidotr;
const double cc2inv = 1.0/(cutoff*cutoff); // shifted-force correction term
double t1,t2,t3,p1,p2,p3,p4,p5;
const double u_i[3] = {anchoratom.u[0], anchoratom.u[1], anchoratom.u[2]};
double u_j[3];
// loop all pair atoms (af is now a per-pair force, reset each iteration)
for (int j=i+1; j<N; j++) {
for (n=0; n<3; n++) af[n] = 0; // reset local force for this pair.
if (anchoratom.molid == atom_list[j].molid) continue; // no same-molecule
// get R (nearest image)
for (n=0; n<3; n++) d[n] = anchoratom.pos[n] - atom_list[j].pos[n];
for (n=0; n<3; n++) {
img[n]=0;
for (q=0; q<3; q++) {
img[n] += reciprocal_basis[n*3+q]*d[q];
}
img[n] = rint(img[n]);
}
for (n=0; n<3; n++) {
di[n] = 0;
for (q=0; q<3; q++) {
di[n] += basis[n*3+q]*img[q];
}
}
for (n=0; n<3; n++) di[n] = d[n] - di[n];
r2=0;
ri2=0;
for (n=0; n<3; n++) {
r2 += d[n]*d[n];
ri2 += di[n]*di[n];
}
r = sqrt(r2);
ri = sqrt(ri2);
if (ri != ri) {
rimg=r;
rsq=r2;
for (n=0; n<3; n++) dimg[n] = d[n];
} else {
rimg=ri;
rsq=ri2;
for (n=0; n<3; n++) dimg[n] = di[n];
}
// got pair displacements
if (rimg > cutoff) continue; // skip outside cutoff
r = rimg;
x = dimg[0];
y = dimg[1];
z = dimg[2];
x2 = x*x;
y2 = y*y;
z2 = z*z;
r2 = r*r;
r3 = r2*r;
rinv = 1./r;
r2inv = rinv*rinv;
r3inv = r2inv*rinv;
for (n=0; n<3; n++) u_j[n] = atom_list[j].u[n];
// (1) u_i -- q_j : anchor dipole in the field gradient of charge j
if (atom_list[j].charge != 0 && anchoratom.polar != 0) {
common_factor = atom_list[j].charge * r3inv;
af[0] += common_factor*((u_i[0]*(r2inv*(-2*x2 + y2 + z2) - cc2inv*(y2 + z2))) + (u_i[1]*(r2inv*(-3*x*y) + cc2inv*x*y)) + (u_i[2]*(r2inv*(-3*x*z) + cc2inv*x*z)));
af[1] += common_factor*(u_i[0]*(r2inv*(-3*x*y) + cc2inv*x*y) + u_i[1]*(r2inv*(-2*y2 + x2 + z2) - cc2inv*(x2 + z2)) + u_i[2]*(r2inv*(-3*y*z) + cc2inv*y*z));
af[2] += common_factor*(u_i[0]*(r2inv*(-3*x*z) + cc2inv*x*z) + u_i[1]*(r2inv*(-3*y*z) + cc2inv*y*z) + u_i[2]*(r2inv*(-2*z2 + x2 + y2) - cc2inv*(x2 + y2)));
}
// (2) u_j -- q_i : same interaction with roles swapped (opposite sign)
if (anchoratom.charge != 0 && atom_list[j].polar != 0) {
common_factor = anchoratom.charge * r3inv;
af[0] -= common_factor*((u_j[0]*(r2inv*(-2*x2 + y2 + z2) - cc2inv*(y2 + z2))) + (u_j[1]*(r2inv*(-3*x*y) + cc2inv*x*y)) + (u_j[2]*(r2inv*(-3*x*z) + cc2inv*x*z)));
af[1] -= common_factor*(u_j[0]*(r2inv*(-3*x*y) + cc2inv*x*y) + u_j[1]*(r2inv*(-2*y2 + x2 + z2) - cc2inv*(x2 + z2)) + u_j[2]*(r2inv*(-3*y*z) + cc2inv*y*z));
af[2] -= common_factor*(u_j[0]*(r2inv*(-3*x*z) + cc2inv*x*z) + u_j[1]*(r2inv*(-3*y*z) + cc2inv*y*z) + u_j[2]*(r2inv*(-2*z2 + x2 + y2) - cc2inv*(x2 + y2)));
}
// (3) u_i -- u_j : dipole-dipole with exponential damping exp(-damp*r)
if (anchoratom.polar != 0 && atom_list[j].polar != 0) {
r5inv = r2inv*r3inv;
r7inv = r5inv*r2inv;
udotu = u_i[0]*u_j[0] + u_i[1]*u_j[1] + u_i[2]*u_j[2];
uidotr = u_i[0]*dimg[0] + u_i[1]*dimg[1] + u_i[2]*dimg[2];
ujdotr = u_j[0]*dimg[0] + u_j[1]*dimg[1] + u_j[2]*dimg[2];
t1 = exp(-damp*r);
t2 = 1. + damp*r + 0.5*damp*damp*r2;
t3 = t2 + damp*damp*damp*r3/6.;
p1 = 3*r5inv*udotu*(1. - t1*t2) - r7inv*15.*uidotr*ujdotr*(1. - t1*t3);
p2 = 3*r5inv*ujdotr*(1. - t1*t3);
p3 = 3*r5inv*uidotr*(1. - t1*t3);
p4 = -udotu*r3inv*(-t1*(damp*rinv + damp*damp) + rinv*t1*damp*t2);
p5 = 3*r5inv*uidotr*ujdotr*(-t1*(rinv*damp + damp*damp + 0.5*r*damp*damp*damp) + rinv*t1*damp*t3);
af[0] += p1*x + p2*u_i[0] + p3*u_j[0] + p4*x + p5*x;
af[1] += p1*y + p2*u_i[1] + p3*u_j[1] + p4*y + p5*y;
af[2] += p1*z + p2*u_i[2] + p3*u_j[2] + p4*z + p5*z;
}
// apply Newton for pair.
for (n=0; n<3; n++) {
atomicAdd(&(atom_list[i].f[n]), af[n]);
atomicAdd(&(atom_list[j].f[n]), -af[n]);
}
} // end pair loop with atoms j
} // end polarization forces
} // end if i<n (all threads)
}
// Host-side driver: uploads the cell matrices and the atom list, launches
// calculateForceKernel over all atoms, then copies the (force-updated) atom
// list back into h_atom_list. Blocking: returns only after the D2H copy.
void force_kernel(
const int total_atoms,
const int block_size,
const int pform,
const double cutoff,
const double ewald_alpha,
const int ewald_kmax,
const int kspace_option,
const double polar_damp,
const double *h_basis,
const double *h_rbasis,
d_atom *h_atom_list)
{
// allocate memory on device
double *d_basis;
double *d_rbasis;
d_atom *d_atom_list;
// size_t (was int): byte counts cannot overflow for large atom lists
const size_t basis_size = sizeof(double) * 9; // 3x3 matrix
const size_t atoms_size = sizeof(d_atom) * (size_t)total_atoms;
cudaMalloc((void**) &d_basis, basis_size);
cudaMemcpy(d_basis, h_basis, basis_size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_rbasis, basis_size);
cudaMemcpy(d_rbasis, h_rbasis, basis_size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_atom_list, atoms_size);
cudaMemcpy(d_atom_list, h_atom_list, atoms_size, cudaMemcpyHostToDevice);
// grid elements: exact integer ceil-division (the previous
// ceil((float)total_atoms / block_size) relied on float rounding and could
// under-size the grid for very large atom counts)
int dimGrid = (total_atoms + block_size - 1) / block_size;
int dimBlock = block_size;
calculateForceKernel <<< dim3(dimGrid), dim3(dimBlock) >>> (
d_atom_list, total_atoms, cutoff, d_basis, d_rbasis, pform,
ewald_alpha, ewald_kmax, kspace_option, polar_damp);
// blocking copy-back; also synchronizes with the kernel launch above
cudaMemcpy(h_atom_list, d_atom_list, atoms_size, cudaMemcpyDeviceToHost);
cudaFree(d_atom_list);
cudaFree(d_basis);
cudaFree(d_rbasis);
}
#pragma once
#include "triangle_buffer.cuh"
#include "index_queue.cuh"
#include "progress_queue.cuh"
#include "config.h"
#include <cub/cub.cuh>
#include <ptx_primitives.cuh>
#include <bitonic_sort.cuh>
#include "instrumentation.cuh"
// RASTERIZATION_STAGE_GLOBAL defaults to `extern`; the one translation unit
// that owns the definitions overrides it before including this header.
#ifndef RASTERIZATION_STAGE_GLOBAL
#define RASTERIZATION_STAGE_GLOBAL extern
#endif
//RASTERIZATION_STAGE_GLOBAL __device__ IndexQueue<RASTERIZER_QUEUE_SIZE, unsigned int, true, true> rasterizer_queue[NUM_BLOCKS];
//RASTERIZATION_STAGE_GLOBAL __device__ TriangleBuffer<TRIANGLE_BUFFER_SIZE, NUM_INTERPOLATORS, true> triangle_buffer;
// the queue must be able to hold at least two full consume batches
// (message fixed: it previously misspelled the macro as RASTERIZER_COSUME_THRESHOLD)
static_assert(RASTERIZER_QUEUE_SIZE >= 2 * RASTERIZATION_CONSUME_THRESHOLD, "RASTERIZATION_CONSUME_THRESHOLD too restrictive... increase RASTERIZER_QUEUE_SIZE?");
// Per-rasterizer triangle queue. PRIMITIVE_ORDER selects between a plain
// FIFO (false) and a queue that sorts entries by primitive id (true);
// USECUB picks the block-sorting backend (cub radix sort vs bitonic sort).
template<bool PRIMITIVE_ORDER, bool USECUB = true>
struct RasterizerQueueT;
// Specialization WITHOUT primitive-order enforcement: a plain per-rasterizer
// FIFO of 32-bit triangle indices. The primitive id is accepted but ignored,
// and sortQueue is a no-op that only reports whether a full block's worth of
// work is available. All real queue mechanics live in the project's
// MultiIndexQueue type.
template<bool A>
struct RasterizerQueueT<false, A>
{
typedef unsigned int IndexQueueType;
MultiIndexQueue<MAX_NUM_RASTERIZERS, RASTERIZER_QUEUE_SIZE, IndexQueueType, IndexQueueAccessControl::EnumAccessControl<INDEXQUEUEATOMICS, INDEXQUEUEABORTONOVERFLOW>, -1U, TRACK_FILL_LEVEL> index_queue;
// no shared-memory state needed by this specialization
struct SharedMemT
{
};
// nothing to reset per primitive (no ordering is tracked)
__device__
void newPrimitive()
{
}
__device__
void init()
{
index_queue.init();
}
// dequeue up to num_threads entries for queue q; returns the count taken
__device__
int dequeueIndexBlock(int q, unsigned int &triid, int num_threads)
{
return index_queue.dequeueBlock(q, &triid, num_threads);
}
// primitive completion is irrelevant without ordering
__device__
void completedPrimitive(unsigned int primitive_id)
{
}
// primitive_id is dropped; only the triangle index is stored
__device__
void enqueue(int q, unsigned int triangle_id, unsigned int primitive_id)
{
index_queue.enqueue(q, triangle_id);
}
template<int NUM_THREADS>
struct SortQueueShared
{
};
// no sorting needed; just report whether a full block can run.
// return value false = "no sorting work was performed".
template<int NUM_THREADS>
__device__
bool sortQueue(int q, char* shared_memory_in, volatile int * sufficientToRun)
{
*sufficientToRun = index_queue.size(q) >= NUM_THREADS;
return false;
}
// without ordering, every enqueued element is immediately available
__device__
int availableElements(int q)
{
return index_queue.size(q);
}
__device__
int count(int q)
{
return index_queue.size(q);
}
};
// Block-wide key/value sorter used by the ordered rasterizer queue.
// CUB=true uses cub::BlockRadixSort; CUB=false (below) uses a bitonic sort
// and supports exactly 2 elements per thread.
template<int NUM_THREADS, int SORTING_ELEMENTS, bool CUB>
struct RasterizerQueueSorter;
// CUB backend: radix-sorts SORTING_ELEMENTS keys per thread, producing a
// striped output layout; begin_bit/end_bit restrict the sorted key bits.
template<int NUM_THREADS, int SORTING_ELEMENTS>
struct RasterizerQueueSorter<NUM_THREADS, SORTING_ELEMENTS, true>
{
typedef typename cub::BlockRadixSort<unsigned int, NUM_THREADS, SORTING_ELEMENTS, int>::TempStorage SharedT;
__device__
static void sort(SharedT& storage, unsigned int(&keys)[SORTING_ELEMENTS], int (&values)[SORTING_ELEMENTS], int begin_bit = 0, int end_bit = 32)
{
cub::BlockRadixSort<unsigned int, NUM_THREADS, SORTING_ELEMENTS, int>(storage).SortBlockedToStriped(keys, values, begin_bit, end_bit);
}
};
// Bitonic backend: stages the 2*NUM_THREADS key/value pairs through shared
// memory, sorts them with the project's BitonicSort, and reads them back.
// begin_bit/end_bit are accepted for interface parity but ignored here.
// NOTE(review): there is no explicit __syncthreads() between the staging
// writes, the sort, and the read-back — presumably BitonicSort::sort
// synchronizes internally at entry and exit; verify in bitonic_sort.cuh.
template<int NUM_THREADS>
struct RasterizerQueueSorter<NUM_THREADS, 2, false>
{
struct SharedT
{
unsigned int sort_keys[2 * NUM_THREADS];
int sort_values[2 * NUM_THREADS];
};
__device__
static void sort(SharedT& storage, unsigned int(&keys)[2], int(&values)[2], int begin_bit = 0, int end_bit = 32)
{
// comma-operator bodies: each iteration stores one key AND one value
#pragma unroll
for (int i = 0; i < 2; ++i)
storage.sort_keys[threadIdx.x + i*NUM_THREADS] = keys[i],
storage.sort_values[threadIdx.x + i*NUM_THREADS] = values[i];
BitonicSort::sort<unsigned int, int, NUM_THREADS, true>(storage.sort_keys, storage.sort_values, threadIdx.x);
for (int i = 0; i < 2; ++i)
keys[i] = storage.sort_keys[threadIdx.x + i*NUM_THREADS],
values[i] = storage.sort_values[threadIdx.x + i*NUM_THREADS];
}
};
// Specialization WITH primitive-order enforcement. Each queue entry packs
// (primitive_id << 32) | triangle_id into a 64-bit word; `ready[q]` marks the
// prefix of queue q that is already sorted by primitive id and whose
// primitives are complete, so consumers only dequeue ordered work.
// Completion is tracked via a ProgressQueue keyed by primitive id.
template<bool USECUB>
struct RasterizerQueueT<true, USECUB>
{
// elements sorted per thread and elements kept across sort passes, tuned
// per sorting backend
static constexpr int SORTING_ELEMENTS = USECUB ? 10 : 2;
static constexpr int TAKE_ALONG = USECUB ? 5 : 1;
static constexpr bool REDUCE_SORTED_BITS = true;
//static_assert(SORTING_ELEMENTS >= 2, "SortingElements must be at least two, to get a sufficient number of primitives sorted with a single run");
typedef unsigned long long int IndexQueueType;
typedef ProgressQueue<TRIANGLE_BUFFER_SIZE> ProgressQueueType;
typedef MultiIndexQueue<MAX_NUM_RASTERIZERS, RASTERIZER_QUEUE_SIZE, IndexQueueType, IndexQueueAccessControl::EnumAccessControl<INDEXQUEUEATOMICS, INDEXQUEUEABORTONOVERFLOW>, -1U, TRACK_FILL_LEVEL> MultiIndexQueueType;
MultiIndexQueueType index_queue;
ProgressQueueType triangle_progress;
// per-queue position up to which entries are sorted and ready for dequeue
MultiIndexQueueType::QueuePos ready[MAX_NUM_RASTERIZERS];
//unsigned int lastReadyPrimitive[MAX_NUM_RASTERIZERS];
template<int NUM_THREADS>
using Sorter = RasterizerQueueSorter<NUM_THREADS, SORTING_ELEMENTS, USECUB>;
// reset primitive-progress tracking for a new frame/draw
__device__
void newPrimitive()
{
triangle_progress.reset();
//if (REDUCE_SORTED_BITS && threadIdx.x < MAX_NUM_RASTERIZERS)
// lastReadyPrimitive[threadIdx.x] = 0;
}
__device__
void init()
{
index_queue.init();
triangle_progress.init();
if (threadIdx.x < MAX_NUM_RASTERIZERS)
ready[threadIdx.x] = 0;
}
// block-cooperative dequeue: thread 0 reserves up to num_threads ready
// entries, then each participating thread reads one entry.
// Returns the number of entries taken; triid receives this thread's
// triangle id (low 32 bits of the packed entry).
// NOTE(review): `take`/`offset` are __shared__ and re-written by thread 0 on
// the next call — callers presumably synchronize between calls; confirm.
__device__
int dequeueIndexBlock(int q, unsigned int &triid, int num_threads)
{
__shared__ int take, offset;
unsigned long long comptriid = triid;
if (threadIdx.x == 0)
{
int num = min(num_threads, availableElements(q));
int ttake = index_queue.singleThreadReserveRead(q, num);
offset = index_queue.singleThreadTake(q, ttake);
take = ttake;
}
__syncthreads();
if (threadIdx.x < take)
index_queue.multiThreadRead(q, &comptriid, threadIdx.x, offset);
triid = static_cast<unsigned int>(comptriid & 0xFFFFFFFFULL);
return take;
}
// mark a primitive as fully processed, allowing its triangles to be released
__device__
void completedPrimitive(unsigned int primitive_id)
{
triangle_progress.markDone(primitive_id);
}
// pack primitive id (high 32 bits) with triangle id (low 32 bits)
__device__
void enqueue(int q, unsigned int triangle_id, unsigned int primitive_id)
{
index_queue.enqueue(q, (static_cast<unsigned long long>(primitive_id) << 32) | triangle_id);
}
// shared-memory workspace for sortQueue; sort and progress storage overlap
// since they are never live at the same time.
// NOTE(review): `Sorter<NUM_THREADS>::SharedT`,
// `ProgressQueueType::CheckProgressShared<NUM_THREADS>` and
// `cub::BlockReduce<int, NUM_THREADS>::TempStorage` are dependent names and
// strictly require `typename` (accepted by nvcc/MSVC as written).
template<int NUM_THREADS>
struct SortQueueShared
{
union
{
Sorter<NUM_THREADS>::SharedT sort_storage;
ProgressQueueType::CheckProgressShared <NUM_THREADS> progress_storage;
};
cub::BlockReduce<int, NUM_THREADS>::TempStorage reduce_storage;
MultiIndexQueueType::QueuePos rdy;
int count, sorted, toSort, available_primitives, sortbits, lastReadyPrimitive;
};
// block-cooperative: sorts the unsorted tail of queue q by primitive id and
// advances ready[q] past entries whose primitives are complete.
// Returns true if sorting work was performed; sufficientToRun reports
// whether a full block of ready entries is now available.
template<int NUM_THREADS>
__device__
bool sortQueue(int q, char* shared_memory_in, volatile int * sufficientToRun)
{
static_assert(static_popcnt<NUM_THREADS>::value == 1, "NUM_THREADS for sorting must be a power of two");
//Instrumentation::BlockObserver<10, 1> observer;
// placement-new onto caller-provided shared memory
SortQueueShared<NUM_THREADS>& shared_memory = *new(shared_memory_in)SortQueueShared<NUM_THREADS>;
// initial check if it makes any sense to do something even
if (threadIdx.x == 0)
{
shared_memory.rdy = ready[q];
shared_memory.count = index_queue.begin(q).until(shared_memory.rdy);
shared_memory.toSort = min(index_queue.size(q), RASTERIZER_QUEUE_SIZE) - shared_memory.count;
*sufficientToRun = shared_memory.count >= NUM_THREADS;
}
__syncthreads();
if (shared_memory.toSort == 0)
return false;
// with TAKE_ALONG > 1 a full ready block means sorting can wait
if (TAKE_ALONG > 1)
if (shared_memory.count >= NUM_THREADS)
return true;
// update the progress queue
unsigned int available = [&]()->unsigned int {
Instrumentation::BlockObserver<11, 2> observer;
return triangle_progress. template checkProgressBlock<NUM_THREADS>(shared_memory.progress_storage);
}();
__syncthreads();
// get current state of the queue
// we need to redo that, as we now only know available primitive number - if something got enqueued
// after we checked initally, we could miss a tringle!
if (threadIdx.x == 0)
{
shared_memory.available_primitives = available;
shared_memory.toSort = min(index_queue.size(q), RASTERIZER_QUEUE_SIZE) - shared_memory.count;
if (REDUCE_SORTED_BITS)
{
//shared_memory.lastReadyPrimitive = lastReadyPrimitive[q];
//shared_memory.sortbits = max(4, 32 - __clz(available - shared_memory.lastReadyPrimitive + 3));
// only sort as many key bits as the live primitive range needs
shared_memory.sortbits = max(4, 32 - __clz(available + 2));
}
}
__syncthreads();
// TODO: for now we ignore and do not update the sorted pointer
// (could be used to make sure that we dont sort if it is not needed and stop sorting ealier if nothing changes anymore)
{
//there is something to sort -> run from back to either sorted or ready and update ready
Instrumentation::BlockObserver<12, 2> observer;
int newReadyOffset = shared_memory.toSort;
int toSort = newReadyOffset;
int startOffset;
do
{
// each pass covers SORTING_ELEMENTS*NUM_THREADS entries ending at toSort;
// negative offsets are padded with key 0 so they sort to the front
startOffset = toSort - SORTING_ELEMENTS * NUM_THREADS;
unsigned int local_keys[SORTING_ELEMENTS];
int local_values[SORTING_ELEMENTS];
#pragma unroll
for (int i = 0; i < SORTING_ELEMENTS; ++i)
{
local_values[i] = startOffset + threadIdx.x + i*NUM_THREADS;
if (local_values[i] >= 0)
{
// we can try to remap the sorting range to fewer bits
unsigned int primitiveid = static_cast<unsigned int>(((shared_memory.rdy + local_values[i]).read(index_queue, q) >> 32) & 0xFFFFFFFFULL);
//if (REDUCE_SORTED_BITS)
// local_keys[i] = 2 + min(primitiveid, shared_memory.available_primitives + 1) - shared_memory.lastReadyPrimitive;
//else
local_keys[i] = 1 + min(primitiveid, shared_memory.available_primitives + 1);
}
else
local_keys[i] = 0;
}
Sorter<NUM_THREADS>::sort(shared_memory.sort_storage, local_keys, local_values, 0, REDUCE_SORTED_BITS ? shared_memory.sortbits : 32);
// read entries
unsigned long long int entries[SORTING_ELEMENTS];
#pragma unroll
for (int i = SORTING_ELEMENTS-1; i >= 0; --i)
{
if (local_values[i] >= 0)
{
entries[i] = (shared_memory.rdy + local_values[i]).read(index_queue, q);
//if (REDUCE_SORTED_BITS)
//{
// if (local_keys[i] - 2 + shared_memory.lastReadyPrimitive >= shared_memory.available_primitives)
// newReadyOffset = startOffset + threadIdx.x + NUM_THREADS*i;
//}
//else
{
// entry belongs to an unfinished primitive: ready cannot pass it
if (local_keys[i] - 1 >= shared_memory.available_primitives)
newReadyOffset = startOffset + threadIdx.x + NUM_THREADS*i;
}
}
}
__syncthreads();
// write the entries to their new positions
#pragma unroll
for (int i = 0; i < SORTING_ELEMENTS; ++i)
{
int outoffset = startOffset + threadIdx.x + NUM_THREADS*i;
if (outoffset >= 0)
(shared_memory.rdy + outoffset).write(index_queue, q, entries[i]);
}
__syncthreads();
// overlap passes by TAKE_ALONG elements so the sorted front carries over
toSort -= NUM_THREADS*(SORTING_ELEMENTS - TAKE_ALONG);
} while (startOffset > 0);
// compute new ReadOffset accross block
newReadyOffset = cub::BlockReduce<int, NUM_THREADS>(shared_memory.reduce_storage).Reduce(min(newReadyOffset, TAKE_ALONG*NUM_THREADS), cub::Min());
if (threadIdx.x == 0)
{
*sufficientToRun = (shared_memory.count + newReadyOffset) >= NUM_THREADS;
//if (REDUCE_SORTED_BITS && newReadyOffset > 0)
//{
// lastReadyPrimitive[q] = 1 + static_cast<unsigned int>(((shared_memory.rdy + (newReadyOffset - 1)).read(index_queue, q) >> 32) & 0xFFFFFFFFULL);
// ready[q] = shared_memory.rdy + newReadyOffset;
//}
//else
ready[q] = shared_memory.rdy + newReadyOffset;
}
}
__syncthreads();
return true;
}
// entries between the queue head and ready[q] are sorted and dequeueable
__device__
int availableElements(int q)
{
auto front = index_queue.begin(q);
return front.until(ready[q]);
}
// total enqueued entries (sorted or not)
__device__
int count(int q)
{
return index_queue.size(q);
}
};
// bitmask_type<BITS>: narrowest unsigned integer type that holds BITS bits
// (unsigned int up to 32, unsigned long long for 33..64).
// NOTE(review): `enable_if` is unqualified — presumably a project alias or a
// using-declaration from a header outside this file; confirm.
template <int BITS, typename = void>
struct bitmask_type_t;
template <int BITS>
struct bitmask_type_t<BITS, typename enable_if<(BITS <= 32)>::type>
{
using type = unsigned int;
};
template <int BITS>
struct bitmask_type_t<BITS, typename enable_if<(BITS > 32 && BITS <= 64)>::type>
{
using type = unsigned long long;
};
// convenience alias
template <int BITS>
using bitmask_type = typename bitmask_type_t<BITS>::type;
// Device-global bitset tracking which virtual rasterizers are currently
// "active" (owned by some block). One bit per rasterizer id; acquisition and
// release use atomics on the containing word.
template <int NUM_RASTERIZERS, int ACTIVE_BITS>
class VirtualRasterizers
{
static constexpr int NUM_ELEMENTS = (NUM_RASTERIZERS + ACTIVE_BITS - 1) / ACTIVE_BITS;
// word type wide enough to hold ACTIVE_BITS bits
typedef bitmask_type<ACTIVE_BITS> word_type;
word_type active[NUM_ELEMENTS];
public:
// clear all activity bits; grid-stride so any launch shape covers the array
__device__
void init()
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < NUM_ELEMENTS; i += gridDim.x * blockDim.x)
active[i] = 0U;
}
__device__
bool isRasterizerActive(int id) const
{
// BUGFIX: build the mask in word_type. The previous
// `0x1U << (id % ACTIVE_BITS)` shifts a 32-bit constant and is
// UB/wrong whenever ACTIVE_BITS > 32 (word_type is then 64-bit).
// Identical behavior for the ACTIVE_BITS == 32 instantiation below.
const word_type mask = word_type(1) << (id % ACTIVE_BITS);
return (ldg_cg(&active[id / ACTIVE_BITS]) & mask) != 0;
}
// try to acquire rasterizer `id`; returns true iff this call flipped the
// bit from inactive to active (i.e. the caller now owns it)
__device__
bool setRasterizerActive(int id)
{
const word_type mask = word_type(1) << (id % ACTIVE_BITS);
bool b = (atomicOr(&active[id / ACTIVE_BITS], mask) & mask) == 0;
//if (threadIdx.x == 0 && b)
// printf("%ulld %d acquired %d\n", clock64(), blockIdx.x, id);
return b;
}
// release rasterizer `id`
__device__
void setRasterizerInactive(int id)
{
//if (threadIdx.x == 0)
// printf("%ulld %d releasing %d\n", clock64(), blockIdx.x, id);
atomicAnd(&active[id / ACTIVE_BITS], word_type(~(word_type(1) << (id % ACTIVE_BITS))));
}
};
// Global device instances (extern in every TU except the defining one, via
// RASTERIZATION_STAGE_GLOBAL).
RASTERIZATION_STAGE_GLOBAL __device__ VirtualRasterizers<MAX_NUM_RASTERIZERS, 32> virtual_rasterizers;
// queue flavor selected at compile time by ENFORCE_PRIMITIVE_ORDER
typedef RasterizerQueueT<ENFORCE_PRIMITIVE_ORDER> RasterizerQueue;
RASTERIZATION_STAGE_GLOBAL __device__ RasterizerQueue rasterizer_queue;
RASTERIZATION_STAGE_GLOBAL __device__ TriangleBuffer<TRIANGLE_BUFFER_SIZE, NUM_INTERPOLATORS, TRIANGLEBUFFER_REFCOUNTING, TRACK_FILL_LEVEL> triangle_buffer;
#endif // INCLUDED_CURE_RASTERIZATION_STAGE
#include "./commonlib/checkerror.h" // checkCudaErrors
// D2Q9 lattice data in GPU constant memory, filled by set_e_alpha():
// dev_e[dir]     = discrete velocity vector (x,y components) for direction dir
// dev_alpha[dir] = quadrature weight for direction dir
__constant__ float2 dev_e[9]; // dir = 0,1,...8
__constant__ float dev_alpha[9];
// dev_ant[dir] = index of the direction opposite to dir (bounce-back table)
__constant__ int dev_ant[9];
// END of the base vectors and weight coefficients (GPU)
// Upload the D2Q9 lattice vectors e_dir, quadrature weights alpha_dir and the
// opposite-direction ("ant") lookup table into GPU constant memory.
//
// Direction layout:        6  2  5
//                          3  0  1
//                          7  4  8
void set_e_alpha() {
    // discrete velocity set (rest particle + 4 axis + 4 diagonal directions)
    const float2 e[9] = {
        { 0.0f,  0.0f },
        { 1.0f,  0.0f }, { 0.0f,  1.0f }, { -1.0f, 0.0f }, { 0.0f, -1.0f },
        { 1.0f,  1.0f }, { -1.0f, 1.0f }, { -1.0f, -1.0f }, { 1.0f, -1.0f }
    };
    // standard D2Q9 weights: 4/9 rest, 1/9 axis, 1/36 diagonal
    const float alpha[9] = {
        4.0f / 9.0f,
        1.0f / 9.0f,  1.0f / 9.0f,  1.0f / 9.0f,  1.0f / 9.0f,
        1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f
    };
    // ant[dir] = direction pointing opposite to dir (used for bounce-back)
    const int ant[9] = { 0, 3, 4, 1, 2, 7, 8, 5, 6 };

    checkCudaErrors(
        cudaMemcpyToSymbol( dev_e, e, sizeof(float2)*9, 0, cudaMemcpyHostToDevice) ); // offset from start is 0
    checkCudaErrors(
        cudaMemcpyToSymbol( dev_alpha, alpha, sizeof(float)*9, 0, cudaMemcpyHostToDevice) );
    checkCudaErrors(
        cudaMemcpyToSymbol( dev_ant, ant, sizeof(int)*9, 0, cudaMemcpyHostToDevice) );
}
// Initialize the D2Q9 distributions f, feq and f_new to the local equilibrium
// computed from the (already initialized) density field rh and velocity field
// u. One thread per lattice node; launch a 2-D grid covering N_x x N_y, any
// excess threads return immediately.
__global__ void initialize(
float * rh , float2 *u,
float *f, float *feq, float *f_new,
const int N_x, const int N_y, const int NDIR=9
) {
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    // guard the grid tail: only threads mapping to a real node may touch memory
    if ((ix >= N_x) || (iy >= N_y)) { return; }

    const int site = ix + N_x * iy;  // flat node index
    for (int dir = 0; dir < NDIR; dir++) {
        const int idx = site + dir * N_x * N_y;  // (ix, iy, dir)
        // second-order equilibrium distribution for this direction
        const float edotu = dev_e[dir].x * u[site].x + dev_e[dir].y * u[site].y;
        const float udotu = u[site].x * u[site].x + u[site].y * u[site].y;
        const float equilibrium =
            rh[site] * dev_alpha[dir]
            * ( 1.0f + 3.0f * edotu + 4.5f * edotu * edotu - 1.5f * udotu );
        feq[idx] = equilibrium;
        f[idx] = equilibrium;
        f_new[idx] = equilibrium;
    }
}
// One D2Q9 LBM time step for the lid-driven cavity: BGK collision, streaming
// with bounce-back at the walls (including the moving-lid momentum term),
// then the macroscopic density/velocity update at interior nodes.
// One thread per lattice node; 2-D launch over N_x x N_y.
//
// NOTE(review): within this single launch, the streaming phase writes f_new
// at NEIGHBOR nodes while the later "push f_new into f" / rho / u phases read
// f_new at the local node. There is no grid-wide barrier between phases, so
// threads in different blocks race on f and f_new and results can depend on
// block scheduling. Splitting the phases into separate kernel launches would
// make the update well-defined — confirm against a reference implementation.
__global__ void timeIntegration(
float *rh, float2 *u,
float *f, float *feq, float *f_new,
const float LID_VELOCITY, const float REYNOLDS_NUMBER , const float DENSITY,
const int N_x, const int N_y, const int NDIR =9
) {
// calculate fluid viscosity based on the Reynolds number
float kinematicViscosity = LID_VELOCITY * static_cast<float>( N_x ) / REYNOLDS_NUMBER;
// calculate relaxation time tau (BGK: tau = 1/2 + 3*nu in lattice units)
float tau = 0.5f + 3.0f * kinematicViscosity ;
// compute the "i" and "j" location and the "dir"
// handle by this thread
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockIdx.y * blockDim.y ;
int k = i + N_x * j; // k = 0, 1, ... blockDim.x * gridDim.x + N_x * blockDim.y * gridDim.y = N_x * N_y
// collision: recompute local equilibrium at interior nodes
// (all phases below are guarded to interior nodes, which also excludes
// out-of-range tail threads since i >= N_x fails i < N_x - 1)
if (( i >0 ) && ( i < N_x - 1) && (j > 0 ) && ( j < N_y - 1) ) {
for (int dir = 0; dir < NDIR; dir++ ) {
int index = i + j * N_x + dir * N_x * N_y ;
float edotu = dev_e[dir].x * u[k].x + dev_e[dir].y * u[k].y ;
float udotu = u[k].x * u[k].x + u[k].y * u[k].y ;
feq[index] = rh[k] * dev_alpha[dir] * ( 1.0f + 3.0f * edotu + 4.5f * edotu * edotu - 1.5f * udotu ) ;
}
}
// streaming from interior node points
if ( (i > 0) && ( i < N_x - 1) && ( j > 0 ) && ( j < N_y - 1) ) {
for (int dir = 0; dir < NDIR; dir++ ) {
int index = i + j * N_x + dir * N_x * N_y ; // (i,j, dir)
// dev_e components are floats holding exactly 0/+1/-1, so the
// float->int neighbor index conversion below is exact
int index_new = ( i + dev_e[dir].x) + (j + dev_e[dir].y ) * N_x + dir*N_x*N_y ;
int index_ant = i + j * N_x + dev_ant[dir] * N_x * N_y ;
// post-collision distribution at (i,j) along "dir"
float f_plus = f[index] - ( f[index] - feq[index] ) / tau;
if (( i + dev_e[dir].x ==0) || ( i + dev_e[dir].x == N_x-1) ||
( j + dev_e[dir].y == 0) || (j + dev_e[dir].y == N_y-1) ) {
// bounce back off a wall: reflect into the opposite direction,
// with the moving-wall correction term -6 rho w_dir (u_wall . e_dir)
// (note: the 6.0 literal is double, promoting this expression to
// double before the float store — numerically harmless)
int ixy = i + dev_e[dir].x + N_x * ( j + dev_e[dir].y ) ;
float ubdote = u[ixy].x * dev_e[dir].x + u[ixy].y * dev_e[dir].y ;
f_new[ index_ant ] = f_plus - 6.0 * DENSITY * dev_alpha[dir] * ubdote ;
}
else {
// stream to neighbor
f_new[index_new] = f_plus ;
}
}
}
// push f_new into f (see the race NOTE in the header comment)
if (( i >0 ) && ( i < N_x - 1) && (j > 0 ) && (j < N_y - 1) ) {
for (int dir=0; dir < NDIR; dir++) {
int index = i + j * N_x + dir * N_x * N_y ; // (i,j,dir)
f[index] = f_new[index] ;
}
}
// update density at interior nodes: rho = sum_dir f_dir
if ((i>0) && (i <N_x-1) && (j >0) && (j< N_y -1)) {
float rho = 0;
for (int dir =0; dir < NDIR; dir++ ) {
int index = i + j * N_x + dir * N_x * N_y ;
rho += f_new[index] ;
}
rh[k] = rho ;
}
// update velocity at interior nodes: u = (sum_dir f_dir e_dir) / rho
if ((i >0 ) && (i < N_x - 1) && (j >0 ) && (j < N_y -1) ) {
float velx = 0.0f;
float vely = 0.0f;
for (int dir = 0; dir < NDIR; dir++) {
int index = i + j*N_x + dir* N_x * N_y ;
velx += f_new[index] * dev_e[dir].x ;
vely += f_new[index] * dev_e[dir].y ;
}
u[k].x = velx / rh[k] ;
u[k].y = vely / rh[k] ;
}
} // END of time integration
int main( int argc, char *argv[])
{
// problem parameters
constexpr const int N_x = 512 ; // number of node points along x (cavity length in lattice units)
constexpr const int N_y = 512 ; // number of node points along y (cavity length in lattice units)
constexpr const int TIME_STEPS = 20000; // number of time steps for which the simulation is run
constexpr const int NDIR = 9; // number of discrete velocity directions used in the D2Q9 model
constexpr const float DENSITY = 2.7f ; // fluid density in lattice units
constexpr const float LID_VELOCITY = 0.05f; // lid velocity in lattice units
constexpr const float REYNOLDS_NUMBER = 100.0f; // Re=
// END of problem parameters
// initialize density and velocity fields inside the cavity;
/* rho[ i + N_x * j ] = DENSITY ;
u[ i + N_x * j].x = 0.0f ;
u[ i + N_x * j].y = 0.0f;
if (j == N_y-1 ) { u[ i + N_x * (N_y - 1) ] = LID_VELOCITY ; }
*/
// initialize density and velocity fields inside the cavity on host CPU
float2 *u = new float2[N_x*N_y] ;
float *rh = new float[N_x*N_y] ;
for (int j = 0; j < N_y; ++j) {
for (int i = 0; i < N_x; ++i) {
rh[ i + N_x*j] = DENSITY ;
u[ i + N_x * j].x = 0.0f;
u[ i + N_x * j].y = 0.0f;
if (j == N_y-1) { u[ i + N_x * (N_y - 1)].x = LID_VELOCITY ; }
}
}
// sanity check
/*
std::cout << "\n Initially, on the host CPU : " << std::endl ;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
// in the corner
std::cout << " \n in the corner : " << std::endl;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y - 20 ; j < N_y; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
*/
// END of sanity check
// allocate memory on the GPU
float *d_f, *d_feq, *d_f_new ;
checkCudaErrors(
cudaMalloc((void **)&d_f, N_x * N_y * NDIR * sizeof(float)) );
checkCudaErrors(
cudaMalloc((void **)&d_feq, N_x * N_y * NDIR * sizeof(float)) );
checkCudaErrors(
cudaMalloc((void **)&d_f_new, N_x * N_y * NDIR * sizeof(float)) );
float *d_rh ;
float2 *d_u; // velocity
checkCudaErrors(
cudaMalloc((void **)&d_rh, N_x * N_y * sizeof(float)) );
checkCudaErrors(
cudaMalloc((void **)&d_u, N_x * N_y * sizeof(float2)) );
// set to 0
checkCudaErrors(
cudaMemset(d_f, 0,N_x * N_y * NDIR * sizeof(float)) );
checkCudaErrors(
cudaMemset(d_feq, 0,N_x * N_y * NDIR * sizeof(float)) );
checkCudaErrors(
cudaMemset(d_f_new, 0,N_x * N_y * NDIR * sizeof(float)) );
checkCudaErrors(
cudaMemset(d_rh, 0,N_x * N_y * sizeof(float)) );
checkCudaErrors(
cudaMemset(d_u, 0,N_x * N_y * sizeof(float2)) );
////////////////////////////////////////////////////////////////////
// block, grid dimensions
////////////////////////////////////////////////////////////////////
// assign a 3D distribution of CUDA "threads" within each CUDA "block"
int threadsAlongX = 32, threadsAlongY = 32;
dim3 Blockdim( threadsAlongX , threadsAlongY, 1) ;
// calculate number of blocks along x and y in a 2D CUDA "grid"
dim3 Griddim( (N_x + Blockdim.x -1)/Blockdim.x, (N_y + Blockdim.y -1)/Blockdim.y , 1) ;
////////////////////////////////////////////////////////////////////
// END of block, grid dimensions
// initialize density and velocity fields inside the cavity on device GPU
checkCudaErrors(
cudaMemcpy( d_rh, rh, sizeof(float) * N_x*N_y, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy( d_u, u, sizeof(float2) * N_x*N_y, cudaMemcpyHostToDevice));
// sanity check
/*
checkCudaErrors(
cudaMemcpy( rh, d_rh, sizeof(float) * N_x*N_y, cudaMemcpyDeviceToHost) );
checkCudaErrors(
cudaMemcpy( u, d_u, sizeof(float2) * N_x*N_y, cudaMemcpyDeviceToHost) );
std::cout << " After Memcpy copy : " << std::endl;
std::cout << " In the middle : " << std::endl ;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
// in the corner
std::cout << " \n in the corner : " << std::endl;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y - 20 ; j < N_y; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
*/
// END of sanity check
set_e_alpha();
initialize<<<Griddim, Blockdim>>>( d_rh, d_u,
d_f, d_feq, d_f_new,
N_x, N_y, NDIR ) ;
// sanity check
/*
checkCudaErrors(
cudaMemcpy( rh, d_rh, sizeof(float) * N_x*N_y, cudaMemcpyDeviceToHost) );
checkCudaErrors(
cudaMemcpy( u, d_u, sizeof(float2) * N_x*N_y, cudaMemcpyDeviceToHost) );
std::cout << " After initialize copy : " << std::endl;
std::cout << " In the middle : " << std::endl ;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
// in the corner
std::cout << " \n in the corner : " << std::endl;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y - 20 ; j < N_y; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.y : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
*/
// END of sanity check
// time integration
int start_time = 0 ;
for ( int t = start_time ; t < TIME_STEPS ; ++t) {
timeIntegration<<<Griddim, Blockdim>>>( d_rh, d_u,
d_f, d_feq, d_f_new ,
LID_VELOCITY, REYNOLDS_NUMBER, DENSITY,
N_x, N_y, NDIR ) ;
}
// sanity check
checkCudaErrors(
cudaMemcpy( rh, d_rh, sizeof(float) * N_x*N_y, cudaMemcpyDeviceToHost) );
checkCudaErrors(
cudaMemcpy( u, d_u, sizeof(float2) * N_x*N_y, cudaMemcpyDeviceToHost) );
std::cout << " After initialize and timeIntegration : " << std::endl;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y/2 - 21 ; j < N_y/2 + 21; j++) {
for (int i = N_x/2 - 21; i < N_x/2 + 21; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
// in the corner
std::cout << " \n in the corner : " << std::endl;
std::cout << " d_rh : " << std::endl ;
for (int j = N_y - 20 ; j < N_y; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << rh[ i + N_x * j ] << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x ; i++) {
std::cout << u[ i + N_x * j ].x << " " ;
}
std::cout << std::endl ;
}
std::cout << " d_u.x : " << std::endl ;
for (int j = N_y - 20 ; j < N_y ; j++) {
for (int i = N_x - 20; i < N_x; i++) {
std::cout << u[ i + N_x * j ].y << " " ;
}
std::cout << std::endl ;
}
// END of sanity check
// free host CPU memory
delete[] rh;
delete[] u;
// free device GPU memory
checkCudaErrors(
cudaFree( d_f ));
checkCudaErrors(
cudaFree( d_feq ));
checkCudaErrors(
cudaFree( d_f_new ));
checkCudaErrors(
cudaFree( d_rh ));
checkCudaErrors(
cudaFree( d_u ));
return 0;
} | the_stack |
#include "cuda_error.h"
#include "cuda_runtime.h"
#include "StreamingKernels.h"
#include "SingleFitStream.h"
#include "JobWrapper.h"
#include "GpuMultiFlowFitControl.h"
#include "SignalProcessingFitterQueue.h"
#include "LayoutTranslator.h"
//#define MIN_MEMORY_FOR_ONE_STREAM (450*1024*1024)
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////
//// Simple
// Tunable launch parameters shared by all SimpleSingleFitStream instances.
// A value of -1 means "not configured": the getters below fall back to a
// per-compute-capability default.
int SimpleSingleFitStream::_bpb = -1; // beads (threads) per block; see BlockSizeDefaultSetting()
int SimpleSingleFitStream::_l1type = -1; // L1/shared preference; 0: Equal, 1: Shared preferred, 2: L1 preferred
int SimpleSingleFitStream::_fittype = -1; //0 GaussNewton, 1 LevMar, 2 Hybrid, 3 Relaxing Kmult Gauss Newton
int SimpleSingleFitStream::_hybriditer = -1; // LevMar after N iter of Gauss newton
// Default L1/shared-memory cache preference for the current device.
// Encoding: 0 = Equal, 1 = Shared preferred, 2 = L1 preferred.
// Fermi (compute 2.0) prefers L1; every other architecture uses the equal split.
int SimpleSingleFitStream::l1DefaultSetting()
{
  return (_computeVersion == 20) ? 2 : 0;
}
// Default threads-per-block (beads per block) for the current device.
// With recent rearrangements (10-31-13), the magic number for C2075 seems to be 160...
// And really, I think I've tested it properly...
int SimpleSingleFitStream::BlockSizeDefaultSetting()
{
  if (_computeVersion == 20)
    return 160;                               // Fermi / C2075
  return (_computeVersion >= 35) ? 256 : 128; // Kepler+ vs. older parts
}
/////////////////////////////////////////////////
//FIT STREAM CLASS
// Construct a single-fit stream execution unit bound to one queued work item.
// The _myJob wrapper is initialized with the job's flow key and the size of the
// flow block containing the job's flow (both pulled from the item's private
// BkgModelWorkInfo payload).
SimpleSingleFitStream::SimpleSingleFitStream(streamResources * res, WorkerInfoQueueItem item ) :
cudaSimpleStreamExecutionUnit(res, item),
_myJob( static_cast< BkgModelWorkInfo * >( item.private_data )->flow_key,
static_cast< BkgModelWorkInfo * >( item.private_data )->inception_state->
bkg_control.signal_chunks.flow_block_sequence.BlockAtFlow(
static_cast< BkgModelWorkInfo * >( item.private_data )->flow )->size() )
{
setName("SingleFitStream");
if(_verbose) cout << getLogHeader() << " created " << endl;
// Bead/frame counts are unknown until resetPointers() reads them from the job.
_N = 0;
_F = 0;
_padN = 0;
}
// Destructor: delegate any per-job teardown to cleanUp().
SimpleSingleFitStream::~SimpleSingleFitStream()
{
cleanUp();
}
// Per-job teardown hook. Buffer segments are owned by the streamResources
// object, so currently this only emits a trace message when verbose.
void SimpleSingleFitStream::cleanUp()
{
  if (_verbose)
    cout << getLogHeader() << " clean up" << endl;
}
// Carve the stream's host/device memory pool into the segments needed for the
// current job. Segment sizes come from _myJob; the splitAt() calls below are
// strictly order-dependent (each one advances through a contiguous arena), and
// the asserts verify that the BeadParams field layout still matches the
// transposed (structure-of-arrays) device layout assumed here.
void SimpleSingleFitStream::resetPointers()
{
_N = _myJob.getNumBeads();
_F = _myJob.getNumFrames();
// NOTE(review): the message prints when checkDeviceMemory() returns false —
// presumably false signals "had to reallocate"; confirm against its definition.
if(!_resource->checkDeviceMemory(getMaxDeviceMem(_myJob.getFlowKey(),_myJob.getFlowBlockSize(), _F,_N )))
cout << getLogHeader() << " Successfully reallocated device memory to handle Job" << endl;
_padN = _myJob.getPaddedN();
if(_verbose) cout << getLogHeader() << " resetting pointers for job with " << _N << "("<< _padN <<") beads and " << _F << " frames" << endl;
try{
//HOST DEVICE buffer pairs, Input and Output groups
_hdFgBuffer = _resource->GetHostDevPair(_myJob.getFgBufferSizeShort(true));
//fg buffers are copied first to overlap async copy with gathering of other input data
_resource->StartNewSegGroup();
_hdBeadParams = _resource->GetHostDevPair(_myJob.getBeadParamsSize(true));
_hdBeadState = _resource->GetHostDevPair( _myJob.getBeadStateSize(true));
//bead Params and State are our outputs. therefore:
_hdCopyOutGroup = _resource->GetCurrentPairGroup();
//do not start new group since outputs are also parts of input group
_hdDarkMatter = _resource->GetHostDevPair(_myJob.getDarkMatterSize(true)); // NUMNUC*F
_hdShiftedBkg = _resource->GetHostDevPair(_myJob.getShiftedBackgroundSize(true)); // flow_block_size*F
_hdEmphVector = _resource->GetHostDevPair(_myJob.getEmphVecSize(true)); // (MAX_POISSON_TABLE_COL)*F
_hdStdTimeCompEmphVec = _resource->GetHostDevPair(_myJob.GetStdTimeCompEmphasisSize(true));
_hdNucRise = _resource->GetHostDevPair(_myJob.getNucRiseSize(true)); // ISIG_SUB_STEPS_SINGLE_FLOW * F * flow_block_size
_hdStdTimeCompNucRise = _resource->GetHostDevPair(_myJob.GetStdTimeCompNucRiseSize(true)); // ISIG_SUB_STEPS_SINGLE_FLOW * F * flow_block_size
//all inputs are grouped now
_hdCopyInGroup = _resource->GetCurrentPairGroup();
//Device Only Memory Segments
_dFgBufferFloat = _resource->getDevSegment(_myJob.getFgBufferSize(true));
_dWorkBase = _resource->getDevSegment(getScratchSpaceAllocSize(_myJob) );
_dBeadParamTransp = _resource->getDevSegment(_myJob.getBeadParamsSize(true));
//std::cout << "Memory used: " << _resource->getDevMem()->memoryUsed() << std::endl;
//additional host pointers for Constant memory init
_hConstP = _resource->getHostSegment(sizeof(ConstParams));
if(_myJob.performCrossTalkCorrection()){
_hConstXtalkP = _resource->getHostSegment(sizeof(ConstXtalkParams));
_hNeiIdxMap = _resource->getHostSegment(_myJob.getXtalkNeiIdxMapSize(true));
}
//Reuse buffers on the device for other stuff ot create pointers to repacked data
// We'll use this BeadParams as a reference to check against.
// If someone tries to rearrange the data structures in BeadParams, we should complain.
// Someday, we ought to access these chunks of data dynamically, and not be dependent on
// BeadParams internals.
// Checking for positive differences (>0) ensures that the fields are in the right order,
// whatever size they happen to be.
BeadParams dummy;
//dev pointer after transpose (Structure of Arrays)
size_t padNB = _padN*sizeof(float);
_dCopies = _dBeadParamTransp; //N
assert( & dummy.R - & dummy.Copies == 1 );
_dR = _dCopies.splitAt(padNB); // N
assert( & dummy.dmult - & dummy.R == 1 );
_dDmult = _dR.splitAt(padNB); // N
assert( & dummy.gain - & dummy.dmult == 1 );
_dGain = _dDmult.splitAt(padNB); // N
assert( dummy.Ampl - & dummy.gain == 1 );
_dAmpl = _dGain.splitAt(padNB); // N * flow_block_size
assert( dummy.kmult - dummy.Ampl > 0 );
_dKmult = _dAmpl.splitAt(padNB*(dummy.kmult - dummy.Ampl)); // N * flow_block_size
assert( dummy.pca_vals - dummy.kmult > 0 );
_dPCA_Vals = _dKmult.splitAt(padNB*(dummy.pca_vals - dummy.kmult)); // N*NUM_DM_PCA
assert( & dummy.tau_adj - dummy.pca_vals == NUM_DM_PCA );
_dTau_Adj = _dPCA_Vals.splitAt(padNB*NUM_DM_PCA); // N
assert( & dummy.phi - & dummy.tau_adj == 1 );
_dPhi = _dTau_Adj.splitAt(padNB); // N
_dPhi.checkSize(padNB); // N
//device scratch space pointers
_davg_trc = _dWorkBase; // NxF
_derr = _davg_trc.splitAt(padNB*_F); // NxF
_dfval = _derr.splitAt(padNB*_F); // NxF
_dtmp_fval = _dfval.splitAt(padNB*_F); // NxF
_djac = _dtmp_fval.splitAt(padNB*_F); // 3*NxF Can be reduced in Taubadjust kernel
_dMeanErr = _djac.splitAt(3*padNB *_F); // N * flow_block_size
_dMeanErr.checkSize(padNB*_myJob.getFlowBlockSize());
// xtalk scratch space pointers
// NOTE: these alias the same _dWorkBase arena as the scratch pointers above;
// xtalk and fitting scratch are not used at the same time.
if(_myJob.performCrossTalkCorrection()){
_dNeiContribution = _dWorkBase;
_dXtalk = _dNeiContribution.splitAt(padNB *_myJob.getNumXtalkNeighbours()*_F);
_dXtalkScratch = _dXtalk.splitAt(padNB*_F);
_dNeiIdxMap = _dXtalkScratch.splitAt(padNB * 3*_F);
_dNeiIdxMap.checkSize(padNB*_myJob.getNumXtalkNeighbours());
}
}
catch(cudaException &e)
{
e.Print();
cout << getLogHeader() << "Encountered Error during Resource Acquisition!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
if(_verbose)cout << getLogHeader() << " " << _resource->Status() << endl;
}
// Pack all job inputs into the page-locked host segments acquired in
// resetPointers(), so that the subsequent copyToDevice() can issue a single
// grouped async transfer. Also fills the ConstParams / ConstXtalkParams
// structures that later get copied into device constant memory.
void SimpleSingleFitStream::serializeInputs()
{
if(_verbose) cout << getLogHeader() <<" serialize data for async global mem copy" << endl;
try{
_hdFgBuffer.copyIn(_myJob.getFgBuffer() ,_myJob.getFgBufferSizeShort());
_hdBeadParams.copyIn(_myJob.getBeadParams(),_myJob.getBeadParamsSize());
_hdBeadState.copyIn(_myJob.getBeadState(),_myJob.getBeadStateSize());
_hdDarkMatter.copyIn(_myJob.getDarkMatter(), _myJob.getDarkMatterSize());
_hdShiftedBkg.copyIn(_myJob.getShiftedBackground(), _myJob.getShiftedBackgroundSize());
_hdEmphVector.copyIn(_myJob.getEmphVec(), _myJob.getEmphVecSize());
_hdNucRise.copyIn(_myJob.getCalculateNucRise(), _myJob.getNucRiseSize());
// a little hacky but we want to fill the structure in page locked memory with data
ConstParams* tmpConstP = _hConstP.getPtr();
//init the reg_param part (all we need from the reg params is non-dynamic)
// relies on ConstParams beginning with a reg_params-compatible layout
reg_params* tmpConstPCastToReg = (reg_params*)tmpConstP;
*(tmpConstPCastToReg) = *(_myJob.getRegionParams()); // use the
// init the rest of the ConstParam buffers
memcpy( tmpConstP->start, _myJob.getStartNuc(), _myJob.getStartNucSize() );
memcpy( tmpConstP->deltaFrames, _myJob.getDeltaFrames(), _myJob.getDeltaFramesSize() );
memcpy( tmpConstP->frameNumber, _myJob.getFrameNumber(), _myJob.getFrameNumberSize() );
memcpy( tmpConstP->flowIdxMap, _myJob.getFlowIdxMap(), _myJob.getFlowIdxMapSize());
memcpy(tmpConstP->non_zero_emphasis_frames, _myJob.GetNonZeroEmphasisFrames(),
_myJob.GetNonZeroEmphasisFramesVecSize());
tmpConstP->useDarkMatterPCA = _myJob.useDarkMatterPCA();
tmpConstP->useRecompressTailRawTrace = (_myJob.performRecompressionTailRawTrace() &&
_myJob.performExpTailFitting());
size_t rC = _myJob.getRegCol();
size_t rR = _myJob.getRegRow();
/*
ImgRegParams irP;
irP.init(_myJob.getImgWidth(),_myJob.getImgHeight(), 216,224,_myJob.getNumFrames());
size_t regId = irP.getRegId(rC,rR);
if(irP.isInRegion(regId, 321928)) tmpConstP->dumpRegion = true;
else tmpConstP->dumRegion = false;
*/
// NOTE(review): ETF frames-per-point is written into std_frames_per_point here,
// and overwritten with GetStdFramesPerPoint() below when recompression is on.
// Verify whether the intended destination was an etf_frames_per_point field.
memcpy(tmpConstP->std_frames_per_point, _myJob.GetETFFramesPerPoint(), _myJob.GetETFFramesPerPointSize());
// for recompressing traces
if (_myJob.performExpTailFitting() && _myJob.performRecompressionTailRawTrace()) {
_hdStdTimeCompEmphVec.copyIn(_myJob.GetStdTimeCompEmphasis(), _myJob.GetStdTimeCompEmphasisSize());
_hdStdTimeCompNucRise.copyIn(_myJob.GetStdTimeCompNucRise(), _myJob.GetStdTimeCompNucRiseSize());
memcpy(tmpConstP->std_frames_per_point, _myJob.GetStdFramesPerPoint(), _myJob.GetStdFramesPerPointSize());
memcpy(tmpConstP->etf_interpolate_frame, _myJob.GetETFInterpolationFrames(),
_myJob.GetETFInterpolationFrameSize());
memcpy(tmpConstP->etf_interpolateMul, _myJob.GetETFInterpolationMul(),
_myJob.GetETFInterpolationMulSize());
memcpy(tmpConstP->deltaFrames_std, _myJob.GetStdTimeCompDeltaFrame(),
_myJob.GetStdTimeCompDeltaFrameSize());
memcpy(tmpConstP->std_non_zero_emphasis_frames,
_myJob.GetNonZeroEmphasisFramesForStdCompression(),
_myJob.GetNonZeroEmphasisFramesVecSize());
}
if(_myJob.performCrossTalkCorrection()) {
// copy neighbor map for xtalk
ConstXtalkParams *tmpConstXtalkP = _hConstXtalkP.getPtr();
tmpConstXtalkP->neis = _myJob.getNumXtalkNeighbours();
memcpy( tmpConstXtalkP->multiplier, _myJob.getXtalkNeiMultiplier(),sizeof(float)*_myJob.getNumXtalkNeighbours());
memcpy( tmpConstXtalkP->tau_top, _myJob.getXtalkNeiTauTop(),sizeof(float)*_myJob.getNumXtalkNeighbours());
memcpy( tmpConstXtalkP->tau_fluid, _myJob.getXtalkNeiTauFluid(),sizeof(float)*_myJob.getNumXtalkNeighbours());
_hNeiIdxMap.copyIn(const_cast<int*>(_myJob.getNeiIdxMapForXtalk()),
sizeof(int)*_myJob.getNumBeads()*_myJob.getNumXtalkNeighbours());
}
/*
if( _myJob.getAbsoluteFlowNum() == 20){
cout << "GPUTracesAndParameters NewLayout("<< _myJob.getImgWidth()<<"," << _myJob.getImgHeight()<<",216,224,"<< _myJob.getMaxFrames() << ");" << endl;
//static GPUTracesAndParameters NewLayout(216,224,216,224,_myJob.getNumFrames());
static GPUTracesAndParameters NewLayout(_myJob.getImgWidth(),_myJob.getImgHeight(),216,224,_myJob.getMaxFrames());
cout << "NewLayout.InitRegion( " << rC << "," << rR << "," << _myJob.getNumBeads() << "," <<
_myJob.getNumFrames() << "," <<
_myJob.getFgBuffer()<< "," <<
_myJob.getBeadParams() << "," <<
"0, 20); " << endl;
NewLayout.InitRegion( rC, rR,
_myJob.getNumBeads(),
_myJob.getNumFrames(),
_myJob.getFgBuffer(),
_myJob.getBeadParams(),
0,
20 //_myJob.GetFlowBlockSize()
);
ImgRegParams irP;
irP.init(_myJob.getImgWidth(),_myJob.getImgHeight(), 216,224,_myJob.getNumFrames());
cout << "region New Id: " << irP.getRegId(rC,rR) << " has " << _myJob.getNumBeads() << " live beads" << endl;
//time_c.time_start, GetTypicalMidNucTime (& (my_regions.rp.nuc_shape)), my_regions.rp.nuc_shape.sigma, MAGIC_OFFSET_FOR_EMPTY_TRACE,
NewLayout.InitRezeroParams(rC, rR, _myJob.getCTimeStart(), _myJob.getTMidNuc(), _myJob.getSigma(), MAGIC_OFFSET_FOR_EMPTY_TRACE, _myJob.getTShift());
NewLayout.InitParamLimits( _myJob.getAmpLowLimit(), _myJob.getkmultLowLimit(), _myJob.getkmultHighLimit(), _myJob.getkmultAdj());
NewLayout.InitConfigParams(_myJob.useDynamicEmphasis(),_myJob.fitkmultAlways());
NewLayout.InitRegionParams(rC, rR,*tmpConstP, _myJob.getNumFrames());
NewLayout.InitRegionBkgTraces(rC, rR,_myJob.getShiftedBackground(), _myJob.getNumFrames());
NewLayout.InitRegionDarkMatter(rC, rR,_myJob.getDarkMatter(), _myJob.getNumFrames());
NewLayout.InitRegionEmphasis(rC, rR,_myJob.getEmphVec(),_myJob.getNumFrames());
NewLayout.InitRegionNucRise(rC, rR,_myJob.getCalculateNucRise(),_myJob.getNumFrames());
NewLayout.initFlowParams( 0 , _myJob.getAbsoluteFlowNum() ,(_myJob.getFlowIdxMap())[0]);
NewLayout.writeToFile("bkgDumpFlow20.dat");
// just dump region that contains out crazy bead with idx = 321928
}
*/
// Debug dumps at the start of each flow block (flow 20 onward): region params,
// empty-trace averages, and optionally raw fg buffers. The dump objects are
// function-local statics shared across all jobs/streams.
if( (_myJob.getAbsoluteFlowNum()%_myJob.getFlowBlockSize()) == 0 ){
if(_myJob.getAbsoluteFlowNum() >= 20){
//static RegParamDumper regDump(_myJob.getImgWidth(),_myJob.getImgHeight(),_myJob.getRegionWidth(), _myJob.getRegionHeight());
//regDump.DumpAtFlow(rC, rR,*tmpConstP,_myJob.getAbsoluteFlowNum());
ImgRegParams irP(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getMaxRegionWidth(),_myJob.getMaxRegionHeight());
static CubePerFlowDump<ConstParams> RegionDump( irP.getGridDimX(), irP.getGridDimY(),1,1,1,1);
RegionDump.setFilePathPrefix("RegionParams");
RegionDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),tmpConstP,_myJob.getAbsoluteFlowNum(),1);
//creates collection of planes per flow with a buffer of uncompressed frames length per region.
//_myJob.setMaxFrames(42);
static CubePerFlowDump<float> planeDump( irP.getGridDimX()*_myJob.getUncompressedFrames(), irP.getGridDimY(),
_myJob.getUncompressedFrames(), 1,1, _myJob.getFlowBlockSize());
//empty trace average;
planeDump.setFilePathPrefix("EmptyTraces");
planeDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getShiftedBackground(),_myJob.getAbsoluteFlowNum(),_myJob.getNumFrames());
/*
static CubePerFlowDump<float> emphasisDump( irP.getGridDimX()*_myJob.getMaxFrames()*MAX_POISSON_TABLE_COL, irP.getGridDimY(),
_myJob.getMaxFrames()*MAX_POISSON_TABLE_COL, 1,1,1);
emphasisDump.setFilePathPrefix("EmphasisDump");
emphasisDump.DumpFlowBlockRegion(irP.getRegId(rC,rR),_myJob.getEmphVec(), _myJob.getAbsoluteFlowNum(), _myJob.getEmphVecSize()/sizeof(float));
*/
#if FGBUFFER_DUMP
static CubePerFlowDump<short> FGDump(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getRegionWidth(),_myJob.getRegionHeight(),_myJob.getImageFrames(), _myJob.getFlowBlockSize());
FGDump.setFilePathPrefix("FgBufferDump");
size_t regId = irP.getRegId(rC,rR);
LayoutCubeWithRegions<short> fgBufferCube(irP.getRegW(regId),irP.getRegH(regId),irP.getRegW(regId),irP.getRegH(regId),_myJob.getNumFrames());
for(int f=0; f<_myJob.getFlowBlockSize(); f++){
TranslateFgBuffer_RegionToCube(fgBufferCube, _myJob.getNumBeads() , _myJob.getNumFrames(), _myJob.getFlowBlockSize(),_myJob.getFgBuffer()+_myJob.getNumFrames()*f,_myJob.getBeadParams());
FGDump.DumpOneFlowRegion(regId,fgBufferCube,0,_myJob.getAbsoluteFlowNum(),f,0,_myJob.getNumFrames());
}
#endif
/*
ImgRegParams iP =regDump.getIP();
for(int flow = 0; flow < 1; flow ++){
cout <<"reg: " << iP.getRegId(rC,rR) <<", " <<flow << ", ";
for(int f = 0; f < _myJob.getNumFrames(); f++)
cout << (_myJob.getShiftedBackground())[flow*_myJob.getNumFrames()+f] << ", ";
cout << endl;
}
*/
}
}
}
catch(cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Input Serialization!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
//////////////////////////
// IMPLEMENTATION OF THE VIRTUAL INTERFACE
// ASYNC CUDA FUNCTIONS, KERNEL EXECUTION AND DATA HANDLING
bool SimpleSingleFitStream::InitJob() {
_myJob.setData(static_cast<BkgModelWorkInfo *>(getJobData()));
return _myJob.ValidJob();
}
// Run the full per-job pipeline in order: CPU preprocessing + buffer setup,
// async host-to-device copy, kernel launches, async device-to-host copy.
// All device work is queued asynchronously on this unit's stream.
void SimpleSingleFitStream::ExecuteJob()
{
prepareInputs();
copyToDevice();
executeKernel();
copyToHost();
}
// Copy fitted bead parameters/state back into the job's original buffers,
// optionally dump per-flow results (flow 20 onward), then hand the job back
// to the CPU queue for the post-fit step. Returns 0 on completion.
int SimpleSingleFitStream::handleResults()
{
if(_verbose) cout << getLogHeader() << " Handling Results" <<endl;
if(_myJob.isSet()){
// for actual pipeline we have to copy the results back into original buffer
try{
_hdBeadParams.copyOut(_myJob.getBeadParams(), _myJob.getBeadParamsSize());
_hdBeadState.copyOut(_myJob.getBeadState(),_myJob.getBeadStateSize());
// Debug dump of per-flow results; the dump object is a function-local
// static shared across all jobs/streams.
if( _myJob.getAbsoluteFlowNum() >= 20){
ImgRegParams irP(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getMaxRegionWidth(),_myJob.getMaxRegionHeight());
size_t rC = _myJob.getRegCol();
size_t rR = _myJob.getRegRow();
size_t regId = irP.getRegId(rC,rR);
static CubePerFlowDump<float> ResultDump(_myJob.getImgWidth(),_myJob.getImgHeight(), _myJob.getMaxRegionWidth(),_myJob.getMaxRegionHeight(), Result_NUM_PARAMS, _myJob.getFlowBlockSize());
ResultDump.setFilePathPrefix("ResultDump");
LayoutCubeWithRegions<float> ResultCube(irP.getRegW(regId),irP.getRegH(regId),irP.getRegW(regId),irP.getRegH(regId),Result_NUM_PARAMS);
for(int f=0; f<_myJob.getFlowBlockSize(); f++){
TranslateResults_RegionToCube(ResultCube, _myJob.getNumBeads() , f, _myJob.getBeadParams());
ResultDump.DumpOneFlowRegion(regId,ResultCube,0,_myJob.getAbsoluteFlowNum(),f);
}
/*
//static GPUTracesAndParameters NewLayout(216,224,216,224,_myJob.getNumFrames());
//static GPUTracesAndParameters NewLayoutResults(_myJob.getImgWidth(),_myJob.getImgHeight(),216,224,_myJob.getMaxFrames());
//Todo: need one Cube per flow
static vector<BeadParamOneFlow> ResultDump(20);
static size_t currBlock = 0;
if(_myJob.getAbsoluteFlowNum() == 40 && currBlock != 40)
for(size_t f =0; f < 20 ; f++)
ResultDump[f].dumpResultsFile(true); //force dump and set dumpAtcounter
currBlock = _myJob.getAbsoluteFlowNum();
for(size_t f =0; f < 20 ; f++){
if(currBlock+f != ResultDump[f].getRealFlow())
ResultDump[f].Init(_myJob.getImgWidth(),_myJob.getImgHeight(),_myJob.getRegionWidth(),_myJob.getRegionHeight());
ResultDump[f].TranslateParamToCube( rC, rR,
_myJob.getNumBeads(),
_myJob.getAbsoluteFlowNum()+f,
_myJob.getBeadParams()
);
ResultDump[f].dumpResultsFile();
}
*/
}
// Requeue: mark the job for the post-fit stage and return it to the CPU worker queue.
_myJob.setJobToPostFitStep();
_myJob.putJobToCPU(_item);
}
catch(cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Result Handling!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
return 0;
}
// Print a human-readable summary of this stream unit's configuration,
// current state, resource status, and job summary.
void SimpleSingleFitStream::printStatus()
{
  const char * resourceLine = _resource->isSet()
      ? " | streamResource acquired successfully"
      : " | streamResource not acquired";
  cout << getLogHeader() << " status: " << endl
       << " +------------------------------" << endl
       << " | block size: " << getBeadsPerBlock() << endl
       << " | l1 setting: " << getL1Setting() << endl
       << " | state: " << _state << endl
       << resourceLine << endl;
  _myJob.printJobSummary();
  cout << " +------------------------------" << endl;
}
///////////////////////////////////////////////////////////////
// Prepare everything needed before the device copy: CPU-side emphasis vector
// setup, buffer segment (re)allocation, and packing of inputs into
// page-locked memory. The three steps must run in this order.
void SimpleSingleFitStream::prepareInputs()
{
//prepare environment for new job
preProcessCpuSteps();
resetPointers();
serializeInputs();
}
// Queue all host-to-device transfers asynchronously on this unit's stream:
// per-stream constant parameters, the fg buffer (first, to overlap with host
// work), the grouped input segments, and — when xtalk correction is on — the
// xtalk constants and neighbor index map.
void SimpleSingleFitStream::copyToDevice()
{
// move data to device
if(_verbose) cout << getLogHeader() << " Async Copy To Device" << endl;
try{
StreamingKernels::copyFittingConstParamAsync(_hConstP.getPtr(), getStreamId() ,_stream);CUDA_ERROR_CHECK();
_hdFgBuffer.copyToDeviceAsync(_stream, _myJob.getFgBufferSizeShort());
_hdCopyInGroup.copyToDeviceAsync(_stream);
// copy xtalk neighbor map
if(_myJob.performCrossTalkCorrection()) {
StreamingKernels::copyXtalkConstParamAsync(_hConstXtalkP.getPtr(), getStreamId() ,_stream);CUDA_ERROR_CHECK();
_dNeiIdxMap.copyAsync(_hNeiIdxMap, _stream, sizeof(int)*_myJob.getNumBeads()*_myJob.getNumXtalkNeighbours());
}
}
catch(cudaException &e)
{
cout << getLogHeader() << "Encountered Error during Copy to device!" << endl;
throw cudaExecutionException(e.getCudaError(),__FILE__,__LINE__);
}
}
// Launch the full single-flow-fit kernel pipeline on this unit's stream:
//  1. transpose short fg traces to float SoA layout,
//  2. transpose BeadParams (AoS) into the SoA device buffer,
//  3. background/xtalk pre-processing,
//  4. optional projection search for initial amplitudes,
//  5. optional exponential tail fitting (+ trace recompression),
//  6. the per-flow fit kernel selected by _fittype,
//  7. transpose fitted parameters back to AoS for the copy-out.
// All launches are asynchronous; ordering is guaranteed by the shared stream.
void SimpleSingleFitStream::executeKernel()
{
if(_verbose) cout << getLogHeader() << " Async Kernel Exec" << endl;
// 2D launch for the transpose kernels: x spans frames*flows, y spans padded beads.
dim3 block(32,32);
dim3 grid( (_F*_myJob.getFlowBlockSize()+ block.x-1)/block.x , (_padN+block.y-1)/block.y);
StreamingKernels::transposeDataToFloat (grid, block, 0 ,_stream,_dFgBufferFloat.getPtr(), _hdFgBuffer.getPtr(), _F*_myJob.getFlowBlockSize(), _padN);
// BeadParams is transposed float-wise; it must therefore be an exact
// multiple of sizeof(float).
int StructLength = (sizeof(BeadParams)/sizeof(float));
if((sizeof(BeadParams)%sizeof(float)) != 0 )
{
cerr << "Structure not a multiple of sizeof(float), transpose not possible" << endl;
exit(-1);
}
grid.x = (StructLength + block.x-1)/block.x ;
grid.y = (_padN+block.y-1)/block.y;
StreamingKernels::transposeData(
grid,
block,
0,
_stream,
(float*)_dBeadParamTransp.getPtr(),
(float*)_hdBeadParams.getPtr(),
StructLength,
_padN);
// From here on: 1D launch, one thread per bead.
block.x = getBeadsPerBlock();
block.y = 1;
grid.y = 1;
grid.x = (_N+block.x-1)/block.x;
// cross talk correction is performed for 3-series chips only
if (_myJob.performCrossTalkCorrection()) {
// One kernel pair per flow in the block: accumulate neighbor
// contributions, then apply xtalk correction to the traces.
for (int fnum=0; fnum<_myJob.getFlowBlockSize(); ++fnum) {
StreamingKernels::NeighbourContributionToXtalk(
grid,
block,
0,
_stream,
_dR.getPtr(), // N
_dCopies.getPtr(), // N
_dPhi.getPtr(), // N
(float*)_hdShiftedBkg.getPtr() + fnum*_F, // FLxF
(float*)_dFgBufferFloat.getPtr() + fnum*_padN*_F, // FLxFxN
_myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
fnum,
_N, // 4
_F, // 4
//xtalk arguments
_dXtalkScratch.getPtr(),
_dNeiContribution.getPtr(),
getStreamId());
StreamingKernels::XtalkAccumulationAndSignalCorrection(
grid,
block,
0,
_stream,
fnum,
(float*)_dFgBufferFloat.getPtr() + fnum*_padN*_F, // FLxFxN
_N, // 4
_F, // 4
_dNeiIdxMap.getPtr(),
_dNeiContribution.getPtr(),
_dXtalk.getPtr(),
_dCopies.getPtr(), // N
_dR.getPtr(), // N
_dPhi.getPtr(), // N
_dGain.getPtr(), // N
(float*)_hdShiftedBkg.getPtr() + fnum*_F,
_hdDarkMatter.getPtr(), // FLxF
_dPCA_Vals.getPtr(),
_myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
getStreamId());
}
}
else {
StreamingKernels::PreSingleFitProcessing( grid, block, 0 , _stream,
// Here FL stands for flows
// inputs from data reorganization
_dCopies.getPtr(), // N
_dR.getPtr(), // N
_dPhi.getPtr(), // N
_dGain.getPtr(), // N
_dAmpl.getPtr(), // FLxN
_hdShiftedBkg.getPtr(), // FLxF
_hdDarkMatter.getPtr(), // FLxF
_dPCA_Vals.getPtr(),
_dFgBufferFloat.getPtr(), // FLxFxN
// other inputs
_myJob.getAbsoluteFlowNum(), // starting flow number to calculate absolute flow num
_N, // 4
_F, // 4
//_myJob.performAlternatingFit(),
false,
getStreamId(),
_myJob.getFlowBlockSize());
}
//fg same!
// perform projection search for amplitude estimation
// (only after the key flows, i.e. flow number > 19)
if ((_myJob.getAbsoluteFlowNum() > 19) && _myJob.InitializeAmplitude()) {
StreamingKernels::ProjectionSearch(
grid,
block,
0,
_stream,
_dFgBufferFloat.getPtr(),
_hdEmphVector.getPtr(),
_hdNucRise.getPtr(),
_dCopies.getPtr(),
_dfval.getPtr(),
_myJob.getAbsoluteFlowNum(),
_N,
_F,
getStreamId(),
_myJob.getFlowBlockSize());
}
//ampl update
// perform exponential tail fitting
if (_myJob.performExpTailFitting()) {
// only done in first 20 flows
if (_myJob.getAbsoluteFlowNum() == 0) {
StreamingKernels::TaubAdjustForExponentialTailFitting(
grid,
block,
0,
_stream,
_dFgBufferFloat.getPtr(), // FLxFxN,
_dAmpl.getPtr(), // FLxN
_dR.getPtr(), // N
_dCopies.getPtr(), // N
_dPhi.getPtr(), // N
_davg_trc.getPtr(),
_dfval.getPtr(),
_dtmp_fval.getPtr(),
_derr.getPtr(),
_djac.getPtr(),
_N,
_F,
_dTau_Adj.getPtr(), // output it is a per bead parameter
getStreamId(),
_myJob.getFlowBlockSize());
}
StreamingKernels::ExponentialTailFitting(
grid,
block,
0,
_stream,
_dTau_Adj.getPtr(),
_dAmpl.getPtr(),
_dR.getPtr(),
_dCopies.getPtr(),
_dPhi.getPtr(), // N
_dFgBufferFloat.getPtr(),
_hdShiftedBkg.getPtr(),
_dtmp_fval.getPtr(),
_N,
_F,
_myJob.getAbsoluteFlowNum(),
getStreamId(),
_myJob.getFlowBlockSize());
if (_myJob.performRecompressionTailRawTrace())
StreamingKernels::RecompressRawTracesForSingleFlowFit(
grid,
block,
0,
_stream,
_dFgBufferFloat.getPtr(),
_dtmp_fval.getPtr(),
_myJob.GetETFStartFrame(),
_F, // exponential tail fit compressed frames
_myJob.GetNumStdCompressedFrames(),
_myJob.getFlowBlockSize(),
_N,
getStreamId());
}
// decide some data buffers based on whether tail need to be recompressed
// or not. Need to refactor
int sharedMem = _myJob.getEmphVecSize();
float* dEmpVec = _hdEmphVector.getPtr();
float* dNucRise = _hdNucRise.getPtr();
int numFrames = _F;
if (_myJob.performExpTailFitting() && _myJob.performRecompressionTailRawTrace()) {
sharedMem = _myJob.GetStdTimeCompEmphasisSize();
dEmpVec = _hdStdTimeCompEmphVec.getPtr();
dNucRise = _hdStdTimeCompNucRise.getPtr();
numFrames = _myJob.GetNumStdCompressedFrames();
}
//std::cout << "====================> Numframes: " << numFrames << std::endl;
// perform single flow fitting
// _fittype selects the solver: 0 GaussNewton (default), 1 LevMar,
// 2 Hybrid (GaussNewton then LevMar), 3 Relaxing-Kmult GaussNewton.
switch(_fittype){
case 1:
StreamingKernels::PerFlowLevMarFit(getL1Setting(), grid, block, sharedMem, _stream,
// inputs
_dFgBufferFloat.getPtr(),
dEmpVec,
dNucRise,
// bead params
_dCopies.getPtr(),
_hdBeadState.getPtr(),
// scratch space in global memory
_derr.getPtr(), //
_dfval.getPtr(), // NxF
_dtmp_fval.getPtr(), // NxF
_dMeanErr.getPtr(),
// other inputs
_myJob.getAmpLowLimit(),
_myJob.getkmultHighLimit(),
_myJob.getkmultLowLimit(),
_myJob.getkmultAdj(),
_myJob.fitkmultAlways(),
_myJob.getAbsoluteFlowNum() , // real flow number
_myJob.getNumBeads(), // 4
numFrames,
_myJob.useDynamicEmphasis(),
getStreamId(), // stream id for offset in const memory
_myJob.getFlowBlockSize()
);
break;
case 2:
StreamingKernels::PerFlowHybridFit(getL1Setting(), grid, block, sharedMem, _stream,
// inputs
_dFgBufferFloat.getPtr(),
dEmpVec,
dNucRise,
// bead params
_dCopies.getPtr(),
_hdBeadState.getPtr(),
// scratch space in global memory
_derr.getPtr(), //
_dfval.getPtr(), // NxF
_dtmp_fval.getPtr(), // NxF
_dMeanErr.getPtr(),
// other inputs
_myJob.getAmpLowLimit(),
_myJob.getkmultHighLimit(),
_myJob.getkmultLowLimit(),
_myJob.getkmultAdj(),
_myJob.fitkmultAlways(),
_myJob.getAbsoluteFlowNum() , // real flow number
_myJob.getNumBeads(), // 4
numFrames,
_myJob.useDynamicEmphasis(),
getStreamId(), // stream id for offset in const memory
3, // switchToLevMar ???
_myJob.getFlowBlockSize()
);
break;
case 3:
StreamingKernels::PerFlowRelaxKmultGaussNewtonFit(getL1Setting(), grid, block, sharedMem, _stream,
// inputs
_dFgBufferFloat.getPtr(),
dEmpVec,
dNucRise,
// bead params
_dCopies.getPtr(),
_hdBeadState.getPtr(),
// scratch space in global memory
_derr.getPtr(), //
_dfval.getPtr(), // NxF
_dtmp_fval.getPtr(), // NxF
_djac.getPtr(), //NxF
_dMeanErr.getPtr(),
// other inputs
_myJob.getAmpLowLimit(),
_myJob.getkmultHighLimit(),
_myJob.getkmultLowLimit(),
_myJob.getkmultAdj(),
_myJob.fitkmultAlways(),
_myJob.getAbsoluteFlowNum() , // real flow number
_myJob.getNumBeads(), // 4
numFrames,
_myJob.useDynamicEmphasis(),
getStreamId(), // stream id for offset in const memory
_myJob.getFlowBlockSize()
);
break;
case 0:
default:
StreamingKernels::PerFlowGaussNewtonFit(getL1Setting(), grid, block, sharedMem, _stream,
// inputs
_dFgBufferFloat.getPtr(),
dEmpVec,
dNucRise,
// bead params
_dCopies.getPtr(),
_hdBeadState.getPtr(),
// scratch space in global memory
_derr.getPtr(), //
_dfval.getPtr(), // NxF
_dtmp_fval.getPtr(), // NxF
_dMeanErr.getPtr(),
// other inputs
_myJob.getAmpLowLimit(),
_myJob.getkmultHighLimit(),
_myJob.getkmultLowLimit(),
_myJob.getkmultAdj(),
_myJob.fitkmultAlways(),
_myJob.getAbsoluteFlowNum() , // real flow number
_myJob.getNumBeads(), // 4
numFrames,
_myJob.useDynamicEmphasis(),
getStreamId(), // stream id for offset in const memory
_myJob.getFlowBlockSize()
);
}
// Transpose the fitted parameters back from SoA to the AoS BeadParams
// layout in the host/device pair buffer (dimensions swapped vs. the
// forward transpose above).
block.x = 32;
block.y = 32;
grid.x = (_padN+block.y-1)/block.y;
grid.y = (StructLength + block.x-1)/block.x;
StreamingKernels::transposeData(
grid,
block,
0,
_stream,
(float*)_hdBeadParams.getPtr(),
(float*)_dBeadParamTransp.getPtr(),
_padN,
StructLength);
}
// Queue the asynchronous device-to-host copy of the output group (bead
// params + bead state). The results are consumed later in handleResults()
// once the stream has completed. The #if 0 section is a retired synchronous
// debug dump kept for reference.
void SimpleSingleFitStream::copyToHost()
{
//cout << getId() << " Async copy back" <<endl;
//cudaMemcpyAsync( _h_pBeadParams, _d_pBeadParams, _copyOutSize , cudaMemcpyDeviceToHost, _stream); CUDA_ERROR_CHECK();
_hdCopyOutGroup.copyToHostAsync(_stream);
#if 0
// To use this, you'll need to tweak JobWrapper.h to make BkgModelWorkInfo * _info public.
cudaMemcpy( _h_pBeadParams, _d_pBeadParams, _copyOutSize , cudaMemcpyDeviceToHost); CUDA_ERROR_CHECK();
ostringstream name;
name << "dumpFile_" << getpid() << "_" << _myJob._info->bkgObj->region_data->region->index;
ofstream out( name.str().c_str() );
out << "N " << _N << "\n";
out << "F " << _F << "\n";
out << "padN " << _padN << "\n";
out << "copyInSize " << _copyInSize << "\n";
out << "copyOutSize " << _copyOutSize << "\n";
out << "host state array: " << _h_pBeadState << "\n";
out << "device state array: " << _d_pBeadState << "\n";
// We've got N BeadParams...
for( size_t i = 0 ; i < _N ; ++i ) {
BeadParams &bp = _h_pBeadParams[i];
out << i << ":\n";
out << " Copies " << bp.Copies << "\n";
out << " R " << bp.R << "\n";
out << " dmult " << bp.dmult << "\n";
out << " gain " << bp.gain << "\n";
out << " Ampl, kmult " << "\n";
for( size_t j = 0 ; j < _myJob.getFlowBlockSize() ; ++j )
{
out << " " << j << ": " << bp.Ampl[j] << " " << bp.kmult[j] << "\n";
}
out << " pca_vals " << "\n";
for( size_t j = 0 ; j < NUM_DM_PCA ; ++j )
{
out << " " << j << ": " << bp.pca_vals[j] << "\n";
}
out << " tau_adj " << bp.tau_adj << "\n";
//out << " my_state (ptr) " << bp.my_state << "\n";
out << " trace_ndx " << bp.trace_ndx << "\n";
out << " x " << bp.x << "\n";
out << " y " << bp.y << "\n";
}
for( size_t i = 0 ; i < _N ; ++i ) {
bead_state & bs = _h_pBeadState[i];
out << "state " << i << ": " << "\n";
out << " bad_read " << bs.bad_read << "\n";
out << " clonal_read " << bs.clonal_read << "\n";
out << " corrupt " << bs.corrupt << "\n";
out << " pinned " << bs.pinned << "\n";
out << " random_samp " << bs.random_samp << "\n";
out << " key_norm " << bs.key_norm << "\n";
out << " ppf " << bs.ppf << "\n";
out << " ssq " << bs.ssq << "\n";
out << " avg_err " << bs.avg_err << "\n";
}
#endif
}
// CPU-side preparation that must happen before launching the GPU fit:
// build the fine emphasis vectors, plus the std-compression variant when
// exponential tail fitting with raw-trace recompression is enabled.
void SimpleSingleFitStream::preProcessCpuSteps()
{
    _myJob.setUpFineEmphasisVectors();
    const bool needStdCompressionVectors =
        _myJob.performExpTailFitting() && _myJob.performRecompressionTailRawTrace();
    if (needStdCompressionVectors)
    {
        _myJob.setUpFineEmphasisVectorsForStdCompression();
    }
}
// Beads-per-block launch parameter; a negative stored value means
// "not configured", in which case the compiled-in default is returned.
int SimpleSingleFitStream::getBeadsPerBlock()
{
    return (_bpb < 0) ? BlockSizeDefaultSetting() : _bpb;
}
// L1/shared-memory cache preference; only 0, 1 and 2 are valid settings,
// anything else falls back to the per-GPU default.
int SimpleSingleFitStream::getL1Setting()
{
    const bool validSetting = (_l1type >= 0) && (_l1type <= 2);
    return validSetting ? _l1type : l1DefaultSetting();
}
/////////////////////////////////////////////////////////////////////////
//static Function
void SimpleSingleFitStream::requestResources( int flow_key, int flow_block_size, float deviceFraction)
{
size_t devAlloc = static_cast<size_t>( deviceFraction *
max( getMaxDeviceMem( flow_key, flow_block_size, 0, 0 ),
getMaxDeviceMem( 0, flow_block_size, 0, 0 ) ) );
size_t hostAlloc = max( getMaxHostMem(flow_key, flow_block_size),
getMaxHostMem(0, flow_block_size) );
cout << "CUDA SingleFitStream active and resources requested dev = "<< devAlloc/(1024.0*1024) << "MB ("<< (int)(deviceFraction*100)<<"%) host = " << hostAlloc/(1024.0*1024) << "MB" <<endl;
cudaResourcePool::requestDeviceMemory(devAlloc);
cudaResourcePool::requestHostMemory(hostAlloc);
}
// Upper bound on pinned host memory needed by one SingleFit job:
// constant parameters plus every per-job host-side staging buffer.
size_t SimpleSingleFitStream::getMaxHostMem(int flow_key, int flow_block_size)
{
    WorkSet Job( flow_key, flow_block_size );
    size_t total = 0;
    // Optional trace-level crosstalk buffers.
    if (GpuMultiFlowFitControl::doGPUTraceLevelXtalk())
    {
        total += Job.padTo128Bytes(sizeof(ConstXtalkParams));
        total += Job.getXtalkNeiIdxMapSize(true);
    }
    total += Job.padTo128Bytes(sizeof(ConstParams));
    total += Job.getFgBufferSizeShort(true);
    total += Job.getBeadParamsSize(true);
    total += Job.getBeadStateSize(true);
    total += Job.getDarkMatterSize(true);
    total += Job.getShiftedBackgroundSize(true);
    total += Job.getEmphVecSize(true);
    total += Job.getNucRiseSize(true);
    total += Job.GetStdTimeCompEmphasisSize(true);
    total += Job.GetStdTimeCompNucRiseSize(true);
    return total;
}
// Size (in bytes) of the per-job device scratch area:
// 7 float buffers of PaddedN x NumFrames, one of PaddedN x FlowBlockSize,
// plus optional crosstalk scratch of MAX_XTALK_NEIGHBOURS x PaddedN x NumFrames.
size_t SimpleSingleFitStream::getScratchSpaceAllocSize(const WorkSet & Job)
{
    auto paddedN = Job.getPaddedN();
    size_t numFloats = 0;
    numFloats += 7 * paddedN * Job.getNumFrames();
    numFloats += 1 * paddedN * Job.getFlowBlockSize();
    if (GpuMultiFlowFitControl::doGPUTraceLevelXtalk())
    {
        numFloats += MAX_XTALK_NEIGHBOURS * paddedN * Job.getNumFrames();
    }
    return numFloats * sizeof(float);
}
// Upper bound on device memory needed by one SingleFit job.  With the default
// (0, 0) the WorkSet's predefined maxFrames/maxBeads are used; positive
// numFrames/numBeads override them for this estimate only.
size_t SimpleSingleFitStream::getMaxDeviceMem( int flow_key, int flow_block_size, int numFrames, int numBeads)
{
WorkSet Job( flow_key, flow_block_size );
// if numFrames/numBeads are passed overwrite the predefined maxFrames/maxBeads
// for the size calculation
if(numFrames >0) Job.setMaxFrames(numFrames);
if(numBeads> 0) Job.setMaxBeads(numBeads);
size_t ret = 0;
// Scratch area plus one copy of every device-side input buffer.
ret = getScratchSpaceAllocSize(Job);
ret += Job.getFgBufferSizeShort(true);
ret += Job.getBeadParamsSize(true);
ret += Job.getBeadStateSize(true);
ret += Job.getDarkMatterSize(true);
ret += Job.getShiftedBackgroundSize(true);
ret += Job.getEmphVecSize(true);
ret += Job.getNucRiseSize(true);
ret += Job.GetStdTimeCompEmphasisSize(true);
ret += Job.GetStdTimeCompNucRiseSize(true);
// NOTE(review): getFgBufferSizeShort() and getBeadParamsSize() are counted a
// second time below -- presumably for additional/transposed copies of those
// buffers (e.g. the transpose target); confirm this is intentional and not
// an accidental duplicate before changing.
ret += Job.getFgBufferSizeShort(true);
ret += Job.getFgBufferSize(true);
ret += Job.getBeadParamsSize(true);
//cout << "getMAxDevice SingleFit N: " << N << "("<< Job.getPaddedN() <<") F: " << F << " ret: " << ret/(1024.0*1024) << endl;
//std::cout << "====> mem for single fit: " << ret << " bytes" << std::endl;
return ret;
}
// Configure the beads-per-block launch parameter (negative selects the default).
void SimpleSingleFitStream::setBeadsPerBlock(int bpb) { _bpb = bpb; }
// Configure the cache preference; values outside [0,2] are ignored.
void SimpleSingleFitStream::setL1Setting(int type) // 0:sm=l1, 1:sm>l1, 2:sm<l1
{
    const bool inRange = (type >= 0) && (type <= 2);
    if (inRange)
    {
        _l1type = type;
    }
}
// Configure the hybrid-fit iteration count.
void SimpleSingleFitStream::setHybridIter(int hybridIter) { _hybriditer = hybridIter; }
// Print the currently configured launch parameters (block size and cache
// preference) to stdout.  Fixes a stray double semicolon and normalises
// the inconsistent stream-insertion spacing of the original.
void SimpleSingleFitStream::printSettings()
{
    cout << "CUDA SingleFitStream SETTINGS: blocksize = " << _bpb << " l1setting = " ;
    switch (_l1type)
    {
        case 0:
            cout << "cudaFuncCachePreferEqual" << endl;
            break;
        case 1:
            cout << "cudaFuncCachePreferShared" << endl;
            break;
        case 2:
            cout << "cudaFuncCachePreferL1" << endl;
            break;
        default:
            cout << "GPU specific default" << endl;
    }
}
// Select the single-flow fit algorithm (no validation is performed here).
void SimpleSingleFitStream::setFitType(int type) // 0:gauss newton, 1:lev mar
{
_fittype = type;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Round-trip test for linear device memory allocated with cudaMalloc():
// fill a host buffer with a known pattern, copy host->device->host and verify,
// then re-verify individual elements with per-element cudaMemcpy() reads.
// Returns the number of errors detected (0 means the test passed).
// Fixes: the original leaked 'host' when cudaMalloc() failed and never
// checked the host malloc() for NULL.
static int test_malloc(bool verbose) {
  int errors = 0;
  const int width = 256;
  const int height = 128;
  float *block = 0;
  printf("test_malloc(%d, %d)\n", width, height);
  size_t bytes = width * height * sizeof(float);
  float *host = (float *)malloc(bytes);
  if (host == NULL) {  // fix: guard against a failed host allocation
    printf("malloc() - failed to allocate %d bytes\n", (int)bytes);
    return ++errors;
  }
  if (cudaMalloc((void **)&block, bytes) != cudaSuccess) {
    printf("cudaMalloc() 0 - failed to allocate %d bytes\n", (int)bytes);
    free(host);  // fix: 'host' was leaked on this early return
    return ++errors;
  }
  // Seed the host buffer with a deterministic pattern in [0, 1).
  for (int j = 0; j < height; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width; i++) {
      ptr[i] = (float)( (i + j * width) % 128 ) / 128.0f;
    }
  }
  if (cudaMemcpy(block, host, bytes, cudaMemcpyHostToDevice) != cudaSuccess) {
    printf("cudaMemcpy() - failed to copy %d bytes to block\n", (int)bytes);
    cudaFree(block);
    free(host);
    return ++errors;
  }
  // Poison the host buffer so stale data cannot mask a failed copy-back.
  for (int i = 0; i < width * height; i++) {
    host[i] = -1;
  }
  if (cudaMemcpy(host, block, bytes, cudaMemcpyDeviceToHost) != cudaSuccess) {
    printf("cudaMemcpy() - failed to copy %d bytes from block\n", (int)bytes);
    cudaFree(block);
    free(host);
    return ++errors;
  }
  // Verify the bulk round trip (report at most 10 mismatches).
  for (int j = 0; j < height && errors < 10; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width && errors < 10; i++) {
      float expected = (float)( (i + j * width) % 128 ) / 128.0f;
      float got = ptr[i];
      if (fabs(expected - got) > 0.001f) {
        printf("ERROR 1: (%d, %d) - expected: %f, got: %f\n",
               i, j, expected, got);
        ++errors;
      }
    }
  }
  if (errors) {
    cudaFree(block);
    free(host);
    return errors;
  }
  // now use cudaMemcpy() to select individual elements by offsets and reverify
  for (int j = 0; j < height && errors < 5; j++) {
    for (int i = 0; i < width && errors < 5; i++) {
      float x = -1;
      float expected = (float)( (i + j * width) % 128 ) / 128.0f;
      if (cudaMemcpy(&x, block + i + j * width, sizeof(float),
                     cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("FAILED to cudaMemcpy() on element (%d, %d)\n", i, j);
        cudaFree(block);
        free(host);
        printf("FAILED\n");
        return errors;
      }
      if (fabs(x - expected) > 0.0001f) {
        ++errors;
        printf("ERROR 2: (%d, %d) - expected: %f, got: %f\n", i, j, expected, x);
        if (errors > 10) {
          cudaFree(block);
          free(host);
          printf("FAILED\n");
          return errors;
        }
      }
    }
  }
  if (verbose) {
    printf("%s\n", (errors ? "FAILED" : "PASSED"));
    fflush(stdout);
  }
  cudaFree(block);
  free(host);
  return errors;
}
// Round-trip test for CUDA arrays: host -> array -> host, per-element reads
// via cudaMemcpyFromArray(), then an array-to-array copy into a larger array
// and a re-verification of the copied region.  Returns the error count.
// Fixes vs. original: missing 'return' after the array2 allocation failure
// (the code fell through and used the null array2), missing 'return' after
// the final copy-back failure (stale host data was then "verified"), array2
// leaked on the success path, host buffer leaked on the first failure path,
// and inconsistent -1 returns replaced by the error count.
// NOTE(review): cudaMemcpyToArray/FromArray/ArrayToArray are deprecated in
// modern CUDA; the wOffset arguments are in bytes -- the raw 'width'/'height'
// offsets used for the array2 copies are self-consistent between write and
// read-back, but confirm they address the intended region.
static int test_mallocArray(bool verbose) {
  int errors = 0;
  struct cudaArray *array = 0;
  struct cudaArray *array2 = 0;
  const int width = 256;
  const int height = 128;
  size_t bytes = width * height * sizeof(float);
  printf("test_mallocArray(%d, %d)\n", width, height);
  float *host = (float *)malloc(bytes);
  if (host == NULL) {  // fix: guard against a failed host allocation
    printf("malloc() - failed to allocate %d bytes\n", (int)bytes);
    return ++errors;
  }
  // One 32-bit float channel per element.
  struct cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
  if (cudaMallocArray(&array, &desc, width, height) != cudaSuccess) {
    printf("cudaMallocArray() 0 - failed to allocate %d bytes\n", (int)bytes);
    printf(" error: %s\n", cudaGetErrorString(cudaGetLastError()));
    free(host);  // fix: 'host' was leaked on this early return
    return ++errors;
  }
  // Seed the host buffer with a deterministic pattern in [0, 1).
  for (int j = 0; j < height; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width; i++) {
      ptr[i] = (float)( (i + j * width) % 128 ) / 128.0f;
    }
  }
  if (cudaMemcpyToArray(array, 0, 0, host, bytes, cudaMemcpyHostToDevice) !=
      cudaSuccess) {
    printf("cudaMemcpyToArray() - failed to copy %d bytes to array\n", (int)bytes);
    cudaFreeArray(array);
    free(host);
    return ++errors;
  }
  // Poison the host buffer so stale data cannot mask a failed copy-back.
  for (int i = 0; i < width * height; i++) {
    host[i] = -1;
  }
  if (cudaMemcpyFromArray(host, array, 0, 0, bytes, cudaMemcpyDeviceToHost) !=
      cudaSuccess) {
    printf("cudaMemcpyFromArray() - failed to copy %d bytes from array\n", (int)bytes);
    cudaFreeArray(array);
    free(host);
    return ++errors;
  }
  // Verify the bulk round trip (report at most 10 mismatches).
  for (int j = 0; j < height && errors < 10; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width && errors < 10; i++) {
      float expected = (float)( (i + j * width) % 128 ) / 128.0f;
      float got = ptr[i];
      if (fabs(expected - got) > 0.001f) {
        printf("ERROR 1: (%d, %d) - expected: %f, got: %f\n",
               i, j, expected, got);
        ++errors;
      }
    }
  }
  // now use cudaMemcpyFromArray() to select individual elements by offsets and reverify
  for (int j = 0; j < height && errors < 5; j++) {
    for (int i = 0; i < width && errors < 5; i++) {
      float x = -1;
      float expected = (float)( (i + j * width) % 128 ) / 128.0f;
      if (cudaMemcpyFromArray(&x, array, i*sizeof(float), j, sizeof(float),
                              cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("FAILED to memcpyFromArray(%d, %d)\n", i, j);
        cudaFreeArray(array);
        free(host);
        printf("FAILED\n");
        return ++errors;  // fix: was 'return -1', inconsistent with the error-count contract
      }
      if (fabs(x - expected) > 0.0001f) {
        ++errors;
        printf("ERROR 0: (%d, %d) - expected: %f, got: %f\n", i, j, expected, x);
        if (errors > 10) {
          cudaFreeArray(array);
          free(host);
          printf("FAILED\n");
          return errors;  // fix: was 'return -1'
        }
      }
    }
  }
  // Allocate a larger array and copy the data into it at offset (width, height).
  if (cudaMallocArray(&array2, &desc, width*2, height*2) != cudaSuccess) {
    printf("cudaMallocArray() 1 - failed to allocate array2\n");
    cudaFreeArray(array);
    free(host);
    return ++errors;  // fix: previously fell through and used the null array2
  }
  for (int i = 0; i < width * height; i++) {
    host[i] = -1;
  }
  if (cudaMemcpyArrayToArray(array2, width, height, array, 0, 0, bytes,
                             cudaMemcpyDeviceToDevice) != cudaSuccess) {
    printf("cudaMemcpyArrayToArray() - failed\n");
    cudaFreeArray(array2);
    cudaFreeArray(array);
    free(host);
    return ++errors;
  }
  if (cudaMemcpyFromArray(host, array2, width, height, bytes, cudaMemcpyDeviceToHost) !=
      cudaSuccess) {
    printf("cudaMemcpyFromArray(host, array2) - failed\n");
    cudaFreeArray(array2);
    cudaFreeArray(array);
    free(host);
    return ++errors;  // fix: previously continued and verified stale host data
  }
  printf("checking results from last cudaMemcpyFromArray\n"); fflush(stdout);
  for (int j = 0; j < height && errors < 10; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width && errors < 10; i++) {
      float expected = (float)( (i + j * width) % 128 ) / 128.0f;
      float got = ptr[i];
      if (fabs(expected - got) > 0.001f) {
        printf("ERROR 2: (%d, %d) - expected: %f, got: %f\n",
               i, j, expected, got);
        ++errors;
      }
    }
  }
  if (verbose) {
    printf("%s\n", (errors ? "FAILED" : "PASSED"));
    fflush(stdout);
  }
  cudaFreeArray(array2);  // fix: array2 was leaked on the success path
  cudaFreeArray(array);
  free(host);
  return errors;
}
// Round-trip test for pitched device memory: host -> pitched buffer -> host
// via cudaMemcpy2D(), then a device-to-device copy between two buffers with
// (potentially) different pitches and a final read-back verification.
// Returns the number of errors detected (0 means the test passed).
// Fixes vs. original: host malloc() is NULL-checked (freeing gpu0 on that
// path), and the early-exit after verification returns the true error count
// instead of 'return ++errors', which inflated it by one.
static int test_mallocPitch(bool verbose) {
  int errors = 0;
  const int width = 125;
  const int height = 128;
  size_t pitch;
  if (verbose) { printf("[1] mallocing pitch\n"); fflush(stdout); }
  float *gpu0 = 0;
  if (cudaMallocPitch((void **)&gpu0, &pitch, sizeof(float)*width, height) != cudaSuccess) {
    printf("cudaMallocPitch() 0 - failed to allocate %d x %d on device\n", width, height);
    return ++errors;
  }
  size_t bytes = width * height * sizeof(float);
  float *host = (float *)malloc(bytes);
  if (host == NULL) {  // fix: guard against a failed host allocation
    printf("malloc() - failed to allocate %d bytes\n", (int)bytes);
    cudaFree(gpu0);
    return ++errors;
  }
  // Seed the host buffer with a deterministic, row-independent pattern.
  for (int j = 0; j < height; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width; i++) {
      ptr[i] = (float)(i % 128) / 64.0f + 2.0f;
    }
  }
  if (verbose) { printf("[2] memcpying2d\n"); fflush(stdout); }
  if (cudaMemcpy2D(gpu0, pitch, host, width * sizeof(float), sizeof(float)*width, height,
                   cudaMemcpyHostToDevice) != cudaSuccess) {
    printf("cudaMemcpy2D() 0 - failed to copy %d x %d matrix to device\n", width, height);
    free(host);
    cudaFree(gpu0);
    return ++errors;
  }
  // Poison the host buffer so stale data cannot mask a failed copy-back.
  for (int i = 0; i < width * height; i++) {
    host[i] = -1;
  }
  if (verbose) { printf("[3] memcpying\n"); fflush(stdout); }
  if (cudaMemcpy2D(host, sizeof(float) * width, gpu0, pitch, sizeof(float)*width, height,
                   cudaMemcpyDeviceToHost) != cudaSuccess) {
    printf("cudaMemcpy2D() 1 - failed to copy %d x %d matrix to host\n", width, height);
    free(host);
    cudaFree(gpu0);
    return ++errors;
  }
  for (int j = 0; j < height && errors < 5; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width && errors < 5; i++) {
      float got = ptr[i];
      float expected = (float)(i % 128) / 64.0f + 2.0f;
      if (fabs(got - expected) > 0.001f) {
        printf("ERROR 0 (%d, %d) - expected: %f, got: %f\n", i, j, expected, got);
        ++errors;
      }
    }
  }
  if (verbose) { printf("[4] checking for errors\n"); fflush(stdout); }
  if (errors) {
    cudaFree(gpu0);
    free(host);
    return errors;  // fix: was 'return ++errors', inflating the count
  }
  if (verbose) { printf("[5] mallocing\n"); fflush(stdout); }
  // now copy from device to device with potentially different pitch
  float *gpu1 = 0;
  size_t pitch1 = 0;
  if (cudaMallocPitch( (void **)&gpu1, &pitch1, sizeof(float)*(width+1), height) != cudaSuccess) {
    cudaFree(gpu0);
    free(host);
    printf("cudaMallocPitch() 1 - failed to allocate on device\n");
    return ++errors;
  }
  if (verbose) { printf("[6] memcpying\n"); fflush(stdout); }
  if (cudaMemcpy2D(gpu1, pitch1, gpu0, pitch, sizeof(float)*width, height,
                   cudaMemcpyDeviceToDevice) != cudaSuccess) {
    cudaFree(gpu0);
    cudaFree(gpu1);
    free(host);
    printf("cudaMemcpy2D() 2 - failed to copy from buffer with pitch %d to buffer with pitch %d\n",
           (int)pitch, (int)pitch1);
    return ++errors;
  }
  for (int i = 0; i < width * height; i++) {
    host[i] = -1;
  }
  if (verbose) { printf("[7] memcpying\n"); fflush(stdout); }
  if (cudaMemcpy2D(host, sizeof(float)*width, gpu1, pitch1, sizeof(float)*width, height,
                   cudaMemcpyDeviceToHost) != cudaSuccess) {
    cudaFree(gpu0);
    cudaFree(gpu1);
    free(host);
    printf("cudaMemcpy2D() 3 - failed to copy from buffer with pitch %d to buffer with pitch %d\n",
           (int)pitch, (int)(sizeof(float)*width));
    return ++errors;
  }
  for (int j = 0; j < height && errors < 5; j++) {
    float *ptr = &host[j * width];
    for (int i = 0; i < width && errors < 5; i++) {
      float got = ptr[i];
      float expected = (float)(i % 128) / 64.0f + 2.0f;
      if (fabs(got - expected) > 0.001f) {
        printf("ERROR 0 (%d, %d) - expected: %f, got: %f\n", i, j, expected, got);
        ++errors;
      }
    }
  }
  if (verbose) { printf("[8] final free\n"); fflush(stdout); }
  cudaFree(gpu0);
  cudaFree(gpu1);
  free(host);
  return errors;
}
// Placeholder: 2D allocation tests are not implemented yet; always passes.
static int test_malloc2d(bool verbose) {
  (void)verbose;  // unused until the test is implemented
  return 0;       // zero errors
}
// Placeholder: 3D allocation tests are not implemented yet; always passes.
static int test_malloc3d(bool verbose) {
  (void)verbose;  // unused until the test is implemented
  return 0;       // zero errors
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Driver: run each allocation test in sequence, stopping after the first
// test that reports errors.  Prints an overall Pass/Fail line and exits
// with the negated error count (0 on success, nonzero on failure).
int main(int argc, char *arg[]) {
  bool verbose = true;
  int errors = test_malloc(verbose);
  if (errors == 0) errors += test_mallocArray(verbose);
  if (errors == 0) errors += test_mallocPitch(verbose);
  if (errors == 0) errors += test_malloc2d(verbose);
  if (errors == 0) errors += test_malloc3d(verbose);
  printf("Pass/Fail : %s\n", (errors ? "Fail" : "Pass"));
  return -errors;
}
///////////////////////////////////////////////////////////////////////////////////////////////// | the_stack |
#include "amgx_types/util.h"
#define USE_EXPERIMENTAL_FACTOR_COPY
// #define USE_EXPERIMENTAL_NLARGEST
namespace amgx
{
// Deterministic pseudo-random value in [0, 1] derived from an integer key:
// a Jenkins-style integer avalanche mixer normalised by UINT_MAX.
__host__ __device__ __forceinline__
double ourHash2(const int i)
{
    unsigned int a = i;
    a = (a + 0x7ed55d16) + (a << 12);
    a = (a ^ 0xc761c23c) + (a >> 19);
    a = (a + 0x165667b1) + (a << 5);
    a = (a ^ 0xd3a2646c) + (a << 9);
    a = (a + 0xfd7046c5) + (a << 3);
    a = (a ^ 0xb55a4f09) + (a >> 16);
    return (a ^ 0x4a51e590) / (double) UINT_MAX;
}
// Predicate for thrust copy_if-style truncation over (row, col, value)
// tuples: an entry is kept when |value| >= _trunc_factor * _metric_arr[row].
template <typename IndexType, typename metricType, typename matrixType>
struct copy_pred
{
double _trunc_factor;      // relative truncation threshold
metricType *_metric_arr;   // per-row metric, indexed by the tuple's row (slot 0)
copy_pred() : _trunc_factor(0), _metric_arr(NULL) {};
copy_pred(const double trunc_factor, metricType *metric_arr) : _trunc_factor(trunc_factor),
_metric_arr(metric_arr) {};
// Returns true when the entry survives truncation.
// Tuple layout: <0> = row index, <1> = column index, <2> = matrix value.
__host__ __device__
bool operator()(const thrust::tuple<IndexType, IndexType, matrixType> &a)
{
metricType metric = _metric_arr[thrust::get<0>(a)];
if ( types::util<matrixType>::abs(thrust::get<2>(a)) >= _trunc_factor * metric) { return true; }
return false;
}
// Member-wise copy assignment (same as the implicit one; kept for clarity).
copy_pred<IndexType, metricType, matrixType> &operator=(const copy_pred<IndexType, metricType, matrixType> &a)
{
this->_trunc_factor = a._trunc_factor;
this->_metric_arr = a._metric_arr;
return *this;
}
};
// Transform functor: scales the value of a (row, value) tuple by
// scale_vec[row], leaving the row index unchanged.
template <typename IndexType, typename VectorType, typename MatrixType>
struct scale_op
{
const VectorType *scale_vec;  // per-row scale factors
scale_op(const VectorType *s) : scale_vec(s) {};
__host__ __device__
thrust::tuple<IndexType, MatrixType> operator()(const thrust::tuple<IndexType, MatrixType> &a)
{
const IndexType row = thrust::get<0>(a);
return thrust::tuple<IndexType, MatrixType>(row, thrust::get<1>(a) * scale_vec[row]);
}
};
// Rescale each matrix entry so its row keeps the original row sum:
// values[j] *= old_row_sums[row] / new_row_sums[row] for the entry's row.
// Rows whose new sum has zero magnitude are left unscaled.  Grid-stride
// loop over all 'size' stored entries; NaN results are reported via printf.
template <typename index_type, typename value_type>
__global__
void scale_kernel(const index_type *row_indices, value_type *values, const value_type *new_row_sums, const value_type *old_row_sums, const int size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < size; idx += stride)
    {
        const int row = row_indices[idx];
        const value_type newSum = new_row_sums[row];
        value_type factor = types::util<value_type>::get_one();
        if (types::util<value_type>::abs(newSum) != 0.)
        {
            factor = old_row_sums[row] / newSum;
        }
        values[idx] = values[idx] * factor;
        // NaN is the only value that compares unequal to itself.
        if (values[idx] != values[idx]) { printf("%d (%d): NaN after scaling\n", idx, row); }
    }
}
// Record each row's truncated length: row_lengths[r] = min(nnz(r), max_elmts).
// Grid-stride loop over rows.
template <typename IndexType>
__global__
void get_row_lengths_kernel(const IndexType *offsets, const IndexType num_rows, IndexType *row_lengths,
                            const int max_elmts)
{
    const int stride = blockDim.x * gridDim.x;
    for (int r = threadIdx.x + blockIdx.x * blockDim.x; r < num_rows; r += stride)
    {
        row_lengths[r] = min(offsets[r + 1] - offsets[r], max_elmts);
    }
}
// In-place bubble sort of paired (indices, values) arrays by decreasing
// |value|: after the call the largest-magnitude element sits at position 0.
// The 'upper' bound shrinks to the position of the last swap each pass.
template <typename IndexType, typename ValueType>
__device__
void sortByFabs(IndexType *indices, ValueType *values, int elements)
{
    int upper = elements;
    while (upper > 0)
    {
        int lastSwap = 0;
        for (int i = 1; i < upper; i++)
        {
            // Swap neighbours that are out of descending-magnitude order.
            if (types::util<ValueType>::abs(values[i - 1]) < types::util<ValueType>::abs(values[i]))
            {
                ValueType tv = values[i - 1];
                values[i - 1] = values[i];
                values[i] = tv;
                IndexType ti = indices[i - 1];
                indices[i - 1] = indices[i];
                indices[i] = ti;
                lastSwap = i;
            }
        }
        upper = lastSwap;
    }
}
#ifdef USE_EXPERIMENTAL_NLARGEST
// Insert value S (with column index I) into the descending top-N list held
// in shared memory: Mv = values, Mi = indices, T = per-slot scratch flags.
// One thread per list slot ('thread' is the slot position); entries smaller
// than S shift down one slot and S lands in the freed position.
// NOTE(review): correctness relies on warp-lockstep execution over these
// volatile shared arrays (no explicit sync between the shift and the
// insert); this implicit synchrony is not guaranteed under independent
// thread scheduling (Volta+) -- confirm the target architecture before
// enabling USE_EXPERIMENTAL_NLARGEST.
template <typename IndexType, typename ValueType>
__device__
void insert_new_max(volatile ValueType *Mv, volatile IndexType *Mi, volatile int *T, const ValueType S,
const IndexType I, const int thread, const int N)
{
if (thread < N)
{
T[thread] = 1;
// new value larger - shift my value down
if (Mv[thread] < S)
{
T[thread] = 0;
Mv[thread + 1] = Mv[thread];
Mi[thread + 1] = Mi[thread];
T[thread + 1] = 1;
}
// insert new largest value here
if (!T[thread])
{
Mv[thread] = S;
Mi[thread] = I;
// inserted to me -- note to zero out
}
}
}
// Experimental fused truncate-and-scale kernel (dead code unless
// USE_EXPERIMENTAL_NLARGEST is defined): for each row, find the max_elmts
// largest-magnitude entries via repeated warp max-reductions into a shared
// top-N list (insert_new_max), rescale the survivors so the truncated row
// keeps the original row sum, and write them to the pre-sized output At.
// One vector of THREADS_PER_VECTOR threads processes one row.
// NOTE(review): like insert_new_max, this relies on implicit warp-lockstep
// execution over volatile shared memory (no __syncwarp between reduction
// steps) and is only safe on pre-Volta scheduling; also note the fabs()
// calls assume a real-valued ValueType.
template <typename IndexType, typename ValueType, typename VecValueType,
int BLOCKSIZE, int THREADS_PER_VECTOR>
__global__
void truncateNandScale_kernel(const IndexType *A_offsets, const IndexType *A_indices, const ValueType *A_values,
const IndexType num_rows,
const IndexType *At_offsets, IndexType *At_indices, ValueType *At_values,
const int max_elmts, const VecValueType *old_row_sums)
{
const int WARP_SIZE = 32;
const int VECTORS_PER_WARP = WARP_SIZE / THREADS_PER_VECTOR;
const int WARPS_PER_BLOCK = BLOCKSIZE / WARP_SIZE;
const int N_LARGEST_SIZE = (THREADS_PER_VECTOR + 1) * VECTORS_PER_WARP * WARPS_PER_BLOCK;
// Per-vector top-N lists (one extra slot per vector for the shift-down).
__shared__ volatile int T[N_LARGEST_SIZE]; // has M array been modified
__shared__ volatile ValueType Mv[N_LARGEST_SIZE]; // maximum values
__shared__ volatile int Mi[N_LARGEST_SIZE]; // maximum indices
__shared__ volatile ValueType S[WARP_SIZE * WARPS_PER_BLOCK]; // temporary holding for currently read values
__shared__ volatile IndexType I[WARP_SIZE * WARPS_PER_BLOCK]; // -- " -- indices
ValueType S_copy;
IndexType I_copy;
__shared__ volatile int Ap[WARPS_PER_BLOCK * VECTORS_PER_WARP][2]; // store row offset pointers
__shared__ volatile ValueType old_row_sum[WARPS_PER_BLOCK * VECTORS_PER_WARP];
const int num_vectors = blockDim.x / THREADS_PER_VECTOR * gridDim.x;
const int vector_id = (threadIdx.x + blockIdx.x * blockDim.x) / THREADS_PER_VECTOR;
const int thread_lane = threadIdx.x & (THREADS_PER_VECTOR - 1);
const int vector_lane = threadIdx.x / THREADS_PER_VECTOR;
// initialise relevant shared memory arrays
for (int i = threadIdx.x; i < N_LARGEST_SIZE; i += blockDim.x)
{
Mv[i] = 0.;
Mi[i] = -1;
T[i] = 0;
}
__syncthreads();
// outer loop over rows
for (int row = vector_id; row < num_rows; row += num_vectors)
{
// load row offsets into shared memory
if (thread_lane < 2)
{
Ap[vector_lane][thread_lane] = A_offsets[row + thread_lane];
}
const int row_start = Ap[vector_lane][0];
const int row_end = Ap[vector_lane][1];
const int row_len = row_end - row_start;
const int max_elmts_row = min(At_offsets[row + 1] - At_offsets[row], max_elmts);
// degenerate case -- simply copy over entire row
if (row_len <= max_elmts && row_len > 0 && thread_lane < row_len)
{
At_indices[At_offsets[row] + thread_lane] = A_indices[row_start + thread_lane];
At_values[At_offsets[row] + thread_lane] = A_values[row_start + thread_lane];
continue;
}
// loop over chunks of the row
const int nloops = (row_len < THREADS_PER_VECTOR) ? 1 : (int)ceil((float)row_len / THREADS_PER_VECTOR);
for (int i = 0; i < nloops; i++)
{
// zero out temp shared memory
S[threadIdx.x] = 0;
I[threadIdx.x] = -2;
// read chunk into shared memory (magnitudes only)
if (i * THREADS_PER_VECTOR + thread_lane < row_len)
{
S[threadIdx.x] = fabs(A_values[row_start + i * THREADS_PER_VECTOR + thread_lane]);
I[threadIdx.x] = A_indices[row_start + i * THREADS_PER_VECTOR + thread_lane];
}
// loop over n' max elements
for (int j = 0; j < max_elmts_row; j++)
{
// first take a copy of this section of the row so we don't re-read it
S_copy = S[threadIdx.x];
I_copy = I[threadIdx.x];
// local max & associated index (save volatile loads)
ValueType max = S_copy; // S[threadIdx.x];
IndexType ind = I_copy; // I[threadIdx.x];
// reduce to get max (warp-synchronous tree reduction)
if (THREADS_PER_VECTOR > 16)
{
I[threadIdx.x] = ind = max > S[threadIdx.x + 16] ? ind : I[threadIdx.x + 16];
S[threadIdx.x] = max = max > S[threadIdx.x + 16] ? max : S[threadIdx.x + 16];
}
if (THREADS_PER_VECTOR > 8)
{
I[threadIdx.x] = ind = max > S[threadIdx.x + 8] ? ind : I[threadIdx.x + 8];
S[threadIdx.x] = max = max > S[threadIdx.x + 8] ? max : S[threadIdx.x + 8];
}
if (THREADS_PER_VECTOR > 4)
{
I[threadIdx.x] = ind = max > S[threadIdx.x + 4] ? ind : I[threadIdx.x + 4];
S[threadIdx.x] = max = max > S[threadIdx.x + 4] ? max : S[threadIdx.x + 4];
}
if (THREADS_PER_VECTOR > 2)
{
I[threadIdx.x] = ind = max > S[threadIdx.x + 2] ? ind : I[threadIdx.x + 2];
S[threadIdx.x] = max = max > S[threadIdx.x + 2] ? max : S[threadIdx.x + 2];
}
if (THREADS_PER_VECTOR > 1)
{
I[threadIdx.x] = ind = max > S[threadIdx.x + 1] ? ind : I[threadIdx.x + 1];
S[threadIdx.x] = max = max > S[threadIdx.x + 1] ? max : S[threadIdx.x + 1];
}
// maximum now in S_copy[vector_lane*32] -- insert into maximum array
const int sm_offset = vector_lane * (THREADS_PER_VECTOR + 1);
insert_new_max(&Mv[sm_offset], &Mi[sm_offset], &T[sm_offset],
S[vector_lane * THREADS_PER_VECTOR], I[vector_lane * THREADS_PER_VECTOR], thread_lane,
max_elmts);
// zero the just-extracted element so it is not selected again
S[threadIdx.x] = (I[vector_lane * THREADS_PER_VECTOR] == I_copy) ? 0 : S_copy;
I[threadIdx.x] = I_copy;
}
}
// get scaling factor for new row
// copy Mv -> S
S[threadIdx.x] = 0; // intialise unneeded S array to 0
if (thread_lane < max_elmts_row)
{
S[threadIdx.x] = Mv[vector_lane * (THREADS_PER_VECTOR + 1) + thread_lane];
}
// reduce on S (sum of kept magnitudes)
ValueType sum = fabs(S[threadIdx.x]);
if (THREADS_PER_VECTOR > 16) { S[threadIdx.x] = sum = sum + S[threadIdx.x + 16]; }
if (THREADS_PER_VECTOR > 8) { S[threadIdx.x] = sum = sum + S[threadIdx.x + 8]; }
if (THREADS_PER_VECTOR > 4) { S[threadIdx.x] = sum = sum + S[threadIdx.x + 4]; }
if (THREADS_PER_VECTOR > 2) { S[threadIdx.x] = sum = sum + S[threadIdx.x + 2]; }
if (THREADS_PER_VECTOR > 1) { S[threadIdx.x] = sum = sum + S[threadIdx.x + 1]; }
const ValueType scale = old_row_sums[row] / S[vector_lane * THREADS_PER_VECTOR];
// copy final scaled results into output buffer
if (thread_lane < max_elmts_row)
{
At_values[At_offsets[row] + thread_lane] = Mv[vector_lane * (THREADS_PER_VECTOR + 1) + thread_lane] * scale;
At_indices[At_offsets[row] + thread_lane] = Mi[vector_lane * (THREADS_PER_VECTOR + 1) + thread_lane];
}
}
}
#else
// In-place bubble sort of paired (indices, values) arrays by ascending
// column index.  The 'upper' bound shrinks to the last swap each pass.
template <typename IndexType, typename ValueType>
__device__
void sortByIndex(IndexType *indices, ValueType *values, int elements)
{
    int upper = elements;
    while (upper > 0)
    {
        int lastSwap = 0;
        for (int i = 1; i < upper; i++)
        {
            if (indices[i - 1] > indices[i])
            {
                IndexType ti = indices[i - 1];
                indices[i - 1] = indices[i];
                indices[i] = ti;
                ValueType tv = values[i - 1];
                values[i - 1] = values[i];
                values[i] = tv;
                lastSwap = i;
            }
        }
        upper = lastSwap;
    }
}
// Compute the sum of each row's stored values into row_sums, optionally
// recording the truncated row length min(nnz, max_elmts) into row_lengths.
// Rows containing NaNs are reported via printf.  One thread per row,
// grid-stride over num_rows.
template <typename IndexType, typename ValueType>
__global__
void row_sum_kernel(const IndexType *A_offsets, const ValueType *A_values,
                    ValueType *row_sums, const IndexType num_rows, IndexType *row_lengths, const int max_elmts, bool compute_row_length)
{
    const int stride = blockDim.x * gridDim.x;
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows; row += stride)
    {
        const IndexType begin = A_offsets[row];
        const IndexType end = A_offsets[row + 1];
        if (compute_row_length)
        {
            row_lengths[row] = min(end - begin, max_elmts);
        }
        ValueType sum = 0.;
        bool has_nan = false;
        for (int j = begin; j < end; j++)
        {
            sum += A_values[j];
            // NaN is the only value that compares unequal to itself.
            if (A_values[j] != A_values[j]) { has_nan = true; }
        }
        if (has_nan)
        {
            printf("row %d has NaNs in input\n", row);
        }
        row_sums[row] = sum;
    }
}
// For each row of A, copy the max_elmts largest-magnitude entries into the
// pre-sized truncated matrix (trunc_*).  Each thread owns one whole row and
// maintains its candidate set in shared memory in descending-|value| order
// (seeded by sortByFabs, then insertion against the remaining entries).
// NOTE(review): the shared arrays are fixed at 1024 entries while
// smem_start = threadIdx.x * max_elmts, so launches must guarantee
// blockDim.x * max_elmts <= 1024 (the comment below suggests <= 256 threads,
// i.e. max_elmts <= 4); confirm callers enforce this.
template <typename IndexType, typename ValueType>
__global__
void __launch_bounds__(256)
truncate_kernel(const IndexType *A_offsets, const IndexType *A_col_indices, const ValueType *A_values,
const IndexType num_rows,
IndexType *trunc_offsets, IndexType *trunc_col_indices, ValueType *trunc_values,
const int max_elmts)
{
// const int tidx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ IndexType s_ind[1024]; // assume <= 256 threads per block
__shared__ ValueType s_val[1024];
const int smem_start = threadIdx.x * max_elmts;
// if (tidx < num_rows)
for (int tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < num_rows; tidx += blockDim.x * gridDim.x)
{
IndexType A_start = A_offsets[tidx], A_end = A_offsets[tidx + 1];
IndexType trunc_start = trunc_offsets[tidx];
IndexType A_len = A_end - A_start;
// copy my entire row if possible
if (A_len <= max_elmts)
{
for (int j = 0; j < A_len; j++)
{
trunc_col_indices[trunc_start + j] = A_col_indices[A_start + j];
trunc_values[trunc_start + j] = A_values[A_start + j];
}
}
else // normal case
{
// read first max_elmts elements into shared
for (int j = 0; j < max_elmts; j++)
{
s_ind[smem_start + j] = A_col_indices[A_start + j];
s_val[smem_start + j] = A_values[A_start + j];
}
// sort the initially inserted elements (start = largest)
sortByFabs(&s_ind[smem_start], &s_val[smem_start], max_elmts);
// loop over all other elements on the row
for (int j = A_start + max_elmts; j < A_end; j++)
{
// check against current maximums
for (int i = 0; i < max_elmts; i++)
{
if (types::util<ValueType>::abs(A_values[j]) > types::util<ValueType>::abs(s_val[smem_start + i]))
{
// shift remaining values down
for (int k = max_elmts - 1; k > i; k--)
{
s_val[smem_start + k] = s_val[smem_start + k - 1];
s_ind[smem_start + k] = s_ind[smem_start + k - 1];
}
// tentatively replace element
s_val[smem_start + i] = A_values[j];
s_ind[smem_start + i] = A_col_indices[j];
break;
}
}
}
//sortByIndex(&s_ind[smem_start],&s_val[smem_start],max_elmts);
// now we have the 4 largest elements (hopefully) -- output
for (int i = 0; i < max_elmts; i++)
{
trunc_col_indices[trunc_start + i] = s_ind[smem_start + i];
trunc_values[trunc_start + i] = s_val[smem_start + i];
}
}
}
}
#endif
// Apply a small deterministic perturbation to every stored matrix value:
// A_values[j] += ourHash2(col(j)) * A_values[j].  One thread per row,
// grid-stride over num_rows.  (max_elmts is currently unused.)
template <typename IndexType, typename ValueType>
__global__
void perturb_kernel(const IndexType *A_offsets, const IndexType *A_col_indices, ValueType *A_values,
                    const IndexType num_rows,
                    const int max_elmts)
{
    const int stride = blockDim.x * gridDim.x;
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows; row += stride)
    {
        const IndexType begin = A_offsets[row];
        const IndexType end = A_offsets[row + 1];
        for (int j = begin; j < end; j++)
        {
            A_values[j] += ourHash2(A_col_indices[j]) * A_values[j];
        }
    }
}
// warp-based inclusive scan
// from: Sengupta, Harris, Garland 2008
// idx is index of thread
// Hillis-Steele inclusive prefix sum over one warp's segment of a volatile
// shared-memory array; returns the inclusive scan value at idx.
// NOTE(review): relies on implicit warp-synchronous execution between the
// doubling steps (no __syncwarp); this is unsafe under independent thread
// scheduling (Volta and newer) -- verify the supported architectures.
template <typename IndexType>
__device__
int scan_warp(volatile IndexType *ptr, const unsigned int idx)
{
const unsigned int lane = idx & 31; // index of thread in warp
IndexType p = ptr[idx];
if (lane >= 1) { ptr[idx] = p = ptr[idx - 1] + p; }
if (lane >= 2) { ptr[idx] = p = ptr[idx - 2] + p; }
if (lane >= 4) { ptr[idx] = p = ptr[idx - 4] + p; }
if (lane >= 8) { ptr[idx] = p = ptr[idx - 8] + p; }
if (lane >= 16) { ptr[idx] = p = ptr[idx - 16] + p; }
return ptr[idx];
}
// Truncate each row of A by threshold (keep entries with
// |value| >= truncate_factor * metric[row]) and rescale the survivors by
// old_row_sums/new_row_sums, writing them compacted into the pre-sized
// matrix At.  One vector of THREADS_PER_VECTOR threads handles one row;
// surviving entries are compacted with a warp-level prefix scan (scan_warp).
// Shared-memory buffers are sized from THREADS_PER_BLOCK, so the launch must
// use exactly THREADS_PER_BLOCK threads per block.
// NOTE(review): (1) like scan_warp, this assumes warp-synchronous execution
// over volatile shared memory; (2) the __syncthreads() sits inside the row
// loop, which is only safe if every vector of the block runs the same number
// of iterations -- verify the tail behaviour when num_rows is not a multiple
// of the block's vector count; (3) s_scale is filled by the first
// vectors_per_block threads from their own 'row' value -- confirm the
// indexing matches the row each vector_lane actually processes.
template <typename IndexType, typename matValueType, typename vecValueType, int THREADS_PER_BLOCK,
int THREADS_PER_VECTOR>
__global__
void truncateAndScale_kernel(const IndexType *A_offsets, const IndexType *A_indices,
const matValueType *A_values,
const IndexType num_rows,
IndexType *At_offsets, IndexType *At_indices, matValueType *At_values,
const vecValueType *old_row_sums, const vecValueType *new_row_sums,
const double truncate_factor, const vecValueType *metric)
{
const int WARP_SIZE = 32;
const int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE;
const int VECTORS_PER_WARP = WARP_SIZE / THREADS_PER_VECTOR;
// allocate shared memory for warp-based scans (assume up to 512 threads / block)
__shared__ volatile int s_scan[WARP_SIZE * WARPS_PER_BLOCK];
__shared__ volatile int s_A_ptr[WARPS_PER_BLOCK * VECTORS_PER_WARP][2]; // row offsets for A
__shared__ volatile int s_At_ptr[WARPS_PER_BLOCK * VECTORS_PER_WARP][2]; // row offsets for At
__shared__ double s_scale[WARPS_PER_BLOCK * VECTORS_PER_WARP]; // scaling factor for each warp
const int vectors_per_block = THREADS_PER_BLOCK / WARP_SIZE * VECTORS_PER_WARP;
const int num_vectors = THREADS_PER_BLOCK / THREADS_PER_VECTOR * gridDim.x;
const int vector_id = (threadIdx.x + blockIdx.x * blockDim.x) / THREADS_PER_VECTOR;
const int thread_lane = threadIdx.x & (THREADS_PER_VECTOR - 1);
const int vector_lane = threadIdx.x / THREADS_PER_VECTOR;
int scan_offset = 0;
matValueType val = 0., fval = 0.;
IndexType ind = 0;
// outer loop over rows
for (int row = vector_id; row < num_rows; row += num_vectors)
{
scan_offset = 0;
val = 0.;
fval = 0.; // fabs(val)
if (threadIdx.x < vectors_per_block && threadIdx.x < num_rows - row) // first threads in first warp
{
s_scale[threadIdx.x] = old_row_sums[row + threadIdx.x] / new_row_sums[row + threadIdx.x];
}
__syncthreads();
// load this row's offsets for both A and At
if (thread_lane < 2)
{
s_A_ptr[vector_lane][thread_lane] = A_offsets[row + thread_lane];
s_At_ptr[vector_lane][thread_lane] = At_offsets[row + thread_lane];
}
const int row_start = s_A_ptr[vector_lane][0];
const int row_end = s_A_ptr[vector_lane][1];
const int row_len = row_end - row_start;
const int At_row_start = s_At_ptr[vector_lane][0];
// mark & scan over consecutive blocks of 32 threads
int nloops = (int)ceil((float)row_len / THREADS_PER_VECTOR);
#pragma unroll 2
for (int i = 0; i < nloops; i++)
{
// initialise shared memory to 0
s_scan[threadIdx.x] = 0;
// mark appropriate elements of the row
if (i * THREADS_PER_VECTOR + thread_lane < row_len)
{
val = A_values[row_start + i * THREADS_PER_VECTOR + thread_lane];
ind = A_indices[row_start + i * THREADS_PER_VECTOR + thread_lane];
fval = fabs(val);
s_scan[threadIdx.x] = fval >= truncate_factor * metric[row] ? 1 : 0;
}
// now prefix scan over this
scan_warp(&s_scan[vector_lane * THREADS_PER_VECTOR], thread_lane); // threadIdx.x%32);
// copy relevant parts of A to At
if (i * THREADS_PER_VECTOR + thread_lane < row_len)
{
if (fval >= truncate_factor * metric[row])
{
// get relevant index in At (scan value is 1-based within the chunk)
const int At_index = At_row_start + scan_offset + s_scan[threadIdx.x] - 1;
if (s_scale[vector_lane] == 0.)
{
At_values[At_index] = val;
}
else
{
At_values[At_index] = val / s_scale[vector_lane];
}
At_indices[At_index] = ind;
}
}
// note new scan_offset (survivors written so far in this row)
scan_offset = s_scan[vector_lane * THREADS_PER_VECTOR + THREADS_PER_VECTOR - 1];
}
}
}
// Host wrapper: truncate A by factor and rescale surviving entries into A_trunc
// using the vector-per-row kernel above.
//
// BUG FIX: the kernel is instantiated with the THREADS_PER_BLOCK template
// parameter (which sizes its __shared__ scan buffers and thread-to-vector
// mapping at compile time) but was launched with a hard-coded block size of
// 128 threads. Any instantiation with THREADS_PER_BLOCK != 128 would index
// shared memory out of bounds. Launch with THREADS_PER_BLOCK instead.
template <typename Matrix, typename Vector, int THREADS_PER_BLOCK, int THREADS_PER_VECTOR>
void truncateAndScale(const Matrix &A, Matrix &A_trunc, const Vector &row_sum, const Vector &new_row_sum,
                      const double trunc_factor, const Vector &metric)
{
    // cap the grid; the kernel strides over rows, so 4096 blocks is enough
    const int num_blocks = std::min(4096, A.get_num_rows());
    truncateAndScale_kernel<typename Matrix::index_type, typename Matrix::value_type,
                            typename Vector::value_type, THREADS_PER_BLOCK, THREADS_PER_VECTOR>
    <<< num_blocks, THREADS_PER_BLOCK>>>(A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
                                         A.get_num_rows(),
                                         A_trunc.row_offsets.raw(), A_trunc.col_indices.raw(),
                                         A_trunc.values.raw(),
                                         row_sum.raw(), new_row_sum.raw(),
                                         trunc_factor, metric.raw());
    cudaCheckError();
}
// host code
// Host-side specialization: factor-based truncation is implemented with CUDA
// kernels only, so the host build simply reports an error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Truncate<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::truncateByFactor(Matrix_h &A,
        const double trunc_factor, const AMGX_TruncateType truncType)
{
    // throw error
    FatalError("Truncation on host not implemented yet", AMGX_ERR_NOT_IMPLEMENTED);
}
// Host-side specialization: max-elements truncation is implemented with CUDA
// kernels only, so the host build simply reports an error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Truncate<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::truncateByMaxElements(Matrix_h &A,
        const int max_elmts)
{
    // throw error
    FatalError("Truncation on host not implemented yet", AMGX_ERR_NOT_IMPLEMENTED);
}
// device code
// Drop entries whose magnitude falls below trunc_factor times a per-row metric
// (row sum or max coefficient), then rescale the surviving entries so each row
// keeps its original row sum.
// (_WIN32 guard: __attribute__((noinline)) is GCC syntax, omitted on MSVC.)
#ifdef _WIN32
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Truncate<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::truncateByFactor(Matrix_d &A, const double trunc_factor, const AMGX_TruncateType truncType)
#else
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Truncate<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::truncateByFactor(Matrix_d &A, const double trunc_factor, const AMGX_TruncateType truncType) __attribute__((noinline))
#endif
{
typedef typename TConfig_d::IndPrec index_type;
typedef typename TConfig_d::VecPrec vec_value_type;
typedef typename TConfig_d::MatPrec value_type;
// store the weighting values per row
Matrix<TConfig_d> A_trunc(0, 0, 0, CSR);
typedef Vector<TConfig_d> VVector;
typedef typename Matrix<TConfig_d>::MVector MVector;
typedef Vector<typename TConfig_d::template setVecPrec<AMGX_vecInt>::Type> IVector;
VVector row_sum(A.get_num_rows());
VVector new_row_sum(A.get_num_rows());
VVector max_coef;
// point to either row_sum or max_coef depending on type of truncation desired
// vec_value_type *metric = 0;
VVector *metric;
// get either max row coefficient or row sum
if (truncType == AMGX_TruncateByRowSum)
{
absRowSum(A, row_sum);
metric = &row_sum;
}
else if (truncType == AMGX_TruncateByMaxCoefficient)
{
max_coef.resize(A.get_num_rows());
maxCoefAndSum(A, max_coef, row_sum);
metric = &max_coef;
}
else
{
FatalError("Truncation type not implemented", AMGX_ERR_NOT_IMPLEMENTED);
}
// count surviving entries per row and accumulate the sum of survivors
// (new_row_sum) for the later rescaling step
IVector row_counts(A.get_num_rows());
if (truncType == AMGX_TruncateByRowSum)
{
countTruncElements(A, trunc_factor, row_sum, row_counts, new_row_sum);
}
else if (truncType == AMGX_TruncateByMaxCoefficient)
{
countTruncElements(A, trunc_factor, max_coef, row_counts, new_row_sum);
}
cudaCheckError();
// initial resize (so we can scan into the row_offsets array)
A_trunc.resize(A.get_num_rows(), A.get_num_cols(), 0);
cudaCheckError();
thrust_wrapper::exclusive_scan(row_counts.begin(), row_counts.end(), A_trunc.row_offsets.begin());
cudaCheckError();
// total surviving nnz = last exclusive-scan offset + last row count
const int nnz = A_trunc.row_offsets[A.get_num_rows() - 1] + row_counts[A.get_num_rows() - 1];
A_trunc.row_offsets[A.get_num_rows()] = nnz;
if (nnz == A.get_num_nz()) // early return -- nothing truncated
{
return;
}
// final resize & get row indices for both matrices
A_trunc.resize(A.get_num_rows(), A.get_num_cols(), nnz);
#ifndef USE_EXPERIMENTAL_FACTOR_COPY
// Thrust path: copy_if the surviving (row, col, value) triples, then rescale.
// NOTE(review): COO props are added to A here and not removed afterwards --
// confirm that is intended.
A_trunc.addProps(COO);
A.addProps(COO);
// copy relevant values from A -> A_trunc
copy_pred<index_type, vec_value_type, value_type> pred;
if (truncType == AMGX_TruncateByRowSum)
{
pred = copy_pred<index_type, vec_value_type, value_type>(trunc_factor, row_sum.raw());
}
else if (truncType == AMGX_TruncateByMaxCoefficient)
{
pred = copy_pred<index_type, vec_value_type, value_type>(trunc_factor, max_coef.raw());
}
using thrust::make_zip_iterator;
using thrust::make_tuple;
thrust::copy_if(
make_zip_iterator(make_tuple(A.row_indices.begin(), A.col_indices.begin(), A.values.begin())),
make_zip_iterator(make_tuple(A.row_indices.end(), A.col_indices.end(), A.values.end())),
make_zip_iterator(make_tuple(A_trunc.row_indices.begin(), A_trunc.col_indices.begin(),
A_trunc.values.begin())),
pred
);
// scale each row of the truncated matrix - per-row scale in new_row_sum
thrust::transform(row_sum.begin(), row_sum.end(), new_row_sum.begin(), new_row_sum.begin(),
thrust::divides<vec_value_type>());
scale_op<index_type, vec_value_type, value_type> op(new_row_sum.raw());
thrust::for_each(
make_zip_iterator(make_tuple(A_trunc.row_indices.begin(), A_trunc.values.begin())),
make_zip_iterator(make_tuple(A_trunc.row_indices.end(), A_trunc.values.end())),
op
);
#else
// Custom-kernel path: choose THREADS_PER_VECTOR from the average row length
// so short rows do not waste a whole warp
const int avg_row_len = (int)ceil(sqrt((float)A.get_num_nz() / A.get_num_rows()));
if (avg_row_len > 16)
{
truncateAndScale<Matrix<TConfig>, Vector<TConfig>, 128, 32>(A, A_trunc, row_sum, new_row_sum,
trunc_factor, *metric);
}
else if (avg_row_len > 8)
{
truncateAndScale<Matrix<TConfig>, Vector<TConfig>, 128, 16>(A, A_trunc, row_sum, new_row_sum,
trunc_factor, *metric);
}
else if (avg_row_len > 4)
{
truncateAndScale<Matrix<TConfig>, Vector<TConfig>, 128, 8>(A, A_trunc, row_sum, new_row_sum,
trunc_factor, *metric);
}
else
{
truncateAndScale<Matrix<TConfig>, Vector<TConfig>, 128, 4>(A, A_trunc, row_sum, new_row_sum,
trunc_factor, *metric);
}
#endif
cudaCheckError();
// copy truncated matrix to this
A.copy(A_trunc);
}
#ifdef USE_EXPERIMENTAL_NLARGEST
// Launch helper for the experimental "n largest" truncation kernel: each row
// of A is processed by a group of THREADS_PER_VECTOR threads, with BLOCKSIZE
// threads per block.
template <typename Matrix, typename Vector, int BLOCKSIZE, int THREADS_PER_VECTOR>
void truncateNandScale(const Matrix &A, Matrix &A_trunc, const int max_elmts, const Vector &orig_row_sums)
{
    const int rows_per_block = BLOCKSIZE / THREADS_PER_VECTOR;
    // one row per vector group, capped at 4096 blocks
    const int grid_size = std::min(4096, A.get_num_rows() / rows_per_block + 1);
    truncateNandScale_kernel<typename Matrix::index_type, typename Matrix::value_type,
                             typename Vector::value_type, BLOCKSIZE, THREADS_PER_VECTOR>
    <<< grid_size, BLOCKSIZE>>>(
        A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
        A.get_num_rows(),
        A_trunc.row_offsets.raw(), A_trunc.col_indices.raw(), A_trunc.values.raw(),
        max_elmts,
        orig_row_sums.raw());
    cudaCheckError();
}
#endif
// Keep at most `max_elmts` entries per row of A and rescale the survivors so
// each row keeps its original row sum; the result is built in a scratch matrix
// and copied back into A.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Truncate<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::truncateByMaxElements(Matrix_d &A,
        const int max_elmts)
{
    if (max_elmts > 32) // not supported
    {
        FatalError("Matrix truncation to > 32 elements not supported", AMGX_ERR_BAD_PARAMETERS);
    }
    typedef typename TConfig_d::IndPrec index_type;
    typedef typename TConfig_d::VecPrec vec_value_type;
    typedef typename TConfig_d::MatPrec value_type;
    // store the weighting values per row
    typedef Vector<TConfig_d> VVector;
    typedef typename Matrix<TConfig_d>::MVector MVector;
    typedef Vector<typename TConfig_d::template setVecPrec<AMGX_vecInt>::Type> IVector;
    IVector row_lengths(A.get_num_rows());
    MVector orig_row_sums(A.get_num_rows(), 0);
    MVector new_row_sums(A.get_num_rows(), 0);
    const int blocksize = 128;
    // get the original row sums of A (row_sum_kernel also fills row_lengths,
    // bounded by max_elmts -- see the kernel). All launches here use a fixed
    // 4096-block grid; the previously computed `num_blocks` local was unused
    // and has been removed.
    row_sum_kernel <<< 4096, blocksize>>>(A.row_offsets.raw(), A.values.raw(), orig_row_sums.raw(), A.get_num_rows(), row_lengths.raw(), max_elmts, true);
    cudaCheckError();
    // initial definition
    Matrix_d A_trunc(A.get_num_rows(), A.get_num_cols(), 0, CSR); // add CSR prop
    // exclusive scan to get new row structure
    thrust_wrapper::exclusive_scan(row_lengths.begin(), row_lengths.end(), A_trunc.row_offsets.begin());
    cudaCheckError();
    // total truncated nnz = last scan offset + last row length
    int nnz = A_trunc.row_offsets[A.get_num_rows() - 1] + row_lengths[A.get_num_rows() - 1];
    A_trunc.row_offsets[A.get_num_rows()] = nnz;
    // set final size of truncated matrix
    A_trunc.resize(A.get_num_rows(), A.get_num_cols(), nnz);
#ifdef USE_EXPERIMENTAL_NLARGEST
    // run kernel with appropriate vector size
    if (max_elmts > 16)
    {
        truncateNandScale<Matrix<TConfig_d>, Vector<TConfig_d>, 128, 32>(A, A_trunc, max_elmts, orig_row_sums);
    }
    else if (max_elmts > 8)
    {
        truncateNandScale<Matrix<TConfig_d>, Vector<TConfig_d>, 128, 16>(A, A_trunc, max_elmts, orig_row_sums);
    }
    else if (max_elmts > 4)
    {
        truncateNandScale<Matrix<TConfig_d>, Vector<TConfig_d>, 192, 8>(A, A_trunc, max_elmts, orig_row_sums);
    }
    else
    {
        truncateNandScale<Matrix<TConfig_d>, Vector<TConfig_d>, 128, 4>(A, A_trunc, max_elmts, orig_row_sums);
    }
#else
    cudaCheckError();
    // copy the retained entries of each row into A_trunc
    truncate_kernel <<< 4096, blocksize>>>(
        A.row_offsets.raw(),
        A.col_indices.raw(),
        A.values.raw(),
        A.get_num_rows(),
        A_trunc.row_offsets.raw(),
        A_trunc.col_indices.raw(),
        A_trunc.values.raw(),
        max_elmts
    );
    cudaCheckError();
    // get the new row sums of A_trunc
    row_sum_kernel <<< 4096, blocksize>>>(A_trunc.row_offsets.raw(), A_trunc.values.raw(), new_row_sums.raw(), A_trunc.get_num_rows(), (index_type *) NULL, 0, false);
    cudaCheckError();
    A_trunc.addProps(COO);
    // rescale the surviving values per row from the old/new row sums
    scale_kernel <<< 4096, blocksize>>>(A_trunc.row_indices.raw(), A_trunc.values.raw(), new_row_sums.raw(), orig_row_sums.raw(), nnz);
    cudaCheckError();
#endif
    cudaCheckError();
    A_trunc.delProps(COO);
    A.set_initialized(0);
    A.copy(A_trunc);
    A.computeDiagonal();
    A.set_initialized(1);
}
// -------------------------------
// Explicit instantiations
// -------------------------------
// Instantiate Truncate for every configured build mode (real and complex).
#define AMGX_CASE_LINE(CASE) template class Truncate<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
namespace HugeCTR {
// Reserve all optimizer-state tensors and the scratch buffers used by
// update(): the reservations are recorded on `buf` and materialized when the
// buffer is later allocated. Which state tensors are reserved depends on the
// configured optimizer.
template <typename TypeHashKey, typename TypeEmbeddingComp>
EmbeddingOptimizer<TypeHashKey, TypeEmbeddingComp>::EmbeddingOptimizer(
    size_t max_vocabulary_size_per_gpu_, SparseEmbeddingHashParams &param,
    const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &buf)
    : param(param) {
  // new optimizer params used by update_params
  // should be match with HugeCTR/src/parsers/create_embedding.cpp
  // should be match with HugeCTR/src/pybind/model.cpp
  switch (param.opt_params.optimizer) {
    case Optimizer_t::Adam: // adam
    {
      {
        // first and second moment accumulators, one vector per vocabulary row
        buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                     &opt_tensors_.opt_m_tensors_);
        buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                     &opt_tensors_.opt_v_tensors_);
      }
      if (param.opt_params.update_type == Update_t::LazyGlobal) {
        // lazy-global Adam additionally tracks the last-update step per feature
        buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                     &opt_tensors_.opt_prev_time_tensors_);
      }
      break;
    }
    case Optimizer_t::AdaGrad: // adagrad (comment fixed: was mislabeled "nesterov")
    {
      buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                   &opt_tensors_.opt_accm_tensors_);
      break;
    }
    case Optimizer_t::MomentumSGD: // momentum_sgd
    {
      buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                   &opt_tensors_.opt_momentum_tensors_);
      break;
    }
    case Optimizer_t::Nesterov: // nesterov
    {
      buf->reserve({max_vocabulary_size_per_gpu_, param.embedding_vec_size},
                   &opt_tensors_.opt_accm_tensors_);
      break;
    }
    case Optimizer_t::SGD:
      // plain SGD keeps no per-feature optimizer state
      break;
    default:
      throw std::runtime_error(
          std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
  }
  // Scratch buffers for update(): sized for the worst case of
  // batch_size * max_feature_num sparse keys per iteration.
  { buf->reserve({1, param.get_batch_size(true) * param.max_feature_num}, &sample_id_tensors_); }
  {
    buf->reserve({1, param.get_batch_size(true) * param.max_feature_num}, &sample_id_sort_tensors_);
  }
  {
    buf->reserve({1, param.get_batch_size(true) * param.max_feature_num},
                 &hash_value_index_sort_tensors_);
  }
  {
    // +1: CSR-style offsets need one trailing end offset
    buf->reserve({1, param.get_batch_size(true) * param.max_feature_num + 1},
                 &hash_value_index_count_offset_tensors_);
  }
  {
    buf->reserve({1, param.get_batch_size(true) * param.max_feature_num},
                 &new_hash_value_flag_tensors_);
  }
  {
    buf->reserve({1, param.get_batch_size(true) * param.max_feature_num},
                 &hash_value_flag_sumed_tensors_);
  }
  { buf->reserve({1, 1}, &hash_value_index_count_counter_tensors_); }
  {
    // cal the temp storage bytes for CUB radix sort
    size_t size = 0;
    cub::DeviceRadixSort::SortPairs((void *)nullptr, size, (size_t *)nullptr, (size_t *)nullptr,
                                    (TypeHashKey *)nullptr, (TypeHashKey *)nullptr,
                                    param.get_batch_size(true) * param.max_feature_num);
    // new temp storage tensors for CUB radix sort
    buf->reserve({size}, &temp_storage_sort_tensors_);
  }
  {
    // temp storage for the CUB inclusive prefix sum used on the group flags
    size_t size = 0;
    cub::DeviceScan::InclusiveSum((void *)nullptr, size, (uint32_t *)nullptr, (uint32_t *)nullptr,
                                  param.get_batch_size(true) * param.max_feature_num);
    buf->reserve({size}, &temp_storage_scan_tensors_);
  }
}
// Initialize the optimizer-state tensors on `local_gpu`'s stream.
// All work is asynchronous on that stream; no synchronization is performed here.
template <typename TypeHashKey, typename TypeEmbeddingComp>
void EmbeddingOptimizer<TypeHashKey, TypeEmbeddingComp>::initialize(const GPUResource &local_gpu) {
  switch (param.opt_params.optimizer) {
    case Optimizer_t::Adam: // adam
      HCTR_LIB_THROW(cudaMemsetAsync(opt_tensors_.opt_m_tensors_.get_ptr(), 0,
                                     opt_tensors_.opt_m_tensors_.get_size_in_bytes(),
                                     local_gpu.get_stream()));
      HCTR_LIB_THROW(cudaMemsetAsync(opt_tensors_.opt_v_tensors_.get_ptr(), 0,
                                     opt_tensors_.opt_v_tensors_.get_size_in_bytes(),
                                     local_gpu.get_stream()));
      param.opt_params.hyperparams.adam.times = 0;
      if (param.opt_params.update_type == Update_t::LazyGlobal) {
        // lazy-global Adam: per-feature timestamps start at step 1
        dim3 grid(local_gpu.get_sm_count() * 4, 1, 1);
        dim3 block(512, 1, 1);
        initialize_array<<<grid, block, 0, local_gpu.get_stream()>>>(
            opt_tensors_.opt_prev_time_tensors_.get_ptr(),
            opt_tensors_.opt_prev_time_tensors_.get_num_elements(), uint64_t(1));
      }
      break;
    case Optimizer_t::AdaGrad: {
      // BUG FIX: the previous code used cudaMemsetAsync with
      // initial_accu_value as the fill argument. cudaMemset* fills BYTES, so a
      // float initial value was truncated to an int byte pattern -- only
      // correct when the initial value is exactly 0. Use the fill kernel
      // (already used above for the Adam timestamps) to write the real value.
      dim3 grid(local_gpu.get_sm_count() * 4, 1, 1);
      dim3 block(512, 1, 1);
      initialize_array<<<grid, block, 0, local_gpu.get_stream()>>>(
          opt_tensors_.opt_accm_tensors_.get_ptr(),
          opt_tensors_.opt_accm_tensors_.get_num_elements(),
          TypeConvertFunc<TypeEmbeddingComp, float>::convert(
              param.opt_params.hyperparams.adagrad.initial_accu_value));
      break;
    }
    case Optimizer_t::MomentumSGD: // momentum_sgd
      HCTR_LIB_THROW(cudaMemsetAsync(opt_tensors_.opt_momentum_tensors_.get_ptr(), 0,
                                     opt_tensors_.opt_momentum_tensors_.get_size_in_bytes(),
                                     local_gpu.get_stream()));
      break;
    case Optimizer_t::Nesterov: // nesterov
      HCTR_LIB_THROW(cudaMemsetAsync(opt_tensors_.opt_accm_tensors_.get_ptr(), 0,
                                     opt_tensors_.opt_accm_tensors_.get_size_in_bytes(),
                                     local_gpu.get_stream()));
      break;
    case Optimizer_t::SGD:
      // no optimizer state to initialize
      break;
    default:
      throw std::runtime_error(
          std::string("[HCDEBUG][ERROR] Runtime error: Invalid optimizer type\n"));
  }
}
namespace {
// From the per-element group-start flags and their inclusive prefix sum,
// scatter the start position (gid) of every run of equal sorted hash-value
// indices into hash_value_index_index. One thread additionally publishes the
// total number of groups in *counter and stores nnz as the terminating offset,
// turning the array into a CSR-style list of group boundaries.
// NOTE(review): assumes nnz > 0 (hash_value_flag_sumed[nnz - 1] is read
// unconditionally by thread 0).
__global__ void value_count_kernel_2(int nnz, const uint32_t *new_hash_value_flag,
                                     const uint32_t *hash_value_flag_sumed,
                                     uint32_t *hash_value_index_index, uint32_t *counter)
{
  for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += blockDim.x * gridDim.x) {
    uint32_t flag = new_hash_value_flag[gid];
    if (flag == 1) {
      // inclusive prefix sum is 1-based at a group start, hence the -1
      hash_value_index_index[hash_value_flag_sumed[gid] - 1] = gid;
    }
  }
  if (blockIdx.x * blockDim.x + threadIdx.x == 0) {
    *counter = hash_value_flag_sumed[nnz - 1];
    // sentinel: end offset of the last group
    hash_value_index_index[*counter] = nnz;
  }
}
// Expand CSR row offsets into per-value row ids: for every (sample, slot) row,
// write that row's flat index into sample_id once for each of its values.
// One thread handles one row; threads beyond batch_size * slot_num exit early.
template <typename TypeKey>
__global__ void sample_id_expand_kernel(int batch_size, int slot_num, const TypeKey *row_offset,
                                        TypeKey *sample_id) {
  const int row = blockIdx.x * blockDim.x + threadIdx.x;
  const int num_rows = batch_size * slot_num;
  if (row >= num_rows) {
    return;
  }
  const TypeKey begin = row_offset[row];
  const TypeKey end = row_offset[row + 1];
  for (TypeKey pos = begin; pos < end; ++pos) {
    sample_id[pos] = row;
  }
}
// Mark the start of every run of equal values in the sorted index array:
// new_hash_value_flag[i] is 1 iff element i opens a new group (element 0
// always does; any other element does exactly when it differs from its left
// neighbour). Grid-stride loop, so any launch configuration is valid.
__global__ void value_count_kernel_1(int nnz, const size_t *hash_value_index_sort,
                                     uint32_t *new_hash_value_flag) {
  const int stride = blockDim.x * gridDim.x;
  for (int gid = blockIdx.x * blockDim.x + threadIdx.x; gid < nnz; gid += stride) {
    const bool group_start =
        (gid == 0) || (hash_value_index_sort[gid] != hash_value_index_sort[gid - 1]);
    new_hash_value_flag[gid] = group_start ? 1 : 0;
  }
}
// Helper function to accumulate the weight gradients for a thread's embedding vector
// element. `bid` selects one group of samples that all map to the same feature
// row (group boundaries given by hash_value_index_count_offset), `offset` is
// the group's start position in sample_id, and `tid` is the embedding-vector
// element handled by this thread. Returns the summed gradient divided by
// `scaler` (the loss-scaling factor).
template <typename TypeKey, typename TypeEmbeddingComp>
__device__ __forceinline__ float accumulate_gradients(int embedding_vec_size,
                                                      const TypeKey *sample_id,
                                                      const uint32_t *hash_value_index_count_offset,
                                                      const TypeEmbeddingComp *wgrad, float scaler,
                                                      uint32_t offset, int bid, int tid) {
  uint32_t sample_num = hash_value_index_count_offset[bid + 1] - hash_value_index_count_offset[bid];
  float gi = 0.0f;
  for (int i = 0; i < sample_num; i++) {
    int sample_index = sample_id[offset + i];
    gi += TypeConvertFunc<float, TypeEmbeddingComp>::convert(
        wgrad[sample_index * embedding_vec_size + tid]);
  }
  return gi / scaler;
}
// First step of the global update with the Adam optimizer: compute gradient and add the
// corresponding terms to the moving-average accumulators
// (one block per distinct feature row, one thread per embedding-vector element).
// The division by beta1 / beta2 here pre-compensates the second step
// (adam_update_kernel_global), which multiplies ALL accumulators by
// beta1 / beta2 -- so touched rows end up with the standard
// m = beta1*m + (1-beta1)*g and v = beta2*v + (1-beta2)*g*g.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_adam_kernel_global(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                       const AdamOptHyperParams adam, TypeEmbeddingComp *m_ptr,
                                       TypeEmbeddingComp *v_ptr, const TypeKey *sample_id,
                                       const size_t *hash_value_index_sort,
                                       const uint32_t *hash_value_index_count_offset,
                                       const TypeEmbeddingComp *wgrad, float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float mi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(m_ptr[feature_index]) +
               (1.0f - adam.beta1) * gi / adam.beta1;
    float vi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(v_ptr[feature_index]) +
               (1.0f - adam.beta2) * gi * gi / adam.beta2;
    m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi);
    v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi);
  }
}
// Second step of the global update with the Adam optimizer: update the moving-average accumulators
// and the weights for all the features
// (grid-stride loop over every element of the whole table, touched or not:
// decay m and v by beta1 / beta2, then apply the bias-corrected weight step).
template <typename TypeEmbeddingComp>
__global__ void adam_update_kernel_global(int embedding_vec_size,
                                          size_t table_size, // vocabulary size / factor
                                          const AdamOptHyperParams adam, TypeEmbeddingComp *m_ptr,
                                          TypeEmbeddingComp *v_ptr, float alpha_t,
                                          float *hash_table_value) {
  const int TILE_SIZE = blockDim.x * gridDim.x;
  for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x;
       feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) {
    float mi =
        adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(m_ptr[feature_index]);
    float vi =
        adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(v_ptr[feature_index]);
    m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi);
    v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi);
    float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon);
    hash_table_value[feature_index] += weight_diff;
  }
}
// First step of the global update with Momentum SGD: compute gradient and add the corresponding
// term to the momentum
// (one block per distinct feature row, one thread per embedding-vector element).
// The division by momentum.factor pre-compensates the second step
// (momentum_sgd_update_kernel_global), which multiplies ALL momenta by the
// factor -- so touched rows end up with mo = factor*mo - lr*g.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_momentum_sgd_kernel_global(
    uint32_t hash_value_index_count_num, int embedding_vec_size, float lr,
    const MomentumSGDOptHyperParams momentum, TypeEmbeddingComp *momentum_ptr,
    const TypeKey *sample_id, const size_t *hash_value_index_sort,
    const uint32_t *hash_value_index_count_offset, const TypeEmbeddingComp *wgrad, float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum_ptr[feature_index]) -
               lr * gi / momentum.factor;
    momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo);
  }
}
// Second step of the global update with Momentum SGD: update the momentum and the weights for all
// the features (grid-stride loop over the whole table: decay each momentum by
// the factor and add the decayed momentum to the weight).
template <typename TypeEmbeddingComp>
__global__ void momentum_sgd_update_kernel_global(int embedding_vec_size,
                                                  size_t table_size, // vocabulary size / factor
                                                  const MomentumSGDOptHyperParams momentum,
                                                  TypeEmbeddingComp *momentum_ptr,
                                                  float *hash_table_value) {
  const int TILE_SIZE = blockDim.x * gridDim.x;
  for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x;
       feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) {
    float mo = TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum_ptr[feature_index]);
    mo *= momentum.factor;
    hash_table_value[feature_index] += mo;
    momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo);
  }
}
// First step of the global update with Nesterov: update momentum and weights for all the features.
// Decays every accumulator by mu and adds mu * (decayed accm) = mu^2 * accm_old
// to the weight; combined with the second step (accm -= lr*g and
// w -= (1+mu)*lr*g for touched rows) this reproduces the local Nesterov update
// w += (1+mu)*accm_new - mu*accm_old, while untouched rows still decay.
template <typename TypeEmbeddingComp>
__global__ void nesterov_global_update_kernel_global(int embedding_vec_size,
                                                     size_t table_size, // vocabulary size / factor
                                                     const NesterovOptHyperParams nesterov,
                                                     TypeEmbeddingComp *accm_ptr,
                                                     float *hash_table_value) {
  const int TILE_SIZE = blockDim.x * gridDim.x;
  for (size_t feature_index = blockIdx.x * blockDim.x + threadIdx.x;
       feature_index < table_size * embedding_vec_size; feature_index += TILE_SIZE) {
    float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(accm_ptr[feature_index]);
    accm *= nesterov.mu;
    accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm);
    hash_table_value[feature_index] += accm * nesterov.mu;
  }
}
// Second step of the global update with Nesterov: compute gradient, add the corresponding term
// to the momentum and update the weights
// (one block per distinct feature row; complements the mu^2 term already
// applied by nesterov_global_update_kernel_global).
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void nesterov_local_update_kernel_global(
    uint32_t hash_value_index_count_num, int embedding_vec_size, float lr,
    const NesterovOptHyperParams nesterov, TypeEmbeddingComp *accm_ptr, const TypeKey *sample_id,
    const size_t *hash_value_index_sort, const uint32_t *hash_value_index_count_offset,
    const TypeEmbeddingComp *wgrad, float *hash_table_value, float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float accm = TypeConvertFunc<float, TypeEmbeddingComp>::convert(accm_ptr[feature_index]);
    accm -= lr * gi;
    accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm);
    hash_table_value[feature_index] -= (1 + nesterov.mu) * (lr * gi);
  }
}
// Local update for the Adam optimizer: compute the gradients and update the accumulators and the
// weights (standard Adam step, applied only to the distinct feature rows
// touched this iteration; one block per row, one thread per vector element).
// alpha_t is the bias-corrected learning rate precomputed on the host.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_adam_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                const AdamOptHyperParams adam, TypeEmbeddingComp *m_ptr,
                                TypeEmbeddingComp *v_ptr, float alpha_t, const TypeKey *sample_id,
                                const size_t *hash_value_index_sort,
                                const uint32_t *hash_value_index_count_offset,
                                const TypeEmbeddingComp *wgrad, float *hash_table_value,
                                float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float mi =
        adam.beta1 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(m_ptr[feature_index]) +
        (1.0f - adam.beta1) * gi;
    float vi =
        adam.beta2 * TypeConvertFunc<float, TypeEmbeddingComp>::convert(v_ptr[feature_index]) +
        (1.0f - adam.beta2) * gi * gi;
    m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi);
    v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi);
    float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon);
    hash_table_value[feature_index] += weight_diff;
  }
}
// Local update for the Adagrad optimizer: compute the gradients and update the accumulators and the
// weights (accum += g^2; w -= lr * g / (sqrt(accum) + eps); one block per
// touched feature row, one thread per embedding-vector element).
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_adagrad_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                   float lr, const AdaGradParams adagrad,
                                   TypeEmbeddingComp *accum_ptr, const TypeKey *sample_id,
                                   const size_t *hash_value_index_sort,
                                   const uint32_t *hash_value_index_count_offset,
                                   const TypeEmbeddingComp *wgrad, float *hash_table_value,
                                   float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float accum =
        TypeConvertFunc<float, TypeEmbeddingComp>::convert(accum_ptr[feature_index]) + gi * gi;
    accum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accum);
    float weight_diff = -lr * gi / (sqrtf(accum) + adagrad.epsilon);
    hash_table_value[feature_index] += weight_diff;
  }
}
// Local update for Momentum SGD: compute the gradients and update the momentum and the weights
// (mo = factor*mo - lr*g; w += mo; one block per touched feature row).
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_momentum_sgd_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                        float lr, const MomentumSGDOptHyperParams momentum,
                                        TypeEmbeddingComp *momentum_ptr, const TypeKey *sample_id,
                                        const size_t *hash_value_index_sort,
                                        const uint32_t *hash_value_index_count_offset,
                                        const TypeEmbeddingComp *wgrad, float *hash_table_value,
                                        float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float mo = momentum.factor *
                   TypeConvertFunc<float, TypeEmbeddingComp>::convert(momentum_ptr[feature_index]) -
               lr * gi;
    momentum_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mo);
    hash_table_value[feature_index] += mo;
  }
}
// Local update for Nesterov: compute the gradients and update the accumulators and the weights
// (accm_new = mu*accm_old - lr*g; w += (1+mu)*accm_new - mu*accm_old;
// one block per touched feature row, one thread per vector element).
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_nesterov_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                    float lr, const NesterovOptHyperParams nesterov,
                                    TypeEmbeddingComp *accm_ptr, const TypeKey *sample_id,
                                    const size_t *hash_value_index_sort,
                                    const uint32_t *hash_value_index_count_offset,
                                    const TypeEmbeddingComp *wgrad, float *hash_table_value,
                                    float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    float accm_old = TypeConvertFunc<float, TypeEmbeddingComp>::convert(accm_ptr[feature_index]);
    float accm_new = nesterov.mu * accm_old - lr * gi;
    accm_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(accm_new);
    float weight_diff = -nesterov.mu * accm_old + (1.0f + nesterov.mu) * accm_new;
    hash_table_value[feature_index] += weight_diff;
  }
}
// Local update for plain SGD: one block per distinct feature row touched this
// iteration, one thread per embedding-vector element. The gradients of all
// samples hitting the row are summed, then a single gradient step is applied.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_sgd_kernel(uint32_t hash_value_index_count_num, int embedding_vec_size,
                               float lr, const TypeKey *sample_id,
                               const size_t *hash_value_index_sort,
                               const uint32_t *hash_value_index_count_offset,
                               const TypeEmbeddingComp *wgrad, float *hash_table_value,
                               float scaler) {
  const int group = blockIdx.x;  // index of the distinct feature row
  const int lane = threadIdx.x;  // element within the embedding vector
  if (group >= hash_value_index_count_num || lane >= embedding_vec_size) {
    return;
  }
  const uint32_t first_sample = hash_value_index_count_offset[group];
  const float grad = accumulate_gradients(embedding_vec_size, sample_id,
                                          hash_value_index_count_offset, wgrad, scaler,
                                          first_sample, group, lane);
  const size_t row = hash_value_index_sort[first_sample];
  hash_table_value[row * embedding_vec_size + lane] -= lr * grad;
}
// Lazy global update for the Adam optimizer: compute the gradients and update the weights and the
// accumulators (local approximation of the global update)
// Each feature row stores the step at which it was last touched
// (prev_time_ptr); the decay that the global update would have applied over
// the skipped steps is folded in via beta^skipped factors when the row is
// touched again.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_adam_kernel_lazy(uint32_t hash_value_index_count_num, int embedding_vec_size,
                                     const AdamOptHyperParams adam, uint64_t *prev_time_ptr,
                                     TypeEmbeddingComp *m_ptr, TypeEmbeddingComp *v_ptr,
                                     float alpha_t_common, uint64_t times, const TypeKey *sample_id,
                                     const size_t *hash_value_index_sort,
                                     const uint32_t *hash_value_index_count_offset,
                                     const TypeEmbeddingComp *wgrad, float *hash_table_value,
                                     float scaler) {
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  if (tid < embedding_vec_size && bid < hash_value_index_count_num) {
    uint32_t offset = hash_value_index_count_offset[bid];
    float gi = accumulate_gradients(embedding_vec_size, sample_id, hash_value_index_count_offset,
                                    wgrad, scaler, offset, bid, tid);
    size_t row_index = hash_value_index_sort[offset];
    size_t feature_index = row_index * embedding_vec_size + tid;
    // First update the weights
    uint64_t prev_time = prev_time_ptr[feature_index];
    prev_time_ptr[feature_index] = times;
    uint64_t skipped = times - prev_time;
    float beta1_pow_skipped = powf(adam.beta1, skipped);
    // bias-corrected step size, accounting for the skipped iterations
    float alpha_t = alpha_t_common * sqrtf(1.0f - powf(adam.beta2, prev_time)) /
                    (1.0f - powf(adam.beta1, prev_time)) * (1.0f - beta1_pow_skipped);
    float mi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(m_ptr[feature_index]);
    float vi = TypeConvertFunc<float, TypeEmbeddingComp>::convert(v_ptr[feature_index]);
    float weight_diff = -alpha_t * mi / (sqrtf(vi) + adam.epsilon);
    hash_table_value[feature_index] += weight_diff;
    // Then update the moving-average accumulators
    // (beta^skipped applies the decay of all skipped global steps at once)
    mi = beta1_pow_skipped * mi + (1.0f - adam.beta1) * gi;
    vi = powf(adam.beta2, skipped) * vi + (1.0f - adam.beta2) * gi * gi;
    m_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(mi);
    v_ptr[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(vi);
  }
}
// Atomic SGD update: blocks walk the nonzeros in a grid-stride fashion while each
// thread owns one element of the embedding vector, applying -lr_scale * grad via
// atomicAdd so duplicate keys targeting the same row accumulate correctly.
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale,
                                      const size_t *hash_value_index, const TypeKey *sample_ids,
                                      const TypeEmbeddingComp *wgrad, float *hash_table_value) {
  const int vec_elem = threadIdx.x;
  if (vec_elem >= embedding_vec_size || blockIdx.x >= nnz) {
    return;
  }
  for (int key = blockIdx.x; key < nnz; key += gridDim.x) {
    const int sample = sample_ids[key];
    const float grad = TypeConvertFunc<float, TypeEmbeddingComp>::convert(
        wgrad[sample * embedding_vec_size + vec_elem]);
    const size_t row = hash_value_index[key];
    // Several keys may map to the same embedding row, hence the atomic accumulation.
    atomicAdd(&hash_table_value[row * embedding_vec_size + vec_elem], -lr_scale * grad);
  }
}
// Atomic SGD update specialized for LocalizedSlotSparseEmbeddingOneHot: with at most
// one feature per slot the sample id equals the key id, so the sample_ids indirection
// of the generic kernel is unnecessary.
template <typename TypeEmbeddingComp>
__global__ void opt_sgd_atomic_kernel(int nnz, int embedding_vec_size, float lr_scale,
                                      const size_t *hash_value_index,
                                      const TypeEmbeddingComp *wgrad, float *hash_table_value) {
  const int vec_elem = threadIdx.x;
  if (vec_elem >= embedding_vec_size || blockIdx.x >= nnz) {
    return;
  }
  for (int key = blockIdx.x; key < nnz; key += gridDim.x) {
    const float grad = TypeConvertFunc<float, TypeEmbeddingComp>::convert(
        wgrad[key * embedding_vec_size + vec_elem]);
    const size_t row = hash_value_index[key];
    // Duplicate keys may still hit the same row, hence the atomic accumulation.
    atomicAdd(&hash_table_value[row * embedding_vec_size + vec_elem], -lr_scale * grad);
  }
}
} // namespace
// Apply one optimizer step to this GPU's embedding table using the batch gradients in
// `wgrad`.
// Pipeline: (1) expand row offsets into per-nonzero sample ids; then either take the
// atomic-SGD fast path, or (3) radix-sort nonzeros by embedding row, (4) count the
// unique rows with a flag/prefix-sum pass, and (5) launch the per-unique-row optimizer
// kernel selected by opt_params.update_type x opt_params.optimizer. All device work is
// enqueued on `stream`; the single D2H copy of the unique-row count blocks the host
// because the destination is pageable (see inline note), which is what makes reading
// it immediately afterwards safe.
template <typename TypeHashKey, typename TypeEmbeddingComp>
void EmbeddingOptimizer<TypeHashKey, TypeEmbeddingComp>::update(
    size_t batch_size, size_t slot_num, size_t embedding_vec_size,
    size_t max_vocabulary_size_per_gpu, size_t nnz, const Tensor2<TypeHashKey> &row_offset,
    Tensor2<size_t> &hash_value_index, const Tensor2<TypeEmbeddingComp> &wgrad,
    Tensor2<float> &hash_table_value, size_t sm_count, cudaStream_t stream) {
  // Short aliases for the member scratch tensors used below.
  OptimizerTensor<TypeEmbeddingComp> &opt_tensor = opt_tensors_;
  OptParams &opt_params = param.opt_params;
  Tensor2<TypeHashKey> &sample_id = sample_id_tensors_;
  Tensor2<TypeHashKey> &sample_id_sort = sample_id_sort_tensors_;
  Tensor2<size_t> &hash_value_index_sort = hash_value_index_sort_tensors_;
  Tensor2<uint32_t> &hash_value_index_count_offset = hash_value_index_count_offset_tensors_;
  Tensor2<uint32_t> &new_hash_value_flag = new_hash_value_flag_tensors_;
  Tensor2<uint32_t> &hash_value_flag_sumed = hash_value_flag_sumed_tensors_;
  Tensor2<uint32_t> &hash_value_index_count_counter = hash_value_index_count_counter_tensors_;
  Tensor2<void> &temp_storage_sort = temp_storage_sort_tensors_;
  Tensor2<void> &temp_storage_scan = temp_storage_scan_tensors_;
  // Nothing to update when this GPU owns no slots.
  if (slot_num == 0) {
    return;
  }
  size_t block_size, grid_size;
  try {
    // step1: expand sample IDs (one id per nonzero, derived from the row offsets)
    block_size = 64;
    grid_size = (batch_size * slot_num - 1) / block_size + 1;
    sample_id_expand_kernel<<<grid_size, block_size, 0, stream>>>(
        batch_size, slot_num, row_offset.get_ptr(), sample_id.get_ptr());
    if (opt_params.optimizer == Optimizer_t::SGD &&
        opt_params.hyperparams.sgd.atomic_update) {  // for SGD, do atomic update
      // Fast path: no sort/dedup needed, duplicates are handled by atomicAdd.
      const size_t block_size = embedding_vec_size;
      const size_t grid_size = min(max(1ul, nnz), sm_count * 32);
      float lr_scale = opt_params.lr / opt_params.scaler;
      opt_sgd_atomic_kernel<<<grid_size, block_size, 0, stream>>>(
          nnz, embedding_vec_size, lr_scale, hash_value_index.get_ptr(), sample_id.get_ptr(),
          wgrad.get_ptr(), hash_table_value.get_ptr());
    } else {
      // step3: sort by hash_value_index so all samples hitting a row are contiguous.
      // end_bit bounds the radix passes to the bits actually needed for the vocabulary.
      int end_bit = static_cast<int>(log2(static_cast<float>(max_vocabulary_size_per_gpu))) + 1;
      size_t temp_storage_sort_size = temp_storage_sort.get_size_in_bytes();
      HCTR_LIB_THROW(cub::DeviceRadixSort::SortPairs(
          temp_storage_sort.get_ptr(), temp_storage_sort_size, hash_value_index.get_ptr(),
          hash_value_index_sort.get_ptr(), sample_id.get_ptr(), sample_id_sort.get_ptr(), nnz, 0,
          end_bit, stream, false));
      // step4: count the number for each unduplicated hash_value_index
      HCTR_LIB_THROW(
          cudaMemsetAsync(hash_value_index_count_counter.get_ptr(), 0, sizeof(uint32_t), stream));
      constexpr size_t max_grid_size = 384;
      block_size = 256;
      grid_size = min(max_grid_size, (nnz - 1) / block_size + 1);
      // Flag the first occurrence of each distinct row in the sorted index array.
      value_count_kernel_1<<<grid_size, block_size, 0, stream>>>(
          nnz, hash_value_index_sort.get_ptr(), new_hash_value_flag.get_ptr());
      // prefix_sum over the flags yields each unique row's ordinal position.
      size_t temp_storage_scan_size = temp_storage_scan.get_size_in_bytes();
      HCTR_LIB_THROW(cub::DeviceScan::InclusiveSum(
          temp_storage_scan.get_ptr(), temp_storage_scan_size, new_hash_value_flag.get_ptr(),
          hash_value_flag_sumed.get_ptr(), nnz, stream));
      value_count_kernel_2<<<grid_size, block_size, 0, stream>>>(
          nnz, new_hash_value_flag.get_ptr(), hash_value_flag_sumed.get_ptr(),
          hash_value_index_count_offset.get_ptr(), hash_value_index_count_counter.get_ptr());
      uint32_t hash_hash_value_index_count_num = 0;
      // this async memcpy will not perform as an async operation because the host memory
      // is not pinned memory, so it completes before the count is consumed below
      HCTR_LIB_THROW(cudaMemcpyAsync(&hash_hash_value_index_count_num,
                                     hash_value_index_count_counter.get_ptr(), sizeof(uint32_t),
                                     cudaMemcpyDeviceToHost, stream));
      // step5: use optimizer method to compute deltaw and update the parameters
      // (one block per unique row, one thread per embedding-vector element)
      block_size = embedding_vec_size;
      grid_size = max(1, hash_hash_value_index_count_num);
      switch (opt_params.update_type) {
        case Update_t::Global: {
          switch (opt_params.optimizer) {
            case Optimizer_t::Adam: {
              // Standard Adam bias-corrected step size for step `times`.
              float alpha_t =
                  opt_params.lr *
                  sqrt(1 -
                       pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) /
                  (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times));
              // update target mi and vi
              opt_adam_kernel_global<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam,
                  opt_tensor.opt_m_tensors_.get_ptr(), opt_tensor.opt_v_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler);
              // all update according to the mi vi
              adam_update_kernel_global<<<1024, 256, 0, stream>>>(
                  embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.adam,
                  opt_tensor.opt_m_tensors_.get_ptr(), opt_tensor.opt_v_tensors_.get_ptr(), alpha_t,
                  hash_table_value.get_ptr());
              break;
            }
            case Optimizer_t::AdaGrad: {
              opt_adagrad_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.adagrad, opt_tensor.opt_accm_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            }
            case Optimizer_t::MomentumSGD:
              // Touched rows fold their gradient into the momentum buffer ...
              opt_momentum_sgd_kernel_global<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.momentum, opt_tensor.opt_momentum_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(), opt_params.scaler);
              // ... then every row in the table is advanced by its momentum.
              momentum_sgd_update_kernel_global<<<1024, 256, 0, stream>>>(
                  embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.momentum,
                  opt_tensor.opt_momentum_tensors_.get_ptr(), hash_table_value.get_ptr());
              break;
            case Optimizer_t::Nesterov:
              nesterov_global_update_kernel_global<<<1024, 256, 0, stream>>>(
                  embedding_vec_size, max_vocabulary_size_per_gpu, opt_params.hyperparams.nesterov,
                  opt_tensor.opt_accm_tensors_.get_ptr(), hash_table_value.get_ptr());
              nesterov_local_update_kernel_global<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.nesterov, opt_tensor.opt_accm_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            case Optimizer_t::SGD:
              // Note: this is in fact a local update
              /// TODO: remove duplicate?
              opt_sgd_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            default:
              HCTR_OWN_THROW(Error_t::WrongInput, "Error: Invalid opitimizer type");
          }  // switch (optimizer)
          break;
        }
        case Update_t::Local: {
          switch (opt_params.optimizer) {
            case Optimizer_t::Adam: {
              float alpha_t =
                  opt_params.lr *
                  sqrt(1 -
                       pow(opt_params.hyperparams.adam.beta2, opt_params.hyperparams.adam.times)) /
                  (1 - pow(opt_params.hyperparams.adam.beta1, opt_params.hyperparams.adam.times));
              opt_adam_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam,
                  opt_tensor.opt_m_tensors_.get_ptr(), opt_tensor.opt_v_tensors_.get_ptr(), alpha_t,
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            }
            case Optimizer_t::AdaGrad: {
              opt_adagrad_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.adagrad, opt_tensor.opt_accm_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            }
            case Optimizer_t::MomentumSGD:
              opt_momentum_sgd_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.momentum, opt_tensor.opt_momentum_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            case Optimizer_t::Nesterov:
              opt_nesterov_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  opt_params.hyperparams.nesterov, opt_tensor.opt_accm_tensors_.get_ptr(),
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            case Optimizer_t::SGD:
              opt_sgd_kernel<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.lr,
                  sample_id_sort.get_ptr(), hash_value_index_sort.get_ptr(),
                  hash_value_index_count_offset.get_ptr(), wgrad.get_ptr(),
                  hash_table_value.get_ptr(), opt_params.scaler);
              break;
            default:
              HCTR_OWN_THROW(Error_t::WrongInput, "Error: Invalid opitimizer type");
          }  // switch (optimizer)
          break;
        }
        case Update_t::LazyGlobal: {
          switch (opt_params.optimizer) {
            case Optimizer_t::Adam: {
              // Step-independent part of alpha_t; the per-row bias correction is
              // completed inside the kernel from each row's previous update time.
              const float alpha_t_common =
                  opt_params.lr / (1.0f - opt_params.hyperparams.adam.beta1);
              opt_adam_kernel_lazy<<<grid_size, block_size, 0, stream>>>(
                  hash_hash_value_index_count_num, embedding_vec_size, opt_params.hyperparams.adam,
                  opt_tensor.opt_prev_time_tensors_.get_ptr(), opt_tensor.opt_m_tensors_.get_ptr(),
                  opt_tensor.opt_v_tensors_.get_ptr(), alpha_t_common,
                  opt_params.hyperparams.adam.times, sample_id_sort.get_ptr(),
                  hash_value_index_sort.get_ptr(), hash_value_index_count_offset.get_ptr(),
                  wgrad.get_ptr(), hash_table_value.get_ptr(), opt_params.scaler);
              break;
            }
            case Optimizer_t::AdaGrad:
            case Optimizer_t::MomentumSGD:
            case Optimizer_t::Nesterov:
            case Optimizer_t::SGD: {
              /// TODO: implement lazy global update for other optimizer types
              HCTR_OWN_THROW(Error_t::WrongInput,
                             "Error: lazy global update is only implemented for Adam");
              break;
            }
            default:
              HCTR_OWN_THROW(Error_t::WrongInput, "Error: Invalid opitimizer type");
          }
          break;
        }
        default:
          HCTR_OWN_THROW(Error_t::WrongInput, "Error: Invalid update type");
      }  // switch (update type)
    }
#ifndef NDEBUG
    // Debug builds drain the stream so in-kernel faults surface here.
    cudaDeviceSynchronize();
    HCTR_LIB_THROW(cudaGetLastError());
#endif
  } catch (const std::runtime_error &rt_err) {
    HCTR_LOG_S(ERROR, WORLD) << rt_err.what() << std::endl;
    throw;
  }
  return;
}
// Explicit instantiations for every supported (hash key type, embedding compute type) pair.
template class EmbeddingOptimizer<unsigned int, float>;
template class EmbeddingOptimizer<long long, float>;
template class EmbeddingOptimizer<unsigned int, __half>;
template class EmbeddingOptimizer<long long, __half>;
} // namespace HugeCTR | the_stack |
#include <cusp/coo_matrix.h>
#include <cusp/csr_matrix.h>
#include <cusp/dia_matrix.h>
#include <cusp/ell_matrix.h>
#include <cusp/hyb_matrix.h>
#include <cusp/multiply.h>
template <typename MemorySpace>
void TestCooMatrixView(void)
{
typedef int IndexType;
typedef float ValueType;
typedef typename cusp::coo_matrix<IndexType,ValueType,MemorySpace> Matrix;
typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator;
typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator;
typedef typename cusp::array1d_view<IndexIterator> IndexView;
typedef typename cusp::array1d_view<ValueIterator> ValueView;
typedef typename cusp::coo_matrix_view<IndexView,IndexView,ValueView> View;
Matrix M(3, 2, 6);
View V(3, 2, 6,
cusp::make_array1d_view(M.row_indices.begin(), M.row_indices.end()),
cusp::make_array1d_view(M.column_indices.begin(), M.column_indices.end()),
cusp::make_array1d_view(M.values.begin(), M.values.end()));
ASSERT_EQUAL(V.num_rows, 3);
ASSERT_EQUAL(V.num_cols, 2);
ASSERT_EQUAL(V.num_entries, 6);
ASSERT_EQUAL_QUIET(V.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(V.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(V.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(V.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(V.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(V.values.end(), M.values.end());
View W(M);
ASSERT_EQUAL(W.num_rows, 3);
ASSERT_EQUAL(W.num_cols, 2);
ASSERT_EQUAL(W.num_entries, 6);
ASSERT_EQUAL_QUIET(W.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(W.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(W.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(W.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(W.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(W.values.end(), M.values.end());
}
DECLARE_HOST_DEVICE_UNITTEST(TestCooMatrixView);
template <typename MemorySpace>
void TestCooMatrixViewAssignment(void)
{
typedef int IndexType;
typedef float ValueType;
typedef typename cusp::coo_matrix<IndexType,ValueType,MemorySpace> Matrix;
typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator;
typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator;
typedef typename cusp::array1d_view<IndexIterator> IndexView;
typedef typename cusp::array1d_view<ValueIterator> ValueView;
typedef typename cusp::coo_matrix_view<IndexView,IndexView,ValueView> View;
Matrix M(3, 2, 6);
View V = M;
ASSERT_EQUAL(V.num_rows, 3);
ASSERT_EQUAL(V.num_cols, 2);
ASSERT_EQUAL(V.num_entries, 6);
ASSERT_EQUAL_QUIET(V.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(V.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(V.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(V.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(V.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(V.values.end(), M.values.end());
View W = V;
ASSERT_EQUAL(W.num_rows, 3);
ASSERT_EQUAL(W.num_cols, 2);
ASSERT_EQUAL(W.num_entries, 6);
ASSERT_EQUAL_QUIET(W.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(W.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(W.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(W.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(W.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(W.values.end(), M.values.end());
}
DECLARE_HOST_DEVICE_UNITTEST(TestCooMatrixViewAssignment);
template <typename MemorySpace>
void TestMakeCooMatrixView(void)
{
typedef int IndexType;
typedef float ValueType;
typedef typename cusp::coo_matrix<IndexType,ValueType,MemorySpace> Matrix;
typedef typename cusp::array1d<IndexType,MemorySpace>::iterator IndexIterator;
typedef typename cusp::array1d<ValueType,MemorySpace>::iterator ValueIterator;
typedef typename cusp::array1d_view<IndexIterator> IndexView;
typedef typename cusp::array1d_view<ValueIterator> ValueView;
typedef typename cusp::coo_matrix_view<IndexView,IndexView,ValueView> View;
// construct view from parts
{
Matrix M(3, 2, 6);
View V =
cusp::make_coo_matrix_view(3, 2, 6,
cusp::make_array1d_view(M.row_indices),
cusp::make_array1d_view(M.column_indices),
cusp::make_array1d_view(M.values));
ASSERT_EQUAL(V.num_rows, 3);
ASSERT_EQUAL(V.num_cols, 2);
ASSERT_EQUAL(V.num_entries, 6);
V.row_indices[0] = 0;
V.column_indices[0] = 1;
V.values[0] = 2;
ASSERT_EQUAL_QUIET(V.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(V.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(V.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(V.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(V.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(V.values.end(), M.values.end());
}
// construct view from matrix
{
Matrix M(3, 2, 6);
View V = cusp::make_coo_matrix_view(M);
ASSERT_EQUAL(V.num_rows, 3);
ASSERT_EQUAL(V.num_cols, 2);
ASSERT_EQUAL(V.num_entries, 6);
V.row_indices[0] = 0;
V.column_indices[0] = 1;
V.values[0] = 2;
ASSERT_EQUAL_QUIET(V.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(V.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(V.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(V.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(V.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(V.values.end(), M.values.end());
}
// construct view from view
{
Matrix M(3, 2, 6);
View X = cusp::make_coo_matrix_view(M);
View V = cusp::make_coo_matrix_view(X);
ASSERT_EQUAL(V.num_rows, 3);
ASSERT_EQUAL(V.num_cols, 2);
ASSERT_EQUAL(V.num_entries, 6);
V.row_indices[0] = 0;
V.column_indices[0] = 1;
V.values[0] = 2;
ASSERT_EQUAL_QUIET(V.row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(V.row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(V.column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(V.column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(V.values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(V.values.end(), M.values.end());
}
// construct view from const matrix
{
const Matrix M(3, 2, 6);
ASSERT_EQUAL(cusp::make_coo_matrix_view(M).num_rows, 3);
ASSERT_EQUAL(cusp::make_coo_matrix_view(M).num_cols, 2);
ASSERT_EQUAL(cusp::make_coo_matrix_view(M).num_entries, 6);
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).row_indices.begin(), M.row_indices.begin());
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).row_indices.end(), M.row_indices.end());
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).column_indices.begin(), M.column_indices.begin());
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).column_indices.end(), M.column_indices.end());
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).values.begin(), M.values.begin());
ASSERT_EQUAL_QUIET(cusp::make_coo_matrix_view(M).values.end(), M.values.end());
}
}
DECLARE_HOST_DEVICE_UNITTEST(TestMakeCooMatrixView);
// Converting any sparse matrix type to its coo_view_type must preserve the
// shape and reproduce the original COO entries.
template <typename TestMatrix>
void TestToCooMatrixView(void)
{
    typedef typename TestMatrix::index_type    IndexType;
    typedef typename TestMatrix::value_type    ValueType;
    typedef typename TestMatrix::coo_view_type View;

    // dense 3x2 pattern, row-major: entry k is (k/2, k%2) with value k+1
    cusp::coo_matrix<IndexType,ValueType,cusp::host_memory> A(3, 2, 6);

    for (int k = 0; k < 6; k++)
    {
        A.row_indices[k]    = k / 2;
        A.column_indices[k] = k % 2;
        A.values[k]         = ValueType(k + 1);
    }

    TestMatrix M(A);
    View V(M);

    ASSERT_EQUAL(V.num_rows,    3);
    ASSERT_EQUAL(V.num_cols,    2);
    ASSERT_EQUAL(V.num_entries, 6);
    ASSERT_EQUAL(V.row_indices.size(),    6);
    ASSERT_EQUAL(V.column_indices.size(), 6);
    ASSERT_EQUAL(V.values.size(),         6);

    // round-trip through host arrays so device-side views can be compared too
    cusp::array1d<IndexType,cusp::host_memory> row_indices(V.row_indices);
    cusp::array1d<IndexType,cusp::host_memory> column_indices(V.column_indices);
    cusp::array1d<ValueType,cusp::host_memory> values(V.values);

    ASSERT_EQUAL(row_indices,    A.row_indices);
    ASSERT_EQUAL(column_indices, A.column_indices);
    ASSERT_EQUAL(values,         A.values);
}
DECLARE_SPARSE_MATRIX_UNITTEST(TestToCooMatrixView);
// A COO view of a coo_matrix must alias the matrix storage: writes through the
// view are visible in the matrix itself.
template <typename MemorySpace>
void TestCooToCooMatrixView(void)
{
    typedef int                                               IndexType;
    typedef float                                             ValueType;
    typedef cusp::coo_matrix<IndexType,ValueType,MemorySpace> TestMatrix;
    typedef typename TestMatrix::coo_view_type                View;

    // dense 3x2 pattern, row-major: entry k is (k/2, k%2) with value k+1
    cusp::coo_matrix<IndexType,ValueType,cusp::host_memory> A(3, 2, 6);

    for (int k = 0; k < 6; k++)
    {
        A.row_indices[k]    = k / 2;
        A.column_indices[k] = k % 2;
        A.values[k]         = ValueType(k + 1);
    }

    TestMatrix M(A);
    View V(M);

    // mutate the first entry through the view ...
    V.row_indices[0]    = -1;
    V.column_indices[0] = -1;
    V.values[0]         = -1;

    // ... and observe the change in the underlying matrix
    ASSERT_EQUAL(M.row_indices[0],    -1);
    ASSERT_EQUAL(M.column_indices[0], -1);
    ASSERT_EQUAL(M.values[0],         -1);
}
DECLARE_HOST_DEVICE_UNITTEST(TestCooToCooMatrixView); | the_stack |
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/unique.h> //unique
#include <thrust/remove.h> //remove
#include <thrust/transform_scan.h> //transform_inclusive_scan
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <iostream>
#include <strided_reduction.h>
#include <aggregation/selectors/selector_kernels.h>
namespace amgx
{
namespace strided_reduction
{
// Collect the per-block partial results of a strided reduction into `out_host`
// using a single 32-thread block. The name implies `out_host` points at pinned
// (host-mapped) memory so the device-side store is visible to the host after a
// synchronization -- callers are responsible for providing such a buffer.
template<int STRIDE, class scalar_t, class OP>
void count_block_results_pinned_memory(scalar_t *out_host, const int n_blocks, scalar_t *out_d, const OP &op = OP(), cudaStream_t stream = 0)
{
    strided_reduction_collect_partials<scalar_t, STRIDE, 32, OP> <<< 1, 32, 0, stream>>>(out_host, out_d, n_blocks);
    cudaCheckError();
}
// STRIDE=1 variant: reduce the per-block partials into a lazily-allocated pinned
// host buffer and return the total. Results are throttled through a small ring of
// `buffers` pinned slots (currently 1); the total is only materialized when the
// ring wraps, otherwise -1 is returned ("not ready yet"). The pinned buffer and
// throttle event are allocated once and cached for the process lifetime
// (intentionally never freed). Parameter `a` is unused but kept for interface
// compatibility with existing callers.
template<class scalar_t, class OP>
scalar_t count_block_results_pinned_memory(const int a, const int i, const int n_blocks, scalar_t *out_d, const OP &op = OP(), cudaStream_t stream = 0) //STRIDE=1 case
{
    static scalar_t *ret = 0;
    static cudaEvent_t throttle_event = 0;
    const int buffers = 1;

    if (ret == 0)
    {
        thrust::global_thread_handle::cudaMallocHost((void **)&ret, buffers * sizeof(scalar_t));
        ret[0] = 0;
        cudaEventCreateWithFlags(&throttle_event, cudaEventDisableTiming);
    }

    int ib = i % buffers;
    count_block_results_pinned_memory<1, scalar_t, OP>(ret + ib, n_blocks, out_d, op, stream);

    if (ib == buffers - 1)
    {
        // Record the event on the stream the collect kernel was launched on.
        // Recording on the default stream (as before) only ordered correctly by
        // virtue of legacy default-stream serialization and would race with the
        // pinned-buffer read if `stream` were a non-blocking stream.
        cudaEventRecord(throttle_event, stream);
        cudaEventSynchronize(throttle_event);
        scalar_t tot = 0;

        for (int j = 0; j < buffers; j++)
        {
            tot += ret[j];
        }

        // NOTE(review): "+ buffers - 1" is a no-op while buffers == 1; confirm the
        // intended semantics before enlarging the ring.
        return tot + buffers - 1;
    }
    else
    {
        return -1;
    }
}
}
void analyze_coloring(device_vector_alloc<int> aggregates_d, device_vector_alloc<int> colors_d);
namespace aggregation
{
namespace size8_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
// findStrongestNeighbour kernel for block_dia_csr_matrix format.
// Reads the weight from the edge_weights array. For each still-unaggregated row,
// scan its neighbours (skipping itself, halo columns >= num_block_rows, and its
// current partners) and record the strongest candidate: an unaggregated
// neighbour is preferred; when every candidate is already aggregated, the
// strongest aggregated one is stored with a NEGATED weight so later phases can
// distinguish the two cases by sign.
template <typename IndexType>
__global__
void findStrongestNeighbourBlockDiaCsr_StoreWeight_2(const IndexType *row_offsets, const IndexType *column_indices,
        const float *edge_weights, const IndexType num_block_rows, IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour, IndexType *partner_index, float *weight_strongest_neighbour)
{
    float weight;
    int jcol, jmin, jmax;
    int partner0, partner1, partner2;
    int agg_jcol;

    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
    {
        float max_weight_unaggregated = 0.;
        float max_weight_aggregated = 0.;
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;

        if (aggregated[tid] == -1) // Unaggregated row
        {
            // the three partners already merged with this row (one slot per stripe)
            partner0 = partner_index[tid];
            partner1 = partner_index[num_block_rows + tid];
            partner2 = partner_index[2 * num_block_rows + tid];
            jmin = row_offsets[tid];
            jmax = row_offsets[tid + 1];

            for (int j = jmin; j < jmax; j++)
            {
                jcol = column_indices[j];

                // skip the diagonal entry and halo columns
                if (jcol == tid || jcol >= num_block_rows) { continue; }

                weight = edge_weights[j];
                agg_jcol = aggregated[jcol];

                if (jcol != partner0 && jcol != partner1 && jcol != partner2)
                {
                    // ties are broken towards the larger column index
                    if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated candidate
                    {
                        max_weight_unaggregated = weight;
                        strongest_unaggregated = jcol;
                    }
                    else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated candidate
                    {
                        max_weight_aggregated = weight;
                        strongest_aggregated = jcol;
                    }
                }
            }

            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
            {
                // negative weight flags "only aggregated neighbours available"
                weight_strongest_neighbour[tid] = -max_weight_aggregated;
                strongest_neighbour[tid] = aggregates[strongest_aggregated];
            }
            else if (strongest_unaggregated != -1)
            {
                weight_strongest_neighbour[tid] = max_weight_unaggregated;
                strongest_neighbour[tid] = aggregates[strongest_unaggregated];
            }
            // if neither exists, this row's proposal is left untouched
        }
    }
}
// Merge the proposals of a 4-vertex group (a row plus its three partners): each
// row compares its own strongest-neighbour weight against its partners' and
// adopts the strongest proposal in place. A negative weight means "only
// aggregated neighbours remain"; when the whole group is negative the row is
// merged into an existing aggregate immediately (skipped in deterministic mode,
// which defers the merge). row_offsets/column_indices are unused here --
// presumably kept for launch-signature uniformity with sibling kernels.
template <typename IndexType>
__global__
void agreeOnProposal_2(const IndexType *row_offsets, const IndexType *column_indices,
                       IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
    int partner[3];
    float weight[3];

    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
    {
        int strongest_partner = -1;
        float my_weight = 0.;

        if (aggregated[tid] == -1)
        {
            my_weight = weight_strongest_neighbour[tid];
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                partner[m] = partner_index[tid + m * num_block_rows];
            }
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                weight[m] = weight_strongest_neighbour[partner[m]];
            }
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                //if (weight[m] > my_weight && weight[m] > 0.) // there is a partner that has an unaggregated neighbour
                if (weight[m] > my_weight)
                {
                    if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
                    {
                        strongest_partner = m;
                        my_weight = weight[m];
                    }
                }
                else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
                {
                    // only relevant while the group is still all-negative
                    if (my_weight < 0.)
                    {
                        strongest_partner = m;
                        my_weight = weight[m];
                    }
                }
            }

            if (my_weight < 0.) // means all neighbours of vertices in aggregate are aggregated, merge to another aggregate
            {
                if (!deterministic)
                {
                    aggregated[tid] = 1;
                    aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
                }
            }
            else if (strongest_partner != -1) // store my partner's pick
            {
                strongest_neighbour[tid] = strongest_neighbour[partner[strongest_partner]];
            }
        }
    }
}
// Deterministic variant of agreeOnProposal_2: identical group-consensus logic,
// but the adopted proposal is written to a separate output array
// (strongest_neighbour_out) instead of updating strongest_neighbour in place,
// so concurrent readers of strongest_neighbour never observe partial updates
// and the result is independent of thread scheduling.
template <typename IndexType>
__global__
void agreeOnProposal_2_deterministic(int *strongest_neighbour_out, const IndexType *row_offsets, const IndexType *column_indices,
                                     IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates, int deterministic)
{
    int partner[3];
    float weight[3];

    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < num_block_rows; tid += blockDim.x * gridDim.x)
    {
        int strongest_partner = -1;
        float my_weight = 0.;
        //copy here to avoid redundant copies before launching the kernel
        int new_strongest_neighbour_out = strongest_neighbour[tid];

        if (aggregated[tid] == -1)
        {
            my_weight = weight_strongest_neighbour[tid];
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                partner[m] = partner_index[tid + m * num_block_rows];
            }
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                weight[m] = weight_strongest_neighbour[partner[m]];
            }
#pragma unroll
            for (int m = 0; m < 3; m++)
            {
                if (weight[m] > my_weight)
                {
                    if (weight[m] > 0.) // there is a partner that has an unaggregated neighbour
                    {
                        strongest_partner = m;
                        my_weight = weight[m];
                    }
                }
                else if (weight[m] < my_weight) // there is a partner without an unaggregated neighbour, whose neighbour is stronger than mine
                {
                    if (my_weight < 0.)
                    {
                        strongest_partner = m;
                        my_weight = weight[m];
                    }
                }
            }

            if (my_weight < 0.) // means all neighbours of vertices in aggregate are aggregated, merge to another aggregate
            {
                // NOTE(review): with deterministic != 0 this branch is a no-op here;
                // the merge is presumably performed by a later pass -- confirm.
                if (!deterministic)
                {
                    aggregated[tid] = 1;
                    aggregates[tid] = strongest_partner != -1 ? strongest_neighbour[partner[strongest_partner]] : strongest_neighbour[tid];
                }
            }
            else if (strongest_partner != -1) // store my partner's pick
            {
                new_strongest_neighbour_out = strongest_neighbour[partner[strongest_partner]];
            }
        }

        //copy here to avoid redundant copies before launching the kernel
        strongest_neighbour_out[tid] = new_strongest_neighbour_out;
    }
}
// Pairwise matching step for size-4 aggregation: a row joins its proposed
// neighbour when that neighbour's proposal points back at the row's own
// aggregate (a mutual "handshake"). On a match the smaller aggregate id wins
// and the partner slots for the merged pair are recorded.
template <typename IndexType>
__global__
void matchAggregatesSize4(IndexType *aggregates, IndexType *aggregated, IndexType *strongest_neighbour, IndexType *partner_index, const IndexType num_rows)
{
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        if (aggregated[row] != -1) { continue; }   // already aggregated

        const int candidate = strongest_neighbour[row];

        if (candidate == -1) { continue; }         // no proposal for this row

        const int candidate_pick = strongest_neighbour[candidate];
        const int my_aggregate = aggregates[row];

        if (candidate_pick == my_aggregate)        // mutual agreement => merge
        {
            aggregated[row] = 1;
            aggregates[row] = (candidate < my_aggregate) ? candidate : my_aggregate;
            partner_index[row + num_rows] = candidate;
            partner_index[row + 2 * num_rows] = partner_index[candidate];
        }
    }
}
// Fill in missing partners after matching: any row whose second or third
// partner slot is still unset (-1) becomes its own partner in that slot.
template <typename IndexType>
__global__
void assignUnassignedVertices_2(IndexType *partner_index, const IndexType num_rows)
{
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        IndexType *slot1 = partner_index + num_rows + row;
        IndexType *slot2 = partner_index + 2 * num_rows + row;

        if (*slot1 == -1) { *slot1 = row; }  // unmatched: partner with self
        if (*slot2 == -1) { *slot2 = row; }
    }
}
// -----------------
// Methods
// ----------------
// Constructor
// Constructor: read the selector's tuning knobs from the AMG configuration.
// Note "determinism_flag" is read from the "default" scope while the remaining
// parameters use cfg_scope -- presumably because determinism is a global
// property rather than a per-solver one; confirm before changing.
template<class T_Config>
Size8SelectorBase<T_Config>::Size8SelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
    deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default");
    max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope);
    numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope);
    m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope);
    weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope);
}
// setAggregates for block_dia_csr_matrix_h format.
// Host specialization: the size-8 selector has no CPU implementation, so any
// attempt to run it on a host matrix is reported as a fatal configuration error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_h &A,
        IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
    FatalError("Size8 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// Compute a symmetrized strength weight for every off-diagonal nonzero block:
//   w(i,j) = 0.5 * (|a_ij| + |a_ji|) / max(|a_ii|, |a_jj|)
// using the (component, component) scalar of each bsize x bsize block. Weights
// of invalid edges (diagonal entries, columns outside the owned range) are
// forced to 0 by the `valid_j` multiplier. Optionally fills rand_edge_weights
// with tie-break values. The loop is warp-cooperative: utils::any keeps whole
// warps iterating together so the transpose-search loop below stays converged.
template <int NUM_COLS, typename IndexType, typename ValueType>
__global__ //__launch_bounds__(256,2)
void computeEdgeWeightsBlockDiaCsr_V2_1(
    const IndexType *row_offsets,
    //const int* __myrestrict row_offsets,
    const IndexType *row_indices,
    const IndexType *column_indices,
    const IndexType *dia_values,
    const ValueType *nonzero_values,
    //const ValueType* __restrict nonzero_values,
    const IndexType num_nonzero_blocks,
    float *str_edge_weights, float *rand_edge_weights, int num_owned, int bsize, int component)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int i, j;
    int bsize_sq = bsize * bsize;
    float kvalue;
    // offset of the (component, component) scalar inside each dense block
    int matrix_weight_entry = component * bsize + component;
    bool valid_tid;

    while (utils::any(valid_tid = tid < num_nonzero_blocks))
    {
        i = -1;
        double d1, d2, w1;

        if (valid_tid)
        {
            if ( rand_edge_weights != NULL )
            {
                // NOTE(review): at this point i has just been reset to -1 and j still
                // holds the previous iteration's value (uninitialized on the first
                // pass) -- this call looks like it was meant to run after i/j are
                // loaded below; confirm the intent against upstream AMGX.
                rand_edge_weights[tid] = random_weight(i, j, num_owned);
            }

            i = row_indices[tid];
            j = column_indices[tid];
            // diagonal magnitudes |a_ii| and |a_jj| for the denominator
            d1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + i) * bsize_sq + matrix_weight_entry]));
            d2 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[__load_nc(dia_values + j) * bsize_sq + matrix_weight_entry]));
        }

        const bool valid_j = valid_tid && i != j && j < num_owned;
        int ki = -1; //my transpose index, initialized to not found
        //int diag_j = -1; //j diagonal index

        if (!utils::any(valid_j))
        {
            // BUGFIX: advance tid before continuing. The original `continue` jumped
            // straight back to the while-condition without the increment at the
            // bottom of the loop, so a warp whose 32 entries were all diagonal/halo
            // edges would spin forever on the same tid.
            tid += gridDim.x * blockDim.x;
            continue;
        }

        int kmin = 0, kmax = 0;

        if (valid_j)
        {
            kmin = __cachingLoad(&row_offsets[j    ]);
            kmax = __cachingLoad(&row_offsets[j + 1]);
        }

        // scan row j for the transpose entry a_ji
        for ( int k = kmin ; k < kmax ; ++k )
        {
            const int idx = __load_nc(column_indices + k);

            if (idx == i)
            {
                ki = k; //find the transpose ji
            }
        }

        kvalue = 0.0f;

        if (ki > -1)
        {
            kvalue = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[ki * bsize_sq + matrix_weight_entry]));
        }

        if (valid_tid)
        {
            w1 = types::util<ValueType>::abs(__cachingLoad(&nonzero_values[tid * bsize_sq + matrix_weight_entry]));
            // multiply by valid_j so diagonal/halo edges get weight 0
            str_edge_weights[tid] = 0.5 * (w1 + kvalue) / ( (float) max(d1, d2) ) * valid_j;
        }

        tid += gridDim.x * blockDim.x;
    }
}
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
/**
 * Deferred-execution wrapper: runs the size-8 selector on the stored matrix
 * and output vectors when the async framework invokes run().
 */
template<class T, class M, class V>
struct async_size8_task : public task_cuda
{
    T *size8;               // selector instance to execute
    M *A;                   // system matrix
    V *aggregates;          // per-row aggregate ids (output)
    V *aggregates_global;   // global aggregate ids (output)
    int *num_aggregates;    // aggregate count (output)
    typedef typename T::IndexType IndexType;
    // Dispatch on average row density to pick the compile-time AVG_NNZ hint.
    void run()
    {
        const IndexType nnz_per_row = A->get_num_nz() / A->get_num_rows();
        if (nnz_per_row > 1)
        {
            // Both the ">2" and ">1" density classes use the same <4> hint.
            size8->template setAggregates_common_sqblock_avg_specialized<4>(*A, *aggregates, *aggregates_global, *num_aggregates);
        }
        else
        {
            size8->template setAggregates_common_sqblock_avg_specialized<2>(*A, *aggregates, *aggregates_global, *num_aggregates);
        }
    }
};
/**
 * Builds an async task that will run the size-8 selector on (A, aggregates,
 * aggregates_global, num_aggregates), appends it to a CUDA task chain, and
 * returns it. All arguments are captured by pointer — they must outlive the
 * task's execution.
 * NOTE(review): the chain object `cr` is allocated here and never freed in
 * this function — presumably the async framework takes ownership on append;
 * confirm.
 */
template<class T, class M, class V>
async_size8_task<T, M, V> *make_async_size8_task(T *size8, M &A, V &aggregates, V &aggregates_global, int &num_aggregates)
{
    async_size8_task<T, M, V> *ret = new async_size8_task<T, M, V>;
    ret->size8 = size8;
    ret->A = &A;
    ret->aggregates = &aggregates;
    ret->aggregates_global = &aggregates_global;
    ret->num_aggregates = &num_aggregates;
    // One lazily-created stream set shared by all calls; static, so it lives
    // for the whole process.
    static task_chain_cuda_streamset *ss = new task_chain_cuda_streamset(1);
    task_chain_cuda *cr = new task_chain_cuda(ss);
    cr->append(ret, asyncmanager::singleton()->main_thread_queue(2));
    return ret;
}
#endif
// Device-backend entry point: chooses a compile-time average-nonzeros-per-row
// hint (AVG_NNZ) and forwards to the specialized implementation.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblock(const Matrix_d &A,
        typename Matrix_d::IVector &aggregates,
        typename Matrix_d::IVector &aggregates_global,
        int &num_aggregates)
{
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    // Experimental async scaffolding; both switches are hard-coded off.
    bool task = false;
    bool push = false;
    if (task)
    {
        // Route execution through the async task framework instead of inline.
        task_cuda *t = make_async_size8_task(this, A, aggregates, aggregates_global, num_aggregates);
        enqueue_async_get_receipt(asyncmanager::singleton()->global_parallel_queue, t)->wait();
        return;
    }
    cudaStream_t stream_old;
    static cudaStream_t stream = 0;
    if (push)
    {
        // Temporarily swap this thread's stream for a private, lazily-created one.
        stream_old = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
        cudaStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]);
        if (stream == 0) { cudaStreamCreate(&stream); }
        thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream;
    }
#endif
    const IndexType nnz_per_row = A.get_num_nz() / A.get_num_rows();
    if (0)
    {
    }
    else if (nnz_per_row > 2)
    {
        setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
    }
    else if (nnz_per_row > 1)
    {
        // NOTE(review): same <4> specialization as the ">2" branch — presumably
        // the two density classes were deliberately collapsed; confirm.
        setAggregates_common_sqblock_avg_specialized<4>(A, aggregates, aggregates_global, num_aggregates);
    }
    else
    {
        setAggregates_common_sqblock_avg_specialized<2>(A, aggregates, aggregates_global, num_aggregates);
    }
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    if (push)
    {
        // Restore of the original stream is currently disabled.
        //cudaStreamSynchronize(stream);
        //thrust::global_thread_handle::threadStream[getCurrentThreadId()] = stream_old;
    }
#endif
}
// setAggregates for block_dia_csr_matrix_d format
//
// Device-side size-8 aggregation. Builds aggregates of up to 8 rows via three
// rounds of heavy-edge matching — singles into pairs (size 2), pairs into
// size-4 aggregates, size-4 into size-8 — then a cleanup pass that merges any
// remaining unaggregated rows into existing aggregates. AVG_NNZ is a
// compile-time average-row-density hint forwarded to the matching kernels.
// Results: `aggregates` holds a (renumbered) aggregate id per row and
// num_aggregates the final count.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
template<int AVG_NNZ>
void Size8Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >:: setAggregates_common_sqblock_avg_specialized(const Matrix_d &A,
        typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    cudaStream_t stream = thrust::global_thread_handle::threadStream[getCurrentThreadId()];
#else
    cudaStream_t stream = thrust::global_thread_handle::get_stream();
#endif
    const IndexType num_block_rows = A.get_num_rows();
    const IndexType num_nonzero_blocks = A.get_num_nz();
    // Multi-GPU matrices also carry halo rows; size the output accordingly.
    if (!A.is_matrix_singleGPU())
    {
        aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
    }
    else
    {
        aggregates.resize(num_block_rows);
    }
    // Initially, put each vertex in its own aggregate
    thrust::sequence(aggregates.begin(), aggregates.begin() + num_block_rows);
    cudaCheckError();
    IndexType *aggregates_ptr = aggregates.raw();
    // Create row_indices array
    IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
    typename Matrix_d::IVector row_indices(total_nz);
    cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
    const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
    const IndexType *A_row_indices_ptr = row_indices.raw();
    const IndexType *A_column_indices_ptr = A.col_indices.raw();
    const IndexType *A_dia_idx_ptr = A.diag.raw();
    //const ValueType *A_dia_val_ptr = thrust::raw_pointer_cast(&A.values[A.get_block_size()*A.diagOffset()]);
    const ValueType *A_nonzero_values_ptr = A.values.raw();
    // Scratch: strongest neighbour per row, and a 3-segment partner array
    // (segments at offsets 0, num_block_rows, 2*num_block_rows; -1 = unset).
    typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
    typename Matrix_d::IVector partner_index(3 * num_block_rows, -1);
    typename Matrix_d::IVector strongest_neighbour_tmp(num_block_rows);
    IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
    IndexType *partner_index_ptr = partner_index.raw();
    const int threads_per_block = 256;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1);
    int numUnassigned = num_block_rows;
    int numUnassigned_previous = numUnassigned;
    Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks + 8); //8-padded
    float *edge_weights_ptr = edge_weights.raw();
    float *rand_edge_weights_ptr = NULL;
    const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
    // Compute the edge weights
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    // POC build uses the newer kernels and the pinned-memory counting path.
    int avoid_thrust_count = 1;//0;
    int newFindStrongest = 1;//0;
    int newWeights = 1;//0;
#else
    int avoid_thrust_count = 0;//0;
    int newFindStrongest = 0;//0;
    int newWeights = 0;//0;
#endif
    int usenumassignedassumption = false;
    if (newWeights == 0)
    {
        cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, cudaFuncCachePreferL1);
        computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block, 0, stream>>>(
            A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula);
        cudaCheckError();
    }
    else
    {
        cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType>, cudaFuncCachePreferL1);
        computeEdgeWeightsBlockDiaCsr_V2_1<AVG_NNZ, IndexType, ValueType> <<< num_blocks_V2, threads_per_block, 0, stream>>>(
            A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component);
        cudaCheckError();
    }
    // -------------------------------------------------
    // First create aggregates of size 2
    // -------------------------------------------------
    int icount = 0;
    const int num_blocks_1024 = min( 13 * 2, (num_block_rows - 1) / 1024 + 1);
    device_vector_alloc<int> sets_per_block_t(num_blocks_1024);
    int *sets_per_block = thrust::raw_pointer_cast(sets_per_block_t.data());
    cudaCheckError();
    // Iterate matching until converged, capped by max_iterations / tolerance.
    do
    {
        if (newFindStrongest)
        {
            if (numUnassigned == num_block_rows && usenumassignedassumption)
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(
                    A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
            }
            else
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_NOMERGE, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(
                    A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, 0, 0, strongest_neighbour_ptr, partner_index_ptr, 0, this->deterministic, 0, 0);
            }
        }
        else
        {
            findStrongestNeighbourBlockDiaCsr_NoMerge <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, partner_index_ptr, strongest_neighbour_ptr, this->deterministic);
        }
        cudaCheckError();
        // Look for perfect matches
        numUnassigned_previous = numUnassigned;
        if (avoid_thrust_count == 0)
        {
            matchEdges <<< num_blocks, threads_per_block, 0, stream>>>(num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr);
            cudaCheckError();
            numUnassigned = (int)thrust::count(partner_index.begin(), partner_index.begin() + num_block_rows, -1);
            cudaCheckError();
        }
        else
        {
            // Pinned-memory per-block counting avoids a thrust::count sync.
            my_MatchEdges <<< num_blocks_1024, 1024, 0, stream>>>(num_block_rows, partner_index_ptr, aggregates_ptr, strongest_neighbour_ptr, sets_per_block);
            cudaCheckError();
            numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(0, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
        }
        icount++;
    }
    while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned));
    // Rows still unmatched pair with themselves.
    assignUnassignedVertices <<< num_blocks, threads_per_block, 0, stream>>>(partner_index_ptr, num_block_rows);
    cudaCheckError();
    // -------------------------------------------------
    // Merge aggregates to create aggregates of size 4
    // -------------------------------------------------
    Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > weight_strongest_neighbour(num_block_rows, -1);
    float *weight_strongest_neighbour_ptr = weight_strongest_neighbour.raw();
    // At this point, partner index contain either your index or your neighbours index, depending on weither you're matched or not
    // aggregates contain the largest vertex index of vertices in aggregate
    typename Matrix_d::IVector aggregated(num_block_rows, -1);
    IndexType *aggregated_ptr = aggregated.raw();
    // now used as flag to check if aggregated or not
    icount = 0;
    numUnassigned = num_block_rows;
    numUnassigned_previous = numUnassigned;
    do
    {
        // Each vertex stores in strongest_neighbour the aggregates number of strongest neighbour and the weight of connection
        if (newFindStrongest)
        {
            if (numUnassigned == num_block_rows && usenumassignedassumption)
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
            }
            else
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
            }
        }
        else
        {
            findStrongestNeighbourBlockDiaCsr_StoreWeight <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic);
        }
        cudaCheckError();
        // Each vertex in same aggregates will agree on aggregates to propose too, and both store the aggregate number they want to match with
        agreeOnProposal <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr);
        cudaCheckError();
        numUnassigned_previous = numUnassigned;
        if (avoid_thrust_count == 0)
        {
            matchAggregatesSize4 <IndexType> <<< num_blocks, threads_per_block, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows);
            numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
        }
        else
        {
            my_matchAggregatesSize4 <<< num_blocks_1024, 1024, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, partner_index_ptr, num_block_rows, sets_per_block);
            numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(1, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
        }
        cudaCheckError();
        icount++;
    }
    while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous) );
    assignUnassignedVertices_2 <<< num_blocks, threads_per_block, 0, stream>>>(partner_index_ptr, num_block_rows);
    cudaCheckError();
    // -------------------------------------------------
    // Merge aggregates to create aggregates of size 8
    // -------------------------------------------------
    // Reset per-round state before the final matching round.
    thrust::fill(aggregated.begin(), aggregated.end(), -1);
    cudaCheckError();
    thrust::fill(weight_strongest_neighbour.begin(), weight_strongest_neighbour.end(), -1.);
    cudaCheckError();
    icount = 0;
    numUnassigned = num_block_rows;
    numUnassigned_previous = numUnassigned;
    do
    {
        // Each vertex stores in strongest_neighbour the aggregates number of strongest neighbour and the weight of connection
        if (newFindStrongest)
        {
            if (numUnassigned == num_block_rows && usenumassignedassumption)
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 1, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
            }
            else
            {
                cudaFuncSetCacheConfig(my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int>, cudaFuncCachePreferL1);
                my_findStrongestNeighbourBlockDiaCsr_NoMerge<AVG_NNZ, ALGORITHM_STOREWEIGHTS_2, 0, 0, int> <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, num_nonzero_blocks, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr, this->deterministic, 0, 0);
            }
        }
        else
        {
            findStrongestNeighbourBlockDiaCsr_StoreWeight_2 <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregated_ptr, aggregates_ptr, strongest_neighbour_ptr, partner_index_ptr, weight_strongest_neighbour_ptr);
        }
        cudaCheckError();
        // Each vertex in same aggregates will agree on aggregates to propose too, and both store the aggregate number they want to match with
        if (!this->deterministic)
        {
            agreeOnProposal_2 <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, num_block_rows, aggregated_ptr, strongest_neighbour_ptr, weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
        }
        else
        {
            //strongest_neighbour_tmp = strongest_neighbour; // copied that directly in the kernel
            // Deterministic variant: writes into a temp array then swaps, so
            // the result does not depend on thread scheduling.
            agreeOnProposal_2_deterministic <<< num_blocks, threads_per_block, 0, stream>>>(
                strongest_neighbour_tmp.raw(),
                A_row_offsets_ptr,
                A_column_indices_ptr, num_block_rows,
                aggregated_ptr, strongest_neighbour_ptr,
                weight_strongest_neighbour_ptr, partner_index_ptr, aggregates_ptr, this->deterministic);
            strongest_neighbour_tmp.swap(strongest_neighbour);
            strongest_neighbour_ptr = strongest_neighbour.raw(); //re-saving the correct pointer..
        }
        cudaCheckError();
        numUnassigned_previous = numUnassigned;
        if (avoid_thrust_count == 0)
        {
            matchAggregates <IndexType> <<< num_blocks, threads_per_block, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows);
            numUnassigned = thrust::count(aggregated.begin(), aggregated.end(), -1);
        }
        else
        {
            my_matchAggregates <<< num_blocks_1024, 1024, 0, stream>>>(aggregates_ptr, aggregated_ptr, strongest_neighbour_ptr, num_block_rows, sets_per_block);
            numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(2, icount, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
        }
        cudaCheckError();
        icount++;
    }
    while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned) );
    // Merge remaining vertices with current aggregates
    int local_iter = 0;
    if (!this->deterministic)
    {
        while (numUnassigned != 0)
        {
            mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, (IndexType *) NULL, local_iter > 1);
            cudaCheckError();
            numUnassigned_previous = numUnassigned;
            numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
            cudaCheckError();
            local_iter++;
        }
    }
    else
    {
        // Deterministic cleanup: candidates are collected first, then joined.
        typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
        while (numUnassigned != 0)
        {
            // allow singletons only from the 2nd local iteration
            mergeWithExistingAggregatesBlockDiaCsr <<< num_blocks, threads_per_block, 0, stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, aggregated_ptr, this->deterministic, aggregates_candidate.raw(), local_iter > 1);
            cudaCheckError();
            numUnassigned_previous = numUnassigned;
            if (avoid_thrust_count == 0)
            {
                joinExistingAggregates <<< num_blocks, threads_per_block, 0, stream>>>(num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw());
                numUnassigned = (int)thrust::count(aggregated.begin(), aggregated.end(), -1);
            }
            else
            {
                my_joinExistingAggregates <<< num_blocks_1024, 1024, 0, stream>>>(num_block_rows, aggregates_ptr, aggregated_ptr, aggregates_candidate.raw(), sets_per_block);
                numUnassigned = numUnassigned_previous - amgx::strided_reduction::count_block_results_pinned_memory(3, local_iter, num_blocks_1024, sets_per_block, amgx::strided_reduction::op_sum(), stream);
            }
            cudaCheckError();
            local_iter++;
        }
        aggregates_candidate.resize(0);
    }
    // Compact aggregate ids to 0..num_aggregates-1 and report the count.
    this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
    //analyze_coloring(aggregates, A.getMatrixColoring().getRowColors());
}
/**
 * Public entry point: validates that the matrix uses square blocks and
 * delegates to the square-block implementation; non-square blocks are a
 * fatal, unsupported configuration.
 */
template <class T_Config>
void Size8SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
        IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
    if (A.get_block_dimx() != A.get_block_dimy())
    {
        FatalError("Unsupported block size for Size8Selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    else
    {
        setAggregates_common_sqblock( A, aggregates, aggregates_global, num_aggregates );
    }
}
// -------------------------
// Explicit instantiations
// -------------------------
// Instantiate the selector base class for every supported real and complex
// build configuration.
#define AMGX_CASE_LINE(CASE) template class Size8SelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the concrete selector for the same set of configurations.
#define AMGX_CASE_LINE(CASE) template class Size8Selector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
/******************************************************************************
 * CTA tile-processing abstraction for BFS frontier expansion
******************************************************************************/
#pragma once
#include "../../../../util/device_intrinsics.cuh"
#include "../../../../util/cta_work_progress.cuh"
#include "../../../../util/scan/cooperative_scan.cuh"
#include "../../../../util/io/modified_load.cuh"
#include "../../../../util/io/modified_store.cuh"
#include "../../../../util/io/load_tile.cuh"
#include "../../../../util/operators.cuh"
#include "../../../../util/soa_tuple.cuh"
#include "../../../../util/scan/soa/cooperative_soa_scan.cuh"
B40C_NS_PREFIX
namespace b40c {
namespace graph {
namespace bfs {
namespace two_phase {
namespace expand_atomic {
/**
* CTA tile-processing abstraction for BFS frontier expansion
*/
// Texture reference used for cached reads of the CSR row-offsets array.
// NOTE(review): the binding to the actual device array must happen in host
// code elsewhere — not visible in this file.
template <typename SizeT>
struct RowOffsetTex
{
    static texture<SizeT, cudaTextureType1D, cudaReadModeElementType> ref;
};
// Out-of-class definition of the per-SizeT static texture reference.
template <typename SizeT>
texture<SizeT, cudaTextureType1D, cudaReadModeElementType> RowOffsetTex<SizeT>::ref;
// Row-offset fetch helper. Generic fallback: plain global-memory read
// (no texture path exists for arbitrary SizeT).
template<typename SizeT, typename VertexId>
struct Tex
{
    static __device__ __forceinline__ VertexId fetch(SizeT* row_offsets, VertexId row_id)
    {
        return row_offsets[row_id];
    }
};
// Specialization for int row offsets: fetches through the bound texture
// reference for cached reads. Note the row_offsets pointer argument is
// ignored on this path — the data comes from RowOffsetTex<int>::ref.
template<typename VertexId>
struct Tex<int, VertexId>
{
    static __device__ __forceinline__ VertexId fetch(int* row_offsets, VertexId row_id)
    {
        return tex1Dfetch(RowOffsetTex<int>::ref, row_id);
    }
};
/**
* Derivation of KernelPolicy that encapsulates tile-processing routines
*/
template <typename KernelPolicy>
struct Cta
{
//---------------------------------------------------------------------
// Typedefs
//---------------------------------------------------------------------
typedef typename KernelPolicy::VertexId VertexId;
typedef typename KernelPolicy::SizeT SizeT;
typedef typename KernelPolicy::SmemStorage SmemStorage;
typedef typename KernelPolicy::SoaScanOp SoaScanOp;
typedef typename KernelPolicy::RakingSoaDetails RakingSoaDetails;
typedef typename KernelPolicy::TileTuple TileTuple;
typedef util::Tuple<
SizeT (*)[KernelPolicy::LOAD_VEC_SIZE],
SizeT (*)[KernelPolicy::LOAD_VEC_SIZE]> RankSoa;
//---------------------------------------------------------------------
// Members
//---------------------------------------------------------------------
// Input and output device pointers
VertexId *d_in; // Incoming vertex frontier
VertexId *d_out; // Outgoing edge frontier
VertexId *d_predecessor_out; // Outgoing predecessor edge frontier (used when KernelPolicy::MARK_PREDECESSORS)
VertexId *d_column_indices; // CSR column-indices array
SizeT *d_row_offsets; // CSR row-offsets array
// Work progress
VertexId queue_index; // Current frontier queue counter index
util::CtaWorkProgress &work_progress; // Atomic workstealing and queueing counters
SizeT max_edge_frontier; // Maximum size (in elements) of outgoing edge frontier
int num_gpus; // Number of GPUs
// Operational details for raking grid
RakingSoaDetails raking_soa_details;
// Shared memory for the CTA
SmemStorage &smem_storage;
//---------------------------------------------------------------------
// Helper Structures
//---------------------------------------------------------------------
/**
* Tile of incoming vertex frontier to process
*/
template <
int LOG_LOADS_PER_TILE,
int LOG_LOAD_VEC_SIZE>
struct Tile
{
//---------------------------------------------------------------------
// Typedefs and Constants
//---------------------------------------------------------------------
enum {
LOADS_PER_TILE = 1 << LOG_LOADS_PER_TILE,
LOAD_VEC_SIZE = 1 << LOG_LOAD_VEC_SIZE
};
typedef typename util::VecType<SizeT, 2>::Type Vec2SizeT;
//---------------------------------------------------------------------
// Members
//---------------------------------------------------------------------
// Dequeued vertex ids
VertexId vertex_id[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Edge list details
SizeT row_offset[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT row_length[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Global scatter offsets. Coarse for CTA/warp-based scatters, fine for scan-based scatters
SizeT fine_count;
SizeT coarse_row_rank[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT fine_row_rank[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Progress for expanding scan-based gather offsets
SizeT row_progress[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT progress;
//---------------------------------------------------------------------
// Helper Structures
//---------------------------------------------------------------------
/**
* Iterate next vector element
*/
template <int LOAD, int VEC, int dummy = 0>
struct Iterate
{
/**
* Init
*/
template <typename Tile>
static __device__ __forceinline__ void Init(Tile *tile)
{
    // Zero row length marks "no neighbor list"; row progress counts how many
    // neighbors the scan-based expander has already emitted for this slot.
    tile->row_length[LOAD][VEC] = 0;
    tile->row_progress[LOAD][VEC] = 0;
    // Compile-time recursion over the vector elements of this load.
    Iterate<LOAD, VEC + 1>::Init(tile);
}
/**
* Inspect
*/
template <typename Cta, typename Tile>
static __device__ __forceinline__ void Inspect(Cta *cta, Tile *tile)
{
    // A vertex id of -1 marks an empty frontier slot: leave length/progress
    // at their Init() defaults so the rank terms below contribute zero.
    if (tile->vertex_id[LOAD][VEC] != -1) {
        // Translate vertex-id into local gpu row-id (currently stride of num_gpu)
        VertexId row_id = (tile->vertex_id[LOAD][VEC] & KernelPolicy::VERTEX_ID_MASK) / cta->num_gpus;
        // Load neighbor row range from d_row_offsets
        Vec2SizeT row_range;
        row_range.x = Tex<SizeT, VertexId>::fetch(cta->d_row_offsets, row_id);
        row_range.y = Tex<SizeT, VertexId>::fetch(cta->d_row_offsets, row_id + 1);
        // Node is previously unvisited: compute row offset and length
        tile->row_offset[LOAD][VEC] = row_range.x;
        tile->row_length[LOAD][VEC] = row_range.y - row_range.x;
    }
    // Short rows feed the fine-grained (scan-based) prefix sum; long rows feed
    // the coarse-grained (warp/CTA cooperative) one. Each slot contributes its
    // length to exactly one of the two rank arrays.
    tile->fine_row_rank[LOAD][VEC] = (tile->row_length[LOAD][VEC] < KernelPolicy::WARP_GATHER_THRESHOLD) ?
        tile->row_length[LOAD][VEC] : 0;
    tile->coarse_row_rank[LOAD][VEC] = (tile->row_length[LOAD][VEC] < KernelPolicy::WARP_GATHER_THRESHOLD) ?
        0 : tile->row_length[LOAD][VEC];
    // Compile-time recursion over the vector elements of this load.
    Iterate<LOAD, VEC + 1>::Inspect(cta, tile);
}
/**
* Expand by CTA
*/
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExpandByCta(Cta *cta, Tile *tile)
{
    // CTA-based expansion/loading: repeatedly elect one thread whose row is
    // long enough for whole-CTA gathering, then have the entire CTA
    // cooperatively copy that row's neighbor list to the output frontier.
    // NOTE(review): pre-Volta b40c code — ordering relies on the
    // __syncthreads() barriers below, and the loop assumes cta_comm was
    // initialized to KernelPolicy::THREADS before entry (presumably by the
    // enclosing Cta setup — confirm).
    while (true) {
        // Vie
        if (tile->row_length[LOAD][VEC] >= KernelPolicy::CTA_GATHER_THRESHOLD) {
            cta->smem_storage.state.cta_comm = threadIdx.x;
        }
        __syncthreads();
        // Check
        int owner = cta->smem_storage.state.cta_comm;
        if (owner == KernelPolicy::THREADS) {
            // No contenders
            break;
        }
        if (owner == threadIdx.x) {
            // Got control of the CTA: command it
            cta->smem_storage.state.warp_comm[0][0] = tile->row_offset[LOAD][VEC]; // start
            cta->smem_storage.state.warp_comm[0][1] = tile->coarse_row_rank[LOAD][VEC]; // queue rank
            cta->smem_storage.state.warp_comm[0][2] = tile->row_offset[LOAD][VEC] + tile->row_length[LOAD][VEC]; // oob
            if (KernelPolicy::MARK_PREDECESSORS) {
                cta->smem_storage.state.warp_comm[0][3] = tile->vertex_id[LOAD][VEC]; // predecessor
            }
            // Unset row length
            tile->row_length[LOAD][VEC] = 0;
            // Unset my command
            cta->smem_storage.state.cta_comm = KernelPolicy::THREADS; // invalid
        }
        __syncthreads();
        // Read commands
        SizeT coop_offset = cta->smem_storage.state.warp_comm[0][0];
        SizeT coop_rank = cta->smem_storage.state.warp_comm[0][1] + threadIdx.x;
        SizeT coop_oob = cta->smem_storage.state.warp_comm[0][2];
        VertexId predecessor_id;
        if (KernelPolicy::MARK_PREDECESSORS) {
            predecessor_id = cta->smem_storage.state.warp_comm[0][3];
        }
        VertexId neighbor_id;
        // Full CTA-width strips of the elected row's neighbor list.
        while (coop_offset + KernelPolicy::THREADS < coop_oob) {
            // Gather
            util::io::ModifiedLoad<KernelPolicy::COLUMN_READ_MODIFIER>::Ld(
                neighbor_id, cta->d_column_indices + coop_offset + threadIdx.x);
            // Scatter neighbor
            util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                neighbor_id,
                cta->d_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
            if (KernelPolicy::MARK_PREDECESSORS) {
                // Scatter predecessor
                util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                    predecessor_id,
                    cta->d_predecessor_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
            }
            coop_offset += KernelPolicy::THREADS;
            coop_rank += KernelPolicy::THREADS;
        }
        // Partial last strip (guarded per-thread).
        if (coop_offset + threadIdx.x < coop_oob) {
            // Gather
            util::io::ModifiedLoad<KernelPolicy::COLUMN_READ_MODIFIER>::Ld(
                neighbor_id, cta->d_column_indices + coop_offset + threadIdx.x);
            // Scatter neighbor
            util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                neighbor_id, cta->d_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
            if (KernelPolicy::MARK_PREDECESSORS) {
                // Scatter predecessor
                util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                    predecessor_id, cta->d_predecessor_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
            }
        }
    }
    // Next vector element
    Iterate<LOAD, VEC + 1>::ExpandByCta(cta, tile);
}
/**
* Expand by warp
*/
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExpandByWarp(Cta *cta, Tile *tile)
{
    // Compile-time gate: warp gathering only exists when the warp threshold
    // is below the CTA threshold (otherwise long rows go to ExpandByCta).
    if (KernelPolicy::WARP_GATHER_THRESHOLD < KernelPolicy::CTA_GATHER_THRESHOLD) {
        // Warp-based expansion/loading
        // NOTE(review): uses the legacy mask-less __any() vote and no explicit
        // __syncwarp(); this pre-Volta b40c code relies on implicit warp
        // synchrony, which does not hold under independent thread scheduling.
        int warp_id = threadIdx.x >> B40C_LOG_WARP_THREADS(KernelPolicy::CUDA_ARCH);
        int lane_id = util::LaneId();
        // Loop while any lane still owns a warp-sized row.
        while (__any(tile->row_length[LOAD][VEC] >= KernelPolicy::WARP_GATHER_THRESHOLD)) {
            if (tile->row_length[LOAD][VEC] >= KernelPolicy::WARP_GATHER_THRESHOLD) {
                // Vie for control of the warp
                cta->smem_storage.state.warp_comm[warp_id][0] = lane_id;
            }
            // Last writer wins the election; the winner broadcasts its row.
            if (lane_id == cta->smem_storage.state.warp_comm[warp_id][0]) {
                // Got control of the warp
                cta->smem_storage.state.warp_comm[warp_id][0] = tile->row_offset[LOAD][VEC]; // start
                cta->smem_storage.state.warp_comm[warp_id][1] = tile->coarse_row_rank[LOAD][VEC]; // queue rank
                cta->smem_storage.state.warp_comm[warp_id][2] = tile->row_offset[LOAD][VEC] + tile->row_length[LOAD][VEC]; // oob
                if (KernelPolicy::MARK_PREDECESSORS) {
                    cta->smem_storage.state.warp_comm[warp_id][3] = tile->vertex_id[LOAD][VEC]; // predecessor
                }
                // Unset row length
                tile->row_length[LOAD][VEC] = 0;
            }
            SizeT coop_offset = cta->smem_storage.state.warp_comm[warp_id][0];
            SizeT coop_rank = cta->smem_storage.state.warp_comm[warp_id][1] + lane_id;
            SizeT coop_oob = cta->smem_storage.state.warp_comm[warp_id][2];
            VertexId predecessor_id;
            if (KernelPolicy::MARK_PREDECESSORS) {
                predecessor_id = cta->smem_storage.state.warp_comm[warp_id][3];
            }
            VertexId neighbor_id;
            // Full warp-width strips of the elected row's neighbor list.
            while (coop_offset + B40C_WARP_THREADS(KernelPolicy::CUDA_ARCH) < coop_oob) {
                // Gather
                util::io::ModifiedLoad<KernelPolicy::COLUMN_READ_MODIFIER>::Ld(
                    neighbor_id, cta->d_column_indices + coop_offset + lane_id);
                // Scatter neighbor
                util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                    neighbor_id, cta->d_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
                if (KernelPolicy::MARK_PREDECESSORS) {
                    // Scatter predecessor
                    util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                        predecessor_id, cta->d_predecessor_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
                }
                coop_offset += B40C_WARP_THREADS(KernelPolicy::CUDA_ARCH);
                coop_rank += B40C_WARP_THREADS(KernelPolicy::CUDA_ARCH);
            }
            // Partial last strip (guarded per-lane).
            if (coop_offset + lane_id < coop_oob) {
                // Gather
                util::io::ModifiedLoad<KernelPolicy::COLUMN_READ_MODIFIER>::Ld(
                    neighbor_id, cta->d_column_indices + coop_offset + lane_id);
                // Scatter neighbor
                util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                    neighbor_id, cta->d_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
                if (KernelPolicy::MARK_PREDECESSORS) {
                    // Scatter predecessor
                    util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
                        predecessor_id, cta->d_predecessor_out + cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
                }
            }
        }
        // Next vector element
        Iterate<LOAD, VEC + 1>::ExpandByWarp(cta, tile);
    }
}
/**
 * Expand by scan
 *
 * Serial, per-thread expansion of "fine-grained" (short) neighbor lists:
 * writes as many gather offsets (and, optionally, predecessor vertices)
 * as fit into the CTA's shared scratch window for the current pass.
 * row_progress persists across calls so a long list resumes where it
 * left off on the next pass of the caller's progress loop.
 */
template <typename Cta, typename Tile>
static __device__ __forceinline__ void ExpandByScan(Cta *cta, Tile *tile)
{
	// Attempt to make further progress on this dequeued item's neighbor
	// list if its current offset into local scratch is in range
	SizeT scratch_offset = tile->fine_row_rank[LOAD][VEC] + tile->row_progress[LOAD][VEC] - tile->progress;
	while ((tile->row_progress[LOAD][VEC] < tile->row_length[LOAD][VEC]) &&
		(scratch_offset < SmemStorage::GATHER_ELEMENTS))
	{
		// Put gather offset into scratch space
		cta->smem_storage.gather_offsets[scratch_offset] = tile->row_offset[LOAD][VEC] + tile->row_progress[LOAD][VEC];
		if (KernelPolicy::MARK_PREDECESSORS) {
			// Put dequeued vertex as the predecessor into scratch space
			cta->smem_storage.gather_predecessors[scratch_offset] = tile->vertex_id[LOAD][VEC];
		}
		tile->row_progress[LOAD][VEC]++;
		scratch_offset++;
	}
	// Next vector element
	Iterate<LOAD, VEC + 1>::ExpandByScan(cta, tile);
}
};
/**
 * Iterate next load
 *
 * Partial specialization selected when the vector index reaches
 * LOAD_VEC_SIZE: each method simply restarts the vector iteration
 * (VEC = 0) on the next load.
 */
template <int LOAD, int dummy>
struct Iterate<LOAD, LOAD_VEC_SIZE, dummy>
{
	/**
	 * Init: advance to next load
	 */
	template <typename Tile>
	static __device__ __forceinline__ void Init(Tile *tile)
	{
		Iterate<LOAD + 1, 0>::Init(tile);
	}
	/**
	 * Inspect: advance to next load
	 */
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void Inspect(Cta *cta, Tile *tile)
	{
		Iterate<LOAD + 1, 0>::Inspect(cta, tile);
	}
	/**
	 * Expand by CTA: advance to next load
	 */
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByCta(Cta *cta, Tile *tile)
	{
		Iterate<LOAD + 1, 0>::ExpandByCta(cta, tile);
	}
	/**
	 * Expand by warp: advance to next load
	 */
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByWarp(Cta *cta, Tile *tile)
	{
		Iterate<LOAD + 1, 0>::ExpandByWarp(cta, tile);
	}
	/**
	 * Expand by scan: advance to next load
	 */
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByScan(Cta *cta, Tile *tile)
	{
		Iterate<LOAD + 1, 0>::ExpandByScan(cta, tile);
	}
};
/**
 * Terminate
 *
 * Base case ending the compile-time recursion once every load in the
 * tile has been visited: all methods are empty no-ops.
 */
template <int dummy>
struct Iterate<LOADS_PER_TILE, 0, dummy>
{
	// Init
	template <typename Tile>
	static __device__ __forceinline__ void Init(Tile *tile) {}
	// Inspect
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void Inspect(Cta *cta, Tile *tile) {}
	// ExpandByCta
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByCta(Cta *cta, Tile *tile) {}
	// ExpandByWarp
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByWarp(Cta *cta, Tile *tile) {}
	// ExpandByScan
	template <typename Cta, typename Tile>
	static __device__ __forceinline__ void ExpandByScan(Cta *cta, Tile *tile) {}
};
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
 * Constructor: initializes per-(load, vec) tile state by unrolling the
 * Init functor over every slot.
 */
__device__ __forceinline__ Tile()
{
	Iterate<0, 0>::Init(this);
}
/**
 * Inspect dequeued vertices, updating label if necessary and
 * obtaining edge-list details (unrolled over all loads/vecs).
 */
template <typename Cta>
__device__ __forceinline__ void Inspect(Cta *cta)
{
	Iterate<0, 0>::Inspect(cta, this);
}
/**
 * Expands neighbor lists for valid vertices at CTA-expansion granularity
 * (unrolled over all loads/vecs).
 */
template <typename Cta>
__device__ __forceinline__ void ExpandByCta(Cta *cta)
{
	Iterate<0, 0>::ExpandByCta(cta, this);
}
/**
 * Expands neighbor lists for valid vertices at warp-expansion granularity
 * (unrolled over all loads/vecs).
 */
template <typename Cta>
__device__ __forceinline__ void ExpandByWarp(Cta *cta)
{
	Iterate<0, 0>::ExpandByWarp(cta, this);
}
/**
 * Expands neighbor lists by local scan rank into the CTA's shared
 * gather scratch (unrolled over all loads/vecs).
 */
template <typename Cta>
__device__ __forceinline__ void ExpandByScan(Cta *cta)
{
	Iterate<0, 0>::ExpandByScan(cta, this);
}
};
//---------------------------------------------------------------------
// Methods
//---------------------------------------------------------------------
/**
 * Constructor
 *
 * Wires the CTA to its shared-memory storage, the raking scan grids
 * (coarse + fine, scanned as a structure-of-arrays), and the global
 * queue/graph pointers.  Thread 0 primes the CTA-wide communication
 * slot (cta_comm, set to the invalid sentinel KernelPolicy::THREADS)
 * and clears the overflow flag.  Note: no __syncthreads() here --
 * callers must synchronize before reading those fields (ProcessTile
 * does so via its scan/sync sequence).
 */
__device__ __forceinline__ Cta(
	VertexId 				queue_index,
	int						num_gpus,
	SmemStorage 			&smem_storage,
	VertexId 				*d_in,
	VertexId 				*d_out,
	VertexId 				*d_predecessor_out,
	VertexId 				*d_column_indices,
	SizeT 					*d_row_offsets,
	util::CtaWorkProgress	&work_progress,
	SizeT					max_edge_frontier) :
		queue_index(queue_index),
		num_gpus(num_gpus),
		smem_storage(smem_storage),
		raking_soa_details(
			typename RakingSoaDetails::GridStorageSoa(
				smem_storage.coarse_raking_elements,
				smem_storage.fine_raking_elements),
			typename RakingSoaDetails::WarpscanSoa(
				smem_storage.state.coarse_warpscan,
				smem_storage.state.fine_warpscan),
			TileTuple(0, 0)),
		d_in(d_in),
		d_out(d_out),
		d_predecessor_out(d_predecessor_out),
		d_column_indices(d_column_indices),
		d_row_offsets(d_row_offsets),
		work_progress(work_progress),
		max_edge_frontier(max_edge_frontier)
{
	if (threadIdx.x == 0) {
		smem_storage.state.cta_comm = KernelPolicy::THREADS;		// invalid
		smem_storage.state.overflowed = false;						// valid
	}
}
/**
 * Process a single tile
 *
 * Pipeline: (1) load a tile of dequeued vertex ids; (2) inspect them to
 * obtain row offsets/lengths and coarse vs. fine classification;
 * (3) cooperatively SOA-scan the coarse/fine ranks; (4) thread 0
 * reserves queue space with one atomic and checks for overflow;
 * (5) expand large lists at CTA then warp granularity; (6) repeatedly
 * fill the shared gather scratch with fine-grained list offsets and have
 * the whole CTA copy the scratch into the outgoing frontier queue.
 */
__device__ __forceinline__ void ProcessTile(
	SizeT cta_offset,
	SizeT guarded_elements = KernelPolicy::TILE_ELEMENTS)
{
	Tile<
		KernelPolicy::LOG_LOADS_PER_TILE,
		KernelPolicy::LOG_LOAD_VEC_SIZE> tile;
	// Load tile (out-of-range slots filled with the invalid vertex -1)
	util::io::LoadTile<
		KernelPolicy::LOG_LOADS_PER_TILE,
		KernelPolicy::LOG_LOAD_VEC_SIZE,
		KernelPolicy::THREADS,
		KernelPolicy::QUEUE_READ_MODIFIER,
		false>::LoadValid(
			tile.vertex_id,
			d_in,
			cta_offset,
			guarded_elements,
			(VertexId) -1);
	// Inspect dequeued vertices, updating label and obtaining
	// edge-list details
	tile.Inspect(this);
	// Scan tile with carry update in raking threads
	SoaScanOp scan_op;
	TileTuple totals;
	util::scan::soa::CooperativeSoaTileScan<KernelPolicy::LOAD_VEC_SIZE>::ScanTile(
		totals,
		raking_soa_details,
		RankSoa(tile.coarse_row_rank, tile.fine_row_rank),
		scan_op);
	SizeT coarse_count = totals.t0;
	tile.fine_count = totals.t1;
	// Use a single atomic add to reserve room in the queue
	if (threadIdx.x == 0) {
		SizeT enqueue_amt		= coarse_count + tile.fine_count;
		SizeT enqueue_offset	= work_progress.Enqueue(enqueue_amt, queue_index + 1);
		smem_storage.state.coarse_enqueue_offset = enqueue_offset;
		smem_storage.state.fine_enqueue_offset = enqueue_offset + coarse_count;
		// Check for queue overflow due to redundant expansion
		if (enqueue_offset + enqueue_amt >= max_edge_frontier) {
			smem_storage.state.overflowed = true;
			work_progress.SetOverflow<SizeT>();
		}
	}
	// Protect overflowed flag
	__syncthreads();
	// Quit if overflow
	if (smem_storage.state.overflowed) {
		util::ThreadExit();
	}
	// Enqueue valid edge lists into outgoing queue (CTA granularity)
	tile.ExpandByCta(this);
	// Enqueue valid edge lists into outgoing queue (warp granularity)
	tile.ExpandByWarp(this);
	//
	// Enqueue the adjacency lists of unvisited node-IDs by repeatedly
	// gathering edges into the scratch space, and then
	// having the entire CTA copy the scratch pool into the outgoing
	// frontier queue.
	//
	tile.progress = 0;
	while (tile.progress < tile.fine_count) {
		// Fill the scratch space with gather-offsets for neighbor-lists.
		tile.ExpandByScan(this);
		__syncthreads();
		// Copy scratch space into queue
		int scratch_remainder = B40C_MIN(SmemStorage::GATHER_ELEMENTS, tile.fine_count - tile.progress);
		for (int scratch_offset = threadIdx.x;
			scratch_offset < scratch_remainder;
			scratch_offset += KernelPolicy::THREADS)
		{
			// Gather a neighbor
			VertexId neighbor_id;
			util::io::ModifiedLoad<KernelPolicy::COLUMN_READ_MODIFIER>::Ld(
				neighbor_id,
				d_column_indices + smem_storage.gather_offsets[scratch_offset]);
			// Scatter it into queue
			util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
				neighbor_id,
				d_out + smem_storage.state.fine_enqueue_offset + tile.progress + scratch_offset);
			if (KernelPolicy::MARK_PREDECESSORS) {
				// Scatter predecessor it into queue
				VertexId predecessor_id = smem_storage.gather_predecessors[scratch_offset];
				util::io::ModifiedStore<KernelPolicy::QUEUE_WRITE_MODIFIER>::St(
					predecessor_id,
					d_predecessor_out + smem_storage.state.fine_enqueue_offset + tile.progress + scratch_offset);
			}
		}
		tile.progress += SmemStorage::GATHER_ELEMENTS;
		// Protect the scratch space before the next ExpandByScan pass
		__syncthreads();
	}
}
};
} // namespace expand_atomic
} // namespace two_phase
} // namespace bfs
} // namespace graph
} // namespace b40c
B40C_NS_POSTFIX
#include "miner.h"
extern "C" {
#include "sph/sph_blake.h"
}
#include "cuda_helper.h"
#ifdef __INTELLISENSE__
#define __byte_perm(x, y, b) x
#endif
/* threads per block and nonces per thread */
#define TPB 768
#define NPT 384
#define NBN 2
__constant__ uint32_t _ALIGN(16) d_data[21];
/* 16 gpu threads max */
static uint32_t *d_resNonce[MAX_GPUS];
static uint32_t *h_resNonce[MAX_GPUS];
static cudaStream_t streams[MAX_GPUS];
/* hash by cpu with blake 256 */
extern "C" void vanillahash(void *output, const void *input, int8_t blakerounds){
uchar hash[64];
sph_blake256_context ctx;
sph_blake256_set_rounds(blakerounds);
sph_blake256_init(&ctx);
sph_blake256(&ctx, input, 80);
sph_blake256_close(&ctx, hash);
memcpy(output, hash, 32);
}
/* Quad-lane BLAKE-256 G function: applies one G mixing step to four
 * independent column/diagonal groups at once so the compiler can
 * interleave the four instruction streams.  m[] are message words,
 * z[] the BLAKE u256 constants; the 16- and 8-bit rotations are done
 * with __byte_perm, the 12- and 7-bit ones with ROTR32. */
#define GS4(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2,a3,b3,c3,d3,x3,y3) { \
	v[ a]+= (m[ x] ^ z[ y]) + v[ b]; \
	v[a1]+= (m[x1] ^ z[y1]) + v[b1]; \
	v[a2]+= (m[x2] ^ z[y2]) + v[b2]; \
	v[a3]+= (m[x3] ^ z[y3]) + v[b3]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x1032); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x1032); \
	v[d2] = __byte_perm(v[d2] ^ v[a2], 0, 0x1032); \
	v[d3] = __byte_perm(v[d3] ^ v[a3], 0, 0x1032); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	v[c2]+= v[d2]; \
	v[c3]+= v[d3]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 12); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 12); \
	v[b2] = ROTR32(v[b2] ^ v[c2], 12); \
	v[b3] = ROTR32(v[b3] ^ v[c3], 12); \
	\
	v[ a]+= (m[ y] ^ z[ x]) + v[ b]; \
	v[a1]+= (m[y1] ^ z[x1]) + v[b1]; \
	v[a2]+= (m[y2] ^ z[x2]) + v[b2]; \
	v[a3]+= (m[y3] ^ z[x3]) + v[b3]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x0321); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x0321); \
	v[d2] = __byte_perm(v[d2] ^ v[a2], 0, 0x0321); \
	v[d3] = __byte_perm(v[d3] ^ v[a3], 0, 0x0321); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	v[c2]+= v[d2]; \
	v[c3]+= v[d3]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 7); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 7); \
	v[b2] = ROTR32(v[b2] ^ v[c2], 7); \
	v[b3] = ROTR32(v[b3] ^ v[c3], 7); \
}
/* Triple-lane variant of the G mixing step (see GS4): three independent
 * groups interleaved. */
#define GS3(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2) { \
	v[ a]+= (m[ x] ^ z[ y]) + v[ b]; \
	v[a1]+= (m[x1] ^ z[y1]) + v[b1]; \
	v[a2]+= (m[x2] ^ z[y2]) + v[b2]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x1032); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x1032); \
	v[d2] = __byte_perm(v[d2] ^ v[a2], 0, 0x1032); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	v[c2]+= v[d2]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 12); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 12); \
	v[b2] = ROTR32(v[b2] ^ v[c2], 12); \
	\
	v[ a]+= (m[ y] ^ z[ x]) + v[ b]; \
	v[a1]+= (m[y1] ^ z[x1]) + v[b1]; \
	v[a2]+= (m[y2] ^ z[x2]) + v[b2]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x0321); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x0321); \
	v[d2] = __byte_perm(v[d2] ^ v[a2], 0, 0x0321); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	v[c2]+= v[d2]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 7); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 7); \
	v[b2] = ROTR32(v[b2] ^ v[c2], 7); \
}
/* Dual-lane variant of the G mixing step (see GS4): two independent
 * groups interleaved. */
#define GS2(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1) { \
	v[ a]+= (m[ x] ^ z[ y]) + v[ b]; \
	v[a1]+= (m[x1] ^ z[y1]) + v[b1]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x1032); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x1032); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 12); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 12); \
	\
	v[ a]+= (m[ y] ^ z[ x]) + v[ b]; \
	v[a1]+= (m[y1] ^ z[x1]) + v[b1]; \
	\
	v[ d] = __byte_perm(v[ d] ^ v[ a], 0, 0x0321); \
	v[d1] = __byte_perm(v[d1] ^ v[a1], 0, 0x0321); \
	\
	v[ c]+= v[ d]; \
	v[c1]+= v[d1]; \
	\
	v[ b] = ROTR32(v[ b] ^ v[ c], 7); \
	v[b1] = ROTR32(v[b1] ^ v[c1], 7); \
}
/* Single-group BLAKE-256 G mixing step (see GS4 for the notation). */
#define GS(a,b,c,d,x,y) { \
	v[a] += (m[x] ^ z[y]) + v[b]; \
	v[d] = __byte_perm(v[d] ^ v[a],0, 0x1032); \
	v[c] += v[d]; \
	v[b] = ROTR32(v[b] ^ v[c], 12); \
	v[a] += (m[y] ^ z[x]) + v[b]; \
	v[d] = __byte_perm(v[d] ^ v[a],0, 0x0321); \
	v[c] += v[d]; \
	v[b] = ROTR32(v[b] ^ v[c], 7); \
}
/* Blake-256 (8-round "vanilla") nonce-search kernel.
 *
 * Launch: 1D grid of TPB-thread blocks; each thread grid-strides over
 * nonces [startNonce, startNonce+threads).  d_data[0..20] must have been
 * primed by vanilla_cpu_setBlock_16() (partially pre-mixed state words,
 * the three pending header words, and the target h7/h6 values).
 * Only the lanes feeding state words 6/7 are fully computed; candidates
 * passing the cheap h7 filter are then checked against highTarget, and
 * winning nonces land in resNonce[0..NBN-1] (smallest kept first).
 */
__global__ __launch_bounds__(TPB,1)
void vanilla_gpu_hash_16_8(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce,const uint64_t highTarget){
	uint32_t _ALIGN(16) v[16];
	uint32_t _ALIGN(16) tmp[16];

	const size_t thread   = blockDim.x * blockIdx.x + threadIdx.x;
	// FIX: promote to 64 bits BEFORE the arithmetic.  Both expressions were
	// previously evaluated in 32 bits and only then widened, so near the top
	// of the 32-bit nonce range (startNonce + threads > 0xffffffff, reachable
	// on the first scanhash call where throughput is not yet capped) maxNonce
	// wrapped around and the kernel silently scanned nothing.
	const uint64_t step     = (uint64_t)gridDim.x * blockDim.x;
	const uint64_t maxNonce = (uint64_t)startNonce + threads;

	// Message-word permutation schedule for rounds 2..8 (round 1 is
	// partially precomputed on the host into d_data).
	const int8_t r[][16] = {
		{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
		{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
		{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
		{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }
	};
	// BLAKE u256 constants
	const uint32_t z[16] = {
		0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
		0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C, 0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917
	};
	//PREFETCH: host-precomputed v-state snapshot
	#pragma unroll
	for(int i=0;i<16;i++){
		tmp[ i] = d_data[ i];
	}
	// Message block: three pending header words, padding, counter (640 bits);
	// m[3] is the nonce being searched.
	uint32_t m[16] = {
		d_data[16],	d_data[17],	d_data[18],	0,
		0x80000000UL,	0, 0, 0,
		0, 0, 0, 0,
		0, 1, 0, 640
	};
	const uint32_t h7 = d_data[19];
	const uint32_t h6 = d_data[20];
	//END OF PREFETCH

	for (uint64_t m3 = (uint64_t)startNonce + thread; m3 < maxNonce; m3 += step) {
		m[3] = m3;

		#pragma unroll
		for(int i=0;i<16;i++)
			v[ i] = tmp[ i];

		// Finish the partially precomputed first round (nonce-dependent part)
		v[ 1]+= m[3] ^ z[2];
		v[13] = __byte_perm(v[13] ^ v[1],0, 0x0321);
		v[ 9]+= v[13];
		v[ 5] = ROTR32(v[5] ^ v[9], 7);
		v[ 0]+= v[5];
		v[15] = __byte_perm(v[15] ^ v[0],0, 0x1032);
		v[10]+= v[15];
		v[ 5] = ROTR32(v[5] ^ v[10], 12);
		v[ 0]+= z[8] + v[5];
		v[15] = __byte_perm(v[15] ^ v[0],0, 0x0321);
		v[10]+= v[15];
		v[ 5] = ROTR32(v[5] ^ v[10], 7);

		GS3( 1, 6,11,12,10,11,	2, 7, 8,13,12,13,	3, 4, 9,14,14,15);

		// Rounds 2..7 fully, round 8 column step
		#pragma unroll
		for(int i=0;i<6;i++){
			GS4(0, 4, 8,12,r[i][ 0],r[i][ 1],	1, 5, 9,13,r[i][ 2],r[i][ 3],	2, 6,10,14,r[i][ 4],r[i][ 5],	3, 7,11,15,r[i][ 6],r[i][ 7]);
			GS4(0, 5,10,15,r[i][ 8],r[i][ 9],	1, 6,11,12,r[i][10],r[i][11],	2, 7, 8,13,r[i][12],r[i][13],	3, 4, 9,14,r[i][14],r[i][15]);
		}
		GS4(0, 4, 8,12,r[6][ 0],r[6][ 1],	1, 5, 9,13,r[6][ 2],r[6][ 3],	2, 6,10,14,r[6][ 4],r[6][ 5],	3, 7,11,15,r[6][ 6],r[6][ 7]);

		// Partial final diagonal step: only what feeds v[7]/v[15]
		v[ 0] += (m[ 5] ^ z[0]) + v[5];
		v[ 2] += (m[ 8] ^ z[6]) + v[7];
		v[13] = __byte_perm(v[13] ^ v[2],0, 0x1032);
		v[15] = __byte_perm(v[15] ^ v[0],0, 0x1032);
		v[ 8] += v[13];
		v[10] += v[15];
		v[ 5] = ROTR32(v[ 5] ^ v[10], 12);
		v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12);
		v[ 0] += (m[ 0] ^ z[5]) + v[5];
		v[ 2] += (m[ 6] ^ z[8]) + v[7];
		v[15] = __byte_perm(v[15] ^ v[ 0],0, 0x0321);
		v[13] = __byte_perm(v[13] ^ v[ 2],0, 0x0321);
		v[8] += v[13];
		v[7] = ROTR32(v[7] ^ v[8], 7);
		// only compute h6 & 7
		if((v[15]^h7)==v[7]){
			// h7 matched: finish the lanes feeding v[6]/v[14] and test h6
			v[ 1] += (m[15] ^ z[ 4]) + v[6];
			v[ 3] += (m[2] ^ z[10]) + v[4];
			v[12] = __byte_perm(v[12] ^ v[ 1],0, 0x1032);
			v[14] = __byte_perm(v[14] ^ v[3],0, 0x1032);
			v[11] += v[12];
			v[ 9] += v[14];
			v[ 6] = ROTR32(v[ 6] ^ v[11], 12);
			v[ 1] += (m[ 4] ^ z[15]) + v[ 6];
			v[ 3] += (m[10] ^ z[ 2]) + ROTR32(v[ 4] ^ v[ 9],12);
			v[12] = __byte_perm(v[12] ^ v[ 1],0, 0x0321);
			v[14] = __byte_perm(v[14] ^ v[ 3],0, 0x0321);
			v[11] += v[12];
			v[ 6] = ROTR32(v[ 6] ^ v[11], 7);

			if(cuda_swab32(h6^v[6]^v[14]) <= highTarget) {
#if NBN == 2
				/* keep the smallest nonce, + extra one if found */
				if (m[3] < resNonce[0]){
					resNonce[1] = resNonce[0];
					resNonce[0] = m[3];
				}
				else
					resNonce[1] = m[3];
#else
				resNonce[0] = m[3];
#endif
				return; //<-- this may cause a problem on extranonce if the extranonce is on position current_nonce + X * step where X=[1,2,3..,N]
			}
		}
	}
}
__host__
/* Host-side precomputation for vanilla_gpu_hash_16_8():
 * hashes the first 64 header bytes with 8-round blake-256 to get the
 * midstate, then folds in the three nonce-independent pending words
 * (penddata[0..2]) through the nonce-independent portion of round 1.
 * The resulting 21 words (partial v-state h[0..15], pending words
 * h[16..18], and target words h[19..20]) are uploaded to __constant__
 * d_data on this thread's stream.  Statement order below mirrors the
 * BLAKE column/diagonal dependency chain exactly -- do not reorder.
 * NOTE(review): return value of cudaMemcpyToSymbolAsync is not checked;
 * an upload failure would only surface as invalid shares later. */
void vanilla_cpu_setBlock_16(const int thr_id,const uint32_t* endiandata, uint32_t *penddata){
	// BLAKE u256 constants
	const uint32_t _ALIGN(64) z[16] = {
		SPH_C32(0x243F6A88), SPH_C32(0x85A308D3), SPH_C32(0x13198A2E), SPH_C32(0x03707344),
		SPH_C32(0xA4093822), SPH_C32(0x299F31D0), SPH_C32(0x082EFA98), SPH_C32(0xEC4E6C89),
		SPH_C32(0x452821E6), SPH_C32(0x38D01377), SPH_C32(0xBE5466CF), SPH_C32(0x34E90C6C),
		SPH_C32(0xC0AC29B7), SPH_C32(0xC97C50DD), SPH_C32(0x3F84D5B5), SPH_C32(0xB5470917)
	};
	uint32_t _ALIGN(64) h[22];
	// Midstate over the first 64 header bytes
	sph_blake256_context ctx;
	sph_blake256_set_rounds(8);
	sph_blake256_init(&ctx);
	sph_blake256(&ctx, endiandata, 64);
	// Scatter midstate words into the layout the kernel expects
	h[ 0] = ctx.H[0];	h[ 1] = ctx.H[1];
	h[ 2] = ctx.H[2];	h[21] = ctx.H[3];
	h[ 4] = ctx.H[4];	h[20] = ctx.H[5];
	h[19] = ctx.H[6];	h[16] = ctx.H[7];
	uint32_t tmp = h[20];
	h[20] = h[19];
	h[19] = h[16];
	// Pending (last) header words, consumed as message words by round 1
	h[16] = penddata[ 0];
	h[17] = penddata[ 1];
	h[18] = penddata[ 2];
	// Counter = 640 bits folded into the v12 initial value
	h[12] = z[ 4] ^ 640;
	h[ 8] = z[ 0];
	// Nonce-independent part of round 1 (column + partial diagonal steps)
	h[ 0] += (h[16] ^ z[ 1]) + h[ 4];
	h[12] = SPH_ROTR32(h[12] ^ h[0],16);
	h[ 8] += h[12];
	h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 12);
	h[ 0] += (h[17] ^ z[ 0]) + h[ 4];
	h[12] = SPH_ROTR32(h[12] ^ h[0],8);
	h[ 8] += h[12];
	h[ 4] = SPH_ROTR32(h[ 4] ^ h[ 8], 7);
	h[1] += (h[18] ^ z[ 3]) + tmp;
	h[13] = SPH_ROTR32(z[ 5] ^ 640 ^ h[1],16);
	h[ 5] = ROTR32(tmp ^ (z[ 1] + h[13]), 12);
	h[ 1] += h[ 5];
	h[ 2] += (0x80000000UL ^ z[ 5]) + h[20];
	h[14] = SPH_ROTR32(z[ 6] ^ h[2], 16);
	h[ 6] = z[ 2] + h[14];
	h[ 6] = SPH_ROTR32(h[20] ^ h[ 6], 12);
	h[21] += z[ 7] + h[19];
	h[ 0] += z[ 9];
	h[ 2] += z[ 4] + h[ 6];
	h[ 9] = z[ 1] + h[13];
	h[10] = z[ 2] + h[14];
	h[14] = SPH_ROTR32(h[14] ^ h[2],8); //0x0321
	h[10]+=h[14];
	h[ 6] = SPH_ROTR32(h[ 6] ^ h[10],7);
	h[15] = SPH_ROTR32(z[ 7] ^ h[21],16);
	h[11] = z[ 3] + h[15];
	h[ 7] = SPH_ROTR32(h[19] ^ h[11], 12);
	h[ 3] = h[21] + h[ 7] + z[ 6];
	h[15] = SPH_ROTR32(h[15] ^ h[ 3],8);
	h[11]+= h[15];
	h[ 7] = ROTR32(h[ 7] ^ h[11],7);
	// Upload 21 precomputed words to __constant__ memory (h[21] is scratch)
	cudaMemcpyToSymbolAsync(d_data, h, 21*sizeof(uint32_t), 0, cudaMemcpyHostToDevice, streams[thr_id]);
}
static bool init[MAX_GPUS] = { 0 };
/* Miner entry point: scans nonces [first_nonce, max_nonce) for blake-256
 * ("vanilla", round-reduced) shares on GPU thr_id.
 * Returns 0 (none found / restart), 1 (one share in pdata[19]) or
 * 2 (second candidate also stored in pdata[21]); *hashes_done is the
 * number of nonces covered.  First call per thread performs lazy device
 * init (result buffers, stream, cache config). */
extern "C" int scanhash_vanilla(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done, const int8_t blakerounds)
{
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t first_nonce = pdata[19];
	// Only the high target word is filtered on GPU; full test done on CPU
	const uint32_t targetHigh = ptarget[6];
	int dev_id = device_map[thr_id];

	int intensity = (device_sm[dev_id] > 500 && !is_windows()) ? 30 : 24;
	if (device_sm[dev_id] < 350) intensity = 22;

	uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
	// NOTE(review): the cap is applied only once init[thr_id] is already true,
	// so the very first call can launch past max_nonce -- confirm intended.
	if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);

	if (!init[thr_id]) {
		cudaSetDevice(dev_id);
		if (opt_cudaschedule == -1 && gpu_threads == 1) {
			cudaDeviceReset();
			// reduce cpu usage (linux)
			cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
			cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
			CUDA_LOG_ERROR();
		}
		gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);

		CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
		CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonce[thr_id], NBN * sizeof(uint32_t)), -1);
		cudaStreamCreate(&streams[thr_id]);
		init[thr_id] = true;
	}

	uint32_t _ALIGN(64) endiandata[20];
	// Only the first 16 words feed the midstate; words 16..18 go to the
	// GPU raw via setBlock, word 19 is the nonce.
	for (int k = 0; k < 16; k++)
		be32enc(&endiandata[k], pdata[k]);

	// Clear resNonce[0] only; resNonce[1] is always (re)written by the
	// kernel before resNonce[0] becomes valid.
	cudaMemsetAsync(d_resNonce[thr_id], 0xff, sizeof(uint32_t),streams[thr_id]);
	vanilla_cpu_setBlock_16(thr_id,endiandata,&pdata[16]);

	const dim3 grid((throughput + (NPT*TPB)-1)/(NPT*TPB));
	const dim3 block(TPB);
	int rc = 0;
	do {
		vanilla_gpu_hash_16_8<<<grid,block, 0, streams[thr_id]>>>(throughput, pdata[19], d_resNonce[thr_id], targetHigh);
		cudaMemcpyAsync(h_resNonce[thr_id], d_resNonce[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost,streams[thr_id]);
		// Blocking sync: makes h_resNonce valid and throttles the loop
		cudaStreamSynchronize(streams[thr_id]);

		if (h_resNonce[thr_id][0] != UINT32_MAX){
			// Re-hash the candidate on CPU before submitting
			uint32_t vhashcpu[8];
			uint32_t Htarg = (uint32_t)targetHigh;

			for (int k=0; k < 19; k++)
				be32enc(&endiandata[k], pdata[k]);

			be32enc(&endiandata[19], h_resNonce[thr_id][0]);
			vanillahash(vhashcpu, endiandata, blakerounds);

			if (vhashcpu[6] <= Htarg && fulltest(vhashcpu, ptarget)) {
				rc = 1;
				work_set_target_ratio(work, vhashcpu);
				*hashes_done = pdata[19] - first_nonce + throughput;
				pdata[19] = h_resNonce[thr_id][0];
#if NBN > 1
				// Secondary candidate: keep the better-ratio nonce in pdata[19]
				if (h_resNonce[thr_id][1] != UINT32_MAX) {
					be32enc(&endiandata[19], h_resNonce[thr_id][1]);
					vanillahash(vhashcpu, endiandata, blakerounds);
					pdata[21] = h_resNonce[thr_id][1];
					if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) {
						work_set_target_ratio(work, vhashcpu);
						xchg(pdata[19], pdata[21]);
					}
					rc = 2;
				}
#endif
				return rc;
			}
			else {
				// NOTE(review): d_resNonce is not re-cleared here, so a bad
				// candidate repeats this warning on every remaining iteration.
				gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", h_resNonce[thr_id][0]);
			}
		}
		pdata[19] += throughput;
	} while (!work_restart[thr_id].restart && ((uint64_t)max_nonce > ((uint64_t)(pdata[19]) + (uint64_t)throughput)));

	*hashes_done = pdata[19] - first_nonce;
	MyStreamSynchronize(NULL, 0, dev_id);
	return rc;
}
// cleanup
// cleanup: release the per-thread GPU resources allocated lazily in
// scanhash_vanilla() (result buffers and the per-thread stream).
extern "C" void free_vanilla(int thr_id)
{
	if (!init[thr_id])
		return;

	// Drain in-flight work before freeing; cudaThreadSynchronize() is
	// deprecated in favor of cudaDeviceSynchronize().
	cudaDeviceSynchronize();

	cudaFreeHost(h_resNonce[thr_id]);
	cudaFree(d_resNonce[thr_id]);
	// The stream created during init was previously leaked here.
	cudaStreamDestroy(streams[thr_id]);

	init[thr_id] = false;

	cudaDeviceSynchronize();
}
* Cooperative tile reduction and scanning within CTAs
******************************************************************************/
#pragma once
#include "../../util/device_intrinsics.cuh"
#include "../../util/srts_grid.cuh"
#include "../../util/reduction/cooperative_reduction.cuh"
#include "../../util/scan/serial_scan.cuh"
#include "../../util/scan/warp_scan.cuh"
#include "../../util/ns_umbrella.cuh"
B40C_NS_PREFIX
namespace b40c {
namespace util {
namespace scan {
/**
* Cooperative reduction in raking smem grid hierarchies
*/
template <
typename RakingDetails,
typename SecondaryRakingDetails = typename RakingDetails::SecondaryRakingDetails>
struct CooperativeGridScan;
/**
 * Cooperative tile scan
 *
 * Scans a tile of [SCAN_LANES][VEC_SIZE] partials cooperatively across
 * the CTA: each lane's vector is first serially reduced into the raking
 * smem grid, the grid is scanned by the raking threads (optionally with
 * carry or a global atomic enqueue reservation), and each vector is then
 * serially (re)scanned seeded with its lane's exclusive partial.
 */
template <
	int VEC_SIZE,				// Length of vector-loads (e.g, vec-1, vec-2, vec-4)
	bool EXCLUSIVE = true>		// Whether or not this is an exclusive scan
struct CooperativeTileScan
{
	//---------------------------------------------------------------------
	// Iteration structures for extracting partials from raking lanes and
	// using them to seed scans of tile vectors
	//---------------------------------------------------------------------
	// Next lane/load
	template <int LANE, int TOTAL_LANES>
	struct ScanLane
	{
		template <typename RakingDetails, typename ReductionOp>
		static __device__ __forceinline__ void Invoke(
			RakingDetails raking_details,
			typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
			ReductionOp scan_op)
		{
			// Retrieve partial reduction from raking grid
			typename RakingDetails::T exclusive_partial = raking_details.lane_partial[LANE][0];
			// Scan the partials in this lane/load
			SerialScan<VEC_SIZE, EXCLUSIVE>::Invoke(
				data[LANE], exclusive_partial, scan_op);
			// Next load
			ScanLane<LANE + 1, TOTAL_LANES>::Invoke(
				raking_details, data, scan_op);
		}
	};
	// Terminate (end of compile-time lane recursion)
	template <int TOTAL_LANES>
	struct ScanLane<TOTAL_LANES, TOTAL_LANES>
	{
		template <typename RakingDetails, typename ReductionOp>
		static __device__ __forceinline__ void Invoke(
			RakingDetails raking_details,
			typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
			ReductionOp scan_op) {}
	};
	//---------------------------------------------------------------------
	// Interface
	//---------------------------------------------------------------------
	/**
	 * Scan a single tile.  Total aggregate is computed and returned in all threads.
	 *
	 * No post-synchronization needed before grid reuse.
	 */
	template <typename RakingDetails, typename ReductionOp>
	static __device__ __forceinline__ typename RakingDetails::T ScanTile(
		RakingDetails raking_details,
		typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
		ReductionOp scan_op)
	{
		// Reduce partials in each vector-load, placing resulting partial in raking smem grid lanes (one lane per load)
		reduction::CooperativeTileReduction<VEC_SIZE>::template ReduceLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		__syncthreads();
		CooperativeGridScan<RakingDetails>::ScanTile(
			raking_details, scan_op);
		__syncthreads();
		// Scan each vector-load, seeded with the resulting partial from its raking grid lane,
		ScanLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		// Return last thread's inclusive partial
		return raking_details.CumulativePartial();
	}
	/**
	 * Scan a single tile where carry is updated with the total aggregate only
	 * in raking threads.
	 *
	 * No post-synchronization needed before grid reuse.
	 */
	template <typename RakingDetails, typename ReductionOp>
	static __device__ __forceinline__ void ScanTileWithCarry(
		RakingDetails raking_details,
		typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
		typename RakingDetails::T &carry,
		ReductionOp scan_op)
	{
		// Reduce partials in each vector-load, placing resulting partials in raking smem grid lanes (one lane per load)
		reduction::CooperativeTileReduction<VEC_SIZE>::template ReduceLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		__syncthreads();
		CooperativeGridScan<RakingDetails>::ScanTileWithCarry(
			raking_details, carry, scan_op);
		__syncthreads();
		// Scan each vector-load, seeded with the resulting partial from its raking grid lane,
		ScanLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
	}
	/**
	 * Scan a single tile with atomic enqueue.  Returns updated queue offset.
	 *
	 * No post-synchronization needed before grid reuse.
	 */
	template <typename RakingDetails, typename ReductionOp>
	static __device__ __forceinline__ typename RakingDetails::T ScanTileWithEnqueue(
		RakingDetails raking_details,
		typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
		typename RakingDetails::T* d_enqueue_counter,
		ReductionOp scan_op)
	{
		// Reduce partials in each vector-load, placing resulting partial in raking smem grid lanes (one lane per load)
		reduction::CooperativeTileReduction<VEC_SIZE>::template ReduceLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		__syncthreads();
		CooperativeGridScan<RakingDetails>::ScanTileWithEnqueue(
			raking_details, d_enqueue_counter, scan_op);
		__syncthreads();
		// Scan each vector-load, seeded with the resulting partial from its raking grid lane,
		ScanLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		// Queue reservation offset combined with the local inclusive total
		return scan_op(raking_details.QueueReservation(), raking_details.CumulativePartial());
	}
	/**
	 * Scan a single tile with atomic enqueue.  Local aggregate is computed and
	 * returned in all threads.  Enqueue offset is returned in all threads.
	 *
	 * No post-synchronization needed before grid reuse.
	 */
	template <typename RakingDetails, typename ReductionOp>
	static __device__ __forceinline__ typename RakingDetails::T ScanTileWithEnqueue(
		RakingDetails raking_details,
		typename RakingDetails::T data[RakingDetails::SCAN_LANES][VEC_SIZE],
		typename RakingDetails::T *d_enqueue_counter,
		typename RakingDetails::T &enqueue_offset,
		ReductionOp scan_op)
	{
		// Reduce partials in each vector-load, placing resulting partial in raking smem grid lanes (one lane per load)
		reduction::CooperativeTileReduction<VEC_SIZE>::template ReduceLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		__syncthreads();
		CooperativeGridScan<RakingDetails>::ScanTileWithEnqueue(
			raking_details, d_enqueue_counter, enqueue_offset, scan_op);
		__syncthreads();
		// Scan each vector-load, seeded with the resulting partial from its raking grid lane,
		ScanLane<0, RakingDetails::SCAN_LANES>::Invoke(
			raking_details, data, scan_op);
		// Return last thread's inclusive partial
		return raking_details.CumulativePartial();
	}
};
/******************************************************************************
* CooperativeGridScan
******************************************************************************/
/**
* Cooperative raking grid reduction (specialized for last-level of raking grid)
*/
/**
 * Cooperative raking grid scan (specialized for the last level of the
 * raking grid, i.e. no secondary grid): the raking threads serially
 * reduce their segments, warp-scan the per-segment partials, then
 * serially re-scan their segments seeded with the exclusive partials.
 */
template <typename RakingDetails>
struct CooperativeGridScan<RakingDetails, NullType>
{
	typedef typename RakingDetails::T T;
	/**
	 * Scan in last-level raking grid.
	 */
	template <typename ReductionOp>
	static __device__ __forceinline__ void ScanTile(
		RakingDetails raking_details,
		ReductionOp scan_op)
	{
		if (threadIdx.x < RakingDetails::RAKING_THREADS) {
			// Raking reduction
			T inclusive_partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, scan_op);
			// Exclusive warp scan
			T exclusive_partial = WarpScan<RakingDetails::LOG_RAKING_THREADS>::Invoke(
				inclusive_partial, raking_details.warpscan, scan_op);
			// Exclusive raking scan
			SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, exclusive_partial, scan_op);
		}
	}
	/**
	 * Scan in last-level raking grid.  Carry-in/out is updated only in raking threads
	 */
	template <typename ReductionOp>
	static __device__ __forceinline__ void ScanTileWithCarry(
		RakingDetails raking_details,
		T &carry,
		ReductionOp scan_op)
	{
		if (threadIdx.x < RakingDetails::RAKING_THREADS) {
			// Raking reduction
			T inclusive_partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, scan_op);
			// Exclusive warp scan, get total
			T warpscan_total;
			T exclusive_partial = WarpScan<RakingDetails::LOG_RAKING_THREADS>::Invoke(
				inclusive_partial, warpscan_total, raking_details.warpscan, scan_op);
			// Seed exclusive partial with carry-in
			exclusive_partial = scan_op(carry, exclusive_partial);
			// Exclusive raking scan
			SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, exclusive_partial, scan_op);
			// Update carry
			carry = scan_op(carry, warpscan_total);			// Increment the CTA's running total by the full tile reduction
		}
	}
	/**
	 * Scan in last-level raking grid with atomic enqueue: thread 0 reserves
	 * queue space for the whole tile with one atomic and broadcasts the
	 * reservation offset through warpscan smem.
	 *
	 * NOTE(review): the broadcast via warpscan[1][0] is read by all raking
	 * threads with no barrier after thread 0's write -- this relies on the
	 * raking threads sharing a warp (implicit pre-Volta warp synchrony);
	 * confirm for newer architectures.
	 */
	template <typename ReductionOp>
	static __device__ __forceinline__ void ScanTileWithEnqueue(
		RakingDetails raking_details,
		T *d_enqueue_counter,
		ReductionOp scan_op)
	{
		if (threadIdx.x < RakingDetails::RAKING_THREADS) {
			// Raking reduction
			T inclusive_partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, scan_op);
			// Exclusive warp scan, get total
			T warpscan_total;
			T exclusive_partial = WarpScan<RakingDetails::LOG_RAKING_THREADS>::Invoke(
				inclusive_partial, warpscan_total, raking_details.warpscan, scan_op);
			// Atomic-increment the global counter with the total allocation
			T reservation_offset;
			if (threadIdx.x == 0) {
				reservation_offset = util::AtomicInt<T>::Add(
					d_enqueue_counter,
					warpscan_total);
				raking_details.warpscan[1][0] = reservation_offset;
			}
			// Seed exclusive partial with queue reservation offset
			reservation_offset = raking_details.warpscan[1][0];
			exclusive_partial = scan_op(reservation_offset, exclusive_partial);
			// Exclusive raking scan
			SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, exclusive_partial, scan_op);
		}
	}
	/**
	 * Scan in last-level raking grid with atomic enqueue.  The reservation
	 * offset is returned (in thread 0) via enqueue_offset; partials are NOT
	 * seeded with it.
	 */
	template <typename ReductionOp>
	static __device__ __forceinline__ void ScanTileWithEnqueue(
		RakingDetails raking_details,
		T *d_enqueue_counter,
		T &enqueue_offset,
		ReductionOp scan_op)
	{
		if (threadIdx.x < RakingDetails::RAKING_THREADS) {
			// Raking reduction
			T inclusive_partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, scan_op);
			// Exclusive warp scan, get total
			T warpscan_total;
			T exclusive_partial = WarpScan<RakingDetails::LOG_RAKING_THREADS>::Invoke(
				inclusive_partial, warpscan_total, raking_details.warpscan, scan_op);
			// Atomic-increment the global counter with the total allocation
			if (threadIdx.x == 0) {
				enqueue_offset = util::AtomicInt<T>::Add(
					d_enqueue_counter,
					warpscan_total);
			}
			// Exclusive raking scan
			SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
				raking_details.raking_segment, exclusive_partial, scan_op);
		}
	}
};
/**
 * Cooperative raking grid scan for multi-level raking grids.
 *
 * Each variant reduces the local raking segments, funnels one partial per
 * raking thread into the secondary (next-level) grid, recurses on that
 * grid, and then scatters the resulting exclusive partials back down with
 * a serial scan.  Barriers separate each hand-off between grid levels.
 */
template <
    typename RakingDetails,
    typename SecondaryRakingDetails>
struct CooperativeGridScan
{
    typedef typename RakingDetails::T T;
    /**
     * Scan in raking grid.
     */
    template <typename ReductionOp>
    static __device__ __forceinline__ void ScanTile(
        RakingDetails raking_details,
        ReductionOp scan_op)
    {
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Raking reduction
            T partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, scan_op);
            // Place partial in next grid
            raking_details.secondary_details.lane_partial[0][0] = partial;
        }
        __syncthreads();
        // Collectively scan in next grid
        CooperativeGridScan<SecondaryRakingDetails>::ScanTile(
            raking_details.secondary_details, scan_op);
        __syncthreads();
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Retrieve partial from next grid
            T exclusive_partial = raking_details.secondary_details.lane_partial[0][0];
            // Exclusive raking scan
            SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, exclusive_partial, scan_op);
        }
    }
    /**
     * Scan in raking grid. Carry-in/out is updated only in raking threads (homogeneously)
     */
    template <typename ReductionOp>
    static __device__ __forceinline__ void ScanTileWithCarry(
        RakingDetails raking_details,
        T &carry,
        ReductionOp scan_op)
    {
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Raking reduction
            T partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, scan_op);
            // Place partial in next grid
            raking_details.secondary_details.lane_partial[0][0] = partial;
        }
        __syncthreads();
        // Collectively scan in next grid
        CooperativeGridScan<SecondaryRakingDetails>::ScanTileWithCarry(
            raking_details.secondary_details, carry, scan_op);
        __syncthreads();
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Retrieve partial from next grid
            T exclusive_partial = raking_details.secondary_details.lane_partial[0][0];
            // Exclusive raking scan
            SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, exclusive_partial, scan_op);
        }
    }
    /**
     * Scan in raking grid with atomic enqueue: the secondary grid performs
     * the global-counter reservation.  (Comment previously duplicated from
     * ScanTileWithCarry.)
     */
    template <typename ReductionOp>
    static __device__ __forceinline__ void ScanTileWithEnqueue(
        RakingDetails raking_details,
        T* d_enqueue_counter,
        ReductionOp scan_op)
    {
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Raking reduction
            T partial = reduction::SerialReduce<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, scan_op);
            // Place partial in next grid
            raking_details.secondary_details.lane_partial[0][0] = partial;
        }
        __syncthreads();
        // Collectively scan in next grid
        CooperativeGridScan<SecondaryRakingDetails>::ScanTileWithEnqueue(
            raking_details.secondary_details, d_enqueue_counter, scan_op);
        __syncthreads();
        if (threadIdx.x < RakingDetails::RAKING_THREADS) {
            // Retrieve partial from next grid
            T exclusive_partial = raking_details.secondary_details.lane_partial[0][0];
            // Exclusive raking scan
            SerialScan<RakingDetails::PARTIALS_PER_SEG>::Invoke(
                raking_details.raking_segment, exclusive_partial, scan_op);
        }
    }
};
} // namespace scan
} // namespace util
} // namespace b40c
B40C_NS_POSTFIX
// Bilateral-filter parameters: range sigma in depth units (mm) and spatial
// sigma in pixels (the kernel below uses a fixed radius R = 6 regardless).
const float sigma_color = 30; // in mm
const float sigma_space = 4.5; // in pixels
// Bilateral filter on a 16-bit depth map; one thread per output pixel
// (2D grid covering src.cols x src.rows).
// sigma_space2_inv_half = 0.5 / sigma_space^2,
// sigma_color2_inv_half = 0.5 / sigma_color^2.
__global__ void
bilateralKernel (const PtrStepSz<unsigned short> src,
                 PtrStep<unsigned short> dst,
                 float sigma_space2_inv_half, float sigma_color2_inv_half)
{
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (x >= src.cols || y >= src.rows)
    return;
  const int R = 6;       //static_cast<int>(sigma_space * 1.5);
  const int D = R * 2 + 1;
  int value = src.ptr (y)[x];
  // Exclusive window bounds clamped to the image.  (Previously clamped to
  // cols-1/rows-1, which wrongly excluded the last column/row from windows
  // touching the right/bottom border since the loops use `<`.)
  int tx = min (x - D / 2 + D, src.cols);
  int ty = min (y - D / 2 + D, src.rows);
  float sum1 = 0;
  float sum2 = 0;
  for (int cy = max (y - D / 2, 0); cy < ty; ++cy)
  {
    for (int cx = max (x - D / 2, 0); cx < tx; ++cx)
    {
      int tmp = src.ptr (cy)[cx];
      float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
      float color2 = (value - tmp) * (value - tmp);
      float weight = __expf (-(space2 * sigma_space2_inv_half + color2 * sigma_color2_inv_half));
      sum1 += tmp * weight;
      sum2 += weight;
    }
  }
  // sum2 > 0 always: the centre pixel contributes weight 1 to its own window.
  int res = __float2int_rn (sum1 / sum2);
  dst.ptr (y)[x] = max (0, min (res, numeric_limits<short>::max ()));
}
// Gaussian 5x5 pyramid downsample of a 16-bit depth map with a range gate:
// neighbours further than 3*sigma_color from the centre depth are skipped.
__global__ void
pyrDownGaussKernel (const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color)
{
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dst.cols || row >= dst.rows)
    return;
  const int D = 5;
  const int scx = 2 * col;
  const int scy = 2 * row;
  const int center = src.ptr (scy)[scx];
  // Window offsets relative to the source-space centre, clamped to the image
  const int xBeg = max (0, scx - D / 2) - scx;
  const int yBeg = max (0, scy - D / 2) - scy;
  const int xEnd = min (src.cols, scx - D / 2 + D) - scx;
  const int yEnd = min (src.rows, scy - D / 2 + D) - scy;
  // Separable binomial weights indexed by |offset|
  const float weights[] = {0.375f, 0.25f, 0.0625f};
  float acc = 0;
  float norm = 0;
  for (int dy = yBeg; dy < yEnd; ++dy)
  {
    for (int dx = xBeg; dx < xEnd; ++dx)
    {
      const int val = src.ptr (scy + dy)[scx + dx];
      // Range gate: ignore large depth discontinuities
      if (abs (val - center) < 3 * sigma_color)
      {
        const float w = weights[abs (dx)] * weights[abs (dy)];
        acc += val * w;
        norm += w;
      }
    }
  }
  // norm > 0: the centre sample always passes its own range test
  dst.ptr (row)[col] = static_cast<int>(acc / norm);
}
// Box-average 5x5 pyramid downsample of a 16-bit depth map with a range
// gate: only neighbours within 3*sigma_color of the centre depth are used.
__global__ void
pyrDownKernel (const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= dst.cols || y >= dst.rows)
    return;
  const int D = 5;
  int center = src.ptr (2 * y)[2 * x];
  // Exclusive window bounds clamped to the image.  (Previously clamped to
  // cols-1/rows-1, which dropped the last column/row from border windows
  // since the loops use `<`.)
  int tx = min (2 * x - D / 2 + D, src.cols);
  int ty = min (2 * y - D / 2 + D, src.rows);
  int cy = max (0, 2 * y - D / 2);
  int sum = 0;
  int count = 0;
  for (; cy < ty; ++cy)
    for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
    {
      int val = src.ptr (cy)[cx];
      if (abs (val - center) < 3 * sigma_color)
      {
        sum += val;
        ++count;
      }
    }
  // count >= 1: the centre pixel always passes its own range test
  dst.ptr (y)[x] = sum / count;
}
// Gaussian 5x5 downsample of an 8-bit intensity image.
// gaussKernel: device pointer to 25 row-major weights.
// NOTE(review): weights are indexed as (ty-cy-1, tx-cx-1), i.e. relative to
// the clamped window end rather than the window centre, so weight placement
// shifts at image borders -- confirm this is intended.
__global__ void
pyrDownKernelIntensityGauss(const PtrStepSz<unsigned char> src, PtrStepSz<unsigned char> dst, float * gaussKernel)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= dst.cols || y >= dst.rows)
    return;
  const int D = 5;
  int tx = min (2 * x - D / 2 + D, src.cols - 1);
  int ty = min (2 * y - D / 2 + D, src.rows - 1);
  int cy = max (0, 2 * y - D / 2);
  float sum = 0;
  // Accumulate the normalisation in float: the previous int accumulator
  // silently truncated fractional kernel weights.
  float weightSum = 0;
  for (; cy < ty; ++cy)
    for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
    {
      float w = gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)];
      sum += src.ptr (cy)[cx] * w;
      weightSum += w;
    }
  dst.ptr (y)[x] = (sum / weightSum);
}
// Gaussian 5x5 downsample of a float depth map, skipping NaN samples.
// gaussKernel: device pointer to 25 row-major weights.
// NOTE(review): weights are indexed relative to the clamped window end
// (ty-cy-1, tx-cx-1), which shifts placement at borders -- confirm intended.
__global__ void
pyrDownKernelGaussF(const PtrStepSz<float> src, PtrStepSz<float> dst, float * gaussKernel)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= dst.cols || y >= dst.rows)
    return;
  const int D = 5;
  int tx = min (2 * x - D / 2 + D, src.cols - 1);
  int ty = min (2 * y - D / 2 + D, src.rows - 1);
  int cy = max (0, 2 * y - D / 2);
  float sum = 0;
  // Accumulate the normalisation in float: the previous int accumulator
  // silently truncated fractional kernel weights.
  float weightSum = 0;
  for (; cy < ty; ++cy)
  {
    for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
    {
      if(!isnan(src.ptr (cy)[cx]))
      {
        float w = gaussKernel[(ty - cy - 1) * 5 + (tx - cx - 1)];
        sum += src.ptr (cy)[cx] * w;
        weightSum += w;
      }
    }
  }
  // All-NaN windows yield 0/0 = NaN, propagating invalid depth.
  dst.ptr (y)[x] = (float)(sum / weightSum);
}
// Convert raw 16-bit depth (mm) to metres; readings above cutOff or <= 0
// become NaN.
__global__ void
short2FloatKernel(const PtrStepSz<unsigned short> src, PtrStepSz<float> dst, int cutOff)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= dst.cols || y >= dst.rows)
    return;
  const unsigned short raw = src.ptr(y)[x];
  if (raw > cutOff || raw <= 0)
    dst.ptr(y)[x] = numeric_limits<float>::quiet_NaN();
  else
    dst.ptr(y)[x] = ((float)raw) / 1000.0f;
}
// Convert a 3-channel image to 8-bit intensity (BT.601-style luma).
// NOTE(review): the weights are applied as 0.114*r + 0.299*b + 0.587*g,
// i.e. the usual red/blue coefficients are swapped relative to the field
// names.  This is consistent only if PixelRGB actually carries BGR data
// (as the kernel name suggests) -- confirm against the capture pipeline.
__global__ void
bgr2IntensityKernel(const PtrStepSz<PixelRGB> src, PtrStepSz<unsigned char> dst)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= dst.cols || y >= dst.rows)
        return;
    int value = (float)src.ptr(y)[x].r * 0.114f + (float)src.ptr(y)[x].b * 0.299f + (float)src.ptr(y)[x].g * 0.587f;
    dst.ptr (y)[x] = value;
}
// Zero out (invalidate) depth readings beyond max_distance_mm, in place.
__global__ void
truncateDepthKernel(PtrStepSz<unsigned short> depth, unsigned short max_distance_mm)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= depth.cols || y >= depth.rows)
        return;
    unsigned short & d = depth.ptr(y)[x];
    if (d > max_distance_mm)
        d = 0;
}
// 3x3 derivative kernels (x and y), uploaded once from
// computeDerivativeImages() via cudaMemcpyToSymbol.
__constant__ float gsobel_x3x3[9];
__constant__ float gsobel_y3x3[9];
// Apply the constant-memory 3x3 derivative kernels to src, producing
// 16-bit dx/dy responses.  One thread per pixel.
__global__ void applyKernel(const PtrStepSz<unsigned char> src, PtrStep<short> dx, PtrStep<short> dy)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if(x >= src.cols || y >= src.rows)
        return;
    float dxVal = 0;
    float dyVal = 0;
    for(int j = max(y - 1, 0); j <= min(y + 1, src.rows - 1); j++)
    {
        for(int i = max(x - 1, 0); i <= min(x + 1, src.cols - 1); i++)
        {
            // Derive the weight index from the tap's offset so border
            // clamping does not shift the kernel.  (The previous running
            // counter decremented only on visited taps, mis-assigning
            // weights for every edge pixel.)  Interior pixels still map
            // (y-1,x-1) -> 8 down to (y+1,x+1) -> 0, as before.
            int kernelIndex = (y + 1 - j) * 3 + (x + 1 - i);
            dxVal += (float)src.ptr(j)[i] * gsobel_x3x3[kernelIndex];
            dyVal += (float)src.ptr(j)[i] * gsobel_y3x3[kernelIndex];
        }
    }
    dx.ptr(y)[x] = dxVal;
    dy.ptr(y)[x] = dyVal;
}
// Compute x/y derivative images of src with the fixed 3x3 kernels.
// Uploads the kernel coefficients to constant memory on first use only.
void computeDerivativeImages(DeviceArray2D<unsigned char>& src, DeviceArray2D<short>& dx, DeviceArray2D<short>& dy)
{
    static bool kernelsUploaded = false;
    if(!kernelsUploaded)
    {
        const float kx[9] = {0.52201,  0.00000, -0.52201,
                             0.79451, -0.00000, -0.79451,
                             0.52201,  0.00000, -0.52201};
        const float ky[9] = { 0.52201,  0.79451,  0.52201,
                              0.00000,  0.00000,  0.00000,
                             -0.52201, -0.79451, -0.52201};
        cudaMemcpyToSymbol(gsobel_x3x3, kx, sizeof(float) * 9);
        cudaMemcpyToSymbol(gsobel_y3x3, ky, sizeof(float) * 9);
        cudaSafeCall(cudaGetLastError());
        cudaSafeCall(cudaDeviceSynchronize());
        kernelsUploaded = true;
    }
    dim3 block(32, 8);
    dim3 grid(divUp (src.cols (), block.x), divUp (src.rows (), block.y));
    applyKernel<<<grid, block>>>(src, dx, dy);
    cudaSafeCall(cudaGetLastError());
    cudaSafeCall(cudaDeviceSynchronize());
}
// Host wrapper: bilateral-filter a depth map using the file-level sigmas.
void
bilateralFilter (const DeviceArray2D<unsigned short>& src, DeviceArray2D<unsigned short>& dst)
{
  const dim3 block (32, 8);
  const dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
  cudaFuncSetCacheConfig (bilateralKernel, cudaFuncCachePreferL1);
  // Precompute the 0.5/sigma^2 factors the kernel expects
  const float spaceCoeff = 0.5f / (sigma_space * sigma_space);
  const float colorCoeff = 0.5f / (sigma_color * sigma_color);
  bilateralKernel<<<grid, block>>>(src, dst, spaceCoeff, colorCoeff);
  cudaSafeCall ( cudaGetLastError () );
};
// Host wrapper: halve a depth map with the range-gated Gaussian kernel.
void
pyrDown (const DeviceArray2D<unsigned short>& src, DeviceArray2D<unsigned short>& dst)
{
  dst.create (src.rows () / 2, src.cols () / 2);
  const dim3 block (32, 8);
  const dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y));
  pyrDownGaussKernel<<<grid, block>>>(src, dst, sigma_color);
  cudaSafeCall ( cudaGetLastError () );
};
// Host wrapper: halve a float depth map with a 5x5 (unnormalised binomial)
// Gaussian, skipping NaNs.
void pyrDownGaussF(const DeviceArray2D<float>& src, DeviceArray2D<float> & dst)
{
    dst.create (src.rows () / 2, src.cols () / 2);
    dim3 block (32, 8);
    dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y));
    // The coefficients never change: upload them once and reuse the device
    // buffer instead of a cudaMalloc/cudaFree round trip per call, and
    // check the API results (previously unchecked).
    static float * gauss_cuda = 0;
    if (!gauss_cuda)
    {
        const float gaussKernel[25] = {1, 4, 6, 4, 1,
                                       4, 16, 24, 16, 4,
                                       6, 24, 36, 24, 6,
                                       4, 16, 24, 16, 4,
                                       1, 4, 6, 4, 1};
        cudaSafeCall (cudaMalloc((void**) &gauss_cuda, sizeof(float) * 25));
        cudaSafeCall (cudaMemcpy(gauss_cuda, &gaussKernel[0], sizeof(float) * 25, cudaMemcpyHostToDevice));
    }
    pyrDownKernelGaussF<<<grid, block>>>(src, dst, gauss_cuda);
    cudaSafeCall ( cudaGetLastError () );
};
// Host wrapper: halve an 8-bit intensity image with a 5x5 (unnormalised
// binomial) Gaussian.
void pyrDownUcharGauss(const DeviceArray2D<unsigned char>& src, DeviceArray2D<unsigned char> & dst)
{
    dst.create (src.rows () / 2, src.cols () / 2);
    dim3 block (32, 8);
    dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y));
    // The coefficients never change: upload them once and reuse the device
    // buffer instead of a cudaMalloc/cudaFree round trip per call, and
    // check the API results (previously unchecked).
    static float * gauss_cuda = 0;
    if (!gauss_cuda)
    {
        const float gaussKernel[25] = {1, 4, 6, 4, 1,
                                       4, 16, 24, 16, 4,
                                       6, 24, 36, 24, 6,
                                       4, 16, 24, 16, 4,
                                       1, 4, 6, 4, 1};
        cudaSafeCall (cudaMalloc((void**) &gauss_cuda, sizeof(float) * 25));
        cudaSafeCall (cudaMemcpy(gauss_cuda, &gaussKernel[0], sizeof(float) * 25, cudaMemcpyHostToDevice));
    }
    pyrDownKernelIntensityGauss<<<grid, block>>>(src, dst, gauss_cuda);
    cudaSafeCall ( cudaGetLastError () );
    cudaSafeCall (cudaDeviceSynchronize ());
};
// Host wrapper: convert millimetre depth to metres, NaN-ing invalid pixels.
void shortDepthToMetres(const DeviceArray2D<unsigned short>& src, DeviceArray2D<float> & dst, int cutOff)
{
    const dim3 block (32, 8);
    const dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y));
    short2FloatKernel<<<grid, block>>>(src, dst, cutOff);
    cudaSafeCall ( cudaGetLastError () );
};
// Host wrapper: convert a 3-channel image to 8-bit intensity.
// (Stray extraction junk after the closing brace removed -- it broke compilation.)
void imageBGRToIntensity(const DeviceArray2D<PixelRGB> & src, DeviceArray2D<unsigned char> & dst)
{
    dim3 block (32, 8);
    dim3 grid (divUp (dst.cols (), block.x), divUp (dst.rows (), block.y));
    bgr2IntensityKernel<<<grid, block>>>(src, dst);
    cudaSafeCall ( cudaGetLastError () );
};
namespace AggMIS {
namespace MergeSplitGPU {
namespace Kernels {
namespace D {
// Translate this thread's global adjacency list into aggregate-local
// offsets by binary-searching each neighbour id in the sorted nodeIds.
//   neighbors    out: local indices of neighbours inside the aggregate
//   nextNeighbor out: number of entries written to neighbors
// All block threads must reach the trailing barrier.
__device__ void LoadLocalNeighbors(int *neighbors,
        int *nextNeighbor,
        int aggSize,
        int *nodeIds,
        int *adjIndices,
        int *adjacency) {
    if (threadIdx.x < aggSize)
    {
        int nodeId = nodeIds[threadIdx.x];
        int start = adjIndices[nodeId];
        int end = adjIndices[nodeId + 1];
        for (int i = start; i < end; i++)
        {
            int neighborId = adjacency[i];
            // BinarySearch takes an INCLUSIVE upper bound: pass aggSize-1.
            // (Passing aggSize made it read nodeIds[aggSize] -- out of the
            // live region -- whenever neighborId exceeded every element.)
            int neighborPlace = D::BinarySearch(neighborId,
                    0,
                    aggSize - 1,
                    nodeIds);
            if (neighborPlace != -1) {
                neighbors[(*nextNeighbor)++] = neighborPlace;
            }
        }
    }
    __syncthreads();
}
// Lower-bound binary search over a sorted int array.
// imin/imax are INCLUSIVE bounds; returns the index of value, or -1.
__device__ int BinarySearch(int value,
        int imin,
        int imax,
        int *array) {
    while (imin < imax) {
        int mid = (imax + imin) / 2;
        // Narrow towards the first element >= value
        if (array[mid] < value)
            imin = mid + 1;
        else
            imax = mid;
    }
    return (imax == imin && array[imin] == value) ? imin : -1;
}
// BFS-style flood fill of hop distances from `starter` across the local
// neighbour lists, using `array` (shared) as the distance field.
// On exit array[i] holds node i's distance from starter; *farthestId holds
// the id of some node discovered in the last wave (ties resolved
// arbitrarily by the shared-memory race, as in the original).
// All block threads must participate (internal barriers).
__device__ void FloodFillDistanceFrom(int starter,
        int* array,
        int nodeCount,
        int *neighbors,
        int neighborCount,
        int *farthestId,
        bool *incomplete) {
    // Start by marking the starter node with distance zero
    int myDist = threadIdx.x == starter ? 0 : -1;
    if (threadIdx.x < nodeCount) {
        array[threadIdx.x] = myDist;
    }
    // Then set the incomplete flag to true (benign multi-write: every
    // thread stores the same value before the barrier)
    *incomplete = true;
    __syncthreads();
    while (*incomplete)
    {
        // Set the incomplete flag to false.
        // NOTE(review): the read-then-reset of *incomplete with no barrier
        // in between relies on block-uniform loop progress -- verify under
        // independent thread scheduling.
        *incomplete = false;
        // Check if a neighbor has a positive distance
        if (myDist == -1 && threadIdx.x < nodeCount)
        {
            for (int i = 0; i < neighborCount; i++)
            {
                int neighborDist = array[neighbors[i]];
                if (neighborDist > -1) {
                    myDist = neighborDist + 1;
                    *farthestId = threadIdx.x;
                }
            }
        }
        __syncthreads();
        // Write the current value back, guarded so threads past nodeCount
        // cannot scribble outside the caller's live region (the original
        // stored unconditionally).
        if (threadIdx.x < nodeCount) {
            array[threadIdx.x] = myDist;
            if (myDist == -1) {
                *incomplete = true;
            }
        }
        __syncthreads();
    }
}
// Debug helper: thread (0,0) of block 0 dumps `size` entries of a shared
// array, ten per row, prefixed with `note`.  Ends with a block barrier.
__device__ void PrintSharedArray(int size,
        int *array,
        const char *note) {
    const bool printer = (threadIdx.x == 0 && blockIdx.x == 0);
    if (printer) {
        printf("%s\n [%3d:%3d] ", note, 0, 10);
        for (int i = 0; i < size; i++) {
            printf("%6d ", array[i]);
            const bool rowBreak = ((i + 1) % 10 == 0) && (i != size - 1);
            if (rowBreak)
                printf("\n [%3d:%3d] ", (i + 1), (i + 11));
        }
        printf("\nDone Printing Array.\n");
    }
    __syncthreads();
}
// Debug helper: lane 0 of each warp prints its warp index and a note.
__device__ void WarpReport(const char* note) {
    const bool lane0 = (threadIdx.x % 32 == 0);
    if (lane0)
        printf("Warp %d %s\n", threadIdx.x / 32, note);
}
// Debug helper: trivial single-thread printf to confirm device linkage.
__device__ void SillyTest() {
    if (threadIdx.x != 0)
        return;
    printf("Silly test worked!");
}
}
// Relabel each node's aggregate after merges: an aggregate either keeps its
// own id minus the compaction offset, or adopts the compacted id of the
// aggregate it merged into.  One thread per node.
__global__ void MakeMerges (int size,
        int *mergeWith,
        int *offsets,
        int *mis) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    int agg = mis[idx];
    int target = mergeWith[agg];
    // Unmerged aggregates (target == -1) just compact; merged ones adopt
    // the target aggregate and compact that instead.
    int base = (target == -1) ? agg : target;
    mis[idx] = base - offsets[base];
}
// Same relabelling as MakeMerges, additionally flagging (in splitsToMake)
// every aggregate that absorbed a merge so it can be split afterwards.
__global__ void MakeMerges_MarkSplits(int size,
        int* mergeWith,
        int* offsets,
        int* mis,
        int* splitsToMake) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    int agg = mis[idx];
    int target = mergeWith[agg];
    if (target == -1) {
        // Not merging: only apply the compaction offset
        mis[idx] = agg - offsets[agg];
    }
    else {
        // Merging: adopt the target's compacted id and mark it for a split
        int compacted = target - offsets[target];
        mis[idx] = compacted;
        splitsToMake[compacted] = 1;
    }
}
// Split one oversized aggregate per block into two connected halves.
// Block blockIdx.x handles aggregate splitting[blockIdx.x]; the second half
// is labelled baseAggregateIdx + blockIdx.x.  Requires aggSize <= 64 and
// blockDim.x >= aggSize (shared arrays are sized 64), and at most 40
// in-aggregate neighbours per node (register array below).
// Algorithm: find two mutually distant root nodes by two flood fills,
// grow both aggregates from the roots, then shuffle border nodes to
// balance the two sizes.  Finally write labels back to `aggregation`.
__global__ void MakeSplits(int baseAggregateIdx,
        int* splitting,
        int* aggregation,
        int* aggMapAdjIndices,
        int* aggMapAdjacency,
        int* adjIndices,
        int* adjacency) {
    int currentAgg = splitting[blockIdx.x];
    int aggBegin = aggMapAdjIndices[currentAgg];
    int aggSize = aggMapAdjIndices[currentAgg + 1] - aggBegin;
    int newAgg = baseAggregateIdx + blockIdx.x;
    // Shared state: node ids, two scratch fields (labels / distances),
    // the two roots, the two aggregate sizes, and a convergence flag.
    __shared__ int nodeIds[64];
    __shared__ int scratchA[64];
    __shared__ int scratchB[64];
    __shared__ int rootA;
    __shared__ int rootB;
    __shared__ int aCount, bCount;
    __shared__ bool incomplete;
    // Benign multi-writes (same values from all threads) before first barrier
    incomplete = true;
    aCount = 1;
    bCount = 1;
    // Load in the node Id's from the aggregate map to the shared array:
    if (threadIdx.x < aggSize)
        nodeIds[threadIdx.x] = aggMapAdjacency[aggBegin + threadIdx.x];
    __syncthreads();
    // Each thread loads it's neighbors list into registers, translating into
    // aggregate offsets.
    int neighbors[40];
    int nextNeighbor = 0;
    int nodeId = -1;
    if (threadIdx.x < aggSize)
    {
        nodeId = nodeIds[threadIdx.x];
        int start = adjIndices[nodeId];
        int end = adjIndices[nodeId + 1];
        for (int i = start; i < end; i++)
        {
            int neighborId = adjacency[i];
            // Inline binary search of neighborId within nodeIds[0..aggSize-1]
            int a = 0, b = aggSize - 1, midpoint;
            while (a < b)
            {
                midpoint = a + ((b - a) / 2);
                if (nodeIds[midpoint] < neighborId)
                    a = midpoint + 1;
                else
                    b = midpoint;
            }
            // Keep only neighbours inside this aggregate, as local offsets
            if (nodeIds[a] == neighborId)
            {
                neighbors[nextNeighbor++] = a;
            }
        }
    }
    __syncthreads();
    // Find the farthest node from the lowest indexed node (first root point)
    // Start by marking the first node and threads without a node as seen
    // Mark initial distances in scratch vector
    if (threadIdx.x < aggSize)
        scratchA[threadIdx.x] = threadIdx.x == 0 ? 0 : -1;
    int myDist = threadIdx.x == 0 ? 0 : -1;
    bool swapped = false;
    incomplete = true;
    __syncthreads();
    while (incomplete)
    {
        // Set the incomplete flag to false
        incomplete = false;
        __syncthreads();
        // Check if a neighbor has a positive distance
        if (threadIdx.x < aggSize && myDist == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborDist = scratchA[neighbors[i]];
                if (neighborDist > -1)
                    myDist = neighborDist + 1;
            }
        }
        __syncthreads();
        // Newly-reached nodes publish their distance; the last wave's
        // (arbitrary) writer of rootA becomes the first root
        if (threadIdx.x < aggSize && myDist > 0 && !swapped)
        {
            swapped = true;
            scratchA[threadIdx.x] = myDist;
            rootA = threadIdx.x;
            incomplete = true;
        }
        __syncthreads();
    }
    // Find the farthest node from the first root point (second root point)
    // Start by marking the first node and threads without a node as seen
    // Mark initial distances in scratch vector
    if (threadIdx.x < aggSize)
        scratchA[threadIdx.x] = threadIdx.x == rootA ? 0 : -1;
    myDist = threadIdx.x == rootA ? 0 : -1;
    swapped = false;
    incomplete = true;
    __syncthreads();
    while (incomplete)
    {
        // Set the incomplete flag to false
        incomplete = false;
        __syncthreads();
        // Check if a neighbor has a positive distance
        if (threadIdx.x < aggSize && myDist == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborDist = scratchA[neighbors[i]];
                if (neighborDist > -1)
                {
                    myDist = neighborDist + 1;
                }
            }
        }
        __syncthreads();
        if (threadIdx.x < aggSize && myDist > 0 && !swapped)
        {
            swapped = true;
            scratchA[threadIdx.x] = myDist;
            rootB = threadIdx.x;
            incomplete = true;
        }
        __syncthreads();
    }
    // Setting an assigned aggregate label (In ScratchA) for every node with the node at
    // rootA being assigned the current aggregate ID and the node at rootB
    // being assigned the newAgg ID and set initial distances from a root node
    // (In ScratchB) for each node, -1 for unknown and 0 for the roots
    int myAggregate = -1;
    if (threadIdx.x == rootA)
        myAggregate = currentAgg;
    if (threadIdx.x == rootB)
        myAggregate = newAgg;
    if (threadIdx.x < aggSize)
    {
        scratchA[threadIdx.x] = myAggregate;
        scratchB[threadIdx.x] = myAggregate > -1 ? 0 : -1;
    }
    incomplete = true;
    __syncthreads();
    // Assign nodes to each aggregate until no unassigned nodes remain.
    while (incomplete)
    {
        // Set the incomplete flag to false
        incomplete = false;
        __syncthreads();
        if (threadIdx.x < aggSize && myAggregate == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                if (neighborAgg > -1)
                {
                    // Adopt (the last seen) assigned neighbour's aggregate
                    myDist = scratchB[neighbors[i]] + 1;
                    myAggregate = neighborAgg;
                }
            }
            if (myAggregate == -1)
                incomplete = true;
            // Tally this node's membership exactly once (it never re-enters
            // this branch after being assigned)
            if (myAggregate == newAgg)
                atomicAdd((unsigned int*)&bCount, (unsigned)1);
            if (myAggregate == currentAgg)
                atomicAdd((unsigned int*)&aCount, (unsigned)1);
        }
        __syncthreads();
        if (threadIdx.x < aggSize)
        {
            scratchA[threadIdx.x] = myAggregate;
            scratchB[threadIdx.x] = myDist;
        }
        __syncthreads();
    }
    // If the split was uneven try to repair it
    int sizeDifference = aCount > bCount ? aCount - bCount : bCount - aCount;
    bool moveToA = aCount < bCount;
    __shared__ int moved;
    moved = 0;
    int toMove = sizeDifference / 2;
    incomplete = true;
    __syncthreads();
    while (incomplete && moved < toMove)
    {
        incomplete = false;
        __syncthreads();
        bool swapping = false;
        int newDist = INT_MAX;
        if (threadIdx.x < aggSize)
        {
            // Candidates live in the larger aggregate...
            bool canSwap = moveToA ? myAggregate == newAgg : myAggregate == currentAgg;
            bool borderNode = false;
            // Check if this node has no same aggregate neighbors of higher distance
            // and on a border
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                int neighborDist = scratchB[neighbors[i]];
                if (neighborAgg == myAggregate && neighborDist > myDist)
                    canSwap = false;
                if (neighborAgg != myAggregate)
                {
                    if (neighborDist + 1 < newDist)
                        newDist = neighborDist + 1;
                    borderNode = true;
                }
            }
            // If a node could swap see if it will (first toMove claimants win)
            if (borderNode && canSwap && atomicAdd((unsigned int*)&moved, 1) < toMove)
            {
                swapping = true;
            }
        }
        __syncthreads();
        if (swapping)
        {
            // Move this node across; the unsigned atomicAdd of -a relies on
            // two's-complement wraparound to subtract.
            // NOTE(review): the local myDist is not refreshed to newDist
            // here, so later border tests use a stale distance -- confirm
            // this is the intended (original) behaviour.
            int a = moveToA ? 1 : -1;
            atomicAdd((unsigned int*)&bCount, -a);
            atomicAdd((unsigned int*)&aCount, a);
            scratchA[threadIdx.x] = moveToA ? currentAgg : newAgg;
            scratchB[threadIdx.x] = newDist;
            incomplete = true;
        }
        __syncthreads();
    }
    // Write out the values to the aggregation array
    if (threadIdx.x < aggSize)
    {
        aggregation[nodeId] = scratchA[threadIdx.x];
    }
}
// Weighted variant of MakeSplits: one block splits aggregate
// splitting[blockIdx.x] into two connected halves whose node-WEIGHT sums
// are balanced (the second half is labelled baseAggregateIdx + blockIdx.x).
// Requires aggSize <= 64, blockDim.x >= aggSize, and <= 40 in-aggregate
// neighbours per node.  Both growth and repair loops are capped at 10
// iterations.  Fixes applied versus the original:
//   * root weights were read as weights[threadIdx.x] (global array indexed
//     by LOCAL thread id) instead of the cached nodeWeights[threadIdx.x];
//   * candidate registration used atomicAdd(&insertID,1) - 1, making the
//     first candidate write goodSwaps[-1] (atomicAdd already returns the
//     pre-increment value);
//   * myDist was seeded from thread 0 instead of mirroring the root
//     distance field in scratchB.
// Dead commented-out debug printf blocks removed.
__global__ void MakeSplitsWeighted(int baseAggregateIdx,
        int* splitting,
        int* aggregation,
        int* aggMapAdjIndices,
        int* aggMapAdjacency,
        int* adjIndices,
        int* adjacency,
        int* weights) {
    int currentAgg = splitting[blockIdx.x];
    int aggBegin = aggMapAdjIndices[currentAgg];
    int aggSize = aggMapAdjIndices[currentAgg + 1] - aggBegin;
    int newAgg = baseAggregateIdx + blockIdx.x;
    // Iteration guard shared by the two bounded loops below
    int iterationCount = 0;
    __shared__ int nodeIds[64];
    __shared__ int nodeWeights[64];
    __shared__ int scratchA[64];       // aggregate labels
    __shared__ int scratchB[64];       // distances from the roots
    __shared__ int rootA, rootB;
    __shared__ int aCount, bCount;     // weight totals of the two halves
    __shared__ bool incomplete;
    incomplete = true;
    // Load the aggregate's node ids and cache their weights locally
    if (threadIdx.x < aggSize) {
        nodeIds[threadIdx.x] = aggMapAdjacency[aggBegin + threadIdx.x];
        nodeWeights[threadIdx.x] = weights[nodeIds[threadIdx.x]];
    }
    __syncthreads();
    // Each thread loads its neighbour list into registers as local offsets
    int neighbors[40];
    int nextNeighbor = 0;
    D::LoadLocalNeighbors(&neighbors[0],
            &nextNeighbor,
            aggSize,
            &nodeIds[0],
            adjIndices,
            adjacency);
    // Flood fill distances from node 0 to find first root node
    D::FloodFillDistanceFrom(0,
            &scratchA[0],
            aggSize,
            &neighbors[0],
            nextNeighbor,
            &rootA,
            &incomplete);
    // Flood fill distances from rootA to find rootB
    D::FloodFillDistanceFrom(rootA,
            &scratchA[0],
            aggSize,
            &neighbors[0],
            nextNeighbor,
            &rootB,
            &incomplete);
    // Seed labels (scratchA) and distances (scratchB): rootA keeps the
    // current aggregate id, rootB takes the new one; everyone else -1.
    int myAggregate = -1;
    if (threadIdx.x == rootA) {
        myAggregate = currentAgg;
        // Was weights[threadIdx.x]: indexed the global array by thread id
        aCount = nodeWeights[threadIdx.x];
    }
    if (threadIdx.x == rootB) {
        myAggregate = newAgg;
        bCount = nodeWeights[threadIdx.x];
    }
    if (threadIdx.x < aggSize)
    {
        scratchA[threadIdx.x] = myAggregate;
        scratchB[threadIdx.x] = myAggregate > -1 ? 0 : -1;
    }
    incomplete = true;
    // Mirror the seeded distance field (was seeded from thread 0, which
    // clobbered the roots' zero distances on the first write-back).
    int myDist = myAggregate > -1 ? 0 : -1;
    // Assign nodes to each aggregate until no unassigned nodes remain.
    iterationCount = 0;
    while (incomplete && iterationCount < 10)
    {
        iterationCount++;
        __syncthreads();
        incomplete = false;
        __syncthreads();
        if (threadIdx.x < aggSize && myAggregate == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                if (neighborAgg > -1)
                {
                    myDist = scratchB[neighbors[i]] + 1;
                    myAggregate = neighborAgg;
                }
            }
            if (myAggregate == -1)
                incomplete = true;
            // Tally this node's weight exactly once upon assignment
            if (myAggregate == newAgg)
                atomicAdd((unsigned int*)&bCount, (unsigned)nodeWeights[threadIdx.x]);
            if (myAggregate == currentAgg)
                atomicAdd((unsigned int*)&aCount, (unsigned)nodeWeights[threadIdx.x]);
        }
        __syncthreads();
        if (threadIdx.x < aggSize)
        {
            scratchA[threadIdx.x] = myAggregate;
            scratchB[threadIdx.x] = myDist;
        }
        __syncthreads();
    }
    __syncthreads();
    // If the split was uneven try to repair it by moving one border node
    // per cycle from the heavier half to the lighter one.
    __shared__ int goodSwaps[20];   // thread ids with desirable swaps
    __shared__ int improvement[20]; // resulting weight imbalance per swap
    __shared__ int insertID;        // candidate count, then chosen thread id
    incomplete = true;
    __syncthreads();
    iterationCount = 0;
    while (incomplete && iterationCount < 10)
    {
        iterationCount++;
        int sizeDifference = aCount > bCount ? aCount - bCount : bCount - aCount;
        bool moveToA = aCount < bCount;
        insertID = 0;
        __syncthreads();
        int newDist = INT_MAX;
        if (threadIdx.x < aggSize)
        {
            // Candidates live in the heavier aggregate...
            bool canSwap = moveToA ? myAggregate == newAgg : myAggregate == currentAgg;
            bool borderNode = false;
            // ...must border the other aggregate, and must have no
            // same-aggregate neighbour at a greater distance (so removing
            // them cannot orphan a subtree).
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                int neighborDist = scratchB[neighbors[i]];
                if (neighborAgg == myAggregate && neighborDist > myDist)
                    canSwap = false;
                if (neighborAgg != myAggregate)
                {
                    if (neighborDist + 1 < newDist)
                        newDist = neighborDist + 1;
                    borderNode = true;
                }
            }
            // Register the swap if it would reduce the weight imbalance
            if (borderNode && canSwap)
            {
                int newA = moveToA ?
                        aCount + nodeWeights[threadIdx.x] :
                        aCount - nodeWeights[threadIdx.x];
                int newB = moveToA ?
                        bCount - nodeWeights[threadIdx.x] :
                        bCount + nodeWeights[threadIdx.x];
                int newSizeDifference = newA > newB ?
                        newA - newB :
                        newB - newA;
                if (newSizeDifference < sizeDifference) {
                    // atomicAdd returns the pre-increment value; the old
                    // "- 1" made the first candidate write goodSwaps[-1].
                    int newID = atomicAdd((int *)&insertID, 1);
                    if (newID < 20) {
                        goodSwaps[newID] = threadIdx.x;
                        improvement[newID] = newSizeDifference;
                    }
                }
            }
        }
        __syncthreads();
        // Now finding the best swap to make and making it
        if (insertID > 0)
        {
            // Thread 0 picks the candidate with the smallest resulting
            // imbalance; insertID is reused to hold the winner's thread id.
            if (threadIdx.x == 0) {
                int bestValue = INT_MAX;
                int swapId = -1;
                for (int i = 0; i < insertID && i < 20; i++) {
                    if (improvement[i] < bestValue) {
                        bestValue = improvement[i];
                        swapId = goodSwaps[i];
                    }
                }
                insertID = swapId;
            }
            __syncthreads();
            // The chosen thread moves itself across and adjusts the totals
            if (threadIdx.x == insertID) {
                myAggregate = moveToA ? currentAgg : newAgg;
                scratchA[threadIdx.x] = myAggregate;
                scratchB[threadIdx.x] = newDist;
                aCount = moveToA ? aCount + nodeWeights[threadIdx.x] : aCount - nodeWeights[threadIdx.x];
                bCount = moveToA ? bCount - nodeWeights[threadIdx.x] : bCount + nodeWeights[threadIdx.x];
            }
            __syncthreads();
            // Recompute root distances to verify both halves stayed connected
            __shared__ bool changed;
            scratchB[threadIdx.x] = threadIdx.x == rootA || threadIdx.x == rootB ?
                    0 : -1;
            changed = true;
            __syncthreads();
            while (changed) {
                changed = false;
                // Fill distance from any same-aggregate neighbour that has one
                if (threadIdx.x < aggSize && scratchB[threadIdx.x] == -1) {
                    for (int i = 0; i < nextNeighbor; i++) {
                        if (scratchA[neighbors[i]] == scratchA[threadIdx.x] && scratchB[neighbors[i]] > -1) {
                            scratchB[threadIdx.x] = scratchB[neighbors[i]] + 1;
                            changed = true;
                        }
                    }
                }
                __syncthreads();
            }
            // Any unreachable node means the swap disconnected a half
            if (threadIdx.x < aggSize && scratchB[threadIdx.x] == -1) {
                changed = true;
            }
            __syncthreads();
            // Revert the swap and keep iterating if connectivity broke
            if (changed && threadIdx.x == insertID) {
                scratchA[threadIdx.x] = scratchA[threadIdx.x] == newAgg ?
                        currentAgg : newAgg;
                incomplete = true;
            }
            __syncthreads();
        }
        else {
            // No beneficial swap exists: the repair phase is done
            incomplete = false;
        }
        __syncthreads();
    }
    // Write out the values to the aggregation array
    if (threadIdx.x < aggSize)
    {
        aggregation[nodeIds[threadIdx.x]] = scratchA[threadIdx.x];
    }
}
// Splits one oversized aggregate (one per thread block) into two halves of
// roughly equal node count. The half containing rootA keeps currentAgg's id;
// the other becomes aggregate (baseAggregateIdx + blockIdx.x).
// Assumes: aggSize <= 256, blockDim.x >= aggSize, nodeIds sorted ascending
// (required by the binary search below), and the aggregate is connected.
__global__ void MakeSplits_Large(int baseAggregateIdx,
                                int* splitting,
                                int* aggregation,
                                int* aggMapAdjIndices,
                                int* aggMapAdjacency,
                                int* adjIndices,
                                int* adjacency) {
    int currentAgg = splitting[blockIdx.x];
    int aggBegin = aggMapAdjIndices[currentAgg];
    int aggSize = aggMapAdjIndices[currentAgg + 1] - aggBegin;
    int newAgg = baseAggregateIdx + blockIdx.x;
    __shared__ int nodeIds[256];
    __shared__ int scratchA[256];   // flood-fill distances, later aggregate labels
    __shared__ int scratchB[256];   // distances during the assignment phase
    __shared__ int rootA;
    __shared__ int rootB;
    __shared__ int aCount, bCount;  // node counts of the two halves
    __shared__ bool incomplete;
    if (threadIdx.x == 0) {
        incomplete = true;
        rootA = 0;                  // safe defaults for degenerate aggregates
        rootB = 0;
        aCount = 1;                 // each half starts with its root node
        bCount = 1;
    }
    // Cache this aggregate's node ids in shared memory.
    if (threadIdx.x < aggSize)
        nodeIds[threadIdx.x] = aggMapAdjacency[aggBegin + threadIdx.x];
    __syncthreads();
    // Translate each node's neighbor list into offsets within the aggregate
    // via binary search over the sorted nodeIds; neighbors outside the
    // aggregate are dropped.
    int neighbors[40];
    int nextNeighbor = 0;
    int nodeId = -1;
    if (threadIdx.x < aggSize)
    {
        nodeId = nodeIds[threadIdx.x];
        int start = adjIndices[nodeId];
        int end = adjIndices[nodeId + 1];
        for (int i = start; i < end; i++)
        {
            int neighborId = adjacency[i];
            int a = 0, b = aggSize - 1, midpoint;
            while (a < b)
            {
                midpoint = a + ((b - a) / 2);
                if (nodeIds[midpoint] < neighborId)
                    a = midpoint + 1;
                else
                    b = midpoint;
            }
            // Bounds guard: the local cache holds at most 40 neighbors.
            if (nodeIds[a] == neighborId && nextNeighbor < 40)
            {
                neighbors[nextNeighbor++] = a;
            }
        }
    }
    __syncthreads();
    // First BFS flood fill, from node 0: the last node(s) reached are the
    // farthest away; one of them becomes rootA.
    if (threadIdx.x < aggSize)
        scratchA[threadIdx.x] = threadIdx.x == 0 ? 0 : -1;
    int myDist = threadIdx.x == 0 ? 0 : -1;
    bool swapped = false;
    __syncthreads();
    while (incomplete)
    {
        // Barrier before clearing the flag so no thread can observe the
        // cleared value while another is still reading the loop condition.
        __syncthreads();
        if (threadIdx.x == 0)
            incomplete = false;
        __syncthreads();
        // Adopt a distance from any already-reached neighbor.
        if (threadIdx.x < aggSize && myDist == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborDist = scratchA[neighbors[i]];
                if (neighborDist > -1)
                    myDist = neighborDist + 1;
            }
        }
        __syncthreads();
        if (threadIdx.x < aggSize && myDist > 0 && !swapped)
        {
            swapped = true;
            scratchA[threadIdx.x] = myDist;
            rootA = threadIdx.x;    // racy among equidistant nodes; any is fine
            incomplete = true;
        }
        __syncthreads();
    }
    // Second flood fill, from rootA, to find the far end of the aggregate.
    if (threadIdx.x < aggSize)
        scratchA[threadIdx.x] = threadIdx.x == rootA ? 0 : -1;
    myDist = threadIdx.x == rootA ? 0 : -1;
    swapped = false;
    if (threadIdx.x == 0)
        incomplete = true;
    __syncthreads();
    while (incomplete)
    {
        __syncthreads();
        if (threadIdx.x == 0)
            incomplete = false;
        __syncthreads();
        if (threadIdx.x < aggSize && myDist == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborDist = scratchA[neighbors[i]];
                if (neighborDist > -1)
                    myDist = neighborDist + 1;
            }
        }
        __syncthreads();
        if (threadIdx.x < aggSize && myDist > 0 && !swapped)
        {
            swapped = true;
            scratchA[threadIdx.x] = myDist;
            rootB = threadIdx.x;
            incomplete = true;
        }
        __syncthreads();
    }
    // Assignment phase: scratchA holds each node's aggregate label and
    // scratchB its distance from the owning root. myDist must be
    // re-initialized here: the leftover flood-fill distances would otherwise
    // be copied into scratchB and corrupt the distance table (including a
    // nonzero distance for rootB itself).
    int myAggregate = -1;
    myDist = -1;
    if (threadIdx.x == rootA) {
        myAggregate = currentAgg;
        myDist = 0;
    }
    if (threadIdx.x == rootB) {
        myAggregate = newAgg;
        myDist = 0;
    }
    if (threadIdx.x < aggSize)
    {
        scratchA[threadIdx.x] = myAggregate;
        scratchB[threadIdx.x] = myDist;
    }
    if (threadIdx.x == 0)
        incomplete = true;
    __syncthreads();
    // Grow both halves from their roots until every node is labeled.
    while (incomplete)
    {
        __syncthreads();
        if (threadIdx.x == 0)
            incomplete = false;
        __syncthreads();
        if (threadIdx.x < aggSize && myAggregate == -1)
        {
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                if (neighborAgg > -1)
                {
                    myDist = scratchB[neighbors[i]] + 1;
                    myAggregate = neighborAgg;
                }
            }
            if (myAggregate == -1)
                incomplete = true;
            // Count each node exactly once, in the iteration where it is
            // first labeled (the outer myAggregate == -1 guard ensures this).
            if (myAggregate == newAgg)
                atomicAdd(&bCount, 1);
            if (myAggregate == currentAgg)
                atomicAdd(&aCount, 1);
        }
        __syncthreads();
        if (threadIdx.x < aggSize)
        {
            scratchA[threadIdx.x] = myAggregate;
            scratchB[threadIdx.x] = myDist;
        }
        __syncthreads();
    }
    // Rebalance an uneven split by moving border nodes from the larger half
    // to the smaller one, at most half the difference.
    int sizeDifference = aCount > bCount ? aCount - bCount : bCount - aCount;
    bool moveToA = aCount < bCount;
    int toMove = sizeDifference / 2;
    __shared__ int moved;
    if (threadIdx.x == 0) {
        moved = 0;
        incomplete = true;
    }
    __syncthreads();
    while (incomplete && moved < toMove)
    {
        __syncthreads();
        if (threadIdx.x == 0)
            incomplete = false;
        __syncthreads();
        bool swapping = false;
        int newDist = INT_MAX;
        if (threadIdx.x < aggSize)
        {
            // Only border nodes of the larger half with no same-half neighbor
            // farther from the root may move (keeps the half connected).
            bool canSwap = moveToA ? myAggregate == newAgg : myAggregate == currentAgg;
            bool borderNode = false;
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                int neighborDist = scratchB[neighbors[i]];
                if (neighborAgg == myAggregate && neighborDist > myDist)
                    canSwap = false;
                if (neighborAgg != myAggregate)
                {
                    if (neighborDist + 1 < newDist)
                        newDist = neighborDist + 1;
                    borderNode = true;
                }
            }
            // Claim one of the remaining move slots (atomicAdd returns the
            // pre-increment value).
            if (borderNode && canSwap && atomicAdd(&moved, 1) < toMove)
            {
                swapping = true;
            }
        }
        __syncthreads();
        if (swapping)
        {
            int delta = moveToA ? 1 : -1;
            atomicAdd(&bCount, -delta);
            atomicAdd(&aCount, delta);
            // Keep the private label/distance in sync with the shared tables
            // so later iterations evaluate canSwap against current state.
            myAggregate = moveToA ? currentAgg : newAgg;
            myDist = newDist;
            scratchA[threadIdx.x] = myAggregate;
            scratchB[threadIdx.x] = newDist;
            incomplete = true;
        }
        __syncthreads();
    }
    // Publish the final labels to the global aggregation array.
    if (threadIdx.x < aggSize)
    {
        aggregation[nodeIds[threadIdx.x]] = scratchA[threadIdx.x];
    }
}
// Splits one oversized aggregate (one per thread block) into two parts of
// roughly equal total node weight. The half containing rootA keeps the id
// currentAgg; the other half becomes (baseAggregateIdx + blockIdx.x).
// Assumes aggSize <= 256, blockDim.x >= aggSize, and at most 40 in-aggregate
// neighbors per node (local adjacency cache size in D::LoadLocalNeighbors).
__global__ void MakeSplitsWeighted_Large(int baseAggregateIdx,
                                int* splitting,
                                int* aggregation,
                                int* aggMapAdjIndices,
                                int* aggMapAdjacency,
                                int* adjIndices,
                                int* adjacency,
                                int* weights) {
    int currentAgg = splitting[blockIdx.x];
    int aggBegin = aggMapAdjIndices[currentAgg];
    int aggSize = aggMapAdjIndices[currentAgg + 1] - aggBegin;
    int newAgg = baseAggregateIdx + blockIdx.x;
    int iterationCount = 0;
    __shared__ int nodeIds[256];
    __shared__ int nodeWeights[256];  // weight of each aggregate-local node
    __shared__ int scratchA[256];     // aggregate label per node
    __shared__ int scratchB[256];     // BFS distance per node
    __shared__ int rootA, rootB;
    __shared__ int aCount, bCount;    // total weight of each half
    __shared__ bool incomplete;
    if (threadIdx.x == 0)
        incomplete = true;
    // Cache this aggregate's node ids and their weights in shared memory.
    if (threadIdx.x < aggSize) {
        nodeIds[threadIdx.x] = aggMapAdjacency[aggBegin + threadIdx.x];
        nodeWeights[threadIdx.x] = weights[nodeIds[threadIdx.x]];
    }
    __syncthreads();
    // Translate each node's neighbor list into aggregate-local offsets.
    int neighbors[40];
    int nextNeighbor = 0;
    D::LoadLocalNeighbors(&neighbors[0],
                        &nextNeighbor,
                        aggSize,
                        &nodeIds[0],
                        adjIndices,
                        adjacency);
    // Pick two mutually distant roots: rootA is a node far from node 0,
    // rootB is a node far from rootA.
    D::FloodFillDistanceFrom(0,
                        &scratchA[0],
                        aggSize,
                        &neighbors[0],
                        nextNeighbor,
                        &rootA,
                        &incomplete);
    D::FloodFillDistanceFrom(rootA,
                        &scratchA[0],
                        aggSize,
                        &neighbors[0],
                        nextNeighbor,
                        &rootB,
                        &incomplete);
    // Label the roots and seed the weighted counts with the roots' weights.
    // NOTE: weights[] is indexed by global node id, so the cached
    // nodeWeights[] (aggregate-local) must be used here, not
    // weights[threadIdx.x], which would read an unrelated node's weight.
    // myDist is also re-initialized: the flood-fill leftovers are stale, and
    // both roots (not just thread 0) must start at distance 0.
    int myAggregate = -1;
    int myDist = -1;
    if (threadIdx.x == rootA) {
        myAggregate = currentAgg;
        myDist = 0;
        aCount = nodeWeights[threadIdx.x];
    }
    if (threadIdx.x == rootB) {
        myAggregate = newAgg;
        myDist = 0;
        bCount = nodeWeights[threadIdx.x];
    }
    if (threadIdx.x < aggSize)
    {
        scratchA[threadIdx.x] = myAggregate;
        scratchB[threadIdx.x] = myDist;
    }
    if (threadIdx.x == 0)
        incomplete = true;
    __syncthreads();
    // Grow both halves outward from their roots until every node is labeled
    // (the iteration cap guards against hangs on a disconnected aggregate).
    iterationCount = 0;
    while (incomplete && iterationCount < 10)
    {
        iterationCount++;
        // Barrier before clearing the flag so no thread can observe the
        // cleared value while another is still reading the loop condition.
        __syncthreads();
        if (threadIdx.x == 0)
            incomplete = false;
        __syncthreads();
        if (threadIdx.x < aggSize && myAggregate == -1)
        {
            // Adopt the label and distance of any already-labeled neighbor.
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                if (neighborAgg > -1)
                {
                    myDist = scratchB[neighbors[i]] + 1;
                    myAggregate = neighborAgg;
                }
            }
            if (myAggregate == -1)
                incomplete = true;
            // Each node contributes its weight once, when first labeled.
            if (myAggregate == newAgg)
                atomicAdd(&bCount, nodeWeights[threadIdx.x]);
            if (myAggregate == currentAgg)
                atomicAdd(&aCount, nodeWeights[threadIdx.x]);
        }
        __syncthreads();
        if (threadIdx.x < aggSize)
        {
            scratchA[threadIdx.x] = myAggregate;
            scratchB[threadIdx.x] = myDist;
        }
        __syncthreads();
    }
    __syncthreads();
    // Rebalance: move well-chosen border nodes from the heavier half to the
    // lighter one, reverting any move that disconnects a half.
    __shared__ int goodSwaps[20];     // candidate node offsets
    __shared__ int improvement[20];   // resulting weight difference per candidate
    __shared__ int insertID;          // candidate count, then the chosen node
    if (threadIdx.x == 0)
        incomplete = true;
    __syncthreads();
    iterationCount = 0;
    while (incomplete && iterationCount < 10)
    {
        iterationCount++;
        __syncthreads();
        int sizeDifference = aCount > bCount ? aCount - bCount : bCount - aCount;
        bool moveToA = aCount < bCount;
        if (threadIdx.x == 0)
            insertID = 0;
        __syncthreads();
        int newDist = INT_MAX;
        if (threadIdx.x < aggSize)
        {
            // A candidate must sit on the border of the heavier half and have
            // no same-half neighbor farther from the root (so removing it is
            // unlikely to disconnect its half).
            bool canSwap = moveToA ? myAggregate == newAgg : myAggregate == currentAgg;
            bool borderNode = false;
            for (int i = 0; i < nextNeighbor; i++)
            {
                int neighborAgg = scratchA[neighbors[i]];
                int neighborDist = scratchB[neighbors[i]];
                if (neighborAgg == myAggregate && neighborDist > myDist)
                    canSwap = false;
                if (neighborAgg != myAggregate)
                {
                    if (neighborDist + 1 < newDist)
                        newDist = neighborDist + 1;
                    borderNode = true;
                }
            }
            // Record the candidate if moving it would shrink the imbalance.
            if (borderNode && canSwap)
            {
                int newA = moveToA ?
                    aCount + nodeWeights[threadIdx.x] :
                    aCount - nodeWeights[threadIdx.x];
                int newB = moveToA ?
                    bCount - nodeWeights[threadIdx.x] :
                    bCount + nodeWeights[threadIdx.x];
                int newSizeDifference = newA > newB ?
                    newA - newB :
                    newB - newA;
                if (newSizeDifference < sizeDifference) {
                    // atomicAdd returns the pre-increment value, which is
                    // exactly this candidate's slot index. (The previous
                    // "- 1" sent the first candidate to goodSwaps[-1], an
                    // out-of-bounds shared-memory write.)
                    int slot = atomicAdd(&insertID, 1);
                    if (slot < 20) {
                        goodSwaps[slot] = threadIdx.x;
                        improvement[slot] = newSizeDifference;
                    }
                }
            }
        }
        __syncthreads();
        if (insertID > 0)
        {
            // Thread 0 scans the candidates for the smallest resulting
            // imbalance and publishes the winner through insertID.
            if (threadIdx.x == 0) {
                int bestValue = INT_MAX;
                int swapId = -1;
                for (int i = 0; i < insertID && i < 20; i++) {
                    if (improvement[i] < bestValue) {
                        bestValue = improvement[i];
                        swapId = goodSwaps[i];
                    }
                }
                insertID = swapId;
            }
            __syncthreads();
            // The chosen node relabels itself and adjusts both weight totals.
            if (threadIdx.x == insertID) {
                myAggregate = moveToA ? currentAgg : newAgg;
                scratchA[threadIdx.x] = myAggregate;
                scratchB[threadIdx.x] = newDist;
                aCount = moveToA ? aCount + nodeWeights[threadIdx.x] : aCount - nodeWeights[threadIdx.x];
                bCount = moveToA ? bCount - nodeWeights[threadIdx.x] : bCount + nodeWeights[threadIdx.x];
            }
            __syncthreads();
            // Re-run the in-half BFS from both roots to verify the move kept
            // both halves connected.
            __shared__ bool changed;
            if (threadIdx.x < aggSize)
                scratchB[threadIdx.x] = threadIdx.x == rootA || threadIdx.x == rootB ?
                    0 : -1;
            if (threadIdx.x == 0)
                changed = true;
            __syncthreads();
            while (changed) {
                // Same barrier discipline as above: sync, clear, sync.
                __syncthreads();
                if (threadIdx.x == 0)
                    changed = false;
                __syncthreads();
                if (threadIdx.x < aggSize && scratchB[threadIdx.x] == -1) {
                    for (int i = 0; i < nextNeighbor; i++) {
                        // Fill distance only from same-half neighbors.
                        if (scratchA[neighbors[i]] == scratchA[threadIdx.x] && scratchB[neighbors[i]] > -1) {
                            scratchB[threadIdx.x] = scratchB[neighbors[i]] + 1;
                            changed = true;
                        }
                    }
                }
                __syncthreads();
            }
            // Any node left unreached means the move disconnected its half.
            if (threadIdx.x < aggSize && scratchB[threadIdx.x] == -1) {
                changed = true;
            }
            __syncthreads();
            if (changed && threadIdx.x == insertID) {
                // Revert the move completely: the label, the private copy,
                // and both weight totals (previously only the shared label
                // was flipped, leaving the counts and myAggregate stale).
                myAggregate = moveToA ? newAgg : currentAgg;
                scratchA[threadIdx.x] = myAggregate;
                aCount = moveToA ? aCount - nodeWeights[threadIdx.x] : aCount + nodeWeights[threadIdx.x];
                bCount = moveToA ? bCount + nodeWeights[threadIdx.x] : bCount - nodeWeights[threadIdx.x];
                incomplete = true;
            }
            __syncthreads();
            // Keep the private distance in sync with the recomputed table.
            if (threadIdx.x < aggSize)
                myDist = scratchB[threadIdx.x];
            __syncthreads();
        }
        else {
            // No improving swap exists; stop rebalancing.
            if (threadIdx.x == 0)
                incomplete = false;
        }
        __syncthreads();
    }
    // Publish the final labels back to the global aggregation array.
    if (threadIdx.x < aggSize)
    {
        aggregation[nodeIds[threadIdx.x]] = scratchA[threadIdx.x];
    }
}
// Flags oversized aggregates for splitting: an aggregate is marked when its
// size exceeds maxPartSize and either `force` is set or it is large enough
// to yield two parts bigger than minPartSize. One thread per aggregate.
__global__ void MarkSplits(int size,
                            bool force,
                            int minPartSize,
                            int maxPartSize,
                            int* partSizes,
                            int* splitsToMake) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    int currentSize = partSizes[idx];
    bool splittable = force || currentSize > minPartSize * 2;
    splitsToMake[idx] = (currentSize > maxPartSize && splittable) ? 1 : 0;
}
// For each active aggregate, picks the neighboring active aggregate whose
// merge would best relieve undersized parts; writes the choice (or -1) to
// desiredMerges and deactivates aggregates that found no candidate.
// One thread per aggregate.
__global__ void FindDesirableMerges(int size,
                                int minSize,
                                int maxSize,
                                bool force,
                                int* adjIndices,
                                int* adjacency,
                                int *partSizes,
                                int* desiredMerges,
                                int* merging) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    // Aggregates marked active (merging or with no possible merges) skip out.
    if (merging[idx] == 1)
        return;
    int currentSize = partSizes[idx];
    int checkedNeighbors = 0;
    float bestDesirability = 0;
    int mostDesirable = -1;
    int start = adjIndices[idx];
    int end = adjIndices[idx + 1];
    for (int i = start; i < end; i++)
    {
        int neighborAgg = adjacency[i];
        // Only inactive neighbor aggregates are candidates.
        if (merging[neighborAgg] == 1)
            continue;
        checkedNeighbors++;
        int neighborSize = partSizes[neighborAgg];
        // Desirability rewards relieving undersized aggregates...
        float desirability = 0;
        desirability += currentSize < minSize ? minSize - currentSize : 0;
        desirability += neighborSize < minSize ? minSize - neighborSize : 0;
        // ...and penalizes (or forbids, unless forced) exceeding maxSize.
        int totalSize = currentSize + neighborSize;
        if (totalSize > maxSize)
            desirability *= force ? 1.0/(totalSize - maxSize) : 0;
        // Track the most desirable merge seen so far.
        if (desirability > bestDesirability)
        {
            bestDesirability = desirability;
            mostDesirable = neighborAgg;
        }
    }
    // Deactivate aggregates with no usable partner.
    if (mostDesirable == -1)
        merging[idx] = 1;
    if (currentSize < minSize && force && mostDesirable == -1)
        printf("Aggregate %d is too small but found no merges! %d / %d neighbors checked.\n",idx, checkedNeighbors, end-start);
    desiredMerges[idx] = mostDesirable;
}
// For each active aggregate, finds the neighboring active aggregate whose
// merge (followed by a later split) would bring the combined size closest to
// desiredSize. A pair is legal when at least one member is outside
// [minSize, maxSize] and the combined size lies in (minSize*2, maxSize*2).
// Writes the chosen neighbor (or -1) to desiredMerges; aggregates with no
// candidate are marked inactive in `merging`. One thread per aggregate.
__global__ void FindDesirableMergeSplits(int size,
                                int minSize,
                                int maxSize,
                                int desiredSize,
                                int* adjIndices,
                                int* adjacency,
                                int* partSizes,
                                int* desiredMerges,
                                int* merging) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        // Only evaluate if the aggregate is not marked as active (merging
        // or no possible merges).
        if (merging[idx] != 1)
        {
            int currentSize = partSizes[idx];
            bool currentOutSized = currentSize < minSize || currentSize > maxSize;
            float bestDesirability = 0;
            int mostDesirable = -1;
            int start = adjIndices[idx];
            int end = adjIndices[idx + 1];
            for (int i = start; i < end; i++)
            {
                int neighborAgg = adjacency[i];
                // Only active neighbor aggregates should be looked at:
                if (merging[neighborAgg] != 1)
                {
                    int neighborSize = partSizes[neighborAgg];
                    bool neighborOutSized = neighborSize < minSize || neighborSize > maxSize;
                    int totalSize = currentSize + neighborSize;
                    bool legalPair = (neighborOutSized || currentOutSized) &&
                        totalSize > minSize * 2 && totalSize < maxSize * 2;
                    // Desirability grows as the combined size approaches
                    // desiredSize. An exact match is scored with a huge
                    // constant: the previous 1.0/abs(...) divided by zero
                    // when totalSize == desiredSize.
                    int gap = totalSize > desiredSize ?
                        totalSize - desiredSize : desiredSize - totalSize;
                    float desirability = 0;
                    if (legalPair)
                        desirability = gap == 0 ? 1e30f : 1.0f / gap;
                    // If this merge is the most desirable seen mark it
                    if (desirability > bestDesirability)
                    {
                        bestDesirability = desirability;
                        mostDesirable = neighborAgg;
                    }
                }
            }
            // No candidate: deactivate so later passes skip this aggregate.
            if (mostDesirable == -1)
                merging[idx] = 1;
            desiredMerges[idx] = mostDesirable;
        }
    }
}
// Pairs up aggregates whose merge desires are mutual. The higher-indexed
// aggregate of each pair is recorded as merging into the lower-indexed one,
// and both are flagged in `merging`. Any one-sided desire sets incomplete[0]
// so the caller runs another marking pass. One thread per aggregate.
__global__ void MarkMerges(int size,
                            int* desiredMerges,
                            int* merging,
                            int* mergesToMake,
                            int* incomplete) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    int partner = desiredMerges[idx];
    // Nothing to do when this aggregate has no candidate partner.
    if (partner < 0)
        return;
    if (desiredMerges[partner] == idx)
    {
        // Mutual desire: record higher index merging into lower. Both ends
        // of the pair execute this and write identical values, so the
        // duplicate writes are benign.
        if (partner > idx)
            mergesToMake[partner] = idx;
        else
            mergesToMake[idx] = partner;
        merging[idx] = 1;
        merging[partner] = 1;
    }
    else
    {
        // One-sided desire: request another marking round.
        incomplete[0] = 1;
    }
}
}
// Public methods for merge split conditioner
// Builds a conditioner over the given graph and aggregation. The labels are
// copied; the graph is only referenced, so it must outlive this object.
MergeSplitConditionerGPU::MergeSplitConditionerGPU(AggMIS::Types::Graph_d& graph,
        AggMIS::Types::IntVector_d& aggregation) {
    this->graph = &graph;
    this->aggregation.assign(aggregation.begin(), aggregation.end());
    // Derive the aggregate-level (induced) graph and per-aggregate sizes.
    inducedGraph = GraphHelpers::GetInducedGraph(graph, aggregation);
    GraphHelpers::getPartSizes(aggregation, partSizes);
    // Defaults: quiet output, size window [20, 30], zeroed statistics.
    verbose = false;
    minSize = 20;
    maxSize = 30;
    outsizedParts = 0;
    merges = 0;
    mergeSplits = 0;
    splits = 0;
}
// Sets the allowed aggregate size window [min, max] used by the merge and
// split heuristics.
void MergeSplitConditionerGPU::SetSizeBounds(int min,
        int max) {
    minSize = min;
    maxSize = max;
}
// Enables or disables progress logging for subsequent operations.
void MergeSplitConditionerGPU::SetVerbose(bool v) {
    verbose = v;
}
// Installs per-node weights and computes the weighted aggregate sizes.
// NOTE: swap() takes ownership of the caller's vector (it is left holding
// the previous weights, typically empty).
void MergeSplitConditionerGPU::SetNodeWeights(AggMIS::Types::IntVector_d &input) {
    nodeWeights.swap(input);
    GraphHelpers::getPartSizes(aggregation, weightedSizes, nodeWeights);
}
// Exposes the current (device-side) aggregation labels.
AggMIS::Types::IntVector_d* MergeSplitConditionerGPU::GetAggregation() {
    return &aggregation;
}
// Exposes the current (device-side) per-node weights.
AggMIS::Types::IntVector_d* MergeSplitConditionerGPU::GetNodeWeights() {
    return &nodeWeights;
}
// Performs merge passes until no further merges get marked. Terminates
// because every executed merge reduces the aggregate count.
void MergeSplitConditionerGPU::CycleMerges(bool force) {
    int cycles = 0;
    for (; MarkMerges(force); ++cycles)
        MakeMerges(false);
    if (verbose)
        printf("Finished cycling merges after %d cycles.\n", cycles);
}
// Performs split passes until nothing gets marked, capped at 10 rounds as a
// safety net against oscillation.
void MergeSplitConditionerGPU::CycleSplits(bool force) {
    int cycles = 0;
    // MarkSplits must run first each round (its side effects set up MakeSplits).
    while (MarkSplits(force) && cycles < 10) {
        MakeSplits();
        ++cycles;
    }
    if (verbose)
        printf("Finished cycling splits after %d cycles.\n", cycles);
}
// Repeatedly performs merge-split passes while each pass keeps reducing the
// number of aggregates whose (possibly weighted) size is outside
// [minSize, maxSize] by more than `minImprove` (a fraction of the previous
// outsized count). Updates the cached `outsizedParts` member.
void MergeSplitConditionerGPU::CycleMergeSplits(float minImprove,
        int desiredSize) {
    // Start with an initial cycle
    bool somethingDone = MakeMergeSplits(desiredSize);
    // Weighted sizes take precedence when node weights are present.
    AggMIS::Types::IntVector_d *sizes = &partSizes;
    if (nodeWeights.size() > 0)
        sizes = &weightedSizes;
    // Check how much improvement was made. Guard against division by zero:
    // on the first call outsizedParts is still 0 (set in the constructor),
    // and the unguarded ratio produced NaN/Inf. Reporting 0 improvement
    // preserves the old loop-exit behavior (NaN/-Inf also failed the test).
    int after = thrust::count_if(sizes->begin(), sizes->end(),
                                 Functors::isOutSized(minSize, maxSize));
    float improvement = outsizedParts > 0 ?
        (float)(outsizedParts - after) / outsizedParts : 0.f;
    outsizedParts = after;
    // While good progress is being made continue cycling
    while (improvement > minImprove && somethingDone)
    {
        somethingDone = MakeMergeSplits(desiredSize);
        after = thrust::count_if(sizes->begin(), sizes->end(),
                                 Functors::isOutSized(minSize, maxSize));
        improvement = outsizedParts > 0 ?
            (float)(outsizedParts - after) / outsizedParts : 0.f;
        outsizedParts = after;
    }
}
// Runs the full conditioning pipeline: unforced merges/splits first, then
// alternating forced-merge ("high") and forced-split ("low") cycles with
// merge-splits in between, until the fraction of outsized aggregates drops
// below `tolerance` or `maxCycles` cycles have run. Returns whether the
// requested constraint was met: no oversized parts when respectUpper, no
// undersized parts otherwise, plus the tolerance test in both cases.
bool MergeSplitConditionerGPU::Condition(int desiredSize,
        bool respectUpper,
        float tolerance,
        float minImprove,
        int maxCycles) {
    if (verbose)
        PrintProgress(&std::cout, "Starting conditioning.", true, true, true, true);
    // Unforced (optimal) passes come first.
    if (verbose)
        printf("Starting to CycleMerges\n");
    CycleMerges(false);
    if (verbose)
        printf("Starting to CycleSplits\n");
    CycleSplits(false);
    if (verbose)
        printf("Starting to CycleMergeSplits\n");
    // This also refreshes the cached outsizedParts count.
    CycleMergeSplits(minImprove, desiredSize);
    float currentRatio = (float)outsizedParts / partSizes.size();
    if (verbose)
        printf("Initial outsized ratio is: %f\n", currentRatio);
    // Main phase: alternate forced cycle types until tolerance or cycle cap.
    int counter = 0;
    bool highCycle = false;
    while (currentRatio > tolerance && counter < maxCycles)
    {
        counter++;
        if (verbose)
            printf("Starting %s conditioning cycle %d\n",
                   highCycle ? "high" : "low", counter);
        if (highCycle)
            CycleMerges(true);
        else
            CycleSplits(true);
        CycleMergeSplits(minImprove, desiredSize);
        // Only refresh the ratio on cycles whose forced operation works
        // toward the bound we are asked to respect.
        if ((highCycle && !respectUpper) || (!highCycle && respectUpper))
            currentRatio = (float)outsizedParts / partSizes.size();
        highCycle = !highCycle;
        if (verbose)
        {
            std::stringstream ss;
            ss << "After condition cycle: " << counter;
            PrintProgress(&std::cout, ss.str(), true, true, true, true);
        }
    }
    // Cleanup passes biased toward the respected bound.
    if (respectUpper)
    {
        CycleSplits(true);
        CycleMerges(false);
    }
    else
        CycleMerges(true);
    // Evaluate the result against the requested criteria (always on the
    // unweighted part sizes, as before).
    int undersized = thrust::count_if(partSizes.begin(), partSizes.end(),
                                      Functors::lessThan(minSize));
    int oversized = thrust::count_if(partSizes.begin(), partSizes.end(),
                                     Functors::greaterThan(maxSize));
    if (verbose)
        PrintProgress(&std::cout, "After conditioning completed.", true, true, true, true);
    if (respectUpper)
        return (oversized == 0 && (float)outsizedParts / partSizes.size() < tolerance);
    else
        return (undersized == 0 && (float)outsizedParts / partSizes.size() < tolerance);
}
// Writes a framed progress report containing the given note plus any of the
// four optional stat sections to `output`.
void MergeSplitConditionerGPU::PrintProgress(std::ostream* output,
        std::string note,
        bool graphStat,
        bool progressStat,
        bool sizeStat,
        bool memStat) {
    std::ostream& out = *output;
    out << "\n------------------- Progress Check ------------------\n";
    out << "Note: " << note.c_str() << "\n";
    // Each section prints without its own frame (false): the frame above
    // already delimits the report.
    if (graphStat)
        PrintGraphStats(output, false);
    if (progressStat)
        PrintProgressStats(output, false);
    if (sizeStat)
        PrintSizeStats(output, false);
    if (memStat)
        PrintMemoryStats(output, false);
    out << "-----------------------------------------------------\n\n";
}
// Prints under/over-sized aggregate counts and the min/max aggregate size.
// Uses the weighted sizes when node weights are present, in which case the
// unweighted extremes are also reported for comparison.
void MergeSplitConditionerGPU::PrintSizeStats(std::ostream* output,
        bool makeHeader) {
    std::ostream& out = *output;
    if (makeHeader)
        out << "\n--------------------- Size Check --------------------\n";
    // Weighted sizes take precedence when node weights are present.
    bool weighted = nodeWeights.size() > 0;
    AggMIS::Types::IntVector_d& sizeVec = weighted ? weightedSizes : partSizes;
    int tooSmall = thrust::count_if(sizeVec.begin(), sizeVec.end(),
                                    Functors::lessThan(minSize));
    int tooBig = thrust::count_if(sizeVec.begin(), sizeVec.end(),
                                  Functors::greaterThan(maxSize));
    int biggest = thrust::reduce(sizeVec.begin(), sizeVec.end(), 0,
                                 thrust::maximum<int>());
    int tiniest = thrust::reduce(sizeVec.begin(), sizeVec.end(), INT_MAX,
                                 thrust::minimum<int>());
    out << "Aggregate size statistics:";
    out << "\n\tUndersized(<" << minSize << "): " << tooSmall << " / " << (partSizes.size()) << " Total";
    out << "\n\tOversized(>" << maxSize << "): " << tooBig << " / " << (partSizes.size()) << " Total";
    out << "\n\tSmallest: " << tiniest;
    out << " Largest: " << biggest << "\n";
    if (weighted)
    {
        // Also show the raw node-count extremes when weighted sizes were used.
        biggest = thrust::reduce(partSizes.begin(), partSizes.end(), 0,
                                 thrust::maximum<int>());
        tiniest = thrust::reduce(partSizes.begin(), partSizes.end(), INT_MAX,
                                 thrust::minimum<int>());
        out << "\n\tUnweighted: Smallest: " << tiniest;
        out << " Largest: " << biggest << "\n";
    }
    if (makeHeader)
        out << "-----------------------------------------------------\n\n";
}
// Queries the CUDA runtime for device memory status and prints available,
// total, and utilized amounts (in decimal megabytes).
void MergeSplitConditionerGPU::PrintMemoryStats(std::ostream* output,
        bool makeHeader) {
    std::ostream& out = *output;
    if (makeHeader)
        out << "\n-------------------- Memory Check -------------------\n";
    size_t avail;
    size_t total;
    cudaMemGetInfo(&avail, &total);
    // Bytes -> megabytes (decimal).
    avail /= 1000000;
    total /= 1000000;
    out << "Device Memory Status:";
    out << "\n\tAvailable: " << (int)avail;
    out << "\tTotal: " << (int)total;
    out << "\tUtilized: " << (int)(total-avail) << "\n";
    if (makeHeader)
        out << "-----------------------------------------------------\n\n";
}
// Prints the running totals of merges, splits, and merge-splits performed.
void MergeSplitConditionerGPU::PrintProgressStats(std::ostream* output,
        bool makeHeader) {
    std::ostream& out = *output;
    if (makeHeader)
        out << "\n------------------- Progress Check ------------------\n";
    out << "Processing done:";
    out << "\n\tMerges: " << merges;
    out << "\tSplits: " << splits;
    out << "\tMerge-Splits: " << mergeSplits << "\n";
    if (makeHeader)
        out << "-----------------------------------------------------\n\n";
}
// Prints node count, valence statistics, and (when node weights are set)
// weight statistics for the underlying graph.
void MergeSplitConditionerGPU::PrintGraphStats(std::ostream* output,
        bool makeHeader) {
    std::ostream& out = *output;
    if (makeHeader)
        out << "\n----------------- Graph Information -----------------\n";
    // Weight statistics (only printed when nodeWeights is non-empty; the
    // reductions on an empty vector just return their initial values).
    int weightSum = thrust::reduce(nodeWeights.begin(), nodeWeights.end());
    int weightMin = thrust::reduce(nodeWeights.begin(), nodeWeights.end(),
                                   INT_MAX, thrust::minimum<int>());
    int weightMax = thrust::reduce(nodeWeights.begin(), nodeWeights.end(),
                                   0, thrust::maximum<int>());
    // Valence (degree) statistics come from a temporary device vector that
    // must be freed here.
    AggMIS::Types::IntVector_d *valences = GraphHelpers::GetValences(*graph);
    int valenceMin = thrust::reduce(valences->begin(), valences->end(),
                                    INT_MAX, thrust::minimum<int>());
    int valenceMax = thrust::reduce(valences->begin(), valences->end(),
                                    0, thrust::maximum<int>());
    valences->clear();
    delete valences;
    out << "Graph Information:";
    out << "\n\tNodes: " << graph->Size();
    if (nodeWeights.size() > 0)
        out << " Graph is weighted";
    else
        out << " Graph is unweighted";
    out << "\n\tMin. Valence: " << valenceMin;
    out << " Max. Valence: " << valenceMax;
    out << " Avg. Valence: " << ((float)graph->adjacency->size()/graph->Size());
    if (nodeWeights.size() > 0) {
        out << "\n\tTotal Weight: " << weightSum;
        out << " Avg. Weight: " << ((float)weightSum / graph->Size());
        out << " Min. Weight: " << weightMin;
        out << " Max. Weight: " << weightMax;
    }
    out << "\n";
    if (makeHeader)
        out << "-----------------------------------------------------\n\n";
}
// Simple interactive debugging console driven by single-character commands
// read from stdin. Commands:
//   o/f then m|s|g  run optimal ('o') or forced ('f') merges, splits, or
//                   merge-splits
//   s               show the tuning variables and read new values
//   c               run full Condition() with the current variables
//   v               validate the current aggregation
//   l then 0|1      set the verbose flag
//   d               done (exit the console)
void MergeSplitConditionerGPU::InteractiveConsole(std::string message) {
    // Start off by printing overall status info and message
    PrintProgress(&std::cout, message, true, true, true, false);
    // Setting needed variables to defaults
    float minImprove = .1;
    int desiredSize = (minSize + maxSize) / 2;
    float tolerance = .1;
    int maxCycles = 10;
    bool cycling = true;        // true: cycle to fixed point; false: single mark/make pass
    bool respectUpper = true;
    // Starting the main prompt:
    char operation;
    printf("\nIC:");
    std::cin >> operation;
    while (operation != 'd')
    {
        if (operation == 'o' || operation == 'f')
        {
            // 'o' = optimal (unforced), 'f' = forced; next char picks the op.
            bool force = operation == 'f';
            std::cin >> operation;
            if (operation == 'm')
            {
                if (cycling)
                    CycleMerges(force);
                else {
                    MarkMerges(force);
                    MakeMerges(false);
                }
                std::string msg = force ? "After forced merges" : "After optimal merges";
                PrintProgress(&std::cout, msg, false, true, true, false);
            }
            if (operation == 's')
            {
                if (cycling)
                    CycleSplits(force);
                else {
                    MarkSplits(force);
                    MakeSplits();
                }
                std::string msg = force ? "After forced splits" : "After optimal splits";
                PrintProgress(&std::cout, msg, false, true, true, false);
            }
            if (operation == 'g')
            {
                if (cycling)
                    CycleMergeSplits(minImprove, desiredSize);
                else
                    MakeMergeSplits(desiredSize);
                PrintProgress(&std::cout, "After merge-splits", false, true, true, false);
            }
        }
        else if (operation == 's') {
            // Printing the current values of the variables
            std::string cyclingFlag = cycling ? "True" : "False";
            std::string respectUpperFlag = respectUpper ? "True" : "False";
            std::cout << "\nCurrent values of variables:";
            std::cout << "\n\tminSize: " << minSize;
            std::cout << " maxSize: " << maxSize;
            std::cout << " desiredSize: " << desiredSize;
            std::cout << " maxCycles: " << maxCycles;
            std::cout << "\n\tminImprove: " << minImprove;
            std::cout << " tolerance: " << tolerance;
            std::cout << " cycling: " << cyclingFlag;
            std::cout << " respectUpper: " << respectUpperFlag;
            std::cout << "\n\nEnter new values in same order\nIC:";
            // Grabbing the new values
            std::cin >> minSize;
            std::cin >> maxSize;
            std::cin >> desiredSize;
            std::cin >> maxCycles;
            std::cin >> minImprove;
            std::cin >> tolerance;
            std::cin >> cycling;
            std::cin >> respectUpper;
            // Confirming the entry
            cyclingFlag = cycling ? "True" : "False";
            respectUpperFlag = respectUpper ? "True" : "False";
            std::cout << "\nNew values of variables:";
            std::cout << "\n\tminSize: " << minSize;
            std::cout << " maxSize: " << maxSize;
            std::cout << " desiredSize: " << desiredSize;
            std::cout << " maxCycles: " << maxCycles;
            std::cout << "\n\tminImprove: " << minImprove;
            std::cout << " tolerance: " << tolerance;
            std::cout << " cycling: " << cyclingFlag;
            std::cout << " respectUpper: " << respectUpperFlag << "\n\n";
        }
        else if (operation == 'c')
        {
            Condition(desiredSize, respectUpper, tolerance, minImprove, maxCycles);
            PrintProgress(&std::cout, "After conditioning", false, true, true, false);
        }
        else if (operation == 'v') {
            bool valid = Aggregation::IsValidAggregation(*graph, aggregation, false);
            if (valid)
                printf("Aggregation is valid\n");
            else
                printf("Aggregation is not valid!\n");
        }
        else if (operation == 'l') {
            bool v;
            std::cin >> v;
            SetVerbose(v);
            printf("Set verbose to %s\n", v ? "True" : "False");
        }
        // Printing prompt for another go
        printf("IC:");
        std::cin >> operation;
    }
}
// Private methods for merge split conditioner
// Marks pairs of aggregates to merge. Runs the FindDesirableMerges and
// MarkMerges kernels repeatedly until the marking reaches a fixed point
// (the kernels raise `incomplete` while one-sided desires remain), then
// returns true if any merge was recorded in mergesToMake.
bool MergeSplitConditionerGPU::MarkMerges(bool force) {
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    // Reset mergesToMake to all -1 (no merge).
    thrust::constant_iterator<int> negOne(-1);
    mergesToMake.assign(negOne, negOne + inducedGraph->Size());
    // Weighted sizes take precedence when node weights are present.
    AggMIS::Types::IntVector_d *sizes = &partSizes;
    if (nodeWeights.size() > 0)
        sizes = &weightedSizes;
    // Declaring temp arrays
    int size = inducedGraph->Size();
    AggMIS::Types::IntVector_d desiredMerges(size, -1);
    AggMIS::Types::IntVector_d merging(size, 0);
    AggMIS::Types::IntVector_d incomplete(1,1);
    // Launch configuration. NOTE: launch syntax is <<<gridDim, blockDim>>>;
    // the arguments were previously passed reversed, which works by accident
    // for small inputs but exceeds the 1024 threads-per-block limit once
    // nBlocks grows past 1024.
    int blockSize = 256;
    int nBlocks = size/blockSize + (size%blockSize == 0?0:1);
    while (incomplete[0] == 1)
    {
        incomplete[0] = 0;
        if (verbose)
            printf("Calling FindDesirableMerges Kernel <<<%d, %d>>>\n",
                   nBlocks, blockSize);
        Kernels::FindDesirableMerges <<<nBlocks, blockSize>>>
            (size,
             minSize,
             maxSize,
             force,
             inducedGraph->indStart(),
             inducedGraph->adjStart(),
             AggMIS::Types::StartOf(sizes),
             AggMIS::Types::StartOf(desiredMerges),
             AggMIS::Types::StartOf(merging));
        if (verbose)
            printf("Calling MarkMerges Kernel <<<%d, %d>>>\n",
                   nBlocks, blockSize);
        Kernels::MarkMerges <<<nBlocks, blockSize>>>
            (size,
             AggMIS::Types::StartOf(desiredMerges),
             AggMIS::Types::StartOf(merging),
             AggMIS::Types::StartOf(mergesToMake),
             AggMIS::Types::StartOf(incomplete));
    }
    int marked = -1;
    // Count the marked merges and release the temp arrays.
    try {
        marked = thrust::count_if(mergesToMake.begin(), mergesToMake.end(),
                                  Functors::NotNegOne());
        desiredMerges.clear();
        merging.clear();
        incomplete.clear();
    }
    catch (thrust::system::system_error &e)
    {
        printf("Caught exception at end of MarkMerges!\n");
        std::cerr << e.what() << std::endl;
        std::cerr << "Error code: " << e.code() << std::endl;
        InputHelpers::GetNonEmptyLineCIN();
    }
    if (verbose)
        printf("MarkMerges completing with %d merges marked.\n", marked);
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    return marked > 0;
}
// Marks aggregates that need splitting: the kernel sets splitsToMake[i] = 1
// for every aggregate i whose size violates the bounds. When force is true
// the kernel relaxes the size criteria. Returns true if any split was marked.
bool MergeSplitConditionerGPU::MarkSplits(bool force) {
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    int size = inducedGraph->Size();
    thrust::constant_iterator<int> zero(0);
    splitsToMake.assign(zero, zero + size);
    // Choosing which sizes to use: weighted sizes take precedence if present
    AggMIS::Types::IntVector_d *sizes = &partSizes;
    if (nodeWeights.size() > 0)
        sizes = &weightedSizes;
    // Debug (cast: size() is size_t, printf expects int for %d)
    if (verbose)
        printf("MarkSplits called. InducedGraph Size: %d partSizes Size: %d\n",
                size, (int)partSizes.size());
    // Figuring out launch configuration (ceil-divide so every node is covered)
    int blockSize = 256;
    int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
    // BUGFIX: a second debug printf here passed three arguments for two %d
    // specifiers (printing `size` as the block size); the duplicate is gone.
    if (verbose)
        printf("Calling MarkSplits Kernel <<<%d, %d>>>\n",
                nBlocks, blockSize);
    // BUGFIX: grid/block launch arguments were reversed; see MarkMerges.
    Kernels::MarkSplits <<<nBlocks, blockSize>>>
            (size,
            force,
            minSize,
            maxSize,
            AggMIS::Types::StartOf(sizes),
            AggMIS::Types::StartOf(splitsToMake));
    int marked = thrust::count(splitsToMake.begin(), splitsToMake.end(), 1);
    if (verbose)
        printf("MarkSplits completed with %d splits marked\n", marked);
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    return marked > 0;
}
// Marks merges between neighboring aggregates as the first phase of a
// merge-then-split pass targeting desiredSize. Fills mergesToMake exactly
// like MarkMerges (partner id, or -1). Returns true if any merge was marked.
bool MergeSplitConditionerGPU::MarkMergeSplits(int desiredSize) {
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    // Getting the size to use
    int size = inducedGraph->Size();
    // Choosing which sizes to use: weighted sizes take precedence if present
    AggMIS::Types::IntVector_d *sizes = &partSizes;
    if (nodeWeights.size() > 0)
        sizes = &weightedSizes;
    // Making sure the mergesToMake array is sized correctly
    thrust::constant_iterator<int> negOne(-1);
    mergesToMake.assign(negOne, negOne + size);
    // Declaring temp arrays
    AggMIS::Types::IntVector_d desiredMerges(size, -1);
    AggMIS::Types::IntVector_d merging(size, 0);
    AggMIS::Types::IntVector_d incomplete(1, 1);
    // Figuring out launch configuration (ceil-divide so every node is covered)
    int blockSize = 256;
    int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
    // Iterate until the marking kernel reports no unresolved conflicts
    while (incomplete[0] == 1)
    {
        incomplete[0] = 0;
        if (verbose)
            printf("Calling FindDesirableMergeSplits Kernel <<<%d, %d>>>\n",
                    nBlocks, blockSize);
        // BUGFIX: grid/block launch arguments were reversed; see MarkMerges.
        Kernels::FindDesirableMergeSplits <<<nBlocks, blockSize>>>
                (size,
                minSize,
                maxSize,
                desiredSize,
                inducedGraph->indStart(),
                inducedGraph->adjStart(),
                AggMIS::Types::StartOf(sizes),
                AggMIS::Types::StartOf(desiredMerges),
                AggMIS::Types::StartOf(merging));
        if (verbose)
            printf("Calling MarkMerges Kernel <<<%d, %d>>>\n",
                    nBlocks, blockSize);
        Kernels::MarkMerges <<<nBlocks, blockSize>>>
                (size,
                AggMIS::Types::StartOf(desiredMerges),
                AggMIS::Types::StartOf(merging),
                AggMIS::Types::StartOf(mergesToMake),
                AggMIS::Types::StartOf(incomplete));
    }
    // Checking for marked merges
    int marked = thrust::count_if(mergesToMake.begin(),
            mergesToMake.end(),
            Functors::NotNegOne());
    // Cleaning up temp arrays
    desiredMerges.clear();
    merging.clear();
    incomplete.clear();
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    return marked > 0;
}
// Performs all splits marked in splitsToMake. Marked aggregates are split in
// two size classes: "small" (sizes 0-64) handled by MakeSplits[Weighted] and
// "big" (sizes 65-256) handled by the _Large variants. The kernels run one
// block per aggregate, launched in batches of at most 64 blocks; the first
// kernel argument is the id at which newly created aggregates start
// numbering. Afterwards the induced graph and part sizes are rebuilt.
void MergeSplitConditionerGPU::MakeSplits() {
    thrust::counting_iterator<int> zero(0);
    // Get list of all small aggregates to split (sizes 0-64).
    // NOTE(review): the size filter always reads partSizes, even when node
    // weights are present and marking used weightedSizes — confirm intended.
    AggMIS::Types::IntVector_d smallSplits(splitsToMake.size());
    int smallSplitsCount = thrust::copy_if(zero,
            zero + splitsToMake.size(),
            thrust::make_zip_iterator(
                thrust::make_tuple(
                    splitsToMake.begin(),
                    partSizes.begin())),
            smallSplits.begin(),
            Functors::SplitFilter(0, 64))
        - smallSplits.begin();
    smallSplits.resize(smallSplitsCount);
    if (verbose)
        printf("Found %d small splits to do.\n", smallSplitsCount);
    // Get list of all big aggregates to split (sizes 65-256)
    AggMIS::Types::IntVector_d bigSplits(splitsToMake.size());
    int bigSplitsCount = thrust::copy_if(zero,
            zero + splitsToMake.size(),
            thrust::make_zip_iterator(
                thrust::make_tuple(
                    splitsToMake.begin(),
                    partSizes.begin())),
            bigSplits.begin(),
            Functors::SplitFilter(65, 256))
        - bigSplits.begin();
    bigSplits.resize(bigSplitsCount);
    splits += smallSplitsCount + bigSplitsCount;
    if (verbose)
        printf("Found %d big splits to do.\n", bigSplitsCount);
    // Mapping from aggregate id to its member nodes
    AggMIS::Types::Graph_d* aggMap = Aggregation::GetAggregateMap(aggregation);
    // Making the splits is different for weighted/non-weighted
    if (nodeWeights.size() == 0) {
        // Do the small splits, at most 64 blocks per launch
        int offset = 0;
        while (smallSplitsCount - offset > 0) {
            int toDo = smallSplitsCount - offset > 64 ?
                    64 : smallSplitsCount - offset;
            if (verbose)
                printf("Calling MakeSplits Kernel <<<%d, %d>>>\n",
                        toDo, 64);
            Kernels::MakeSplits <<< toDo, 64 >>>
                    (aggMap->Size() + offset,
                    AggMIS::Types::StartOf(smallSplits) + offset,
                    AggMIS::Types::StartOf(aggregation),
                    aggMap->indStart(),
                    aggMap->adjStart(),
                    graph->indStart(),
                    graph->adjStart());
            offset += toDo;
        }
        // Reset the offset and do the big splits
        offset = 0;
        while (bigSplitsCount - offset > 0) {
            int toDo = bigSplitsCount - offset > 64 ?
                    64 : bigSplitsCount - offset;
            if (verbose) {
                printf("Calling MakeSplits_Large Kernel <<<%d, %d>>>\n",
                        toDo, 256);
                // BUGFIX: this debug dump was unconditional; the loop below
                // reads device elements one at a time, so it now only runs
                // when verbose is set.
                AggMIS::Types::Display::Print(bigSplits, "Big splits");
                AggMIS::Types::IntVector_h sizes(bigSplits.size());
                for (int i = 0; i < bigSplits.size(); i++)
                    sizes[i] = partSizes[bigSplits[i]];
                AggMIS::Types::Display::Print(sizes, "Sizes of big splits");
            }
            Kernels::MakeSplits_Large <<< toDo, 256 >>>
                    (aggMap->Size() + smallSplitsCount + offset,
                    AggMIS::Types::StartOf(bigSplits) + offset,
                    AggMIS::Types::StartOf(aggregation),
                    aggMap->indStart(),
                    aggMap->adjStart(),
                    graph->indStart(),
                    graph->adjStart());
            offset += toDo;
        }
        cudaDeviceSynchronize();
    }
    else {
        // Weighted variants: kernels balance by node weight
        int offset = 0;
        while (smallSplitsCount - offset > 0) {
            int toDo = smallSplitsCount - offset > 64 ?
                    64 : smallSplitsCount - offset;
            if (verbose)
                printf("Calling MakeSplitsWeighted Kernel <<<%d, %d>>>\n",
                        toDo, 64);
            Kernels::MakeSplitsWeighted <<< toDo, 64 >>>
                    (aggMap->Size() + offset,
                    AggMIS::Types::StartOf(smallSplits) + offset,
                    AggMIS::Types::StartOf(aggregation),
                    aggMap->indStart(),
                    aggMap->adjStart(),
                    graph->indStart(),
                    graph->adjStart(),
                    AggMIS::Types::StartOf(nodeWeights));
            offset += toDo;
        }
        // Reset the offset and do the big splits
        offset = 0;
        while (bigSplitsCount - offset > 0) {
            int toDo = bigSplitsCount - offset > 64 ?
                    64 : bigSplitsCount - offset;
            if (verbose)
                printf("Calling MakeSplitsWeighted_Large Kernel <<<%d, %d>>>\n",
                        toDo, 256);
            Kernels::MakeSplitsWeighted_Large <<< toDo, 256 >>>
                    (aggMap->Size() + smallSplitsCount + offset,
                    AggMIS::Types::StartOf(bigSplits) + offset,
                    AggMIS::Types::StartOf(aggregation),
                    aggMap->indStart(),
                    aggMap->adjStart(),
                    graph->indStart(),
                    graph->adjStart(),
                    AggMIS::Types::StartOf(nodeWeights));
            offset += toDo;
        }
        cudaDeviceSynchronize();
    }
    // Reset induced graph after changes
    delete inducedGraph;
    inducedGraph = GraphHelpers::GetInducedGraph(*graph, aggregation);
    // Getting the new part sizes:
    GraphHelpers::getPartSizes(aggregation, partSizes);
    if (nodeWeights.size() > 0)
        GraphHelpers::getPartSizes(aggregation, weightedSizes, nodeWeights);
    // Clean up
    smallSplits.clear();
    bigSplits.clear();
    delete aggMap;
}
// Performs all merges marked in mergesToMake by relabeling the aggregation.
// An inclusive prefix sum over the "is merging" flags gives, for each
// aggregate, how many lower-numbered aggregates disappear, which the kernels
// use to compact the aggregate numbering. When markSplits is true this is
// the merge phase of a merge-split pass: merged aggregates are flagged in
// splitsToMake and the induced graph is NOT rebuilt (MakeSplits will).
void MergeSplitConditionerGPU::MakeMerges(bool markSplits) {
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    if (verbose)
        printf("Starting MakeMerges. MarkSplits: %s InducedGraph Size: %d\n", markSplits ? "True" : "False", (inducedGraph->Size()));
    // Perform a prefix sum using a transform iterator which marks aggregates
    // which are merging, to get the index offset for each aggregate:
    mergeOffsets.resize(mergesToMake.size());
    thrust::inclusive_scan(
        thrust::make_transform_iterator(
            mergesToMake.begin(),
            Functors::NotNegOne()),
        thrust::make_transform_iterator(
            mergesToMake.end(),
            Functors::NotNegOne()),
        mergeOffsets.begin());
    // If this is part of the merge split routine prepare to mark splits
    if (markSplits)
    {
        // Total merges = last prefix-sum entry; that many aggregates vanish
        int lastOffset = mergeOffsets[mergeOffsets.size() - 1];
        int newLength = mergeOffsets.size() - lastOffset;
        thrust::constant_iterator<int> zero(0);
        splitsToMake.assign(zero, zero + newLength);
    }
    // Figuring out launch configuration (one thread per node, ceil-divide)
    int size = aggregation.size();
    int blockSize = 256;
    int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1);
    // Calling the kernel
    if (markSplits) {
        if (verbose)
            printf("Calling MakeMerges_MarkSplits Kernel <<<%d, %d>>>\n",
                    nBlocks, blockSize);
        Kernels::MakeMerges_MarkSplits <<<nBlocks, blockSize>>>
                (size,
                AggMIS::Types::StartOf(mergesToMake),
                AggMIS::Types::StartOf(mergeOffsets),
                AggMIS::Types::StartOf(aggregation),
                AggMIS::Types::StartOf(splitsToMake));
        CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    }
    else {
        if (verbose)
            printf("Calling MakeMerges Kernel <<<%d, %d>>>\n",
                    nBlocks, blockSize);
        Kernels::MakeMerges <<<nBlocks, blockSize>>>
                (size,
                AggMIS::Types::StartOf(mergesToMake),
                AggMIS::Types::StartOf(mergeOffsets),
                AggMIS::Types::StartOf(aggregation));
        // CLEANUP: the error-check result was previously tested by an empty
        // if statement; check it unconditionally like the branch above.
        CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
    }
    // Re-figuring the aggregate adjacency graph (only if not mergeSplitting)
    if (!markSplits)
    {
        merges += mergeOffsets[mergeOffsets.size() - 1];
        delete inducedGraph;
        inducedGraph = GraphHelpers::GetInducedGraph(*graph, aggregation);
        // Getting the new part sizes:
        GraphHelpers::getPartSizes(aggregation, partSizes);
        if (nodeWeights.size() > 0)
            GraphHelpers::getPartSizes(aggregation, weightedSizes, nodeWeights);
    }
    else
        mergeSplits += mergeOffsets[mergeOffsets.size() - 1];
    if (verbose)
        printf("Finished MakeMerges. InducedGraph Size: %d\n", (inducedGraph->Size()));
    CheckCudaError(cudaDeviceSynchronize(), __FILE__, __LINE__);
}
// Runs one merge-split pass toward desiredSize: mark and perform the merges,
// then perform any splits the merge phase flagged. Returns true if any
// merge-splits were marked (and therefore work was done), false otherwise.
bool MergeSplitConditionerGPU::MakeMergeSplits(int desiredSize) {
    // No candidate merge-splits means nothing to do.
    if (!MarkMergeSplits(desiredSize))
        return false;
    MakeMerges(false);
    if (MarkSplits(false))
        MakeSplits();
    return true;
}
}
} | the_stack |
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Determinant threshold below which a ray is treated as parallel to the
// triangle plane in intersect_triangle().
#define EPSILON 0.000001
// 3-vector helpers operating on any indexable type (raw arrays here).
// NOTE(review): these multi-statement macros are not wrapped in
// do { ... } while (0); fine for current call sites, but fragile if ever
// used after a bare `if` without braces.
// Cross product: dest = v1 x v2.
#define CROSS(dest,v1,v2) \
          dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \
          dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \
          dest[2]=v1[0]*v2[1]-v1[1]*v2[0];
// Dot product of two 3-element arrays.
#define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])
// Component-wise subtraction: dest = v1 - v2.
#define SUB(dest,v1,v2) \
          dest[0]=v1[0]-v2[0]; \
          dest[1]=v1[1]-v2[1]; \
          dest[2]=v1[2]-v2[2];
namespace {
// Squared Euclidean length of a 3-vector.
template<typename scalar_t>
static __inline__ __device__ scalar_t mag2(const scalar_t* x) {
    return x[0] * x[0] + x[1] * x[1] + x[2] * x[2];
}
// Euclidean length of a 3-vector.
template<typename scalar_t>
static __inline__ __device__ scalar_t norm(const scalar_t* x) {
    return sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2]);
}
// Squared Euclidean distance between two 3-vectors.
template<typename scalar_t>
static __inline__ __device__ scalar_t dist2(const scalar_t* x, const scalar_t* y) {
    const scalar_t d0 = x[0] - y[0];
    const scalar_t d1 = x[1] - y[1];
    const scalar_t d2 = x[2] - y[2];
    return d0 * d0 + d1 * d1 + d2 * d2;
}
// Euclidean distance between two 3-vectors.
template<typename scalar_t>
static __inline__ __device__ scalar_t dist(const scalar_t* x, const scalar_t* y) {
    const scalar_t d0 = x[0] - y[0];
    const scalar_t d1 = x[1] - y[1];
    const scalar_t d2 = x[2] - y[2];
    return sqrt(d0 * d0 + d1 * d1 + d2 * d2);
}
// Dot product of two 3-vectors.
template<typename scalar_t>
static __inline__ __device__ scalar_t dot(const scalar_t* x, const scalar_t* y) {
    return x[0] * y[0] + x[1] * y[1] + x[2] * y[2];
}
// Distance from point x0 to the segment x1-x2; writes the closest point on
// the segment into r. Note the parameterisation: s12 is the barycentric
// weight of x1, i.e. r = s12*x1 + (1-s12)*x2, with s12 = dot(x2-x0, dx)/m2.
template<typename scalar_t>
static __inline__ __device__ scalar_t point_segment_distance(const scalar_t* x0, const scalar_t* x1, const scalar_t* x2, scalar_t* r)
{
    scalar_t dx[3] = {x2[0]-x1[0], x2[1]-x1[1], x2[2]-x1[2]};
    scalar_t m2 = mag2(dx);
    // find parameter value of closest point on segment.
    // BUGFIX: guard against a degenerate (zero-length) segment, which would
    // otherwise divide by zero and fill r with NaNs; in that case the
    // closest point is the (coincident) endpoint x2.
    scalar_t s12 = 0;
    if (m2 > 0) {
        s12 = (scalar_t) (dot(x2, dx) - dot(x0, dx)) / m2;
        if (s12 < 0) {
            s12 = 0;
        }
        else if (s12 > 1) {
            s12 = 1;
        }
    }
    for (int i=0; i < 3; ++i) {
        r[i] = s12*x1[i] + (1-s12) * x2[i];
    }
    // and find the distance
    return dist(x0, r);
}
/* the original jgt code */
// Moller-Trumbore ray/triangle intersection (non-culling variant).
// Returns 1 on intersection of the *line* orig + t*dir with the triangle,
// 0 otherwise. On success *t is the hit parameter in units of |dir| (it may
// be negative — callers must filter on the sign of t for a half-ray test),
// and (*u, *v) are the barycentric coordinates with respect to vert1/vert2.
template<typename scalar_t>
static __inline__ __device__ int intersect_triangle(
    const scalar_t* orig, const scalar_t* dir,
    const scalar_t* vert0, const scalar_t* vert1,
    const scalar_t* vert2, scalar_t* t, scalar_t *u, scalar_t *v) {
    scalar_t edge1[3], edge2[3], tvec[3], pvec[3], qvec[3];
    scalar_t det,inv_det;

    /* find vectors for two edges sharing vert0 */
    SUB(edge1, vert1, vert0);
    SUB(edge2, vert2, vert0);

    /* begin calculating determinant - also used to calculate U parameter */
    CROSS(pvec, dir, edge2);

    /* if determinant is near zero, ray lies in plane of triangle */
    det = DOT(edge1, pvec);

    if (det > -EPSILON && det < EPSILON)
        return 0;
    inv_det = 1.0 / det;

    /* calculate distance from vert0 to ray origin */
    SUB(tvec, orig, vert0);

    /* calculate U parameter and test bounds */
    *u = DOT(tvec, pvec) * inv_det;
    if (*u < 0.0 || *u > 1.0)
        return 0;

    /* prepare to test V parameter */
    CROSS(qvec, tvec, edge1);

    /* calculate V parameter and test bounds */
    *v = DOT(dir, qvec) * inv_det;
    if (*v < 0.0 || (*u + *v) > 1.0)
        return 0;

    /* calculate t, ray intersects triangle */
    *t = DOT(edge2, qvec) * inv_det;
    return 1;
}
// Tests the line through origin and dest against triangle (v1, v2, v3).
// *t is reported in units of the origin->dest vector, so t in [0, 1] means
// the hit lies between the two points. Returns intersect_triangle's result.
template<typename scalar_t>
static __inline__ __device__ int triangle_ray_intersection(const scalar_t* origin, const scalar_t* dest,
        const scalar_t* v1, const scalar_t* v2, const scalar_t* v3, scalar_t* t) {
    // Direction from origin toward dest (not normalized).
    scalar_t ray[3];
    for (int i = 0; i < 3; ++i) {
        ray[i] = dest[i] - origin[i];
    }
    // Barycentric coordinates of the hit; callers do not use them.
    // http://fileadmin.cs.lth.se/cs/personal/tomas_akenine-moller/code/raytri_tam.pdf
    scalar_t bary_u, bary_v;
    return intersect_triangle(origin, ray, v1, v2, v3, t, &bary_u, &bary_v);
}
// Distance from point x0 to triangle (x1, x2, x3); writes the closest point
// on the triangle into r. First projects onto the triangle's plane via
// barycentric coordinates; if the projection lies outside, the sign pattern
// of the coordinates rules out one edge and the result is the nearer of the
// two remaining edges (clamped via point_segment_distance).
template<typename scalar_t>
static __inline__ __device__ scalar_t point_triangle_distance(const scalar_t* x0, const scalar_t* x1, const scalar_t* x2, const scalar_t* x3, scalar_t* r) {
    // first find barycentric coordinates of closest point on infinite plane
    scalar_t x13[3];
    scalar_t x23[3];
    scalar_t x03[3];
    for (int i=0; i<3; ++i) {
        x13[i] = x1[i] - x3[i];
        x23[i] = x2[i] - x3[i];
        x03[i] = x0[i] - x3[i];
    }
    scalar_t m13 = mag2(x13);
    scalar_t m23 = mag2(x23);
    // CLEANUP: removed unused local m33 = mag2(x03).
    scalar_t d = dot(x13, x23);
    // clamp the Gram determinant away from zero so degenerate (zero-area)
    // triangles do not divide by zero
    scalar_t invdet=1.f/max(m13*m23-d*d,1e-30f);
    scalar_t a = dot(x13, x03);
    scalar_t b = dot(x23, x03);
    // the barycentric coordinates themselves
    scalar_t w23=invdet*(m23*a-d*b);
    scalar_t w31=invdet*(m13*b-d*a);
    scalar_t w12=1-w23-w31;
    if (w23>=0 && w31>=0 && w12>=0){ // if we're inside the triangle
        for (int i=0; i<3; ++i) {
            r[i] = w23*x1[i] + w31*x2[i]+w12*x3[i];
        }
        return dist(x0, r);
    }
    else { // we have to clamp to one of the edges
        scalar_t r1[3] = {0,0,0};
        scalar_t r2[3] = {0,0,0};
        if (w23 > 0) {// this rules out edge 2-3 for us
            scalar_t d1 = point_segment_distance(x0,x1,x2,r1);
            scalar_t d2 = point_segment_distance(x0,x1,x3,r2);
            if (d1 < d2) {
                for (int i=0; i < 3; ++i) {
                    r[i] = r1[i];
                }
                return d1;
            }
            else {
                for (int i=0; i < 3; ++i) {
                    r[i] = r2[i];
                }
                return d2;
            }
        }
        else if (w31 > 0) {// this rules out edge 1-3
            scalar_t d1 = point_segment_distance(x0,x1,x2,r1);
            scalar_t d2 = point_segment_distance(x0,x2,x3,r2);
            if (d1 < d2) {
                for (int i=0; i < 3; ++i) {
                    r[i] = r1[i];
                }
                return d1;
            }
            else {
                for (int i=0; i < 3; ++i) {
                    r[i] = r2[i];
                }
                return d2;
            }
        }
        else {// w12 must be >0, ruling out edge 1-2
            scalar_t d1 = point_segment_distance(x0,x1,x3,r1);
            scalar_t d2 = point_segment_distance(x0,x2,x3,r2);
            if (d1 < d2) {
                for (int i=0; i < 3; ++i) {
                    r[i] = r1[i];
                }
                return d1;
            }
            else {
                for (int i=0; i < 3; ++i) {
                    r[i] = r2[i];
                }
                return d2;
            }
        }
    }
}
// Brute-force distance-field kernel: one thread per voxel of the
// (batch, grid, grid, grid) output. Each thread scans every face, tracking
// the distance to the nearest triangle, and counts crossings of the ray from
// the voxel center toward (-1,-1,-1) to classify inside/outside by parity:
// an even crossing count is treated as outside and phi is zeroed there, so
// phi holds an (unsigned) interior distance.
template<typename scalar_t>
__global__ void sdf_cuda_kernel(
    scalar_t* phi,
    const int32_t* faces,
    const scalar_t* vertices,
    int batch_size,
    int num_faces,
    int num_vertices,
    int grid_size) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= batch_size * grid_size * grid_size * grid_size) {
    return;
  }
  // Decompose the flat index; i is the fastest-varying axis.
  const int i = tid % grid_size;
  const int j = (tid / grid_size) % grid_size;
  const int k = (tid / (grid_size*grid_size)) % grid_size;
  const int bn = tid / (grid_size*grid_size*grid_size);
  // NOTE(review): dx = 2/(grid_size-1) combined with the (i + 0.5) offset
  // places samples from -1+dx/2 up past +1 — confirm the intended grid.
  const scalar_t dx = 2./(grid_size-1);
  const scalar_t center_x = -1 + (i + 0.5) * dx;
  const scalar_t center_y = -1 + (j + 0.5) * dx;
  const scalar_t center_z = -1 + (k + 0.5) * dx;
  const scalar_t center[3] = {center_x, center_y, center_z};
  int num_intersect = 0;
  scalar_t min_distance=1000;  // sentinel larger than any distance in the grid
  for (int f = 0; f < num_faces; ++f) {
    // Faces are shared across the batch; vertex positions are per-batch.
    const int32_t* face = &faces[3*f];
    const int v1i = face[0];
    const int v2i = face[1];
    const int v3i = face[2];
    const scalar_t* v1 = &vertices[bn*num_vertices*3 + v1i*3];
    const scalar_t* v2 = &vertices[bn*num_vertices*3 + v2i*3];
    const scalar_t* v3 = &vertices[bn*num_vertices*3 + v3i*3];
    scalar_t closest_point[3];
    // Track the nearest triangle over all faces.
    point_triangle_distance(center, v1, v2, v3, closest_point);
    scalar_t distance = dist(center, closest_point);
    if (distance < min_distance) {
      min_distance = distance;
    }
    // Parity ray toward the grid corner (-1,-1,-1).
    // NOTE(review): only t >= 0 is required, so hits beyond the corner
    // (t > 1) are also counted — confirm this cannot over-count.
    scalar_t origin[3] = {-1.0, -1.0, -1.0};
    bool intersect = triangle_ray_intersection(center, origin, v1, v2, v3, &distance);
    if (intersect && distance >= 0) {
      num_intersect++;
    }
  }
  // Even crossing count => outside the surface => zero out the distance.
  if (num_intersect % 2 == 0) {
    min_distance = 0.;
  }
  // if (num_intersect % 2 == 1) {
  //   min_distance *= -1;
  // }
  // phi[tid] = (scalar_t) num_intersect;
  // phi[bn*grid_size*grid_size*grid_size + k*grid_size*grid_size + j*grid_size + i] = min_distance;
  phi[tid] = min_distance;
  // if (num_intersect % 2 == 0) {
  //   phi[tid] = 0;
  // }
}
} // namespace
// Host launcher for sdf_cuda_kernel. phi must be a pre-allocated
// (batch, grid, grid, grid) floating tensor; faces is (num_faces, 3) int32;
// vertices is (batch, num_vertices, 3) of the same scalar type as phi.
// Fills phi in place and returns it.
at::Tensor sdf_cuda(
    at::Tensor phi,
    at::Tensor faces,
    at::Tensor vertices) {
  const auto batch_size = phi.size(0);
  const auto grid_size = phi.size(1);
  const auto num_faces = faces.size(0);
  const auto num_vertices = vertices.size(1);
  const int threads = 512;
  // BUGFIX: the block count was computed with truncating division, so the
  // tail voxels were never processed whenever the total voxel count is not a
  // multiple of `threads`. Round up instead; the kernel bounds-checks tid,
  // so over-launching is safe.
  const int64_t total = batch_size * grid_size * grid_size * grid_size;
  const dim3 blocks ((unsigned int)((total + threads - 1) / threads));
  AT_DISPATCH_FLOATING_TYPES(phi.type(), "sdf_cuda", ([&] {
    sdf_cuda_kernel<scalar_t><<<blocks, threads>>>(
        phi.data<scalar_t>(),
        faces.data<int32_t>(),
        vertices.data<scalar_t>(),
        batch_size,
        num_faces,
        num_vertices,
        grid_size);
  }));
  // Surface launch errors (async execution errors appear at the next sync).
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("Error in sdf: %s\n", cudaGetErrorString(err));
  return phi;
}
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
/*! \file TwoStepRATTLELangevinGPU.cuh
\brief Declares GPU kernel code for RATTLELangevin dynamics on the GPU. Used by
TwoStepRATTLELangevinGPU.
*/
#pragma once
#include "hoomd/HOOMDMath.h"
#include "hoomd/ParticleData.cuh"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
using namespace hoomd;
#include <assert.h>
#include <type_traits>
#ifndef __TWO_STEP_RATTLE_LANGEVIN_GPU_CUH__
#define __TWO_STEP_RATTLE_LANGEVIN_GPU_CUH__
//! Temporary holder struct to limit the number of arguments passed to
//! gpu_rattle_langevin_step_two()
struct rattle_langevin_step_two_args
    {
    Scalar* d_gamma; //!< Device array listing per-type gammas
    size_t n_types;  //!< Number of types in \a d_gamma
    bool use_alpha;  //!< Set to true to scale diameters by alpha to get gamma
    Scalar alpha;    //!< Scale factor to convert diameter to alpha
    Scalar T;        //!< Current temperature
    Scalar tolerance; //!< Convergence tolerance for the RATTLE velocity iteration
    uint64_t timestep; //!< Current timestep
    uint16_t seed;     //!< User chosen random number seed
    Scalar* d_sum_bdenergy;         //!< Energy transfer sum from bd thermal reservoir
    Scalar* d_partial_sum_bdenergy; //!< Array used for summation
    unsigned int block_size;        //!< Block size
    unsigned int num_blocks;        //!< Number of blocks
    bool noiseless_t; //!< If set true, there will be no translational noise (random force)
    bool noiseless_r; //!< If set true, there will be no rotational noise (random torque)
    bool tally; //!< Set to true if the bd thermal reservoir energy tally is to be performed
    };
hipError_t
gpu_rattle_langevin_angular_step_two(const Scalar4* d_pos,
Scalar4* d_orientation,
Scalar4* d_angmom,
const Scalar3* d_inertia,
Scalar4* d_net_torque,
const unsigned int* d_group_members,
const Scalar3* d_gamma_r,
const unsigned int* d_tag,
unsigned int group_size,
const rattle_langevin_step_two_args& rattle_langevin_args,
Scalar deltaT,
unsigned int D,
Scalar scale);
__global__ void gpu_rattle_bdtally_reduce_partial_sum_kernel(Scalar* d_sum,
Scalar* d_partial_sum,
unsigned int num_blocks);
template<class Manifold>
hipError_t gpu_rattle_langevin_step_two(const Scalar4* d_pos,
Scalar4* d_vel,
Scalar3* d_accel,
const Scalar* d_diameter,
const unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int group_size,
Scalar4* d_net_force,
const rattle_langevin_step_two_args& rattle_langevin_args,
Manifold manifold,
Scalar deltaT,
unsigned int D);
#ifdef __HIPCC__
/*! \file TwoStepRATTLELangevinGPU.cu
\brief Defines GPU kernel code for RATTLELangevin integration on the GPU. Used by
TwoStepRATTLELangevinGPU.
*/
//! Takes the second half-step forward in the RATTLELangevin integration on a group of particles
//! with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_alpha If true, gamma = alpha * diameter
\param alpha Scale factor to convert diameter to alpha (when use_alpha is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param tally Boolean indicating whether energy tally is performed or not
\param d_partial_sum_bdenergy Placeholder for the partial sum
This kernel is implemented in a very similar manner to gpu_nve_step_two_kernel(), see it for
design details.
This kernel will tally the energy transfer from the bd thermal reservoir and the particle system
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
//! Second RATTLE-Langevin half-step: adds the Brownian (drag + random) force,
//! projects the velocity update onto the manifold's tangent space via an
//! iterative Lagrange-multiplier (RATTLE) solve, and optionally tallies the
//! energy exchanged with the thermal reservoir (block-level shared-memory
//! reduction into d_partial_sum_bdenergy). Must be launched with dynamic
//! shared memory of max(n_types, blockDim.x) * sizeof(Scalar) so the gamma
//! table and the tally reduction both fit.
template<class Manifold>
__global__ void gpu_rattle_langevin_step_two_kernel(const Scalar4* d_pos,
                                                    Scalar4* d_vel,
                                                    Scalar3* d_accel,
                                                    const Scalar* d_diameter,
                                                    const unsigned int* d_tag,
                                                    unsigned int* d_group_members,
                                                    unsigned int group_size,
                                                    Scalar4* d_net_force,
                                                    Scalar* d_gamma,
                                                    size_t n_types,
                                                    bool use_alpha,
                                                    Scalar alpha,
                                                    uint64_t timestep,
                                                    uint16_t seed,
                                                    Scalar T,
                                                    Scalar tolerance,
                                                    bool noiseless_t,
                                                    Manifold manifold,
                                                    Scalar deltaT,
                                                    unsigned int D,
                                                    bool tally,
                                                    Scalar* d_partial_sum_bdenergy)
    {
    HIP_DYNAMIC_SHARED(char, s_data)
    Scalar* s_gammas = (Scalar*)s_data;

    if (!use_alpha)
        {
        // read in the gammas (1 dimensional array)
        for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
            {
            if (cur_offset + threadIdx.x < n_types)
                s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
            }
        __syncthreads();
        }

    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;

    Scalar bd_energy_transfer = 0;

    if (group_idx < group_size)
        {
        unsigned int idx = d_group_members[group_idx];

        // ******** first, calculate the additional BD force
        // read the current particle velocity (MEM TRANSFER: 16 bytes)
        Scalar4 vel = d_vel[idx];
        // read in the tag of our particle. (MEM TRANSFER: 4 bytes)
        unsigned int ptag = d_tag[idx];

        // calculate the drag coefficient
        Scalar gamma;
        if (use_alpha)
            {
            // gamma scales with particle diameter
            gamma = alpha * d_diameter[idx];
            }
        else
            {
            // gamma is looked up by type; only the fourth component of the
            // position Scalar4 (where type is stored) is needed
            unsigned int typ = __scalar_as_int(d_pos[idx].w);
            gamma = s_gammas[typ];
            }

        // read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
        Scalar4 net_force = d_net_force[idx];
        Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
        Scalar3 pos = make_scalar3(d_pos[idx].x, d_pos[idx].y, d_pos[idx].z);

        // Initialize the Random Number Generator and generate the 3 random numbers
        RandomGenerator rng(hoomd::Seed(RNGIdentifier::TwoStepLangevin, timestep, seed),
                            hoomd::Counter(ptag));

        // manifold normal at the particle position; used both to project the
        // random force into the tangent plane and as the RATTLE constraint
        // gradient below
        Scalar3 normal = manifold.derivative(pos);
        Scalar ndotn = dot(normal, normal);

        Scalar randomx, randomy, randomz, coeff;

        if (T > 0)
            {
            // CLEANUP: a dead local `Scalar3 bd_force = make_scalar3(0,0,0)`
            // that shadowed nothing and was never read has been removed here.
            UniformDistribution<Scalar> uniform(-1, 1);
            randomx = uniform(rng);
            randomy = uniform(rng);
            randomz = uniform(rng);
            // magnitude from fluctuation-dissipation for uniform noise
            // NOTE(review): sqrtf truncates to float even in double-precision
            // builds — confirm this matches the CPU implementation.
            coeff = sqrtf(Scalar(6.0) * gamma * T / deltaT);
            if (noiseless_t)
                coeff = Scalar(0.0);

            // remove the component of the random force along the normal so
            // the noise acts only in the tangent plane of the manifold
            Scalar proj_x = normal.x / fast::sqrt(ndotn);
            Scalar proj_y = normal.y / fast::sqrt(ndotn);
            Scalar proj_z = normal.z / fast::sqrt(ndotn);
            Scalar proj_r = randomx * proj_x + randomy * proj_y + randomz * proj_z;

            randomx = randomx - proj_r * proj_x;
            randomy = randomy - proj_r * proj_y;
            randomz = randomz - proj_r * proj_z;
            }
        else
            {
            randomx = 0;
            randomy = 0;
            randomz = 0;
            coeff = 0;
            }

        // Brownian force = random kick - viscous drag
        Scalar3 bd_force;
        bd_force.x = randomx * coeff - gamma * vel.x;
        bd_force.y = randomy * coeff - gamma * vel.y;
        bd_force.z = randomz * coeff - gamma * vel.z;

        // MEM TRANSFER: 4 bytes   FLOPS: 3
        Scalar mass = vel.w;
        Scalar minv = Scalar(1.0) / mass;
        accel.x = (accel.x + bd_force.x) * minv;
        accel.y = (accel.y + bd_force.y) * minv;
        accel.z = (accel.z + bd_force.z) * minv;

        // v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT
        // unconstrained velocity update (FLOPS: 6)
        Scalar3 next_vel;
        next_vel.x = vel.x + Scalar(1.0 / 2.0) * deltaT * accel.x;
        next_vel.y = vel.y + Scalar(1.0 / 2.0) * deltaT * accel.y;
        next_vel.z = vel.z + Scalar(1.0 / 2.0) * deltaT * accel.z;

        // RATTLE iteration: solve for the Lagrange multiplier mu that keeps
        // the velocity in the tangent plane (dot(normal, v) == 0)
        Scalar mu = 0;
        Scalar inv_alpha = -Scalar(1.0 / 2.0) * deltaT;
        inv_alpha = Scalar(1.0) / inv_alpha;

        Scalar3 residual;
        Scalar resid;
        Scalar3 vel_dot;

        const unsigned int maxiteration = 10;
        unsigned int iteration = 0;
        do
            {
            iteration++;
            vel_dot.x = accel.x - mu * minv * normal.x;
            vel_dot.y = accel.y - mu * minv * normal.y;
            vel_dot.z = accel.z - mu * minv * normal.z;

            residual.x = vel.x - next_vel.x + Scalar(1.0 / 2.0) * deltaT * vel_dot.x;
            residual.y = vel.y - next_vel.y + Scalar(1.0 / 2.0) * deltaT * vel_dot.y;
            residual.z = vel.z - next_vel.z + Scalar(1.0 / 2.0) * deltaT * vel_dot.z;
            resid = dot(normal, next_vel) * minv;

            Scalar ndotr = dot(normal, residual);
            Scalar beta = (mass * resid + ndotr) / ndotn;
            next_vel.x = next_vel.x - normal.x * beta + residual.x;
            next_vel.y = next_vel.y - normal.y * beta + residual.y;
            next_vel.z = next_vel.z - normal.z * beta + residual.z;
            mu = mu - mass * beta * inv_alpha;

            // convergence measure: max of constraint violation and step norm
            resid = fabs(resid);
            Scalar vec_norm = sqrt(dot(residual, residual));
            if (vec_norm > resid)
                resid = vec_norm;
            } while (resid * mass > tolerance && iteration < maxiteration);

        vel.x += (Scalar(1.0) / Scalar(2.0)) * (accel.x - mu * minv * normal.x) * deltaT;
        vel.y += (Scalar(1.0) / Scalar(2.0)) * (accel.y - mu * minv * normal.y) * deltaT;
        vel.z += (Scalar(1.0) / Scalar(2.0)) * (accel.z - mu * minv * normal.z) * deltaT;

        // tally the energy transfer from the bd thermal reservoir to the particles (FLOPS: 6)
        bd_energy_transfer = bd_force.x * vel.x + bd_force.y * vel.y + bd_force.z * vel.z;

        // write out data (MEM TRANSFER: 32 bytes)
        d_vel[idx] = vel;
        // since we calculate the acceleration, we need to write it for the next step
        d_accel[idx] = accel;
        }

    Scalar* bdtally_sdata = (Scalar*)&s_data[0];
    if (tally)
        {
        // don't overwrite values in the s_gammas array with bd_energy transfer
        __syncthreads();
        bdtally_sdata[threadIdx.x] = bd_energy_transfer;
        __syncthreads();

        // reduce the sum in parallel (tree reduction over the block)
        int offs = blockDim.x >> 1;
        while (offs > 0)
            {
            if (threadIdx.x < offs)
                bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
            offs >>= 1;
            __syncthreads();
            }

        // write out our partial sum
        if (threadIdx.x == 0)
            {
            d_partial_sum_bdenergy[blockIdx.x] = bdtally_sdata[0];
            }
        }
    }
/*! \param d_pos array of particle positions and types
    \param d_vel array of particle velocities and masses
    \param d_accel array of particle accelerations
    \param d_diameter array of particle diameters
    \param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
    \param group_size Number of members in the group
    \param d_net_force Net force on each particle
    \param rattle_langevin_args Collected arguments for gpu_rattle_langevin_step_two_kernel() and
   gpu_rattle_langevin_angular_step_two()
    \param manifold Manifold the particles are constrained to
    \param deltaT Amount of real time to step forward in one time step
    \param D Dimensionality of the system

    This is just a driver for gpu_rattle_langevin_step_two_kernel(), see it for details.
*/
template<class Manifold>
hipError_t gpu_rattle_langevin_step_two(const Scalar4* d_pos,
                                        Scalar4* d_vel,
                                        Scalar3* d_accel,
                                        const Scalar* d_diameter,
                                        const unsigned int* d_tag,
                                        unsigned int* d_group_members,
                                        unsigned int group_size,
                                        Scalar4* d_net_force,
                                        const rattle_langevin_step_two_args& rattle_langevin_args,
                                        Manifold manifold,
                                        Scalar deltaT,
                                        unsigned int D)
    {
    // setup the grid to run the kernel
    dim3 grid(rattle_langevin_args.num_blocks, 1, 1);
    dim3 grid1(1, 1, 1);
    dim3 threads(rattle_langevin_args.block_size, 1, 1);
    dim3 threads1(256, 1, 1);

    // run the kernel; dynamic shared memory must hold both the per-type
    // gamma table and the blockDim-sized tally reduction buffer, hence max()
    hipLaunchKernelGGL((gpu_rattle_langevin_step_two_kernel<Manifold>),
                       grid,
                       threads,
                       max((unsigned int)(sizeof(Scalar) * rattle_langevin_args.n_types),
                           (unsigned int)(rattle_langevin_args.block_size * sizeof(Scalar))),
                       0,
                       d_pos,
                       d_vel,
                       d_accel,
                       d_diameter,
                       d_tag,
                       d_group_members,
                       group_size,
                       d_net_force,
                       rattle_langevin_args.d_gamma,
                       rattle_langevin_args.n_types,
                       rattle_langevin_args.use_alpha,
                       rattle_langevin_args.alpha,
                       rattle_langevin_args.timestep,
                       rattle_langevin_args.seed,
                       rattle_langevin_args.T,
                       rattle_langevin_args.tolerance,
                       rattle_langevin_args.noiseless_t,
                       manifold,
                       deltaT,
                       D,
                       rattle_langevin_args.tally,
                       rattle_langevin_args.d_partial_sum_bdenergy);

    // run the summation kernel to reduce the per-block partial sums
    // NOTE(review): this launch uses 256 threads (threads1) but sizes shared
    // memory by rattle_langevin_args.block_size — confirm the reduce kernel
    // does not read beyond block_size entries when block_size < 256.
    if (rattle_langevin_args.tally)
        hipLaunchKernelGGL((gpu_rattle_bdtally_reduce_partial_sum_kernel),
                           dim3(grid1),
                           dim3(threads1),
                           rattle_langevin_args.block_size * sizeof(Scalar),
                           0,
                           &rattle_langevin_args.d_sum_bdenergy[0],
                           rattle_langevin_args.d_partial_sum_bdenergy,
                           rattle_langevin_args.num_blocks);

    return hipSuccess;
    }
#endif
#endif //__TWO_STEP_RATTLE_LANGEVIN_GPU_CUH__
//
// Matrix multiplication: C = A * B.
// Host code.
//
// This sample implements matrix multiplication as described in Chapter 3
// of the programming guide and uses the CUBLAS library to demonstrate
// the best performance.
// SOME PRECAUTIONS:
// IF WE WANT TO CALCULATE ROW-MAJOR MATRIX MULTIPLY C = A * B,
// WE JUST NEED CALL CUBLAS API IN A REVERSE ORDER: cublasSgemm(B, A)!
// The reason is explained as follows:
// CUBLAS library uses column-major storage, but C/C++ use row-major storage.
// When passing the matrix pointer to CUBLAS, the memory layout alters from
// row-major to column-major, which is equivalent to an implicit transpose.
// In the case of row-major C/C++ matrix A, B, and a simple matrix multiplication
// C = A * B, we can't use the input order like cublasSgemm(A, B) because of
// implicit transpose. The actual result of cublasSgemm(A, B) is A(T) * B(T).
// If col(A(T)) != row(B(T)), equal to row(A) != col(B), A(T) and B(T) are not
// multipliable. Moreover, even if A(T) and B(T) are multipliable, the result C
// is a column-based cublas matrix, which means C(T) in C/C++, we need extra
// transpose code to convert it to a row-based C/C++ matrix.
// To solve the problem, let's consider our desired result C, a row-major matrix.
// In cublas format, it is C(T) actually (because of the implicit transpose).
// C = A * B, so C(T) = (A * B) (T) = B(T) * A(T). Cublas matrices B(T) and A(T)
// happen to be C/C++ matrices B and A (still because of the implicit transpose)!
// We don't need extra transpose code, we only need alter the input order!
//
// CUBLAS provides high-performance matrix multiplication.
// See also:
// V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
// in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
// Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
//
// Utilities and system includes
#include <assert.h>
#include "helper_string.h" // helper for shared functions common to CUDA Samples
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <iostream>
// CUDA and CUBLAS functions
// #include <helper_functions.h>
#include "helper_cuda.h"
// Portable min/max fallbacks. Each argument is fully parenthesized so the
// macros expand correctly for compound expressions (e.g. min(a + b, c ? x : y));
// the originals left arguments bare, which breaks for low-precedence operands.
// NOTE: arguments are still evaluated twice — avoid side effects in a and b.
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif
// Dimensions of the three matrices in C = A * B:
// uiWA/uiHA = width/height of A, uiWB/uiHB = width/height of B,
// uiWC/uiHC = width/height of C. A valid multiply requires uiWA == uiHB,
// uiHC == uiHA and uiWC == uiWB (checked by the disabled initializeCUDA()).
typedef struct _matrixSize // Optional Command-line multiplier for matrix sizes
{
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
} sMatrixSize;
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on CPU
//! C = A * B
//! @param C reference data, computed but preallocated
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param hA height of matrix A
//! @param wB width of matrix B
////////////////////////////////////////////////////////////////////////////////
// void
// matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
// {
// for (unsigned int i = 0; i < hA; ++i)
// for (unsigned int j = 0; j < wB; ++j)
// {
// double sum = 0;
// for (unsigned int k = 0; k < wA; ++k)
// {
// double a = A[i * wA + k];
// double b = B[k * wB + j];
// sum += a * b;
// }
// C[i * wB + j] = (float)sum;
// }
// }
// Fill a pre-allocated buffer with uniform pseudo-random floats in [0, 1].
// Uses the C library rand(); call srand() beforehand for reproducible data.
void randomInit(float *data, int size)
{
    float *end = data + size;
    for (float *p = data; p != end; ++p)
    {
        *p = (float)rand() / (float)RAND_MAX;
    }
}
// void printDiff(float *data1, float *data2, int width, int height, int iListLength, float fListTol)
// {
// printf("Listing first %d Differences > %.6f...\n", iListLength, fListTol);
// int i,j,k;
// int error_count=0;
// for (j = 0; j < height; j++)
// {
// if (error_count < iListLength)
// {
// printf("\n Row %d:\n", j);
// }
// for (i = 0; i < width; i++)
// {
// k = j * width + i;
// float fDiff = fabs(data1[k] - data2[k]);
// if (fDiff > fListTol)
// {
// if (error_count < iListLength)
// {
// printf(" Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff);
// }
// error_count++;
// }
// }
// }
// printf(" \n Total Errors = %d\n", error_count);
// }
// void initializeCUDA(int argc, char **argv, int &devID, int &iSizeMultiple, sMatrixSize &matrix_size)
// {
// // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
// cudaError_t error;
// devID = 0;
// // devID = findCudaDevice(argc, (const char **)argv);
// // if (checkCmdLineFlag(argc, (const char **)argv, "sizemult"))
// // {
// // iSizeMultiple = getCmdLineArgumentInt(argc, (const char **)argv, "sizemult");
// // }
// iSizeMultiple = min(iSizeMultiple, 10);
// iSizeMultiple = max(iSizeMultiple, 1);
// cudaDeviceProp deviceProp;
// error = cudaGetDeviceProperties(&deviceProp, devID);
// if (error != cudaSuccess)
// {
// printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
// printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
// int block_size = 32;
// matrix_size.uiWA = 3 * block_size * iSizeMultiple;
// matrix_size.uiHA = 4 * block_size * iSizeMultiple;
// matrix_size.uiWB = 2 * block_size * iSizeMultiple;
// matrix_size.uiHB = 3 * block_size * iSizeMultiple;
// matrix_size.uiWC = 2 * block_size * iSizeMultiple;
// matrix_size.uiHC = 4 * block_size * iSizeMultiple;
// printf("MatrixA(%u,%u), MatrixB(%u,%u), MatrixC(%u,%u)\n",
// matrix_size.uiHA, matrix_size.uiWA,
// matrix_size.uiHB, matrix_size.uiWB,
// matrix_size.uiHC, matrix_size.uiWC);
// if( matrix_size.uiWA != matrix_size.uiHB ||
// matrix_size.uiHA != matrix_size.uiHC ||
// matrix_size.uiWB != matrix_size.uiWC)
// {
// printf("ERROR: Matrix sizes do not match!\n");
// exit(-1);
// }
// }
////////////////////////////////////////////////////////////////////////////////
//! Run a matrix-multiply benchmark (C = A * B) using CUBLAS and report GFlop/s.
//! @param argc/argv  command line; argv[1] == "2" selects 100 iterations,
//!                   argv[1] == "3" selects 1000 (default 10)
//! @param devID      CUDA device to validate/query
//! @param matrix_size dimensions of A, B and C (caller guarantees uiWA == uiHB)
//! @return EXIT_SUCCESS on completion (CPU verification is disabled)
//! Fixes vs. original: control no longer falls off the end of a non-void
//! function (UB), h_CUBLAS is freed, and the timing events are destroyed.
////////////////////////////////////////////////////////////////////////////////
int matrixMultiply(int argc, char **argv, int devID, sMatrixSize &matrix_size)
{
    cudaDeviceProp deviceProp;
    // Validates devID; checkCudaErrors aborts on an invalid device.
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));

    // Fixed seed so every run multiplies the same data.
    srand(2006);

    // Host matrices A and B, filled with random values.
    unsigned int size_A = matrix_size.uiWA * matrix_size.uiHA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = matrix_size.uiWB * matrix_size.uiHB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    // Device buffers plus a host buffer to receive the CUBLAS result.
    float *d_A, *d_B, *d_C;
    unsigned int size_C = matrix_size.uiWC * matrix_size.uiHC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float *h_CUBLAS = (float *)malloc(mem_size_C);

    checkCudaErrors(cudaMalloc((void **)&d_A, mem_size_A));
    checkCudaErrors(cudaMalloc((void **)&d_B, mem_size_B));
    checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMalloc((void **)&d_C, mem_size_C));

    printf("Computing result using CUBLAS...");

    // Iteration count selectable from the command line.
    int nIter = 10;
    if (argc > 1)
    {
        if (strcmp(argv[1], "2") == 0)
        {
            nIter = 100;
        }
        else if (strcmp(argv[1], "3") == 0)
        {
            nIter = 1000;
        }
    }

    // CUBLAS version 2.0
    {
        const float alpha = 1.0f;
        const float beta = 0.0f;
        cublasHandle_t handle;
        cudaEvent_t start, stop;

        checkCudaErrors(cublasCreate(&handle));
        checkCudaErrors(cudaEventCreate(&start));
        checkCudaErrors(cudaEventCreate(&stop));

        // CUBLAS is column-major, so compute C(T) = B(T) * A(T) by passing
        // (B, A); the buffer d_C then holds the row-major C = A * B (see the
        // explanation in the file header). Warm-up launch keeps one-time
        // setup costs out of the timed loop.
        checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));

        checkCudaErrors(cudaEventRecord(start, NULL));
        for (int j = 0; j < nIter; j++)
        {
            checkCudaErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.uiWB, matrix_size.uiHA, matrix_size.uiWA, &alpha, d_B, matrix_size.uiWB, d_A, matrix_size.uiWA, &beta, d_C, matrix_size.uiWB));
        }
        printf("done.\n");

        checkCudaErrors(cudaEventRecord(stop, NULL));
        checkCudaErrors(cudaEventSynchronize(stop));

        float msecTotal = 0.0f;
        checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

        // Compute and print the performance (2*M*N*K flops per multiply).
        float msecPerMatrixMul = msecTotal / nIter;
        double flopsPerMatrixMul = 2.0 * (double)matrix_size.uiHC * (double)matrix_size.uiWC * (double)matrix_size.uiHB;
        double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
        printf(
            "Performance= %.2f GFlop/s, Time= %.6f msec, Size= %.0f Ops\n",
            gigaFlops,
            msecPerMatrixMul,
            flopsPerMatrixMul);

        // Copy result from device to host (kept for optional verification).
        checkCudaErrors(cudaMemcpy(h_CUBLAS, d_C, mem_size_C, cudaMemcpyDeviceToHost));

        // Release timing events and the handle (events leaked previously).
        checkCudaErrors(cudaEventDestroy(start));
        checkCudaErrors(cudaEventDestroy(stop));
        checkCudaErrors(cublasDestroy(handle));
    }

    // CPU reference verification (matrixMulCPU / sdkCompareL2fe) is disabled
    // in this build; see the commented-out helpers above.

    // Clean up memory (h_CUBLAS was previously leaked).
    free(h_A);
    free(h_B);
    free(h_CUBLAS);
    checkCudaErrors(cudaFree(d_A));
    checkCudaErrors(cudaFree(d_B));
    checkCudaErrors(cudaFree(d_C));

    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// Program main: benchmark square GEMMs of increasing size on device 0.
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    printf("[Matrix Multiply CUBLAS] - Starting...\n");

    const int devID = 0; //, sizeMult = 5;
    // Square problem sizes to benchmark, one run per entry.
    // NOTE(review): 2038 looks like it may be a typo for 2048 — confirm.
    const unsigned int shapes[7] = {32, 64, 128, 256, 512, 1024, 2038};

    for (int idx = 0; idx < 7; ++idx)
    {
        const unsigned int n = shapes[idx];
        sMatrixSize matrix_size{n, n, n, n, n, n};
        // initializeCUDA(argc, argv, devID, sizeMult, matrix_size);
        (void)matrixMultiply(argc, argv, devID, matrix_size);
    }

    return 0;
}
#include "../kernels/transformerKernels.h"
#include "../kernels/transformerKernels_int8.h"
#include "../kernels/embKernels_int8.h"
#include "cublas_helper.h"
/**
@file
QuantTransformer decoder, composed by gemm lib and
custom cuda kernel function
*/
namespace lightseq {
namespace cuda {
/**
Construct a QuantDecoder bound to pre-allocated device inputs/outputs.
Only host-side bookkeeping happens here (beam log-prob table, per-step
length-normalization factors, cublasLt handle); all device buffers are
allocated later by init_buffer().
*/
template <OperationType OpType_>
QuantDecoder<OpType_>::QuantDecoder(int max_batch_size,
const int* p_d_padding_mask,
const _DataType* p_d_encoder_output,
int* p_d_result,
QuantTransformerWeight<OpType_>& tw,
cudaStream_t stream, cublasHandle_t hd,
bool output_topk, const int* p_d_lang_id)
: _max_batch_size(max_batch_size),
_max_thread_per_block(1024),
_h_can_num_batch(0),
_cub_sort_buffer_bytes(max_batch_size * tw._beam_size *
tw._trg_vocab_size * sizeof(_DataType)),
_p_d_padding_mask(p_d_padding_mask),
_p_d_encoder_output(p_d_encoder_output),
_p_d_result(p_d_result),
_p_d_trg_emb_wei(tw.get_trg_emb_wei()),
_p_d_dec_wei(tw.get_dec_wei()),
_tw(tw),
_stream(stream),
_hd(hd),
_output_topk(output_topk),
_p_d_lang_id(p_d_lang_id), // source token id
_layer_size_encdec_k(max_batch_size * tw._max_step * tw._hidden_size),
_layer_size_self_k(max_batch_size * tw._max_step * tw._hidden_size *
tw._beam_size),
_type_one(1.f),
_type_zero(0.f),
_fzero(0.f),
_trg_emb_clip_max(tw.get_trg_emb_clip_max()),
_output_ln_clip_max(tw.get_output_ln_clip_max()),
_logits_clip_max(tw.get_logits_clip_max()),
_encode_output_project_kernel_kv_clip_max(
tw.get_encode_output_project_kernel_kv_clip_max()),
_dec_clip_max(tw.get_dec_clip_max()),
_ione((int32_t)1),
_izero((int32_t)0),
_atten_scaler(sqrt(1.f / tw._dim_per_head)),
_h_alive_seq_probs(max_batch_size * tw._beam_size,
min_log_probability / 2),
_h_length_norm(tw._max_step, 1.f),
_h_unfinished(1) {
// The first beam of every batch entry starts at log-prob 0; the remaining
// beams keep min_log_probability / 2 (set in the initializer list above).
for (int i = 0; i < _h_alive_seq_probs.size(); i += tw._beam_size) {
_h_alive_seq_probs[i] = 0.f;
}
// Precompute the per-step length-normalization factors when a non-negative
// length penalty is configured; otherwise they stay at 1.
if (tw._length_penalty >= 0) {
for (int i = 0; i < _h_length_norm.size(); i++) {
_h_length_norm[i] = length_norm(i + 1, tw._length_penalty);
}
}
CHECK_GPU_ERROR(cublasLtCreate(&_cublas_lt_handle));
return;
}
/**
Init the GPU memory pointer which point to
the memory buffer needed by decoder.
These buffer are used during custom cuda kernel function,
find the corresponding function to see how these buffer are used.
Also quantizes the embedding and per-layer decoder weights into int8
device buffers and uploads the remaining fp weights.
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::init_buffer() {
std::cout << "decoder buffer init start" << std::endl;
// malloc activations and cache
// One K and one V enc-dec attention buffer per decoder layer, carved out of
// a single allocation.
int temp_size = _tw._n_dec_layer * 2 * _layer_size_encdec_k;
_DataType *temp, *sliding_temp;
CHECK_GPU_ERROR(cudaMalloc(&temp, temp_size * sizeof(_DataType)));
sliding_temp = temp;
for (int i = 0; i < _tw._n_dec_layer; i++) {
// encoder ouput after project, the "key" of enc_dec attention
_p_d_encdec_k_bgeem.push_back(sliding_temp);
sliding_temp += _layer_size_encdec_k;
}
for (int i = 0; i < _tw._n_dec_layer; i++) {
// encoder ouput after project, the "value" of enc_dec attention
_p_d_encdec_v_bgeem.push_back(sliding_temp);
sliding_temp += _layer_size_encdec_k;
}
// Per-step activations: current query, a scratch query buffer, attention
// score correlation, and the beam-search candidate score/index arrays.
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_cur_step_query,
_max_batch_size * _tw._beam_size * _tw._hidden_size * sizeof(_DataType)));
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_query_buf1,
_max_batch_size * _tw._beam_size * _tw._hidden_size * sizeof(_DataType)));
CHECK_GPU_ERROR(cudaMalloc(&_p_d_c, _max_batch_size * _tw._head_num *
_tw._beam_size * _tw._max_step *
sizeof(_DataType)));
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_can_score,
_max_batch_size * _tw._beam_size * _tw._trg_vocab_size * sizeof(float)));
CHECK_GPU_ERROR(cudaMalloc(&_p_d_alive_seq_probs,
_max_batch_size * _tw._beam_size * sizeof(float)));
CHECK_GPU_ERROR(cudaMalloc(&_p_d_alive_seq_score,
_max_batch_size * _tw._beam_size * sizeof(float)));
CHECK_GPU_ERROR(cudaMalloc(&_p_d_alive_seq, _max_batch_size * _tw._beam_size *
_tw._max_step * sizeof(int)));
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_alive_seq_buf,
_max_batch_size * _tw._beam_size * _tw._max_step * sizeof(int)));
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_can_idx,
_max_batch_size * _tw._beam_size * _tw._trg_vocab_size * sizeof(int)));
CHECK_GPU_ERROR(cudaMalloc(
&_p_d_can_num, (_max_batch_size * _tw._beam_size + 1) * sizeof(int)));
// Seed both alive-sequence buffers with the target start token id.
std::vector<int> start_id_vec(
_max_batch_size * _tw._beam_size * _tw._max_step, _tw._start_id);
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_alive_seq, start_id_vec.data(),
sizeof(int) * start_id_vec.size(),
cudaMemcpyHostToDevice, _stream));
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_alive_seq_buf, start_id_vec.data(),
sizeof(int) * start_id_vec.size(),
cudaMemcpyHostToDevice, _stream));
// Sampling state: per-batch curand states initialized on device.
CHECK_GPU_ERROR(cudaMalloc(&_p_d_sample_unfinished, sizeof(int)));
CHECK_GPU_ERROR(
cudaMalloc(&_p_d_curandstate, _max_batch_size * sizeof(curandState)));
ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate);
// int8 GEMM activation buffers, sized for the widest intermediate
// (ffn inner dim or qkv projection) and for the vocab-sized logits.
int max_batch_dim =
_max_batch_size * _tw._beam_size *
round_up(std::max(_tw._inner_size, _tw._hidden_size * 3), 32);
CHECK_GPU_ERROR(
cudaMalloc(&_int8_ffn_in_buf, max_batch_dim * sizeof(int8_t)));
CHECK_GPU_ERROR(cudaMalloc(
&_int32_ffn_out_buf,
std::max(std::max(max_batch_dim, _max_batch_size * _tw._beam_size *
_tw._head_num * _tw._max_step),
round_up(_tw._trg_vocab_size, 32) * _tw._beam_size *
_max_batch_size) *
sizeof(int32_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_ffn_out_buf,
std::max(max_batch_dim, round_up(_tw._trg_vocab_size, 32) *
_tw._beam_size * _max_batch_size) *
sizeof(int8_t)));
// malloc embeddings
// Two int8 copies of the token embedding: one laid out for the logits GEMM,
// one (row-major) for the bottom embedding lookup.
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_trg_emb_wei,
_tw._trg_vocab_size * _tw._hidden_size * sizeof(int8_t)));
quantize_weight(_p_d_trg_emb_wei[0], _int8_p_d_trg_emb_wei, _tw._hidden_size,
_tw._trg_vocab_size, _quant_range / _trg_emb_clip_max,
_stream, _cublas_lt_handle);
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_trg_emb_bottom_wei,
_tw._trg_vocab_size * _tw._hidden_size * sizeof(int8_t)));
quantize_weight(_p_d_trg_emb_wei[0], _int8_p_d_trg_emb_bottom_wei,
_tw._hidden_size, _tw._trg_vocab_size,
_quant_range / _trg_emb_clip_max, _stream, _cublas_lt_handle,
kRowMajor);
// Upload the non-quantized embedding-side weights; slot 0 stays null
// because the token embedding lives in the int8 buffers above.
_p_device_emb.push_back(nullptr);
_p_device_emb.push_back(
to_gpu(_p_d_trg_emb_wei[1], _tw._max_step * _tw._hidden_size, _stream));
_p_device_emb.push_back(
to_gpu(_p_d_trg_emb_wei[2], _tw._hidden_size, _stream));
_p_device_emb.push_back(
to_gpu(_p_d_trg_emb_wei[3], _tw._hidden_size, _stream));
_p_device_emb.push_back(to_gpu(
_p_d_trg_emb_wei[4],
_tw._hidden_size * _tw._hidden_size * 2 * _tw._n_dec_layer, _stream));
_p_device_emb.push_back(to_gpu(
_p_d_trg_emb_wei[5], _tw._hidden_size * 2 * _tw._n_dec_layer, _stream));
_p_device_emb.push_back(
to_gpu(_p_d_trg_emb_wei[6], _tw._trg_vocab_size, _stream));
// Optional language embedding, only present for multilingual models.
if (_tw._multilg_type != 0) {
_p_device_emb.push_back(
to_gpu(_p_d_trg_emb_wei[7], _tw._hidden_size, _stream));
} else {
_p_device_emb.push_back(nullptr);
}
// malloc reused kv cache and encdec output
// _p_d_encoder_out_buf max size: _tw._hidden_size * 2 * _tw._n_dec_layer *
// _max_batch_size * _max_step * sizeof(T)
// so when fp16 their max size is same.
// The int8 self-attention KV cache is reused as the encoder-projection
// scratch buffer when it is large enough.
int8_t* self_kv_cache_buffer;
int8_t* sliding_p;
CHECK_GPU_ERROR(
cudaMalloc(&self_kv_cache_buffer,
_layer_size_self_k * _tw._n_dec_layer * 4 * sizeof(int8_t)));
int encoder_out_size = _tw._hidden_size * 2 * _tw._n_dec_layer *
_max_batch_size * _tw._max_step * sizeof(_DataType);
if (encoder_out_size <=
_layer_size_self_k * _tw._n_dec_layer * 4 * sizeof(int8_t)) {
_p_d_encoder_out_buf = reinterpret_cast<_DataType*>(self_kv_cache_buffer);
} else {
CHECK_GPU_ERROR(cudaMalloc(&_p_d_encoder_out_buf, encoder_out_size));
}
// Double-buffered self-attention caches: two K halves then two V halves.
sliding_p = self_kv_cache_buffer;
for (int i = 0; i < _tw._n_dec_layer * 2; i++) {
_p_d_self_k_cache.push_back(sliding_p);
sliding_p += _layer_size_self_k;
}
for (int i = 0; i < _tw._n_dec_layer * 2; i++) {
_p_d_self_v_cache.push_back(sliding_p);
sliding_p += _layer_size_self_k;
}
_p_d_self_k_cache1 = _p_d_self_k_cache.data();
_p_d_self_k_cache2 = _p_d_self_k_cache.data() + _tw._n_dec_layer;
_p_d_self_v_cache1 = _p_d_self_v_cache.data();
_p_d_self_v_cache2 = _p_d_self_v_cache.data() + _tw._n_dec_layer;
// malloc weights
// Six int8 GEMM weights per layer: self qkv, self out, encdec q, encdec out,
// ffn first, ffn second. Clip-max indices into _dec_clip_max follow the
// layout produced by the weight loader (19 entries per layer).
_int8_p_d_dec_wei = std::vector<int8_t*>(_tw._n_dec_layer * 6);
_scaled_ffn2_colsum = std::vector<_DataType*>(_tw._n_dec_layer);
for (_layer_id = 0; _layer_id < _tw._n_dec_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_dec_layer;
// malloc quantized weights
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6],
_tw._hidden_size * 3 * _tw._hidden_size * sizeof(int8_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6 + 1],
_tw._hidden_size * _tw._hidden_size * sizeof(int8_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6 + 2],
_tw._hidden_size * _tw._hidden_size * sizeof(int8_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6 + 3],
_tw._hidden_size * _tw._hidden_size * sizeof(int8_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6 + 4],
_tw._hidden_size * _tw._inner_size * sizeof(int8_t)));
CHECK_GPU_ERROR(
cudaMalloc(&_int8_p_d_dec_wei[_layer_id * 6 + 5],
_tw._inner_size * _tw._hidden_size * sizeof(int8_t)));
// malloc unquantized weights
// Norm scales/biases and the attention/ffn biases stay in _DataType;
// nullptr slots mark weights that exist only in quantized form.
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset], _tw._hidden_size, _stream));
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 1], _tw._hidden_size, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(to_gpu(_p_d_dec_wei[_weight_offset + 3],
_tw._hidden_size * 3, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 5], _tw._hidden_size, _stream));
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 6], _tw._hidden_size, _stream));
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 7], _tw._hidden_size, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 9], _tw._hidden_size, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 11], _tw._hidden_size, _stream));
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 12], _tw._hidden_size, _stream));
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 13], _tw._hidden_size, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 15], _tw._inner_size, _stream));
_p_device_wei.push_back(nullptr);
_p_device_wei.push_back(
to_gpu(_p_d_dec_wei[_weight_offset + 17], _tw._hidden_size, _stream));
// Quantize the six GEMM kernels; layout (kColMajor vs default) matches
// what the corresponding cublasLt GEMM below expects.
quantize_weight(_p_d_dec_wei[_weight_offset + 2],
_int8_p_d_dec_wei[_layer_id * 6], _tw._hidden_size,
_tw._hidden_size * 3,
_quant_range / _dec_clip_max[_layer_id * 19], _stream,
_cublas_lt_handle);
quantize_weight(_p_d_dec_wei[_weight_offset + 4],
_int8_p_d_dec_wei[_layer_id * 6 + 1], _tw._hidden_size,
_tw._hidden_size,
_quant_range / _dec_clip_max[_layer_id * 19 + 1], _stream,
_cublas_lt_handle, kColMajor);
quantize_weight(_p_d_dec_wei[_weight_offset + 8],
_int8_p_d_dec_wei[_layer_id * 6 + 2], _tw._hidden_size,
_tw._hidden_size,
_quant_range / _dec_clip_max[_layer_id * 19 + 2], _stream,
_cublas_lt_handle);
quantize_weight(_p_d_dec_wei[_weight_offset + 10],
_int8_p_d_dec_wei[_layer_id * 6 + 3], _tw._hidden_size,
_tw._hidden_size,
_quant_range / _dec_clip_max[_layer_id * 19 + 3], _stream,
_cublas_lt_handle, kColMajor);
quantize_weight(_p_d_dec_wei[_weight_offset + 14],
_int8_p_d_dec_wei[_layer_id * 6 + 4], _tw._hidden_size,
_tw._inner_size,
_quant_range / _dec_clip_max[_layer_id * 19 + 4], _stream,
_cublas_lt_handle);
quantize_weight(_p_d_dec_wei[_weight_offset + 16],
_int8_p_d_dec_wei[_layer_id * 6 + 5], _tw._inner_size,
_tw._hidden_size,
_quant_range / _dec_clip_max[_layer_id * 19 + 5], _stream,
_cublas_lt_handle, kColMajor);
// For non-gelu (relu) models, precompute the scaled column sums of the
// second ffn kernel used to fold the relu dequant offset into the GEMM.
if (_tw._use_gelu) {
_scaled_ffn2_colsum[_layer_id] = nullptr;
} else {
CHECK_GPU_ERROR(cudaMalloc(&_scaled_ffn2_colsum[_layer_id],
_tw._hidden_size * sizeof(_DataType)));
float relu_scale = _dec_clip_max[_layer_id * 19 + 11] / 2;
_DataType* temp;
int weight_size = _tw._inner_size * _tw._hidden_size;
// Temporary device copy of the fp weight just for the column-sum kernel.
CHECK_GPU_ERROR(cudaMalloc(&temp, weight_size * sizeof(_DataType)));
CHECK_GPU_ERROR(cudaMemcpyAsync(temp, _p_d_dec_wei[_weight_offset + 16],
weight_size * sizeof(_DataType),
cudaMemcpyHostToDevice, _stream));
launch_scaled_colsum(temp, _scaled_ffn2_colsum[_layer_id],
_tw._inner_size, _tw._hidden_size, relu_scale,
_stream);
CHECK_GPU_ERROR(cudaGetLastError());
CHECK_GPU_ERROR(cudaFree(temp));
}
}
// Drain all async uploads/kernels before declaring the buffers ready.
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
CHECK_GPU_ERROR(cudaGetLastError());
std::cout << "decoder buffer init succeed" << std::endl;
return;
}
/**
Validate the model hyper-parameters and weight layout before inference.
Returns an empty string when everything is consistent, otherwise a short
description of the first violated requirement. Also resolves the effective
_output_topk flag for sampling-based decoding strategies.
*/
template <OperationType OpType_>
std::string QuantDecoder<OpType_>::check() {
  // if (_max_thread_per_block < _tw._hidden_size) {
  //   return "violate hidden_size <= max_thread_per_block";
  // }
  if (_tw._inner_size % 2 != 0) {
    return "violate inner_size % 2 = 0";
  }
  if (_tw._dim_per_head % 2 != 0) {
    return "violate dim_per_head % 2 = 0";
  }
  // Embedding weight count depends on whether a language embedding exists.
  if (_tw._multilg_type == 0 && _p_d_trg_emb_wei.size() != 7) {
    return "violate p_d_trg_emb_wei.size() = 7";
  }
  if (_tw._multilg_type != 0 && _p_d_trg_emb_wei.size() != 8) {
    return "violate p_d_trg_emb_wei.size() = 8";
  }
  if (_p_d_dec_wei.size() != _tw._weight_per_dec_layer * _tw._n_dec_layer) {
    return "violate p_d_dec_wei.size() = weight_per_dec_layer * n_dec_layer";
  }
  // beam_size must be one of the powers of two below 64.
  bool beam_ok = false;
  for (int cand = 1; cand < 64; cand <<= 1) {
    beam_ok = beam_ok || (cand == _tw._beam_size);
  }
  if (!beam_ok) {
    return "wrong beam_size, should be 1, 2, 4, 8, 16 or 32";
  }
  const std::string sampling_method = _tw._sampling_method;
  if (kSamplingMethods.find(sampling_method) == kSamplingMethods.end()) {
    return std::string("unsupported sampling_method: ") + sampling_method;
  }
  // topk/topp emit a single sampled sequence; topk_greedy emits the top-k.
  if (sampling_method == "topk" || sampling_method == "topp") {
    _output_topk = false;
  } else if (sampling_method == "topk_greedy") {
    _output_topk = true;
  }
  if (_tw._multilg_type != 0 && _p_d_lang_id == nullptr) {
    return "lang id should not be null when multilg";
  }
  if (_tw._max_step > 1024) {
    return "max_step should not greater than 1024";
  }
  return "";
}
/**
QuantDecoder inference.
Runs one full autoregressive decode for a batch: project the encoder
output, step the decoder until decoding finishes or the max decode length
is reached, then write the resulting token ids (and scores, for top-k
output) into _p_d_result.
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) {
if (batch_size > _max_batch_size) {
throw std::runtime_error("batch size of input greater than max_batch_size");
}
if (batch_seq_len > _tw._max_step) {
throw std::runtime_error("seq len of input greater than max_step");
}
/* ---step1. init--- */
_batch_size = batch_size;
_batch_seq_len = batch_seq_len;
_batch_token_num = batch_size * batch_seq_len;
_step_token_num = batch_size * _tw._beam_size;
// Beam search bounds decoding by source length + extra margin; sampling
// methods decode all the way to max_step.
_batch_max_decode_length =
min(_tw._max_step, batch_seq_len + _tw._extra_decode_length) - 1;
_is_sampling =
(_tw._sampling_method == "topk" || _tw._sampling_method == "topp" ||
_tw._sampling_method == "topk_greedy");
if (_is_sampling) {
_batch_max_decode_length = _tw._max_step;
}
project_encoder_output(); // project encoder output
// init the first step's token id with target start_id
CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_alive_seq_probs,
_h_alive_seq_probs.data(),
sizeof(float) * _batch_size * _tw._beam_size,
cudaMemcpyHostToDevice, _stream));
/* ---step2. autoregressive decoding--- */
for (_cur_step = 0; _cur_step < _batch_max_decode_length - 1; _cur_step++) {
#ifdef DEBUG_RESULT
std::cout << "*** run step " << _cur_step << " ***" << std::endl;
#endif
if (run_step()) { // one step; a true return means decoding is finished
break;
}
}
/* ---step3. output the decoding result--- */
if (_output_topk || _is_sampling) {
// NOTE(review): the loop above keeps _cur_step below
// _batch_max_decode_length - 1, so this clamp looks unreachable — confirm.
if (_cur_step == _batch_max_decode_length) {
_cur_step -= 1;
}
ker_write_topk_result<<<_batch_size * _tw._beam_size, _cur_step + 1, 0,
_stream>>>(
_p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._trg_vocab_size,
_tw._max_step, _tw._beam_size, _tw._end_id);
return;
}
// Beam-search output: positive length penalty (or hitting the length cap)
// and negative penalty use different result-writing kernels.
if (_tw._length_penalty >= 0.f || _cur_step == _batch_max_decode_length) {
ker_write_trg_tokenid_pos_penalty<<<_batch_size, _cur_step + 1, 0,
_stream>>>(
_p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._max_step,
_tw._beam_size);
} else {
ker_write_trg_tokenid_neg_penalty<<<_batch_size, _cur_step + 1, 0,
_stream>>>(
_p_d_alive_seq, _p_d_alive_seq_score, _p_d_result, _tw._max_step,
_tw._beam_size, _tw._trg_vocab_size, _tw._end_id);
}
#ifdef DEBUG_RESULT
for (int i = 0; i < _batch_size; i++) {
print_vec(_p_d_result + i * (_cur_step + 1), "finial res", _cur_step + 1);
}
#endif
return;
}
/**
Project encoder output.
One GEMM projects the encoder output into per-layer enc-dec attention
keys and values for all decoder layers at once; the result is then
rearranged (bias added, heads split) into the _p_d_encdec_k_bgeem /
_p_d_encdec_v_bgeem caches.
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::project_encoder_output() {
// Output width: K and V of hidden_size each, for every decoder layer.
int kv_dim = _tw._hidden_size * 2 * _tw._n_dec_layer;
#ifdef DEBUG_RESULT
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
print_vec(_p_d_encoder_output, "_p_d_encoder_output(head):", 5);
print_vec(_p_d_encoder_output + _batch_token_num * _tw._hidden_size - 5,
"_p_d_encoder_output(tail)", 5);
print_vec(_p_device_emb[4], "encoder project(head):", 10);
#endif
// _p_device_emb[4] is the projection kernel; cublas is column-major, hence
// the (kernel, input) argument order for a row-major result.
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, kv_dim, _batch_token_num, _tw._hidden_size,
&_type_one, _p_device_emb[4], _AType, kv_dim, _p_d_encoder_output, _BType,
_tw._hidden_size, &_type_zero, _p_d_encoder_out_buf, _CType, kv_dim,
_computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// _p_d_encoder_out_buf: [batch_size, batch_seq_len, layer_num, 2,
// hidden_size]
#ifdef DEBUG_RESULT
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
print_vec(_p_d_encoder_out_buf, "encoder out(head):", 5);
print_vec(_p_d_encoder_out_buf +
_batch_token_num * _tw._hidden_size * _tw._n_dec_layer - 5,
"encoder out(tail):", 5);
#endif
// Add the KV bias (_p_device_emb[5]) and scatter into per-layer K/V caches.
ker_arrange_encdec_kv_launcher<_DataType>(
_batch_token_num, _tw._n_dec_layer, _tw._hidden_size, _stream,
_p_d_encoder_out_buf, _p_device_emb[5], _p_d_encdec_k_bgeem[0],
_p_d_encdec_v_bgeem[0], _layer_size_encdec_k, _batch_seq_len,
_tw._dim_per_head, _tw._head_num, _max_thread_per_block);
return;
}
/**
Decode one step.
Embeds the current tokens, runs the decoder stack, projects hidden states
to vocabulary logits with an int8 GEMM, then hands off to the configured
search/sampling strategy. Returns true when decoding is finished.
*/
template <OperationType OpType_>
bool QuantDecoder<OpType_>::run_step() {
embedding();
decoder_stack();
/* --- Project hidden states to vocab logits--- */
// _step_token_num (beam_size * batch_size) must be 4x
cublasLtMM_withAlgo_i8IO(_int8_ffn_out_buf, 1, _step_token_num,
_tw._trg_vocab_size, _tw._hidden_size, 0, 0, 0,
_output_ln_clip_max * _trg_emb_clip_max /
(_logits_clip_max * _quant_range),
_int8_ffn_in_buf, _int8_p_d_trg_emb_wei,
_cublas_lt_handle, _stream, false);
#ifdef DEBUG_RESULT
for (int i = 0; i < _batch_size; i++) {  // batch_id
for (int j = 0; j < _tw._beam_size; j++) {  // beam_id
std::cout << "decoder output: batch-" << i << ", beam-" << j << std::endl;
print_vec(_int8_ffn_in_buf + i * _tw._beam_size * _tw._hidden_size +
j * _tw._hidden_size,
"hidden", 10);
print_vec(_int8_ffn_out_buf + i * _tw._beam_size * _tw._trg_vocab_size +
j * _tw._trg_vocab_size,
"logits", 10);
}
}
#endif
// Dispatch on the configured strategy (already validated by check()).
if (_tw._sampling_method == "topk") {
return sample();
} else if (_tw._sampling_method == "topp") {
return sample();
} else if (_tw._sampling_method == "topk_greedy") {
return topk_greedy_search();
} else if (_tw._sampling_method == "beam_search") {
return beam_search();
} else {
throw std::runtime_error("not supported sampling_method");
}
}
/**
Decode embedding.
Looks up the int8 token embedding for the current step's tokens and adds
the positional (and, for multilingual models, language) embedding, writing
the dequantized result into _p_d_cur_step_query.
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::embedding() {
// _p_d_trg_emb_wei: {token_emb, position_emb, norm_scale, norm_bias,
// enc_out_kernel_kv, enc_out_bias_kv, logit_bias}
launch_dec_emb_i8I<_DataType>(
_int8_p_d_trg_emb_bottom_wei, _p_device_emb[1], _p_d_alive_seq,
_p_device_emb[7], _p_d_lang_id, _p_d_cur_step_query, _batch_size,
_tw._beam_size, _tw._hidden_size, _tw._trg_vocab_size, _cur_step,
_tw._max_step, _tw._multilg_type, _stream,
_trg_emb_clip_max / _quant_range, true);
#ifdef DEBUG_RESULT
for (int i = 0; i < _batch_size; i++) {  // batch_id
for (int j = 0; j < _tw._beam_size; j++) {  // beam_id
std::cout << "decoder emb: batch-" << i << ", beam-" << j << std::endl;
print_vec(_p_d_cur_step_query + i * _tw._beam_size * _tw._hidden_size +
j * _tw._hidden_size,
"emb", 10);
}
}
#endif
return;
}
/**
QuantDecoder feedforward, composed by self_atten,
enc-dec-atten, ffn.
Iterates the decoder layers in order; the sub-steps read the _layer_id and
_weight_offset members set here.
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::decoder_stack() {
// _p_d_dec_wei = {self_norm_scale, self_norm_bias,
// self_qkv_kernel, self_qkv_bias, self_output_kernel, self_output_bias
// encdec_norm_scale, encdec_norm_bias,
// encdec_q_kernel, encdec_q_bias, encdec_output_kernel, encdec_output_bias
// ffn_norm_scale, ffn_norm_bias, ffn_first_kernel, ffn_first_bias,
// ffn_second_kernel, ffn_second_bias} * encoder_layer_num
for (_layer_id = 0; _layer_id < _tw._n_dec_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_dec_layer;
self_attention();
encdec_attention();
ffn_add_norm();
}
}
/**
QuantDecoder self attention
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::self_attention() {
  // Quantization stats are packed 19 per layer in _dec_clip_max; the fixed
  // offsets below (+6, +7, +12, ...) select individual clip-max values.
  // For layer 0 the step input still needs to be layer-normed and quantized;
  // for later layers the previous layer's ffn_add_norm() already wrote the
  // normed int8 activations into _int8_ffn_in_buf.
  if (_layer_id == 0) {
    ker_norm_layer_resual_i8O_launcher<_DataType>(
        _step_token_num, _tw._hidden_size, _stream, _p_d_cur_step_query,
        _int8_ffn_in_buf, _p_device_wei[_weight_offset],
        _p_device_wei[_weight_offset + 1], _p_device_wei[_weight_offset + 5],
        _max_thread_per_block, _quant_range / _dec_clip_max[_layer_id * 19 + 6],
        _tw._is_post_ln, true);
  }
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "self attn ln(head): ", 5);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 5,
            "self attn ln(tail): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Int8 GEMM: project the step input to the concatenated q, k, v
  // (hidden_size -> 3 * hidden_size).
  cublasLtMM_withAlgo_i8IO(
      _int8_ffn_out_buf, 1, _step_token_num, _tw._hidden_size * 3,
      _tw._hidden_size, 0, 0, 0,
      _dec_clip_max[_layer_id * 19] * _dec_clip_max[_layer_id * 19 + 6] /
          (_dec_clip_max[_layer_id * 19 + 12] * _quant_range),
      _int8_ffn_in_buf, _int8_p_d_dec_wei[_layer_id * 6], _cublas_lt_handle,
      _stream, false);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_out_buf, "self qkv(head): ", 5);
  print_vec(_int8_ffn_out_buf + _step_token_num * _tw._hidden_size * 3 - 5,
            "self qkv(tail): ", 5);
#endif
  // get q, k, v by split and reshape qkv
  // This also writes the current step's k/v into the per-layer decoder
  // self-attention caches (_p_d_self_k_cache1 / _p_d_self_v_cache1) at
  // position _cur_step.
  ker_arrange_decself_qkv_i8I_i8O_launcher<_DataType>(
      _step_token_num, _tw._hidden_size, _stream, _int8_ffn_out_buf,
      _p_device_wei[_weight_offset + 3], _int8_ffn_in_buf,
      _p_d_self_k_cache1[_layer_id], _p_d_self_v_cache1[_layer_id],
      _tw._head_num, _tw._dim_per_head, _tw._max_step, _cur_step,
      _max_thread_per_block, _dec_clip_max[_layer_id * 19 + 12] / _quant_range,
      _quant_range / _dec_clip_max[_layer_id * 19 + 18], true);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "rearanged q(head): ", 5);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 5,
            "rearanged q(tail): ", 5);
  print_vec(_p_d_self_k_cache1[_layer_id], "rearanged k(head): ", 5);
  print_vec(_p_d_self_v_cache1[_layer_id], "rearanged v(head): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Batched int8 GEMM producing int32 correlation logits: q (this step) x
  // k (the _cur_step + 1 cached steps), one batch per (token, head).
  CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
      _hd, CUBLAS_OP_T, CUBLAS_OP_N, _cur_step + 1, 1, _tw._dim_per_head,
      &_ione, _p_d_self_k_cache1[_layer_id], CUDA_R_8I, _tw._dim_per_head,
      _tw._max_step * _tw._dim_per_head, _int8_ffn_in_buf, CUDA_R_8I,
      _tw._dim_per_head, _tw._dim_per_head, &_izero, _int32_ffn_out_buf,
      CUDA_R_32I, _cur_step + 1, _tw._max_step, _step_token_num * _tw._head_num,
      CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
  print_vec(_int32_ffn_out_buf, "self attn q*k logits(head): ", _cur_step + 1);
  print_vec(_int32_ffn_out_buf +
                (_step_token_num * _tw._head_num - 1) * _tw._max_step,
            "self attn q*k logits(tail): ", _cur_step + 1);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Fused kernel: scale by _atten_scaler, softmax over the int32 logits,
  // and weighted sum with the cached values; result re-quantized to int8.
  ker_fuse_softmax_new_value_i32I_i8O_launcher(
      _int32_ffn_out_buf, _p_d_self_v_cache1[_layer_id], _int8_ffn_in_buf,
      _step_token_num * _tw._head_num, _cur_step + 1, _tw._max_step,
      _tw._head_num, _tw._dim_per_head, float(_atten_scaler),
      _dec_clip_max[_layer_id * 19 + 18] / _quant_range,
      _quant_range / _dec_clip_max[_layer_id * 19 + 7], false, _stream);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "self attn ffn in(head): ", 40);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 40,
            "self attn ffn in(tail): ", 40);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Int8 GEMM: self-attention output projection (debug label below notes
  // the bias is not applied here).
  cublaslt_gemm(
      _int8_p_d_dec_wei[_layer_id * 6 + 1], _int8_ffn_in_buf, _int8_ffn_out_buf,
      1, _tw._hidden_size, _step_token_num, _tw._hidden_size, 0, 0, 0,
      _dec_clip_max[_layer_id * 19 + 1] * _dec_clip_max[_layer_id * 19 + 7] /
          (_dec_clip_max[_layer_id * 19 + 13] * _quant_range),
      _cublas_lt_handle, _stream);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_out_buf, "self attn ffn out w/o bias(head): ", 40);
  print_vec(_int8_ffn_out_buf + _step_token_num * _tw._hidden_size - 40,
            "self attn ffn out w/o bias(tail): ", 40);
#endif
  // Residual add into _p_d_cur_step_query and layer-norm into
  // _int8_ffn_in_buf, which becomes the quantized input of
  // encdec_attention().
  ker_residual_bias_ln_i8I_i8O_launcher<_DataType>(
      _int8_ffn_out_buf, _p_device_wei[_weight_offset + 6],
      _p_device_wei[_weight_offset + 7], _p_device_wei[_weight_offset + 11],
      _int8_ffn_in_buf, _p_d_cur_step_query, _step_token_num, _tw._hidden_size,
      _dec_clip_max[_layer_id * 19 + 13] / _quant_range,
      _quant_range / _dec_clip_max[_layer_id * 19 + 8], _max_thread_per_block,
      _stream, _tw._is_post_ln, false, true);
}
/**
Encode-Decoder attention
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::encdec_attention() {
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "encdec attn ln(head): ", 5);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 5,
            "encdec attn ln(tail): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // step 1: int8 GEMM projecting the (already layer-normed, quantized)
  // step input in _int8_ffn_in_buf to the encdec query.
  cublasLtMM_withAlgo_i8IO(
      _int8_ffn_out_buf, 1, _step_token_num, _tw._hidden_size, _tw._hidden_size,
      0, 0, 0,
      _dec_clip_max[_layer_id * 19 + 2] * _dec_clip_max[_layer_id * 19 + 8] /
          (_dec_clip_max[_layer_id * 19 + 14] * _quant_range),
      _int8_ffn_in_buf, _int8_p_d_dec_wei[_layer_id * 6 + 2], _cublas_lt_handle,
      _stream, false);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_out_buf, "encdec q(head): ", 5);
  print_vec(_int8_ffn_out_buf + _step_token_num * _tw._hidden_size - 5,
            "encdec q(tail): ", 5);
#endif
  // Dequantize the query, add the query bias, and reshape it into
  // _p_d_query_buf1 in the layout the batched GEMMs below expect.
  ker_arrange_encdec_q_i8I_launcher<_DataType>(
      _step_token_num, _tw._hidden_size, _stream, _int8_ffn_out_buf,
      _p_device_wei[_weight_offset + 9], _p_d_query_buf1, _tw._beam_size,
      _tw._dim_per_head, _tw._head_num, _max_thread_per_block,
      _dec_clip_max[_layer_id * 19 + 14] / _quant_range, true);
#ifdef DEBUG_RESULT
  print_vec(_p_d_query_buf1, "rearanged q(head): ", 5);
  print_vec(_p_d_query_buf1 + _step_token_num * _tw._hidden_size - 5,
            "rearanged q(tail): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  /* ---step 2. correlation = q * k, perform softmax on correlation--- */
  // Keys/values come from the encoder output buffers
  // (_p_d_encdec_k_bgeem / _p_d_encdec_v_bgeem); the softmax kernel takes
  // _p_d_padding_mask to exclude encoder padding positions.
  CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
      _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _tw._beam_size,
      _tw._dim_per_head, &_atten_scaler, _p_d_encdec_k_bgeem[_layer_id], _AType,
      _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_query_buf1,
      _BType, _tw._dim_per_head, _tw._beam_size * _tw._dim_per_head,
      &_type_zero, _p_d_c, _CType, _batch_seq_len,
      _tw._beam_size * _batch_seq_len, _batch_size * _tw._head_num,
      _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
  ker_correlation_softmax_encdec_launcher<_DataType>(
      _batch_size, _tw._head_num * _tw._beam_size, _batch_seq_len, _stream,
      _p_d_c, _p_d_padding_mask);
  /* ---step 3. new_q = correlation * v--- */
  CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
      _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _tw._beam_size,
      _batch_seq_len, &_type_one, _p_d_encdec_v_bgeem[_layer_id], _AType,
      _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType,
      _batch_seq_len, _tw._beam_size * _batch_seq_len, &_type_zero,
      _p_d_query_buf1, _CType, _tw._dim_per_head,
      _tw._beam_size * _tw._dim_per_head, _batch_size * _tw._head_num,
      _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#ifdef DEBUG_RESULT
  print_vec(_p_d_encdec_v_bgeem[_layer_id], "encdec attn value(head): ", 5);
  print_vec(_p_d_c, "encdec attn correlation(head): ", 5);
  print_vec(_p_d_query_buf1, "encdec attn new value(head): ", 5);
#endif
  // Undo the per-head layout and re-quantize the attention output to int8
  // for the following output projection.
  ker_arrange_atten_output_i8O_launcher<_DataType>(
      _step_token_num, _tw._hidden_size, _stream, _p_d_query_buf1,
      _int8_ffn_in_buf, _tw._beam_size, _tw._dim_per_head, _tw._head_num,
      _max_thread_per_block, _quant_range / _dec_clip_max[_layer_id * 19 + 9],
      false);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "encdec attn ffn in(head): ", 3);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 3,
            "encdec attn ffn in(tail): ", 3);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Int8 GEMM: encdec attention output projection.
  cublaslt_gemm(
      _int8_p_d_dec_wei[_layer_id * 6 + 3], _int8_ffn_in_buf, _int8_ffn_out_buf,
      1, _tw._hidden_size, _step_token_num, _tw._hidden_size, 0, 0, 0,
      _dec_clip_max[_layer_id * 19 + 3] * _dec_clip_max[_layer_id * 19 + 9] /
          (_dec_clip_max[_layer_id * 19 + 15] * _quant_range),
      _cublas_lt_handle, _stream);
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_out_buf, "encdec attn ffn out w/o bias(head): ", 3);
  print_vec(_int8_ffn_out_buf + _step_token_num * _tw._hidden_size - 3,
            "encdec attn ffn out w/o bias(tail): ", 3);
#endif
  // Residual add into _p_d_cur_step_query and layer-norm into
  // _int8_ffn_in_buf, which becomes the quantized input of ffn_add_norm().
  ker_residual_bias_ln_i8I_i8O_launcher<_DataType>(
      _int8_ffn_out_buf, _p_device_wei[_weight_offset + 12],
      _p_device_wei[_weight_offset + 13], _p_device_wei[_weight_offset + 17],
      _int8_ffn_in_buf, _p_d_cur_step_query, _step_token_num, _tw._hidden_size,
      _dec_clip_max[_layer_id * 19 + 15] / _quant_range,
      _quant_range / _dec_clip_max[_layer_id * 19 + 10], _max_thread_per_block,
      _stream, _tw._is_post_ln, false, true);
#ifdef DEBUG_RESULT
  CHECK_GPU_ERROR(cudaGetLastError());
  print_vec(_p_d_cur_step_query, "encdec attn ffn out(head): ", 3);
  print_vec(_p_d_cur_step_query + _step_token_num * _tw._hidden_size - 3,
            "encdec attn ffn out(tail): ", 3);
#endif
  return;
}
template <OperationType OpType_>
void QuantDecoder<OpType_>::ffn_add_norm() {
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "ffn ln(head): ", 5);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._hidden_size - 5,
            "ffn ln(tail): ", 5);
#endif
  // Int8 GEMM: first FFN projection, hidden_size -> inner_size.
  cublasLtMM_withAlgo_i8IO(
      _int8_ffn_out_buf, 1, _step_token_num, _tw._inner_size, _tw._hidden_size,
      0, 0, 0,
      _dec_clip_max[_layer_id * 19 + 4] * _dec_clip_max[_layer_id * 19 + 10] /
          (_dec_clip_max[_layer_id * 19 + 16] * _quant_range),
      _int8_ffn_in_buf, _int8_p_d_dec_wei[_layer_id * 6 + 4], _cublas_lt_handle,
      _stream, false);
  // Fused bias + activation (gelu or relu), re-quantized to int8.
  if (_tw._use_gelu) {
    ker_bias_gelu_i8I_i8O_launcher<_DataType>(
        _step_token_num, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf,
        _p_device_wei[_weight_offset + 15], _tw._inner_size,
        _dec_clip_max[_layer_id * 19 + 16] / _quant_range,
        _quant_range / _dec_clip_max[_layer_id * 19 + 11], true, false);
  } else {
    ker_bias_relu_i8I_i8O_launcher<_DataType>(
        _step_token_num, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf,
        _p_device_wei[_weight_offset + 15], _tw._inner_size,
        _dec_clip_max[_layer_id * 19 + 16] / _quant_range,
        _quant_range / _dec_clip_max[_layer_id * 19 + 11],
        _dec_clip_max[_layer_id * 19 + 11], true, false, true);
  }
#ifdef DEBUG_RESULT
  print_vec(_int8_ffn_in_buf, "ffn act out(head): ", 5);
  print_vec(_int8_ffn_in_buf + _step_token_num * _tw._inner_size - 5,
            "ffn act out(tail): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  // Second FFN projection, inner_size -> hidden_size, kept in int32
  // (output scale 1); dequantization happens in the fused kernel below.
  cublaslt_gemm(_int8_p_d_dec_wei[_layer_id * 6 + 5], _int8_ffn_in_buf,
                _int32_ffn_out_buf, 1, _tw._hidden_size, _step_token_num,
                _tw._inner_size, 0, 0, 0, 1, _cublas_lt_handle, _stream);
  const _DataType *scale_ptr, *bias_ptr, *res_bias_ptr;
  float clip_max, dequant_scale;
  // NOTE(review): the relu path halves the dequant scale; presumably this
  // pairs with the _scaled_ffn2_colsum correction passed to the fused
  // kernel below -- confirm against the kernel implementation.
  if (_tw._use_gelu) {
    dequant_scale = _dec_clip_max[_layer_id * 19 + 5] *
                    _dec_clip_max[_layer_id * 19 + 11] /
                    (_quant_range * _quant_range);
  } else {
    dequant_scale = _dec_clip_max[_layer_id * 19 + 5] *
                    _dec_clip_max[_layer_id * 19 + 11] /
                    (2 * _quant_range * _quant_range);
  }
  // Choose the layer-norm parameters of whatever consumes this output:
  // after the last layer, the final output norm (_p_device_emb[2]/[3],
  // _output_ln_clip_max); otherwise the next layer's self-attention norm
  // and its clip max.
  if (_layer_id == _tw._n_dec_layer - 1) {
    scale_ptr = _p_device_emb[2];
    bias_ptr = _p_device_emb[3];
    res_bias_ptr = nullptr;
    clip_max = _output_ln_clip_max;
  } else {
    scale_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_dec_layer];
    bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_dec_layer + 1];
    res_bias_ptr =
        _p_device_wei[(_layer_id + 1) * _tw._weight_per_dec_layer + 5];
    clip_max = _dec_clip_max[(_layer_id + 1) * 19 + 6];
  }
  // Residual + bias + layer-norm: writes the next block's int8 input to
  // _int8_ffn_in_buf and the updated residual stream to _p_d_cur_step_query.
  ker_residual_bias_ln_i32I_i8O_launcher<_DataType>(
      _int32_ffn_out_buf, scale_ptr, bias_ptr, res_bias_ptr, _int8_ffn_in_buf,
      _p_d_cur_step_query, _step_token_num, _tw._hidden_size, dequant_scale,
      _quant_range / clip_max, _max_thread_per_block, _stream, _tw._is_post_ln,
      false, true, _scaled_ffn2_colsum[_layer_id]);
#ifdef DEBUG_RESULT
  print_vec(_p_d_cur_step_query, "ffn ln(head): ", 5);
  print_vec(_p_d_cur_step_query + _step_token_num * _tw._hidden_size - 5,
            "ffn ln(tail): ", 5);
  CHECK_GPU_ERROR(cudaGetLastError());
#endif
  return;
}
/**
Sample the next token for every batch item from the int8 logits in
_int8_ffn_out_buf, using top-k or top-p according to _tw._sampling_method.
Returns true when decoding can stop (the device-side "unfinished" flag was
not raised), false when it should continue.
*/
template <OperationType OpType_>
bool QuantDecoder<OpType_>::sample() {
  CHECK_GPU_ERROR(
      cudaMemsetAsync(_p_d_sample_unfinished, 0, sizeof(int), _stream));
  /* --- Sample new tokens from logits --- */
  if (_tw._sampling_method == "topk") {
    ker_topk_sample_i8I_launcher<_DataType>(
        _batch_size, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block,
        _stream, _int8_ffn_out_buf, _p_device_emb[6], _p_d_alive_seq,
        _p_d_alive_seq_buf, _tw._trg_vocab_size, _tw._topk,
        _p_d_sample_unfinished, _p_d_curandstate, _tw._end_id,
        _logits_clip_max / _quant_range, true);
  } else {
    ker_topp_sample_i8I_launcher<_DataType>(
        _batch_size, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block,
        _stream, _int8_ffn_out_buf, _p_device_emb[6], _p_d_alive_seq,
        _p_d_alive_seq_buf, _tw._trg_vocab_size, _tw._topp,
        _p_d_sample_unfinished, _p_d_curandstate, _tw._end_id,
        _logits_clip_max / _quant_range, true);
  }
#ifdef DEBUG_RESULT
  print_vec(_p_d_sample_unfinished, "unfinished flag", 1);
  for (int ii = 0; ii < _batch_size; ii++) {
    print_vec(_p_d_alive_seq + ii * _tw._max_step,
              "Batch token ids: ", _cur_step + 2);
  }
#endif
  // Copy the device-side unfinished flag back; the stream sync is required
  // before reading _h_unfinished on the host.
  CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_sample_unfinished,
                                  sizeof(int), cudaMemcpyDeviceToHost,
                                  _stream));
  CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
  // Idiomatic form of the original `_h_unfinished == 1 ? false : true`.
  return _h_unfinished != 1;
}
template <OperationType OpType_>
bool QuantDecoder<OpType_>::beam_search() {
  /*
    step 1. logits bias and softmax,
      select rough topk candidate for every batch item,
      record the candidate's beam_id, vocab_id and probability
  */
  update_new_seq_probs();
  /* ---step 2. sort the candidate with their probability--- */
  CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_can_num_batch, _p_d_can_num, sizeof(int),
                                  cudaMemcpyDeviceToHost, _stream));
  CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
  if (_tw._diverse_lambda != 0) {
    // Diverse beam search: sort once, apply the diversity penalty to the
    // candidate scores, then fall through to the final sort below.
    thrust::sort_by_key(thrust::cuda::par.on(_stream), _p_d_can_score,
                        _p_d_can_score + _h_can_num_batch, _p_d_can_idx,
                        thrust::greater<float>());
    ker_diverse_beam_search_launcher(_p_d_can_score, _p_d_can_idx, _p_d_can_num,
                                     _step_token_num, _max_thread_per_block,
                                     _stream, _tw._beam_size,
                                     _tw._diverse_lambda, _tw._trg_vocab_size);
  }
  thrust::sort_by_key(thrust::cuda::par.on(_stream), _p_d_can_score,
                      _p_d_can_score + _h_can_num_batch, _p_d_can_idx,
                      thrust::greater<float>());
#ifdef DEBUG_RESULT
  print_vec(_p_d_can_score, "can score", _h_can_num_batch);
  print_vec(_p_d_can_idx, "can idx", _h_can_num_batch);
#endif
  /*
    step 3. refresh alive_seq, seq_probs, seq_score, num_finish_beam
      based on sorted candidate.
      Deciding whether early stop based on num_finish_beam
  */
  CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_can_num, 0, sizeof(int), _stream));
  ker_refresh_result<<<dim3(_batch_size, _tw._beam_size), _tw._max_step, 0,
                       _stream>>>(
      _p_d_can_idx, _p_d_can_score, _p_d_can_num + 1, _p_d_alive_seq,
      _p_d_alive_seq_buf, _p_d_alive_seq_probs, _p_d_alive_seq_score,
      _p_d_can_num, _tw._trg_vocab_size, _cur_step, _h_length_norm[_cur_step],
      _tw._diverse_lambda, _tw._end_id);
  // Ping-pong the alive-sequence buffers: the refreshed sequences were
  // written into _p_d_alive_seq_buf by the kernel above.
  int* tmp = _p_d_alive_seq_buf;
  _p_d_alive_seq_buf = _p_d_alive_seq;
  _p_d_alive_seq = tmp;
  CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_can_num_batch, _p_d_can_num, sizeof(int),
                                  cudaMemcpyDeviceToHost, _stream));
  CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
#ifdef DEBUG_RESULT
  for (int ii = 0; ii < _batch_size; ii++) {
    for (int jj = 0; jj < _tw._beam_size; jj++) {
      print_vec(_p_d_alive_seq + (ii * _tw._beam_size + jj) * _tw._max_step,
                "Batch token ids: ", _cur_step + 2);
      print_vec(_p_d_alive_seq_probs + ii * _tw._beam_size + jj,
                "Batch probs: ", 1);
      print_vec(_p_d_alive_seq_score + ii * _tw._beam_size + jj,
                "Batch scores: ", 1);
    }
  }
#endif
  // Early stop when num_finish_beam (see step-3 comment) equals the total
  // number of beams (_step_token_num).
  if (_h_can_num_batch == _step_token_num) {
#ifdef DEBUG_RESULT
    std::cout << "early stop beam search!" << std::endl;
#endif
    return true;
  }
  /* ---step 4. refresh cache: k, v for decoder self attention--- */
  if (_cur_step > 0) {
    // Reorder the int8 self-attention k/v caches to follow the surviving
    // beams (cache1 -> cache2), then ping-pong the cache pointer arrays.
    ker_refresh_cache_launcher<int8_t>(
        _tw._n_dec_layer, _step_token_num * 2, _max_thread_per_block, _stream,
        _p_d_can_num + 1, _p_d_can_idx, _p_d_self_k_cache1[0],
        _p_d_self_v_cache1[0], _p_d_self_k_cache2[0], _p_d_self_v_cache2[0],
        _layer_size_self_k, _tw._beam_size, _tw._dim_per_head, _tw._head_num,
        _tw._trg_vocab_size, _cur_step, _tw._max_step, _tw._diverse_lambda != 0,
        _tw._end_id);
    int8_t** ftmp = _p_d_self_k_cache2;
    _p_d_self_k_cache2 = _p_d_self_k_cache1;
    _p_d_self_k_cache1 = ftmp;
    ftmp = _p_d_self_v_cache2;
    _p_d_self_v_cache2 = _p_d_self_v_cache1;
    _p_d_self_v_cache1 = ftmp;
  }
  return false;
}
/**
Logits bias and softmax.
Select rough topk candidate for every batch item.
Record the candidate's beam_id, vocab_id and probability
*/
template <OperationType OpType_>
void QuantDecoder<OpType_>::update_new_seq_probs() {
  // _p_d_can_num[0] is zeroed here and accumulated by the kernel; slots
  // [1 .. _step_token_num] appear to hold per-beam candidate counts that
  // the scan below turns into offsets -- see the uses of _p_d_can_num + 1.
  CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_can_num, 0, sizeof(int), _stream));
  select_beam_rough_topk_i8I_launcher(
      _int8_ffn_out_buf, _p_device_emb[6], _p_d_alive_seq_probs,
      _p_d_alive_seq_score, _p_d_alive_seq, _logits_clip_max / _quant_range,
      _p_d_can_idx, _p_d_can_score, _p_d_can_num, _tw._trg_vocab_size,
      _tw._max_step, _h_length_norm[_cur_step], _cur_step, _step_token_num,
      _max_thread_per_block, _stream, _tw._beam_size, _tw._diverse_lambda,
      _tw._end_id, true);
  // Exclusive scan over the per-beam candidate counts -> per-beam offsets.
  thrust::exclusive_scan(thrust::cuda::par.on(_stream), _p_d_can_num + 1,
                         _p_d_can_num + 1 + _step_token_num, _p_d_can_num + 1);
  return;
}
/**
Greedy (top-1) sampling over every beam. The first step delegates to
beam_search() (with the diversity penalty disabled); afterwards each of the
_step_token_num (batch * beam) rows is extended with its argmax token.
Returns true when decoding can stop, false otherwise.
*/
template <OperationType OpType_>
bool QuantDecoder<OpType_>::topk_greedy_search() {
  _tw._diverse_lambda = 0;
  if (_cur_step == 0) {
    return beam_search();
  }
  CHECK_GPU_ERROR(
      cudaMemsetAsync(_p_d_sample_unfinished, 0, sizeof(int), _stream));
  /* --- Sample new tokens from logits --- */
  // topk = 1, i.e. greedy pick, applied independently to every beam row.
  ker_topk_sample_i8I_launcher<_DataType>(
      _step_token_num, (_cur_step + 1), _tw._max_step, 1, _max_thread_per_block,
      _stream, _int8_ffn_out_buf, _p_device_emb[6], _p_d_alive_seq,
      _p_d_alive_seq_buf, _tw._trg_vocab_size, 1, _p_d_sample_unfinished,
      _p_d_curandstate, _tw._end_id, _logits_clip_max / _quant_range, true);
#ifdef DEBUG_RESULT
  print_vec(_p_d_sample_unfinished, "unfinished flag", 1);
  for (int ii = 0; ii < _batch_size; ii++) {
    for (int jj = 0; jj < _tw._beam_size; jj++) {
      print_vec(_p_d_alive_seq + (ii * _tw._beam_size + jj) * _tw._max_step,
                "Batch token ids: ", _cur_step + 2);
    }
  }
#endif
  // Copy the device-side unfinished flag back; sync before the host read.
  CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_sample_unfinished,
                                  sizeof(int), cudaMemcpyDeviceToHost,
                                  _stream));
  CHECK_GPU_ERROR(cudaStreamSynchronize(_stream));
  // Idiomatic form of the original `_h_unfinished == 1 ? false : true`,
  // consistent with sample().
  return _h_unfinished != 1;
}
template class QuantDecoder<OperationType::FP16>;
template class QuantDecoder<OperationType::FP32>;
} // namespace cuda
}  // namespace lightseq
#pragma once
#include <gunrock/app/problem_base.cuh>
#include <gunrock/oprtr/1D_oprtr/for_all.cuh>
// MF includes
#include <gunrock/app/mf/mf_enactor.cuh>
#include <gunrock/app/mf/mf_test.cuh>
#define debug_aml(a...)
// #define debug_aml(a...) {printf("%s:%d ", __FILE__, __LINE__); printf(a);
//                          printf("\n");}
namespace gunrock {
namespace app {
namespace gtf {
/**
 * @brief Specifying parameters for GTF Problem
* @param parameters The util::Parameter<...> structure holding all
* parameter info
* \return cudaError_t error message(s), if any
*/
// Registers GTF-specific command-line parameters on top of the base
// problem parameters. Note: the reference was garbled to `¶meters`
// (HTML-escaped `&parameters`) in the source; restored here.
cudaError_t UseParameters_problem(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(gunrock::app::UseParameters_problem(parameters));
  // TODO: Add problem specific command-line parameter usages here, e.g.:
  // NOTE(review): "mark-pred" looks copied from another app; confirm GTF
  // actually consumes this parameter.
  GUARD_CU(parameters.Use<bool>(
      "mark-pred",
      util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      false, "Whether to mark predecessor info.", __FILE__, __LINE__));
  return retval;
}
/**
 * @brief GTF Problem structure stores device-side arrays
* @tparam _GraphT Type of the graph
* @tparam _ValueT Type of signed integer to use as capacity and flow
of edges and as excess and height values of vertices.
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, typename _ValueT = typename _GraphT::ValueT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::GpT GpT;
typedef _ValueT ValueT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// Helper structures
/**
 * @brief Data structure containing GTF-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
  // GTF-specific storage arrays:
  int num_nodes;
  int num_org_nodes;
  int num_edges;
  double error_threshold;  // = parameters.Get<double>("error_threshold");
  util::Array1D<SizeT, VertexT>
      next_communities;  //= new VertexT[num_nodes]; // nextlabel
  util::Array1D<SizeT, VertexT>
      curr_communities;  //= new VertexT[num_nodes]; // label
  util::Array1D<SizeT, VertexT>
      community_sizes;  //= new VertexT[num_nodes]; // nums
  util::Array1D<SizeT, ValueT>
      community_weights;  //= new ValueT [num_nodes]; // averages
  util::Array1D<SizeT, bool>
      community_active;  //= new bool [num_nodes]; // !inactivelable
  util::Array1D<SizeT, ValueT>
      community_accus;  // = new ValueT [num_nodes]; // values
  util::Array1D<SizeT, bool>
      vertex_active;  // = new bool [num_nodes]; // alive
  util::Array1D<SizeT, bool> vertex_reachabilities;  // = new bool[num_nodes];
  util::Array1D<SizeT, ValueT>
      edge_residuals;  // = new ValueT [num_edges]; // graph
  util::Array1D<SizeT, ValueT>
      edge_flows;  // = new ValueT [num_edges]; // edge flows
  util::Array1D<SizeT, SizeT> active;            // flag active vertices
  util::Array1D<SizeT, VertexT> num_comms;       // current number of communities
  util::Array1D<SizeT, VertexT> previous_num_comms;  // num_comms of prev. iter
  // util::Array1D<SizeT, VertexT> num_comms; // flag active vertices
  util::Array1D<SizeT, SizeT> reverse;  // for storing mf h_reverse
  util::Array1D<SizeT, ValueT> Y;       // per-node result, copied out in Extract()
  SizeT num_updated_vertices;
  VertexT source;  // source vertex
  VertexT sink;    // sink vertex
  /*
   * @brief Default constructor: marks scalars invalid and names every
   * array (names are used by Array1D diagnostics).
   */
  DataSlice() : BaseDataSlice() {
    source = util::PreDefinedValues<VertexT>::InvalidValue;
    sink = util::PreDefinedValues<VertexT>::InvalidValue;
    num_nodes = util::PreDefinedValues<VertexT>::InvalidValue;
    num_org_nodes = util::PreDefinedValues<VertexT>::InvalidValue;
    num_edges = util::PreDefinedValues<VertexT>::InvalidValue;
    num_updated_vertices = 1;
    next_communities.SetName("next_communities");
    curr_communities.SetName("curr_communities");
    community_sizes.SetName("community_sizes");
    community_weights.SetName("community_weights");
    community_active.SetName("community_active");
    community_accus.SetName("community_accus");
    vertex_active.SetName("vertex_active");
    vertex_reachabilities.SetName("vertex_reachabilities");
    edge_residuals.SetName("edge_residuals");
    edge_flows.SetName("edge_flows");
    active.SetName("active");
    num_comms.SetName("num_comms");
    previous_num_comms.SetName("previous_num_comms");
    reverse.SetName("reverse");
    Y.SetName("Y");
  }
  /*
   * @brief Default destructor
   */
  virtual ~DataSlice() { Release(); }
  /*
   * @brief Releasing allocated memory space
   * @param[in] target The location to release memory from
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Release(util::Location target = util::LOCATION_ALL) {
    cudaError_t retval = cudaSuccess;
    if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
    GUARD_CU(next_communities.Release(target));
    GUARD_CU(curr_communities.Release(target));
    GUARD_CU(community_sizes.Release(target));
    GUARD_CU(community_weights.Release(target));
    GUARD_CU(community_active.Release(target));
    GUARD_CU(community_accus.Release(target));
    GUARD_CU(vertex_active.Release(target));
    GUARD_CU(vertex_reachabilities.Release(target));
    GUARD_CU(edge_residuals.Release(target));
    GUARD_CU(edge_flows.Release(target));
    GUARD_CU(BaseDataSlice::Release(target));
    GUARD_CU(active.Release(target));
    GUARD_CU(num_comms.Release(target));
    GUARD_CU(previous_num_comms.Release(target));
    GUARD_CU(reverse.Release(target));
    GUARD_CU(Y.Release(target));
    return retval;
  }
  /**
   * @brief initializing GTF-specific Data Slice a on each gpu
   * @param sub_graph Sub graph on the GPU.
   * @param[in] gpu_idx GPU device index
   * @param[in] target Targeting device location
   * @param[in] flag Problem flag containing options
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0,
                   util::Location target = util::DEVICE,
                   ProblemFlag flag = Problem_None) {
    debug_aml("DataSlice Init");
    cudaError_t retval = cudaSuccess;
    SizeT nodes_size = sub_graph.nodes;
    SizeT edges_size = sub_graph.edges;
    GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
    // Allocate data on Gpu
    GUARD_CU(next_communities.Allocate(nodes_size, target));
    GUARD_CU(curr_communities.Allocate(nodes_size, target));
    GUARD_CU(community_sizes.Allocate(nodes_size, target));
    GUARD_CU(community_weights.Allocate(nodes_size, target));
    GUARD_CU(community_active.Allocate(nodes_size, target));
    GUARD_CU(community_accus.Allocate(nodes_size, target));
    GUARD_CU(vertex_active.Allocate(nodes_size, target));
    GUARD_CU(vertex_reachabilities.Allocate(nodes_size, target));
    GUARD_CU(edge_residuals.Allocate(edges_size, target));
    GUARD_CU(edge_flows.Allocate(edges_size, target));
    // active is read on both host and device; reverse only on host.
    GUARD_CU(active.Allocate(1, util::HOST | target));
    GUARD_CU(num_comms.Allocate(1, target));
    GUARD_CU(previous_num_comms.Allocate(1, target));
    GUARD_CU(reverse.Allocate(edges_size, util::HOST));
    GUARD_CU(Y.Allocate(nodes_size, target));
    GUARD_CU(util::SetDevice(gpu_idx));
    GUARD_CU(sub_graph.Move(util::HOST, target, this->stream));
    return retval;
  }  // Init Data Slice
  /**
   * @brief Reset DataSlice function. Must be called prior to each run.
   * @param[in] target Targeting device location
   * \return cudaError_t Error message(s), if any
   */
  cudaError_t Reset(const GraphT &graph, ValueT *h_community_accus,
                    util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    typedef typename GraphT::CsrT CsrT;
    debug_aml("DataSlice Reset");
    SizeT nodes_size = graph.nodes;
    SizeT edges_size = graph.edges;
    // Ensure data are allocated
    // NOTE(review): Y is not re-sized here (only in Init) -- confirm Reset
    // is never called with a larger graph than Init.
    GUARD_CU(next_communities.EnsureSize_(nodes_size, target));
    GUARD_CU(curr_communities.EnsureSize_(nodes_size, target));
    GUARD_CU(community_sizes.EnsureSize_(nodes_size, target));
    GUARD_CU(community_weights.EnsureSize_(nodes_size, target));
    GUARD_CU(community_active.EnsureSize_(nodes_size, target));
    GUARD_CU(community_accus.EnsureSize_(nodes_size, target));
    GUARD_CU(vertex_active.EnsureSize_(nodes_size, target));
    GUARD_CU(vertex_reachabilities.EnsureSize_(nodes_size, target));
    GUARD_CU(edge_residuals.EnsureSize_(edges_size, target));
    GUARD_CU(edge_flows.EnsureSize_(edges_size, target));
    GUARD_CU(active.EnsureSize_(1, target | util::HOST));
    GUARD_CU(num_comms.EnsureSize_(1, target));
    GUARD_CU(previous_num_comms.EnsureSize_(1, target));
    GUARD_CU(reverse.EnsureSize_(edges_size, util::HOST));
    GUARD_CU(util::SetDevice(this->gpu_idx));
    ///////////////////////////////
    // Presumably the graph carries two appended nodes (source/sink) and
    // 2 * num_org_nodes terminal edges -- TODO confirm with the builder.
    num_org_nodes = graph.nodes - 2;
    SizeT offset = graph.edges - num_org_nodes * 2;
    // NOTE(review): leftover debug printf; %d may mismatch if SizeT is
    // 64-bit.
    printf("offset is %d num edges %d \n", offset, edges_size);
    // bool* h_vertex_active = (bool*)malloc(sizeof(bool)*graph.edges);
    // bool* h_community_active = (bool*)malloc(sizeof(bool)*graph.nodes);
    // VertexT* h_curr_communities =
    // (VertexT*)malloc(sizeof(VertexT)*graph.nodes); VertexT*
    // h_next_communities = (VertexT*)malloc(sizeof(VertexT)*graph.nodes); for
    // (VertexT v = 0; v < num_org_nodes; v++)
    // {
    //     h_vertex_active   [v] = true;
    //     h_community_active[v] = true;
    //     h_curr_communities[v] = 0;
    //     h_next_communities[v] = 0; //extra
    // }
    // Initialize per-vertex state on the device: everything active, all
    // vertices in community 0, nothing reachable yet.
    GUARD_CU(vertex_active.ForAll(
        [] __host__ __device__(bool *v_active, const SizeT &pos) {
          v_active[pos] = true;
        },
        graph.nodes, target, this->stream));
    GUARD_CU(community_active.ForAll(
        [] __host__ __device__(bool *c_active, const SizeT &pos) {
          c_active[pos] = true;
        },
        graph.nodes, target, this->stream));
    GUARD_CU(curr_communities.ForAll(
        [] __host__ __device__(VertexT * c_communities, const SizeT &pos) {
          c_communities[pos] = 0;
        },
        graph.nodes, target, this->stream));
    GUARD_CU(next_communities.ForAll(
        [] __host__ __device__(VertexT * n_communities, const SizeT &pos) {
          n_communities[pos] = 0;
        },
        graph.nodes, target, this->stream));
    GUARD_CU(vertex_reachabilities.ForAll(
        [] __host__ __device__(bool *vertex_reachabilities,
                               const SizeT &pos) {
          vertex_reachabilities[pos] = 0;
        },
        graph.nodes, target, this->stream));
    // GUARD_CU(community_accus.ForAll([h_community_accus]
    //   __host__ __device__(ValueT *community_accus, const SizeT &pos)
    //   {
    //     community_accus[0] = h_community_accus[0];
    //   }, 1, target, this -> stream));
    // GUARD_CU(vertex_active.SetPointer(h_vertex_active, num_org_nodes,
    // util::HOST)); GUARD_CU(vertex_active.Move(util::HOST, target,
    // num_org_nodes, 0, this->stream));
    // GUARD_CU(community_active.SetPointer(h_community_active, num_org_nodes,
    // util::HOST)); GUARD_CU(community_active.Move(util::HOST, target,
    // num_org_nodes, 0, this->stream));
    // GUARD_CU(curr_communities.SetPointer(h_curr_communities, num_org_nodes,
    // util::HOST)); GUARD_CU(curr_communities.Move(util::HOST, target,
    // num_org_nodes, 0, this->stream));
    // GUARD_CU(next_communities.SetPointer(h_next_communities, num_org_nodes,
    // util::HOST)); GUARD_CU(next_communities.Move(util::HOST, target,
    // num_org_nodes, 0, this->stream));
    //
    // NOTE(review): leftover debug printf.
    printf("h_community_accus is %f \n", h_community_accus[0]);
    // Seed community_accus from the host-provided values.
    GUARD_CU(community_accus.SetPointer(h_community_accus, graph.nodes,
                                        util::HOST));
    GUARD_CU(community_accus.Move(util::HOST, target, graph.nodes, 0,
                                  this->stream));
    this->num_updated_vertices = 1;
    // Start with the active flag raised and a single community.
    GUARD_CU(active.ForAll(
        [] __host__ __device__(SizeT * active_, const VertexT &pos) {
          active_[pos] = 1;
        },
        1, target, this->stream));
    GUARD_CU(num_comms.ForAll(
        [] __host__ __device__(SizeT * num_comm, const VertexT &pos) {
          num_comm[pos] = 1;
        },
        1, target, this->stream));
    //////////////////////////////
    // GUARD_CU(reverse.SetPointer(h_reverse, edges_size, util::HOST));
    // GUARD_CU(reverse.Move(util::HOST, target, edges_size, 0,
    // this->stream));
    GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed.");
    // free(h_vertex_active);
    // free(h_community_active);
    // free(h_curr_communities);
    // free(h_next_communities);
    return retval;
  }
};  // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
typedef mf::Problem<GraphT, ValueT, FLAG> MfProblemT;
MfProblemT mf_problem;
// Methods
/**
* @brief GTFProblem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
    : BaseProblem(_parameters, _flag),
      mf_problem(_parameters, _flag),  // embedded max-flow sub-problem
      data_slices(NULL) {}
/**
* @brief GTFProblem default destructor
*/
virtual ~Problem() { Release(); }  // releases all slices and the mf problem
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
  cudaError_t retval = cudaSuccess;
  if (data_slices == NULL) return retval;
  // NOTE(review): mf_problem's slices are released per-GPU here AND again
  // inside mf_problem.Release() below -- presumably Array1D::Release is
  // idempotent; confirm.
  for (int i = 0; i < this->num_gpus; i++) {
    GUARD_CU(data_slices[i].Release(target));
    GUARD_CU(mf_problem.data_slices[i].Release(target));
  }
  // Delete the host-side slice array only once the device copies are gone.
  if ((target & util::HOST) != 0 &&
      data_slices[0].GetPointer(util::DEVICE) == NULL) {
    delete[] data_slices;
    data_slices = NULL;
  }
  GUARD_CU(mf_problem.Release(target));
  GUARD_CU(BaseProblem::Release(target));
  return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result flow computed on GPUs back to host-side arrays.
* @param[out] h_flow Host array to store computed flow on edges
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(ValueT *h_Y, ValueT *edge_values,
                    util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  auto &data_slice = data_slices[0][0];  // single-GPU extraction only
  SizeT vN = this->org_graph->nodes;
  SizeT vE = this->org_graph->edges;
  // Set device
  if (target == util::DEVICE) {
    // NOTE(review): leftover debug printf; %d may mismatch if SizeT is
    // 64-bit.
    printf("transfering to host!!!: %d \n", vN);
    GUARD_CU(util::SetDevice(this->gpu_idx[0]));
    // Copy the per-node result Y and the edge residuals into the
    // caller-provided host buffers.
    GUARD_CU(data_slice.Y.SetPointer(h_Y, vN, util::HOST));
    GUARD_CU(data_slice.Y.Move(util::DEVICE, util::HOST));
    GUARD_CU(util::SetDevice(this->gpu_idx[0]));
    GUARD_CU(
        data_slice.edge_residuals.SetPointer(edge_values, vE, util::HOST));
    GUARD_CU(data_slice.edge_residuals.Move(util::DEVICE, util::HOST));
  } else if (target == util::HOST) {
    // NOTE(review): the HOST path copies only Y; edge_values is left
    // untouched -- confirm callers never rely on it with target == HOST.
    GUARD_CU(data_slice.Y.ForEach(
        h_Y,
        [] __host__ __device__(const ValueT &f, ValueT &h_f) {
          { h_f = f; }
        },
        vN, util::HOST));
  }
  return retval;
}
/**
 * @brief Init GTF Problem
 * @param graph The graph that GTF processes on
 * @param[in] target Memory location to work on
 * \return cudaError_t Error message(s), if any
 */
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
  debug_aml("Problem Init");
  cudaError_t retval = cudaSuccess;
  GUARD_CU(BaseProblem::Init(graph, target));
  // One DataSlice wrapper per GPU; the embedded MF sub-problem is
  // initialized on the same graph.
  data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
  GUARD_CU(mf_problem.Init(graph, target));
  for (int gpu = 0; gpu < this->num_gpus; gpu++) {
    auto gpu_name = std::to_string(gpu);
    data_slices[gpu].SetName("data_slices[" + gpu_name + "]");
    if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
    // Allocate one DataSlice on both host and the requested target.
    GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
    auto &data_slice = data_slices[gpu][0];
    GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
                             this->gpu_idx[gpu], target, this->flag));
    GUARD_CU2(cudaStreamSynchronize(data_slices[gpu]->stream),
              "sync failed.");
  }  // end for (gpu)
  return retval;
}  // End Init GTF Problem
/**
 * @brief Reset Problem function. Must be called prior to each run.
 * @param[in] graph             Graph to reset the problem on
 * @param[in] h_community_accus Host array of community accumulators
 * @param[in] h_reverse         Host array mapping each edge to its reverse edge
 * @param[in] target            Memory location to work on
 * \return cudaError_t Error message(s), if any
 */
cudaError_t Reset(GraphT &graph, ValueT *h_community_accus, SizeT *h_reverse,
                  util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  debug_aml("Problem Reset");
  // Copy the reverse-edge mapping into the (host-side) slice array; it is
  // moved to the target below together with the rest of the slice.
  auto &reverse = data_slices[0][0].reverse;
  for (auto i = 0; i < graph.edges; i++) {
    reverse[i] = h_reverse[i];
  }
  // GTF convention: the two extra appended nodes are source and sink.
  auto source_vertex = graph.nodes - 2;
  auto sink_vertex = graph.nodes - 1;
  for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
    auto &data_slice = data_slices[gpu][0];
    data_slice.source = source_vertex;
    data_slice.sink = sink_vertex;
    // Set device
    if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
    GUARD_CU(data_slices[gpu]->Reset(graph, h_community_accus, target));
    GUARD_CU(data_slices[gpu].Move(util::HOST, target));
  }
  // Filling the initial input_queue for GTF problem
  int gpu;
  VertexT src_;
  if (this->num_gpus <= 1) {
    gpu = 0;
    src_ = source_vertex;
  } else {
    // Multi-GPU: find which GPU owns the source and its local vertex id.
    gpu = this->org_graph->partition_table[source_vertex];
    if (this->flag & partitioner::Keep_Node_Num)
      src_ = source_vertex;
    else
      src_ = this->org_graph->GpT::convertion_table[source_vertex];
  }
  // NOTE(review): src_ is computed but never used below — looks like the
  // queue-filling step was removed or is done elsewhere; confirm.
  GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
  GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
  return retval;
}
/** @} */
};
} // namespace gtf
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "array.hpp"
#include "functors.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "vector_traits.hpp"
#include "kernel_dispatcher.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include <opencv2/core.hpp>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
/* Vectorized elementwise kernel: each grid-stride iteration loads one vector
 * of N elements from both inputs, applies the binary operator lane by lane,
 * and stores the result. All tensors must be aligned for width-N accesses. */
template <class T, class EltwiseOp, std::size_t N>
__global__ void eltwise_op_vec(Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params params) {
    using vector_type = get_vector_type_t<T, N>;

    auto dst_ptr = vector_type::get_pointer(output.data());
    auto src1_ptr = vector_type::get_pointer(x.data());
    auto src2_ptr = vector_type::get_pointer(y.data());

    EltwiseOp op(params);
    const auto num_vectors = output.size() / vector_type::size();
    for (auto i : grid_stride_range(num_vectors)) {
        vector_type lhs, rhs;
        v_load(lhs, src1_ptr[i]);
        v_load(rhs, src2_ptr[i]);

        for (int j = 0; j < vector_type::size(); j++)
            lhs.data[j] = op(lhs.data[j], rhs.data[j]);

        v_store(dst_ptr[i], lhs);
    }
}
/* Generic broadcasting elementwise kernel.
 *
 * Strides are exclusive products of trailing dimensions (innermost stride is
 * one), as built by launch_eltwise_op_bcast's caller. For each output element
 * the per-axis coordinate is recovered from the flat index, and the input
 * offsets accumulate that coordinate times the input stride — unless the
 * input broadcasts along that axis (bcast flag set), in which case the axis
 * contributes nothing to the input offset. */
template <class T, class EltwiseOp, std::size_t Rank>
__global__ void eltwise_op_bcast(
    Span<T> output, array<size_type, Rank> out_strides,
    View<T> x, array<size_type, Rank> x_strides, array<bool, Rank> x_bcast,
    View<T> y, array<size_type, Rank> y_strides, array<bool, Rank> y_bcast,
    const typename EltwiseOp::Params params) {
    EltwiseOp eltwise_op(params);
    for (auto i : grid_stride_range(output.size())) {
        /* coordinate along axis 0 */
        index_type out_index = i / out_strides[0];
        index_type x_index = x_bcast[0] ? 0 : out_index * x_strides[0];
        index_type y_index = y_bcast[0] ? 0 : out_index * y_strides[0];
        for (int j = 1; j < Rank; j++)
        {
            /* coordinate along axis j: remainder within axis j-1, divided by
             * the span of one step along axis j */
            out_index = (i % out_strides[j - 1]) / out_strides[j];
            if (!x_bcast[j])
                x_index += out_index * x_strides[j];
            if (!y_bcast[j])
                y_index += out_index * y_strides[j];
        }
        output[i] = eltwise_op(x[x_index], y[y_index]);
    }
}
}
/* Host-side launcher for the vectorized elementwise kernel.
 * Preconditions: all three tensors have identical sizes and are fully aligned
 * for vector accesses of width N. */
template <class T, class EltwiseOp, std::size_t N> static
void launch_vectorized_eltwise_op(const Stream& stream, Span<T> output, View<T> x, View<T> y, const typename EltwiseOp::Params& params) {
    CV_Assert(x.size() == y.size());
    CV_Assert(x.size() == output.size());
    CV_Assert(is_fully_aligned<T>(output, N));
    CV_Assert(is_fully_aligned<T>(x, N));
    CV_Assert(is_fully_aligned<T>(y, N));

    /* one work-item per vector of N elements */
    auto kernel_fn = raw::eltwise_op_vec<T, EltwiseOp, N>;
    auto launch_policy = make_policy(kernel_fn, output.size() / N, 0, stream);
    launch_kernel(kernel_fn, launch_policy, output, x, y, params);
}
/* Host-side launcher for the broadcasting elementwise kernel.
 * Copies the host-side stride/broadcast vectors into fixed-size arrays that
 * can be passed to the kernel by value; all vectors must have length Rank. */
template <class T, class EltwiseOp, std::size_t Rank> static
void launch_eltwise_op_bcast(
    const Stream& stream,
    Span<T> output, const std::vector<std::size_t>& outStride,
    View<T> x, const std::vector<std::size_t>& inStride1, const std::vector<int>& inBcast1,
    View<T> y, const std::vector<std::size_t>& inStride2, const std::vector<int>& inBcast2,
    const typename EltwiseOp::Params& params)
{
    CV_Assert(outStride.size() == Rank);
    CV_Assert(inStride1.size() == Rank);
    CV_Assert(inStride2.size() == Rank);
    CV_Assert(inBcast1.size() == Rank);
    CV_Assert(inBcast2.size() == Rank);

    array<size_type, Rank> strides_out, strides_x, strides_y;
    strides_out.assign(std::begin(outStride), std::end(outStride));
    strides_x.assign(std::begin(inStride1), std::end(inStride1));
    strides_y.assign(std::begin(inStride2), std::end(inStride2));

    array<bool, Rank> bcast_x, bcast_y;
    bcast_x.assign(std::begin(inBcast1), std::end(inBcast1));
    bcast_y.assign(std::begin(inBcast2), std::end(inBcast2));

    auto kernel = raw::eltwise_op_bcast<T, EltwiseOp, Rank>;
    auto policy = make_policy(kernel, output.size(), 0, stream);
    launch_kernel(kernel, policy, output, strides_out, x, strides_x, bcast_x, y, strides_y, bcast_y, params);
}
/* Generates a runtime-rank dispatcher that forwards to the Rank-templated
 * launch_eltwise_op_bcast instantiations. */
GENERATE_KERNEL_DISPATCHER_2TP(eltwise_op_bcast_dispatcher, launch_eltwise_op_bcast);
/* Dispatches an elementwise binary operation over two input tensors.
 *
 * When all three shapes match exactly, the fast vectorized kernel is used
 * (widest vector width permitted by alignment). Otherwise the shapes are
 * normalized — matching singleton axes squeezed, compatible contiguous axes
 * merged — and the broadcasting kernel is dispatched on the reduced rank. */
template <class T, class EltwiseOp> static
void eltwise_op(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y, const typename EltwiseOp::Params& params = {}) {
    if (is_shape_same(output, x) && is_shape_same(output, y))
    {
        /* no broadcasting; use fast path */
        CV_Assert(x.size() == y.size());
        CV_Assert(x.size() == output.size());

        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) {
            launch_vectorized_eltwise_op<T, EltwiseOp, 4>(stream, output, x, y, params);
        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) {
            launch_vectorized_eltwise_op<T, EltwiseOp, 2>(stream, output, x, y, params);
        } else {
            launch_vectorized_eltwise_op<T, EltwiseOp, 1>(stream, output, x, y, params);
        }
    }
    else
    {
        CV_Assert(is_shape_compatible(output, x));
        CV_Assert(is_shape_compatible(output, y));

        /* matching singleton axes in both input tensors can be eliminated
         *
         * Reasoning:
         * ----------
         * Singleton axes do not contribute towards address calculation. They are redundant
         * unless there is broadcasting. If both input tensors have singleton axis at a
         * specified position, there is no broadcasting on that axis.
         *
         * Example:
         * ---------
         *     x: [1, 256, 32, 32] -> [256, 32, 32]
         *     y: [1, 256, 1, 1] -> [256, 1, 1]
         */
        for (int r = 0; r < output.rank(); r++)
        {
            while (x.get_axis_size(r) == 1 && y.get_axis_size(r) == 1) {
                CV_Assert(output.get_axis_size(r) == 1);
                x.squeeze(r);
                y.squeeze(r);
                output.squeeze(r);
            }
        }

        auto inShape1 = x.shape_as_vector();
        auto inShape2 = y.shape_as_vector();
        auto outShape = output.shape_as_vector();

        /* contiguous axes that do not broadcast can be merged into one axis
         *
         * Example:
         * ---------
         *    x: [32, 8, 8] -> [32, 64]
         *    y: [1, 8, 8] -> [1, 64]
         */
        for (int i = 0; i < inShape1.size(); i++) {
            /* check if axis `i` requires any broadcasting */
            if (inShape1[i] == inShape2[i]) {
                /* loop invariant: `i` is the first axis in the contiguous axis sequence */

                int j = i + 1; /* `j` is the axis which we will attempt to merge */
                while (j < inShape1.size() && inShape1[j] == inShape2[j]) {
                    CV_Assert(outShape[j] == inShape1[j]);

                    /* `j` axis is also used fully; merge `i` and `j` */
                    auto new_size = inShape1[i] * inShape1[j];
                    inShape1[i] = new_size;
                    inShape2[i] = new_size;

                    /* delete axis `j` */
                    inShape1.erase(std::begin(inShape1) + j);
                    inShape2.erase(std::begin(inShape2) + j);
                    outShape.erase(std::begin(outShape) + j);

                    /* optimizations should not break the invariants */
                    CV_Assert(inShape1.size() == outShape.size());
                    CV_Assert(inShape2.size() == outShape.size());
                    CV_Assert(inShape1[i] == outShape[i]);
                    CV_Assert(inShape2[i] == outShape[i]);
                }
            }
        }

        /* contiguous broadcasting axes on the same tensor can be merged into one axis
         *
         * Example:
         * ---------
         *    x: [256, 8, 8] -> [256, 64]
         *    y: [256, 1, 1] -> [256, 1]
         */
        for (int i = 0; i < inShape1.size(); i++) {
            /* check if axis `i` requires any broadcasting in tensor 1 */
            if (inShape1[i] == 1 && inShape2[i] != 1) {
                /* loop invariant: `i` is the first axis in the contiguous axis sequence */

                int j = i + 1; /* `j` is the axis which we will attempt to merge */
                while (j < inShape1.size() && inShape1[j] == 1 && inShape2[j] != 1) {
                    CV_Assert(outShape[j] == inShape2[j]);

                    /* `j` axis is also used fully; merge `i` and `j` */
                    inShape1[i] = 1;
                    inShape2[i] = inShape2[i] * inShape2[j];
                    outShape[i] = inShape2[i];

                    /* delete axis `j` */
                    inShape1.erase(std::begin(inShape1) + j);
                    inShape2.erase(std::begin(inShape2) + j);
                    outShape.erase(std::begin(outShape) + j);

                    /* optimizations should not break the invariants */
                    CV_Assert(inShape1.size() == outShape.size());
                    CV_Assert(inShape2.size() == outShape.size());
                    CV_Assert(inShape1[i] == 1);
                    CV_Assert(inShape2[i] == outShape[i]);
                }
            }

            /* check if axis `i` requires any broadcasting in tensor 2 */
            if (inShape1[i] != 1 && inShape2[i] == 1) {
                /* loop invariant: `i` is the first axis in the contiguous axis sequence */

                int j = i + 1; /* `j` is the axis which we will attempt to merge */
                while (j < inShape1.size() && inShape1[j] != 1 && inShape2[j] == 1) {
                    CV_Assert(outShape[j] == inShape1[j]);

                    /* `j` axis is also used fully; merge `i` and `j` */
                    inShape1[i] = inShape1[i] * inShape1[j];
                    inShape2[i] = 1;
                    outShape[i] = inShape1[i];

                    /* delete axis `j` */
                    inShape1.erase(std::begin(inShape1) + j);
                    inShape2.erase(std::begin(inShape2) + j);
                    outShape.erase(std::begin(outShape) + j);

                    /* optimizations should not break the invariants */
                    CV_Assert(inShape1.size() == outShape.size());
                    CV_Assert(inShape2.size() == outShape.size());
                    CV_Assert(inShape1[i] == outShape[i]);
                    CV_Assert(inShape2[i] == 1);
                }
            }
        }

        /* build exclusive trailing-products so that stride[k] is the number of
         * elements spanned by one step along axis k (innermost stride is one) */
        auto rank = outShape.size();
        std::vector<std::size_t> inStride1(rank), inStride2(rank), outStride(rank);
        inStride1.back() = 1;
        inStride2.back() = 1;
        outStride.back() = 1;
        /* garbage, ..., garbage, 1 */

        std::copy(std::begin(inShape1) + 1, std::end(inShape1), std::begin(inStride1));
        std::copy(std::begin(inShape2) + 1, std::end(inShape2), std::begin(inStride2));
        std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));
        /* dim[0], dim[1], ..., dim[-1], 1 */

        std::partial_sum(inStride1.rbegin(), inStride1.rend(), inStride1.rbegin(), std::multiplies<std::size_t>());
        std::partial_sum(inStride2.rbegin(), inStride2.rend(), inStride2.rbegin(), std::multiplies<std::size_t>());
        std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());
        /* stride[0], stride[1], ..., stride[-2], 1 */

        /* an axis broadcasts in an input iff its (post-merge) size is one */
        std::vector<int> inBcast1(rank), inBcast2(rank);
        std::transform(std::begin(inShape1), std::end(inShape1), std::begin(inBcast1), [](std::size_t sz) { return sz == 1; });
        std::transform(std::begin(inShape2), std::end(inShape2), std::begin(inBcast2), [](std::size_t sz) { return sz == 1; });

        CV_Assert(1 <= rank && rank <= CSL_MAX_TENSOR_RANK);
        eltwise_op_bcast_dispatcher<T, EltwiseOp, 1, CSL_MAX_TENSOR_RANK>(rank, stream, output, outStride, x, inStride1, inBcast1, y, inStride2, inBcast2, params);
    }
}
/* Public entry points: thin wrappers selecting the functor for each
 * elementwise binary operation. */

/* output[i] = max(x[i], y[i]) (with broadcasting) */
template <class T>
void eltwise_max_2(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y) {
    eltwise_op<T, MaxFunctor<T>>(stream, output, x, y);
}

/* output[i] = min(x[i], y[i]) (with broadcasting) */
template <class T>
void eltwise_min_2(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y) {
    eltwise_op<T, MinFunctor<T>>(stream, output, x, y);
}

/* output[i] = x[i] + y[i] (with broadcasting) */
template <class T>
void eltwise_sum_2(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y) {
    eltwise_op<T, SumFunctor<T>>(stream, output, x, y);
}

/* output[i] = coeff_x * x[i] + coeff_y * y[i] (with broadcasting) */
template <class T>
void eltwise_sum_coeff_2(const Stream& stream, TensorSpan<T> output, T coeff_x, TensorView<T> x, T coeff_y, TensorView<T> y) {
    eltwise_op<T, ScaledSumFunctor<T>>(stream, output, x, y, {coeff_x, coeff_y});
}

/* output[i] = x[i] * y[i] (with broadcasting) */
template <class T>
void eltwise_prod_2(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y) {
    eltwise_op<T, ProductFunctor<T>>(stream, output, x, y);
}

/* output[i] = x[i] / y[i] (with broadcasting) */
template <class T>
void eltwise_div_2(const Stream& stream, TensorSpan<T> output, TensorView<T> x, TensorView<T> y) {
    eltwise_op<T, DivFunctor<T>>(stream, output, x, y);
}
/* Explicit instantiations. __half arithmetic requires compute capability 5.3+,
 * hence the guard around the half-precision instantiations. */
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void eltwise_div_2(const Stream& stream, TensorSpan<__half> output, TensorView<__half> x, TensorView<__half> y);
template void eltwise_prod_2(const Stream& stream, TensorSpan<__half> output, TensorView<__half> x, TensorView<__half> y);
template void eltwise_sum_coeff_2(const Stream&, TensorSpan<__half>, __half, TensorView<__half>, __half, TensorView<__half>);
template void eltwise_sum_2(const Stream& stream, TensorSpan<__half> output, TensorView<__half> x, TensorView<__half> y);
template void eltwise_max_2(const Stream& stream, TensorSpan<__half> output, TensorView<__half> x, TensorView<__half> y);
template void eltwise_min_2(const Stream& stream, TensorSpan<__half> output, TensorView<__half> x, TensorView<__half> y);
#endif
template void eltwise_div_2(const Stream& stream, TensorSpan<float> output, TensorView<float> x, TensorView<float> y);
template void eltwise_prod_2(const Stream& stream, TensorSpan<float> output, TensorView<float> x, TensorView<float> y);
template void eltwise_sum_coeff_2(const Stream&, TensorSpan<float>, float, TensorView<float>, float, TensorView<float>);
template void eltwise_sum_2(const Stream& stream, TensorSpan<float> output, TensorView<float> x, TensorView<float> y);
template void eltwise_max_2(const Stream& stream, TensorSpan<float> output, TensorView<float> x, TensorView<float> y);
template void eltwise_min_2(const Stream& stream, TensorSpan<float> output, TensorView<float> x, TensorView<float> y);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace detail {
/**
 * @brief Merges an array of equally-sized bitmasks into @p destination using a
 * binary operator, iterating over destination words in a grid-stride loop.
 *
 * @param op The binary operator used to combine two bitmask words
 * @param destination The bitmask to write the result into
 * @param source Array of source mask pointers; all masks must be of same size
 * @param source_begin_bits Bit offset into each corresponding @p source mask;
 * must be same size as the source array
 * @param source_size_bits Number of bits in each mask in @p source
 */
template <typename Binop>
__global__ void offset_bitmask_binop(Binop op,
                                     device_span<bitmask_type> destination,
                                     device_span<bitmask_type const*> source,
                                     device_span<size_type const> source_begin_bits,
                                     size_type source_size_bits)
{
  for (size_type word_idx = threadIdx.x + blockIdx.x * blockDim.x; word_idx < destination.size();
       word_idx += blockDim.x * gridDim.x) {
    // seed the accumulator with the first mask's word, then fold in the rest
    bitmask_type merged = detail::get_mask_offset_word(
      source[0], word_idx, source_begin_bits[0], source_begin_bits[0] + source_size_bits);
    for (size_type i = 1; i < source.size(); i++) {
      bitmask_type const word = detail::get_mask_offset_word(
        source[i], word_idx, source_begin_bits[i], source_begin_bits[i] + source_size_bits);
      merged = op(merged, word);
    }
    destination[word_idx] = merged;
  }
}
/**
 * @copydoc bitmask_binop(Binop op, host_span<bitmask_type const *> const, host_span<size_type>
 * const, size_type, rmm::mr::device_memory_resource *)
 *
 * Allocates a new bitmask of @p mask_size_bits bits from @p mr and fills it
 * with the merge of @p masks via inplace_bitmask_binop.
 *
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
template <typename Binop>
rmm::device_buffer bitmask_binop(
  Binop op,
  host_span<bitmask_type const*> masks,
  host_span<size_type const> masks_begin_bits,
  size_type mask_size_bits,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  // `mr` is used only for this returned buffer; scratch memory is handled
  // inside inplace_bitmask_binop.
  auto dest_mask = rmm::device_buffer{bitmask_allocation_size_bytes(mask_size_bits), stream, mr};

  // NOTE(review): inplace_bitmask_binop is declared after this function in
  // this header; this resolves at instantiation time, but a forward
  // declaration (or reordering) would be more robust — confirm.
  inplace_bitmask_binop(op,
                        device_span<bitmask_type>(static_cast<bitmask_type*>(dest_mask.data()),
                                                  num_bitmask_words(mask_size_bits)),
                        masks,
                        masks_begin_bits,
                        mask_size_bits,
                        stream,
                        mr);

  return dest_mask;
}
/**
 * @brief Performs a merge of the specified bitmasks using the binary operator
 * provided, and writes in place to destination
 *
 * @param op The binary operator used to combine the bitmasks
 * @param dest_mask Destination to which the merged result is written
 * @param masks The list of data pointers of the bitmasks to be merged
 * @param masks_begin_bits The bit offsets from which each mask is to be merged
 * @param mask_size_bits The number of bits to be merged from each mask
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource (kept for interface compatibility; this
 * function writes into @p dest_mask and returns no allocation, so temporary
 * buffers intentionally use the default resource instead)
 */
template <typename Binop>
void inplace_bitmask_binop(
  Binop op,
  device_span<bitmask_type> dest_mask,
  host_span<bitmask_type const*> masks,
  host_span<size_type const> masks_begin_bits,
  size_type mask_size_bits,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  CUDF_EXPECTS(
    std::all_of(masks_begin_bits.begin(), masks_begin_bits.end(), [](auto b) { return b >= 0; }),
    "Invalid range.");
  CUDF_EXPECTS(mask_size_bits > 0, "Invalid bit range.");
  CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }),
               "Mask pointer cannot be null");

  // Copy the host-side pointer/offset lists to the device. Using
  // make_device_uvector_async (as segmented_count_set_bits in this header
  // does) instead of raw cudaMemcpyAsync from pageable host memory; these are
  // scratch allocations, not results returned to the caller, so they use the
  // default memory resource rather than `mr`.
  auto d_masks      = make_device_uvector_async(masks, stream);
  auto d_begin_bits = make_device_uvector_async(masks_begin_bits, stream);

  cudf::detail::grid_1d config(dest_mask.size(), 256);
  offset_bitmask_binop<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
    op, dest_mask, d_masks, d_begin_bits, mask_size_bits);
  CHECK_CUDA(stream.value());  // surface launch errors

  // Block until the merge completes before the scratch buffers go out of scope.
  stream.synchronize();
}
/**
 * For each range `[first_bit_indices[i], last_bit_indices[i])`
 * (where 0 <= i < `num_ranges`), count the number of bits set outside the range
 * in the boundary words (i.e. words that include either
 * `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and
 * subtract the count from the range's null count.
 *
 * The preceding segmented reduction counts whole words, so ranges that do not
 * start/end on a word boundary are over-counted; this kernel removes the
 * slack-bit contribution.
 *
 * Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`.
 *
 * @param[in] bitmask The bitmask whose non-zero bits outside the range in the
 * boundary words will be counted.
 * @param[in] num_ranges The number of ranges
 * @param[in] first_bit_indices The indices (inclusive) of the first bit in each
 * range
 * @param[in] last_bit_indices The indices (exclusive) of the last bit in each
 * range
 * @param[in,out] null_counts The number of non-zero bits in each range to be
 * updated
 */
template <typename OffsetIterator, typename OutputIterator>
__global__ void subtract_set_bits_range_boundaries_kernel(bitmask_type const* bitmask,
                                                          size_type num_ranges,
                                                          OffsetIterator first_bit_indices,
                                                          OffsetIterator last_bit_indices,
                                                          OutputIterator null_counts)
{
  constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()};

  cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x;
  cudf::size_type range_id  = tid;

  // grid-stride loop over ranges
  while (range_id < num_ranges) {
    size_type const first_bit_index = *(first_bit_indices + range_id);
    size_type const last_bit_index  = *(last_bit_indices + range_id);
    size_type delta                 = 0;
    size_type num_slack_bits        = 0;

    // compute delta due to the preceding bits in the first word in the range
    num_slack_bits = intra_word_index(first_bit_index);
    if (num_slack_bits > 0) {
      bitmask_type word       = bitmask[word_index(first_bit_index)];
      bitmask_type slack_mask = set_least_significant_bits(num_slack_bits);
      delta -= __popc(word & slack_mask);
    }
    // compute delta due to the following bits in the last word in the range
    num_slack_bits = (last_bit_index % word_size_in_bits) == 0
                       ? 0
                       : word_size_in_bits - intra_word_index(last_bit_index);
    if (num_slack_bits > 0) {
      bitmask_type word       = bitmask[word_index(last_bit_index)];
      bitmask_type slack_mask = set_most_significant_bits(num_slack_bits);
      delta -= __popc(word & slack_mask);
    }

    // `delta` is <= 0: it removes the over-count from the boundary words
    size_type updated_null_count = *(null_counts + range_id) + delta;
    *(null_counts + range_id)    = updated_null_count;

    range_id += blockDim.x * gridDim.x;
  }
}
// convert [first_bit_index,last_bit_index) to
// [first_word_index,last_word_index)
// NOTE(review): thrust::unary_function is deprecated in newer Thrust/CCCL;
// the base class provides only typedefs and could be dropped — confirm
// nothing depends on argument_type/result_type.
struct to_word_index : public thrust::unary_function<size_type, size_type> {
  const bool _inclusive                 = false;
  size_type const* const _d_bit_indices = nullptr;

  /**
   * @brief Constructor of a functor that converts bit indices to bitmask word
   * indices.
   *
   * @param[in] inclusive Flag that indicates whether bit indices are inclusive
   * or exclusive.
   * @param[in] d_bit_indices Pointer to an array of bit indices
   */
  __host__ to_word_index(bool inclusive, size_type const* d_bit_indices)
    : _inclusive(inclusive), _d_bit_indices(d_bit_indices)
  {
  }

  // For an exclusive end index that is not word-aligned, round up to the next
  // word so the partial word is included in the segment.
  __device__ size_type operator()(const size_type& i) const
  {
    auto bit_index = _d_bit_indices[i];
    return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 0 : 1);
  }
};
/**
 * @brief Functor that returns the number of set bits in the specified word of
 * a bitmask array.
 */
struct word_num_set_bits_functor {
  word_num_set_bits_functor(bitmask_type const* mask) : bitmask(mask) {}

  // Population count of the bitmask word at index `i`.
  __device__ size_type operator()(size_type i) const
  {
    return static_cast<size_type>(__popc(bitmask[i]));
  }

  bitmask_type const* bitmask;
};
/**
 * @brief Given a bitmask, counts the number of set (1) bits in every range
 * `[indices_begin[2*i], indices_begin[(2*i)+1])` (where 0 <= i < std::distance(indices_begin,
 * indices_end) / 2).
 *
 * Returns an empty vector if `bitmask == nullptr`.
 *
 * @throws cudf::logic_error if `std::distance(indices_begin, indices_end) % 2 != 0`
 * @throws cudf::logic_error if `indices_begin[2*i] < 0 or indices_begin[2*i] >
 * indices_begin[(2*i)+1]`
 *
 * @param bitmask Bitmask residing in device memory whose bits will be counted
 * @param indices_begin An iterator representing the beginning of the range of indices specifying
 * ranges to count the number of set bits within
 * @param indices_end An iterator representing the end of the range of indices specifying ranges to
 * count the number of set bits within
 * @param stream CUDA stream used for device memory operations and kernel launches
 *
 * @return A vector storing the number of non-zero bits in the specified ranges
 */
template <typename IndexIterator>
std::vector<size_type> segmented_count_set_bits(bitmask_type const* bitmask,
                                                IndexIterator indices_begin,
                                                IndexIterator indices_end,
                                                rmm::cuda_stream_view stream)
{
  size_t const num_indices = std::distance(indices_begin, indices_end);

  // Validate all [begin, end) pairs before doing any work.
  CUDF_EXPECTS(num_indices % 2 == 0, "Array of indices needs to have an even number of elements.");
  for (size_t i = 0; i < num_indices / 2; i++) {
    auto begin = indices_begin[i * 2];
    auto end   = indices_begin[i * 2 + 1];
    CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative.");
    CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index.");
  }

  if (num_indices == 0) {
    return std::vector<size_type>{};
  } else if (bitmask == nullptr) {
    // No mask means every row is valid: each range contributes its full length.
    std::vector<size_type> ret(num_indices / 2);
    for (size_t i = 0; i < num_indices / 2; i++) {
      ret[i] = indices_begin[2 * i + 1] - indices_begin[2 * i];
    }
    return ret;
  }

  size_type num_ranges = num_indices / 2;
  std::vector<size_type> h_first_indices(num_ranges);
  std::vector<size_type> h_last_indices(num_ranges);
  // De-interleave [b0, e0, b1, e1, ...] into begins (even positions) and
  // ends (odd positions).
  thrust::stable_partition_copy(thrust::seq,
                                indices_begin,
                                indices_end,
                                thrust::make_counting_iterator(0),
                                h_first_indices.begin(),
                                h_last_indices.begin(),
                                [](auto i) { return (i % 2) == 0; });

  auto d_first_indices = make_device_uvector_async(h_first_indices, stream);
  auto d_last_indices  = make_device_uvector_async(h_last_indices, stream);
  rmm::device_uvector<size_type> d_null_counts(num_ranges, stream);

  // Per-word population counts, reduced per segment of words below.
  auto word_num_set_bits = thrust::make_transform_iterator(thrust::make_counting_iterator(0),
                                                           word_num_set_bits_functor{bitmask});
  auto first_word_indices = thrust::make_transform_iterator(
    thrust::make_counting_iterator(0),
    // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires
    // first_word_indices and last_word_indices to have the same type.
    to_word_index(true, d_first_indices.data()));
  auto last_word_indices = thrust::make_transform_iterator(
    thrust::make_counting_iterator(0),
    // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires
    // first_word_indices and last_word_indices to have the same type.
    to_word_index(false, d_last_indices.data()));

  // first allocate temporary memory
  size_t temp_storage_bytes{0};
  CUDA_TRY(cub::DeviceSegmentedReduce::Sum(nullptr,
                                           temp_storage_bytes,
                                           word_num_set_bits,
                                           d_null_counts.begin(),
                                           num_ranges,
                                           first_word_indices,
                                           last_word_indices,
                                           stream.value()));
  rmm::device_buffer d_temp_storage(temp_storage_bytes, stream);

  // second perform segmented reduction
  CUDA_TRY(cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(),
                                           temp_storage_bytes,
                                           word_num_set_bits,
                                           d_null_counts.begin(),
                                           num_ranges,
                                           first_word_indices,
                                           last_word_indices,
                                           stream.value()));

  CHECK_CUDA(stream.value());

  // third, adjust counts in segment boundaries (if segments are not
  // word-aligned)
  constexpr size_type block_size{256};
  cudf::detail::grid_1d grid(num_ranges, block_size);
  subtract_set_bits_range_boundaries_kernel<<<grid.num_blocks,
                                              grid.num_threads_per_block,
                                              0,
                                              stream.value()>>>(
    bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin());

  CHECK_CUDA(stream.value());

  std::vector<size_type> ret(num_ranges);
  CUDA_TRY(cudaMemcpyAsync(ret.data(),
                           d_null_counts.data(),
                           num_ranges * sizeof(size_type),
                           cudaMemcpyDeviceToHost,
                           stream.value()));

  stream.synchronize();  // now ret is valid.

  return ret;
}
/**
 * @brief Given a bitmask, counts the number of unset (0) bits in every range
 * `[indices_begin[2*i], indices_begin[(2*i)+1])` (where 0 <= i < std::distance(indices_begin,
 * indices_end) / 2).
 *
 * Returns an empty vector if `bitmask == nullptr`.
 *
 * @throws cudf::logic_error if `std::distance(indices_begin, indices_end) % 2 != 0`
 * @throws cudf::logic_error if `indices_begin[2*i] < 0 or indices_begin[2*i] >
 * indices_begin[(2*i)+1]`
 *
 * @param bitmask Bitmask residing in device memory whose bits will be counted
 * @param indices_begin An iterator representing the beginning of the range of indices specifying
 * ranges to count the number of unset bits within
 * @param indices_end An iterator representing the end of the range of indices specifying ranges to
 * count the number of unset bits within
 * @param stream CUDA stream used for device memory operations and kernel launches
 *
 * @return A vector storing the number of zero bits in the specified ranges
 */
template <typename IndexIterator>
std::vector<size_type> segmented_count_unset_bits(bitmask_type const* bitmask,
                                                  IndexIterator indices_begin,
                                                  IndexIterator indices_end,
                                                  rmm::cuda_stream_view stream)
{
  size_t const num_indices = std::distance(indices_begin, indices_end);
  if (num_indices == 0) { return std::vector<size_type>{}; }
  // No mask means no nulls: every range's unset-bit count is zero.
  if (bitmask == nullptr) { return std::vector<size_type>(num_indices / 2, 0); }

  // unset bits = range length - set bits
  auto counts = segmented_count_set_bits(bitmask, indices_begin, indices_end, stream);
  for (size_t i = 0; i < counts.size(); i++) {
    auto const range_begin  = indices_begin[i * 2];
    auto const range_end    = indices_begin[i * 2 + 1];
    auto const range_length = range_end - range_begin;
    counts[i]               = range_length - counts[i];
  }
  return counts;
}
} // namespace detail
} // namespace cudf | the_stack |
#include <cuda_runtime_api.h>
/**
 * Rounds a value to the nearest integer (half away from zero).
 *
 * The naive `int(a + 0.5)` truncates toward zero and therefore mis-rounds
 * negative inputs (e.g. -1.2 would become 0 instead of -1); negative values
 * are handled by subtracting 0.5 instead. All current call sites pass
 * non-negative values, for which behavior is unchanged.
 */
template<typename T>
inline __device__ int intRoundGPU(const T a)
{
    return (a < T(0)) ? int(a - T(0.5)) : int(a + T(0.5));
}
template <typename T>
inline __device__ T process(
const T* bodyPartA, const T* bodyPartB, const T* mapX, const T* mapY, const int heatmapWidth,
const int heatmapHeight, const T interThreshold, const T interMinAboveThreshold, const T defaultNmsThreshold)
{
const auto vectorAToBX = bodyPartB[0] - bodyPartA[0];
const auto vectorAToBY = bodyPartB[1] - bodyPartA[1];
const auto vectorAToBMax = max(abs(vectorAToBX), abs(vectorAToBY));
const auto numberPointsInLine = max(5, min(25, intRoundGPU(sqrt(5 * vectorAToBMax)))); // 5-25 points, d>125 -> 25, d<5 -> 5, 5<d<125 -> 5~25
const auto vectorNorm = T(sqrt(vectorAToBX*vectorAToBX + vectorAToBY * vectorAToBY));
// If the peaksPtr are coincident. Don't connect them.
if (vectorNorm > 1e-6)
{
const auto sX = bodyPartA[0];
const auto sY = bodyPartA[1];
const auto vectorAToBNormX = vectorAToBX / vectorNorm;
const auto vectorAToBNormY = vectorAToBY / vectorNorm;
auto sum = T(0.);
auto count = 0;
const auto vectorAToBXInLine = vectorAToBX / numberPointsInLine;
const auto vectorAToBYInLine = vectorAToBY / numberPointsInLine;
for (auto lm = 0; lm < numberPointsInLine; lm++)
{
const auto mX = min(heatmapWidth - 1, intRoundGPU(sX + lm * vectorAToBXInLine));
const auto mY = min(heatmapHeight - 1, intRoundGPU(sY + lm * vectorAToBYInLine));
const auto idx = mY * heatmapWidth + mX;
const auto score = (vectorAToBNormX*mapX[idx] + vectorAToBNormY * mapY[idx]);
if (score > interThreshold)
{
sum += score;
count++;
}
}
// Return PAF score
if (count / T(numberPointsInLine) > interMinAboveThreshold)
return sum / count;
else
{
// Ideally, if distanceAB = 0, PAF is 0 between A and B, provoking a false negative
// To fix it, we consider PAF-connected keypoints very close to have a minimum PAF score, such that:
// 1. It will consider very close keypoints (where the PAF is 0)
// 2. But it will not automatically connect them (case PAF score = 1), or real PAF might got
// missing
const auto l2Dist = sqrtf(vectorAToBX*vectorAToBX + vectorAToBY * vectorAToBY);
const auto threshold = sqrtf(heatmapWidth*heatmapHeight) / 150; // 3.3 for 368x656, 6.6 for 2x resolution
if (l2Dist < threshold)
return T(defaultNmsThreshold + 1e-6); // Without 1e-6 will not work because I use strict greater
}
}
return -1;
}
// template <typename T>
// __global__ void pafScoreKernelOld(
// T* pairScoresPtr, const T* const heatMapPtr, const T* const peaksPtr, const unsigned int* const bodyPartPairsPtr,
// const unsigned int* const mapIdxPtr, const unsigned int maxPeaks, const int numberBodyPartPairs,
// const int heatmapWidth, const int heatmapHeight, const T interThreshold, const T interMinAboveThreshold,
// const T defaultNmsThreshold)
// {
// const auto pairIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
// const auto peakA = (blockIdx.y * blockDim.y) + threadIdx.y;
// const auto peakB = (blockIdx.z * blockDim.z) + threadIdx.z;
// if (pairIndex < numberBodyPartPairs && peakA < maxPeaks && peakB < maxPeaks)
// {
// const auto baseIndex = 2*pairIndex;
// const auto partA = bodyPartPairsPtr[baseIndex];
// const auto partB = bodyPartPairsPtr[baseIndex + 1];
// const T numberPeaksA = peaksPtr[3*partA*(maxPeaks+1)];
// const T numberPeaksB = peaksPtr[3*partB*(maxPeaks+1)];
// const auto outputIndex = (pairIndex*maxPeaks+peakA)*maxPeaks + peakB;
// if (peakA < numberPeaksA && peakB < numberPeaksB)
// {
// const auto mapIdxX = mapIdxPtr[baseIndex];
// const auto mapIdxY = mapIdxPtr[baseIndex + 1];
// const T* const bodyPartA = peaksPtr + (3*(partA*(maxPeaks+1) + peakA+1));
// const T* const bodyPartB = peaksPtr + (3*(partB*(maxPeaks+1) + peakB+1));
// const T* const mapX = heatMapPtr + mapIdxX*heatmapWidth*heatmapHeight;
// const T* const mapY = heatMapPtr + mapIdxY*heatmapWidth*heatmapHeight;
// pairScoresPtr[outputIndex] = process(
// bodyPartA, bodyPartB, mapX, mapY, heatmapWidth, heatmapHeight, interThreshold,
// interMinAboveThreshold, defaultNmsThreshold);
// }
// else
// pairScoresPtr[outputIndex] = -1;
// }
// }
// Computes one PAF score per (body-part pair, peak A, peak B) triple.
//
// Expected launch layout (see connectBodyPartsGpu):
//   x -> candidate peak B index   in [0, maxPeaks)
//   y -> candidate peak A index   in [0, maxPeaks)
//   z -> body-part pair index     in [0, numberBodyPartPairs)
//
// Output: pairScoresPtr[(pair*maxPeaks + peakA)*maxPeaks + peakB] receives the
// PAF score, or -1 when either peak slot is beyond that part's detected count.
template <typename T>
__global__ void pafScoreKernel(
    T* pairScoresPtr,
    const T* const heatMapPtr,
    const T* const peaksPtr,
    const unsigned int* const bodyPartPairsPtr,
    const unsigned int* const mapIdxPtr,
    const unsigned int maxPeaks,   // e.g. 127
    const int numberBodyPartPairs, // e.g. 14
    const int heatmapWidth,        // e.g. 512
    const int heatmapHeight,       // e.g. 368
    const T interThreshold,        // e.g. 0.05f
    const T interMinAboveThreshold,// e.g. 0.95f
    const T defaultNmsThreshold)   // e.g. 0.05f
{
    const auto peakB = (blockIdx.x * blockDim.x) + threadIdx.x;
    const auto peakA = (blockIdx.y * blockDim.y) + threadIdx.y;
    const auto pairIndex = (blockIdx.z * blockDim.z) + threadIdx.z;
    // Bug fix: the pairIndex bound must be checked too (it was present in the
    // original guard but had been dropped). Without it, any launch whose z
    // dimension rounds up past numberBodyPartPairs reads bodyPartPairsPtr /
    // mapIdxPtr out of bounds and writes past the end of pairScoresPtr.
    if (pairIndex < numberBodyPartPairs && peakA < maxPeaks && peakB < maxPeaks)
    {
        const auto baseIndex = 2 * pairIndex;
        const auto partA = bodyPartPairsPtr[baseIndex];
        const auto partB = bodyPartPairsPtr[baseIndex + 1];
        // Slot 0 of each part's peak block stores the number of peaks detected
        // for that part; peak k lives at offset 3*(part*(maxPeaks+1) + k + 1).
        const T numberPeaksA = peaksPtr[3 * partA*(maxPeaks + 1)];
        const T numberPeaksB = peaksPtr[3 * partB*(maxPeaks + 1)];
        const auto outputIndex = (pairIndex*maxPeaks + peakA)*maxPeaks + peakB;
        if (peakA < numberPeaksA && peakB < numberPeaksB)
        {
            const auto mapIdxX = mapIdxPtr[baseIndex];
            const auto mapIdxY = mapIdxPtr[baseIndex + 1];
            const T* const bodyPartA = peaksPtr + (3 * (partA*(maxPeaks + 1) + peakA + 1));
            const T* const bodyPartB = peaksPtr + (3 * (partB*(maxPeaks + 1) + peakB + 1));
            const T* const mapX = heatMapPtr + mapIdxX * heatmapWidth*heatmapHeight;
            const T* const mapY = heatMapPtr + mapIdxY * heatmapWidth*heatmapHeight;
            pairScoresPtr[outputIndex] = process(
                bodyPartA, bodyPartB, mapX, mapY, heatmapWidth, heatmapHeight, interThreshold,
                interMinAboveThreshold, defaultNmsThreshold);
        }
        else
            pairScoresPtr[outputIndex] = -1;
    }
}
// Host-side driver: launches pafScoreKernel to fill pairScoresGpuPtr with one
// PAF score per (body-part pair, peakA, peakB) triple.
//
// Precondition: pairScoresGpuPtr holds numberBodyPartPairs * maxPeaks *
// maxPeaks values of T (14 * 127 * 127 with the constants below).
// NOTE(review): the launch is not followed by cudaGetLastError(); a bad
// configuration would surface only at a later synchronizing call — confirm
// the caller checks CUDA errors.
template <typename T>
void connectBodyPartsGpu(T* pairScoresGpuPtr,
    const T* const heatMapGpuPtr, const int& heatMapSizeW, const int& heatMapSizeH, const T* const peaksGpuPtr,
    const unsigned int* const bodyPartPairsGpuPtr, const unsigned int* const mapIdxGpuPtr)
{
    // Parts Connection
    /*const std::vector<unsigned int> bodyPartPairs{
        0, 1, 0, 2, 0, 9, 9, 10, 10, 11,
        0, 3, 3, 4, 4, 5, 2, 12, 12, 13,
        13,14, 2, 6, 6, 7, 7, 8
    };*/
    //const int numberBodyPartPairs = (int)(bodyPartPairs.size() / 2); // 14
    const int numberBodyPartPairs = 14;
    // const int numberBodyParts = 15;
    const int maxPeaks = 127;
    // const auto totalComputations = numberBodyPartPairs * maxPeaks * maxPeaks;
    // bool maximizePositives = false;
    const T defaultNmsThreshold = 0.1f; // 0.05f 0.6f
    const T interThreshold = 0.05f; //0.05f
    const T interMinAboveThreshold = 0.95f;//0.95f;
    //const int minSubsetCnt = maximizePositives ? 2u : 3u;
    //const T minSubsetScore = maximizePositives ? 0.05f : 0.4f;
    // Efficient code
    // Run Kernel - pairScoresGpu
    // Grid layout: x covers peakB with 128 threads/block (1 block),
    // y covers peakA with one thread per block (127 blocks),
    // z covers the body-part pairs (14 blocks of 1 thread).
    const dim3 THREADS_PER_BLOCK{ 128, 1, 1 };
    const dim3 numBlocks{
        getNumberCudaBlocks(maxPeaks, THREADS_PER_BLOCK.x),              // 127
        getNumberCudaBlocks(maxPeaks, THREADS_PER_BLOCK.y),              // 127
        getNumberCudaBlocks(numberBodyPartPairs, THREADS_PER_BLOCK.z) }; // 14
    pafScoreKernel<<<numBlocks, THREADS_PER_BLOCK>>>(
        pairScoresGpuPtr, heatMapGpuPtr, peaksGpuPtr, bodyPartPairsGpuPtr, mapIdxGpuPtr,
        maxPeaks, numberBodyPartPairs, heatMapSizeW, heatMapSizeH, interThreshold,
        interMinAboveThreshold, defaultNmsThreshold);
    // pairScoresCpu <-- pairScoresGpu // 26 * 127 * 127
    //cudaMemcpy(pairScoresCpu.getPtr(), pairScoresGpuPtr, totalComputations * sizeof(T),
    //           cudaMemcpyDeviceToHost);
    // Get pair connections and their scores
    /*const auto pairConnections = pafPtrIntoVector(
        pairScoresCpu, peaksPtr, maxPeaks, bodyPartPairs, numberBodyPartPairs);
    auto peopleVector = pafVectorIntoPeopleVector(
        pairConnections, peaksPtr, maxPeaks, bodyPartPairs, numberBodyParts);*/
    // // Old code: Get pair connections and their scores
    // // std::vector<std::pair<std::vector<int>, double>> refers to:
    // //     - std::vector<int>: [body parts locations, #body parts found]
    // //     - double: person subset score
    // const T* const tNullptr = nullptr;
    // const auto peopleVector = createPeopleVector(
    //     tNullptr, peaksPtr, poseModel, heatMapSize, maxPeaks, interThreshold, interMinAboveThreshold,
    //     bodyPartPairs, numberBodyParts, numberBodyPartPairs, defaultNmsThreshold, pairScoresCpu);
    // Delete people below the following thresholds:
    // a) minSubsetCnt: removed if less than minSubsetCnt body parts
    // b) minSubsetScore: removed if global score smaller than this
    // c) maxPeaks (POSE_MAX_PEOPLE): keep first maxPeaks people above thresholds
    /*int numberPeople;
    std::vector<int> validSubsetIndexes;
    // validSubsetIndexes.reserve(fastMin((size_t)maxPeaks, peopleVector.size()));
    validSubsetIndexes.reserve(peopleVector.size());
    removePeopleBelowThresholdsAndFillFaces(
        validSubsetIndexes, numberPeople, peopleVector, numberBodyParts, minSubsetCnt, minSubsetScore,
        maximizePositives, peaksPtr);
    // Fill and return poseKeypoints
    peopleVectorToPeopleArray(
        poseKeypoints, poseScores, scaleFactor, peopleVector, validSubsetIndexes, peaksPtr, numberPeople,
        numberBodyParts, numberBodyPartPairs);*/
}
// Explicit instantiation for single precision.
template void connectBodyPartsGpu(float* pairScoresGpuPtr,
    const float* const heatMapGpuPtr, const int& heatMapSizeW, const int& heatMapSizeH, const float* const peaksGpuPtr,
    const unsigned int* const bodyPartPairsGpuPtr, const unsigned int* const mapIdxGpuPtr);
/*template void connectBodyPartsGpu(
Array<double>& poseKeypoints, Array<double>& poseScores, const double* const heatMapGpuPtr,
const double* const peaksPtr, const int& heatMapSizeW, const int& heatMapSizeH,
const double scaleFactor,
Array<double> pairScoresCpu, double* pairScoresGpuPtr,
const unsigned int* const bodyPartPairsGpuPtr, const unsigned int* const mapIdxGpuPtr,
const double* const peaksGpuPtr);
*/
using ::testing::DoubleEq;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FloatEq;
// Test fixture parameterized on an RNG seed; every TEST_P below runs once per
// seed supplied by the INSTANTIATE_TEST_CASE_P range that follows the fixture.
class ParamsTest : public ::testing::TestWithParam<RNG::result_type> {
 protected:
  // Seed the fixture RNG deterministically from the test parameter.
  virtual void SetUp() override {
    rng_.seed(GetParam());
  }

  // Fills the representation matrix with the sequence 0,1,2,... and sanity-
  // checks (via ASSERT) that the values actually reached the device buffer.
  template <typename FloatT, typename IdxType>
  void initialize_range_representations(RepresentationsStorage<FloatT, IdxType>* const representations) {
    thrust::copy(thrust::make_counting_iterator(static_cast<size_t>(0)),
                 thrust::make_counting_iterator(representations->reprs_.size()),
                 begin(representations->reprs_));
    std::vector<float32> expected;
    for (size_t i = 0; i < representations->reprs_.size(); ++i) {
      expected.push_back(i);
    }
    ASSERT_THAT(to_host(representations->reprs_),
                ElementsAreArray(expected));
  }

  // Fills the transform matrix and bias with 0,1,2,...; the bias is then
  // scaled by 1e-3 so it perturbs rather than dominates the transform output.
  void initialize_transform(Transform<FloatT>* const transform) {
    thrust::copy(thrust::make_counting_iterator(static_cast<size_t>(0)),
                 thrust::make_counting_iterator(transform->transform_.size()),
                 begin(transform->transform_));
    thrust::copy(thrust::make_counting_iterator(static_cast<size_t>(0)),
                 thrust::make_counting_iterator(transform->bias_.size()),
                 begin(transform->bias_));
    transform->bias_.scale(transform->bias_.getStream(), 1e-3);
  }

  RNG rng_;  // Shared source of randomness, reseeded per test parameter.
};
// Run every ParamsTest under seeds 0..10 inclusive (11 instantiations).
// NOTE(review): INSTANTIATE_TEST_CASE_P is deprecated in modern GoogleTest in
// favor of INSTANTIATE_TEST_SUITE_P; kept as-is for the gtest version in use.
INSTANTIATE_TEST_CASE_P(RandomSeed,
                        ParamsTest,
                        ::testing::Range<RNG::result_type>(0 /* start, inclusive */,
                                                           11 /* end, exclusive */,
                                                           1 /* step */));
// get_average_representations() should return, per window of `window_size`
// indices, the element-wise sum of the addressed representations divided by
// window_size. Representations are initialized to 0..11, so object i holds
// the values {3i, 3i+1, 3i+2}.
TEST_P(ParamsTest, get_average_representations) {
    // 4 objects of dimensionality 3.
    Representations<FloatT, DefaultModel::WordIdxType> representations(
        WORD_REPRS,
        4, /* num_objects */
        3, /* repr_size */
        ParseProto<UpdateMethodConf>("type: SGD"), /* update_method */
        DefaultStream::get());
    representations.initialize(&rng_);
    initialize_range_representations(&representations);
    // Two windows of three indices each: {1, 3, 2} and {0, 3, 1}.
    std::unique_ptr<device_matrix<DefaultModel::WordIdxType>> indices(
        device_matrix<DefaultModel::WordIdxType>::create_column(
            DefaultStream::get()->next(),
            {1, 3, 2,
             0, 3, 1}));
    std::unique_ptr<device_matrix<FloatT>> averages(
        representations.get_average_representations(
            DefaultStream::get()->next(),
            *indices,
            3 /* window_size */));
    // First column averages objects {1, 3, 2}; second averages {0, 3, 1}.
    EXPECT_THAT(
        to_host(*averages),
        ElementsAre(FPHelper<FloatT>::eq((3 + 9 + 6) / 3.),
                    FPHelper<FloatT>::eq((4 + 10 + 7) / 3.),
                    FPHelper<FloatT>::eq((5 + 11 + 8) / 3.),
                    FPHelper<FloatT>::eq((0 + 9 + 3) / 3.),
                    FPHelper<FloatT>::eq((1 + 10 + 4) / 3.),
                    FPHelper<FloatT>::eq((2 + 11 + 5) / 3.)));
}
// Same as get_average_representations, but each representation is scaled by
// its per-index weight before summing. Note the divisor stays window_size (3),
// not the sum of the weights — as the expectations below encode.
TEST_P(ParamsTest, get_weighted_average_representations) {
    Representations<FloatT, DefaultModel::WordIdxType> representations(
        WORD_REPRS,
        4, /* num_objects */
        3, /* repr_size */
        ParseProto<UpdateMethodConf>("type: SGD"), /* update_method */
        DefaultStream::get());
    representations.initialize(&rng_);
    initialize_range_representations(&representations);
    // Two windows of three indices each.
    std::unique_ptr<device_matrix<DefaultModel::WordIdxType>> indices(
        device_matrix<DefaultModel::WordIdxType>::create_column(
            DefaultStream::get()->next(),
            {1, 3, 2,
             0, 3, 1}));
    // One weight per index above.
    std::unique_ptr<device_matrix<DefaultModel::FloatT>> indices_weights(
        device_matrix<DefaultModel::FloatT>::create_column(
            DefaultStream::get()->next(),
            {0.5, 0.3, 0.1,
             1.0, 2.0, 0.2}));
    std::unique_ptr<device_matrix<FloatT>> averages(
        representations.get_average_representations(
            DefaultStream::get()->next(),
            *indices,
            3, /* window_size */
            indices_weights.get()));
    EXPECT_THAT(
        to_host(*averages),
        ElementsAre(FPHelper<FloatT>::eq((0.5 * 3 + 0.3 * 9 + 0.1 * 6) / 3.),
                    FPHelper<FloatT>::eq((0.5 * 4 + 0.3 * 10 + 0.1 * 7) / 3.),
                    FPHelper<FloatT>::eq((0.5 * 5 + 0.3 * 11 + 0.1 * 8) / 3.),
                    FPHelper<FloatT>::eq((1.0 * 0 + 2.0 * 9 + 0.2 * 3) / 3.),
                    FPHelper<FloatT>::eq((1.0 * 1 + 2.0 * 10 + 0.2 * 4) / 3.),
                    FPHelper<FloatT>::eq((1.0 * 2 + 2.0 * 11 + 0.2 * 5) / 3.)));
}
// generate_labels() should emit, for every positive label, the label itself
// plus num_negative_labels sampled negatives — (1 + negatives) entries each.
TEST_P(ParamsTest, generate_labels) {
    constexpr size_t kNumWords = 100;
    constexpr size_t kNumObjects = 5000;
    DefaultModel lse(kNumWords, kNumObjects,
                     ParseProto<lse::ModelDesc>("word_repr_size: 64 entity_repr_size: 32"),
                     ParseProto<lse::TrainConfig>("update_method: < type: SGD >"));
    lse.initialize(&rng_);

    constexpr size_t kNumLabels = 5;
    constexpr size_t kNumNegativeLabels = 10;
    std::vector<DefaultModel::EntityIdxType> labels = {1, 2, 3, 4, 5};

    std::vector<DefaultModel::EntityIdxType> instance_entities;
    lse.objective_->generate_labels(labels.data(),
                                    kNumLabels,
                                    kNumNegativeLabels,
                                    &instance_entities,
                                    &rng_);

    // One positive plus kNumNegativeLabels negatives per label.
    EXPECT_THAT(instance_entities.size(),
                Eq(kNumLabels * (kNumNegativeLabels + 1)));
}
// Representations::update() should (1) shrink every row by the regularization
// factor 1 - lr * scaled_lambda, and (2) add lr * gradient to the rows
// addressed by the forward result's words (sign convention as encoded in the
// expectations below).
TEST_P(ParamsTest, Representations_update) {
    Representations<FloatT, DefaultModel::WordIdxType> representations(
        WORD_REPRS,
        4, /* num_objects */
        3, /* repr_size */
        ParseProto<UpdateMethodConf>("type: SGD"), /* update_method */
        DefaultStream::get());
    representations.initialize(&rng_);
    // Scenario 1: zero gradients, nonzero regularization — every value is
    // scaled by 1 - (0.1 * 0.1) / 2; nothing else changes.
    {
        initialize_range_representations(&representations);
        TextEntity::Objective::ForwardResultType result(
            device_matrix<WordIdxType>::create(
                DefaultStream::get()->next(), {0, 3, 1, 0}, 1, 4), /* words */
            device_matrix<FloatT>::create(
                DefaultStream::get()->next(), {1.0, 1.0, 1.0, 1.0}, 1, 4), /* word_weights */
            device_matrix<WordIdxType>::create(
                DefaultStream::get()->next(), {0}, 1, 1), /* entities */
            2, /* window_size */
            1, /* num_random_entities */
            0.1 /* regularization_lambda */);
        SingleGradients<FloatT> gradients(&result);
        gradients.grad_phrase_reprs_.reset(
            new device_matrix<FloatT>(3 /* repr_size */,
                                      2, /* num_phrase_reprs */
                                      DefaultStream::get()->next()));
        gradients.grad_phrase_reprs_->fillwith(
            gradients.grad_phrase_reprs_->getStream(), 0.0);
        representations.update(gradients,
                               0.1, /* learning_rate */
                               result.scaled_regularization_lambda(),
                               DefaultStream::get());
        const FloatT scale_factor = 1.0 - (0.1 * 0.1) / 2.0;
        EXPECT_THAT(
            to_host(representations.reprs_),
            ElementsAreArray({0. * scale_factor, 1. * scale_factor, 2. * scale_factor,
                              3. * scale_factor, 4. * scale_factor, 5. * scale_factor,
                              6. * scale_factor, 7. * scale_factor, 8. * scale_factor,
                              9. * scale_factor, 10. * scale_factor, 11. * scale_factor}));
    }
    // Scenario 2: nonzero gradients, no regularization. Words {0, 3, 1, 0}
    // with window_size 2 form phrases {0, 3} and {1, 0}; each word receives
    // + lr * its phrase's gradient column (word 0 appears in both phrases,
    // word 2 in none).
    {
        initialize_range_representations(&representations);
        TextEntity::Objective::ForwardResultType result(
            device_matrix<WordIdxType>::create(DefaultStream::get()->next(), {0, 3, 1, 0}, 1, 4), /* words */
            device_matrix<FloatT>::create(DefaultStream::get()->next(), {1.0, 1.0, 1.0, 1.0}, 1, 4), /* word_weights */
            device_matrix<WordIdxType>::create(DefaultStream::get()->next(), {0}, 1, 1), /* entities */
            2, /* window_size */
            1, /* num_random_entities */
            0.0 /* regularization_lambda */);
        SingleGradients<FloatT> gradients(&result);
        gradients.grad_phrase_reprs_.reset(
            new device_matrix<FloatT>(3, /* repr_size */
                                      2, /* num_phrase_reprs */
                                      DefaultStream::get()->next()));
        to_device({5.0, 4.0, 3.0,
                   -3.0, -2.0, 10.0},
                  gradients.grad_phrase_reprs_.get());
        representations.update(gradients,
                               0.1, /* learning_rate */
                               result.scaled_regularization_lambda(),
                               DefaultStream::get());
        const FloatT learning_rate = 0.1;
        EXPECT_THAT(
            to_host(representations.reprs_),
            ElementsAreArray({FPHelper<FloatT>::eq(0. + (5.0 + (-3.0)) * learning_rate),
                              FPHelper<FloatT>::eq(1. + (4.0 + (-2.0)) * learning_rate),
                              FPHelper<FloatT>::eq(2. + (3.0 + 10.0) *learning_rate),
                              FPHelper<FloatT>::eq(3. + (-3.0) * learning_rate),
                              FPHelper<FloatT>::eq(4. + (-2.0) * learning_rate),
                              FPHelper<FloatT>::eq(5. + 10.0 * learning_rate),
                              FPHelper<FloatT>::eq(6.),
                              FPHelper<FloatT>::eq(7.),
                              FPHelper<FloatT>::eq(8.),
                              FPHelper<FloatT>::eq(9. + (5.0) * learning_rate),
                              FPHelper<FloatT>::eq(10. + (4.0) * learning_rate),
                              FPHelper<FloatT>::eq(11. + 3.0 * learning_rate)}));
    }
}
// update_dense() should apply the regularized SGD step to every element:
// x <- x * (1 - lambda * lr) + grad * lr, here with a constant gradient of 10.
TEST_P(ParamsTest, RepresentationsStorage_update_dense) {
    RepresentationsStorage<FloatT, DefaultModel::WordIdxType> representations(
        4, /* num_objects */
        3, /* repr_size */
        DefaultStream::get());
    initialize_range_representations(&representations);
    representations.update_dense(
        0, /* stream */
        thrust::make_constant_iterator(10.0), /* constant gradient */
        0.1, /* learning_rate */
        0.01 /* scaled_regularization_lambda */);
    EXPECT_THAT(
        to_host(representations.reprs_),
        ElementsAreArray({
            FPHelper<FloatT>::eq(0. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(1. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(2. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(3. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(4. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(5. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(6. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(7. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(8. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(9. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(10. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
            FPHelper<FloatT>::eq(11. * (1.0 - 0.01 * 0.1) + 10.0 * 0.1),
        }));
}
// Transform::transform() should compute tanh(W * x + b) column-wise; the
// ASCII diagrams below trace both input columns through the initialized W / b.
TEST_P(ParamsTest, Transform) {
    lse::ModelDesc::TransformDesc desc;
    Transform<FloatT> transform(
        TRANSFORM,
        desc,
        3 /* word_repr_size */, 5 /* entity_repr_size */,
        ParseProto<UpdateMethodConf>("type: SGD"), /* update_method */
        DefaultStream::get());
    transform.initialize(&rng_);
    initialize_transform(&transform);
    /*
    Transform will be a 3-by-5 matrix:
      |  0  1  2  3  4 |
      |  5  6  7  8  9 |
      | 10 11 12 13 14 |
    Bias will be a 3-dimensional vector:
      | 0 1 2 3 4 | * 1e-3
    */
    // Two input columns of dimensionality 3.
    device_matrix<FloatT> input_reprs(3, 2, NULL /* stream */);
    to_device({0.01, 0.02, 0.03,
               0.001, 0.002, 0.003}, &input_reprs);
    std::unique_ptr<device_matrix<FloatT>> result(
        transform.transform(
            DefaultStream::get()->next(),
            input_reprs,
            nullptr /* batch_normalization */));
    /*
    The first representation will undergo the following transition:
          |    0 + 0.10 + 0.30 |   | 0.000 |           | 0.400 |
          | 0.01 + 0.12 + 0.33 |   | 0.001 |           | 0.461 |
    tanh( | 0.02 + 0.14 + 0.36 | + | 0.002 | ) = tanh( | 0.522 | )
          | 0.03 + 0.16 + 0.39 |   | 0.003 |           | 0.583 |
          | 0.04 + 0.18 + 0.42 |   | 0.004 |           | 0.644 |
    For the second representation:
          |     0 + 0.010 + 0.030 |   | 0.000 |           | 0.040 |
          | 0.001 + 0.012 + 0.033 |   | 0.001 |           | 0.047 |
    tanh( | 0.002 + 0.014 + 0.036 | + | 0.002 | ) = tanh( | 0.054 | )
          | 0.003 + 0.016 + 0.039 |   | 0.003 |           | 0.061 |
          | 0.004 + 0.018 + 0.042 |   | 0.004 |           | 0.068 |
    */
    EXPECT_THAT(
        to_host(*result),
        ElementsAreArray({FPHelper<FloatT>::eq(tanh(0.400)),
                          FPHelper<FloatT>::eq(tanh(0.461)),
                          FPHelper<FloatT>::eq(tanh(0.522)),
                          FPHelper<FloatT>::eq(tanh(0.583)),
                          FPHelper<FloatT>::eq(tanh(0.644)),
                          FPHelper<FloatT>::eq(tanh(0.040)),
                          FPHelper<FloatT>::eq(tanh(0.047)),
                          FPHelper<FloatT>::eq(tanh(0.054)),
                          FPHelper<FloatT>::eq(tanh(0.061)),
                          FPHelper<FloatT>::eq(tanh(0.068))}));
}
// Golden-value regression test for the backward pass: gradients w.r.t. the
// transform matrix, bias, and input phrase representations must match values
// captured from a known-good run.
// NOTE(review): rng_ is reseeded to 10 here, so the GetParam() seed from
// SetUp() does not influence this test; the golden numbers depend on that
// fixed seed.
TEST_P(ParamsTest, Transform_backward) {
    rng_.seed(10);
    lse::ModelDesc model_desc;
    model_desc.set_word_repr_size(2);
    model_desc.set_entity_repr_size(3);
    model_desc.set_bias_negative_samples(true);
    DefaultModel lse(5, /* num_words */
                     3, /* num_objects */
                     model_desc,
                     ParseProto<lse::TrainConfig>(
                        "num_random_entities: 10 "
                        "regularization_lambda: 0.01 "
                        "update_method: < type: SGD >") /* update_method */);
    lse.initialize(&rng_);
    // Constant-valued batch: every feature is 2, every label is 1.
    TextEntity::Batch batch(32, /* batch_size */ 2 /* window_size */);
    ConstantSource data(
        2, /* feature_value */
        1 /* label */);
    data.next(&batch);
    std::unique_ptr<TextEntity::Objective::ForwardResultType> result(
        dynamic_cast<TextEntity::Objective::ForwardResultType*>(
            lse.compute_cost(batch, &rng_)));
    result->get_cost();
    std::unique_ptr<TextEntity::Objective::GradientsType> gradients(
        dynamic_cast<TextEntity::Objective::GradientsType*>(
            lse.compute_gradients(*result)));
    // dC / d W
    EXPECT_THAT(
        to_host(*gradients->grad_transform_matrix_),
        ElementsAreArray({
            FPHelper<FloatT>::eq(0.19053905536266260712),
            FPHelper<FloatT>::eq(-0.40135414958704901389),
            FPHelper<FloatT>::eq(-0.1114867197947356503),
            FPHelper<FloatT>::eq(0.34321179097285392512),
            FPHelper<FloatT>::eq(-0.72294614997420003633),
            FPHelper<FloatT>::eq(-0.20081739514038740579)
        }));
    // dC / d bias
    EXPECT_THAT(
        to_host(*gradients->grad_bias_),
        ElementsAreArray({
            FPHelper<FloatT>::eq(-0.59713496418425149326),
            FPHelper<FloatT>::eq(1.2578134980395563325),
            FPHelper<FloatT>::eq(0.34939093355395389739),
        }));
    // dC / d x
    EXPECT_THAT(
        to_host(*gradients->grad_phrase_reprs_),
        ElementsAreArray({
            FPHelper<FloatT>::eq(0.005498121774796882813),
            FPHelper<FloatT>::eq(-0.010496720853248608235),
            FPHelper<FloatT>::eq(0.0012117255393835533149),
            FPHelper<FloatT>::eq(-0.0093082500327885901031),
            FPHelper<FloatT>::eq(-0.016824653822594095448),
            FPHelper<FloatT>::eq(-0.010994870322164012819),
            FPHelper<FloatT>::eq(-0.018606242663242754387),
            FPHelper<FloatT>::eq(-0.023875877464595022387),
            FPHelper<FloatT>::eq(-0.027179035134069419455),
            FPHelper<FloatT>::eq(-0.021498935823674982654),
            FPHelper<FloatT>::eq(-0.0082518613517674373192),
            FPHelper<FloatT>::eq(-0.013371811963084052552),
            FPHelper<FloatT>::eq(-0.018606242663242754387),
            FPHelper<FloatT>::eq(-0.023875877464595022387),
            FPHelper<FloatT>::eq(-0.012538257587180764649),
            FPHelper<FloatT>::eq(-0.012183341142624032685),
            FPHelper<FloatT>::eq(-0.0082518613517674373192),
            FPHelper<FloatT>::eq(-0.013371811963084052552),
            FPHelper<FloatT>::eq(-0.014319846427829425323),
            FPHelper<FloatT>::eq(-0.025064348285055045723),
            FPHelper<FloatT>::eq(-0.012538257587180766384),
            FPHelper<FloatT>::eq(-0.012183341142624032685),
            FPHelper<FloatT>::eq(-0.011647463166856438649),
            FPHelper<FloatT>::eq(-0.0057428375714085287684),
            FPHelper<FloatT>::eq(0.0012117255393835541823),
            FPHelper<FloatT>::eq(-0.0093082500327885883684),
            FPHelper<FloatT>::eq(-0.014319846427829425323),
            FPHelper<FloatT>::eq(-0.025064348285055045723),
            FPHelper<FloatT>::eq(-0.013429052007505097588),
            FPHelper<FloatT>::eq(-0.018623844713839536602),
            FPHelper<FloatT>::eq(0.015852503086272205085),
            FPHelper<FloatT>::eq(7.3446482623583761478e-06),
            FPHelper<FloatT>::eq(-0.0091426557720917667887),
            FPHelper<FloatT>::eq(-0.019812315534299559938),
            FPHelper<FloatT>::eq(0.0012117255393835528812),
            FPHelper<FloatT>::eq(-0.0093082500327885883684),
            FPHelper<FloatT>::eq(-0.022892638898656086921),
            FPHelper<FloatT>::eq(-0.022687406644134999051),
            FPHelper<FloatT>::eq(-0.0048562595366784359896),
            FPHelper<FloatT>::eq(-0.021000786354759579805),
            FPHelper<FloatT>::eq(-0.016824653822594088509),
            FPHelper<FloatT>::eq(-0.010994870322164011084),
            FPHelper<FloatT>::eq(-0.013429052007505095853),
            FPHelper<FloatT>::eq(-0.018623844713839536602),
            FPHelper<FloatT>::eq(-0.019497037083567085591),
            FPHelper<FloatT>::eq(-0.030316381035810528038),
            FPHelper<FloatT>::eq(-0.02289263889865609039),
            FPHelper<FloatT>::eq(-0.022687406644134999051),
            FPHelper<FloatT>::eq(-0.027179035134069419455),
            FPHelper<FloatT>::eq(-0.021498935823674975715),
            FPHelper<FloatT>::eq(-0.040929018260633741322),
            FPHelper<FloatT>::eq(-0.024374026933510421766),
            FPHelper<FloatT>::eq(-0.0082518613517674373192),
            FPHelper<FloatT>::eq(-0.013371811963084050817),
            FPHelper<FloatT>::eq(-0.012538257587180764649),
            FPHelper<FloatT>::eq(-0.01218334114262403442),
            FPHelper<FloatT>::eq(-0.018606242663242757857),
            FPHelper<FloatT>::eq(-0.023875877464595022387),
            FPHelper<FloatT>::eq(0.010675312430534543082),
            FPHelper<FloatT>::eq(-0.0052446881024931241849),
            FPHelper<FloatT>::eq(-0.022892638898656079982),
            FPHelper<FloatT>::eq(-0.022687406644134999051),
            FPHelper<FloatT>::eq(-0.036642622025220401849),
            FPHelper<FloatT>::eq(-0.025562497753970448572)
        }));
}
// Exercises transform() with batch normalization enabled: verifies the
// normalized forward output against golden values, then runs backward() and
// only checks that the resulting gradients are finite (no NaN/Inf).
TEST_P(ParamsTest, Transform_BatchNormalization) {
    TextEntity::Objective::ForwardResultType result;
    result.batch_normalization_.reset(new BatchNormalization<FloatT>(
        5, /* num_features */
        0.1, /* momentum */
        1e-5, /* epsilon */
        true /* cache_input */));
    lse::ModelDesc::TransformDesc desc;
    Transform<FloatT> transform(
        TRANSFORM,
        desc,
        3 /* word_repr_size */, 5 /* entity_repr_size */,
        ParseProto<UpdateMethodConf>("type: SGD"), /* update_method */
        DefaultStream::get());
    transform.initialize(&rng_);
    initialize_transform(&transform);
    /*
    Transform will be a 3-by-5 matrix:
      |  0  1  2  3  4 |
      |  5  6  7  8  9 |
      | 10 11 12 13 14 |
    Bias will be a 3-dimensional vector:
      | 0 1 2 3 4 | * 1e-3
    */
    device_matrix<FloatT> input_reprs(3, 2, NULL /* stream */);
    to_device({0.01, 0.02, 0.03,
               0.001, 0.002, 0.003}, &input_reprs);
    std::unique_ptr<device_matrix<FloatT>> output(
        transform.transform(
            DefaultStream::get()->next(),
            input_reprs,
            result.batch_normalization_.get() /* batch_normalization */));
    // Golden forward values for the batch-normalized tanh output.
    EXPECT_THAT(
        to_host(*output),
        ElementsAreArray({FPHelper<FloatT>::eq(0.7615293524851600715),
                          FPHelper<FloatT>::eq(0.76196488305628828908),
                          FPHelper<FloatT>::eq(0.76239459573842593976),
                          FPHelper<FloatT>::eq(0.76282051982955900726),
                          FPHelper<FloatT>::eq(0.76324378068549525445),
                          FPHelper<FloatT>::eq(-0.76152935248515996047),
                          FPHelper<FloatT>::eq(-0.76112478489184165475),
                          FPHelper<FloatT>::eq(-0.76071446308133705561),
                          FPHelper<FloatT>::eq(-0.76030038648736808504),
                          FPHelper<FloatT>::eq(-0.75988366421371911219)}));
    SingleGradients<FloatT> gradients(&result);
    gradients.grad_transform_matrix_.reset(new device_matrix<FloatT>(
        5, 3, DefaultStream::get()->next()));
    gradients.grad_bias_.reset(new device_matrix<FloatT>(
        5, 1, DefaultStream::get()->next()));
    // Upstream gradient: 0.1 for the first column, 0.2 for the second.
    device_matrix<FloatT> grad_output(5, 2, DefaultStream::get()->next());
    to_device({0.1, 0.1, 0.1, 0.1, 0.1,
               0.2, 0.2, 0.2, 0.2, 0.2}, &grad_output);
    transform.backward(
        DefaultStream::get()->next(),
        result,
        input_reprs,
        *output,
        &grad_output,
        &gradients);
    // Finiteness-only checks: exact values are not pinned for this path.
    EXPECT_THAT(
        to_host_isfinite(*gradients.grad_transform_matrix_),
        Each(Eq(true)));
    EXPECT_THAT(
        to_host_isfinite(*gradients.grad_bias_),
        Each(Eq(true)));
}
// Test binary entry point: glog is initialized first, gtest consumes its own
// command-line flags, and gflags parses whatever remains.
int main(int argc, char* argv[]) {
    google::InitGoogleLogging(argv[0]);
    testing::InitGoogleTest(&argc, argv);
    google::ParseCommandLineFlags(&argc, &argv, true);
    return RUN_ALL_TESTS();
}
#pragma once
namespace hornets_nest {
//namespace hornet_alg {
// Per-vertex reset of the dynamic-PageRank bookkeeping before a streaming
// update: clears the queue-membership flags and accumulated differences.
struct InitStreaming {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(vid_t src) {
        pd().visited[src] = 0;     // not yet queued in queue2
        pd().visitedDlt[src] = 0;  // not yet queued in queueDlt
        pd().diffPR[src] = 0.0f;
        pd().delta[src] = 0.0f;
        // NOTE(review): every thread stores 0 to this shared scalar; benign
        // (all writes are the same value), but a single reset would suffice.
        *pd().reduction_out = 0;
    }
};
//------------------------------------------------------------------------------
// Placeholder hook for edge insertions. The update logic (adapted from the
// Katz-centrality operators, see the commented-out kd() code) is disabled;
// currently this only extracts the endpoint ids.
struct SetupInsertions {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(Vertex& src_vertex, Vertex& dst_vertex) {
        // NOTE(review): src/dst are unused while the body below stays
        // commented out, which triggers unused-variable warnings.
        auto src = src_vertex.id();
        auto dst = dst_vertex.id();
        //atomicAdd(kd().KC + src, kd().alpha);
        //atomicAdd(kd().new_paths_prev + src, 1);
        //vid_t prev = atomicCAS(kd().active + src, 0, kd().iteration);
        //if (prev == 0)
        //    kd().active_queue.insert(src);
    }
};
//------------------------------------------------------------------------------
// Placeholder hook for edge deletions — mirror image of SetupInsertions with
// a negated contribution; the update logic is disabled.
struct SetupDeletions {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(Vertex& src_vertex, Vertex& dst_vertex) {
        //double minus_alpha = -kd().alpha;
        // NOTE(review): src/dst are unused while the body below stays
        // commented out, which triggers unused-variable warnings.
        auto src = src_vertex.id();
        auto dst = dst_vertex.id();
        //atomicAdd(kd().KC + src, minus_alpha);
        //atomicAdd(kd().new_paths_prev + src, -1);
        //vid_t prev = atomicCAS(kd().active + src, 0, kd().iteration);
        //if (prev == 0)
        //    kd().active_queue.insert(src);
    }
};
//------------------------------------------------------------------------------
//struct RecomputeInsertionContriUndirected {
// Recomputes src's damped contribution received from dst after a graph
// update. When the change that would propagate further through src exceeds
// epsilon, src is queued (exactly once, guarded by a CAS on visited) into
// queue2 for full recomputation; otherwise the change is parked in delta[src]
// and src goes into the delta queue.
struct RecomputeContri {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(Vertex& src_vertex, Vertex& dst_vertex) {
        auto src = src_vertex.id();
        auto dst = dst_vertex.id();
        auto degree_src = src_vertex.degree();
        auto degree_dst = dst_vertex.degree();
        if (degree_dst == 0) return;
        //pd().curr_pr[src] = pd().normalized_damp +
        //                    pd().damp * pd().curr_pr[src];
        // Damped share of dst's previous rank flowing into src.
        float update_diff = pd().damp*(pd().prev_pr[dst]/degree_dst);//(pr->usedOld[dst])); //@@old dst
        // How much of that change src would forward to each of its neighbors.
        // NOTE(review): this divides by degree_src, but the early-return above
        // guards degree_dst only — confirm degree_src cannot be 0 here.
        float update_prop = pd().damp*(update_diff/degree_src);
        atomicAdd(pd().diffPR+src,update_diff);
        atomicAdd(pd().contri+src,update_diff);
        if(fabs(update_prop) > pd().epsilon){
            if (pd().visited[src] == 0) {
                // CAS semantics: old == compare ? val : old — only the winning
                // thread inserts src into the queue.
                auto temp = pd().visited[src] + 1;
                auto old = atomicCAS(pd().visited+src,0,temp);
                if (old == 0) {
                    pd().queue2.insert(src); //pr->
                }
            }
        }else{
            atomicAdd(pd().delta+src,update_diff);
            if ((pd().visited[src] == 0) && (pd().visitedDlt[src] == 0 )) {
                //CAS: old == compare ? val : old
                auto temp = pd().visitedDlt[src] + 1;
                auto old = atomicCAS(pd().visitedDlt+src,0,temp);
                if (old == 0) {
                    pd().queueDlt.insert(src);
                }
            }
        }
    }
};
//------------------------------------------------------------------------------
// Deletion counterpart of RecomputeContri — not implemented yet.
struct RecomputeDeletionContriUndirected {
    //TO DO
};
//------------------------------------------------------------------------------
//struct UpdateDeltaAndMove {
// Promotes vertices whose accumulated small changes (delta) have grown past
// epsilon from the delta queue into queue2 for full recomputation; the
// winning thread clears the vertex's delta.
struct UpdateDltMove {
    HostDeviceVar<PrDynamicData> pd;

    //OPERATOR(Vertex& vertex_src) {
    OPERATOR(vid_t vertex_src) {
        auto src = vertex_src;
        //auto src = vertex_src.id();
        if (pd().delta[src] > pd().epsilon)
        {
            if (pd().visited[src] == 0) {
                //CAS: old == compare ? val : old
                auto temp = pd().visited[src] + 1;
                auto old = atomicCAS(pd().visited+src,0,temp);
                if (old == 0) {
                    //prType delta = pr->delta[src]; //$$pair with recomputeContributionUndirected, updateContributionsUndirected
                    //atomicAdd(pr->contri+src,delta);
                    // NOTE(review): plain store; concurrent atomicAdds into
                    // delta[src] from other operators could be lost here —
                    // confirm the phases never overlap.
                    pd().delta[src] = 0.0;
                    pd().queue2.insert(src);
                }
            }
        }
    }
};
//------------------------------------------------------------------------------
//struct UpdateContributionAndCopy {
// Flushes each queued vertex's accumulated contribution into curr_pr.
// (The disabled #if 0 branch is the Vertex-based variant of the same store.)
struct UpdateContriCopy {
    HostDeviceVar<PrDynamicData> pd;

#if 0
    OPERATOR(Vertex& src) {
        atomicAdd(pd().curr_pr + src.id(),
                  pd().contri[src.id()]);
    }
#else
    OPERATOR(vid_t src) {
        atomicAdd(pd().curr_pr + src, pd().contri[src]);
    }
#endif
};
//------------------------------------------------------------------------------
//struct UpdateDeltaAndCopy {
// NOTE(review): despite the name (formerly UpdateDeltaAndCopy), both branches
// add `contri`, not `delta`, making this byte-identical to UpdateContriCopy —
// confirm whether pd().delta[src] was intended here.
struct UpdateDltCopy {
    HostDeviceVar<PrDynamicData> pd;

#if 0
    OPERATOR(Vertex& src) {
        atomicAdd(pd().curr_pr + src.id(),
                  pd().contri[src.id()]);
    }
#else
    OPERATOR(vid_t src) {
        atomicAdd(pd().curr_pr + src, pd().contri[src]);
    }
#endif
};
//------------------------------------------------------------------------------
//struct UpdateContributionsUndirected {
// Pushes the change in src's contribution along one edge to dst, queueing dst
// for recomputation (queue2) or delta accumulation (queueDlt) depending on
// the magnitude of the propagated change.
//
// NOTE(review): `update_diff` below subtracts a term from itself and is
// therefore always 0, so this operator currently never changes contri/delta
// nor enqueues through a nonzero value. The trailing "(pr->usedOld[src])"
// comment suggests the second operand was meant to read a saved previous
// value — confirm and fix.
struct UpdateContri {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(Vertex& vertex, Edge& edge) {
        auto src = vertex.id();
        auto dst = edge.dst_id();
        auto dstVertex = edge.dst();
        //auto degree_src = src.degree();
        auto degree_src = vertex.degree();
        //auto degree_dst = dst.degree();
        auto degree_dst = dstVertex.degree(); //????????
        // NOTE(review): guard checks degree_src, but update_prop divides by
        // degree_dst — confirm which endpoint the guard should cover.
        if (degree_src == 0) return;
        //pd().curr_pr[src] = pd().normalized_damp +
        //                    pd().damp * pd().curr_pr[src];
        float update_diff = pd().damp*((pd().prev_pr[src]/degree_src) - (pd().prev_pr[src]/degree_src));//(pr->usedOld[src]));
        float update_prop = pd().damp*(update_diff/degree_dst);
        //atomicAdd(pd().diffPR+src,update_diff);
        atomicAdd(pd().contri+dst,update_diff);
        if(fabs(update_prop) > pd().epsilon){
            if (pd().visited[dst] == 0) {
                // CAS: old == compare ? val : old — only the winner inserts.
                auto temp = pd().visited[dst] + 1;
                auto old = atomicCAS(pd().visited+dst,0,temp);
                if (old == 0) {
                    //pr->queue2.insert(dst);
                    pd().queue2.insert(dst);
                }
            }
        }else{
            atomicAdd(pd().delta+dst,update_diff);
            if ((pd().visited[dst] == 0) && (pd().visitedDlt[dst] == 0 )) {
                //CAS: old == compare ? val : old
                auto temp = pd().visitedDlt[dst] + 1;
                auto old = atomicCAS(pd().visitedDlt+dst,0,temp);
                if (old == 0) {
                    pd().queueDlt.insert(dst);
                }
            }
        }
    }
};
//------------------------------------------------------------------------------
//struct RemoveContributionsUndirected {
// Subtracts the previously recorded incoming difference (diffPR) from the
// vertex's current rank, undoing stale contributions before re-accumulation.
struct RemoveContri {
    HostDeviceVar<PrDynamicData> pd;

#if 0
    OPERATOR(Vertex& src) {
        float diffPR = pd().diffPR[src];
        atomicAdd(pd().curr_pr + src.id(),-diffPR);
    }
#else
    OPERATOR(vid_t src) {
        float diffPR = pd().diffPR[src];
        atomicAdd(pd().curr_pr + src, -diffPR);
    }
#endif
};
//------------------------------------------------------------------------------
//struct PrevEqualCurr {
// Folds curr_pr into prev_pr for the next iteration.
// NOTE(review): the active branch *adds* curr_pr into prev_pr, while the name
// (formerly PrevEqualCurr) and the disabled #if 0 branch *assign* it. Adding
// is consistent with curr_pr holding only a per-round increment (ResetCurr
// zeroes it), but confirm the intended semantics.
struct PrevEqCurr {
    HostDeviceVar<PrDynamicData> pd;

#if 0
    OPERATOR(Vertex& src) {
        pd().prev_pr[src] = pd().curr_pr[src];
    }
#else
    OPERATOR(vid_t src) {
        atomicAdd(pd().prev_pr + src, pd().curr_pr[src]);
    }
#endif
};
//------------------------------------------------------------------------------
// Zeroes the per-vertex current-PageRank accumulator and clears the global
// reduction scalar ahead of a fresh accumulation pass. (All threads store the
// same zero to reduction_out, so the shared write is benign.)
struct ResetCurr {
    HostDeviceVar<PrDynamicData> pd;

    OPERATOR(vid_t v) {
        *(pd().reduction_out) = 0;
        pd().curr_pr[v] = 0.0f;
    }
};
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//------------------------------------------------------------------------------
// Precomputes each vertex's per-edge contribution prev_pr / degree, storing 0
// for isolated vertices so the division by zero is avoided.
// NOTE(review): the #else branch calls .degree() on a plain vid_t and would
// not compile if enabled.
struct ComputeContribuitionPerVertex {
    HostDeviceVar<PrDynamicData> pd;

#if 1
    OPERATOR(Vertex& vertex_src) {
        auto degree = vertex_src.degree();
        auto src = vertex_src.id();
        pd().contri[src] = degree == 0 ? 0.0f :
                           pd().prev_pr[src] / degree;
    }
#else
    OPERATOR(vid_t vertex_src) {
        auto degree = vertex_src.degree();
        auto src = vertex_src;
        atomicAdd(pd().curr_pr + src, pd().contri[src]);
    }
#endif
};
//------------------------------------------------------------------------------
// Per-edge functor (directed form): push the source vertex's contribution
// along the edge into the destination's curr_pr. atomicAdd is required since
// many edges may target the same destination concurrently.
struct AddContribuitions {
HostDeviceVar<PrDynamicData> pd;
OPERATOR(Vertex& src, Edge& edge) {
atomicAdd(pd().curr_pr + edge.dst_id(),
pd().contri[src.id()]);
}
};
//------------------------------------------------------------------------------
// Per-edge functor (undirected form): pull the neighbor's contribution into
// the owning vertex's curr_pr — the mirror of AddContribuitions, accumulating
// at src instead of dst.
struct AddContribuitionsUndirected {
HostDeviceVar<PrDynamicData> pd;
OPERATOR(Vertex& src_vertex, Edge& edge) {
atomicAdd(pd().curr_pr+ src_vertex.id(),
pd().contri[edge.dst_id()]);
}
};
//------------------------------------------------------------------------------
// Functor: apply the damping step to curr_pr, record the absolute change
// versus prev_pr (used later by the Sum reduction for convergence), and
// copy curr into prev for the next iteration.
struct DampAndDiffAndCopy {
HostDeviceVar<PrDynamicData> pd;
OPERATOR(vid_t src) {
// pd().curr_pr[src]=(1-pd().damp)/float(pd().nV)+
// pd().damp*pd().curr_pr[src];
// normalized_damp is the precomputed (1-damp)/nV term from the line above.
pd().curr_pr[src] = pd().normalized_damp +
pd().damp * pd().curr_pr[src];
pd().abs_diff[src] = fabsf(pd().curr_pr[src] -
pd().prev_pr[src]);
pd().prev_pr[src] = pd().curr_pr[src];
}
};
//------------------------------------------------------------------------------
// Reduction functor: accumulate per-vertex |curr - prev| differences into the
// single reduction_out scalar (convergence measure).
struct Sum {
HostDeviceVar<PrDynamicData> pd;
OPERATOR(vid_t src) {
atomicAdd(pd().reduction_out, pd().abs_diff[src]);
}
};
//------------------------------------------------------------------------------
// Reduction functor: accumulate all prev_pr values into reduction_out
// (e.g. to check/normalize the total PageRank mass).
struct SumPr {
HostDeviceVar<PrDynamicData> pd;
OPERATOR(vid_t src) {
atomicAdd(pd().reduction_out, pd().prev_pr[src] );
}
};
//------------------------------------------------------------------------------
// Functor: write the identity mapping ids[v] = v for every vertex id.
struct SetIds {
vid_t* ids;
OPERATOR(vid_t src) {
ids[src] = src;
}
};
//-------------------------------------------------------------------------------
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
} // hornetAlgs namespace | the_stack |
#include "cuda_lyra2v2_sm3.cuh"
#ifdef __INTELLISENSE__
/* just for vstudio code colors */
#define __CUDA_ARCH__ 500
#endif
#define TPB 32
#if __CUDA_ARCH__ >= 500
#include "cuda_lyra2_vectors.h"
#define Nrow 4
#define Ncol 4
#define memshift 3
__device__ uint2x4 *DMatrix;
// Load one uint2 word from the dynamically-sized per-block shared-memory
// scratch area. Slots are interleaved so each (threadIdx.x, threadIdx.y)
// lane owns a private stride-1 sequence of entries per logical index.
__device__ __forceinline__ uint2 LD4S(const int index)
{
    extern __shared__ uint2 shared_mem[];
    const int slot = (index * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
    return shared_mem[slot];
}
// Store one uint2 word into the per-block shared-memory scratch area,
// using the same interleaved slot layout as LD4S.
__device__ __forceinline__ void ST4S(const int index, const uint2 data)
{
    extern __shared__ uint2 shared_mem[];
    const int slot = (index * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
    shared_mem[slot] = data;
}
// Shuffle a 64-bit value held as a uint2 from lane b within width-c sub-warps,
// shuffling both 32-bit halves. Uses the full-warp mask, so all 32 lanes of
// the warp must reach this call together.
__device__ __forceinline__ uint2 shuffle2(uint2 a, uint32_t b, uint32_t c)
{
return make_uint2(__shfl_sync(0xFFFFFFFF,a.x, b, c), __shfl_sync(0xFFFFFFFF,a.y, b, c));
}
// Blake2b-style G mixing function on four 64-bit words, each stored as a
// uint2 pair: add / xor / rotate with rotation amounts 32 (SWAPUINT2),
// 24, 16 and 63. Statement order is part of the algorithm — do not reorder.
__device__ __forceinline__
void Gfunc_v5(uint2 &a, uint2 &b, uint2 &c, uint2 &d)
{
a += b; d ^= a; d = SWAPUINT2(d);
c += d; b ^= c; b = ROR2(b, 24);
a += b; d ^= a; d = ROR2(d, 16);
c += d; b ^= c; b = ROR2(b, 63);
}
// One full round over a 4x4 state of 64-bit words (uint2x4 per row):
// first G on the four columns, then G on the four diagonals.
__device__ __forceinline__
void round_lyra_v5(uint2x4 s[4])
{
// Column step.
Gfunc_v5(s[0].x, s[1].x, s[2].x, s[3].x);
Gfunc_v5(s[0].y, s[1].y, s[2].y, s[3].y);
Gfunc_v5(s[0].z, s[1].z, s[2].z, s[3].z);
Gfunc_v5(s[0].w, s[1].w, s[2].w, s[3].w);
// Diagonal step.
Gfunc_v5(s[0].x, s[1].y, s[2].z, s[3].w);
Gfunc_v5(s[0].y, s[1].z, s[2].w, s[3].x);
Gfunc_v5(s[0].z, s[1].w, s[2].x, s[3].y);
Gfunc_v5(s[0].w, s[1].x, s[2].y, s[3].z);
// Cooperative variant of the round for the 4-threads-per-hash layout: each
// of 4 lanes holds one column of the state. Lane-rotating shuffles (within
// width-4 sub-warps) realign the data so the second G acts on the diagonals,
// then the inverse rotation restores column alignment.
__device__ __forceinline__
void round_lyra_v5(uint2 s[4])
{
Gfunc_v5(s[0], s[1], s[2], s[3]);
// Rotate rows 1..3 left by 1..3 lanes -> diagonal alignment.
s[1] = shuffle2(s[1], threadIdx.x + 1, 4);
s[2] = shuffle2(s[2], threadIdx.x + 2, 4);
s[3] = shuffle2(s[3], threadIdx.x + 3, 4);
Gfunc_v5(s[0], s[1], s[2], s[3]);
// Inverse rotation (+3 == -1 mod 4) restores column alignment.
s[1] = shuffle2(s[1], threadIdx.x + 3, 4);
s[2] = shuffle2(s[2], threadIdx.x + 2, 4);
s[3] = shuffle2(s[3], threadIdx.x + 1, 4);
}
// Lyra2v2 setup phase for this thread's hash slice: derives the four matrix
// rows (Nrow = Ncol = 4, memshift uint2 words per cell) and writes them into
// shared memory via ST4S. Rows 0 and 1 are staged in the state0/state1
// register arrays; rows 2 and 3 are written in reversed-column order (the
// `memshift * (Ncol - 1) - i*memshift` offsets). The cross-lane shuffle +
// rotate-by-one feedback (special-cased for lane 0) follows the Lyra2
// "rotW" column update. Statement order is algorithmic — do not reorder.
__device__ __forceinline__
void reduceDuplexRowSetup2(uint2 state[4])
{
uint2 state1[Ncol][3], state0[Ncol][3], state2[3];
int i, j;
// Absorb phase: snapshot the sponge state into row 0 (reversed order).
#pragma unroll
for (int i = 0; i < Ncol; i++)
{
#pragma unroll
for (j = 0; j < 3; j++)
state0[Ncol - i - 1][j] = state[j];
round_lyra_v5(state);
}
// Derive row 1 from row 0 (kept in registers, reversed order).
//#pragma unroll 4
for (i = 0; i < Ncol; i++)
{
#pragma unroll
for (j = 0; j < 3; j++)
state[j] ^= state0[i][j];
round_lyra_v5(state);
#pragma unroll
for (j = 0; j < 3; j++)
state1[Ncol - i - 1][j] = state0[i][j];
#pragma unroll
for (j = 0; j < 3; j++)
state1[Ncol - i - 1][j] ^= state[j];
}
// Derive row 2 from rows 0/1; flush rows 0 and 2 to shared memory.
for (i = 0; i < Ncol; i++)
{
const uint32_t s0 = memshift * Ncol * 0 + i * memshift;
const uint32_t s2 = memshift * Ncol * 2 + memshift * (Ncol - 1) - i*memshift;
#pragma unroll
for (j = 0; j < 3; j++)
state[j] ^= state1[i][j] + state0[i][j];
round_lyra_v5(state);
#pragma unroll
for (j = 0; j < 3; j++)
state2[j] = state1[i][j];
#pragma unroll
for (j = 0; j < 3; j++)
state2[j] ^= state[j];
#pragma unroll
for (j = 0; j < 3; j++)
ST4S(s2 + j, state2[j]);
// Rotated-word feedback from the neighboring lane (lane 0 wraps with a
// one-word rotation of the incoming triple).
uint2 Data0 = shuffle2(state[0], threadIdx.x - 1, 4);
uint2 Data1 = shuffle2(state[1], threadIdx.x - 1, 4);
uint2 Data2 = shuffle2(state[2], threadIdx.x - 1, 4);
if (threadIdx.x == 0) {
state0[i][0] ^= Data2;
state0[i][1] ^= Data0;
state0[i][2] ^= Data1;
} else {
state0[i][0] ^= Data0;
state0[i][1] ^= Data1;
state0[i][2] ^= Data2;
}
#pragma unroll
for (j = 0; j < 3; j++)
ST4S(s0 + j, state0[i][j]);
// Reuse state0 registers to carry row 2 into the next phase.
#pragma unroll
for (j = 0; j < 3; j++)
state0[i][j] = state2[j];
}
// Derive row 3 from rows 1/2; flush rows 1 and 3 to shared memory.
for (i = 0; i < Ncol; i++)
{
const uint32_t s1 = memshift * Ncol * 1 + i*memshift;
const uint32_t s3 = memshift * Ncol * 3 + memshift * (Ncol - 1) - i*memshift;
#pragma unroll
for (j = 0; j < 3; j++)
state[j] ^= state1[i][j] + state0[Ncol - i - 1][j];
round_lyra_v5(state);
#pragma unroll
for (j = 0; j < 3; j++)
state0[Ncol - i - 1][j] ^= state[j];
#pragma unroll
for (j = 0; j < 3; j++)
ST4S(s3 + j, state0[Ncol - i - 1][j]);
uint2 Data0 = shuffle2(state[0], threadIdx.x - 1, 4);
uint2 Data1 = shuffle2(state[1], threadIdx.x - 1, 4);
uint2 Data2 = shuffle2(state[2], threadIdx.x - 1, 4);
if (threadIdx.x == 0) {
state1[i][0] ^= Data2;
state1[i][1] ^= Data0;
state1[i][2] ^= Data1;
} else {
state1[i][0] ^= Data0;
state1[i][1] ^= Data1;
state1[i][2] ^= Data2;
}
#pragma unroll
for (j = 0; j < 3; j++)
ST4S(s1 + j, state1[i][j]);
}
}
// Lyra2v2 wandering-phase step: for each column, mix rows rowIn and rowInOut
// into the sponge state, apply one round, write the rotated-feedback update
// back to rowInOut, and XOR the state into rowOut. All rows live in shared
// memory (LD4S/ST4S).
__device__
void reduceDuplexRowt2(const int rowIn, const int rowInOut, const int rowOut, uint2 state[4])
{
uint2 state1[3], state2[3];
// Base shared-memory offsets of the three rows.
const uint32_t ps1 = memshift * Ncol * rowIn;
const uint32_t ps2 = memshift * Ncol * rowInOut;
const uint32_t ps3 = memshift * Ncol * rowOut;
for (int i = 0; i < Ncol; i++)
{
const uint32_t s1 = ps1 + i*memshift;
const uint32_t s2 = ps2 + i*memshift;
const uint32_t s3 = ps3 + i*memshift;
#pragma unroll
for (int j = 0; j < 3; j++)
state1[j] = LD4S(s1 + j);
#pragma unroll
for (int j = 0; j < 3; j++)
state2[j] = LD4S(s2 + j);
#pragma unroll
for (int j = 0; j < 3; j++)
state[j] ^= state1[j] + state2[j];
round_lyra_v5(state);
// Rotated-word feedback from the neighboring lane; lane 0 wraps with a
// one-word rotation (same pattern as in reduceDuplexRowSetup2).
uint2 Data0 = shuffle2(state[0], threadIdx.x - 1, 4);
uint2 Data1 = shuffle2(state[1], threadIdx.x - 1, 4);
uint2 Data2 = shuffle2(state[2], threadIdx.x - 1, 4);
if (threadIdx.x == 0) {
state2[0] ^= Data2;
state2[1] ^= Data0;
state2[2] ^= Data1;
} else {
state2[0] ^= Data0;
state2[1] ^= Data1;
state2[2] ^= Data2;
}
#pragma unroll
for (int j = 0; j < 3; j++)
ST4S(s2 + j, state2[j]);
#pragma unroll
for (int j = 0; j < 3; j++)
ST4S(s3 + j, LD4S(s3 + j) ^ state[j]);
}
}
// Final wandering-phase step, specialized for rowIn = 2 / rowOut = 3: only
// the first column's rowInOut cell is materialized (`last`), because only it
// contributes to the final state XOR; the remaining columns just advance the
// sponge. When rowInOut happens to equal rowOut, the state written there in
// the generic step is folded into `last` as well.
__device__
void reduceDuplexRowt2x4(const int rowInOut, uint2 state[4])
{
const int rowIn = 2;
const int rowOut = 3;
int i, j;
uint2 last[3];
const uint32_t ps1 = memshift * Ncol * rowIn;
const uint32_t ps2 = memshift * Ncol * rowInOut;
// Column 0: load, mix, round.
#pragma unroll
for (int j = 0; j < 3; j++)
last[j] = LD4S(ps2 + j);
#pragma unroll
for (int j = 0; j < 3; j++)
state[j] ^= LD4S(ps1 + j) + last[j];
round_lyra_v5(state);
// Rotated-word feedback (lane 0 wraps with a one-word rotation).
uint2 Data0 = shuffle2(state[0], threadIdx.x - 1, 4);
uint2 Data1 = shuffle2(state[1], threadIdx.x - 1, 4);
uint2 Data2 = shuffle2(state[2], threadIdx.x - 1, 4);
if (threadIdx.x == 0) {
last[0] ^= Data2;
last[1] ^= Data0;
last[2] ^= Data1;
} else {
last[0] ^= Data0;
last[1] ^= Data1;
last[2] ^= Data2;
}
if (rowInOut == rowOut)
{
#pragma unroll
for (j = 0; j < 3; j++)
last[j] ^= state[j];
}
// Remaining columns: advance the sponge without writebacks.
for (i = 1; i < Ncol; i++)
{
const uint32_t s1 = ps1 + i*memshift;
const uint32_t s2 = ps2 + i*memshift;
#pragma unroll
for (j = 0; j < 3; j++)
state[j] ^= LD4S(s1 + j) + LD4S(s2 + j);
round_lyra_v5(state);
}
// Squeeze: fold the retained column into the output state.
#pragma unroll
for (int j = 0; j < 3; j++)
state[j] ^= last[j];
}
// Phase 1 (one thread per hash): load the 256-bit input, initialize the
// sponge with the Blake2b IV, absorb with 12 rounds, XOR in the Lyra2v2
// padding/parameter block (Mask), run 12 more rounds, and spill the full
// state to global DMatrix in a strided (SoA) layout.
__global__
__launch_bounds__(TPB, 1)
void lyra2v2_gpu_hash_32_1(uint32_t threads, uint2 *inputHash)
{
const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
const uint2x4 blake2b_IV[2] = {
0xf3bcc908UL, 0x6a09e667UL, 0x84caa73bUL, 0xbb67ae85UL,
0xfe94f82bUL, 0x3c6ef372UL, 0x5f1d36f1UL, 0xa54ff53aUL,
0xade682d1UL, 0x510e527fUL, 0x2b3e6c1fUL, 0x9b05688cUL,
0xfb41bd6bUL, 0x1f83d9abUL, 0x137e2179UL, 0x5be0cd19UL
};
// Lyra2v2 parameter/padding block (input length, basil params, end marker).
const uint2x4 Mask[2] = {
0x00000020UL, 0x00000000UL, 0x00000020UL, 0x00000000UL,
0x00000020UL, 0x00000000UL, 0x00000001UL, 0x00000000UL,
0x00000004UL, 0x00000000UL, 0x00000004UL, 0x00000000UL,
0x00000080UL, 0x00000000UL, 0x00000000UL, 0x01000000UL
};
uint2x4 state[4];
if (thread < threads)
{
// Input hash doubles as the sponge key: rows 0 and 1 start identical.
state[0].x = state[1].x = __ldg(&inputHash[thread + threads * 0]);
state[0].y = state[1].y = __ldg(&inputHash[thread + threads * 1]);
state[0].z = state[1].z = __ldg(&inputHash[thread + threads * 2]);
state[0].w = state[1].w = __ldg(&inputHash[thread + threads * 3]);
state[2] = blake2b_IV[0];
state[3] = blake2b_IV[1];
for (int i = 0; i<12; i++)
round_lyra_v5(state);
state[0] ^= Mask[0];
state[1] ^= Mask[1];
for (int i = 0; i<12; i++)
round_lyra_v5(state);
// Strided store: row r of every thread's state is contiguous in DMatrix.
DMatrix[blockDim.x * gridDim.x * 0 + thread] = state[0];
DMatrix[blockDim.x * gridDim.x * 1 + thread] = state[1];
DMatrix[blockDim.x * gridDim.x * 2 + thread] = state[2];
DMatrix[blockDim.x * gridDim.x * 3 + thread] = state[3];
}
}
// Phase 2 (4 cooperating lanes per hash: threadIdx.x selects the state
// column, threadIdx.y selects the hash): reload the sponge state, run the
// matrix setup and wandering phases in shared memory, and store the state
// back. The row index rowa is broadcast from lane 0 (low word of state[0]
// mod 4). Requires the dynamic shared-memory size passed at launch.
__global__
__launch_bounds__(TPB, 1)
void lyra2v2_gpu_hash_32_2(uint32_t threads)
{
const uint32_t thread = blockDim.y * blockIdx.x + threadIdx.y;
if (thread < threads)
{
uint2 state[4];
// Each lane loads one 64-bit column of each of the four state rows.
state[0] = ((uint2*)DMatrix)[(0 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
state[1] = ((uint2*)DMatrix)[(1 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
state[2] = ((uint2*)DMatrix)[(2 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
state[3] = ((uint2*)DMatrix)[(3 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x];
reduceDuplexRowSetup2(state);
uint32_t rowa;
int prev = 3;
for (int i = 0; i < 3; i++)
{
// Pseudo-random row choice, broadcast from lane 0 of the 4-lane group.
rowa = __shfl_sync(0xFFFFFFFF,state[0].x, 0, 4) & 3;
reduceDuplexRowt2(prev, rowa, i, state);
prev = i;
}
rowa = __shfl_sync(0xFFFFFFFF,state[0].x, 0, 4) & 3;
reduceDuplexRowt2x4(rowa, state);
((uint2*)DMatrix)[(0 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[0];
((uint2*)DMatrix)[(1 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[1];
((uint2*)DMatrix)[(2 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[2];
((uint2*)DMatrix)[(3 * gridDim.x * blockDim.y + thread) * blockDim.x + threadIdx.x] = state[3];
}
}
// Phase 3 (one thread per hash): reload the state, run the final 12 squeeze
// rounds, and write the first 256-bit row as the output hash (same strided
// layout as the input of phase 1).
__global__
__launch_bounds__(TPB, 1)
void lyra2v2_gpu_hash_32_3(uint32_t threads, uint2 *outputHash)
{
const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint2x4 state[4];
if (thread < threads)
{
state[0] = __ldg4(&DMatrix[blockDim.x * gridDim.x * 0 + thread]);
state[1] = __ldg4(&DMatrix[blockDim.x * gridDim.x * 1 + thread]);
state[2] = __ldg4(&DMatrix[blockDim.x * gridDim.x * 2 + thread]);
state[3] = __ldg4(&DMatrix[blockDim.x * gridDim.x * 3 + thread]);
for (int i = 0; i < 12; i++)
round_lyra_v5(state);
outputHash[thread + threads * 0] = state[0].x;
outputHash[thread + threads * 1] = state[0].y;
outputHash[thread + threads * 2] = state[0].z;
outputHash[thread + threads * 3] = state[0].w;
}
}
#else
#include "cuda_helper.h"
#if __CUDA_ARCH__ < 200
__device__ void* DMatrix;
#endif
// No-op stubs for builds targeting < SM50; on those devices the host wrapper
// dispatches to the _v3 path instead (presumably provided by
// cuda_lyra2v2_sm3.cuh included at the top of the file).
__global__ void lyra2v2_gpu_hash_32_1(uint32_t threads, uint2 *inputHash) {}
__global__ void lyra2v2_gpu_hash_32_2(uint32_t threads) {}
__global__ void lyra2v2_gpu_hash_32_3(uint32_t threads, uint2 *outputHash) {}
#endif
// Host-side init: record the device arch for thr_id and publish the
// preallocated scratch buffer to the __device__ DMatrix symbol.
// NOTE(review): the cudaMemcpyToSymbol return code is not checked — a failed
// copy would leave DMatrix dangling and fault later in the kernels.
__host__
void lyra2v2_cpu_init(int thr_id, uint32_t threads, uint64_t *d_matrix)
{
cuda_get_arch(thr_id);
// just assign the device pointer allocated in main loop
cudaMemcpyToSymbol(DMatrix, &d_matrix, sizeof(uint64_t*), 0, cudaMemcpyHostToDevice);
}
// Host wrapper: run the three-phase lyra2v2 pipeline in-place on g_hash.
// SM50+: phase 2 uses a 4 x (TPB/4) block (4 lanes per hash) and dynamic
// shared memory of 48 uint2 words per thread (Nrow*Ncol*memshift = 48).
// Older devices fall back to the monolithic _v3 kernel with an
// arch-dependent block size.
__host__
void lyra2v2_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *g_hash, int order)
{
int dev_id = device_map[thr_id % MAX_GPUS];
if (device_sm[dev_id] >= 500) {
const uint32_t tpb = TPB;
dim3 grid2((threads + tpb - 1) / tpb);
dim3 block2(tpb);
// Phase 2 launches 4 threads per hash, hence the *4 grid sizing.
dim3 grid4((threads * 4 + tpb - 1) / tpb);
dim3 block4(4, tpb / 4);
lyra2v2_gpu_hash_32_1 <<< grid2, block2 >>> (threads, (uint2*)g_hash);
lyra2v2_gpu_hash_32_2 <<< grid4, block4, 48 * sizeof(uint2) * tpb >>> (threads);
lyra2v2_gpu_hash_32_3 <<< gr2 := grid2, block2 >>> (threads, (uint2*)g_hash);
} else {
uint32_t tpb = 16;
if (cuda_arch[dev_id] >= 350) tpb = TPB35;
else if (cuda_arch[dev_id] >= 300) tpb = TPB30;
else if (cuda_arch[dev_id] >= 200) tpb = TPB20;
dim3 grid((threads + tpb - 1) / tpb);
dim3 block(tpb);
lyra2v2_gpu_hash_32_v3 <<< grid, block >>> (threads, startNounce, (uint2*)g_hash);
}
}
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/scan.h>
#include <thrust/transform.h>
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<const char*, size_type>;
namespace {
enum class Dir { FORWARD, BACKWARD };
/**
* @brief Compute the number of tokens for the `idx'th` string element of `d_strings`.
*
* The number of tokens is the same regardless if counting from the beginning
* or the end of the string.
*/
struct token_counter_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
size_type const max_tokens = std::numeric_limits<size_type>::max();
// Returns the number of tokens in string `idx`; null strings yield 0.
__device__ size_type operator()(size_type idx) const
{
if (d_strings.is_null(idx)) { return 0; }
auto const d_str = d_strings.element<string_view>(idx);
size_type token_count = 0;
size_type start_pos = 0;
// Count delimiter occurrences, stopping once max_tokens-1 splits found
// (the remainder of the string becomes the final token).
while (token_count < max_tokens - 1) {
auto const delimiter_pos = d_str.find(d_delimiter, start_pos);
if (delimiter_pos < 0) break;
token_count++;
start_pos = delimiter_pos + d_delimiter.length();
}
return token_count + 1; // always at least one token
}
};
/**
* @brief Identify the tokens from the `idx'th` string element of `d_strings`.
*/
template <Dir dir>
struct token_reader_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
int32_t* d_token_offsets{}; // for locating tokens in d_tokens
string_index_pair* d_tokens{}; // output (data pointer, byte length) pairs
// Build one token's (pointer, byte-length) pair. For FORWARD the token runs
// from start_pos up to the delimiter; for BACKWARD it runs from just past
// the delimiter up to end_pos. Positions are character indices; byte_offset
// converts them to byte positions.
__device__ string_index_pair resolve_token(string_view const& d_str,
size_type start_pos,
size_type end_pos,
size_type delimiter_pos) const
{
if (dir == Dir::FORWARD) {
auto const byte_offset = d_str.byte_offset(start_pos);
return string_index_pair{d_str.data() + byte_offset,
d_str.byte_offset(delimiter_pos) - byte_offset};
} else {
auto const byte_offset = d_str.byte_offset(delimiter_pos + d_delimiter.length());
return string_index_pair{d_str.data() + byte_offset,
d_str.byte_offset(end_pos) - byte_offset};
}
}
// Emit all tokens of string `idx` into the d_tokens slice reserved for it
// by the offsets computed from token_counter_fn.
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) { return; }
auto const token_offset = d_token_offsets[idx];
auto const token_count = d_token_offsets[idx + 1] - token_offset;
auto d_result = d_tokens + token_offset;
auto const d_str = d_strings.element<string_view>(idx);
if (d_str.empty()) {
// Pandas str.split("") for non-whitespace delimiter is an empty string
*d_result = string_index_pair{"", 0};
return;
}
size_type token_idx = 0;
size_type start_pos = 0; // updates only if moving forward
size_type end_pos = d_str.length(); // updates only if moving backward
while (token_idx < token_count - 1) {
auto const delimiter_pos = dir == Dir::FORWARD ? d_str.find(d_delimiter, start_pos)
: d_str.rfind(d_delimiter, start_pos, end_pos);
if (delimiter_pos < 0) break;
auto const token = resolve_token(d_str, start_pos, end_pos, delimiter_pos);
if (dir == Dir::FORWARD) {
d_result[token_idx] = token;
start_pos = delimiter_pos + d_delimiter.length();
} else {
// BACKWARD fills the slice from the end so tokens stay in order.
d_result[token_count - 1 - token_idx] = token;
end_pos = delimiter_pos;
}
token_idx++;
}
// set last token to remainder of the string
if (dir == Dir::FORWARD) {
auto const offset_bytes = d_str.byte_offset(start_pos);
d_result[token_idx] =
string_index_pair{d_str.data() + offset_bytes, d_str.byte_offset(end_pos) - offset_bytes};
} else {
d_result[0] = string_index_pair{d_str.data(), d_str.byte_offset(end_pos)};
}
}
};
/**
* @brief Compute the number of tokens for the `idx'th` string element of `d_strings`.
*/
struct whitespace_token_counter_fn {
column_device_view const d_strings; // strings to split
size_type const max_tokens = std::numeric_limits<size_type>::max();
// Count whitespace-delimited tokens in string `idx` (whitespace here is any
// char <= ' '); nulls yield 0. Counting stops once max_tokens is reachable,
// with the remainder counting as one final token.
__device__ size_type operator()(size_type idx) const
{
if (d_strings.is_null(idx)) { return 0; }
auto const d_str = d_strings.element<string_view>(idx);
size_type token_count = 0;
auto spaces = true; // currently inside a run of whitespace
auto reached_max_tokens = false;
for (auto ch : d_str) {
// A state flip marks a token boundary; only space->non-space starts
// a token, and the count is bumped when the token ends.
if (spaces != (ch <= ' ')) {
if (!spaces) {
if (token_count < max_tokens - 1) {
token_count++;
} else {
reached_max_tokens = true;
break;
}
}
spaces = !spaces;
}
}
// pandas.Series.str.split("") returns 0 tokens.
// A trailing in-progress token (or the max-tokens remainder) counts as one.
if (reached_max_tokens || !spaces) token_count++;
return token_count;
}
};
/**
* @brief Identify the tokens from the `idx'th` string element of `d_strings`.
*/
template <Dir dir>
struct whitespace_token_reader_fn {
column_device_view const d_strings; // strings to split
size_type const max_tokens{}; // upper bound on tokens per string
int32_t* d_token_offsets{}; // slice boundaries into d_tokens
string_index_pair* d_tokens{}; // output (data pointer, byte length) pairs
// Emit the whitespace-delimited tokens of string `idx`, walking from the
// front (FORWARD) or back (BACKWARD) via whitespace_string_tokenizer. When
// the max_tokens cap was hit, the boundary token is rewritten afterwards to
// absorb the rest of the string.
__device__ void operator()(size_type idx)
{
auto const token_offset = d_token_offsets[idx];
auto const token_count = d_token_offsets[idx + 1] - token_offset;
if (token_count == 0) { return; }
auto d_result = d_tokens + token_offset;
auto const d_str = d_strings.element<string_view>(idx);
whitespace_string_tokenizer tokenizer(d_str, dir != Dir::FORWARD);
size_type token_idx = 0;
position_pair token{0, 0};
if (dir == Dir::FORWARD) {
while (tokenizer.next_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_result[token_idx++] =
string_index_pair{d_str.data() + token.first, token.second - token.first};
}
// Point at the last emitted token; extend it to the end of the string.
--token_idx;
token.second = d_str.size_bytes() - token.first;
} else {
// BACKWARD fills the slice from the end so tokens remain in order.
while (tokenizer.prev_token() && (token_idx < token_count)) {
token = tokenizer.get_token();
d_result[token_count - 1 - token_idx] =
string_index_pair{d_str.data() + token.first, token.second - token.first};
++token_idx;
}
token_idx = token_count - token_idx; // token_count - 1 - (token_idx-1)
// Extend the first token back to the start of the string.
token.first = 0;
}
// reset last token only if we hit the max
if (token_count == max_tokens)
d_result[token_idx] = string_index_pair{d_str.data() + token.first, token.second};
}
};
} // namespace
// The output is one list item per string
// Core split-to-list driver shared by the delimiter and whitespace paths:
// 1) count tokens per string, 2) exclusive-scan the counts into offsets,
// 3) extract (pointer, length) token pairs, 4) assemble a LIST column of
// strings. `counter` and `reader` must agree on tokenization rules.
template <typename TokenCounter, typename TokenReader>
std::unique_ptr<column> split_record_fn(strings_column_view const& strings,
TokenCounter counter,
TokenReader reader,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// create offsets column by counting the number of tokens per string
auto strings_count = strings.size();
auto offsets = make_numeric_column(
data_type{type_id::INT32}, strings_count + 1, mask_state::UNALLOCATED, stream, mr);
auto d_offsets = offsets->mutable_view().data<int32_t>();
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_offsets,
counter);
thrust::exclusive_scan(
rmm::exec_policy(stream), d_offsets, d_offsets + strings_count + 1, d_offsets);
// last entry is the total number of tokens to be generated
auto total_tokens = cudf::detail::get_value<int32_t>(offsets->view(), strings_count, stream);
// split each string into an array of index-pair values
rmm::device_uvector<string_index_pair> tokens(total_tokens, stream);
reader.d_token_offsets = d_offsets;
reader.d_tokens = tokens.data();
thrust::for_each_n(
rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), strings_count, reader);
// convert the index-pairs into one big strings column
auto strings_output = make_strings_column(tokens.begin(), tokens.end(), stream, mr);
// create a lists column using the offsets and the strings columns
// (null mask is inherited from the input strings column)
return make_lists_column(strings_count,
std::move(offsets),
std::move(strings_output),
strings.null_count(),
copy_bitmask(strings.parent(), stream, mr));
}
// Detail entry point: dispatch to the whitespace tokenizer when the
// delimiter is empty, otherwise to the explicit-delimiter tokenizer.
// `dir` selects splitting from the front (split) or the back (rsplit).
template <Dir dir>
std::unique_ptr<column> split_record(
strings_column_view const& strings,
string_scalar const& delimiter = string_scalar(""),
size_type maxsplit = -1,
rmm::cuda_stream_view stream = rmm::cuda_stream_default,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
// makes consistent with Pandas: maxsplit splits => maxsplit+1 tokens;
// non-positive maxsplit means unlimited.
size_type max_tokens = maxsplit > 0 ? maxsplit + 1 : std::numeric_limits<size_type>::max();
auto d_strings_column_ptr = column_device_view::create(strings.parent(), stream);
if (delimiter.size() == 0) {
return split_record_fn(strings,
whitespace_token_counter_fn{*d_strings_column_ptr, max_tokens},
whitespace_token_reader_fn<dir>{*d_strings_column_ptr, max_tokens},
stream,
mr);
} else {
string_view d_delimiter(delimiter.data(), delimiter.size());
return split_record_fn(strings,
token_counter_fn{*d_strings_column_ptr, d_delimiter, max_tokens},
token_reader_fn<dir>{*d_strings_column_ptr, d_delimiter},
stream,
mr);
}
}
} // namespace detail
// external APIs
// Public API: split each string from the front into a LIST<STRING> column.
std::unique_ptr<column> split_record(strings_column_view const& strings,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::split_record<detail::Dir::FORWARD>(
strings, delimiter, maxsplit, rmm::cuda_stream_default, mr);
}
// Public API: split each string from the back (rsplit) into a LIST<STRING>
// column; differs from split_record only when maxsplit limits the splits.
std::unique_ptr<column> rsplit_record(strings_column_view const& strings,
string_scalar const& delimiter,
size_type maxsplit,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::split_record<detail::Dir::BACKWARD>(
strings, delimiter, maxsplit, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf | the_stack |
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cudaflow.hpp>
// ----------------------------------------------------------------------------
// kernel helper
// ----------------------------------------------------------------------------
// Fill kernel: write `value` into every element of ptr[0, N).
// One element per thread over a 1-D launch; tail threads past N bail out.
template <typename T>
__global__ void k_set(T* ptr, size_t N, T value) {
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (!(tid < N)) {
        return;
    }
    ptr[tid] = value;
}
// Single-element set: every launched thread writes `value` to ptr[i]
// (tests launch this with a 1x1 grid, so exactly one writer).
template <typename T>
__global__ void k_single_set(T* ptr, int i, T value) {
ptr[i] = value;
}
// Element-wise add kernel: ptr[i] += value for i in [0, N), one element per
// thread over a 1-D launch with a tail guard.
template <typename T>
__global__ void k_add(T* ptr, size_t N, T value) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
ptr[i] += value;
}
}
// Single-element add: every launched thread performs ptr[i] += value
// (non-atomic — callers must ensure a single writer).
template <typename T>
__global__ void k_single_add(T* ptr, int i, T value) {
ptr[i] += value;
}
// --------------------------------------------------------
// Testcase: Empty
// --------------------------------------------------------
// Run a taskflow containing three empty GPU-flow tasks 100 times and check
// that every task body executed on each run (3 tasks * 100 runs = 300).
// T is the GPU-task parameter type (tf::cudaFlow or tf::cudaFlowCapturer).
template <typename T>
void empty() {
    std::atomic<int> hits{0};
    tf::Taskflow taskflow;
    tf::Executor executor;
    for (int t = 0; t < 3; ++t) {
        taskflow.emplace([&](T&) {
            ++hits;
        });
    }
    executor.run_n(taskflow, 100).wait();
    REQUIRE(hits == 300);
}
TEST_CASE("Empty" * doctest::timeout(300)) {
empty<tf::cudaFlow>();
}
TEST_CASE("EmptyCapture" * doctest::timeout(300)) {
empty<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Standalone
// --------------------------------------------------------
// Exercise a standalone (executor-less) GPU flow of type T:
// H2D copy -> k_add(+17) -> D2H copy, offloaded once and then 9 more times,
// so each element accumulates 17 and then 10*17 total.
template <typename T>
void standalone() {
T cf;
REQUIRE(cf.empty());
unsigned N = 1024;
auto cpu = static_cast<int*>(std::calloc(N, sizeof(int)));
auto gpu = tf::cuda_malloc_device<int>(N);
dim3 g = {(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, N);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, N, 17);
auto d2h = cf.copy(cpu, gpu, N);
h2d.precede(kernel);
kernel.precede(d2h);
// Nothing has run yet: the host buffer must still be all zeros.
for(unsigned i=0; i<N; ++i) {
REQUIRE(cpu[i] == 0);
}
cf.offload();
for(unsigned i=0; i<N; ++i) {
REQUIRE(cpu[i] == 17);
}
// Nine more offloads re-run the whole graph (copy-in included), so the
// device value grows by 17 per run: 17 * 10 = 170.
cf.offload_n(9);
for(unsigned i=0; i<N; ++i) {
REQUIRE(cpu[i] == 170);
}
std::free(cpu);
tf::cuda_free(gpu);
}
TEST_CASE("Standalone.cudaFlow") {
standalone<tf::cudaFlow>();
}
TEST_CASE("Standalone.cudaCapturer") {
standalone<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Set
// --------------------------------------------------------
// For a range of odd buffer sizes, build a CPU-alloc task preceding a
// cudaFlow task (H2D -> k_set(17) -> D2H) and verify every element was set.
// T is the element type under test.
template <typename T>
void set() {
// n = n*2 + 1 keeps sizes odd, exercising non-multiple-of-block tails.
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_set<T>, gpu, n, (T)17);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
// Allocation must complete before the GPU task captures the pointers.
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Set.i8" * doctest::timeout(300)) {
set<int8_t>();
}
TEST_CASE("Set.i16" * doctest::timeout(300)) {
set<int16_t>();
}
TEST_CASE("Set.i32" * doctest::timeout(300)) {
set<int32_t>();
}
// --------------------------------------------------------
// Testcase: Add
// --------------------------------------------------------
// For a range of odd buffer sizes, chain four k_add kernels (+1,+2,+3,+4)
// inside one cudaFlow and verify every element of the zero-initialized
// buffer ends up at 10. T is the element type under test.
template <typename T>
void add() {
for(unsigned n=1; n<=123456; n = n*2 + 1) {
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow& cf){
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto ad1 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 1);
auto ad2 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 2);
auto ad3 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 3);
auto ad4 = cf.kernel(g, b, 0, k_add<T>, gpu, n, 4);
auto d2h = cf.copy(cpu, gpu, n);
// Strict chain: each add must observe the previous one's result.
h2d.precede(ad1);
ad1.precede(ad2);
ad2.precede(ad3);
ad3.precede(ad4);
ad4.precede(d2h);
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == 10);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
}
TEST_CASE("Add.i8" * doctest::timeout(300)) {
add<int8_t>();
}
TEST_CASE("Add.i16" * doctest::timeout(300)) {
add<int16_t>();
}
TEST_CASE("Add.i32" * doctest::timeout(300)) {
add<int32_t>();
}
// TODO: 64-bit fail?
//TEST_CASE("Add.i64" * doctest::timeout(300)) {
// add<int64_t>();
//}
// --------------------------------------------------------
// Testcase: Binary Set
// --------------------------------------------------------
// Binary-tree dependency test: n single-element k_single_set tasks arranged
// as a heap (task i depends on task i/2), each also fenced between the H2D
// and D2H copies. Verifies the whole buffer is set regardless of schedule.
// T is the element type; F the flow type (cudaFlow or cudaFlowCapturer).
template <typename T, typename F>
void bset() {
const unsigned n = 10000;
tf::Taskflow taskflow;
tf::Executor executor;
T* cpu = nullptr;
T* gpu = nullptr;
auto cputask = taskflow.emplace([&](){
cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&](F& cf) {
dim3 g = {1, 1, 1};
dim3 b = {1, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto d2h = cf.copy(cpu, gpu, n);
// tasks[0] is unused; task indices 1..n mirror heap positions.
std::vector<tf::cudaTask> tasks(n+1);
for(unsigned i=1; i<=n; ++i) {
tasks[i] = cf.kernel(g, b, 0, k_single_set<T>, gpu, i-1, (T)17);
auto p = i/2;
if(p != 0) {
tasks[p].precede(tasks[i]);
}
tasks[i].precede(d2h);
h2d.precede(tasks[i]);
}
});
cputask.precede(gputask);
executor.run(taskflow).wait();
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[i] == (T)17);
}
std::free(cpu);
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("BSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlow>();
}
TEST_CASE("BSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlow>();
}
TEST_CASE("CapturedBSet.i8" * doctest::timeout(300)) {
bset<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i16" * doctest::timeout(300)) {
bset<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedBSet.i32" * doctest::timeout(300)) {
bset<int32_t, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset
// --------------------------------------------------------
// memset task test: k_set fills the device buffer with 123, then a byte-wise
// memset overwrites the tail [start, N) with 0x3f bytes (0x3f3f3f3f per int),
// then the result is copied back and both regions are checked. Repeats with
// 100 random split points. F is the flow type under test.
template <typename F>
void memset() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 100;
int* cpu = new int [N];
int* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(int)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
// Poison the host buffer so stale values can't mask a failure.
for(int i=0; i<N; ++i) {
cpu[i] = 999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<int>, gpu, N, 123);
auto copy = cf.copy(cpu, gpu, N);
auto zero = cf.memset(gpu+start, 0x3f, (N-start)*sizeof(int));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(cpu[i] == 123);
}
for(int i=start; i<N; ++i) {
REQUIRE(cpu[i] == 0x3f3f3f3f);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memset" * doctest::timeout(300)) {
memset<tf::cudaFlow>();
}
TEST_CASE("CapturedMemset" * doctest::timeout(300)) {
memset<tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memset0
// --------------------------------------------------------
// Zero-memset test across element types: k_set fills with 123, memset zeroes
// the tail [start, N), and both regions are verified with a small tolerance
// (so the same check works for integer and floating-point T).
// Byte-wise memset is only value-correct here because the fill byte is 0.
template <typename T, typename F>
void memset0() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
auto copy = cf.copy(cpu, gpu, N);
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
TEST_CASE("Memset0.i8") {
memset0<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i16") {
memset0<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.i32") {
memset0<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memset0.f32") {
memset0<float, tf::cudaFlow>();
}
TEST_CASE("Memset0.f64") {
memset0<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemset0.i8") {
memset0<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i16") {
memset0<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.i32") {
memset0<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f32") {
memset0<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemset0.f64") {
memset0<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: Memcpy
// --------------------------------------------------------
// Same scenario as memset0 but the device-to-host transfer uses the raw
// byte-count memcpy task instead of the typed copy task.
template <typename T, typename F>
void memcpy() {
tf::Taskflow taskflow;
tf::Executor executor;
const int N = 97;
T* cpu = new T [N];
T* gpu = nullptr;
REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
for(int r=1; r<=100; ++r) {
int start = ::rand() % N;
for(int i=0; i<N; ++i) {
cpu[i] = (T)999;
}
taskflow.emplace([&](F& cf){
dim3 g = {(unsigned)(N+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
auto zero = cf.memset(gpu+start, (T)0, (N-start)*sizeof(T));
// memcpy takes a byte count, unlike copy() which takes element count.
auto copy = cf.memcpy(cpu, gpu, N*sizeof(T));
kset.precede(zero);
zero.precede(copy);
});
executor.run(taskflow).wait();
for(int i=0; i<start; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
}
for(int i=start; i<N; ++i) {
REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
}
}
delete [] cpu;
REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// Instantiations of the memcpy test, once per element type, first through an
// explicit tf::cudaFlow and then through stream capture.
TEST_CASE("Memcpy.i8") {
  memcpy<int8_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i16") {
  memcpy<int16_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.i32") {
  memcpy<int32_t, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f32") {
  memcpy<float, tf::cudaFlow>();
}
TEST_CASE("Memcpy.f64") {
  memcpy<double, tf::cudaFlow>();
}
TEST_CASE("CapturedMemcpy.i8") {
  memcpy<int8_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i16") {
  memcpy<int16_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.i32") {
  memcpy<int32_t, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f32") {
  memcpy<float, tf::cudaFlowCapturer>();
}
TEST_CASE("CapturedMemcpy.f64") {
  memcpy<double, tf::cudaFlowCapturer>();
}
// --------------------------------------------------------
// Testcase: fill
// --------------------------------------------------------
// Sets the device buffer to 123 via a kernel, overwrites a random suffix with
// `value` using cf.fill (typed element count, not bytes), copies back with
// cf.copy, and verifies both regions. Repeated 100 rounds.
//
// Fix: the taskflow is now cleared at the top of every round. Previously each
// round emplaced an additional task into the same taskflow, so later runs
// re-executed the stale tasks of earlier rounds; those lambdas captured the
// loop-local `start` by reference after it had gone out of scope (undefined
// behavior) and raced with the current round's task on the shared buffers.
template <typename T>
void fill(T value) {
  tf::Taskflow taskflow;
  tf::Executor executor;
  const int N = 107;
  T* cpu = new T [N];
  T* gpu = nullptr;
  REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
  for(int r=1; r<=100; ++r) {
    // each round rebuilds a fresh one-task graph (see fix note above)
    taskflow.clear();
    int start = ::rand() % N;
    for(int i=0; i<N; ++i) {
      cpu[i] = (T)999;  // sentinel; must be overwritten by the copy-back
    }
    taskflow.emplace([&](tf::cudaFlow& cf){
      dim3 g = {(unsigned)(N+255)/256, 1, 1};
      dim3 b = {256, 1, 1};
      auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
      auto fill = cf.fill(gpu+start, value, (N-start));
      auto copy = cf.copy(cpu, gpu, N);
      kset.precede(fill);
      fill.precede(copy);
    });
    executor.run(taskflow).wait();
    // prefix keeps the kernel-set value, suffix was filled with `value`
    for(int i=0; i<start; ++i) {
      REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
    }
    for(int i=start; i<N; ++i) {
      REQUIRE(std::fabs(cpu[i] - value) < 1e-4);
    }
  }
  delete [] cpu;
  REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// Instantiations of the fill test with positive and negative fill values for
// each element type.
TEST_CASE("Fill.i8") {
  fill<int8_t>(+123);
  fill<int8_t>(-123);
}
TEST_CASE("Fill.i16") {
  fill<int16_t>(+12345);
  fill<int16_t>(-12345);
}
TEST_CASE("Fill.i32") {
  fill<int32_t>(+123456789);
  fill<int32_t>(-123456789);
}
TEST_CASE("Fill.f32") {
  fill<float>(+123456789.0f);
  fill<float>(-123456789.0f);
}
// --------------------------------------------------------
// Testcase: Zero
// --------------------------------------------------------
// Sets the device buffer to 123 via a kernel, zeroes a random suffix with
// cf.zero (typed element count), copies back, and verifies both regions.
// Repeated 100 rounds.
//
// Fix: the taskflow is now cleared at the top of every round. Previously each
// round emplaced an additional task into the same taskflow, so later runs
// re-executed the stale tasks of earlier rounds; those lambdas captured the
// loop-local `start` by reference after it had gone out of scope (undefined
// behavior) and raced with the current round's task on the shared buffers.
template <typename T>
void zero() {
  tf::Taskflow taskflow;
  tf::Executor executor;
  const int N = 100;
  T* cpu = new T [N];
  T* gpu = nullptr;
  REQUIRE(cudaMalloc(&gpu, N*sizeof(T)) == cudaSuccess);
  for(int r=1; r<=100; ++r) {
    // each round rebuilds a fresh one-task graph (see fix note above)
    taskflow.clear();
    int start = ::rand() % N;
    for(int i=0; i<N; ++i) {
      cpu[i] = (T)999;  // sentinel; must be overwritten by the copy-back
    }
    taskflow.emplace([&](tf::cudaFlow& cf){
      dim3 g = {(unsigned)(N+255)/256, 1, 1};
      dim3 b = {256, 1, 1};
      auto kset = cf.kernel(g, b, 0, k_set<T>, gpu, N, (T)123);
      auto zero = cf.zero(gpu+start, (N-start));
      auto copy = cf.copy(cpu, gpu, N);
      kset.precede(zero);
      zero.precede(copy);
    });
    executor.run(taskflow).wait();
    // prefix keeps the kernel-set value, suffix was zeroed
    for(int i=0; i<start; ++i) {
      REQUIRE(std::fabs(cpu[i] - (T)123) < 1e-4);
    }
    for(int i=start; i<N; ++i) {
      REQUIRE(std::fabs(cpu[i] - (T)0) < 1e-4);
    }
  }
  delete [] cpu;
  REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// Instantiations of the zero test, once per element type.
TEST_CASE("Zero.i8") {
  zero<int8_t>();
}
TEST_CASE("Zero.i16") {
  zero<int16_t>();
}
TEST_CASE("Zero.i32") {
  zero<int32_t>();
}
TEST_CASE("Zero.f32") {
  zero<float>();
}
// --------------------------------------------------------
// Testcase: Barrier
// --------------------------------------------------------
// Builds a cudaFlow whose three noop nodes act as barriers between waves of
// single-element kernels:
//   h2d -> br1 -> (n "set to 17" kernels) -> br2 -> (n "add 3" kernels)
//       -> br3 -> d2h
// Every element is set to 17 and then incremented by 3, so the host must read
// back exactly 20 for all n elements.
template <typename T>
void barrier() {
  const unsigned n = 1000;
  tf::Taskflow taskflow;
  tf::Executor executor;
  T* cpu = nullptr;
  T* gpu = nullptr;
  // allocate a zero-initialized host buffer and the device buffer
  auto cputask = taskflow.emplace([&](){
    cpu = static_cast<T*>(std::calloc(n, sizeof(T)));
    REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess);
  });
  auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
    // every kernel runs with a single thread and touches one element
    dim3 g = {1, 1, 1};
    dim3 b = {1, 1, 1};
    auto br1 = cf.noop();
    auto br2 = cf.noop();
    auto br3 = cf.noop();
    auto h2d = cf.copy(gpu, cpu, n);
    auto d2h = cf.copy(cpu, gpu, n);
    h2d.precede(br1);
    for(unsigned i=0; i<n; ++i) {
      // wave 1: gpu[i] = 17
      auto k1 = cf.kernel(g, b, 0, k_single_set<T>, gpu, i, (T)17);
      k1.succeed(br1)
        .precede(br2);
      // wave 2: gpu[i] += 3
      auto k2 = cf.kernel(g, b, 0, k_single_add<T>, gpu, i, (T)3);
      k2.succeed(br2)
        .precede(br3);
    }
    br3.precede(d2h);
  });
  cputask.precede(gputask);
  executor.run(taskflow).wait();
  for(unsigned i=0; i<n; ++i) {
    REQUIRE(cpu[i] == (T)20);
  }
  std::free(cpu);
  REQUIRE(cudaFree(gpu) == cudaSuccess);
}
// Instantiations of the barrier test for the integer element types.
TEST_CASE("Barrier.i8" * doctest::timeout(300)) {
  barrier<int8_t>();
}
TEST_CASE("Barrier.i16" * doctest::timeout(300)) {
  barrier<int16_t>();
}
TEST_CASE("Barrier.i32" * doctest::timeout(300)) {
  barrier<int32_t>();
}
// ----------------------------------------------------------------------------
// NestedRuns
// ----------------------------------------------------------------------------
// Runs an outer taskflow (B) 100 times; each B iteration also runs an inner
// taskflow (A) 10 times on A's own executor. Per B iteration every element
// grows by 1 (B1's kernel) + 10 (A's kernel, run_n(10)) + 1 (B3's host-side
// increment) = 12, so after 100 iterations every element equals 1200.
template <typename F>
void nested_runs() {
  int* cpu = nullptr;
  int* gpu = nullptr;
  constexpr unsigned n = 1000;
  cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
  REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
  // Inner runner: h2d, +1 kernel, and d2h as three separate GPU tasks,
  // executed 10 times per call.
  struct A {
    tf::Executor executor;
    tf::Taskflow taskflow;
    void run(int* cpu, int* gpu, unsigned n) {
      taskflow.clear();
      auto A1 = taskflow.emplace([&](F& cf) {
        cf.copy(gpu, cpu, n);
      });
      auto A2 = taskflow.emplace([&](F& cf) {
        dim3 g = {(n+255)/256, 1, 1};
        dim3 b = {256, 1, 1};
        cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
      });
      auto A3 = taskflow.emplace([&] (F& cf) {
        cf.copy(cpu, gpu, n);
      });
      A1.precede(A2);
      A2.precede(A3);
      executor.run_n(taskflow, 10).wait();
    }
  };
  // Outer runner: a fused h2d -> +1 kernel -> d2h flow, then a nested A::run
  // from inside a host task, then a host-side increment; executed 100 times.
  struct B {
    tf::Taskflow taskflow;
    tf::Executor executor;
    A a;
    void run(int* cpu, int* gpu, unsigned n) {
      taskflow.clear();
      auto B0 = taskflow.emplace([] () {});
      auto B1 = taskflow.emplace([&] (F& cf) {
        dim3 g = {(n+255)/256, 1, 1};
        dim3 b = {256, 1, 1};
        auto h2d = cf.copy(gpu, cpu, n);
        auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
        auto d2h = cf.copy(cpu, gpu, n);
        h2d.precede(kernel);
        kernel.precede(d2h);
      });
      auto B2 = taskflow.emplace([&] () { a.run(cpu, gpu, n); });
      // NOTE: declared as a GPU task (takes F&) but only touches host memory.
      auto B3 = taskflow.emplace([&] (F&) {
        for(unsigned i=0; i<n; ++i) {
          cpu[i]++;
        }
      });
      B0.precede(B1);
      B1.precede(B2);
      B2.precede(B3);
      executor.run_n(taskflow, 100).wait();
    }
  };
  B b;
  b.run(cpu, gpu, n);
  // 100 iterations x 12 increments each = 1200
  for(unsigned i=0; i<n; i++) {
    REQUIRE(cpu[i] == 1200);
  }
  REQUIRE(cudaFree(gpu) == cudaSuccess);
  std::free(cpu);
}
// Instantiations of the nested-runs test for both GPU task flavors.
TEST_CASE("NestedRuns" * doctest::timeout(300)) {
  nested_runs<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedRuns" * doctest::timeout(300)) {
  nested_runs<tf::cudaFlowCapturer>();
}
/*
// ----------------------------------------------------------------------------
// WorkerID
// ----------------------------------------------------------------------------
void worker_id(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N + M);
REQUIRE(executor.num_workers() == (N + M));
const unsigned s = 100;
for(unsigned k=0; k<s; ++k) {
auto cputask = taskflow.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
auto gputask = taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
auto chktask = taskflow.emplace([&] () {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
taskflow.emplace([&](tf::cudaFlow&) {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
taskflow.emplace([&]() {
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
auto subflow = taskflow.emplace([&](tf::Subflow& sf){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
auto t1 = sf.emplace([&](){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
auto t2 = sf.emplace([&](tf::cudaFlow&){
auto id = executor.this_worker_id();
REQUIRE(id >= 0);
REQUIRE(id < N+M);
});
t1.precede(t2);
});
cputask.precede(gputask);
gputask.precede(chktask);
chktask.precede(subflow);
}
executor.run_n(taskflow, 10).wait();
}
TEST_CASE("WorkerID.1C1G") {
worker_id(1, 1);
}
TEST_CASE("WorkerID.1C2G") {
worker_id(1, 2);
}
TEST_CASE("WorkerID.1C3G") {
worker_id(1, 3);
}
TEST_CASE("WorkerID.1C4G") {
worker_id(1, 4);
}
TEST_CASE("WorkerID.2C1G") {
worker_id(2, 1);
}
TEST_CASE("WorkerID.2C2G") {
worker_id(2, 2);
}
TEST_CASE("WorkerID.2C3G") {
worker_id(2, 3);
}
TEST_CASE("WorkerID.2C4G") {
worker_id(2, 4);
}
TEST_CASE("WorkerID.3C1G") {
worker_id(3, 1);
}
TEST_CASE("WorkerID.3C2G") {
worker_id(3, 2);
}
TEST_CASE("WorkerID.3C3G") {
worker_id(3, 3);
}
TEST_CASE("WorkerID.3C4G") {
worker_id(3, 4);
}
TEST_CASE("WorkerID.4C1G") {
worker_id(4, 1);
}
TEST_CASE("WorkerID.4C2G") {
worker_id(4, 2);
}
TEST_CASE("WorkerID.4C3G") {
worker_id(4, 3);
}
TEST_CASE("WorkerID.4C4G") {
worker_id(4, 4);
} */
// ----------------------------------------------------------------------------
// Multiruns
// ----------------------------------------------------------------------------
void multiruns(unsigned N, unsigned M) {
tf::Taskflow taskflow;
tf::Executor executor(N + M);
const unsigned n = 1000;
const unsigned s = 100;
int *cpu[s] = {0};
int *gpu[s] = {0};
for(unsigned k=0; k<s; ++k) {
int number = ::rand()%100;
auto cputask = taskflow.emplace([&, k](){
cpu[k] = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu[k], n*sizeof(int)) == cudaSuccess);
});
auto gputask = taskflow.emplace([&, k, number](tf::cudaFlow& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu[k], cpu[k], n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu[k], n, number);
auto d2h = cf.copy(cpu[k], gpu[k], n);
h2d.precede(kernel);
kernel.precede(d2h);
});
auto chktask = taskflow.emplace([&, k, number] () {
for(unsigned i=0; i<n; ++i) {
REQUIRE(cpu[k][i] == number);
}
});
cputask.precede(gputask);
gputask.precede(chktask);
}
executor.run(taskflow).wait();
}
// Instantiations of the multiruns test over a grid of worker counts
// (first argument + second argument workers in the executor).
TEST_CASE("Multiruns.1C1G") {
  multiruns(1, 1);
}
TEST_CASE("Multiruns.1C2G") {
  multiruns(1, 2);
}
TEST_CASE("Multiruns.1C3G") {
  multiruns(1, 3);
}
TEST_CASE("Multiruns.1C4G") {
  multiruns(1, 4);
}
TEST_CASE("Multiruns.2C1G") {
  multiruns(2, 1);
}
TEST_CASE("Multiruns.2C2G") {
  multiruns(2, 2);
}
TEST_CASE("Multiruns.2C3G") {
  multiruns(2, 3);
}
TEST_CASE("Multiruns.2C4G") {
  multiruns(2, 4);
}
TEST_CASE("Multiruns.3C1G") {
  multiruns(3, 1);
}
TEST_CASE("Multiruns.3C2G") {
  multiruns(3, 2);
}
TEST_CASE("Multiruns.3C3G") {
  multiruns(3, 3);
}
TEST_CASE("Multiruns.3C4G") {
  multiruns(3, 4);
}
TEST_CASE("Multiruns.4C1G") {
  multiruns(4, 1);
}
TEST_CASE("Multiruns.4C2G") {
  multiruns(4, 2);
}
TEST_CASE("Multiruns.4C3G") {
  multiruns(4, 3);
}
TEST_CASE("Multiruns.4C4G") {
  multiruns(4, 4);
}
// ----------------------------------------------------------------------------
// Subflow
// ----------------------------------------------------------------------------
template <typename F>
void subflow() {
tf::Taskflow taskflow;
tf::Executor executor;
int* cpu = nullptr;
int* gpu = nullptr;
const unsigned n = 1000;
auto partask = taskflow.emplace([&](tf::Subflow& sf){
auto cputask = sf.emplace([&](){
cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
});
auto gputask = sf.emplace([&](F& cf) {
dim3 g = {(n+255)/256, 1, 1};
dim3 b = {256, 1, 1};
auto h2d = cf.copy(gpu, cpu, n);
auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
auto d2h = cf.copy(cpu, gpu, n);
h2d.precede(kernel);
kernel.precede(d2h);
});
cputask.precede(gputask);
});
auto chktask = taskflow.emplace([&](){
for(unsigned i=0; i<n ;++i){
REQUIRE(cpu[i] == 1);
}
REQUIRE(cudaFree(gpu) == cudaSuccess);
std::free(cpu);
});
partask.precede(chktask);
executor.run(taskflow).wait();
}
// Instantiations of the subflow test for both GPU task flavors.
TEST_CASE("Subflow" * doctest::timeout(300)) {
  subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedSubflow" * doctest::timeout(300)) {
  subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// NestedSubflow
// ----------------------------------------------------------------------------
// Builds three levels of nested subflows; each level contributes one
// h2d -> +1 kernel -> d2h pass over the same buffers, so the final check
// expects every element to equal 3.
template <typename F>
void nested_subflow() {
  tf::Taskflow taskflow;
  tf::Executor executor;
  int* cpu = nullptr;
  int* gpu = nullptr;
  const unsigned n = 1000;
  // allocate a zeroed host buffer and the device buffer
  auto cputask = taskflow.emplace([&](){
    cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
    REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
  });
  auto partask = taskflow.emplace([&](tf::Subflow& sf){
    // level 1: first +1 pass
    auto gputask1 = sf.emplace([&](F& cf) {
      dim3 g = {(n+255)/256, 1, 1};
      dim3 b = {256, 1, 1};
      auto h2d = cf.copy(gpu, cpu, n);
      auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
      auto d2h = cf.copy(cpu, gpu, n);
      h2d.precede(kernel);
      kernel.precede(d2h);
    });
    auto subtask1 = sf.emplace([&](tf::Subflow& sf) {
      // level 2: second +1 pass
      auto gputask2 = sf.emplace([&](F& cf) {
        dim3 g = {(n+255)/256, 1, 1};
        dim3 b = {256, 1, 1};
        auto h2d = cf.copy(gpu, cpu, n);
        auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
        auto d2h = cf.copy(cpu, gpu, n);
        h2d.precede(kernel);
        kernel.precede(d2h);
      });
      auto subtask2 = sf.emplace([&](tf::Subflow& sf){
        // level 3: third +1 pass
        sf.emplace([&](F& cf) {
          dim3 g = {(n+255)/256, 1, 1};
          dim3 b = {256, 1, 1};
          auto h2d = cf.copy(gpu, cpu, n);
          auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
          auto d2h = cf.copy(cpu, gpu, n);
          h2d.precede(kernel);
          kernel.precede(d2h);
        });
      });
      gputask2.precede(subtask2);
    });
    gputask1.precede(subtask1);
  });
  // three sequential +1 passes -> every element must equal 3
  auto chktask = taskflow.emplace([&](){
    for(unsigned i=0; i<n ;++i){
      REQUIRE(cpu[i] == 3);
    }
    REQUIRE(cudaFree(gpu) == cudaSuccess);
    std::free(cpu);
  });
  partask.precede(chktask)
         .succeed(cputask);
  executor.run(taskflow).wait();
}
// Instantiations of the nested-subflow test for both GPU task flavors.
TEST_CASE("NestedSubflow" * doctest::timeout(300) ) {
  nested_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedNestedSubflow" * doctest::timeout(300) ) {
  nested_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// DetachedSubflow
// ----------------------------------------------------------------------------
// Same allocate + (h2d -> +1 kernel -> d2h) pattern as subflow(), but the
// subflow is detached from its parent instead of joined. The verification
// runs only after run().wait() returns, so the test relies on detached
// subflows having completed by that point.
// NOTE(review): confirm against tf::Subflow::detach semantics that wait()
// covers detached subflows.
template <typename F>
void detached_subflow() {
  tf::Taskflow taskflow;
  tf::Executor executor;
  int* cpu = nullptr;
  int* gpu = nullptr;
  const unsigned n = 1000;
  taskflow.emplace([&](tf::Subflow& sf){
    // child 1: zeroed host buffer + device buffer
    auto cputask = sf.emplace([&](){
      cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
      REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
    });
    // child 2: one +1 pass over the buffer
    auto gputask = sf.emplace([&](F& cf) {
      dim3 g = {(n+255)/256, 1, 1};
      dim3 b = {256, 1, 1};
      auto h2d = cf.copy(gpu, cpu, n);
      auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
      auto d2h = cf.copy(cpu, gpu, n);
      h2d.precede(kernel);
      kernel.precede(d2h);
    });
    cputask.precede(gputask);
    sf.detach();  // run the children detached from the parent task
  });
  executor.run(taskflow).wait();
  for(unsigned i=0; i<n ;++i){
    REQUIRE(cpu[i] == 1);
  }
  REQUIRE(cudaFree(gpu) == cudaSuccess);
  std::free(cpu);
}
// Instantiations of the detached-subflow test for both GPU task flavors.
TEST_CASE("DetachedSubflow" * doctest::timeout(300)) {
  detached_subflow<tf::cudaFlow>();
}
TEST_CASE("CapturedDetachedSubflow" * doctest::timeout(300)) {
  detached_subflow<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Conditional GPU tasking
// ----------------------------------------------------------------------------
// GPU iteration driven by a host-side condition task. Each pass of gputask
// adds 1 to every element; the condition task counts rounds, checks the
// cumulative value, and returns 0 (index of gputask -> loop again) until
// round 100, when `round >= 100` yields 1 (index of freetask -> cleanup).
template <typename F>
void loop() {
  tf::Taskflow taskflow;
  tf::Executor executor;
  const unsigned n = 1000;
  int* cpu = nullptr;
  int* gpu = nullptr;
  // allocate a zeroed host buffer and the device buffer
  auto cputask = taskflow.emplace([&](){
    cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
    REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
  });
  // one +1 pass per loop iteration
  auto gputask = taskflow.emplace([&](F& cf) {
    dim3 g = {(n+255)/256, 1, 1};
    dim3 b = {256, 1, 1};
    auto h2d = cf.copy(gpu, cpu, n);
    auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
    auto d2h = cf.copy(cpu, gpu, n);
    h2d.precede(kernel);
    kernel.precede(d2h);
  });
  // `round` lives in the lambda (init-capture) and persists across re-runs
  auto condition = taskflow.emplace([&cpu, round=0] () mutable {
    ++round;
    for(unsigned i=0; i<n; ++i) {
      REQUIRE(cpu[i] == round);  // +1 per completed round
    }
    return round >= 100;  // 0: back to gputask, 1: on to freetask
  });
  auto freetask = taskflow.emplace([&](){
    REQUIRE(cudaFree(gpu) == cudaSuccess);
    std::free(cpu);
  });
  cputask.precede(gputask);
  gputask.precede(condition);
  // successor 0 = gputask (loop), successor 1 = freetask (exit)
  condition.precede(gputask, freetask);
  executor.run(taskflow).wait();
}
// Instantiations of the conditional-loop test for both GPU task flavors.
TEST_CASE("Loop" * doctest::timeout(300)) {
  loop<tf::cudaFlow>();
}
TEST_CASE("CapturedLoop" * doctest::timeout(300)) {
  loop<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// Predicate
// ----------------------------------------------------------------------------
// Repeatedly offloads one cudaFlow until a host predicate fires: the
// predicate `i-- == 0` starting from 100 becomes true after 100 decrements,
// so the +1 kernel executes exactly 100 times and the host reads back 100.
TEST_CASE("Predicate") {
  tf::Taskflow taskflow;
  tf::Executor executor;
  const unsigned n = 1000;
  int* cpu = nullptr;
  int* gpu = nullptr;
  // allocate both buffers and upload the zeroed host data once
  auto cputask = taskflow.emplace([&](){
    cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
    REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
    REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
  });
  auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
    dim3 g = {(n+255)/256, 1, 1};
    dim3 b = {256, 1, 1};
    auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
    auto copy = cf.copy(cpu, gpu, n);
    kernel.precede(copy);
    // re-offload the same flow until the predicate returns true
    cf.offload_until([i=100]() mutable { return i-- == 0; });
  });
  auto freetask = taskflow.emplace([&](){
    for(unsigned i=0; i<n; ++i) {
      REQUIRE(cpu[i] == 100);  // 100 offloads x +1
    }
    REQUIRE(cudaFree(gpu) == cudaSuccess);
    std::free(cpu);
  });
  cputask.precede(gputask);
  gputask.precede(freetask);
  executor.run(taskflow).wait();
}
// ----------------------------------------------------------------------------
// Repeat
// ----------------------------------------------------------------------------
// Same flow as the Predicate test, but using the explicit repetition count
// offload_n(100): the +1 kernel runs exactly 100 times, so the host must
// read back 100 in every element.
TEST_CASE("Repeat") {
  tf::Taskflow taskflow;
  tf::Executor executor;
  const unsigned n = 1000;
  int* cpu = nullptr;
  int* gpu = nullptr;
  // allocate both buffers and upload the zeroed host data once
  auto cputask = taskflow.emplace([&](){
    cpu = static_cast<int*>(std::calloc(n, sizeof(int)));
    REQUIRE(cudaMalloc(&gpu, n*sizeof(int)) == cudaSuccess);
    REQUIRE(cudaMemcpy(gpu, cpu, n*sizeof(int), cudaMemcpyHostToDevice) == cudaSuccess);
  });
  auto gputask = taskflow.emplace([&](tf::cudaFlow& cf) {
    dim3 g = {(n+255)/256, 1, 1};
    dim3 b = {256, 1, 1};
    auto kernel = cf.kernel(g, b, 0, k_add<int>, gpu, n, 1);
    auto copy = cf.copy(cpu, gpu, n);
    kernel.precede(copy);
    cf.offload_n(100);  // run the flow 100 times
  });
  auto freetask = taskflow.emplace([&](){
    for(unsigned i=0; i<n; ++i) {
      REQUIRE(cpu[i] == 100);  // 100 offloads x +1
    }
    REQUIRE(cudaFree(gpu) == cudaSuccess);
    std::free(cpu);
  });
  cputask.precede(gputask);
  gputask.precede(freetask);
  executor.run(taskflow).wait();
}
using namespace torchx;
namespace rubiks {
// kernel function argument list: every kernel receives the flattened sizes
// and per-axis dimensions of the input and output NCHW tensors as plain
// uint32_t parameters (computed once on the host by CALCULATE_DIM_ARGS)
#define DECLARE_KERNEL_DIM_ARGS \
    const uint32_t input_numel, \
    const uint32_t input_H_dim, \
    const uint32_t input_W_dim, \
    const uint32_t input_HW_dim, \
    const uint32_t input_CHW_dim, \
    const uint32_t output_numel, \
    const uint32_t output_H_dim, \
    const uint32_t output_W_dim, \
    const uint32_t output_HW_dim, \
    const uint32_t output_CHW_dim, \
    const uint32_t C_dim
// pass to kernel function from caller
// order must match DECLARE_KERNEL_DIM_ARGS
#define PASS_KERNEL_DIM_ARGS \
    input_numel, \
    input_H_dim, \
    input_W_dim, \
    input_HW_dim, \
    input_CHW_dim, \
    output_numel, \
    output_H_dim, \
    output_W_dim, \
    output_HW_dim, \
    output_CHW_dim, \
    C_dim
// calculate in caller function and pass as args to kernel
// (reads sizes 1..3 of the two NCHW tensors; C_dim comes from `input`)
#define CALCULATE_DIM_ARGS(input, output) \
    const uint32_t C_dim = input.size(1); \
    const uint32_t input_numel = input.numel(); \
    const uint32_t input_H_dim = input.size(2); \
    const uint32_t input_W_dim = input.size(3); \
    const uint32_t input_HW_dim = input_H_dim * input_W_dim; \
    const uint32_t input_CHW_dim = input_HW_dim * C_dim; \
    const uint32_t output_numel = output.numel(); \
    const uint32_t output_H_dim = output.size(2); \
    const uint32_t output_W_dim = output.size(3); \
    const uint32_t output_HW_dim = output_H_dim * output_W_dim; \
    const uint32_t output_CHW_dim = output_HW_dim * C_dim;
// calculate the "unflattened" 4D index inside kernel's grid-stride loop
// `place` should be either "input" or "output"
// NOTE: also declares `within_N_idx` and `within_C_idx` in the enclosing
// scope, so it can be expanded at most once per scope
#define GET_INDICES(place, N_idx, C_idx, H_idx, W_idx) \
    const uint32_t N_idx = index / place##_CHW_dim; \
    const uint32_t within_N_idx = index % place##_CHW_dim; \
    const uint32_t C_idx = within_N_idx / place##_HW_dim; \
    const uint32_t within_C_idx = within_N_idx % place##_HW_dim; \
    const uint32_t H_idx = within_C_idx / place##_W_dim; \
    const uint32_t W_idx = within_C_idx % place##_W_dim;
// Bilinear interpolation over a 2x2 pixel patch. remainder_H/remainder_W are
// the fractional distances from pixels[0][0] toward row 1 / column 1; the
// four weights sum to 1.
template<typename T>
__device__ __forceinline__ T interpolate_2d(T pixels[][2], T remainder_H, T remainder_W) {
  return pixels[0][0] * (1 - remainder_H) * (1 - remainder_W)
       + pixels[0][1] * (1 - remainder_H) * remainder_W
       + pixels[1][0] * remainder_H * (1 - remainder_W)
       + pixels[1][1] * remainder_H * remainder_W;
}
// Floor of x computed via truncation plus a correction: the int cast rounds
// toward zero, so negative values with a fractional part need one more step
// down (x < truncated exactly in that case).
template<typename T>
__device__ __forceinline__ int floor_fast(T x) {
  int truncated = static_cast<int>(x);
  if (x < truncated) {
    --truncated;
  }
  return truncated;
}
// Round-half-away-from-zero: bias x by +/-0.5 toward its own sign, then let
// the int cast truncate toward zero.
template<typename T>
__device__ __forceinline__ int round_fast(T x) {
  const T bias = (x < static_cast<T>(0.0f))
               ? static_cast<T>(-0.5f)
               : static_cast<T>(0.5f);
  return static_cast<int>(x + bias);
}
// True when (H, W) lies inside the input feature map; relies on the
// input_H_dim/input_W_dim kernel arguments being in scope
#define WITHIN_BOUND(H, W) (H >= 0 && W >= 0 && H < input_H_dim && W < input_W_dim)
// set a value if (H, W) are within bounds
// (requires `input`, N_idx, and C_idx to be in scope at the expansion site;
// leaves `val` untouched when out of bounds)
#define SET_WITHIN_BOUND(val, H, W) \
    if (WITHIN_BOUND(H, W)) { \
        val = input[N_idx][C_idx][H][W]; \
    }
// Forward pass: one grid-stride loop over all OUTPUT elements.
// shift_field is indexed as [0][C] (H offsets) and [1][C] (W offsets), i.e.
// one fractional (H, W) shift per channel. Each output pixel maps back to an
// input coordinate via stride/padding, is displaced by its channel's shift,
// and is produced either by nearest rounding (quantize=true) or by bilinear
// interpolation of the surrounding 2x2 input pixels. Out-of-bounds taps
// contribute zero (the pixels[] array stays zero-initialized).
template<typename T>
__global__ void rubiks2d_forward_kernel(
    const PTA<T, 4> input,
    const PTA<T, 2> shift_field,
    const uint32_t stride_H, const uint32_t stride_W,
    const uint32_t pad_H, const uint32_t pad_W,
    bool quantize,
    PTA<T, 4> output,
    DECLARE_KERNEL_DIM_ARGS
) {
  GRID_STRIDE_LOOP(index, output_numel) {
    // unflatten the linear index into (N, C, H, W) of the output tensor
    GET_INDICES(output, N_idx, C_idx, H_idx, W_idx);
    // map output coordinates back to input space (stride and padding)
    const int strided_H_idx = H_idx * stride_H - pad_H;
    const int strided_W_idx = W_idx * stride_W - pad_W;
    // TODO add this guard back
    // if (!WITHIN_BOUND(strided_H_idx, strided_W_idx))
    //     continue;
    T H_offset = shift_field[0][C_idx];
    T W_offset = shift_field[1][C_idx];
    if (quantize) {
      // integer shift: round to the nearest input pixel, no interpolation;
      // an out-of-bounds source leaves the output element unchanged
      int temp_H = round_fast(strided_H_idx + H_offset);
      int temp_W = round_fast(strided_W_idx + W_offset);
      SET_WITHIN_BOUND(output[N_idx][C_idx][H_idx][W_idx], temp_H, temp_W);
      continue; // skip the rest
    }
    // split the shift into an integer part and a fractional remainder
    const int H_offset_int = floor_fast(H_offset);
    const int W_offset_int = floor_fast(W_offset);
    const T remainder_H = H_offset - H_offset_int;
    const T remainder_W = W_offset - W_offset_int;
    int input_H_idx = strided_H_idx + H_offset_int;
    int input_W_idx = strided_W_idx + W_offset_int;
    // gather the 2x2 neighborhood; entries outside the input stay zero
    T pixels[2][2] = {0};
    int temp_H, temp_W;
    for (int h = 0; h < 2; ++h) {
      for (int w = 0; w < 2; ++w) {
        temp_H = input_H_idx + h;
        temp_W = input_W_idx + w;
        SET_WITHIN_BOUND(pixels[h][w], temp_H, temp_W);
      }
    }
    // Just interpolate between the four pixels and stick the result in the right output spot.
    output[N_idx][C_idx][H_idx][W_idx] =
        interpolate_2d(pixels, remainder_H, remainder_W);
  }
}
// Backward pass w.r.t. the shift field: one grid-stride loop over all OUTPUT
// elements. Recomputes the forward pass's 2x2 input neighborhood and forms
// the analytic derivative of the bilinear interpolation w.r.t. the
// fractional H/W remainders. When a remainder is numerically zero (an
// integer shift), that one-sided derivative degenerates, so a centered
// difference over a 3x3 neighborhood is used instead. The local gradients
// are scaled by the upstream output gradient and accumulated atomically into
// shift_grad_buffer (indexed [0|1][C][H][W]; 0 = H component, 1 = W).
template <typename T>
__global__ void rubiks2d_backward_shift_kernel(
    PTA<T, 4> output_grad,
    const PTA<T, 4> input,
    const PTA<T, 2> shift_field,
    const uint32_t stride_H, const uint32_t stride_W,
    const uint32_t pad_H, const uint32_t pad_W,
    PTA<T, 4> shift_grad_buffer,
    DECLARE_KERNEL_DIM_ARGS
) {
  GRID_STRIDE_LOOP(index, output_numel) {
    GET_INDICES(output, N_idx, C_idx, H_idx, W_idx);
    // Same thing as forward -- accounting for stride and padding
    const int strided_H_idx = H_idx * stride_H - pad_H;
    const int strided_W_idx = W_idx * stride_W - pad_W;
    // TODO add this guard back
    // if (!WITHIN_BOUND(strided_H_idx, strided_W_idx))
    //     continue;
    T H_offset = shift_field[0][C_idx];
    T W_offset = shift_field[1][C_idx];
    // Local gradient values to be multiplied by upstream gradient and placed into correct tensor location.
    T local_pixel_W_grad = 0, local_pixel_H_grad = 0;
    const int H_offset_int = floor_fast(H_offset);
    const int W_offset_int = floor_fast(W_offset);
    int input_H_idx = strided_H_idx + H_offset_int;
    int input_W_idx = strided_W_idx + W_offset_int;
    T remainder_H = H_offset - H_offset_int;
    T remainder_W = W_offset - W_offset_int;
#ifdef _DEBUG_GRADIENT_
    if (remainder_H < 1e-7 || remainder_W < 1e-7) {
      printf("small remainder H = %.3e W = %.3e\n",
             static_cast<float>(remainder_H), static_cast<float>(remainder_W));
    }
#endif
    // treat remainders within +/-1e-7 of zero as an exact integer shift
    T ZERO_TOL = static_cast<T>(1e-7f);
    bool is_h_int_shift = false;
    bool is_w_int_shift = false;
    if (ZERO_TOL > remainder_H && remainder_H > -ZERO_TOL) {
      is_h_int_shift = true;
      remainder_H = 0;
    }
    if (ZERO_TOL > remainder_W && remainder_W > -ZERO_TOL) {
      is_w_int_shift = true;
      remainder_W = 0;
    }
    // regular shift
    {
      T pixels[2][2] = {0};
      int temp_H, temp_W;
      // TODO remove array input_H_indices
      for (int h = 0; h < 2; ++h) {
        for (int w = 0; w < 2; ++w) {
          temp_H = input_H_idx + h;
          temp_W = input_W_idx + w;
          SET_WITHIN_BOUND(pixels[h][w], temp_H, temp_W);
        }
      }
      // del L/remainder_H
      local_pixel_H_grad =
          (1 - remainder_W) * (pixels[1][0] - pixels[0][0])
          + remainder_W * (pixels[1][1] - pixels[0][1]);
      // del L/remainder_W
      local_pixel_W_grad =
          (1 - remainder_H) * (pixels[0][1] - pixels[0][0])
          + remainder_H * (pixels[1][1] - pixels[1][0]);
    }
    if (is_h_int_shift || is_w_int_shift) {
      // integer shift: fall back to a centered difference over 3x3
      // pixels[1][1] is the origin pixel
      // TODO remove array input_H_indices
      T pixels[3][3] = {0};
      int temp_H, temp_W;
      for (int h = 0; h < 3; ++h) {
        for (int w = 0; w < 3; ++w) {
          if (h == 0 && w == 0 || h == 1 && w == 1)
            continue; // don't need p[0][0], p[1][1]
          temp_H = input_H_idx + h - 1;
          temp_W = input_W_idx + w - 1;
          SET_WITHIN_BOUND(pixels[h][w], temp_H, temp_W);
        }
      }
      if (is_h_int_shift) {
        local_pixel_H_grad =
            static_cast<T>(0.5f) * (
                (1 - remainder_W) * (pixels[2][1] - pixels[0][1])
                + remainder_W * (pixels[2][2] - pixels[0][2])
            );
      }
      if (is_w_int_shift) {
        local_pixel_W_grad =
            static_cast<T>(0.5f) * (
                (1 - remainder_H) * (pixels[1][2] - pixels[1][0])
                + remainder_H * (pixels[2][2] - pixels[2][0])
            );
      }
    }
    // Multiply by the upstream gradient
    const T og = output_grad[N_idx][C_idx][H_idx][W_idx];
    T pixel_H_grad = local_pixel_H_grad * og;
    T pixel_W_grad = local_pixel_W_grad * og;
    // output: {2 x C, H x W}
    // H_offset = u_h * H + u_w * W + u_t;
    // W_offset = v_h * H + v_w * W + v_t;
    // the buffer index drops N_idx, so every sample in the batch accumulates
    // into the same (C, H, W) slot -- hence the atomics
    atomicAdd(&shift_grad_buffer[0][C_idx][H_idx][W_idx], pixel_H_grad);
    atomicAdd(&shift_grad_buffer[1][C_idx][H_idx][W_idx], pixel_W_grad);
  }
}
// Backward pass w.r.t. the input: one grid-stride loop over all INPUT
// elements. The input gradient is the output gradient shifted in the
// opposite direction (shift negated) with stride/padding undone: an input
// location receives gradient only where its reverse-shifted coordinate lands
// exactly on a strided output sample. Non-integer shifts bilinearly
// interpolate the (up to) four surrounding output-gradient samples.
// NOTE(review): for stride > 1, `temp % stride == 0` with a negative temp is
// false in C++ (the remainder is non-positive), so negative coordinates are
// rejected by the divisibility test before the explicit bounds check --
// presumably intended; confirm.
template <typename T>
__global__ void rubiks2d_backward_input_kernel(
    PTA<T, 4> output_grad,
    const PTA<T, 4> input,
    const PTA<T, 2> shift,
    const uint32_t stride_H, const uint32_t stride_W,
    const uint32_t pad_H, const uint32_t pad_W,
    bool quantize,
    PTA<T, 4> input_grad,
    DECLARE_KERNEL_DIM_ARGS
) {
  GRID_STRIDE_LOOP(index, input_numel) {
    GET_INDICES(input, N_idx, C_idx, H_idx, W_idx);
    // Offsets within the (H, W) feature map in the output gradient tensor to pull from
    // (note that backward input gradient is just output gradient, reverse shifted)
    const int H_offset = H_idx + pad_H;
    const int W_offset = W_idx + pad_W;
    // Final value to be stuck into the input gradient
    T val = 0;
    // reverse shift: negate the forward per-channel offsets
    const T shift_H = -shift[0][C_idx];
    const T shift_W = -shift[1][C_idx];
    if (quantize) {
      // integer shift: pull a single rounded sample if it is a valid
      // strided output position; otherwise leave input_grad untouched
      int temp_H = round_fast(H_offset + shift_H);
      int temp_W = round_fast(W_offset + shift_W);
      if (temp_H % stride_H == 0 && temp_W % stride_W == 0) {
        temp_H /= stride_H;
        temp_W /= stride_W;
        if (temp_H >= 0 && temp_W >= 0 &&
            temp_H < output_H_dim && temp_W < output_W_dim) {
          input_grad[N_idx][C_idx][H_idx][W_idx] =
              output_grad[N_idx][C_idx][temp_H][temp_W];
        }
      }
      continue; // skip the rest
    }
    int output_H_idx, output_W_idx;
    // "Small" and "large" shifts in each direction. If our shift is 1.4, for instance, the
    // "small shift" will give us the pixel offset by 1 and the "large shift" will give us
    // the pixel offset by 2.
    int small_shift_H = floor_fast(shift_H);
    int large_shift_H = small_shift_H + 1;
    int small_shift_W = floor_fast(shift_W);
    int large_shift_W = small_shift_W + 1;
    // Special case -- both shifts are zero; only care about strides and padding with NO interpolation.
    if (shift_W == 0 && shift_H == 0) {
      output_H_idx = H_offset;
      output_W_idx = W_offset;
      // Check and see if we're a strided sample or not.
      if (output_W_idx % stride_W == 0 && output_H_idx % stride_H == 0) {
        output_W_idx = output_W_idx / stride_W;
        output_H_idx = output_H_idx / stride_H;
        // Basically an in-bounds checker -- if things are in bounds, go ahead and pull from the output
        // gradient tensor; otherwise just give zero.
        if (output_H_idx >= 0 && output_W_idx >= 0 && \
            output_H_idx < output_H_dim && output_W_idx < output_W_dim) {
          val = output_grad[N_idx][C_idx][output_H_idx][output_W_idx];
        } else {
          val = 0;
        }
      }
    } else {
      // general case: bilinear interpolation of the surrounding strided
      // output-gradient samples; invalid taps contribute zero
      int output_H_indices[2];
      int output_W_indices[2];
      output_H_indices[0] = H_offset + small_shift_H;
      output_W_indices[0] = W_offset + small_shift_W;
      output_H_indices[1] = H_offset + large_shift_H;
      output_W_indices[1] = W_offset + large_shift_W;
      T pixels[2][2] = {0};
      int temp_H, temp_W;
      for (int h = 0; h < 2; ++h) {
        for (int w = 0; w < 2; ++w) {
          temp_H = output_H_indices[h];
          temp_W = output_W_indices[w];
          if (temp_H % stride_H == 0 && temp_W % stride_W == 0) {
            temp_H /= stride_H;
            temp_W /= stride_W;
            if (temp_H >= 0 && temp_W >= 0 &&
                temp_H < output_H_dim && temp_W < output_W_dim) {
              pixels[h][w] =
                  output_grad[N_idx][C_idx][temp_H][temp_W];
            }
          }
        }
      }
      T remainder_H = shift_H - small_shift_H;
      T remainder_W = shift_W - small_shift_W;
      val = interpolate_2d(pixels, remainder_H, remainder_W);
    }
    input_grad[N_idx][C_idx][H_idx][W_idx] = val;
  }
}
// Normalizes each channel's (H, W) shift-gradient pair to unit length; pairs
// whose magnitude is zero are left untouched. One grid-stride loop over the
// C channels (shift_grad is indexed [0|1][c]).
// NOTE(review): the original author flagged this with "TODO wrong" below --
// the intended normalization may differ; verify before relying on it.
template<typename T>
__global__ void rubiks2d_normalize_shift_grad_kernel(
    PTA<T, 2> shift_grad
) {
  // total elements is C_dim
  GRID_STRIDE_LOOP(c, shift_grad.size(1)) {
    // TODO wrong
    const T cur_H_grad = shift_grad[0][c];
    const T cur_W_grad = shift_grad[1][c];
    const T magnitude = sqrt(cur_H_grad * cur_H_grad + cur_W_grad * cur_W_grad);
    if (magnitude > 0) {
      shift_grad[0][c] = cur_H_grad / magnitude;
      shift_grad[1][c] = cur_W_grad / magnitude;
    }
  }
}
// True when both spatial strides are 1 and both paddings are 0, i.e. the
// trivial stride-1/no-padding configuration. Each vector is expected to hold
// the (H, W) pair at indices 0 and 1, matching the launcher arguments.
inline bool is_s1p0(
    const std::vector<int>& strides, const std::vector<int>& paddings
) {
  const bool unit_stride = (strides[0] == 1) && (strides[1] == 1);
  const bool no_padding = (paddings[0] == 0) && (paddings[1] == 0);
  return unit_stride && no_padding;
}
// Host launcher for the forward shift kernel over all output elements.
// input/output are NCHW tensors; shift_field holds the per-channel (H, W)
// offsets (indexed [0|1][C] by the kernel).
// Fix: dispatch on input.scalar_type() instead of the deprecated
// Tensor::type() overload of AT_DISPATCH_FLOATING_TYPES_AND_HALF.
void rubiks2d_forward_cuda(
    torch::Tensor input,
    torch::Tensor shift_field,
    const std::vector<int>& strides,
    const std::vector<int>& paddings,
    bool quantize,
    torch::Tensor output
) {
  int blocks = 0;
  int threads_per_block = 0;
  CALCULATE_DIM_ARGS(input, output);
  // choose the launch configuration from the total output element count
  get_cuda_device_properties(output_numel, blocks, threads_per_block);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "rubiks2d_forward_cuda", ([&] {
    rubiks2d_forward_kernel<scalar_t><<<blocks, threads_per_block>>>(
        GET_PTA(input, 4),
        GET_PTA(shift_field, 2),
        strides[0], strides[1], paddings[0], paddings[1],
        quantize,
        GET_PTA(output, 4),
        PASS_KERNEL_DIM_ARGS
    );
  }));
}
// Host launcher for the shift-field gradient kernel, one thread per output
// element; gradients are accumulated atomically into shift_grad_buffer.
// Fix: dispatch on input.scalar_type() instead of the deprecated
// Tensor::type() overload of AT_DISPATCH_FLOATING_TYPES_AND_HALF.
void rubiks2d_backward_shift_cuda(
    torch::Tensor output_grad,
    torch::Tensor input,
    torch::Tensor shift_field,
    const std::vector<int>& strides,
    const std::vector<int>& paddings,
    torch::Tensor shift_grad_buffer
) {
  int blocks = 0;
  int threads_per_block = 0;
  CALCULATE_DIM_ARGS(input, output_grad);
  // the kernel iterates over output elements
  get_cuda_device_properties(output_numel, blocks, threads_per_block);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "rubiks2d_backward_shift_cuda", ([&] {
    rubiks2d_backward_shift_kernel<scalar_t><<<blocks, threads_per_block>>>(
        GET_PTA(output_grad, 4),
        GET_PTA(input, 4),
        GET_PTA(shift_field, 2),
        strides[0], strides[1], paddings[0], paddings[1],
        GET_PTA(shift_grad_buffer, 4),
        PASS_KERNEL_DIM_ARGS
    );
  }));
}
// Host launcher for the input-gradient kernel, one thread per input element.
// Fix: dispatch on input.scalar_type() instead of the deprecated
// Tensor::type() overload of AT_DISPATCH_FLOATING_TYPES_AND_HALF.
void rubiks2d_backward_input_cuda(
    torch::Tensor output_grad,
    torch::Tensor input,
    torch::Tensor shift_field,
    const std::vector<int>& strides,
    const std::vector<int>& paddings,
    bool quantize,
    torch::Tensor input_grad
) {
  int blocks = 0;
  int threads_per_block = 0;
  CALCULATE_DIM_ARGS(input, output_grad);
  get_cuda_device_properties(input_numel, blocks, threads_per_block);
  // TODO use s1p0: the specialized stride-1/pad-0 kernel below is still
  // disabled, so the generic kernel is deliberately always taken for now.
  if (true or !is_s1p0(strides, paddings)) {
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "rubiks2d_backward_input_cuda", ([&] {
      rubiks2d_backward_input_kernel<scalar_t><<<blocks, threads_per_block>>>(
          GET_PTA(output_grad, 4),
          GET_PTA(input, 4),
          GET_PTA(shift_field, 2),
          strides[0], strides[1], paddings[0], paddings[1],
          quantize,
          GET_PTA(input_grad, 4),
          PASS_KERNEL_DIM_ARGS
      );
    }));
  }
  else {
    // AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "rubiks2d_backward_input_s1p0", ([&] {
    //   rubiks2d_backward_input_kernel_s1p0<scalar_t><<<blocks, threads_per_block>>>(
    //     GET_PTA(input, 4),
    //     GET_PTA(shift, 2),
    //     GET_PTA(output_grad, 4),
    //     GET_PTA(input_grad, 4),
    //     PASS_KERNEL_DIM_ARGS
    //   );
    // }));
  }
}
// Launches the kernel that normalizes the accumulated shift gradient in
// place.  One thread per channel: the launch is sized by shift_grad.size(1).
void rubiks2d_normalize_shift_grad_cuda(
    torch::Tensor shift_grad            // shift gradient (2-D), modified in place
) {
    int blocks = 0;
    int threads_per_block = 0;
    uint32_t total_elements = shift_grad.size(1); // C_dim
    get_cuda_device_properties(total_elements, blocks, threads_per_block);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(shift_grad.type(), "rubiks2d_normalize_shift_grad_cuda", ([&] {
        rubiks2d_normalize_shift_grad_kernel<scalar_t><<<blocks, threads_per_block>>>(
            GET_PTA(shift_grad, 2)
        );
    }));
}
} // namespace rubiks | the_stack |
#include <iostream>
#include <stack>
using namespace std;
// 宏:DEBUG
// 定义是否输出调试信息
//#define DEBUG_IMG
//#define DEBUG_TIME
// 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y
// 定义了默认的线程块的尺寸。
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// 宏:PIXEL(x,y)
// 获取图像中(x,y)像素的位置
#define PIXEL(x,y) workimg->imgData[(y)*workimg->width+(x)]
// 宏:VALID(x,y)
// 判断(x,y)像素的位置是否合法
#define VALID(x,y) (x>=0 && x<workimg->width && y>=0 && y<workimg->height)
// 宏:CUDA_PIXEL(x,y)
// 获取内核函数中图像中(x,y)像素的位置
#define CUDA_PIXEL(x,y) imgcud.imgMeta.imgData[(y)*imgcud.pitchBytes+(x)]
// 宏:CUDA_VALID(x,y)
// 判断内核函数中(x,y)像素的位置是否合法
#define CUDA_VALID(x,y) (x>=0 && x<imgcud.imgMeta.width && y>=0 && y<imgcud.imgMeta.height)
// 宏:CUDA_STACK_SIZE
// 自定义的cuda栈最大容量,根据测试,不太复杂的图像,最大深度为4,因此最大值定义64足够
#define CUDA_STACK_SIZE 64
// Struct: mypoint
// Holds the integer (x, y) coordinates of one pixel.
typedef struct mypoint{
    int x;    // column (x) coordinate
    int y;    // row (y) coordinate
}point;
//--------------------------内核方法声明------------------------------------
/*
// Kernel 函数:_seedScanLineConKer(并行的种子扫描线算法,种子在轮廓内部)
static __global__ void _seedScanLineInConKer(
ImageCuda imgcud, // 要填充的轮廓图像
int * stackmaxsize // 返回自定义堆栈最大使用深度
);*/
// Kernel 函数:_seedScanLineOutConKer(并行的种子扫描线算法,种子在轮廓外部)
static __global__ void _seedScanLineOutConKer(
ImageCuda imgcud // 要填充的轮廓图像
//int * stackmaxsiz // 返回自定义堆栈最大使用深度
);
// Kernel 函数:_intersectionKer(求两幅图像交,结果放入outbordercud中)
static __global__ void _intersectionKer(
ImageCuda outborderCud, // 外轮廓被填充过后的图像
ImageCuda inborderCud // 内轮廓被填充过后的图像
);
//--------------------------内核方法实现------------------------------------
/*
// Kernel 函数:_seedScanLineInConKer(并行的种子扫描线算法,种子在轮廓内)
static __global__ void _seedScanLineInConKer(
ImageCuda imgcud, // 要填充的轮廓图像
int * stackmaxsize // 返回自定义堆栈最大使用深度
){
// 计算线程对应的输出点的位置的 x 和 y 分量
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一
// 方面防止由于段错误导致的程序崩溃。
if(x>=imgcud.imgMeta.width || y >= imgcud.imgMeta.height )
return;
int cudastack[CUDA_STACK_SIZE];
// 填充工作,
// 输入:轮廓线workimg,种子seed;
// 输出:填充过的workimg
int stackptr=0;
point seed;
seed.x=40+x*10;
seed.y=40+y*10;
if(seed.x>=imgcud.imgMeta.width || seed.y >= imgcud.imgMeta.height )
return;
int xtemp,xright,xleft;
int spanfill;
// 种子入栈
cudastack[stackptr++]=seed.x;
cudastack[stackptr++]=seed.y;
// stackptr==0表示栈为空,>0说明栈不空,每个像素点占用2个位置
while(stackptr>0){
point cur;
// 统计堆栈最大深度
if(stackptr>stackmaxsize[0])
stackmaxsize[0]=stackptr;
// 入栈顺序x、y,出栈顺序应y、x。
cur.y=cudastack[--stackptr];
cur.x=cudastack[--stackptr];
// 填充当前点
CUDA_PIXEL(cur.x,cur.y)=BORDER_COLOR;
// 向右填充,填充过程中检测当前点坐标,如果越界,说明种子在图形外
for(xtemp=cur.x+1;CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp++){
if(CUDA_VALID(xtemp,cur.y)==false) return ;
CUDA_PIXEL(xtemp,cur.y)=BORDER_COLOR;}
//纪录当前线段最右位置
xright=xtemp-1;
// 向左填充
for(xtemp=cur.x-1;CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp--){
if(CUDA_VALID(xtemp,cur.y)==false) return ;
CUDA_PIXEL(xtemp,cur.y)=BORDER_COLOR;
}
// 纪录当前线段最左位置
xleft=xtemp+1;
//cout<<"hang:"<<cur.y<<"["<<xleft<<","<<xright<<"]"<<endl;
// 下方相邻扫描线
xtemp=xleft; cur.y++;
// 循环一次,把一个线段种子放入堆栈(一条扫描线中可能多个线段)
while(xtemp<=xright){
spanfill=0;
// 找到一个线段的最右点
while(CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
xtemp<=xright){
spanfill=1;
xtemp++;
}
// 最右点(xtemp-1,cur.y)入栈
if(spanfill==1){
cudastack[stackptr++]=xtemp-1;
cudastack[stackptr++]=cur.y;
}
// 继续向右走,跳过边界和已经填充部分,找到下一段未填充线段
while((CUDA_PIXEL(xtemp,cur.y)==BORDER_COLOR
&& xtemp<=xright)
xtemp++;
} // 下方扫描线结束
//上方相邻扫描线
xtemp=xleft; cur.y-=2;
// 循环一次,把一个线段种子放入堆栈(一条扫描线中可能多个线段)
while(xtemp<=xright){
spanfill=0;
// 找到一个线段的最右点
while(CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
xtemp<=xright){
spanfill=1;
xtemp++;
}
// 最右点入栈
if(spanfill==1){
cudastack[stackptr++]=xtemp-1;
cudastack[stackptr++]=cur.y;
}
// 继续向右走,跳过边界和已经填充部分,找到下一段未填充线段
while(CUDA_PIXEL(xtemp,cur.y)==BORDER_COLOR ||
&& xtemp<=xright)
xtemp++;
} // 上方扫描线结束
}// 填充结束
return ;
}*/
// Kernel: _seedScanLineOutConKer (parallel seed scan-line fill; the seeds lie
// on the image border, i.e. outside the contour).
// Launch layout: gridDim.y selects which image edge supplies the seeds
// (0: top row, 1: bottom row, 2: left column, 3: right column); the x index
// selects the position along that edge.  Every background border pixel floods
// the frame-connected region with BORDER_COLOR.
static __global__ void _seedScanLineOutConKer(
        ImageCuda imgcud   // contour image, filled in place
        //int * stackmaxsize   // (disabled) reports the peak stack depth
){
    // Position handled by this thread.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    point seed;
    switch(y){
    // seeds taken from the first row
    case 0:
        seed.x=x;
        seed.y=0;
        break;
    // seeds taken from the last row
    case 1:
        seed.x=x;
        seed.y=imgcud.imgMeta.height-1;
        break;
    // seeds taken from the first column
    case 2:
        seed.x=0;
        seed.y=x;
        break;
    // seeds taken from the last column
    case 3:
        seed.x=imgcud.imgMeta.width-1;
        seed.y=x;
        break;
    }
    // Bail out when the seed falls outside the image or is not a background
    // pixel (it may be a contour pixel or already filled by another thread).
    if(seed.x>=imgcud.imgMeta.width ||
       seed.y >= imgcud.imgMeta.height ||
       CUDA_PIXEL(seed.x,seed.y) != BK_COLOR)
        return;
    // Scan-line fill.  Input: contour image plus seed; output: filled image.
    // The explicit stack holds (x, y) pairs, two ints per point.
    int cudastack[CUDA_STACK_SIZE];
    int stackptr=0;
    int xtemp,xright,xleft;
    int spanfill;
    // Push the seed.
    cudastack[stackptr++]=seed.x;
    cudastack[stackptr++]=seed.y;
    // stackptr == 0 means the stack is empty.
    while(stackptr>0){
        point cur;
        // Points were pushed x then y, so pop y then x.
        cur.y=cudastack[--stackptr];
        cur.x=cudastack[--stackptr];
        // Fill the current point.
        CUDA_PIXEL(cur.x,cur.y)=BORDER_COLOR;
        // Fill to the right, checking bounds before every read.
        for(xtemp=cur.x+1;CUDA_VALID(xtemp,cur.y)&&CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp++){
            CUDA_PIXEL(xtemp,cur.y)=BORDER_COLOR;}
        // Rightmost position of the current span.
        xright=xtemp-1;
        // Fill to the left.
        for(xtemp=cur.x-1;CUDA_VALID(xtemp,cur.y)&&CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp--){
            CUDA_PIXEL(xtemp,cur.y)=BORDER_COLOR;
        }
        // Leftmost position of the current span.
        xleft=xtemp+1;
        // Scan line below the current one.
        xtemp=xleft; cur.y++;
        // Each iteration pushes one span seed (a scan line may hold several
        // spans).
        while(xtemp<=xright && cur.y>=0 && cur.y<imgcud.imgMeta.height){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            // fix: the range check must come before the pixel read; the old
            // operand order could read one pixel past the row when the span
            // ended at the image edge.
            while(xtemp<=xright &&
                  CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel (xtemp-1, cur.y).
            // fix: guard against overflowing the fixed-size stack — dropping
            // a span is preferable to corrupting local memory.
            if(spanfill==1 && stackptr+2<=CUDA_STACK_SIZE){
                cudastack[stackptr++]=xtemp-1;
                cudastack[stackptr++]=cur.y;
            }
            // Skip border/filled pixels to reach the next unfilled span.
            while(xtemp<=xright &&
                  CUDA_PIXEL(xtemp,cur.y)==BORDER_COLOR)
                xtemp++;
        } // end of the lower scan line
        // Scan line above the original one.
        xtemp=xleft; cur.y-=2;
        while(xtemp<=xright && cur.y>=0 && cur.y<imgcud.imgMeta.height){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            while(xtemp<=xright &&
                  CUDA_PIXEL(xtemp,cur.y)!=BORDER_COLOR){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel, with the same overflow guard.
            if(spanfill==1 && stackptr+2<=CUDA_STACK_SIZE){
                cudastack[stackptr++]=xtemp-1;
                cudastack[stackptr++]=cur.y;
            }
            // Skip border/filled pixels to the next unfilled span.
            // fix: range check before pixel read here as well (the old code
            // tested the pixel first).
            while(xtemp<=xright &&
                  CUDA_PIXEL(xtemp,cur.y)==BORDER_COLOR)
                xtemp++;
        } // end of the upper scan line
    }// fill finished
    return ;
}
// Kernel: _intersectionKer (intersect two filled contour images; the result
// is written back into outborderCud).
// In this version the fill color equals the contour color, so the logic is a
// simple per-pixel test.
static __global__ void _intersectionKer(
        ImageCuda outborderCud,   // outer contour after filling (also output)
        ImageCuda inborderCud     // inner contour after filling
){
    // Pixel handled by this thread.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip threads mapped outside the image; avoids out-of-bounds access.
    if(x>=outborderCud.imgMeta.width || y >= outborderCud.imgMeta.height )
        return;
    // fix: the two images may have different row pitches, so each must be
    // indexed with its own pitchBytes (the old code reused the outer image's
    // pitch for both).
    int outindex=y*outborderCud.pitchBytes+x;
    int inindex=y*inborderCud.pitchBytes+x;
    // A pixel belongs to the result iff it is filled in the inner-border
    // image AND unfilled in the outer-border image; everything else is
    // background.
    if(outborderCud.imgMeta.imgData[outindex] != BORDER_COLOR &&
       inborderCud.imgMeta.imgData[inindex] == BORDER_COLOR)
        outborderCud.imgMeta.imgData[outindex]=BORDER_COLOR;
    else
        outborderCud.imgMeta.imgData[outindex]=BK_COLOR;
    return;
}
// Kernel: _negateKer (invert the image in place: BORDER_COLOR <-> BK_COLOR).
// Inverting the filled outer-contour image yields the filled interior.
static __global__ void _negateKer(
        ImageCuda outborderCud   // filled outer-contour image, inverted in place
){
    // Pixel handled by this thread.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip threads mapped outside the image; avoids out-of-bounds access.
    if (col >= outborderCud.imgMeta.width || row >= outborderCud.imgMeta.height)
        return;
    const int idx = row * outborderCud.pitchBytes + col;
    // Swap the two colors.
    outborderCud.imgMeta.imgData[idx] =
        (outborderCud.imgMeta.imgData[idx] == BORDER_COLOR) ? BK_COLOR
                                                            : BORDER_COLOR;
    return;
}
//--------------------------全局方法声明------------------------------------
// 函数:_findMinMaxCoordinates(根据输入点集的坐标,找到最上、最下、最左、最右
// 的点,从而确定图像的宽和高)
static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset,
int *xmin, int *ymin,
int *xmax, int *ymax);
//--------------------------全局方法实现------------------------------------
// Function: _findMinMaxCoordinates (scan the input point set for the
// leftmost / rightmost / topmost / bottommost coordinates, which determine
// the working image's width and height).
static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset,
                                           int *xmin, int *ymin,
                                           int *xmax, int *ymax)
{
    // Local error-code variable.
    int errcode;
    // Allocate a temporary host-side CoordiSet.
    CoordiSet *tmpcoordiset;
    errcode = CoordiSetBasicOp::newCoordiSet(&tmpcoordiset);
    if (errcode != NO_ERROR)
        return errcode;
    errcode = CoordiSetBasicOp::makeAtHost(tmpcoordiset, guidingset->count);
    if (errcode != NO_ERROR) {
        // fix: the temporary set was leaked on this error path.
        CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
        return errcode;
    }
    // Copy the coordinate set to the host.
    errcode = CoordiSetBasicOp::copyToHost(guidingset, tmpcoordiset);
    if (errcode != NO_ERROR) {
        // fix: the temporary set was leaked on this error path.
        CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
        return errcode;
    }
    // Initialize the extrema from the first point (tplData stores x,y pairs).
    xmin[0] = xmax[0] = tmpcoordiset->tplData[0];
    ymin[0] = ymax[0] = tmpcoordiset->tplData[1];
    // Scan the remaining points for the extrema.
    for (int i = 1;i < tmpcoordiset->count;i++) {
        // Minimum in the x direction.
        if (xmin[0] > tmpcoordiset->tplData[2 * i])
            xmin[0] = tmpcoordiset->tplData[2 * i];
        // Maximum in the x direction.
        if (xmax[0] < tmpcoordiset->tplData[2 * i])
            xmax[0] = tmpcoordiset->tplData[2 * i];
        // Minimum in the y direction.
        if (ymin[0] > tmpcoordiset->tplData[2 * i + 1])
            ymin[0] = tmpcoordiset->tplData[2 * i + 1];
        // Maximum in the y direction.
        if (ymax[0] < tmpcoordiset->tplData[2 * i + 1])
            ymax[0] = tmpcoordiset->tplData[2 * i + 1];
    }
    // Release the temporary set; errcode is NO_ERROR here.
    CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
    return errcode;
}
//--------------------------成员方法实现------------------------------------
// Member method: seedScanLineSeri (serial scan-line seed fill of the region
// bounded by a coordinate set).
__host__ int                   // Return: NO_ERROR on success, otherwise an
                               // error code.
FillCoor::seedScanLineSeri(
    CoordiSet *incoor,         // input coordiset: one closed contour
    CoordiSet *outcoor,        // output coordiset: the filled region
    int x,                     // seed x coordinate (inside the contour)
    int y                      // seed y coordinate
){
    // Bounding box of the contour points.
    int minx,maxx,miny,maxy;
    int errorcode=_findMinMaxCoordinates(incoor,&minx,&miny,&maxx,&maxy);
    if(errorcode!=NO_ERROR)
        // fix: propagate the real error code; the previous `return 0` was
        // indistinguishable from NO_ERROR.
        return errorcode;
    // Create the working image; width/height are max coordinate + 1 because
    // coordinates are 0-based.
    Image *workimg;
    ImageBasicOp::newImage(&workimg);
    ImageBasicOp::makeAtHost(workimg,maxx+1 ,maxy+1);
    // Draw the coordinate set onto the image (foreground BORDER_COLOR,
    // background BK_COLOR).
    ImgConvert imgcvt(BORDER_COLOR,BK_COLOR);
    imgcvt.cstConvertToImg(incoor,workimg);
    // The fill below runs on host memory, so the image must be on the host.
    ImageBasicOp::copyToHost(workimg);
#ifdef DEBUG_IMG
    // fix: dumping the pre-fill image is debug output; it was previously
    // written unconditionally (unlike "afterFill.bmp" below).
    ImageBasicOp::writeToFile("biforeFill.bmp",workimg);
#endif
    //----------------------------------------------------
    // Scan-line seed fill.
    // Input : contour image workimg plus the seed.
    // Output: filled workimg.
    int deepestnum=0;
    point seed;
    seed.x=x;
    seed.y=y;
    int xtemp,xright,xleft;
    int spanfill;
    stack <point>st;
    st.push(seed);
    int loopnum=0;
    while(!st.empty()){
        point cur;
        loopnum++;
        // Track the maximum stack depth (debug statistics).
        // fix: explicit cast avoids a signed/unsigned comparison.
        if((int)st.size()>deepestnum){
            deepestnum=(int)st.size();
        }
        cur=st.top();
        st.pop();
        PIXEL(cur.x,cur.y)=BORDER_COLOR;
        // Fill to the right; walking off the image means the seed lies
        // outside the contour.
        for(xtemp=cur.x+1;PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp++){
            if(VALID(xtemp,cur.y)==false){
                // fix: release the working image before bailing out (leaked).
                ImageBasicOp::deleteImage(workimg);
                return INVALID_DATA;
            }
            PIXEL(xtemp,cur.y)=BORDER_COLOR;}
        // Rightmost position of the current span.
        xright=xtemp-1;
        // Fill to the left.
        for(xtemp=cur.x-1;PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp--){
            if(VALID(xtemp,cur.y)==false){
                // fix: release the working image before bailing out (leaked).
                ImageBasicOp::deleteImage(workimg);
                return INVALID_DATA;
            }
            PIXEL(xtemp,cur.y)=BORDER_COLOR;
        }
        // Leftmost position of the current span.
        xleft=xtemp+1;
        // Scan line below the current one.
        xtemp=xleft; cur.y++;
        // Each iteration pushes one span seed (a scan line may hold several
        // spans).  NOTE(review): relies on the contour being closed so cur.y
        // stays inside the image — confirm for all callers.
        while(xtemp<=xright){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            while(PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
                  xtemp<=xright){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel as a new seed.
            if(spanfill==1){
                point t;
                t.x=xtemp-1;
                t.y=cur.y;
                st.push(t);
            }
            // Skip border/filled pixels to reach the next unfilled span.
            while(PIXEL(xtemp,cur.y)==BORDER_COLOR &&
                  xtemp<=xright)
                xtemp++;
        } // end of the lower scan line
        // Scan line above the original one.
        xtemp=xleft; cur.y-=2;
        while(xtemp<=xright){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            while(PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
                  xtemp<=xright){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel as a new seed.
            if(spanfill==1){
                point t;
                t.x=xtemp-1;
                t.y=cur.y;
                st.push(t);
            }
            // Skip border/filled pixels to reach the next unfilled span.
            while(PIXEL(xtemp,cur.y)==BORDER_COLOR
                  && xtemp<=xright)
                xtemp++;
        } // end of the upper scan line
    }// fill finished
    ImageBasicOp::copyToHost(workimg);
#ifdef DEBUG_IMG
    printf("loopnum= %3d, deepestnum=%2d \n ",loopnum,deepestnum);
    // Dump the filled image to a file.
    ImageBasicOp::writeToFile("afterFill.bmp",workimg);
#endif
    // Convert the filled image back into a coordinate set.
    imgcvt.imgConvertToCst(workimg,outcoor);
    ImageBasicOp::deleteImage(workimg);
    return NO_ERROR;
}
// Member method: isInCoordiSetSeri (serial test of whether a point lies
// inside the region bounded by a coordinate set).
__host__ bool                  // Return: true if (x, y) lies inside the
                               // contour, false otherwise (also false on any
                               // internal error).
FillCoor::isInCoordiSetSeri(
    CoordiSet *incoor,         // input coordiset: one closed contour
    int x,                     // query point x coordinate
    int y                      // query point y coordinate
){
    // Bounding box of the contour points.
    int minx,maxx,miny,maxy;
    int errorcode=_findMinMaxCoordinates(incoor,&minx,&miny,&maxx,&maxy);
    if(errorcode!=NO_ERROR)
        return false;
    // Working image sized max coordinate + 1 (coordinates are 0-based).
    Image *workimg;
    ImageBasicOp::newImage(&workimg);
    ImageBasicOp::makeAtHost(workimg,maxx+1 ,maxy+1);
    // Draw the contour (foreground BORDER_COLOR, background BK_COLOR).
    ImgConvert imgcvt(BORDER_COLOR,BK_COLOR);
    imgcvt.cstConvertToImg(incoor,workimg);
    //----------------------------------------------------
    // Scan-line seed fill from (x, y).  If the fill ever walks off the
    // image, the query point was outside the contour.
    point seed;
    seed.x=x;
    seed.y=y;
    // The fill runs on host memory.
    ImageBasicOp::copyToHost(workimg);
    int xtemp,xright,xleft;
    int spanfill;
    stack <point>st;
    st.push(seed);
    while(!st.empty()){
        point cur;
        cur=st.top();
        st.pop();
        PIXEL(cur.x,cur.y)=BORDER_COLOR;
        // Fill to the right; walking off the image means "outside".
        for(xtemp=cur.x+1;PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp++){
            if(VALID(xtemp,cur.y)==false){
                // fix: release the working image before returning (leaked).
                ImageBasicOp::deleteImage(workimg);
                return false;
            }
            PIXEL(xtemp,cur.y)=BORDER_COLOR;}
        // Rightmost position of the current span.
        xright=xtemp-1;
        // Fill to the left.
        for(xtemp=cur.x-1;PIXEL(xtemp,cur.y)!=BORDER_COLOR;xtemp--){
            if(VALID(xtemp,cur.y)==false){
                // fix: release the working image before returning (leaked).
                ImageBasicOp::deleteImage(workimg);
                return false;
            }
            PIXEL(xtemp,cur.y)=BORDER_COLOR;
        }
        // Leftmost position of the current span.
        xleft=xtemp+1;
        // Scan line below the current one.
        xtemp=xleft; cur.y++;
        // Each iteration pushes one span seed (a scan line may hold several
        // spans).
        while(xtemp<=xright){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            while(PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
                  xtemp<=xright){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel as a new seed.
            if(spanfill==1){
                point t;
                t.x=xtemp-1;
                t.y=cur.y;
                st.push(t);
            }
            // Skip border/filled pixels to reach the next unfilled span.
            while(PIXEL(xtemp,cur.y)==BORDER_COLOR
                  && xtemp<=xright)
                xtemp++;
        } // end of the lower scan line
        // Scan line above the original one.
        xtemp=xleft; cur.y-=2;
        while(xtemp<=xright){
            spanfill=0;
            // Find the rightmost pixel of an unfilled span.
            while(PIXEL(xtemp,cur.y)!=BORDER_COLOR &&
                  xtemp<=xright){
                spanfill=1;
                xtemp++;
            }
            // Push the span's rightmost pixel as a new seed.
            if(spanfill==1){
                point t;
                t.x=xtemp-1;
                t.y=cur.y;
                st.push(t);
            }
            // Skip border/filled pixels to reach the next unfilled span.
            while(PIXEL(xtemp,cur.y)==BORDER_COLOR
                  && xtemp<=xright)
                xtemp++;
        } // end of the upper scan line
    }// fill finished
    //------------------------- cleanup ------------------------------
    // The fill stayed inside the image, so the point is inside the contour.
    ImageBasicOp::deleteImage(workimg);
    return true;
}
// Member method: seedScanLineCon (parallel scan-line seed fill of the region
// bounded by the given coordinate sets).
__host__ int                     // Return: NO_ERROR on success, otherwise an
                                 // error code.
FillCoor::seedScanLineCon(
    CoordiSet *outbordercoor,    // input coordiset: closed outer contour
    CoordiSet *inbordercoor,     // input coordiset: closed inner contour;
                                 // pass NULL when there is none
    CoordiSet *fillcoor          // output coordiset: the filled region
){
    // Bounding box of the outer contour points.
    int minx,maxx,miny,maxy;
    // ------------------ convert input coordisets to images ------------------
    Image *outborderimg;
    ImageBasicOp::newImage(&outborderimg);
    Image *inborderimg;
    ImageBasicOp::newImage(&inborderimg);
    ImageCuda outborderCud;
    ImageCuda inborderCud;
    ImgConvert imgcvt(BORDER_COLOR,BK_COLOR);
#ifdef DEBUG_TIME
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float runTime;
    cudaEventRecord(start, 0);
#endif
    // --------------------------- outer contour ------------------------------
    if(outbordercoor!=NULL){
        // Pre-pass: measure the outer contour.
        int errorcode=_findMinMaxCoordinates(outbordercoor,&minx,&miny,&maxx,&maxy);
        if(errorcode!=NO_ERROR){
            // fix: propagate the real error code (previously `return 0`,
            // indistinguishable from NO_ERROR) and release both images.
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return errorcode;
        }
        // Image size is max coordinate + 1 (0-based coordinates) + 1 more so
        // the area outside the contour stays connected along the frame.
        ImageBasicOp::makeAtHost(outborderimg,maxx+2 ,maxy+2);
        // Draw the contour onto the image: foreground 255, background 0.
        imgcvt.cstConvertToImg(outbordercoor,outborderimg);
#ifdef DEBUG_IMG
        // Debug only: dump the image before filling.
        ImageBasicOp::copyToHost(outborderimg);
        ImageBasicOp::writeToFile("outborder_notFilled.bmp",outborderimg);
#endif
        int errcode;
        // Move the image to the device.
        errcode = ImageBasicOp::copyToCurrentDevice(outborderimg);
        if (errcode != NO_ERROR) {
            // fix: both images were leaked on this error path.
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return errcode;
        }
        // Extract the ROI sub-image.
        errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud);
        if (errcode != NO_ERROR) {
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return errcode;
        }
        // The larger of width/height sizes the seed-fill kernel launch.
        int outmaxsize=outborderimg->height>outborderimg->width?
                       outborderimg->height:outborderimg->width;
        dim3 grid,block;
        block.x=DEF_BLOCK_X;
        block.y=1;
        block.z=1;
        grid.x=(outmaxsize+DEF_BLOCK_X-1)/DEF_BLOCK_X;
        grid.y=4;    // one grid row per image edge supplying seeds
        grid.z=1;
        _seedScanLineOutConKer<<<grid,block>>>(outborderCud);
        // fix: launch errors of this kernel were previously ignored (the
        // later launches in this function are checked).
        if (cudaGetLastError() != cudaSuccess) {
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return CUDA_ERROR;
        }
#ifdef DEBUG_IMG
        ImageBasicOp::copyToHost(outborderimg);
        ImageBasicOp::writeToFile("outborder_Filled.bmp",outborderimg);
        // The intersection step still needs the image on the device.
        ImageBasicOp::copyToCurrentDevice(outborderimg);
#endif
    }// end of out border
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "out border fill " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif
    // --------------------------- inner contour ------------------------------
    if(outbordercoor!=NULL && inbordercoor!=NULL){
        // The inner-border image is intersected with the outer-border image
        // later, so it is allocated with the outer border's size.
        ImageBasicOp::makeAtHost(inborderimg,maxx+2 ,maxy+2);
        // Draw the contour: foreground 255, background 0.
        imgcvt.cstConvertToImg(inbordercoor,inborderimg);
#ifdef DEBUG_IMG
        // Debug only: dump the image before filling.
        ImageBasicOp::copyToHost(inborderimg);
        ImageBasicOp::writeToFile("inborder_notFilled.bmp",inborderimg);
#endif
        int errcode;
        // Move the image to the device.
        errcode = ImageBasicOp::copyToCurrentDevice(inborderimg);
        if (errcode != NO_ERROR) {
            // fix: both images were leaked on this error path.
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return errcode;
        }
        // Extract the ROI sub-image.
        errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud);
        if (errcode != NO_ERROR) {
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return errcode;
        }
        // The larger of width/height sizes the seed-fill kernel launch.
        int inmaxsize=inborderimg->height>inborderimg->width?
                      inborderimg->height:inborderimg->width;
        dim3 grid,block;
        block.x=DEF_BLOCK_X;
        block.y=1;
        block.z=1;
        grid.x=(inmaxsize+DEF_BLOCK_X-1)/DEF_BLOCK_X;
        grid.y=4;
        grid.z=1;
        _seedScanLineOutConKer<<<grid,block>>>(inborderCud);
        // fix: launch errors were previously ignored here as well.
        if (cudaGetLastError() != cudaSuccess) {
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return CUDA_ERROR;
        }
#ifdef DEBUG_IMG
        ImageBasicOp::copyToHost(inborderimg);
        ImageBasicOp::writeToFile("inborderFilled.bmp",inborderimg);
        // The intersection step still needs the image on the device.
        ImageBasicOp::copyToCurrentDevice(inborderimg);
#endif
    }// end of in border & process
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "in border fill " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif
    //-------- with an inner contour: intersect the two filled images ---------
    if(outbordercoor!=NULL && inbordercoor!=NULL){
        dim3 gridsize,blocksize;
        // Launch configuration covering the whole outer image.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;
        // The result goes into outborderCud; both images are already on the
        // device, so no further copy is needed.
        _intersectionKer<<<gridsize, blocksize>>>(
            outborderCud,
            inborderCud
        );
        if (cudaGetLastError() != cudaSuccess) {
            // fix: both images were leaked on this error path.
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return CUDA_ERROR;
        }
    }
    //-------- no inner contour: just invert the filled outer image -----------
    else{
        dim3 gridsize,blocksize;
        // Launch configuration covering the whole outer image.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;
        // Invert: the filled exterior becomes the filled interior.
        _negateKer<<<gridsize, blocksize>>>(
            outborderCud
        );
        if (cudaGetLastError() != cudaSuccess) {
            // fix: both images were leaked on this error path.
            ImageBasicOp::deleteImage(outborderimg);
            ImageBasicOp::deleteImage(inborderimg);
            return CUDA_ERROR;
        }
    }
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "inter or negate " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif
    //----------- serial conversion of the image back to a coordiset ----------
    ImageBasicOp::copyToHost(outborderimg);
#ifdef DEBUG_IMG
    // Dump the final image to a file.
    ImageBasicOp::writeToFile("intersection.bmp",outborderimg);
#endif
    // imgcvt is configured as foreground 255 / background 0 and would skip
    // grey pixels, so the conversion is done by hand here.  (A parallel
    // image-to-coordiset conversion was measured slower than this loop.)
    int w,h;
    w=outborderimg->width;
    h=outborderimg->height;
    int imgsize=w*h;
    // Each point (x, y) occupies two ints.
    int *coorarray=(int *)malloc(2*imgsize*sizeof(int));
    if(coorarray==NULL){
        // fix: malloc failure was previously undetected.
        ImageBasicOp::deleteImage(outborderimg);
        ImageBasicOp::deleteImage(inborderimg);
        return OUT_OF_MEM;
    }
    int coorcount=0;
    for(int i=0;i<w;i++)
        for(int j=0;j<h;j++){
            int curpix=outborderimg->imgData[j*w+i];
            if(curpix==BORDER_COLOR ){
                coorarray[coorcount*2]=i;
                coorarray[coorcount*2+1]=j;
                coorcount++;
            }
        }
    CoordiSetBasicOp::makeAtHost(fillcoor,coorcount);
    memcpy(fillcoor->tplData,coorarray,coorcount*2*sizeof(int));
    free(coorarray);
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "seri img to coor " << runTime << " ms" << endl;
    // fix: the timing events were never destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
#endif
    //------------------------------- cleanup ---------------------------------
    ImageBasicOp::deleteImage(outborderimg);
    ImageBasicOp::deleteImage(inborderimg);
    return NO_ERROR;
}
// Member method: seedScanLineCon (parallel scan-line seed fill, image
// interface).
__host__ int                     // Return: NO_ERROR on success, otherwise an
                                 // error code.
FillCoor::seedScanLineCon(
    Image *outborderimg,         // closed outer-contour image; also receives
                                 // the result
    Image *inborderimg           // closed inner-contour image; pass NULL when
                                 // there is no inner contour
){
    // fix: a NULL outer image was previously dereferenced in the
    // negate/intersect launch configuration below.
    if (outborderimg == NULL)
        return NULL_POINTER;
    ImageCuda outborderCud;
    ImageCuda inborderCud;
#ifdef DEBUG_TIME
    // fix: start/stop/runTime were referenced below but never declared in
    // this overload, so the build broke whenever DEBUG_TIME was defined.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float runTime;
    cudaEventRecord(start, 0);
#endif
    // --------------------------- outer contour ------------------------------
    if(outborderimg!=NULL){
        int errcode;
        // Move the image to the device.
        errcode = ImageBasicOp::copyToCurrentDevice(outborderimg);
        if (errcode != NO_ERROR) {
            return errcode;
        }
        // Extract the ROI sub-image.
        errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud);
        if (errcode != NO_ERROR) {
            return errcode;
        }
        // The larger of width/height sizes the seed-fill kernel launch.
        int outmaxsize=outborderimg->height>outborderimg->width?
                       outborderimg->height:outborderimg->width;
        dim3 grid,block;
        block.x=DEF_BLOCK_X;
        block.y=1;
        block.z=1;
        grid.x=(outmaxsize+DEF_BLOCK_X-1)/DEF_BLOCK_X;
        grid.y=4;    // one grid row per image edge supplying seeds
        grid.z=1;
        _seedScanLineOutConKer<<<grid,block>>>(outborderCud);
#ifdef DEBUG_IMG
        ImageBasicOp::copyToHost(outborderimg);
        ImageBasicOp::writeToFile("outborder_Filled.bmp",outborderimg);
        // The intersection step still needs the image on the device.
        ImageBasicOp::copyToCurrentDevice(outborderimg);
#endif
    }// end of out border
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "out border fill " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif
    // --------------------------- inner contour ------------------------------
    if(outborderimg!=NULL && inborderimg!=NULL){
        int errcode;
        // Move the image to the device.
        errcode = ImageBasicOp::copyToCurrentDevice(inborderimg);
        if (errcode != NO_ERROR) {
            return errcode;
        }
        // Extract the ROI sub-image.
        errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud);
        if (errcode != NO_ERROR) {
            return errcode;
        }
        // The larger of width/height sizes the seed-fill kernel launch.
        int inmaxsize=inborderimg->height>inborderimg->width?
                      inborderimg->height:inborderimg->width;
        dim3 grid,block;
        block.x=DEF_BLOCK_X;
        block.y=1;
        block.z=1;
        grid.x=(inmaxsize+DEF_BLOCK_X-1)/DEF_BLOCK_X;
        grid.y=4;
        grid.z=1;
        _seedScanLineOutConKer<<<grid,block>>>(inborderCud);
#ifdef DEBUG_IMG
        ImageBasicOp::copyToHost(inborderimg);
        ImageBasicOp::writeToFile("inborderFilled.bmp",inborderimg);
        // The intersection step still needs the image on the device.
        ImageBasicOp::copyToCurrentDevice(inborderimg);
#endif
    }// end of in border & process
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "in border fill " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif
    //-------- with an inner contour: intersect the two filled images ---------
    if(outborderimg!=NULL && inborderimg!=NULL){
        dim3 gridsize,blocksize;
        // Launch configuration covering the whole outer image.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;
        // The result goes into outborderCud; both images are already on the
        // device, so no further copy is needed.
        _intersectionKer<<<gridsize, blocksize>>>(
            outborderCud,
            inborderCud
        );
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    }
    //-------- no inner contour: just invert the filled outer image -----------
    else{
        dim3 gridsize,blocksize;
        // Launch configuration covering the whole outer image.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;
        // Invert: the filled exterior becomes the filled interior.
        _negateKer<<<gridsize, blocksize>>>(
            outborderCud
        );
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    }
#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "inter or negate " << runTime << " ms" << endl;
    // fix: destroy the timing events before returning.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
#endif
    return NO_ERROR;
}
#ifndef __NBLA_CUDA_FUNCTION_BASE_TRANSFORM_BINARY_CUH__
#define __NBLA_CUDA_FUNCTION_BASE_TRANSFORM_BINARY_CUH__
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/utils/base_transform_binary.hpp>
#include <nbla/cuda/half.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <assert.h>
#include <string>
#include <tuple>
// Note:
// The all kernels in this file tries to use more precise type intermediately
// (e.g. float instead of half) to keep the numerical precision.
namespace nbla {
using std::tuple;
using std::string;
using std::is_same;
// ----------------------------------------------------------------------------
// Base class to store a binary operation
// ----------------------------------------------------------------------------
class BaseBinaryOpCuda {
public:
  // Forward operation y = op(x0, x1).  This base implementation is a
  // placeholder returning 0; concrete operator classes are expected to
  // provide the real device implementation (TODO confirm usage pattern).
  template <typename T>
  __forceinline__ __device__ T operator()(const T x0, const T x1) {
    return 0;
  }
  // Gradient w.r.t. the first input, given the upstream gradient dy, both
  // inputs, the forward output y, and the in-place flag.  Placeholder.
  template <typename T>
  __forceinline__ __device__ T g0(const T dy, const T x0, const T x1, const T y,
                                  const bool inplace) {
    return 0;
  }
  // Gradient w.r.t. the second input.  Placeholder.
  template <typename T>
  __forceinline__ __device__ T g1(const T dy, const T x0, const T x1, const T y,
                                  const bool inplace) {
    return 0;
  }
  // Host-side guard: raises not_implemented for operators that do not
  // supply a backward for input 0.
  __host__ void verify_g0() {
    NBLA_ERROR(error_code::not_implemented,
               "Backward operation for input 0 is not implemented.");
  }
  // Host-side guard: raises not_implemented for operators that do not
  // supply a backward for input 1.
  __host__ void verify_g1() {
    NBLA_ERROR(error_code::not_implemented,
               "Backward operation for input 1 is not implemented.");
  }
};
namespace transform_binary_cuda {
// ----------------------------------------------------------------------------
// Utilities
// ----------------------------------------------------------------------------
// The macros and functions in cuda/common.hpp are redefined
// for three-dimensional blocks and for Size_t.
#define TRANSFORM_BINARY_CUDA_GRID_DIV 128
// NBLA_CUDA_MAX_BLOCKS is 65536. Probably 65535 is more appropriate
// because CUDA Toolkit Documentation describes taht this is the maximum
// y- or z-dimension of a grid of thread blocks.
#define TRANSFORM_BINARY_CUDA_MAX_BLOCKS 65535
#define TRANSFORM_BINARY_CUDA_MAX_Z_THREADS 64
/** ceil(N/D) where N and D are Size_t */
#define NBLA_CEIL_SIZE_T_DIV(N, D) \
((static_cast<Size_t>(N) + static_cast<Size_t>(D) - 1) / \
static_cast<Size_t>(D))
/** CUDA grid-strided loop of Size_t */
#define TRANSFORM_BINARY_CUDA_KERNEL_LOOP(idx, num) \
for (Size_t idx = (Size_t)blockIdx.x * blockDim.x + threadIdx.x; \
idx < (num); idx += (Size_t)blockDim.x * gridDim.x)
/** Get an appropriate block size given a size of elements.
The kernel is assumed to contain a grid-strided loop.
This is an overload function of Size_t type which originally defined
in cuda/common.hpp.
*/
/** Get an appropriate number of grid blocks for a given element count.
    The kernel is assumed to contain a grid-strided loop, so the grid is
    capped at NBLA_CUDA_MAX_BLOCKS and the remainder is absorbed by the
    in-kernel loop.  Size_t overload of the helper in cuda/common.hpp.
*/
inline Size_t cuda_get_blocks_by_size(const Size_t size) {
  if (size == 0)
    return 0;
  // Blocks required to cover `size` with one thread per element.
  const Size_t needed = NBLA_CEIL_SIZE_T_DIV(size, NBLA_CUDA_NUM_THREADS);
  // Loop iterations each thread must perform to stay under the block cap.
  const Size_t iters = NBLA_CEIL_SIZE_T_DIV(needed, NBLA_CUDA_MAX_BLOCKS);
  return NBLA_CEIL_SIZE_T_DIV(needed, iters);
}
// Selects which input term a backward kernel differentiates with respect to.
enum Term { x0, x1 };
// Strides of both inputs and of the output, plus the output shape, for the
// kernels specialized to three-dimensional data.
struct Dim3KernelParams {
  Size_t stride_x0_0, stride_x0_1, stride_x0_2;  // strides of input x0
  Size_t stride_x1_0, stride_x1_1, stride_x1_2;  // strides of input x1
  Size_t stride_y_0, stride_y_1, stride_y_2;     // strides of output y
  Size_t shape_y_0, shape_y_1, shape_y_2;        // shape of output y
};
/* Get the block size for the kernels specialized for three-dimensional data.
If #dim of blocks is 3,
blockDim.x * blockDim.y * blockDim.z <= NBLA_CUDA_NUM_THREADS
If #dim of blocks is 2,
blockDim.x * blockDim.y <= NBLA_CUDA_NUM_THREADS, blockDim.z = 1
If #dim of blocks is 1,
blockDim.x <= NBLA_CUDA_NUM_THREADS, blockDim.y = 1, blockDim.z = 1
Template is used to suppress the duplicated definition of this function
among binary operators. It would be avoidable by declaration and definition
file separation.
*/
/* Get the block size for the kernels specialized for three-dimensional data.
   Each block dimension is a power of two; the product of all used dimensions
   stays within NBLA_CUDA_NUM_THREADS and the z-dimension within
   TRANSFORM_BINARY_CUDA_MAX_Z_THREADS.  The x-dimension has priority over y,
   which has priority over z.  num_block_dims selects how many of the block
   dimensions (1, 2, or 3) are actually used; unused ones stay at 1.
   The template parameters only serve to suppress duplicate definitions of
   this function across binary operators.
*/
template <typename T, typename BinaryOp>
dim3 get_blocks_dim3(const Shape_t &shape, const int num_block_dims) {
  if (shape.size() != 3) {
    NBLA_ERROR(error_code::value, "Shape is not three-dimensional.");
  }
  // blockDim.x = 2^lg[2], blockDim.y = 2^lg[1], blockDim.z = 2^lg[0].
  Size_t lg[3] = {0, 0, 0};
  // Smallest power of two covering each requested shape dimension.
  lg[2] = (Size_t)std::ceil(std::log2(shape[2]));
  if (num_block_dims != 1)
    lg[1] = (Size_t)std::ceil(std::log2(shape[1]));
  if (num_block_dims == 3)
    lg[0] = (Size_t)std::ceil(std::log2(shape[0]));
  // Adjust the exponents so the total block size stays within the limits.
  const Size_t lg_max = (Size_t)std::floor(std::log2(NBLA_CUDA_NUM_THREADS));
  const Size_t lg_max_z =
      (Size_t)std::floor(std::log2(TRANSFORM_BINARY_CUDA_MAX_Z_THREADS));
  lg[2] = std::min(lg[2], lg_max);
  if (num_block_dims != 1)
    lg[1] = std::min(lg[1], lg_max - lg[2]);
  if (num_block_dims == 3)
    lg[0] = std::min(lg_max_z, std::min(lg[0], lg_max - lg[2] - lg[1]));
  // Raise two to the computed exponents and return.
  return dim3(1 << lg[2], 1 << lg[1], 1 << lg[0]);
}
/** Get an appropriate grid size given a size of elements.
The kernel is assumed to contain a grid-strided loop.
At the same time, Size_t type for NNabla is converted to uint type for CUDA
safely.
Template is used to suppress the duplicated definition of this function
among binary operators. It would be avoidable by declaration and definition
file separation.
*/
/** Get an appropriate 3-D grid size for the given per-axis block counts.
    The kernel is assumed to contain a grid-strided loop on each axis, so any
    excess over TRANSFORM_BINARY_CUDA_MAX_BLOCKS is folded into the in-kernel
    loop.  Also converts NNabla's Size_t safely to CUDA's unsigned int dims.
    The template parameters only serve to suppress duplicate definitions of
    this function across binary operators.
*/
template <typename T, typename BinaryOp>
dim3 get_strided_grids_dim3(const Size_t grid_x, const Size_t grid_y,
                            const Size_t grid_z) {
  // In-kernel loop counts needed to keep each axis under the block cap.
  const Size_t loop_x =
      NBLA_CEIL_SIZE_T_DIV(grid_x, TRANSFORM_BINARY_CUDA_MAX_BLOCKS);
  const Size_t loop_y =
      NBLA_CEIL_SIZE_T_DIV(grid_y, TRANSFORM_BINARY_CUDA_MAX_BLOCKS);
  const Size_t loop_z =
      NBLA_CEIL_SIZE_T_DIV(grid_z, TRANSFORM_BINARY_CUDA_MAX_BLOCKS);
  return dim3{static_cast<unsigned int>(NBLA_CEIL_SIZE_T_DIV(grid_x, loop_x)),
              static_cast<unsigned int>(NBLA_CEIL_SIZE_T_DIV(grid_y, loop_y)),
              static_cast<unsigned int>(NBLA_CEIL_SIZE_T_DIV(grid_z, loop_z))};
}
/* Map a flat output index `idx` to the flat indices of the two inputs,
   writing them into idxes[0] (for x0) and idxes[1] (for x1).  The output
   coordinate along each of the `ndim` axes is recovered via strides_y and
   re-projected through strides_x0 / strides_x1.
   NOTE(review): shape_y is unused here — apparently kept for signature
   uniformity with the callers; confirm. */
inline __device__ void get_indices(Size_t *idxes /* Size_t[2] */, Size_t idx,
                                   const Size_t ndim, const Size_t *strides_x0,
                                   const Size_t *strides_x1,
                                   const Size_t *strides_y,
                                   const Size_t *shape_y) {
  idxes[0] = 0;
  idxes[1] = 0;
  for (Size_t i = 0; i < ndim; ++i) {
    // Output coordinate along axis i.
    const Size_t dim_idx = idx / strides_y[i];
    idxes[0] += dim_idx * strides_x0[i];
    idxes[1] += dim_idx * strides_x1[i];
    // Remove the contribution of axis i before handling the next axis.
    idx -= dim_idx * strides_y[i];
  }
}
/* Compose a flat linear offset from 3-D coordinates (x, y, z) and the
   corresponding per-axis strides. */
inline __device__ Size_t flatten_idx(const Size_t x, const Size_t y,
                                     const Size_t z, const Size_t stride_0,
                                     const Size_t stride_1,
                                     const Size_t stride_2) {
  const Size_t off_x = x * stride_0;
  const Size_t off_y = y * stride_1;
  const Size_t off_z = z * stride_2;
  return off_x + off_y + off_z;
}
// This kernel is used to store the results into the required type
// (e.g. half to float). This process is unnecessary when T == PRECISE_T
// (e.g. T == float).
// Stores `src` (held in the precise accumulation type, e.g. float) into
// `dst` (the storage type, e.g. half), optionally accumulating into the
// existing dst values when `accum` is true.  This step is unnecessary when
// T == PRECISE_T (e.g. T == float).
template <typename T, typename PRECISE_T, bool accum>
__global__ void kernel_precise_add(const Size_t size, T *dst,
                                   const PRECISE_T *src) {
  TRANSFORM_BINARY_CUDA_KERNEL_LOOP(idx, size) {
    // Sum in PRECISE_T, then narrow to T on assignment.
    dst[idx] = (accum ? (PRECISE_T)dst[idx] : (PRECISE_T)0) + src[idx];
  }
}
// ----------------------------------------------------------------------------
// Forward kernels for any dimensions
// ----------------------------------------------------------------------------
// Forward
// Forward pass for arbitrary dimensionality: every flat output index is
// mapped back to its (possibly broadcast) input offsets via get_indices and
// y[idx] = op(x0, x1) is evaluated in PRECISE_T.
template <typename T, typename PRECISE_T, typename BinaryOp>
__global__ void
kernel_forward_ndim(const Size_t size, BinaryOp op, const T *__restrict__ x0,
                    const T *__restrict__ x1, T *y, const Size_t ndim,
                    const Size_t *strides_x0, const Size_t *strides_x1,
                    const Size_t *strides_y, const Size_t *shape_y) {
  TRANSFORM_BINARY_CUDA_KERNEL_LOOP(idx, size) {
    Size_t in_idx[2];
    get_indices(in_idx, idx, ndim, strides_x0, strides_x1, strides_y, shape_y);
    const PRECISE_T a = static_cast<PRECISE_T>(x0[in_idx[0]]);
    const PRECISE_T b = static_cast<PRECISE_T>(x1[in_idx[1]]);
    y[idx] = op(a, b);
  }
}
// Backward
// Backward pass for arbitrary dimensionality. Each thread computes the local
// gradient for the template-selected term and accumulates it into `dst` with
// an atomic add, since several output elements may map onto the same
// (broadcast) input element.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
__global__ void
kernel_backward_ndim(const Size_t size, BinaryOp op, const T *__restrict__ dy,
                     const T *__restrict__ x0, const T *__restrict__ x1,
                     const T *__restrict__ y, PRECISE_T *dst,
                     const bool inplace, const Size_t ndim,
                     const Size_t *strides_x0, const Size_t *strides_x1,
                     const Size_t *strides_y, const Size_t *shape_y) {
  TRANSFORM_BINARY_CUDA_KERNEL_LOOP(idx, size) {
    Size_t in_idx[2];
    get_indices(in_idx, idx, ndim, strides_x0, strides_x1, strides_y, shape_y);
    // Promote all operands to the precise type before evaluating op.g0/g1.
    const PRECISE_T g = static_cast<PRECISE_T>(dy[idx]);
    const PRECISE_T a = static_cast<PRECISE_T>(x0[in_idx[0]]);
    const PRECISE_T b = static_cast<PRECISE_T>(x1[in_idx[1]]);
    const PRECISE_T out = static_cast<PRECISE_T>(y[idx]);
    if (term == Term::x0) {
      atomic_add(&dst[in_idx[0]], op.g0(g, a, b, out, inplace));
    } else {
      atomic_add(&dst[in_idx[1]], op.g1(g, a, b, out, inplace));
    }
  }
}
// ----------------------------------------------------------------------------
// Forward kernels for three dimensions
// ----------------------------------------------------------------------------
// Perform binary operation while broadcasting by using only strides.
// Forward pass for the broadcast-compressed three-dimensional case. Threads
// walk a 3D grid-strided loop over the output shape
// (shape_y_0, shape_y_1, shape_y_2) = (z, y, x); broadcasting of an input is
// realized by zero entries in its strides inside flatten_idx.
template <typename T, typename PRECISE_T, typename BinaryOp>
__global__ void kernel_forward_dim3(BinaryOp op, const T *__restrict__ x0,
                                    const T *__restrict__ x1, T *y,
                                    const Dim3KernelParams p) {
  const Size_t tid_x = (Size_t)blockIdx.x * blockDim.x + threadIdx.x;
  const Size_t tid_y = (Size_t)blockIdx.y * blockDim.y + threadIdx.y;
  const Size_t tid_z = (Size_t)blockIdx.z * blockDim.z + threadIdx.z;
  // Threads entirely outside the output shape have no first iteration.
  if (tid_x >= p.shape_y_2 || tid_y >= p.shape_y_1 || tid_z >= p.shape_y_0)
    return;
  for (Size_t iz = tid_z; iz < p.shape_y_0;
       iz += (Size_t)blockDim.z * gridDim.z) {
    for (Size_t iy = tid_y; iy < p.shape_y_1;
         iy += (Size_t)blockDim.y * gridDim.y) {
      for (Size_t ix = tid_x; ix < p.shape_y_2;
           ix += (Size_t)blockDim.x * gridDim.x) {
        const Size_t idx =
            flatten_idx(ix, iy, iz, p.stride_y_2, p.stride_y_1, p.stride_y_0);
        const Size_t idx0 = flatten_idx(ix, iy, iz, p.stride_x0_2,
                                        p.stride_x0_1, p.stride_x0_0);
        const Size_t idx1 = flatten_idx(ix, iy, iz, p.stride_x1_2,
                                        p.stride_x1_1, p.stride_x1_0);
        y[idx] = op(static_cast<PRECISE_T>(x0[idx0]),
                    static_cast<PRECISE_T>(x1[idx1]));
      }
    }
  }
}
// Perform binary operation without broadcast for both terms.
// With no broadcasting, the compressed problem is effectively
// one-dimensional (size == p.shape_y_2), so only the innermost strides are
// needed to address all three tensors.
template <typename T, typename PRECISE_T, typename BinaryOp>
__global__ void
kernel_forward_dim3_not_broadcasted_both_terms(const Size_t size, BinaryOp op,
                                               const T *x0, const T *x1, T *y,
                                               const Dim3KernelParams p) {
  TRANSFORM_BINARY_CUDA_KERNEL_LOOP(ix, size) { // size == p.shape_y_2
    const PRECISE_T a = static_cast<PRECISE_T>(x0[ix * p.stride_x0_2]);
    const PRECISE_T b = static_cast<PRECISE_T>(x1[ix * p.stride_x1_2]);
    y[ix * p.stride_y_2] = op(a, b);
  }
}
// ----------------------------------------------------------------------------
// Backward kernels without reduction for three dimensions
// ----------------------------------------------------------------------------
// The case where the specified "term" by template is not broadcasted.
// Because this term is not broadcasted, each (ix, iy, iz) maps to a distinct
// dx element, so plain read-modify-write accumulation (no atomics, no
// reduction) is used even though the other term may be broadcasted.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
__global__ void kernel_backward_dim3_broadcasted_other_term(
    BinaryOp op, const T *dy, const T *x0, const T *x1, const T *y, T *dx,
    const bool inplace, const Dim3KernelParams p) {
  // Grid-strided 3D loop over the output shape (z, y, x).
  const Size_t tid_x = (Size_t)blockIdx.x * blockDim.x + threadIdx.x;
  const Size_t tid_y = (Size_t)blockIdx.y * blockDim.y + threadIdx.y;
  const Size_t tid_z = (Size_t)blockIdx.z * blockDim.z + threadIdx.z;
  if (tid_x >= p.shape_y_2 || tid_y >= p.shape_y_1 || tid_z >= p.shape_y_0)
    return;
  for (Size_t iz = tid_z; iz < p.shape_y_0;
       iz += (Size_t)blockDim.z * gridDim.z) {
    for (Size_t iy = tid_y; iy < p.shape_y_1;
         iy += (Size_t)blockDim.y * gridDim.y) {
      for (Size_t ix = tid_x; ix < p.shape_y_2;
           ix += (Size_t)blockDim.x * gridDim.x) {
        const Size_t idx =
            flatten_idx(ix, iy, iz, p.stride_y_2, p.stride_y_1, p.stride_y_0);
        const Size_t idx0 = flatten_idx(ix, iy, iz, p.stride_x0_2,
                                        p.stride_x0_1, p.stride_x0_0);
        const Size_t idx1 = flatten_idx(ix, iy, iz, p.stride_x1_2,
                                        p.stride_x1_1, p.stride_x1_0);
        if (term == Term::x0) {
          dx[idx0] = static_cast<PRECISE_T>(dx[idx0]) +
                     op.g0(static_cast<PRECISE_T>(dy[idx]),
                           static_cast<PRECISE_T>(x0[idx0]),
                           static_cast<PRECISE_T>(x1[idx1]),
                           static_cast<PRECISE_T>(y[idx]), inplace);
        } else {
          dx[idx1] = static_cast<PRECISE_T>(dx[idx1]) +
                     op.g1(static_cast<PRECISE_T>(dy[idx]),
                           static_cast<PRECISE_T>(x0[idx0]),
                           static_cast<PRECISE_T>(x1[idx1]),
                           static_cast<PRECISE_T>(y[idx]), inplace);
        }
      }
    }
  }
}
// The case where both terms are not broadcasted. This reduces the
// computational complexity of the index calculation because the problem is
// compressed to one dimension (size == p.shape_y_2); the gradient is
// accumulated straight into dx without atomics or reductions.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
__global__ void kernel_backward_dim3_not_broadcasted_both_terms(
    const Size_t size, BinaryOp op, const T *dy, const T *x0, const T *x1,
    const T *y, T *dx, const bool inplace, const Dim3KernelParams p) {
  TRANSFORM_BINARY_CUDA_KERNEL_LOOP(ix, size) { // size == p.shape_y_2
    const Size_t idx = ix * p.stride_y_2;
    const Size_t idx0 = ix * p.stride_x0_2;
    const Size_t idx1 = ix * p.stride_x1_2;
    // Promote operands once, then accumulate into the selected term.
    const PRECISE_T g = static_cast<PRECISE_T>(dy[idx]);
    const PRECISE_T a = static_cast<PRECISE_T>(x0[idx0]);
    const PRECISE_T b = static_cast<PRECISE_T>(x1[idx1]);
    const PRECISE_T out = static_cast<PRECISE_T>(y[idx]);
    if (term == Term::x0) {
      dx[idx0] =
          static_cast<PRECISE_T>(dx[idx0]) + op.g0(g, a, b, out, inplace);
    } else {
      dx[idx1] =
          static_cast<PRECISE_T>(dx[idx1]) + op.g1(g, a, b, out, inplace);
    }
  }
}
// ----------------------------------------------------------------------------
// Backward kernels of x-axis reduction for three dimensions
// ----------------------------------------------------------------------------
// Sub-routine of x-axis reduction: block-wide tree reduction over the shared
// buffer `buf` (blockSize elements, already written and synchronized by the
// caller). `blockSize` must equal blockDim.x; the dispatch in
// backward_impl_dim3 instantiates it with 512/256/128/64/32.
// Only the value returned to tid == 0 holds the complete block sum; other
// threads receive partial values.
// NOTE(review): warp 0 may still be reading `buf` when the other warps
// return, so callers that reuse `buf` must issue __syncthreads() first.
template <typename PRECISE_T, Size_t blockSize>
__device__ PRECISE_T kernel_backward_dim3_block_reduce_x(Size_t tid,
                                                         PRECISE_T *buf) {
  // Fold the upper half onto the lower half, halving the active range each
  // step until 64 elements remain.
  if (blockSize >= 512 && tid < 256) {
    buf[tid] += buf[tid + 256];
  }
  __syncthreads();
  if (blockSize >= 256 && tid < 128) {
    buf[tid] += buf[tid + 128];
  }
  __syncthreads();
  if (blockSize >= 128 && tid < 64) {
    buf[tid] += buf[tid + 64];
  }
  __syncthreads();
  // warp reduce
  PRECISE_T sum = buf[tid];
  if (tid < 32) {
    if (blockSize >= 64) {
      sum += buf[tid + 32];
    }
    // Register-level reduction within warp 0; all 32 lanes participate, so
    // the full mask is correct.
    sum += __shfl_down_sync(0xffffffff, sum, 16);
    sum += __shfl_down_sync(0xffffffff, sum, 8);
    sum += __shfl_down_sync(0xffffffff, sum, 4);
    sum += __shfl_down_sync(0xffffffff, sum, 2);
    sum += __shfl_down_sync(0xffffffff, sum, 1);
  }
  return sum;
}
// x-axis reduction after z-axis reduction.
// `src` is the PRECISE_T intermediate produced by the z reduction, with
// logical shape (y_size, x_size); each block sums one row along x and
// accumulates the total into dst[y].
// Launch contract: blockDim.x == blockSize and dynamic shared memory holds
// blockSize PRECISE_T elements.
template <typename T, typename PRECISE_T, Size_t blockSize>
__global__ void kernel_backward_dim3_reduce_x_after_z(const PRECISE_T *src,
                                                      T *dst,
                                                      const Size_t x_size,
                                                      const Size_t y_size) {
  const Size_t tid = threadIdx.x;
  const Size_t bid_y = blockIdx.y; // "bid" means block ID.
  extern __shared__ PRECISE_T sbuf[];
  for (Size_t y = bid_y; y < y_size; y += (Size_t)gridDim.y) {
    // Per-thread partial sum along the x axis.
    PRECISE_T sum = 0;
    for (Size_t x = tid; x < x_size; x += blockSize) {
      sum += (PRECISE_T)src[x + y * x_size];
    }
    sbuf[tid] = sum;
    __syncthreads();
    sum = kernel_backward_dim3_block_reduce_x<PRECISE_T, blockSize>(tid, sbuf);
    if (tid == 0) {
      dst[y] = (PRECISE_T)dst[y] + sum;
    }
    // BUGFIX: barrier before the next iteration overwrites sbuf. Without it,
    // threads of other warps can race ahead and write sbuf[tid] while warp 0
    // is still reading sbuf inside the block reduction above.
    __syncthreads();
  }
}
// x-axis reduction: each block reduces the gradient of one (iy, iz) row
// along x. Per-thread partials are combined with a shared-memory block
// reduction and the block total is accumulated into dx, laid out as (z, y).
// Launch contract: blockDim == (blockSize, 1, 1) and dynamic shared memory
// holds blockSize PRECISE_T elements. The early return below is uniform per
// block because blockDim.y == blockDim.z == 1, so the in-loop barriers are
// safe.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term,
          Size_t blockSize>
__global__ void kernel_backward_dim3_reduce_x(
    BinaryOp op, const T *dy, const T *x0, const T *x1, const T *y, T *dx,
    const bool inplace, const Dim3KernelParams p, const Size_t x_size,
    const Size_t y_size, const Size_t z_size) {
  const Size_t tid_x = threadIdx.x;
  const Size_t tid_y = (Size_t)blockIdx.y * blockDim.y + threadIdx.y;
  const Size_t tid_z = (Size_t)blockIdx.z * blockDim.z + threadIdx.z;
  if (tid_y >= y_size || tid_z >= z_size)
    return;
  extern __shared__ PRECISE_T sbuf[];
  for (Size_t iz = tid_z; iz < z_size; iz += (Size_t)blockDim.z * gridDim.z) {
    for (Size_t iy = tid_y; iy < y_size; iy += (Size_t)blockDim.y * gridDim.y) {
      // Per-thread partial gradient sum along x.
      PRECISE_T sum = 0;
      for (Size_t ix = tid_x; ix < x_size; ix += blockSize) {
        const Size_t idx =
            flatten_idx(ix, iy, iz, p.stride_y_2, p.stride_y_1, p.stride_y_0);
        const Size_t idx0 = flatten_idx(ix, iy, iz, p.stride_x0_2,
                                        p.stride_x0_1, p.stride_x0_0);
        const Size_t idx1 = flatten_idx(ix, iy, iz, p.stride_x1_2,
                                        p.stride_x1_1, p.stride_x1_0);
        if (term == Term::x0) {
          sum += op.g0(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        } else {
          sum += op.g1(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        }
      }
      sbuf[tid_x] = sum;
      __syncthreads();
      sum = kernel_backward_dim3_block_reduce_x<PRECISE_T, blockSize>(tid_x,
                                                                      sbuf);
      if (tid_x == 0) {
        dx[iy + iz * y_size] =
            static_cast<PRECISE_T>(dx[iy + iz * y_size]) + sum;
      }
      // BUGFIX: barrier before the next iteration overwrites sbuf. Without
      // it, threads of other warps can write sbuf[tid_x] for the next (iy,
      // iz) while warp 0 is still reading sbuf inside the reduction above.
      __syncthreads();
    }
  }
}
// ----------------------------------------------------------------------------
// Backward kernels of y-axis reduction for three dimensions
// ----------------------------------------------------------------------------
// y-axis reduction.
// Each block owns a slab of y_grid_div consecutive y values (selected by
// blockIdx.y) and sums the gradient over them serially, so the early exit
// only needs to bound-check x and z. One atomic add per (ix, iz) folds the
// partial into dst, laid out as (z, x).
// NOTE(review): callers must zero-initialize dst (or pass the gradient
// buffer directly when accumulating) — this kernel only adds.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term,
          Size_t y_grid_div>
__global__ void kernel_backward_dim3_reduce_y(
    BinaryOp op, const T *dy, const T *x0, const T *x1, const T *y,
    PRECISE_T *dst, const bool inplace, const Dim3KernelParams p,
    const Size_t x_size, const Size_t y_size, const Size_t z_size) {
  const Size_t tid_x = (Size_t)blockIdx.x * blockDim.x + threadIdx.x;
  const Size_t tid_y = (Size_t)blockIdx.y * y_grid_div;
  const Size_t tid_z = (Size_t)blockIdx.z * blockDim.z + threadIdx.z;
  if (tid_x >= x_size || tid_z >= z_size)
    return;
  // Clamp the slab end for the last y block.
  const Size_t yend = min(tid_y + y_grid_div, y_size);
  for (Size_t iz = tid_z; iz < z_size; iz += (Size_t)blockDim.z * gridDim.z) {
    for (Size_t ix = tid_x; ix < x_size; ix += (Size_t)blockDim.x * gridDim.x) {
      PRECISE_T sum = 0;
      for (Size_t iy = tid_y; iy < yend; ++iy) {
        const Size_t idx =
            flatten_idx(ix, iy, iz, p.stride_y_2, p.stride_y_1, p.stride_y_0);
        const Size_t idx0 = flatten_idx(ix, iy, iz, p.stride_x0_2,
                                        p.stride_x0_1, p.stride_x0_0);
        const Size_t idx1 = flatten_idx(ix, iy, iz, p.stride_x1_2,
                                        p.stride_x1_1, p.stride_x1_0);
        if (term == Term::x0) {
          sum += op.g0(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        } else {
          sum += op.g1(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        }
      }
      atomic_add(&dst[ix + iz * x_size], sum);
    }
  }
}
// ----------------------------------------------------------------------------
// Backward kernels of z-axis reduction for three dimensions
// ----------------------------------------------------------------------------
// z-axis reduction.
// Mirror of kernel_backward_dim3_reduce_y for the z axis: each block owns a
// slab of z_grid_div consecutive z values (selected by blockIdx.z), sums the
// gradient over them serially, and folds the partial into dst — laid out as
// (y, x) — with one atomic add per (ix, iy).
// NOTE(review): callers must zero-initialize dst (or pass the gradient
// buffer directly when accumulating) — this kernel only adds.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term,
          Size_t z_grid_div>
__global__ void kernel_backward_dim3_reduce_z(
    BinaryOp op, const T *dy, const T *x0, const T *x1, const T *y,
    PRECISE_T *dst, const bool inplace, const Dim3KernelParams p,
    const Size_t x_size, const Size_t y_size, const Size_t z_size) {
  const Size_t tid_x = (Size_t)blockIdx.x * blockDim.x + threadIdx.x;
  const Size_t tid_y = (Size_t)blockIdx.y * blockDim.y + threadIdx.y;
  const Size_t tid_z = (Size_t)blockIdx.z * z_grid_div;
  if (tid_x >= x_size || tid_y >= y_size)
    return;
  // Clamp the slab end for the last z block.
  const Size_t zend = min(tid_z + z_grid_div, z_size);
  for (Size_t iy = tid_y; iy < y_size; iy += (Size_t)blockDim.y * gridDim.y) {
    for (Size_t ix = tid_x; ix < x_size; ix += (Size_t)blockDim.x * gridDim.x) {
      PRECISE_T sum = 0;
      for (Size_t iz = tid_z; iz < zend; ++iz) {
        const Size_t idx =
            flatten_idx(ix, iy, iz, p.stride_y_2, p.stride_y_1, p.stride_y_0);
        const Size_t idx0 = flatten_idx(ix, iy, iz, p.stride_x0_2,
                                        p.stride_x0_1, p.stride_x0_0);
        const Size_t idx1 = flatten_idx(ix, iy, iz, p.stride_x1_2,
                                        p.stride_x1_1, p.stride_x1_0);
        if (term == Term::x0) {
          sum += op.g0(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        } else {
          sum += op.g1(static_cast<PRECISE_T>(dy[idx]),
                       static_cast<PRECISE_T>(x0[idx0]),
                       static_cast<PRECISE_T>(x1[idx1]),
                       static_cast<PRECISE_T>(y[idx]), inplace);
        }
      }
      atomic_add(&dst[ix + iy * x_size], sum);
    }
  }
}
// ----------------------------------------------------------------------------
// The actual part of forward_impl
// ----------------------------------------------------------------------------
// The actual part of forward_impl
// Dispatches the forward computation: the broadcast-compressed 3D path uses
// the stride-based dim3 kernels with host-side strides/shape; any other
// dimensionality falls back to the generic n-dimensional kernel with
// device-side strides/shape.
template <typename T, typename BinaryOp>
void forward_impl(const Context &ctx, BinaryOp op, const Variables &inputs,
                  const Variables &outputs, const bool inplace,
                  const Size_t ndim, Variable &v_strides_x0,
                  Variable &v_strides_x1, Variable &v_strides_y,
                  Variable &v_shape_y) {
  using PRECISE_T = typename CudaTypeForceFloat<T>::type;
  cuda_set_device(std::stoi(ctx.device_id));
  const auto *x0 = inputs[0]->get_data_pointer<T>(ctx);
  const auto *x1 = inputs[1]->get_data_pointer<T>(ctx);
  auto *y = outputs[0]->cast_data_and_get_pointer<T>(ctx, !inplace);
  if (ndim == 3) {
    // Three-dimensional case
    // SyncedArray::get_data_pointer only request array class by filter_context.
    Context cpu_ctx = Context().set_array_class(
        SingletonManager::get<Cpu>()->array_classes()[0]);
    const auto *stride_x0 = v_strides_x0.get_data_pointer<Size_t>(cpu_ctx);
    const auto *stride_x1 = v_strides_x1.get_data_pointer<Size_t>(cpu_ctx);
    const auto *stride_y = v_strides_y.get_data_pointer<Size_t>(cpu_ctx);
    const auto *shape_y = v_shape_y.get_data_pointer<Size_t>(cpu_ctx);
    // The parameters are passed to kernels as constants.
    Dim3KernelParams params{stride_x0[0], stride_x0[1], stride_x0[2],
                            stride_x1[0], stride_x1[1], stride_x1[2],
                            stride_y[0],  stride_y[1],  stride_y[2],
                            shape_y[0],   shape_y[1],   shape_y[2]};
    const Size_t x_size = shape_y[2];
    const Size_t y_size = shape_y[1];
    const Size_t z_size = shape_y[0];
    const Shape_t shape = {z_size, y_size, x_size};
    // A zero stride on any axis of either input means broadcasting occurs.
    if ((stride_x0[0] == 0) || (stride_x0[1] == 0) || (stride_x0[2] == 0) ||
        (stride_x1[0] == 0) || (stride_x1[1] == 0) || (stride_x1[2] == 0)) {
      // Broadcast
      dim3 blockDim = get_blocks_dim3<T, BinaryOp>(shape, 3);
      dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(
          NBLA_CEIL_SIZE_T_DIV(x_size, blockDim.x),
          NBLA_CEIL_SIZE_T_DIV(y_size, blockDim.y),
          NBLA_CEIL_SIZE_T_DIV(z_size, blockDim.z));
      kernel_forward_dim3<T, PRECISE_T, BinaryOp><<<gridDim, blockDim>>>(
          op, x0, x1, y, params);
      NBLA_CUDA_KERNEL_CHECK();
    } else {
      // Not broadcast
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          (kernel_forward_dim3_not_broadcasted_both_terms<T, PRECISE_T,
                                                          BinaryOp>),
          shape[2], op, x0, x1, y, params);
    }
  } else {
    // Otherwise
    // setup_impl guarantees data of four or more dimensions here.
    const auto size = outputs[0]->size();
    const auto *stride_x0 = v_strides_x0.get_data_pointer<Size_t>(ctx);
    const auto *stride_x1 = v_strides_x1.get_data_pointer<Size_t>(ctx);
    const auto *stride_y = v_strides_y.get_data_pointer<Size_t>(ctx);
    const auto *shape_y = v_shape_y.get_data_pointer<Size_t>(ctx);
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_forward_ndim<T, PRECISE_T, BinaryOp>), size, op, x0, x1, y,
        ndim, stride_x0, stride_x1, stride_y, shape_y);
  }
}
// ----------------------------------------------------------------------------
// The actual part of backward_impl
// ----------------------------------------------------------------------------
// This function issues the kernel without reduction.
// The differentiated term is not broadcasted, so no reduction is needed; the
// kernel choice only depends on whether the *other* term is broadcasted
// (which decides if the compressed problem is 3D or 1D).
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
void backward_impl_dim3_without_reduction(
    BinaryOp op, const T *dy, const T *x0, const T *x1, const T *y, T *dx,
    const bool inplace, const Dim3KernelParams &params, const Size_t x_size,
    const Size_t y_size, const Size_t z_size,
    const bool broadcasted_the_other_term) {
  const auto shape = Shape_t{z_size, y_size, x_size};
  if (broadcasted_the_other_term) {
    // The other term is broadcasted.
    dim3 blockDim = get_blocks_dim3<T, BinaryOp>(shape, 3);
    dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(
        NBLA_CEIL_SIZE_T_DIV(x_size, blockDim.x),
        NBLA_CEIL_SIZE_T_DIV(y_size, blockDim.y),
        NBLA_CEIL_SIZE_T_DIV(z_size, blockDim.z));
    kernel_backward_dim3_broadcasted_other_term<T, PRECISE_T, BinaryOp,
                                                term><<<gridDim, blockDim>>>(
        op, dy, x0, x1, y, dx, inplace, params);
    NBLA_CUDA_KERNEL_CHECK();
  } else {
    // The other term is not broadcasted too. The computation becomes easier.
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_backward_dim3_not_broadcasted_both_terms<T, PRECISE_T, BinaryOp,
                                                         term>),
        shape[2], op, dy, x0, x1, y, dx, inplace, params);
  }
}
// This function issues the kernel of x-axis reduction.
// x-axis reduction requires a more complicated algorithm than y and z axes
// because of memory layout: a shared-memory block reduction is used instead
// of per-thread atomics. When a z-axis reduction already ran, `z_reduced_buff`
// holds its PRECISE_T result and only that buffer is reduced along x;
// otherwise the gradient itself is reduced.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term,
          unsigned int blockSize>
void backward_impl_dim3_reduce_x(BinaryOp op, const T *dy, const T *x0,
                                 const T *x1, const T *y,
                                 const PRECISE_T *z_reduced_buff, T *dx,
                                 const bool inplace,
                                 const Dim3KernelParams &params,
                                 const Size_t x_size, const Size_t y_size,
                                 const Size_t z_size) {
  dim3 blockDim(blockSize);
  // Dynamic shared memory: one PRECISE_T slot per thread for the reduction.
  const size_t smem_size = blockSize * sizeof(PRECISE_T);
  if (z_reduced_buff) {
    // z axis is reduced previously.
    dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(1, y_size, 1);
    kernel_backward_dim3_reduce_x_after_z<
        T, PRECISE_T, blockSize><<<gridDim, blockDim, smem_size>>>(
        z_reduced_buff, dx, x_size, y_size);
    NBLA_CUDA_KERNEL_CHECK();
  } else {
    // x axis is only reduced.
    dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(1, y_size, z_size);
    kernel_backward_dim3_reduce_x<T, PRECISE_T, BinaryOp, term,
                                  blockSize><<<gridDim, blockDim, smem_size>>>(
        op, dy, x0, x1, y, dx, inplace, params, x_size, y_size, z_size);
    NBLA_CUDA_KERNEL_CHECK();
  }
}
// This function issues the kernel of y-axis reduction.
// When T already is the precise type, the kernel accumulates into dx
// directly; otherwise a zero-initialized PRECISE_T buffer receives the
// atomic partial sums and kernel_precise_add folds it back into dx to
// preserve precision.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
void backward_impl_dim3_reduce_y(const Context &ctx, BinaryOp op, const T *dy,
                                 const T *x0, const T *x1, const T *y, T *dx,
                                 const bool inplace,
                                 const Dim3KernelParams &params,
                                 const Size_t x_size, const Size_t y_size,
                                 const Size_t z_size) {
  dim3 blockDim =
      get_blocks_dim3<T, BinaryOp>(Shape_t{z_size, y_size, x_size}, 1);
  // Each y block serially reduces TRANSFORM_BINARY_CUDA_GRID_DIV consecutive
  // y values, so the y grid extent is divided by that factor.
  dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(
      NBLA_CEIL_SIZE_T_DIV(x_size, blockDim.x),
      NBLA_CEIL_SIZE_T_DIV(y_size, TRANSFORM_BINARY_CUDA_GRID_DIV),
      NBLA_CEIL_SIZE_T_DIV(z_size, blockDim.z));
  if (is_same<T, PRECISE_T>::value) {
    kernel_backward_dim3_reduce_y<
        T, T, BinaryOp, term,
        TRANSFORM_BINARY_CUDA_GRID_DIV><<<gridDim, blockDim>>>(
        op, dy, x0, x1, y, dx, inplace, params, x_size, y_size, z_size);
    NBLA_CUDA_KERNEL_CHECK();
  } else {
    // Intermediate (z, 1, x)-shaped buffer; zeroed because the kernel only
    // performs atomic adds.
    const auto dx_shape = Shape_t{z_size, 1, x_size};
    const auto dx_size = z_size * x_size;
    NdArray tmp_arr(dx_shape);
    tmp_arr.zero();
    auto tmp = tmp_arr.cast(get_dtype<PRECISE_T>(), ctx)->pointer<PRECISE_T>();
    kernel_backward_dim3_reduce_y<
        T, PRECISE_T, BinaryOp, term,
        TRANSFORM_BINARY_CUDA_GRID_DIV><<<gridDim, blockDim>>>(
        op, dy, x0, x1, y, tmp, inplace, params, x_size, y_size, z_size);
    NBLA_CUDA_KERNEL_CHECK();
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_precise_add<T, PRECISE_T, true>),
                                   dx_size, dx, tmp);
  }
}
// This function issues the kernel of z-axis reduction
// Issues the kernel of z-axis reduction.
// Returns a pointer into `tmp_arr`'s PRECISE_T buffer holding the z-reduced
// partial result when a subsequent x-axis reduction still has to run
// (reduce_x == true); returns nullptr when the result has already been
// written into dx.
template <typename T, typename PRECISE_T, typename BinaryOp, Term term>
PRECISE_T *backward_impl_dim3_reduce_z(
    const Context &ctx, BinaryOp op, const T *dy, const T *x0, const T *x1,
    const T *y, NdArray &tmp_arr, T *dx, const bool inplace,
    const Dim3KernelParams &params, const Size_t x_size, const Size_t y_size,
    const Size_t z_size, const bool reduce_x) {
  dim3 blockDim =
      get_blocks_dim3<T, BinaryOp>(Shape_t{z_size, y_size, x_size}, 2);
  // Each z block serially reduces TRANSFORM_BINARY_CUDA_GRID_DIV consecutive
  // z values.
  dim3 gridDim = get_strided_grids_dim3<T, BinaryOp>(
      NBLA_CEIL_SIZE_T_DIV(x_size, blockDim.x),
      NBLA_CEIL_SIZE_T_DIV(y_size, blockDim.y),
      NBLA_CEIL_SIZE_T_DIV(z_size, TRANSFORM_BINARY_CUDA_GRID_DIV));
  if (is_same<T, PRECISE_T>::value && !reduce_x) {
    // Accumulate straight into dx; no intermediate buffer is needed.
    kernel_backward_dim3_reduce_z<
        T, T, BinaryOp, term,
        TRANSFORM_BINARY_CUDA_GRID_DIV><<<gridDim, blockDim>>>(
        op, dy, x0, x1, y, dx, inplace, params, x_size, y_size, z_size);
    NBLA_CUDA_KERNEL_CHECK();
    return nullptr;
  } else {
    // Intermediate (1, y, x)-shaped buffer; zeroed because the kernel only
    // performs atomic adds.
    const auto dx_shape = Shape_t{1, y_size, x_size};
    const auto dx_size = y_size * x_size;
    tmp_arr.reshape(dx_shape, true);
    tmp_arr.zero();
    auto tmp = tmp_arr.cast(get_dtype<PRECISE_T>(), ctx)->pointer<PRECISE_T>();
    kernel_backward_dim3_reduce_z<
        T, PRECISE_T, BinaryOp, term,
        TRANSFORM_BINARY_CUDA_GRID_DIV><<<gridDim, blockDim>>>(
        op, dy, x0, x1, y, tmp, inplace, params, x_size, y_size, z_size);
    NBLA_CUDA_KERNEL_CHECK();
    if (reduce_x) {
      return tmp; // return the intermediate buffer.
    } else {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_precise_add<T, PRECISE_T, true>),
                                     dx_size, dx, tmp);
      return nullptr;
    }
  }
}
// Three dimensional case of backward_impl.
// Dispatches the gradient computation for `term` to the specialized kernel
// matching which axes of this term are broadcasted: a zero stride on an axis
// means the term was broadcasted along it and that axis must be reduced.
template <class T, typename PRECISE_T, typename BinaryOp, Term term>
void backward_impl_dim3(const Context &ctx, BinaryOp op, const T *dy,
                        const T *x0, const T *x1, const T *y, T *dx,
                        const bool inplace,
                        const Size_t *stride_x0, /* host pointer */
                        const Size_t *stride_x1, /* host pointer */
                        const Size_t *stride_y,  /* host pointer */
                        const Size_t *shape_y /* host pointer */) {
  const auto x_size = shape_y[2];
  const auto y_size = shape_y[1];
  const auto z_size = shape_y[0];
  const auto shape = Shape_t{z_size, y_size, x_size};
  // Strides of the term being differentiated vs. the other input term.
  const auto stride = (term == Term::x0) ? stride_x0 : stride_x1;
  const auto the_other_stride = (term != Term::x0) ? stride_x0 : stride_x1;
  const bool reduce_x = (stride[2] == 0);
  const bool reduce_y = (stride[1] == 0);
  const bool reduce_z = (stride[0] == 0);
  const bool broadcasted_the_other_term =
      ((the_other_stride[0] == 0) || (the_other_stride[1] == 0) ||
       (the_other_stride[2] == 0));
  // The parameters are passed to kernels as constants.
  const Dim3KernelParams params{stride_x0[0], stride_x0[1], stride_x0[2],
                                stride_x1[0], stride_x1[1], stride_x1[2],
                                stride_y[0],  stride_y[1],  stride_y[2],
                                shape_y[0],   shape_y[1],   shape_y[2]};
  if (!reduce_x && !reduce_y && !reduce_z) {
    // This term is not broadcasted. Reduction is not required.
    backward_impl_dim3_without_reduction<T, PRECISE_T, BinaryOp, term>(
        op, dy, x0, x1, y, dx, inplace, params, x_size, y_size, z_size,
        broadcasted_the_other_term);
  } else if (reduce_y) {
    // This term is broadcasted along y-axis. Reduce y-axis
    backward_impl_dim3_reduce_y<T, PRECISE_T, BinaryOp, term>(
        ctx, op, dy, x0, x1, y, dx, inplace, params, x_size, y_size, z_size);
  } else {
    // This term is broadcasted along x, z, or (x and z) axis.
    // Reduce them respectively. An intermediate buffer is used when
    // (x and z)-axis reduction as PRECISE_T to preserve precision.
    NdArray tmp_arr;
    PRECISE_T *tmp = nullptr;
    if (reduce_z) {
      // When reduce_x is also set, this returns the z-reduced intermediate
      // buffer (owned by tmp_arr) for the x reduction below; otherwise it
      // writes into dx and returns nullptr.
      tmp = backward_impl_dim3_reduce_z<T, PRECISE_T, BinaryOp, term>(
          ctx, op, dy, x0, x1, y, tmp_arr, dx, inplace, params, x_size, y_size,
          z_size, reduce_x);
    }
    if (reduce_x) {
      // Dispatch the compile-time blockSize expected by the reduction kernel
      // (512 down to 32).
      dim3 blockDim = get_blocks_dim3<T, BinaryOp>(shape, 1);
      NBLA_CHECK(NBLA_CUDA_NUM_THREADS == 512, error_code::value,
                 "The CUDA kernel to reduce x-axis in base_transform_binary "
                 "assumes NBLA_CUDA_NUM_THREADS == 512.");
      if (blockDim.x == 512) {
        backward_impl_dim3_reduce_x<T, PRECISE_T, BinaryOp, term, 512>(
            op, dy, x0, x1, y, tmp, dx, inplace, params, x_size, y_size,
            z_size);
      } else if (blockDim.x == 256) {
        backward_impl_dim3_reduce_x<T, PRECISE_T, BinaryOp, term, 256>(
            op, dy, x0, x1, y, tmp, dx, inplace, params, x_size, y_size,
            z_size);
      } else if (blockDim.x == 128) {
        backward_impl_dim3_reduce_x<T, PRECISE_T, BinaryOp, term, 128>(
            op, dy, x0, x1, y, tmp, dx, inplace, params, x_size, y_size,
            z_size);
      } else if (blockDim.x == 64) {
        backward_impl_dim3_reduce_x<T, PRECISE_T, BinaryOp, term, 64>(
            op, dy, x0, x1, y, tmp, dx, inplace, params, x_size, y_size,
            z_size);
      } else { // blockDim.x <= 32
        blockDim.x = 32;
        backward_impl_dim3_reduce_x<T, PRECISE_T, BinaryOp, term, 32>(
            op, dy, x0, x1, y, tmp, dx, inplace, params, x_size, y_size,
            z_size);
      }
    }
  }
}
// Any dimensional case of backward_impl.
// Computes the gradient for `term` with the generic n-dimensional kernel.
// The gradient buffer is zeroed first when accum is false, and the kernel
// accumulates into it with atomic adds; a PRECISE_T intermediate buffer is
// used when T is not already the precise type.
template <class T, typename PRECISE_T, typename BinaryOp, Term term>
void backward_impl_ndim(const Size_t size, const Context &ctx, BinaryOp op,
                        const T *dy, const T *x0, const T *x1, const T *y,
                        Variable *v_x, const bool accum, const bool inplace,
                        const Size_t ndim,
                        const Size_t *stride_x0, /* device pointer */
                        const Size_t *stride_x1, /* device pointer */
                        const Size_t *stride_y,  /* device pointer */
                        const Size_t *shape_y /* device pointer */) {
  if (is_same<T, PRECISE_T>::value) {
    if (!accum)
      v_x->grad()->zero();
    auto *dx = v_x->cast_grad_and_get_pointer<T>(ctx);
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_backward_ndim<T, T, BinaryOp, term>),
                                   size, op, dy, x0, x1, y, dx, inplace, ndim,
                                   stride_x0, stride_x1, stride_y, shape_y);
  } else {
    // Intermediate buffer to preserve precision.
    NdArray tmp_arr(v_x->shape());
    tmp_arr.zero();
    auto *tmp = tmp_arr.cast(get_dtype<PRECISE_T>(), ctx)->pointer<PRECISE_T>();
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
        (kernel_backward_ndim<T, PRECISE_T, BinaryOp, term>), size, op, dy, x0,
        x1, y, tmp, inplace, ndim, stride_x0, stride_x1, stride_y, shape_y);
    // Fold the precise buffer back into the T-typed gradient; kernel_precise_add
    // either accumulates (accum) or overwrites (!accum).
    auto *dx = v_x->cast_grad_and_get_pointer<T>(ctx, !accum);
    if (accum) {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_precise_add<T, PRECISE_T, true>),
                                     v_x->size(), dx, tmp);
    } else {
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_precise_add<T, PRECISE_T, false>),
                                     v_x->size(), dx, tmp);
    }
  }
}
// The actual part of backward_impl.
// Computes dx0 and/or dx1 as requested by propagate_down, mirroring
// forward_impl's dispatch: host-side strides feed the specialized dim3 path;
// other dimensionalities use the generic n-dimensional path.
template <class T, typename BinaryOp>
void backward_impl(const Context &ctx, BinaryOp op, const Variables &inputs,
                   const Variables &outputs, const vector<bool> &propagate_down,
                   const vector<bool> &accum, const bool inplace,
                   const Size_t ndim, Variable &v_strides_x0,
                   Variable &v_strides_x1, Variable &v_strides_y,
                   Variable &v_shape_y) {
  if (!(propagate_down[0] || propagate_down[1]))
    return;
  cuda_set_device(std::stoi(ctx.device_id));
  using PRECISE_T = typename CudaTypeForceFloat<T>::type;
  const auto *dy = outputs[0]->get_grad_pointer<T>(ctx);
  const auto *x0 = inputs[0]->get_data_pointer<T>(ctx);
  const auto *x1 = inputs[1]->get_data_pointer<T>(ctx);
  const auto *y = outputs[0]->get_data_pointer<T>(ctx);
  if (ndim == 3) {
    // Three-dimensional case
    // SyncedArray::get_data_pointer only request array class by filter_context.
    Context cpu_ctx = Context().set_array_class(
        SingletonManager::get<Cpu>()->array_classes()[0]);
    const auto *stride_x0 = v_strides_x0.get_data_pointer<Size_t>(cpu_ctx);
    const auto *stride_x1 = v_strides_x1.get_data_pointer<Size_t>(cpu_ctx);
    const auto *stride_y = v_strides_y.get_data_pointer<Size_t>(cpu_ctx);
    const auto *shape_y = v_shape_y.get_data_pointer<Size_t>(cpu_ctx);
    if (propagate_down[0]) { // dx0
      // verify_g0 asserts at compile time that the op defines a gradient.
      op.verify_g0();
      if (!accum[0])
        inputs[0]->grad()->zero();
      auto *dx0 = inputs[0]->cast_grad_and_get_pointer<T>(ctx);
      backward_impl_dim3<T, PRECISE_T, BinaryOp, Term::x0>(
          ctx, op, dy, x0, x1, y, dx0, inplace, stride_x0, stride_x1, stride_y,
          shape_y);
    }
    if (propagate_down[1]) { // dx1
      op.verify_g1();
      if (!accum[1])
        inputs[1]->grad()->zero();
      auto *dx1 = inputs[1]->cast_grad_and_get_pointer<T>(ctx);
      backward_impl_dim3<T, PRECISE_T, BinaryOp, Term::x1>(
          ctx, op, dy, x0, x1, y, dx1, inplace, stride_x0, stride_x1, stride_y,
          shape_y);
    }
  } else {
    // Otherwise
    // setup_impl guarantees data of four or more dimensions here.
    const auto *stride_x0 = v_strides_x0.get_data_pointer<Size_t>(ctx);
    const auto *stride_x1 = v_strides_x1.get_data_pointer<Size_t>(ctx);
    const auto *stride_y = v_strides_y.get_data_pointer<Size_t>(ctx);
    const auto *shape_y = v_shape_y.get_data_pointer<Size_t>(ctx);
    const auto size_y = outputs[0]->size();
    if (propagate_down[0]) { // dx0
      op.verify_g0();
      backward_impl_ndim<T, PRECISE_T, BinaryOp, Term::x0>(
          size_y, ctx, op, dy, x0, x1, y, inputs[0], accum[0], inplace, ndim,
          stride_x0, stride_x1, stride_y, shape_y);
    }
    if (propagate_down[1]) { // dx1
      op.verify_g1();
      backward_impl_ndim<T, PRECISE_T, BinaryOp, Term::x1>(
          size_y, ctx, op, dy, x0, x1, y, inputs[1], accum[1], inplace, ndim,
          stride_x0, stride_x1, stride_y, shape_y);
    }
  }
}
} // end of namespace "nbla::transform_binary_cuda"
// ----------------------------------------------------------------------------
// Common
// ----------------------------------------------------------------------------
// Declares the binary-operator functor class head for NAME (the body follows
// at the expansion site). Derives from BaseBinaryOpCuda.
#define NBLA_DEFINE_BINARY_OP_CUDA_CLASS(NAME) \
  class NAME##BinaryOpCuda : public BaseBinaryOpCuda
// Defines the device-side forward functor: operator()(x0, x1) returning OP.
#define NBLA_DEFINE_BINARY_OP_CUDA_FORWARD(OP) \
  template <typename T> \
  __forceinline__ __device__ T operator()(const T x0, const T x1) { \
    return OP; \
  }
// Defines the device-side gradient g<NUM>(dy, x0, x1, y, inplace) returning
// GOP, plus an empty host-side verify_g<NUM>() whose presence lets
// backward_impl fail to compile for ops that define no gradient.
#define NBLA_DEFINE_BINARY_OP_CUDA_BACKWARD(NUM, GOP) \
  template <typename T> \
  __forceinline__ __device__ T g##NUM(const T dy, const T x0, const T x1, \
                                      const T y, const bool inplace) { \
    return GOP; \
  } \
  __host__ void verify_g##NUM() {}
// Instantiates NAME##Cuda<T>::forward_impl / backward_impl by delegating to
// the generic transform_binary_cuda implementations with NAME's op functor.
#define NBLA_DEFINE_TRANSFORM_BINARY_CUDA_FORWARD_BACKWARD(NAME) \
  template <typename T> \
  void NAME##Cuda<T>::forward_impl(const Variables &inputs, \
                                   const Variables &outputs) { \
    transform_binary_cuda::forward_impl<typename CudaType<T>::type>( \
        this->ctx_, NAME##BinaryOpCuda(this->args_), inputs, outputs, \
        this->inplace_, this->compressed_ndim_, this->strides_x0_, \
        this->strides_x1_, this->strides_y_, this->shape_y_); \
  } \
  \
  template <typename T> \
  void NAME##Cuda<T>::backward_impl( \
      const Variables &inputs, const Variables &outputs, \
      const vector<bool> &propagate_down, const vector<bool> &accum) { \
    transform_binary_cuda::backward_impl<typename CudaType<T>::type>( \
        this->ctx_, NAME##BinaryOpCuda(this->args_), inputs, outputs, \
        propagate_down, accum, this->inplace_, this->compressed_ndim_, \
        this->strides_x0_, this->strides_x1_, this->strides_y_, \
        this->shape_y_); \
  }
// grad_depends_output_data: whether input i's gradient needs output o's data.
#define NBLA_DEFINE_BINARY_GRAD_DEPENDS_OUTPUT_DATA(NAME, DEP_Y_0, DEP_Y_1) \
  template <typename T> \
  bool NAME##Cuda<T>::grad_depends_output_data(int i, int o) const { \
    if (i == 0) \
      return DEP_Y_0; \
    return DEP_Y_1; \
  };
// grad_depends_input_data: whether input i's gradient needs input j's data.
#define NBLA_DEFINE_BINARY_GRAD_DEPENDS_INPUT_DATA(NAME, DEP_X_0, DEP_X_1) \
  template <typename T> \
  bool NAME##Cuda<T>::grad_depends_input_data_impl(int i, int j) const { \
    if (i == 0) \
      return DEP_X_0; \
    return DEP_X_1; \
  };
// ----------------------------------------------------------------------------
// Zero argument
// ----------------------------------------------------------------------------
#define NBLA_DEFINE_BINARY_OP_CUDA_NO_GRAD(NAME, OP) \
NBLA_DEFINE_BINARY_OP_CUDA_CLASS(NAME) { \
public: \
__inline__ __host__ __device__ NAME##BinaryOpCuda(const tuple<> &dummy) {} \
NBLA_DEFINE_BINARY_OP_CUDA_FORWARD(OP) \
}
#define NBLA_DEFINE_BINARY_OP_CUDA(NAME, OP, GOP0, GOP1) \
NBLA_DEFINE_BINARY_OP_CUDA_CLASS(NAME) { \
public: \
__inline__ __host__ __device__ NAME##BinaryOpCuda(const tuple<> &dummy) {} \
NBLA_DEFINE_BINARY_OP_CUDA_FORWARD(OP) \
NBLA_DEFINE_BINARY_OP_CUDA_BACKWARD(0, GOP0) \
NBLA_DEFINE_BINARY_OP_CUDA_BACKWARD(1, GOP1) \
}
#define NBLA_DEFINE_TRANSFORM_BINARY_CUDA_NO_GRAD(NAME, OP) \
NBLA_DEFINE_BINARY_OP_CUDA_NO_GRAD(NAME, OP); \
NBLA_DEFINE_TRANSFORM_BINARY_CUDA_FORWARD_BACKWARD(NAME) \
NBLA_DEFINE_BINARY_GRAD_DEPENDS_OUTPUT_DATA(NAME, false, false) \
NBLA_DEFINE_BINARY_GRAD_DEPENDS_INPUT_DATA(NAME, false, false)
// Top-level convenience macro for a differentiable, zero-argument binary
// transform: functor, forward/backward wiring, and the four grad-dependency
// flags (output data for inputs 0/1, input data for inputs 0/1).
#define NBLA_DEFINE_TRANSFORM_BINARY_CUDA(NAME, OP, GOP0, GOP1, DEP_Y_0,       \
                                          DEP_Y_1, DEP_X_0, DEP_X_1)           \
  NBLA_DEFINE_BINARY_OP_CUDA(NAME, OP, GOP0, GOP1);                            \
  NBLA_DEFINE_TRANSFORM_BINARY_CUDA_FORWARD_BACKWARD(NAME)                     \
  NBLA_DEFINE_BINARY_GRAD_DEPENDS_OUTPUT_DATA(NAME, DEP_Y_0, DEP_Y_1)          \
  NBLA_DEFINE_BINARY_GRAD_DEPENDS_INPUT_DATA(NAME, DEP_X_0, DEP_X_1)
// ----------------------------------------------------------------------------
// One argument
// ----------------------------------------------------------------------------
// Declares a one-argument binary op functor: member a0 of type A0 is captured
// from the tuple at construction and is visible inside OP/GOP0/GOP1.
// NOTE(review): unlike the zero-argument variant, the ctor here is not marked
// __host__ __device__ (std::get on a tuple is host-side) — confirm intended.
#define NBLA_DEFINE_BINARY_OP_CUDA_1(NAME, OP, GOP0, GOP1, A0)                 \
  NBLA_DEFINE_BINARY_OP_CUDA_CLASS(NAME) {                                     \
  public:                                                                      \
    A0 a0;                                                                     \
    __inline__ NAME##BinaryOpCuda(const tuple<A0> &args)                       \
        : a0(std::get<0>(args)) {}                                             \
    NBLA_DEFINE_BINARY_OP_CUDA_FORWARD(OP)                                     \
    NBLA_DEFINE_BINARY_OP_CUDA_BACKWARD(0, GOP0)                               \
    NBLA_DEFINE_BINARY_OP_CUDA_BACKWARD(1, GOP1)                               \
  }
// Top-level convenience macro for a differentiable binary transform that takes
// one extra scalar argument of type A0 (e.g. a constant operand or exponent).
#define NBLA_DEFINE_TRANSFORM_BINARY_CUDA_1(NAME, OP, GOP0, GOP1, DEP_Y_0,     \
                                            DEP_Y_1, DEP_X_0, DEP_X_1, A0)     \
  NBLA_DEFINE_BINARY_OP_CUDA_1(NAME, OP, GOP0, GOP1, A0);                      \
  NBLA_DEFINE_TRANSFORM_BINARY_CUDA_FORWARD_BACKWARD(NAME)                     \
  NBLA_DEFINE_BINARY_GRAD_DEPENDS_OUTPUT_DATA(NAME, DEP_Y_0, DEP_Y_1)          \
  NBLA_DEFINE_BINARY_GRAD_DEPENDS_INPUT_DATA(NAME, DEP_X_0, DEP_X_1)
}
#endif | the_stack |
//#include <sm_utils.inl>
#include <strided_reduction.h>
#include <math.h> //std::pow
#include <matrix_coloring/coloring_utils.h>
#include <map>
#include <thrust/sort.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <memory_intrinsics.h>
#include <thrust/replace.h>
#include <thrust_wrapper.h>
#include <algorithm>
// Strategies for counting how many rows got colored in one sweep.
#define GET_COLORS_THRUST 0
#define GET_COLORS_TWOPASS 1
#define GET_COLORS_ATOMIC 2
#define GET_COLORS_GATHER 3
// Strategies for finding the maximum assigned color.
#define GETMAX_THRUST 0
#define GETMAX_TWOPASS 1
//CONFIG
// Active strategy selections (the commented alternatives can be swapped in).
#define GET_COLORS GET_COLORS_TWOPASS
//#define GET_COLORS GET_COLORS_THRUST
#define GETMAX_COLOR GETMAX_TWOPASS
//#define GETMAX_COLOR GETMAX_THRUST
//#define DO_SORT 1
// USE_GTLT selects the alternative gt/lt-count coloring path (currently off).
#define USE_GTLT 0
// When on, already-colored neighbours are skipped during hash comparison.
#define DISCARD_COLORED (1 && !USE_GTLT)
// When on, colorMatrix returns right after the first (multi-hash) pass.
#define ONLY_FIRSTSTEP 0
namespace amgx
{
//normal hash function
//Seeded 32-bit integer mixer (Bob-Jenkins-style add/xor/shift rounds).
//Deterministic for a given (a, seed) pair; callable from host and device.
__host__ __device__ __forceinline__ unsigned int hash2(unsigned int a, const unsigned int seed)
{
    unsigned int x = a ^ seed;
    x = (x + 0x7ed55d16) + (x << 12);
    x = (x ^ 0xc761c23c) + (x >> 19);
    x = (x + 0x165667b1) + (x << 5);
    x = (x ^ 0xd3a2646c) + (x << 9);
    x = (x + 0xfd7046c5) + (x << 3);
    x = (x ^ 0xb55a4f09) + (x >> 16);
    return x;
}
//rehashes a given hash (h), rotating its bits.
//Derives the k-th variant of a base hash h by xoring its left- and
//right-shifted images. NOTE(review): parameters `a` and `seed` are unused,
//and for k == 0 this returns (h ^ h) == 0 for every input, so round 0
//degenerates to the callers' row/column-index tie-break — presumably
//intentional, but worth confirming. Shift counts k >= 32 are formally UB in
//C++; the expression is kept byte-identical to preserve compiled behavior.
__host__ __device__ __forceinline__ unsigned int rehash(const unsigned int &a, const unsigned int h, const unsigned int k, const unsigned int seed)
{
    const unsigned int left_part  = (h << k);
    const unsigned int right_part = (h >> k);
    return left_part ^ right_part;
}
//compute hashes for a column, compare with the row's hashes and the updated bitfield is_max/is_min for the row.
//For one (row, column) pair, evaluates HASHES rehashed values for both
//endpoints and clears bit (63 - k) of `is_max` (resp. `is_min`) whenever the
//column's k-th hash beats (resp. loses to) the row's k-th hash, with equal
//hashes broken by the smaller/larger vertex index. A bit that stays set means
//the row is a local extremum for that hash round.
template<int HASHES>
__device__ __forceinline__ void compute_hashes(const int &row_id, const int &col_id, const int &base_row_hash, const int &base_col_hash, const int &seed, unsigned long long int &is_min, unsigned long long int &is_max)
{
#pragma unroll
    for (int k = 0; k < HASHES; k++)
    {
        const int col_hash = rehash(col_id, base_col_hash, k, seed);
        const int row_hash = rehash(row_id, base_row_hash, k, seed);
        const unsigned long long clear_mask = ~(1ull << (63 - k)); //zeroes the bit tracking round k
        const bool col_beats_row = (col_hash > row_hash) || (col_hash == row_hash && row_id < col_id);
        const bool row_beats_col = (col_hash < row_hash) || (col_hash == row_hash && row_id > col_id);
        if (col_beats_row)
        {
            is_max &= clear_mask;
        }
        if (row_beats_col)
        {
            is_min &= clear_mask;
        }
    }
}
//Recursive template that walks the COLORING_LEVEL-ring neighbourhood of a row:
//each level scans the columns of `explore_row` and recurses one level deeper
//from every visited column, folding hash comparisons into is_min/is_max.
//(The name "neigbor" is a historical typo, kept as-is to preserve the API.)
template<int COLORING_LEVEL>
struct neigbor_visitor
{
    template<int WARP_SIZE, int HASHES, bool DISCARD_COLORED_T>
    static __device__ __forceinline__ void visit(const int &row_id, const int &base_row_hash, const int &explore_row, const int *A_rows, const int *A_cols, const int *A_colors_in, const int lane_id, const int seed, unsigned long long int &is_min, unsigned long long int &is_max)
    {
        int row_begin = A_rows[explore_row    ];
        int row_end   = A_rows[explore_row + 1];
        for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
        {
            int row_it = row_begin + lane_id;
            int col_id = -1;
            bool do_compute_hashes = true;
            // NOTE(review): row_it is not checked against row_end here, which
            // is only safe for WARP_SIZE == 1 (as launched by
            // fast_multihash_kernel below) — confirm before widening subwarps.
            col_id = A_cols[row_it];//__load_global(A_cols+row_it);
#if DISCARD_COLORED
            if (DISCARD_COLORED_T)
            {
                // Already-colored neighbours no longer compete for extrema.
                int col_color = __load_nc(A_colors_in + col_id);
                if (col_color != 0) { do_compute_hashes = false; }
            }
#endif
            if (do_compute_hashes)
            {
                //#pragma unroll
                //for(int k=0; k<(HASHES/16>0)?HASHES/16:1; k++)
                //{
                int base_col_hash = hash2(col_id, seed/*+k*12358*/);
                compute_hashes<HASHES>(row_id, col_id, base_row_hash, base_col_hash, seed, is_min, is_max);
                //}
            }
            // Recurse into the neighbour's own row (next ring).
            neigbor_visitor < COLORING_LEVEL - 1 >::visit<WARP_SIZE, HASHES, DISCARD_COLORED_T>(row_id, base_row_hash, col_id, A_rows, A_cols, A_colors_in, lane_id, seed, is_min, is_max);
        }
    }
};
//Recursion terminator: at depth 0 there is nothing left to visit, so the
//bitfields are left untouched.
template<>
struct neigbor_visitor<0>
{
    template<int WARP_SIZE, int HASHES, bool DISCARD_COLORED_T>
    static __device__ __forceinline__ void visit(const int &row_id, const int &base_row_hash, const int &explore_row, const int *A_rows, const int *A_cols, const int *A_colors_in, const int lane_id, const int seed, unsigned long long int &is_min, unsigned long long int &is_max)
    {
        //is_min |= 0ull;
    }
};
//One multi-hash coloring sweep. Each subwarp owns one row; an uncolored row
//(color 0) compares HASHES rehashed values against its COLORING_LEVEL-ring
//neighbourhood. If the row is a local maximum for round k it takes color
//2k+1; a local minimum takes 2k+2 (max wins when both hold); the result is
//offset by c0 so successive sweeps use disjoint color ranges. Per-block
//counts of newly-colored rows go to sets_per_block for the host-side
//uncolored-rows bookkeeping.
//DISCARD_COLORED_T: read previous colors from A_colors_in and skip colored
//neighbours (colors are also copied through to A_colors).
template< bool DISCARD_COLORED_T, int HASHES, int COLORING_LEVEL, int CTA_SIZE, int WARP_SIZE>
__global__
void fast_multihash_kernel(
    const int A_num_rows, const int *A_rows, const int *A_cols,
    int *A_colors, const int *A_colors_in,
    int c0, int seed,
    int *sets_per_block)
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    const int warp_id = subwarp<WARP_SIZE>::subwarp_id();
    const int lane_id = subwarp<WARP_SIZE>::sublane_id();
    int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
    int warp_count = 0;
    // utils::any keeps whole warps alive until every lane is past the end.
    for ( ; utils::any(row_id < A_num_rows) ; row_id += NUM_WARPS_PER_GRID )
    {
        int row_color = -1;
        int available_color = 0;
        if (row_id < A_num_rows)
        {
#if DISCARD_COLORED
            if (DISCARD_COLORED_T)
            {
                row_color = A_colors_in[row_id];
                A_colors[row_id] = row_color;
            }
            else
#endif
            {
                row_color = A_colors[row_id];
            }
        }
        if (row_color == 0)
        {
            // Bit (63 - k) set means "row is still a candidate extremum for hash round k".
            unsigned long long int is_min = ~0ull;
            unsigned long long int is_max = ~0ull;
            int base_row_hash = hash2(row_id, seed);
#if 1 //COLORING_LEVELR>1
            neigbor_visitor<COLORING_LEVEL>::visit<WARP_SIZE, HASHES, DISCARD_COLORED_T>(row_id, base_row_hash, row_id, A_rows, A_cols, A_colors_in, lane_id, seed, is_min, is_max);
#else
            // Dead 1-ring-only variant kept for reference.
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];
            for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
            {
                int row_it = row_begin + lane_id;
                int col_id = -1;
                col_id = A_cols[row_it];//__load_global(A_cols+row_it);
#if DISCARD_COLORED
                if (DISCARD_COLORED_T)
                {
                    int col_color = __load_nc(A_colors_in + col_id);
                    if (col_color != 0) { continue; }
                }
#endif
                int base_col_hash = hash2(col_id, seed);
                compute_hashes<HASHES>(row_id, col_id, base_row_hash, base_col_hash, seed, is_min, is_max);
            }
#endif
            if (WARP_SIZE > 1)
            {
                // AND lane-local bitfields so the subwarp agrees on surviving rounds.
                using namespace amgx::strided_reduction;
                amgx::strided_reduction::op_and op;
                warp_reduce<1, CTA_SIZE, WARP_SIZE>(is_max, op);
                warp_reduce<1, CTA_SIZE, WARP_SIZE>(is_min, op);
            }
#if 1
            // bfind returns the MSB position, so 63 - bfind is the smallest
            // surviving round index; >= HASHES means no round survived.
            int cmax = 63 - utils::bfind(is_max);
            int cmin = 63 - utils::bfind(is_min);
            if (cmin < HASHES)
            {
                available_color = cmin * 2 + 2;
            }
            // Assigned second: a surviving maximum overrides the minimum color.
            if (cmax < HASHES)
            {
                available_color = cmax * 2 + 1;
            }
#else
#pragma unroll
            for (int k = 0; k < HASHES; k++)
            {
                if (is_max & (1ull << 63 - k))
                {
                    available_color = 2 * k + 1;
                    break;
                }
                else if (is_min & (1ull << 63 - k))
                {
                    available_color = 2 * k + 2;
                    break;
                }
            }
#endif
        }
        bool has_set_color = (lane_id == 0) && (available_color != 0);
        if (has_set_color)
        {
            A_colors[row_id] = available_color + c0;
        }
        warp_count += amgx::strided_reduction::warp_binary_count(has_set_color);
    }
    // Publish the per-block count of rows colored in this sweep.
#if GET_COLORS == GET_COLORS_ATOMIC
    amgx::strided_reduction::block_count_atomic<1, CTA_SIZE, 32, int>(warp_count, sets_per_block);
    //__threadfence_system();
#elif GET_COLORS == GET_COLORS_TWOPASS
    amgx::strided_reduction::block_count<1, CTA_SIZE, 32, int>(warp_count, sets_per_block);
#elif GET_COLORS == GET_COLORS_GATHER
    amgx::strided_reduction::block_count<1, CTA_SIZE, 32, int>(warp_count, sets_per_block);
    amgx::strided_reduction::block_count_gatherer<int, 1> global_gather(sets_per_block);
#endif
}
////////////////////////////
//Phase 1 of the (currently disabled, USE_GTLT==0) gt/lt-count scheme: for
//every uncolored row, counts per hash round how many uncolored neighbours
//hash greater (count_gt) / smaller (count_lt) than the row, saturates each
//count at 3, packs them as 2-bit fields into one 64-bit word, and stores it
//in gtlt_out[row]. Consumed by fast_multihash_kernel_gtlt_assign_kernel.
template< int HASHES, int CTA_SIZE, int WARP_SIZE>
__global__
void fast_multihash_kernel_gtlt_kernel(
    const int A_num_rows, const int *A_rows, const int *A_cols,
    int *A_colors, unsigned long long int *gtlt_out,
    int seed)
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    const int warp_id = subwarp<WARP_SIZE>::subwarp_id();
    const int lane_id = subwarp<WARP_SIZE>::sublane_id();
    int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
    //int warp_count = 0;
    for ( ; utils::any(row_id < A_num_rows) ; row_id += NUM_WARPS_PER_GRID )
    {
        int row_color = -1;
        if (row_id < A_num_rows)
        {
            row_color = A_colors[row_id];
        }
        if (row_color == 0)
        {
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];
            int base_row_hash = hash2(row_id, seed);
            unsigned char count_gt[HASHES];
            unsigned char count_lt[HASHES];
#pragma unroll
            for (int k = 0; k < HASHES; k++)
            {
                count_gt[k] = 0;
                count_lt[k] = 0;
            }
            for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
            {
                int row_it = row_begin + lane_id;
                int col_id = -1;
                col_id = A_cols[row_it];
                int col_color = A_colors[col_id];
                if (col_color != 0) { continue; }
                int base_col_hash = hash2(col_id, seed);
                int col_hash_k = base_col_hash;
                int row_hash_k = base_row_hash;
#pragma unroll
                for (int k = 0; k < HASHES; k++)
                {
                    col_hash_k = rehash(col_id, base_col_hash, k, seed);
                    row_hash_k = rehash(row_id, base_row_hash, k, seed);
                    //unsigned long long q = (~(1ull << 63-k));
                    if (col_hash_k > row_hash_k || (col_hash_k == row_hash_k && row_id < col_id))
                    {
                        if (count_gt[k] < 255) //avoid overflow
                        {
                            ++count_gt[k];
                        }
                    }
                    if (col_hash_k < row_hash_k || (col_hash_k == row_hash_k && row_id > col_id))
                    {
                        if (count_lt[k] < 255)
                        {
                            ++count_lt[k];
                        }
                    }
                }
            }
            //compress count_gt,lt in 2 bits and pack in an integer
            // NOTE(review): the lt fields start at bit 32, so HASHES > 16
            // would shift past bit 63 (UB) — callers keep K = K1*0.5 <= 24,
            // still > 16 for the largest case; confirm HASHES bound if enabled.
            unsigned long long int packed_gtlt = 0;
#pragma unroll
            for (int k = 0; k < HASHES; k++)
            {
                unsigned long long int max3_gt = count_gt[k];
                unsigned long long int max3_lt = count_lt[k];
                if (max3_gt > 3) { max3_gt = 3; }  //TODO use only bits above
                if (max3_lt > 3) { max3_lt = 3; }
                //TODO: use BFI
                packed_gtlt |= (max3_gt << (k * 2));
                packed_gtlt |= (max3_lt << (k * 2 + 32));
            }
            if (lane_id == 0)
            {
                gtlt_out[row_id] = packed_gtlt;
            }
        }
    }
}
//Phase 2 of the gt/lt scheme: compares the packed 2-bit saturated counts of a
//row against each neighbour's and clears the round bit when the neighbour's
//count is not beaten. Only the "max" path assigns a color here (2k+1 + c0);
//the "min" path is commented out.
template< int HASHES, int CTA_SIZE, int WARP_SIZE>
__global__
void fast_multihash_kernel_gtlt_assign_kernel(
    const int A_num_rows, const int *A_rows, const int *A_cols,
    int *A_colors, unsigned long long int *gtlt_in,
    int c0)
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    const int warp_id = subwarp<WARP_SIZE>::subwarp_id();
    const int lane_id = subwarp<WARP_SIZE>::sublane_id();
    int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
    for ( ; utils::any(row_id < A_num_rows) ; row_id += NUM_WARPS_PER_GRID )
    {
        int row_color = -1;
        unsigned long long int row_gtlt = 0ull;
        if (row_id < A_num_rows)
        {
            row_color = A_colors[row_id];
            row_gtlt  = gtlt_in[row_id];
        }
        int available_color = 0;
        if (row_color == 0)
        {
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];
            unsigned long long int is_min = ~0ull;
            unsigned long long int is_max = ~0ull;
            for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
            {
                int row_it = row_begin + lane_id;
                int col_id = -1;
                unsigned long long int col_gtlt = 0ull;
                if (row_it < row_end)
                {
                    col_id = A_cols[row_it];
                    col_gtlt = gtlt_in[col_id];
#pragma unroll
                    for (int k = 0; k < HASHES; k++)
                    {
                        // Unpack the 2-bit saturated gt/lt counters for round k.
                        int row_gt_k = (row_gtlt >> (k * 2 + 0)) & 3ull;
                        int row_lt_k = (row_gtlt >> (k * 2 + 32)) & 3ull;
                        int col_gt_k = (col_gtlt >> (k * 2 + 0)) & 3ull;
                        int col_lt_k = (col_gtlt >> (k * 2 + 32)) & 3ull;
                        unsigned long long q = (~(1ull << 63 - k));
                        if ((row_gt_k > col_gt_k) || (col_gt_k == row_gt_k && row_id < col_id))
                        {
                            is_max &= q; //xor with k bit: if was 1, becomes 0, if was 0 stays 0
                        }
                        if ((row_lt_k > col_lt_k) || (col_lt_k == row_lt_k && row_id > col_id))
                        {
                            is_min &= q;
                        }
                    }
                }
            }
            // Smallest surviving round (see fast_multihash_kernel for the bfind trick).
            int cmax = 63 - utils::bfind(is_max);
            int cmin = 63 - utils::bfind(is_min);
            //if(cmin < HASHES)
            //    available_color = cmin*2+2;
            if (cmax < HASHES)
            {
                available_color = cmax * 2 + 1;
            }
        }
        bool has_set_color = (lane_id == 0) && (available_color != 0);
        if (has_set_color)
        {
            A_colors[row_id] = available_color + c0;
        }
    }
}
////////////////////////////
//Greedy recoloring pass: for each row currently holding color_to_reassign,
//gathers the colors of its 1-ring (and, for COLORING_LEVEL == 2, 2-ring)
//neighbours into a 64-bit used-color set and moves the row to the first
//available (smallest unused) color. With SORTED_ROWS the job range
//[offset_start, offset_end) indexes sorted_rows_by_color; otherwise rows are
//scanned directly. Under GETMAX_TWOPASS each block records its maximum newly
//assigned color into maxs_per_block.
template< bool SORTED_ROWS, int COLORING_LEVEL, int CTA_SIZE, int WARP_SIZE>
__global__
void recolor_greedy_kernel(
    const int A_num_rows, const int *__restrict A_rows,
    const int *__restrict A_cols,
    int color_to_reassign,
    int *__restrict A_colors,
    int *sorted_rows_by_color, int offset_start, int offset_end
    , int *maxs_per_block
)
{
    const int NUM_WARPS_PER_CTA  = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    const int warp_id = subwarp<WARP_SIZE>::subwarp_id();
    const int lane_id = subwarp<WARP_SIZE>::sublane_id();
    int job_id   = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
    int last_job = A_num_rows;
    if (SORTED_ROWS)
    {
        job_id   = job_id + offset_start;
        last_job = offset_end;
    }
    int thread_max = 0;
    //const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE;
    //int row_id = blockIdx.x * CTA_SIZE + threadIdx.x;
    // Iterate over rows.
    for ( ; utils::any(job_id < last_job) ; job_id += NUM_WARPS_PER_GRID )
    {
        int row_color = -1;
        int row_id = -1;
        if (job_id < last_job)
        {
            if (SORTED_ROWS)
            {
                row_id = sorted_rows_by_color[job_id];
            }
            else
            {
                row_id = job_id;
            }
            if (row_id < A_num_rows)
            {
                row_color = A_colors[row_id];
            }
        }
        used_color_structure_64_bit used_colors;
        used_color_structure_64_bit::aux_tmp aux;
        //used_color_structure_64_bit_colorbox<5> used_colors; //still buggy
        if (row_color == color_to_reassign)
        {
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];
            //used_colors.use_color(color_to_reassign);
            //int num_greater = 0;
            for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
            {
                int row_it = row_begin + lane_id;
                int col_id = -1;
                if (row_it < row_end)
                {
                    col_id = A_cols[row_it];
                }
                if (row_it < row_end)// && col_id < A_num_rows)
                {
                    int col_color = A_colors[col_id];
                    //if(col_color>40) printf("%d\n",col_color);
                    if (col_color != 0)
                    {
                        used_colors.use_color(col_color, aux);
                    }
                }
                // NOTE(review): this 2-ring walk is outside the row_it < row_end
                // guard, so col_id may still be -1 (A_rows[-1] read). Callers
                // force WARP_SIZE == 1 for COLORING_LEVEL >= 2, which makes
                // row_it < row_end always true — confirm before relaxing that.
                if (COLORING_LEVEL == 2)
                {
                    int row_begin = A_rows[col_id    ];
                    int row_end   = A_rows[col_id + 1];
                    for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
                    {
                        int row_it = row_begin + lane_id;
                        int col_id = -1;
                        if (row_it < row_end)
                        {
                            col_id = A_cols[row_it];
                        }
                        if (row_it < row_end)// && col_id < A_num_rows)
                        {
                            int col_color = A_colors[col_id];
                            //if(col_color>40) printf("%d\n",col_color);
                            if (col_color != 0)
                            {
                                used_colors.use_color(col_color, aux);
                            }
                        }
                    }
                }
            }
        }
        // Merge the lane-local used-color sets across the subwarp.
        used_colors.sync_subwarp<CTA_SIZE, WARP_SIZE>(lane_id);
        /*if(row_color != color_to_reassign && row_id != -1)
        {
            printf("ERR %d %d!\n", row_color, color_to_reassign);
        }*/
        if (row_color == color_to_reassign && lane_id == 0)
        {
            int new_color = used_colors.first_available_color(aux);
            if (new_color != row_color && new_color > 0)
            {
                //printf("changing %d from %d to %d\n",row_id,row_color,new_color);
                A_colors[row_id] = new_color;
            }
#if GETMAX_COLOR == GETMAX_TWOPASS
            if (new_color > thread_max)
            {
                thread_max = new_color;
            }
#endif
            /*if(new_color >= 7)
            {
                printf("SEVEN %d %d\n", new_color, thread_max);
            }*/
        }
    }
#if GETMAX_COLOR == GETMAX_TWOPASS
    // Block-wide max of newly assigned colors, published for the host reduction.
    using namespace amgx::strided_reduction;
    amgx::strided_reduction::op_max op;
    warp_reduce<1, CTA_SIZE, 32>(thread_max, op);
    block_reduce<1, CTA_SIZE, 32, true>(thread_max, maxs_per_block, op);
#endif
}
////////////////////////////
//Variant of recolor_greedy_kernel that keeps a persistent per-row used-color
//set in used_colors_inout instead of re-scanning neighbour colors: a row
//holding color_to_reassign reads its own set, takes the first available
//color, and then pushes that color into the sets of its still-uncolored 1-
//(and, for COLORING_LEVEL == 2, 2-) ring neighbours. Job-range / SORTED_ROWS
//semantics and the GETMAX_TWOPASS per-block max mirror recolor_greedy_kernel.
template< bool SORTED_ROWS, int COLORING_LEVEL, int CTA_SIZE, int WARP_SIZE>
__global__
void recolor_greedy_kernel_anyring(
    const int A_num_rows, const int *A_rows, const int *A_cols,
    int color_to_reassign, int *A_colors,
    int *sorted_rows_by_color, int offset_start, int offset_end, int *maxs_per_block,
    used_color_structure_64_bit *used_colors_inout
)
{
    const int NUM_WARPS_PER_CTA  = CTA_SIZE / WARP_SIZE;
    const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
    const int warp_id = subwarp<WARP_SIZE>::subwarp_id();
    const int lane_id = subwarp<WARP_SIZE>::sublane_id();
    int job_id   = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
    int last_job = A_num_rows;
    if (SORTED_ROWS)
    {
        job_id   = job_id + offset_start;
        last_job = offset_end;
    }
    int thread_max = 0;
    // Iterate over rows; utils::any keeps whole warps alive until every lane
    // is past last_job, so individual lanes can hold invalid job_ids here.
    for ( ; utils::any(job_id < last_job) ; job_id += NUM_WARPS_PER_GRID )
    {
        int row_color = -1;
        int row_id = -1;
        if (job_id < last_job)
        {
            if (SORTED_ROWS)
            {
                row_id = sorted_rows_by_color[job_id];
            }
            else
            {
                row_id = job_id;
            }
            if (row_id < A_num_rows)
            {
                row_color = A_colors[row_id];
            }
        }
        used_color_structure_64_bit used_colors;
        used_color_structure_64_bit::aux_tmp aux;
        // BUGFIX: row_id stays -1 for lanes past last_job (kept alive by
        // utils::any above), so the previously unconditional load
        // used_colors_inout[row_id] read out of bounds. Those lanes never use
        // used_colors (guarded by row_color below), so only load valid rows.
        if (row_id >= 0)
        {
            used_colors = used_colors_inout[row_id];
        }
        //used_color_structure_64_bit_colorbox<5> used_colors; //still buggy
        if (row_color == color_to_reassign)
        {
            int new_color = used_colors.first_available_color(aux);
            if (lane_id == 0)
            {
                if (new_color != row_color && new_color > 0)
                {
                    //printf("changing %d from %d to %d\n",row_id,row_color,new_color);
                    A_colors[row_id] = new_color;
                }
#if GETMAX_COLOR == GETMAX_TWOPASS
                if (new_color > thread_max)
                {
                    thread_max = new_color;
                }
#endif
            }
            // Propagate the chosen color into uncolored neighbours' sets.
            int row_begin = A_rows[row_id    ];
            int row_end   = A_rows[row_id + 1];
            for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
            {
                int row_it = row_begin + lane_id;
                int col_id = -1;
                if (row_it < row_end)
                {
                    col_id = A_cols[row_it];
                }
                if (row_it < row_end)// && col_id < A_num_rows)
                {
                    int col_color = A_colors[col_id];
                    if (col_color == 0)
                    {
                        used_color_structure_64_bit used_colors_col = used_colors_inout[col_id];
                        used_colors_col.use_color(new_color, aux);
                        used_colors_inout[col_id] = used_colors_col;
                    }
                    if (COLORING_LEVEL == 2)
                    {
                        int row_begin = A_rows[col_id    ];
                        int row_end   = A_rows[col_id + 1];
                        for ( ; row_begin < row_end ; row_begin += WARP_SIZE)
                        {
                            int row_it = row_begin + lane_id;
                            int col_id = -1;
                            if (row_it < row_end)
                            {
                                col_id = A_cols[row_it];
                            }
                            if (row_it < row_end)// && col_id < A_num_rows)
                            {
                                int col_color = A_colors[col_id];
                                if (col_color == 0)
                                {
                                    used_color_structure_64_bit used_colors_col = used_colors_inout[col_id];
                                    used_colors_col.use_color(new_color, aux);
                                    used_colors_inout[col_id] = used_colors_col;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
#if GETMAX_COLOR == GETMAX_TWOPASS
    // Block-wide max of newly assigned colors, published for the host reduction.
    using namespace amgx::strided_reduction;
    amgx::strided_reduction::op_max op;
    warp_reduce<1, CTA_SIZE, 32>(thread_max, op);
    block_reduce<1, CTA_SIZE, 32, true>(thread_max, maxs_per_block, op);
#endif
}
// ---------------------------
// Methods
// ---------------------------
// Base-class constructor: clones the config for the first (pre-coloring) pass
// and forces it to MULTI_HASH with 0% allowed uncolored vertices, since the
// greedy second pass needs every vertex colored to process colors in parallel.
// Also caches the free-form custom argument and the "try remove last colors"
// count used by derived implementations.
template< class T_Config >
Greedy_Recolor_MatrixColoring_Base<T_Config>::Greedy_Recolor_MatrixColoring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
    first_pass_config = cfg;
    first_pass_config_scope = cfg_scope;
    //MULTI_HASH: it would be faster but it leaves some uncolored vertices: uncolored vertices cannot be processed in parallel
    first_pass_config.setParameter("matrix_coloring_scheme", std::string("MULTI_HASH"), first_pass_config_scope);
    first_pass_config.setParameter("max_uncolored_percentage", 0.0, first_pass_config_scope);
    m_coloring_custom_arg            = cfg.AMG_Config::getParameter<std::string>( "coloring_custom_arg", cfg_scope );
    m_coloring_try_remove_last_color_ = cfg.AMG_Config::getParameter<int>( "coloring_try_remove_last_colors", cfg_scope );
}
// Block version
// Entry point: estimates the average number of hash rounds needed from the
// matrix's mean row degree (raised to the coloring level, clamped to 16, and
// optionally overridden by the numeric coloring_custom_arg), then dispatches
// to color_matrix_specialized_per_numhash with compile-time (num-hashes,
// coloring-level, sort) parameters via the CASENZ/CASEORDER macro tables.
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
Greedy_Recolor_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A )
{
    const int num_rows = A.get_num_rows();
    const int num_nz = A.get_num_nz();
    // BUGFIX: guard against an empty matrix — the previous unconditional
    // num_nz / num_rows divided by zero when A has no rows.
    int avg_nz = (num_rows > 0) ? (num_nz / num_rows) : 0;
    //printf("AVG %d ",avg_nz);
    //P(single) = 1-((avg-1)/avg)^(K)
    //P(all) = P(single)^(N)
    //       = (1-((avg-1)/avg)^(K))^(N) == 0.99
    //       = (1-((avg-1)/avg)^(K)) == Pow(0.99,1/N)
    //P(single == 0.99) K = Log(1-0.99)/Log(1/avg)
    //P(all    == 0.99) K = Log(1-Pow(0.99,1/N))/Log(1/avg)
    //99% probability
    //table here http://www.wolframalpha.com/input/?i=Log%281-0.99%29%2FLog%28%28a-1%29%2Fa%29+for+a+in+[2%2C10]
    avg_nz = pow(static_cast<double>(avg_nz), static_cast<double>(this->m_coloring_level));
    if (avg_nz > 16) { avg_nz = 16; }
    // A positive integer coloring_custom_arg overrides the estimate.
    if (this->m_coloring_custom_arg != "")
    {
        int x = atoi(this->m_coloring_custom_arg.c_str());
        if (x > 0)
        {
            avg_nz = x;
        }
    }
#define CASENZ(X,ORDER,DO_SORT)\
    else if(avg_nz <= X)\
    {\
        color_matrix_specialized_per_numhash<X,ORDER,DO_SORT>(A);\
    }
#define CASEORDER(ORDER,DO_SORT)\
    if(0)\
    {\
        \
    }\
    CASENZ(2,ORDER,DO_SORT)\
    CASENZ(4,ORDER,DO_SORT)\
    CASENZ(8,ORDER,DO_SORT)\
    CASENZ(12,ORDER,DO_SORT)\
    CASENZ(16,ORDER,DO_SORT)\
    CASENZ(20,ORDER,DO_SORT)\
    CASENZ(24,ORDER,DO_SORT)\
    CASENZ(32,ORDER,DO_SORT)\
    CASENZ(48,ORDER,DO_SORT)\
    else\
    {\
        color_matrix_specialized_per_numhash<16,ORDER,DO_SORT>(A);\
    }
    // Coloring level 1 skips the color sort; levels >= 2 sort rows by color.
    if (0)
    {
    }
    else if (this->m_coloring_level == 1)
    {
        CASEORDER(1, 0);
    }
    else if (this->m_coloring_level == 2)
    {
        CASEORDER(2, 1);
    }
    else if (this->m_coloring_level == 3)
    {
        CASEORDER(3, 1);
    }
    else if (this->m_coloring_level == 4)
    {
        CASEORDER(4, 1);
    }
    else if (this->m_coloring_level == 5)
    {
        CASEORDER(5, 1);
    }
}
// Block version
// Core two-phase coloring, specialized on K1 (hash rounds per sweep),
// COLORING_LEVEL (ring distance), and DO_SORT (sort rows by first-pass color).
// Phase 1: repeated fast_multihash_kernel sweeps with fresh seeds until every
// row is colored (or a sweep makes no progress). Phase 2: for each color in
// increasing order, recolor_greedy_kernel moves rows down to the smallest
// color unused in their neighbourhood, compacting the palette.
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
template<int K1, int COLORING_LEVEL, int DO_SORT>
void
Greedy_Recolor_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::color_matrix_specialized_per_numhash( Matrix_d &A )
{
    const int num_rows = A.get_num_rows();
    const int n_blocks = 26 * 4; //TODO: choose optimal blocks number
    const int block_size = 256;
    ViewType oldView = A.currentView();
#if 1 //use fast multihash above as precolorer
    this->m_row_colors.resize( A.row_offsets.size() - 1, 0 );
    typedef typename Matrix<TemplateConfig<AMGX_host, V, M, I> > ::IVector IVector_h;
    IVector sorted_rows_by_color;
    IVector_h offsets_rows_per_color;
    {
        device_vector_alloc<int> sets_per_block(n_blocks + 1);
        int *sets_per_block_ptr = thrust::raw_pointer_cast(sets_per_block.data());
        if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
        else { A.setViewExterior(); }
        int num_uncolored = num_rows;
        int i = 0;
#if DISCARD_COLORED
        device_vector_alloc<int> color_in(num_rows);
#else
        device_vector_alloc<int> color_in;
#endif
#if USE_GTLT
        device_vector_alloc<unsigned long long int> gtlt(num_rows);
        const int K = K1 * 0.5;
#else
        const int K = K1;
#endif
        int prev_num_uncolored = num_uncolored;
        unsigned int seed = 0;
        // Phase 1: multihash sweeps; each sweep i uses the color range
        // (2*K*i, 2*K*(i+1)] so sweeps never collide.
        while (num_uncolored > 0)
        {
            //int mx = thrust::reduce( this->m_row_colors.begin(), this->m_row_colors.begin()+num_rows, 0, thrust::maximum<int>() );
            //if(mx > 2*K*i) printf("ERR UX %d %d %d\n", num_uncolored, mx, 2*K*i);
#if USE_GTLT
            fast_multihash_kernel_gtlt_kernel<K, block_size, 1> <<< n_blocks, block_size>>>
            (num_rows, A.row_offsets.raw(), A.col_indices.raw(),
             this->m_row_colors.raw(), thrust::raw_pointer_cast(gtlt.data()),
             seed);
            cudaCheckError();
            fast_multihash_kernel_gtlt_assign_kernel<K, block_size, 1> <<< n_blocks, block_size>>>
            (num_rows, A.row_offsets.raw(), A.col_indices.raw(),
             this->m_row_colors.raw(), thrust::raw_pointer_cast(gtlt.data()),
             2 * K * i);
            cudaCheckError();
#else
            // First sweep has no previous colors to discard; later sweeps
            // read the previous colors from color_in (swapped in below).
            if (num_uncolored == num_rows || (!DISCARD_COLORED))
            {
                fast_multihash_kernel<false, K, COLORING_LEVEL, block_size, 1> <<< n_blocks, block_size>>>(num_rows, A.row_offsets.raw(), A.col_indices.raw(),
                        this->m_row_colors.raw(), thrust::raw_pointer_cast(color_in.data()),
                        2 * K * i, seed,
                        sets_per_block_ptr);
            }
            else
            {
                fast_multihash_kernel<true, K, COLORING_LEVEL, block_size, 1> <<< n_blocks, block_size>>>(num_rows, A.row_offsets.raw(), A.col_indices.raw(),
                        this->m_row_colors.raw(), thrust::raw_pointer_cast(color_in.data()),
                        2 * K * i, seed,
                        sets_per_block_ptr);
            }
            cudaCheckError();
#endif
            seed = hash2(seed, 0);
#if GET_COLORS == GET_COLORS_THRUST || USE_GTLT
            num_uncolored = thrust::count( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0);
            cudaCheckError();
#elif GET_COLORS == GET_COLORS_TWOPASS
            num_uncolored -= amgx::strided_reduction::count_block_results_1(n_blocks, sets_per_block_ptr, amgx::strided_reduction::op_sum());
#elif (GET_COLORS == GET_COLORS_ATOMIC) || (GET_COLORS == GET_COLORS_GATHER)
            num_uncolored -= sets_per_block[0];
#endif
            //printf("%d -> %d\n", prev_num_uncolored,num_uncolored,prev_num_uncolored-num_uncolored);
            i++;
            // Bail out if a sweep made no progress (avoids infinite loops).
            if (prev_num_uncolored == num_uncolored)
            {
                //printf("ERR %d %d %d\n", prev_num_uncolored,num_uncolored,num_rows);
                break;
            }
            prev_num_uncolored = num_uncolored;
#if DISCARD_COLORED
            color_in.swap(this->m_row_colors);
#endif
            /*if (iteration++%4==0) {
                cudaEventRecord(throttle_event);
            } else {
                cudaEventSynchronize(throttle_event);
            }*/
        }
#if DISCARD_COLORED
        // Undo the last swap so m_row_colors holds the final colors.
        color_in.swap(this->m_row_colors);
#endif
        int max_color = i * K * 2; //available_color+c0 = 2*K*i + 2*(K-1)+2=2K(i+1), but i++
        //max_color = thrust::reduce( this->m_row_colors.begin(), this->m_row_colors.begin()+num_rows, 0, thrust::maximum<int>() );
        //printf("CC %d<%d\n",max_color,i*K*2+2);
        this->m_num_colors = max_color + 1;
#if ONLY_FIRSTSTEP
        if (1)
#else
        if (this->m_coloring_custom_arg == "FIRSTSTEP")
#endif
        {
            // Debug/benchmark escape hatch: keep the raw multihash coloring.
            A.setView(oldView);
            return;
        }
        if (DO_SORT)
        {
            //return;
            //this->createColorArrays(A)
            // Sort row indices by their first-pass color and record, per
            // color, the offset of its first row (offsets_rows_per_color).
            IVector row_colors(this->m_row_colors); //useless copy
            sorted_rows_by_color.resize(num_rows);
            //this->m_sorted_rows_by_color.resize(num_rows);
            thrust::sequence(sorted_rows_by_color.begin(), sorted_rows_by_color.end()); //useless sequence
            thrust_wrapper::sort_by_key(row_colors.begin(), row_colors.begin() + num_rows, sorted_rows_by_color.begin()); //useless read from sequence
            cudaCheckError();
            IVector offsets_rows_per_color_d(this->m_num_colors + 1);
            thrust::lower_bound(row_colors.begin(),
                                row_colors.begin() + num_rows,
                                thrust::counting_iterator<IndexType>(0),
                                thrust::counting_iterator<IndexType>(offsets_rows_per_color_d.size()),
                                offsets_rows_per_color_d.begin());
            // Copy from device to host
            offsets_rows_per_color = offsets_rows_per_color_d;
            cudaCheckError();
            /*for(int i=0;i<offsets_rows_per_color_d.size(); i++)
            {
                int x= offsets_rows_per_color[i];
            }*/
            /*oldView = A.currentView();
            if (this->m_halo_coloring == SYNC_COLORS) A.setView(ALL);
            else A.setViewExterior();*/
        }
    }
#else
    //1st pass: color with a fast method
    MatrixColoring<typename Matrix_d::TConfig> *first_pass_matrix_coloring = MatrixColoringFactory<typename Matrix_d::TConfig>::allocate(this->first_pass_config, this->first_pass_config_scope);
    first_pass_matrix_coloring->colorMatrix(A);
#if DO_SORT
    first_pass_matrix_coloring->createColorArrays(A);
#endif
    this->m_row_colors = first_pass_matrix_coloring->getRowColors();
    this->m_num_colors = first_pass_matrix_coloring->getNumColors();
    sorted_rows_by_color = first_pass_matrix_coloring->getSortedRowsByColor();
    offsets_rows_per_color = first_pass_matrix_coloring->getOffsetsRowsPerColor();
    delete first_pass_matrix_coloring;
    //ViewType oldView = A.currentView();
    //if (this->m_halo_coloring == SYNC_COLORS) A.setView(ALL);
    //else A.setViewExterior();
#endif
    cudaCheckError();
    //2nd pass: optimize using parallel greedy for each color.
    //If 1st pass coloring is valid all elements of same color can be processed in parallel by definition
#if GETMAX_COLOR == GETMAX_TWOPASS
    device_vector_alloc<int> max_color_per_block(n_blocks + 1, 1);
    int *max_color_per_block_ptr = thrust::raw_pointer_cast(max_color_per_block.data());
#else
    int *max_color_per_block_ptr = 0;
#endif
    // Pick a subwarp width from the mean row degree; level >= 2 walks
    // neighbours-of-neighbours and requires the single-lane variant.
    float avg_nnz_per_row = A.get_num_nz() / float(A.get_num_rows());
    int WARP_SIZE = 2;
    if ( avg_nnz_per_row < 4.0f )
    {
        WARP_SIZE = 2;
    }
    else if ( avg_nnz_per_row < 8.0f )
    {
        WARP_SIZE = 4;
    }
    else if ( avg_nnz_per_row < 32.0f )
    {
        WARP_SIZE = 8;
    }
    else
    {
        WARP_SIZE = 32;
    }
    if (this->m_coloring_level >= 2)
    {
        WARP_SIZE = 1;
    }
    //;
    //int max_colorx = thrust::reduce( this->m_row_colors.begin(), this->m_row_colors.begin()+num_rows, 0, thrust::maximum<int>() );
    //printf("MAX %d",max_colorx);
#define RECOLOR_CALL_CASE(__WSIZE) if(WARP_SIZE==__WSIZE){ \
        recolor_greedy_kernel<DO_SORT,COLORING_LEVEL,block_size,__WSIZE><<<GRID_SIZE, block_size>>>(\
                num_rows,\
                A.row_offsets.raw(),\
                A.col_indices.raw(),\
                color, this->m_row_colors.raw(),\
                sorted_rows_by_color.raw(),start,end,\
                max_color_per_block_ptr\
                                                                                                   ); cudaCheckError(); }
#if 1 //straight order: generates a color histogram similar to parallel greedy
    //start from color=2: 1st color would be assigned to 1, no change
    //iteration = 0;
    for (int color = 2; color <= this->m_num_colors; color++)
    {
        const int GRID_SIZE = n_blocks;//
        int start, end;
        if (DO_SORT)
        {
            // Restrict the kernel to the contiguous row range of this color.
            start = offsets_rows_per_color[color];
            end   = num_rows;
            if (color < this->m_num_colors) { end = offsets_rows_per_color[color + 1]; }
            if (end >= num_rows) { end = num_rows; }
            //printf("F %d %d %d < num_rows: %d\n",color,start,end,num_rows,(int)end<=start);
            if (end <= start) { continue; }
            //int NUM_WARPS_PER_CTA = block_size/WARP_SIZE;
            //const int GRID_SIZE = std::min( 2048, ((end-start) + NUM_WARPS_PER_CTA-1) / NUM_WARPS_PER_CTA );
            //printf("%d - %d > %d %d %d\n",end-start,color,start,end,num_rows);
        }
        else
        {
            start = 0;
            end = num_rows;
        }
        //printf("%d : %d %d\n", color, start, end);
        RECOLOR_CALL_CASE(1);
        RECOLOR_CALL_CASE(2);
        RECOLOR_CALL_CASE(4);
        RECOLOR_CALL_CASE(8);
        RECOLOR_CALL_CASE(16);
        RECOLOR_CALL_CASE(32);
        cudaCheckError();
        /*if (iteration++%4==0) {
            cudaEventRecord(throttle_event);
        } else {
            cudaEventSynchronize(throttle_event);
        }*/
    }
#else //reverse order: generates a different kind of histogram
    for (int color = this->m_num_colors; color >= 0; color--)
    {
        RECOLOR_CALL_CASE(1);
        RECOLOR_CALL_CASE(2);
        RECOLOR_CALL_CASE(4);
        RECOLOR_CALL_CASE(8);
        RECOLOR_CALL_CASE(16);
        RECOLOR_CALL_CASE(32);
    }
#endif
    cudaCheckError();
    // Final color count: reduce the per-block maxima (or fall back to thrust).
#if GETMAX_COLOR == GETMAX_TWOPASS
    int max_color = amgx::strided_reduction::count_block_results_1(n_blocks, max_color_per_block_ptr, amgx::strided_reduction::op_max());
    /*int max_colorg = thrust::reduce( this->m_row_colors.begin(), this->m_row_colors.begin()+num_rows, 0, thrust::maximum<int>() );
    max_color=0;
    for(int i=0;i<n_blocks;i++)
    {
        if(max_color_per_block[i] > max_color) max_color = max_color_per_block[i];
    }
    printf("MAXCOLOR ref=%d %d\n",max_colorg,max_color);
    if(max_colorg!=max_color)exit(1);*/
#else
    int max_color = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() );
#endif
    //printf("MAXCOLOR %d %d\n",max_color_gold,max_color);
    cudaCheckError();
    this->m_num_colors = max_color + 1;
    A.setView(oldView);
}
// Explicit template instantiations for every build configuration supported by
// this AMGX build (one class instantiation per TemplateMode).
#define AMGX_CASE_LINE(CASE) template class Greedy_Recolor_MatrixColoring_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Greedy_Recolor_MatrixColoring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // end namespace amgx | the_stack |
//#include <sys/time.h>
#include <curand.h>
#include <helpers/DebugHelper.h>
using namespace sd;
#include <loops/special_kernels.h>
// Per-device properties array; populated during initialization elsewhere.
cudaDeviceProp *deviceProperties;
// Cached kernel attributes (64 slots); allocated here and never freed in the
// visible code — lives for the process lifetime.
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
// Launch-configuration caps tuned at runtime.
int blockLimit = 128;
int maxThreads = 512;
// Peer-to-peer GPU access flags (policy vs. hardware capability).
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef SD_EXPERIMENTAL_ENABLED
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
// 48 KB of device constant memory used as a staging area for kernel params.
__constant__ char deviceConstantMemory[49152];
// this method just does type conversion in fancy way
// Decodes a device ordinal that was smuggled through an opaque pointer-sized
// handle: widen the pointer to an integer, then narrow it to int.
int getDeviceId(sd::Pointer ptrToDeviceId) {
  const auto widened = (sd::LongType)ptrToDeviceId;
  return (int)widened;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
/*
 * Returns the number of resident blocks per multiprocessor assumed for the
 * given device, keyed on its compute-capability major version:
 * SM 5.x and newer -> 32, SM 3.x -> 16, anything older -> 8.
 * (Fix: the previous version also read the unused minor revision.)
 */
int getDeviceBlockThreshold(int deviceId) {
  const int ccMajor = deviceProperties[deviceId].major;
  if (ccMajor >= 5) return 32;
  if (ccMajor == 3) return 16;
  // Covers ccMajor < 3; an (unexpected) ccMajor == 4 also lands on the
  // default of 8, exactly as before.
  return 8;
}
/*
* This message returns shared memory threshold value. default overflow ratio is 0.3
*/
/*
 * Returns the shared-memory threshold for the given device, scaled by the
 * default overflow ratio of 0.3 (i.e. the raw per-SM shared-memory figure
 * divided by 0.3). The base figure is keyed on compute capability; note the
 * values are intentionally NOT multiples of 32.
 */
int getDeviceSharedThreshold(int deviceId) {
  const int ccMajor = deviceProperties[deviceId].major;
  const int ccMinor = deviceProperties[deviceId].minor;
  int shmemThreshold;
  switch (ccMajor) {
    case 6:
      shmemThreshold = (ccMinor == 0) ? 65536 : 49152;
      break;
    case 5:
      shmemThreshold = (ccMinor == 2) ? 98304 : 65536;
      break;
    case 3:
      shmemThreshold = (ccMinor == 7) ? 114688 : 49152;
      break;
    default:
      shmemThreshold = 49152;
      break;
  }
  // int / double -> double, truncated back to int on return (as before).
  return shmemThreshold / 0.3;
}
// Builds a host+device buffer holding the shape information of a scalar
// (rank-0) array, uploads it on the given stream, and returns it.
sd::buffer::Buffer<sd::LongType> *createScalarBuffer(cudaStream_t stream) {
  auto shapeInfo = shape::createScalarShapeInfo();
  auto result = sd::buffer::createBuffer(shapeInfo, shape::shapeInfoLength(2), stream);
  sd::buffer::copyDataToGpu(&result, stream);
  return result;
}
// Owns host/device buffers describing a scalar (rank-0) array: its shape info
// plus a one-element "dimension" buffer preset to SD_MAX_DIMENSION.
class ScalarShapeInformation {
private:
sd::buffer::Buffer<sd::LongType> *scalarDimension;
sd::buffer::Buffer<sd::LongType> *scalarShapeInfo;
// std::thread::id threadId;
public:
// Allocates both buffers and uploads them on `stream`.
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<sd::LongType *>(malloc(sizeof(sd::LongType)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(sd::LongType));
scalarDimensionBuff[0] = SD_MAX_DIMENSION;
// NOTE(review): presumably createBuffer/freeBuffer take ownership of the
// malloc'd memory — confirm, otherwise scalarDimensionBuff leaks.
scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff, 1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
sd::buffer::freeBuffer(&scalarShapeInfo);
sd::buffer::freeBuffer(&scalarDimension);
}
// Host-side pointer to the scalar shape info.
sd::LongType *getShapeInfoHostPointer() { return scalarShapeInfo->data; }
// Device-side pointer to the scalar shape info.
sd::LongType *getShapeInfoGpuPointer() { return scalarShapeInfo->gData; }
// Host-side pointer to the dimension value (SD_MAX_DIMENSION).
sd::LongType *getDimensionHostPointer() { return scalarDimension->data; }
// Device-side pointer to the dimension value.
sd::LongType *getDimensionGpuPointer() { return scalarDimension->gData; }
};
// Pairs a single scalar result value of type T with scalar shape information,
// both mirrored on host and device, for scalar-returning ops.
template <typename T>
class ScalarInfo {
sd::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
// Allocates a one-element host buffer for the result and uploads it on `stream`.
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T *>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = sd::buffer::createBuffer(scalarResult, 1, stream);
streamRef = stream;
sd::buffer::copyDataToGpu(&scalarData, stream);
}
// Copies the result back from the device and returns it.
// NOTE(review): assumes copyDataFromGpu blocks until the copy on streamRef
// completes — confirm against the buffer implementation.
T getFinalResultFromDevice() {
sd::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
 * Get the device shape information
 * representing a scalar
 */
sd::LongType *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); }
/**
 * Get the dZ pointers
 */
T *getDevicePointer() { return scalarData->gData; }
/**
 * Get the infinite dimension device pointer
 */
sd::LongType *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); }
~ScalarInfo() {
sd::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
// Host entry point: runs pairwise transform op `opNum` (Z = op(X, Y)) on the GPU.
// The dXShapeInfo/dYShapeInfo/dZShapeInfo arguments are accepted for API
// compatibility but unused; device shape buffers come from ConstantShapeHelper's
// cache instead. extraPointers slots [1],[4],[5],[3] feed the LaunchContext.
// Exceptions are caught and recorded on the default LaunchContext's error
// reference (code 1) rather than propagated.
void execPairwiseTransform(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseTransform(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: runs pairwise boolean transform op `opNum` (Z = op(X, Y)).
// Structurally identical to execPairwiseTransform above, but dispatches to the
// boolean executor. The d*ShapeInfo arguments are unused; cached device shape
// buffers are used. Errors are recorded on the default LaunchContext, not thrown.
void execPairwiseTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseBoolTransform(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: summary-statistics reduction of X to a scalar Z
// (e.g. variance/stddev families), with `biasCorrected` selecting the
// bias-corrected estimator. d*ShapeInfo arguments are unused; cached device
// shape buffers are used. Errors are recorded on the default LaunchContext.
void execSummaryStatsScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStatsScalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: boolean broadcast op `opNum` applied along `dbDimension`
// over X and Y into Z. The d*ShapeInfo arguments are unused; device shape
// buffers come from ConstantShapeHelper's cache. extraPointers[10..13] carry
// precomputed TAD shape/offset buffers for X and Z.
// Fix: removed the unused local hTADShapeInfo (extraPointers[9]) — it was
// read but never referenced.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execBroadcastBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                       sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
                       sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
// TAD (tensor-along-dimension) descriptors precomputed by the caller.
auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcastBool(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
// Host entry point: broadcast op `opNum` applied along `dbDimension` over X
// and Y into Z. The d*ShapeInfo arguments are unused; device shape buffers
// come from ConstantShapeHelper's cache. extraPointers[10..13] carry the
// precomputed TAD shape/offset buffers for X and Z.
// Fix: removed unused locals (stream, hTADShapeInfo, xType, yType, zType) —
// all were computed but never referenced.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execBroadcast(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                   sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                   sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
// TAD (tensor-along-dimension) descriptors precomputed by the caller.
auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<sd::LongType *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<sd::LongType *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcast(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbY->primary(), hYShapeInfo,
dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(), dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
// Host entry point: full float-typed reduction of X down to a scalar Z
// (dispatches to execReduceFloatScalar despite the name). d*ShapeInfo
// arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloatScalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: full same-type reduction of X down to a scalar Z
// (dispatches to execReduceSameScalar despite the name). d*ShapeInfo
// arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSameScalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: same-type reduction of X along the dimensions in
// `dbDimension`, writing to Z. When the reduced rank differs from Z's rank
// (and Z is not a scalar), a unit-dimension-free Z shape is derived so the
// executor sees consistent ranks. d*ShapeInfo arguments are unused.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceSame2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
// Host-side Z shape actually handed to the executor (may be rewritten below).
const sd::LongType *zShapeInfoH = hZShapeInfo;
if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
// NOTE(review): zPack goes out of scope here while its primary() pointer is
// retained — presumably ConstantShapeHelper caches the buffer; confirm.
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
}
// Scalar output: reduce over everything (empty dims); otherwise normalize dims.
std::vector<int> dims =
(zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: long-typed (INT64 accumulator) reduction of X along the
// dimensions in `dbDimension`, writing to Z. Same shape-normalization scheme
// as execReduceSame2 above. d*ShapeInfo arguments are unused.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceLong2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
// Host-side Z shape actually handed to the executor (may be rewritten below).
const sd::LongType *zShapeInfoH = hZShapeInfo;
if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
}
// Scalar output: reduce over everything (empty dims); otherwise normalize dims.
std::vector<int> dims =
(zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceLong(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: full reduction of X down to a scalar Z for a long-typed
// (INT64 accumulator) reduce op. Z must have INT64 data type.
// Fix: the Z host shape info is now passed where the original erroneously
// repeated hXShapeInfo (compare the structurally identical call in
// execReduceBool below). Also removed the unused local hTADShapeInfo.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceLong(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);  // grid x, block x, shared-mem bytes
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo),
SD_COMMON_TYPES, SD_LONG_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: boolean reduction of X along the dimensions in
// `dbDimension`, writing to Z. Same shape-normalization scheme as
// execReduceSame2 above. d*ShapeInfo arguments are unused.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceBool2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
const auto zLen = shape::length(hZShapeInfo);
std::vector<int> dimensions(dimension, dimension + dimensionLength);
// Host-side Z shape actually handed to the executor (may be rewritten below).
const sd::LongType *zShapeInfoH = hZShapeInfo;
if (shape::rank(hXShapeInfo) - dimensionLength != shape::rank(hZShapeInfo) && zLen != 1) {
auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimensions);
zShapeInfoH = reinterpret_cast<sd::LongType const *>(zPack.primary());
}
// Scalar output: reduce over everything (empty dims); otherwise normalize dims.
std::vector<int> dims =
(zLen != 1) ? ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimensions) : std::vector<int>();
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
extraParams, dbZ->primary(), zShapeInfoH, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeInfoH).special(),
dims.data(), dims.size());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: full boolean reduction of X down to a scalar Z.
// Z must have BOOL data type. Launches the reduce-scalar kernel directly via
// BUILD_DOUBLE_SELECTOR rather than going through NativeOpExecutioner.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
// NOTE(review): hTADShapeInfo below is read but never used in this function.
auto hTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL) throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
// (grid x, block x, shared-mem bytes)
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(
xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), hXShapeInfo,
extraParams, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo),
SD_COMMON_TYPES, SD_BOOL_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
// Host entry point: index reduction (e.g. argmax-style ops) of X along the
// dimensions in `dbDimension`, writing indices into Z. TAD descriptors for X
// are computed here from the host-side dimension data, while the executor
// receives the device-side dimension pointer (dbDimension->special()).
// d*ShapeInfo arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execIndexReduce(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack =
sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduce(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
(int *)dbDimension->special(), dimensionLength, tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
// Host entry point: float-typed reduction of X along the dimensions given in
// `dbDimension`, writing to Z. When the reduced rank disagrees with Z's rank
// (and Z is not a scalar), a unit-dimension-free Z shape is derived so the
// executor sees consistent ranks. The d*ShapeInfo arguments are unused;
// cached device shape buffers come from ConstantShapeHelper.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execReduceFloat2(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                      sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                      sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    auto dimPtr = reinterpret_cast<int *>(dbDimension->primary());
    const int numDims = static_cast<int>(shape::length(hDimensionShape));
    std::vector<int> dimVec(dimPtr, dimPtr + numDims);

    const auto zLen = shape::length(hZShapeInfo);
    const bool ranksDisagree = shape::rank(hXShapeInfo) - numDims != shape::rank(hZShapeInfo);

    // Host-side Z shape actually handed to the executor.
    const sd::LongType *zShapeH = hZShapeInfo;
    if (ranksDisagree && zLen != 1) {
      auto zPack = ConstantShapeHelper::getInstance().createShapeInfoWithNoUnitiesForReduce(hZShapeInfo, dimVec);
      zShapeH = reinterpret_cast<sd::LongType const *>(zPack.primary());
    }

    // Scalar output reduces over everything (empty dims); otherwise normalize.
    std::vector<int> reduceDims;
    if (zLen != 1) reduceDims = ShapeUtils::evalDimsForReduceOp(shape::rank(hXShapeInfo), dimVec);

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduceFloat(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                         extraParams, dbZ->primary(), zShapeH, dbZ->special(),
                                         ConstantShapeHelper::getInstance().bufferForShapeInfo(zShapeH).special(),
                                         reduceDims.data(), reduceDims.size());

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
// Host entry point: index reduction of the whole of X down to a scalar index
// in Z. d*ShapeInfo arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execIndexReduceScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX,
sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, void *extraParams,
OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduceScalar(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: element-wise same-type transform op `opNum` from X to Z.
// d*ShapeInfo arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execTransformSame(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
// NOTE(review): TAD pointers are taken from extraPointers[0]/[1] here, while
// the sibling execTransformStrict/Float use [10]/[11]; slot [1] is also fed
// to LaunchContext below as its first ctor argument — looks like a possible
// copy-paste slip; confirm against the Java-side extraPointers layout.
auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformSame(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams, tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: element-wise boolean transform op `opNum` from X to Z.
// d*ShapeInfo arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execTransformBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
// NOTE(review): TAD pointers from extraPointers[0]/[1], unlike [10]/[11] in
// execTransformStrict/Float — same concern as in execTransformSame; confirm.
auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformBool(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams, tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: element-wise "any"-typed transform op `opNum` from X to Z
// (no TAD buffers are passed — nullptr/nullptr). Unlike its siblings, this
// builds the LaunchContext from explicitly-cast stream handles and also passes
// extraPointers[6] as an int*.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execTransformAny(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
// NOTE(review): this reinterprets the pointer SLOT itself (not what it points
// to) as a cudaStream_t reference — verify this matches how slot [4] is
// populated by the caller.
auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]);
LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
reinterpret_cast<int *>(extraPointers[6]));
NativeOpExecutioner::execTransformAny(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
dbZ->primary(), hZShapeInfo, dbZ->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
extraParams, nullptr, nullptr);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
// Host entry point: element-wise strict (floating-point in/out) transform op
// `opNum` from X to Z. TAD buffers come from extraPointers[10]/[11].
// d*ShapeInfo arguments are unused; cached device shape buffers are used.
// Errors are recorded on the default LaunchContext instead of being thrown.
void execTransformStrict(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
sd::LongType const *dZShapeInfo, void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformStrict(
&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: executes a float transform op (opNum) reading dbX and writing dbZ.
 *
 * Mirrors execTransformStrict: extraPointers [1],[4],[5],[3] build the LaunchContext,
 * [10]/[11] optionally carry TAD shape/offset buffers. dXShapeInfo/dZShapeInfo are
 * unused — device shape buffers come from the ConstantShapeHelper cache.
 * Exceptions are recorded on the default LaunchContext's error reference.
 */
void execTransformFloat(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                        sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                        sd::LongType const *dZShapeInfo, void *extraParams) {
  try {
    // mark device buffers busy: dbZ as output, dbX as input
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
    auto tadShapeInfo = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
    auto tadOffsets = reinterpret_cast<sd::LongType *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execTransformFloat(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), extraParams,
        tadShapeInfo, tadOffsets);
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Probes pairwise peer-to-peer (P2P) accessibility between all visible CUDA
 * devices and stores the verdict in the global `supportedP2P` flag:
 * true only if EVERY ordered device pair can access each other (or if there is
 * a single device, in which case P2P is trivially "supported").
 * Restores the caller's current device before returning.
 */
void checkP2P() {
  int curDevice = 0;
  cudaGetDevice(&curDevice);

  int devCnt = 0;
  cudaGetDeviceCount(&devCnt);

  // FIX: original condition used `&&` (curDevice < 0 && curDevice > devCnt),
  // which can never be true; clamp an out-of-range device id to 0.
  if (curDevice < 0 || curDevice > devCnt) curDevice = 0;

  if (devCnt > 1) {
    bool tempSupport = true;
    // FIX: stop scanning as soon as one pair lacks access — the original inner
    // `break` left the outer loop running and kept probing pointlessly.
    for (int dX = 0; dX < devCnt && tempSupport; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;

        int canAccess = 0;
        cudaSetDevice(dX);
        cudaDeviceCanAccessPeer(&canAccess, dX, dY);

        if (!canAccess) {
          tempSupport = false;
          break;
        }
      }
    }
    supportedP2P = tempSupport;

    cudaSetDevice(curDevice);
  } else {
    // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
    supportedP2P = true;
  }
}
/**
 * Enables or disables peer access between every pair of visible CUDA devices
 * and records the requested state in the global `allowedP2P` flag.
 * No-op when the requested state equals the current one. Restores the
 * caller's current device before returning.
 *
 * @param enable true to enable peer access, false to disable it
 */
void enableP2P(bool enable) {
  if (enable == allowedP2P) return;  // already in the requested state

  int curDevice = 0;
  cudaGetDevice(&curDevice);

  int devCnt = 0;
  cudaGetDeviceCount(&devCnt);

  // FIX: original condition used `&&` (curDevice < 0 && curDevice > devCnt),
  // which can never be true; clamp an out-of-range device id to 0.
  if (curDevice < 0 || curDevice > devCnt) curDevice = 0;

  if (devCnt > 1) {
    for (int dX = 0; dX < devCnt; dX++) {
      for (int dY = 0; dY < devCnt; dY++) {
        if (dX == dY) continue;

        int canAccess = 0;
        cudaSetDevice(dX);  // peer access is toggled from the source device
        cudaDeviceCanAccessPeer(&canAccess, dX, dY);

        if (canAccess) {
          if (enable) {
            cudaDeviceEnablePeerAccess(dY, 0);
          } else {
            cudaDeviceDisablePeerAccess(dY);
          }
        } else {
          if (sd::Environment::getInstance().isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
        }
      }
    }
  }

  allowedP2P = enable;
  // FIX: restore the caller's device exactly once (the original restored it
  // twice on the multi-device path).
  cudaSetDevice(curDevice);
}
// Returns the cached P2P support verdict computed by checkP2P().
bool isP2PAvailable() { return supportedP2P; }
/**
 * One-time device bootstrap: caches cudaDeviceProp for every visible device in
 * the global `deviceProperties` array, raises the per-thread stack limit to
 * 4096 bytes on each device, then probes and (if possible) enables P2P access.
 * Leaves device 0 current. Exceptions are recorded on the default
 * LaunchContext's error reference rather than propagating.
 */
void initializeDevicesAndFunctions() {
  try {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    // NOTE(review): allocated once and never freed here — presumably lives for
    // the process lifetime; confirm there is no repeated initialization path.
    deviceProperties = new cudaDeviceProp[devCnt];
    for (int i = 0; i < devCnt; i++) {
      cudaSetDevice(i);
      cudaGetDeviceProperties(&deviceProperties[i], i);

      cudaDeviceSetLimit(cudaLimitStackSize, 4096);
    }

    cudaSetDevice(0);

    checkP2P();

    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1) enableP2P(allowedP2P);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Hands an array of externally-resolved BLAS function pointers to BlasHelper.
 * The commented-out assignments below document the historical slot layout of
 * `functions` (cublas gemv/gemm entry points); kept for reference.
 */
void initializeFunctions(sd::Pointer *functions) {
  sd::BlasHelper::getInstance().initializeDeviceFunctions(functions);
  /*
  cublasSgemv = (CublasSgemv)functions[0];
  cublasDgemv = (CublasDgemv)functions[1];
  cublasHgemm = (CublasHgemm)functions[2];
  cublasSgemm = (CublasSgemm)functions[3];
  cublasDgemm = (CublasDgemm)functions[4];
  cublasSgemmEx = (CublasSgemmEx)functions[5];
  cublasHgemmBatched = (CublasHgemmBatched)functions[6];
  cublasSgemmBatched = (CublasSgemmBatched)functions[7];
  cublasDgemmBatched = (CublasDgemmBatched)functions[8];
  */
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
/**
 * Allocates a pinned (page-locked) host memory chunk of `memorySize` bytes
 * (plus 8 bytes of padding — presumably a guard/alignment margin, TODO confirm).
 *
 * @param memorySize requested size in bytes
 * @param flags      unused; reserved for future allocation flags
 * @return pointer to the allocation, or nullptr on failure (the error code and
 *         message are also recorded on the default LaunchContext)
 */
sd::Pointer mallocHost(sd::LongType memorySize, int flags) {
  // FIX: initialize so a failed cudaHostAlloc returns nullptr instead of an
  // uninitialized (garbage) pointer.
  sd::Pointer pointer = nullptr;
  // cudaHostAllocMapped |cudaHostAllocPortable
  auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault);
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed");
  }

  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just and int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
/**
 * Allocates a device memory chunk of `memorySize` bytes (plus 8 bytes of
 * padding — presumably a guard/alignment margin, TODO confirm) on the CURRENT
 * device.
 *
 * @param memorySize requested size in bytes
 * @param deviceId   unused here — allocation targets the current device;
 *                   NOTE(review): confirm callers set the device beforehand
 * @param flags      unused; reserved for future allocation flags
 * @return pointer to the allocation, or nullptr on failure (the error code and
 *         message are also recorded on the default LaunchContext)
 */
sd::Pointer mallocDevice(sd::LongType memorySize, int deviceId, int flags) {
  // FIX: initialize so a failed cudaMalloc returns nullptr instead of an
  // uninitialized (garbage) pointer.
  sd::Pointer pointer = nullptr;
  auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed");
  }

  return reinterpret_cast<int8_t *>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
/**
 * Releases pinned host memory previously obtained via mallocHost.
 * Failures are recorded on the default LaunchContext's error reference;
 * the return value is always 1 regardless of outcome (interop convention).
 */
int freeHost(sd::Pointer pointer) {
  auto res = cudaFreeHost(reinterpret_cast<void *>(pointer));
  if (res != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed");
  }

  return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
/**
 * Releases device memory previously obtained via mallocDevice.
 * Returns 1 on success, 0 on failure.
 *
 * @param deviceId unused — cudaFree operates on the pointer's owning context
 */
int freeDevice(sd::Pointer pointer, int deviceId) {
  auto res = cudaFree(reinterpret_cast<void *>(pointer));

  // we're intentionally skipping
  // NOTE(review): error code 1 is deliberately ignored here as well — confirm
  // which cudaError_t value that corresponds to in the targeted CUDA version.
  if (res != 0 && res != 1) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed");
  }

  return res == 0 ? 1L : 0L;
}
// Contexts are not materialized in the CUDA backend; always returns null.
sd::Pointer createContext() { return 0L; }

/**
 * Creates a new CUDA stream on the heap and returns it as an opaque pointer.
 * Ownership passes to the caller. On failure the error is recorded on the
 * default LaunchContext, but the (heap) pointer is still returned.
 */
sd::Pointer createStream() {
  auto stream = new cudaStream_t();
  auto dZ = cudaStreamCreate(stream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed");
  }

  return stream;
}
/**
 * Creates a timing-disabled CUDA event and returns it as an opaque pointer.
 *
 * NOTE(review): the event handle is written INTO the pointer variable itself
 * (`&nativeEvent` is cast to cudaEvent_t*), so the malloc'd buffer is never
 * actually used to store the handle — the returned value IS the handle. The
 * matching reinterpret in registerEvent/destroyEvent relies on this; confirm
 * this is intentional before touching it.
 */
sd::Pointer createEvent() {
  sd::Pointer nativeEvent = (sd::Pointer)malloc(sizeof(cudaEvent_t));
  CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t));

  auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed");
  }

  return nativeEvent;
}
/**
 * Records `event` (an opaque handle produced by createEvent) on `stream`
 * (an opaque cudaStream_t* produced by createStream). Always returns 1;
 * failures are recorded on the default LaunchContext's error reference.
 */
int registerEvent(sd::Pointer event, sd::Pointer stream) {
  // the event handle is stored in the pointer value itself (see createEvent)
  auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
  auto pStream = reinterpret_cast<cudaStream_t *>(stream);

  auto dZ = cudaEventRecord(*pEvent, *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed");
  }

  return 1;
}
// Switches the current device via the AffinityManager; always returns 1.
int setDevice(int deviceId) {
  AffinityManager::setCurrentDevice(deviceId);
  return 1;
}

// Returns free memory (bytes) on the CURRENT device.
sd::LongType getDeviceFreeMemoryDefault() {
  size_t memFree = 0;
  size_t memTotal = 0;

  cudaMemGetInfo(&memFree, &memTotal);

  return (sd::LongType)memFree;
}
/**
 * Returns free memory (bytes) on the given device, temporarily switching to it
 * if it is not the current one and restoring the caller's device afterwards.
 * A negative `device` queries the current device.
 */
sd::LongType getDeviceFreeMemory(int device) {
  int previous = -1;
  cudaGetDevice(&previous);

  // only switch when a different, valid device was requested
  const bool switched = (device >= 0 && device != previous);
  if (switched) cudaSetDevice(device);

  size_t freeBytes = 0;
  size_t totalBytes = 0;
  cudaMemGetInfo(&freeBytes, &totalBytes);

  if (switched) cudaSetDevice(previous);

  return static_cast<sd::LongType>(freeBytes);
}
/**
 * Returns total memory (bytes) on the given device, temporarily switching to
 * it if it is not the current one and restoring the caller's device afterwards.
 * A negative `device` queries the current device.
 */
sd::LongType getDeviceTotalMemory(int device) {
  int previous = -1;
  cudaGetDevice(&previous);

  // only switch when a different, valid device was requested
  const bool switched = (device >= 0 && device != previous);
  if (switched) cudaSetDevice(device);

  size_t freeBytes = 0;
  size_t totalBytes = 0;
  cudaMemGetInfo(&freeBytes, &totalBytes);

  if (switched) cudaSetDevice(previous);

  return static_cast<sd::LongType>(totalBytes);
}
/**
 * Synchronous memcpy between host/device buffers.
 *
 * @param flags direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @param reserved unused in the synchronous variant
 * @return 1 on success, 0 on failure (error also recorded on the default
 *         LaunchContext's error reference)
 */
int memcpySync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  cudaMemcpyKind kind;

  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      // FIX: typo in the error message ("UNDEFNED")
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }

  auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                       static_cast<size_t>(size), kind);
  if (dZ != 0) {
    // FIX: `size` is sd::LongType (64-bit); printing it with %i was undefined
    // behavior and truncated the value — use %lld with an explicit cast.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst,
           static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpy failed");
    return 0;
  }

  return 1;
}
/**
 * Asynchronous memcpy between host/device buffers on the given stream.
 *
 * @param flags direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @param reserved opaque cudaStream_t* the copy is enqueued on
 * @return 1 on success, 0 on failure (error also recorded on the default
 *         LaunchContext's error reference)
 */
int memcpyAsync(sd::Pointer dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  auto pStream = reinterpret_cast<cudaStream_t *>(reserved);

  cudaMemcpyKind kind;

  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      // FIX: typo in the error message ("UNDEFNED")
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }

  auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)),
                            static_cast<size_t>(size), kind, *pStream);
  if (dZ != 0) {
    // FIX: `size` is sd::LongType (64-bit); printing it with %i was undefined
    // behavior and truncated the value — use %lld with an explicit cast.
    printf("Failed on [%p] -> [%p], size: [%lld], direction: [%i], dZ: [%i]\n", src, dst,
           static_cast<long long>(size), flags, static_cast<int>(dZ));
    fflush(stdout);
    fflush(stderr);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed");
    return 0;
  }

  return 1;
}
/**
 * Synchronous byte-wise memset of a device buffer.
 * Always returns 1; failures are recorded on the default LaunchContext.
 *
 * @param flags,reserved unused in the synchronous variant
 */
int memsetSync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed");
  }

  return 1;
}
/**
 * Asynchronous byte-wise memset of a device buffer on the given stream.
 * Always returns 1; failures are recorded on the default LaunchContext.
 *
 * @param reserved opaque cudaStream_t* the memset is enqueued on
 * @param flags unused
 */
int memsetAsync(sd::Pointer dst, int value, sd::LongType size, int flags, sd::Pointer reserved) {
  auto pStream = reinterpret_cast<cudaStream_t *>(reserved);

  auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed");
  }

  return 1;
}
/**
 * Destroys an event created by createEvent. Always returns 1; failures are
 * recorded on the default LaunchContext's error reference.
 */
int destroyEvent(sd::Pointer event) {
  // the event handle is stored in the pointer value itself (see createEvent)
  auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
  auto dZ = cudaEventDestroy(*pEvent);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed");
  }

  return 1;
}
/**
 * Blocks until all work enqueued on the given stream (opaque cudaStream_t*)
 * has completed. Always returns 1; failures are recorded on the default
 * LaunchContext's error reference.
 */
int streamSynchronize(sd::Pointer stream) {
  auto pStream = reinterpret_cast<cudaStream_t *>(stream);

  auto dZ = cudaStreamSynchronize(*pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed");
  }

  return 1L;
}
/**
 * Blocks until the given event (handle produced by createEvent) has fired.
 * Always returns 1; failures are recorded on the default LaunchContext's
 * error reference.
 */
int eventSynchronize(sd::Pointer event) {
  // the event handle is stored in the pointer value itself (see createEvent)
  auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);

  auto dZ = cudaEventSynchronize(*pEvent);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed");
  }

  return 1L;
}
// Number of CUDA devices visible to the process.
int getAvailableDevices() {
  int devCnt = 0;
  cudaGetDeviceCount(&devCnt);
  return devCnt;
}

// Toggles the global debug flag in the environment singleton.
void enableDebugMode(bool reallyEnable) { sd::Environment::getInstance().setDebug(reallyEnable); }

// Sets the global grid-size cap, clamped to [1, 8192].
void setGridLimit(int gridSize) {
  if (gridSize > 8192) gridSize = 8192;
  if (gridSize < 1) gridSize = 1;
  blockLimit = gridSize;
}

// OMP-compatibility shims: both report the same global thread cap.
int ompGetMaxThreads() { return maxThreads; }

int ompGetNumThreads() { return maxThreads; }

// Sets the global thread cap, clamped to [32, 1024].
void setOmpNumThreads(int threads) {
  if (threads > 1024) threads = 1024;
  if (threads < 32) threads = 32;
  maxThreads = threads;
}

// Toggles the global verbose flag in the environment singleton.
void enableVerboseMode(bool reallyEnable) { sd::Environment::getInstance().setVerbose(reallyEnable); }

// Compute-capability / name lookups from the cached deviceProperties array
// (populated by initializeDevicesAndFunctions; `device` is not range-checked).
int getDeviceMajor(int device) { return deviceProperties[device].major; }

int getDeviceMinor(int device) { return deviceProperties[device].minor; }

const char *getDeviceName(int device) { return deviceProperties[device].name; }
/**
 * Concatenates `numArrays` host arrays along `dimension` into dZ using the
 * CPU (generic) concat path — despite living in the CUDA backend, this runs
 * concatCpuGeneric on host buffers. Dispatch is by dZ's data type.
 * Exceptions are recorded on the default LaunchContext's error reference.
 */
void specialConcat(sd::Pointer *extraPointers, int dimension, int numArrays, sd::Pointer *data,
                   sd::Pointer *inputShapeInfo, void *dZ, sd::LongType const *dZShapeInfo, sd::Pointer *tadPointers,
                   sd::Pointer *offsetPointers) {
  try {
    BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
                          ::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
                          SD_COMMON_TYPES);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Builds a heap-allocated TadPack (TAD shape info + offsets) for the given
 * shape and dimensions; the caller owns the returned pointer.
 */
sd::TadPack *tadOnlyShapeInfo(sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength) {
  try {
    // copy the cached pack into a heap object the Java side can hold onto;
    // ownership transfers to the caller
    auto pack = new TadPack();
    *pack = sd::ConstantTadHelper::getInstance().tadForDimensions(dXShapeInfo, dimension, dimensionLength);
    return pack;
  } catch (std::exception &e) {
    // on failure: record the error on the default context and signal with nullptr
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Plain accessors exposing TadPack internals across the interop boundary
// (host/device shape buffers, offsets, TAD count, shape-info length).
sd::LongType const *getPrimaryShapeInfo(sd::TadPack *pack) { return pack->primaryShapeInfo(); }

sd::LongType const *getPrimaryOffsets(sd::TadPack *pack) { return pack->primaryOffsets(); }

sd::LongType const *getSpecialShapeInfo(sd::TadPack *pack) { return pack->specialShapeInfo(); }

sd::LongType const *getSpecialOffsets(sd::TadPack *pack) { return pack->specialOffsets(); }

sd::LongType getNumberOfTads(sd::TadPack *pack) { return pack->numberOfTads(); }

int getShapeInfoLength(sd::TadPack *pack) { return pack->shapeInfoLength(); }
/**
 * Asynchronously copies `size` bytes from `src` into device constant memory
 * (`deviceConstantMemory`) at byte offset `dst`, on the given stream.
 *
 * @param flags direction: 0 = H2H, 1 = H2D, 2 = D2H, 3 = D2D
 * @param reserved opaque cudaStream_t* the copy is enqueued on
 * @return 1 on success, 0 for an unknown direction flag; CUDA failures are
 *         recorded on the default LaunchContext's error reference
 */
int memcpyConstantAsync(sd::LongType dst, sd::Pointer src, sd::LongType size, int flags, sd::Pointer reserved) {
  cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved);

  cudaMemcpyKind kind;

  DEBUG_KERNEL(pStream, -1);

  switch (flags) {
    case 0: {
      kind = cudaMemcpyHostToHost;
    } break;
    case 1: {
      kind = cudaMemcpyHostToDevice;
    } break;
    case 2: {
      kind = cudaMemcpyDeviceToHost;
    } break;  // FIX: missing break here silently fell through to DeviceToDevice
    case 3: {
      kind = cudaMemcpyDeviceToDevice;
    } break;
    default: {
      // FIX: previously there was no default, leaving `kind` uninitialized for
      // unknown flags; fail explicitly like memcpySync/memcpyAsync do.
      sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
      sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
      return 0;
    }
  }

  auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed");
  }

  return 1;
}
/**
 * Returns the device address of the `deviceConstantMemory` constant-memory
 * symbol. Failures are recorded on the default LaunchContext's error
 * reference; in that case the returned pointer is whatever
 * cudaGetSymbolAddress left behind (NOTE(review): not initialized — confirm
 * callers check the error reference before using it).
 */
sd::Pointer getConstantSpace() {
  sd::Pointer dConstAddr;
  cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
  if (dZ != 0) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed");
  }

  return dConstAddr;
}
/**
 * Gathers `n` TADs ("rows") from dbX into dbZ according to `indexes`, entirely
 * on device. extraPointers[1] is the stream. Dispatch is by dbX's data type.
 * Exceptions are recorded on the default LaunchContext's error reference.
 */
void pullRows(sd::Pointer *extraPointers, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo,
              sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *zShapeInfo,
              sd::LongType const *dZShapeInfo, sd::LongType n, sd::LongType *indexes, sd::LongType const *tadShapeInfo,
              sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // launch configuration (presumably blocks / threads / shared-mem bytes —
    // the kernel generic consumes it; TODO confirm the convention)
    dim3 launchDims(64, 256, 1024);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
                          (launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets,
                           zTadShapeInfo, zTadOffsets),
                          SD_COMMON_TYPES);

    DEBUG_KERNEL(stream, -1);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Averages `n` arrays of `length` elements into z. Mode is read from
 * extras[3] (via getDeviceId): 0 runs the averaging kernel on device using
 * the device pointer array `dx`; any other mode runs the generic host path on
 * `x`/`z`. Dispatch is by x's data type. Exceptions are recorded on the
 * default LaunchContext's error reference.
 */
void average(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
             sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
             sd::LongType const *dzShapeInfo, int n, sd::LongType length, bool propagate) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (sd::Environment::getInstance().isDebugAndVerbose()) printf("averageFloat called\n");

    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    // launching on gpu
    if (mode == 0) {
      dim3 launchDims(256, 256, 4096);
      BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
                            SD_COMMON_TYPES);
      sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
    } else {
      // launching on host memory
      BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
                            SD_COMMON_TYPES);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Accumulates (sums) `n` arrays of `length` elements into z. Mode is read
 * from extras[3] (via getDeviceId): 0 runs the accumulate kernel on device
 * using the device pointer array `dx`; any other mode runs the generic host
 * path on `x`/`z`. Dispatch is by x's data type. Exceptions are recorded on
 * the default LaunchContext's error reference.
 */
void accumulate(sd::Pointer *extras, sd::Pointer *x, sd::LongType const *xShapeInfo, sd::Pointer *dx,
                sd::LongType const *dXShapeInfo, void *z, sd::LongType const *zShapeInfo, void *dz,
                sd::LongType const *dzShapeInfo, int n, sd::LongType length) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    int mode = getDeviceId(extras[3]);

    auto dX = reinterpret_cast<void **>(dx);

    if (sd::Environment::getInstance().isDebugAndVerbose()) printf("accumulateFloat called\n");
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);

    // launching on gpu
    if (mode == 0) {
      dim3 launchDims(n, 256, 16384);
      BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length), SD_COMMON_TYPES);
      sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
    } else {
      // launching on host memory
      BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
                            SD_COMMON_TYPES);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
/**
 * Shuffles TADs across N arrays on device according to `shuffleMap`.
 * All array/shape arguments arrive as arrays-of-pointers (one entry per
 * input). extras[1] is the stream; dispatch is by the first array's dtype.
 * Exceptions are recorded on the default LaunchContext's error reference.
 */
void shuffle(sd::Pointer *extras, sd::Pointer *x, sd::Pointer *xShapeInfo, sd::Pointer *dx, sd::Pointer *dXShapeInfo,
             sd::Pointer *z, sd::Pointer *zShapeInfo, sd::Pointer *dz, sd::Pointer *dZShapeInfo, int N, int *shuffleMap,
             sd::Pointer *tadShapeInfo, sd::Pointer *tadOffsets) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);

    auto dX = reinterpret_cast<void **>(dx);
    auto dZ = reinterpret_cast<void **>(dz);
    auto xShape = reinterpret_cast<sd::LongType **>(xShapeInfo);
    auto dxShape = reinterpret_cast<sd::LongType **>(dXShapeInfo);
    auto tadOnlyShapeInfo = reinterpret_cast<sd::LongType **>(tadShapeInfo);
    auto tadOffset = reinterpret_cast<sd::LongType **>(tadOffsets);

    auto xType = sd::ArrayOptions::dataType(xShape[0]);
    dim3 launchDims(256, 512, 8192);
    BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
                          (launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
                          SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Whether this binary was built with experimental features enabled.
bool isExperimentalEnabled() { return sd::Environment::getInstance().isExperimentalBuild(); }

// Sets the minimum thread count, clamped to [32, maxThreads].
void setOmpMinThreads(int threads) {
  minThreads = sd::math::sd_max<int>(32, threads);
  minThreads = sd::math::sd_min<int>(maxThreads, minThreads);
}

// Currently active device id, per the affinity manager.
int getDevice() { return sd::AffinityManager::currentDeviceId(); }

// CPU-backend tuning knobs with no meaning on CUDA; kept for API parity.
void setElementThreshold(int num) {
  // this is no-op for CUDA
}

void setTADThreshold(int num) {
  // this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: runs a full-array summary-statistics op (variance/stdev
 * family, selected by opNum) over dbX, writing the scalar-ish result to dbZ.
 * extraPointers [1],[4],[5],[3] build the LaunchContext; device shape buffers
 * come from the ConstantShapeHelper cache (dXShapeInfo/dZShapeInfo unused).
 * Exceptions are recorded on the default LaunchContext's error reference.
 */
void execSummaryStats(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                      sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                      sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, bool biasCorrected) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStats(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                          extraParams, dbZ->primary(), hZShapeInfo, dbZ->special(),
                                          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                          biasCorrected);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: runs a summary-statistics op along the given dimensions
 * (TAD variant). The dimension indices are read from dbDimension's primary
 * (host) buffer for length, while its special (device) buffer is handed to
 * the executioner. Exceptions are recorded on the default LaunchContext.
 */
void execSummaryStatsTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                         sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbZ,
                         sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo,
                         OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                         sd::LongType const *dDimensionShape, bool biasCorrected, sd::LongType const *tadShapeInfo,
                         sd::LongType const *tadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // NOTE(review): `dimension` (host view) is computed but not passed below —
    // the device view dbDimension->special() is used instead; confirm intended.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execSummaryStats(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, tadShapeInfo, tadOffsets, biasCorrected);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: runs a pairwise reduce3 op (opNum; e.g. distance-style
 * reductions over x and y) producing dbZ. extraPointers [1],[4],[5],[3] build
 * the LaunchContext; device shape buffers come from the ConstantShapeHelper
 * cache. Exceptions are recorded on the default LaunchContext.
 */
void execReduce3(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                 sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                 sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                     extraParams, dbY->primary(), hYShapeInfo, dbY->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
                                     dbZ->primary(), hZShapeInfo, dbZ->special(),
                                     ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: runs a pairwise reduce3 op along dimensions (TAD variant).
 * When the TAD length equals the length of x or y, the op degenerates to the
 * plain dimensional reduce3 path; otherwise the dedicated TAD kernel runs.
 * extraPointers [1],[4],[5],[3] build the LaunchContext. Exceptions are
 * recorded on the default LaunchContext's error reference.
 */
void execReduce3Tad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *tadOnlyShapeInfo, sd::LongType const *tadOffsets,
                    sd::LongType const *yTadOnlyShapeInfo, sd::LongType const *yTadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    // probe the x-side TAD length to decide which execution path applies
    auto tadPack =
        sd::ConstantTadHelper::getInstance().tadForDimensions(hXShapeInfo, dimension, shape::length(hDimensionShape));
    auto tadLength = shape::length(tadPack.primaryShapeInfo());
    auto yLength = shape::length(hYShapeInfo);
    auto xLength = shape::length(hXShapeInfo);

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);

    if (tadLength == yLength || tadLength == xLength) {
      // sd_printf("== way\n","");
      NativeOpExecutioner::execReduce3(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
    } else
      NativeOpExecutioner::execReduce3TAD(
          &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
          hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
          dbZ->primary(), hZShapeInfo, dbZ->special(),
          ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dimension, dimensionLength,
          // FIX: original passed yTadOffsets in the x-TAD offsets slot
          // (tadOnlyShapeInfo, yTadOffsets, ...) — an evident copy/paste typo;
          // pair tadOnlyShapeInfo with tadOffsets as in the branch above.
          tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: runs a pairwise reduce3 op over the whole of x and y,
 * producing a scalar result in dbZ. extraPointers [1],[4],[5],[3] build the
 * LaunchContext; device shape buffers come from the ConstantShapeHelper
 * cache. Exceptions are recorded on the default LaunchContext.
 */
void execReduce3Scalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, void *extraParams, OpaqueDataBuffer *dbY,
                       sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                       sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3Scalar(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbY->primary(),
        hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
        dbZ->primary(), hZShapeInfo, dbZ->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special());

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: applies a boolean scalar op (opNum) element-wise — x (op)
 * scalar -> z. extraPointers [1],[4],[5],[3] build the LaunchContext; device
 * shape buffers come from the ConstantShapeHelper cache. Exceptions are
 * recorded on the default LaunchContext's error reference.
 */
void execScalarBool(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                    sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
                    sd::LongType const *dScalarShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalarBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->primary(), hZShapeInfo,
        dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dbScalar->primary(), hScalarShapeInfo, dbScalar->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), extraParams);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
/**
 * Interop wrapper: applies a boolean scalar op along dimensions (TAD variant)
 * with one scalar per TAD from dbScalars. Dimension indices are read from
 * dbDimension's primary (host) buffer. Exceptions are recorded on the default
 * LaunchContext's error reference.
 *
 * NOTE(review): the argument order here (extraParams right after the x
 * buffers) differs from the non-TAD execScalarBool wrapper above — presumably
 * matching the TAD overload's signature; confirm against NativeOpExecutioner.
 */
void execScalarBoolTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                       sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                       sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars,
                       sd::LongType const *hScalarShapeInfo, sd::LongType const *dScalarShapeInfo, void *extraParams,
                       OpaqueDataBuffer *dbDimension, sd::LongType const *hDimensionShape,
                       sd::LongType const *dDimensionShape, sd::LongType const *tadShapeInfo,
                       sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
                       sd::LongType const *tadOffsetsZ) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalarBool(
        &lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), extraParams, dbZ->primary(),
        hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
        dbScalars->primary(), hScalarShapeInfo, dbScalars->special(),
        ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special(), dimension, dimensionLength,
        tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Applies a scalar op element-wise: dbX (op) dbScalar -> dbZ, delegating to
// NativeOpExecutioner. Errors are recorded on the default context's error
// reference instead of being thrown to the caller.
void execScalar(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalar, sd::LongType const *hScalarShapeInfo,
                sd::LongType const *dScalarShapeInfo, void *extraParams) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});

    // resolve cached device-side shape descriptors up front
    auto dXShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special();
    auto dZShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special();
    auto dSShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hScalarShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execScalar(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), dXShape, dbZ->primary(),
                                    hZShapeInfo, dbZ->special(), dZShape, dbScalar->primary(), hScalarShapeInfo,
                                    dbScalar->special(), dSShape, extraParams);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Applies a scalar op per-TAD: each TAD of dbX is combined with the matching entry
// of dbScalars, result written to dbZ. Unlike most siblings in this file, it
// launches the CUDA kernels directly through the selector macros rather than
// delegating to NativeOpExecutioner.
void execScalarTad(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ, sd::LongType const *hZShapeInfo,
                   sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbScalars, sd::LongType const *hScalarShapeInfo,
                   sd::LongType const *dScalarShapeInfo, void *extraParams, OpaqueDataBuffer *dbDimension,
                   sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                   sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *tadShapeInfoZ,
                   sd::LongType const *tadOffsetsZ) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // dimension indices come from the host mirror of dbDimension
    // NOTE(review): this host pointer is handed to executeCudaAlongDimension —
    // presumably consumed host-side by the launcher; confirm.
    auto dimension = reinterpret_cast<int *>(dbDimension->primary());
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

    auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
    auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
    auto zType = sd::ArrayOptions::dataType(hZShapeInfo);

    // mixed input types are only allowed in experimental builds (BOOL scalars excepted)
    if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
      throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType);

    dim3 launchDims(256, 256, 16384);

#ifdef SD_EXPERIMENTAL_ENABLED
    BUILD_PAIRWISE_SELECTOR(
        xType, yType, zType, functions::scalar::ScalarTransform,
        ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams,
                                    dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
        SD_COMMON_TYPES, SD_COMMON_TYPES);
#else
    // non-experimental path: all three type slots resolved from xType
    BUILD_SINGLE_SELECTOR_THRICE(
        xType, functions::scalar::ScalarTransform,
        ::executeCudaAlongDimension(
            launchDims, stream, opNum, dbX->special(),
            ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(), dbZ->special(),
            ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(), dbScalars->special(),
            extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ),
        SD_COMMON_TYPES);
#endif

    DEBUG_KERNEL(stream, opNum);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Legacy aggregate API: empty body (no-op) on this backend — presumably
// deprecated and kept only to satisfy the NativeOps interface; confirm.
void execAggregate(sd::Pointer *extraPointers, int opNum, void **arguments, int numArguments, sd::LongType **shapes,
                   int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays,
                   void *realArguments, int numRealArguments, sd::DataType dtype) {}
// Legacy batched-aggregate dispatcher: empty body (no-op) on this backend.
void batchExecutor(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
                   int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
                   sd::DataType dtype) {}
// Legacy batched-aggregate API: empty body (no-op) on this backend.
void execAggregateBatch(sd::Pointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes,
                        int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments,
                        sd::DataType dtype) {}
////////////////////////////////////////////////////////////////////////
// Fills dbZ with values produced by the selected random op, using the RNG state
// supplied in stateHost. Errors land on the default context's error reference.
void execRandom(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbZ,
                sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {});

    // cached device-side shape descriptor for the output
    auto dZShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape,
                                    extraArguments);

    InteropDataBuffer::registerSpecialUse({dbZ}, {});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Single-input random op: transforms dbX into dbZ using the RNG state in
// stateHost. Errors land on the default context's error reference.
void execRandom2(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
                 sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});

    // resolve cached device-side shape descriptors up front
    auto dXShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special();
    auto dZShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                    dbZ->primary(), hZShapeInfo, dbZ->special(), dZShape, extraArguments);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
////////////////////////////////////////////////////////////////////////
// Two-input random op: combines dbX and dbY into dbZ using the RNG state in
// stateHost. Errors land on the default context's error reference.
void execRandom3(sd::Pointer *extraPointers, int opNum, sd::Pointer stateHost, OpaqueDataBuffer *dbX,
                 sd::LongType const *hXShapeInfo, sd::LongType const *dXShapeInfo, OpaqueDataBuffer *dbY,
                 sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                 sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, void *extraArguments) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});

    // resolve cached device-side shape descriptors up front
    auto dXShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special();
    auto dYShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special();
    auto dZShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special();

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execRandom(&lc, opNum, stateHost, dbX->primary(), hXShapeInfo, dbX->special(), dXShape,
                                    dbY->primary(), hYShapeInfo, dbY->special(), dYShape, dbZ->primary(), hZShapeInfo,
                                    dbZ->special(), dZShape, extraArguments);

    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
  }
}
// Creates a paired host/device RandomBuffer: generates the Xoroshiro128 sequence
// on the host, then asynchronously uploads it to the device buffer.
//
// @param extraPointers [0] = host buffer, [1] = cudaStream_t*
// @param seed          initial RNG seed
// @param bufferSize    number of 64-bit words in the buffer (not bytes)
// @param ptrToBuffer   pre-allocated device buffer of bufferSize words
// @return              heap-allocated RandomBuffer; caller frees via destroyRandom()
sd::Pointer initRandom(sd::Pointer *extraPointers, long seed, long bufferSize, sd::Pointer ptrToBuffer) {
  unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
  cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

  // we don't synchronize at random initialization, it's safe to go unsync here

  auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
  auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost),
                                             reinterpret_cast<uint64_t *>(ptrDev));
  buffer->propagateToDevice(buffer, *stream);

  sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");

  // we generate sequence in the host memory
  sd::random::Xoroshiro128 generator(buffer);
  generator.refreshBuffer();

  // and copy it to gpu — bufferSize counts 64-bit words (was a magic `* 8`)
  cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * sizeof(uint64_t), cudaMemcpyHostToDevice, *stream);
  sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");

  return buffer;
}
// Releases a RandomBuffer previously created by initRandom().
void destroyRandom(sd::Pointer ptrBuffer) {
  auto rng = reinterpret_cast<sd::random::RandomBuffer *>(ptrBuffer);

  // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice
  cudaDeviceSynchronize();

  delete rng;
}
// Re-seeds an existing RandomBuffer, regenerates the host-side random sequence,
// and asynchronously uploads the fresh sequence to the device buffer.
//
// @param extraPointers [0] = host buffer, [1] = cudaStream_t*
// @param seed          new RNG seed
// @param ptrRandom     RandomBuffer created by initRandom()
void refreshBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
  sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);

  unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
  cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
  // wait for in-flight work that may still be consuming the old state
  cudaStreamSynchronize(*stream);

  uint64_t *ptrDev = buffer->getDeviceBuffer();

  // update rng state
  buffer->setSeed(seed);
  buffer->setOffset(0);
  buffer->propagateToDevice(buffer, *stream);

  // refresh buffer on host side
  sd::random::Xoroshiro128 generator(buffer);
  generator.refreshBuffer();

  // copy back to gpu — getSize() counts 64-bit words (was a magic `* 8`)
  cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * sizeof(uint64_t), cudaMemcpyHostToDevice, *stream);
}
// Re-seeds an existing RandomBuffer in place and mirrors the new state to the
// device (no regeneration of the host sequence here — compare refreshBuffer()).
void reSeedBuffer(sd::Pointer *extraPointers, long seed, sd::Pointer ptrRandom) {
  auto rng = reinterpret_cast<sd::random::RandomBuffer *>(ptrRandom);
  auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

  // wait for in-flight work that may still be consuming the old state
  cudaStreamSynchronize(*stream);

  // update rng state, then push it to the device
  rng->reSeed(seed);
  rng->setOffset(0);
  rng->propagateToDevice(rng, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return
*/
// Computes the length (in elements) of a shape-info buffer from its rank.
int lengthForShapeBufferPointer(sd::Pointer buffer) {
  auto shapeInfo = reinterpret_cast<sd::LongType *>(buffer);
  auto rank = shape::rank(shapeInfo);
  return shape::shapeInfoLength(rank);
}
/**
 * Reinterprets a numeric address value as a native pointer.
 *
 * @param address the raw address value
 * @return the same address as an opaque pointer
 */
sd::Pointer pointerForAddress(sd::LongType address) { return reinterpret_cast<sd::Pointer>(address); }
// Splits dbX into per-TAD views and copies each TAD into the corresponding
// target buffer. Fix: the debug error label said "tearFloat" — presumably a
// leftover from a pre-generic version — while the function is `tear`.
//
// @param extras [1] = cudaStream_t*
// @param targets one device destination pointer per TAD
void tear(sd::Pointer *extras, OpaqueDataBuffer *dbX, sd::LongType const *xShapeInfo, sd::LongType const *dXShapeInfo,
          sd::Pointer *targets, sd::LongType const *zShapeInfo, sd::LongType const *tadShapeInfo,
          sd::LongType const *tadOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({}, {dbX});

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
    dim3 launchDims(512, 512, 512);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(
        xType, tearKernelGeneric,
        (launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
        SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "tear(...) failed");

    InteropDataBuffer::registerSpecialUse({}, {dbX});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Recursive work-efficient parallel prefix scan (appears to follow the classic
// GPU Gems 3 prescan implementation): scans dX into dZ block-by-block, then
// recursively scans the per-block sums (g_scanBlockSums[level]) and adds them
// back to each block's results.
//
// extras[1] = cudaStream_t*, extras[2] = per-level block-sum buffers.
void prescanArrayRecursive(sd::Pointer *extras, int *dZ, int *dX, int numElements, int level) {
  auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
  auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);

  int blockSize = 512;  // max size of the thread blocks
  int numBlocks = sd::math::sd_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
  int numThreads;

  // each thread scans two elements, hence the halving below
  if (numBlocks > 1)
    numThreads = blockSize;
  else if (sd::isPowerOfTwo(numElements))
    numThreads = numElements / 2;
  else
    numThreads = sd::floorPow2(numElements);

  int numEltsPerBlock = numThreads * 2;

  // if this is a non-power-of-2 array, the last block will be non-full
  // compute the smallest power of 2 able to compute its scan.
  int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
  int numThreadsLastBlock = sd::math::sd_max<int>(1, numEltsLastBlock / 2);
  int np2LastBlock = 0;
  int sharedMemLastBlock = 0;

  if (numEltsLastBlock != numEltsPerBlock) {
    np2LastBlock = 1;
    if (!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock);

    unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
    sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
  }

  // padding space is used to avoid shared memory bank conflicts
  int extraSpace = numEltsPerBlock / NUM_BANKS;
  int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);

  // setup execution parameters
  // if NP2, we process the last block separately
  dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
  dim3 threads(numThreads, 1, 1);
  dim3 gridOnes(1, 1, 1);
  dim3 threadsOnes(numThreadsLastBlock, 1, 1);

  // floor on dynamic shared memory requested per launch
  if (sharedMemSize < 2048) sharedMemSize = 2048;

  if (sharedMemLastBlock < 2048) sharedMemLastBlock = 2048;

  // execute the scan
  if (numBlocks > 1) {
    sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level],
                                     numThreads * 2, 0, 0);
    if (np2LastBlock) {
      // scan the ragged tail block with its own (power-of-two padded) configuration
      sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level],
                                      numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
    }

    // After scanning all the sub-blocks, we are mostly done. But now we
    // need to take all of the last values of the sub-blocks and scan those.
    // This will give us a new value that must be added to each block to
    // get the final results.

    // recursive (CPU) call
    prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1);

    sd::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);

    if (np2LastBlock) {
      sd::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock,
                                                                numBlocks - 1, numElements - numEltsLastBlock);
    }
  } else if (isPowerOfTwo(numElements)) {
    // single full block, power-of-two length: no block sums needed
    sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
  } else {
    // single non-power-of-two block
    sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
  }

  sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
////////////////////////////////////////////////////////////////////////
// Reduce3 "all" variant: reduces every TAD of X against every TAD of Y along the
// given dimensions, writing the full cross product of results to Z.
// Fix: removed the unused local `dimension` (a host pointer derived from
// dbDimension->primary() that was never passed anywhere — the executioner call
// uses dbDimension->special() directly).
void execReduce3All(sd::Pointer *extraPointers, int opNum, OpaqueDataBuffer *dbX, sd::LongType const *hXShapeInfo,
                    sd::LongType const *dXShapeInfo, void *extraParamsVals, OpaqueDataBuffer *dbY,
                    sd::LongType const *hYShapeInfo, sd::LongType const *dYShapeInfo, OpaqueDataBuffer *dbZ,
                    sd::LongType const *hZShapeInfo, sd::LongType const *dZShapeInfo, OpaqueDataBuffer *dbDimension,
                    sd::LongType const *hDimensionShape, sd::LongType const *dDimensionShape,
                    sd::LongType const *xTadShapeInfo, sd::LongType const *xOffsets, sd::LongType const *yTadShapeInfo,
                    sd::LongType const *yOffsets) {
  try {
    InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
    InteropDataBuffer::preparePrimaryUse({}, {dbDimension});

    // only the dimension count is read host-side
    int dimensionLength = static_cast<int>(shape::length(hDimensionShape));

    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    NativeOpExecutioner::execReduce3All(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hXShapeInfo).special(),
                                        extraParamsVals, dbY->primary(), hYShapeInfo, dbY->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hYShapeInfo).special(),
                                        dbZ->primary(), hZShapeInfo, dbZ->special(),
                                        ConstantShapeHelper::getInstance().bufferForShapeInfo(hZShapeInfo).special(),
                                        reinterpret_cast<int *>(dbDimension->special()), dimensionLength, xTadShapeInfo,
                                        xOffsets, yTadShapeInfo, yOffsets);

    // NOTE(review): dbDimension was prepared above but is not in the register
    // list — mirrors the original; confirm whether it should appear here too.
    InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts the device buffer dX in place (the host mirror x is not touched here).
// Power-of-two lengths up to ~10M elements take the plain bitonic-sort path;
// other lengths use the arbitrary-interval bitonic variant.
// Fix: removed unused locals `xEWS`, `dg` and `half`.
void sort(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
          sd::LongType const *dXShapeInfo, bool descending) {
  try {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);

    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;

      dim3 launchDims(numBlocks, numThreads, 32768);

      // classic bitonic network: k is the sorted-run size, j the compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
                                (launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending), SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);

      dim3 launchDims(numBlocks, numThreads, 32768);

      // smallest power of two strictly greater than xLength bounds the window sweep
      int max = 2;
      while (max < xLength) max <<= 1;
      max <<= 1;

      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
                                (launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending), SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }

    sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts the device key buffer dX in place, applying the identical permutation to
// the paired value buffer dy. Empty inputs are a no-op; key/value lengths must
// match. Fixes: removed unused locals `xEWS`, `dg`, `half`; added the trailing
// checkErrorCode call for consistency with sort().
void sortByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
               sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
               sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);

    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;

    if (xLength != yLength) throw std::runtime_error("sortByKey: keys and values must have the same size");

    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;

      dim3 launchDims(numBlocks, numThreads, 32768);

      // classic bitonic network: k is the sorted-run size, j the compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);

      dim3 launchDims(numBlocks, numThreads, 32768);

      // smallest power of two strictly greater than xLength bounds the window sweep
      int max = 2;
      while (max < xLength) max <<= 1;
      max <<= 1;

      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }

    sd::DebugHelper::checkErrorCode(stream, "sortByKey(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts the key/value pair by the VALUES array: dy drives the ordering and dX is
// permuted along with it. The type roles are deliberately swapped relative to
// sortByKey — the first template slot matches the buffer passed first to the
// kernel (dy). Fixes: removed unused locals `xEWS`, `dg`, `half`; added the
// trailing checkErrorCode call for consistency with sort().
void sortByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                 sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                 sd::LongType const *dyShapeInfo, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    auto xLength = shape::length(xShapeInfo);
    auto yLength = shape::length(yShapeInfo);
    // intentional swap: values (y) occupy the primary/"key" template slot below
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);

    if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo)) return;

    if (xLength != yLength) throw std::runtime_error("sortByValue: keys and values must have the same size");

    // check if xLength is a power of 2, and use bitonic sort, if that's the case
    if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;

      dim3 launchDims(numBlocks, numThreads, 32768);

      // classic bitonic network: k is the sorted-run size, j the compare distance
      for (int k = 2; k <= xLength; k = 2 * k) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
        }
      }
    } else {
      int numThreads = sd::math::sd_min<int>(512, xLength);
      int numBlocks = xLength / numThreads;
      if (xLength % numThreads > 0 || numBlocks == 0) numBlocks++;
      numBlocks = sd::math::sd_min<int>(512, numBlocks);

      dim3 launchDims(numBlocks, numThreads, 32768);

      // smallest power of two strictly greater than xLength bounds the window sweep
      int max = 2;
      while (max < xLength) max <<= 1;
      max <<= 1;

      for (int window = 2; window < max; window <<= 1) {
        int n = window;
        int rev = 0;
        do {
          BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
                                (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
                                SD_COMMON_TYPES, SD_COMMON_TYPES);
          n >>= 1;
          rev = 1;
        } while (n > 1);
      }
    }

    sd::DebugHelper::checkErrorCode(stream, "sortByValue(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts each TAD (sub-array along `dimension`) of dX by key, applying the same
// permutation to the paired values in dy, via the "oes" TAD kernel (presumably
// odd-even sort). One launch block per TAD.
void sortTadByKey(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                  sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                  sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // NOTE(review): `context` is resolved but not used below — confirm whether it
    // is needed (defaultContext() may lazily initialize state as a side effect).
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);

    // one block per TAD, 256 threads, 2048 bytes of shared memory
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    auto yType = sd::ArrayOptions::dataType(yShapeInfo);

    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts each TAD by the VALUES array: dy drives the ordering and dX is permuted
// along with it. The type roles and buffer order are deliberately swapped
// relative to sortTadByKey (values occupy the primary kernel slot).
void sortTadByValue(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
                    sd::LongType const *dXShapeInfo, void *y, sd::LongType const *yShapeInfo, void *dy,
                    sd::LongType const *dyShapeInfo, int *dimension, int dimensionLength, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // NOTE(review): `context` is resolved but not used below — confirm whether it
    // is needed (defaultContext() may lazily initialize state as a side effect).
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);

    // one block per TAD, 256 threads, 2048 bytes of shared memory
    dim3 launchDims((int)tadPack.numberOfTads(), 256, 2048);
    // intentional swap: values (y) take the primary template slot
    auto xType = sd::ArrayOptions::dataType(yShapeInfo);
    auto yType = sd::ArrayOptions::dataType(xShapeInfo);

    BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
                          (launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength,
                           tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
                          SD_COMMON_TYPES, SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Sorts each TAD of dX in place via the "oes" TAD kernel (presumably odd-even
// sort); one launch block per TAD. Fix: shared-memory size 33768 looked like a
// typo for 32768 (every sibling sort routine in this file uses 32768); also
// dropped the stale "to be implemented" comment.
void sortTad(sd::Pointer *extraPointers, void *x, sd::LongType const *xShapeInfo, void *dX,
             sd::LongType const *dXShapeInfo, int *dimension, int dimensionLength, sd::LongType const *tadShapeInfo,
             sd::LongType const *tadOffsets, bool descending) {
  try {
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
    // NOTE(review): `context` is resolved but unused below; kept in case
    // defaultContext() initializes the default context as a side effect.
    auto context =
        extraPointers[0] == 0 ? LaunchContext::defaultContext() : reinterpret_cast<LaunchContext *>(extraPointers[0]);
    auto tadPack = sd::ConstantTadHelper::getInstance().tadForDimensions(xShapeInfo, dimension, dimensionLength);

    // one block per TAD; 32768 bytes of shared memory, consistent with the other sorts
    dim3 launchDims((int)tadPack.numberOfTads(), 512, 32768);
    auto xType = sd::ArrayOptions::dataType(xShapeInfo);
    BUILD_SINGLE_SELECTOR(
        xType, oesTadGeneric,
        (launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
        SD_COMMON_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Not implemented on this backend: always throws.
void sortCooIndices(sd::Pointer *extraPointers, sd::LongType *indices, void *values, sd::LongType length,
                    const sd::LongType *xShapeInfo) {
  throw std::runtime_error("sortCooIndices:: Not implemented yet");
}

// Not implemented on this backend: always throws.
void ravelMultiIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                     sd::LongType *shapeInfo, int mode) {
  throw std::runtime_error("ravelMultiIndex:: Not implemented yet");
}

// Not implemented on this backend: always throws.
void unravelIndex(sd::Pointer *extraPointers, sd::LongType *indices, sd::LongType *flatIndices, sd::LongType length,
                  sd::LongType *shapeInfo) {
  throw std::runtime_error("unravelIndex:: Not implemented yet");
}
// Memory-mapped file support is not available on this backend: always returns nullptr.
sd::LongType *mmapFile(sd::Pointer *extraPointers, const char *fileName, sd::LongType length) { return nullptr; }
// No-op: nothing to unmap since mmapFile never maps anything on this backend.
void munmapFile(sd::Pointer *extraPointers, sd::LongType *ptrMap, sd::LongType length) {}
// Executes a flatbuffers-serialized graph and returns its wrapped result, or
// nullptr on failure (with the error recorded on the default context).
sd::graph::ResultWrapper *executeFlatGraph(sd::Pointer *extraPointers, sd::Pointer flatBufferPointer) {
  try {
    // the executioner handles the entire graph lifecycle
    return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
    return nullptr;
  }
}
// Length of the flatbuffers execution result held by the wrapper.
sd::LongType getResultWrapperSize(sd::graph::ResultWrapper *ptr) { return ptr->size(); }
// Raw pointer to the payload held by the wrapper.
sd::Pointer getResultWrapperPointer(sd::graph::ResultWrapper *ptr) { return ptr->pointer(); }
// String listing of all registered custom operations (owned by the registrator).
const char *getAllCustomOps() { return sd::ops::OpRegistrator::getInstance().getAllCustomOperations(); }
// Shape-inference helper used by calculateOutputShapes2: builds a transient
// Context holding the scalar args and input arrays, then asks the op for its
// output shapes.
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  sd::graph::VariableSpace varSpace;
  Context block(2, &varSpace);  // fake node id 2, used only for this inference call
  sd::ShapeList inShapes;

  for (int e = 0; e < numIArgs; e++) block.getIArguments()->push_back(iArgs[e]);

  for (int e = 0; e < numTArgs; e++) block.getTArguments()->push_back(tArgs[e]);

  for (int e = 0; e < numBArgs; e++) block.getBArguments()->push_back(bArgs[e]);

  for (int e = 0; e < numDArgs; e++) block.getDArguments()->push_back((sd::DataType)dArgs[e]);

  for (int e = 0; e < numInputShapes; e++) {
    auto shape_ = reinterpret_cast<sd::LongType *>(inputShapes[e]);

    // we shouldn't copy buffer if that's empty array
    void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    // device buffers follow the host buffers in inputBuffers
    void *bufferD_ =
        sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];

    auto array = new sd::NDArray(buffer_, bufferD_, shape_);

    // block should contain references to proper variable
    // NOTE(review): `array` is presumably owned and freed by varSpace — confirm
    varSpace.putVariable(1, e, array);
    block.pickInput(1, e);

    inShapes.push_back(shape_);
  }

  auto shapeList = op->calculateOutputShape(&inShapes, block);

  // detach the result from the workspace so it survives varSpace destruction
  if (varSpace.launchContext()->getWorkspace() != nullptr) shapeList->detach();

  return shapeList;
}
// Resolves a custom op by hash and infers its output shapes from the given
// inputs and arguments. Returns nullptr on failure (error recorded on the
// default context).
sd::ShapeList *calculateOutputShapes2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers,
                                      sd::Pointer *inputShapes, int numInputShapes, double *tArgs, int numTArgs,
                                      sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs,
                                      int numDArgs) {
  try {
    // look the op up by its hash, then delegate to the shared inference helper
    auto operation = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, operation, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
                                  iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
    return nullptr;
  }
}
// Shape-inference helper for the buffer-less variant: only shapes and scalar
// args are provided, no array data.
sd::ShapeList *_calculateOutputShapes(sd::Pointer *extraPointers, sd::ops::DeclarableOp *op, sd::Pointer *inputShapes,
                                      int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                      int numIArgs) {
  Context block(1);
  sd::ShapeList inShapes;

  for (int i = 0; i < numIArgs; ++i) block.getIArguments()->push_back(iArgs[i]);
  for (int i = 0; i < numTArgs; ++i) block.getTArguments()->push_back(tArgs[i]);
  for (int i = 0; i < numInputShapes; ++i) inShapes.push_back(reinterpret_cast<sd::LongType *>(inputShapes[i]));

  return op->calculateOutputShape(&inShapes, block);
}
// Resolves a custom op by hash and infers its output shapes from shapes and
// scalar args only (no buffers). Returns nullptr on failure (error recorded on
// the default context).
sd::ShapeList *calculateOutputShapes(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputShapes,
                                     int numInputShapes, double *tArgs, int numTArgs, sd::LongType *iArgs,
                                     int numIArgs) {
  try {
    auto operation = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return _calculateOutputShapes(extraPointers, operation, inputShapes, numInputShapes, tArgs, numTArgs, iArgs,
                                  numIArgs);
  } catch (std::exception &e) {
    auto errRef = sd::LaunchContext::defaultContext()->errorReference();
    errRef->setErrorCode(1);
    errRef->setErrorMessage(e.what());
    return nullptr;
  }
}
// Number of shape-info entries held by the list.
sd::LongType getShapeListSize(sd::ShapeList *list) { return list->size(); }
// The i-th shape-info buffer in the list.
sd::LongType const *getShape(sd::ShapeList *list, sd::LongType i) { return list->at(i); }
// Executes a DeclarableOp directly (outside of any Graph), given raw buffer and
// shape-info pointers. Buffer layout convention: inputBuffers/outputBuffers hold
// host pointers in [0, n) and the matching device pointers in [n, 2n) — see the
// `e + numInputs` / `e + numOutputs` indexing below.
// NOTE(review): op->execute()'s status (dZ) is computed but Status::OK is
// returned unconditionally — confirm callers do not rely on the real status.
static SD_INLINE sd::Status realExec(sd::ops::DeclarableOp *op, sd::Pointer *extraPointers, sd::LongType hash,
                                     sd::Pointer *inputBuffers, sd::Pointer *inputShapes, int numInputs,
                                     sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs,
                                     double *tArgs, int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs,
                                     int numBArgs, bool isInplace) {
  if (op == nullptr) sd_printf("Can't find requested operation: [%lld]\n", hash);

  // we're using the same fake nodeId everywhere here
  std::vector<sd::NDArray *> inputs(numInputs);
  std::vector<sd::NDArray *> outputs(numOutputs);
  std::vector<double> ttArgs(numTArgs);
  std::vector<bool> bbArgs(numBArgs);
  std::vector<sd::LongType> iiArgs(numIArgs);

  // filling block now with inputs
  for (int e = 0; e < numInputs; e++) {
    auto shape = reinterpret_cast<sd::LongType *>(inputShapes[e]);
    // empty arrays carry no data, so both host and device buffers stay null
    void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
    void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];

    inputs[e] = new sd::NDArray(buffer, bufferD, shape);
  }

  // if not inplace - transferring output arrays
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      // we want to keep original output shape intact
      auto shape = shape::copyShape(reinterpret_cast<sd::LongType *>(outputShapes[e]));
      void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
      void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];

      // FIXME: revisit this.
      // only zero the output buffer when no input aliases it, so in-place-style
      // calls that reuse an input buffer as output are not clobbered
      bool canNullify = true;
      for (int i = 0; i < numInputs; i++) {
        void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
        if (ibuffer == buffer) {
          canNullify = false;
          break;
        }
      }

      if (canNullify && buffer != nullptr)
        memset((uint8_t *)buffer, '\0',
               shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));

      auto array = new sd::NDArray(buffer, bufferD, shape);
      outputs[e] = array;
    }

  for (int e = 0; e < numIArgs; e++) iiArgs[e] = iArgs[e];
  for (int e = 0; e < numTArgs; e++) ttArgs[e] = tArgs[e];
  for (int e = 0; e < numBArgs; e++) bbArgs[e] = bArgs[e];

  // hypothetically at this point we have everything filled
  auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
  // auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);

  // restore the caller-requested element ordering on every output
  if (!isInplace)
    for (int e = 0; e < numOutputs; e++) {
      if (outputs[e]->ordering() != shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])))
        outputs[e]->streamline(shape::order(reinterpret_cast<sd::LongType *>(outputShapes[e])));
    }

  for (auto v : inputs) delete v;
  for (auto v : outputs) delete v;

  return Status::OK;
}
// Looks up the custom op by hash and runs it through realExec(), trapping any
// exception into the default LaunchContext error reference.
Status execCustomOp(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer *inputBuffers, sd::Pointer *inputShapes,
                    int numInputs, sd::Pointer *outputBuffers, sd::Pointer *outputShapes, int numOutputs, double *tArgs,
                    int numTArgs, sd::LongType *iArgs, int numIArgs, bool *bArgs, int numBArgs, bool isInplace) {
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
                    numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Executes a custom op through a pre-populated Context (fast path). After the
// op returns, the context's CUDA stream is synchronized and the fastpath arrays
// are synced; non-zero stream status is promoted to a cuda_exception.
Status execCustomOp2(sd::Pointer *extraPointers, sd::LongType hash, sd::Pointer opContext) {
  try {
    auto op = sd::ops::OpRegistrator::getInstance().getOperation(hash);
    auto context = reinterpret_cast<Context *>(opContext);

    auto result = op->execute(context);

    auto stream = context->launchContext()->getCudaStream();
    auto res = cudaStreamSynchronize(*stream);
    if (res != 0) throw sd::cuda_exception::build("customOp execution failed", res);

    for (auto arr : context->fastpath_in())
      if (!arr->isEmpty()) arr->syncToDevice();

    for (auto arr : context->fastpath_out())
      if (!arr->isEmpty()) arr->syncToDevice();

    return result;
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Deserializes a FlatBuffers-encoded graph and registers it in the global
// GraphHolder under graphId, so it can later be executed by id.
Status registerGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer flatBufferPointer) {
  try {
    auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
    sd::graph::GraphHolder::getInstance().registerGraph(graphId, graph);
    return Status::OK;
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Executes a previously registered graph against a cloned VariableSpace,
// substituting the variables named by inputIndices with the caller-provided
// host arrays. Returns a heap-allocated VariablesSet holding clones of the
// graph outputs; caller owns the result.
static VariablesSet *executeStoredGraphT(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                         sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  auto graph = sd::graph::GraphHolder::getInstance().pullGraph(graphId);
  auto varSpace = graph->getVariableSpace()->clone();

  std::vector<sd::NDArray *> handles;

  for (int e = 0; e < numInputs; e++) {
    auto idx = inputIndices[e];

    // we'll delete this array later, together with cloned VariableSpace
    auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<sd::LongType *>(inputShapes[e]));
    handles.emplace_back(array);

    if (varSpace->hasVariable(idx)) {
      auto var = varSpace->getVariable(idx);
      // release the stale array before adopting the caller-provided one
      if (var->hasNDArray()) delete var->getNDArray();

      var->setNDArray(array);
    } else
      varSpace->putVariable(idx, array);
  }

  auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
  auto varSet = new sd::graph::VariablesSet(dZ);

  if (dZ == Status::OK) {
    // pull back results, and provide them
    auto outputs = graph->fetchOutputs();
    for (int e = 0; e < outputs->size(); e++) {
      // we're only getting variable ID/Index from the original graph; values are taken from the cloned workspace
      std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());

      auto var = varSpace->getVariable(varId);

      // clone so the result survives the varSpace deletion below
      varSet->push_back(var->clone());
    }

    delete outputs;
  }

  delete varSpace;

  return varSet;
}
// Public wrapper around executeStoredGraphT() with the standard error trap:
// exceptions are recorded on the default LaunchContext and nullptr is returned.
VariablesSet *executeStoredGraph(sd::Pointer *extraPointers, sd::LongType graphId, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int *inputIndices, int numInputs) {
  try {
    return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Number of variables in the set.
sd::LongType getVariablesSetSize(sd::graph::VariablesSet *set) {
  return set->size();
}

// Execution status the set was constructed with.
sd::Status getVariablesSetStatus(sd::graph::VariablesSet *set) {
  return set->status();
}

// Variable stored at position i of the set.
sd::graph::Variable *getVariable(sd::graph::VariablesSet *set, sd::LongType i) {
  return set->at(i);
}

// Numeric id of the variable.
int getVariableId(sd::graph::Variable *variable) {
  return variable->id();
}

// Output index of the variable.
int getVariableIndex(sd::graph::Variable *variable) {
  return variable->index();
}

// NUL-terminated name of the variable.
const char *getVariableName(sd::graph::Variable *variable) {
  return variable->getName()->c_str();
}

// Shape-info of the variable's NDArray.
sd::LongType const *getVariableShape(sd::graph::Variable *variable) {
  return variable->getNDArray()->shapeInfo();
}

// Primary buffer of the variable's NDArray.
void *getVariableBuffer(sd::graph::Variable *variable) {
  return variable->getNDArray()->buffer();
}
// Removes the graph registered under graphId from the global GraphHolder.
sd::Status unregisterGraph(sd::Pointer *extraPointers, sd::LongType graphId) {
  try {
    sd::graph::GraphHolder::getInstance().dropGraphAny(graphId);
    return Status::OK;
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return Status::BAD_INPUT;
  }
}
// Disposes an array of opaque pointers allocated with new[].
void deletePointerArray(sd::Pointer pointer) {
  delete[] reinterpret_cast<sd::Pointer *>(pointer);
}

// Disposes a char array allocated with new[].
void deleteCharArray(sd::Pointer pointer) {
  delete[] reinterpret_cast<char *>(pointer);
}

// Disposes an int array allocated with new[].
void deleteIntArray(sd::Pointer pointer) {
  delete[] reinterpret_cast<int *>(pointer);
}

// Disposes a LongType array allocated with new[].
void deleteLongArray(sd::Pointer pointer) {
  delete[] reinterpret_cast<sd::LongType *>(pointer);
}
// Disposes a VariablesSet previously handed out to the caller.
void deleteVariablesSet(sd::graph::VariablesSet *pointer) {
  delete pointer;
}

// Disposes a heap-allocated ShapeList.
void deleteShapeList(sd::Pointer shapeList) {
  auto list = reinterpret_cast<sd::ShapeList *>(shapeList);
  delete list;
}

// Serialized description of every operation known to the OpTracker.
const char *getAllOperations() {
  return sd::OpTracker::getInstance().exportOperations();
}

// Allocates a new GraphState for the given id; caller owns the result.
sd::Pointer getGraphState(sd::LongType id) {
  return (sd::Pointer) new sd::graph::GraphState(id);
}

// Disposes a GraphState created by getGraphState().
void deleteGraphState(sd::Pointer state) {
  delete reinterpret_cast<sd::graph::GraphState *>(state);
}
// Executes a logic op (e.g. while/if) against the VariableSpace held by a
// GraphState. Inputs are registered as variables {0, e}, each referenced scope
// is attached as a node input {scopeId, 0}, and after execution the results
// are copied back into the caller-provided output buffers.
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::graph::GraphState *state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  /**
   * That's basically exec, with VariableSpace provided in GraphState:
   * depending on operation (i.e. while or if), different logic executors could be used
   */
  auto graph = state->graph();
  auto varSpace = state->variableSpace();

  // Node is dynamically created, and has nothing beyond it: only inputs and outputs
  // this node has id of 0, and inputs are
  Node node(OpType_LOGIC, opHash, 0);

  // mapping inputs
  for (int e = 0; e < numInputs; e++) {
    auto buffer = inputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(inputShapes[e]);

    auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());

    // now we just put array to VarSpace
    varSpace->putVariable(0, e, array);
    node.pickInput(0, e);
  }

  // mapping scopes
  for (int e = 0; e < numScopes; e++) {
    // we should check scope existence in GraphState/Graph
    int scopeId = (int)scopes[e];
    if (!state->hasScope(scopeId)) {
      // sd_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
      return Logger::logKernelFailureMsg();
    }
    node.pickInput(scopeId, 0);
  }

  auto dZ = LogicExecutor::processNode(graph, &node);
  if (dZ != Status::OK) return dZ;

  // mapping outputs: copy the computed variables into the caller's buffers
  for (int e = 0; e < numOutputs; e++) {
    auto buffer = outputBuffers[e];
    auto shapeInfo = reinterpret_cast<sd::LongType *>(outputShapes[e]);

    NDArray array(buffer, shapeInfo, varSpace->launchContext());

    // now we just put array to VarSpace to the same ID
    // varSpace->putVariable(0, e, array);

    auto t = varSpace->getVariable(0, e)->getNDArray();
    array.assign(t);
  }

  // removing input variables
  for (int e = 0; e < numInputs; e++) {
    varSpace->dropVariable(0, e);
  }

  // after some bla-bla-bla we should have Graph and Node for current op
  return Status::OK;
}
// Opaque-pointer entry point: casts the state handle to GraphState and
// delegates to the typed overload, trapping exceptions into the default
// LaunchContext error reference.
sd::Status execCustomOpWithScope(sd::Pointer *extraPointers, sd::Pointer state, sd::LongType opHash,
                                 sd::LongType *scopes, int numScopes, sd::Pointer *inputBuffers,
                                 sd::Pointer *inputShapes, int numInputs, sd::Pointer *outputBuffers,
                                 sd::Pointer *outputShapes, int numOutputs) {
  try {
    auto typedState = reinterpret_cast<sd::graph::GraphState *>(state);
    return execCustomOpWithScope(extraPointers, typedState, opHash, scopes, numScopes, inputBuffers, inputShapes,
                                 numInputs, outputBuffers, outputShapes, numOutputs);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return sd::Status::BAD_INPUT;
  }
}
// Disposes a ResultWrapper previously handed out to the caller.
void deleteResultWrapper(sd::Pointer ptr) {
  delete reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
}
// Threshold estimation is not implemented on the CUDA backend yet; always throws.
int estimateThreshold(sd::Pointer *extraPointers, sd::Pointer dX, sd::LongType const *dXShapeInfo, int N,
                      float threshold) {
  throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
 * TypeDef:
 *     void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, sd::LongType N, int dstType, sd::Pointer dZ);
 */
// Converts N elements in dX from srcType to dstType, writing the result to dZ.
// Dispatch is a flat (srcType, dstType) table over the ND4J_* type codes:
// commented-out pairs are currently unsupported on the CUDA backend, same-type
// pairs are a no-op, and unknown pairs report an unsupported conversion.
// Fixes vs. previous revision: the INT16 fallback used raw printf instead of
// sd_printf, and FLOAT32 -> FLOAT32 incorrectly reported "Unsupported" while
// DOUBLE -> DOUBLE was accepted as a no-op.
void convertTypes(sd::Pointer *extras, int srcType, sd::Pointer dX, sd::LongType N, int dstType, sd::Pointer dZ) {
  try {
    auto dx = reinterpret_cast<void *>(dX);
    auto dz = reinterpret_cast<void *>(dZ);

    if (srcType == ND4J_FLOAT8) {
      if (dstType == ND4J_FLOAT8) {
        // convertKernel<double, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        // sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_FLOAT32) {
        // sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_INT8) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        // convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: eventually we might want to add it
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_UINT8) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: still might want to add
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_FLOAT16) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO: .... ^^^
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<float16>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_INT16) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
        // TODO...
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
      } else {
        // fix: was raw printf, inconsistent with every other fallback branch
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_FLOAT24) {
    } else if (srcType == ND4J_FLOAT32) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_FLOAT32) {
        // fix: same-type conversion is a no-op (mirrors DOUBLE -> DOUBLE below),
        // previously fell through to the "Unsupported" branch
      } else if (dstType == ND4J_DOUBLE) {
        sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<float>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_DOUBLE) {
      if (dstType == ND4J_FLOAT8) {
        // sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT8) {
        sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT8) {
        sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT16) {
        sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
      } else if (dstType == ND4J_INT16) {
        sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_UINT16) {
        sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
      } else if (dstType == ND4J_FLOAT24) {
      } else if (dstType == ND4J_FLOAT32) {
        sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // same type - nothing to do
      } else if (dstType == ND4J_THRESHOLD) {
        // sd::convertToThreshold<double>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else if (srcType == ND4J_THRESHOLD) {
      if (dstType == ND4J_FLOAT16) {
        // sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
      } else if (dstType == ND4J_FLOAT32) {
        // sd::convertFromThreshold<float>(nullptr, dx, N, dz);
      } else if (dstType == ND4J_DOUBLE) {
        // sd::convertFromThreshold<double>(nullptr, dx, N, dz);
      } else {
        sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
      }
    } else {
      sd_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
    }
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Wraps the given characters in a heap-allocated utf8string; release it with
// deleteUtf8String().
sd::Pointer createUtf8String(sd::Pointer *extraPointers, const char *string, int length) {
  return reinterpret_cast<sd::Pointer>(new sd::utf8string(string, length));
}

// Length field of the utf8string behind ptr.
sd::LongType getUtf8StringLength(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_length;
}

// Raw character buffer of the utf8string behind ptr.
char *getUtf8StringBuffer(sd::Pointer *extraPointers, sd::Pointer ptr) {
  return reinterpret_cast<sd::utf8string *>(ptr)->_buffer;
}

// Disposes an utf8string created by createUtf8String().
void deleteUtf8String(sd::Pointer *extraPointers, sd::Pointer ptr) {
  delete reinterpret_cast<sd::utf8string *>(ptr);
}
///////////////////////////////////////////////////////////////////
// Device kernel behind scatterUpdate(): for each referenced sub-array of X,
// applies an elementwise update (selected by opCode) from the e-th sub-array
// of Y. Each indexed sub-array is "owned" by exactly one block (xIndex modulo
// gridDim.x), so no two blocks write the same sub-array concurrently.
// opCode mapping: 0 add, 1 subtract, 2 multiply, 3 divide,
// 4 reverse-subtract, 5 reverse-divide, 6 copy/assign.
template <typename T, typename I>
SD_KERNEL static void scatterUpdateCuda(const int opCode, const int numOfSubArrs, void *vx,
                                        const sd::LongType *xShapeInfo, const sd::LongType *xOffsets, void *vy,
                                        const sd::LongType *yShapeInfo, const sd::LongType *yOffsets,
                                        const void *vindexes) {
  __shared__ T *x, *y;
  __shared__ sd::LongType arrLenX, arrLenY;
  auto indexes = reinterpret_cast<const I *>(vindexes);

  for (int e = 0; e < numOfSubArrs; e++) {
    const auto xIndex = indexes[e];
    // ownership depends only on blockIdx.x, so the continue/return below are
    // uniform within a block and never split the block across a barrier
    const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;

    if (!isOwner) continue;

    if (threadIdx.x == 0) {
      x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
      y = reinterpret_cast<T *>(vy) + yOffsets[e];
      arrLenX = shape::length(xShapeInfo);
      arrLenY = shape::length(yShapeInfo);
    }
    __syncthreads();

    // mismatched sub-array lengths: nothing sane to do, bail out
    if (arrLenX != arrLenY) return;

    // block-stride loop over the sub-array elements
    for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
      const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
      const auto yOffset = shape::getIndexOffset(i, yShapeInfo);

      switch (opCode) {
        case 0:
          x[xOffset] += y[yOffset];
          break;
        case 1:
          x[xOffset] -= y[yOffset];
          break;
        case 2:
          x[xOffset] *= y[yOffset];
          break;
        case 3:
          x[xOffset] /= y[yOffset];
          break;
        case 4:
          x[xOffset] = y[yOffset] - x[xOffset];
          break;
        case 5:
          x[xOffset] = y[yOffset] / x[xOffset];
          break;
        case 6:
          x[xOffset] = y[yOffset];
          break;
        default:
          continue;
      }
    }
    // make writes visible before thread 0 re-points x/y in the next iteration
    __syncthreads();
  }
}
// Host-side launcher for scatterUpdateCuda on the given stream.
// Fix: the xShapeInfo parameter was declared `const sd::LongType const *`
// (duplicate cv-qualifier, ill-formed C++); now a single const.
// NOTE(review): SD_MAX_NUM_THREADS is passed as the dynamic shared-memory byte
// count here even though the kernel declares no dynamic shared memory — it
// looks like an over-allocation; confirm intent.
template <typename T, typename I>
SD_HOST static void scatterUpdateCudaLauncher(const cudaStream_t *stream, const int opCode, const int numOfSubArrs,
                                              void *vx, const sd::LongType *xShapeInfo,
                                              const sd::LongType *xOffsets, void *vy, const sd::LongType *yShapeInfo,
                                              const sd::LongType *yOffsets, const void *indexes) {
  scatterUpdateCuda<T, I><<<512, 256, SD_MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy,
                                                                     yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
// Applies scatter-style elementwise updates (operation selected by opCode)
// from sub-arrays of Y onto the indexed sub-arrays of X on the device.
// Host shape-infos are used only to resolve the data/index types for the
// BUILD_DOUBLE_SELECTOR dispatch; the kernel itself works on dX/dY.
void scatterUpdate(sd::Pointer *extraPointers, int opCode, int numOfSubArrs, void *hX, sd::LongType const *hXShapeInfo,
                   sd::LongType const *hXOffsets, void *dX, sd::LongType const *dXShapeInfo,
                   sd::LongType const *dXOffsets, void *hY, sd::LongType const *hYShapeInfo,
                   sd::LongType const *hYOffsets, void *dY, sd::LongType const *dYShapeInfo,
                   sd::LongType const *dYOffsets, void *hIindexes, sd::LongType const *hIndicesShapeInfo,
                   void *dIindexes, sd::LongType const *dIndicesShapeInfo) {
  try {
    // extraPointers[1] carries the execution stream by convention
    auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);

    auto type = ArrayOptions::dataType(hXShapeInfo);
    auto iType = ArrayOptions::dataType(hIndicesShapeInfo);

    BUILD_DOUBLE_SELECTOR(
        type, iType, scatterUpdateCudaLauncher,
        (stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
        SD_COMMON_TYPES, SD_INDEXING_TYPES);

    sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
// Collects debug statistics for the given array (via DebugHelper) into the
// caller-provided DebugInfo structure, using a LaunchContext assembled from
// the conventional extraPointers slots.
void inspectArray(sd::Pointer *extraPointers, sd::Pointer buffer, sd::LongType *shapeInfo, sd::Pointer specialBuffer,
                  sd::LongType *specialShapeInfo, sd::Pointer debugInfo) {
  try {
    LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
    auto info = reinterpret_cast<sd::DebugInfo *>(debugInfo);
    NDArray array(buffer, specialBuffer, shapeInfo, &lc);
    sd::DebugHelper::retrieveDebugStatistics(info, &array);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
  }
}
// Debug kernel: forces a device-side dereference of p by summing len bytes
// into a block-shared accumulator; block 0 / thread 0 prints its partial sum.
// Fix: the shared accumulator was never initialized before atomicAdd, so the
// printed value (and the read itself) was undefined — zero it first.
void SD_KERNEL tryPointerKernel(void *p, int len) {
  auto buf = reinterpret_cast<int8_t *>(p);
  auto tid = threadIdx.x + blockIdx.x * blockDim.x;

  __shared__ int b;
  if (threadIdx.x == 0) b = 0;
  __syncthreads();

  if (tid < len) atomicAdd(&b, buf[tid]);

  __syncthreads();

  if (threadIdx.x == 0 && blockIdx.x == 0) printf("Pointer check complete: %i\n", b);
}
// Debug helper: launches tryPointerKernel on a private stream to verify that p
// is a dereferenceable device pointer of at least len bytes.
// Fix: the stream is now destroyed before the failure status is thrown —
// previously a failed sync leaked the stream.
void tryPointer(sd::Pointer extra, sd::Pointer p, int len) {
  try {
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    tryPointerKernel<<<256, 512, len + 64, stream>>>(p, len);
    auto e = cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);

    if (e != 0) throw sd::cuda_exception::build("tryPointer failed", e);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
  }
}
int dataTypeFromNpyHeader(void *header) { return (int)cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header)); }
// Convenience overload of shapeBufferEx(): maps the boolean `empty` flag onto
// the ARRAY_EMPTY extras bit.
OpaqueConstantShapeBuffer *shapeBuffer(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                       char order, sd::LongType ews, bool empty) {
  sd::LongType extras = empty ? ARRAY_EMPTY : 0;
  return shapeBufferEx(rank, shape, strides, dtype, order, ews, extras);
}
// Builds (or looks up) a constant shape buffer for the described shape and
// returns a heap-allocated wrapper the caller owns.
// Fix: the wrapper is allocated only after bufferForShapeInfo() succeeds, so
// an exception during shape construction can no longer leak the wrapper
// (previously `new ConstantShapeBuffer()` preceded the throwing call).
OpaqueConstantShapeBuffer *shapeBufferEx(int rank, sd::LongType *shape, sd::LongType *strides, sd::DataType dtype,
                                         char order, sd::LongType ews, sd::LongType extras) {
  try {
    auto cached = sd::ConstantShapeHelper::getInstance().bufferForShapeInfo(
        ShapeDescriptor(dtype, order, shape, strides, rank, ews, extras));
    return new ConstantShapeBuffer(cached);
  } catch (std::exception &e) {
    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
    return nullptr;
  }
}
// Disposes an opaque constant shape buffer wrapper.
void deleteConstantShapeBuffer(OpaqueConstantShapeBuffer *ptr) {
  delete ptr;
}

// Disposes an opaque constant data buffer wrapper.
void deleteConstantDataBuffer(OpaqueConstantDataBuffer *ptr) {
  delete ptr;
}

// Disposes a TAD pack previously returned to the caller.
void deleteTadPack(sd::TadPack *ptr) {
  delete ptr;
}
// Compares the caller-supplied cuBLAS version triple against the one recorded
// in Environment; on mismatch prints a warning and records error code 152 on
// the default LaunchContext. Returns true when all three components match.
bool isBlasVersionMatches(int major, int minor, int build) {
  bool matches = major == Environment::getInstance()._blasMajorVersion &&
                 minor == Environment::getInstance()._blasMinorVersion &&
                 build == Environment::getInstance()._blasPatchVersion;

  if (!matches) {
    sd_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n",
              Environment::getInstance()._blasMajorVersion, Environment::getInstance()._blasMinorVersion,
              Environment::getInstance()._blasPatchVersion, major, minor, build);
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(152);
    err->setErrorMessage("CUDA/cuBLAS version mismatch");
  }

  return matches;
}
// Builds a constant data buffer of the given dtype from long values.
sd::ConstantDataBuffer *constantBufferLong(sd::DataType dtype, sd::LongType const *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}

// Builds a constant data buffer of the given dtype from double values.
sd::ConstantDataBuffer *constantBufferDouble(sd::DataType dtype, double *data, int length) {
  return sd::ConstantHelper::getInstance().constantBuffer(ConstantDescriptor(data, length), dtype);
}

// Builds a constant data buffer of the given dtype from an existing descriptor.
sd::ConstantDataBuffer *constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
  return sd::ConstantHelper::getInstance().constantBuffer(*descriptor, dtype);
}
// Host-side (primary) pointer of a constant data buffer.
sd::Pointer getConstantDataBufferPrimary(sd::ConstantDataBuffer *dbf) {
  return dbf->primary();
}

// Device-side (special) pointer of a constant data buffer.
sd::Pointer getConstantDataBufferSpecial(sd::ConstantDataBuffer *dbf) {
  return dbf->special();
}

// Element count of a constant data buffer.
sd::LongType getConstantDataBufferLength(sd::ConstantDataBuffer *dbf) {
  return dbf->length();
}

// Per-element size of a constant data buffer.
sd::LongType getConstantDataBufferSizeOf(sd::ConstantDataBuffer *dbf) {
  return dbf->sizeOf();
}

// Host-side (primary) shape-info pointer of a constant shape buffer.
sd::Pointer getConstantShapeBufferPrimary(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->primary());
}

// Device-side (special) shape-info pointer of a constant shape buffer.
sd::Pointer getConstantShapeBufferSpecial(OpaqueConstantShapeBuffer *dbf) {
  return const_cast<sd::LongType *>(dbf->special());
}
// Allocates a standalone graph Context for the given node id; caller owns it.
sd::graph::Context *createGraphContext(int nodeId) {
  return new sd::graph::Context(nodeId);
}

// RandomGenerator embedded in the given context.
sd::graph::RandomGenerator *getGraphContextRandomGenerator(sd::graph::Context *ptr) {
  return &ptr->randomGenerator();
}

// Toggles the in-place execution flag on the context.
void markGraphContextInplace(sd::graph::Context *ptr, bool reallyInplace) {
  ptr->markInplace(reallyInplace);
}

// Attaches CUDA execution resources (stream + scratch pointers) to the context.
void setGraphContextCudaContext(sd::graph::Context *ptr, void *stream, void *reductionPointer,
                                void *allocationPointer) {
  ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}

// Registers an input array (host/device buffer + shape-info pair) at index.
void setGraphContextInputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                               void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}

// Registers an output array (host/device buffer + shape-info pair) at index.
void setGraphContextOutputArray(sd::graph::Context *ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer,
                                void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}

// Registers an input DataBuffer with its shape infos at index.
void setGraphContextInputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                void *specialShapeInfo) {
  ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}

// Registers an output DataBuffer with its shape infos at index.
void setGraphContextOutputBuffer(OpaqueContext *ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo,
                                 void *specialShapeInfo) {
  ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}

// Sets the double-valued op arguments on the context.
void setGraphContextTArguments(sd::graph::Context *ptr, double *arguments, int numberOfArguments) {
  ptr->setTArguments(arguments, numberOfArguments);
}

// Sets the integer op arguments on the context.
void setGraphContextIArguments(sd::graph::Context *ptr, sd::LongType *arguments, int numberOfArguments) {
  ptr->setIArguments(arguments, numberOfArguments);
}

// Sets the boolean op arguments on the context.
void setGraphContextBArguments(sd::graph::Context *ptr, bool *arguments, int numberOfArguments) {
  ptr->setBArguments(arguments, numberOfArguments);
}

// Sets the data-type op arguments on the context, converting raw int codes.
void setGraphContextDArguments(OpaqueContext *ptr, int *arguments, int numberOfArguments) {
  std::vector<sd::DataType> dtypes;
  dtypes.reserve(numberOfArguments);
  for (int i = 0; i < numberOfArguments; i++) dtypes.push_back(static_cast<sd::DataType>(arguments[i]));

  ptr->setDArguments(dtypes);
}

// Disposes a context created by createGraphContext().
void deleteGraphContext(sd::graph::Context *ptr) {
  delete ptr;
}
// Allocates a RandomGenerator seeded with the given root/node seeds; caller
// owns it (see deleteRandomGenerator). Returns nullptr and records the error
// on the default LaunchContext if construction throws.
sd::graph::RandomGenerator *createRandomGenerator(sd::LongType rootSeed, sd::LongType nodeSeed) {
  try {
    return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Root state/seed of the generator.
sd::LongType getRandomGeneratorRootState(sd::graph::RandomGenerator *ptr) {
  return ptr->rootState();
}

// Node-specific state/seed of the generator.
sd::LongType getRandomGeneratorNodeState(sd::graph::RandomGenerator *ptr) {
  return ptr->nodeState();
}

// Re-seeds the generator with the given root and node seeds.
void setRandomGeneratorStates(sd::graph::RandomGenerator *ptr, sd::LongType rootSeed, sd::LongType nodeSeed) {
  ptr->setStates(rootSeed, nodeSeed);
}

// Deterministic float drawn for the given element index.
float getRandomGeneratorRelativeFloat(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<float>(index);
}

// Deterministic double drawn for the given element index.
double getRandomGeneratorRelativeDouble(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeT<double>(index);
}

// Deterministic int drawn for the given element index.
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeInt(index);
}

// Deterministic long drawn for the given element index.
sd::LongType getRandomGeneratorRelativeLong(sd::graph::RandomGenerator *ptr, sd::LongType index) {
  return ptr->relativeLong(index);
}

// Draws the next int, then advances the generator state.
int getRandomGeneratorNextInt(sd::graph::RandomGenerator *ptr) {
  // to nullify _nodeState._long ^= (steps ^ 0xdeadbeef);
  // we will use step = 0xdeadbeef
  auto value = ptr->relativeInt(1);
  ptr->rewindH(0xdeadbeef);
  return value;
}

// Draws the next long, then advances the generator state.
sd::LongType getRandomGeneratorNextLong(sd::graph::RandomGenerator *ptr) {
  auto value = ptr->relativeLong(1);
  ptr->rewindH(0xdeadbeef);
  return value;
}

// Draws the next float, then advances the generator state.
float getRandomGeneratorNextFloat(sd::graph::RandomGenerator *ptr) {
  auto value = ptr->relativeT<float>(1);
  ptr->rewindH(0xdeadbeef);
  return value;
}

// Draws the next double, then advances the generator state.
double getRandomGeneratorNextDouble(sd::graph::RandomGenerator *ptr) {
  auto value = ptr->relativeT<double>(1);
  ptr->rewindH(0xdeadbeef);
  return value;
}

// Disposes a generator created by createRandomGenerator().
void deleteRandomGenerator(sd::graph::RandomGenerator *ptr) {
  delete ptr;
}
// Derives an nd4j shape buffer from a raw numpy (.npy) array blob: decodes the
// header shape/dtype/order and maps numpy's scalar and empty-array encodings
// onto the corresponding ShapeBuilders helpers.
sd::Pointer shapeBufferForNumpy(sd::Pointer npyArray) {
  try {
    auto header = reinterpret_cast<char *>(npyArray);
    cnpy::NpyArray arr = cnpy::loadNpyFromPointer(header);

    const unsigned int rank = arr.shape.size();
    std::vector<sd::LongType> shape(rank);
    bool hasZeroExtent = false;
    for (unsigned int i = 0; i < rank; i++) {
      shape[i] = arr.shape[i];
      if (arr.shape[i] == 0) hasZeroExtent = true;
    }

    auto dtype = cnpy::dataTypeFromHeader(header);
    const char order = arr.fortranOrder ? 'f' : 'c';

    sd::LongType *shapeBuffer;
    if (rank == 1 && shape[0] == 0) {
      // rank-1 with extent 0 is the scalar encoding here
      shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
    } else if (hasZeroExtent) {
      shapeBuffer = (rank > 0) ? sd::ShapeBuilders::emptyShapeInfo(dtype, order, shape)
                               : sd::ShapeBuilders::emptyShapeInfo(dtype);
    } else {
      shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, order, shape);
    }

    return (sd::Pointer)(sd::ConstantShapeHelper::getInstance().createFromExisting(
        shapeBuffer, true));  // TO DO: this can lead to unpleasant crash sometimes
  } catch (std::exception &e) {
    auto err = sd::LaunchContext::defaultContext()->errorReference();
    err->setErrorCode(1);
    err->setErrorMessage(e.what());
    return nullptr;
  }
}
// Bytes currently held by the per-device constant-memory cache.
sd::LongType getCachedMemory(int deviceId) { return sd::ConstantHelper::getInstance().getCachedAmount(deviceId); }
// Process-wide default launch context and accessors for its CUDA resources.
sd::LaunchContext *defaultLaunchContext() { return LaunchContext::defaultContext(); }
sd::Pointer lcScalarPointer(OpaqueLaunchContext *lc) { return lc->getScalarPointer(); }
sd::Pointer lcReductionPointer(OpaqueLaunchContext *lc) { return lc->getReductionPointer(); }
sd::Pointer lcAllocationPointer(OpaqueLaunchContext *lc) { return lc->getAllocationPointer(); }
sd::Pointer lcExecutionStream(OpaqueLaunchContext *lc) { return lc->getCudaStream(); }
sd::Pointer lcCopyStream(OpaqueLaunchContext *lc) { return lc->getCudaSpecialStream(); }
sd::Pointer lcBlasHandle(OpaqueLaunchContext *lc) { return lc->getCublasHandle(); }
sd::Pointer lcSolverHandle(OpaqueLaunchContext *lc) { return lc->getCusolverHandle(); }
// Last error recorded on the default context (set by the catch blocks in
// the wrappers above/below).
int lastErrorCode() { return sd::LaunchContext::defaultContext()->errorReference()->errorCode(); }
const char *lastErrorMessage() { return sd::LaunchContext::defaultContext()->errorReference()->errorMessage(); }
void ctxShapeFunctionOverride(OpaqueContext *ptr, bool reallyOverride) {
ptr->setShapeFunctionOverride(reallyOverride);
}
// Clears the context's fast-path inputs/outputs.
void ctxPurge(OpaqueContext *ptr) { ptr->clearFastPath(); }
// Binary/optimization level introspection: fixed values on this backend.
int binaryLevel() { return 0; }
int optimalLevel() { return 0; }
bool isMinimalRequirementsMet() { return true; }
bool isOptimalRequirementsMet() { return true; }
void ctxAllowHelpers(OpaqueContext *ptr, bool reallyAllow) { ptr->allowHelpers(reallyAllow); }
// Sets the execution mode; out-of-range values fall back to mode 0.
void ctxSetExecutionMode(OpaqueContext *ptr, int execMode) {
if (execMode < 0 || execMode > 2) execMode = 0;
ptr->setExecutionMode((samediff::ExecutionMode)execMode);
}
// Wraps externally owned primary/special memory: a zero-element buffer is
// allocated and the caller's pointers are attached afterwards.
// NOTE(review): setPrimary/setSpecial receive `elements` here, while the
// dbSet*Buffer wrappers below name the same argument numBytes -- confirm
// which unit the underlying API expects.
OpaqueDataBuffer *dbCreateExternalDataBuffer(sd::LongType elements, int dataType, sd::Pointer primary,
sd::Pointer special) {
auto buffer = dbAllocateDataBuffer(0, dataType, false);
if (primary != nullptr) buffer->setPrimary(primary, elements);
if (special != nullptr) buffer->setSpecial(special, elements);
return buffer;
}
OpaqueDataBuffer *dbAllocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
return allocateDataBuffer(elements, dataType, allocateBoth);
}
// Allocates an interop buffer of `elements` items of the given type;
// allocateBoth also allocates the device-side storage. Returns nullptr and
// records the error on the default context on failure.
OpaqueDataBuffer *allocateDataBuffer(sd::LongType elements, int dataType, bool allocateBoth) {
try {
auto dtype = DataTypeUtils::fromInt(dataType);
return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
// Raw pointer accessors (primary = host side, special = device side).
sd::Pointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->primary(); }
sd::Pointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) { return dataBuffer->special(); }
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) { delete dataBuffer; }
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer primaryBuffer, sd::LongType numBytes) {
dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, sd::Pointer specialBuffer, sd::LongType numBytes) {
dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocatePrimary(); }
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->allocateSpecial(); }
// Grows the underlying storage to hold `elements` items of the buffer's type;
// errors are recorded on the default context rather than thrown.
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, sd::LongType elements) {
try {
dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
// Creates a view of `length` elements starting at `offset` into an existing buffer.
OpaqueDataBuffer *dbCreateView(OpaqueDataBuffer *dataBuffer, sd::LongType length, sd::LongType offset) {
return new InteropDataBuffer(*dataBuffer, length, offset);
}
// Host<->device synchronization and read/write "tick" helpers.
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToSpecial(); }
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->syncToPrimary(nullptr); }
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readPrimary(); }
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writePrimary(); }
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->readSpecial(); }
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) { dataBuffer->dataBuffer()->writeSpecial(); }
void dbExpand(OpaqueDataBuffer *dataBuffer, sd::LongType elements) { dataBuffer->expand(elements); }
void dbClose(OpaqueDataBuffer *dataBuffer) { dataBuffer->getDataBuffer()->close(); }
int dbDeviceId(OpaqueDataBuffer *dataBuffer) { return dataBuffer->deviceId(); }
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) { dataBuffer->setDeviceId(deviceId); }
// Reports which copies of the buffer are current:
//   0 = host (primary) and device (special) both up to date,
//  -1 = host only,
//   1 = device only (or neither side actual).
int dbLocality(OpaqueDataBuffer *dataBuffer) {
  const bool hostActual = dataBuffer->dataBuffer()->isPrimaryActual();
  const bool deviceActual = dataBuffer->dataBuffer()->isSpecialActual();
  if (hostActual)
    return deviceActual ? 0 : -1;
  return 1;
}
// No-op stub: accepts and ignores the VEDA device library folder path.
// NOTE(review): presumably only meaningful on the VE/VEDA backend -- confirm.
void setVedaDeviceLibFolder(std::string path) {
}
#include "internal_utils.h"
#include <cuda_fp16.h>
// Check async error.
// Sync and get kernel status in Debug builds.
// SYNC_AND_CHECK_STREAM: debug-only stream synchronization. Blocks on the
// stream and, on failure, executes `return status;` in the enclosing
// function -- so it may only appear inside functions returning cudaError_t.
// Compiles to nothing in release (NDEBUG) builds.
#ifndef NDEBUG
#define SYNC_AND_CHECK_STREAM(stream) do { \
cudaError_t status = cudaStreamSynchronize(stream); \
if (status != cudaSuccess) \
return status; \
}while(false)
#else
#define SYNC_AND_CHECK_STREAM(stream)
#endif
// CHECKK: place immediately after a kernel launch. Always picks up
// launch-time errors via cudaGetLastError(); in debug builds additionally
// synchronizes to surface asynchronous execution errors. Same
// return-on-error constraint as SYNC_AND_CHECK_STREAM.
#define CHECKK(stream) do { \
cudaError_t status = cudaGetLastError(); \
if (status != cudaSuccess) \
return status; \
SYNC_AND_CHECK_STREAM(stream); \
}while(false)
namespace redtail { namespace tensorrt
{
using namespace nvinfer1;
// Hardware limits for grid dimensions Y and Z; asserted against below since
// the kernels in this file do no block striding.
static const int kMaxGridSizeY = 65535;
static const int kMaxGridSizeZ = 65535;
// -----------------------------------------------------------------
// Helper function to get block count.
// -----------------------------------------------------------------
// Ceiling division: number of blocks of `block_size` needed to cover
// `total_size` elements. Computed as quotient plus remainder test instead of
// the classic (total + block - 1) / block form, which overflows uint32_t for
// total_size close to UINT32_MAX and silently yields a too-small grid in
// release builds (only the debug asserts would catch it).
// Preconditions (asserted): total_size > 0, block_size > 0.
static uint32_t getBlockCount(uint32_t total_size, uint32_t block_size)
{
    uint32_t res = total_size / block_size + (total_size % block_size != 0 ? 1 : 0);
    assert(res > 0);
    assert((size_t)res * block_size >= total_size);
    return res;
}
// REVIEW alexeyk: kernels are not optimized for now.
// -----------------------------------------------------------------
// Cost volume kernels.
// -----------------------------------------------------------------
// Replicates the left-image feature tensor src (c x h x w, CHW layout) into
// the cost volume dst: one thread per source element, value written once per
// disparity level. Levels are 2*c*h*w apart, i.e. the volume stores 2*c
// channels per level and the left features occupy the first c of them
// (costVolumeCopyPadKernel fills the other c with shifted right features).
// Expected launch: 3D grid covering (w, h, c); disp = number of levels.
template<typename T>
__global__ void costVolumeCopyKernel(const T* src, int32_t c, int32_t h, int32_t w, int32_t disp, T* dst)
{
assert(src != nullptr);
assert(dst != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t iz = blockIdx.z * blockDim.z + threadIdx.z;
// Guard the grid tail.
if (ix >= w || iy >= h || iz >= c)
return;
const size_t isrc = iz * h * w + iy * w + ix;
// Distance between consecutive disparity levels in dst (2*c channels each).
const size_t stride = 2 * c * h * w;
T val = src[isrc];
T* pdst = dst + isrc;
for (int32_t idst = 0; idst < disp; idst++)
{
*pdst = val;
pdst += stride;
}
}
// Writes the right-image features into the second half (channels [c, 2c)) of
// each disparity level of the cost volume dst, shifting columns right by the
// disparity `pad` and zero-filling where the shift runs off the left edge
// (ix < pad). Launch layout matches costVolumeCopyKernel.
template<typename T>
__global__ void costVolumeCopyPadKernel(const T* src, int32_t c, int32_t h, int32_t w, int32_t disp, T* dst)
{
assert(src != nullptr);
assert(dst != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= w || iy >= h || iz >= c)
return;
const size_t isrc = iz * h * w + iy * w + ix;
size_t stride = c * h * w;
// First destination: same (iz, iy, ix) position, offset by c channels.
const size_t idst = isrc + stride;
stride *= 2;
T* pdst = dst + idst;
for (int32_t pad = 0; pad < disp; pad++)
{
if (ix < pad)
*pdst = 0;
else
*pdst = src[isrc - pad];
pdst += stride;
}
}
// Fused variant: fills both halves of the cost volume in one pass (left
// features replicated, right features shifted per disparity). Currently
// unused -- computeCostVolume launches the two copy kernels instead (its
// call here is commented out as the split version measured faster).
template<typename T>
__global__ void costVolumeKernel(const T* left, const T* right, int32_t c, int32_t h, int32_t w, int32_t disp, T* dst)
{
assert(left != nullptr);
assert(right != nullptr);
assert(dst != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= w || iy >= h || iz >= c)
return;
// Setup initial indices.
size_t stride = c * h * w;
// Left and right source is the same.
const size_t ileft = iz * h * w + iy * w + ix;
T* pdst_l = dst + ileft;
const size_t iright = ileft;
// Right destination is offset by c channels within the level.
T* pdst_r = dst + iright + stride;
// Final stride is 2*c channels per disparity level.
stride *= 2;
T val_l = left[ileft];
for (int32_t pad = 0; pad < disp; pad++)
{
if (ix < pad)
*pdst_r = 0;
else
*pdst_r = right[iright - pad];
*pdst_l = val_l;
pdst_l += stride;
pdst_r += stride;
}
}
// Builds the concatenation-style cost volume from left/right feature maps.
// in_dims: (c, h, w); out_dims: (disp, 2*c, h, w). FP32 only (asserted).
// Returns the first CUDA error encountered, cudaSuccess otherwise.
template<>
cudaError_t CudaKernels::computeCostVolume(DataType data_type, const float* left, const float* right, Dims in_dims,
float* cost_vol, Dims out_dims, cudaStream_t stream)
{
assert(data_type == DataType::kFLOAT);
assert(in_dims.nbDims == 3);
assert(out_dims.nbDims == 4);
dim3 b_dim{16, 16, 1};
dim3 g_dim;
// Grid covers (w, h, c); out_dims.d[0] is the disparity count.
g_dim.x = getBlockCount(in_dims.d[2], b_dim.x);
g_dim.y = getBlockCount(in_dims.d[1], b_dim.y);
g_dim.z = getBlockCount(in_dims.d[0], b_dim.z);
// REVIEW alexeyk: using 2 kernels instead of one as it's not yet optimized so 2 kernels are faster.
// REVIEW alexeyk: optimize, see gld_efficiency,gst_efficiency,gld_transactions,gst_transactions.
// costVolumeKernel<<<g_dim, b_dim, 0, stream>>>(left, right, in_dims.d[0], in_dims.d[1], in_dims.d[2], out_dims.d[0],
// cost_vol);
costVolumeCopyKernel<<<g_dim, b_dim, 0, stream>>>(left, in_dims.d[0], in_dims.d[1], in_dims.d[2], out_dims.d[0],
cost_vol);
CHECKK(stream);
costVolumeCopyPadKernel<<<g_dim, b_dim, 0, stream>>>(right, in_dims.d[0], in_dims.d[1], in_dims.d[2], out_dims.d[0],
cost_vol);
CHECKK(stream);
return cudaSuccess;
}
// -----------------------------------------------------------------
// Correlation cost volume kernels.
// -----------------------------------------------------------------
// FP32, NCHW kernel.
// Correlation cost volume kernel, FP32/NCHW. One thread per output pixel per
// disparity (blockIdx.z == disparity, so grid z must equal disp). Computes
// the channel-wise dot product of the left features with the right features
// shifted right by the disparity; pixels where the shift falls off the left
// edge (ix < pad) get 0.
template<typename T>
__global__ void corrCostVolumeKernel(const T* left, const T* right, int32_t c, int32_t h, int32_t w, int32_t disp, T* dst)
{
assert(left != nullptr);
assert(right != nullptr);
assert(dst != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= w || iy >= h)
return;
uint32_t pad = blockIdx.z;
assert(pad < disp);
size_t stride = h * w;
T val = 0;
if (ix >= pad)
{
const T* pl = left + iy * w + ix;
const T* pr = right + iy * w + ix - pad;
for (int32_t i = 0; i < c; i++)
{
val += *pl * (*pr);
pl += stride;
pr += stride;
}
}
// Disparity feature maps are arranged from min to max.
size_t idst = pad * h * w + iy * w + ix;
dst[idst] = val;
}
// FP16, NC2HW2 kernel.
// Correlation cost volume kernel for FP16 data in NC2HW2 layout: the float
// pointers actually reference channel-pair-packed __half2 data. Each z-block
// covers two consecutive disparities (pad = 2*blockIdx.z handles pad and
// pad+1); per-channel-pair products are accumulated in FP32 for precision
// (see REVIEW note) and the two disparity sums are packed back as a __half2
// into a single float slot of dst.
__global__ void corrCostVolumeFP16NC2HW2Kernel(const float* left, const float* right, int32_t c, int32_t h, int32_t w, int32_t disp, float* dst)
{
assert(left != nullptr);
assert(right != nullptr);
assert(dst != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= w || iy >= h)
return;
uint32_t pad = 2 * blockIdx.z;
assert(pad < disp);
size_t stride = h * w;
// REVIEW alexeyk: using FP32 arithmetic for better precision. FP16 works fine too
// but does not give any perf increase and causes slight loss in accuracy.
// __half2 val1{0, 0};
// __half2 val2{0, 0};
float val1 = 0;
float val2 = 0;
if (ix >= pad)
{
const float* pl = left + iy * w + ix;
const float* pr = right + iy * w + ix - pad;
// (c + 1) / 2 packed channel pairs per pixel.
for (int32_t i = 0; i < (c + 1) / 2; i++)
{
// auto l = *(__half2*)pl;
// auto r1 = *(__half2*)pr;
// auto r2 = ix >= pad + 1 ? *(__half2*)(pr - 1) : __half2{0, 0};
// val1 = __hfma2(l, r1, val1);
// val2 = __hfma2(l, r2, val2);
auto l = __half22float2(*(__half2*)pl);
auto r1 = __half22float2(*(__half2*)pr);
auto r2 = ix >= pad + 1 ? __half22float2(*(__half2*)(pr - 1)) : float2{0, 0};
val1 += l.x * r1.x + l.y * r1.y;
val2 += l.x * r2.x + l.y * r2.y;
pl += stride;
pr += stride;
}
}
// Disparity feature maps are arranged from min to max.
size_t idst = blockIdx.z * h * w + iy * w + ix;
// auto val = __half2(__hadd(val1.x, val1.y), __hadd(val2.x, val2.y));
auto val = __half2((__half)val1, (__half)val2);
dst[idst] = *(float*)&val;
}
// Builds the correlation cost volume. in_dims: (c, h, w) feature maps;
// out_dims: (disp, h, w). FP32 runs one z-block per disparity; FP16 (packed
// NC2HW2 data behind float pointers) runs one z-block per pair of
// disparities. Returns the first CUDA error encountered, cudaSuccess
// otherwise.
template<>
cudaError_t CudaKernels::computeCorrCostVolume(DataType data_type, const float* left, const float* right, Dims in_dims,
float* cost_vol, Dims out_dims, cudaStream_t stream)
{
assert(data_type == DataType::kFLOAT || data_type == DataType::kHALF);
assert(in_dims.nbDims == 3);
assert(out_dims.nbDims == 3);
if (data_type == DataType::kFLOAT)
{
dim3 b_dim{16, 16, 1};
dim3 g_dim;
g_dim.x = getBlockCount(in_dims.d[2], b_dim.x);
g_dim.y = getBlockCount(in_dims.d[1], b_dim.y);
// Each block handles a particular disparity.
g_dim.z = out_dims.d[0];
corrCostVolumeKernel<<<g_dim, b_dim, 0, stream>>>(left, right, in_dims.d[0], in_dims.d[1], in_dims.d[2], out_dims.d[0],
cost_vol);
CHECKK(stream);
}
else if (data_type == DataType::kHALF)
{
dim3 b_dim{16, 16, 1};
dim3 g_dim;
g_dim.x = getBlockCount(in_dims.d[2], b_dim.x);
g_dim.y = getBlockCount(in_dims.d[1], b_dim.y);
// Each block handles 2 disparity values.
g_dim.z = (out_dims.d[0] + 1) / 2;
corrCostVolumeFP16NC2HW2Kernel<<<g_dim, b_dim, 0, stream>>>(left, right, in_dims.d[0], in_dims.d[1], in_dims.d[2], out_dims.d[0],
cost_vol);
CHECKK(stream);
}
return cudaSuccess;
}
// -----------------------------------------------------------------
// Some convolution-related kernels.
// -----------------------------------------------------------------
// Adds a per-depth bias to a 3D convolution output laid out as (c, d, h, w)
// flattened into (c*d, h, w): iz enumerates the combined c*d dimension and
// cur_d = iz % d selects the bias element for the current depth slice.
template<typename T>
__global__ void addDBiasTo3DConvKernel(const T* bias, int32_t c, int32_t d, int32_t h, int32_t w, T* conv)
{
assert(bias != nullptr);
assert(conv != nullptr);
const uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
const uint32_t iy = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t iz = blockIdx.z * blockDim.z + threadIdx.z;
if (ix >= w || iy >= h || iz >= d * c)
return;
int32_t cur_d = iz % d;
const size_t idst = iz * h * w + iy * w + ix;
conv[idst] += bias[cur_d];
}
// Launches addDBiasTo3DConvKernel. bias_dims is the 5D bias descriptor with
// minibatch 1 (asserted); bias_dims.d[2] must equal the depth dimension
// conv_dims.d[1] of the 4D convolution output. Grid y/z must fit hardware
// limits since the kernel does no block striding (asserted). Returns the
// first CUDA error encountered, cudaSuccess otherwise.
template<>
cudaError_t CudaKernels::addDBiasTo3DConv(const float* bias, Dims bias_dims, float* conv, Dims conv_dims, cudaStream_t stream)
{
assert(bias_dims.nbDims == 5);
assert(conv_dims.nbDims == 4);
// REVIEW alexeyk: minibatch size 1 for now.
assert(bias_dims.d[0] == 1);
assert(bias_dims.d[2] == conv_dims.d[1]);
UNUSEDR(bias_dims);
dim3 b_dim{16, 16, 1};
dim3 g_dim;
// Grid covers (w, h, c*d) of the conv output.
g_dim.x = getBlockCount(conv_dims.d[3], b_dim.x);
g_dim.y = getBlockCount(conv_dims.d[2], b_dim.y);
g_dim.z = getBlockCount(conv_dims.d[0] * conv_dims.d[1], b_dim.z);
// REVIEW alexeyk: no block striding for now.
assert(g_dim.y <= kMaxGridSizeY);
assert(g_dim.z <= kMaxGridSizeZ);
UNUSEDR(kMaxGridSizeY);
UNUSEDR(kMaxGridSizeZ);
addDBiasTo3DConvKernel<<<g_dim, b_dim, 0, stream>>>(bias, conv_dims.d[0], conv_dims.d[1], conv_dims.d[2], conv_dims.d[3], conv);
CHECKK(stream);
return cudaSuccess;
}
// -----------------------------------------------------------------
// Conversion kernels.
// -----------------------------------------------------------------
// Element-wise FP32 -> FP16 conversion; the raw half bit pattern is stored
// into a uint16_t slot. One thread per element, 1D launch.
__global__ void fp32Tofp16Kernel(const float* src, uint16_t* dst, size_t size)
{
assert(src != nullptr);
assert(dst != nullptr);
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
__half val(src[tid]);
dst[tid] = *(uint16_t*)&val;
}
// Asynchronously converts `size` floats in src to raw FP16 bit patterns in
// dst on the given stream. Returns the first CUDA error, cudaSuccess
// otherwise.
cudaError_t CudaKernels::fp32Tofp16(const float* src, uint16_t* dst, size_t size, cudaStream_t stream)
{
    const dim3 threads{256, 1, 1};
    const dim3 blocks{getBlockCount(size, threads.x), 1, 1};
    fp32Tofp16Kernel<<<blocks, threads, 0, stream>>>(src, dst, size);
    CHECKK(stream);
    return cudaSuccess;
}
// Element-wise FP16 -> FP32 conversion; src holds raw half bit patterns.
// One thread per element, 1D launch.
__global__ void fp16Tofp32Kernel(const uint16_t* src, float* dst, size_t size)
{
assert(src != nullptr);
assert(dst != nullptr);
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
dst[tid] = (float)(*(__half*)(src + tid));
}
// Asynchronously converts `size` raw FP16 bit patterns in src to floats in
// dst on the given stream. Returns the first CUDA error, cudaSuccess
// otherwise.
cudaError_t CudaKernels::fp16Tofp32(const uint16_t* src, float* dst, size_t size, cudaStream_t stream)
{
    const dim3 threads{256, 1, 1};
    const dim3 blocks{getBlockCount(size, threads.x), 1, 1};
    fp16Tofp32Kernel<<<blocks, threads, 0, stream>>>(src, dst, size);
    CHECKK(stream);
    return cudaSuccess;
}
} } // namespace redtail::tensorrt
//
// *** System
//
#include <iostream>
//
// *** Boost
//
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include <boost/numeric/ublas/io.hpp>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
//#define VIENNACL_DEBUG_BUILD
#define VIENNACL_HAVE_UBLAS 1
#include "viennacl/scalar.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/matrix_proxy.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/direct_solve.hpp"
#include "examples/tutorial/Random.hpp"
//
// -------------------------------------------------------------
//
using namespace boost::numeric;
//
// -------------------------------------------------------------
//
// Relative difference between a host scalar and a ViennaCL device scalar.
// Returns 0 when the two compare equal; otherwise (s1 - s2) scaled by the
// larger magnitude. Synchronizes the device first so the GPU value is final.
template <typename ScalarType>
ScalarType diff(ScalarType & s1, viennacl::scalar<ScalarType> & s2)
{
  viennacl::backend::finish();
  if (s1 == s2)
    return 0;
  return (s1 - s2) / std::max(fabs(s1), fabs(s2));
}
// Relative infinity-norm distance between a uBLAS vector and a ViennaCL
// vector: each entry becomes |a - b| / max(|a|, |b|), with entries that are
// zero on both sides contributing 0. The device is synchronized around the
// copy so the GPU data is final before comparison.
template <typename ScalarType>
ScalarType diff(ublas::vector<ScalarType> & v1, viennacl::vector<ScalarType> & v2)
{
  ublas::vector<ScalarType> host_copy(v2.size());
  viennacl::backend::finish();
  viennacl::copy(v2.begin(), v2.end(), host_copy.begin());
  viennacl::backend::finish();
  for (std::size_t i = 0; i < v1.size(); ++i)
  {
    double scale = std::max(fabs(host_copy[i]), fabs(v1[i]));
    host_copy[i] = (scale > 0) ? fabs(host_copy[i] - v1[i]) / scale : 0.0;
  }
  return norm_inf(host_copy);
}
// Relative infinity-norm distance between a uBLAS matrix and a ViennaCL
// matrix (or range/slice): max over entries of |a - b| / max(|a|, |b|),
// with entries that are zero on both sides contributing 0.
//
// Fix: the original computed 0/0 = NaN for zero-on-both-sides entries and
// relied on `NaN > ret` evaluating to false to skip them -- fragile and
// incorrect under -ffast-math/-ffinite-math-only. The zero denominator is
// now guarded explicitly, matching the vector overload of diff() above.
template <typename ScalarType, typename VCLMatrixType>
ScalarType diff(ublas::matrix<ScalarType> & mat1, VCLMatrixType & mat2)
{
  ublas::matrix<ScalarType> mat2_cpu(mat2.size1(), mat2.size2());
  viennacl::backend::finish(); //workaround for a bug in APP SDK 2.7 on Trinity APUs (with Catalyst 12.8)
  viennacl::copy(mat2, mat2_cpu);
  double ret = 0;
  for (unsigned int i = 0; i < mat2_cpu.size1(); ++i)
  {
    for (unsigned int j = 0; j < mat2_cpu.size2(); ++j)
    {
      double denom = std::max(fabs(mat2_cpu(i, j)), fabs(mat1(i, j)));
      if (denom > 0)
      {
        double act = fabs(mat2_cpu(i, j) - mat1(i, j)) / denom;
        if (act > ret)
          ret = act;
      }
    }
  }
  //std::cout << ret << std::endl;
  return ret;
}
//
// Triangular solvers
//
// Compares the reference result against the ViennaCL result and reports
// pass/fail on stdout. On failure (relative difference above epsilon) sets
// retval to EXIT_FAILURE; on success retval is left untouched, so a single
// retval can accumulate the outcome of many checks.
template <typename RHSTypeRef, typename RHSTypeCheck, typename Epsilon >
void run_solver_check(RHSTypeRef & B_ref, RHSTypeCheck & B_check, int & retval, Epsilon const & epsilon)
{
  const double act_diff = fabs(diff(B_ref, B_check));
  if (act_diff > epsilon)
  {
    std::cout << " FAILED!" << std::endl;
    std::cout << "# Error at operation: matrix-matrix solve" << std::endl;
    std::cout << " diff: " << act_diff << std::endl;
    retval = EXIT_FAILURE;
    return;
  }
  std::cout << " passed! " << act_diff << std::endl;
}
// Exercises the dense triangular matrix-matrix solvers for one combination
// of operand kinds (plain matrix / range / slice). The uBLAS results serve
// as reference; vcl_* are the ViennaCL operands under test. Covers A \ B,
// A \ B^T, A^T \ B and A^T \ B^T, each with the four triangular tags
// (upper / unit_upper / lower / unit_lower), checking solve() and -- for the
// transposed-RHS cases -- inplace_solve() on the trans() proxy as well.
// The last (unnamed) parameter only fixes the result matrix type.
// Returns EXIT_SUCCESS when every check passes, EXIT_FAILURE otherwise.
template< typename NumericT, typename Epsilon,
typename ReferenceMatrixTypeA, typename ReferenceMatrixTypeB, typename ReferenceMatrixTypeC,
typename MatrixTypeA, typename MatrixTypeB, typename MatrixTypeC, typename MatrixTypeResult>
int test_solve(Epsilon const& epsilon,
ReferenceMatrixTypeA const & A,
ReferenceMatrixTypeB const & B_start,
ReferenceMatrixTypeC const & C_start,
MatrixTypeA const & vcl_A,
MatrixTypeB & vcl_B,
MatrixTypeC & vcl_C,
MatrixTypeResult const &
)
{
int retval = EXIT_SUCCESS;
// --------------------------------------------------------------------------
ReferenceMatrixTypeA result;
ReferenceMatrixTypeC C_trans;
ReferenceMatrixTypeB B = B_start;
ReferenceMatrixTypeC C = C_start;
MatrixTypeResult vcl_result;
// Test: A \ B with various tags --------------------------------------------------------------------------
std::cout << "Testing A \\ B: " << std::endl;
std::cout << " * upper_tag: ";
result = ublas::solve(A, B, ublas::upper_tag());
vcl_result = viennacl::linalg::solve(vcl_A, vcl_B, viennacl::linalg::upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * unit_upper_tag: ";
result = ublas::solve(A, B, ublas::unit_upper_tag());
vcl_result = viennacl::linalg::solve(vcl_A, vcl_B, viennacl::linalg::unit_upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * lower_tag: ";
result = ublas::solve(A, B, ublas::lower_tag());
vcl_result = viennacl::linalg::solve(vcl_A, vcl_B, viennacl::linalg::lower_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * unit_lower_tag: ";
result = ublas::solve(A, B, ublas::unit_lower_tag());
vcl_result = viennacl::linalg::solve(vcl_A, vcl_B, viennacl::linalg::unit_lower_tag());
run_solver_check(result, vcl_result, retval, epsilon);
if (retval == EXIT_SUCCESS)
std::cout << "Test A \\ B passed!" << std::endl;
B = B_start;
C = C_start;
// Test: A \ B^T (C is the transposed right-hand side) ----------------------------------------------------
std::cout << "Testing A \\ B^T: " << std::endl;
std::cout << " * upper_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
//check solve():
result = ublas::solve(A, C_trans, ublas::upper_tag());
vcl_result = viennacl::linalg::solve(vcl_A, trans(vcl_C), viennacl::linalg::upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
//check compute kernels (in-place solve on the trans() proxy):
std::cout << " * upper_tag: ";
ublas::inplace_solve(A, C_trans, ublas::upper_tag());
viennacl::linalg::inplace_solve(vcl_A, trans(vcl_C), viennacl::linalg::upper_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * unit_upper_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(A, C_trans, ublas::unit_upper_tag());
viennacl::linalg::inplace_solve(vcl_A, trans(vcl_C), viennacl::linalg::unit_upper_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * lower_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(A, C_trans, ublas::lower_tag());
viennacl::linalg::inplace_solve(vcl_A, trans(vcl_C), viennacl::linalg::lower_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * unit_lower_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(A, C_trans, ublas::unit_lower_tag());
viennacl::linalg::inplace_solve(vcl_A, trans(vcl_C), viennacl::linalg::unit_lower_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
if (retval == EXIT_SUCCESS)
std::cout << "Test A \\ B^T passed!" << std::endl;
B = B_start;
C = C_start;
// Test: A^T \ B with various tags ------------------------------------------------------------------------
std::cout << "Testing A^T \\ B: " << std::endl;
std::cout << " * upper_tag: ";
viennacl::copy(B, vcl_B);
result = ublas::solve(trans(A), B, ublas::upper_tag());
vcl_result = viennacl::linalg::solve(trans(vcl_A), vcl_B, viennacl::linalg::upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * unit_upper_tag: ";
viennacl::copy(B, vcl_B);
result = ublas::solve(trans(A), B, ublas::unit_upper_tag());
vcl_result = viennacl::linalg::solve(trans(vcl_A), vcl_B, viennacl::linalg::unit_upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * lower_tag: ";
viennacl::copy(B, vcl_B);
result = ublas::solve(trans(A), B, ublas::lower_tag());
vcl_result = viennacl::linalg::solve(trans(vcl_A), vcl_B, viennacl::linalg::lower_tag());
run_solver_check(result, vcl_result, retval, epsilon);
std::cout << " * unit_lower_tag: ";
viennacl::copy(B, vcl_B);
result = ublas::solve(trans(A), B, ublas::unit_lower_tag());
vcl_result = viennacl::linalg::solve(trans(vcl_A), vcl_B, viennacl::linalg::unit_lower_tag());
run_solver_check(result, vcl_result, retval, epsilon);
if (retval == EXIT_SUCCESS)
std::cout << "Test A^T \\ B passed!" << std::endl;
B = B_start;
C = C_start;
// Test: A^T \ B^T --------------------------------------------------------------------------
std::cout << "Testing A^T \\ B^T: " << std::endl;
std::cout << " * upper_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
//check solve():
result = ublas::solve(trans(A), C_trans, ublas::upper_tag());
vcl_result = viennacl::linalg::solve(trans(vcl_A), trans(vcl_C), viennacl::linalg::upper_tag());
run_solver_check(result, vcl_result, retval, epsilon);
//check kernels:
std::cout << " * upper_tag: ";
ublas::inplace_solve(trans(A), C_trans, ublas::upper_tag());
viennacl::linalg::inplace_solve(trans(vcl_A), trans(vcl_C), viennacl::linalg::upper_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * unit_upper_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(trans(A), C_trans, ublas::unit_upper_tag());
viennacl::linalg::inplace_solve(trans(vcl_A), trans(vcl_C), viennacl::linalg::unit_upper_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * lower_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(trans(A), C_trans, ublas::lower_tag());
viennacl::linalg::inplace_solve(trans(vcl_A), trans(vcl_C), viennacl::linalg::lower_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
std::cout << " * unit_lower_tag: ";
viennacl::copy(C, vcl_C); C_trans = trans(C);
ublas::inplace_solve(trans(A), C_trans, ublas::unit_lower_tag());
viennacl::linalg::inplace_solve(trans(vcl_A), trans(vcl_C), viennacl::linalg::unit_lower_tag());
C = trans(C_trans); run_solver_check(C, vcl_C, retval, epsilon);
if (retval == EXIT_SUCCESS)
std::cout << "Test A^T \\ B^T passed!" << std::endl;
return retval;
}
// Driver for one (F_A, F_B) storage-layout combination: builds a diagonally
// dominant random system matrix A (extra diagonal weight keeps the
// triangular solves stable), random right-hand sides B (m x rhs) and C
// (rhs x m, used transposed), and for each operand a plain matrix plus a
// matrix_range and a matrix_slice view living inside a 4x-sized buffer.
// Then runs test_solve() for all nine A/B operand-kind pairs, stopping at
// the first failure.
template< typename NumericT, typename F_A, typename F_B, typename Epsilon >
int test_solve(Epsilon const& epsilon)
{
int ret = EXIT_SUCCESS;
long matrix_size = 135; //some odd number, not too large
long rhs_num = 67;
std::cout << "--- Part 2: Testing matrix-matrix solver ---" << std::endl;
ublas::matrix<NumericT> A(matrix_size, matrix_size);
ublas::matrix<NumericT> B_start(matrix_size, rhs_num);
ublas::matrix<NumericT> C_start(rhs_num, matrix_size);
for (std::size_t i = 0; i < A.size1(); ++i)
{
for (std::size_t j = 0; j < A.size2(); ++j)
A(i,j) = static_cast<NumericT>(-0.5) * random<NumericT>();
A(i,i) = NumericT(1.0) + NumericT(2.0) * random<NumericT>(); //some extra weight on diagonal for stability
}
for (std::size_t i = 0; i < B_start.size1(); ++i)
for (std::size_t j = 0; j < B_start.size2(); ++j)
B_start(i,j) = random<NumericT>();
for (std::size_t i = 0; i < C_start.size1(); ++i)
for (std::size_t j = 0; j < C_start.size2(); ++j)
C_start(i,j) = random<NumericT>();
// A: plain matrix, plus range and (stride-2/-3) slice views into 4x buffers.
viennacl::range range1_A(matrix_size, 2*matrix_size);
viennacl::range range2_A(2*matrix_size, 3*matrix_size);
viennacl::slice slice1_A(matrix_size, 2, matrix_size);
viennacl::slice slice2_A(0, 3, matrix_size);
viennacl::matrix<NumericT, F_A> vcl_A(matrix_size, matrix_size);
viennacl::copy(A, vcl_A);
viennacl::matrix<NumericT, F_A> vcl_big_range_A(4*matrix_size, 4*matrix_size);
viennacl::matrix_range<viennacl::matrix<NumericT, F_A> > vcl_range_A(vcl_big_range_A, range1_A, range2_A);
viennacl::copy(A, vcl_range_A);
viennacl::matrix<NumericT, F_A> vcl_big_slice_A(4*matrix_size, 4*matrix_size);
viennacl::matrix_slice<viennacl::matrix<NumericT, F_A> > vcl_slice_A(vcl_big_slice_A, slice1_A, slice2_A);
viennacl::copy(A, vcl_slice_A);
// B: same three view kinds for the (matrix_size x rhs_num) right-hand side.
viennacl::range range1_B(matrix_size, 2*matrix_size);
viennacl::range range2_B(2*rhs_num, 3*rhs_num);
viennacl::slice slice1_B(matrix_size, 2, matrix_size);
viennacl::slice slice2_B(0, 3, rhs_num);
viennacl::matrix<NumericT, F_B> vcl_B(matrix_size, rhs_num);
viennacl::copy(B_start, vcl_B);
viennacl::matrix<NumericT, F_B> vcl_big_range_B(4*matrix_size, 4*rhs_num);
viennacl::matrix_range<viennacl::matrix<NumericT, F_B> > vcl_range_B(vcl_big_range_B, range1_B, range2_B);
viennacl::copy(B_start, vcl_range_B);
viennacl::matrix<NumericT, F_B> vcl_big_slice_B(4*matrix_size, 4*rhs_num);
viennacl::matrix_slice<viennacl::matrix<NumericT, F_B> > vcl_slice_B(vcl_big_slice_B, slice1_B, slice2_B);
viennacl::copy(B_start, vcl_slice_B);
// C: transposed-shape (rhs_num x matrix_size) right-hand side, same views.
viennacl::range range1_C(rhs_num, 2*rhs_num);
viennacl::range range2_C(2*matrix_size, 3*matrix_size);
viennacl::slice slice1_C(rhs_num, 2, rhs_num);
viennacl::slice slice2_C(0, 3, matrix_size);
viennacl::matrix<NumericT, F_B> vcl_C(rhs_num, matrix_size);
viennacl::copy(C_start, vcl_C);
viennacl::matrix<NumericT, F_B> vcl_big_range_C(4*rhs_num, 4*matrix_size);
viennacl::matrix_range<viennacl::matrix<NumericT, F_B> > vcl_range_C(vcl_big_range_C, range1_C, range2_C);
viennacl::copy(C_start, vcl_range_C);
viennacl::matrix<NumericT, F_B> vcl_big_slice_C(4*rhs_num, 4*matrix_size);
viennacl::matrix_slice<viennacl::matrix<NumericT, F_B> > vcl_slice_C(vcl_big_slice_C, slice1_C, slice2_C);
viennacl::copy(C_start, vcl_slice_C);
std::cout << "Now using A=matrix, B=matrix" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_A, vcl_B, vcl_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=matrix, B=range" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_A, vcl_range_B, vcl_range_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=matrix, B=slice" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_A, vcl_slice_B, vcl_slice_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=range, B=matrix" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_range_A, vcl_B, vcl_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=range, B=range" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_range_A, vcl_range_B, vcl_range_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=range, B=slice" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_range_A, vcl_slice_B, vcl_slice_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=slice, B=matrix" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_slice_A, vcl_B, vcl_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=slice, B=range" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_slice_A, vcl_range_B, vcl_range_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "Now using A=slice, B=slice" << std::endl;
ret = test_solve<NumericT>(epsilon,
A, B_start, C_start,
vcl_slice_A, vcl_slice_B, vcl_slice_C, vcl_B
);
if (ret != EXIT_SUCCESS)
return ret;
return ret;
}
//
// Control functions
//
// Runs the matrix-matrix solver tests for every combination of row-/column-
// major storage of A and B. Stops at the first failing combination and
// returns its exit code; EXIT_SUCCESS when all four combinations pass.
template< typename NumericT, typename Epsilon >
int test(Epsilon const& epsilon)
{
int ret;
std::cout << "////////////////////////////////" << std::endl;
std::cout << "/// Now testing A=row, B=row ///" << std::endl;
std::cout << "////////////////////////////////" << std::endl;
ret = test_solve<NumericT, viennacl::row_major, viennacl::row_major>(epsilon);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "////////////////////////////////" << std::endl;
std::cout << "/// Now testing A=row, B=col ///" << std::endl;
std::cout << "////////////////////////////////" << std::endl;
ret = test_solve<NumericT, viennacl::row_major, viennacl::column_major>(epsilon);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "////////////////////////////////" << std::endl;
std::cout << "/// Now testing A=col, B=row ///" << std::endl;
std::cout << "////////////////////////////////" << std::endl;
ret = test_solve<NumericT, viennacl::column_major, viennacl::row_major>(epsilon);
if (ret != EXIT_SUCCESS)
return ret;
std::cout << "////////////////////////////////" << std::endl;
std::cout << "/// Now testing A=col, B=col ///" << std::endl;
std::cout << "////////////////////////////////" << std::endl;
ret = test_solve<NumericT, viennacl::column_major, viennacl::column_major>(epsilon);
if (ret != EXIT_SUCCESS)
return ret;
return ret;
}
//
// -------------------------------------------------------------
//
// Entry point: runs the BLAS-3 test suite in single precision and, when the
// device supports it (OpenCL builds query the current device; other builds
// run it unconditionally), in double precision as well.
int main()
{
  const char *rule = "----------------------------------------------";

  std::cout << std::endl;
  std::cout << rule << std::endl;
  std::cout << rule << std::endl;
  std::cout << "## Test :: BLAS 3 routines" << std::endl;
  std::cout << rule << std::endl;
  std::cout << rule << std::endl;
  std::cout << std::endl;

  int retval = EXIT_SUCCESS;

  std::cout << std::endl;
  std::cout << rule << std::endl;
  std::cout << std::endl;
  {
    typedef float NumericT;
    NumericT epsilon = NumericT(1.0E-3);
    std::cout << "# Testing setup:" << std::endl;
    std::cout << " eps: " << epsilon << std::endl;
    std::cout << " numeric: float" << std::endl;
    retval = test<NumericT>(epsilon);
    if (retval != EXIT_SUCCESS)
      return retval;
    std::cout << "# Test passed" << std::endl;
  }
  std::cout << std::endl;
  std::cout << rule << std::endl;
  std::cout << std::endl;

#ifdef VIENNACL_HAVE_OPENCL
  if( viennacl::ocl::current_device().double_support() )
#endif
  {
    {
      typedef double NumericT;
      NumericT epsilon = 1.0E-11;
      std::cout << "# Testing setup:" << std::endl;
      std::cout << " eps: " << epsilon << std::endl;
      std::cout << " numeric: double" << std::endl;
      retval = test<NumericT>(epsilon);
      if (retval != EXIT_SUCCESS)
        return retval;
      std::cout << "# Test passed" << std::endl;
    }
    std::cout << std::endl;
    std::cout << rule << std::endl;
    std::cout << std::endl;
  }

  std::cout << std::endl;
  std::cout << "------- Test completed --------" << std::endl;
  std::cout << std::endl;
  return retval;
}
#include <string.h>
#include <stdint.h>
#include <sph/blake2b.h>
#include <cuda_helper.h>
#include <cuda_vectors.h>
#define TPB 512
#define NBN 2
static uint32_t *d_resNonces[MAX_GPUS];
static uint32_t *h_resNonces[MAX_GPUS];
static __constant__ uint2 _ALIGN(16) c_data[10];
static __constant__ uint2 _ALIGN(16) c_v[16];
// BLAKE2b message-word permutation schedule: row r lists the order in which
// the 16 message words are consumed during round r. Rows 10 and 11 repeat
// rows 0 and 1, matching BLAKE2b's 12 rounds (RFC 7693, section 2.7).
static __constant__ const uint32_t blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } ,
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 } , { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 } ,
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 } , { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 } ,
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 } , { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 } ,
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 } , { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 } ,
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } , { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
// CPU reference hash: BLAKE2b-256 over an 80-byte block header.
// Used on the host to re-check nonces reported by the GPU kernel.
extern "C" void blake2b_hash(void *output, const void *input)
{
	uint8_t _ALIGN(64) digest[32];
	blake2b_ctx ctx;

	blake2b_init(&ctx, 32, NULL, 0);
	blake2b_update(&ctx, input, 80);
	blake2b_final(&ctx, digest);

	memcpy(output, digest, 32);
}
// ----------------------------------------------------------------
// One BLAKE2b G mixing evaluation (device side) on four 64-bit state words
// held as uint2 pairs. The 64-bit rotations by 32/24/16/63 are implemented by
// the SWAPUINT2 / ROR24 / ROR16 / ROR2 helpers.
//   r - round index into the blake2b_sigma message schedule
//   i - column/diagonal index (selects which two message words are mixed in)
// Statement order is part of the algorithm; do not reorder.
__device__ __forceinline__
static void G(const int r, const int i, uint2 &a, uint2 &b, uint2 &c, uint2 &d,const uint2 m[16])
{
a = a + b + m[ blake2b_sigma[r][2*i] ];
d = SWAPUINT2( d ^ a ); // rotr64(d ^ a, 32)
c = c + d;
b = ROR24( b ^ c ); // rotr64(b ^ c, 24)
a = a + b + m[ blake2b_sigma[r][2*i+1] ];
d = ROR16( d ^ a ); // rotr64(d ^ a, 16)
c = c + d;
b = ROR2( b ^ c, 63); // rotr64(b ^ c, 63)
}
// One full BLAKE2b round: four column mixes followed by four diagonal mixes
// of the 16-word working state v[] with the message words m[].
#define ROUND(r) \
G(r, 0, v[0], v[4], v[ 8], v[12], m); \
G(r, 1, v[1], v[5], v[ 9], v[13], m); \
G(r, 2, v[2], v[6], v[10], v[14], m); \
G(r, 3, v[3], v[7], v[11], v[15], m); \
G(r, 4, v[0], v[5], v[10], v[15], m); \
G(r, 5, v[1], v[6], v[11], v[12], m); \
G(r, 6, v[2], v[7], v[ 8], v[13], m); \
G(r, 7, v[3], v[4], v[ 9], v[14], m);
// Sia/BLAKE2b nonce-search kernel: one thread per candidate nonce.
// The host (blake2b_setBlock) has pre-computed the nonce-independent G steps
// of round 0 and uploaded the partially mixed state in c_v[] plus the 80-byte
// header in c_data[]. Each thread patches its nonce into message word m[4].x
// (word 8 of the header, counted in 32-bit units), finishes round 0, runs
// rounds 1..10, then evaluates only the part of round 11 needed to
// reconstruct the hash word h[0] = IV0 ^ v[0] ^ v[8].
// On a candidate hit, the nonce is published via atomicExch into resNonce[0];
// a previously stored nonce, if any, is preserved in resNonce[1].
__global__ __launch_bounds__(512,1)
void blake2b_gpu_hash(const uint32_t threads, const uint32_t startNonce, uint32_t *resNonce, const uint32_t target6)
{
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if(thread<threads){
const uint32_t nonce = thread + startNonce;
uint2 v[16];
uint2 m[16];
// Load the 80-byte header as ten 64-bit message words; the remaining six
// words of the 128-byte BLAKE2b block are zero padding.
*(uint2x4*)&m[0] = *(uint2x4*)&c_data[0];
*(uint2x4*)&m[4] = *(uint2x4*)&c_data[4];
m[4].x = nonce; // low 32 bits of the 64-bit nonce word
m[8] = c_data[8];
m[9] = c_data[9];
m[10] = m[11] = make_uint2(0,0);
m[12] = m[13] = m[14] = m[15] = make_uint2(0,0);
// Start from the host-precomputed working state (see blake2b_setBlock).
#pragma unroll 4
for(uint32_t i=0;i<16;i+=4){
*(uint2x4*)&v[i] = *(uint2x4*)&c_v[i];
}
// Finish column G(0,2), which is the first step consuming the nonce (m[4]);
// the host already performed v[2] += v[6] of this G.
v[ 2] = v[ 2] + m[4];
v[14] = SWAPUINT2( v[14] ^ v[2] );
v[10] = v[10] + v[14];
v[ 6] = ROR24( v[ 6] ^ v[10] );
v[ 2] = v[ 2] + v[ 6] + m[ 5];
v[14] = ROR16( v[14] ^ v[ 2] );
v[10] = v[10] + v[14];
v[ 6] = ROR2( v[ 6] ^ v[10], 63);
// Finish diagonal G(0,4) (its m[8]/m[9] additions were precomputed on host).
v[10] = v[10] + v[15];
v[ 5] = ROR24( v[ 5] ^ v[10] );
v[ 0] = v[ 0] + v[ 5];
v[15] = ROR16(v[15] ^ v[0]);
v[10] = v[10] + v[15];
v[ 5] = ROR2( v[ 5] ^ v[10], 63);
G(0, 5, v[1], v[6], v[11], v[12], m);
G(0, 6, v[2], v[7], v[ 8], v[13], m);
G(0, 7, v[3], v[4], v[ 9], v[14], m);
ROUND( 1 );
ROUND( 2 );
ROUND( 3 );
ROUND( 4 );
ROUND( 5 );
ROUND( 6 );
ROUND( 7 );
ROUND( 8 );
ROUND( 9 );
ROUND( 10 );
// Partial final round: only the G steps feeding v[0] and v[8] are needed
// to recover the first output word of the hash.
// ROUND_F( 11 );
G(11, 0, v[0], v[4], v[ 8], v[12], m);
G(11, 1, v[1], v[5], v[ 9], v[13], m);
G(11, 2, v[2], v[6], v[10], v[14], m);
G(11, 3, v[3], v[7], v[11], v[15], m);
// G(11, 4, v[0], v[5], v[10], v[15], m);
v[ 0] = v[ 0] + v[ 5] + m[ 1];
v[15] = SWAPUINT2( v[15] ^ v[0] );
v[10] = v[10] + v[15];
v[ 5] = ROR24( v[ 5] ^ v[10] );
v[ 0] = v[ 0] + v[ 5];
// G(11, 5, v[1], v[6], v[11], v[12], m);
// H(11, 6, v[2], v[7], v[ 8], v[13], m);
v[ 2] = v[ 2] + v[ 7] + m[blake2b_sigma[11][12]];
v[13] = SWAPUINT2( v[13] ^ v[2]);
v[ 8] = v[ 8] + v[13];
v[ 7] = ROR24( v[7] ^ v[8] );
v[ 2] = v[ 2] + v[ 7] + m[blake2b_sigma[11][13]];
v[13] = ROR16( v[13] ^ v[2] );
v[ 8] = v[ 8] + v[13];
// 0xf2bdc928 is the low word of h[0] = IV0 ^ param-block; the xor3x test
// keeps only candidates whose first 32 hash bits are zero, then the next
// word (byte-swapped, since Sia compares the hash big-endian) is checked
// against the share target.
if (xor3x(v[8].x, v[0].x, 0xf2bdc928) == 0){
if (cuda_swab32(0x6a09e667 ^ v[0].y ^ v[8].y ) <= target6) {
uint32_t tmp = atomicExch(&resNonce[0], nonce);
if (tmp != UINT32_MAX)
resNonce[1] = tmp;
}
}
}
}
// Fetches the result nonces written by blake2b_gpu_hash back to the host.
// Returns the primary nonce (UINT32_MAX if none found or the copy failed) and
// stores a distinct secondary nonce, if any, into secNonce.
// threads/startNonce/target6 are unused here; the signature mirrors the
// launch helpers of sibling algorithms.
__host__
uint32_t blake2b_hash_cuda(const int thr_id, const uint32_t threads, const uint32_t startNonce, const uint32_t target6, uint32_t &secNonce)
{
	uint32_t found[NBN] = { UINT32_MAX, UINT32_MAX };
	uint32_t primary = UINT32_MAX;

	if (cudaMemcpy(found, d_resNonces[thr_id], NBN * sizeof(uint32_t), cudaMemcpyDeviceToHost) == cudaSuccess) {
		primary = found[0];
		// A duplicate secondary entry carries no extra information.
		secNonce = (found[1] == primary) ? UINT32_MAX : found[1];
	}
	return primary;
}
// Host-side midstate precomputation for the GPU search kernel.
// v[0..7] is the BLAKE2b chaining value h (IV xored with the parameter block
// for a 32-byte digest: note v[0] = 0x6a09e667f3bcc908 ^ 0x01010020).
// v[8..15] is the IV with the byte counter folded into v[12]
// (0x...ade682d1 ^ 80) and v[14] bitwise-inverted (final-block flag).
// The code below then executes every G step of round 0 that does not depend
// on the nonce (full columns 0, 1 and 3; the first add of column 2; and the
// message additions of diagonal G(0,4)), and uploads both the resulting
// state and the raw 80-byte header to constant memory. The device kernel
// resumes exactly where this leaves off.
__host__
void blake2b_setBlock(uint32_t *data)
{
uint64_t v[16] = {
0x6a09e667f2bdc928, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade68281, 0x9b05688c2b3e6c1f, 0xe07c265404be4294, 0x5be0cd19137e2179
};
uint64_t m[16];
// 80-byte header as ten message words; the rest of the block is zero.
memcpy(m,data,80);
memset(&m[10],0x00,6*sizeof(uint64_t));
// Column G(0,0) with m[0], m[1].
v[ 0]+= v[ 4] + m[ 0];
v[12] = ROTR64(v[12] ^ v[ 0],32);
v[ 8]+= v[12];
v[ 4] = ROTR64(v[ 4] ^ v[ 8],24);
v[ 0]+= v[ 4] + m[ 1];
v[12] = ROTR64(v[12] ^ v[ 0],16);
v[ 8]+= v[12];
v[ 4] = ROTR64(v[ 4] ^ v[ 8],63);
// Column G(0,1) with m[2], m[3].
v[ 1] = v[ 1] + v[ 5] + m[ 2];
v[13] = ROTR64( v[13] ^ v[1],32);
v[ 9] = v[ 9] + v[13];
v[ 5] = ROTR64( v[5] ^ v[9],24);
v[ 1] = v[ 1] + v[ 5] + m[ 3];
v[13] = ROTR64( v[13] ^ v[1],16);
v[ 9] = v[ 9] + v[13];
v[ 5] = ROTR64( v[5] ^ v[9], 63);
// Column G(0,2): only the nonce-independent v[2] += v[6]; m[4] holds the
// nonce and is added on the device.
v[ 2] = v[ 2] + v[ 6];
// Column G(0,3) with m[6], m[7].
v[ 3] = v[ 3] + v[ 7] + m[6];
v[15] = ROTR64( v[15] ^ v[3] ,32);
v[11] = v[11] + v[15];
v[ 7] = ROTR64( v[7] ^ v[11] ,24);
v[ 3] = v[ 3] + v[ 7] + m[7];
v[15] = ROTR64( v[15] ^ v[3] ,16);
v[11] = v[11] + v[15];
v[ 7] = ROTR64( v[7] ^ v[11], 63);
// Diagonal G(0,4): pre-add m[8] and m[9]; the interleaved v[10]/v[5] steps
// depend on state the device computes after mixing in the nonce.
v[ 0] = v[ 0] + v[ 5] + m[8];
v[15] = ROTR64( v[15] ^ v[0] ,32);
v[ 0] = v[ 0] + m[9];
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_data, data, 80, 0, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_v, v, 16*sizeof(uint64_t), 0, cudaMemcpyHostToDevice));
}
// Per-GPU-thread one-time-initialization flags.
static bool init[MAX_GPUS] = { 0 };
// Main mining loop for Sia: uploads the header midstate, repeatedly launches
// the search kernel over `throughput` nonces, and CPU-verifies any reported
// nonce with the reference blake2b_hash. Returns the number of valid shares
// found in this call (0, 1 or 2) and accumulates *hashes_done.
// Nonce layout: the 64-bit Sia nonce occupies words 8-9 of the 80-byte
// header; only the low word (pdata[8]) is scanned here.
int scanhash_sia(int thr_id, struct work *work, uint32_t max_nonce, unsigned long *hashes_done){
int dev_id = device_map[thr_id];
uint32_t _ALIGN(64) hash[8];
uint32_t _ALIGN(64) vhashcpu[8];
uint32_t _ALIGN(64) endiandata[20];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t Htarg = ptarget[7];
const uint32_t first_nonce = pdata[8];
int intensity = (device_sm[dev_id] > 500)?29:28;
uint32_t throughput = cuda_default_throughput(thr_id, 1U << intensity);
if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);
if (!init[thr_id])
{
cudaSetDevice(dev_id);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage (linux)
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO,dev_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonces[thr_id], NBN * sizeof(uint32_t)), -1);
CUDA_CALL_OR_RET_X(cudaMallocHost(&h_resNonces[thr_id], NBN * sizeof(uint32_t)), -1);
init[thr_id] = true;
}
const dim3 grid((throughput + TPB-1)/TPB);
const dim3 block(TPB);
memcpy(endiandata, pdata, 80);
endiandata[11] = 0; // nbits
blake2b_setBlock(endiandata);
// Reset both result slots to UINT32_MAX ("no nonce found").
cudaMemset(d_resNonces[thr_id], 0xff, NBN*sizeof(uint32_t));
do {
// NOTE(review): the third launch parameter (8 bytes of dynamic shared
// memory) is unused by the kernel -- presumably a leftover; confirm.
blake2b_gpu_hash <<<grid, block, 8>>> (throughput, pdata[8], d_resNonces[thr_id], ptarget[6]);
cudaMemcpy(h_resNonces[thr_id], d_resNonces[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost);
if (h_resNonces[thr_id][0] != UINT32_MAX){
int res = 0;
// Re-verify the candidate on the CPU before submitting.
endiandata[8] = h_resNonces[thr_id][0];
blake2b_hash(hash, endiandata);
// sia hash target is reversed (start of hash)
swab256(vhashcpu, hash);
if (vhashcpu[7] <= Htarg && fulltest(vhashcpu, ptarget)) {
work_set_target_ratio(work, vhashcpu);
*hashes_done = pdata[8] - first_nonce + throughput +1;
work->nonces[0] = h_resNonces[thr_id][0];
pdata[8] = h_resNonces[thr_id][0];
res=1;
if (h_resNonces[thr_id][1] != UINT32_MAX) {
endiandata[8] = h_resNonces[thr_id][1];
blake2b_hash(hash, endiandata);
// if(!opt_quiet)
// gpulog(LOG_BLUE, dev_id, "Found 2nd nonce: %08x", h_resNonces[thr_id][1]);
swab256(vhashcpu, hash);
work->nonces[1] = h_resNonces[thr_id][1];
pdata[21] = h_resNonces[thr_id][1];
// Keep the better-difficulty share in slot 0.
if (bn_hash_target_ratio(vhashcpu, ptarget) > work->shareratio[0]) {
work_set_target_ratio(work, vhashcpu);
xchg(work->nonces[0], work->nonces[1]);
xchg(pdata[8], pdata[21]);
}
res=2;
}
return res;
}
}
pdata[8] += throughput;
}while(!work_restart[thr_id].restart && ((uint64_t)max_nonce > (uint64_t)throughput + pdata[8]));
*hashes_done = pdata[8] - first_nonce +1;
return 0;
}
// cleanup
// Releases the per-thread resources allocated by scanhash_sia().
// Fix: h_resNonces[thr_id] is allocated with cudaMallocHost in scanhash_sia
// but was never released here, leaking pinned host memory on every
// free/re-init cycle.
extern "C" void free_sia(int thr_id)
{
	if (!init[thr_id])
		return;

	cudaDeviceSynchronize();

	cudaFree(d_resNonces[thr_id]);
	cudaFreeHost(h_resNonces[thr_id]); // was leaked previously

	init[thr_id] = false;
	cudaDeviceSynchronize();
}
// ---- SIA LONGPOLL --------------------------------------------------------------------------------
// Growable byte buffer used to collect libcurl response bodies.
struct data_buffer {
	void *buf;
	size_t len;
};

extern void calc_network_diff(struct work *work);

// libcurl write callback: appends the received chunk to the data_buffer in
// user_data, keeping the buffer NUL-terminated so it is usable as a C string.
// Returns the number of bytes consumed, or 0 on allocation failure (which
// makes curl abort the transfer). The caller owns db->buf.
size_t sia_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data)
{
	struct data_buffer *db = (struct data_buffer *)user_data;
	size_t const chunk_len = size * nmemb;
	size_t const new_len = db->len + chunk_len;

	void *grown = realloc(db->buf, new_len + 1);
	if (!grown)
		return 0;

	memcpy((char *)grown + db->len, ptr, chunk_len);
	((char *)grown)[new_len] = '\0'; /* null terminate */

	db->buf = grown;
	db->len = new_len;
	return chunk_len;
}
// Fetches a new block header from the pool's /miner/header endpoint
// (Sia-Agent protocol, e.g. nanopool). Returns a heap-allocated hex string of
// the 112-byte header on success, or NULL on transport error / empty reply.
// The caller owns the returned string (free()).
// Fix: the response buffer filled by sia_data_cb was leaked on every call;
// it is now released before returning.
char* sia_getheader(CURL *curl, struct pool_infos *pool)
{
	char curl_err_str[CURL_ERROR_SIZE] = { 0 };
	struct data_buffer all_data = { 0 };
	struct curl_slist *headers = NULL;
	char data[256] = { 0 };
	char url[512];

	// nanopool
	snprintf(url, 512, "%s/miner/header?address=%s&worker=%s", //&longpoll
		pool->url, pool->user, pool->pass);

	if (opt_protocol)
		curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_POST, 0);
	curl_easy_setopt(curl, CURLOPT_ENCODING, "");
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 0);
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
	curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, opt_timeout);
	curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
	curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, sia_data_cb);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data);

	headers = curl_slist_append(headers, "Accept: application/octet-stream");
	headers = curl_slist_append(headers, "Expect:"); // disable Expect hdr
	headers = curl_slist_append(headers, "User-Agent: Sia-Agent"); // required for now
//	headers = curl_slist_append(headers, "User-Agent: " USER_AGENT);
//	headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

	int rc = curl_easy_perform(curl);
	if (rc && strlen(curl_err_str)) {
		applog(LOG_WARNING, "%s", curl_err_str);
	}

	if (all_data.len >= 112)
		cbin2hex(data, (const char*) all_data.buf, 112);
	if (opt_protocol || all_data.len != 112)
		applog(LOG_DEBUG, "received %d bytes: %s", (int) all_data.len, data);

	char *header_hex = (rc == 0 && all_data.len) ? strdup(data) : NULL;

	free(all_data.buf); // fix: response body was previously leaked
	curl_slist_free_all(headers);

	return header_hex;
}
// Decodes the pool's hex header reply into a work unit: the share target
// (first 32 bytes, byte-reversed since Sia compares the start of the hash),
// the 80-byte header, a randomized upper half for the 64-bit nonce, and a
// job id derived from the header timestamp word.
// Fix: rand() << 16 shifted a plain int -- undefined behavior (signed
// overflow) whenever rand() has bit 15 or above set; the shift is now done
// in uint32_t.
bool sia_work_decode(const char *hexdata, struct work *work)
{
	uint8_t target[32];
	if (!work) return false;

	hex2bin((uchar*)target, &hexdata[0], 32);
	swab256(work->target, target);
	work->targetdiff = target_to_diff(work->target);

	hex2bin((uchar*)work->data, &hexdata[64], 80);

	// high 16 bits of the 64 bits nonce
	work->data[9] = ((uint32_t)rand()) << 16;

	// use work ntime as job id
	cbin2hex(work->job_id, (const char*)&work->data[10], 4);
	calc_network_diff(work);

	if (stratum_diff != work->targetdiff) {
		stratum_diff = work->targetdiff;
		applog(LOG_WARNING, "Pool diff set to %g", stratum_diff);
	}
	return true;
}
extern int share_result(int result, int pooln, double sharediff, const char *reason);
// Submits a solved 80-byte header to the pool via HTTP POST.
// The pool answers 204 No Content on accepted shares; anything else is
// treated as a reject and logged. Always returns true -- errors are reported
// through share_result / the log, not the return value.
// Fixes: (1) the response body collected by sia_data_cb was leaked on every
// call; (2) `errcode` was read uninitialized when curl_easy_getinfo failed
// (its CURLcode result was computed but ignored).
bool sia_submit(CURL *curl, struct pool_infos *pool, struct work *work)
{
	char curl_err_str[CURL_ERROR_SIZE] = { 0 };
	struct data_buffer all_data = { 0 };
	struct curl_slist *headers = NULL;
	char buf[256] = { 0 };
	char url[512];

	if (opt_protocol)
		applog_hex(work->data, 80);
	//applog_hex(&work->data[8], 16);
	//applog_hex(&work->data[10], 4);

	// nanopool
	snprintf(url, 512, "%s/miner/header?address=%s&worker=%s",
		pool->url, pool->user, pool->pass);

	if (opt_protocol)
		curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_ENCODING, "");
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 0);
	curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
	curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
	curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, sia_data_cb);

	memcpy(buf, work->data, 80);
	curl_easy_setopt(curl, CURLOPT_POST, 1);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, 80);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, (void*) buf);

//	headers = curl_slist_append(headers, "Content-Type: application/octet-stream");
//	headers = curl_slist_append(headers, "Content-Length: 80");
	headers = curl_slist_append(headers, "Accept:"); // disable Accept hdr
	headers = curl_slist_append(headers, "Expect:"); // disable Expect hdr
	headers = curl_slist_append(headers, "User-Agent: Sia-Agent");
//	headers = curl_slist_append(headers, "User-Agent: " USER_AGENT);
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

	int res = curl_easy_perform(curl) == 0;

	long errcode = 0; // fix: was uninitialized when curl_easy_getinfo failed
	CURLcode c = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &errcode);
	if (c != CURLE_OK || errcode != 204) {
		if (strlen(curl_err_str))
			applog(LOG_ERR, "submit err %ld %s", errcode, curl_err_str);
		res = 0;
	}

	share_result(res, work->pooln, work->sharediff[0], res ? NULL : (char*) all_data.buf);

	free(all_data.buf); // fix: response body was previously leaked
	curl_slist_free_all(headers);
	return true;
}
#include <tensorflow/core/framework/tensor.h>
#include "tf_cuda_utils.h"
#include "rasterise_grad_common.h"
using namespace tensorflow;
// Expands indexed triangle data into a flat, per-corner vertex stream.
// For every (batch, face) pair handled by this thread, the three corner
// vertices are gathered from `vertices` via the index triple in `faces` and
// written consecutively into `buffer`, together with the corner's barycentric
// seed (1 at its own corner, 0 elsewhere) and the face's full index triple.
// `buffer` is a Tensor view over the GL vertex-buffer.
__global__ void upload_vertices(
    TTypes<Vertex, 2>::Tensor buffer,
    TTypes<float, 3>::ConstTensor const vertices, TTypes<int, 3>::ConstTensor const faces,
    dim3 const total_threads
) {
    CUDA_AXIS_KERNEL_LOOP(iib, total_threads, x) {
        CUDA_AXIS_KERNEL_LOOP(face_index, total_threads, y) {
            for (int corner = 0; corner < 3; ++corner) {
                int const src_index = faces(iib, face_index, corner);
                auto &dest = buffer(iib, face_index * 3 + corner);
                // ** it'd be nice to do this as a 16-byte block, but unfortunately Vertex is not 16-byte aligned
                dest.position[0] = vertices(iib, src_index, 0);
                dest.position[1] = vertices(iib, src_index, 1);
                dest.position[2] = vertices(iib, src_index, 2);
                dest.position[3] = vertices(iib, src_index, 3);
                dest.barycentric[0] = (corner == 0) ? 1.f : 0.f;
                dest.barycentric[1] = (corner == 1) ? 1.f : 0.f;
                dest.indices[0] = faces(iib, face_index, 0);
                dest.indices[1] = faces(iib, face_index, 1);
                dest.indices[2] = faces(iib, face_index, 2);
            }
        }
    }
}
// Host-side launcher for upload_vertices over a (batch, face) 2D grid.
// `buffer` is a Tensor view backed by the GL vertex-buffer (used only for
// indexing convenience). A zero-sized batch or zero faces is a no-op, since
// the 2D launch-config helper assumes positive extents.
void launch_vertex_upload(
    TTypes<Vertex, 2>::Tensor &buffer,
    Tensor const &vertices, Tensor const &faces,
    Eigen::GpuDevice const &device
) {
    auto const batch_count = faces.dim_size(0);
    auto const face_count = faces.dim_size(1);
    if (batch_count == 0 || face_count == 0)
        return;

    auto const config = GetCuda2DLaunchConfig(batch_count, face_count, device);
    upload_vertices<<<config.block_count, config.thread_per_block, 0, device.stream()>>>(
        buffer,
        vertices.tensor<float, 3>(), faces.tensor<int, 3>(),
        config.virtual_thread_count
    );
}
namespace {
    // Minimal three-component float vector for device-side pixel math.
    struct Vec3 {
        float x, y, z;

        __device__ Vec3(float const x_, float const y_, float const z_) : x(x_), y(y_), z(z_) {}

        // Builds a Vec3 from the xyz components of a float4, discarding w.
        __device__ static Vec3 from_xyz(float4 const &f4) {
            return Vec3(f4.x, f4.y, f4.z);
        }

        // Component access by index; out-of-range indices yield quiet NaN.
        __device__ float operator [](int const i) const {
            if (i == 0)
                return x;
            if (i == 1)
                return y;
            if (i == 2)
                return z;
            return std::numeric_limits<float>::quiet_NaN();
        }

        __device__ Vec3 operator +(Vec3 const &other) const {
            return Vec3(x + other.x, y + other.y, z + other.z);
        }

        __device__ Vec3 operator -(Vec3 const &other) const {
            return Vec3(x - other.x, y - other.y, z - other.z);
        }

        __device__ Vec3 operator *(float const &other) const {
            return Vec3(x * other, y * other, z * other);
        }

        // L1 norm: sum of absolute component values.
        __device__ float L1() const {
            return std::abs(x) + std::abs(y) + std::abs(z);
        }

        __device__ bool operator !=(Vec3 const &other) const {
            // Note these are exact float comparisons with zero tolerance!
            return x != other.x || y != other.y || z != other.z;
        }
    };
}
// Backward pass of the rasteriser, one thread per packed-buffer pixel
// (several frames are packed into one buffer, `frames_per_row` across).
// Per pixel it:
//  - scatters the incoming colour gradient either onto the three vertices of
//    the covered triangle (weighted by barycentrics, via atomicAdd) or onto
//    the background image;
//  - estimates image-space intensity gradients with a 3x3 Scharr filter over
//    the rendered pixels, and chains them into gradients on the clip-space
//    x, y and w of the covering triangle's vertices;
//  - near occlusion boundaries, dilates the occluding triangle into adjacent
//    pixels so boundary gradients are attributed to the occluder.
// barycentric.x == -1.f marks pixels not covered by any fragment.
__global__ void assemble_grads(
TTypes<float, 3>::Tensor grad_vertices, TTypes<float, 3>::Tensor grad_vertex_colors, TTypes<float, 4>::Tensor grad_background, TTypes<float, 4>::Tensor debug_thingy,
cudaSurfaceObject_t const barycentrics_and_depth_surface, cudaSurfaceObject_t const indices_surface,
TTypes<float, 4>::ConstTensor const pixels, TTypes<float, 4>::ConstTensor const grad_pixels, TTypes<float, 3>::ConstTensor const vertices,
int const frames_per_row,
dim3 const total_threads
) {
auto const batch_size = static_cast<int>(grad_pixels.dimension(0));
auto const frame_height = static_cast<int>(grad_pixels.dimension(1));
auto const frame_width = static_cast<int>(grad_pixels.dimension(2));
auto const channels = static_cast<int>(grad_pixels.dimension(3));
CUDA_AXIS_KERNEL_LOOP(buffer_x, total_threads, x) {
CUDA_AXIS_KERNEL_LOOP(buffer_y, total_threads, y) {
// Map packed-buffer coordinates back to (frame-in-batch, x, y).
auto const iib = buffer_y / frame_height * frames_per_row + buffer_x / frame_width;
if (iib < batch_size) {
auto const x_in_frame = buffer_x % frame_width;
auto const y_in_frame = frame_height - 1 - buffer_y % frame_height; // the vertical flip is because our images are top-row-first, as in tensorflow
auto const at = [&] (int const offset_x, int const offset_y) {
// This returns the nearest edge pixel for out-of-bounds accesses
auto const unclipped_x = x_in_frame + offset_x;
auto const unclipped_y = y_in_frame - offset_y; // the negation here is again due to vertical-flipping of our pixels
auto const clipped_x = max(0, min(frame_width - 1, unclipped_x));
auto const clipped_y = max(0, min(frame_height - 1, unclipped_y));
return Vec3(
pixels(iib, clipped_y, clipped_x, 0),
pixels(iib, clipped_y, clipped_x, 1),
pixels(iib, clipped_y, clipped_x, 2)
);
};
// Note that the following filters are negative-offset minus positive-offset!
auto const scharr_x = (at(-1, -1) + at(-1, +1) - at(+1, -1) - at(+1, +1)) * (3.f / 32.f) + (at(-1, 0) - at(+1, 0)) * (10.f / 32.f);
auto const scharr_y = (at(-1, -1) + at(+1, -1) - at(-1, +1) - at(+1, +1)) * (3.f / 32.f) + (at(0, -1) - at(0, +1)) * (10.f / 32.f);
// surf2Dread x-coordinates are in bytes; each texel is a 16-byte float4.
auto const barycentric_and_depth = surf2Dread<float4>(barycentrics_and_depth_surface, buffer_x * 16, buffer_y);
auto barycentric = Vec3::from_xyz(barycentric_and_depth);
auto clip_w = barycentric_and_depth.w; // this will be infinity if we're not over a fragment (i.e. iff barycentric == index_f == 1.f)
// NOTE(review): the code below tests barycentric.x against -1.f for the
// "no fragment" case, not 1.f as the comment above suggests -- confirm
// which sentinel the forward pass writes.
auto index_f = Vec3::from_xyz(surf2Dread<float4>(indices_surface, buffer_x * 16, buffer_y));
// Accumulate colour gradients; see notes p37-38
if (barycentric.x != -1.f) {
for (int index_in_primitive = 0; index_in_primitive < 3; ++index_in_primitive) {
int const vertex_index = static_cast<int>(index_f[index_in_primitive]);
for (int channel = 0; channel < channels; ++channel) {
auto const color_grad = grad_pixels(iib, y_in_frame, x_in_frame, channel) * barycentric[index_in_primitive];
atomicAdd(&grad_vertex_colors(iib, vertex_index, channel), color_grad);
}
}
} else {
for (int channel = 0; channel < channels; ++channel) {
auto const grad_pixel = grad_pixels(iib, y_in_frame, x_in_frame, channel);
grad_background(iib, y_in_frame, x_in_frame, channel) = grad_pixel; // no need to accumulate, as each background-pixel maps to at most one output-pixel
}
}
debug_thingy(iib, y_in_frame, x_in_frame, 1) = grad_pixels(iib, y_in_frame, x_in_frame, 1);
debug_thingy(iib, y_in_frame, x_in_frame, 2) = grad_pixels(iib, y_in_frame, x_in_frame, 2);
// Dilate edges of occluders at occlusion boundaries, so we add gradients through pixels just outside triangles
// to the occluder, not to the occludee (see notes p55-59); also applies when the current pixel is background
if (x_in_frame > 0 && y_in_frame > 0 && x_in_frame < frame_width - 1 && y_in_frame < frame_height - 1) {
bool dilated = false;
auto const dilate_from_offset = [&] (int2 const offset) {
auto const index_f_at_offset = Vec3::from_xyz(surf2Dread<float4>(indices_surface, (buffer_x + offset.x) * 16, buffer_y + offset.y));
auto const barycentric_and_depth_at_offset = surf2Dread<float4>(barycentrics_and_depth_surface, (buffer_x + offset.x) * 16, buffer_y + offset.y);
auto const clip_w_at_offset = barycentric_and_depth_at_offset.w;
if (index_f_at_offset.x != -1.f && index_f_at_offset != index_f && clip_w > clip_w_at_offset) {
// The adjacent pixel is over a triangle, and that triangle is not the same as ours, and the adjacent pixel is
// nearer the camera than us -- hence, the adjacent triangle should dilate into our pixel
barycentric = Vec3::from_xyz(barycentric_and_depth_at_offset);
index_f = index_f_at_offset;
clip_w = clip_w_at_offset;
dilated = true;
debug_thingy(iib, y_in_frame, x_in_frame, 0) = 1.e-2f;
}
};
// ** this doesn't handle the case of one-pixel-wide faces correctly -- *both* the adjacent faces should be dilated into the
// ** same pixel, which requires a structural change (as currently each pixel only passes gradients to vertices of one triangle)
// ** also, we should consider diagonal neighbours, i.e. points over the occludee that are diagonally-adjacent to a pixel of
// ** the occluder, as the 3x3 support of the scharr filter implies these will pick up incorrect gradients too
// ** perhaps best to do a loop, considering dilation from preferred orthogonal direction, then other, then diagonals, stopping
// ** if one results in a dilation (or, ideally, summing)
auto offset_direction = scharr_x.L1() > scharr_y.L1() ? int2{1, 0} : int2{0, 1};
if ((x_in_frame + y_in_frame) % 2 == 1) {
// Dither the preferred direction of offset to reduce bias
offset_direction.x = -offset_direction.x;
offset_direction.y = -offset_direction.y;
}
dilate_from_offset(offset_direction);
if (!dilated)
dilate_from_offset({-offset_direction.x, -offset_direction.y});
}
if (barycentric.x != -1.f) {
// Accumulate position gradients; see notes p25-27, 32-35, 65-66
auto const width_f = static_cast<float>(frame_width);
auto const height_f = static_cast<float>(frame_height);
float dL_dx = 0.f, dL_dy = 0.f; // 'dx' being physical/pixel/fragment x-position, not that of any particular vertex
for (int channel = 0; channel < channels; ++channel) {
auto const dL_dchannel = grad_pixels(iib, y_in_frame, x_in_frame, channel);
dL_dx += dL_dchannel * scharr_x[channel];
dL_dy += dL_dchannel * scharr_y[channel];
}
float clip_x = 0.f, clip_y = 0.f; // of the fragment
for (int index_in_primitive = 0; index_in_primitive < 3; ++index_in_primitive) {
int const vertex_index = static_cast<int>(index_f[index_in_primitive]);
clip_x += barycentric[index_in_primitive] * vertices(iib, vertex_index, 0);
clip_y += barycentric[index_in_primitive] * vertices(iib, vertex_index, 1);
}
// Chain rule through the viewport transform: view = clip * 0.5 * extent / w.
for (int index_in_primitive = 0; index_in_primitive < 3; ++index_in_primitive) {
auto const d_xview_by_xclip = .5f * width_f / clip_w;
auto const d_yview_by_yclip = .5f * height_f / clip_w;
auto const d_xview_by_wclip = -.5f * width_f * clip_x / (clip_w * clip_w);
auto const d_yview_by_wclip = -.5f * height_f * clip_y / (clip_w * clip_w);
auto const dL_dxview_times_dclip_dvertex = dL_dx * barycentric[index_in_primitive]; // barycentric can be seen as d_clip / d_vertex, and is logically the final step mapping the change in fragment clip-location to vertex clip-locations
auto const dL_dyview_times_dclip_dvertex = dL_dy * barycentric[index_in_primitive];
int const vertex_index = static_cast<int>(index_f[index_in_primitive]);
atomicAdd(&grad_vertices(iib, vertex_index, 0), dL_dxview_times_dclip_dvertex * d_xview_by_xclip);
atomicAdd(&grad_vertices(iib, vertex_index, 1), dL_dyview_times_dclip_dvertex * d_yview_by_yclip);
atomicAdd(&grad_vertices(iib, vertex_index, 3), dL_dxview_times_dclip_dvertex * d_xview_by_wclip + dL_dyview_times_dclip_dvertex * d_yview_by_wclip);
}
}
}
}
}
}
// Host-side launcher for assemble_grads: zeroes the output gradient tensors,
// wraps the rasteriser's CUDA arrays in surface objects, launches the
// gradient-assembly kernel on the device stream, then destroys the surfaces.
// Fix: the cudaResourceDesc locals were used uninitialized -- only resType
// and res.array.array were assigned, leaving the remaining union/fields as
// stack garbage; the CUDA runtime requires the descriptor to be zeroed
// before cudaCreateSurfaceObject.
void launch_grad_assembly(
    Tensor &grad_vertices, Tensor &grad_vertex_colors, Tensor &grad_background, Tensor &debug_thingy,
    cudaArray_t const &barycentrics_and_depth_array, cudaArray_t const &indices_array,
    Tensor const &pixels, Tensor const &grad_pixels, Tensor const &vertices,
    int const buffer_width, int const buffer_height, Eigen::GpuDevice const &device
) {
    // Clear the accumulators; the kernel adds into grad_vertices /
    // grad_vertex_colors and writes grad_background / debug_thingy sparsely.
    if (
        cudaMemsetAsync(grad_vertices.tensor<float, 3>().data(), 0, sizeof(float) * grad_vertices.NumElements(), device.stream()) ||
        cudaMemsetAsync(grad_vertex_colors.tensor<float, 3>().data(), 0, sizeof(float) * grad_vertex_colors.NumElements(), device.stream()) ||
        cudaMemsetAsync(grad_background.tensor<float, 4>().data(), 0, sizeof(float) * grad_background.NumElements(), device.stream()) ||
        cudaMemsetAsync(debug_thingy.tensor<float, 4>().data(), 0, sizeof(float) * debug_thingy.NumElements(), device.stream())
    )
        LOG(FATAL) << "one or more calls to cudaMemsetAsync failed";

    // ** these would almost certainly be better passed as texture-objects, due to the neighbour-access in scharr-filtering & edge-dilating
    cudaResourceDesc barycentrics_and_depth_descriptor = {}, indices_descriptor = {}; // zero-initialized (required by the CUDA runtime)
    barycentrics_and_depth_descriptor.resType = indices_descriptor.resType = cudaResourceTypeArray;
    barycentrics_and_depth_descriptor.res.array.array = barycentrics_and_depth_array;
    indices_descriptor.res.array.array = indices_array;

    cudaSurfaceObject_t barycentrics_and_depth_surface, indices_surface;
    if (
        cudaCreateSurfaceObject(&barycentrics_and_depth_surface, &barycentrics_and_depth_descriptor) ||
        cudaCreateSurfaceObject(&indices_surface, &indices_descriptor)
    )
        LOG(FATAL) << "one or more calls to cudaCreateSurfaceObject failed";

    auto const assembly_config = GetCuda2DLaunchConfig(buffer_width, buffer_height, device);
    assemble_grads<<<assembly_config.block_count, assembly_config.thread_per_block, 0, device.stream()>>>(
        grad_vertices.tensor<float, 3>(), grad_vertex_colors.tensor<float, 3>(), grad_background.tensor<float, 4>(), debug_thingy.tensor<float, 4>(),
        barycentrics_and_depth_surface, indices_surface,
        pixels.tensor<float, 4>(), grad_pixels.tensor<float, 4>(), vertices.tensor<float, 3>(),
        buffer_width / grad_pixels.dim_size(2),
        assembly_config.virtual_thread_count
    );

    if (
        cudaDestroySurfaceObject(barycentrics_and_depth_surface) ||
        cudaDestroySurfaceObject(indices_surface)
    )
        LOG(FATAL) << "one or more calls to cudaDestroySurfaceObject failed";
}
* \file
* cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within global memory.
*/
#pragma once
#include <stdio.h>
#include <iterator>
#include "../block_region/block_region_radix_sort_upsweep.cuh"
#include "../block_region/block_region_radix_sort_downsweep.cuh"
#include "../block_region/block_region_scan.cuh"
#include "../../grid/grid_even_share.cuh"
#include "../../util_debug.cuh"
#include "../../util_device.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Kernel entry points
*****************************************************************************/
/**
* Upsweep pass kernel entry point (multi-block). Computes privatized digit histograms, one per block.
*/
template <
typename BlockRegionRadixSortUpsweepPolicy, ///< Parameterized BlockRegionRadixSortUpsweepPolicy tuning policy type
bool DESCENDING, ///< Whether or not the sorted-order is high-to-low
typename Key, ///< Key type
typename Offset> ///< Signed integer type for global offsets
__launch_bounds__ (int(BlockRegionRadixSortUpsweepPolicy::BLOCK_THREADS), 1)
__global__ void RadixSortUpsweepKernel(
Key *d_keys, ///< [in] Input keys buffer
Offset *d_spine, ///< [out] Privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.)
Offset num_items, ///< [in] Total number of input data items
int current_bit, ///< [in] Bit position of current radix digit
bool first_pass, ///< [in] Whether this is the first digit pass
GridEvenShare<Offset> even_share) ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block
{
// NOTE(review): first_pass is not referenced in this kernel body -- confirm
// whether it is vestigial or consumed by an alternate code path.
// Parameterize BlockRegionRadixSortUpsweep type for the current configuration
typedef BlockRegionRadixSortUpsweep<BlockRegionRadixSortUpsweepPolicy, Key, Offset> BlockRegionRadixSortUpsweepT; // Primary
// Shared memory storage
__shared__ typename BlockRegionRadixSortUpsweepT::TempStorage temp_storage;
// Initialize even-share descriptor for this thread block
even_share.BlockInit();
// Count the occurrences of each radix digit within this block's share of keys.
Offset bin_count;
BlockRegionRadixSortUpsweepT(temp_storage, d_keys, current_bit).ProcessRegion(
even_share.block_offset,
even_share.block_end,
bin_count);
// Write out digit counts (striped)
// Striped layout d_spine[bin * gridDim.x + block] lets the scan kernel
// produce global digit offsets with a single linear prefix sum. For
// descending sorts, bins are written in reverse digit order.
if (threadIdx.x < BlockRegionRadixSortUpsweepT::RADIX_DIGITS)
{
int bin_idx = (DESCENDING) ?
BlockRegionRadixSortUpsweepT::RADIX_DIGITS - threadIdx.x - 1 :
threadIdx.x;
d_spine[(gridDim.x * bin_idx) + blockIdx.x] = bin_count;
}
}
/**
 * Spine scan kernel entry point (single-block).  Computes an exclusive prefix
 * sum across the privatized digit histograms in place, turning per-block
 * digit counts into global scatter offsets for the downsweep pass.
 */
template <
    typename BlockRegionScanPolicy,     ///< Parameterizable tuning policy type for cub::BlockRegionScan abstraction
    typename Offset>                    ///< Signed integer type for global offsets
__launch_bounds__ (int(BlockRegionScanPolicy::BLOCK_THREADS), 1)
__global__ void RadixSortScanKernel(
    Offset  *d_spine,                   ///< [in,out] Privatized (per block) digit histograms (striped layout)
    int     num_counts)                 ///< [in] Total number of bin-counts
{
    // Specialize the scan region abstraction for the current tuning policy
    typedef BlockRegionScan<BlockRegionScanPolicy, Offset*, Offset*, cub::Sum, Offset, Offset> ScanRegionT;

    // Shared memory storage for the scan abstraction
    __shared__ typename ScanRegionT::TempStorage temp_storage;

    // Only the first block participates (this kernel is launched with a single block)
    if (blockIdx.x > 0)
        return;

    // Scan instance reading from and writing back to d_spine in place
    ScanRegionT block_scan(temp_storage, d_spine, d_spine, cub::Sum(), Offset(0));

    // Consume consecutive full tiles, carrying a running prefix across them
    // (the spine allocation is padded by a full tile, so full-tile consumption
    // covers every live counter)
    RunningBlockPrefixCallbackOp<Offset, Sum> prefix_op(0, Sum());
    int tile_offset = 0;
    while (tile_offset + ScanRegionT::TILE_ITEMS <= num_counts)
    {
        block_scan.ConsumeTile<true, false>(tile_offset, prefix_op);
        tile_offset += ScanRegionT::TILE_ITEMS;
    }
}
/**
 * Downsweep pass kernel entry point (multi-block).  Scatters keys (and
 * values) from the ping buffers into their destination bins in the pong
 * buffers for the current digit place, using the scanned spine of digit
 * counts as global base offsets.
 */
template <
    typename BlockRegionRadixSortDownsweepPolicy,   ///< Parameterizable tuning policy type for cub::BlockRegionRadixSortDownsweep abstraction
    bool     DESCENDING,                            ///< Whether or not the sorted-order is high-to-low
    typename Key,                                   ///< Key type
    typename Value,                                 ///< Value type
    typename Offset>                                ///< Signed integer type for global offsets
__launch_bounds__ (int(BlockRegionRadixSortDownsweepPolicy::BLOCK_THREADS))
__global__ void RadixSortDownsweepKernel(
    Key                     *d_keys_in,             ///< [in] Input keys ping buffer
    Key                     *d_keys_out,            ///< [out] Output keys pong buffer
    Value                   *d_values_in,           ///< [in] Input values ping buffer
    Value                   *d_values_out,          ///< [out] Output values pong buffer
    Offset                  *d_spine,               ///< [in] Scan of privatized (per block) digit histograms (striped layout)
    Offset                  num_items,              ///< [in] Total number of input data items
    int                     current_bit,            ///< [in] Bit position of current radix digit
    bool                    first_pass,             ///< [in] Whether this is the first digit pass (not referenced in this body)
    bool                    last_pass,              ///< [in] Whether this is the last digit pass (not referenced in this body)
    GridEvenShare<Offset>   even_share)             ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block
{
    // Specialize the downsweep region abstraction for the current tuning policy
    typedef BlockRegionRadixSortDownsweep<BlockRegionRadixSortDownsweepPolicy, DESCENDING, Key, Value, Offset> DownsweepT;

    // Shared memory storage for the downsweep abstraction
    __shared__ typename DownsweepT::TempStorage temp_storage;

    // Determine this block's [block_offset, block_end) slice of the input
    even_share.BlockInit();

    // Scatter this block's region of keys/values into their digit bins
    DownsweepT downsweep(temp_storage, num_items, d_spine, d_keys_in, d_keys_out, d_values_in, d_values_out, current_bit);
    downsweep.ProcessRegion(even_share.block_offset, even_share.block_end);
}
/******************************************************************************
* Dispatch
******************************************************************************/
/**
* Utility class for dispatching the appropriately-tuned kernels for DeviceRadixSort
*/
template <
bool DESCENDING, ///< Whether or not the sorted-order is high-to-low
typename Key, ///< Key type
typename Value, ///< Value type
typename Offset> ///< Signed integer type for global offsets
struct DeviceRadixSortDispatch
{
/******************************************************************************
* Tuning policies
******************************************************************************/
/// SM35 (Kepler GK110+) tuning policies
struct Policy350
{
enum {
// Keys-only sorting is signaled by Value == NullType
KEYS_ONLY = (Equals<Value, NullType>::VALUE),
// Width of the widest of Key/Value in 4-byte units; used to shrink items-per-thread for fatter types
SCALE_FACTOR = (CUB_MAX(sizeof(Key), sizeof(Value)) + 3) / 4,
// Radix bits consumed per primary digit pass
RADIX_BITS = 5,
};
// Primary UpsweepPolicy <block threads, items per thread, load modifier, radix bits>
typedef BlockRegionRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), LOAD_LDG, RADIX_BITS> UpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), LOAD_LDG, RADIX_BITS> UpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy;
// Alternate UpsweepPolicy for narrower (RADIX_BITS-1)-bit passes (presumably selected for a remainder pass when the bit range doesn't divide evenly — confirm in the pass scheduler)
typedef BlockRegionRadixSortUpsweepPolicy <64, CUB_MAX(1, 22 / SCALE_FACTOR), LOAD_LDG, RADIX_BITS - 1> AltUpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), LOAD_LDG, RADIX_BITS - 1> AltUpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy;
// ScanPolicy for the single-block spine scan
typedef BlockRegionScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, false, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
// Primary DownsweepPolicy (scatter pass; eight-byte smem banks on SM35)
typedef BlockRegionRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), BLOCK_LOAD_DIRECT, LOAD_LDG, false, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS> DownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), BLOCK_LOAD_DIRECT, LOAD_LDG, false, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS> DownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy;
// Alternate DownsweepPolicy for (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 11 / SCALE_FACTOR), BLOCK_LOAD_DIRECT, LOAD_LDG, false, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS - 1> AltDownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), BLOCK_LOAD_DIRECT, LOAD_LDG, false, true, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS - 1> AltDownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy;
};
/// SM30 (Kepler GK10x) tuning policies
struct Policy300
{
enum {
// Keys-only sorting is signaled by Value == NullType
KEYS_ONLY = (Equals<Value, NullType>::VALUE),
// Width of the widest of Key/Value in 4-byte units; used to shrink items-per-thread for fatter types
SCALE_FACTOR = (CUB_MAX(sizeof(Key), sizeof(Value)) + 3) / 4,
// Radix bits consumed per primary digit pass
RADIX_BITS = 5,
};
// UpsweepPolicy <block threads, items per thread, load modifier, radix bits>
typedef BlockRegionRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy;
// Alternate UpsweepPolicy for narrower (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy;
// ScanPolicy for the single-block spine scan
typedef BlockRegionScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, false, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
// DownsweepPolicy (scatter pass)
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS> DownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS> DownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy;
// Alternate DownsweepPolicy for (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS - 1> AltDownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeEightByte, RADIX_BITS - 1> AltDownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy;
};
/// SM20 (Fermi) tuning policies
struct Policy200
{
enum {
// Keys-only sorting is signaled by Value == NullType
KEYS_ONLY = (Equals<Value, NullType>::VALUE),
// Width of the widest of Key/Value in 4-byte units; used to shrink items-per-thread for fatter types
SCALE_FACTOR = (CUB_MAX(sizeof(Key), sizeof(Value)) + 3) / 4,
// Radix bits consumed per primary digit pass
RADIX_BITS = 5,
};
// UpsweepPolicy <block threads, items per thread, load modifier, radix bits>
typedef BlockRegionRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy;
// Alternate UpsweepPolicy for narrower (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy;
// ScanPolicy for the single-block spine scan
typedef BlockRegionScanPolicy <512, 4, BLOCK_LOAD_VECTORIZE, false, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
// DownsweepPolicy (scatter pass; four-byte smem banks on Fermi)
typedef BlockRegionRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS> DownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS> DownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy;
// Alternate DownsweepPolicy for (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS - 1> AltDownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_RAKING_MEMOIZE, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS - 1> AltDownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy;
};
/// SM13 (Tesla GT200) tuning policies
struct Policy130
{
enum {
// Keys-only sorting is signaled by Value == NullType
KEYS_ONLY = (Equals<Value, NullType>::VALUE),
// Width of the widest of Key/Value in 4-byte units; used to shrink items-per-thread for fatter types
SCALE_FACTOR = (CUB_MAX(sizeof(Key), sizeof(Value)) + 3) / 4,
// Radix bits consumed per primary digit pass
RADIX_BITS = 5,
};
// UpsweepPolicy <block threads, items per thread, load modifier, radix bits> (keys and pairs share the same parameters here)
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS> UpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy;
// Alternate UpsweepPolicy for narrower (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyKeys;
typedef BlockRegionRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy;
// ScanPolicy for the single-block spine scan
typedef BlockRegionScanPolicy <256, 4, BLOCK_LOAD_VECTORIZE, false, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, false, BLOCK_SCAN_WARP_SCANS> ScanPolicy;
// DownsweepPolicy (scatter pass)
typedef BlockRegionRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS> DownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS> DownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy;
// Alternate DownsweepPolicy for (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS - 1> AltDownsweepPolicyKeys;
typedef BlockRegionRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS - 1> AltDownsweepPolicyPairs;
typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy;
};
/// SM10 (Tesla G80) tuning policies.  Unlike the newer policy sets there is no
/// keys/pairs distinction and no SCALE_FACTOR scaling: a single fixed 4-bit
/// configuration is used for every sort.
struct Policy100
{
enum {
// Radix bits consumed per primary digit pass
RADIX_BITS = 4,
};
// UpsweepPolicy <block threads, items per thread, load modifier, radix bits>
typedef BlockRegionRadixSortUpsweepPolicy <64, 9, LOAD_DEFAULT, RADIX_BITS> UpsweepPolicy;
// Alternate UpsweepPolicy for narrower (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortUpsweepPolicy <64, 9, LOAD_DEFAULT, RADIX_BITS - 1> AltUpsweepPolicy;
// ScanPolicy for the single-block spine scan
typedef BlockRegionScanPolicy <256, 4, BLOCK_LOAD_VECTORIZE, false, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, false, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy;
// DownsweepPolicy (scatter pass)
typedef BlockRegionRadixSortDownsweepPolicy <64, 9, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS> DownsweepPolicy;
// Alternate DownsweepPolicy for (RADIX_BITS-1)-bit passes
typedef BlockRegionRadixSortDownsweepPolicy <64, 9, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, false, BLOCK_SCAN_WARP_SCANS, RADIX_SORT_SCATTER_TWO_PHASE, cudaSharedMemBankSizeFourByte, RADIX_BITS - 1> AltDownsweepPolicy;
};
/******************************************************************************
* Tuning policies of current PTX compiler pass
******************************************************************************/
// Select the tuning-policy set matching the PTX architecture of the current
// compiler pass
#if (CUB_PTX_VERSION >= 350)
typedef Policy350 PtxPolicy;
#elif (CUB_PTX_VERSION >= 300)
typedef Policy300 PtxPolicy;
#elif (CUB_PTX_VERSION >= 200)
typedef Policy200 PtxPolicy;
#elif (CUB_PTX_VERSION >= 130)
typedef Policy130 PtxPolicy;
#else
typedef Policy100 PtxPolicy;
#endif
// "Opaque" policies (whose parameterizations aren't reflected in the type
// signature): named wrapper structs give each policy a stable type identity
// regardless of which PtxPolicy alternative was selected above
struct PtxUpsweepPolicy : PtxPolicy::UpsweepPolicy {};
struct PtxAltUpsweepPolicy : PtxPolicy::AltUpsweepPolicy {};
struct PtxScanPolicy : PtxPolicy::ScanPolicy {};
struct PtxDownsweepPolicy : PtxPolicy::DownsweepPolicy {};
struct PtxAltDownsweepPolicy : PtxPolicy::AltDownsweepPolicy {};
/******************************************************************************
* Utilities
******************************************************************************/
/**
 * Initialize the five kernel dispatch configurations from a specific
 * architecture's tuning policies.  Stops at (and returns) the first
 * initialization error encountered; returns cudaSuccess otherwise.
 */
template <
    typename Policy,
    typename KernelConfig,
    typename UpsweepKernelPtr,      ///< Function type of cub::RadixSortUpsweepKernel
    typename ScanKernelPtr,         ///< Function type of cub::SpineScanKernel
    typename DownsweepKernelPtr>    ///< Function type of cub::RadixSortDownsweepKernel
__host__ __device__ __forceinline__
static cudaError_t InitConfigs(
    int                 sm_version,             ///< [in] SM architecture of the target device
    int                 sm_count,               ///< [in] Number of SMs on the target device
    KernelConfig        &upsweep_config,        ///< [out] Primary upsweep configuration
    KernelConfig        &alt_upsweep_config,    ///< [out] Alternate (narrower-digit) upsweep configuration
    KernelConfig        &scan_config,           ///< [out] Spine-scan configuration
    KernelConfig        &downsweep_config,      ///< [out] Primary downsweep configuration
    KernelConfig        &alt_downsweep_config,  ///< [out] Alternate (narrower-digit) downsweep configuration
    UpsweepKernelPtr    upsweep_kernel,
    UpsweepKernelPtr    alt_upsweep_kernel,
    ScanKernelPtr       scan_kernel,
    DownsweepKernelPtr  downsweep_kernel,
    DownsweepKernelPtr  alt_downsweep_kernel)
{
    cudaError_t error = cudaSuccess;

    // Bail out at the first configuration that fails to initialize
    if (CubDebug(error = upsweep_config.template InitUpsweepPolicy<typename Policy::UpsweepPolicy>(sm_version, sm_count, upsweep_kernel)))
        return error;
    if (CubDebug(error = alt_upsweep_config.template InitUpsweepPolicy<typename Policy::AltUpsweepPolicy>(sm_version, sm_count, alt_upsweep_kernel)))
        return error;
    if (CubDebug(error = scan_config.template InitScanPolicy<typename Policy::ScanPolicy>(sm_version, sm_count, scan_kernel)))
        return error;
    if (CubDebug(error = downsweep_config.template InitDownsweepPolicy<typename Policy::DownsweepPolicy>(sm_version, sm_count, downsweep_kernel)))
        return error;
    if (CubDebug(error = alt_downsweep_config.template InitDownsweepPolicy<typename Policy::AltDownsweepPolicy>(sm_version, sm_count, alt_downsweep_kernel)))
        return error;

    return error;
}
/**
 * Initialize kernel dispatch configurations with the policies corresponding
 * to the PTX assembly that will actually execute.
 *
 * When compiled for the device (CUB_PTX_VERSION > 0) the compile-time PTX
 * policy is authoritative and is used directly.  On the host, the
 * runtime-queried \p ptx_version selects the matching architecture-specific
 * policy set.
 */
template <
    typename KernelConfig,
    typename UpsweepKernelPtr,      ///< Function type of cub::RadixSortUpsweepKernel
    typename ScanKernelPtr,         ///< Function type of cub::SpineScanKernel
    typename DownsweepKernelPtr>    ///< Function type of cub::RadixSortDownsweepKernel
__host__ __device__ __forceinline__
static cudaError_t InitConfigs(
    int                 ptx_version,            ///< [in] PTX version of the compiled kernels (host path only)
    int                 sm_version,             ///< [in] SM architecture of the target device
    int                 sm_count,               ///< [in] Number of SMs on the target device
    KernelConfig        &upsweep_config,
    KernelConfig        &alt_upsweep_config,
    KernelConfig        &scan_config,
    KernelConfig        &downsweep_config,
    KernelConfig        &alt_downsweep_config,
    UpsweepKernelPtr    upsweep_kernel,
    UpsweepKernelPtr    alt_upsweep_kernel,
    ScanKernelPtr       scan_kernel,
    DownsweepKernelPtr  downsweep_kernel,
    DownsweepKernelPtr  alt_downsweep_kernel)
{
#if (CUB_PTX_VERSION > 0)

    // Device path: initialize with the current PTX policy, bailing out at the
    // first failure
    cudaError_t error = cudaSuccess;
    if (CubDebug(error = upsweep_config.template InitUpsweepPolicy<PtxUpsweepPolicy>(sm_version, sm_count, upsweep_kernel)))
        return error;
    if (CubDebug(error = alt_upsweep_config.template InitUpsweepPolicy<PtxAltUpsweepPolicy>(sm_version, sm_count, alt_upsweep_kernel)))
        return error;
    if (CubDebug(error = scan_config.template InitScanPolicy<PtxScanPolicy>(sm_version, sm_count, scan_kernel)))
        return error;
    if (CubDebug(error = downsweep_config.template InitDownsweepPolicy<PtxDownsweepPolicy>(sm_version, sm_count, downsweep_kernel)))
        return error;
    if (CubDebug(error = alt_downsweep_config.template InitDownsweepPolicy<PtxAltDownsweepPolicy>(sm_version, sm_count, alt_downsweep_kernel)))
        return error;
    return error;

#else

    // Host path: forward to the policy set matching the device's PTX version
    if (ptx_version >= 350)
        return InitConfigs<Policy350>(sm_version, sm_count, upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config, upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel);
    if (ptx_version >= 300)
        return InitConfigs<Policy300>(sm_version, sm_count, upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config, upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel);
    if (ptx_version >= 200)
        return InitConfigs<Policy200>(sm_version, sm_count, upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config, upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel);
    if (ptx_version >= 130)
        return InitConfigs<Policy130>(sm_version, sm_count, upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config, upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel);
    return InitConfigs<Policy100>(sm_version, sm_count, upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config, upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel);

#endif
}
/**
 * Per-kernel dispatch configuration: launch geometry and policy details
 * gathered at runtime for one of the three radix-sort kernels.
 */
struct KernelConfig
{
    int                     block_threads;          // threads per block
    int                     items_per_thread;       // items processed per thread
    int                     tile_size;              // block_threads * items_per_thread
    cudaSharedMemConfig     smem_config;            // smem bank configuration to set before launch
    int                     radix_bits;             // digit width (0 for the scan kernel)
    int                     sm_occupancy;           // resident CTAs per SM for this kernel
    int                     max_grid_size;          // grid-size cap (occupancy * SMs * oversubscription)
    int                     subscription_factor;    // oversubscription multiplier (heuristic)

    /// Configure for an upsweep kernel parameterized by UpsweepPolicy
    template <typename UpsweepPolicy, typename UpsweepKernelPtr>
    __host__ __device__ __forceinline__ cudaError_t InitUpsweepPolicy(
        int sm_version, int sm_count, UpsweepKernelPtr upsweep_kernel)
    {
        block_threads       = UpsweepPolicy::BLOCK_THREADS;
        items_per_thread    = UpsweepPolicy::ITEMS_PER_THREAD;
        tile_size           = block_threads * items_per_thread;
        radix_bits          = UpsweepPolicy::RADIX_BITS;
        smem_config         = cudaSharedMemBankSizeFourByte;
        subscription_factor = CUB_SUBSCRIPTION_FACTOR(sm_version);

        // Occupancy query last: its result feeds the grid-size cap
        cudaError_t result = MaxSmOccupancy(sm_occupancy, sm_version, upsweep_kernel, block_threads);
        max_grid_size = (sm_occupancy * sm_count) * subscription_factor;
        return result;
    }

    /// Configure for the spine-scan kernel parameterized by ScanPolicy
    template <typename ScanPolicy, typename ScanKernelPtr>
    __host__ __device__ __forceinline__ cudaError_t InitScanPolicy(
        int sm_version, int sm_count, ScanKernelPtr scan_kernel)
    {
        block_threads       = ScanPolicy::BLOCK_THREADS;
        items_per_thread    = ScanPolicy::ITEMS_PER_THREAD;
        tile_size           = block_threads * items_per_thread;
        radix_bits          = 0;                                // scan has no digit width
        smem_config         = cudaSharedMemBankSizeFourByte;

        // The spine scan always runs as a single thread block
        sm_occupancy        = 1;
        subscription_factor = 1;
        max_grid_size       = 1;
        return cudaSuccess;
    }

    /// Configure for a downsweep kernel parameterized by DownsweepPolicy
    template <typename DownsweepPolicy, typename DownsweepKernelPtr>
    __host__ __device__ __forceinline__ cudaError_t InitDownsweepPolicy(
        int sm_version, int sm_count, DownsweepKernelPtr downsweep_kernel)
    {
        block_threads       = DownsweepPolicy::BLOCK_THREADS;
        items_per_thread    = DownsweepPolicy::ITEMS_PER_THREAD;
        tile_size           = block_threads * items_per_thread;
        radix_bits          = DownsweepPolicy::RADIX_BITS;
        smem_config         = DownsweepPolicy::SMEM_CONFIG;     // downsweep dictates its own bank width
        subscription_factor = CUB_SUBSCRIPTION_FACTOR(sm_version);

        // Occupancy query last: its result feeds the grid-size cap
        cudaError_t result = MaxSmOccupancy(sm_occupancy, sm_version, downsweep_kernel, block_threads);
        max_grid_size = (sm_occupancy * sm_count) * subscription_factor;
        return result;
    }
};
/******************************************************************************
* Allocation of device temporaries
******************************************************************************/
/**
 * Compute (and optionally alias) the device temporary storage required for
 * the sort.  When \p d_temp_storage is NULL, only the required size is
 * written to \p temp_storage_bytes and no pointers are produced.
 */
__host__ __device__ __forceinline__
static cudaError_t AllocateTemporaries(
    void        *d_temp_storage,        ///< [in] %Device allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
    size_t      &temp_storage_bytes,    ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
    Offset*     &d_spine,               ///< [out] Digit count histograms per thread block
    KernelConfig &scan_config,          ///< [in] Dispatch parameters that match the policy that \p scan_kernel was compiled for
    KernelConfig &downsweep_config)     ///< [in] Dispatch parameters that match the policy that \p downsweep_kernel was compiled for
{
    // Conservative spine size: one counter per (digit, block) pair, padded by
    // a full scan tile (the spine scan consumes whole tiles only)
    int spine_size = (downsweep_config.max_grid_size * (1 << downsweep_config.radix_bits)) + scan_config.tile_size;

    // Single temporary allocation: the privatized block digit histograms
    void*  allocations[1];
    size_t allocation_sizes[1] = { spine_size * sizeof(Offset) };

    // Carve the allocation out of the storage blob (or just compute its size)
    cudaError_t error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes);
    if (CubDebug(error))
        return error;

    // Caller only asked for the required storage size
    if (d_temp_storage == NULL)
        return cudaSuccess;

    d_spine = (Offset*) allocations[0];
    return error;
}
/******************************************************************************
* Dispatch entrypoints
******************************************************************************/
/**
 * Internal dispatch routine for computing a device-wide radix sort using the
 * specified kernel functions.
 *
 * For each digit place in [begin_bit, end_bit), launches three kernels:
 * upsweep (per-block digit histograms), spine scan (exclusive prefix sum of
 * the histograms), and downsweep (scatter into destination bins), ping-ponging
 * the key/value double-buffers between passes.
 */
template <
    typename            UpsweepKernelPtr,       ///< Function type of cub::RadixSortUpsweepKernel
    typename            ScanKernelPtr,          ///< Function type of cub::SpineScanKernel
    typename            DownsweepKernelPtr>     ///< Function type of cub::RadixSortDownsweepKernel
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
    DoubleBuffer<Key>   &d_keys,                ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys
    DoubleBuffer<Value> &d_values,              ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values
    Offset              *d_spine,               ///< [in] Digit count histograms per thread block
    int                 spine_size,             ///< [in] Number of histogram counters
    Offset              num_items,              ///< [in] Number of items to reduce
    int                 begin_bit,              ///< [in] The beginning (least-significant) bit index needed for key comparison
    int                 end_bit,                ///< [in] The past-the-end (most-significant) bit index needed for key comparison
    cudaStream_t        stream,                 ///< [in] CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
    bool                debug_synchronous,      ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
    KernelConfig        &upsweep_config,        ///< [in] Dispatch parameters that match the policy that \p upsweep_kernel was compiled for
    KernelConfig        &scan_config,           ///< [in] Dispatch parameters that match the policy that \p scan_kernel was compiled for
    KernelConfig        &downsweep_config,      ///< [in] Dispatch parameters that match the policy that \p downsweep_kernel was compiled for
    UpsweepKernelPtr    upsweep_kernel,         ///< [in] Kernel function pointer to parameterization of cub::RadixSortUpsweepKernel
    ScanKernelPtr       scan_kernel,            ///< [in] Kernel function pointer to parameterization of cub::SpineScanKernel
    DownsweepKernelPtr  downsweep_kernel)       ///< [in] Kernel function pointer to parameterization of cub::RadixSortDownsweepKernel
{
#ifndef CUB_RUNTIME_ENABLED

    // Kernel launch not supported from this device
    return CubDebug(cudaErrorNotSupported );

#else

    cudaError error = cudaSuccess;
    do
    {
        // Get even-share work distribution descriptor
        GridEvenShare<Offset> even_share(num_items, downsweep_config.max_grid_size, CUB_MAX(downsweep_config.tile_size, upsweep_config.tile_size));

#if (CUB_PTX_VERSION == 0)
        // Host path only: remember the entry smem bank configuration so it can
        // be restored after the sort
        cudaSharedMemConfig original_smem_config;
        if (CubDebug(error = cudaDeviceGetSharedMemConfig(&original_smem_config))) break;
        cudaSharedMemConfig current_smem_config = original_smem_config;
#endif

        // Iterate over digit places
        int current_bit = begin_bit;
        while (current_bit < end_bit)
        {
#if (CUB_PTX_VERSION == 0)
            // Update smem config if necessary
            if (current_smem_config != upsweep_config.smem_config)
            {
                if (CubDebug(error = cudaDeviceSetSharedMemConfig(upsweep_config.smem_config))) break;
                current_smem_config = upsweep_config.smem_config;
            }
#endif

            // Log upsweep_kernel configuration
            if (debug_synchronous)
                CubLog("Invoking upsweep_kernel<<<%d, %d, 0, %lld>>>(), %d smem config, %d items per thread, %d SM occupancy, selector %d, current bit %d, bit_grain %d\n",
                even_share.grid_size, upsweep_config.block_threads, (long long) stream, upsweep_config.smem_config, upsweep_config.items_per_thread, upsweep_config.sm_occupancy, d_keys.selector, current_bit, downsweep_config.radix_bits);

            // Invoke upsweep_kernel with same grid size as downsweep_kernel
            upsweep_kernel<<<even_share.grid_size, upsweep_config.block_threads, 0, stream>>>(
                d_keys.d_buffers[d_keys.selector],
                d_spine,
                num_items,
                current_bit,
                (current_bit == begin_bit),
                even_share);

            // BUGFIX: check for launch failure unconditionally (previously launch
            // errors were only detected when debug_synchronous forced a sync)
            if (CubDebug(error = cudaPeekAtLastError())) break;

            // Sync the stream if specified
            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;

            // Log scan_kernel configuration
            if (debug_synchronous) CubLog("Invoking scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread\n",
                1, scan_config.block_threads, (long long) stream, scan_config.items_per_thread);

            // Invoke scan_kernel (single block: turns counts into scatter offsets)
            scan_kernel<<<1, scan_config.block_threads, 0, stream>>>(
                d_spine,
                spine_size);

            // Check for failure to launch
            if (CubDebug(error = cudaPeekAtLastError())) break;

            // Sync the stream if specified
            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;

#if (CUB_PTX_VERSION == 0)
            // Update smem config if necessary
            if (current_smem_config != downsweep_config.smem_config)
            {
                if (CubDebug(error = cudaDeviceSetSharedMemConfig(downsweep_config.smem_config))) break;
                current_smem_config = downsweep_config.smem_config;
            }
#endif

            // Log downsweep_kernel configuration
            if (debug_synchronous) CubLog("Invoking downsweep_kernel<<<%d, %d, 0, %lld>>>(), %d smem config, %d items per thread, %d SM occupancy\n",
                even_share.grid_size, downsweep_config.block_threads, (long long) stream, downsweep_config.smem_config, downsweep_config.items_per_thread, downsweep_config.sm_occupancy);

            // Invoke downsweep_kernel: scatter ping buffers into pong buffers
            downsweep_kernel<<<even_share.grid_size, downsweep_config.block_threads, 0, stream>>>(
                d_keys.d_buffers[d_keys.selector],
                d_keys.d_buffers[d_keys.selector ^ 1],
                d_values.d_buffers[d_values.selector],
                d_values.d_buffers[d_values.selector ^ 1],
                d_spine,
                num_items,
                current_bit,
                (current_bit == begin_bit),
                (current_bit + downsweep_config.radix_bits >= end_bit),
                even_share);

            // Check for failure to launch
            if (CubDebug(error = cudaPeekAtLastError())) break;

            // Sync the stream if specified
            if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break;

            // Invert selectors: the pong buffers become next pass's ping buffers
            d_keys.selector ^= 1;
            d_values.selector ^= 1;

            // Update current bit position
            current_bit += downsweep_config.radix_bits;
        }

#if (CUB_PTX_VERSION == 0)
        // Restore the entry smem bank configuration.  BUGFIX: a successful
        // restore must not clobber an earlier failure code (previously
        // `error` was overwritten here, masking pass errors).
        if (current_smem_config != original_smem_config)
        {
            cudaError_t reset_error = CubDebug(cudaDeviceSetSharedMemConfig(original_smem_config));
            if (error == cudaSuccess)
                error = reset_error;
        }
#endif
    }
    while (0);

    return error;

#endif // CUB_RUNTIME_ENABLED
}
/**
* Internal dispatch routine
*/
/**
 * Primary internal dispatch routine.
 *
 * Queries the device (PTX version, SM version, SM count), initializes the
 * kernel dispatch configurations, sizes/allocates the temporary spine
 * storage, then runs radix sorting passes over [begin_bit, end_bit).
 * When the bit range is not a multiple of the primary digit width, a
 * leading group of passes uses the alternate (smaller-digit) configuration
 * so that the remaining bits divide evenly among primary passes.
 */
template <
typename UpsweepKernelPtr, ///< Function type of cub::RadixSortUpsweepKernel
typename ScanKernelPtr, ///< Function type of cub::SpineScanKernel
typename DownsweepKernelPtr> ///< Function type of cub::RadixSortDownsweepKernel
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
DoubleBuffer<Key> &d_keys, ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys
DoubleBuffer<Value> &d_values, ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values
Offset num_items, ///< [in] Number of items to reduce
int begin_bit, ///< [in] The beginning (least-significant) bit index needed for key comparison
int end_bit, ///< [in] The past-the-end (most-significant) bit index needed for key comparison
cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
UpsweepKernelPtr upsweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::RadixSortUpsweepKernel
UpsweepKernelPtr alt_upsweep_kernel, ///< [in] Alternate kernel function pointer to parameterization of cub::RadixSortUpsweepKernel
ScanKernelPtr scan_kernel, ///< [in] Kernel function pointer to parameterization of cub::SpineScanKernel
DownsweepKernelPtr downsweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::RadixSortDownsweepKernel
DownsweepKernelPtr alt_downsweep_kernel) ///< [in] Alternate kernel function pointer to parameterization of cub::RadixSortDownsweepKernel
{
#ifndef CUB_RUNTIME_ENABLED
// Kernel launch not supported from this device
return CubDebug(cudaErrorNotSupported );
#else
cudaError error = cudaSuccess;
// do/while(0) so any failing call can 'break' straight to the return.
do
{
// Get PTX version
int ptx_version;
#if (CUB_PTX_VERSION == 0)
if (CubDebug(error = PtxVersion(ptx_version))) break;
#else
ptx_version = CUB_PTX_VERSION;
#endif
// Get device ordinal
int device_ordinal;
if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
// Get device SM version
int sm_version;
if (CubDebug(error = SmVersion(sm_version, device_ordinal))) break;
// Get SM count
int sm_count;
if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
// Get kernel dispatch configurations (primary and alternate variants)
KernelConfig upsweep_config;
KernelConfig alt_upsweep_config;
KernelConfig scan_config;
KernelConfig downsweep_config;
KernelConfig alt_downsweep_config;
if (CubDebug(error = InitConfigs(ptx_version, sm_version, sm_count,
upsweep_config, alt_upsweep_config, scan_config, downsweep_config, alt_downsweep_config,
upsweep_kernel, alt_upsweep_kernel, scan_kernel, downsweep_kernel, alt_downsweep_kernel))) break;
// Get spine sizes (conservative): one counter per digit per downsweep
// block, rounded up by one scan tile.
int spine_size = (downsweep_config.max_grid_size * (1 << downsweep_config.radix_bits)) + scan_config.tile_size;
int alt_spine_size = (alt_downsweep_config.max_grid_size * (1 << alt_downsweep_config.radix_bits)) + scan_config.tile_size;
// Allocate temporaries sized for whichever configuration needs more spine
Offset *d_spine = NULL;
if (spine_size > alt_spine_size)
{
if (CubDebug(error = AllocateTemporaries(d_temp_storage, temp_storage_bytes, d_spine, scan_config, downsweep_config))) break;
}
else
{
if (CubDebug(error = AllocateTemporaries(d_temp_storage, temp_storage_bytes, d_spine, scan_config, alt_downsweep_config))) break;
}
// Return if the caller is simply requesting the size of the storage allocation
if (d_temp_storage == NULL)
return cudaSuccess;
// Run radix sorting passes
int num_bits = end_bit - begin_bit;
int remaining_bits = num_bits % downsweep_config.radix_bits;
if (remaining_bits != 0)
{
// Run passes of alternate configuration to consume the leftover bits
// so the remainder divides evenly into primary-width passes.
int max_alt_passes = downsweep_config.radix_bits - remaining_bits;
int alt_end_bit = CUB_MIN(end_bit, begin_bit + (max_alt_passes * alt_downsweep_config.radix_bits));
if (CubDebug(error = Dispatch(
d_keys,
d_values,
d_spine,
alt_spine_size,
num_items,
begin_bit,
alt_end_bit,
stream,
debug_synchronous,
alt_upsweep_config,
scan_config,
alt_downsweep_config,
alt_upsweep_kernel,
scan_kernel,
alt_downsweep_kernel))) break;
begin_bit = alt_end_bit;
}
// Run passes of primary configuration over the (now evenly divisible) rest
if (CubDebug(error = Dispatch(
d_keys,
d_values,
d_spine,
spine_size,
num_items,
begin_bit,
end_bit,
stream,
debug_synchronous,
upsweep_config,
scan_config,
downsweep_config,
upsweep_kernel,
scan_kernel,
downsweep_kernel))) break;
}
while (0);
return error;
#endif // CUB_RUNTIME_ENABLED
}
/**
* Internal dispatch routine
*/
/// Convenience overload: binds the PTX-tuned kernel template instantiations
/// (upsweep/scan/downsweep plus their alternate-digit variants) and forwards
/// to the kernel-pointer Dispatch overload above.
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
DoubleBuffer<Key> &d_keys, ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys
DoubleBuffer<Value> &d_values, ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values
Offset num_items, ///< [in] Number of items to reduce
int begin_bit, ///< [in] The beginning (least-significant) bit index needed for key comparison
int end_bit, ///< [in] The past-the-end (most-significant) bit index needed for key comparison
cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false.
{
return Dispatch(
d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
begin_bit,
end_bit,
stream,
debug_synchronous,
RadixSortUpsweepKernel<PtxUpsweepPolicy, DESCENDING, Key, Offset>,
RadixSortUpsweepKernel<PtxAltUpsweepPolicy, DESCENDING, Key, Offset>,
RadixSortScanKernel<PtxScanPolicy, Offset>,
RadixSortDownsweepKernel<PtxDownsweepPolicy, DESCENDING, Key, Value, Offset>,
RadixSortDownsweepKernel<PtxAltDownsweepPolicy, DESCENDING, Key, Value, Offset>);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Builds the device program source for this layer: emits three kernels
// (LRNFillScale, LRNComputeOutput, LRNComputeDiff) as strings via the
// device-program abstraction, then compiles the assembled source.
// NOTE: the emission order of the ss << statements IS the generated source,
// so statement order here is load-bearing.
template<typename Dtype, typename MItype, typename MOtype>
void LRNLayer<Dtype, MItype, MOtype>::GenerateProgram() {
this->device_program_ = this->device_->CreateProgram();
stringstream ss;
// Common prelude: backend setup, type aliases, and helper functions.
ss << this->device_program_->setup();
ss << this->device_program_->template define_type<Dtype>("Dtype");
ss << this->device_program_->template define_type<MItype>("MItype");
ss << this->device_program_->template define_type<MOtype>("MOtype");
ss << this->device_program_->template helper_functions<Dtype>();
// Kernel 1: LRNFillScale — one thread per (n, h, w) location; each thread
// slides a window along the channel axis and writes the per-element scale
// term k + alpha_over_size * sum(in^2) into 'scale'.
{
KernelArgs args;
args.push_back(this->device_program_->template create_kernel_arg<uint_tp>(
"nthreads", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"in", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"channels", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"height", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"width", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"size", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"alpha_over_size", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"k", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"scale", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("LRNFillScale", args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
// find out the local offset
ss << "const int_tp w = index % width;" << std::endl;
ss << "const int_tp h = (index / width) % height;" << std::endl;
ss << "const int_tp n = index / width / height;" << std::endl;
ss << "const int_tp offset = (n * channels * height + h) * width + w;"
<< std::endl;
// 'step' advances one channel at fixed (h, w).
ss << "const int_tp step = height * width;" << std::endl;
ss << this->device_program_->global_ptr("const Dtype", "in_off")
<< " = in + offset;" << std::endl;
ss << this->device_program_->global_ptr("Dtype", "scale_off")
<< " = scale + offset;" << std::endl;
ss << "int_tp head = 0;" << std::endl;
ss << "const int_tp pre_pad = (size - 1) / 2;" << std::endl;
ss << "const int_tp post_pad = size - pre_pad - 1;" << std::endl;
ss << "Dtype accum_scale = 0;" << std::endl;
// fill the scale at [n, :, h, w]
// accumulate values (window not yet full)
ss << "while (head < post_pad && head < channels) {" << std::endl;
ss << "accum_scale += in_off[head * step] * in_off[head * step];"
<< std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
// both add and subtract (window sliding at full width)
ss << "while (head < channels) {" << std::endl;
ss << "accum_scale += in_off[head * step] * in_off[head * step];"
<< std::endl;
ss << "if (head - size >= 0) {" << std::endl;
ss << "accum_scale -= in_off[(head - size) * step]"
<< " * in_off[(head - size) * step];" << std::endl;
ss << "}" << std::endl;
ss << "scale_off[(head - post_pad) * step] = k + accum_scale"
<< " * alpha_over_size;" << std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
// subtract only (window draining past the last channel)
ss << "while (head < channels + post_pad) {" << std::endl;
ss << "if (head - size >= 0) {" << std::endl;
ss << "accum_scale -= in_off[(head - size) * step]"
<< " * in_off[(head - size) * step];" << std::endl;
ss << "}" << std::endl;
ss << "scale_off[(head - post_pad) * step] = k + accum_scale"
<< " * alpha_over_size;" << std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
}
// Kernel 2: LRNComputeOutput — elementwise out = in * scale^negative_beta.
// TODO: check if it would be faster to just put it into the previous kernel.
{
KernelArgs args;
args.push_back(this->device_program_->template create_kernel_arg<uint_tp>(
"nthreads", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"in", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"scale", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"negative_beta", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"out", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("LRNComputeOutput", args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
ss << "out[index] = in[index] * pow((Dtype)(scale[index]), "
<< "(Dtype)(negative_beta));"
<< std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
}
// Kernel 3: LRNComputeDiff — backward pass; same sliding-window structure
// as LRNFillScale but accumulating top_diff * top_data / scale along the
// channel axis to form the cross-channel gradient term.
{
KernelArgs args;
args.push_back(this->device_program_->template create_kernel_arg<uint_tp>(
"nthreads", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<MOtype>(
"top_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"scale", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<MOtype>(
"top_diff", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"channels", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"height", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"width", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"size", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"negative_beta", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<Dtype>(
"cache_ratio", KERNEL_ARG_CONST));
args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_diff", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("LRNComputeDiff", args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
// find out the local offset
ss << "const int_tp w = index % width;" << std::endl;
ss << "const int_tp h = (index / width) % height;" << std::endl;
ss << "const int_tp n = index / width / height;" << std::endl;
ss << "const int_tp offset = (n * channels * height + h) * width + w;"
<< std::endl;
ss << "const int_tp step = height * width;" << std::endl;
ss << this->device_program_->global_ptr("const Dtype", "bottom_off")
<< " = bottom_data + offset;" << std::endl;
ss << this->device_program_->global_ptr("const Dtype", "top_off")
<< " = top_data + offset;" << std::endl;
ss << this->device_program_->global_ptr("const Dtype", "scale_off")
<< " = scale + offset;" << std::endl;
ss << this->device_program_->global_ptr("const Dtype", "top_diff_off")
<< " = top_diff + offset;" << std::endl;
ss << this->device_program_->global_ptr("Dtype", "bottom_diff_off")
<< " = bottom_diff + offset;" << std::endl;
ss << "int_tp head = 0;" << std::endl;
ss << "const int_tp pre_pad = size - (size + 1) / 2;" << std::endl;
ss << "const int_tp post_pad = size - pre_pad - 1;" << std::endl;
ss << "Dtype accum_ratio = 0;" << std::endl;
// accumulate values (window not yet full)
ss << "while (head < post_pad && head < channels) {" << std::endl;
ss << "accum_ratio += top_diff_off[head * step] * top_off[head * step]"
<< " / scale_off[head * step];" << std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
// both add and subtract (window sliding at full width)
ss << "while (head < channels) {" << std::endl;
ss << "accum_ratio += top_diff_off[head * step] * top_off[head * step]"
<< " / scale_off[head * step];" << std::endl;
ss << "if (head - size >= 0) {" << std::endl;
ss << "accum_ratio -= top_diff_off[(head - size) * step]"
<< " * top_off[(head - size) * step] / scale_off[(head - size) * step];"
<< std::endl;
ss << "}" << std::endl;
ss << "bottom_diff_off[(head - post_pad) * step]"
<< " = top_diff_off[(head - post_pad)"
<< " * step] * pow((Dtype)(scale_off[(head - post_pad) * step]), "
<< "(Dtype)(negative_beta))"
<< " - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;"
<< std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
// subtract only (window draining past the last channel)
ss << "while (head < channels + post_pad) {" << std::endl;
ss << "if (head - size >= 0) {" << std::endl;
ss << "accum_ratio -= top_diff_off[(head - size) * step]"
<< " * top_off[(head - size) * step] / scale_off[(head - size) * step];"
<< std::endl;
ss << "}" << std::endl;
ss << "bottom_diff_off[(head - post_pad) * step]"
<< " = top_diff_off[(head - post_pad)"
<< " * step] * pow((Dtype)(scale_off[(head - post_pad) * step]), "
<< "(Dtype)(negative_beta))"
<< " - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;"
<< std::endl;
ss << "++head;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
}
// Hand the assembled source to the backend and compile it.
this->device_program_->set_source(ss.str());
this->device_program_->Compile(true, true);
}
// Forward entry point: dispatch on the configured normalization region.
template<typename Dtype, typename MItype, typename MOtype>
void LRNLayer<Dtype, MItype, MOtype>::Forward_gpu(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  const auto region = this->layer_param_.lrn_param().norm_region();
  if (region == LRNParameter_NormRegion_ACROSS_CHANNELS) {
    CrossChannelForward_gpu(bottom, top);
  } else if (region == LRNParameter_NormRegion_WITHIN_CHANNEL) {
    WithinChannelForward(bottom, top);
  } else {
    LOG(FATAL) << "Unknown normalization region.";
  }
}
// Cross-channel LRN forward: first fills the per-element scale buffer, then
// computes out = in * scale^(-beta) elementwise.
template<typename Dtype, typename MItype, typename MOtype>
void LRNLayer<Dtype, MItype, MOtype>::CrossChannelForward_gpu(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  vptr<const Dtype> src = bottom[0]->gpu_data();
  vptr<Dtype> dst = top[0]->mutable_gpu_data();
  vptr<Dtype> scale = scale_.mutable_gpu_data();
  Dtype negative_beta = -beta_;
  // Pass 1: one thread per (n, h, w) location; each thread walks the channel
  // axis inside the kernel to fill 'scale'.
  int_tp count = num_ * height_ * width_;
  {
    shared_ptr<DeviceKernel> fill_kernel =
        this->device_program_->GetKernel("LRNFillScale");
    Dtype alpha_over_size = alpha_ / size_;
    fill_kernel->add_arg(&count);
    fill_kernel->add_arg(&src);
    fill_kernel->add_arg(&num_);
    fill_kernel->add_arg(&channels_);
    fill_kernel->add_arg(&height_);
    fill_kernel->add_arg(&width_);
    fill_kernel->add_arg(&size_);
    fill_kernel->add_arg(&alpha_over_size);
    fill_kernel->add_arg(&k_);
    fill_kernel->add_arg(&scale);
    vector<size_t> global(1, count);
    vector<size_t> groups;
    vector<size_t> locals;
    this->device_->get_threads(&global, &groups, &locals,
                               fill_kernel.get(), true);
    fill_kernel->Execute(groups, locals);
  }
  // Pass 2: one thread per element for the final elementwise scaling.
  count = bottom[0]->count();
  {
    shared_ptr<DeviceKernel> out_kernel =
        this->device_program_->GetKernel("LRNComputeOutput");
    out_kernel->add_arg(&count);
    out_kernel->add_arg(&src);
    out_kernel->add_arg(&scale);
    out_kernel->add_arg(&negative_beta);
    out_kernel->add_arg(&dst);
    vector<size_t> global(1, count);
    vector<size_t> groups;
    vector<size_t> locals;
    this->device_->get_threads(&global, &groups, &locals,
                               out_kernel.get(), true);
    out_kernel->Execute(groups, locals);
  }
}
// Backward entry point: dispatch on the configured normalization region.
template<typename Dtype, typename MItype, typename MOtype>
void LRNLayer<Dtype, MItype, MOtype>::Backward_gpu(
    const vector<Blob<MOtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<MItype>*>& bottom) {
  const auto region = this->layer_param_.lrn_param().norm_region();
  if (region == LRNParameter_NormRegion_ACROSS_CHANNELS) {
    CrossChannelBackward_gpu(top, propagate_down, bottom);
  } else if (region == LRNParameter_NormRegion_WITHIN_CHANNEL) {
    WithinChannelBackward(top, propagate_down, bottom);
  } else {
    LOG(FATAL) << "Unknown normalization region.";
  }
}
// Cross-channel LRN backward: single kernel launch, one thread per
// (n, h, w) location; the kernel walks the channel axis internally.
template<typename Dtype, typename MItype, typename MOtype>
void LRNLayer<Dtype, MItype, MOtype>::CrossChannelBackward_gpu(
    const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<MItype>*>& bottom) {
  int_tp count = num_ * height_ * width_;
  Dtype negative_beta = -beta_;
  // Constant factor 2*alpha*beta/size of the cross-channel gradient term.
  Dtype ratio = Dtype(2. * alpha_ * beta_ / size_);
  vptr<const Dtype> src_data = bottom[0]->gpu_data();
  vptr<const Dtype> dst_data = top[0]->gpu_data();
  vptr<const Dtype> scale = scale_.gpu_data();
  vptr<const Dtype> dst_diff = top[0]->gpu_diff();
  vptr<Dtype> src_diff = bottom[0]->mutable_gpu_diff();
  shared_ptr<DeviceKernel> diff_kernel =
      this->device_program_->GetKernel("LRNComputeDiff");
  // Argument order must match the LRNComputeDiff signature emitted in
  // GenerateProgram().
  diff_kernel->add_arg(&count);
  diff_kernel->add_arg(&src_data);
  diff_kernel->add_arg(&dst_data);
  diff_kernel->add_arg(&scale);
  diff_kernel->add_arg(&dst_diff);
  diff_kernel->add_arg(&num_);
  diff_kernel->add_arg(&channels_);
  diff_kernel->add_arg(&height_);
  diff_kernel->add_arg(&width_);
  diff_kernel->add_arg(&size_);
  diff_kernel->add_arg(&negative_beta);
  diff_kernel->add_arg(&ratio);
  diff_kernel->add_arg(&src_diff);
  vector<size_t> global(1, count);
  vector<size_t> groups;
  vector<size_t> locals;
  this->device_->get_threads(&global, &groups, &locals,
                             diff_kernel.get(), true);
  diff_kernel->Execute(groups, locals);
}
// Explicit instantiations of the GPU methods for the supported
// (Dtype, MItype, MOtype) combinations: half, float, and double.
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, GenerateProgram,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, GenerateProgram,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, GenerateProgram,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Forward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Forward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Forward_gpu,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelForward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelForward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelForward_gpu,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Backward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Backward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, Backward_gpu,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelBackward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelBackward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(LRNLayer, CrossChannelBackward_gpu,
(double), (double), (double));
} // namespace caffe
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
// Square LENGTH x LENGTH tiles for the transposing rotations (90/270);
// LENGTH == 1 << SHIFT.
#define LENGTH 16
#define SHIFT 4
// Wide LENGTH_X x LENGTH_Y blocks for the 180-degree rotation;
// LENGTH_X == 1 << SHIFT_X and LENGTH_Y == 1 << SHIFT_Y.
#define LENGTH_X 128
#define LENGTH_Y 4
#define SHIFT_X 7
#define SHIFT_Y 2
// Rotates an image 90 degrees clockwise through a shared-memory tile.
// Launch: LENGTH x LENGTH blocks over the source; strides are in bytes.
// The grid is rounded up, so the last row/column of blocks handles the
// partially-filled edge case; empty_x is the number of unused rows in the
// rounded-up y-grid, which becomes a left-side gap in the rotated x
// coordinate that must be subtracted out.
template <typename T>
__global__
void rotate90Kernel(const T* src, int src_rows, int src_cols, int src_stride,
T* dst, int dst_stride) {
// +1 padding on the inner dimension avoids shared-memory bank conflicts
// on the transposed accesses.
__shared__ T data[LENGTH][LENGTH + 1];
int element_x = (blockIdx.x << SHIFT) + threadIdx.x;
int element_y = (blockIdx.y << SHIFT) + threadIdx.y;
int empty_x = (gridDim.y << SHIFT) - src_rows;
// Fast path: interior blocks are fully populated, no bounds checks needed.
if (blockIdx.x < gridDim.x - 1 && blockIdx.y < gridDim.y - 1) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[threadIdx.x][LENGTH - threadIdx.y - 1] = value;
__syncthreads();
int dst_x = ((gridDim.y - blockIdx.y - 1) << SHIFT) + threadIdx.x;
int dst_y = (blockIdx.x << SHIFT) + threadIdx.y;
T* output = (T*)((uchar*)dst + dst_y * dst_stride);
output[dst_x - empty_x] = data[threadIdx.y][threadIdx.x];
}
else {
// Edge blocks: only in-bounds threads load; all threads hit the barrier.
if (element_x < src_cols && element_y < src_rows) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[threadIdx.x][LENGTH - threadIdx.y - 1] = value;
}
__syncthreads();
// Valid extents of this block after the rotation (rows/cols swap roles).
int threadIdx_x = blockIdx.y == gridDim.y - 1 ?
src_rows - (blockIdx.y << SHIFT) : LENGTH;
int threadIdx_y = blockIdx.x == gridDim.x - 1 ?
src_cols - (blockIdx.x << SHIFT) : LENGTH;
if (threadIdx.x < threadIdx_x && threadIdx.y < threadIdx_y) {
int dst_x = ((gridDim.y - blockIdx.y - 1) << SHIFT) + threadIdx.x;
if (blockIdx.y < gridDim.y - 1) {
dst_x -= empty_x;
}
int dst_y = (blockIdx.x << SHIFT) + threadIdx.y;
T* output = (T*)((uchar*)dst + dst_y * dst_stride);
if (blockIdx.y < gridDim.y - 1) {
output[dst_x] = data[threadIdx.y][threadIdx.x];
}
else {
// Last y-block: skip the empty_x unused tile rows when reading back.
output[dst_x] = data[threadIdx.y][threadIdx.x + empty_x];
}
}
}
}
// Rotates a single-channel image 180 degrees: element (x, y) moves to
// (src_cols - 1 - x, src_rows - 1 - y). Strides are in bytes. One thread
// per pixel; launched on a grid rounded up with divideUp().
template <typename T>
__global__
void rotate180C1Kernel(const T* src, int src_rows, int src_cols, int src_stride,
T* dst, int dst_stride) {
int element_x = (blockIdx.x << SHIFT_X) + threadIdx.x;
int element_y = (blockIdx.y << SHIFT_Y) + threadIdx.y;
// Guard the grid tail: the host rounds the grid up, so edge blocks contain
// threads past the image bounds; without this check they read and write
// out of bounds (the Cn variant guards its edge blocks explicitly).
if (element_x >= src_cols || element_y >= src_rows) {
return;
}
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
T* output = (T*)((uchar*)dst + (src_rows - 1 - element_y) * dst_stride);
output[src_cols - 1 - element_x] = value;
}
// Rotates a multi-channel image 180 degrees through a shared-memory tile.
// Launch: LENGTH_X x LENGTH_Y blocks over the source; strides are in bytes.
// empty_x/empty_y are the unused columns/rows in the rounded-up grid; they
// become gaps at the top-left of the rotated image that are subtracted out.
template <typename T>
__global__
void rotate180CnKernel(const T* src, int src_rows, int src_cols, int src_stride,
T* dst, int dst_stride) {
__shared__ T data[LENGTH_Y][LENGTH_X];
int element_x = (blockIdx.x << SHIFT_X) + threadIdx.x;
int element_y = (blockIdx.y << SHIFT_Y) + threadIdx.y;
int empty_x = (gridDim.x << SHIFT_X) - src_cols;
int empty_y = (gridDim.y << SHIFT_Y) - src_rows;
// Fast path: interior blocks are fully populated, no bounds checks needed.
if (blockIdx.x < gridDim.x - 1 && blockIdx.y < gridDim.y - 1) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[LENGTH_Y - threadIdx.y - 1][LENGTH_X - threadIdx.x - 1] = value;
__syncthreads();
int dst_x = ((gridDim.x - blockIdx.x - 1) << SHIFT_X) + threadIdx.x;
int dst_y = ((gridDim.y - blockIdx.y - 1) << SHIFT_Y) + threadIdx.y;
T* output = (T*)((uchar*)dst + (dst_y - empty_y) * dst_stride);
output[dst_x - empty_x] = data[threadIdx.y][threadIdx.x];
}
else {
// Edge blocks: only in-bounds threads load; all threads hit the barrier.
if (element_x < src_cols && element_y < src_rows) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[LENGTH_Y - threadIdx.y - 1][LENGTH_X - threadIdx.x - 1] = value;
}
__syncthreads();
// Valid extents of this (possibly partial) block.
int threadIdx_x = blockIdx.x == gridDim.x - 1 ?
src_cols - (blockIdx.x << SHIFT_X) : LENGTH_X;
int threadIdx_y = blockIdx.y == gridDim.y - 1 ?
src_rows - (blockIdx.y << SHIFT_Y) : LENGTH_Y;
if (threadIdx.x < threadIdx_x && threadIdx.y < threadIdx_y) {
int dst_x = ((gridDim.x - blockIdx.x - 1) << SHIFT_X) + threadIdx.x;
int dst_y = ((gridDim.y - blockIdx.y - 1) << SHIFT_Y) + threadIdx.y;
if (blockIdx.x < gridDim.x - 1) {
dst_x -= empty_x;
}
if (blockIdx.y < gridDim.y - 1) {
dst_y -= empty_y;
}
T* output = (T*)((uchar*)dst + dst_y * dst_stride);
// For the last block along an axis, skip the unused tile entries on
// that axis when reading back from shared memory.
if (blockIdx.y < gridDim.y - 1) {
output[dst_x] = data[threadIdx.y][threadIdx.x + empty_x];
}
else if (blockIdx.x < gridDim.x - 1) {
output[dst_x] = data[threadIdx.y + empty_y][threadIdx.x];
}
else {
output[dst_x] = data[threadIdx.y + empty_y][threadIdx.x + empty_x];
}
}
}
}
// Rotates an image 270 degrees clockwise through a shared-memory tile.
// Launch: LENGTH x LENGTH blocks over the source; strides are in bytes.
// empty_y is the number of unused columns in the rounded-up x-grid, which
// becomes a top gap in the rotated y coordinate that must be subtracted out.
template <typename T>
__global__
void rotate270Kernel(const T* src, int src_rows, int src_cols, int src_stride,
T* dst, int dst_stride) {
// +1 padding on the inner dimension avoids shared-memory bank conflicts
// on the transposed accesses.
__shared__ T data[LENGTH][LENGTH + 1];
int element_x = (blockIdx.x << SHIFT) + threadIdx.x;
int element_y = (blockIdx.y << SHIFT) + threadIdx.y;
int empty_y = (gridDim.x << SHIFT) - src_cols;
// Fast path: interior blocks are fully populated, no bounds checks needed.
if (blockIdx.x < gridDim.x - 1 && blockIdx.y < gridDim.y - 1) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[LENGTH - threadIdx.x - 1][threadIdx.y] = value;
__syncthreads();
int dst_x = (blockIdx.y << SHIFT) + threadIdx.x;
int dst_y = ((gridDim.x - blockIdx.x - 1) << SHIFT) + threadIdx.y;
T* output = (T*)((uchar*)dst + (dst_y - empty_y) * dst_stride);
output[dst_x] = data[threadIdx.y][threadIdx.x];
}
else {
// Edge blocks: only in-bounds threads load; all threads hit the barrier.
if (element_x < src_cols && element_y < src_rows) {
T* input = (T*)((uchar*)src + element_y * src_stride);
T value = input[element_x];
data[LENGTH - threadIdx.x - 1][threadIdx.y] = value;
}
__syncthreads();
// Valid extents of this block after the rotation (rows/cols swap roles).
int threadIdx_x = blockIdx.y == gridDim.y - 1 ?
src_rows - (blockIdx.y << SHIFT) : LENGTH;
int threadIdx_y = blockIdx.x == gridDim.x - 1 ?
src_cols - (blockIdx.x << SHIFT) : LENGTH;
if (threadIdx.x < threadIdx_x && threadIdx.y < threadIdx_y) {
int dst_x = (blockIdx.y << SHIFT) + threadIdx.x;
int dst_y = ((gridDim.x - blockIdx.x - 1) << SHIFT) + threadIdx.y;
if (blockIdx.x < gridDim.x - 1) {
dst_y -= empty_y;
}
T* output = (T*)((uchar*)dst + dst_y * dst_stride);
if (blockIdx.x < gridDim.x - 1) {
output[dst_x] = data[threadIdx.y][threadIdx.x];
}
else {
// Last x-block: skip the empty_y unused tile rows when reading back.
output[dst_x] = data[threadIdx.y + empty_y][threadIdx.x];
}
}
}
}
// Host dispatcher for uchar images: validates arguments, picks the block
// geometry for the requested angle, and launches the matching kernel
// instantiation for 1/3/4 channels. Strides are in bytes.
RetCode rotate(const uchar* src, int src_rows, int src_cols, int channels,
               int src_stride, uchar* dst, int dst_rows, int dst_cols,
               int dst_stride, int degree, cudaStream_t stream) {
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(src_rows >= 1 && src_cols >= 1);
  PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1);
  PPL_ASSERT((src_rows == dst_rows && src_cols == dst_cols) ||
             (src_rows == dst_cols && src_cols == dst_rows));
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= src_cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= dst_cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(degree == 90 || degree == 180 || degree == 270);
  // Square tiles for the transposing rotations (90/270).
  dim3 square_block(LENGTH, LENGTH);
  dim3 square_grid(divideUp(src_cols, LENGTH, SHIFT),
                   divideUp(src_rows, LENGTH, SHIFT));
  // Wide, flat blocks for the 180-degree rotation.
  dim3 flat_block(LENGTH_X, LENGTH_Y);
  dim3 flat_grid(divideUp(src_cols, LENGTH_X, SHIFT_X),
                 divideUp(src_rows, LENGTH_Y, SHIFT_Y));
  if (degree == 90) {
    switch (channels) {
      case 1:
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar*)src, src_rows, src_cols, src_stride, (uchar*)dst,
            dst_stride);
        break;
      case 3:
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar3*)src, src_rows, src_cols, src_stride, (uchar3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar4*)src, src_rows, src_cols, src_stride, (uchar4*)dst,
            dst_stride);
        break;
    }
  }
  else if (degree == 180) {
    switch (channels) {
      case 1:
        rotate180C1Kernel<<<flat_grid, flat_block, 0, stream>>>(
            (uchar*)src, src_rows, src_cols, src_stride, (uchar*)dst,
            dst_stride);
        break;
      case 3:
        rotate180CnKernel<<<flat_grid, flat_block, 0, stream>>>(
            (uchar3*)src, src_rows, src_cols, src_stride, (uchar3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate180CnKernel<<<flat_grid, flat_block, 0, stream>>>(
            (uchar4*)src, src_rows, src_cols, src_stride, (uchar4*)dst,
            dst_stride);
        break;
    }
  }
  else {  // degree == 270
    switch (channels) {
      case 1:
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar*)src, src_rows, src_cols, src_stride, (uchar*)dst,
            dst_stride);
        break;
      case 3:
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar3*)src, src_rows, src_cols, src_stride, (uchar3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (uchar4*)src, src_rows, src_cols, src_stride, (uchar4*)dst,
            dst_stride);
        break;
    }
  }
  // Launches are asynchronous; surface any launch-time error.
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Host dispatcher for float images: validates arguments, picks the block
// geometry for the requested angle, and launches the matching kernel
// instantiation for 1/3/4 channels. Strides are in bytes.
RetCode rotate(const float* src, int src_rows, int src_cols, int channels,
               int src_stride, float* dst, int dst_rows, int dst_cols,
               int dst_stride, int degree, cudaStream_t stream) {
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(src_rows >= 1 && src_cols >= 1);
  PPL_ASSERT(dst_rows >= 1 && dst_cols >= 1);
  PPL_ASSERT((src_rows == dst_rows && src_cols == dst_cols) ||
             (src_rows == dst_cols && src_cols == dst_rows));
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= src_cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= dst_cols * channels * (int)sizeof(float));
  PPL_ASSERT(degree == 90 || degree == 180 || degree == 270);
  // Square tiles for the transposing rotations (90/270).
  dim3 square_block(LENGTH, LENGTH);
  dim3 square_grid(divideUp(src_cols, LENGTH, SHIFT),
                   divideUp(src_rows, LENGTH, SHIFT));
  // Wide, flat blocks for the 180-degree rotation.
  dim3 flat_block(LENGTH_X, LENGTH_Y);
  dim3 flat_grid(divideUp(src_cols, LENGTH_X, SHIFT_X),
                 divideUp(src_rows, LENGTH_Y, SHIFT_Y));
  if (degree == 90) {
    switch (channels) {
      case 1:
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (float*)src, src_rows, src_cols, src_stride, (float*)dst,
            dst_stride);
        break;
      case 3:
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (float3*)src, src_rows, src_cols, src_stride, (float3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate90Kernel<<<square_grid, square_block, 0, stream>>>(
            (float4*)src, src_rows, src_cols, src_stride, (float4*)dst,
            dst_stride);
        break;
    }
  }
  else if (degree == 180) {
    switch (channels) {
      // NOTE(review): unlike the uchar overload, channels == 1 also goes
      // through the Cn kernel here — presumably intentional; confirm.
      case 1:
        rotate180CnKernel<<<flat_grid, flat_block, 0, stream>>>(
            (float*)src, src_rows, src_cols, src_stride, (float*)dst,
            dst_stride);
        break;
      case 3:
        rotate180CnKernel<<<flat_grid, flat_block, 0, stream>>>(
            (float3*)src, src_rows, src_cols, src_stride, (float3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate180CnKernel<<<flat_grid, flat_block, 0, stream>>>(
            (float4*)src, src_rows, src_cols, src_stride, (float4*)dst,
            dst_stride);
        break;
    }
  }
  else {  // degree == 270
    switch (channels) {
      case 1:
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (float*)src, src_rows, src_cols, src_stride, (float*)dst,
            dst_stride);
        break;
      case 3:
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (float3*)src, src_rows, src_cols, src_stride, (float3*)dst,
            dst_stride);
        break;
      default:  // channels == 4
        rotate270Kernel<<<square_grid, square_block, 0, stream>>>(
            (float4*)src, src_rows, src_cols, src_stride, (float4*)dst,
            dst_stride);
        break;
    }
  }
  // Launches are asynchronous; surface any launch-time error.
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Public API specialization: single-channel uchar rotation.
template <>
RetCode Rotate<uchar, 1>(cudaStream_t stream,
                         int inHeight,
                         int inWidth,
                         int inWidthStride,
                         const uchar* inData,
                         int outHeight,
                         int outWidth,
                         int outWidthStride,
                         uchar* outData,
                         int degree) {
  // uchar strides are already in bytes; forward with channels = 1.
  return rotate(inData, inHeight, inWidth, 1, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
// Public API specialization: three-channel uchar rotation.
template <>
RetCode Rotate<uchar, 3>(cudaStream_t stream,
                         int inHeight,
                         int inWidth,
                         int inWidthStride,
                         const uchar* inData,
                         int outHeight,
                         int outWidth,
                         int outWidthStride,
                         uchar* outData,
                         int degree) {
  // uchar strides are already in bytes; forward with channels = 3.
  return rotate(inData, inHeight, inWidth, 3, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
// Rotate() specialization for 4-channel uchar images; strides are already in
// bytes for uchar data, so they pass straight through to rotate().
template <>
RetCode Rotate<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth,
                         int inWidthStride, const uchar* inData, int outHeight,
                         int outWidth, int outWidthStride, uchar* outData,
                         int degree) {
  return rotate(inData, inHeight, inWidth, 4, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
// Rotate() specialization for single-channel float images. The public
// interface takes strides in elements; they are scaled to bytes before the
// forwarded call (presumably rotate() expects byte strides — consistent with
// the uchar specializations, where one element already equals one byte).
template <>
RetCode Rotate<float, 1>(cudaStream_t stream, int inHeight, int inWidth,
                         int inWidthStride, const float* inData, int outHeight,
                         int outWidth, int outWidthStride, float* outData,
                         int degree) {
  inWidthStride *= sizeof(float);   // elements -> bytes
  outWidthStride *= sizeof(float);  // elements -> bytes
  return rotate(inData, inHeight, inWidth, 1, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
// Rotate() specialization for 3-channel float images; element strides are
// converted to byte strides before forwarding (see the float/1 overload).
template <>
RetCode Rotate<float, 3>(cudaStream_t stream, int inHeight, int inWidth,
                         int inWidthStride, const float* inData, int outHeight,
                         int outWidth, int outWidthStride, float* outData,
                         int degree) {
  inWidthStride *= sizeof(float);   // elements -> bytes
  outWidthStride *= sizeof(float);  // elements -> bytes
  return rotate(inData, inHeight, inWidth, 3, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
// Rotate() specialization for 4-channel float images; element strides are
// converted to byte strides before forwarding (see the float/1 overload).
template <>
RetCode Rotate<float, 4>(cudaStream_t stream, int inHeight, int inWidth,
                         int inWidthStride, const float* inData, int outHeight,
                         int outWidth, int outWidthStride, float* outData,
                         int degree) {
  inWidthStride *= sizeof(float);   // elements -> bytes
  outWidthStride *= sizeof(float);  // elements -> bytes
  return rotate(inData, inHeight, inWidth, 4, inWidthStride, outData,
                outHeight, outWidth, outWidthStride, degree, stream);
}
} // namespace cuda
} // namespace cv
} // namespace ppl | the_stack |
#include "caffe/layer.hpp"
#include "caffe/layers/flow_warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/benchmark.hpp"
#include <iostream>
#include <fstream>
#define CUDART_NAN_F __int_as_float(0x7fffffff)
namespace caffe {
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
//#define DISPLAY_TIMINGS
#define RA_TILE 32
#define RA_ROWS 8
// Transposes an NCHW image into NHWC layout so the forward warp kernel can
// read all channels of one pixel contiguously.
// Launch: block = (RA_TILE, RA_ROWS), grid = (cblocks*num, ceil(width/RA_TILE), height).
// A RA_TILE x (RA_TILE+1) shared tile is used; the +1 column of padding
// avoids shared-memory bank conflicts on the transposed accesses.
template <typename Dtype>
__global__ void flow_warp_rearrange_kernel(const Dtype* in, Dtype* out, int num, int channels, int cblocks, int width, int height, int widthheight)
{
    __shared__ float buffer[RA_TILE][RA_TILE+1];

    // Decode the (sample, channel-tile) pair packed into blockIdx.x.
    int n = blockIdx.x/cblocks;
    if(n>=num) return;
    int c0 = (blockIdx.x%cblocks)*RA_TILE; // first channel of this tile
    int x0 = blockIdx.y*RA_TILE;           // first column of this tile
    int y = blockIdx.z;                    // row handled by this block

    // Phase 1: load a (channel x x) tile from NCHW input. threadIdx.x walks x
    // (coalesced reads) while threadIdx.y strides over the channels.
    int xoff=threadIdx.x;
    int coff=threadIdx.y;
    int x=x0+xoff;
    if(x<width)
        for(int i=coff; i<RA_TILE && c0+i<channels; i+=RA_ROWS)
            buffer[i][xoff] = in[((n*channels + c0 + i)*height + y)*width + x];
    __syncthreads();

    // Phase 2: write the tile out transposed into NHWC. threadIdx.x now walks
    // channels (coalesced writes) while threadIdx.y strides over x.
    coff = threadIdx.x;
    xoff = threadIdx.y;
    int c = c0 + coff;
    if(c<channels)
        for(int j=xoff; j<RA_TILE && x0+j<width; j+=RA_ROWS)
            out[((n*height + y)*width + x0+j)*channels + c] = buffer[coff][j];
}
#define FW_THREADS 32
#define FW_TILE_X FW_THREADS
#define FW_TILE_C FW_THREADS
// Forward bilinear warp, shared-memory version. `image` must already be in
// NHWC ("rearranged") layout; the output `warped` is NCHW.
// Launch: block = (FW_TILE_X, FW_TILE_C), grid = (ceil(width/FW_TILE_X), height, num).
// The thread mapping is transposed between sampling and storing: when
// sampling, threadIdx.y selects the pixel (x0 + threadIdx.y) and threadIdx.x
// the channel; when storing, the roles swap — this keeps both the channel
// reads and the pixel writes coalesced.
template <typename Dtype>
__global__ void flow_warp_kernel_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight, float fillValue)
{
    int y = blockIdx.y;
    int n = blockIdx.z;

    __shared__ float x2_buf[FW_TILE_X], y2_buf[FW_TILE_X]; // per-pixel source coords of this tile
    __shared__ float buffer[FW_TILE_C][FW_TILE_X+1];       // +1 pad avoids bank conflicts

    int x;
    int c;

    // One thread row loads the flow once per pixel of this tile.
    x = blockIdx.x*FW_TILE_X + threadIdx.x;
    if(threadIdx.y==0 && x<width)
    {
        x2_buf[threadIdx.x] = float(x) + flow[((2*n )*height + y)*width + x];
        y2_buf[threadIdx.x] = float(y) + flow[((2*n+1)*height + y)*width + x];
    }
    __syncthreads();

    // NOTE(review): for lanes whose pixel x0+threadIdx.y >= width, these
    // buffers are read uninitialized. The results are discarded by the
    // x<width guard at the bottom, but a garbage coordinate that happens to
    // pass the range test below can still cause out-of-bounds *reads* of
    // `image` — consider initializing x2_buf/y2_buf for the tail.
    float x2 = x2_buf[threadIdx.y];
    float y2 = y2_buf[threadIdx.y];

    // Bilinear sampling setup, neighbours clamped at the right/bottom border.
    int ix2_L = int(x2);
    int iy2_T = int(y2);
    int ix2_R = min(ix2_L+1, width-1);
    int iy2_B = min(iy2_T+1, height-1);

    // NHWC offsets of the four neighbours (channel added inside the loop).
    int off_TL = ((n*height + iy2_T)*width + ix2_L)*channels;
    int off_TR = ((n*height + iy2_T)*width + ix2_R)*channels;
    int off_BL = ((n*height + iy2_B)*width + ix2_L)*channels;
    int off_BR = ((n*height + iy2_B)*width + ix2_R)*channels;

    float alpha = x2-ix2_L;
    float beta = y2-iy2_T;
    float coeffTL = (1-alpha)*(1-beta);
    float coeffTR = alpha*(1-beta);
    float coeffBL = (1-alpha)*beta;
    float coeffBR = alpha*beta;

    // Sweep the channel dimension in tiles of FW_TILE_C.
    for(int cb=0; cb<cblocks; cb++)
    {
        __syncthreads();
        buffer[threadIdx.y][threadIdx.x] = fillValue; // default for out-of-range flows
        __syncthreads();

        c = cb*FW_TILE_C + threadIdx.x;
        if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels)
            buffer[threadIdx.y][threadIdx.x] = // buffer [x][c]
                coeffTL * image[off_TL + c] +
                coeffTR * image[off_TR + c] +
                coeffBL * image[off_BL + c] +
                coeffBR * image[off_BR + c];
        __syncthreads();

        // Transposed store back to NCHW.
        c = cb*FW_TILE_C + threadIdx.y;
        x = blockIdx.x*FW_TILE_X + threadIdx.x;
        if(c<channels && x<width)
            warped[((n*channels+c)*height + y)*width + x] = buffer[threadIdx.x][threadIdx.y];
    }
}
// Forward bilinear warp without shared memory: one thread per target pixel
// (x, y, n), iterating over channels. `image` and `warped` are both NCHW.
// Pixels whose flow lands outside the image are left untouched (the caller
// pre-fills the output). Launch: block = FW_TILE_X threads,
// grid = (ceil(width/FW_TILE_X), height, num).
// Fixed: the bilinear weights coeffTL..coeffBR were recomputed on every
// channel-loop iteration even though they do not depend on the channel; they
// are now hoisted out of the loop (identical results).
template <typename Dtype>
__global__ void flow_warp_kernel_no_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int width, int wblocks, int height, int widthheight)
{
    int x = blockIdx.x*FW_TILE_X + threadIdx.x;
    if(x>=width)
        return;
    int y = blockIdx.y;
    int n = blockIdx.z;

    // Source coordinates: flow channel 0 is the x displacement, channel 1 the
    // y displacement.
    float x2 = float(x) + flow[((2*n )*height + y)*width + x];
    float y2 = float(y) + flow[((2*n+1)*height + y)*width + x];

    if(x2>=0.f && y2>=0.f && x2<width && y2<height)
    {
        // Four nearest neighbours, clamped at the right/bottom border.
        int ix2_L = int(x2);
        int iy2_T = int(y2);
        int ix2_R = min(ix2_L+1, width-1);
        int iy2_B = min(iy2_T+1, height-1);

        float alpha = x2-ix2_L;
        float beta = y2-iy2_T;

        // Hoisted out of the channel loop: the weights are channel-invariant.
        float coeffTL = (1-alpha)*(1-beta);
        float coeffTR = alpha*(1-beta);
        float coeffBL = (1-alpha)*beta;
        float coeffBR = alpha*beta;

        for(int c=0; c<channels; c++)
        {
            int ch_off = (n*channels+c)*height;

            int off_TL = (ch_off + iy2_T)*width + ix2_L;
            int off_TR = (ch_off + iy2_T)*width + ix2_R;
            int off_BL = (ch_off + iy2_B)*width + ix2_L;
            int off_BR = (ch_off + iy2_B)*width + ix2_R;

            warped[(ch_off + y)*width + x] =
                coeffTL * image[off_TL] +
                coeffTR * image[off_TR] +
                coeffBL * image[off_BL] +
                coeffBR * image[off_BR];
        }
    }
}
// Backward pass without shared memory, one thread per target pixel (x, y, n).
// Scatters the incoming gradient into image_diff with the bilinear weights
// (atomicAdd, since many target pixels may sample the same source pixel) and
// computes flow_diff analytically from horizontal/vertical finite differences
// of the source image. Out-of-range flows contribute nothing; both diff
// buffers are pre-zeroed by the caller.
// NOTE(review): image_diff is typed float* regardless of Dtype because
// atomicAdd is applied to float; a double instantiation would accumulate with
// the wrong element size — presumably only the float path is exercised.
// TODO confirm.
template <typename Dtype>
__global__ void flow_warp_backward_kernel_no_smem(
        const Dtype* image_data, float* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff,
        int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
    int x = blockIdx.x*FW_TILE_X + threadIdx.x;
    if(x>=width)
        return;
    int y = blockIdx.y;
    int n = blockIdx.z;

    // Source sampling position of this target pixel.
    float x2 = float(x) + flow_data[((2*n )*height + y)*width + x];
    float y2 = float(y) + flow_data[((2*n+1)*height + y)*width + x];

    if(x2>=0.f && y2>=0.f && x2<width && y2<height)
    {
        int ix2_L = int(x2);
        int iy2_T = int(y2);
        int ix2_R = min(ix2_L+1, width-1);
        int iy2_B = min(iy2_T+1, height-1);

        float alpha=x2-ix2_L;
        float beta=y2-iy2_T;

        // d(loss)/d(image): distribute the incoming gradient over the four
        // sampled neighbours with their bilinear weights.
        for(int c=0; c<channels; c++)
        {
            int ch_off = (n*channels + c)*height;

            float warped_diff_value = warped_diff[(ch_off + y)*width + x];
            atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_L], warped_diff_value * (1-alpha)*(1-beta));
            atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_R], warped_diff_value * alpha*(1-beta));
            atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_L], warped_diff_value * (1-alpha)*beta);
            atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_R], warped_diff_value * alpha*beta);
        }

        // d(loss)/d(u): horizontal image derivative blended over the two rows.
        // gamma equals (1-beta) except when iy2_B was clamped at the bottom
        // border.
        float gamma = iy2_B - y2;

        float bot_diff = 0;
        for(int c=0; c<channels; c++)
        {
            int ch_off = (n*channels + c)*height;

            float temp = 0;
            temp += gamma * (image_data[(ch_off + iy2_T)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_L]);
            temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_B)*width + ix2_L]);

            bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
        }
        flow_diff[(2*n*height + y)*width + x] = bot_diff;

        // d(loss)/d(v): vertical image derivative blended over the two
        // columns; gamma equals (1-alpha) except at a clamped right border.
        gamma = ix2_R - x2;
        bot_diff = 0;
        for(int c=0; c<channels; c++)
        {
            int ch_off = (n*channels + c)*height;

            float temp = 0;
            temp += gamma * (image_data[(ch_off + iy2_B)*width + ix2_L] - image_data[(ch_off + iy2_T)*width + ix2_L]);
            temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_R]);

            bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
        }
        flow_diff[((2*n+1)*height + y)*width + x] = bot_diff;
    }
}
// NOTE(review): placeholder for a shared-memory variant of the backward
// warping kernel. The original body was an entirely commented-out,
// non-compiling work-in-progress draft (it referenced undeclared names such
// as `imhBL` and `bot_diff` outside any declaration); the dead draft has been
// condensed to this note. The kernel is deliberately a no-op — Backward_gpu()
// launches flow_warp_backward_kernel_no_smem instead.
template <typename Dtype>
__global__ void flow_warp_backward_kernel_smem(const Dtype* trans_image_data, Dtype* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
}
// Forward pass: warps bottom[0] (image, NCHW) by bottom[1] (flow, 2 channels:
// x displacement then y displacement) into top[0] using bilinear sampling.
// The image is first transposed to NHWC scratch storage so the warp kernel
// can read all channels of a pixel contiguously.
template <typename Dtype>
void FlowWarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
    int width = top[0]->width();
    int height = top[0]->height();
    int channels = top[0]->channels();
    int num = top[0]->num();

    const int wh_size = width * height;
    const int whc_size = width * height * channels; // currently unused

    Dtype* warped_data = top[0]->mutable_gpu_data(); // dest
    const Dtype* image_data = bottom[0]->gpu_data(); // source image
    Dtype* trans_image_data = transposed_image_.mutable_gpu_data(); // NHWC scratch copy of the source image
    const Dtype* flow_data = bottom[1]->gpu_data(); // source flow

    // Quiet-NaN bit pattern for the fill-with-NaN mode.
    // NOTE(review): this pointer type-punning is formally undefined behaviour
    // (strict aliasing); std::memcpy or the CUDART_NAN_F macro defined above
    // would be cleaner.
    int nan = 0xFFE00000;
    float nanf = *(reinterpret_cast<float*>(&nan));

    Dtype fillValue = this->layer_param().flow_warp_param().fill_value() == FlowWarpParameter_FillParameter_ZERO ? 0 : nanf;

    // NOTE(review): cudaMemset writes a repeated *byte*, so converting
    // fillValue to its int parameter only produces a true fill in the ZERO
    // case. The smem kernel below rewrites every covered pixel with fillValue
    // anyway, so this pre-fill looks like belt-and-braces — confirm the NaN
    // fill mode before relying on it. Also note sizeof(float) is hard-coded.
    cudaMemset(warped_data, fillValue, width*height*channels*num*sizeof(float));

#ifdef DISPLAY_TIMINGS
    caffe::Timer t1;
    t1.Start();
#endif

    // Pass 1: NCHW -> NHWC transpose of the source image.
    dim3 rearrangeThreads(RA_TILE,RA_ROWS,1);
    int cblocks = ((channels-1)/RA_TILE+1);
    dim3 rearrangeBlocks(cblocks*num, (width-1)/RA_TILE+1, height);
    flow_warp_rearrange_kernel<Dtype><<<rearrangeBlocks, rearrangeThreads>>>(
        image_data,
        trans_image_data,
        num,
        channels,
        cblocks,
        width,
        height,
        wh_size
    );
    CUDA_POST_KERNEL_CHECK;

#ifdef DISPLAY_TIMINGS
    t1.Stop();
    LOG(INFO) << "rearrange time " << t1.MilliSeconds() << "ms";
#endif

    // Pass 2: bilinear warp. Only the shared-memory path is active; the
    // channel-count dispatch to the no-smem kernel is disabled below.
    // if(channels>8)
    {
#ifdef DISPLAY_TIMINGS
        caffe::Timer t2;
        t2.Start();
#endif
        int wblocks = ((width-1)/FW_TILE_X+1);
        int cblocks = ((channels-1)/FW_TILE_C+1);
        dim3 warpThreads(FW_TILE_X,FW_TILE_C);
        dim3 warpBlocks(wblocks, height, num);
        flow_warp_kernel_smem<Dtype><<<warpBlocks, warpThreads>>>(
            trans_image_data,
            flow_data,
            warped_data,
            num,
            channels,
            cblocks,
            width,
            wblocks,
            height,
            wh_size,
            fillValue
        );
        CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
        t2.Stop();
        LOG(INFO) << "warp time 1a: " << t2.MilliSeconds() << "ms";
#endif
    }
    // else
    // {
    //#ifdef DISPLAY_TIMINGS
    //    caffe::Timer t2a;
    //    t2a.Start();
    //#endif
    //    int wblocks = ((width-1)/FW_TILE_X+1);
    //    dim3 warpThreads(FW_TILE_X);
    //    dim3 warpBlocks(wblocks, height, num);
    //    flow_warp_kernel_no_smem<Dtype><<<warpBlocks, warpThreads>>>(
    //        image_data,
    //        flow_data,
    //        warped_data,
    //        num,
    //        channels,
    //        width,
    //        wblocks,
    //        height,
    //        wh_size
    //    );
    //    CUDA_POST_KERNEL_CHECK;
    //#ifdef DISPLAY_TIMINGS
    //    t2a.Stop();
    //    LOG(INFO) << "warp time 1b: " << t2a.MilliSeconds() << "ms";
    //#endif
    // }
}
// Backward pass: scatters top[0]'s gradient into the image gradient
// (bilinear weights, accumulated with atomicAdd) and computes the flow
// gradient from finite differences of the source image — see
// flow_warp_backward_kernel_no_smem.
template <typename Dtype>
void FlowWarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
        const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
    int width = top[0]->width();
    int height = top[0]->height();
    int channels = top[0]->channels();
    int num = top[0]->num();

    const int wh_size = width * height;
    const int whc_size = width * height * channels; // currently unused

    const Dtype* warped_data = top[0]->gpu_data(); // dest
    const Dtype* warped_diff = top[0]->gpu_diff(); // dest
    const Dtype* image_data = bottom[0]->gpu_data(); // source image
    Dtype* image_diff = bottom[0]->mutable_gpu_diff(); // source image
    const Dtype* flow_data = bottom[1]->gpu_data(); // source flow
    Dtype* flow_diff = bottom[1]->mutable_gpu_diff(); // source flow

    // The kernel accumulates with atomicAdd and skips out-of-range pixels, so
    // both gradient buffers must start zeroed.
    // NOTE(review): sizeof(float) is hard-coded here (and the kernel casts
    // image_diff to float*), so this layer effectively assumes Dtype == float.
    cudaMemset(image_diff, 0, width*height*channels*num*sizeof(float));
    cudaMemset(flow_diff, 0, width*height*2*num*sizeof(float));

    //Backward_cpu(top, propagate_down, bottom);
    //return;

#ifdef DISPLAY_TIMINGS
    caffe::Timer t3a;
    t3a.Start();
#endif
    int wblocks = ((width-1)/FW_TILE_X+1);
    int cblocks = ((channels-1)/FW_TILE_C+1);
    dim3 warpThreads(FW_TILE_X,1);
    dim3 warpBlocks(wblocks, height, num);
    flow_warp_backward_kernel_no_smem<Dtype><<<warpBlocks, warpThreads>>>(
        image_data,
        (float*)image_diff, // NOTE(review): valid only for the float instantiation
        flow_data,
        flow_diff,
        warped_diff,
        num,
        channels,
        cblocks,
        width,
        wblocks,
        height,
        wh_size
    );
    CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
    t3a.Stop();
    LOG(INFO) << "backward time 1a: " << t3a.MilliSeconds() << "ms";
#endif

    // NOTE(review): gradients are computed unconditionally and zeroed
    // afterwards when propagation is disabled — wasteful but harmless.
    if(!propagate_down[0]) caffe_gpu_memset(bottom[0]->count()*sizeof(Dtype), 0, image_diff);
    if(!propagate_down[1]) caffe_gpu_memset(bottom[1]->count()*sizeof(Dtype), 0, flow_diff);

    // (A long commented-out block of host-side printf debugging — dumping the
    // flow, the image, and their gradients element by element — was condensed
    // to this note.)
}
INSTANTIATE_LAYER_GPU_FUNCS(FlowWarpLayer);
} // namespace caffe | the_stack |
/*
 * Partial-verification reference data, one set per benchmark class.
 * test_index_array / test_rank_array are filled in main() from the matching
 * class-specific <CLASS>_test_* pair: TEST_ARRAY_SIZE probe positions into
 * the key array and the ranks expected at those positions.
 */
int test_index_array[TEST_ARRAY_SIZE],
    test_rank_array[TEST_ARRAY_SIZE],
    S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431},
    S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463},
    W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505},
    W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018},
    A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760},
    A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264},
    B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214},
    B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99},
    C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814},
    C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895},
    D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525},
    D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337};
/* is */
/*
 * Driver for the GPU version of the NPB IS (Integer Sort) benchmark.
 *
 * Usage: is <tpb_create_seq> <tpb_rank> <tpb_full_verify>
 *        (threads-per-block for the three kernel families)
 *
 * Generates the key sequence on the device, runs MAX_ITERATIONS ranking
 * passes (each with a partial verification), then fully verifies the sorted
 * order and prints PASS/FAIL.
 *
 * Fixes vs. the original: argv is validated before use (previously
 * atoi(argv[1..3]) was called unconditionally — a crash/UB when arguments
 * were missing), and memory_aux is sized with the grid's ceil division
 * instead of a floor division that could under-allocate.
 */
int main(int argc, char** argv){
    int i, iteration;
    int passed_verification;

    /* device buffers */
    int* key_array_device;
    int* key_buff1_device;
    int* key_buff2_device;
    int* index_array_device;
    int* rank_array_device;
    int* partial_verify_vals_device;
    int* passed_verification_device;
    int* sum_device;
    size_t size_key_array_device;
    size_t size_key_buff1_device;
    size_t size_key_buff2_device;
    size_t size_index_array_device;
    size_t size_rank_array_device;
    size_t size_partial_verify_vals_device;
    size_t size_passed_verification_device;
    size_t size_sum_device;

    /* launch-configuration bookkeeping */
    int size_shared_data_on_rank_4;
    int size_shared_data_on_rank_5;
    int size_shared_data_on_full_verify_3;
    int threads_per_block_on_create_seq;
    int threads_per_block_on_rank;
    int threads_per_block_on_rank_1;
    int threads_per_block_on_rank_2;
    int threads_per_block_on_rank_3;
    int threads_per_block_on_rank_4;
    int threads_per_block_on_rank_5;
    int threads_per_block_on_rank_6;
    int threads_per_block_on_rank_7;
    int threads_per_block_on_full_verify;
    int threads_per_block_on_full_verify_1;
    int threads_per_block_on_full_verify_2;
    int threads_per_block_on_full_verify_3;
    int blocks_per_grid_on_create_seq;
    int blocks_per_grid_on_rank_1;
    int blocks_per_grid_on_rank_2;
    int blocks_per_grid_on_rank_3;
    int blocks_per_grid_on_rank_4;
    int blocks_per_grid_on_rank_5;
    int blocks_per_grid_on_rank_6;
    int blocks_per_grid_on_rank_7;
    int blocks_per_grid_on_full_verify_1;
    int blocks_per_grid_on_full_verify_2;
    int blocks_per_grid_on_full_verify_3;
    int amount_of_work_on_create_seq;
    int amount_of_work_on_rank_1;
    int amount_of_work_on_rank_2;
    int amount_of_work_on_rank_3;
    int amount_of_work_on_rank_4;
    int amount_of_work_on_rank_5;
    int amount_of_work_on_rank_6;
    int amount_of_work_on_rank_7;
    int amount_of_work_on_full_verify_1;
    int amount_of_work_on_full_verify_2;
    int amount_of_work_on_full_verify_3;

    /* require the three block-size arguments before touching argv
       (previously missing: atoi(argv[i]) on absent arguments is UB) */
    if(argc < 4){
        printf(" Usage: %s <threads_per_block_on_create_seq> <threads_per_block_on_rank> <threads_per_block_on_full_verify>\n",
               argv[0]);
        return 1;
    }

    /* initialize the verification arrays for a valid class */
    for(i=0; i<TEST_ARRAY_SIZE; i++){
        switch(CLASS){
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i] = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i] = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i] = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i] = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i] = C_test_rank_array[i];
                break;
            case 'D':
                test_index_array[i] = D_test_index_array[i];
                test_rank_array[i] = D_test_rank_array[i];
                break;
        };
    }

    /* printout initial NPB info */
    printf("\n\n NAS Parallel Benchmarks 4.1 IS Benchmark\n\n");
    printf(" Size:  %ld  (class %c)\n", (long)TOTAL_KEYS, CLASS);
    printf(" Iterations:   %d\n", MAX_ITERATIONS);

    /* define threads_per_block */
    threads_per_block_on_create_seq = atoi(argv[1]);
    threads_per_block_on_rank = atoi(argv[2]);
    threads_per_block_on_full_verify = atoi(argv[3]);

    threads_per_block_on_rank_1=1;
    threads_per_block_on_rank_2=threads_per_block_on_rank;
    threads_per_block_on_rank_3=threads_per_block_on_rank;
    threads_per_block_on_rank_4=threads_per_block_on_rank;
    threads_per_block_on_rank_5=threads_per_block_on_rank;
    threads_per_block_on_rank_6=threads_per_block_on_rank;
    threads_per_block_on_rank_7=1;
    threads_per_block_on_full_verify_1=threads_per_block_on_full_verify;
    threads_per_block_on_full_verify_2=threads_per_block_on_full_verify;
    threads_per_block_on_full_verify_3=threads_per_block_on_full_verify;

    amount_of_work_on_create_seq=threads_per_block_on_create_seq*threads_per_block_on_create_seq;
    amount_of_work_on_rank_1=1;
    amount_of_work_on_rank_2=MAX_KEY;
    amount_of_work_on_rank_3=NUM_KEYS;
    amount_of_work_on_rank_4=threads_per_block_on_rank_4*threads_per_block_on_rank_4;
    amount_of_work_on_rank_5=threads_per_block_on_rank_5;
    amount_of_work_on_rank_6=threads_per_block_on_rank_6*threads_per_block_on_rank_6;
    amount_of_work_on_rank_7=1;
    amount_of_work_on_full_verify_1=NUM_KEYS;
    amount_of_work_on_full_verify_2=NUM_KEYS;
    amount_of_work_on_full_verify_3=NUM_KEYS;

    blocks_per_grid_on_create_seq=(ceil((double)(amount_of_work_on_create_seq)/(double)(threads_per_block_on_create_seq)));
    blocks_per_grid_on_rank_1=1;
    blocks_per_grid_on_rank_2=(ceil((double)(amount_of_work_on_rank_2)/(double)(threads_per_block_on_rank_2)));
    blocks_per_grid_on_rank_3=(ceil((double)(amount_of_work_on_rank_3)/(double)(threads_per_block_on_rank_3)));
    if(amount_of_work_on_rank_4 > MAX_KEY){amount_of_work_on_rank_4=MAX_KEY;}
    blocks_per_grid_on_rank_4=(ceil((double)(amount_of_work_on_rank_4)/(double)(threads_per_block_on_rank_4)));
    blocks_per_grid_on_rank_5=1;
    if(amount_of_work_on_rank_6 > MAX_KEY){amount_of_work_on_rank_6=MAX_KEY;}
    blocks_per_grid_on_rank_6=(ceil((double)(amount_of_work_on_rank_6)/(double)(threads_per_block_on_rank_6)));
    blocks_per_grid_on_rank_7=1;
    blocks_per_grid_on_full_verify_1=(ceil((double)(amount_of_work_on_full_verify_1)/(double)(threads_per_block_on_full_verify_1)));
    blocks_per_grid_on_full_verify_2=(ceil((double)(amount_of_work_on_full_verify_2)/(double)(threads_per_block_on_full_verify_2)));
    blocks_per_grid_on_full_verify_3=(ceil((double)(amount_of_work_on_full_verify_3)/(double)(threads_per_block_on_full_verify_3)));

    size_key_array_device=SIZE_OF_BUFFERS*sizeof(int);
    size_key_buff1_device=MAX_KEY*sizeof(int);
    size_key_buff2_device=SIZE_OF_BUFFERS*sizeof(int);
    size_index_array_device=TEST_ARRAY_SIZE*sizeof(int);
    size_rank_array_device=TEST_ARRAY_SIZE*sizeof(int);
    size_partial_verify_vals_device=TEST_ARRAY_SIZE*sizeof(int);
    size_passed_verification_device=1*sizeof(int);
    size_sum_device=threads_per_block_on_rank*sizeof(int);
    size_shared_data_on_rank_4=2*threads_per_block_on_rank_4*sizeof(int);
    size_shared_data_on_rank_5=2*threads_per_block_on_rank_5*sizeof(int);
    size_shared_data_on_full_verify_3=threads_per_block_on_full_verify_3*sizeof(int);

    cudaMalloc(&key_array_device, size_key_array_device);
    cudaMalloc(&key_buff1_device, size_key_buff1_device);
    cudaMalloc(&key_buff2_device, size_key_buff2_device);
    cudaMalloc(&index_array_device, size_index_array_device);
    cudaMalloc(&rank_array_device, size_rank_array_device);
    cudaMalloc(&partial_verify_vals_device, size_partial_verify_vals_device);
    cudaMalloc(&passed_verification_device, size_passed_verification_device);
    cudaMalloc(&sum_device, size_sum_device);

    cudaMemcpy(index_array_device, test_index_array, size_index_array_device, cudaMemcpyHostToDevice);
    cudaMemcpy(rank_array_device, test_rank_array, size_rank_array_device, cudaMemcpyHostToDevice);

    /* generate random number sequence and subsequent keys on all procs */
    create_seq_gpu_kernel<<<blocks_per_grid_on_create_seq,
        threads_per_block_on_create_seq>>>(key_array_device,
                314159265.00, /* random number gen seed */
                1220703125.00, /* random number gen mult */
                blocks_per_grid_on_create_seq,
                amount_of_work_on_create_seq);

    /* reset verification counter */
    passed_verification = 0;
    cudaMemcpy(passed_verification_device, &passed_verification, size_passed_verification_device, cudaMemcpyHostToDevice);

    /* timed ranking iterations; each iteration ends with a device-side
       partial verification (kernel 7) that bumps the counter */
    for(iteration=1; iteration<=MAX_ITERATIONS; iteration++){
        rank_gpu_kernel_1<<<blocks_per_grid_on_rank_1,
            threads_per_block_on_rank_1>>>(key_array_device,
                    partial_verify_vals_device,
                    index_array_device,
                    iteration,
                    blocks_per_grid_on_rank_1,
                    amount_of_work_on_rank_1);
        rank_gpu_kernel_2<<<blocks_per_grid_on_rank_2,
            threads_per_block_on_rank_2>>>(key_buff1_device,
                    blocks_per_grid_on_rank_2,
                    amount_of_work_on_rank_2);
        rank_gpu_kernel_3<<<blocks_per_grid_on_rank_3,
            threads_per_block_on_rank_3>>>(key_buff1_device,
                    key_array_device,
                    blocks_per_grid_on_rank_3,
                    amount_of_work_on_rank_3);
        rank_gpu_kernel_4<<<blocks_per_grid_on_rank_4,
            threads_per_block_on_rank_4,
            size_shared_data_on_rank_4>>>(key_buff1_device,
                    key_buff1_device,
                    sum_device,
                    blocks_per_grid_on_rank_4,
                    amount_of_work_on_rank_4);
        rank_gpu_kernel_5<<<blocks_per_grid_on_rank_5,
            threads_per_block_on_rank_5,
            size_shared_data_on_rank_5>>>(sum_device,
                    sum_device,
                    blocks_per_grid_on_rank_5,
                    amount_of_work_on_rank_5);
        rank_gpu_kernel_6<<<blocks_per_grid_on_rank_6,
            threads_per_block_on_rank_6>>>(key_buff1_device,
                    key_buff1_device,
                    sum_device,
                    blocks_per_grid_on_rank_6,
                    amount_of_work_on_rank_6);
        rank_gpu_kernel_7<<<blocks_per_grid_on_rank_7,
            threads_per_block_on_rank_7>>>(partial_verify_vals_device,
                    key_buff1_device,
                    rank_array_device,
                    passed_verification_device,
                    iteration,
                    blocks_per_grid_on_rank_7,
                    amount_of_work_on_rank_7);
    }
    cudaMemcpy(&passed_verification, passed_verification_device, size_passed_verification_device, cudaMemcpyDeviceToHost);

    /*
     * this tests that keys are in sequence: sorting of last ranked key seq
     * occurs here, but is an untimed operation
     */
    int* memory_aux_device;
    /* one partial result per reduction block; use the grid size (ceil
       division) — the previous floor division under-allocated when the work
       amount was not a multiple of the block size */
    int size_aux = blocks_per_grid_on_full_verify_3;
    int size_memory_aux=sizeof(int)*size_aux;
    cudaMalloc(&memory_aux_device, size_memory_aux);

    /* full_verify_gpu_kernel_1 */
    full_verify_gpu_kernel_1<<<blocks_per_grid_on_full_verify_1,
        threads_per_block_on_full_verify_1>>>(key_array_device,
                key_buff2_device,
                blocks_per_grid_on_full_verify_1,
                amount_of_work_on_full_verify_1);

    /* full_verify_gpu_kernel_2 */
    full_verify_gpu_kernel_2<<<blocks_per_grid_on_full_verify_2,
        threads_per_block_on_full_verify_2>>>(key_buff2_device,
                key_buff1_device,
                key_array_device,
                blocks_per_grid_on_full_verify_2,
                amount_of_work_on_full_verify_2);

    /* full_verify_gpu_kernel_3 */
    full_verify_gpu_kernel_3<<<blocks_per_grid_on_full_verify_3,
        threads_per_block_on_full_verify_3,
        size_shared_data_on_full_verify_3>>>(key_array_device,
                memory_aux_device,
                blocks_per_grid_on_full_verify_3,
                amount_of_work_on_full_verify_3);

    /* reduce the per-block out-of-order counts on the cpu */
    int j = 0;
    int* memory_aux_host=(int*)malloc(size_memory_aux);
    cudaMemcpy(memory_aux_host, memory_aux_device, size_memory_aux, cudaMemcpyDeviceToHost);
    for(i=0; i<size_aux; i++){
        j += memory_aux_host[i];
    }
    if(j!=0){
        printf( "Full_verify: number of keys out of sort: %ld\n", (long)j );
    }else{
        passed_verification++;
    }
    cudaFree(memory_aux_device);
    free(memory_aux_host);

    /* assemble a launch-configuration report (currently built but not
       printed in this driver) */
    char gpu_config[256];
    char gpu_config_string[2048];
    sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block");
    strcpy(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " create", threads_per_block_on_create_seq);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " rank", threads_per_block_on_rank);
    strcat(gpu_config_string, gpu_config);
    sprintf(gpu_config, "%29s\t%25d\n", " verify", threads_per_block_on_full_verify);
    strcat(gpu_config_string, gpu_config);

    /* the final printout: 5 partial checks per iteration plus 1 full check */
    if(passed_verification != 5*MAX_ITERATIONS+1) {passed_verification = 0;}
    printf("%s\n", passed_verification ? "PASS" : "FAIL");

    cudaFree(key_array_device);
    cudaFree(key_buff1_device);
    cudaFree(key_buff2_device);
    cudaFree(index_array_device);
    cudaFree(rank_array_device);
    cudaFree(partial_verify_vals_device);
    cudaFree(passed_verification_device);
    cudaFree(sum_device);

    return 0;
}
namespace MLCommon {
namespace Score {
// Shared fixture for the r2_score tests; exists only so related tests can be
// grouped via typedefs (ScoreTestHighScore / ScoreTestLowScore).
class ScoreTest : public ::testing::Test {
 protected:
  // No per-test state is needed; the hooks are intentionally empty.
  void SetUp() override {}
  void TearDown() override {}
};
// r2_score on predictions uniformly offset by +0.02 from the labels:
// SS_res = 5 * 0.02^2 = 0.002, SS_tot = 0.1  =>  R^2 = 1 - 0.002/0.1 = 0.98.
typedef ScoreTest ScoreTestHighScore;
TEST(ScoreTestHighScore, Result)
{
  float y[5]     = {0.1, 0.2, 0.3, 0.4, 0.5};
  float y_hat[5] = {0.12, 0.22, 0.32, 0.42, 0.52};
  cudaStream_t stream = 0;
  CUDA_CHECK(cudaStreamCreate(&stream));

  rmm::device_uvector<float> d_y(5, stream);
  rmm::device_uvector<float> d_y_hat(5, stream);
  raft::update_device(d_y_hat.data(), y_hat, 5, stream);
  raft::update_device(d_y.data(), y, 5, stream);

  auto result = MLCommon::Score::r2_score(d_y.data(), d_y_hat.data(), 5, stream);
  // Fixed: exact float equality (result == 0.98f) is brittle for a value
  // computed on the device; compare within a small tolerance instead.
  ASSERT_NEAR(result, 0.98f, 1e-6f);
  CUDA_CHECK(cudaStreamDestroy(stream));
}
// r2_score on poor predictions (y_hat = 0.1*y + 0.002):
// SS_res ~= 0.44012, SS_tot = 0.1  =>  R^2 ~= 1 - 4.4012 = -3.4012.
typedef ScoreTest ScoreTestLowScore;
TEST(ScoreTestLowScore, Result)
{
  float y[5]     = {0.1, 0.2, 0.3, 0.4, 0.5};
  float y_hat[5] = {0.012, 0.022, 0.032, 0.042, 0.052};
  cudaStream_t stream = 0;
  CUDA_CHECK(cudaStreamCreate(&stream));

  rmm::device_uvector<float> d_y(5, stream);
  rmm::device_uvector<float> d_y_hat(5, stream);
  raft::update_device(d_y_hat.data(), y_hat, 5, stream);
  raft::update_device(d_y.data(), y, 5, stream);

  auto result = MLCommon::Score::r2_score(d_y.data(), d_y_hat.data(), 5, stream);
  std::cout << "Result: " << result - -3.4012f << std::endl;
  // Fixed: the original check `result - -3.4012f < 0.00001` had no absolute
  // value, so any result far BELOW -3.4012 would also pass. ASSERT_NEAR
  // bounds the error on both sides.
  ASSERT_NEAR(result, -3.4012f, 0.00001);
  CUDA_CHECK(cudaStreamDestroy(stream));
}
// Tests for accuracy_score
// Parameter set for one accuracy_score test case.
struct AccuracyInputs {
  /**
   * Number of predictions.
   */
  int n;
  /**
   * Number of predictions w/ different values than their corresponding element in reference
   * predictions. Valid range [0, n]. changed_n in [0, n] will yield accuracy of (n - changed_n) /
   * n.
   */
  int changed_n;
  /**
   * Seed for randomly generated predictions.
   */
  unsigned long long int seed;
};
// Streams an AccuracyInputs in brace-delimited form so gtest failure messages
// show the parameters of the failing case.
std::ostream& operator<<(::std::ostream& os, const AccuracyInputs& acc_inputs)
{
  os << "AccuracyInputs are {";
  os << acc_inputs.n << ", " << acc_inputs.changed_n << ", " << acc_inputs.seed;
  os << "}" << std::endl;
  return os;
}
// Device kernel: overwrites the first `changed_n` predictions with a value
// that differs from the reference (reference + 1), so exactly `changed_n`
// entries mismatch. One thread per element; excess threads exit early.
template <typename T>
__global__ void change_vals(T* predictions, T* ref_predictions, const int changed_n)
{
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= changed_n) return;  // only the leading prefix is modified
  predictions[idx] = ref_predictions[idx] + 1;
}
// Fixture for accuracy_score: builds a random reference prediction vector,
// copies it, corrupts the first params.changed_n entries of the copy, and
// records the computed vs. expected accuracy (n - changed_n) / n.
template <typename T>
class AccuracyTest : public ::testing::TestWithParam<AccuracyInputs> {
 protected:
  void SetUp() override
  {
    params = ::testing::TestWithParam<AccuracyInputs>::GetParam();
    ASSERT((params.changed_n <= params.n) && (params.changed_n >= 0), "Invalid params.");

    raft::random::Rng r(params.seed);
    CUDA_CHECK(cudaStreamCreate(&stream));
    raft::allocate(predictions, params.n, stream);
    raft::allocate(ref_predictions, params.n, stream);
    r.normal(ref_predictions, params.n, (T)0.0, (T)1.0, stream);
    raft::copy_async(predictions, ref_predictions, params.n, stream);
    CUDA_CHECK(cudaStreamSynchronize(stream));

    // Modify params.changed_n unique predictions to a different value. New value is irrelevant.
    if (params.changed_n > 0) {
      int threads = 64;
      int blocks  = raft::ceildiv(params.changed_n, threads);
      //@todo Could also generate params.changed_n unique random positions in [0, n) range, instead
      // of changing the first ones.
      change_vals<T>
        <<<blocks, threads, 0, stream>>>(predictions, ref_predictions, params.changed_n);
      CUDA_CHECK(cudaGetLastError());
      CUDA_CHECK(cudaStreamSynchronize(stream));
    }

    computed_accuracy =
      MLCommon::Score::accuracy_score<T>(predictions, ref_predictions, params.n, stream);
    ref_accuracy = (params.n - params.changed_n) * 1.0f / params.n;
  }

  // Releases the device buffers and resets the cached results to sentinels.
  void TearDown() override
  {
    CUDA_CHECK(cudaFree(predictions));
    CUDA_CHECK(cudaFree(ref_predictions));
    CUDA_CHECK(cudaStreamDestroy(stream));
    computed_accuracy = -1.0f;
    ref_accuracy = -1.0f;
  }

  AccuracyInputs params;                  // parameters of the current case
  T *predictions, *ref_predictions;       // device buffers of length params.n
  float computed_accuracy, ref_accuracy;  // result under test / expectation
  cudaStream_t stream = 0;
};
// Parameter sets covering the accuracy boundaries: single element, perfect,
// fully wrong, and mixed predictions. {n, changed_n, seed}.
const std::vector<AccuracyInputs> inputs = {
{1, 1, 1234ULL}, // single element, wrong prediction
{1, 0, 1234ULL}, // single element, perfect prediction
{2, 1, 1234ULL}, // multiple elements, 0.5 accuracy
{1000, 0, 1234ULL}, // multiple elements, perfect predictions
{1000, 1000, 1234ULL}, // multiple elements, no correct predictions
{1000, 80, 1234ULL}, // multiple elements, prediction mix
{1000, 45, 1234ULL} // multiple elements, prediction mix
};
// Instantiate the accuracy test for float and double predictions. The
// expected accuracy is an exact integer ratio, so equality comparison is safe.
typedef AccuracyTest<float> AccuracyTestF;
TEST_P(AccuracyTestF, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
typedef AccuracyTest<double> AccuracyTestD;
TEST_P(AccuracyTestD, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestF, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestD, ::testing::ValuesIn(inputs));
// Tests for regression_metrics
// Parameter set for the regression_metrics tests. Inputs are either hardcoded
// vectors or uniform-random arrays drawn from the given ranges.
template <typename T>
struct RegressionInputs {
T tolerance; // tolerance for comparing computed vs. reference metrics
int n; // number of predictions
bool hardcoded_preds; // (hardcoded_preds) ? use predictions, ref_predictions : use randomly
// generated arrays.
std::vector<T> predictions;
std::vector<T> ref_predictions;
T predictions_range[2]; // predictions in predictions_range if not hardcoded_preds
T ref_predictions_range[2]; // predictions in ref_predictions_range if not hardcoded_preds
unsigned long long int seed;
};
// Stream a human-readable dump of a RegressionInputs parameter set (used by
// gtest failure messages).
// Fix: the prediction/reference ranges are only meaningful when the inputs
// are randomly generated (hardcoded_preds == false); previously they were
// printed in the hardcoded branch (always zeros) and dropped in the random
// branch where they actually matter.
template <typename T>
std::ostream& operator<<(std::ostream& os, const RegressionInputs<T>& reg_inputs)
{
  os << "RegressionInputs are {" << reg_inputs.tolerance << ", " << reg_inputs.n << ", "
     << reg_inputs.hardcoded_preds << ", ";
  if (reg_inputs.hardcoded_preds) {
    // Hardcoded case: dump the vectors; the ranges are unused.
    os << "{";
    for (int i = 0; i < reg_inputs.n; i++)
      os << reg_inputs.predictions[i] << ", ";
    os << "}, {";
    for (int i = 0; i < reg_inputs.n; i++)
      os << reg_inputs.ref_predictions[i] << ", ";
    os << "}, {}, {}";
  } else {
    // Random case: the vectors are generated at SetUp time; dump the ranges.
    os << "{}, {}, ";
    os << "{" << reg_inputs.predictions_range[0] << ", " << reg_inputs.predictions_range[1]
       << "}, ";
    os << "{" << reg_inputs.ref_predictions_range[0] << ", "
       << reg_inputs.ref_predictions_range[1] << "}";
  }
  os << ", " << reg_inputs.seed;
  return os;
}
// CPU reference implementation of the metrics computed on device:
//   regression_metrics[0] = mean absolute error
//   regression_metrics[1] = mean squared error
//   regression_metrics[2] = median absolute error
// `regression_metrics` must have size >= 3. It is left untouched when n <= 0
// (guards against division by zero and an out-of-bounds median index).
template <typename T>
void host_regression_computations(std::vector<T>& predictions,
                                  std::vector<T>& ref_predictions,
                                  const int n,
                                  std::vector<double>& regression_metrics)
{
  if (n <= 0) return;
  double abs_difference_sum = 0;
  double mse_sum            = 0;
  std::vector<double> abs_diffs(n);
  for (int i = 0; i < n; i++) {
    const double diff     = predictions[i] - ref_predictions[i];
    const double abs_diff = diff < 0 ? -diff : diff;
    abs_difference_sum += abs_diff;
    mse_sum += diff * diff;
    abs_diffs[i] = abs_diff;
  }
  regression_metrics[0] = abs_difference_sum / n;
  regression_metrics[1] = mse_sum / n;
  // Median of absolute differences: middle element for odd n, average of the
  // two middle elements for even n.
  std::sort(abs_diffs.begin(), abs_diffs.end());
  const int middle = n / 2;
  if (n % 2 == 1) {
    regression_metrics[2] = abs_diffs[middle];
  } else {
    regression_metrics[2] = (abs_diffs[middle] + abs_diffs[middle - 1]) / 2;
  }
}
// Fixture for regression_metrics: computes {MAE, MSE, median abs error} on
// device and a host reference via host_regression_computations, then the
// TEST_P bodies compare the two within params.tolerance.
template <typename T>
class RegressionMetricsTest : public ::testing::TestWithParam<RegressionInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<RegressionInputs<T>>::GetParam();
// Sentinel -1.0 in all three slots so a metric that is never written is
// visible as a mismatch.
computed_regression_metrics.assign(3, -1.0);
ref_regression_metrics.assign(3, -1.0);
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(d_predictions, params.n, stream);
raft::allocate(d_ref_predictions, params.n, stream);
if (params.hardcoded_preds) {
// Hardcoded case: host vectors are the source of truth; push them to device.
raft::update_device(d_predictions, params.predictions.data(), params.n, stream);
raft::update_device(d_ref_predictions, params.ref_predictions.data(), params.n, stream);
} else {
params.predictions.resize(params.n);
params.ref_predictions.resize(params.n);
raft::random::Rng r(params.seed);
// randomly generate arrays
r.uniform(
d_predictions, params.n, params.predictions_range[0], params.predictions_range[1], stream);
r.uniform(d_ref_predictions,
params.n,
params.ref_predictions_range[0],
params.ref_predictions_range[1],
stream);
// copy to host to compute reference regression metrics
raft::update_host(params.predictions.data(), d_predictions, params.n, stream);
raft::update_host(params.ref_predictions.data(), d_ref_predictions, params.n, stream);
// Host copies must be complete before host_regression_computations runs.
CUDA_CHECK(cudaStreamSynchronize(stream));
}
MLCommon::Score::regression_metrics(d_predictions,
d_ref_predictions,
params.n,
stream,
computed_regression_metrics[0],
computed_regression_metrics[1],
computed_regression_metrics[2]);
host_regression_computations(
params.predictions, params.ref_predictions, params.n, ref_regression_metrics);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(d_predictions));
CUDA_CHECK(cudaFree(d_ref_predictions));
}
RegressionInputs<T> params;
T *d_predictions, *d_ref_predictions; // device arrays of length params.n
std::vector<double> computed_regression_metrics; // device-side results
std::vector<double> ref_regression_metrics;      // host reference results
cudaStream_t stream = 0;
};
// {tolerance, n, hardcoded_preds, predictions, ref_predictions,
//  predictions_range, ref_predictions_range, seed}
const std::vector<RegressionInputs<float>> regression_inputs_float = {
{0.00001f, 1, true, {10.2f}, {20.2f}, {}, {}, 1234ULL}, // single element
{0.00001f, 2, true, {10.2f, 40.2f}, {20.2f, 80.2f}, {}, {}, 1234ULL}, // two elements, mean same
// as median
// next three inputs should result in identical regression metrics values
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.00001f,
6,
true,
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.00001f,
6,
true,
{40.5f, 55.5f, 20.5f, 120.5f, 100.5f, 80.5f},
{20.5f, 30.5f, 10.5f, 60.5f, 50.5f, 40.5f},
{},
{},
1234ULL}, // mix
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 30.5f, 40.5f, 50.5f, 60.5f, 70.5f},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.00001f,
2048,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, even number of elements
{0.00001f,
2049,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, odd number of elements
{0.00001f,
1024,
false,
{},
{},
{0.0f, 2048.0f},
{8192.0f, 16384.0f},
1234ULL}, // random mix, diffs are all negative
{0.00001f,
1024,
false,
{},
{},
{8192.0f, 16384.0f},
{0.0f, 2048.0f},
1234ULL} // random mix, diffs are all positive
};
// Double-precision mirror of regression_inputs_float with a tighter tolerance.
const std::vector<RegressionInputs<double>> regression_inputs_double = {
{0.0000001, 1, true, {10.2}, {20.2}, {}, {}, 1234ULL}, // single element
{0.0000001, 2, true, {10.2, 40.2}, {20.2, 80.2}, {}, {}, 1234ULL}, // two elements
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.0000001,
6,
true,
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.0000001,
6,
true,
{40.5, 55.5, 20.5, 120.5, 100.5, 80.5},
{20.5, 30.5, 10.5, 60.5, 50.5, 40.5},
{},
{},
1234ULL}, // mix
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 30.5, 40.5, 50.5, 60.5, 70.5},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.0000001,
2048,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, even number of elements
{0.0000001,
2049,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, odd number of elements
{0.0000001, 1024, false, {}, {}, {0, 2048}, {8192.0, 16384.0}, 1234ULL}, // random mix, diffs are
// all negative
{0.0000001, 1024, false, {}, {}, {8192.0, 16384.0}, {0.0, 2048}, 1234ULL} // random mix, diffs
// are all positive
};
// Compare each of the three metrics (MAE, MSE, median absolute error)
// against the host reference within the configured relative tolerance.
typedef RegressionMetricsTest<float> RegressionMetricsTestF;
TEST_P(RegressionMetricsTestF, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<float>(params.tolerance)));
}
}
typedef RegressionMetricsTest<double> RegressionMetricsTestD;
TEST_P(RegressionMetricsTestD, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestF,
::testing::ValuesIn(regression_inputs_float));
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestD,
::testing::ValuesIn(regression_inputs_double));
} // end namespace Score
} // end namespace MLCommon
#include "IPsecAuthHMACSHA1_kernel.hh"
/* The index is given by the order in get_used_datablocks(). */
#define dbid_enc_payloads_d (0)
#define dbid_flow_ids_d (1)
#define SHA1_THREADS_PER_BLK 32
extern "C" {
//__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE];
// Reverse the byte order of a 32-bit word (endianness conversion for the
// big-endian SHA-1 word layout).
__device__ static uint32_t swap(uint32_t v) {
	return (v << 24) | ((v & 0x0000ff00U) << 8)
	    | ((v >> 8) & 0x0000ff00U) | (v >> 24);
}
// Running SHA-1 state: the five 32-bit chaining words (h1..h5).
typedef struct hash_digest {
uint32_t h1;
uint32_t h2;
uint32_t h3;
uint32_t h4;
uint32_t h5;
} hash_digest_t;
#define HMAC
// Load the 64-byte SHA-1 input block starting at `offset` into dest[16] as
// big-endian words, applying SHA-1 padding (0x80 terminator, zero fill,
// 64-bit bit length) when the end of the `len`-byte message falls inside or
// before this block. `offset` is assumed to be a multiple of 64 (the caller
// advances it by 64 per block), so len % 4 == (len - offset) % 4 below.
// With HMAC defined, the recorded bit length is (len + 64) * 8 because the
// 64-byte XORed key pad hashed before the message counts toward the total.
__inline__ __device__ static void getBlock(char* buf, int offset, int len, uint32_t* dest)
{
uint32_t *tmp;
unsigned int tempbuf[16];
tmp = (uint32_t*) (buf + offset);
//printf("%d %d\n", offset, len);
if (offset + 64 <= len) {
// Case 0: a full block of message data — no padding needed.
//printf("--0--\n");
#pragma unroll 16
for (int i = 0; i < 16; i++) {
dest[i] = swap(tmp[i]);
}
} else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding
// Case 1: message tail, terminator, and the length field all fit here.
int i;
for (i = 0; i < (len - offset) / 4; i++) {
//printf("%d %d\n",offset,i);
//printf("%p %p\n", buf, dest);
//tempbuf[i] = buf[i];
tempbuf[i] = swap(tmp[i]);
}
// Place the 0x80 terminator right after the last message byte. The
// constants look byte-reversed because they are composed in little-endian
// order and byte-swapped afterwards.
//printf("len%%4 %d\n",len%4);
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 14; i++) {
tempbuf[i] = 0;
}
#pragma unroll 14
for (i = 0; i < 14; i++) {
dest[i] = tempbuf[i];
}
// High word of the 64-bit length is always 0: messages are assumed to be
// shorter than 2^32 bits.
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding
// Case 2: the terminator fits but the length field does not; a final
// padding-only block (offset > len case below) will carry the length.
//printf("--2--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
tempbuf[i] = swap(tmp[i]);
}
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 16; i++) {
tempbuf[i] = 0x00000000;
}
#pragma unroll 16
for (i = 0; i < 16; i++) {
dest[i] = tempbuf[i];
}
} else if (offset == len) { //message end is aligned in 64 bytes
// Case 3: message ended exactly on a block boundary; this block is the
// terminator plus the length field.
//printf("--3--\n");
dest[0] = swap(0x00000080);
#pragma unroll 13
for (int i = 1; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (offset > len) { //the last block in case 2
// Case 4: terminator was already emitted by case 2; this block is zero
// fill plus the length field.
//printf("--4--\n");
#pragma unroll 14
for (int i = 0; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else {
printf("Not supposed to happen\n");
}
}
// One SHA-1 compression over the 64-byte block of `in` starting at `offset`
// (getBlock supplies the big-endian words and handles the final-block
// padding), accumulating the result into the chaining state `h`.
// `w` is a caller-provided 16-word rolling message schedule, indexed modulo
// 16; rounds 0-63 expand it in place exactly as the unrolled original did
// (rounds 64-79 consume the last expanded words without producing new ones).
__device__ static void computeSHA1Block(char* in, uint32_t* w, int offset, int len,
		hash_digest_t &h) {
	uint32_t a = h.h1;
	uint32_t b = h.h2;
	uint32_t c = h.h3;
	uint32_t d = h.h4;
	uint32_t e = h.h5;
	getBlock(in, offset, len, w);
#pragma unroll
	for (int t = 0; t < 80; t++) {
		uint32_t f, k;
		if (t < 20) {
			f = (b & c) | ((~b) & d);
			k = 0x5A827999;
		} else if (t < 40) {
			f = b ^ c ^ d;
			k = 0x6ED9EBA1;
		} else if (t < 60) {
			f = (b & c) | (b & d) | (c & d);
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;
			k = 0xCA62C1D6;
		}
		const uint32_t temp = ((a << 5) | (a >> 27)) + f + e + k + w[t & 15];
		if (t < 64) {
			// In-place schedule expansion: W[t+16] = rotl1(W[t+13]^W[t+8]^W[t+2]^W[t]),
			// stored back into the slot just consumed.
			const uint32_t wn = w[(t + 13) & 15] ^ w[(t + 8) & 15]
					^ w[(t + 2) & 15] ^ w[t & 15];
			w[t & 15] = (wn << 1) | (wn >> 31);
		}
		e = d;
		d = c;
		c = (b << 30) | (b >> 2);
		b = a;
		a = temp;
	}
	h.h1 += a;
	h.h2 += b;
	h.h3 += c;
	h.h4 += d;
	h.h5 += e;
}
/*
__global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N)
{
//__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK];
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;//w_shared + 16*threadIdx.x;
hash_digest_t h;
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
int num_iter = (len[index]+63+9)/64;
printf("num_iter %d\n", num_iter);
for(int i = 0; i < num_iter; i++)
computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h);
h.h1 = swap(h.h1);
h.h2 = swap(h.h2);
h.h3 = swap(h.h3);
h.h4 = swap(h.h4);
h.h5 = swap(h.h5);
uint32_t * out = (uint32_t*)(output + index*20);
*(out++) = h.h1;
*(out++) = h.h2;
*(out++) = h.h3;
*(out++) = h.h4;
*(out++) = h.h5;
}
}*/
/*
Somehow `*pad = *pad++ ^ *key++` was optimized in a way that
does not work correctly on the GPU, hence the explicit indexing
in xorpads() below.
*/
// XOR the 64-byte HMAC pad with the key, one 32-bit word at a time.
// Indexed form is deliberate (see the note above about pointer-increment
// miscompilation on the GPU).
__device__ static void xorpads(uint32_t *pad, const uint32_t* key) {
#pragma unroll 16
	for (int i = 0; i < 16; i++)
		pad[i] ^= key[i];
}
// HMAC outer pad: 0x5c repeated over a 64-byte block.
// NOTE(review): opad/ipad are not referenced by the device code in this file
// (HMAC_SHA1 writes the pad constants inline); presumably kept for host-side
// use — confirm before removing.
uint32_t opad[16] =
{ 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, };
// HMAC inner pad: 0x36 repeated over a 64-byte block.
uint32_t ipad[16] =
{ 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, };
// HMAC-SHA1 (RFC 2104): out = H((K ^ opad) || H((K ^ ipad) || message)).
// in: start pointer of the data to be authenticated by hmac-sha1.
// out: start pointer where the 20-byte signature will be recorded; also used
//      as scratch to hold the inner digest between the two passes.
// length: length in bytes of the data to be authenticated.
// key: hmac key; read as 16 32-bit words (64 bytes).
__device__ static void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length,
const char *key) {
uint32_t w_register[16];
uint32_t *w = w_register; //w_shared + 16*threadIdx.x;
hash_digest_t h;
// Build K ^ ipad (0x36 repeated) in w.
for (int i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*) (key));
// SHA-1 initial chaining values.
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA1 compute on message. +63 rounds up to whole blocks; +9 reserves room
//for the 0x80 terminator and the 8-byte length field added by getBlock.
int num_iter = (length + 63 + 9) / 64;
for (int i = 0; i < num_iter; i++)
computeSHA1Block((char*) in, w, i * 64, length, h);
// Inner digest, stored big-endian at out.
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
// Restart the state for the outer hash.
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
// Build K ^ opad (0x5c repeated) in w.
for (int i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*) (key));
//SHA 1 compute on opads
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m), i.e. the 20-byte inner digest at out
computeSHA1Block((char*) out, w, 0, 20, h);
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
}
#if 0 // Disabled legacy entry points; computeHMAC_SHA1_3 below is the active kernel.
// One HMAC-SHA1 per thread; the digest is written at buf + outputs[index].
__global__ void computeHMAC_SHA1(char* buf, char* keys, uint32_t *offsets,
uint32_t *lengths, uint32_t *outputs, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + outputs[index]);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
// Variant that appends the digest immediately after each payload.
__global__ void computeHMAC_SHA1_2(char* buf, char* keys, uint32_t *offsets,
uint16_t *lengths, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
printf("index%d threadid%d\n", index, threadIdx.x);
uint32_t offset = offsets[index];
uint32_t length = lengths[index];
uint32_t *out = (uint32_t*) (buf + offset + length);
HMAC_SHA1((uint32_t*) (buf + offset), out, length, keys + 64 * index);
}
}
#endif
// Per-item HMAC-SHA1 kernel: one thread authenticates one payload.
// datablocks:     device table of per-datablock argument structs.
// count:          total number of work items across all batches.
// item_counts / num_batches: accumulated per-batch item counts used by
//                 nba::get_accum_idx to map the flat thread index to
//                 (batch_idx, item_idx).
// checkbits_d:    optional per-block completion flags; thread 0 of every
//                 block sets its flag once the block is done.
// hmac_key_array: per-flow key table indexed by flow id.
__global__ void computeHMAC_SHA1_3(
        struct datablock_kernel_arg **datablocks,
        uint32_t count, uint32_t *item_counts, uint32_t num_batches,
        uint8_t *checkbits_d,
        struct hmac_sa_entry *hmac_key_array)
{
    uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < count && count != 0) {
        uint32_t batch_idx, item_idx;
        nba::error_t err;
        err = nba::get_accum_idx(item_counts, num_batches, idx, batch_idx, item_idx);
        assert(err == nba::NBA_SUCCESS);
        const struct datablock_kernel_arg *db_enc_payloads = datablocks[dbid_enc_payloads_d];
        const struct datablock_kernel_arg *db_flow_ids = datablocks[dbid_flow_ids_d];
        const uint8_t *enc_payload_base = (uint8_t *) db_enc_payloads->batches[batch_idx].buffer_bases;
        const uintptr_t offset = (uintptr_t) db_enc_payloads->batches[batch_idx].item_offsets[item_idx].as_value<uintptr_t>();
        const uintptr_t length = (uintptr_t) db_enc_payloads->batches[batch_idx].item_sizes[item_idx];
        if (enc_payload_base != NULL && length != 0) {
            const uint64_t flow_id = ((uint64_t *) db_flow_ids->batches[batch_idx].buffer_bases)[item_idx];
            if (flow_id != 65536) {  // 65536 presumably marks "no flow" -- TODO confirm sentinel
                assert(flow_id < 1024);
                const char *hmac_key = (char *) hmac_key_array[flow_id].hmac_key;
                // The 20-byte digest is written immediately after the payload.
                HMAC_SHA1((uint32_t *) (enc_payload_base + offset),
                          (uint32_t *) (enc_payload_base + offset + length),
                          length, hmac_key);
            }
        }
    } // endif(valid-idx)
    // BUGFIX: the barrier and the completion-flag store previously sat inside
    // the idx < count branch. __syncthreads() inside divergent control flow
    // is undefined behavior (threads with idx >= count never reach it), and a
    // block whose threads were all out of range would never set its check bit,
    // stalling the host-side completion poll. Both now execute unconditionally.
    __syncthreads();
    if (threadIdx.x == 0 && checkbits_d != NULL)
        checkbits_d[blockIdx.x] = 1;
}
}
// Exposes the HMAC-SHA1 kernel's device entry point as an opaque pointer so
// host code outside this translation unit can launch it.
void *nba::ipsec_hsha1_encryption_get_cuda_kernel() {
    return reinterpret_cast<void *>(&computeHMAC_SHA1_3);
}
// vim: ts=8 sts=4 sw=4 et tw=150
// Reference outputs (calculated on an M40 GPU)
// > ./RNN 20 2 512 64 0
// Forward: 1299 GFLOPs
// Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs)
// i checksum 1.315793E+06 h checksum 1.315212E+05
// di checksum 6.676003E+01 dh checksum 6.425067E+01
// dw checksum 1.453750E+09
//
// > ./RNN 20 2 512 64 1
// Forward: 1296 GFLOPs
// Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs)
// i checksum 6.319591E+05 h checksum 6.319605E+04
// di checksum 4.501830E+00 dh checksum 4.489546E+00
// dw checksum 5.012598E+07
//
// > ./RNN 20 2 512 64 2
// Forward: 2635 GFLOPs
// Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs)
// i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04
// di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01
// dw checksum 4.313461E+08
//
// > ./RNN 20 2 512 64 3
// Forward: 2428 GFLOPs
// Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs)
// i checksum 6.358978E+05 h checksum 6.281680E+04
// di checksum 6.296622E+00 dh checksum 2.289960E+05
// dw checksum 5.397419E+07
// Define some error checking macros.
// Reports (but does not abort on) CUDA runtime errors with file/line context.
// BUGFIX: the macro body is wrapped in do { } while (0) so it expands safely
// inside un-braced if/else statements; a bare { } block plus the caller's
// trailing semicolon breaks `if (c) cudaErrCheck(x); else ...`.
#define cudaErrCheck(stat) do { cudaErrCheck_((stat), __FILE__, __LINE__); } while (0)
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
    if (stat != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
    }
}
// Reports (but does not abort on) cuDNN status errors with file/line context.
// BUGFIX: do { } while (0) wrapper for safe expansion in un-braced if/else
// (mirrors the cudaErrCheck fix).
#define cudnnErrCheck(stat) do { cudnnErrCheck_((stat), __FILE__, __LINE__); } while (0)
void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) {
    if (stat != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line);
    }
}
// Fill kernel: each thread writes `value` into one slot of data[0..numElements).
__global__ void initGPUData_ker(float *data, int numElements, float value) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;  // guard the grid tail
    data[idx] = value;
}
// Host-side helper: fills a device float buffer with `value`.
// data:        device pointer to at least numElements floats.
// numElements: element count; non-positive counts are a no-op.
// value:       fill value.
void initGPUData(float *data, int numElements, float value) {
    // BUGFIX: a non-positive count previously produced gridDim.x == 0,
    // which is an invalid launch configuration.
    if (numElements <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;  // ceil-div
    initGPUData_ker <<< gridDim, blockDim >>> (data, numElements, value);
    // Surface launch-configuration errors; kernel launches do not return them.
    cudaErrCheck(cudaGetLastError());
}
// Benchmarks one cuDNN RNN configuration: builds descriptors, initializes
// weights/inputs to simple deterministic values, then runs 300 iterations of
// forward training + backward data + backward weights, timing each phase.
// mode selects the cell: 0=ReLU RNN, 1=tanh RNN, 2=LSTM, 3=GRU (anything
// else falls back to ReLU). Returns the forward GFLOPS estimate of the last
// iteration.
extern "C" float runRNN(int seqLength, int numLayers,
                        int hiddenSize, int inputSize,
                        int miniBatch, float dropout, bool bidirectional,
                        int mode)
{
    // -------------------------
    // Create cudnn context
    // -------------------------
    cudnnHandle_t cudnnHandle;
    cudnnErrCheck(cudnnCreate(&cudnnHandle));
    // -------------------------
    // Set up inputs and outputs
    // -------------------------
    void *x;
    void *hx = NULL;
    void *cx = NULL;
    void *dx;
    void *dhx = NULL;
    void *dcx = NULL;
    void *y;
    void *hy = NULL;
    void *cy = NULL;
    void *dy;
    void *dhy = NULL;
    void *dcy = NULL;
    // Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL.
    cudaErrCheck(cudaMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    int ysize = seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1);
    // BUGFIX: y_train/y_test were variable-length stack arrays (non-standard
    // C++, stack-overflow risk for large configurations); use the heap.
    float *y_train = (float*)malloc(ysize * sizeof(float));
    float *y_test = (float*)malloc(ysize * sizeof(float));
    // Synthetic training target: deterministic pattern per element.
    for (int j = 0; j < ysize; j++) {
        y_train[j] = (j % 10) + 20.5;
    }
    cudaErrCheck(cudaMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    cudaErrCheck(cudaMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
    // Set up tensor descriptors. x/y/dx/dy are arrays, one per time step.
    cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc;
    cudnnTensorDescriptor_t hxDesc, cxDesc;
    cudnnTensorDescriptor_t hyDesc, cyDesc;
    cudnnTensorDescriptor_t dhxDesc, dcxDesc;
    cudnnTensorDescriptor_t dhyDesc, dcyDesc;
    xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
    yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
    dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
    dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
    int dimA[3];
    int strideA[3];
    // In this example dimA[1] is constant across the whole sequence.
    // This isn't required, all that is required is that it does not increase.
    for (int i = 0; i < seqLength; i++) {
        cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i]));
        cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i]));
        cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i]));
        cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i]));
        dimA[0] = miniBatch;
        dimA[1] = inputSize;
        dimA[2] = 1;
        strideA[0] = dimA[2] * dimA[1];
        strideA[1] = dimA[2];
        strideA[2] = 1;
        cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
        cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
        dimA[0] = miniBatch;
        dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize;
        dimA[2] = 1;
        strideA[0] = dimA[2] * dimA[1];
        strideA[1] = dimA[2];
        strideA[2] = 1;
        cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
        cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
    }
    // Hidden/cell state descriptors: (layers*dirs, batch, hidden).
    dimA[0] = numLayers * (bidirectional ? 2 : 1);
    dimA[1] = miniBatch;
    dimA[2] = hiddenSize;
    strideA[0] = dimA[2] * dimA[1];
    strideA[1] = dimA[2];
    strideA[2] = 1;
    cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc));
    cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
    // -------------------------
    // Set up the dropout descriptor (needed for the RNN descriptor)
    // -------------------------
    unsigned long long seed = 1337ull; // Pick a seed.
    cudnnDropoutDescriptor_t dropoutDesc;
    cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc));
    // How much memory does dropout need for states?
    // These states are used to generate random numbers internally
    // and should not be freed until the RNN descriptor is no longer used.
    size_t stateSize;
    void *states;
    cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize));
    cudaErrCheck(cudaMalloc(&states, stateSize));
    cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc,
                                            cudnnHandle,
                                            dropout,
                                            states,
                                            stateSize,
                                            seed));
    // -------------------------
    // Set up the RNN descriptor
    // -------------------------
    cudnnRNNDescriptor_t rnnDesc;
    // BUGFIX: RNNMode was left uninitialized for out-of-range `mode` values;
    // default to the ReLU cell.
    cudnnRNNMode_t RNNMode = CUDNN_RNN_RELU;
    cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc));
    if (mode == 0) RNNMode = CUDNN_RNN_RELU;
    else if (mode == 1) RNNMode = CUDNN_RNN_TANH;
    else if (mode == 2) RNNMode = CUDNN_LSTM;
    else if (mode == 3) RNNMode = CUDNN_GRU;
    cudnnErrCheck(cudnnSetRNNDescriptor(rnnDesc,
                                        hiddenSize,
                                        numLayers,
                                        dropoutDesc,
                                        CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
                                        bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL,
                                        RNNMode,
                                        CUDNN_DATA_FLOAT));
    // -------------------------
    // Set up parameters
    // -------------------------
    // This needs to be done after the rnn descriptor is set as otherwise
    // we don't know how many parameters we have to allocate.
    void *w;
    void *dw;
    cudnnFilterDescriptor_t wDesc, dwDesc;
    cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc));
    cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc));
    size_t weightsSize;  // NOTE: in bytes
    cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT));
    int dimW[3];
    dimW[0] = weightsSize / sizeof(float);
    dimW[1] = 1;
    dimW[2] = 1;
    cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
    cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
    cudaErrCheck(cudaMalloc((void**)&w, weightsSize));
    cudaErrCheck(cudaMalloc((void**)&dw, weightsSize));
    // -------------------------
    // Set up work space and reserved memory
    // -------------------------
    void *workspace;
    void *reserveSpace;
    size_t workSize;
    size_t reserveSize;
    // Needed for every pass.
    cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize));
    // Only needed in training, shouldn't be touched between passes.
    cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize));
    cudaErrCheck(cudaMalloc((void**)&workspace, workSize));
    cudaErrCheck(cudaMalloc((void**)&reserveSpace, reserveSize));
    // *********************************************************************************************************
    // Initialise weights and inputs
    // *********************************************************************************************************
    // We initialise to something simple.
    // Matrices are initialised to 1 / matrixSize, biases to 1, data is 1.
    initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f);
    if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
    if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
    initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
    if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
    if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
    // Weights: number of gate matrices per layer depends on the cell type.
    int numLinearLayers = 0;
    if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) {
        numLinearLayers = 2;
    }
    else if (RNNMode == CUDNN_LSTM) {
        numLinearLayers = 8;
    }
    else if (RNNMode == CUDNN_GRU) {
        numLinearLayers = 6;
    }
    for (int layer = 0; layer < numLayers * (bidirectional ? 2 : 1); layer++) {
        for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) {
            cudnnFilterDescriptor_t linLayerMatDesc;
            cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc));
            float *linLayerMat;
            cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle,
                                                           rnnDesc,
                                                           layer,
                                                           xDesc[0],
                                                           wDesc,
                                                           w,
                                                           linLayerID,
                                                           linLayerMatDesc,
                                                           (void**)&linLayerMat));
            cudnnDataType_t dataType;
            cudnnTensorFormat_t format;
            int nbDims;
            int filterDimA[3];
            cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc,
                                                     3,
                                                     &dataType,
                                                     &format,
                                                     &nbDims,
                                                     filterDimA));
            initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2]));
            cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc));
            cudnnFilterDescriptor_t linLayerBiasDesc;
            cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc));
            float *linLayerBias;
            cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle,
                                                         rnnDesc,
                                                         layer,
                                                         xDesc[0],
                                                         wDesc,
                                                         w,
                                                         linLayerID,
                                                         linLayerBiasDesc,
                                                         (void**)&linLayerBias));
            cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc,
                                                     3,
                                                     &dataType,
                                                     &format,
                                                     &nbDims,
                                                     filterDimA));
            initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f);
            cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc));
        }
    }
    // *********************************************************************************************************
    // At this point all of the setup is done. We now need to pass through the RNN.
    // *********************************************************************************************************
    // BUGFIX: rval was uninitialized (benign today since the loop always runs,
    // but fragile).
    float rval = 0.0f;
    cudaErrCheck(cudaDeviceSynchronize());
    for (int i = 0; i < 300; i++) {
        cudaEvent_t start, stop;
        float timeForward, timeBackward1, timeBackward2;
        cudaErrCheck(cudaEventCreate(&start));
        cudaErrCheck(cudaEventCreate(&stop));
        cudaErrCheck(cudaEventRecord(start));
        // (cudnnRNNForwardInference would replace this call when not training.)
        cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle,
                                              rnnDesc,
                                              seqLength,
                                              xDesc,
                                              x,
                                              hxDesc,
                                              hx,
                                              cxDesc,
                                              cx,
                                              wDesc,
                                              w,
                                              yDesc,
                                              y,
                                              hyDesc,
                                              hy,
                                              cyDesc,
                                              cy,
                                              workspace,
                                              workSize,
                                              reserveSpace,
                                              reserveSize));
        cudaErrCheck(cudaEventRecord(stop));
        cudaErrCheck(cudaEventSynchronize(stop));
        cudaErrCheck(cudaEventElapsedTime(&timeForward, start, stop));
        cudaErrCheck(cudaEventRecord(start));
        // Compute a dy = y - y_train on the host.
        // BUGFIX: cudaMemcpy takes a BYTE count; these three transfers used
        // `ysize` (an element count) and so moved only a quarter of the data.
        cudaErrCheck(cudaMemcpy(y_test, y, ysize * sizeof(float), cudaMemcpyDeviceToHost));
        for (int j = 0; j < ysize; j++) {
            y_test[j] -= y_train[j];
        }
        cudaErrCheck(cudaMemcpy(dy, y_test, ysize * sizeof(float), cudaMemcpyHostToDevice));
        cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle,
                                           rnnDesc,
                                           seqLength,
                                           yDesc,
                                           y,
                                           dyDesc,
                                           dy,
                                           dhyDesc,
                                           dhy,
                                           dcyDesc,
                                           dcy,
                                           wDesc,
                                           w,
                                           hxDesc,
                                           hx,
                                           cxDesc,
                                           cx,
                                           dxDesc,
                                           dx,
                                           dhxDesc,
                                           dhx,
                                           dcxDesc,
                                           dcx,
                                           workspace,
                                           workSize,
                                           reserveSpace,
                                           reserveSize ));
        cudaErrCheck(cudaEventRecord(stop));
        cudaErrCheck(cudaEventSynchronize(stop));
        cudaErrCheck(cudaEventElapsedTime(&timeBackward1, start, stop));
        cudaErrCheck(cudaEventRecord(start));
        // cudnnRNNBackwardWeights adds to the data in dw.
        cudaErrCheck(cudaMemset(dw, 0, weightsSize));
        cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle,
                                               rnnDesc,
                                               seqLength,
                                               xDesc,
                                               x,
                                               hxDesc,
                                               hx,
                                               yDesc,
                                               y,
                                               workspace,
                                               workSize,
                                               dwDesc,
                                               dw,
                                               reserveSpace,
                                               reserveSize ));
        // BUGFIX: removed a stray cudaEventSynchronize(stop) that ran BEFORE
        // this phase's stop event was recorded (it waited on the previous
        // recording and was at best a no-op).
        cudaErrCheck(cudaEventRecord(stop));
        cudaErrCheck(cudaEventSynchronize(stop));
        cudaErrCheck(cudaEventElapsedTime(&timeBackward2, start, stop));
        if (true) {
            // Pull results back to the host (sanity/staging copies).
            // BUGFIX: testOutputw was a stack VLA of weightsSize *bytes* worth
            // of floats (4x too large); testY was allocated/copied with an
            // element count instead of a byte count; testOutputw leaked.
            float *testOutputdw = (float*)malloc(weightsSize);
            float *testOutputw = (float*)malloc(weightsSize);
            float *testY = (float*)malloc(ysize * sizeof(float));
            cudaErrCheck(cudaMemcpy(testY, y, ysize * sizeof(float), cudaMemcpyDeviceToHost));
            cudaErrCheck(cudaMemcpy(testOutputdw, dw, weightsSize, cudaMemcpyDeviceToHost));
            cudaErrCheck(cudaMemcpy(testOutputw, w, weightsSize, cudaMemcpyDeviceToHost));
            free(testOutputdw);
            free(testOutputw);
            free(testY);
        }
        printf("Epoch %d \n",i);
        int numMats = 0;
        if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) {
            numMats = 2;
        }
        else if (RNNMode == CUDNN_LSTM) {
            numMats = 8;
        }
        else if (RNNMode == CUDNN_GRU) {
            numMats = 6;
        }
        printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward));
        rval = numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward);
        // BUGFIX: events were created every iteration but never destroyed
        // (300 leaked event pairs per call).
        cudaErrCheck(cudaEventDestroy(start));
        cudaErrCheck(cudaEventDestroy(stop));
        (void)timeBackward1;  // measured but currently not reported
        (void)timeBackward2;
    }
    // *********************************************************************************************************
    // Cleanup (previously the descriptors, host arrays, and dropout states
    // were all leaked).
    // *********************************************************************************************************
    for (int i = 0; i < seqLength; i++) {
        cudnnErrCheck(cudnnDestroyTensorDescriptor(xDesc[i]));
        cudnnErrCheck(cudnnDestroyTensorDescriptor(yDesc[i]));
        cudnnErrCheck(cudnnDestroyTensorDescriptor(dxDesc[i]));
        cudnnErrCheck(cudnnDestroyTensorDescriptor(dyDesc[i]));
    }
    free(xDesc);
    free(yDesc);
    free(dxDesc);
    free(dyDesc);
    cudnnErrCheck(cudnnDestroyTensorDescriptor(hxDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(cxDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(hyDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(cyDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(dhxDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(dcxDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(dhyDesc));
    cudnnErrCheck(cudnnDestroyTensorDescriptor(dcyDesc));
    cudnnErrCheck(cudnnDestroyFilterDescriptor(wDesc));
    cudnnErrCheck(cudnnDestroyFilterDescriptor(dwDesc));
    cudnnErrCheck(cudnnDestroyRNNDescriptor(rnnDesc));
    cudnnErrCheck(cudnnDestroyDropoutDescriptor(dropoutDesc));
    cudaFree(states);  // safe now: the RNN descriptor is gone
    free(y_train);
    free(y_test);
    cudaFree(x);
    cudaFree(hx);
    cudaFree(cx);
    cudaFree(y);
    cudaFree(hy);
    cudaFree(cy);
    cudaFree(dx);
    cudaFree(dhx);
    cudaFree(dcx);
    cudaFree(dy);
    cudaFree(dhy);
    cudaFree(dcy);
    cudaFree(workspace);
    cudaFree(reserveSpace);
    cudaFree(w);
    cudaFree(dw);
    cudnnDestroy(cudnnHandle);
    return rval;
}
//extern "C" int cudamain() { return 0; }
#include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCHalf.h"
#include "THCTensorCopy.h"
#include "THCApply.cuh"
#include "THCNumerics.cuh"
#include "THCReduce.cuh"
// Elementwise logistic sigmoid: out = 1 / (1 + exp(-in)).
template <typename T>
struct TensorSigmoidOp {
  // Out-of-place form: writes sigmoid(*in) into *out.
  __device__ __forceinline__ void operator()(T* out, T* in) const {
    T one = (T) 1.0;
    *out = one / (one + THCNumerics<T>::exp(- *in));
  }
  // In-place form.
  __device__ __forceinline__ void operator()(T* v) const {
    T one = (T) 1.0;
    *v = one / (one + THCNumerics<T>::exp(- *v));
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: native fp16 arithmetic when CUDA_HALF_INSTRUCTIONS is
// defined, otherwise computed through float and converted back.
template <>
struct TensorSigmoidOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) const {
#ifdef CUDA_HALF_INSTRUCTIONS
    half one = ScalarConvert<int, half>::to(1);
    *out = hdiv(one, __hadd(one, hexp(__hneg(*in))));
#else
    float fin = __half2float(*in);
    *out = __float2half(1.0f / (1.0f + expf(- fin)));
#endif
  }
  __device__ __forceinline__ void operator()(half* v) const {
#ifdef CUDA_HALF_INSTRUCTIONS
    half one = ScalarConvert<int, half>::to(1);
    *v = hdiv(one, __hadd(one, hexp(__hneg(*v))));
#else
    float fv = __half2float(*v);
    *v = __float2half(1.0f / (1.0f + expf(- fv)));
#endif
  }
};
#endif
// Elementwise sign: -1, 0, or +1 via the (x > 0) - (x < 0) idiom.
template <typename T>
struct TensorSignOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    T orig = *in;
    *out = (orig > 0) - (orig < 0);
  }
  __device__ __forceinline__ void operator()(T* v) {
    T orig = *v;
    *v = (orig > 0) - (orig < 0);
  }
};
// unsigned char specialization: values can't be negative, so sign collapses
// to 0 for zero and 1 for anything else.
template <>
struct TensorSignOp<unsigned char> {
  __device__ __forceinline__ void operator()(unsigned char* out, unsigned char* in) {
    unsigned char orig = *in;
    *out = (orig == 0) ? 0 : 1;
  }
  __device__ __forceinline__ void operator()(unsigned char* v) {
    unsigned char orig = *v;
    *v = (orig == 0) ? 0 : 1;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: comparisons done natively or in float depending on
// CUDA_HALF_INSTRUCTIONS; result converted back to half.
template <>
struct TensorSignOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    half zero = ScalarConvert<int, half>::to(0);
    half orig = *in;
    *out = __float2half((float) __hgt(orig, zero) - (float) __hlt(orig, zero));
#else
    float orig = __half2float(*in);
    *out = __float2half((orig > 0) - (orig < 0));
#endif
  }
  __device__ __forceinline__ void operator()(half* v) {
#ifdef CUDA_HALF_INSTRUCTIONS
    half zero = ScalarConvert<int, half>::to(0);
    half orig = *v;
    *v = __float2half((float) __hgt(orig, zero) - (float) __hlt(orig, zero));
#else
    float orig = __half2float(*v);
    *v = __float2half((orig > 0) - (orig < 0));
#endif
  }
};
#endif
// Elementwise addition: in-place accumulate (2-arg) or out = in1 + in2 (3-arg).
template <typename T>
struct TensorAddOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out += *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 + *in2;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: __hadd when native fp16 is available, float fallback
// otherwise.
template <>
struct TensorAddOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hadd(*out, *in);
#else
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    fout += fin;
    *out = __float2half(fout);
#endif
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hadd(*in1, *in2);
#else
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fout = fin1 + fin2;
    *out = __float2half(fout);
#endif
  }
};
#endif // CUDA_HALF_TENSOR
// Scaled add (axpy-style): out += val * in, or out = in1 + val * in2.
template <typename T>
struct TensorCAddOp {
  TensorCAddOp(T v) : val(v) {}
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out += val * *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 + val * *in2;
  }
  T val;  // scalar multiplier applied to the second operand
};
#ifdef CUDA_HALF_TENSOR
// half specialization: native fp16 fused ops when available, float fallback
// otherwise.
template <>
struct TensorCAddOp<half> {
  TensorCAddOp(half v) : val(v) {}
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hadd(*out, __hmul(val, *in));
#else
    float fout = __half2float(*out);
    float fval = __half2float(val);
    float fin = __half2float(*in);
    fout += fval * fin;
    *out = __float2half(fout);
#endif
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hadd(*in1, __hmul(val, *in2));
#else
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fval = __half2float(val);
    float fout = fin1 + fval * fin2;
    *out = __float2half(fout);
#endif
  }
  half val;  // scalar multiplier applied to the second operand
};
#endif // CUDA_HALF_TENSOR
// Elementwise subtraction: in-place decrement (2-arg) or out = in1 - in2 (3-arg).
template <typename T>
struct TensorSubOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out -= *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 - *in2;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: __hsub when native fp16 is available, float fallback
// otherwise.
template <>
struct TensorSubOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hsub(*out, *in);
#else
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    fout -= fin;
    *out = __float2half(fout);
#endif
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hsub(*in1, *in2);
#else
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fout = fin1 - fin2;
    *out = __float2half(fout);
#endif
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise multiplication: in-place (2-arg) or out = in1 * in2 (3-arg).
template <typename T>
struct TensorMulOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out *= *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 * *in2;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: __hmul when native fp16 is available, float fallback
// otherwise.
template <>
struct TensorMulOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hmul(*out, *in);
#else
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    fout *= fin;
    *out = __float2half(fout);
#endif
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hmul(*in1, *in2);
#else
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fout = fin1 * fin2;
    *out = __float2half(fout);
#endif
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise power with a fixed exponent: out = in ** val.
// Generic case computes through powf (single precision).
template<typename T>
struct TensorPowOp {
  TensorPowOp(T v) : val(v) {}
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = powf((float) *in, (float) val);
  }
  __device__ __forceinline__ void operator()(T* v) {
    *v = powf((float) *v, (float) val);
  }
  const T val;  // fixed exponent
};
// double specialization: keeps full double precision via pow.
template <>
struct TensorPowOp<double> {
  TensorPowOp(double v) : val(v) {}
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = pow(*in, val);
  }
  __device__ __forceinline__ void operator()(double* v) {
    *v = pow(*v, val);
  }
  const double val;
};
#ifdef CUDA_HALF_TENSOR
// half specialization: computed in float (no fp16 pow intrinsic).
template <>
struct TensorPowOp<half> {
  TensorPowOp(half v) : val(v) {}
  __device__ __forceinline__ void operator()(half* out, half* in) {
    // No fp16 pow function yet
    float fin = __half2float(*in);
    float fval = __half2float(val);
    float fout = powf(fin, fval);
    *out = __float2half(fout);
  }
  __device__ __forceinline__ void operator()(half* v) {
    // No fp16 pow function yet
    float fv = __half2float(*v);
    float fval = __half2float(val);
    float fout = powf(fv, fval);
    *v = __float2half(fout);
  }
  const half val;
};
#endif // CUDA_HALF_TENSOR
// Elementwise power with a fixed *base*: out = val ** in
// (the mirror image of TensorPowOp, which fixes the exponent).
template<typename T>
struct TensorTPowOp {
  TensorTPowOp(T v) : val(v) {}
  __device__ __forceinline__ void operator()(T* out, T* in) {
    T exponent = *in;
    *out = THCNumerics<T>::pow(val, exponent);
  }
  __device__ __forceinline__ void operator()(T* v) {
    T exponent = *v;
    *v = THCNumerics<T>::pow(val, exponent);
  }
  const T val;  // fixed base
};
// Elementwise power with a tensor exponent: out = out ** in (2-arg) or
// out = in1 ** in2 (3-arg). Generic case computes through powf.
template <typename T>
struct TensorCPowOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = powf((float) *out, (float) *in);
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = powf((float) *in1, (float) *in2);
  }
};
// double specialization: keeps full double precision via pow.
template <>
struct TensorCPowOp<double> {
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = pow(*out, *in);
  }
  __device__ __forceinline__ void operator()(double* out, double* in1, double* in2) {
    *out = pow(*in1, *in2);
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: computed in float (no fp16 pow intrinsic).
template <>
struct TensorCPowOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
    // No fp16 pow function yet
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    fout = powf(fout, fin);
    *out = __float2half(fout);
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
    // No fp16 pow function yet
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fout = powf(fin1, fin2);
    *out = __float2half(fout);
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise division: in-place (2-arg) or out = in1 / in2 (3-arg).
template <typename T>
struct TensorDivOp {
  __device__ __forceinline__ void
  operator()(T* out, T* in) {
    *out /= *in;
  }
  __device__ __forceinline__ void
  operator()(T* out, T* in1, T* in2) {
    *out = *in1 / *in2;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: always computed through float (comment below notes no
// fp16 divide instruction was available when this was written).
template <>
struct TensorDivOp<half> {
  __device__ __forceinline__ void
  operator()(half* out, half* in) {
    // No fp16 div instruction yet
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    fout /= fin;
    *out = __float2half(fout);
  }
  __device__ __forceinline__ void
  operator()(half* out, half* in1, half* in2) {
    // No fp16 div instruction yet
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    float fout = fin1 / fin2;
    *out = __float2half(fout);
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise remainder with floor semantics: out = a - b * floor(a / b),
// yielding a result with the sign of the divisor; divisor 0 produces NAN.
// NOTE(review): the generic template is reached for integer types, where
// truncating division makes a - b*(a/b) a truncated (C-style) remainder and
// assigning NAN to an integral lvalue is undefined behavior -- confirm the
// intended integer semantics upstream.
template <typename T>
struct TensorCRemainderOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *in != 0 ? *out - *in * (*out / *in) : NAN;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in2 != 0 ? *in1 - *in2 * (*in1 / *in2) : NAN;
  }
};
// float specialization: floor-based remainder.
template <>
struct TensorCRemainderOp<float> {
  __device__ __forceinline__ void operator()(float* out, float* in) {
    *out = *in != 0 ? *out - *in * floorf(*out / *in) : NAN;
  }
  __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
    *out = *in2 != 0 ? *in1 - *in2 * floorf(*in1 / *in2) : NAN;
  }
};
// double specialization: floor-based remainder.
template <>
struct TensorCRemainderOp<double> {
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = *in != 0 ? *out - *in * floor(*out / *in) : NAN;
  }
  __device__ __forceinline__ void operator()(double* out, double* in1, double* in2) {
    *out = *in2 != 0 ? *in1 - *in2 * floor(*in1 / *in2) : NAN;
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: native fp16 path relies on IEEE division producing
// NaN/Inf for a zero divisor; float fallback checks explicitly.
template <>
struct TensorCRemainderOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hsub(*out, __hmul(*in, hfloor(__hdiv(*out, *in))));
#else
    float fout = __half2float(*out);
    float fin = __half2float(*in);
    *out = fin != 0 ? __float2half(fout - fin * floorf(fout / fin)) : __float2half(NAN);
#endif
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
#ifdef CUDA_HALF_INSTRUCTIONS
    *out = __hsub(*in1, __hmul(*in2, hfloor(__hdiv(*in1, *in2))));
#else
    float fin1 = __half2float(*in1);
    float fin2 = __half2float(*in2);
    *out = fin2 != 0 ? __float2half(fin1 - fin2 * floorf(fin1 / fin2)) : __float2half(NAN);
#endif
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise fmod (truncated remainder, result keeps the dividend's sign).
// Generic template uses the % operator, so it applies to integer types.
template <typename T>
struct TensorCFmodOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out % *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 % *in2;
  }
};
// float specialization via fmodf.
template <>
struct TensorCFmodOp<float> {
  __device__ __forceinline__ void operator()(float* out, float* in) {
    *out = fmodf(*out, *in);
  }
  __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
    *out = fmodf(*in1, *in2);
  }
};
// double specialization via fmod.
template <>
struct TensorCFmodOp<double> {
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = fmod(*out, *in);
  }
  __device__ __forceinline__ void operator()(double* out, double* in1, double* in2) {
    *out = fmod(*in1, *in2);
  }
};
#ifdef CUDA_HALF_TENSOR
// half specialization: computed in float via fmodf.
template <>
struct TensorCFmodOp<half> {
  __device__ __forceinline__ void operator()(half* out, half* in) {
    *out = __float2half(fmodf(__half2float(*out), __half2float(*in)));
  }
  __device__ __forceinline__ void operator()(half* out, half* in1, half* in2) {
    *out = __float2half(fmodf(__half2float(*in1), __half2float(*in2)));
  }
};
#endif // CUDA_HALF_TENSOR
// Elementwise clamp to [minValue, maxValue]: first cap at the max, then
// raise to the min (so min wins when minValue > maxValue, as the original).
template <typename T>
struct TensorClampOp {
  TensorClampOp(T min, T max) : minValue(min), maxValue(max) {}
  // Out-of-place: *out = clamp(*in).
  __device__ __forceinline__ void operator()(T* out, T* in) {
    T clipped = THCNumerics<T>::lt(*in, maxValue) ? *in : maxValue;
    if (THCNumerics<T>::gt(minValue, clipped)) {
      clipped = minValue;
    }
    *out = clipped;
  }
  // In-place: *v = clamp(*v).
  __device__ __forceinline__ void operator()(T* v) {
    T clipped = THCNumerics<T>::lt(*v, maxValue) ? *v : maxValue;
    if (THCNumerics<T>::gt(minValue, clipped)) {
      clipped = minValue;
    }
    *v = clipped;
  }
  const T minValue;  // lower bound
  const T maxValue;  // upper bound
};
// Linear interpolation: out = a + w * (b - a).
template <typename T>
struct TensorLerpOp {
  TensorLerpOp(T w) : w(w) {}
  __device__ __forceinline__ void operator()(T *out, T *a, T *b) {
    T delta = THCNumerics<T>::sub(*b, *a);
    T scaled = THCNumerics<T>::mul(w, delta);
    *out = THCNumerics<T>::add(*a, scaled);
  }
  const T w;  // interpolation weight
};
// 3-component cross product over strided vectors: out = x × y.
// sx/sy/so are the element strides of x, y, and out respectively.
// NOTE(review): components are written in order (out[0], out[1], out[2]) and
// later components re-read x/y -- if out aliases x or y the later reads see
// the overwritten values; preserving this exact order matters.
template <typename T>
struct TensorCrossOp {
  TensorCrossOp(long sx, long sy, long so) : sx(sx), sy(sy), so(so) {}
  __device__ __forceinline__ void operator()(T* out, T* x, T*y) {
    out[0 * so] = THCNumerics<T>::sub(
        THCNumerics<T>::mul(x[1 * sx], y[2 * sy]),
        THCNumerics<T>::mul(x[2 * sx], y[1 * sy])
    );
    out[1 * so] = THCNumerics<T>::sub(
        THCNumerics<T>::mul(x[2 * sx], y[0 * sy]),
        THCNumerics<T>::mul(x[0 * sx], y[2 * sy])
    );
    out[2 * so] = THCNumerics<T>::sub(
        THCNumerics<T>::mul(x[0 * sx], y[1 * sy]),
        THCNumerics<T>::mul(x[1 * sx], y[0 * sy])
    );
  }
  const long sx, sy, so;  // element strides for x, y, out
};
// Element-wise maximum via THCNumerics comparison.
template <typename T>
struct TensorMaxOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    const T a = *out;
    const T b = *in;
    *out = THCNumerics<T>::gt(a, b) ? a : b;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T a = *in1;
    const T b = *in2;
    *out = THCNumerics<T>::gt(a, b) ? a : b;
  }
};
// Element-wise minimum via THCNumerics comparison.
template <typename T>
struct TensorMinOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    const T a = *out;
    const T b = *in;
    *out = THCNumerics<T>::lt(a, b) ? a : b;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T a = *in1;
    const T b = *in2;
    *out = THCNumerics<T>::lt(a, b) ? a : b;
  }
};
// Element-wise maximum against a fixed scalar: out = max(x, val).
template <typename T>
struct TensorMaxValueOp {
  TensorMaxValueOp(T v) : val(v) {}
  // In-place form.
  __device__ __forceinline__ void operator()(T* out) {
    const T cur = *out;
    *out = THCNumerics<T>::gt(cur, val) ? cur : val;
  }
  // Out-of-place form.
  __device__ __forceinline__ void operator()(T* out, T* in) {
    const T cur = *in;
    *out = THCNumerics<T>::gt(cur, val) ? cur : val;
  }
  T val;
};
// Element-wise minimum against a fixed scalar: out = min(x, val).
template <typename T>
struct TensorMinValueOp {
  TensorMinValueOp(T v) : val(v) {}
  // In-place form.
  __device__ __forceinline__ void operator()(T* out) {
    const T cur = *out;
    *out = THCNumerics<T>::lt(cur, val) ? cur : val;
  }
  // Out-of-place form.
  __device__ __forceinline__ void operator()(T* out, T* in) {
    const T cur = *in;
    *out = THCNumerics<T>::lt(cur, val) ? cur : val;
  }
  T val;
};
// Fused accumulate: *out += val * (*in1 * *in2)  (torch addcmul).
template <typename T>
struct TensorAddCMulOp {
  TensorAddCMulOp(T v) : val(v) {}
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T prod = THCNumerics<T>::mul(*in1, *in2);
    const T scaled = THCNumerics<T>::mul(val, prod);
    *out = THCNumerics<T>::add(*out, scaled);
  }
  T val;
};
// Fused accumulate: *out += val * (*in1 / *in2)  (torch addcdiv).
template <typename T>
struct TensorAddCDivOp {
  TensorAddCDivOp(T v) : val(v) {}
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T quot = THCNumerics<T>::div(*in1, *in2);
    const T scaled = THCNumerics<T>::mul(val, quot);
    *out = THCNumerics<T>::add(*out, scaled);
  }
  T val;
};
// Bitwise left shift for integral element types; floating-point types use
// the power-of-two specializations below.
template <typename T>
struct TensorLShiftOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out << *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T value = *in1;
    const T amount = *in2;
    *out = value << amount;
  }
};
// float "left shift": emulated as multiplication by 2^n.
template <>
struct TensorLShiftOp<float> {
  __device__ __forceinline__ void operator()(float* out, float* in) {
    *out = *out * powf(2.0f, *in);
  }
  __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
    const float scale = powf(2.0f, *in2);
    *out = *in1 * scale;
  }
};
// double "left shift": emulated as multiplication by 2^n.
template <>
struct TensorLShiftOp<double> {
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = *out * pow(2.0, *in);
  }
  __device__ __forceinline__ void operator()(double* out, double* in1, double* in2) {
    const double scale = pow(2.0, *in2);
    *out = *in1 * scale;
  }
};
// Bitwise right shift for integral element types; floating-point types use
// the power-of-two specializations below.
template <typename T>
struct TensorRShiftOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out >> *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T value = *in1;
    const T amount = *in2;
    *out = value >> amount;
  }
};
// float "right shift": emulated as division by 2^n.
template <>
struct TensorRShiftOp<float> {
  __device__ __forceinline__ void operator()(float* out, float* in) {
    *out = *out / powf(2.0f, *in);
  }
  __device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
    const float scale = powf(2.0f, *in2);
    *out = *in1 / scale;
  }
};
// double "right shift": emulated as division by 2^n.
template <>
struct TensorRShiftOp<double> {
  __device__ __forceinline__ void operator()(double* out, double* in) {
    *out = *out / pow(2.0, *in);
  }
  __device__ __forceinline__ void operator()(double* out, double* in1, double* in2) {
    const double scale = pow(2.0, *in2);
    *out = *in1 / scale;
  }
};
// Bitwise AND of two tensors (integral types).
template <typename T>
struct TensorBitAndOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out & *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T a = *in1;
    const T b = *in2;
    *out = a & b;
  }
};
// Bitwise OR of two tensors (integral types).
template <typename T>
struct TensorBitOrOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out | *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T a = *in1;
    const T b = *in2;
    *out = a | b;
  }
};
// Bitwise XOR of two tensors (integral types).
template <typename T>
struct TensorBitXorOp {
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out ^ *in;
  }
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    const T a = *in1;
    const T b = *in2;
    *out = a ^ b;
  }
};
#endif // THC_TENSORMATH_POINTWISE_CUH
/*
* Shabal implementation.
*
* ==========================(LICENSE BEGIN)============================
*
* Copyright (c) 2007-2010 Projet RNRT SAPHIR
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* ===========================(LICENSE END)=============================
*
* @author Thomas Pornin <thomas.pornin@cryptolog.com>
*/
// One element of the Shabal permutation:
//   xa0 = ((xa0 ^ xc ^ (ROTL32(xa1,15)*5)) * 3) ^ xb1 ^ (xb2 & ~xb3) ^ xm
//   xb0 = ~(xa0 ^ ROTL32(xb0, 1))
// On SM50+ with CUDA >= 7.5 the (xb2 & ~xb3) ^ xm term is issued as a single
// LOP3 instruction; immediate 0x9A encodes that 3-input boolean function.
__device__ __forceinline__ void PERM_ELT(uint32_t &xa0,const uint32_t xa1,uint32_t &xb0,const uint32_t xb1,const uint32_t xb2,const uint32_t xb3,const uint32_t xc,const uint32_t xm){
uint32_t tmp;
#if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050
asm ("lop3.b32 %0, %1, %2, %3, 0x9A;" : "=r"(tmp) : "r"(xb2),"r"(xb3),"r"(xm)); // 0x9A = (F0 &(~CC)) ^ (AA)
#else
tmp = (xb2 & ~xb3) ^ xm;
#endif
xa0 = ((xa0 ^ xc ^ (ROTL32(xa1, 15) * 5U)) * 3U) ^ xb1 ^ tmp;
xb0 = xor3x(0xFFFFFFFF, xa0, ROTL32(xb0, 1));
}
// First of the three unrolled Shabal permutation sub-rounds: applies
// PERM_ELT across all 16 B-words, cycling through the 12 A-words starting
// at A[0] (indices wrap) and walking C backwards from C[8].
__device__ __forceinline__
void PERM_STEP_0(uint32_t *A,uint32_t *B,const uint32_t *C,const uint32_t* M){
PERM_ELT(A[ 0], A[11], B[ 0], B[13], B[ 9], B[ 6], C[ 8], M[ 0]); PERM_ELT(A[ 1], A[ 0], B[ 1], B[14], B[10], B[ 7], C[ 7], M[ 1]);
PERM_ELT(A[ 2], A[ 1], B[ 2], B[15], B[11], B[ 8], C[ 6], M[ 2]); PERM_ELT(A[ 3], A[ 2], B[ 3], B[ 0], B[12], B[ 9], C[ 5], M[ 3]);
PERM_ELT(A[ 4], A[ 3], B[ 4], B[ 1], B[13], B[10], C[ 4], M[ 4]); PERM_ELT(A[ 5], A[ 4], B[ 5], B[ 2], B[14], B[11], C[ 3], M[ 5]);
PERM_ELT(A[ 6], A[ 5], B[ 6], B[ 3], B[15], B[12], C[ 2], M[ 6]); PERM_ELT(A[ 7], A[ 6], B[ 7], B[ 4], B[ 0], B[13], C[ 1], M[ 7]);
PERM_ELT(A[ 8], A[ 7], B[ 8], B[ 5], B[ 1], B[14], C[ 0], M[ 8]); PERM_ELT(A[ 9], A[ 8], B[ 9], B[ 6], B[ 2], B[15], C[15], M[ 9]);
PERM_ELT(A[10], A[ 9], B[10], B[ 7], B[ 3], B[ 0], C[14], M[10]); PERM_ELT(A[11], A[10], B[11], B[ 8], B[ 4], B[ 1], C[13], M[11]);
PERM_ELT(A[ 0], A[11], B[12], B[ 9], B[ 5], B[ 2], C[12], M[12]); PERM_ELT(A[ 1], A[ 0], B[13], B[10], B[ 6], B[ 3], C[11], M[13]);
PERM_ELT(A[ 2], A[ 1], B[14], B[11], B[ 7], B[ 4], C[10], M[14]); PERM_ELT(A[ 3], A[ 2], B[15], B[12], B[ 8], B[ 5], C[ 9], M[15]);
}
// Second permutation sub-round: identical schedule to PERM_STEP_0 but the
// A-word cycle continues from A[4] (16 mod 12 = 4 after the first step).
__device__ __forceinline__
void PERM_STEP_1(uint32_t *A,uint32_t *B,const uint32_t *C,const uint32_t* M){
PERM_ELT(A[ 4], A[ 3], B[ 0], B[13], B[ 9], B[ 6], C[ 8], M[ 0]); PERM_ELT(A[ 5], A[ 4], B[ 1], B[14], B[10], B[ 7], C[ 7], M[ 1]);
PERM_ELT(A[ 6], A[ 5], B[ 2], B[15], B[11], B[ 8], C[ 6], M[ 2]); PERM_ELT(A[ 7], A[ 6], B[ 3], B[ 0], B[12], B[ 9], C[ 5], M[ 3]);
PERM_ELT(A[ 8], A[ 7], B[ 4], B[ 1], B[13], B[10], C[ 4], M[ 4]); PERM_ELT(A[ 9], A[ 8], B[ 5], B[ 2], B[14], B[11], C[ 3], M[ 5]);
PERM_ELT(A[10], A[ 9], B[ 6], B[ 3], B[15], B[12], C[ 2], M[ 6]); PERM_ELT(A[11], A[10], B[ 7], B[ 4], B[ 0], B[13], C[ 1], M[ 7]);
PERM_ELT(A[ 0], A[11], B[ 8], B[ 5], B[ 1], B[14], C[ 0], M[ 8]); PERM_ELT(A[ 1], A[ 0], B[ 9], B[ 6], B[ 2], B[15], C[15], M[ 9]);
PERM_ELT(A[ 2], A[ 1], B[10], B[ 7], B[ 3], B[ 0], C[14], M[10]); PERM_ELT(A[ 3], A[ 2], B[11], B[ 8], B[ 4], B[ 1], C[13], M[11]);
PERM_ELT(A[ 4], A[ 3], B[12], B[ 9], B[ 5], B[ 2], C[12], M[12]); PERM_ELT(A[ 5], A[ 4], B[13], B[10], B[ 6], B[ 3], C[11], M[13]);
PERM_ELT(A[ 6], A[ 5], B[14], B[11], B[ 7], B[ 4], C[10], M[14]); PERM_ELT(A[ 7], A[ 6], B[15], B[12], B[ 8], B[ 5], C[ 9], M[15]);
}
// Third permutation sub-round: A-word cycle continues from A[8]
// (32 mod 12 = 8 after the first two steps).
__device__ __forceinline__
void PERM_STEP_2(uint32_t *A,uint32_t *B,const uint32_t *C,const uint32_t* M){
PERM_ELT(A[ 8], A[ 7], B[ 0], B[13], B[ 9], B[ 6], C[ 8], M[ 0]); PERM_ELT(A[ 9], A[ 8], B[ 1], B[14], B[10], B[ 7], C[ 7], M[ 1]);
PERM_ELT(A[10], A[ 9], B[ 2], B[15], B[11], B[ 8], C[ 6], M[ 2]); PERM_ELT(A[11], A[10], B[ 3], B[ 0], B[12], B[ 9], C[ 5], M[ 3]);
PERM_ELT(A[ 0], A[11], B[ 4], B[ 1], B[13], B[10], C[ 4], M[ 4]); PERM_ELT(A[ 1], A[ 0], B[ 5], B[ 2], B[14], B[11], C[ 3], M[ 5]);
PERM_ELT(A[ 2], A[ 1], B[ 6], B[ 3], B[15], B[12], C[ 2], M[ 6]); PERM_ELT(A[ 3], A[ 2], B[ 7], B[ 4], B[ 0], B[13], C[ 1], M[ 7]);
PERM_ELT(A[ 4], A[ 3], B[ 8], B[ 5], B[ 1], B[14], C[ 0], M[ 8]); PERM_ELT(A[ 5], A[ 4], B[ 9], B[ 6], B[ 2], B[15], C[15], M[ 9]);
PERM_ELT(A[ 6], A[ 5], B[10], B[ 7], B[ 3], B[ 0], C[14], M[10]); PERM_ELT(A[ 7], A[ 6], B[11], B[ 8], B[ 4], B[ 1], C[13], M[11]);
PERM_ELT(A[ 8], A[ 7], B[12], B[ 9], B[ 5], B[ 2], C[12], M[12]); PERM_ELT(A[ 9], A[ 8], B[13], B[10], B[ 6], B[ 3], C[11], M[13]);
PERM_ELT(A[10], A[ 9], B[14], B[11], B[ 7], B[ 4], C[10], M[14]); PERM_ELT(A[11], A[10], B[15], B[12], B[ 8], B[ 5], C[ 9], M[15]);
}
// Shabal's post-permutation feed-forward: 36 additions of B-words into the
// 12 A-words, manually unrolled as three passes (the B index walks backwards
// with wrap-around, shifting by -4 each pass).
__device__ __forceinline__
void ADD_BLOCK(uint32_t* A, const uint32_t *B){
A[11]+= B[ 6]; A[10]+= B[ 5]; A[ 9]+= B[ 4]; A[ 8]+= B[ 3]; A[ 7]+= B[ 2]; A[ 6]+= B[ 1]; A[ 5]+= B[ 0]; A[ 4]+= B[15]; A[ 3]+= B[14]; A[ 2]+= B[13]; A[ 1]+= B[12]; A[ 0]+= B[11];
A[11]+= B[10]; A[10]+= B[ 9]; A[ 9]+= B[ 8]; A[ 8]+= B[ 7]; A[ 7]+= B[ 6]; A[ 6]+= B[ 5]; A[ 5]+= B[ 4]; A[ 4]+= B[ 3]; A[ 3]+= B[ 2]; A[ 2]+= B[ 1]; A[ 1]+= B[ 0]; A[ 0]+= B[15];
A[11]+= B[14]; A[10]+= B[13]; A[ 9]+= B[12]; A[ 8]+= B[11]; A[ 7]+= B[10]; A[ 6]+= B[ 9]; A[ 5]+= B[ 8]; A[ 4]+= B[ 7]; A[ 3]+= B[ 6]; A[ 2]+= B[ 5]; A[ 1]+= B[ 4]; A[ 0]+= B[ 3];
}
// Rotate each of the 16 words left by 17 bits (Shabal's B-word pre-rotation).
__device__ __forceinline__
void ROTATE(uint32_t* A){
    #pragma unroll 16
    for (int j = 0; j < 16; ++j)
        A[j] = ROTL32(A[j], 17);
}
/***************************************************/
// GPU Hash Function
// Shabal-512 over exactly one 64-byte message block per thread (x14 chain
// stage). A[12]/B[16]/C[16] hold the Shabal-512 IV; M is the message block.
// After absorbing the data block, a padding block (0x80 then zeros) is
// processed and the permutation is run three additional times — Shabal's
// mandatory finalization rounds — before the 64-byte digest (B) overwrites
// the input hash in place.
__global__ __launch_bounds__(384,3)
void x14_shabal512_gpu_hash_64(uint32_t threads, uint32_t *g_hash){
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
// Shabal-512 initial state (IV) for A, B and C.
uint32_t A[]={
0x20728DFD, 0x46C0BD53, 0xE782B699, 0x55304632, 0x71B4EF90, 0x0EA9E82C, 0xDBB930F1, 0xFAD06B8B,
0xBE0CAE40, 0x8BD14410, 0x76D2ADAC, 0x28ACAB7F};
uint32_t B[]={
0xC1099CB7, 0x07B385F3, 0xE7442C26, 0xCC8AD640, 0xEB6F56C7, 0x1EA81AA9, 0x73B9D314, 0x1DE85D08,
0x48910A5A, 0x893B22DB, 0xC5A0DF44, 0xBBC4324E, 0x72D2F240, 0x75941D99, 0x6D8BDE82, 0xA1A7502B};
uint32_t C[]={
0xD9BF68D1, 0x58BAD750, 0x56028CB2, 0x8134F359, 0xB5D469D8, 0x941A8CC2, 0x418B2A6E, 0x04052780,
0x7F07D787, 0x5194358F, 0x3C60D665, 0xBE97D79A, 0x950C3434, 0xAED9A06D, 0x2537DC8D, 0x7CDB5969};
uint32_t M[16];
if (thread < threads){
// Each thread owns one 64-byte (16-word) hash slot.
uint32_t *Hash = &g_hash[thread<<4];
*(uint2x4*)&M[ 0] = __ldg4((uint2x4*)&Hash[ 0]);
*(uint2x4*)&M[ 8] = __ldg4((uint2x4*)&Hash[ 8]);
// Absorb the message block: B += M, XOR the block counter (W = 1) into A.
*(uint16*)&B[ 0]+= *(uint16*)&M[ 0];
A[ 0] ^= 1;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
PERM_STEP_2(A,B,C,M);
ADD_BLOCK(A,C);
// C -= M, then B and C swap roles for the next block (done implicitly by
// swapping the arguments below instead of copying).
*(uint16*)&C[ 0]-= *(uint16*)&M[ 0];
// SWAP_BC;
// Padding block: single 0x80 byte followed by zeros; W = 2 from here on.
M[ 0] = 0x80;
M[ 1] = M[ 2] = M[ 3] = M[ 4] = M[ 5] = M[ 6] = M[ 7] = M[ 8] = M[ 9] = M[10] = M[11] = M[12] = M[13] = M[14] = M[15] = 0;
C[ 0]+= M[ 0];
A[ 0]^= 0x02;
ROTATE(C);
PERM_STEP_0(A,C,B,M);
PERM_STEP_1(A,C,B,M);
PERM_STEP_2(A,C,B,M);
ADD_BLOCK(A,B);
// Three extra finalization rounds required by the Shabal specification
// (the padding block is re-processed with the same counter W = 2).
A[ 0] ^= 0x02;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
PERM_STEP_2(A,B,C,M);
ADD_BLOCK(A,C);
A[ 0] ^= 0x02;
ROTATE(C);
PERM_STEP_0(A,C,B,M);
PERM_STEP_1(A,C,B,M);
PERM_STEP_2(A,C,B,M);
ADD_BLOCK(A,B);
A[ 0] ^= 0x02;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
PERM_STEP_2(A,B,C,M);
// Digest = the 16 B-words; written back over the input hash.
*(uint2x4*)&Hash[ 0] = *(uint2x4*)&B[ 0];
*(uint2x4*)&Hash[ 8] = *(uint2x4*)&B[ 8];
}
}
// Host-side launcher: hashes `threads` 64-byte slots of d_hash in place.
__host__ void x14_shabal512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t *d_hash)
{
    const uint32_t threadsperblock = 384;
    // Ceil-divide so every candidate hash slot gets its own thread.
    const dim3 block(threadsperblock);
    const dim3 grid((threads + threadsperblock - 1) / threadsperblock);
    x14_shabal512_gpu_hash_64<<<grid, block>>>(threads, d_hash);
}
// Final-stage variant of the kernel above: computes only enough of the
// digest to test it against the 64-bit share target, and records candidate
// thread indices (nonces) in resNonce instead of writing the hash back.
__global__ __launch_bounds__(512,2)
void x14_shabal512_gpu_hash_64_final(uint32_t threads,const uint32_t* __restrict__ g_hash,uint32_t* resNonce, const uint64_t target){
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
// Shabal-512 initial state (IV) for A, B and C.
uint32_t A[]={
0x20728DFD, 0x46C0BD53, 0xE782B699, 0x55304632, 0x71B4EF90, 0x0EA9E82C, 0xDBB930F1, 0xFAD06B8B,
0xBE0CAE40, 0x8BD14410, 0x76D2ADAC, 0x28ACAB7F};
uint32_t B[]={
0xC1099CB7, 0x07B385F3, 0xE7442C26, 0xCC8AD640, 0xEB6F56C7, 0x1EA81AA9, 0x73B9D314, 0x1DE85D08,
0x48910A5A, 0x893B22DB, 0xC5A0DF44, 0xBBC4324E, 0x72D2F240, 0x75941D99, 0x6D8BDE82, 0xA1A7502B};
uint32_t C[]={
0xD9BF68D1, 0x58BAD750, 0x56028CB2, 0x8134F359, 0xB5D469D8, 0x941A8CC2, 0x418B2A6E, 0x04052780,
0x7F07D787, 0x5194358F, 0x3C60D665, 0xBE97D79A, 0x950C3434, 0xAED9A06D, 0x2537DC8D, 0x7CDB5969};
uint32_t M[16];
if (thread < threads){
const uint32_t *Hash = &g_hash[thread<<4];
*(uint2x4*)&M[ 0] = __ldg4((uint2x4*)&Hash[ 0]);
*(uint2x4*)&M[ 8] = __ldg4((uint2x4*)&Hash[ 8]);
// Absorb the message block: B += M, XOR the block counter (W = 1) into A.
*(uint16*)&B[ 0]+= *(uint16*)&M[ 0];
A[ 0] ^= 1;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
PERM_STEP_2(A,B,C,M);
ADD_BLOCK(A,C);
*(uint16*)&C[ 0]-= *(uint16*)&M[ 0];
// SWAP_BC;
// Padding block (0x80 then zeros); W = 2 for this and the final rounds.
M[ 0] = 0x80;
M[ 1] = M[ 2] = M[ 3] = M[ 4] = M[ 5] = M[ 6] = M[ 7] = M[ 8] = M[ 9] = M[10] = M[11] = M[12] = M[13] = M[14] = M[15] = 0;
C[ 0]+= M[ 0];
A[ 0]^= 0x02;
ROTATE(C);
PERM_STEP_0(A,C,B,M);
PERM_STEP_1(A,C,B,M);
PERM_STEP_2(A,C,B,M);
ADD_BLOCK(A,B);
A[ 0] ^= 0x02;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
PERM_STEP_2(A,B,C,M);
ADD_BLOCK(A,C);
A[ 0] ^= 0x02;
ROTATE(C);
PERM_STEP_0(A,C,B,M);
PERM_STEP_1(A,C,B,M);
PERM_STEP_2(A,C,B,M);
ADD_BLOCK(A,B);
A[ 0] ^= 0x02;
ROTATE(B);
PERM_STEP_0(A,B,C,M);
PERM_STEP_1(A,B,C,M);
// PERM_STEP_2(A,B,C,M);
// Last sub-round truncated: only the PERM_ELTs needed to finalize B[6]/B[7]
// (the words compared against the target below) are executed.
PERM_ELT(A[ 8], A[ 7], B[ 0], B[13], B[ 9], B[ 6], C[ 8], M[ 0]); PERM_ELT(A[ 9], A[ 8], B[ 1], B[14], B[10], B[ 7], C[ 7], M[ 1]);
PERM_ELT(A[10], A[ 9], B[ 2], B[15], B[11], B[ 8], C[ 6], M[ 2]); PERM_ELT(A[11], A[10], B[ 3], B[ 0], B[12], B[ 9], C[ 5], M[ 3]);
PERM_ELT(A[ 0], A[11], B[ 4], B[ 1], B[13], B[10], C[ 4], M[ 4]); PERM_ELT(A[ 1], A[ 0], B[ 5], B[ 2], B[14], B[11], C[ 3], M[ 5]);
PERM_ELT(A[ 2], A[ 1], B[ 6], B[ 3], B[15], B[12], C[ 2], M[ 6]); PERM_ELT(A[ 3], A[ 2], B[ 7], B[ 4], B[ 0], B[13], C[ 1], M[ 7]);
// Share test on digest words 6..7; keep up to two candidate nonces
// (the previous candidate is demoted to resNonce[1]).
if(*(uint64_t*)&B[ 6] <= target){
uint32_t tmp = atomicExch(&resNonce[0], thread);
if (tmp != UINT32_MAX)
resNonce[1] = tmp;
}
}
}
// Host-side launcher for the final stage: tests each candidate against
// `target` and records winning thread indices in d_resNonce.
__host__ void x14_shabal512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t *d_hash, uint32_t *d_resNonce, const uint64_t target){
    const uint32_t threadsperblock = 512;
    // Ceil-divide so every candidate hash slot gets its own thread.
    const dim3 block(threadsperblock);
    const dim3 grid((threads + threadsperblock - 1) / threadsperblock);
    x14_shabal512_gpu_hash_64_final<<<grid, block>>>(threads, d_hash, d_resNonce, target);
}
#include <stdint.h>
#include "launch_params.h"
#include "types.h"
#include "path_tracer.h"
#include "disney_bsdf.h"
#include "lights.h"
#include "math.h"
#include <optix_device.h>
#include <owl/common/math/random.h>
#include <owl/common/math/box.h>
#include "nvisii/utilities/procedural_sky.h"
#include <glm/gtx/matrix_interpolation.hpp>
// Per-thread random number generator type (owl's 4-iteration LCG).
typedef owl::common::LCG<4> Random;
// Frame-global launch parameters uploaded by the host before each launch.
extern "C" __constant__ LaunchParams optixLaunchParams;
// Per-ray payload shared between ray-gen and the hit/miss programs.
struct RayPayload {
// Hit identification; both stay -1 on a miss.
int instanceID = -1;
int primitiveID = -1;
float2 barycentrics;
// for volumes
float3 objectSpaceRayOrigin;
float3 objectSpaceRayDirection;
float t0;
float t1;
// Volume collision type set by VolumeIntersection:
// 1 = absorption/emission, 2 = scattering.
int eventID = -1;
float3 gradient;
float3 mp;
float density;
// Hit distance along the ray; remains -1 when nothing was hit.
float tHit = -1.f;
// Object-to-world transform of the hit instance (row-major 3x4), plus the
// same transform at shutter times 0 and 1 for motion-vector output.
float localToWorld[12];
float localToWorldT0[12];
float localToWorldT1[12];
LCGRand rng;
};
// Maps a direction vector to equirectangular UV coordinates for sampling
// the environment map. Inverse of toPolar() below.
__device__
vec2 toUV(vec3 n)
{
// Axis flips chosen to match the dome orientation (mirrored in toPolar).
n.z = -n.z;
n.x = -n.x;
vec2 uv;
// NOTE(review): n.x was negated just above, so -n.x here cancels that flip
// for the atan2 — presumably intentional; verify against toPolar().
uv.x = approx_atan2f(float(-n.x), float(n.y));
// NOTE(review): the M_PI * (28.670f / 360.0f) term looks like a fixed seam
// rotation offset for the environment map — confirm the intended constant.
uv.x = (uv.x + M_PI / 2.0f) / (M_PI * 2.0f) + M_PI * (28.670f / 360.0f);
// Clamp v away from the poles to avoid filtering artifacts at texture edges.
uv.y = ::clamp(float(acosf(n.z) / M_PI), .001f, .999f);
return uv;
}
// Uv range: [0, 1]
// Maps equirectangular UV coordinates back to a direction vector.
// Inverse of toUV() above (same axis flips).
__device__
vec3 toPolar(vec2 uv)
{
    // Spherical angles from the UV parameterization. Float literals keep the
    // arithmetic in single precision; the original `2.0 * M_PI * uv.x - M_PI / 2.0`
    // silently promoted to double before feeding the float intrinsics.
    float theta = 2.0f * float(M_PI) * uv.x - float(M_PI) / 2.0f;
    float phi = float(M_PI) * uv.y;

    vec3 n;
    n.x = __cosf(theta) * __sinf(phi);
    n.y = __sinf(theta) * __sinf(phi);
    n.z = __cosf(phi);

    // Flip axes to match the convention used by toUV().
    n.z = -n.z;
    n.x = -n.x;
    return n;
}
// Returns the texture object to use for environment lighting:
// a user texture when environmentMapID >= 0, the procedural sky when the id
// is -2 and a sky texture exists, and 0 (no texture) otherwise.
__device__
cudaTextureObject_t getEnvironmentTexture()
{
    auto &LP = optixLaunchParams;
    if (LP.environmentMapID >= 0) {
        cudaTextureObject_t tex = 0;
        GET(tex, cudaTextureObject_t, LP.textureObjects, LP.environmentMapID);
        return tex;
    }
    if ((LP.environmentMapID == -2) && (LP.proceduralSkyTexture != 0)) {
        return LP.proceduralSkyTexture;
    }
    return 0;
}
// Environment radiance for a ray that escaped the scene: sample the
// environment texture if one exists, otherwise the flat dome color,
// otherwise a simple vertical sky gradient.
inline __device__
float3 missColor(const float3 n_dir, cudaTextureObject_t &tex)
{
auto &LP = optixLaunchParams;
// Apply the user-specified environment rotation before lookup.
vec3 rayDir = LP.environmentMapRotation * make_vec3(n_dir);
if (tex)
{
vec2 tc = toUV(vec3(rayDir.x, rayDir.y, rayDir.z));
float4 texColor = tex2D<float4>(tex, tc.x,tc.y);
return make_float3(texColor);
}
// NOTE(review): the dome color is used when ANY component is >= 0, so an
// all-negative color appears to be the sentinel for "use the default
// gradient" — confirm against the host-side default.
if (glm::any(glm::greaterThanEqual(LP.domeLightColor, glm::vec3(0.f)))) return make_float3(LP.domeLightColor);
// Fallback: gamma-decoded white-to-blue gradient keyed on ray elevation.
float t = 0.5f*(rayDir.z + 1.0f);
float3 c = (1.0f - t) * make_float3(pow(vec3(1.0f), vec3(2.2f))) + t * make_float3( pow(vec3(0.5f, 0.7f, 1.0f), vec3(2.2f)) );
return c;
}
// Convenience overload: forwards the ray's direction to the overload above.
inline __device__
float3 missColor(const owl::Ray &ray, cudaTextureObject_t &tex)
{
return missColor(ray.direction, tex);
}
// Intentionally empty: a miss is detected in ray-gen by the payload's tHit
// keeping its default of -1 (see RayPayload), and environment shading is
// done there via missColor().
OPTIX_MISS_PROGRAM(miss)()
{
}
// Closest-hit for triangle geometry: records which instance/primitive was
// hit plus barycentrics and transforms; all shading is deferred to ray-gen.
OPTIX_CLOSEST_HIT_PROGRAM(TriangleMesh)()
{
auto &LP = optixLaunchParams;
RayPayload &prd = owl::getPRD<RayPayload>();
prd.instanceID = optixGetInstanceIndex();
prd.tHit = optixGetRayTmax();
prd.barycentrics = optixGetTriangleBarycentrics();
prd.primitiveID = optixGetPrimitiveIndex();
// const OptixTraversableHandle handle = optixGetTransformListHandle(prd.instanceID);
// const OptixTransformType type = optixGetTransformTypeFromHandle( handle );
// if (type == OPTIX_TRANSFORM_TYPE_MATRIX_MOTION_TRANSFORM) {
// const OptixMatrixMotionTransform* transformData = optixGetMatrixMotionTransformFromHandle( handle );
// memcpy(prd.localToWorld, &transformData->transform[0][0], 12 * sizeof(float));
// }
// else if (type == OPTIX_TRANSFORM_TYPE_SRT_MOTION_TRANSFORM) {
// const OptixSRTMotionTransform* transformData = optixGetMatrixMotionTransformFromHandle( handle );
// memcpy(prd.localToWorld, &transformData->transform[0][0], 12 * sizeof(float));
// }
// optixGetInterpolatedTransformation( trf0, trf1, trf2, transformData, time );
// const float4* transform = (const float4*)( &transformData->transform[key][0] );
// const float4* transform = (const float4*)( &transformData->transform[key][0] );
optixGetObjectToWorldTransformMatrix(prd.localToWorld);
// If we don't need motion vectors, (or in the future if an object
// doesn't have motion blur) then return.
if (LP.renderDataMode == RenderDataFlags::NONE) return;
// Capture the instance transform at shutter times 0 and 1 so ray-gen can
// compute per-pixel motion vectors.
OptixTraversableHandle handle = optixGetTransformListHandle(prd.instanceID);
float4 trf00, trf01, trf02;
float4 trf10, trf11, trf12;
optix_impl::optixGetInterpolatedTransformationFromHandle( trf00, trf01, trf02, handle, /* time */ 0.f, true );
optix_impl::optixGetInterpolatedTransformationFromHandle( trf10, trf11, trf12, handle, /* time */ 1.f, true );
memcpy(&prd.localToWorldT0[0], &trf00, sizeof(trf00));
memcpy(&prd.localToWorldT0[4], &trf01, sizeof(trf01));
memcpy(&prd.localToWorldT0[8], &trf02, sizeof(trf02));
memcpy(&prd.localToWorldT1[0], &trf10, sizeof(trf10));
memcpy(&prd.localToWorldT1[4], &trf11, sizeof(trf11));
memcpy(&prd.localToWorldT1[8], &trf12, sizeof(trf12));
}
// Closest-hit for occlusion rays: only the fact of a hit (instance id and
// distance) matters; no shading data is gathered.
OPTIX_CLOSEST_HIT_PROGRAM(ShadowRay)()
{
RayPayload &prd = owl::getPRD<RayPayload>();
prd.instanceID = optixGetInstanceIndex();
prd.tHit = optixGetRayTmax();
}
/// Taken and modified from Algorithm 2 in "Pixar's Production Volume Rendering" paper.
/// Performs one step of delta (Woodcock) tracking through a heterogeneous
/// medium: samples a tentative free-flight distance against the majorant,
/// then probabilistically classifies the collision at that point.
/// \param rng Per-ray random number generator (two samples are consumed).
/// \param acc NanoVDB read accessor used to look up the local density.
/// \param majorant_extinction Upper bound on extinction over the medium.
/// \param linear_attenuation_unit World-space length of one attenuation unit.
/// \param absorption_ Absorption coefficient per unit density.
/// \param scattering_ Scattering coefficient per unit density.
/// \param x The origin of the ray.
/// \param w The direction of the light (opposite of ray direction).
/// \param d The distance along the ray to the boundary.
/// \param t The returned hit distance.
/// \param event Will be updated to represent the event that occured during tracking.
/// 0 means the boundary was hit
/// 1 means an absorption/emission occurred
/// 2 means a scattering collision occurred
/// 3 means a null collision occurred
/// \param debug Currently unused.
template<typename AccT>
__device__
void SampleDeltaTracking(
LCGRand &rng,
AccT& acc,
float majorant_extinction,
float linear_attenuation_unit,
float absorption_,
float scattering_,
vec3 x,
vec3 w,
float d,
float &t,
int &event,
bool debug = false
) {
float rand1 = lcg_randomf(rng);
float rand2 = lcg_randomf(rng);
// Set new t for the current x.
// Free-flight distance sampled from the exponential distribution with the
// majorant as rate parameter.
t = (-log(1.0f - rand1) / majorant_extinction) * linear_attenuation_unit;
// A boundary has been hit
if (t >= d) {
event = 0;
t = d;
return;
}
// Update current position
x = x + t * w;
// Nearest-voxel density lookup at the tentative collision point.
auto coord_pos = nanovdb::Coord::Floor( nanovdb::Vec3f(x.x, x.y, x.z) );
float densityValue = acc.getValue(coord_pos);
float absorption = densityValue * absorption_; //sample_volume_absorption(x);
float scattering = densityValue * scattering_; //sample_volume_scattering(x);
float extinction = absorption + scattering;
//float null_collision = 1.f - extinction;
float null_collision = majorant_extinction - extinction;
// Normalize the three event probabilities by the majorant so they sum to 1.
//extinction = extinction / majorant_extinction;
absorption = absorption / majorant_extinction;
scattering = scattering / majorant_extinction;
null_collision = null_collision / majorant_extinction;
// An absorption/emission collision occured
if (rand2 < absorption)
{
event = 1;
return;
}
// A scattering collision occurred
else if (rand2 < (absorption + scattering)) {
//else if (rand2 < (1.f - null_collision)) {
event = 2;
return;
}
// A null collision occurred
else {
event = 3;
return;
}
}
// Closest-hit for volumes. The collision details (instanceID, tHit, event
// type, gradient, density, ...) were already recorded by the
// VolumeIntersection program; here we only capture the transforms.
OPTIX_CLOSEST_HIT_PROGRAM(VolumeMesh)()
{
auto &LP = optixLaunchParams;
RayPayload &prd = owl::getPRD<RayPayload>();
optixGetObjectToWorldTransformMatrix(prd.localToWorld);
// If we don't need motion vectors, (or in the future if an object
// doesn't have motion blur) then return.
if (LP.renderDataMode == RenderDataFlags::NONE) return;
// Capture the instance transform at shutter times 0 and 1 for motion vectors.
OptixTraversableHandle handle = optixGetTransformListHandle(prd.instanceID);
float4 trf00, trf01, trf02;
float4 trf10, trf11, trf12;
optix_impl::optixGetInterpolatedTransformationFromHandle( trf00, trf01, trf02, handle, /* time */ 0.f, true );
optix_impl::optixGetInterpolatedTransformationFromHandle( trf10, trf11, trf12, handle, /* time */ 1.f, true );
memcpy(&prd.localToWorldT0[0], &trf00, sizeof(trf00));
memcpy(&prd.localToWorldT0[4], &trf01, sizeof(trf01));
memcpy(&prd.localToWorldT0[8], &trf02, sizeof(trf02));
memcpy(&prd.localToWorldT1[0], &trf10, sizeof(trf10));
memcpy(&prd.localToWorldT1[4], &trf11, sizeof(trf11));
memcpy(&prd.localToWorldT1[8], &trf12, sizeof(trf12));
}
// Intentionally empty: for volume shadow rays the intersection program has
// already recorded everything needed in the payload; nothing to add here.
OPTIX_CLOSEST_HIT_PROGRAM(VolumeShadowRay)()
{
}
// Custom intersection program for volumes: intersects the object-space AABB
// of the NanoVDB grid, then delta-tracks through the medium to find a real
// (absorption or scattering) collision. On a collision it reports the hit to
// OptiX and fills the payload with the event type, density, and gradient.
// Uses prd.rng, so the result is stochastic per launch.
OPTIX_INTERSECT_PROGRAM(VolumeIntersection)()
{
    auto &LP = optixLaunchParams;
    const auto &self = owl::getProgramData<VolumeGeomData>();
    RayPayload &prd = owl::getPRD<RayPayload>();

    float3 origin = optixGetObjectRayOrigin();
    // note, this is _not_ normalized. Useful for computing world space tmin/tmax
    float3 direction = optixGetObjectRayDirection();

    float3 lb = make_float3(self.bbmin.x, self.bbmin.y, self.bbmin.z);
    float3 rt = make_float3(self.bbmax.x, self.bbmax.y, self.bbmax.z);

    // Typical slab-based ray/AABB intersection test.
    float3 dirfrac;
    dirfrac.x = 1.0f / direction.x;
    dirfrac.y = 1.0f / direction.y;
    dirfrac.z = 1.0f / direction.z;
    // lb is the corner of AABB with minimal coordinates - left bottom, rt is maximal corner
    float t1 = (lb.x - origin.x)*dirfrac.x;
    float t2 = (rt.x - origin.x)*dirfrac.x;
    float t3 = (lb.y - origin.y)*dirfrac.y;
    float t4 = (rt.y - origin.y)*dirfrac.y;
    float t5 = (lb.z - origin.z)*dirfrac.z;
    float t6 = (rt.z - origin.z)*dirfrac.z;
    float thit0 = max(max(min(t1, t2), min(t3, t4)), min(t5, t6));
    float thit1 = min(min(max(t1, t2), max(t3, t4)), max(t5, t6));

    // if tmax < 0, the whole AABB is behind the ray origin
    if (thit1 < 0) { return; }
    // if tmin > tmax, the ray misses the AABB
    if (thit0 >= thit1) { return; }
    // clip hit to near position
    thit0 = max(thit0, optixGetRayTmin());

    // Load the volume we hit
    GET(VolumeStruct volume, VolumeStruct, LP.volumes, self.volumeID);
    uint8_t *hdl = (uint8_t*)LP.volumeHandles.get(self.volumeID, __LINE__).data;
    const auto grid = reinterpret_cast<const nanovdb::FloatGrid*>(hdl);
    const auto& tree = grid->tree();
    auto acc = tree.getAccessor();
    auto nvdbSampler = nanovdb::SampleFromVoxels<nanovdb::DefaultReadAccessor<float>,
        /*Interpolation Degree*/1, /*UseCache*/false>(acc);

    // Majorant for delta tracking: the grid's maximum density.
    float majorant_extinction = acc.root().valueMax();
    float absorption = volume.absorption;
    float scattering = volume.scattering;

    // Offset from the AABB center into the grid's index space.
    auto bbox = acc.root().bbox();
    auto mx = bbox.max();
    auto mn = bbox.min();
    float3 offset = make_float3(glm::vec3(mn[0], mn[1], mn[2]) +
        (glm::vec3(mx[0], mx[1], mx[2]) -
        glm::vec3(mn[0], mn[1], mn[2])) * .5f);

    // Delta-track from the entry point; `unit` converts attenuation units to
    // ray-parameter units (direction is unnormalized).
    float t = thit0;
    float unit = volume.scale / length(direction);
    #define MAX_NULL_COLLISIONS 1000
    for (int i = 0; i < MAX_NULL_COLLISIONS; ++i) {
        // Sample a tentative free-flight distance against the majorant.
        t = t - (logf(1.0f - lcg_randomf(prd.rng)) / majorant_extinction) * unit;

        // A boundary has been hit, no intersection.
        if (t >= thit1) return;

        // Classify the collision at the current position using the local
        // density: P(absorb) = a, P(scatter) = s, remainder is a null
        // collision (continue marching).
        float3 x = offset + origin + t * direction;
        float densityValue = nvdbSampler(nanovdb::Vec3f(x.x, x.y, x.z));
        float a = (densityValue * absorption) / majorant_extinction;
        float s = (densityValue * scattering) / majorant_extinction;
        float xi = lcg_randomf(prd.rng);
        if (xi < (a + s)) {
            // Real collision: report it and record the event in the payload.
            if (optixReportIntersection(t, /* hit kind */ 0)) {
                auto g = nvdbSampler.gradient(nanovdb::Vec3f(x.x, x.y, x.z));
                prd.objectSpaceRayOrigin = origin;
                prd.objectSpaceRayDirection = direction;
                // 1 = absorption/emission, 2 = scattering
                prd.eventID = (xi < a) ? 1 : 2;
                prd.instanceID = optixGetInstanceIndex();
                prd.tHit = t;
                prd.mp = x - offset; // not super confident about this offset...
                prd.gradient = make_float3(g[0], g[1], g[2]);
                prd.density = densityValue;
            }
            return;
        }
        // Null collision: keep marching.
    }
}
// Bounds program for volumes: every primitive shares the grid's
// object-space AABB stored on the geometry record.
OPTIX_BOUNDS_PROGRAM(VolumeBounds)(
    const void *geomData,
    owl::common::box3f &primBounds,
    const int primID)
{
    const VolumeGeomData &self = *(const VolumeGeomData*)geomData;
    primBounds = owl::common::box3f();
    primBounds.lower = owl::common::vec3f(self.bbmin.x, self.bbmin.y, self.bbmin.z);
    primBounds.upper = owl::common::vec3f(self.bbmax.x, self.bbmax.y, self.bbmax.z);
}
// Loads the active camera entity plus its camera and transform components
// from the launch parameters. Returns false (leaving the outputs untouched)
// if the entity is uninitialized or holds out-of-range component ids.
inline __device__
bool loadCamera(EntityStruct &cameraEntity, CameraStruct &camera, TransformStruct &transform)
{
auto &LP = optixLaunchParams;
cameraEntity = LP.cameraEntity;
if (!cameraEntity.initialized) return false;
if ((cameraEntity.transform_id < 0) || (cameraEntity.transform_id >= LP.transforms.count)) return false;
if ((cameraEntity.camera_id < 0) || (cameraEntity.camera_id >= LP.cameras.count)) return false;
GET(camera, CameraStruct, LP.cameras, cameraEntity.camera_id);
GET(transform, TransformStruct, LP.transforms, cameraEntity.transform_id);
return true;
}
// Samples an RGB texture at texCoord (divided by the texture's UV scale),
// returning defaultVal for an unset/invalid texture id.
// NOTE(review): textureId may range past LP.textures.count (material
// parameter textures are appended after the regular ones), yet texInfo is
// fetched from LP.textures with the same id — this relies on GET's bounds
// handling; verify.
inline __device__
float3 sampleTexture(int32_t textureId, float2 texCoord, float3 defaultVal) {
auto &LP = optixLaunchParams;
if (textureId < 0 || textureId >= (LP.textures.count + LP.materials.count * NUM_MAT_PARAMS)) return defaultVal;
GET(cudaTextureObject_t tex, cudaTextureObject_t, LP.textureObjects, textureId);
if (!tex) return defaultVal;
GET(TextureStruct texInfo, TextureStruct, LP.textures, textureId);
texCoord.x = texCoord.x / texInfo.scale.x;
texCoord.y = texCoord.y / texInfo.scale.y;
return make_float3(tex2D<float4>(tex, texCoord.x, texCoord.y));
}
// Samples a single channel (0=r, 1=g, 2=b, 3=a) of a texture at texCoord
// (divided by the texture's UV scale). Returns defaultVal for an unset or
// invalid texture id, or for a channel outside 0..3.
// NOTE(review): as in the float3 overload above, texInfo is fetched from
// LP.textures with an id that may exceed LP.textures.count — relies on
// GET's bounds handling; verify.
inline __device__
float sampleTexture(int32_t textureId, float2 texCoord, int8_t channel, float defaultVal) {
    auto &LP = optixLaunchParams;
    if (textureId < 0 || textureId >= (LP.textures.count + LP.materials.count * NUM_MAT_PARAMS)) return defaultVal;
    GET(cudaTextureObject_t tex, cudaTextureObject_t, LP.textureObjects, textureId);
    if (!tex) return defaultVal;
    GET(TextureStruct texInfo, TextureStruct, LP.textures, textureId);
    texCoord.x = texCoord.x / texInfo.scale.x;
    texCoord.y = texCoord.y / texInfo.scale.y;
    // Fetch once and select the channel (the original issued a separate
    // tex2D fetch inside each channel branch).
    float4 texel = tex2D<float4>(tex, texCoord.x, texCoord.y);
    switch (channel) {
        case 0: return texel.x;
        case 1: return texel.y;
        case 2: return texel.z;
        case 3: return texel.w;
        default: return defaultVal;
    }
}
// Loads the three vertex indices of triangle `primitiveID` of mesh `meshID`.
// `numIndices` is unused here; bounds handling is inside the GET macro.
__device__
void loadMeshTriIndices(int meshID, int numIndices, int primitiveID, int3 &triIndices)
{
auto &LP = optixLaunchParams;
GET(Buffer<int3> indices, Buffer<int3>, LP.indexLists, meshID);
GET(triIndices, int3, indices, primitiveID);
}
// Interpolates the hit position from the triangle's vertices using the
// barycentric coordinates, and computes the (unnormalized-winding) geometric
// normal from the triangle edges.
__device__
void loadMeshVertexData(int meshID, int numVertices, int3 indices, float2 barycentrics, float3 &position, float3 &geometricNormal)
{
auto &LP = optixLaunchParams;
GET(Buffer<float3> vertices, Buffer<float3>, LP.vertexLists, meshID);
GET(const float3 A, float3, vertices, indices.x);
GET(const float3 B, float3, vertices, indices.y);
GET(const float3 C, float3, vertices, indices.z);
position = A * (1.f - (barycentrics.x + barycentrics.y)) + B * barycentrics.x + C * barycentrics.y;
geometricNormal = normalize(cross(B-A,C-A));
}
// Interpolates the texture coordinates at the hit point from the triangle's
// per-vertex UVs using the barycentric coordinates.
__device__
void loadMeshUVData(int meshID, int numTexCoords, int3 indices, float2 barycentrics, float2 &uv)
{
auto &LP = optixLaunchParams;
GET(Buffer<float2> texCoords, Buffer<float2>, LP.texCoordLists, meshID);
GET(const float2 A, float2, texCoords, indices.x);
GET(const float2 B, float2, texCoords, indices.y);
GET(const float2 C, float2, texCoords, indices.z);
uv = A * (1.f - (barycentrics.x + barycentrics.y)) + B * barycentrics.x + C * barycentrics.y;
}
// Interpolates the shading normal at the hit point from the triangle's
// per-vertex normals (stored as float4; w is ignored). Not renormalized here.
__device__
void loadMeshNormalData(int meshID, int numNormals, int3 indices, float2 barycentrics, float3 &normal)
{
auto &LP = optixLaunchParams;
GET(Buffer<float4> normals, Buffer<float4>, LP.normalLists, meshID);
GET(const float4 A, float4, normals, indices.x);
GET(const float4 B, float4, normals, indices.y);
GET(const float4 C, float4, normals, indices.z);
normal = make_float3(A) * (1.f - (barycentrics.x + barycentrics.y)) + make_float3(B) * barycentrics.x + make_float3(C) * barycentrics.y;
}
// Interpolates the tangent at the hit point from the triangle's per-vertex
// tangents (stored as float4; w is ignored). Not renormalized here.
__device__
void loadMeshTangentData(int meshID, int numTangents, int3 indices, float2 barycentrics, float3 &tangent)
{
auto &LP = optixLaunchParams;
GET(Buffer<float4> tangents, Buffer<float4>, LP.tangentLists, meshID);
GET(const float4 A, float4, tangents, indices.x);
GET(const float4 B, float4, tangents, indices.y);
GET(const float4 C, float4, tangents, indices.z);
tangent = make_float3(A) * (1.f - (barycentrics.x + barycentrics.y)) + make_float3(B) * barycentrics.x + make_float3(C) * barycentrics.y;
}
// Fills a DisneyMaterial by sampling every parameter texture at `uv`,
// falling back to the given defaults for unset slots. `roughnessMinimum`
// raises the roughness floors (used to regularize secondary bounces).
__device__
void loadDisneyMaterial(const MaterialStruct &p, float2 uv, DisneyMaterial &mat, float roughnessMinimum) {
    mat.base_color = sampleTexture(p.base_color_texture_id, uv, make_float3(.8f, .8f, .8f));
    mat.metallic = sampleTexture(p.metallic_texture_id, uv, p.metallic_texture_channel, .0f);
    mat.specular = sampleTexture(p.specular_texture_id, uv, p.specular_texture_channel, .5f);
    mat.roughness = sampleTexture(p.roughness_texture_id, uv, p.roughness_texture_channel, .5f);
    mat.specular_tint = sampleTexture(p.specular_tint_texture_id, uv, p.specular_tint_texture_channel, 0.f);
    mat.anisotropy = sampleTexture(p.anisotropic_texture_id, uv, p.anisotropic_texture_channel, 0.f);
    mat.sheen = sampleTexture(p.sheen_texture_id, uv, p.sheen_texture_channel, 0.f);
    mat.sheen_tint = sampleTexture(p.sheen_tint_texture_id, uv, p.sheen_tint_texture_channel, 0.5f);
    mat.clearcoat = sampleTexture(p.clearcoat_texture_id, uv, p.clearcoat_texture_channel, 0.f);
    float clearcoat_roughness = sampleTexture(p.clearcoat_roughness_texture_id, uv, p.clearcoat_roughness_texture_channel, 0.3f);
    mat.ior = sampleTexture(p.ior_texture_id, uv, p.ior_texture_channel, 1.45f);
    mat.specular_transmission = sampleTexture(p.transmission_texture_id, uv, p.transmission_texture_channel, 0.f);
    mat.flatness = sampleTexture(p.subsurface_texture_id, uv, p.subsurface_texture_channel, 0.f);
    mat.subsurface_color = sampleTexture(p.subsurface_color_texture_id, uv, make_float3(0.8f, 0.8f, 0.8f));
    mat.transmission_roughness = sampleTexture(p.transmission_roughness_texture_id, uv, p.transmission_roughness_texture_channel, 0.f);
    mat.alpha = sampleTexture(p.alpha_texture_id, uv, p.alpha_texture_channel, 1.f);
    // Clamp roughness values to the global floor and the caller's minimum.
    mat.transmission_roughness = max(max(mat.transmission_roughness, MIN_ROUGHNESS), roughnessMinimum);
    mat.roughness = max(max(mat.roughness, MIN_ROUGHNESS), roughnessMinimum);
    clearcoat_roughness = max(clearcoat_roughness, roughnessMinimum);
    // Float literal: the original `1.0 - ...` promoted this expression to double.
    mat.clearcoat_gloss = 1.0f - clearcoat_roughness * clearcoat_roughness;
}
__device__
float sampleTime(float xi) {
    // Map a uniform random number xi in [0,1) onto the configured shutter
    // interval [timeSamplingInterval[0], timeSamplingInterval[1]].
    auto &LP = optixLaunchParams;
    const float t0 = LP.timeSamplingInterval[0];
    const float t1 = LP.timeSamplingInterval[1];
    return t0 + (t1 - t0) * xi;
}
inline __device__
// Builds a primary camera ray for one pixel: interpolates the camera pose at
// the sampled shutter `time` (motion blur), jitters the sample within the
// pixel (anti-aliasing), and optionally offsets the origin on the lens
// aperture for depth of field.
// NOTE(review): `transform` is not read in this body — pose comes from
// LP.viewT0/LP.viewT1 instead; confirm the parameter is intentionally unused.
owl::Ray generateRay(const CameraStruct &camera, const TransformStruct &transform, int2 pixelID, float2 frameSize, LCGRand &rng, float time)
{
auto &LP = optixLaunchParams;
/* Generate camera rays */
// Decompose both stored view matrices into rotation (quaternion) and
// translation, then mix/slerp at `time`. The equality test skips the slerp
// when the camera is static.
glm::quat r0 = glm::quat_cast(LP.viewT0);
glm::quat r1 = glm::quat_cast(LP.viewT1);
glm::vec4 p0 = glm::column(LP.viewT0, 3);
glm::vec4 p1 = glm::column(LP.viewT1, 3);
glm::vec4 pos = glm::mix(p0, p1, time);
glm::quat rot = (glm::all(glm::equal(r0, r1))) ? r0 : glm::slerp(r0, r1, time);
// Reassemble the interpolated camera-local-to-world matrix.
glm::mat4 camLocalToWorld = glm::mat4_cast(rot);
camLocalToWorld = glm::column(camLocalToWorld, 3, pos);
// Inverse projection maps NDC back to camera space; inverse view maps
// camera space to world space.
mat4 projinv = glm::inverse(LP.proj);
mat4 viewinv = glm::inverse(camLocalToWorld);
// Sub-pixel jitter drawn uniformly from the configured sampling interval
// (typically [0,1) within the pixel) for anti-aliasing.
vec2 aa = vec2(LP.xPixelSamplingInterval[0], LP.yPixelSamplingInterval[0])
+ (vec2(LP.xPixelSamplingInterval[1], LP.yPixelSamplingInterval[1])
- vec2(LP.xPixelSamplingInterval[0], LP.yPixelSamplingInterval[0])
) * vec2(lcg_randomf(rng),lcg_randomf(rng));
vec2 inUV = (vec2(pixelID.x, pixelID.y) + aa) / make_vec2(frameSize);
// Camera basis vectors and position, read from the columns of the
// world-from-camera matrix.
vec3 right = normalize(glm::column(viewinv, 0));
vec3 up = normalize(glm::column(viewinv, 1));
vec3 origin = glm::column(viewinv, 3);
float cameraLensRadius = camera.apertureDiameter;
vec3 p(0.f);
// Rejection-sample a point on the unit disk for the thin-lens aperture;
// skipped entirely (p stays 0) for a pinhole camera.
if (cameraLensRadius > 0.0) {
do {
p = 2.0f*vec3(lcg_randomf(rng),lcg_randomf(rng),0.f) - vec3(1.f,1.f,0.f);
} while (dot(p,p) >= 1.0f);
}
vec3 rd = cameraLensRadius * p;
// NOTE(review): the lens offset is divided by the frame size, making the
// effective aperture resolution-dependent — confirm intended units.
vec3 lens_offset = (right * rd.x) / float(frameSize.x) + (up * rd.y) / float(frameSize.y);
origin = origin + lens_offset;
// Pixel UV -> NDC in [-1,1]; flip y so image rows run top-to-bottom.
vec2 dir = inUV * 2.f - 1.f; dir.y *= -1.f;
// Unproject the NDC point at the near plane and perspective-divide.
vec4 t = (projinv * vec4(dir.x, dir.y, -1.f, 1.f));
vec3 target = vec3(t) / float(t.w);
// Direction to the focal plane: rotate into world space, scale by the focal
// distance, then subtract the lens offset so rays converge at focus.
vec3 direction = normalize(vec3(viewinv * vec4(target, 0.f))) * camera.focalDistance;
direction = normalize(direction - lens_offset);
owl::Ray ray;
ray.tmin = .001f;
ray.tmax = 1e20f;//10000.0f;
ray.origin = make_float3(origin) ;
ray.direction = make_float3(direction);
return ray;
}
__device__
void initializeRenderData(float3 &renderData)
{
    // Seed the auxiliary render-data (AOV) channel with the sentinel that the
    // active mode expects before any hit is shaded. Modes not listed here
    // leave renderData untouched.
    auto &LP = optixLaunchParams;
    const auto mode = LP.renderDataMode;
    if (mode == RenderDataFlags::DEPTH ||
        mode == RenderDataFlags::POSITION) {
        // Sentinel meaning "no surface recorded yet".
        renderData = make_float3(-FLT_MAX);
    }
    else if (mode == RenderDataFlags::ENTITY_ID) {
        renderData = make_float3(FLT_MAX);
    }
    else if (mode == RenderDataFlags::DIFFUSE_MOTION_VECTORS) {
        renderData = make_float3(0.0f, 0.0f, -1.0f);
    }
    else if (mode == RenderDataFlags::NONE ||
             mode == RenderDataFlags::NORMAL ||
             mode == RenderDataFlags::TANGENT ||
             mode == RenderDataFlags::SCREEN_SPACE_NORMAL ||
             mode == RenderDataFlags::BASE_COLOR ||
             mode == RenderDataFlags::TEXTURE_COORDINATES ||
             mode == RenderDataFlags::HEATMAP) {
        renderData = make_float3(0.0f);
    }
}
__device__
void saveLightingColorRenderData (
    float3 &renderData, int bounce,
    float3 w_n, float3 w_o, float3 w_i,
    DisneyMaterial &mat
)
{
    // Record the per-lobe reflectance color (diffuse / glossy / transmission)
    // into the AOV channel, but only at the bounce selected for export.
    auto &LP = optixLaunchParams;
    if (LP.renderDataMode == RenderDataFlags::NONE) return;
    if (bounce != LP.renderDataBounce) return;
    // Note, dillum and iillum are expected to change outside this function
    // depending on the render data flags.
    // Half vector shared by all three lobe evaluations.
    const float3 w_h = normalize(w_o + w_i);
    if (LP.renderDataMode == RenderDataFlags::DIFFUSE_COLOR) {
        renderData = disney_diffuse_color(mat, w_n, w_o, w_i, w_h);
    }
    else if (LP.renderDataMode == RenderDataFlags::GLOSSY_COLOR) {
        renderData = disney_microfacet_reflection_color(mat, w_n, w_o, w_i, w_h);
    }
    else if (LP.renderDataMode == RenderDataFlags::TRANSMISSION_COLOR) {
        renderData = disney_microfacet_transmission_color(mat, w_n, w_o, w_i, w_h);
    }
}
__device__
void saveLightingIrradianceRenderData(
    float3 &renderData, int bounce,
    float3 dillum, float3 iillum,
    int sampledBsdf)
{
    // Record direct or indirect irradiance into the AOV channel at the
    // selected bounce. All three *_DIRECT modes store dillum and all three
    // *_INDIRECT modes store iillum; the caller is expected to have already
    // filtered dillum/iillum by lobe (sampledBsdf is not read here).
    auto &LP = optixLaunchParams;
    const auto mode = LP.renderDataMode;
    if (mode == RenderDataFlags::NONE) return;
    if (bounce != LP.renderDataBounce) return;
    const bool wantsDirect =
        mode == RenderDataFlags::DIFFUSE_DIRECT_LIGHTING ||
        mode == RenderDataFlags::GLOSSY_DIRECT_LIGHTING ||
        mode == RenderDataFlags::TRANSMISSION_DIRECT_LIGHTING;
    const bool wantsIndirect =
        mode == RenderDataFlags::DIFFUSE_INDIRECT_LIGHTING ||
        mode == RenderDataFlags::GLOSSY_INDIRECT_LIGHTING ||
        mode == RenderDataFlags::TRANSMISSION_INDIRECT_LIGHTING;
    if (wantsDirect)        renderData = dillum;
    else if (wantsIndirect) renderData = iillum;
}
__device__
void saveMissRenderData(
    float3 &renderData,
    int bounce,
    float3 mvec)
{
    // Environment misses only contribute to the motion-vector AOV, and only
    // at the bounce selected for render-data export.
    auto &LP = optixLaunchParams;
    const bool active =
        (LP.renderDataMode == RenderDataFlags::DIFFUSE_MOTION_VECTORS) &&
        (bounce == LP.renderDataBounce);
    if (active) renderData = mvec;
}
__device__
void saveGeometricRenderData(
    float3 &renderData,
    int bounce, float depth,
    float3 w_p, float3 w_n, float3 w_x, float3 w_o, float2 uv,
    int entity_id, float3 diffuse_mvec, float time,
    DisneyMaterial &mat)
{
    // Write the geometric quantity requested by the active G-buffer mode
    // (depth, position, normal, ids, motion vectors, ...), but only at the
    // bounce selected for render-data export.
    auto &LP = optixLaunchParams;
    const auto mode = LP.renderDataMode;
    if (mode == RenderDataFlags::NONE) return;
    if (bounce != LP.renderDataBounce) return;
    if (mode == RenderDataFlags::DEPTH) {
        renderData = make_float3(depth);
    }
    else if (mode == RenderDataFlags::POSITION) {
        renderData = w_p;
    }
    else if (mode == RenderDataFlags::NORMAL) {
        renderData = w_n;
    }
    else if (mode == RenderDataFlags::TANGENT) {
        renderData = w_x;
    }
    else if (mode == RenderDataFlags::SCREEN_SPACE_NORMAL) {
        // Rotate the world-space normal by the view rotation (slerped to the
        // sampled shutter time), then push it through the projection.
        glm::quat q0 = glm::quat_cast(LP.viewT0);
        glm::quat q1 = glm::quat_cast(LP.viewT1);
        glm::quat qt = (glm::all(glm::equal(q0, q1))) ? q0 : glm::slerp(q0, q1, time);
        vec3 ssn = normalize(glm::mat3_cast(qt) * make_vec3(w_n));
        ssn = normalize(vec3(LP.proj * vec4(ssn, 0.f)));
        renderData = make_float3(ssn);
    }
    else if (mode == RenderDataFlags::ENTITY_ID) {
        renderData = make_float3(float(entity_id));
    }
    else if (mode == RenderDataFlags::DIFFUSE_MOTION_VECTORS) {
        renderData = diffuse_mvec;
    }
    else if (mode == RenderDataFlags::BASE_COLOR) {
        renderData = mat.base_color;
    }
    else if (mode == RenderDataFlags::TEXTURE_COORDINATES) {
        renderData = make_float3(uv.x, uv.y, 0.f);
    }
    else if (mode == RenderDataFlags::RAY_DIRECTION) {
        renderData = -w_o;
    }
}
__device__
void saveHeatmapRenderData(
    float3 &renderData,
    int bounce,
    uint64_t start_clock
)
{
    // Visualize per-ray cost: map elapsed device clocks since `start_clock`
    // to [0,1], saturating at 1e7 clocks. `bounce` is currently unused
    // (the heatmap accumulates over the whole path).
    auto &LP = optixLaunchParams;
    if (LP.renderDataMode != RenderDataFlags::HEATMAP) return;
    uint64_t elapsed = clock() - start_clock;
    float heat = elapsed / 10000000.f;
    heat = min(heat, 1.f);
    renderData = make_float3(heat);
}
__device__
void saveDeviceAssignment(
    float3 &renderData,
    int bounce,
    uint32_t deviceIndex
)
{
    // Visualize which GPU rendered each pixel. Only active in DEVICE_ID mode;
    // `bounce` is currently unused.
    auto &LP = optixLaunchParams;
    if (LP.renderDataMode == RenderDataFlags::DEVICE_ID) {
        renderData = make_float3(deviceIndex);
    }
}
__device__
bool debugging() {
    // True only for the center pixel of the frame, and only in builds where
    // DEBUGGING is defined; otherwise always false.
#ifdef DEBUGGING
    auto &LP = optixLaunchParams;
    auto pixelID = ivec2(owl::getLaunchIndex()[0], owl::getLaunchIndex()[1]);
    return glm::all(glm::equal(pixelID, ivec2(LP.frameSize.x / 2, LP.frameSize.y / 2)));
#else
    return false;
#endif
}
OPTIX_RAYGEN_PROGRAM(rayGen)()
{
const RayGenData &self = owl::getProgramData<RayGenData>();
auto &LP = optixLaunchParams;
auto launchIndex = optixGetLaunchIndex().x;
auto launchDim = optixGetLaunchDimensions().x;
auto pixelID = make_int2(launchIndex % LP.frameSize.x, launchIndex / LP.frameSize.x);
// Terminate thread if current pixel not assigned to this device
GET(float start, float, LP.assignmentBuffer, self.deviceIndex);
GET(float stop, float, LP.assignmentBuffer, self.deviceIndex + 1);
start *= (LP.frameSize.x * LP.frameSize.y);
stop *= (LP.frameSize.x * LP.frameSize.y);
// if (launchIndex == 0) {
// printf("device %d start %f stop %f\n", self.deviceIndex, start, stop);
// }
if( pixelID.x > LP.frameSize.x-1 || pixelID.y > LP.frameSize.y-1 ) return;
if( (launchIndex < start) || (stop <= launchIndex) ) return;
// if (self.deviceIndex == 1) return;
cudaTextureObject_t envTex = getEnvironmentTexture();
bool debug = (pixelID.x == int(LP.frameSize.x / 2) && pixelID.y == int(LP.frameSize.y / 2));
float tmax = 1e20f; //todo: customize depending on scene bounds //glm::distance(LP.sceneBBMin, LP.sceneBBMax);
auto dims = ivec2(LP.frameSize.x, LP.frameSize.x);
uint64_t start_clock = clock();
int numLights = LP.numLightEntities;
int numLightSamples = LP.numLightSamples;
bool enableDomeSampling = LP.enableDomeSampling;
LCGRand rng = get_rng(LP.frameID + LP.seed * 10007, make_uint2(pixelID.x, pixelID.y), make_uint2(dims.x, dims.y));
float time = sampleTime(lcg_randomf(rng));
// If no camera is in use, just display some random noise...
owl::Ray ray;
EntityStruct camera_entity;
TransformStruct camera_transform;
CameraStruct camera;
if (!loadCamera(camera_entity, camera, camera_transform)) {
auto fbOfs = pixelID.x+LP.frameSize.x * ((LP.frameSize.y - 1) - pixelID.y);
LP.frameBuffer[fbOfs] = vec4(lcg_randomf(rng), lcg_randomf(rng), lcg_randomf(rng), 1.f);
return;
}
// Trace an initial ray through the scene
ray = generateRay(camera, camera_transform, pixelID, make_float2(LP.frameSize), rng, time);
ray.tmax = tmax;
float3 accum_illum = make_float3(0.f);
float3 pathThroughput = make_float3(1.f);
float3 renderData = make_float3(0.f);
float3 primaryAlbedo = make_float3(0.f);
float3 primaryNormal = make_float3(0.f);
initializeRenderData(renderData);
uint8_t depth = 0;
uint8_t diffuseDepth = 0;
uint8_t glossyDepth = 0;
uint8_t transparencyDepth = 0;
uint8_t transmissionDepth = 0;
uint8_t volumeDepth = 0;
int sampledBsdf = -1;
bool useBRDF = true;
// direct here is used for final image clamping
float3 directIllum = make_float3(0.f);
float3 illum = make_float3(0.f);
RayPayload payload;
payload.tHit = -1.f;
ray.time = time;
ray.visibilityMask = ENTITY_VISIBILITY_CAMERA_RAYS;
owl::traceRay( /*accel to trace against*/ LP.IAS,
/*the ray to trace*/ ray,
/*prd*/ payload,
OPTIX_RAY_FLAG_DISABLE_ANYHIT);
// Shade each hit point on a path using NEE with MIS
do {
float alpha = 0.f;
// If ray misses, terminate the ray
if (payload.tHit <= 0.f) {
// Compute lighting from environment
if (depth == 0) {
float3 col = missColor(ray, envTex);
illum = illum + pathThroughput * (col * LP.domeLightIntensity);
directIllum = illum;
primaryAlbedo = col;
}
else if (enableDomeSampling)
illum = illum + pathThroughput * (missColor(ray, envTex) * LP.domeLightIntensity * pow(2.f, LP.domeLightExposure));
const float envDist = 10000.0f; // large value
/* Compute miss motion vector */
float3 mvec;
// Point far away
float3 pFar = ray.origin + ray.direction * envDist;
// TODO: account for motion from rotating dome light
vec4 tmp1 = LP.proj * LP.viewT0 * /*xfmt0 **/ make_vec4(pFar, 1.0f);
float3 pt0 = make_float3(tmp1 / tmp1.w) * .5f;
vec4 tmp2 = LP.proj * LP.viewT1 * /*xfmt1 **/ make_vec4(pFar, 1.0f);
float3 pt1 = make_float3(tmp2 / tmp2.w) * .5f;
mvec = pt1 - pt0;
saveMissRenderData(renderData, depth, mvec);
break;
}
// Load the object we hit.
GET(int entityID, int, LP.instanceToEntity, payload.instanceID);
GET(EntityStruct entity, EntityStruct, LP.entities, entityID);
GET(TransformStruct transform, TransformStruct, LP.transforms, entity.transform_id);
bool isVolume = (entity.volume_id != -1);
MeshStruct mesh;
VolumeStruct volume;
if (!isVolume) { GET(mesh, MeshStruct, LP.meshes, entity.mesh_id); }
else { GET(volume, VolumeStruct, LP.volumes, entity.volume_id); }
// Set new outgoing light direction and hit position.
const float3 w_o = -ray.direction;
float3 hit_p = ray.origin + payload.tHit * ray.direction;
// Load geometry data for the hit object
float3 mp, p, v_x, v_y, v_z, v_gz, v_bz;
float2 uv;
float3 diffuseMotion;
if (isVolume) {
v_x = v_y = make_float3(0.f); // Perhaps I could use divergence / curl here?
v_z = v_gz = normalize(payload.gradient);
if (any(isnan(make_vec3(v_z)))) v_z = v_gz = make_float3(0.f);
mp = payload.mp;
uv = make_float2(payload.density, length(payload.gradient));
}
else {
int3 indices;
loadMeshTriIndices(entity.mesh_id, mesh.numTris, payload.primitiveID, indices);
loadMeshVertexData(entity.mesh_id, mesh.numVerts, indices, payload.barycentrics, mp, v_gz);
loadMeshUVData(entity.mesh_id, mesh.numVerts, indices, payload.barycentrics, uv);
loadMeshNormalData(entity.mesh_id, mesh.numVerts, indices, payload.barycentrics, v_z);
loadMeshTangentData(entity.mesh_id, mesh.numVerts, indices, payload.barycentrics, v_x);
}
// Load material data for the hit object
DisneyMaterial mat; MaterialStruct entityMaterial;
if (entity.material_id >= 0 && entity.material_id < LP.materials.count) {
GET(entityMaterial, MaterialStruct, LP.materials, entity.material_id);
loadDisneyMaterial(entityMaterial, uv, mat, MIN_ROUGHNESS);
}
// Transform geometry data into world space
{
glm::mat4 xfm = to_mat4(payload.localToWorld);
p = make_float3(xfm * make_vec4(mp, 1.0f));
hit_p = p;
glm::mat3 nxfm = transpose(glm::inverse(glm::mat3(xfm)));
v_gz = make_float3(normalize(nxfm * make_vec3(v_gz)));
v_z = make_float3(normalize(nxfm * make_vec3(v_z)));
v_x = make_float3(normalize(nxfm * make_vec3(v_x)));
v_y = cross(v_z, v_x);
// v_x = cross(v_y, v_z);
if (LP.renderDataMode != RenderDataFlags::NONE) {
glm::mat4 xfmt0 = to_mat4(payload.localToWorldT0);
glm::mat4 xfmt1 = to_mat4(payload.localToWorldT1);
vec4 tmp1 = LP.proj * LP.viewT0 * xfmt0 * make_vec4(mp, 1.0f);
vec4 tmp2 = LP.proj * LP.viewT1 * xfmt1 * make_vec4(mp, 1.0f);
float3 pt0 = make_float3(tmp1 / tmp1.w) * .5f;
float3 pt1 = make_float3(tmp2 / tmp2.w) * .5f;
diffuseMotion = pt1 - pt0;
} else {
diffuseMotion = make_float3(0.f, 0.f, 0.f);
}
}
// Fallback for tangent and bitangent if UVs result in degenerate vectors.
if (
all(lessThan(abs(make_vec3(v_x)), vec3(EPSILON))) ||
all(lessThan(abs(make_vec3(v_y)), vec3(EPSILON))) ||
any(isnan(make_vec3(v_x))) ||
any(isnan(make_vec3(v_y)))
) {
ortho_basis(v_x, v_y, v_z);
}
// Construct TBN matrix, sample normal map
{
glm::mat3 tbn;
tbn = glm::column(tbn, 0, make_vec3(v_x) );
tbn = glm::column(tbn, 1, make_vec3(v_y) );
tbn = glm::column(tbn, 2, make_vec3(v_z) );
float3 dN;
if (entity.light_id >= 0 && entity.light_id < LP.lights.count) {
dN = make_float3(0.5f, .5f, 1.f);
} else {
dN = sampleTexture(entityMaterial.normal_map_texture_id, uv, make_float3(0.5f, .5f, 0.f));
GET(TextureStruct tex, TextureStruct, LP.textures, entityMaterial.normal_map_texture_id);
// For DirectX normal maps.
// if (!tex.rightHanded) {
// dN.y = 1.f - dN.y;
// }
}
dN = normalize( (dN * make_float3(2.0f)) - make_float3(1.f) );
v_z = make_float3(tbn * make_vec3(dN));
// make sure geometric and shading normal face the same direction.
if (dot(v_z, v_gz) < 0.f) {
v_z = -v_z;
}
v_bz = v_z;
}
// // TEMP CODE
// auto fbOfs = pixelID.x+LP.frameSize.x * ((LP.frameSize.y - 1) - pixelID.y);
// LP.frameBuffer[fbOfs] = make_vec4(v_z, 1.f);
// return;
// If we didn't hit glass, flip the surface normal to face forward.
if ((mat.specular_transmission == 0.f) && (entity.light_id == -1)) {
if (dot(w_o, v_gz) < 0.f) {
v_z = -v_z;
v_gz = -v_gz;
}
// compute bent normal
float3 r = reflect(-w_o, v_z);
float a = dot(v_gz, r);
v_bz = v_z;
if (a < 0.f) {
float b = max(0.001f, dot(v_z, v_gz));
v_bz = normalize(w_o + normalize(r - v_z * a / b));
}
}
if (any(isnan(make_vec3(v_z)))) {
// Since gradient can be 0, normalizing can cause nans.
// Doesn't really matter, since 0 length normals result in a phase function (no surface present).
v_z = v_x = v_y = make_float3(0.f);
}
// For segmentations, save geometric metadata
saveGeometricRenderData(renderData, depth, payload.tHit, hit_p, v_z, v_x, w_o, uv, entityID, diffuseMotion, time, mat);
if (depth == 0) {
primaryAlbedo = mat.base_color;
primaryNormal = v_z;
}
// Potentially skip forward if the hit object is transparent
if ((entity.light_id == -1) && (mat.alpha < 1.f)) {
float alpha_rnd = lcg_randomf(rng);
if (alpha_rnd > mat.alpha) {
ray.origin = ray.origin + ray.direction * (payload.tHit + EPSILON);
payload.tHit = -1.f;
ray.time = time;
// ray.visibilityMask reuses the last visibility mask here
owl::traceRay( LP.IAS, ray, payload, OPTIX_RAY_FLAG_DISABLE_ANYHIT);
++depth;
transparencyDepth++;
continue;
}
}
// If the entity we hit is a light, terminate the path.
// Note that NEE/MIS will also potentially terminate the path, preventing double-counting.
// todo: account for volumetric emission here...
if (entity.light_id >= 0 && entity.light_id < LP.lights.count) {
float dotNWi = max(dot(ray.direction, v_z), 0.f);
if ((dotNWi > EPSILON) && (depth != 0)) break;
GET(LightStruct entityLight, LightStruct, LP.lights, entity.light_id);
float3 lightEmission;
if (entityLight.color_texture_id == -1) lightEmission = make_float3(entityLight.r, entityLight.g, entityLight.b);
else lightEmission = sampleTexture(entityLight.color_texture_id, uv, make_float3(0.f, 0.f, 0.f));
float dist = payload.tHit;
lightEmission = (lightEmission * entityLight.intensity);
if (depth != 0) lightEmission = (lightEmission * pow(2.f, entityLight.exposure)) / max((dist * dist), 1.f);
float3 contribution = pathThroughput * lightEmission;
illum = illum + contribution;
if (depth == 0) directIllum = illum;
break;
}
// Next, we'll be sampling direct light sources
int32_t sampledLightID = -2;
float lightPDF = 0.f;
float3 irradiance = make_float3(0.f);
// If we hit a volume, use hybrid scattering to determine whether or not to use a BRDF or a phase function.
if (isVolume) {
float opacity = mat.alpha; // would otherwise be sampled from a transfer function
float grad_len = uv.y;
float p_brdf = opacity * (1.f - exp(-25.f * pow(volume.gradient_factor, 3.f) * grad_len));
float pdf;
float rand_brdf = lcg_randomf(rng);
if (rand_brdf < p_brdf) {
useBRDF = true;
} else {
useBRDF = false;
}
}
// First, sample the BRDF / phase function so that we can use the sampled direction for MIS
float3 w_i;
float bsdfPDF;
float3 bsdf;
if (useBRDF) {
sample_disney_brdf(
mat, rng, v_gz, v_z, v_bz, v_x, v_y, w_o, // inputs
w_i, bsdfPDF, sampledBsdf, bsdf); // outputs
} else {
/* a scatter event occurred */
if (payload.eventID == 2) {
// currently isotropic. Todo: implement henyey greenstien...
float rand1 = lcg_randomf(rng);
float rand2 = lcg_randomf(rng);
// Sample isotropic phase function to get new ray direction
float phi = 2.0f * M_PI * rand1;
float cos_theta = 1.0f - 2.0f * rand2;
float sin_theta = sqrt (1.0f - cos_theta * cos_theta);
bsdfPDF = 1.f / (4.0 * M_PI);
bsdf = make_float3(1.f / (4.0 * M_PI));
w_i = make_float3(cos(phi) * sin_theta, sin(phi) * sin_theta, cos_theta);
}
/* An absorption / emission event occurred */
if (payload.eventID == 1) {
bsdfPDF = 1.f / (4.0 * M_PI);
bsdf = make_float3(1.f / (4.0 * M_PI));
w_i = -w_o;
}
// For all events, modify throughput by base color.
bsdf = bsdf * mat.base_color;
}
// At this point, if we are refracting and we ran out of transmission bounces, skip forward.
// This avoids creating black regions on glass objects due to bounce limits
if (sampledBsdf == DISNEY_TRANSMISSION_BRDF && transmissionDepth >= LP.maxTransmissionDepth) {
ray.origin = ray.origin + ray.direction * (payload.tHit + EPSILON);
payload.tHit = -1.f;
ray.time = time;
// ray.visibilityMask reuses the last visibility mask here
owl::traceRay( LP.IAS, ray, payload, OPTIX_RAY_FLAG_DISABLE_ANYHIT);
// Count this as a "transparent" bounce.
++depth;
transparencyDepth++;
continue;
}
// Next, sample the light source by importance sampling the light
const uint32_t occlusion_flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT;
uint32_t randmax = (enableDomeSampling) ? numLights + 1 : numLights;
uint32_t randomID = uint32_t(min(lcg_randomf(rng) * randmax, float(randmax-1)));
float dotNWi = 0.f;
float3 l_bsdf = make_float3(0.f);
float3 lightEmission = make_float3(0.f);
float3 lightDir = make_float3(0.f);
float lightDistance = 1e20f;
float falloff = 2.0f;
int numTris = 0;
// sample background
if (randomID == numLights) {
sampledLightID = -1;
if (
(LP.environmentMapWidth != 0) && (LP.environmentMapHeight != 0) &&
(LP.environmentMapRows != nullptr) && (LP.environmentMapCols != nullptr)
)
{
// Reduces noise for strangely noisy dome light textures, but at the expense
// of a highly uncoalesced binary search through a 2D CDF.
// disabled by default to avoid the hit to performance
float rx = lcg_randomf(rng);
float ry = lcg_randomf(rng);
float* rows = LP.environmentMapRows;
float* cols = LP.environmentMapCols;
int width = LP.environmentMapWidth;
int height = LP.environmentMapHeight;
float invjacobian = width * height / float(4 * M_PI);
float row_pdf, col_pdf;
unsigned x, y;
ry = sample_cdf(rows, height, ry, &y, &row_pdf);
y = max(min(y, height - 1), 0);
rx = sample_cdf(cols + y * width, width, rx, &x, &col_pdf);
lightDir = make_float3(toPolar(vec2((x /*+ rx*/) / float(width), (y/* + ry*/)/float(height))));
lightDir = glm::inverse(LP.environmentMapRotation) * lightDir;
lightPDF = row_pdf * col_pdf * invjacobian;
}
else
{
glm::mat3 tbn;
tbn = glm::column(tbn, 0, make_vec3(v_x) );
tbn = glm::column(tbn, 1, make_vec3(v_y) );
tbn = glm::column(tbn, 2, make_vec3(v_z) );
const float3 hemi_dir = (cos_sample_hemisphere(make_float2(lcg_randomf(rng), lcg_randomf(rng))));
lightDir = make_float3(tbn * make_vec3(hemi_dir));
lightPDF = 1.f / float(2.0 * M_PI);
}
numTris = 1.f;
lightEmission = (missColor(lightDir, envTex) * LP.domeLightIntensity * pow(2.f, LP.domeLightExposure));
}
// sample light sources
else
{
// Sample the light to compute an incident light ray to this point
if (numLights == 0) continue;
GET( sampledLightID, int, LP.lightEntities, randomID );
GET( EntityStruct light_entity, EntityStruct, LP.entities, sampledLightID );
GET( LightStruct light_light, LightStruct, LP.lights, light_entity.light_id );
GET( TransformStruct transform, TransformStruct, LP.transforms, light_entity.transform_id );
auto <w = transform.localToWorld;
float3 dir; float2 uv;
float3 pos = hit_p;
// The sampled light is a point light
if ((light_entity.mesh_id < 0) || (light_entity.mesh_id >= LP.meshes.count)) {
numTris = 1.f;
float3 tmp = make_float3(ltw[3]) - pos;
lightDistance = length(tmp);
dir = tmp / lightDistance;
lightPDF = PdfAtoW(1.f/(4.f * M_PI), lightDistance * lightDistance, 1.f);
uv = make_float2(0.f, 0.f);
}
// The sampled light is a mesh light
else {
GET( MeshStruct mesh, MeshStruct, LP.meshes, light_entity.mesh_id );
uint32_t random_tri_id = uint32_t(min(lcg_randomf(rng) * mesh.numTris, float(mesh.numTris - 1)));
GET( Buffer<int3> indices, Buffer<int3>, LP.indexLists, light_entity.mesh_id );
GET( Buffer<float3> vertices, Buffer<float3>, LP.vertexLists, light_entity.mesh_id );
GET( Buffer<float4> normals, Buffer<float4>, LP.normalLists, light_entity.mesh_id );
GET( Buffer<float2> texCoords, Buffer<float2>, LP.texCoordLists, light_entity.mesh_id );
GET( int3 triIndex, int3, indices, random_tri_id );
GET(float3 n1, float3, normals, triIndex.x );
GET(float3 n2, float3, normals, triIndex.y );
GET(float3 n3, float3, normals, triIndex.z );
GET(float3 v1, float3, vertices, triIndex.x );
GET(float3 v2, float3, vertices, triIndex.y );
GET(float3 v3, float3, vertices, triIndex.z );
GET(float2 uv1, float2, texCoords, triIndex.x );
GET(float2 uv2, float2, texCoords, triIndex.y );
GET(float2 uv3, float2, texCoords, triIndex.z );
// Might be a bug here with normal transform...
n1 = make_float3(ltw * make_float4(n1, 0.0f));
n2 = make_float3(ltw * make_float4(n2, 0.0f));
n3 = make_float3(ltw * make_float4(n3, 0.0f));
v1 = make_float3(ltw * make_float4(v1, 1.0f));
v2 = make_float3(ltw * make_float4(v2, 1.0f));
v3 = make_float3(ltw * make_float4(v3, 1.0f));
sampleTriangle(pos, n1, n2, n3, v1, v2, v3, uv1, uv2, uv3,
lcg_randomf(rng), lcg_randomf(rng), dir, lightDistance, lightPDF, uv,
/*double_sided*/ false, /*use surface area*/ light_light.use_surface_area);
numTris = mesh.numTris;
}
falloff = light_light.falloff;
lightDir = make_float3(dir.x, dir.y, dir.z);
if (light_light.color_texture_id == -1) lightEmission = make_float3(light_light.r, light_light.g, light_light.b) * (light_light.intensity * pow(2.f, light_light.exposure));
else lightEmission = sampleTexture(light_light.color_texture_id, uv, make_float3(0.f, 0.f, 0.f)) * (light_light.intensity * pow(2.f, light_light.exposure));
}
if (useBRDF) {
disney_brdf(
mat, v_gz, v_z, v_bz, v_x, v_y,
w_o, lightDir, normalize(w_o + lightDir), l_bsdf
);
dotNWi = max(dot(lightDir, v_z), 0.f);
// auto fbOfs = pixelID.x+LP.frameSize.x * ((LP.frameSize.y - 1) - pixelID.y);
// LP.frameBuffer[fbOfs] = vec4(l_bsdf.x, l_bsdf.y, l_bsdf.z, 1.f);
// return;
} else {
// currently isotropic. Todo: implement henyey greenstien...
l_bsdf = make_float3(1.f / (4.0 * M_PI)) * mat.base_color;
dotNWi = 1.f; // no geom term for phase function
}
lightPDF *= (1.f / float(numLights + 1.f)) * (1.f / float(numTris));
if ((lightPDF > 0.0) && (dotNWi > EPSILON)) {
RayPayload payload; payload.instanceID = -2;
RayPayload volPayload = payload;
owl::RayT</*type*/1, /*prd*/1> ray; // shadow ray
ray.tmin = EPSILON * 10.f; ray.tmax = lightDistance + EPSILON; // needs to be distance to light, else anyhit logic breaks.
ray.origin = hit_p; ray.direction = lightDir;
ray.time = time;
ray.visibilityMask = ENTITY_VISIBILITY_SHADOW_RAYS;
owl::traceRay( LP.IAS, ray, payload, occlusion_flags);
ray.tmax = (payload.instanceID == -2) ? ray.tmax : payload.tHit;
bool visible;
if (randomID == numLights) {
// If we sampled the dome light, just check to see if we hit anything
visible = (payload.instanceID == -2);
} else {
// If we sampled a light source, then check to see if we hit something other than the light
int surfEntity;
if (payload.instanceID == -2) surfEntity = -1;
else { GET(surfEntity, int, LP.instanceToEntity, payload.instanceID); }
visible = (payload.instanceID == -2 || surfEntity == sampledLightID);
}
if (visible) {
if (randomID != numLights) lightEmission = lightEmission / max(pow(payload.tHit, falloff),1.f);
float w = power_heuristic(1.f, lightPDF, 1.f, bsdfPDF);
float3 Li = (lightEmission * w) / lightPDF;
irradiance = irradiance + (l_bsdf * Li);
}
}
// For segmentations, save lighting metadata
saveLightingColorRenderData(renderData, depth, v_z, w_o, w_i, mat);
// Terminate the path if the bsdf probability is impossible, or if the bsdf filters out all light
if (bsdfPDF < EPSILON || all_zero(bsdf)) {
float3 contribution = pathThroughput * irradiance;
illum = illum + contribution;
break;
}
// Next, sample a light source using the importance sampled BDRF direction.
ray.origin = hit_p;
ray.direction = w_i;
ray.tmin = EPSILON;//* 100.f;
payload.instanceID = -1;
payload.tHit = -1.f;
ray.time = sampleTime(lcg_randomf(rng));
if (isVolume) ray.visibilityMask = ENTITY_VISIBILITY_VOLUME_SCATTER_RAYS;
else if (sampledBsdf == DISNEY_TRANSMISSION_BRDF) ray.visibilityMask = ENTITY_VISIBILITY_TRANSMISSION_RAYS;
else if (sampledBsdf == DISNEY_DIFFUSE_BRDF) ray.visibilityMask = ENTITY_VISIBILITY_DIFFUSE_RAYS;
else if (sampledBsdf == DISNEY_GLOSSY_BRDF) ray.visibilityMask = ENTITY_VISIBILITY_GLOSSY_RAYS;
else if (sampledBsdf == DISNEY_CLEARCOAT_BRDF) ray.visibilityMask = ENTITY_VISIBILITY_GLOSSY_RAYS;
owl::traceRay(LP.IAS, ray, payload, OPTIX_RAY_FLAG_DISABLE_ANYHIT);
// Check if we hit any of the previously sampled lights
bool hitLight = false;
if (lightPDF > EPSILON)
{
float dotNWi = (useBRDF) ? max(dot(ray.direction, v_gz), 0.f) : 1.f; // geometry term
// if by sampling the brdf we also hit the dome light...
if ((payload.instanceID == -1) && (sampledLightID == -1) && enableDomeSampling) {
// Case where we hit the background, and also previously sampled the background
float w = power_heuristic(1.f, bsdfPDF, 1.f, lightPDF);
float3 lightEmission = missColor(ray, envTex) * LP.domeLightIntensity * pow(2.f, LP.domeLightExposure);
float3 Li = (lightEmission * w) / bsdfPDF;
if (dotNWi > 0.f) {
irradiance = irradiance + (bsdf * Li);
}
hitLight = true;
}
// else if by sampling the brdf we also hit an area light
// TODO: consider hitting emissive voxels?
else if (payload.instanceID != -1) {
GET(int entityID, int, LP.instanceToEntity, payload.instanceID);
bool visible = (entityID == sampledLightID);
// We hit the light we sampled previously
if (visible) {
int3 indices; float3 p; float3 lv_gz; float2 uv;
GET(EntityStruct light_entity, EntityStruct, LP.entities, sampledLightID);
GET(MeshStruct light_mesh, MeshStruct, LP.meshes, light_entity.mesh_id);
GET(LightStruct light_light, LightStruct, LP.lights, light_entity.light_id);
loadMeshTriIndices(light_entity.mesh_id, light_mesh.numTris, payload.primitiveID, indices);
loadMeshUVData(light_entity.mesh_id, light_mesh.numVerts, indices, payload.barycentrics, uv);
float dist = payload.tHit;
float3 lightEmission;
if (light_light.color_texture_id == -1) lightEmission = make_float3(light_light.r, light_light.g, light_light.b) * (light_light.intensity * pow(2.f, light_light.exposure));
else lightEmission = sampleTexture(light_light.color_texture_id, uv, make_float3(0.f, 0.f, 0.f)) * (light_light.intensity * pow(2.f, light_light.exposure));
lightEmission = lightEmission / max(pow(dist, light_light.falloff), 1.f);
if (dotNWi > EPSILON)
{
float w = power_heuristic(1.f, bsdfPDF, 1.f, lightPDF);
float3 Li = (lightEmission * w) / bsdfPDF;
irradiance = irradiance + (bsdf * Li);
}
hitLight = true;
}
}
}
// Accumulate radiance (ie pathThroughput * irradiance), and update the path throughput using the sampled BRDF
float3 contribution = pathThroughput * irradiance;
illum = illum + contribution;
pathThroughput = (pathThroughput * bsdf) / bsdfPDF;
if (depth == 0) directIllum = illum;
// Avoid double counting light sources by terminating here if we hit a light sampled through NEE/MIS
if (hitLight) break;
// Russian Roulette
// Randomly terminate a path with a probability inversely equal to the throughput
float pmax = max(pathThroughput.x, max(pathThroughput.y, pathThroughput.z));
if (lcg_randomf(rng) > pmax) {
break;
}
// // Do path regularization to reduce fireflies
// // Note, .35f was chosen empirically, but could be exposed as a parameter later on.
// EDIT: finding that path regularization doesn't generalize well with transmissive objects...
// if (sampledSpecular) {
// roughnessMinimum = min((roughnessMinimum + .35f), 1.f);
// }
// if the bounce count is less than the max bounce count, potentially add on radiance from the next hit location.
++depth;
if (!useBRDF) volumeDepth++;
else if (sampledBsdf == DISNEY_DIFFUSE_BRDF) diffuseDepth++;
else if (sampledBsdf == DISNEY_GLOSSY_BRDF) glossyDepth++;
else if (sampledBsdf == DISNEY_CLEARCOAT_BRDF) glossyDepth++;
else if (sampledBsdf == DISNEY_TRANSMISSION_BRDF) transmissionDepth++;
// transparency depth handled earlier
// for transmission, once we hit the limit, we'll stop refracting instead
// of terminating, just so that we don't get black regions in our glass
if (transmissionDepth >= LP.maxTransmissionDepth) continue;
} while (
// Terminate the path if the sampled BRDF's corresponding bounce depth exceeds the max bounce for that bounce type minus the overall path depth.
// This prevents long tails that can otherwise occur from mixing BRDF events
(!(sampledBsdf == DISNEY_DIFFUSE_BRDF && diffuseDepth > (LP.maxDiffuseDepth - (depth - 1)))) &&
(!(sampledBsdf == DISNEY_GLOSSY_BRDF && glossyDepth > LP.maxGlossyDepth - (depth - 1)) ) &&
(!(sampledBsdf == DISNEY_CLEARCOAT_BRDF && glossyDepth > LP.maxGlossyDepth - (depth - 1)) ) &&
(!(useBRDF == false && volumeDepth > LP.maxVolumeDepth - (depth - 1))) &&
(!(transparencyDepth > LP.maxTransparencyDepth - (depth - 1)))
// (!(sampledBsdf == DISNEY_TRANSMISSION_BRDF && transmissionDepth < LP.maxTransmissionDepth - (depth - 1)) ) && // see comment above
);
// For segmentations, save heatmap metadata
saveHeatmapRenderData(renderData, depth, start_clock);
// Device assignment data
saveDeviceAssignment(renderData, depth, self.deviceIndex);
// clamp out any extreme fireflies
glm::vec3 gillum = vec3(illum.x, illum.y, illum.z);
glm::vec3 dillum = vec3(directIllum.x, directIllum.y, directIllum.z);
glm::vec3 iillum = gillum - dillum;
// For segmentations, indirect/direct lighting metadata extraction
// float3 aovGIllum = aovIllum;
// aovIndirectIllum = aovGIllum - aovDirectIllum;
// saveLightingIrradianceRenderData(renderData, bounce, aovDirectIllum, aovIndirectIllum, rdSampledBsdf);
if (LP.indirectClamp > 0.f)
iillum = clamp(iillum, vec3(0.f), vec3(LP.indirectClamp));
if (LP.directClamp > 0.f)
dillum = clamp(dillum, vec3(0.f), vec3(LP.directClamp));
gillum = dillum + iillum;
// just in case we get inf's or nans, remove them.
if (glm::any(glm::isnan(gillum))) gillum = vec3(0.f);
if (glm::any(glm::isinf(gillum))) gillum = vec3(0.f);
illum = make_float3(gillum.r, gillum.g, gillum.b);
// accumulate the illumination from this sample into what will be an average illumination from all samples in this pixel
accum_illum = illum;
/* Write to AOVs, progressively refining results */
auto fbOfs = pixelID.x+LP.frameSize.x * ((LP.frameSize.y - 1) - pixelID.y);
float4* accumPtr = (float4*) LP.accumPtr;
float4* fbPtr = (float4*) LP.frameBuffer;
float4* normalPtr = (float4*) LP.normalBuffer;
float4* albedoPtr = (float4*) LP.albedoBuffer;
float4 prev_color = accumPtr[fbOfs];
float4 prev_normal = normalPtr[fbOfs];
float4 prev_albedo = albedoPtr[fbOfs];
float4 accum_color;
if (LP.renderDataMode == RenderDataFlags::NONE)
{
accum_color = make_float4((accum_illum + float(LP.frameID) * make_float3(prev_color)) / float(LP.frameID + 1), 1.0f);
}
else {
// Override framebuffer output if user requested to render metadata
accum_illum = make_float3(renderData.x, renderData.y, renderData.z);
if (isnan(renderData.x) || isnan(renderData.y) || isnan(renderData.z) ||
isinf(renderData.x) || isinf(renderData.y) || isinf(renderData.z) ||
isnan(prev_color.x) || isnan(prev_color.y) || isnan(prev_color.z) ||
isinf(prev_color.x) || isinf(prev_color.y) || isinf(prev_color.z)) {
accum_illum = make_float3(0.f, 0.f, 0.f);
prev_color = make_float4(0.f, 0.f, 0.f, 1.f);
}
accum_color = make_float4((accum_illum + float(LP.frameID) * make_float3(prev_color)) / float(LP.frameID + 1), 1.0f);
// if (debug) {
// printf("output: %f %f %f\n", accum_color.x, accum_color.y, accum_color.z);
// }
}
// compute screen space normal / albedo
vec4 oldAlbedo = make_vec4(prev_albedo);
vec4 oldNormal = make_vec4(prev_normal);
if (any(isnan(oldAlbedo))) oldAlbedo = vec4(0.f);
if (any(isnan(oldNormal))) oldNormal = vec4(0.f);
vec4 newAlbedo = vec4(primaryAlbedo.x, primaryAlbedo.y, primaryAlbedo.z, 1.f);
vec4 accumAlbedo = (newAlbedo + float(LP.frameID) * oldAlbedo) / float(LP.frameID + 1);
vec4 newNormal = vec4(make_vec3(primaryNormal), 1.f);
if (!all(equal(make_vec3(primaryNormal), vec3(0.f, 0.f, 0.f)))) {
glm::quat r0 = glm::quat_cast(LP.viewT0);
glm::quat r1 = glm::quat_cast(LP.viewT1);
glm::quat rot = (glm::all(glm::equal(r0, r1))) ? r0 : glm::slerp(r0, r1, time);
vec3 tmp = normalize(glm::mat3_cast(rot) * make_vec3(primaryNormal));
tmp = normalize(vec3(LP.proj * vec4(tmp, 0.f)));
newNormal = vec4(tmp, 1.f);
}
vec4 accumNormal = (newNormal + float(LP.frameID) * oldNormal) / float(LP.frameID + 1);
// save data to frame buffers
accumPtr[fbOfs] = accum_color;
fbPtr[fbOfs] = accum_color;
albedoPtr[fbOfs] = make_float4(accumAlbedo);
normalPtr[fbOfs] = make_float4(accumNormal);
}
// Merges multiple weighted Gradients objects into a single CompositeGradients.
//
// Each constituent gradient is scaled in place by its weight divided by the
// sum of all weights. Transform-matrix and bias gradients are then summed
// element-wise into the composite result (the first constituent's buffer is
// moved into the result, later ones are added and released). Entity/phrase
// representation gradients are only scaled here; they remain owned by the
// constituents, which the returned CompositeGradients wraps.
//
// Ownership: the returned object (caller-owned) wraps the raw Gradients
// pointers taken from gradients_and_weights.
template <typename FloatT>
Gradients<FloatT>* MergeGradientsFn<FloatT>::operator()(const GradientAndWeights& gradients_and_weights) const {
    CHECK_GT(gradients_and_weights.size(), 1);

    // Collect the raw constituent pointers for the composite wrapper.
    std::vector<Gradients<FloatT>*> gradients;
    gradients.reserve(gradients_and_weights.size());

    for (auto it = gradients_and_weights.begin();
         it != gradients_and_weights.end();
         ++it) {
        Gradients<FloatT>* const grad = std::get<0>(*it);
        gradients.push_back(grad);
    }

    std::unique_ptr<Gradients<FloatT>> result(new CompositeGradients<FloatT>(gradients));

    // Normalization constant for the per-constituent weights.
    FloatT summed_weight = 0.0;
    for (auto& pair : gradients_and_weights) {
        summed_weight += std::get<1>(pair);
    }

    for (auto it = gradients_and_weights.begin();
         it != gradients_and_weights.end();
         ++it) {
        Gradients<FloatT>* const grad = std::get<0>(*it);
        const FloatT weight = std::get<1>(*it);

        // Scale the gradients (all members, including representation
        // gradients that are not merged below).
        for (auto member_ptr : {&Gradients<FloatT>::grad_entity_repr_,
                                &Gradients<FloatT>::grad_phrase_reprs_,
                                &Gradients<FloatT>::grad_transform_matrix_,
                                &Gradients<FloatT>::grad_bias_}) {
            if ((grad->*member_ptr) != nullptr) {
                (grad->*member_ptr)->scale((grad->*member_ptr)->getStream(), weight / summed_weight);
            }
        }

        // Merge gradients: move-or-accumulate transform/bias into the result.
        for (auto member_ptr : {&Gradients<FloatT>::grad_transform_matrix_,
                                &Gradients<FloatT>::grad_bias_}) {
            if ((grad->*member_ptr) != nullptr) {
                if ((result.get()->*member_ptr) == nullptr) {
                    // First contributor: steal its buffer.
                    (result.get()->*member_ptr).reset((grad->*member_ptr).release());
                } else {
                    // Later contributors: add element-wise on a merged stream.
                    elemwise_plus(
                        thrust::cuda::par.on(merge_streams((result.get()->*member_ptr)->getStream(),
                                                           (grad->*member_ptr)->getStream())),
                        *(grad->*member_ptr),
                        (result.get()->*member_ptr).get());

                    (grad->*member_ptr).reset(); // Release memory of constituent.
                }
            }
        }
    }

    return result.release();
}
// Creates a forward result for a non-empty batch.
//
// similarity_probs_ and pointwise_mass_ start unset; cost_ starts as NaN,
// which marks the cached cost as "not yet computed" (see get_cost()).
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>::SimpleForwardResult(
    const size_t batch_size,
    const FloatT regularization_lambda)
    : batch_size_(batch_size),
      regularization_lambda_(regularization_lambda),
      similarity_probs_(nullptr),
      pointwise_mass_(nullptr),
      cost_(NAN) {
    CHECK_GT(batch_size_, 0);
}
// Empty result (batch size 0, NaN cost).
// NOTE(review): batch_size_ == 0 would divide by zero in
// scaled_regularization_lambda(); presumably this constructor is only used
// for testing/placeholders — confirm.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>::SimpleForwardResult()
    : batch_size_(0), regularization_lambda_(0.0), cost_(NAN) {}
// Returns the cost of this forward pass: the negated batch-averaged sum of
// pointwise_mass_, plus a (currently zeroed-out) regularization term.
//
// The value is computed lazily on first call — only when cost_ is still NaN
// and pointwise_mass_ exists — and cached in cost_ via const_cast, so later
// calls are cheap.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
FloatT SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>::get_cost() const {
    if (std::isnan(cost_) && pointwise_mass_.get() != nullptr) {
        PROFILE_FUNCTION_WITH_STREAM(pointwise_mass_->getStream());

        FloatT log_data_prob;

        {
            // Reduce the pointwise mass along the second axis into a 1x1
            // device matrix on the same stream.
            device_matrix<FloatT> device_log_data_prob(
                1, 1,
                pointwise_mass_->getStream());
            CHECK_MATRIX(*pointwise_mass_);
            reduce_axis(device_log_data_prob.getStream(),
                        SECOND_AXIS,
                        *pointwise_mass_,
                        &device_log_data_prob);

            // Copy the scalar into pinned host memory and wait for the
            // stream before reading it.
            FloatT* log_data_prob_ptr;
            CCE(cudaHostAlloc(&log_data_prob_ptr,
                              1 * sizeof(FloatT),
                              cudaHostAllocDefault));
            device_log_data_prob.transfer(
                device_log_data_prob.getStream(),
                log_data_prob_ptr, 1 /* num */);
            // NOTE(review): unlike cudaHostAlloc above, this call and
            // cudaFreeHost below are not wrapped in CCE — confirm intended.
            cudaStreamSynchronize(device_log_data_prob.getStream());

            log_data_prob = *log_data_prob_ptr;
            cudaFreeHost(log_data_prob_ptr);

            // Average over the batch.
            log_data_prob /= this->batch_size_;
        }

        CHECK(isfinite(log_data_prob));

        // Cache the result despite the const qualifier.
        *const_cast<FloatT*>(&cost_) =
            - log_data_prob +
            this->scaled_regularization_lambda() * 0.0; // TODO(cvangysel): fill in regularization in cost?
    }

    return cost_;
}
// Regularization weight normalized by the number of instances in the batch.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
FloatT SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>::scaled_regularization_lambda() const {
    const FloatT lambda = this->regularization_lambda_;
    return lambda / this->batch_size_;
}
// Transfers raw ForwardResult pointers (with their mixture weights) from a
// source tuple into a destination tuple of unique_ptrs. Applied per tuple
// index via for_tuple_range.
template <typename FloatT, typename ... ForwardResultT>
class ConstructorFn {
 public:
  ConstructorFn(const std::tuple<std::pair<ForwardResultT*, FloatT> ...>& src,
                std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>& dst)
      : source_(&src), target_(&dst) {}

  // Takes ownership of the Index-th result pointer and copies its weight.
  template <typename Index>
  void operator()(const Index& idx) {
    const auto& source_pair = std::get<Index::value>(*source_);
    auto& target_pair = std::get<Index::value>(*target_);
    std::get<0>(target_pair).reset(std::get<0>(source_pair));
    std::get<1>(target_pair) = std::get<1>(source_pair);
  }

 private:
  const std::tuple<std::pair<ForwardResultT*, FloatT> ...>* const source_;
  std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>* const target_;

  DISALLOW_COPY_AND_ASSIGN(ConstructorFn);
};
// Collects one device matrix (plus its mixture weight) from every forward
// result in a tuple — via a pointer-to-member getter — and horizontally
// stacks them into a single matrix. Applied per tuple index via
// for_tuple_range; call get_result() afterwards.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
class HStackFn {
 public:
  // Signature of a const ForwardResult getter returning a device matrix.
  typedef device_matrix<FloatT>* (ForwardResult<FloatT, WordIdxT, EntityIdxT>::*MatrixForwardResultFn)() const;

  HStackFn(const std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>& forward_results,
           MatrixForwardResultFn fn)
      : forward_results_(&forward_results), fn_(fn), args_() {}

  // Appends the Index-th result's matrix and weight to the argument list.
  template <typename Index>
  void operator()(const Index& idx) {
    args_.push_back(
        std::make_pair(((std::get<0>(std::get<Index::value>(*forward_results_)).get())->*fn_)(),
                       std::get<1>(std::get<Index::value>(*forward_results_))));
  }

  // Stacks all collected matrices; the caller owns the returned matrix.
  device_matrix<FloatT>* get_result() const {
    return hstack(DefaultStream::get()->next(), args_);
  }

 private:
  const std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>* const forward_results_;
  MatrixForwardResultFn fn_;
  std::vector<std::pair<device_matrix<FloatT>*, FloatT>> args_;

  DISALLOW_COPY_AND_ASSIGN(HStackFn);
};
// Takes ownership of the given forward results (with their mixture weights)
// and precomputes the horizontally-stacked similarity probabilities across
// all of them.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
MultiForwardResultBase<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...>::MultiForwardResultBase(
    const ForwardResultsType& forward_results)
    : similarity_probs_(nullptr) {
    // Move the raw pointers into the owned tuple.
    for_tuple_range(forward_results_, ConstructorFn<FloatT, ForwardResultT ...>(
        forward_results, forward_results_));

    // Stack each constituent's similarity probabilities into one matrix.
    HStackFn<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...> fn(
        this->forward_results_, &ForwardResult<FloatT, WordIdxT, EntityIdxT>::get_similarity_probs);
    for_tuple_range(forward_results_, fn);

    similarity_probs_.reset(fn.get_result());
}
// Accumulates a scalar quantity (obtained through a member-function pointer)
// over every forward result in a tuple and reports its arithmetic mean.
// Applied per tuple index via for_tuple_range; call get_result() afterwards.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
class AverageFn {
 public:
  // Signature of a const ForwardResult getter returning a scalar.
  typedef FloatT (ForwardResult<FloatT, WordIdxT, EntityIdxT>::*ScalarForwardResultFn)() const;

  AverageFn(const std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>& forward_results,
            ScalarForwardResultFn fn)
      : forward_results_(&forward_results), fn_(fn), agg_(0.0) {}

  // Adds the Index-th result's scalar to the running total.
  template <typename Index>
  void operator()(const Index& idx) {
    const auto* result = std::get<0>(std::get<Index::value>(*forward_results_)).get();
    agg_ += (result->*fn_)();
  }

  // Mean over all results; the count is known at compile time.
  FloatT get_result() const {
    const size_t num_results = std::tuple_size<std::tuple<ForwardResultT ...>>::value;
    return agg_ / num_results;
  }

 private:
  const std::tuple<std::pair<std::unique_ptr<ForwardResultT>, FloatT> ...>* const forward_results_;
  ScalarForwardResultFn fn_;
  FloatT agg_;

  DISALLOW_COPY_AND_ASSIGN(AverageFn);
};
// Cost of the combined result: the mean cost over all constituent results.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
FloatT MultiForwardResultBase<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...>::get_cost() const {
    AverageFn<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...> averager(
        forward_results_, &ForwardResult<FloatT, WordIdxT, EntityIdxT>::get_cost);
    for_tuple_range(forward_results_, averager);
    return averager.get_result();
}
// Mean scaled regularization weight over all constituent results.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
FloatT MultiForwardResultBase<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...>::scaled_regularization_lambda() const {
    AverageFn<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...> averager(
        forward_results_, &ForwardResult<FloatT, WordIdxT, EntityIdxT>::scaled_regularization_lambda);
    for_tuple_range(forward_results_, averager);
    return averager.get_result();
}
// Exposes the stacked similarity probabilities; ownership is retained.
template <typename FloatT, typename WordIdxT, typename EntityIdxT, typename ... ForwardResultT>
device_matrix<FloatT>* MultiForwardResultBase<FloatT, WordIdxT, EntityIdxT, ForwardResultT ...>::get_similarity_probs() const {
    device_matrix<FloatT>* const probs = similarity_probs_.get();
    return probs;
}
// Heap-allocates a tuple holding references to the given arguments; the
// caller owns the tuple but NOT the referents.
// NOTE(review): the variadic overload stores forwarding references, so the
// referenced objects must outlive the returned tuple — confirm all call
// sites pass long-lived matrices.
template <typename ... Types>
std::tuple<Types&& ...>* forward_as_tuple_ptr(Types&& ... args) {
    return new std::tuple<Types&& ...>(
        std::forward<Types>(args) ...);
}

// Overload building a (gradient matrix, indices, window size) tuple with
// the exact reference/const qualifiers used by the gradient interfaces.
template <typename FloatT, typename IntT>
std::tuple<device_matrix<FloatT>&, const device_matrix<IntT>&, const size_t>* forward_as_tuple_ptr(
    device_matrix<FloatT>& a, device_matrix<IntT>& b, const size_t c) {
    return new std::tuple<device_matrix<FloatT>&, const device_matrix<IntT>&, const size_t>(a, b, c);
}

// Overload that additionally carries an optional weight-matrix pointer
// (may be null).
template <typename FloatT, typename IntT>
std::tuple<device_matrix<FloatT>&, const device_matrix<IntT>&, const size_t, const device_matrix<FloatT>*>* forward_as_tuple_ptr(
    device_matrix<FloatT>& a, device_matrix<IntT>& b, const size_t c, const device_matrix<FloatT>* d) {
    return new std::tuple<device_matrix<FloatT>&, const device_matrix<IntT>&, const size_t, const device_matrix<FloatT>*>(a, b, c, d);
}
// Builds the gradient tuple for the affine transform parameters.
//
// Returns a caller-owned heap-allocated (matrix gradient, bias gradient)
// reference tuple, or nullptr when no transform gradient was computed.
// Any parameter other than TRANSFORM is a fatal error.
//
// NOTE(review): when grad_transform_matrix_ is set, grad_bias_ is
// dereferenced unconditionally — presumably both are always set together;
// verify.
template <typename FloatT>
typename TransformStorage<FloatT>::GradientType* Gradients<FloatT>::get_transform_gradient(
    const ParamIdentifier param_id) const {
    switch (param_id) {
    case TRANSFORM:
        if (this->grad_transform_matrix_ != nullptr) {
            return forward_as_tuple_ptr(*this->grad_transform_matrix_, *this->grad_bias_);
        } else {
            return nullptr;
        }
    default:
        break;
    };

    LOG(FATAL) << "Unable to construct gradient.";
    throw 0;
}
// Builds the representation-gradient list for the requested parameter.
//
// Returns a caller-owned heap-allocated vector with a single
// (gradient matrix, indices, window size, weights) tuple, or nullptr when
// the corresponding gradient was not computed. Any parameter other than
// WORD_REPRS/ENTITY_REPRS is a fatal error.
template <typename FloatT>
typename RepresentationsStorage<FloatT, int32>::GradientType* SingleGradients<FloatT>::get_representations_gradient(
    const ParamIdentifier param_id) const {
    switch (param_id) {
    case WORD_REPRS:
        if (this->grad_phrase_reprs_ != nullptr) {
            DCHECK_GT(result_->get_window_size(), 0);

            // Word gradients are grouped per phrase window and may carry
            // per-word weights.
            return new std::vector<typename RepresentationsStorage<FloatT, typename Storage<FloatT>::ForwardResult::WordIdxType>::SingleGradientType> {
                std::forward_as_tuple(
                    *this->grad_phrase_reprs_,
                    *result_->get_word_indices(),
                    result_->get_window_size(),
                    static_cast<const device_matrix<FloatT>*>(result_->get_word_weights()))};
        } else {
            return nullptr;
        }
    case ENTITY_REPRS:
        if (this->grad_entity_repr_ != nullptr) {
            // Entity gradients use a window of one and no weights.
            return new std::vector<typename RepresentationsStorage<FloatT, typename Storage<FloatT>::ForwardResult::EntityIdxType>::SingleGradientType> {
                std::forward_as_tuple(
                    *this->grad_entity_repr_,
                    *result_->get_entity_indices(),
                    static_cast<size_t>(1), /* window size */
                    static_cast<const device_matrix<FloatT>*>(nullptr) /* weights */)};
        } else {
            return nullptr;
        }
    default:
        break;
    };

    LOG(FATAL) << "Unable to construct gradient.";
    throw 0;
}
// Concatenates the representation gradients of all constituents for the
// requested parameter into one caller-owned vector. Constituents that did
// not compute the gradient (nullptr) are skipped.
//
// NOTE(review): the final CHECK_LE assumes each constituent contributes at
// most one entry; SingleGradients currently returns single-element vectors,
// but other Gradients subclasses might not — confirm.
template <typename FloatT>
typename RepresentationsStorage<FloatT, int32>::GradientType* CompositeGradients<FloatT>::get_representations_gradient(
    const ParamIdentifier param_id) const {
    typename RepresentationsStorage<FloatT, int32>::GradientType* gradients =
        new typename RepresentationsStorage<FloatT, int32>::GradientType;
    gradients->reserve(constituent_gradients_.size());

    for (auto& constituent_gradients : constituent_gradients_) {
        // Take ownership of the constituent's vector and move its entries.
        std::unique_ptr<
            std::vector<
                typename RepresentationsStorage<FloatT, typename Storage<FloatT>::ForwardResult::WordIdxType>
                    ::SingleGradientType>> v(
            constituent_gradients->get_representations_gradient(param_id));

        if (v != nullptr) {
            std::move(v->begin(), v->end(), std::back_inserter(*gradients));
        }
    }

    CHECK_LE(gradients->size(), constituent_gradients_.size());

    return gradients;
}
namespace TextEntity {

// Forward-pass result for the text-to-entity model.
//
// Takes ownership of the flattened word indices, their weights and the
// entity ids. The batch size is the number of flattened words divided by
// the phrase window size; intermediate buffers (phrase representations,
// projections, ...) start empty and are filled during the forward pass.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ForwardResult<FloatT, WordIdxType, EntityIdxType>::ForwardResult(
    device_matrix<WordIdxType>* const flattened_words,
    device_matrix<FloatT>* const flattened_word_weights,
    device_matrix<EntityIdxType>* const entity_ids,
    const size_t window_size,
    const size_t num_random_entities,
    const FloatT regularization_lambda)
    : ::SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>(
          flattened_words->size() / window_size, /* batch_size */
          regularization_lambda),
      window_size_(window_size),
      num_random_entities_(num_random_entities),
      entity_ids_(entity_ids),
      flattened_words_(flattened_words),
      flattened_word_weights_(flattened_word_weights),
      phrase_reprs_(nullptr),
      broadcasted_instance_weights_(nullptr),
      word_projections_(nullptr),
      broadcasted_word_projections_(nullptr),
      entity_representations_(nullptr) {
    CHECK_GT(window_size_, 0);
    CHECK_EQ(flattened_words->size() % window_size, 0);

    CHECK_NE(flattened_words_.get(), (device_matrix<WordIdxType>*) nullptr);
    CHECK_DIMENSIONS(*flattened_words_, 1, this->batch_size_ * window_size_);
    CHECK_DIMENSIONS_EQUAL(*flattened_words_, *flattened_word_weights_);

    CHECK_NE(entity_ids_.get(), (device_matrix<EntityIdxType>*) nullptr);
    CHECK_DIMENSIONS(*entity_ids_, 1, entity_ids_->size());
}

// Empty result; no inputs are owned and the cost is NaN.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ForwardResult<FloatT, WordIdxType, EntityIdxType>::ForwardResult()
    : ::SimpleForwardResult<FloatT, WordIdxType, EntityIdxType>(),
      window_size_(0), num_random_entities_(0) {}

// Returns true when every intermediate buffer of the forward pass has been
// populated and the cost is a valid number. Intended for debug assertions;
// calling it in release builds forces a cost computation.
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
bool ForwardResult<FloatT, WordIdxType, EntityIdxType>::complete() const {
#ifdef NDEBUG
    // Bug fix: the previous occurrence count (10^22) does not fit in any
    // 64-bit integer type; 10^9 fits in a 32-bit int and still effectively
    // rate-limits the warning.
    LOG_EVERY_N(ERROR, 1000000000)
        << "Calling this function in non-debug mode can decrease performance.";
#endif

    return !std::isnan(this->get_cost()) &&
        phrase_reprs_ != nullptr &&
        broadcasted_instance_weights_ != nullptr &&
        word_projections_ != nullptr &&
        broadcasted_word_projections_ != nullptr &&
        entity_representations_ != nullptr &&
        this->similarity_probs_ != nullptr &&
        this->pointwise_mass_ != nullptr;
}

// Explicit instantiations.
template class ForwardResult<FLOATING_POINT_TYPE, int32, int32>;

}  // namespace TextEntity
namespace RepresentationSimilarity {

// Forward-pass result for the representation-similarity objective.
//
// Takes ownership of the id pairs and their weights; ids come in pairs, so
// the batch size is half the number of ids and there is one weight per pair.
template <typename FloatT, typename ReprIdxType>
ForwardResult<FloatT, ReprIdxType>::ForwardResult(
    const ParamIdentifier param_id,
    device_matrix<ReprIdxType>* const ids,
    device_matrix<FloatT>* const weights,
    const FloatT regularization_lambda)
    : ::SimpleForwardResult<FloatT, ReprIdxType, ReprIdxType>(
          ids->size() / 2, /* batch_size */
          regularization_lambda),
      param_id_(param_id),
      ids_(ids),
      weights_(weights) {
    CHECK_EQ(ids->size() % 2, 0);
    CHECK_EQ(weights->size() * 2, ids_->size());
}

// For testing.
template <typename FloatT, typename ReprIdxType>
ForwardResult<FloatT, ReprIdxType>::ForwardResult(const ParamIdentifier param_id)
    : ::SimpleForwardResult<FloatT, ReprIdxType, ReprIdxType>(),
      param_id_(param_id) {}

// Returns true when every buffer of the forward pass has been populated and
// the cost is a valid number. Intended for debug assertions; calling it in
// release builds forces a cost computation.
template <typename FloatT, typename ReprIdxType>
bool ForwardResult<FloatT, ReprIdxType>::complete() const {
#ifdef NDEBUG
    // Bug fix: the previous occurrence count (10^22) does not fit in any
    // 64-bit integer type; 10^9 fits in a 32-bit int and still effectively
    // rate-limits the warning.
    LOG_EVERY_N(ERROR, 1000000000)
        << "Calling this function in non-debug mode can decrease performance.";
#endif

    return !std::isnan(this->get_cost()) &&
        ids_ != nullptr &&
        weights_ != nullptr &&
        representations_ != nullptr &&
        this->similarity_probs_ != nullptr &&
        this->pointwise_mass_ != nullptr;
}

// Returns the owned id matrix when queried for this result's parameter;
// nullptr otherwise. Ownership is retained.
template <typename FloatT, typename ReprIdxType>
device_matrix<ReprIdxType>* ForwardResult<FloatT, ReprIdxType>::get_indices(const ParamIdentifier param_id) const {
    if (param_id_ == param_id) {
        return ids_.get();
    } else {
        return nullptr;
    }
}

// Explicit instantiations.
template class ForwardResult<FLOATING_POINT_TYPE, int32>;

}  // namespace RepresentationSimilarity
// Explicit instantiations for the compiled floating-point type, so the
// template definitions in this translation unit are emitted for linking.
template class Gradients<FLOATING_POINT_TYPE>;
template class SingleGradients<FLOATING_POINT_TYPE>;
template class CompositeGradients<FLOATING_POINT_TYPE>;

// Combined text/entity + representation-similarity forward result.
template class MultiForwardResultBase<
    FLOATING_POINT_TYPE, int32, int32,
    TextEntity::ForwardResult<FLOATING_POINT_TYPE, int32, int32>,
    RepresentationSimilarity::ForwardResult<FLOATING_POINT_TYPE, int32>>;
template class MergeGradientsFn<FLOATING_POINT_TYPE>;
#include <cstdio>
#include <float.h>
#include "cuda_kernel_utils.h"
#define BLOCK_SIZE 4
#define PT_BLOCK_SIZE 128
////////////////////////////////////////////////////////////////////////////////// GPU

// Offsets of the 27 cells in a 3x3x3 neighborhood (center included), kept in
// constant memory; initialized from the host in samplePointCloud via
// cudaMemcpyToSymbol before the selection kernel runs.
__constant__ int cellOffsetsPool[27][3];
/**
 * Method to select a set of points from a point cloud in which all of them are at
 * distance [pRadius*0.5, pRadius]. Cells are processed in 27 interleaved
 * phases (every third cell per axis, shifted by the phase offset) so that
 * concurrently processed cells never share a 3x3x3 neighborhood.
 * @param scaleInv Scale invariant; when true the radius is scaled by the
 *  largest extent of the batch's bounding box.
 * @param pCurrBatch Current batch processed.
 * @param pCurrentCell Integer with the current phase offset of the block.
 * @param pNumPoints Number of points.
 * @param pBatchSize Size of the batch.
 * @param pNumCells Number of cells of the grid.
 * @param pRadius Radius of the poisson disk.
 * @param pAABBMinPoint Minimum point of the grid (3 components).
 * @param pAABBMaxPoint Maximum point of the grid (3 components).
 * @param pPoints List of points.
 * @param pBatchIds List of the batch identifiers.
 * @param pPDFs List of pdfs of each point.
 * @param pCellIndexs Indexs of the grid cells.
 * @param pAuxBooleanBuffer Input/Output parameter with the list of booleans indicating
 *  if a point was selected.
 * @param pOutSampledPoints Output parameter with the list of sampled points.
 * @param pOutSampleBatchIds Output parameter with the list of sampled batch ids.
 * @param pOutSampleIndexs Output parameter with the list of indexs of the sampled points.
 * @param pOutNumSelectedPoints Output parameter with the number of selected points.
 */
__global__ void selectSamples(
    const bool scaleInv,
    const int pCurrBatch,
    const int pCurrentCell,
    const int pNumPoints,
    const int pBatchSize,
    const int pNumCells,
    const float pRadius,
    const float* __restrict__ pAABBMinPoint,
    const float* __restrict__ pAABBMaxPoint,
    const float* __restrict__ pPoints,
    const int* __restrict__ pBatchIds,
    const int* __restrict__ pCellIndexs,
    bool* __restrict__ pAuxBooleanBuffer,
    float* __restrict__ pOutSampledPoints,
    int* __restrict__ pOutSampleBatchIds,
    int* __restrict__ pOutSampleIndexs,
    int* __restrict__ pOutNumSelectedPoints)
{
    // Each thread owns one cell of the current phase.
    int xCell = (threadIdx.x + blockIdx.x * blockDim.x)*3 + 1 + cellOffsetsPool[pCurrentCell][0];
    int yCell = (threadIdx.y + blockIdx.y * blockDim.y)*3 + 1 + cellOffsetsPool[pCurrentCell][1];
    int zCell = (threadIdx.z + blockIdx.z * blockDim.z)*3 + 1 + cellOffsetsPool[pCurrentCell][2];
    // Bug fix: the second comparison previously used a bitwise '&' instead of
    // logical '&&'; use '&&' so evaluation short-circuits as intended.
    if(xCell < pNumCells && yCell < pNumCells && zCell < pNumCells){
        // Optionally make the radius relative to the batch's bounding box.
        float maxAabbSize = max(max(
            pAABBMaxPoint[pCurrBatch*3] - pAABBMinPoint[pCurrBatch*3],
            pAABBMaxPoint[pCurrBatch*3 + 1] - pAABBMinPoint[pCurrBatch*3 + 1]),
            pAABBMaxPoint[pCurrBatch*3 + 2] - pAABBMinPoint[pCurrBatch*3 + 2]);
        float radius = (scaleInv)?pRadius*maxAabbSize:pRadius;

        int cellIndex = pCurrBatch*pNumCells*pNumCells*pNumCells + xCell*pNumCells*pNumCells + yCell*pNumCells + zCell;
        int initPoint = pCellIndexs[cellIndex*2];
        int endPoint = pCellIndexs[cellIndex*2 +1];
        for(int i = initPoint; i < endPoint; ++i)
        {
            float centralCoords[3] = {pPoints[i*3], pPoints[i*3+1], pPoints[i*3+2]};
            bool collision = false;
            // Test the candidate against every already-selected point in the
            // 3x3x3 neighborhood of its cell.
            for(int neighIter = 0; (neighIter < 27) && !collision; ++neighIter)
            {
                int currCellIndex[3] = {xCell+cellOffsetsPool[neighIter][0], yCell+cellOffsetsPool[neighIter][1], zCell+cellOffsetsPool[neighIter][2]};
                if(currCellIndex[0] >= 0 && currCellIndex[0] < pNumCells &&
                   currCellIndex[1] >= 0 && currCellIndex[1] < pNumCells &&
                   currCellIndex[2] >= 0 && currCellIndex[2] < pNumCells)
                {
                    int cellIndexFlat = pCurrBatch*pNumCells*pNumCells*pNumCells + currCellIndex[0]*pNumCells*pNumCells + currCellIndex[1]*pNumCells + currCellIndex[2];
                    int initNeighIndex = pCellIndexs[cellIndexFlat*2];
                    int endNeighIndex = pCellIndexs[cellIndexFlat*2 + 1];
                    for(int j = initNeighIndex; (j < endNeighIndex) && !collision; ++j)
                    {
                        int currPointIndex = j * 3;
                        float currentCoords[3] = {pPoints[currPointIndex], pPoints[currPointIndex+1], pPoints[currPointIndex+2]};
                        float diffVector[3] = {currentCoords[0] - centralCoords[0], currentCoords[1] - centralCoords[1], currentCoords[2] - centralCoords[2]};
                        // Single-precision sqrtf: all operands are floats.
                        float pointDist = sqrtf(diffVector[0]*diffVector[0] + diffVector[1]*diffVector[1] + diffVector[2]*diffVector[2]);
                        if(pointDist < radius && pAuxBooleanBuffer[j]){
                            collision = true;
                        }
                    }
                }
            }

            // No selected point is too close: accept the candidate and append
            // it to the output (atomic counter yields a unique slot).
            if(!collision){
                pAuxBooleanBuffer[i] = true;
                int finalPointIndex = atomicAdd(&pOutNumSelectedPoints[0], 1);
                pOutSampledPoints[finalPointIndex*3] = centralCoords[0];
                pOutSampledPoints[finalPointIndex*3+1] = centralCoords[1];
                pOutSampledPoints[finalPointIndex*3+2] = centralCoords[2];
                pOutSampleBatchIds[finalPointIndex] = pCurrBatch;
                pOutSampleIndexs[finalPointIndex] = i;
            }
        }
    }
}
/**
 * Method to get the features of the sampled points. One thread copies one
 * (sample, feature) element; threads beyond the last sample do nothing.
 * @param pNumSamples Number of samples.
 * @param pNumFeatures Number of features.
 * @param pSampledIndexs List of indexs of the sampled points.
 * @param pFeatures List of input features.
 * @param pOutSampledFeatures List of output sampled features.
 */
__global__ void selectFeatureSamples(
    const int pNumSamples,
    const int pNumFeatures,
    const int* __restrict__ pSampledIndexs,
    const float* __restrict__ pFeatures,
    float* __restrict__ pOutSampledFeatures)
{
    const int globalThread = threadIdx.x + blockIdx.x * blockDim.x;
    const int sampleId = globalThread / pNumFeatures;
    if (sampleId < pNumSamples) {
        const int featureId = globalThread % pNumFeatures;
        const int srcOffset = pSampledIndexs[sampleId] * pNumFeatures + featureId;
        pOutSampledFeatures[globalThread] = pFeatures[srcOffset];
    }
}
/**
 * Method to get the gradients of the features of the sampled points. One
 * thread scatters one (sample, feature) gradient element back to its
 * position in the input-feature gradient buffer.
 * @param pNumSamples Number of samples.
 * @param pNumFeatures Number of features.
 * @param pSampledIndexs List of indexs of the sampled points.
 * @param pFeaturesGrads List of gradients of output features.
 * @param pOutSampledFeaturesGrads List of output gradients of input features.
 */
__global__ void selectFeatureSamplesGrad(
    const int pNumSamples,
    const int pNumFeatures,
    const int* __restrict__ pSampledIndexs,
    const float* __restrict__ pFeaturesGrads,
    float* __restrict__ pOutSampledFeaturesGrads)
{
    const int globalThread = threadIdx.x + blockIdx.x * blockDim.x;
    const int sampleId = globalThread / pNumFeatures;
    if (sampleId < pNumSamples) {
        const int featureId = globalThread % pNumFeatures;
        const int dstOffset = pSampledIndexs[sampleId] * pNumFeatures + featureId;
        pOutSampledFeaturesGrads[dstOffset] = pFeaturesGrads[globalThread];
    }
}
////////////////////////////////////////////////////////////////////////////////// CPU
int samplePointCloud(
const bool scaleInv,
const float pRadius,
const int pNumPoints,
const int pBatchSize,
const int pNumCells,
const float* pAABBMin,
const float* pAABBMax,
const float* pPoints,
const int* pBatchIds,
const int* pCellIndexs,
float* pSelectedPts,
int* pSelectedBatchIds,
int* pSelectedIndexs,
bool* pAuxBoolBuffer)
{
//Init device symbols.
int cellOffsetsPoolCPU[27][3] = {
{1, 1, -1}, {0, -1, 1}, {0, 1, 1}, {0, 1, 0}, {0, 0, 1}, {0, -1, 0}, {-1, 1, -1},
{0, -1, -1}, {1, 0, 0}, {1, -1, 1}, {1, 0, 1}, {-1, 1, 1}, {-1, 0, 0}, {1, -1, -1},
{0, 1, -1}, {-1, -1, 0}, {-1, 1, 0}, {0, 0, 0}, {0, 0, -1}, {1, 1, 0}, {1, 0, -1},
{1, -1, 0}, {-1, 0, 1}, {1, 1, 1}, {-1, 0, -1}, {-1, -1, -1}, {-1, -1, 1}};
cudaMemcpyToSymbol(cellOffsetsPool, cellOffsetsPoolCPU, 27*3*sizeof(int));
int numSelectedPointsCPU = 0;
gpuErrchk(cudaMemset(pAuxBoolBuffer, 0, sizeof(bool)*pNumPoints));
int* numSelectedPoints;
gpuErrchk(cudaMalloc(&numSelectedPoints, sizeof(int)));
gpuErrchk(cudaMemset(numSelectedPoints, 0, sizeof(int)));
int numPhaseGroups = pNumCells/3;
numPhaseGroups += (pNumCells%3!=0)?1:0;
int numBlocks = numPhaseGroups/BLOCK_SIZE;
numBlocks += (numPhaseGroups%BLOCK_SIZE!=0)?1:0;
for(int b = 0; b < pBatchSize; ++b){
for(int i = 0; i < 27; ++i){
selectSamples<<<dim3(numBlocks,numBlocks,numBlocks), dim3(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE)>>>
(scaleInv, b, i, pNumPoints, pBatchSize, pNumCells, pRadius, pAABBMin,
pAABBMax, pPoints, pBatchIds, pCellIndexs, pAuxBoolBuffer, pSelectedPts,
pSelectedBatchIds, pSelectedIndexs, numSelectedPoints);
gpuErrchk(cudaPeekAtLastError());
}
}
//Copy from GPU the number of selected samples.
gpuErrchk(cudaMemcpy(&numSelectedPointsCPU, numSelectedPoints, sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(numSelectedPoints));
#ifdef PRINT_CONV_INFO
printf("Num Cells: %d | Input points: %d | Result pooling: %d\n", pNumCells, pNumPoints, numSelectedPointsCPU);
#endif
return numSelectedPointsCPU;
}
/**
 * Copies the sampled point coordinates, batch ids and source indices into
 * the destination buffers (device-to-device copies).
 */
void copyPoints(
    float* pSelectedPts,
    int* pSelectedBatchIds,
    int* pSelectedIndexs,
    const int pNumPts,
    float* pDestPts,
    int* pDestBatchIds,
    int* pDestIndexs)
{
    const size_t coordBytes = sizeof(float) * 3 * pNumPts;
    const size_t idBytes = sizeof(int) * pNumPts;
    gpuErrchk(cudaMemcpy(pDestPts, pSelectedPts, coordBytes, cudaMemcpyDeviceToDevice));
    gpuErrchk(cudaMemcpy(pDestBatchIds, pSelectedBatchIds, idBytes, cudaMemcpyDeviceToDevice));
    gpuErrchk(cudaMemcpy(pDestIndexs, pSelectedIndexs, idBytes, cudaMemcpyDeviceToDevice));
}
/**
 * Gathers the feature vectors of the sampled points into a compact buffer.
 * The kernel maps one thread to one (sample, feature) element.
 */
void getFeaturesSampledPoints(
    int pNumPoints,
    int pNumFeatures,
    int pNumSampledPoints,
    const int* pInPointsIndexs,
    const float* pInFeature,
    float* pOutSelFeatures)
{
    // Bug fix: the kernel needs one thread per (sample, feature) element,
    // i.e. ceil(samples*features / PT_BLOCK_SIZE) blocks. The previous code
    // computed an (unused) per-sample block count but launched with
    // pNumSampledPoints blocks, which silently dropped features beyond
    // PT_BLOCK_SIZE per sample.
    const int numElements = pNumSampledPoints * pNumFeatures;
    int numBlocksPoints = numElements / PT_BLOCK_SIZE;
    numBlocksPoints += (numElements % PT_BLOCK_SIZE != 0) ? 1 : 0;
    selectFeatureSamples<<<numBlocksPoints, PT_BLOCK_SIZE>>>(pNumSampledPoints, pNumFeatures, pInPointsIndexs, pInFeature, pOutSelFeatures);
    gpuErrchk(cudaPeekAtLastError());
}
/**
 * Scatters the gradients of the sampled features back to the full
 * input-feature gradient buffer, zeroing it first.
 */
void getFeaturesSampledPointsGradients(
    int pNumPoints,
    int pNumFeatures,
    int pNumSampledPoints,
    const int* pInPointsIndexs,
    const float* pInOutFeatureGrad,
    float* pOutInFeaturesGradients)
{
    // Bug fix: size the memset with sizeof(float) — the buffer holds floats
    // (sizeof(int) happened to match on common platforms, but was wrong).
    gpuErrchk(cudaMemset(pOutInFeaturesGradients, 0, sizeof(float)*pNumFeatures*pNumPoints));

    // Bug fix: launch one thread per (sample, feature) element; the previous
    // code computed an (unused) per-sample block count but launched with
    // pNumSampledPoints blocks, dropping features beyond PT_BLOCK_SIZE.
    const int numElements = pNumSampledPoints * pNumFeatures;
    int numBlocksPoints = numElements / PT_BLOCK_SIZE;
    numBlocksPoints += (numElements % PT_BLOCK_SIZE != 0) ? 1 : 0;
    selectFeatureSamplesGrad<<<numBlocksPoints, PT_BLOCK_SIZE>>>(pNumSampledPoints, pNumFeatures, pInPointsIndexs, pInOutFeatureGrad, pOutInFeaturesGradients);
    gpuErrchk(cudaPeekAtLastError());
}
#include "nnconv.hpp"
#include "nnbias.hpp"
#include "impl/dispatcher.hpp"
#include "impl/blashelper.hpp"
#include "impl/copy.hpp"
#include "impl/im2row.hpp"
#include <cassert>
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
// Forward declarations of the dispatchable convolution implementations,
// parameterized by device type (CPU/GPU) and data type; specializations are
// provided below — presumably the missing ones live in sibling files.
template<DeviceType deviceType, DataType dataType> struct ConvolutionForward ;
template<DeviceType deviceType, DataType dataType> struct ConvolutionBackward ;
template<DeviceType deviceType, DataType dataType> struct ConvolutionTransposeForward ;
template<DeviceType deviceType, DataType dataType> struct ConvolutionTransposeBackward ;
// cuDNN-backed variants (data type only; GPU implied).
template<DataType dataType> struct ConvolutionForwardCudnn ;
template<DataType dataType> struct ConvolutionBackwardCudnn ;
// -------------------------------------------------------------------
// Forward
// -------------------------------------------------------------------
/*
One image at a time is processed.
Filters are (optionally) divided in to groups, one for each group of dimensions.
patchVolume numFilters
+-------------------------+ +-----------------------+
filterVolume numFiltersPerGroup
+------------+------------+ +-----------+-----------+ +--------+--------+
| | | | | | | | |
| | | | filter | | | | |
| | | | group 1 | 0 | = | | |
| | | | | | | | |
| | | | | | | | |
| | | +-----------------------+ | | |
numOutputPixels | grp. 1 | grp. 2 | | | | | | |
| | | | | filter | | | |
| | | | 0 | group 2 | | | |
| | | | | | | | |
| | | | | | | | |
| | | +-----------+-----------+ | | |
| | | | | |
| | | filters | | |
| | | | | |
+------------+------------+ +--------+--------+
temp output
*/
template<DeviceType deviceType, DataType dataType>
struct ConvolutionForward
{
  // Computes output <- outputMult*output + inputMult*(input (*) filter) [+ bias].
  // One image at a time: im2row unrolls the input patches into a
  // (numOutputPixels x filterVolume*numGroups) scratch matrix, then one GEMM
  // per filter group produces the output channels; a rank-1 GEMM against a
  // vector of ones broadcasts the bias over the output pixels.
  vl::ErrorCode operator()
  (Convolution &op,
   Tensor output, double outputMult,
   Tensor const& input, double inputMult,
   Tensor const& filter,
   Tensor const& bias)
  {
    assert(output) ;
    assert(input) ;
    assert(filter) ;

    // Fix: initialize to VLE_Success; previously `error` was read
    // uninitialized at `done:` when the batch was empty (input.getSize()==0).
    vl::ErrorCode error = vl::VLE_Success ;
    typedef typename vl::DataTypeTraits<dataType>::type type ;

    // Filter groups: the input depth is split into numGroups groups, each
    // convolved with its own subset of the filters.
    ptrdiff_t numGroups = input.getDepth() / filter.getDepth() ;
    ptrdiff_t numFiltersPerGroup = filter.getSize() / numGroups ;
    ptrdiff_t numOutputPixels = output.getHeight() * output.getWidth() ;
    ptrdiff_t filterVolume = filter.getHeight() * filter.getWidth() * filter.getDepth() ;
    ptrdiff_t tempVolume = numOutputPixels * filterVolume * numGroups ;

    // Scratch matrix for im2row, plus a vector of ones for the bias GEMM.
    type* tempMemory = (type*) op.context.getWorkspace(deviceType, tempVolume * sizeof(type)) ;
    type const* allOnesMemory = (type*) op.context.getAllOnes(deviceType,
                                                              dataType,
                                                              numOutputPixels) ;
    if (tempMemory == NULL || allOnesMemory == NULL) {
      error = op.context.getLastError() ;
      goto done ;
    }

    for (int image = 0 ; image < input.getSize() ; ++image) {
      ptrdiff_t dataOffset = (input.getHeight()*input.getWidth()*input.getDepth()) * image ;
      ptrdiff_t outputOffset = (output.getHeight()*output.getWidth()*output.getDepth()) * image ;

      // Unroll the input patches of this image into rows of the scratch matrix.
      error = vl::impl::im2row<deviceType,type>::forward
      (op.context,
       tempMemory,
       (type*)input.getMemory() + dataOffset,
       input.getHeight(), input.getWidth(), input.getDepth(),
       filter.getHeight(), filter.getWidth(),
       op.strideY, op.strideX,
       op.padTop, op.padBottom, op.padLeft, op.padRight,
       op.dilateY, op.dilateX) ;
      if (error != vl::VLE_Success) { goto done ; }

      // One GEMM per group: temp(group) * filter(group) -> output(group).
      for (int g = 0 ; g < numGroups ; ++ g) {
        ptrdiff_t filterGrpOffset = filterVolume * numFiltersPerGroup * g ;
        ptrdiff_t tempGrpOffset = numOutputPixels * filterVolume * g ;
        ptrdiff_t outputGrpOffset = numOutputPixels * numFiltersPerGroup * g ;
        type alpha = inputMult ;
        type beta = outputMult ;
        error = vl::impl::blas<deviceType,dataType>::gemm
        (op.context,
         'n', 'n',
         numOutputPixels, numFiltersPerGroup, filterVolume,
         alpha,
         tempMemory + tempGrpOffset, numOutputPixels,
         (type*)filter.getMemory() + filterGrpOffset, filterVolume,
         beta,
         (type*)output.getMemory() + outputOffset + outputGrpOffset, numOutputPixels) ;
        if (error != vl::VLE_Success) { goto done ; }
      }

      // Bias: output += ones(numOutputPixels,1) * bias(1,numChannels).
      if (bias) {
        type alpha = 1 ;
        type beta = 1 ;
        error = vl::impl::blas<deviceType,dataType>::gemm
        (op.context,
         'n', 'n',
         numOutputPixels, bias.getNumElements(), 1,
         alpha,
         allOnesMemory, numOutputPixels,
         (type*)bias.getMemory(), 1,
         beta,
         (type*)output.getMemory() + outputOffset, numOutputPixels) ;
        if (error != vl::VLE_Success) { goto done ; }
      }
    }

  done:
    return op.context.passError(error, __func__) ;
  }
} ;
// -------------------------------------------------------------------
// Backward
// -------------------------------------------------------------------
template<DeviceType deviceType, DataType dataType>
struct ConvolutionBackward
{
  // Computes any subset of the convolution derivatives:
  //   derBias   <- derOutput summed over the output pixels (per channel),
  //   derInput  <- derOutput * filter'  followed by im2row::backward,
  //   derFilter <- im2row(input)' * derOutput.
  // Null tensors are skipped; derOutput is required in all cases.
  vl::ErrorCode operator()
  (Convolution &op,
   Tensor &derInput,
   Tensor &derFilter,
   Tensor &derBias,
   Tensor const &input,
   Tensor const &filter,
   Tensor const &derOutput)
  {
    // Fix: initialize to VLE_Success; previously `error` could be read
    // uninitialized at `done:` (e.g. empty batch, or when no error path
    // assigned it before falling through).
    vl::ErrorCode error = vl::VLE_Success ;
    typedef typename vl::DataTypeTraits<dataType>::type type ;

    ptrdiff_t numGroups = 0 ;
    ptrdiff_t numFiltersPerGroup = 0 ;
    ptrdiff_t filterVolume = 0 ;
    type const* allOnesMemory = NULL ;
    ptrdiff_t tempVolume = 0 ;
    type* tempMemory = NULL ;

    // Needed for all derivatives.
    assert(derOutput) ;
    ptrdiff_t numOutputPixels = derOutput.getHeight() * derOutput.getWidth() ;

    if (derBias) {
      // Vector of ones used to sum derOutput over the output pixels.
      allOnesMemory = (type*) op.context.getAllOnes(deviceType,
                                                    dataType,
                                                    numOutputPixels) ;
      if (allOnesMemory == NULL) {
        error = op.context.getLastError() ;
        goto done ;
      }
    }

    if (derInput) {
      // for derivative w.r.t. data
      assert(filter) ;
      numGroups = derInput.getDepth() / filter.getDepth() ;
      filterVolume = filter.getHeight() * filter.getWidth() * filter.getDepth() ;
    }
    else if (derFilter) {
      // for derivative w.r.t. filter
      assert(input) ;
      numGroups = input.getDepth() / derFilter.getDepth() ;
      filterVolume = derFilter.getHeight() * derFilter.getWidth() * derFilter.getDepth() ;
    }
    // Fix: numGroups stays 0 when only derBias is requested; guard the
    // division (numFiltersPerGroup is unused in that case anyway).
    numFiltersPerGroup = (numGroups > 0) ? (derOutput.getDepth() / numGroups) : 0 ;

    // Scratch space for the im2row matrices (zero when neither derInput nor
    // derFilter is requested).
    tempVolume = numOutputPixels * filterVolume * numGroups ;
    if (tempVolume) {
      tempMemory = (type*) op.context.getWorkspace(deviceType, tempVolume * sizeof(type)) ;
      if (tempMemory == NULL) {
        error = op.context.getLastError() ;
        goto done ;
      }
    }

    for (int image = 0 ; image < derOutput.getSize() ; ++image) {
      ptrdiff_t derOutputOffset = (derOutput.getHeight()*derOutput.getWidth()*derOutput.getDepth()) * image ;

      /* compute derBias dz/dbias */
      if (derBias) {
        // derBias <- derOutput' * ones; beta = (image > 0) accumulates over
        // the batch and saves an explicit zero-initialization of the output.
        type alpha = 1 ;
        type beta = (image > 0) ;
        error = vl::impl::blas<deviceType,dataType>::gemv
        (op.context,
         't',
         numOutputPixels, derOutput.getDepth(),
         alpha, /* alpha */
         (type const*)derOutput.getMemory() + derOutputOffset, numOutputPixels,
         allOnesMemory, 1,
         beta, /* beta */
         (type*)derBias.getMemory(), 1) ;
        if (error != vl::VLE_Success) { return error ; }
      }

      /* compute derInput dz/dx */
      if (derInput) {
        // temp <- derOutput * filter' (per group), then fold the patch rows
        // back into the input image with im2row::backward.
        ptrdiff_t derInputOffset = (derInput.getHeight()*derInput.getWidth()*derInput.getDepth()) * image ;
        for (int g = 0 ; g < numGroups ; ++ g) {
          ptrdiff_t filterGrpOffset = filterVolume * numFiltersPerGroup * g ;
          ptrdiff_t tempGrpOffset = numOutputPixels * filterVolume * g ;
          ptrdiff_t derOutputGrpOffset = numOutputPixels * numFiltersPerGroup * g ;
          type alpha = 1 ;
          type beta = 0 ;
          error = vl::impl::blas<deviceType,dataType>::gemm
          (op.context,
           'n', 't',
           numOutputPixels, filterVolume, numFiltersPerGroup,
           alpha,
           (type*)derOutput.getMemory() + derOutputOffset + derOutputGrpOffset, numOutputPixels,
           (type*)filter.getMemory() + filterGrpOffset, filterVolume,
           beta,
           tempMemory + tempGrpOffset, numOutputPixels) ;
          if (error != vl::VLE_Success) { return error ; }
        }
        error = vl::impl::im2row<deviceType,type>::backward
        (op.context,
         (type*)derInput.getMemory() + derInputOffset,
         tempMemory,
         derInput.getHeight(), derInput.getWidth(), derInput.getDepth(),
         filter.getHeight(), filter.getWidth(),
         op.strideY, op.strideX,
         op.padTop, op.padBottom, op.padLeft, op.padRight,
         op.dilateY, op.dilateX) ;
        if (error != vl::VLE_Success) { return error ; }
      }

      /* compute derFilter dz/dF */
      if (derFilter) {
        // temp <- im2row(input); derFilter <- temp' * derOutput, accumulated
        // across the batch via beta = (image > 0).
        ptrdiff_t dataOffset = (input.getHeight()*input.getWidth()*input.getDepth()) * image ;
        error = vl::impl::im2row<deviceType,type>::forward
        (op.context,
         (type*)tempMemory,
         (type*)input.getMemory() + dataOffset,
         input.getHeight(), input.getWidth(), input.getDepth(),
         derFilter.getHeight(), derFilter.getWidth(),
         op.strideY, op.strideX,
         op.padTop, op.padBottom, op.padLeft, op.padRight,
         op.dilateY, op.dilateX) ;
        if (error != vl::VLE_Success) { return error ; }
        for (int g = 0 ; g < numGroups ; ++ g) {
          ptrdiff_t filterGrpOffset = filterVolume * numFiltersPerGroup * g ;
          ptrdiff_t tempGrpOffset = numOutputPixels * filterVolume * g ;
          ptrdiff_t derOutputGrpOffset = numOutputPixels * numFiltersPerGroup * g ;
          /* dzdF = temp' * dzdY */
          type alpha = 1 ;
          type beta = (image > 0) ;
          error = vl::impl::blas<deviceType,dataType>::gemm
          (op.context,
           't', 'n',
           filterVolume, numFiltersPerGroup, numOutputPixels,
           alpha,
           tempMemory + tempGrpOffset, numOutputPixels,
           (type*)derOutput.getMemory() + derOutputOffset + derOutputGrpOffset, numOutputPixels,
           beta,
           (type*)derFilter.getMemory() + filterGrpOffset, filterVolume) ;
          if (error != vl::VLE_Success) { return error ; }
        }
      }
    }

  done:
    return op.context.passError(error, __func__) ;
  }
} ;
// -------------------------------------------------------------------
// Convolution Transpose Forward
// -------------------------------------------------------------------
template<DeviceType deviceType, DataType dataType>
struct ConvolutionTransposeForward
{
  // Convolution transpose forward pass, realized as the *backward* (w.r.t.
  // data) pass of an ordinary convolution, one image at a time. Processing
  // per image is required: a batched nnconv backward call would accumulate
  // every image into a single output feature field.
  vl::ErrorCode operator()
  (ConvolutionTranspose &op,
   vl::Tensor &output,
   vl::Tensor const &input,
   vl::Tensor const &filter,
   vl::Tensor const &bias)
  {
    vl::ErrorCode error = VLE_Success ;
    // Per-image strides (in elements) into the input and output tensors.
    size_t inputImageVolume = input.getHeight()*input.getWidth()*input.getDepth() ;
    size_t outputImageVolume = output.getHeight()*output.getWidth()*output.getDepth() ;

    for (int image = 0 ; image < input.getSize() ; ++image) {
      // Build one-image views over the batched tensors.
      Tensor inputSlice(input) ;
      Tensor outputSlice(output) ;
      switch (input.getDataType()) {
        case VLDT_Float:
          inputSlice.setMemory((float*)input.getMemory() + inputImageVolume * image) ;
          outputSlice.setMemory((float*)output.getMemory() + outputImageVolume * image) ;
          break ;
        case VLDT_Double:
          inputSlice.setMemory((double*)input.getMemory() + inputImageVolume * image) ;
          outputSlice.setMemory((double*)output.getMemory() + outputImageVolume * image) ;
          break ;
        default:
          assert(false) ;
      }
      inputSlice.setSize(1) ;
      outputSlice.setSize(1) ;

      // The transpose of (stride, pad) convolution is (upsample, crop).
      Convolution opc(op.context, op.upsampleY, op.upsampleX,
                      op.cropTop, op.cropBottom,
                      op.cropLeft, op.cropRight,
                      1, 1) ;
      Tensor null ;
      error = opc.backward(outputSlice, null, null,
                           null, filter, inputSlice) ;
      if (error != VLE_Success) { goto done ; }
    }
    // Bias is added once over the whole batched output.
    if (bias) {
      error = vl::nn::Bias(op.context).forward(output,1.0,Tensor(),0,bias,1.0);
    }
  done:
    return error ;
  }
} ;
// -------------------------------------------------------------------
// Convolution Transpose Backward
// -------------------------------------------------------------------
template<DeviceType deviceType, DataType dataType>
struct ConvolutionTransposeBackward
{
  // Backward pass of convolution transpose, expressed through the forward
  // and backward passes of an ordinary convolution with swapped roles.
  vl::ErrorCode operator()
  (ConvolutionTranspose &op,
   vl::Tensor &derInput,
   vl::Tensor &derFilter,
   vl::Tensor &derBias,
   vl::Tensor const &input,
   vl::Tensor const &filter,
   vl::Tensor const &derOutput)
  {
    vl::ErrorCode error = vl::VLE_Success ;
    Convolution opc(op.context,
                    op.upsampleY, op.upsampleX,
                    op.cropTop, op.cropBottom,
                    op.cropLeft, op.cropRight,
                    1, 1) ;
    Tensor null ;

    // dz/dinput: ordinary convolution forward of derOutput with the filters.
    if (derInput) {
      error = opc.forward(derInput, 0,
                          derOutput, 1,
                          filter, null) ;
      if (error != VLE_Success) { goto done ; }
    }

    // dz/dfilter: ordinary convolution backward w.r.t. the filters.
    if (derFilter) {
      error = opc.backward(null, derFilter, null,
                           derOutput, Tensor(), input) ;
      if (error != VLE_Success) { goto done ; }
    }

    // dz/dbias: reduce derOutput through the bias operator.
    if (derBias) {
      error = vl::nn::Bias(op.context).backward(null,0,derBias,0,0,1,derOutput) ;
    }
  done:
    return error ;
  }
} ;
// -------------------------------------------------------------------
// Drivers
// -------------------------------------------------------------------
#if ENABLE_CUDNN
#include "nnconv_cudnn.cu"
#endif
// Construct a convolution operator with the given stride, padding and
// dilation hyper-parameters; `context` supplies workspaces and error state.
Convolution::Convolution(Context &context,
int strideY, int strideX,
int padTop, int padBottom,
int padLeft, int padRight,
int dilateY, int dilateX)
:
context(context),
strideY(strideY), strideX(strideX),
padTop(padTop), padBottom(padBottom),
padLeft(padLeft), padRight(padRight),
dilateY(dilateY), dilateX(dilateX)
{ }
// Forward convolution entry point: dispatches to the cuDNN implementation
// when available, otherwise to the generic im2row+GEMM path above.
vl::ErrorCode
Convolution::forward(Tensor &output, double outputMult,
Tensor const& input, double inputMult,
Tensor const& filter,
Tensor const& bias)
{
return dispatch_cudnn<
ConvolutionForward,
ConvolutionForwardCudnn>()
(*this,output,outputMult,input,inputMult,filter,bias) ;
}
// Backward convolution entry point: dispatches to the cuDNN implementation
// when available, otherwise to the generic path above. Null derivative
// tensors are skipped by the implementation.
vl::ErrorCode
Convolution::backward(Tensor &derInput,
Tensor &derFilter,
Tensor &derBias,
Tensor const &input,
Tensor const &filter,
Tensor const &derOutput)
{
return dispatch_cudnn<
ConvolutionBackward,
ConvolutionBackwardCudnn>()
(*this,derInput,derFilter,derBias,input,filter,derOutput) ;
}
// Construct a convolution-transpose operator with the given upsampling and
// cropping hyper-parameters (the transpose analogues of stride and padding).
ConvolutionTranspose::ConvolutionTranspose(Context &context,
int upsampleY,
int upsampleX,
int cropTop,
int cropBottom,
int cropLeft,
int cropRight)
:
context(context),
upsampleY(upsampleY),
upsampleX(upsampleX),
cropTop(cropTop),
cropBottom(cropBottom),
cropLeft(cropLeft),
cropRight(cropRight)
{ }
// Forward convolution transpose: routed through the plain dispatcher
// (no cuDNN variant is registered for this operator here).
vl::ErrorCode
ConvolutionTranspose::forward(Tensor &output,
Tensor const& input,
Tensor const& filter,
Tensor const& bias)
{
return dispatch<ConvolutionTransposeForward>()
(*this,output,input,filter,bias) ;
}
// Backward convolution transpose: routed through the plain dispatcher
// (no cuDNN variant is registered for this operator here).
vl::ErrorCode
ConvolutionTranspose::backward(Tensor &derInput,
                               Tensor &derFilter,
                               Tensor &derBias,
                               Tensor const &input,
                               Tensor const &filter,
                               Tensor const &derOutput)
{
  return dispatch<ConvolutionTransposeBackward>()
    (*this, derInput, derFilter, derBias, input, filter, derOutput) ;
}
#include "cuda_helper.h"
// Keccak state pre-absorbed on the host (see jackpot_keccak512_cpu_setBlock)
// so each GPU thread only processes the final message block.
__constant__ uint64_t c_State[25];
// Remaining padded message (72 bytes = 18 x 32-bit words); word 1 is
// overwritten per thread with the byte-swapped nonce.
__constant__ uint32_t c_PaddedMessage[18];
// Assemble one 64-bit lane from two little-endian 32-bit words.
#define U32TO64_LE(p) \
(((uint64_t)(*p)) | (((uint64_t)(*(p + 1))) << 32))
// Split one 64-bit lane into two little-endian 32-bit words.
#define U64TO32_LE(p, v) \
*p = (uint32_t)((v)); *(p+1) = (uint32_t)((v) >> 32);
// Keccak-f[1600] round constants (host copy, uploaded to constant memory
// in jackpot_keccak512_cpu_init).
static const uint64_t host_keccak_round_constants[24] = {
0x0000000000000001ull, 0x0000000000008082ull,
0x800000000000808aull, 0x8000000080008000ull,
0x000000000000808bull, 0x0000000080000001ull,
0x8000000080008081ull, 0x8000000000008009ull,
0x000000000000008aull, 0x0000000000000088ull,
0x0000000080008009ull, 0x000000008000000aull,
0x000000008000808bull, 0x800000000000008bull,
0x8000000000008089ull, 0x8000000000008003ull,
0x8000000000008002ull, 0x8000000000000080ull,
0x000000000000800aull, 0x800000008000000aull,
0x8000000080008081ull, 0x8000000000008080ull,
0x0000000080000001ull, 0x8000000080008008ull
};
// Device-side copy of the round constants.
__constant__ uint64_t c_keccak_round_constants[24];
// Absorb one 72-byte block (rate r = 576) into the 25-lane state and run the
// full 24-round Keccak-f[1600] permutation (theta, rho/pi, chi, iota).
// `in` points to 18 little-endian 32-bit words; constants come from
// c_keccak_round_constants.
static __device__ __forceinline__ void
keccak_block(uint64_t *s, const uint32_t *in, const uint64_t *keccak_round_constants) {
int i;
uint64_t t[5], u[5], v, w;
/* absorb input */
#pragma unroll 9
for (i = 0; i < 72 / 8; i++, in += 2)
s[i] ^= U32TO64_LE(in);
for (i = 0; i < 24; i++) {
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = s[0] ^ s[5] ^ s[10] ^ s[15] ^ s[20];
t[1] = s[1] ^ s[6] ^ s[11] ^ s[16] ^ s[21];
t[2] = s[2] ^ s[7] ^ s[12] ^ s[17] ^ s[22];
t[3] = s[3] ^ s[8] ^ s[13] ^ s[18] ^ s[23];
t[4] = s[4] ^ s[9] ^ s[14] ^ s[19] ^ s[24];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
u[0] = t[4] ^ ROTL64(t[1], 1);
u[1] = t[0] ^ ROTL64(t[2], 1);
u[2] = t[1] ^ ROTL64(t[3], 1);
u[3] = t[2] ^ ROTL64(t[4], 1);
u[4] = t[3] ^ ROTL64(t[0], 1);
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
s[0] ^= u[0]; s[5] ^= u[0]; s[10] ^= u[0]; s[15] ^= u[0]; s[20] ^= u[0];
s[1] ^= u[1]; s[6] ^= u[1]; s[11] ^= u[1]; s[16] ^= u[1]; s[21] ^= u[1];
s[2] ^= u[2]; s[7] ^= u[2]; s[12] ^= u[2]; s[17] ^= u[2]; s[22] ^= u[2];
s[3] ^= u[3]; s[8] ^= u[3]; s[13] ^= u[3]; s[18] ^= u[3]; s[23] ^= u[3];
s[4] ^= u[4]; s[9] ^= u[4]; s[14] ^= u[4]; s[19] ^= u[4]; s[24] ^= u[4];
/* rho pi: b[..] = rotl(a[..], ..) */
v = s[ 1];
s[ 1] = ROTL64(s[ 6], 44);
s[ 6] = ROTL64(s[ 9], 20);
s[ 9] = ROTL64(s[22], 61);
s[22] = ROTL64(s[14], 39);
s[14] = ROTL64(s[20], 18);
s[20] = ROTL64(s[ 2], 62);
s[ 2] = ROTL64(s[12], 43);
s[12] = ROTL64(s[13], 25);
s[13] = ROTL64(s[19], 8);
s[19] = ROTL64(s[23], 56);
s[23] = ROTL64(s[15], 41);
s[15] = ROTL64(s[ 4], 27);
s[ 4] = ROTL64(s[24], 14);
s[24] = ROTL64(s[21], 2);
s[21] = ROTL64(s[ 8], 55);
s[ 8] = ROTL64(s[16], 45);
s[16] = ROTL64(s[ 5], 36);
s[ 5] = ROTL64(s[ 3], 28);
s[ 3] = ROTL64(s[18], 21);
s[18] = ROTL64(s[17], 15);
s[17] = ROTL64(s[11], 10);
s[11] = ROTL64(s[ 7], 6);
s[ 7] = ROTL64(s[10], 3);
s[10] = ROTL64( v, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
v = s[ 0]; w = s[ 1]; s[ 0] ^= (~w) & s[ 2]; s[ 1] ^= (~s[ 2]) & s[ 3]; s[ 2] ^= (~s[ 3]) & s[ 4]; s[ 3] ^= (~s[ 4]) & v; s[ 4] ^= (~v) & w;
v = s[ 5]; w = s[ 6]; s[ 5] ^= (~w) & s[ 7]; s[ 6] ^= (~s[ 7]) & s[ 8]; s[ 7] ^= (~s[ 8]) & s[ 9]; s[ 8] ^= (~s[ 9]) & v; s[ 9] ^= (~v) & w;
v = s[10]; w = s[11]; s[10] ^= (~w) & s[12]; s[11] ^= (~s[12]) & s[13]; s[12] ^= (~s[13]) & s[14]; s[13] ^= (~s[14]) & v; s[14] ^= (~v) & w;
v = s[15]; w = s[16]; s[15] ^= (~w) & s[17]; s[16] ^= (~s[17]) & s[18]; s[17] ^= (~s[18]) & s[19]; s[18] ^= (~s[19]) & v; s[19] ^= (~v) & w;
v = s[20]; w = s[21]; s[20] ^= (~w) & s[22]; s[21] ^= (~s[22]) & s[23]; s[22] ^= (~s[23]) & s[24]; s[23] ^= (~s[24]) & v; s[24] ^= (~v) & w;
/* iota: a[0,0] ^= round constant */
s[0] ^= keccak_round_constants[i];
}
}
// One thread per nonce: finish the Keccak-512 hash of the padded message
// (the fixed prefix was already absorbed into c_State on the host) and write
// the 64-byte digest to g_hash[8 * position].
__global__ void jackpot_keccak512_gpu_hash(uint32_t threads, uint32_t startNounce, uint64_t *g_hash)
{
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nounce = startNounce + thread;
int hashPosition = nounce - startNounce;
// copy the padded message tail from constant memory
uint32_t message[18];
#pragma unroll 18
for(int i=0;i<18;i++)
message[i] = c_PaddedMessage[i];
// insert this thread's nonce (byte-swapped)
message[1] = cuda_swab32(nounce);
// initialize the state from the pre-absorbed prefix
uint64_t keccak_gpu_state[25];
#pragma unroll 25
for (int i=0; i<25; i++)
keccak_gpu_state[i] = c_State[i];
// absorb the final block and run the permutation
keccak_block(keccak_gpu_state, message, c_keccak_round_constants);
// squeeze the 512-bit digest out of the first 8 lanes
uint32_t hash[16];
#pragma unroll 8
for (int i = 0; i < 64; i += 8) {
U64TO32_LE((&hash[i/4]), keccak_gpu_state[i / 8]);
}
// store the result
uint32_t *outpHash = (uint32_t*)&g_hash[8 * hashPosition];
#pragma unroll 16
for(int i=0;i<16;i++)
outpHash[i] = hash[i];
}
}
// Setup functions
// One-time initialization: upload the Keccak round constants into GPU
// constant memory (thr_id and threads are currently unused).
__host__ void jackpot_keccak512_cpu_init(int thr_id, uint32_t threads)
{
// copy the round constants into GPU constant memory
cudaMemcpyToSymbol( c_keccak_round_constants,
host_keccak_round_constants,
sizeof(host_keccak_round_constants),
0, cudaMemcpyHostToDevice);
}
// Host-side reference Keccak used to pre-absorb the message prefix.
// b = 1600 state bits, r = 576 rate bits (72 bytes) -> Keccak-512 parameters.
#define cKeccakB 1600
#define cKeccakR 576
#define cKeccakR_SizeInBytes (cKeccakR / 8)
#define crypto_hash_BYTES 64
#if (cKeccakB == 1600)
typedef unsigned long long UINT64;
typedef UINT64 tKeccakLane;
#define cKeccakNumberOfRounds 24
#endif
#define cKeccakLaneSizeInBits (sizeof(tKeccakLane) * 8)
// Rotate-left of one lane; the offset is reduced modulo the lane width.
#define ROL(a, offset) ((((tKeccakLane)a) << ((offset) % cKeccakLaneSizeInBits)) ^ (((tKeccakLane)a) >> (cKeccakLaneSizeInBits-((offset) % cKeccakLaneSizeInBits))))
// For 8-bit lanes a rotation by a multiple of 8 is the identity.
#if ((cKeccakB/25) == 8)
#define ROL_mult8(a, offset) ((tKeccakLane)a)
#else
#define ROL_mult8(a, offset) ROL(a, offset)
#endif
void KeccakF( tKeccakLane * state, const tKeccakLane *in, int laneCount );
// Round constants for the host permutation; the #if ladder trims the table
// for smaller Keccak widths (all 24 entries are active for cKeccakB == 1600).
const tKeccakLane KeccakF_RoundConstants[cKeccakNumberOfRounds] =
{
(tKeccakLane)0x0000000000000001ULL,
(tKeccakLane)0x0000000000008082ULL,
(tKeccakLane)0x800000000000808aULL,
(tKeccakLane)0x8000000080008000ULL,
(tKeccakLane)0x000000000000808bULL,
(tKeccakLane)0x0000000080000001ULL,
(tKeccakLane)0x8000000080008081ULL,
(tKeccakLane)0x8000000000008009ULL,
(tKeccakLane)0x000000000000008aULL,
(tKeccakLane)0x0000000000000088ULL,
(tKeccakLane)0x0000000080008009ULL,
(tKeccakLane)0x000000008000000aULL,
(tKeccakLane)0x000000008000808bULL,
(tKeccakLane)0x800000000000008bULL,
(tKeccakLane)0x8000000000008089ULL,
(tKeccakLane)0x8000000000008003ULL,
(tKeccakLane)0x8000000000008002ULL,
(tKeccakLane)0x8000000000000080ULL
#if (cKeccakB >= 400)
, (tKeccakLane)0x000000000000800aULL,
(tKeccakLane)0x800000008000000aULL
#if (cKeccakB >= 800)
, (tKeccakLane)0x8000000080008081ULL,
(tKeccakLane)0x8000000000008080ULL
#if (cKeccakB == 1600)
, (tKeccakLane)0x0000000080000001ULL,
(tKeccakLane)0x8000000080008008ULL
#endif
#endif
#endif
};
// Host Keccak-f[1600] with input absorption: XOR `laneCount` input lanes
// into the state, then run all 24 rounds, two per loop iteration (A -> E,
// then E -> A) with theta/rho/pi/chi/iota fully unrolled. Lane naming: Axy
// where x in {b,g,k,m,s} is the plane and y in {a,e,i,o,u} the sheet.
void KeccakF( tKeccakLane * state, const tKeccakLane *in, int laneCount )
{
// absorb: XOR the input block into the first laneCount lanes
{
while ( --laneCount >= 0 )
{
state[laneCount] ^= in[laneCount];
}
}
{
tKeccakLane Aba, Abe, Abi, Abo, Abu;
tKeccakLane Aga, Age, Agi, Ago, Agu;
tKeccakLane Aka, Ake, Aki, Ako, Aku;
tKeccakLane Ama, Ame, Ami, Amo, Amu;
tKeccakLane Asa, Ase, Asi, Aso, Asu;
tKeccakLane BCa, BCe, BCi, BCo, BCu;
tKeccakLane Da, De, Di, Do, Du;
tKeccakLane Eba, Ebe, Ebi, Ebo, Ebu;
tKeccakLane Ega, Ege, Egi, Ego, Egu;
tKeccakLane Eka, Eke, Eki, Eko, Eku;
tKeccakLane Ema, Eme, Emi, Emo, Emu;
tKeccakLane Esa, Ese, Esi, Eso, Esu;
// reuse the (now exhausted) laneCount variable as the round counter
#define round laneCount
//copyFromState(A, state)
Aba = state[ 0];
Abe = state[ 1];
Abi = state[ 2];
Abo = state[ 3];
Abu = state[ 4];
Aga = state[ 5];
Age = state[ 6];
Agi = state[ 7];
Ago = state[ 8];
Agu = state[ 9];
Aka = state[10];
Ake = state[11];
Aki = state[12];
Ako = state[13];
Aku = state[14];
Ama = state[15];
Ame = state[16];
Ami = state[17];
Amo = state[18];
Amu = state[19];
Asa = state[20];
Ase = state[21];
Asi = state[22];
Aso = state[23];
Asu = state[24];
for( round = 0; round < cKeccakNumberOfRounds; round += 2 )
{
// prepareTheta
BCa = Aba^Aga^Aka^Ama^Asa;
BCe = Abe^Age^Ake^Ame^Ase;
BCi = Abi^Agi^Aki^Ami^Asi;
BCo = Abo^Ago^Ako^Amo^Aso;
BCu = Abu^Agu^Aku^Amu^Asu;
//thetaRhoPiChiIotaPrepareTheta(round , A, E)
Da = BCu^ROL(BCe, 1);
De = BCa^ROL(BCi, 1);
Di = BCe^ROL(BCo, 1);
Do = BCi^ROL(BCu, 1);
Du = BCo^ROL(BCa, 1);
Aba ^= Da;
BCa = Aba;
Age ^= De;
BCe = ROL(Age, 44);
Aki ^= Di;
BCi = ROL(Aki, 43);
Amo ^= Do;
BCo = ROL(Amo, 21);
Asu ^= Du;
BCu = ROL(Asu, 14);
Eba = BCa ^((~BCe)& BCi );
Eba ^= (tKeccakLane)KeccakF_RoundConstants[round];
Ebe = BCe ^((~BCi)& BCo );
Ebi = BCi ^((~BCo)& BCu );
Ebo = BCo ^((~BCu)& BCa );
Ebu = BCu ^((~BCa)& BCe );
Abo ^= Do;
BCa = ROL(Abo, 28);
Agu ^= Du;
BCe = ROL(Agu, 20);
Aka ^= Da;
BCi = ROL(Aka, 3);
Ame ^= De;
BCo = ROL(Ame, 45);
Asi ^= Di;
BCu = ROL(Asi, 61);
Ega = BCa ^((~BCe)& BCi );
Ege = BCe ^((~BCi)& BCo );
Egi = BCi ^((~BCo)& BCu );
Ego = BCo ^((~BCu)& BCa );
Egu = BCu ^((~BCa)& BCe );
Abe ^= De;
BCa = ROL(Abe, 1);
Agi ^= Di;
BCe = ROL(Agi, 6);
Ako ^= Do;
BCi = ROL(Ako, 25);
Amu ^= Du;
BCo = ROL_mult8(Amu, 8);
Asa ^= Da;
BCu = ROL(Asa, 18);
Eka = BCa ^((~BCe)& BCi );
Eke = BCe ^((~BCi)& BCo );
Eki = BCi ^((~BCo)& BCu );
Eko = BCo ^((~BCu)& BCa );
Eku = BCu ^((~BCa)& BCe );
Abu ^= Du;
BCa = ROL(Abu, 27);
Aga ^= Da;
BCe = ROL(Aga, 36);
Ake ^= De;
BCi = ROL(Ake, 10);
Ami ^= Di;
BCo = ROL(Ami, 15);
Aso ^= Do;
BCu = ROL_mult8(Aso, 56);
Ema = BCa ^((~BCe)& BCi );
Eme = BCe ^((~BCi)& BCo );
Emi = BCi ^((~BCo)& BCu );
Emo = BCo ^((~BCu)& BCa );
Emu = BCu ^((~BCa)& BCe );
Abi ^= Di;
BCa = ROL(Abi, 62);
Ago ^= Do;
BCe = ROL(Ago, 55);
Aku ^= Du;
BCi = ROL(Aku, 39);
Ama ^= Da;
BCo = ROL(Ama, 41);
Ase ^= De;
BCu = ROL(Ase, 2);
Esa = BCa ^((~BCe)& BCi );
Ese = BCe ^((~BCi)& BCo );
Esi = BCi ^((~BCo)& BCu );
Eso = BCo ^((~BCu)& BCa );
Esu = BCu ^((~BCa)& BCe );
// prepareTheta
BCa = Eba^Ega^Eka^Ema^Esa;
BCe = Ebe^Ege^Eke^Eme^Ese;
BCi = Ebi^Egi^Eki^Emi^Esi;
BCo = Ebo^Ego^Eko^Emo^Eso;
BCu = Ebu^Egu^Eku^Emu^Esu;
//thetaRhoPiChiIotaPrepareTheta(round+1, E, A)
Da = BCu^ROL(BCe, 1);
De = BCa^ROL(BCi, 1);
Di = BCe^ROL(BCo, 1);
Do = BCi^ROL(BCu, 1);
Du = BCo^ROL(BCa, 1);
Eba ^= Da;
BCa = Eba;
Ege ^= De;
BCe = ROL(Ege, 44);
Eki ^= Di;
BCi = ROL(Eki, 43);
Emo ^= Do;
BCo = ROL(Emo, 21);
Esu ^= Du;
BCu = ROL(Esu, 14);
Aba = BCa ^((~BCe)& BCi );
Aba ^= (tKeccakLane)KeccakF_RoundConstants[round+1];
Abe = BCe ^((~BCi)& BCo );
Abi = BCi ^((~BCo)& BCu );
Abo = BCo ^((~BCu)& BCa );
Abu = BCu ^((~BCa)& BCe );
Ebo ^= Do;
BCa = ROL(Ebo, 28);
Egu ^= Du;
BCe = ROL(Egu, 20);
Eka ^= Da;
BCi = ROL(Eka, 3);
Eme ^= De;
BCo = ROL(Eme, 45);
Esi ^= Di;
BCu = ROL(Esi, 61);
Aga = BCa ^((~BCe)& BCi );
Age = BCe ^((~BCi)& BCo );
Agi = BCi ^((~BCo)& BCu );
Ago = BCo ^((~BCu)& BCa );
Agu = BCu ^((~BCa)& BCe );
Ebe ^= De;
BCa = ROL(Ebe, 1);
Egi ^= Di;
BCe = ROL(Egi, 6);
Eko ^= Do;
BCi = ROL(Eko, 25);
Emu ^= Du;
BCo = ROL_mult8(Emu, 8);
Esa ^= Da;
BCu = ROL(Esa, 18);
Aka = BCa ^((~BCe)& BCi );
Ake = BCe ^((~BCi)& BCo );
Aki = BCi ^((~BCo)& BCu );
Ako = BCo ^((~BCu)& BCa );
Aku = BCu ^((~BCa)& BCe );
Ebu ^= Du;
BCa = ROL(Ebu, 27);
Ega ^= Da;
BCe = ROL(Ega, 36);
Eke ^= De;
BCi = ROL(Eke, 10);
Emi ^= Di;
BCo = ROL(Emi, 15);
Eso ^= Do;
BCu = ROL_mult8(Eso, 56);
Ama = BCa ^((~BCe)& BCi );
Ame = BCe ^((~BCi)& BCo );
Ami = BCi ^((~BCo)& BCu );
Amo = BCo ^((~BCu)& BCa );
Amu = BCu ^((~BCa)& BCe );
Ebi ^= Di;
BCa = ROL(Ebi, 62);
Ego ^= Do;
BCe = ROL(Ego, 55);
Eku ^= Du;
BCi = ROL(Eku, 39);
Ema ^= Da;
BCo = ROL(Ema, 41);
Ese ^= De;
BCu = ROL(Ese, 2);
Asa = BCa ^((~BCe)& BCi );
Ase = BCe ^((~BCi)& BCo );
Asi = BCi ^((~BCo)& BCu );
Aso = BCo ^((~BCu)& BCa );
Asu = BCu ^((~BCa)& BCe );
}
//copyToState(state, A)
state[ 0] = Aba;
state[ 1] = Abe;
state[ 2] = Abi;
state[ 3] = Abo;
state[ 4] = Abu;
state[ 5] = Aga;
state[ 6] = Age;
state[ 7] = Agi;
state[ 8] = Ago;
state[ 9] = Agu;
state[10] = Aka;
state[11] = Ake;
state[12] = Aki;
state[13] = Ako;
state[14] = Aku;
state[15] = Ama;
state[16] = Ame;
state[17] = Ami;
state[18] = Amo;
state[19] = Amu;
state[20] = Asa;
state[21] = Ase;
state[22] = Asi;
state[23] = Aso;
state[24] = Asu;
#undef round
}
}
// inlen may range from 72 to 143 bytes
// Per-work-item precomputation: absorb all complete 72-byte blocks of the
// header into the Keccak state on the host, upload that state, then build
// the padded final block (pad 0x01 ... 0x80) and upload it. The GPU kernel
// then only has to process this one last block per nonce.
__host__ void jackpot_keccak512_cpu_setBlock(void *pdata, size_t inlen)
{
const unsigned char *in = (const unsigned char*)pdata;
tKeccakLane state[5 * 5];
unsigned char temp[cKeccakR_SizeInBytes];
memset( state, 0, sizeof(state) );
// NOTE(review): the lane-wise cast assumes pdata is 8-byte aligned — confirm.
for ( /* empty */; inlen >= cKeccakR_SizeInBytes; inlen -= cKeccakR_SizeInBytes, in += cKeccakR_SizeInBytes )
{
KeccakF( state, (const tKeccakLane*)in, cKeccakR_SizeInBytes / sizeof(tKeccakLane) );
}
// copy the state after absorbing the complete 72-byte block(s)
// into constant memory
cudaMemcpyToSymbol( c_State,
state,
sizeof(state),
0, cudaMemcpyHostToDevice);
// padding (Keccak pad: 0x01 marker, zero fill, final 0x80 bit)
memcpy( temp, in, (size_t)inlen );
temp[inlen++] = 1;
memset( temp+inlen, 0, cKeccakR_SizeInBytes - (size_t)inlen );
temp[cKeccakR_SizeInBytes-1] |= 0x80;
// copy the rest of the message plus the padding into constant memory
cudaMemcpyToSymbol( c_PaddedMessage,
temp,
cKeccakR_SizeInBytes,
0, cudaMemcpyHostToDevice);
}
// Launch the Keccak-512 kernel over `threads` nonces starting at startNounce,
// writing one 64-byte digest per nonce into d_hash.
__host__ void jackpot_keccak512_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
    const uint32_t threadsperblock = 256;
    // ceil-divide the nonce range into thread blocks
    dim3 grid((threads + threadsperblock - 1) / threadsperblock);
    dim3 block(threadsperblock);
    jackpot_keccak512_gpu_hash<<<grid, block>>>(threads, startNounce, (uint64_t*)d_hash);
}
using namespace std;
// Upper bound on threads per block used by the launch configurations below.
#define MAX_KERNEL_THREADS 256
// Element type of the CSR values/weights: float or double
typedef float vtype;
// Convenience alias for a host-side 2-D array of vtype (not used in this chunk).
typedef vector<vector<vtype>> matrix;
// Cooperative sum of w[ind[0..n-1]] computed by the blockDim.x threads of
// the calling block via an inclusive shuffle scan; every thread returns the
// total. The shuffle width is blockDim.x, so blockDim.x must not exceed the
// warp size.
// NOTE(review): uses the mask-less __shfl/__shfl_up variants; this file uses
// the hip* runtime API, where these are the supported forms — CUDA 9+ would
// require the *_sync versions. Confirm the target toolchain.
template<typename T>
__device__
T parallel_prefix_sum(const int n, const int *ind, const T *w)
{
T sum = 0.0;
T last;
int mn =(((n+blockDim.x-1)/blockDim.x)*blockDim.x); //n rounded up to a multiple of blockDim.x
for (int i=threadIdx.x; i<mn; i+=blockDim.x) {
//All threads (especially the last one) must always participate
//in the shfl instruction, otherwise their sum will be undefined.
//So, the loop stopping condition is based on multiple of n in loop increments,
//so that all threads enter into the loop and inside we make sure we do not
//read out of bounds memory checking for the actual size n.
//check if the thread is valid
bool valid = i<n;
//Notice that the last thread is used to propagate the prefix sum.
//For all the threads, in the first iteration the last is 0, in the following
//iterations it is the value at the last thread of the previous iterations.
//get the value of the last thread
last = __shfl(sum, blockDim.x-1, blockDim.x);
//if you are valid read the value from memory, otherwise set your value to 0
sum = (valid) ? w[ind[i]] : 0.0;
//do prefix sum (of size warpSize=blockDim.x =< 32)
for (int j=1; j<blockDim.x; j*=2) {
T v = __shfl_up(sum, j, blockDim.x);
if (threadIdx.x >= j) sum += v;
}
//shift by last
sum += last;
//notice that no __threadfence or __syncthreads are needed in this implementation
}
//get the value of the last thread (to all threads)
last = __shfl(sum, blockDim.x-1, blockDim.x);
return last;
}
// Volume of neighboors (*weight_s)
// Per-row volume: work[row] = sum of the neighbor weights (weighted) or the
// row degree (unweighted). Rows are distributed over the y dimension; the x
// threads of a block cooperate in the prefix-sum reduction.
template<bool weighted, typename T>
__global__ void
jaccard_row_sum(const int n, const int *csrPtr, const int *csrInd, const T *w, T *work)
{
  for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < n; row += blockDim.y * gridDim.y) {
    const int rowStart = csrPtr[row];
    const int rowEnd   = csrPtr[row + 1];
    const int degree   = rowEnd - rowStart;
    if (weighted) {
      // Sum w over this row's column indices; lane 0 commits the total.
      T rowSum = parallel_prefix_sum(degree, csrInd + rowStart, w);
      if (threadIdx.x == 0) work[row] = rowSum;
    }
    else {
      work[row] = (T)degree;
    }
  }
}
// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
// Note the number of columns is constrained by the number of rows
// For every non-zero (row, col) at position j:
//   weight_s[j] = work[row] + work[col]  (sum of the two row volumes),
//   weight_i[j] = total weight of the columns common to both rows.
// The intersection is found by binary-searching the larger row for each
// entry of the smaller (reference) row; x-threads cooperate on one edge, so
// partial matches are accumulated with atomicAdd.
template<bool weighted, typename T>
__global__ void
jaccard_is(const int n, const int e, const int *csrPtr, const int *csrInd,
const T *v, const T *work, T *weight_i, T *weight_s)
{
for (int row=threadIdx.z+blockIdx.z*blockDim.z; row<n; row+=gridDim.z*blockDim.z) {
for (int j=csrPtr[row]+threadIdx.y+blockIdx.y*blockDim.y; j<csrPtr[row+1]; j+=gridDim.y*blockDim.y) {
int col = csrInd[j];
//find which row has least elements (and call it reference row)
int Ni = csrPtr[row+1] - csrPtr[row];
int Nj = csrPtr[col+1] - csrPtr[col];
int ref= (Ni < Nj) ? row : col;
int cur= (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (int i=csrPtr[ref]+threadIdx.x+blockIdx.x*blockDim.x; i<csrPtr[ref+1]; i+=gridDim.x*blockDim.x) {
int match =-1;
int ref_col = csrInd[i];
T ref_val = weighted ? v[ref_col] : (T)1.0;
//binary search (column indices are sorted within each row)
int left = csrPtr[cur];
int right= csrPtr[cur+1]-1;
while(left <= right){
int middle = (left+right)>>1;
int cur_col= csrInd[middle];
if (cur_col > ref_col) {
right=middle-1;
}
else if (cur_col < ref_col) {
left=middle+1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1){
atomicAdd(&weight_i[j],ref_val);
}
}
}
}
}
// Final Jaccard weight per non-zero element j:
//   weight_j[j] = gamma * csrVal[j] * |intersection| / |union|,
// where |union| = weight_s[j] - weight_i[j].
template<bool weighted, typename T>
__global__ void
jaccard_jw(const int e,
const T *csrVal,
const T gamma,
const T *weight_i,
const T *weight_s,
T *weight_j)
{
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < e; idx += stride) {
    const T inter = weight_i[idx];
    const T uni   = weight_s[idx] - inter;
    weight_j[idx] = (gamma * csrVal[idx]) * (inter / uni);
  }
}
// Initialize e weight entries: a deterministic pseudo-weight (j+1)/e in
// weighted mode, otherwise the given constant. Note w[k] is the weight
// associated with column index k, so multiple non-zeros on different rows
// may share the same weight value.
template <bool weighted, typename T>
__global__ void
fill(const int e, T* w, const T value)
{
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < e; idx += stride) {
    w[idx] = weighted ? (T)(idx + 1) / e : value;
  }
}
// Compute Jaccard edge weights for a CSR matrix on the GPU (HIP).
//
// iteration : how many times the full kernel pipeline is repeated
//             (the result is identical each iteration; repeats exist
//             for timing purposes)
// n         : number of matrix rows
// e         : number of non-zero entries
// csr_ptr   : host CSR row-pointer array (n+1 entries)
// csr_ind   : host CSR column-index array (e entries)
// csr_val   : host CSR value array (e entries)
//
// Pipeline per iteration: fill (init weights) -> jaccard_row_sum ->
// jaccard_is (intersection/union volumes) -> jaccard_jw (final weights).
//
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here;
// a failed allocation would only surface as wrong results downstream.
template <bool weighted, typename T>
void jaccard_weight (const int iteration, const int n, const int e,
                     int* csr_ptr, int* csr_ind, T* csr_val)
{
  const T gamma = (T)0.46;  // arbitrary

  T *d_weight_i,
    *d_weight_s,
    *d_weight_j,
    *d_work;
  int *d_csrInd;
  int *d_csrPtr;
  T *d_csrVal;

#ifdef DEBUG
  T* weight_i = (T*) malloc (sizeof(T) * e);
  T* weight_s = (T*) malloc (sizeof(T) * e);
  T* work = (T*) malloc (sizeof(T) * n);
#endif
  T* weight_j = (T*) malloc (sizeof(T) * e);

  hipMalloc ((void**)&d_work, sizeof(T) * n);
  hipMalloc ((void**)&d_weight_i, sizeof(T) * e);
  hipMalloc ((void**)&d_weight_s, sizeof(T) * e);
  hipMalloc ((void**)&d_weight_j, sizeof(T) * e);
  hipMalloc ((void**)&d_csrVal, sizeof(T) * e);
  hipMalloc ((void**)&d_csrPtr, sizeof(int) * (n+1));
  hipMalloc ((void**)&d_csrInd, sizeof(int) * e);

  hipMemcpyAsync(d_csrPtr, csr_ptr, sizeof(int) * (n+1), hipMemcpyHostToDevice, 0);
  hipMemcpyAsync(d_csrInd, csr_ind, sizeof(int) * e, hipMemcpyHostToDevice, 0);
  hipMemcpyAsync(d_csrVal, csr_val, sizeof(T) * e, hipMemcpyHostToDevice, 0);

  for (int i = 0; i < iteration; i++) {

    dim3 nthreads, nblocks; // reuse for multiple kernels

    // initialize edge weights (ramp when weighted, all ones otherwise)
    nthreads.x = MAX_KERNEL_THREADS;
    nthreads.y = 1;
    nthreads.z = 1;
    nblocks.x = (e+MAX_KERNEL_THREADS-1) / MAX_KERNEL_THREADS;
    nblocks.y = 1;
    nblocks.z = 1;

    hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<weighted, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_j, (T)1.0);

#ifdef DEBUG
    // Fix: the original used 'i' for these debug loops, shadowing the
    // outer iteration counter; renamed to 'k' for clarity.
    hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost);
    for (int k = 0; k < e; k++) printf("wj: %d %f\n", k, weight_j[k]);
#endif

    // initialize volume of intersections
    hipLaunchKernelGGL(HIP_KERNEL_NAME(fill<false, T>), dim3(nblocks), dim3(nthreads), 0, 0, e, d_weight_i, (T)0.0);

    // compute row sum with prefix sum
    const int y = 4;
    nthreads.x = 64/y;
    nthreads.y = y;
    nthreads.z = 1;
    nblocks.x = 1;
    nblocks.y = (n + nthreads.y - 1) / nthreads.y; // less than MAX CUDA BLOCKs
    nblocks.z = 1;
    hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_row_sum<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, d_csrPtr, d_csrInd, d_weight_j, d_work);

#ifdef DEBUG
    hipMemcpy(work, d_work, sizeof(T) * n, hipMemcpyDeviceToHost);
    for (int k = 0; k < n; k++) printf("work: %d %f\n", k, work[k]);
#endif

    // compute volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s)
    // nthreads.x * nthreads.y * nthreads.z <= 256
    nthreads.x = 32/y;
    nthreads.y = y;
    nthreads.z = 8;
    nblocks.x = 1;
    nblocks.y = 1;
    nblocks.z = (n + nthreads.z - 1)/nthreads.z; // less than CUDA_MAX_BLOCKS);
    hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_is<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, n, e, d_csrPtr,
        d_csrInd, d_weight_j, d_work, d_weight_i, d_weight_s);

#ifdef DEBUG
    hipMemcpy(weight_i, d_weight_i, sizeof(T) * e, hipMemcpyDeviceToHost);
    hipMemcpy(weight_s, d_weight_s, sizeof(T) * e, hipMemcpyDeviceToHost);
    for (int k = 0; k < e; k++) printf("wi: %d %f\n", k, weight_i[k]);
    for (int k = 0; k < e; k++) printf("ws: %d %f\n", k, weight_s[k]);
#endif

    // compute jaccard weights
    nthreads.x = std::min(e, MAX_KERNEL_THREADS);
    nthreads.y = 1;
    nthreads.z = 1;
    nblocks.x = (e + nthreads.x - 1)/nthreads.x; // less than MAX CUDA BLOCKs
    nblocks.y = 1;
    nblocks.z = 1;
    hipLaunchKernelGGL(HIP_KERNEL_NAME(jaccard_jw<weighted,T>), dim3(nblocks), dim3(nthreads), 0, 0, e,
        d_csrVal, gamma, d_weight_i, d_weight_s, d_weight_j);
  }

  // blocking copy: also synchronizes with the last kernel launch
  hipMemcpy(weight_j, d_weight_j, sizeof(T) * e, hipMemcpyDeviceToHost);

#ifdef DEBUG
  // verify using known values when weighted is true
  float error;

  if (weighted)
    error = std::fabs(weight_j[0] - 0.306667) +
            std::fabs(weight_j[1] - 0.000000) +
            std::fabs(weight_j[2] - 3.680000) +
            std::fabs(weight_j[3] - 1.380000) +
            std::fabs(weight_j[4] - 0.788571) +
            std::fabs(weight_j[5] - 0.460000);
  else
    error = std::fabs(weight_j[0] - 0.230000) +
            std::fabs(weight_j[1] - 0.000000) +
            std::fabs(weight_j[2] - 3.680000) +
            std::fabs(weight_j[3] - 1.380000) +
            std::fabs(weight_j[4] - 0.920000) +
            std::fabs(weight_j[5] - 0.460000);

  if (error > 1e-5) {
    for (int i = 0; i < e; i++) printf("wj: %d %f\n", i, weight_j[i]);
    printf("FAILED");
  } else {
    printf("PASSED");
  }
  printf("\n");
#endif

  hipFree (d_work);
  hipFree (d_weight_i);
  hipFree (d_weight_s);
  hipFree (d_weight_j);
  hipFree (d_csrInd);
  hipFree (d_csrVal);
  hipFree (d_csrPtr);
  free(weight_j);
#ifdef DEBUG
  free(weight_i);
  free(weight_s);
  free(work);
#endif
}
// Utilities
void printMatrix(const matrix& M)
{
int m = M.size();
int n = M[0].size();
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++)
cout << M[i][j] << " ";
cout << endl;
}
}
// Print a labeled vector in the form "<msg>[ a b c ]".
// Fix: the original lambda took 'int a' regardless of T, silently
// truncating floating-point element values before printing; it now
// takes the element type by const reference.
template <typename T>
void printVector(const vector<T>& V, char* msg)
{
  cout << msg << "[ ";
  for_each(V.begin(), V.end(), [](const T& a) { cout << a << " "; });
  cout << "]" << endl;
}
// Reference: https://www.geeksforgeeks.org/sparse-matrix-representations-set-3-csr/
//
// Build a dense test matrix (fixed in DEBUG builds, random otherwise),
// convert it to CSR, and run the Jaccard-weight pipeline in both the
// weighted and unweighted configurations.
int main(int argc, char** argv)
{
  int iteration = 10;

#ifdef DEBUG
  // small fixed matrix whose expected Jaccard weights are known
  matrix M = {
    { 0, 0, 0, 1},
    { 5, 8, 0, 0},
    { 0, 0, 3, 0},
    { 0, 6, 0, 1}
  };
#else
  // Fix: the original read argv[1..3] unconditionally and crashed when
  // the arguments were missing.
  if (argc < 4) {
    printf("Usage: %s <rows> <cols> <iterations>\n", argv[0]);
    return 1;
  }
  int numRow = atoi(argv[1]);
  int numCol = atoi(argv[2]);
  iteration = atoi(argv[3]);

  srand(2); // fixed seed for reproducible matrices
  matrix M;
  vector<vtype> rowElems(numCol);
  for (int r = 0; r < numRow; r++) {
    for (int c = 0; c < numCol; c++)
      rowElems[c] = rand() % 10;
    M.push_back(rowElems);
  }
#endif

  int row = M.size();
  int col = M[0].size();
  printf("Number of matrix rows and cols: %d %d\n", row, col);

  // convert the dense matrix to CSR form
  vector<vtype> csr_val;
  vector<int> csr_ptr = { 0 }; // require -std=c++11
  vector<int> csr_ind;
  int nnz = 0; // count Number of non-zero elements in each row
  for (int i = 0; i < row; i++) {
    for (int j = 0; j < col; j++) {
      if (M[i][j] != (vtype)0) {
        csr_val.push_back(M[i][j]);
        csr_ind.push_back(j);
        nnz++;
      }
    }
    csr_ptr.push_back(nnz);
  }

  // print when the matrix is small
  if (row <= 16 && col <= 16) {
    printMatrix(M);
    printVector(csr_val, (char*)"values = ");
    printVector(csr_ptr, (char*)"row pointer = ");
    printVector(csr_ind, (char*)"col indices = ");
  }

  jaccard_weight<true, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data());
  jaccard_weight<false, vtype>(iteration, row, nnz, csr_ptr.data(), csr_ind.data(), csr_val.data());

  return 0;
}
extern "C"
{
#include "sph/sph_blake.h"
#include "sph/sph_bmw.h"
#include "sph/sph_groestl.h"
#include "sph/sph_skein.h"
#include "sph/sph_jh.h"
#include "sph/sph_keccak.h"
}
#include "miner.h"
#include "cuda_helper.h"
#include "cuda_quark.h"
#include <stdio.h>
// SM 2.x fallback path: filter/merge split the hash buffer into two
// branch buffers instead of using compacted nonce lists.
extern uint32_t quark_filter_cpu_sm2(const int thr_id, const uint32_t threads, const uint32_t *inpHashes, uint32_t* d_branch2);
extern void quark_merge_cpu_sm2(const int thr_id, const uint32_t threads, uint32_t *outpHashes, uint32_t* d_branch2);

// per-GPU device buffer holding the 64-byte intermediate hashes
static uint32_t *d_hash[MAX_GPUS];
static uint32_t* d_hash_br2[MAX_GPUS]; // SM 2

// buffers used to generate the nonce vectors for the conditional hashes
static uint32_t *d_branch1Nonces[MAX_GPUS];
static uint32_t *d_branch2Nonces[MAX_GPUS];
static uint32_t *d_branch3Nonces[MAX_GPUS];
// Original quarkhash function from a miner source; CPU reference
// implementation used to validate nonces found by the GPU.
// Chain: blake -> bmw -> (groestl|skein) -> groestl -> jh ->
// (blake|bmw) -> keccak -> skein -> (keccak|jh), where each conditional
// step branches on bit 3 of the first hash byte. The first 32 bytes of
// the final 64-byte digest are copied into 'state'.
extern "C" void quarkhash(void *state, const void *input)
{
	unsigned char _ALIGN(128) hash[64];

	sph_blake512_context blake;
	sph_bmw512_context bmw;
	sph_groestl512_context groestl;
	sph_jh512_context jh;
	sph_keccak512_context keccak;
	sph_skein512_context skein;

	// unconditional: blake512 over the 80-byte block header
	sph_blake512_init(&blake);
	sph_blake512(&blake, input, 80);
	sph_blake512_close(&blake, (void*)hash);

	// unconditional: bmw512
	sph_bmw512_init(&bmw);
	sph_bmw512(&bmw, (const void*)hash, 64);
	sph_bmw512_close(&bmw, (void*)hash);

	// conditional: groestl512 or skein512
	if (hash[0] & 0x8) {
		sph_groestl512_init(&groestl);
		sph_groestl512(&groestl, (const void*)hash, 64);
		sph_groestl512_close(&groestl, (void*)hash);
	} else {
		sph_skein512_init(&skein);
		sph_skein512(&skein, (const void*)hash, 64);
		sph_skein512_close(&skein, (void*)hash);
	}

	// unconditional: groestl512
	sph_groestl512_init(&groestl);
	sph_groestl512(&groestl, (const void*)hash, 64);
	sph_groestl512_close(&groestl, (void*)hash);

	// unconditional: jh512
	sph_jh512_init(&jh);
	sph_jh512(&jh, (const void*)hash, 64);
	sph_jh512_close(&jh, (void*)hash);

	// conditional: blake512 or bmw512
	if (hash[0] & 0x8) {
		sph_blake512_init(&blake);
		sph_blake512(&blake, (const void*)hash, 64);
		sph_blake512_close(&blake, (void*)hash);
	} else {
		sph_bmw512_init(&bmw);
		sph_bmw512(&bmw, (const void*)hash, 64);
		sph_bmw512_close(&bmw, (void*)hash);
	}

	// unconditional: keccak512
	sph_keccak512_init(&keccak);
	sph_keccak512(&keccak, (const void*)hash, 64);
	sph_keccak512_close(&keccak, (void*)hash);

	// unconditional: skein512
	sph_skein512_init(&skein);
	sph_skein512(&skein, (const void*)hash, 64);
	sph_skein512_close(&skein, (void*)hash);

	// conditional: keccak512 or jh512
	if (hash[0] & 0x8) {
		sph_keccak512_init(&keccak);
		sph_keccak512(&keccak, (const void*)hash, 64);
		sph_keccak512_close(&keccak, (void*)hash);
	} else {
		sph_jh512_init(&jh);
		sph_jh512(&jh, (const void*)hash, 64);
		sph_jh512_close(&jh, (void*)hash);
	}

	memcpy(state, hash, 32);
}
#ifdef _DEBUG
// Debug helper: after a hashing stage, copy the first 32 bytes of the
// device hash buffer back to the host and print them with the stage
// label. Only active when scanning a single nonce
// (max_nonce == 1 && pdata[19] <= 1), i.e. in targeted debug runs.
#define TRACE(algo) { \
	if (max_nonce == 1 && pdata[19] <= 1) { \
		uint32_t* debugbuf = NULL; \
		cudaMallocHost(&debugbuf, 32); \
		cudaMemcpy(debugbuf, d_hash[thr_id], 32, cudaMemcpyDeviceToHost); \
		printf("quark %s %08x %08x %08x %08x...%08x... \n", algo, swab32(debugbuf[0]), swab32(debugbuf[1]), \
			swab32(debugbuf[2]), swab32(debugbuf[3]), swab32(debugbuf[7])); \
		cudaFreeHost(debugbuf); \
	} \
}
#else
// release builds: TRACE compiles away to an empty block
#define TRACE(algo) {}
#endif
// per-thread flag: true once the GPU buffers for that thread are allocated
static bool init[MAX_GPUS] = { 0 };

// Scan a nonce range on the GPU for the quark algorithm.
// Candidates found by the GPU are re-hashed with the CPU reference
// (quarkhash) and validated against the target. Returns the number of
// valid nonces found (details stored in 'work'), or 0 when the range is
// exhausted or a restart was requested.
extern "C" int scanhash_quark(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done)
{
	uint32_t _ALIGN(64) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t first_nonce = pdata[19];
	int dev_id = device_map[thr_id];
	uint32_t def_thr = 1U << 20; // 256*4096
	uint32_t throughput = cuda_default_throughput(thr_id, def_thr);
	if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);

	if (opt_benchmark)
		ptarget[7] = 0x00F;

	// one-time per-thread device setup and buffer allocation
	if (!init[thr_id])
	{
		cudaSetDevice(dev_id);
		if (opt_cudaschedule == -1 && gpu_threads == 1) {
			cudaDeviceReset();
			// reduce cpu usage
			cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
			CUDA_LOG_ERROR();
		}
		gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);

		cudaGetLastError();
		CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t) 64 * throughput));

		quark_blake512_cpu_init(thr_id, throughput);
		quark_groestl512_cpu_init(thr_id, throughput);
		quark_skein512_cpu_init(thr_id, throughput);
		quark_bmw512_cpu_init(thr_id, throughput);
		quark_keccak512_cpu_init(thr_id, throughput);
		quark_jh512_cpu_init(thr_id, throughput);
		quark_compactTest_cpu_init(thr_id, throughput);

		// SM 3.0+ uses compacted nonce lists; older devices use a second
		// full hash buffer instead
		if (cuda_arch[dev_id] >= 300) {
			cudaMalloc(&d_branch1Nonces[thr_id], sizeof(uint32_t)*throughput);
			cudaMalloc(&d_branch2Nonces[thr_id], sizeof(uint32_t)*throughput);
			cudaMalloc(&d_branch3Nonces[thr_id], sizeof(uint32_t)*throughput);
		} else {
			cudaMalloc(&d_hash_br2[thr_id], (size_t) 64 * throughput);
		}

		cuda_check_cpu_init(thr_id, throughput);
		CUDA_SAFE_CALL(cudaGetLastError());

		init[thr_id] = true;
	}

	for (int k=0; k < 20; k++)
		be32enc(&endiandata[k], pdata[k]);

	quark_blake512_cpu_setBlock_80(thr_id, endiandata);
	cuda_check_cpu_setTarget(ptarget);

	do {
		int order = 0;
		uint32_t nrm1=0, nrm2=0, nrm3=0;

		quark_blake512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id]); order++;
		TRACE("blake :");
		quark_bmw512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		TRACE("bmw :");

		if (cuda_arch[dev_id] >= 300) {
			quark_compactTest_single_false_cpu_hash_64(thr_id, throughput, pdata[19], d_hash[thr_id], NULL,
				d_branch3Nonces[thr_id], &nrm3, order++);

			// only follow the Skein branch from here on
			quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

			// this is the unconditional branch for Groestl512
			quark_groestl512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

			// this is the unconditional branch for JH512
			quark_jh512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

			// split the quark nonces into branch1 and branch2 according to if (hash[0] & 0x8)
			quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
				d_branch1Nonces[thr_id], &nrm1,
				d_branch2Nonces[thr_id], &nrm2,
				order++);

			// this is the conditional branch for Blake512
			quark_blake512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);

			// this is the conditional branch for Bmw512
			quark_bmw512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

			// this is the unconditional branch for Keccak512
			quark_keccak512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

			// this is the unconditional branch for Skein512
			quark_skein512_cpu_hash_64(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);

			// split the quark nonces into branch1 and branch2 according to if (hash[0] & 0x8)
			quark_compactTest_cpu_hash_64(thr_id, nrm3, pdata[19], d_hash[thr_id], d_branch3Nonces[thr_id],
				d_branch1Nonces[thr_id], &nrm1,
				d_branch2Nonces[thr_id], &nrm2,
				order++);

			quark_keccak512_cpu_hash_64(thr_id, nrm1, pdata[19], d_branch1Nonces[thr_id], d_hash[thr_id], order++);
			quark_jh512_cpu_hash_64(thr_id, nrm2, pdata[19], d_branch2Nonces[thr_id], d_hash[thr_id], order++);

			work->nonces[0] = cuda_check_hash_branch(thr_id, nrm3, pdata[19], d_branch3Nonces[thr_id], d_hash[thr_id], order++);
			work->nonces[1] = 0;
		} else {
			/* algo permutations are made with 2 different buffers */

			quark_filter_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			quark_groestl512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			quark_skein512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash_br2[thr_id], order++);
			quark_merge_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			TRACE("perm1 :");

			quark_groestl512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			TRACE("groestl:");
			quark_jh512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			TRACE("jh512 :");

			quark_filter_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			quark_blake512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			quark_bmw512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash_br2[thr_id], order++);
			quark_merge_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			TRACE("perm2 :");

			quark_keccak512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			TRACE("keccak :");
			quark_skein512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			TRACE("skein :");

			quark_filter_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			quark_keccak512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
			quark_jh512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash_br2[thr_id], order++);
			quark_merge_cpu_sm2(thr_id, throughput, d_hash[thr_id], d_hash_br2[thr_id]);
			TRACE("perm3 :");

			CUDA_LOG_ERROR();
			work->nonces[0] = cuda_check_hash(thr_id, throughput, pdata[19], d_hash[thr_id]);
			work->nonces[1] = cuda_check_hash_suppl(thr_id, throughput, pdata[19], d_hash[thr_id], 1);
		}

		*hashes_done = pdata[19] - first_nonce + throughput;

		// candidate found on the GPU: re-hash on the CPU and validate
		if (work->nonces[0] != UINT32_MAX)
		{
			uint32_t _ALIGN(64) vhash[8];
			be32enc(&endiandata[19], work->nonces[0]);
			quarkhash(vhash, endiandata);

			if (vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) {
				work->valid_nonces = 1;
				work_set_target_ratio(work, vhash);
				if (work->nonces[1] != 0) {
					be32enc(&endiandata[19], work->nonces[1]);
					quarkhash(vhash, endiandata);
					bn_set_target_ratio(work, vhash, 1);
					work->valid_nonces++;
					pdata[19] = max(work->nonces[0], work->nonces[1]) + 1;
				} else {
					pdata[19] = work->nonces[0] + 1; // cursor
				}
				return work->valid_nonces;
			}
			else if (vhash[7] > ptarget[7]) {
				gpu_increment_reject(thr_id);
				if (!opt_quiet)
					gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", work->nonces[0]);
				pdata[19] = work->nonces[0] + 1;
				continue;
			}
		}

		// stop at the end of the assigned nonce range
		if ((uint64_t) throughput + pdata[19] >= max_nonce) {
			pdata[19] = max_nonce;
			break;
		}

		pdata[19] += throughput;

	} while (!work_restart[thr_id].restart);

	return 0;
}
// cleanup
// Release all per-thread GPU buffers allocated by scanhash_quark and
// mark the thread as uninitialized so a later scan re-allocates them.
extern "C" void free_quark(int thr_id)
{
	int dev_id = device_map[thr_id];
	if (!init[thr_id])
		return;

	// Fix: cudaThreadSynchronize() is deprecated; use the modern
	// cudaDeviceSynchronize() to wait for in-flight kernels before
	// freeing their buffers.
	cudaDeviceSynchronize();

	cudaFree(d_hash[thr_id]);
	if (cuda_arch[dev_id] >= 300) {
		cudaFree(d_branch1Nonces[thr_id]);
		cudaFree(d_branch2Nonces[thr_id]);
		cudaFree(d_branch3Nonces[thr_id]);
	} else {
		cudaFree(d_hash_br2[thr_id]);
	}

	quark_blake512_cpu_free(thr_id);
	quark_groestl512_cpu_free(thr_id);
	quark_compactTest_cpu_free(thr_id);

	cuda_check_cpu_free(thr_id);
	init[thr_id] = false;

	cudaDeviceSynchronize();
}
#include "bilinearsampler.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
#include <cstdio>
// maximum size of each grid dimension:
#define MAX_GRID_DIM 65535 // this is probably a bad idea..
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
// native double-precision atomicAdd is available from compute
// capability 6.0; nothing to define here
#else
// an implementation of atomicAdd() for double (really slow) for older CC
// Emulates the native double atomicAdd with a compare-and-swap loop on
// the 64-bit integer view of the value.
static __device__ double atomicAdd(double* address, double val)
{
	unsigned long long int* address_as_ull = (unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		// try to swap in assumed+val; retry if another thread changed *address
		old = atomicCAS(address_as_ull, assumed,
		                __double_as_longlong(val +
		                                     __longlong_as_double(assumed)));
	} while (assumed != old);
	return __longlong_as_double(old);
}
#endif
/* Flat global thread index for a launch that uses a 2D grid of 1D blocks. */
__device__ int getGlobalIdx_2D_1D()
{
	const int blockRank = blockIdx.y * gridDim.x + blockIdx.x;
	return blockRank * blockDim.x + threadIdx.x;
}
// todo: fix such assumptions either in doc or by clearing memory
// probably all these functions should have the option to accumulate, so...
// assumption: derData is cleared before calling this code
//
// Bilinear sampling: forward pass and data-backward pass in one kernel.
// One thread per output element (one spatial position of one channel of
// one output image); launched as a 2D grid of 1D blocks (see
// getGlobalIdx_2D_1D). When backwardData is false the thread writes the
// bilinearly interpolated value to 'output'; when true it scatter-adds
// derOutput * weight into 'derData' (hence the cleared-derData
// assumption above). Images are stored height-fastest, as shown by the
// 'ssy + ssx * inHeight' indexing below.
template<typename type, bool backwardData>
__global__ void forward_backward_kernel
(type* output,
 type* derData,
 type const* data,
 type const* grid,
 type const* derOutput,
 int outHeight, int outWidth, int outDepth, int outCardinality,
 int inHeight, int inWidth, int inCardinality)
{
  // flat index of the output element handled by this thread
  const int offset = getGlobalIdx_2D_1D();

  const int nOut = outWidth * outHeight * outDepth * outCardinality ;
  if (offset >= nOut) { return ; }  // guard the grid tail

  bool backward = backwardData;

  // get the index of the output image, feature channel, and pixel
  int k = offset ;
  int c = k / (outHeight * outWidth) ;
  int n = c / outDepth ; // out image index
  k %= (outHeight * outWidth) ; // out spatial index
  c %= outDepth ; // out channel index

  // get the index of the input image
  int groupSize = outCardinality / inCardinality ; // num of transformations/image
  int nInputImage = n / groupSize ; // index of the input image
  int inputOffset = (inHeight * inWidth)*(outDepth * nInputImage + c) ; // location of the start of the input image
  int gridOffset = 2 * ((outHeight * outWidth) * n + k) ; //+ 1; // location of the first grid coordinate for this output pixel
  //int gridOffset = 2*k+1 ;

  // get the grid for this output image; coordinates are stored y first,
  // then x, in normalized [-1,1] range
  type py = grid[gridOffset + 0] ;
  type px = grid[gridOffset + 1] ;

  // map normalized coordinates to pixel coordinates
  py = type(0.5)*(py + type(1.0)) * (inHeight - 1) ;
  px = type(0.5)*(px + type(1.0)) * (inWidth - 1) ;
  const int sx = floor(px); // todo: check floor vs floorf
  const int sy = floor(py);

  type acc = 0 ;
  type dy ;
  if (!backward) {
    data += inputOffset ;
  }
  if (backwardData) {
    derData += inputOffset ;
  }
  if (backward) {
    dy = derOutput[offset] ;
  }

  // todo: check boundary conditions in other frameworks and make
  // them the same
  // AG: checked against CUDNN -- works
  if (-1 <= sy && sy < inHeight && -1 <= sx && sx < inWidth) {
    // get the interpolation weights
    const type wx = px - sx ;
    const type wy = py - sy ;

    // visit the 2x2 neighborhood around (sy,sx)
    #pragma unroll
    for (int j=0; j < 2; j++) {
      #pragma unroll
      for (int i=0; i < 2; i++) {
        int ssy = sy + i ;
        int ssx = sx + j ;
        if (ssy < 0 || ssy >= inHeight || ssx < 0 || ssx >= inWidth) {
          continue ; // neighbor falls outside the image: contributes nothing
        }
        type wwx = (1-j)*(1-wx) + j*wx ;
        type wwy = (1-i)*(1-wy) + i*wy ;
        type ww = wwx * wwy ;
        if (!backward) {
          acc += ww * data[ssy + ssx * inHeight];
        } else {
          if (backwardData) {
            // scatter-add: several output pixels may map to the same
            // input pixel, hence the atomic
            atomicAdd(derData + ssy + ssx * inHeight, ww * dy) ;
          }
        }
      }
    }
    if (!backward) {
      output[offset] = acc ;
    }
  }
}
template<typename type>
__global__ void grid_backward_kernel
(type* derGrid,
type const* data,
type const* grid,
type const* derOutput,
int outHeight, int outWidth, int outDepth, int outCardinality,
int inHeight, int inWidth, int inCardinality)
{
const int offset = getGlobalIdx_2D_1D();
const int nOut = outWidth * outHeight * outCardinality ;
if (offset >= nOut) { return ; }
// get the index of the output image, feature channel, and pixel
int k = offset ;
int n = k / (outHeight * outWidth) ; // out image index
k %= (outHeight * outWidth) ; // out spatial index
// get the grid offset:
// --> location of the first grid coordinate for this output pixel
int gridOffset = 2 * ((outHeight * outWidth) * n + k) ; //+ 1;
// get the index of the input image
const int groupSize = outCardinality / inCardinality ; // num of transformations/image
const int nInputImage = n / groupSize ; // index of the input image
const int inputOffset = inHeight * inWidth * outDepth * nInputImage ; // location of the start of the input image
// get the grid for this output image
type py = grid[gridOffset + 0] ;
type px = grid[gridOffset + 1] ;
py = type(0.5)*(py + type(1.0)) * (inHeight - 1) ;
px = type(0.5)*(px + type(1.0)) * (inWidth - 1) ;
const int sx = floor(px); // todo: check floor vs floorf
const int sy = floor(py);
type dgridx = 0 ;
type dgridy = 0 ;
data += inputOffset ;
derOutput += k + n * outWidth * outHeight * outDepth ;
if (-1 <= sy && sy < inHeight && -1 <= sx && sx < inWidth) {
// get the interpolation weights
const type wx = px - sx ;
const type wy = py - sy ;
#pragma unroll
for (int j=0; j < 2; j++) {
#pragma unroll
for (int i=0; i < 2; i++) {
int ssy = sy + i ;
int ssx = sx + j ;
if (ssy < 0 || ssy >= inHeight || ssx < 0 || ssx >= inWidth) {
continue ;
}
const type wwx = (2*i-1) * ( (1-j)*(1-wx) + j*wx ) ;
const type wwy = (2*j-1) * ( (1-i)*(1-wy) + i*wy ) ;
for (int ic=0; ic < outDepth; ic++) {
const type dy = derOutput[ic * outHeight * outWidth];
const type x = data[ssy + ssx * inHeight + ic * inHeight * inWidth];
dgridy += wwx * dy * x ;
dgridx += wwy * dy * x ;
}
}
}
derGrid[gridOffset + 0] = type(0.5)*(inHeight - 1) * dgridy ;
derGrid[gridOffset + 1] = type(0.5)*(inWidth - 1) * dgridx ;
}
}
/** Compute the number of threads (1D) and blocks (2D) needed to cover N
 ** work items with VL_CUDA_NUM_THREADS threads per block. When the
 ** block count exceeds MAX_GRID_DIM it is folded into a second grid
 ** dimension. Returns vlErrorCuda when even the 2D grid cannot cover N.
 **/
vl::Error get_launch_params(const int& N, int& nTh, int& nGx, int& nGy)
{
	nGx = vl::divideUpwards(N, VL_CUDA_NUM_THREADS);
	if (nGx == 1) {
		// a single block suffices; use exactly N threads
		nTh = N;
		nGy = 1;
	} else {
		nTh = VL_CUDA_NUM_THREADS;
		if (nGx <= MAX_GRID_DIM) {
			nGy = 1;
		} else {
			// fold the excess blocks into the second grid dimension
			nGy = vl::divideUpwards(nGx, MAX_GRID_DIM);
			nGx = MAX_GRID_DIM;
			if (nGy > MAX_GRID_DIM) {
				// the following print statement is probably not
				// shown in the matlab JVM console.
				// Fix: corrected the "BilinearSamper" typo and added a
				// trailing newline so the message is flushed legibly.
				std::printf("BilinearSampler: output volume should be smaller.\n");
				return vl::vlErrorCuda;
			}
		}
	}
	return vl::vlSuccess;
}
// use a template to define both directions as they are nearly identical code-wise
//
// Launches forward_backward_kernel (and, when backwardGrid is set,
// grid_backward_kernel) with a launch configuration that covers the
// output volume. Returns vlErrorCuda on any launch failure.
template<typename type, bool backwardData, bool backwardGrid>
static vl::Error
forward_backward
(vl::Context& context,
 type* output,
 type* derData,
 type* derGrid,
 type const* data,
 type const* grid,
 type const* derOutput,
 size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
 size_t inHeight, size_t inWidth, size_t inCardinality)
{
  // Fix: 'backward' was used in the asserts below but never declared in
  // this scope (compile error); derive it from the template parameters.
  bool const backward = backwardData || backwardGrid ;

  // common conditions
  assert(grid) ;
  assert(divides(inCardinality, outCardinality)) ;

  // forward conditions
  assert(backward || data) ;
  assert(backward || output) ;

  // backward conditions
  assert(!backward || derOutput) ;
  assert(!backwardData || derData) ;
  assert(!backwardGrid || derGrid) ;
  assert(!backwardGrid || data) ;

  //  if (backwardData) {
  //    //memset(derData, 0, inHeight * inWidth * outDepth * inCardinality * sizeof(type)) ;
  //  }

  // setup and launch the kernel for DER-DATA:
  int nTh, nGx, nGy;
  const int outVolume = outHeight * outWidth * outDepth * outCardinality ;
  vl::Error volume_ok = get_launch_params(outVolume, nTh, nGx, nGy);
  if (volume_ok != vl::vlSuccess) { return volume_ok;}

  dim3 gridDim(nGx,nGy); // grid-dimensions
  forward_backward_kernel <type, backwardData>
    <<< gridDim, nTh >>> (output,
                          derData,
                          data,
                          grid,
                          derOutput,
                          outHeight, outWidth, outDepth, outCardinality,
                          inHeight, inWidth, inCardinality) ;

  // catch launch-configuration errors before the optional second launch
  cudaError_t status = cudaPeekAtLastError() ;
  if (status != cudaSuccess) { return vl::vlErrorCuda; }

  if (backwardGrid) {
    // setup and launch kernel for DER-GRID:
    const int outN = outHeight * outWidth * outCardinality;
    volume_ok = get_launch_params(outN, nTh, nGx, nGy);
    if (volume_ok != vl::vlSuccess) { return volume_ok;}

    gridDim.x = nGx; gridDim.y = nGy; // grid-dimensions
    grid_backward_kernel <type>
      <<< gridDim, nTh >>> ( derGrid,
                             data, grid,
                             derOutput,
                             outHeight, outWidth, outDepth, outCardinality,
                             inHeight, inWidth, inCardinality ) ;
    status = cudaPeekAtLastError() ;
  }

  // catch any errors:
  return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
namespace vl { namespace impl {

  // GPU specialization of the bilinear sampler; thin wrappers around
  // the forward_backward template above.
  template<typename type>
  struct bilinearsampler<vl::GPU, type>
  {
    /* ------------------------------------------------------------ */
    /* forward */
    /* ------------------------------------------------------------ */

    // Forward pass: sample 'data' at the locations given by 'grid' and
    // write the interpolated values to 'output'.
    static vl::Error
    forward(Context& context,
            type* output,
            type const* data,
            type const* grid,
            size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
            size_t inHeight, size_t inWidth, size_t inCardinality)
    {
      return forward_backward<type, false, false>
      (context, output, NULL, NULL, data, grid, NULL,
       outHeight, outWidth, outDepth, outCardinality,
       inHeight, inWidth,inCardinality) ;
    }

    /*------------------------------------------------------------- */
    /* backward */
    /* ------------------------------------------------------------ */

    // helper: forwards to forward_backward with the requested
    // combination of data/grid derivative computations
#define DISPATCH(bwData, bwGrid) \
    error = forward_backward<type, bwData, bwGrid> \
    (context, NULL, derData, derGrid, data, grid, derOutput, \
     outHeight, outWidth, outDepth, outCardinality, \
     inHeight, inWidth,inCardinality) ;

    // Backward pass: compute derData and/or derGrid; either pointer may
    // be NULL to skip that derivative.
    static vl::Error
    backward(Context& context,
             type* derData,
             type* derGrid,
             type const* data,
             type const* grid,
             type const* derOutput,
             size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality,
             size_t inHeight, size_t inWidth, size_t inCardinality)
    {
      vl::Error error = vlSuccess ;
      // optimized codepaths depending on what needs to be computed
      if (derData && derGrid == NULL) {
        DISPATCH(true, false) ;
      } else if (derData == NULL && derGrid) {
        DISPATCH(false, true) ;
      } else if (derData && derGrid) {
        DISPATCH(true, true) ;
      }
      return error ;
    }
  } ;

} } // namespace vl::impl
template struct vl::impl::bilinearsampler<vl::GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::bilinearsampler<vl::GPU, double> ;
#endif | the_stack |
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: BSD-3-Clause
*/
// Visualizing and Communicating Errors in Rendered Images
// Ray Tracing Gems II, 2021,
// by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller.
// Pointer to the chapter: https://research.nvidia.com/publication/2021-08_Visualizing-and-Communicating.
// Visualizing Errors in Rendered High Dynamic Range Images
// Eurographics 2021,
// by Pontus Andersson, Jim Nilsson, Peter Shirley, and Tomas Akenine-Moller.
// Pointer to the paper: https://research.nvidia.com/publication/2021-05_HDR-FLIP.
// FLIP: A Difference Evaluator for Alternating Images
// High Performance Graphics 2020,
// by Pontus Andersson, Jim Nilsson, Tomas Akenine-Moller,
// Magnus Oskarsson, Kalle Astrom, and Mark D. Fairchild.
// Pointer to the paper: https://research.nvidia.com/publication/2020-07_FLIP.
// Code by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller.
#include "color.cuh"
namespace FLIP
{
static const color3 MapViridis[256] =
{
{0.267004f, 0.004874f, 0.329415f}, {0.268510f, 0.009605f, 0.335427f}, {0.269944f, 0.014625f, 0.341379f}, {0.271305f, 0.019942f, 0.347269f},
{0.272594f, 0.025563f, 0.353093f}, {0.273809f, 0.031497f, 0.358853f}, {0.274952f, 0.037752f, 0.364543f}, {0.276022f, 0.044167f, 0.370164f},
{0.277018f, 0.050344f, 0.375715f}, {0.277941f, 0.056324f, 0.381191f}, {0.278791f, 0.062145f, 0.386592f}, {0.279566f, 0.067836f, 0.391917f},
{0.280267f, 0.073417f, 0.397163f}, {0.280894f, 0.078907f, 0.402329f}, {0.281446f, 0.084320f, 0.407414f}, {0.281924f, 0.089666f, 0.412415f},
{0.282327f, 0.094955f, 0.417331f}, {0.282656f, 0.100196f, 0.422160f}, {0.282910f, 0.105393f, 0.426902f}, {0.283091f, 0.110553f, 0.431554f},
{0.283197f, 0.115680f, 0.436115f}, {0.283229f, 0.120777f, 0.440584f}, {0.283187f, 0.125848f, 0.444960f}, {0.283072f, 0.130895f, 0.449241f},
{0.282884f, 0.135920f, 0.453427f}, {0.282623f, 0.140926f, 0.457517f}, {0.282290f, 0.145912f, 0.461510f}, {0.281887f, 0.150881f, 0.465405f},
{0.281412f, 0.155834f, 0.469201f}, {0.280868f, 0.160771f, 0.472899f}, {0.280255f, 0.165693f, 0.476498f}, {0.279574f, 0.170599f, 0.479997f},
{0.278826f, 0.175490f, 0.483397f}, {0.278012f, 0.180367f, 0.486697f}, {0.277134f, 0.185228f, 0.489898f}, {0.276194f, 0.190074f, 0.493001f},
{0.275191f, 0.194905f, 0.496005f}, {0.274128f, 0.199721f, 0.498911f}, {0.273006f, 0.204520f, 0.501721f}, {0.271828f, 0.209303f, 0.504434f},
{0.270595f, 0.214069f, 0.507052f}, {0.269308f, 0.218818f, 0.509577f}, {0.267968f, 0.223549f, 0.512008f}, {0.266580f, 0.228262f, 0.514349f},
{0.265145f, 0.232956f, 0.516599f}, {0.263663f, 0.237631f, 0.518762f}, {0.262138f, 0.242286f, 0.520837f}, {0.260571f, 0.246922f, 0.522828f},
{0.258965f, 0.251537f, 0.524736f}, {0.257322f, 0.256130f, 0.526563f}, {0.255645f, 0.260703f, 0.528312f}, {0.253935f, 0.265254f, 0.529983f},
{0.252194f, 0.269783f, 0.531579f}, {0.250425f, 0.274290f, 0.533103f}, {0.248629f, 0.278775f, 0.534556f}, {0.246811f, 0.283237f, 0.535941f},
{0.244972f, 0.287675f, 0.537260f}, {0.243113f, 0.292092f, 0.538516f}, {0.241237f, 0.296485f, 0.539709f}, {0.239346f, 0.300855f, 0.540844f},
{0.237441f, 0.305202f, 0.541921f}, {0.235526f, 0.309527f, 0.542944f}, {0.233603f, 0.313828f, 0.543914f}, {0.231674f, 0.318106f, 0.544834f},
{0.229739f, 0.322361f, 0.545706f}, {0.227802f, 0.326594f, 0.546532f}, {0.225863f, 0.330805f, 0.547314f}, {0.223925f, 0.334994f, 0.548053f},
{0.221989f, 0.339161f, 0.548752f}, {0.220057f, 0.343307f, 0.549413f}, {0.218130f, 0.347432f, 0.550038f}, {0.216210f, 0.351535f, 0.550627f},
{0.214298f, 0.355619f, 0.551184f}, {0.212395f, 0.359683f, 0.551710f}, {0.210503f, 0.363727f, 0.552206f}, {0.208623f, 0.367752f, 0.552675f},
{0.206756f, 0.371758f, 0.553117f}, {0.204903f, 0.375746f, 0.553533f}, {0.203063f, 0.379716f, 0.553925f}, {0.201239f, 0.383670f, 0.554294f},
{0.199430f, 0.387607f, 0.554642f}, {0.197636f, 0.391528f, 0.554969f}, {0.195860f, 0.395433f, 0.555276f}, {0.194100f, 0.399323f, 0.555565f},
{0.192357f, 0.403199f, 0.555836f}, {0.190631f, 0.407061f, 0.556089f}, {0.188923f, 0.410910f, 0.556326f}, {0.187231f, 0.414746f, 0.556547f},
{0.185556f, 0.418570f, 0.556753f}, {0.183898f, 0.422383f, 0.556944f}, {0.182256f, 0.426184f, 0.557120f}, {0.180629f, 0.429975f, 0.557282f},
{0.179019f, 0.433756f, 0.557430f}, {0.177423f, 0.437527f, 0.557565f}, {0.175841f, 0.441290f, 0.557685f}, {0.174274f, 0.445044f, 0.557792f},
{0.172719f, 0.448791f, 0.557885f}, {0.171176f, 0.452530f, 0.557965f}, {0.169646f, 0.456262f, 0.558030f}, {0.168126f, 0.459988f, 0.558082f},
{0.166617f, 0.463708f, 0.558119f}, {0.165117f, 0.467423f, 0.558141f}, {0.163625f, 0.471133f, 0.558148f}, {0.162142f, 0.474838f, 0.558140f},
{0.160665f, 0.478540f, 0.558115f}, {0.159194f, 0.482237f, 0.558073f}, {0.157729f, 0.485932f, 0.558013f}, {0.156270f, 0.489624f, 0.557936f},
{0.154815f, 0.493313f, 0.557840f}, {0.153364f, 0.497000f, 0.557724f}, {0.151918f, 0.500685f, 0.557587f}, {0.150476f, 0.504369f, 0.557430f},
{0.149039f, 0.508051f, 0.557250f}, {0.147607f, 0.511733f, 0.557049f}, {0.146180f, 0.515413f, 0.556823f}, {0.144759f, 0.519093f, 0.556572f},
{0.143343f, 0.522773f, 0.556295f}, {0.141935f, 0.526453f, 0.555991f}, {0.140536f, 0.530132f, 0.555659f}, {0.139147f, 0.533812f, 0.555298f},
{0.137770f, 0.537492f, 0.554906f}, {0.136408f, 0.541173f, 0.554483f}, {0.135066f, 0.544853f, 0.554029f}, {0.133743f, 0.548535f, 0.553541f},
{0.132444f, 0.552216f, 0.553018f}, {0.131172f, 0.555899f, 0.552459f}, {0.129933f, 0.559582f, 0.551864f}, {0.128729f, 0.563265f, 0.551229f},
{0.127568f, 0.566949f, 0.550556f}, {0.126453f, 0.570633f, 0.549841f}, {0.125394f, 0.574318f, 0.549086f}, {0.124395f, 0.578002f, 0.548287f},
{0.123463f, 0.581687f, 0.547445f}, {0.122606f, 0.585371f, 0.546557f}, {0.121831f, 0.589055f, 0.545623f}, {0.121148f, 0.592739f, 0.544641f},
{0.120565f, 0.596422f, 0.543611f}, {0.120092f, 0.600104f, 0.542530f}, {0.119738f, 0.603785f, 0.541400f}, {0.119512f, 0.607464f, 0.540218f},
{0.119423f, 0.611141f, 0.538982f}, {0.119483f, 0.614817f, 0.537692f}, {0.119699f, 0.618490f, 0.536347f}, {0.120081f, 0.622161f, 0.534946f},
{0.120638f, 0.625828f, 0.533488f}, {0.121380f, 0.629492f, 0.531973f}, {0.122312f, 0.633153f, 0.530398f}, {0.123444f, 0.636809f, 0.528763f},
{0.124780f, 0.640461f, 0.527068f}, {0.126326f, 0.644107f, 0.525311f}, {0.128087f, 0.647749f, 0.523491f}, {0.130067f, 0.651384f, 0.521608f},
{0.132268f, 0.655014f, 0.519661f}, {0.134692f, 0.658636f, 0.517649f}, {0.137339f, 0.662252f, 0.515571f}, {0.140210f, 0.665859f, 0.513427f},
{0.143303f, 0.669459f, 0.511215f}, {0.146616f, 0.673050f, 0.508936f}, {0.150148f, 0.676631f, 0.506589f}, {0.153894f, 0.680203f, 0.504172f},
{0.157851f, 0.683765f, 0.501686f}, {0.162016f, 0.687316f, 0.499129f}, {0.166383f, 0.690856f, 0.496502f}, {0.170948f, 0.694384f, 0.493803f},
{0.175707f, 0.697900f, 0.491033f}, {0.180653f, 0.701402f, 0.488189f}, {0.185783f, 0.704891f, 0.485273f}, {0.191090f, 0.708366f, 0.482284f},
{0.196571f, 0.711827f, 0.479221f}, {0.202219f, 0.715272f, 0.476084f}, {0.208030f, 0.718701f, 0.472873f}, {0.214000f, 0.722114f, 0.469588f},
{0.220124f, 0.725509f, 0.466226f}, {0.226397f, 0.728888f, 0.462789f}, {0.232815f, 0.732247f, 0.459277f}, {0.239374f, 0.735588f, 0.455688f},
{0.246070f, 0.738910f, 0.452024f}, {0.252899f, 0.742211f, 0.448284f}, {0.259857f, 0.745492f, 0.444467f}, {0.266941f, 0.748751f, 0.440573f},
{0.274149f, 0.751988f, 0.436601f}, {0.281477f, 0.755203f, 0.432552f}, {0.288921f, 0.758394f, 0.428426f}, {0.296479f, 0.761561f, 0.424223f},
{0.304148f, 0.764704f, 0.419943f}, {0.311925f, 0.767822f, 0.415586f}, {0.319809f, 0.770914f, 0.411152f}, {0.327796f, 0.773980f, 0.406640f},
{0.335885f, 0.777018f, 0.402049f}, {0.344074f, 0.780029f, 0.397381f}, {0.352360f, 0.783011f, 0.392636f}, {0.360741f, 0.785964f, 0.387814f},
{0.369214f, 0.788888f, 0.382914f}, {0.377779f, 0.791781f, 0.377939f}, {0.386433f, 0.794644f, 0.372886f}, {0.395174f, 0.797475f, 0.367757f},
{0.404001f, 0.800275f, 0.362552f}, {0.412913f, 0.803041f, 0.357269f}, {0.421908f, 0.805774f, 0.351910f}, {0.430983f, 0.808473f, 0.346476f},
{0.440137f, 0.811138f, 0.340967f}, {0.449368f, 0.813768f, 0.335384f}, {0.458674f, 0.816363f, 0.329727f}, {0.468053f, 0.818921f, 0.323998f},
{0.477504f, 0.821444f, 0.318195f}, {0.487026f, 0.823929f, 0.312321f}, {0.496615f, 0.826376f, 0.306377f}, {0.506271f, 0.828786f, 0.300362f},
{0.515992f, 0.831158f, 0.294279f}, {0.525776f, 0.833491f, 0.288127f}, {0.535621f, 0.835785f, 0.281908f}, {0.545524f, 0.838039f, 0.275626f},
{0.555484f, 0.840254f, 0.269281f}, {0.565498f, 0.842430f, 0.262877f}, {0.575563f, 0.844566f, 0.256415f}, {0.585678f, 0.846661f, 0.249897f},
{0.595839f, 0.848717f, 0.243329f}, {0.606045f, 0.850733f, 0.236712f}, {0.616293f, 0.852709f, 0.230052f}, {0.626579f, 0.854645f, 0.223353f},
{0.636902f, 0.856542f, 0.216620f}, {0.647257f, 0.858400f, 0.209861f}, {0.657642f, 0.860219f, 0.203082f}, {0.668054f, 0.861999f, 0.196293f},
{0.678489f, 0.863742f, 0.189503f}, {0.688944f, 0.865448f, 0.182725f}, {0.699415f, 0.867117f, 0.175971f}, {0.709898f, 0.868751f, 0.169257f},
{0.720391f, 0.870350f, 0.162603f}, {0.730889f, 0.871916f, 0.156029f}, {0.741388f, 0.873449f, 0.149561f}, {0.751884f, 0.874951f, 0.143228f},
{0.762373f, 0.876424f, 0.137064f}, {0.772852f, 0.877868f, 0.131109f}, {0.783315f, 0.879285f, 0.125405f}, {0.793760f, 0.880678f, 0.120005f},
{0.804182f, 0.882046f, 0.114965f}, {0.814576f, 0.883393f, 0.110347f}, {0.824940f, 0.884720f, 0.106217f}, {0.835270f, 0.886029f, 0.102646f},
{0.845561f, 0.887322f, 0.099702f}, {0.855810f, 0.888601f, 0.097452f}, {0.866013f, 0.889868f, 0.095953f}, {0.876168f, 0.891125f, 0.095250f},
{0.886271f, 0.892374f, 0.095374f}, {0.896320f, 0.893616f, 0.096335f}, {0.906311f, 0.894855f, 0.098125f}, {0.916242f, 0.896091f, 0.100717f},
{0.926106f, 0.897330f, 0.104071f}, {0.935904f, 0.898570f, 0.108131f}, {0.945636f, 0.899815f, 0.112838f}, {0.955300f, 0.901065f, 0.118128f},
{0.964894f, 0.902323f, 0.123941f}, {0.974417f, 0.903590f, 0.130215f}, {0.983868f, 0.904867f, 0.136897f}, {0.993248f, 0.906157f, 0.143936f}
};
} // namespace FLIP
#include <helper_cuda.h>
#include "../../util/include/sync.h"
#include "nvmatrix_kernels.cuh"
// ---- Pool-allocator tuning knobs and bucket arithmetic ---------------------
// Segments are grouped into power-of-two size "buckets"; a BUCKET_TYPE bit
// vector records which buckets currently have at least one free segment.
#define GPU_ALLOC_FRACTION 0.95 // Take 95% of available GPU memory
#define HOST_ALLOC_CHUNK (1UL << 32) // 4 GB initial pinned-host pool
#define SYNC_ON_FREE true // device-synchronize before recycling a freed segment
#define BUCKET_TYPE unsigned int // one availability bit per bucket
// Allocate memory from up to this many buckets higher than desired without subdividing
#define BUCKET_DIVISION_THRESHOLD 1
#define NUM_BUCKETS static_cast<int>(sizeof(BUCKET_TYPE) * 8)
// Count-leading-zeros that is well-defined for 0 (__builtin_clz(0) is UB).
#define CLZ(x) ((x) == 0 ? (NUM_BUCKETS) : __builtin_clz(x))
#define CEIL_LOG2(x) (NUM_BUCKETS - CLZ(x)) // Ceiling of log base 2 of (x + 1)
#define LOG_FIRST_BUCKET_SIZE 12
#define FIRST_BUCKET_SIZE (1 << LOG_FIRST_BUCKET_SIZE) // First bucket is for 4K bytes
// Smallest bucket whose segments are big enough to hold `size` bytes.
#define GET_ALLOC_BUCKET(size) (CEIL_LOG2(((size) - 1) >> LOG_FIRST_BUCKET_SIZE))
// Bucket a freed segment of `size` bytes is returned to (rounds down, so a
// partially-subdivided segment never lands in a bucket bigger than itself).
#define GET_DEALLOC_BUCKET(size) (CEIL_LOG2((size) >> (1 + LOG_FIRST_BUCKET_SIZE)))
#define GET_BUCKET_SIZE(b) (1UL << (LOG_FIRST_BUCKET_SIZE + b))
#define BUCKET_MASK(b) (1UL << (b))
#define PREV_BUCKETS_MASK(b) (BUCKET_MASK(b) - 1)
// Availability bits restricted to buckets >= b.
#define AVAILABLE_NEXT_MASK(b, buckets) ((buckets) & ~PREV_BUCKETS_MASK(b))
/*
 * Returns the "best-matching" available bucket as defined by policy.
 * The two policies are:
 *
 * TAKE_FROM_BIGGEST = true: If a bucket in the range
 * b...{b + BUCKET_DIVISION_THRESHOLD} is available, return the smallest
 * available bucket in that range. Otherwise return the *biggest* available
 * bucket greater than or equal to b.
 *
 * TAKE_FROM_BIGGEST = false: Return the *smallest* available bucket greater
 * than or equal to b.
 *
 * Returns -1 when no satisfactory bucket is available.
 */
#define TAKE_FROM_BIGGEST true
#if TAKE_FROM_BIGGEST
#define GET_AVAILABLE_BUCKET(b, buckets) \
(-1 + (((AVAILABLE_NEXT_MASK(b, buckets)) \
& (PREV_BUCKETS_MASK((b) + 1 + BUCKET_DIVISION_THRESHOLD))) \
/* Smallest bucket >= b */ ? __builtin_ffs(AVAILABLE_NEXT_MASK(b, buckets)) \
/* Biggest bucket >= b */ : CEIL_LOG2(AVAILABLE_NEXT_MASK(b, buckets))))
#else
#define GET_AVAILABLE_BUCKET(b, buckets) __builtin_ffs(AVAILABLE_NEXT_MASK(b, buckets))
#endif
/*
 * Bit get/set/clear.
 */
#define GET_BIT(x, bit) ((x) & (1 << (bit)))
#define SET_BIT(x, bit) ((x) |= (1 << (bit)))
#define CLEAR_BIT(x, bit) ((x) &= ~(1 << (bit)))
// Unit of allocation; __align__(512) forces 512-byte alignment of segment bases.
typedef struct __align__(512) {
char data;
} DataType;
// Round `size` up to a whole number of DataType units.
// NOTE(review): DIVUP is defined elsewhere in the project — verify its header is included.
#define SIZE_ROUNDUP(size) (sizeof(DataType) * DIVUP((size), sizeof(DataType)))
// A contiguous chunk of device (or pinned host) memory handed out by a
// MemoryManager. Records the base pointer, the size in bytes, and the CUDA
// device the memory belongs to. FastMemoryManager is a friend so it can
// subdivide segments and read their size.
class MemorySegment {
    friend class FastMemoryManager;
protected:
    DataType* _data;  // base address (DataType-aligned)
    size_t _size;     // size in bytes; multiple of sizeof(DataType)
    int _deviceID;    // owning CUDA device, or -1 when unknown/irrelevant
    // Resizes itself to _size - size and
    // returns pointer to new memory segment.
    // The new segment covers the `size` bytes carved off the *top* (highest
    // addresses) of this one; caller owns the returned object.
    MemorySegment* subdivide(size_t size) {
        assert(size < _size);
        // assert(size % sizeof(DataType) == 0);
        _size -= size;
        return new MemorySegment(_data + _size / sizeof(DataType), size, _deviceID);
    }
    // Size of the segment in bytes.
    inline size_t getSize() const {
        return _size;
    }
public:
    MemorySegment(DataType* data, size_t size, int deviceID) : _data(data), _size(size), _deviceID(deviceID) {
        assert(size % sizeof(DataType) == 0);
    }
    // In some cases size is irrelevant (e.g. wrapping a raw pointer for the
    // pass-through CUDAMemoryManager); size is recorded as 0, device as -1.
    template<typename T> MemorySegment(T* data) : _data(reinterpret_cast<DataType*>(data)), _size(0), _deviceID(-1) {
    }
    // Base pointer reinterpreted as T*.
    template <class T /*= DataType*/>
    inline T* getData() const {
        return reinterpret_cast<T*>(_data);
    }
    // Address of the internal pointer, for APIs that write the allocation
    // result through an out-parameter (e.g. cudaMalloc).
    template <class T /*= DataType*/>
    inline T** getDataPtr() {
        return reinterpret_cast<T**>(&_data);
    }
    inline int getDeviceID() const {
        return _deviceID;
    }
};
// Abstract allocator interface: init() prepares the manager, malloc()/free()
// hand out and reclaim MemorySegments.
class MemoryManager {
protected:
    // NOTE(review): presumably guards creation/lookup of per-device singleton
    // instances — verify at the definition/use site (not visible here).
    static Lock _globalLock;
public:
    virtual MemoryManager* init() = 0;
    virtual MemorySegment* malloc(size_t size) = 0;
    virtual void free(MemorySegment* mem) = 0;
    virtual ~MemoryManager() {
    }
};
// Pool allocator: grabs one large device allocation up front, then serves
// requests from per-size-bucket free lists (buddy-like, but segments are only
// ever split, never coalesced). All public operations are serialized by _lock.
class FastMemoryManager : public MemoryManager {
protected:
    int _deviceID;
    Lock _lock;          // protects _buckets and _freeSegments
    DataType* _data;     // base of the initial pool allocation
    size_t _size;        // pool size in bytes
    BUCKET_TYPE _buckets; // Bucket availability bit vector
    std::vector<std::vector<MemorySegment*> > _freeSegments; // bucket idx -> vector of segments
    static std::map<int, MemoryManager*> _memoryManagers;
    // Reserve GPU_ALLOC_FRACTION of the device's currently-free memory as the pool.
    virtual void allocateInitialSegment() {
        assert(_deviceID >= 0);
        assert(FIRST_BUCKET_SIZE % sizeof(DataType) == 0);
        checkCudaErrors(cudaSetDevice(_deviceID));
        size_t memFree, memTotal;
        checkCudaErrors(cudaMemGetInfo(&memFree, &memTotal));
        _size = sizeof(DataType) * (size_t(round(double(memFree) * GPU_ALLOC_FRACTION)) / sizeof(DataType));
        printf("FastMemoryManager[%d] allocating %lu-byte initial segment\n", _deviceID, _size);
        checkCudaErrors(cudaMalloc(&_data, _size));
    }
    virtual void freeInitialSegment() {
        checkCudaErrors(cudaFree(_data));
    }
public:
    static MemoryManager& getInstance(int deviceID);
    static void destroyInstance(int deviceID);
    FastMemoryManager(int deviceID) : _deviceID(deviceID), _data(NULL), _size(0), _buckets(0) {
    }
    // NOTE(review): calling the virtual freeInitialSegment() here happens after
    // any derived destructor has run, so it always dispatches to *this* class's
    // cudaFree — for a FastHostMemoryManager (cudaHostAlloc'd pool) that looks
    // like a mismatched free; confirm the teardown path.
    ~FastMemoryManager() {
        freeInitialSegment();
        for (int i = 0; i < _freeSegments.size(); ++i) {
            for (int j = 0; j < _freeSegments[i].size(); ++j) {
                delete _freeSegments[i][j];
            }
        }
    }
    // Allocate the pool and seed the free lists with one segment covering it.
    virtual MemoryManager* init() {
        allocateInitialSegment();
        for (int i = 0; i < NUM_BUCKETS; ++i) {
            _freeSegments.push_back(std::vector<MemorySegment*>());
        }
        int bucket = GET_DEALLOC_BUCKET(_size);
        SET_BIT(_buckets, bucket);
        _freeSegments[bucket].push_back(new MemorySegment(_data, _size, _deviceID));
        return this;
    }
    // Take a segment from the best-matching bucket; if the chosen segment is
    // more than BUCKET_DIVISION_THRESHOLD buckets too big, split off just the
    // requested bucket size and keep the remainder in the free lists.
    // NOTE(review): out-of-memory (GET_AVAILABLE_BUCKET == -1) is handled only
    // by the assert below — it aborts rather than failing gracefully.
    MemorySegment* malloc(size_t size) {
        assert(size > 0);
        int requestedBucket = GET_ALLOC_BUCKET(size);
        _lock.acquire();
        int bucket = GET_AVAILABLE_BUCKET(requestedBucket, _buckets);
        // if (bucket - requestedBucket > BUCKET_DIVISION_THRESHOLD) {
        //     printf("MemoryManager[%d] requested size: %lu, requested bucket: %d, available bucket: %d\n", _deviceID, size, requestedBucket, bucket);
        // }
        assert(bucket >= requestedBucket); // Out of memory
        MemorySegment* sourceSegment = _freeSegments[bucket].back();
        MemorySegment* ret = sourceSegment;
        if (bucket - requestedBucket > BUCKET_DIVISION_THRESHOLD) { // We got a much bigger chunk than we wanted
            // The caller gets the carved-off piece; the shrunken source stays
            // in the free lists (possibly migrating to a smaller bucket).
            ret = sourceSegment->subdivide(GET_BUCKET_SIZE(requestedBucket));
            int newSrcBucket = GET_DEALLOC_BUCKET(sourceSegment->getSize());
            if (newSrcBucket != bucket) {
                _freeSegments[bucket].pop_back();
                _freeSegments[newSrcBucket].push_back(sourceSegment);
                SET_BIT(_buckets, newSrcBucket);
            }
        } else {
            _freeSegments[bucket].pop_back();
        }
        if (_freeSegments[bucket].size() == 0) {
            CLEAR_BIT(_buckets, bucket);
        }
        _lock.release();
        return ret;
    }
    // Return a segment to its bucket's free list. Ownership passes back to
    // the manager; the MemorySegment object itself is recycled, not deleted.
    void free(MemorySegment* mem) {
        assert(mem != NULL);
        assert(mem->getSize() >= FIRST_BUCKET_SIZE);
        int bucket = GET_DEALLOC_BUCKET(mem->getSize());
        // Synchronize for safety, so that we don't free memory that's being used. Not synchronizing
        // could potentially cause a problem if we re-allocate the just-freed chunk and attempt to
        // use it in a different stream.
        if (SYNC_ON_FREE) {
            int d;
            checkCudaErrors(cudaGetDevice(&d));
            checkCudaErrors(cudaSetDevice(mem->getDeviceID()));
            checkCudaErrors(cudaDeviceSynchronize());
            checkCudaErrors(cudaSetDevice(d));
        }
        _lock.acquire();
        _freeSegments[bucket].push_back(mem);
        SET_BIT(_buckets, bucket);
        // printf("MemoryManager[%d] Freed segment of size %lu into bucket %lu\n", _deviceID, mem->getSize(), bucket);
        _lock.release();
    }
};
// Pool allocator over pinned (page-locked, portable) host memory. The pool is
// a fixed HOST_ALLOC_CHUNK rather than a fraction of device memory.
// NOTE(review): ~FastMemoryManager() invokes freeInitialSegment() from the
// base destructor, where virtual dispatch resolves to the base cudaFree()
// implementation, not this class's cudaFreeHost() — that frees a
// cudaHostAlloc'd pointer with the wrong API; verify and fix the teardown.
class FastHostMemoryManager : public FastMemoryManager {
protected:
    static MemoryManager* _memoryManager;
    void allocateInitialSegment() {
        _size = HOST_ALLOC_CHUNK;
        checkCudaErrors(cudaHostAlloc(&_data, _size, cudaHostAllocPortable));
    }
    void freeInitialSegment () {
        checkCudaErrors(cudaFreeHost(_data));
    }
public:
    // DEVICE_HOST is a project-defined pseudo device id (declared elsewhere).
    FastHostMemoryManager() : FastMemoryManager(DEVICE_HOST) {
    }
    static MemoryManager& getInstance();
    static void destroyInstance();
};
// Pass-through allocator: no pooling, every request goes straight to the CUDA
// runtime. Useful as a baseline / fallback for FastMemoryManager.
class CUDAMemoryManager : public MemoryManager {
protected:
    static MemoryManager* _memoryManager;

    // Allocate `size` bytes of device memory into *data.
    virtual void _malloc(DataType** data, size_t size) {
        checkCudaErrors(cudaMalloc(data, size));
    }

    // Release the device memory owned by `mem`.
    virtual void _free(MemorySegment* mem) {
        checkCudaErrors(cudaFree(mem->getData<DataType>()));
    }

public:
    static MemoryManager& getInstance(int deviceID);
    static void destroyInstance(int deviceID);

    CUDAMemoryManager() {
    }

    // Nothing to pre-allocate for a pass-through manager.
    MemoryManager* init() {
        return this;
    }

    // Allocate a fresh segment of `size` bytes directly from the runtime.
    MemorySegment* malloc(size_t size) {
        MemorySegment* segment = new MemorySegment(reinterpret_cast<DataType*>(NULL));
        _malloc(segment->getDataPtr<DataType>(), size);
        return segment;
    }

    // Release both the underlying memory and the segment descriptor itself.
    void free(MemorySegment* mem) {
        assert(mem != NULL);
        _free(mem);
        delete mem;
    }
};
// Pass-through allocator for pinned host memory: overrides the primitive
// alloc/free hooks of CUDAMemoryManager with cudaHostAlloc/cudaFreeHost.
class CUDAHostMemoryManager : public CUDAMemoryManager {
protected:
    static MemoryManager* _memoryManager;

    // Page-locked, portable (usable from any CUDA context) host allocation.
    void _malloc(DataType** data, size_t size) {
        checkCudaErrors(cudaHostAlloc(data, size, cudaHostAllocPortable));
    }

    // Matching release for cudaHostAlloc'd memory.
    void _free(MemorySegment* mem) {
        checkCudaErrors(cudaFreeHost(mem->getData<DataType>()));
    }

public:
    static MemoryManager& getInstance();
    static void destroyInstance();

    CUDAHostMemoryManager() : CUDAMemoryManager() {
    }
};
#endif /* MEMORY_CUH_H_ */
#include "gtest/gtest.h"
#include "k2/csrc/tensor_ops.h"
#include "k2/csrc/test_utils.h"
#include "k2/csrc/array_ops.h"
namespace k2 {
/* Return a 1-D tensor with random entries.
@param [in] context It specifies the device where the output tensor resides.
@param [in] dim Number of elements contained in the returned tensor.
@param [in] stride A positive number indicating the expected stride
of the output `tensor`.
@return Returns a 1-D tensor with the given `dim` and `stride`.
*/
template <typename T>
static Tensor GenerateRandTensor1D(ContextPtr context, int32_t dim,
                                   int32_t stride) {
  K2_CHECK_GT(stride, 0);
  // Draw random values on the host first, then scatter them into the
  // (possibly strided) tensor on the target device.
  std::vector<T> data_vec(dim);
  for (T &d : data_vec) d = RandInt(-1000, 1000);
  Shape shape({dim}, {stride});
  Array1<T> array(context, data_vec);
  const T *array_data = array.Data();
  Tensor ans(context, DtypeOf<T>::dtype, shape);
  T *ans_data = ans.Data<T>();
  // Element i of the logical tensor lives at physical offset i * stride.
  K2_EVAL(
      context, dim, lambda_set,
      (int32_t i)->void { ans_data[i * stride] = array_data[i]; });
  return ans;
}
/* Return a 2-D tensor filled with random values.
@param [in] context It specifies the device where the output tensor
resides.
@param [in] num_rows Number of rows in the returned tensor.
@param [in] num_cols Number of columns in the returned tensor.
@param [in] stride A positive number indicating the expected row stride
of the output `tensor`.
@return Returns a 2-D tensor with the given `num_rows`, `num_cols` and
`stride`.
*/
template <typename T>
static Tensor GenerateRandTensor2D(ContextPtr context, int32_t num_rows,
                                   int32_t num_cols, int32_t stride) {
  int32_t num_tensor_elements = num_rows * num_cols;
  K2_CHECK_GT(num_cols, 0);
  K2_CHECK_GE(stride, num_cols);  // row stride must cover a full row
  K2_CHECK_GE(num_rows, 0);
  // Draw random values on the host, then scatter them into the row-strided
  // tensor on the target device.
  std::vector<T> data_vec(num_tensor_elements);
  for (T &d : data_vec) d = RandInt(-1000, 1000);
  Shape shape({num_rows, num_cols}, {stride, 1});
  Array1<T> array(context, data_vec);
  const T *array_data = array.Data();
  Tensor ans(context, DtypeOf<T>::dtype, shape);
  T *ans_data = ans.Data<T>();
  // Element (i, j) lives at physical offset i * stride + j.
  K2_EVAL2(
      context, num_rows, num_cols, lambda_set, (int32_t i, int32_t j)->void {
        ans_data[i * stride + j] = array_data[i * num_cols + j];
      });
  return ans;
}
// Randomized test of 1-D Index(): gather src[indexes[i]] into ans[i], with -1
// indexes (when allowed) producing `default_value`. Alternates CPU/CUDA
// contexts across the 8 iterations and verifies element-by-element on the CPU.
template <typename T>
static void TestIndex1D() {
  bool allow_minus_one;
  int32_t stride;
  int32_t indexes_dim;
  int32_t numel;
  for (int32_t i = 0; i != 8; ++i) {
    stride = RandInt(1, 10);
    allow_minus_one = RandInt(-1000, 1000) & 1;  // random boolean
    indexes_dim = RandInt(1, 20000);
    numel = RandInt(1, 20000);
    T default_value = 1 - i;
    ContextPtr context = (i & 1) ? GetCpuContext() : GetCudaContext();
    Array1<int32_t> indexes =
        GenerateRandomIndexes(context, allow_minus_one, indexes_dim, numel - 1);
    Tensor src = GenerateRandTensor1D<T>(context, numel, stride);
    Tensor ans = Index(src, indexes, allow_minus_one, default_value);
    ASSERT_TRUE(ans.IsContiguous());
    ASSERT_EQ(ans.NumAxes(), 1);
    ASSERT_EQ(ans.Dim(0), indexes.Dim());
    // Move everything to the CPU for scalar-level checking.
    ans = ans.To(GetCpuContext());
    indexes = indexes.To(ans.Context());
    src = src.To(ans.Context());
    ASSERT_TRUE(src.IsContiguous());
    const T *ans_data = ans.Data<T>();
    int32_t ans_dim = ans.Dim(0);
    const T *src_data = src.Data<T>();
    const int32_t *indexes_data = indexes.Data();  // NOTE: unused; loop below reads indexes[i]
    // NOTE: this inner `i` shadows the outer iteration counter.
    for (int32_t i = 0; i != ans_dim; ++i) {
      int32_t index = indexes[i];
      if (index != -1)
        EXPECT_EQ(ans_data[i], src_data[index]);
      else
        EXPECT_EQ(ans_data[i], default_value);
    }
  }
}
// Randomized test of 2-D Index(): gather whole rows src[indexes[i]] into
// ans[i]; rows indexed by -1 (when allowed) must come out all-zero.
template <typename T>
static void TestIndex2D() {
  bool allow_minus_one;
  int32_t stride;
  int32_t num_rows;
  int32_t num_cols;
  int32_t indexes_dim;
  for (int32_t i = 0; i != 8; ++i) {
    num_rows = RandInt(1, 100);
    num_cols = RandInt(1, 100);
    stride = RandInt(0, 10) + num_cols;  // row stride >= num_cols
    indexes_dim = RandInt(1, 10000);
    allow_minus_one = RandInt(-1000, 1000) & 1;  // random boolean
    ContextPtr context = (i & 1) ? GetCpuContext() : GetCudaContext();
    Array1<int32_t> indexes = GenerateRandomIndexes(context, allow_minus_one,
                                                    indexes_dim, num_rows - 1);
    Tensor src = GenerateRandTensor2D<T>(context, num_rows, num_cols, stride);
    Tensor ans = Index(src, indexes, allow_minus_one);
    ASSERT_TRUE(ans.IsContiguous());
    ASSERT_EQ(ans.NumAxes(), 2);
    ASSERT_EQ(ans.Dim(0), indexes.Dim());
    ASSERT_EQ(ans.Dim(1), src.Dim(1));
    // Move everything to the CPU for scalar-level checking; the To() copy is
    // asserted contiguous so plain row-major indexing applies below.
    ans = ans.To(GetCpuContext());
    indexes = indexes.To(ans.Context());
    src = src.To(ans.Context());
    ASSERT_TRUE(src.IsContiguous());
    const T *ans_data = ans.Data<T>();
    int32_t ans_dim0 = ans.Dim(0);
    int32_t ans_dim1 = ans.Dim(1);
    const T *src_data = src.Data<T>();
    const int32_t *indexes_data = indexes.Data();  // NOTE: unused; loop below reads indexes[i]
    // NOTE: this inner `i` shadows the outer iteration counter.
    for (int32_t i = 0; i != ans_dim0; ++i) {
      int32_t index = indexes[i];
      if (index != -1) {
        for (int32_t j = 0; j != ans_dim1; ++j) {
          EXPECT_EQ(ans_data[i * ans_dim1 + j], src_data[index * ans_dim1 + j]);
        }
      } else {
        for (int32_t j = 0; j != ans_dim1; ++j)
          EXPECT_EQ(ans_data[i * ans_dim1 + j], 0);
      }
    }
  }
}
// Run the 1-D Index test for both float and int32 element types.
TEST(Index, Index1D) {
  TestIndex1D<float>();
  TestIndex1D<int32_t>();
}
// Run the 2-D Index test for both float and int32 element types.
TEST(Index, Index2D) {
  TestIndex2D<float>();
  TestIndex2D<int32_t>();
}
// Randomized test of 1-D IndexAdd(): dest[indexes[i]] += src[i], with -1
// indexes (when allowed) skipped. Verified against a sequential CPU
// re-computation on a saved copy of dest.
template <typename T>
static void TestIndexAdd1D() {
  bool allow_minus_one;
  int32_t src_stride;
  int32_t dest_stride;
  int32_t src_dim;
  int32_t dest_dim;
  for (int32_t i = 0; i != 8; ++i) {
    src_stride = RandInt(1, 10);
    dest_stride = RandInt(1, 10);
    allow_minus_one = RandInt(-1000, 1000) & 1;  // random boolean
    src_dim = RandInt(1, 20000);
    dest_dim = RandInt(1, 20000);
    ContextPtr context = (i & 1) ? GetCpuContext() : GetCudaContext();
    Array1<int32_t> indexes =
        GenerateRandomIndexes(context, allow_minus_one, src_dim, dest_dim - 1);
    Tensor src = GenerateRandTensor1D<T>(context, src_dim, src_stride);
    Tensor dest = GenerateRandTensor1D<T>(context, dest_dim, dest_stride);
    Tensor saved_dest = dest.Clone();  // pre-IndexAdd snapshot for the reference
    IndexAdd(src, indexes, allow_minus_one, &dest);
    // Move everything to the CPU for scalar-level checking.
    src = src.To(GetCpuContext());
    dest = dest.To(src.Context());
    indexes = indexes.To(dest.Context());
    saved_dest = saved_dest.To(src.Context());
    const T *src_data = src.Data<T>();
    const T *dest_data = dest.Data<T>();
    const int32_t *indexes_data = indexes.Data();
    T *saved_dest_data = saved_dest.Data<T>();
    // Sequential reference implementation of IndexAdd.
    for (int32_t i = 0; i != src_dim; ++i) {
      int32_t index = indexes_data[i];
      if (index == -1) continue;
      saved_dest_data[index] += src_data[i];
    }
    for (int32_t i = 0; i != dest_dim; ++i)
      EXPECT_EQ(dest_data[i], saved_dest_data[i]);
  }
}
// Randomized test of 2-D IndexAdd(): dest[indexes[i]] += src[i] row-wise,
// with -1 indexes (when allowed) skipped. Verified against a sequential CPU
// re-computation on a saved copy of dest.
template <typename T>
static void TestIndexAdd2D() {
  bool allow_minus_one;
  int32_t src_stride;
  int32_t dest_stride;
  int32_t num_src_rows;
  int32_t num_dest_rows;
  int32_t num_cols;
  for (int32_t i = 0; i != 8; ++i) {
    num_src_rows = RandInt(1, 100);
    num_dest_rows = RandInt(1, 100);
    num_cols = RandInt(1, 100);
    src_stride = RandInt(0, 10) + num_cols;   // row strides >= num_cols
    dest_stride = RandInt(0, 10) + num_cols;
    allow_minus_one = RandInt(-1000, 1000) & 1;  // random boolean
    ContextPtr context = (i & 1) ? GetCpuContext() : GetCudaContext();
    Array1<int32_t> indexes = GenerateRandomIndexes(
        context, allow_minus_one, num_src_rows, num_dest_rows - 1);
    Tensor src =
        GenerateRandTensor2D<T>(context, num_src_rows, num_cols, src_stride);
    Tensor dest =
        GenerateRandTensor2D<T>(context, num_dest_rows, num_cols, dest_stride);
    Tensor saved_dest = dest.Clone();  // pre-IndexAdd snapshot for the reference
    IndexAdd(src, indexes, allow_minus_one, &dest);
    // Move everything to the CPU for scalar-level checking (copies are
    // contiguous, so plain row-major indexing applies below).
    src = src.To(GetCpuContext());
    dest = dest.To(src.Context());
    indexes = indexes.To(dest.Context());
    saved_dest = saved_dest.To(src.Context());
    const T *src_data = src.Data<T>();
    const T *dest_data = dest.Data<T>();
    const int32_t *indexes_data = indexes.Data();
    T *saved_dest_data = saved_dest.Data<T>();
    // Sequential reference implementation of row-wise IndexAdd.
    for (int32_t i = 0; i != num_src_rows; ++i) {
      int32_t index = indexes_data[i];
      if (index == -1) continue;
      for (int j = 0; j != num_cols; ++j)
        saved_dest_data[index * num_cols + j] += src_data[i * num_cols + j];
    }
    int32_t n = num_dest_rows * num_cols;
    for (int32_t i = 0; i != n; ++i)
      EXPECT_EQ(dest_data[i], saved_dest_data[i]);
  }
}
// Run the 1-D IndexAdd test for float, double and int32 element types.
TEST(IndexAdd, IndexAdd1D) {
  TestIndexAdd1D<float>();
  TestIndexAdd1D<double>();
  TestIndexAdd1D<int32_t>();
}
// Run the 2-D IndexAdd test for float, double and int32 element types.
TEST(IndexAdd, IndexAdd2D) {
  TestIndexAdd2D<float>();
  TestIndexAdd2D<double>();
  TestIndexAdd2D<int32_t>();
}
// Deterministic test of SimpleRaggedIndexSelect1D() on a small hand-built
// case: for each ragged sub-list of candidate indexes, the result is the
// (single) non-zero src value among them, or 0 if all are zero.
template <typename T>
static void TestSimpleRaggedIndexSelect1D() {
  // test with simple case should be good enough
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    // create src: a strided 1-D tensor with known values
    int32_t stride = RandInt(1, 10);
    std::vector<T> src_vec_data = {0, 2, 0, 10, 0, -1};
    int32_t src_dim = static_cast<int32_t>(src_vec_data.size());
    Shape shape({src_dim}, {stride});
    Array1<T> array(context, src_vec_data);
    const T *array_data = array.Data();
    Tensor src(context, DtypeOf<T>::dtype, shape);
    T *src_data = src.Data<T>();
    K2_EVAL(
        context, src_dim, lambda_set_src_data,
        (int32_t i)->void { src_data[i * stride] = array_data[i]; });
    // create indexes: 5 sub-lists (one of them empty)
    std::vector<int32_t> row_splits_vec = {0, 3, 5, 6, 6, 9};
    Array1<int32_t> row_splits(context, row_splits_vec);
    RaggedShape indexes_shape = RaggedShape2(&row_splits, nullptr, -1);
    std::vector<int32_t> indexes_values_vec = {1, 0, 4, 2, 3, 0, 4, 5, 2};
    Array1<int32_t> indexes_values(context, indexes_values_vec);
    Ragged<int32_t> indexes(indexes_shape, indexes_values);
    Tensor ans = SimpleRaggedIndexSelect1D(src, indexes);
    ASSERT_TRUE(ans.IsContiguous());
    ASSERT_EQ(ans.NumAxes(), 1);
    ASSERT_EQ(ans.Dim(0), indexes.Dim0());
    ans = ans.To(GetCpuContext());
    // One expected value per sub-list (empty sub-list -> 0).
    std::vector<T> expected_data = {2, 10, 0, 0, -1};
    const T *ans_data = ans.Data<T>();
    int32_t ans_dim = ans.Dim(0);
    for (int32_t i = 0; i != ans_dim; ++i) {
      EXPECT_EQ(ans_data[i], expected_data[i]);
    }
  }
}
// Run the SimpleRaggedIndexSelect1D test for float, double and int32 types.
TEST(Index, SimpleRaggedIndexSelect1D) {
  TestSimpleRaggedIndexSelect1D<float>();
  TestSimpleRaggedIndexSelect1D<double>();
  TestSimpleRaggedIndexSelect1D<int32_t>();
}
// Cross-device test of DiscountedCumSum(): runs the same random problem on
// GPU and CPU and checks the two implementations agree (they are used to
// validate each other; there is no third reference). Odd iterations exercise
// the time-reversed (Flip) code path.
template <typename Real>
void TestDiscountedCumSum() {
  for (int32_t i = 0; i < 4; i++) {
    // M rows, T time steps; capped so the test stays fast.
    int32_t M = RandInt(0, 1000),
        T = RandInt(1, 2000); // TODO: increase.
    while (M * T > 10000) { // don't want test to take too long.
      M /= 2;
      T /= 2;
    }
    ContextPtr cuda_context = GetCudaContext(),
        cpu_context = GetCpuContext();
    Array2<Real> x = RandUniformArray2<Real>(cuda_context, M, T, -2.0, 2.0);
    Array2<Real> gamma = RandUniformArray2<Real>(cuda_context, M, T, 0.0, 1.0);
    Array2<Real> y(cuda_context, M, T);
    y = -10.0;  // poison the output so untouched entries are detectable
    bool flip = (i % 2 == 1);
    Array2<Real> x_cpu = x.To(cpu_context),
        gamma_cpu = gamma.To(cpu_context),
        y_cpu(cpu_context, M, T);
    // Tensor views share storage with the arrays above.
    Tensor x_ten = x.ToTensor(),
        gamma_ten = gamma.ToTensor(),
        y_ten = y.ToTensor();
    Tensor x_ten_cpu = x_cpu.ToTensor(),
        gamma_ten_cpu = gamma_cpu.ToTensor(),
        y_ten_cpu = y_cpu.ToTensor();
    if (flip) {
      // Reverse the time axis on both devices; results should still match.
      x_ten = Flip(x_ten, 1);
      gamma_ten = Flip(gamma_ten, 1);
      y_ten = Flip(y_ten, 1);
      x_ten_cpu = Flip(x_ten_cpu, 1);
      gamma_ten_cpu = Flip(gamma_ten_cpu, 1);
      y_ten_cpu = Flip(y_ten_cpu, 1);
    }
    DiscountedCumSum(x_ten, gamma_ten, &y_ten);
    DiscountedCumSum(x_ten_cpu, gamma_ten_cpu, &y_ten_cpu);
    Array2<Real> y_cpu_copy = y.To(cpu_context);  // GPU result, copied to CPU
    /*K2_LOG(INFO) << "x_cpu = " << x_cpu
                 << ", gamma_cpu = " << gamma_cpu
                 << ", y_cpu = " << y_cpu
                 << ", y = " << y_cpu_copy; */
    // We are using the CPU and GPU versions to check each other.
    EXPECT_EQ(true, ApproxEqual(y_cpu, y_cpu_copy, (Real)0.01));
  }
}
// Run the DiscountedCumSum cross-device test for float and double.
TEST(Tensor, DiscountedCumSum) {
  TestDiscountedCumSum<float>();
  TestDiscountedCumSum<double>();
}
} // namespace k2
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <map>
#include <iostream>
using namespace std;
/* every tool needs to include this once */
#include "nvbit_tool.h"
/* nvbit interface file */
#include "nvbit.h"
/* provide some __device__ functions */
#include "utils/utils.h"
/* kernel id counter, maintained in system memory */
uint32_t kernel_id = 0;
/* total instruction counter, maintained in system memory, incremented by
 * "counter" every time a kernel completes */
uint64_t tot_app_instrs = 0;
/* kernel instruction counter, updated by the GPU threads */
__managed__ uint64_t counter = 0;
/* pointer to memory location containing BBVs (basic-block vectors), laid out
 * as one `basic_blocks`-sized slot per thread block; managed so both host and
 * device code can touch it */
__managed__ int *bbv;
// Number of thread blocks of the kernel being launched
// NOTE(review): original comment said "Total threads" but the name suggests
// blocks — confirm at the (not visible here) site that sets it.
unsigned int tot_blocks = 0;
// Unique kernel ID
unsigned int kid = 0;
// True until the first BBV buffer allocation has been done
bool first = true;
// Prefix for the per-kernel basic-block log files
std::string fname = "bb_log_";
// Total number of basic blocks to keep track of
__managed__ unsigned int basic_blocks = 0;
// kernel name -> basic-block count / per-basic-block instruction counts
std::map <std::string, int> kbb_map;
std::map <std::string, std::vector<int>> kbb_insns;
/* global control variables for this tool; note the initializers here are
 * overwritten from environment variables in nvbit_at_init() (e.g. verbose
 * defaults to 0 there despite the 1 below) */
uint32_t ker_begin_interval = 0;
uint32_t ker_end_interval = UINT32_MAX;
int verbose = 1;
int count_warp_level = 1;
int exclude_pred_off = 0;
/* a pthread mutex, used to prevent multiple kernels to run concurrently and
 * therefore to "corrupt" the counter variable */
pthread_mutex_t mutex;
/* instrumentation function that we want to inject, please note the use of
* 1. "extern "C" __device__ __noinline__" to prevent code elimination by the
* compiler.
* 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function
* we want to inject. This name must match exactly the function name */
/* Instrumentation callback injected at the start of each basic block: adds
 * the number of active threads in the warp to this thread block's BBV slot
 * for basic block `bb`.
 * NOTE: num_instrs and count_warp_level are part of the injected-call
 * signature but are not used in this body; neither is global_wid below. */
extern "C" __device__ __noinline__ void count_instrs(int num_instrs,
                                                     int count_warp_level,
                                                     int bb) {
    // Get the global warp id to update the bbv
    int global_wid = get_global_warp_id();
    /* all the active threads will compute the active mask */
    const int active_mask = __ballot(1);
    /* each thread will get a lane id (get_lane_id is in utils/utils.h) */
    const int laneid = get_laneid();
    /* get the id of the first active thread */
    const int first_laneid = __ffs(active_mask) - 1;
    /* count all the active thread */
    const int num_threads = __popc(active_mask);
    /* only the first active thread will perform the atomic */
    if (first_laneid == laneid) {
        // Index based upon the bb param and the threadblock number
        // (flattened over the x/y grid dimensions; gridDim.z is not used here)
        int global_tb = blockIdx.y * gridDim.x + blockIdx.x;
        // Must use atomic here to avoid coherence problems
        atomicAdd(&bbv[global_tb * basic_blocks + bb], num_threads);
    }
}
NVBIT_EXPORT_FUNC(count_instrs);
/* Instrumentation callback for predicated instructions: subtracts
 * predicated-off work from the global `counter`. At warp level the count
 * drops by 1 only when no lane's predicate is set; at thread level it drops
 * by the number of active lanes whose predicate is off. */
extern "C" __device__ __noinline__ void count_pred_off(int predicate,
                                                       int count_warp_level) {
    const int active_mask = __ballot(1);        // lanes executing this call
    const int laneid = get_laneid();
    const int first_laneid = __ffs(active_mask) - 1;
    const int predicate_mask = __ballot(predicate);      // lanes with predicate on
    const int mask_off = active_mask ^ predicate_mask;   // active lanes with predicate off
    const int num_threads_off = __popc(mask_off);
    /* one representative lane per warp performs the update */
    if (first_laneid == laneid) {
        if (count_warp_level) {
            /* if the predicate mask was off we reduce the count of 1 */
            if (predicate_mask == 0)
                atomicAdd((unsigned long long *)&counter, -1);
        } else {
            atomicAdd((unsigned long long *)&counter, -num_threads_off);
        }
    }
}
NVBIT_EXPORT_FUNC(count_pred_off)
/* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We
 * typically do initializations in this call. In this case for instance we get
 * some environment variable values which we use as input arguments to the
 * tool. */
void nvbit_at_init() {
    /* just make sure all managed variables are allocated on GPU */
    setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
    /* read the environment variables that are going to be used to selectively
     * instrument (within an interval of kernel indexes and instructions). By
     * default we instrument everything. */
    GET_VAR_INT(ker_begin_interval, "KERNEL_BEGIN", 0,
                "Beginning of the kernel launch interval where to apply "
                "instrumentation");
    GET_VAR_INT(
        ker_end_interval, "KERNEL_END", UINT32_MAX,
        "End of the kernel launch interval where to apply instrumentation");
    GET_VAR_INT(count_warp_level, "COUNT_WARP_LEVEL", 1,
                "Count warp level or thread level instructions");
    GET_VAR_INT(exclude_pred_off, "EXCLUDE_PRED_OFF", 0,
                "Exclude predicated off instruction from count");
    GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool");
    /* visual separator after the option banner printed by GET_VAR_INT */
    std::string pad(100, '-');
    printf("%s\n", pad.c_str());
}
/* nvbit_at_function_first_load() is executed every time a function is loaded
 * for the first time. Inside this call-back we typically get the vector of SASS
 * instructions composing the loaded CUfunction. We can iterate on this vector
 * and insert calls to instrumentation functions before or after each one of
 * them. Here we instrument the first instruction of every basic block with
 * count_instrs, record the kernel's basic-block layout, and allocate the
 * basic-block vector (bbv). */
void nvbit_at_function_first_load(CUcontext ctx, CUfunction func) {
    /* hoisted: the function name is used several times below */
    const char *func_name = nvbit_get_func_name(ctx, func);
    /* Get the static control flow graph of the function */
    const CFG_t &cfg = nvbit_get_CFG(ctx, func);
    if (cfg.is_degenerate) {
        printf(
            "Warning: Function %s is degenerated, we can't compute basic "
            "blocks statically",
            func_name);
    }
    if (verbose) {
        printf("Function %s\n", func_name);
        /* dump the whole CFG for debugging */
        int cnt = 0;
        for (auto &bb : cfg.bbs) {
            printf("Basic block id %d - num instructions %ld\n", cnt++,
                   bb->instrs.size());
            for (auto &i : bb->instrs) {
                i->print(" ");
            }
        }
    }
    /* (removed) dead `if (0)` debug block that printed the bb count */
    /* Iterate on basic blocks and inject before the first instruction */
    int local_bb = 0;
    for (auto &bb : cfg.bbs) {
        Instr *i = bb->instrs[0];
        /* inject device function */
        nvbit_insert_call(i, "count_instrs", IPOINT_BEFORE);
        /* add size of basic block in number of instructions */
        nvbit_add_call_arg_const_val32(i, bb->instrs.size());
        /* add count warp level option */
        nvbit_add_call_arg_const_val32(i, count_warp_level);
        /* add basic block number */
        nvbit_add_call_arg_const_val32(i, local_bb++);
        if (verbose) {
            i->print("Inject count_instr before - ");
        }
    }
    // First time seeing the kernel: remember its basic-block count so the
    // launch callback can re-allocate bbv for subsequent launches.
    kbb_map.insert(std::pair<std::string, int>(func_name, cfg.bbs.size()));
    // Also remember the per-basic-block instruction counts (used as weights
    // when dumping the bbv at kernel exit).
    std::vector<int> i_counts;
    for (auto &bb : cfg.bbs) {
        i_counts.push_back(bb->instrs.size());
    }
    kbb_insns.insert(std::pair<std::string, std::vector<int>>(func_name, i_counts));
    /* NOTE(review): tot_blocks is only set in the launch callback; on the very
     * first load it may still hold a stale/zero value. The launch callback
     * re-allocates bbv for kernels found in kbb_map, which covers later
     * launches -- confirm the first launch is handled correctly. */
    int *bbs;
    basic_blocks = cfg.bbs.size();
    cudaMallocManaged(&bbs, (tot_blocks) * (basic_blocks) * sizeof(int));
    // Set our __managed__ pointer to our narrowly allocated region
    bbv = bbs;
    for (unsigned int i = 0; i < (tot_blocks) * (basic_blocks); i++) {
        bbv[i] = 0;
    }
    if (exclude_pred_off) {
        /* iterate on instructions */
        for (auto i : nvbit_get_instrs(ctx, func)) {
            /* inject only if instruction has a predicate */
            if (i->hasPred()) {
                /* inject function */
                nvbit_insert_call(i, "count_pred_off", IPOINT_BEFORE);
                /* add predicate as argument */
                nvbit_add_call_arg_pred_val(i);
                /* add count warp level option */
                nvbit_add_call_arg_const_val32(i, count_warp_level);
                if (verbose) {
                    i->print("Inject count_instr before - ");
                }
            }
        }
    }
}
/* This call-back is triggered every time a CUDA driver call is encountered.
 * Here we can look for a particular CUDA driver call by checking at the
 * call back ids which are defined in tools_cuda_api_meta.h.
 * This call back is triggered both at entry and at exit of each CUDA driver
 * call, is_exit=0 is entry, is_exit=1 is exit. */
void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid,
                         const char *name, void *params, CUresult *pStatus) {
    /* Identify all the possible CUDA launch events */
    if (cbid == API_CUDA_cuLaunch || cbid == API_CUDA_cuLaunchKernel_ptsz ||
        cbid == API_CUDA_cuLaunchGrid || cbid == API_CUDA_cuLaunchGridAsync ||
        cbid == API_CUDA_cuLaunchKernel) {
        /* cast params to cuLaunch_params since if we are here we know these are
         * the right parameters type */
        cuLaunch_params *p = (cuLaunch_params *)params;
        if (!is_exit) {
            /* if we are entering in a kernel launch:
             * 1. Lock the mutex to prevent multiple kernels to run concurrently
             *    (overriding the counter) in case the user application does that
             * 2. Select if we want to run the instrumented or original
             *    version of the kernel
             * 3. Reset the kernel instruction counter */
            pthread_mutex_lock(&mutex);
            /* Get the launch parameters so we can narrowly allocate memory.
             * NOTE(review): this cast is only valid for cuLaunchKernel-style
             * events; cuLaunch/cuLaunchGrid params do not carry gridDim
             * fields -- confirm those code paths are not exercised. */
            cuLaunchKernel_params_st *p_test = (cuLaunchKernel_params_st *)params;
            // Only look at 2D kernels (common case, might change if 3D ones exist)
            unsigned int gx = p_test->gridDimX;
            unsigned int gy = p_test->gridDimY;
            // Set global number of thread blocks
            tot_blocks = gx * gy;
            // Free the bbv of the previous launch before re-allocating below
            if (first) {
                first = false;
            } else {
                cudaFree(bbv);
            }
            if (kernel_id >= ker_begin_interval &&
                kernel_id < ker_end_interval) {
                nvbit_enable_instrumented(ctx, p->f, true);
            } else {
                nvbit_enable_instrumented(ctx, p->f, false);
            }
            counter = 0;
            // Allocate space here if we have called the kernel before
            auto it = kbb_map.find(nvbit_get_func_name(ctx, p->f));
            if (it != kbb_map.end()) {
                basic_blocks = it->second;
                int *bbs;
                cudaMallocManaged(&bbs, tot_blocks * (basic_blocks) * sizeof(int));
                // Set our __managed__ pointer to our narrowly allocated region
                bbv = bbs;
                for (unsigned int i = 0; i < tot_blocks * (basic_blocks); i++) {
                    bbv[i] = 0;
                }
            }
        } else {
            /* if we are exiting a kernel launch:
             * 1. Wait until the kernel is completed using cudaDeviceSynchronize()
             * 2. Dump one line per thread block with each basic block's
             *    execution count weighted by its instruction count
             * 3. Release the lock */
            CUDA_SAFECALL(cudaDeviceSynchronize());
            auto it = kbb_insns.find(nvbit_get_func_name(ctx, p->f));
            if (it == kbb_insns.end()) {
                /* kernel was never instrumented: nothing to dump */
                pthread_mutex_unlock(&mutex);
                return;
            }
            std::vector<int> test = it->second;
            FILE *f = fopen((fname + std::to_string(kid) + ".txt").c_str(), "w+");
            kid++;
            if (f != NULL) {
                fprintf(f, "%s\n", nvbit_get_func_name(ctx, p->f));
                // For each basic block vector
                for (unsigned int i = 0; i < tot_blocks; i++) {
                    for (unsigned int j = 0; j < (basic_blocks); j++) {
                        fprintf(f, "%d ", bbv[i * (basic_blocks) + j] * test[j]);
                    }
                    fprintf(f, "\n");
                }
                fclose(f);
            }
            pthread_mutex_unlock(&mutex);
        }
    }
}
namespace timemachine {
// Builds the restraint: validates both atom groups, forms the de-duplicated
// union of their indices (h_c_idxs_), and allocates the device index buffer
// plus pinned-host/device staging buffers used for gather/scatter round trips.
// Throws std::runtime_error if any index is out of [0, N).
template <typename RealType>
InertialRestraint<RealType>::InertialRestraint(
    const std::vector<int> &group_a_idxs,
    const std::vector<int> &group_b_idxs,
    const std::vector<double> &masses,
    const double k)
    : N_(masses.size()), N_A_(group_a_idxs.size()), N_B_(group_b_idxs.size()), k_(k), h_a_idxs_(group_a_idxs),
      h_b_idxs_(group_b_idxs), h_masses_(masses), h_x_buffer_(N_ * 3), h_conf_adjoint_(N_ * 3) {
    for (int i = 0; i < group_a_idxs.size(); i++) {
        if (group_a_idxs[i] >= N_ || group_a_idxs[i] < 0) {
            throw std::runtime_error("Invalid group_a_idx!");
        }
        h_c_idxs_.push_back(group_a_idxs[i]);
    }
    for (int i = 0; i < group_b_idxs.size(); i++) {
        if (group_b_idxs[i] >= N_ || group_b_idxs[i] < 0) {
            // (fixed) this previously reported "Invalid group_a_idx!" for
            // failures in group B, which made the error misleading.
            throw std::runtime_error("Invalid group_b_idx!");
        }
        h_c_idxs_.push_back(group_b_idxs[i]);
    }
    // (ytz): take care of special corner case when a_idxs and b_idxs
    // are not disjoint -- de-duplicate (and sort) via std::set.
    std::set<int> c_set(h_c_idxs_.begin(), h_c_idxs_.end());
    h_c_idxs_.assign(c_set.begin(), c_set.end());
    N_C_ = h_c_idxs_.size();
    gpuErrchk(cudaMalloc(&d_c_idxs_, N_C_ * sizeof(*d_c_idxs_)));
    gpuErrchk(cudaMemcpy(d_c_idxs_, &h_c_idxs_[0], N_C_ * sizeof(*d_c_idxs_), cudaMemcpyHostToDevice));
    // Pinned host buffer + device buffer used to shuttle the N_C_ gathered
    // coordinates (and later the forces) between host and device.
    gpuErrchk(cudaMallocHost(&h_x_memcpy_buf_pinned_, N_C_ * 3 * sizeof(*h_x_memcpy_buf_pinned_)));
    gpuErrchk(cudaMalloc(&d_x_memcpy_buf_, N_C_ * 3 * sizeof(*d_x_memcpy_buf_)));
};
// Releases the device index buffer, the device staging buffer and the pinned
// host staging buffer allocated in the constructor.
template <typename RealType> InertialRestraint<RealType>::~InertialRestraint() {
    gpuErrchk(cudaFree(d_c_idxs_));
    gpuErrchk(cudaFree(d_x_memcpy_buf_));
    gpuErrchk(cudaFreeHost(h_x_memcpy_buf_pinned_));
};
// Inertia tensor of the selected atoms about their center of mass, written
// row-major into out_tensor[9]. Note every component is divided by the total
// mass of the selection (mass-normalized tensor).
void inertia_tensor(const int NX, const int *h_idxs, const double *h_masses, const double *h_x_in, double *out_tensor) {
    // Pass 1: mass-weighted centroid of the selection.
    double com[3] = {0, 0, 0};
    double total_mass = 0;
    for (int i = 0; i < NX; i++) {
        const int a = h_idxs[i];
        const double m = h_masses[a];
        for (int d = 0; d < 3; d++) {
            com[d] += m * h_x_in[a * 3 + d];
        }
        total_mass += m;
    }
    for (int d = 0; d < 3; d++) {
        com[d] /= total_mass;
    }
    // Pass 2: accumulate the six unique components of the symmetric tensor.
    double xx = 0, yy = 0, zz = 0;
    double xy = 0, xz = 0, yz = 0;
    for (int i = 0; i < NX; i++) {
        const int a = h_idxs[i];
        const double m = h_masses[a];
        const double cx = h_x_in[a * 3 + 0] - com[0];
        const double cy = h_x_in[a * 3 + 1] - com[1];
        const double cz = h_x_in[a * 3 + 2] - com[2];
        xx += m * (cy * cy + cz * cz);
        yy += m * (cx * cx + cz * cz);
        zz += m * (cy * cy + cx * cx);
        xy -= m * cx * cy;
        xz -= m * cx * cz;
        yz -= m * cz * cy;
    }
    // Fill the full symmetric 3x3 matrix, normalized by the total mass.
    out_tensor[0 * 3 + 0] = xx / total_mass;
    out_tensor[0 * 3 + 1] = xy / total_mass;
    out_tensor[0 * 3 + 2] = xz / total_mass;
    out_tensor[1 * 3 + 0] = xy / total_mass;
    out_tensor[1 * 3 + 1] = yy / total_mass;
    out_tensor[1 * 3 + 2] = yz / total_mass;
    out_tensor[2 * 3 + 0] = xz / total_mass;
    out_tensor[2 * 3 + 1] = yz / total_mass;
    out_tensor[2 * 3 + 2] = zz / total_mass;
}
// Single-thread kernel: atomically adds `var` into *addr on the device
// (used below to accumulate the host-computed restraint energy into d_u).
__global__ void k_atomic_add(double *addr, double var) { atomicAdd(addr, var); }
// Gathers the coordinates of the C atoms listed in c_idxs from the full
// coordinate array `src` into the compact buffer `dst` (C x 3).
// Launch layout: grid.x * blockDim.x covers C threads; grid.y == 3 selects
// the spatial dimension handled by each block row.
__global__ void k_gather_x(const double *src, const int C, const int *c_idxs, double *dst) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= C) {
        return;
    }
    const int dim = blockIdx.y; // 0 = x, 1 = y, 2 = z
    dst[tid * 3 + dim] = src[c_idxs[tid] * 3 + dim];
}
// Scatter-adds the compact per-atom values in `src` (C x 3) back into the
// full-size accumulator `dst` at the atom slots given by c_idxs. Values are
// scaled by FIXED_EXPONENT and converted to the unsigned fixed-point
// representation used for force accumulation (two's-complement round trip
// via long long preserves negative contributions).
// Launch layout: grid.x * blockDim.x covers C threads; grid.y == 3 selects
// the spatial dimension.
__global__ void k_accumulate_scatter(const int C, const int *c_idxs, const double *src, unsigned long long *dst) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= C) {
        return;
    }
    const int dim = blockIdx.y;
    atomicAdd(
        dst + c_idxs[tid] * 3 + dim, static_cast<unsigned long long>((long long)(src[tid * 3 + dim] * FIXED_EXPONENT)));
}
// Branchless sign: returns +1, 0 or -1 for any ordered arithmetic type T.
template <typename T> int sgn(T val) {
    const bool positive = T(0) < val;
    const bool negative = val < T(0);
    return static_cast<int>(positive) - static_cast<int>(negative);
}
// Debug helper: dump a labeled 3x3 matrix to stdout, one row per line.
void print_matrix(double x[3][3], std::string name) {
    std::cout << "matrix: " << name << std::endl;
    for (int row = 0; row < 3; row++) {
        for (int col = 0; col < 3; col++) {
            std::cout << x[row][col] << " ";
        }
        std::cout << std::endl;
    }
}
// Backpropagates an adjoint of the (mass-normalized) inertia tensor into
// per-coordinate adjoints. `conf_adjoint` is accumulated into (+=), so the
// caller must zero (or pre-seed) it. Only the six components of
// adjoint_tensor read below participate; the tensor is assumed symmetric.
void grad_inertia_tensor(
    int N,
    const int *idxs,
    const double *masses,
    const double *conf,
    const double adjoint_tensor[3][3],
    double *conf_adjoint) {
    // Mass-weighted centroid of the selection (same as in inertia_tensor).
    double centroid[3] = {0};
    double sum = 0;
    for (int i = 0; i < N; i++) {
        int atom_idx = idxs[i];
        double mass = masses[atom_idx];
        for (int d = 0; d < 3; d++) {
            centroid[d] += mass * conf[atom_idx * 3 + d];
        }
        sum += mass;
    }
    centroid[0] /= sum;
    centroid[1] /= sum;
    centroid[2] /= sum;
    double dxx = adjoint_tensor[0][0];
    double dxy = adjoint_tensor[0][1];
    double dxz = adjoint_tensor[0][2];
    double dyy = adjoint_tensor[1][1];
    double dyz = adjoint_tensor[1][2];
    double dzz = adjoint_tensor[2][2];
    // (fixed) the total mass was previously recomputed in a second loop into
    // a separate `mass_sum`; it is identical to `sum` computed above.
    const double mass_sum = sum;
    for (int i = 0; i < N; i++) {
        int a_idx = idxs[i];
        double mass = masses[a_idx];
        double xs = conf[a_idx * 3 + 0] - centroid[0];
        double ys = conf[a_idx * 3 + 1] - centroid[1];
        double zs = conf[a_idx * 3 + 2] - centroid[2];
        // Chain rule through xx = m(y^2+z^2)/M etc.; off-diagonal terms enter
        // with a negative sign (xy = -m*x*y/M, ...).
        conf_adjoint[a_idx * 3 + 0] +=
            (dyy * 2 * xs + dzz * 2 * xs + -dxy * 2 * ys + -dxz * 2 * zs) * (mass / mass_sum);
        conf_adjoint[a_idx * 3 + 1] +=
            (dzz * 2 * ys + dxx * 2 * ys + -dxy * 2 * xs + -dyz * 2 * zs) * (mass / mass_sum);
        conf_adjoint[a_idx * 3 + 2] +=
            (dxx * 2 * zs + dyy * 2 * zs + -dxz * 2 * xs + -dyz * 2 * ys) * (mass / mass_sum);
    }
}
// Vector-Jacobian product of the symmetric-3x3 eigendecomposition: given
// eigenvalues w, column eigenvectors v and adjoints vg of those eigenvectors,
// writes the adjoint of the input matrix into a_adjoint.
// NOTE(review): F blows up for (near-)repeated eigenvalues -- presumably the
// caller's tensors have distinct eigenvalues; confirm upstream.
void grad_eigh(
    const double w[3],       // eigenvalues
    const double v[3][3],    // eigenvectors
    const double vg[3][3],   // eigenvector adjoints
    double a_adjoint[3][3]   // input array adjoints
) {
    /*
    (ytz): I really hate this code. See timemachine.lib.pmi.grad_eigh for a slightly more
    readable python implementation.
    Reference implementation of the vector jacobian product of the derivative of column
    eigenvectors with respect to the input matrix. This code is derived from perturbation theory,
    and originally ported over from autograd in a long series of conversions.
    */
    // off_diag masks the diagonal; F[i][j] = 1/(w[j]-w[i]) off-diagonal and 0
    // on the diagonal (the (1 - off_diag) term guards against division by 0).
    double off_diag[3][3] = {
        {0, 1, 1},
        {1, 0, 1},
        {1, 1, 0},
    };
    double F[3][3];
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            F[i][j] = off_diag[i][j] / (w[j] - w[i] + (1 - off_diag[i][j]));
        }
    }
    // C = (v^T * vg) elementwise-multiplied by F
    double C[3][3] = {0};
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            double sum = 0;
            for (int k = 0; k < 3; k++) {
                sum += v[k][i] * vg[k][j];
            }
            C[i][j] = sum * F[i][j];
        }
    }
    // D = v * C
    double D[3][3] = {0};
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            double sum = 0;
            for (int k = 0; k < 3; k++) {
                sum += v[i][k] * C[k][j];
            }
            D[i][j] = sum;
        }
    }
    // vjp_temp = D * v^T
    double vjp_temp[3][3] = {0};
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            double sum = 0;
            for (int k = 0; k < 3; k++) {
                sum += D[i][k] * v[j][k];
            }
            vjp_temp[i][j] = sum;
        }
    }
    // Symmetrize: the input matrix is symmetric, so off-diagonal adjoints are
    // averaged with their transpose counterparts.
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            if (i == j) {
                a_adjoint[i][j] = vjp_temp[i][j];
            } else {
                a_adjoint[i][j] = (vjp_temp[i][j] + vjp_temp[j][i]) / 2;
            }
        }
    }
}
// Computes the inertial-alignment restraint energy and forces. The 3x3
// eigen problem is solved on the CPU; only the N_C_ relevant coordinates are
// shuttled to the host and only the corresponding forces are shuttled back.
// NOTE(review): the `stream` argument is currently unused -- kernels launch
// on the default stream and the memcpys are synchronous; confirm this
// serialization is intentional.
template <typename RealType>
void InertialRestraint<RealType>::execute_device(
    const int N,
    const int P,
    const double *d_x,
    const double *d_p,
    const double *d_box,
    const double lambda,
    unsigned long long *d_du_dx,
    double *d_du_dp,
    double *d_du_dl,
    double *d_u,
    cudaStream_t stream) {
    int tpb = 32;
    // (ytz): This function proceeds as follows:
    // 0. (GPU) Gather a subset of the coordinates that will be used
    // 1. (GPU->CPU) Copy only coordinates for atoms in c_idxs
    // 2. (CPU) Scatter gathered coordinates
    // 3. (CPU) Compute the 3x3 inertia tensor (real and symmetric) for each set of indices..
    // 4. (CPU) Solve for the eigenvalues and eigenvector analytically. Sorted in ascending order.
    // 5. (CPU) Compute the energy function function using the paired column eigenvectors.
    // 6. (CPU) Backpropagate the derivative into a single [Nx3] array.
    // 7. (CPU) Gather the forces
    // 8. (CPU->GPU) Copy only the forces for atoms in c_idxs
    // 9. (GPU) Update forces and energy
    // cudaDeviceSynchronize();
    // auto start = std::chrono::high_resolution_clock::now();
    dim3 dimGather((N_C_ + tpb - 1) / tpb, 3, 1);
    k_gather_x<<<dimGather, tpb>>>(d_x, N_C_, d_c_idxs_, d_x_memcpy_buf_);
    gpuErrchk(cudaPeekAtLastError());
    // synchronous copy of the gathered coordinates into pinned host memory
    gpuErrchk(cudaMemcpy(
        h_x_memcpy_buf_pinned_, d_x_memcpy_buf_, N_C_ * 3 * sizeof(*d_x_memcpy_buf_), cudaMemcpyDeviceToHost));
    // scatter the compact buffer back to full atom indexing on the host
    std::vector<double> &h_x_in = h_x_buffer_;
    for (int i = 0; i < h_c_idxs_.size(); i++) {
        for (int d = 0; d < 3; d++) {
            h_x_in[h_c_idxs_[i] * 3 + d] = h_x_memcpy_buf_pinned_[i * 3 + d];
        }
    }
    const std::vector<double> &h_masses = h_masses_;
    const std::vector<int> &h_a_idxs = h_a_idxs_;
    const std::vector<int> &h_b_idxs = h_b_idxs_;
    std::vector<double> a_tensor(3 * 3);
    std::vector<double> b_tensor(3 * 3);
    // mass-normalized inertia tensors of group A and group B
    inertia_tensor(N_A_, &h_a_idxs[0], &h_masses[0], &h_x_in[0], &a_tensor[0]);
    inertia_tensor(N_B_, &h_b_idxs[0], &h_masses[0], &h_x_in[0], &b_tensor[0]);
    // analytic 3x3 symmetric eigensolver (dsyevv3), eigenvalues ascending
    double(&a_array)[3][3] = *reinterpret_cast<double(*)[3][3]>(&a_tensor[0]);
    double a_w[3];    // eigenvalues
    double a_v[3][3]; // eigenvectors
    dsyevv3(a_array, a_v, a_w);
    double(&b_array)[3][3] = *reinterpret_cast<double(*)[3][3]>(&b_tensor[0]);
    double b_w[3];    // eigenvalues
    double b_v[3][3]; // eigenvectors
    dsyevv3(b_array, b_v, b_w);
    // this is equivalent to:
    // R' = matmul(A^T, B)
    // sum_i (1 - dot(R'[i], e[i]))^2 where e is the identity matrix (the standard basis)
    // see reference python code for more information
    double loss = 0;
    double dl_da_v[3][3]; // derivatives of loss wrt. a's eigenvectors
    double dl_db_v[3][3]; // derivatives of loss wrt. b's eigenvectors
    for (int j = 0; j < 3; j++) {
        double dot_prod = 0;
        for (int i = 0; i < 3; i++) {
            dot_prod += a_v[i][j] * b_v[i][j];
        }
        // abs() makes the loss invariant to the eigenvector sign ambiguity.
        // NOTE(review): consider fabs() here -- a plain abs(double) can
        // resolve to the integer overload depending on headers; confirm.
        double delta = 1 - abs(dot_prod);
        loss += delta * delta;
        double prefactor = -sgn(dot_prod) * 2 * delta * k_;
        for (int i = 0; i < 3; i++) {
            dl_da_v[i][j] = prefactor * b_v[i][j];
            dl_db_v[i][j] = prefactor * a_v[i][j];
        }
    }
    // backprop: eigenvector adjoints -> tensor adjoints -> coordinate adjoints
    double dl_da_tensor[3][3];
    double dl_db_tensor[3][3];
    grad_eigh(a_w, a_v, dl_da_v, dl_da_tensor);
    grad_eigh(b_w, b_v, dl_db_v, dl_db_tensor);
    // zero only the slots we will accumulate into
    for (int i = 0; i < h_c_idxs_.size(); i++) {
        for (int d = 0; d < 3; d++) {
            h_conf_adjoint_[h_c_idxs_[i] * 3 + d] = 0;
        }
    }
    grad_inertia_tensor(N_A_, &h_a_idxs[0], &h_masses[0], &h_x_in[0], dl_da_tensor, &h_conf_adjoint_[0]);
    grad_inertia_tensor(N_B_, &h_b_idxs[0], &h_masses[0], &h_x_in[0], dl_db_tensor, &h_conf_adjoint_[0]);
    // gather the forces into the compact pinned buffer and push to the device
    for (int i = 0; i < h_c_idxs_.size(); i++) {
        for (int d = 0; d < 3; d++) {
            h_x_memcpy_buf_pinned_[i * 3 + d] = h_conf_adjoint_[h_c_idxs_[i] * 3 + d];
        }
    }
    gpuErrchk(cudaMemcpy(
        d_x_memcpy_buf_, h_x_memcpy_buf_pinned_, N_C_ * 3 * sizeof(*d_x_memcpy_buf_), cudaMemcpyHostToDevice));
    if (d_u) {
        k_atomic_add<<<1, 1, 0>>>(d_u, loss * k_);
        gpuErrchk(cudaPeekAtLastError());
    }
    // NOTE(review): grid is sized by N but k_accumulate_scatter only needs
    // ceil(N_C_ / tpb) blocks; the extra blocks exit on the bounds check.
    const int B = (N + tpb - 1) / tpb;
    dim3 dimGrid(B, 3, 1);
    gpuErrchk(cudaPeekAtLastError());
    if (d_du_dx) {
        k_accumulate_scatter<<<dimGrid, tpb, 0>>>(N_C_, d_c_idxs_, d_x_memcpy_buf_, d_du_dx);
        gpuErrchk(cudaPeekAtLastError());
    }
    cudaDeviceSynchronize();
    // auto end = std::chrono::high_resolution_clock::now();
    // std::cout << "total: " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << "us" << std::endl;;
    gpuErrchk(cudaPeekAtLastError());
};
// Explicit instantiations for the two supported precisions.
template class InertialRestraint<double>;
template class InertialRestraint<float>;
} // namespace timemachine
#include <nvToolsExt.h>
#define DELETE 1
using namespace timer;
namespace hornets_nest {
// Debug helper: print indexed (src, dst) pairs, optionally sorted. When
// `device` is true the pointers refer to device memory and are copied back
// to the host first.
template <bool device>
void print_ptr(vid_t* src, vid_t* dst, int count, bool sort = false) {
    std::vector<vid_t> S, D;
    vid_t *s;
    vid_t *d;
    if (device) {
        S.resize(count);
        D.resize(count);
        s = S.data();
        d = D.data();
        cudaMemcpy(s, src, sizeof(vid_t)*count, cudaMemcpyDeviceToHost);
        cudaMemcpy(d, dst, sizeof(vid_t)*count, cudaMemcpyDeviceToHost);
    } else {
        s = src;
        d = dst;
    }
    std::vector<std::pair<vid_t, vid_t>> pairs;
    pairs.reserve(count);
    for (int i = 0; i < count; ++i) {
        pairs.push_back(std::make_pair(s[i], d[i]));
    }
    if (sort) {
        std::sort(pairs.begin(), pairs.end());
    }
    for (unsigned i = 0; i < pairs.size(); ++i) {
        std::cout<<i<<"\t"<<pairs[i].first<<"\t"<<pairs[i].second<<"\n";
    }
}
// Debug helper: print indexed values, optionally sorted. When `device` is
// true the pointer refers to device memory and is copied back first.
// (fixed) the host path previously iterated the empty vector S and printed
// nothing; both paths now stage into S so sorting never mutates caller data.
template <bool device>
void print_ptr(vid_t* src, int count, bool sort = false) {
    std::vector<vid_t> S(count);
    if (device) {
        cudaMemcpy(S.data(), src, sizeof(vid_t)*count, cudaMemcpyDeviceToHost);
    } else {
        std::copy(src, src + count, S.begin());
    }
    if (sort) { std::sort(S.begin(), S.end()); }
    for (unsigned i = 0; i < S.size(); ++i) {
        std::cout<<i<<"\t"<<S[i]<<"\n";
    }
}
// Allocates the per-vertex state arrays (presence, color, degree) and the
// extracted-edge buffer (src/dst of size nE plus an atomic counter), and
// binds the work queues to the graph.
// NOTE(review): hd_data().src/dst hold at most nE() entries; DegOneEdges may
// emit a reverse edge per hit -- confirm nE() counts both directions.
KCore::KCore(HornetGraph &hornet) :
                       StaticAlgorithm(hornet),
                       vqueue(hornet),
                       peel_vqueue(hornet),
                       active_queue(hornet),
                       iter_queue(hornet),
                       load_balancing(hornet)
{
    gpu::allocate(vertex_pres, hornet.nV());
    gpu::allocate(vertex_color, hornet.nV());
    gpu::allocate(vertex_deg, hornet.nV());
    gpu::allocate(hd_data().src,    hornet.nE());
    gpu::allocate(hd_data().dst,    hornet.nE());
    gpu::allocate(hd_data().counter, 1);
}
// Frees all device buffers allocated in the constructor.
KCore::~KCore() {
    gpu::free(vertex_pres);
    gpu::free(vertex_color);
    gpu::free(vertex_deg);
    gpu::free(hd_data().src);
    gpu::free(hd_data().dst);
    // (fixed) the counter was allocated in the constructor but never freed.
    gpu::free(hd_data().counter);
}
// Strict-weak lexicographic ordering over the first two fields (src, then
// dst) of the (src, dst, peel) triples used when sorting output.
// (fixed) the old tie-break clause required get<1>(a) == get<1>(b) AND
// get<1>(a) < get<1>(b) simultaneously, which is impossible, so ties on the
// first field were never ordered by the second.
struct Comp {
    using Tuple = thrust::tuple<vid_t, vid_t, uint32_t>;
    __host__
    bool operator()(Tuple a, Tuple b) {
        if (thrust::get<0>(a) != thrust::get<0>(b)) {
            return thrust::get<0>(a) < thrust::get<0>(b);
        }
        return thrust::get<1>(a) < thrust::get<1>(b);
    }
};
// Per-vertex operator: marks every vertex with non-zero degree as present
// (vertex_pres = 1), snapshots its degree, and enqueues it as active.
struct ActiveVertices {
    vid_t *vertex_pres;
    vid_t *deg;
    TwoLevelQueue<vid_t> active_queue;
    OPERATOR(Vertex &v) {
        vid_t id = v.id();
        if (v.degree() > 0) {
            vertex_pres[id] = 1;
            active_queue.insert(id);
            deg[id] = v.degree();
        }
    }
};
// Per-vertex operator: marks still-present vertices whose (decremented)
// degree has dropped to <= peel as peeled (vertex_pres = 2) and enqueues
// them both for the cumulative peel queue and this iteration's work queue.
struct PeelVertices {
    vid_t *vertex_pres;
    vid_t *deg;
    uint32_t peel;
    TwoLevelQueue<vid_t> peel_queue;
    TwoLevelQueue<vid_t> iter_queue;
    //mark vertices with degrees less than peel
    OPERATOR(Vertex &v) {
        vid_t id = v.id();
        if (vertex_pres[id] == 1 && deg[id] <= peel) {
            vertex_pres[id] = 2;
            peel_queue.insert(id);
            iter_queue.insert(id);
        }
    }
};
// Per-vertex operator: clears the "peeled" mark (2 -> 0) so already-peeled
// vertices are excluded from subsequent peel rounds.
struct RemovePres {
    vid_t *vertex_pres;
    OPERATOR(Vertex &v) {
        vid_t id = v.id();
        if (vertex_pres[id] == 2) {
            vertex_pres[id] = 0;
        }
    }
};
// Per-edge operator: atomically decrements the tracked degree of BOTH
// endpoints when an edge incident to a just-peeled vertex is processed.
struct DecrementDegree {
    vid_t *deg;
    OPERATOR(Vertex &v, Edge &e) {
        vid_t src = v.id();
        vid_t dst = e.dst_id();
        atomicAdd(&deg[src], -1);
        atomicAdd(&deg[dst], -1);
    }
};
// Per-edge operator: copies edges whose endpoints are both marked peeled
// (vertex_pres == 2) into the hd edge buffer. A slot is reserved with an
// atomic increment of the shared counter.
struct ExtractSubgraph {
    HostDeviceVar<KCoreData> hd;
    vid_t *vertex_pres;
    OPERATOR(Vertex &v, Edge &e) {
        vid_t src = v.id();
        vid_t dst = e.dst_id();
        if (vertex_pres[src] == 2 && vertex_pres[dst] == 2) {
            int spot = atomicAdd(hd().counter, 1);
            hd().src[spot] = src;
            hd().dst[spot] = dst;
        }
    }
};
// Per-vertex operator: enqueues degree-1 vertices and colors them so the
// DegOneEdges pass can identify their incident edges.
struct GetDegOne {
    TwoLevelQueue<vid_t> vqueue;
    vid_t *vertex_color;
    OPERATOR(Vertex &v) {
        vid_t id = v.id();
        if (v.degree() == 1) {
            vqueue.insert(id);
            vertex_color[id] = 1;
        }
    }
};
// Per-edge operator: records every edge touching a colored (degree-1)
// vertex. When only ONE endpoint is colored, the reverse edge is emitted as
// well so the later batch deletion removes both directions of the edge.
struct DegOneEdges {
    HostDeviceVar<KCoreData> hd;
    vid_t *vertex_color;
    OPERATOR(Vertex &v, Edge &e) {
        vid_t src = v.id();
        vid_t dst = e.dst_id();
        if (vertex_color[src] || vertex_color[dst]) {
            int spot = atomicAdd(hd().counter, 1);
            hd().src[spot] = src;
            hd().dst[spot] = dst;
            if (!vertex_color[src] || !vertex_color[dst]) {
                int spot_rev = atomicAdd(hd().counter, 1);
                hd().src[spot_rev] = dst;
                hd().dst[spot_rev] = src;
            }
        }
    }
};
// Swaps all two-level queues, publishing any pending insertions and
// returning the algorithm to a runnable state.
void KCore::reset() {
    vqueue.swap();
    peel_vqueue.swap();
    active_queue.swap();
    iter_queue.swap();
}
// Erases a batch of `size` directed edges (src[i] -> dst[i]) from the graph.
// NOTE(review): the `print` parameter is accepted but unused. Despite the
// name, only the listed directions are erased -- callers appear to supply
// both directions explicitly (see DegOneEdges/ExtractSubgraph); confirm.
void oper_bidirect_batch(HornetGraph &hornet, vid_t *src, vid_t *dst, int size, bool print = false) {
    UpdatePtr ptr(size, src, dst);
    Update batch_update(ptr);
    hornet.erase(batch_update);
}
// One peeling round over the remaining graph: repeatedly peels vertices of
// degree <= peel (decrementing neighbors' degrees) until some vertex is
// peeled, then extracts the induced edges among the vertices peeled at the
// final level. Outputs the peel value reached (*max_peel) and the number of
// extracted edges (*batch_size); the edges themselves land in hd().src/dst.
void kcores_new(HornetGraph &hornet,
                HostDeviceVar<KCoreData>& hd,
                TwoLevelQueue<vid_t> &peel_queue,
                TwoLevelQueue<vid_t> &active_queue,
                TwoLevelQueue<vid_t> &iter_queue,
                load_balancing::VertexBased1 load_balancing,
                vid_t *deg,
                vid_t *vertex_pres,
                uint32_t *max_peel,
                int *batch_size) {
    // mark all non-isolated vertices present and snapshot their degrees
    forAllVertices(hornet, ActiveVertices { vertex_pres, deg, active_queue });
    active_queue.swap();
    int n_active = active_queue.size();
    uint32_t peel = 0;
    while (n_active > 0) {
        // peel vertices whose degree dropped to <= peel
        forAllVertices(hornet, active_queue,
                PeelVertices { vertex_pres, deg, peel, peel_queue, iter_queue} );
        iter_queue.swap();
        n_active -= iter_queue.size();
        if (iter_queue.size() == 0) {
            // nothing peeled this iteration: advance to the next peel level
            peel++;
            peel_queue.swap();
            if (n_active > 0) {
                forAllVertices(hornet, active_queue, RemovePres { vertex_pres });
            }
        } else {
            // propagate the removals to neighbors' degrees
            forAllEdges(hornet, iter_queue, DecrementDegree { deg }, load_balancing);
        }
    }
    gpu::memsetZero(hd().counter);  // reset counter.
    peel_queue.swap();
    // collect edges between vertices peeled at the final level
    forAllEdges(hornet, peel_queue,
            ExtractSubgraph { hd, vertex_pres }, load_balancing);
    *max_peel = peel;
    int size = 0;
    cudaMemcpy(&size, hd().counter, sizeof(int), cudaMemcpyDeviceToHost);
    //print<true>(hd().src, hd().dst, size, true);
    *batch_size = size;
}
// Writes the peeled edges as a JSON object {"src,dst": peel, ...} to
// output.txt. When sort_output is set, the parallel arrays are first sorted
// in place (zipped) with the Comp ordering.
void json_dump(vid_t *src, vid_t *dst, uint32_t *peel, uint32_t peel_edges, bool sort_output = false) {
    if (sort_output) {
        auto zipped = thrust::make_zip_iterator(thrust::make_tuple(src, dst, peel));
        thrust::sort(thrust::host, zipped, zipped + peel_edges, Comp());
    }
    std::ofstream output_file("output.txt");
    output_file << "{\n";
    for (uint32_t i = 0; i < peel_edges; i++) {
        output_file << "\"" << src[i] << "," << dst[i] << "\": " << peel[i];
        if (i + 1 < peel_edges) {
            output_file << ",";
        }
        output_file << "\n";
    }
    output_file << "}";
    output_file.close();
}
// Runs the full k-core decomposition: a degree-1 preprocessing pass followed
// by repeated peeling rounds (kcores_new) until every edge has been assigned
// a peel value. Edge/peel records are kept host-side for optional dumping.
void KCore::run() {
    // NOTE(review): OpenMP thread count is hard-coded for a specific machine;
    // consider deriving it from the environment instead.
    omp_set_num_threads(72);
    uint32_t ne = hornet.nE();
    std::cout << "ne: " << ne << std::endl;
    // Host-side record of every peeled edge and its peel (core) number.
    // (fixed) std::vector replaces raw new[] buffers that were never freed;
    // the unused `len` local was also removed.
    std::vector<vid_t> src(ne);
    std::vector<vid_t> dst(ne);
    std::vector<uint32_t> peel(ne);
    uint32_t peel_edges = 0;
    auto pres = vertex_pres;
    auto deg = vertex_deg;
    auto color = vertex_color;
    forAllnumV(hornet, [=] __device__ (int i){ pres[i] = 0; } );
    forAllnumV(hornet, [=] __device__ (int i){ deg[i] = 0; } );
    forAllnumV(hornet, [=] __device__ (int i){ color[i] = 0; } );
    Timer<DEVICE> TM;
    TM.start();
    /* Begin degree 1 vertex preprocessing optimization */
    // Find vertices of degree 1.
    forAllVertices(hornet, GetDegOne { vqueue, vertex_color });
    vqueue.swap();
    // Find the edges incident to these vertices.
    gpu::memsetZero(hd_data().counter);  // reset counter.
    forAllEdges(hornet, vqueue,
                    DegOneEdges { hd_data, vertex_color }, load_balancing);
    // Mark edges with peel 1.
    int peel_one_count = 0;
    cudaMemcpy(&peel_one_count, hd_data().counter, sizeof(int), cudaMemcpyDeviceToHost);
    #pragma omp parallel for
    for (int i = 0; i < peel_one_count; i++) {
        peel[i] = 1;
    }
    cudaMemcpy(src.data(), hd_data().src, peel_one_count * sizeof(vid_t),
               cudaMemcpyDeviceToHost);
    cudaMemcpy(dst.data(), hd_data().dst, peel_one_count * sizeof(vid_t),
               cudaMemcpyDeviceToHost);
    peel_edges = (uint32_t)peel_one_count;
    // Delete peel 1 edges.
    oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, peel_one_count);
    /* Begin running main kcore algorithm */
    while (peel_edges < ne) {
        uint32_t max_peel = 0;
        int batch_size = 0;
        kcores_new(hornet, hd_data, peel_vqueue, active_queue, iter_queue,
                   load_balancing, vertex_deg, vertex_pres, &max_peel, &batch_size);
        std::cout << "max_peel: " << max_peel << "\n";
        if (batch_size > 0) {
            cudaMemcpy(src.data() + peel_edges, hd_data().src,
                       batch_size * sizeof(vid_t), cudaMemcpyDeviceToHost);
            cudaMemcpy(dst.data() + peel_edges, hd_data().dst,
                       batch_size * sizeof(vid_t), cudaMemcpyDeviceToHost);
            #pragma omp parallel for
            for (int i = 0; i < batch_size; i++) {
                peel[peel_edges + i] = max_peel;
            }
            peel_edges += batch_size;
        }
        oper_bidirect_batch(hornet, hd_data().src, hd_data().dst, batch_size);
    }
    TM.stop();
    TM.print("KCore");
    //json_dump(src.data(), dst.data(), peel.data(), peel_edges);
}
// Releases the device buffers early (before destruction). All freed pointers
// are nulled so the destructor -- which frees the same buffers -- cannot
// double-free after an explicit release(). (Previously only src/dst were
// nulled; vertex_pres/color/deg would have been freed twice.)
// NOTE(review): assumes gpu::free on a null pointer is a no-op, as the
// original src/dst nulling already implied -- confirm.
void KCore::release() {
    gpu::free(vertex_pres);
    gpu::free(vertex_color);
    gpu::free(vertex_deg);
    gpu::free(hd_data().src);
    gpu::free(hd_data().dst);
    vertex_pres  = nullptr;
    vertex_color = nullptr;
    vertex_deg   = nullptr;
    hd_data().src = nullptr;
    hd_data().dst = nullptr;
}
} | the_stack |
// #include <THC/THCTensorMathReduce.cuh>
// #include <THC/THCTensorSort.cuh>
// #include <THC/THCThrustAllocator.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
// #include <ATen/cuda/DeviceUtils.cuh>
#include <type_traits>
// #include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <assert.h>
#include <cuda_fp16.h>
#include <cfloat>
#include <limits>
#include <stdint.h>
#include <cuda_fp16.h>
// #include <c10/macros/Macros.h>
using namespace at;
using namespace at::native;
// Smallest k such that (1 << k) >= value, i.e. ceil(log2(value));
// returns 0 for value <= 1.
int log2_ceil(int value) {
    int k = 0;
    while ((1 << k) < value) {
        ++k;
    }
    return k;
}
// Binary sum functor; plugged into warp_reduce as the combine operator.
template<typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a + b;
  }
};
// Binary maximum functor; plugged into warp_reduce as the combine operator.
template<typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
  }
};
// Butterfly (XOR-shuffle) reduction across the WARP_SIZE lanes of a warp,
// applied independently to each of the WARP_BATCH partial values in `sum`.
// After the loop every participating lane holds the full reduction result.
// NOTE(review): WARP_SHFL_XOR is assumed to be ATen's wrapper over the
// __shfl_xor(_sync) intrinsic; all WARP_SIZE lanes must participate.
template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce(acc_t* sum) {
    ReduceOp<acc_t> r;
    #pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        #pragma unroll
        for (int i = 0; i < WARP_BATCH; ++i) {
            acc_t b = WARP_SHFL_XOR(sum[i], offset, WARP_SIZE);
            sum[i] = r(sum[i], b);
        }
    }
}
// The softmax_warp_* methods perform softmax forward and backward propagation on samples spanning the fast dimension.
// Each sample contains element_count scalar elements. element_count can be any integer value <= 1024.
// The template arguments have the following meaning:
// One "WARP" works on one "BATCH". One "BATCH" contains "WARP_BATCH" samples.
// WARP_BATCH is equal to 1 when element_count is large, and > 1 when element_count is small.
// A "WARP" contains "C10_WARPS_SIZE" threads, these treads are guaranteed to belong to the same warp.
// This is important because it means only __shfl_ instructions are required for reductions.
// Note that this means WARP_SIZE must be a power of two and <= architecture warp size.
// CUDA warp size is 32 for all existing GPU architectures, but there is no guarantee this will not change for future arch.
// ROCm warp size is 64 for all currently ROCm-supported GPU architectures, but this may change for future archs.
// is_log_softmax is a flag indicating whether SoftMax or LogSoftMax should be computed.
// The template can be instantiated with any floating point type for the type arguments input_t, output_t and acc_t.
// This allows SoftMax to be fused with a cast immediately following the SoftMax.
// For instance:
// input_t=half, acc_t=float, output_t=half => read half tensor, float accumulators, write half tensor.
// input_t=half, acc_t=float, output_t=float => read half tensor, float accumulators, write float tensor.
// input_t_float, acc_t=float, output_t=half => read float tensor, float accumulators, write half tensor.
// Warp-per-sample softmax (or log-softmax) forward; see the block comment
// above for the WARP_BATCH / WARP_SIZE scheme. Numerically stabilized by
// subtracting the per-sample max before exponentiation.
// NOTE(review): local_idx = threadIdx.x assumes blockDim.x == WARP_SIZE
// (the backward kernel uses threadIdx.x % WARP_SIZE instead) -- confirm the
// launcher enforces this.
// NOTE(review): the base pointer advances by `stride` per sample, but the
// per-i offset inside a warp batch uses `element_count`; this mirrors the
// upstream PyTorch kernel -- confirm stride == element_count for WARP_BATCH > 1.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_forward(output_t *dst, const input_t *src, int batch_size, int stride, int element_count)
{
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_forward_kernel.
    constexpr int next_power_of_two = 1 << log2_elements;
    constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
    constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
    constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
    int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
    // batch_size might not be a multiple of WARP_BATCH. Check how
    // many batches have to computed within this WARP.
    int local_batches = batch_size - first_batch;
    if (local_batches > WARP_BATCH)
        local_batches = WARP_BATCH;
    // there might be multiple batches per warp. compute the index within the batch
    int local_idx = threadIdx.x;
    src += first_batch * stride + local_idx;
    dst += first_batch * stride + local_idx;
    // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
    // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
    // the nested loops.
    // This should have no impact on performance because the loops are unrolled anyway.
    // load data from global memory; out-of-range slots are padded with -inf
    // so they contribute nothing to the max or the sum of exponentials
    acc_t elements[WARP_BATCH][WARP_ITERATIONS];
    for (int i = 0;  i < WARP_BATCH;  ++i) {
        int batch_element_count = (i >= local_batches) ? 0 : element_count;
        for (int it = 0;  it < WARP_ITERATIONS;  ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < batch_element_count) {
                elements[i][it] = src[i*element_count+it*WARP_SIZE];
            } else {
                elements[i][it] = -std::numeric_limits<acc_t>::infinity();
            }
        }
    }
    // compute max_value (thread-local, then warp-wide)
    acc_t max_value[WARP_BATCH];
    #pragma unroll
    for (int i = 0;  i < WARP_BATCH;  ++i) {
        max_value[i] = elements[i][0];
        #pragma unroll
        for (int it = 1;  it < WARP_ITERATIONS;  ++it) {
            max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
    // sum of exp(x - max); for softmax the shifted exponentials are kept in
    // `elements` for reuse in the normalization step below
    acc_t sum[WARP_BATCH] { 0.0f };
    #pragma unroll
    for (int i = 0;  i < WARP_BATCH;  ++i) {
        #pragma unroll
        for (int it = 0;  it < WARP_ITERATIONS;  ++it) {
            if (is_log_softmax) {
                sum[i] += std::exp(elements[i][it] - max_value[i]);
            } else {
                elements[i][it] = std::exp(elements[i][it] - max_value[i]);
                sum[i] += elements[i][it];
            }
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
    // store result: x - (max + log(sum)) for log-softmax, exp-shifted / sum
    // for softmax
    #pragma unroll
    for (int i = 0;  i < WARP_BATCH;  ++i) {
        if (i >= local_batches)
            break;
        if (is_log_softmax) sum[i] = max_value[i] + std::log(sum[i]);
        #pragma unroll
        for (int it = 0;  it < WARP_ITERATIONS;  ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < element_count) {
                if (is_log_softmax) {
                    dst[i*element_count+it*WARP_SIZE] = elements[i][it] - sum[i];
                } else {
                    dst[i*element_count+it*WARP_SIZE] = elements[i][it] / sum[i];
                }
            } else {
                break;
            }
        }
    }
}
// Warp-level softmax/log-softmax backward kernel.
//
// Each warp (WARP_SIZE lanes along threadIdx.x) owns WARP_BATCH rows; every
// lane keeps WARP_ITERATIONS elements of each row in registers.  The kernel
// reduces sum(grad) per row across the warp, then writes
//   log-softmax: gradInput = grad - exp(output) * sum(grad)
//   softmax:     gradInput = grad - output * sum(grad)
// (for plain softmax the caller supplies grad already multiplied by output,
// see SoftMaxBackwardEpilogue1's note).
//
// NOTE(review): loads/stores index rows with `i*element_count` while the base
// pointer offset uses `stride`; this is only consistent when
// stride == element_count, which the dispatchers below guarantee by passing
// dim_size for both — confirm before reusing with a different stride.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *output, int batch_size, int stride, int element_count)
{
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_backward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  // Small rows (<= 128 elements) are cheap enough that each warp handles two.
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
  // batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to be computed within this WARP.
  int local_batches = batch_size - first_batch;
  if (local_batches > WARP_BATCH)
    local_batches = WARP_BATCH;
  // there might be multiple batches per warp. compute the index within the batch
  int local_idx = threadIdx.x % WARP_SIZE;
  // the first element to process by the current thread
  int thread_offset = first_batch * stride + local_idx;
  grad += thread_offset;
  output += thread_offset;
  gradInput += thread_offset;
  // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
  // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
  // the nested loops.
  // This should have no impact on performance because the loops are unrolled anyway.
  // load data from global memory; out-of-range slots are zero-filled so they
  // contribute nothing to the sum below.
  acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
  acc_t output_reg[WARP_BATCH][WARP_ITERATIONS];
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : element_count;
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < batch_element_count) {
        grad_reg[i][it] = grad[i*element_count+it*WARP_SIZE];
        output_reg[i][it] = output[i*element_count+it*WARP_SIZE];
      } else {
        grad_reg[i][it] = acc_t(0);
        output_reg[i][it] = acc_t(0);
      }
    }
  }
  // Per-lane partial sums of grad over each row.
  acc_t sum[WARP_BATCH];
  #pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    sum[i] = grad_reg[i][0];
    #pragma unroll
    for (int it = 1; it < WARP_ITERATIONS; ++it) {
      sum[i] += grad_reg[i][it];
    }
  }
  // Combine the partial sums across the warp's lanes.
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
  // store result
  #pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches)
      break;
    #pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      int element_index = local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
        // compute gradients
        if (is_log_softmax) {
          gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]);
        } else {
          gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - output_reg[i][it] * sum[i]);
        }
      }
    }
  }
}
// Host-side dispatcher for the warp-level softmax forward kernel.
//
// Derives the launch geometry from softmax_elements and instantiates
// softmax_warp_forward with the matching compile-time log2_elements.
// Supports 0 <= softmax_elements <= 1024; softmax_elements == 0 is a no-op.
//
// dst/src:                 output/input buffers holding batch_count rows
// softmax_elements:        row length
// softmax_elements_stride: distance between consecutive rows
// batch_count:             number of rows
template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_softmax_forward1(output_t *dst, const input_t *src, int softmax_elements, int softmax_elements_stride, int batch_count)
{
  TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 1024);
  if (softmax_elements == 0) {
    return;
  }
  int log2_elements = log2_ceil(softmax_elements);
  const int next_power_of_two = 1 << log2_elements;
  // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
  int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
  int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
  // use 128 threads per block to maximize gpu utilization
  constexpr int threads_per_block = 128;
  int warps_per_block = (threads_per_block / warp_size);
  int batches_per_block = warps_per_block * batches_per_warp;
  int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
  dim3 threads(warp_size, warps_per_block, 1);
  // log2_elements is a runtime value but the kernel needs it at compile time,
  // so expand one case per supported power of two (1..1024).  The macro keeps
  // the eleven cases from drifting apart.  The default case is unreachable
  // given the assert above.
  switch (log2_elements) {
#define LAUNCH_SOFTMAX_WARP_FORWARD(L2E)                                     \
    case L2E:                                                                \
      softmax_warp_forward<input_t, output_t, acc_t, L2E, is_log_softmax>    \
        <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(          \
          dst, src, batch_count, softmax_elements_stride, softmax_elements); \
      break;
    LAUNCH_SOFTMAX_WARP_FORWARD(0)   // 1
    LAUNCH_SOFTMAX_WARP_FORWARD(1)   // 2
    LAUNCH_SOFTMAX_WARP_FORWARD(2)   // 4
    LAUNCH_SOFTMAX_WARP_FORWARD(3)   // 8
    LAUNCH_SOFTMAX_WARP_FORWARD(4)   // 16
    LAUNCH_SOFTMAX_WARP_FORWARD(5)   // 32
    LAUNCH_SOFTMAX_WARP_FORWARD(6)   // 64
    LAUNCH_SOFTMAX_WARP_FORWARD(7)   // 128
    LAUNCH_SOFTMAX_WARP_FORWARD(8)   // 256
    LAUNCH_SOFTMAX_WARP_FORWARD(9)   // 512
    LAUNCH_SOFTMAX_WARP_FORWARD(10)  // 1024
#undef LAUNCH_SOFTMAX_WARP_FORWARD
    default:
      break;
  }
  // Surface launch-configuration errors immediately rather than at the next
  // synchronizing call.
  AT_CUDA_CHECK(cudaGetLastError());
}
// Host-side dispatcher for the warp-level softmax backward kernel.
//
// Mirrors dispatch_softmax_forward1: derives launch geometry from
// softmax_elements and instantiates softmax_warp_backward with the matching
// compile-time log2_elements.  Supports 0 <= softmax_elements <= 1024;
// softmax_elements == 0 is a no-op.
//
// grad_input:              gradient w.r.t. the input (written)
// grad/output:             upstream gradient and forward output (read)
// softmax_elements:        row length
// softmax_elements_stride: distance between consecutive rows
// batch_count:             number of rows
template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_softmax_backward1(output_t *grad_input, const input_t *grad, const input_t *output, int softmax_elements, int softmax_elements_stride, int batch_count)
{
  TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 1024);
  if (softmax_elements == 0) {
    return;
  }
  int log2_elements = log2_ceil(softmax_elements);
  const int next_power_of_two = 1 << log2_elements;
  // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
  int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
  int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
  // use 128 threads per block to maximize gpu utilization
  constexpr int threads_per_block = 128;
  int warps_per_block = (threads_per_block / warp_size);
  int batches_per_block = warps_per_block * batches_per_warp;
  int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
  dim3 threads(warp_size, warps_per_block, 1);
  // One case per supported power of two (1..1024); the macro keeps the eleven
  // otherwise-identical launches in sync.  The default case is unreachable
  // given the assert above.
  switch (log2_elements) {
#define LAUNCH_SOFTMAX_WARP_BACKWARD(L2E)                                  \
    case L2E:                                                              \
      softmax_warp_backward<input_t, output_t, acc_t, L2E, is_log_softmax> \
        <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(        \
          grad_input, grad, output, batch_count,                           \
          softmax_elements_stride, softmax_elements);                      \
      break;
    LAUNCH_SOFTMAX_WARP_BACKWARD(0)   // 1
    LAUNCH_SOFTMAX_WARP_BACKWARD(1)   // 2
    LAUNCH_SOFTMAX_WARP_BACKWARD(2)   // 4
    LAUNCH_SOFTMAX_WARP_BACKWARD(3)   // 8
    LAUNCH_SOFTMAX_WARP_BACKWARD(4)   // 16
    LAUNCH_SOFTMAX_WARP_BACKWARD(5)   // 32
    LAUNCH_SOFTMAX_WARP_BACKWARD(6)   // 64
    LAUNCH_SOFTMAX_WARP_BACKWARD(7)   // 128
    LAUNCH_SOFTMAX_WARP_BACKWARD(8)   // 256
    LAUNCH_SOFTMAX_WARP_BACKWARD(9)   // 512
    LAUNCH_SOFTMAX_WARP_BACKWARD(10)  // 1024
#undef LAUNCH_SOFTMAX_WARP_BACKWARD
    default:
      break;
  }
  // Surface launch-configuration errors immediately rather than at the next
  // synchronizing call.
  AT_CUDA_CHECK(cudaGetLastError());
}
// Per-element log-softmax: out = x - (max + log(sum)).  The log-normalizer is
// folded into a single constant at construction time so the per-element call
// is just a subtraction.
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue1 {
  __device__ __forceinline__ LogSoftMaxForwardEpilogue1(AccumT max_input, AccumT sum)
    : logsum(max_input + std::log(sum)) {}
  __device__ __forceinline__ OutT operator()(T input) const {
    const AccumT shifted = input - logsum;
    return static_cast<OutT>(shifted);
  }
  const AccumT logsum;  // max + log(sum(exp(x - max)))
};
// Per-element log-softmax gradient: gI = gradOutput - exp(output) * sum,
// where sum is the row-wide sum of gradOutput computed by the caller.
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue1 {
  __device__ __forceinline__ LogSoftMaxBackwardEpilogue1(AccumT sum)
    : sum(sum) {}
  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    // output holds log-probabilities here, so exp(output) recovers softmax(x).
    const AccumT prob = std::exp(static_cast<AccumT>(output));
    return static_cast<T>(gradOutput - prob * sum);
  }
  const AccumT sum;  // row-wide sum of gradOutput
};
// Per-element softmax normalization: out = exp(x - max) / sum.
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue1 {
  __device__ __forceinline__ SoftMaxForwardEpilogue1(AccumT max_input, AccumT sum)
    : max_input(max_input)
    , sum(sum) {}
  __device__ __forceinline__ OutT operator()(T input) const {
    const AccumT shifted = input - max_input;
    return static_cast<OutT>(std::exp(shifted) / sum);
  }
  const AccumT max_input;  // row maximum, subtracted for numerical stability
  const AccumT sum;        // sum of exp(x - max) over the row
};
// Per-element softmax gradient: gI = gradOutput - output * sum.
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue1 {
  __device__ __forceinline__ SoftMaxBackwardEpilogue1(AccumT sum)
    : sum(sum) {}
  // XXX: gradOutput that we get here is really gradOutput * output
  // Look for cmul in SoftMax_updateGradInput
  __device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
    const AccumT correction = output * sum;
    return static_cast<T>(gradOutput - correction);
  }
  const AccumT sum;  // row-wide sum of (gradOutput * output)
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimension:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize inner dimension over y axis and outer over x.
// Pick a 2D grid for the spatial kernels: grid y tiles the inner dimension,
// grid x the outer one.  The total block count is kept near
// max_active_blocks so the device stays busy without gross oversubscription.
inline dim3 SpatialSoftMax_getGridSize(
    dim3 block, uint32_t max_active_blocks,
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
  // Cover the inner dimension first: ceil(inner_size / block.y) blocks,
  // capped at what the device can keep resident.
  uint32_t y_blocks = (inner_size + block.y - 1) / block.y;
  if (y_blocks > max_active_blocks)
    y_blocks = max_active_blocks;
  // Spend the remaining occupancy on the outer dimension; slightly exceeding
  // max_active_blocks is harmless.
  uint32_t x_blocks = (max_active_blocks + y_blocks - 1) / y_blocks;
  if (x_blocks > outer_size)
    x_blocks = outer_size;
  return dim3(x_blocks, y_blocks);
}
// Cap on threads per block used by the block-size heuristics below.
const int max_threads = 1024;
// Pick a 2D block for the spatial kernels: threadIdx.y walks the inner
// dimension, threadIdx.x cooperates on the reduction over dim.  threadIdx.x
// stays at 1 unless the inner dimension is small (<= 64 threads) and dim is
// large enough (>= 64) for a parallel reduction to pay off.
inline dim3 SpatialSoftMax_getBlockSize1(
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
  uint32_t threads_y = static_cast<uint32_t>(inner_size);
  threads_y = std::min(threads_y, static_cast<uint32_t>(max_threads));
  uint32_t threads_x = 1;
  if (threads_y <= 64 && dim_size >= 64) {
    // Grow threads_x by powers of two while the block stays within
    // max_threads and threads_x has not overshot dim_size, then back off one
    // step to undo the final doubling.
    while (threads_y * threads_x <= max_threads && threads_x <= dim_size)
      threads_x *= 2;
    threads_x /= 2;
  }
  return dim3(threads_x, threads_y);
}
// Compute grid/block geometry and dynamic shared-memory size for the spatial
// softmax kernels.  Shared memory is only needed when block.x > 1, because
// the kernels reduce across threadIdx.x through shared memory
// (spatialBlockReduceX needs one accscalar_t slot per thread).
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes1(
    Kernel k,
    uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
    dim3& grid, dim3& block, uint32_t& smem_size) {
  block = SpatialSoftMax_getBlockSize1(outer_size, dim_size, inner_size);
  uint32_t block_threads = block.x * block.y;
  smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
  int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
  // XXX HIP function signature is not compatible yet.
  uint32_t max_blocks;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
                                                k, block_threads, smem_size);
  max_active_blocks = max_blocks;
#else
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
                                                k, block_threads, smem_size);
#endif
  // Scale per-SM occupancy to the whole device before sizing the grid.
  max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
  grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
// Pick a 1D block for the non-spatial kernels: the smallest power of two
// reaching min(dim_size / ILP, max_threads), but never below a full warp.
inline dim3 SoftMax_getBlockSize1(int ILP, uint64_t dim_size) {
  const uint64_t target = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
  uint64_t threads = 1;
  while (threads < target) {
    threads *= 2;
  }
  // Launch at least a single warp - the kernel assumes that.
  if (threads < static_cast<uint64_t>(C10_WARP_SIZE)) {
    threads = static_cast<uint64_t>(C10_WARP_SIZE);
  }
  return dim3(threads);
}
// template<typename T>
// struct Add {
// __device__ __forceinline__ T operator()(T a, T b) const {
// return a + b;
// }
// };
// template<typename T>
// struct Max {
// __device__ __forceinline__ T operator()(T a, T b) const {
// return a < b ? b : a;
// }
// };
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
// Reduce `val` across the threads that share threadIdx.y, using one row of
// `shared` (blockDim.x entries per y-slice) as scratch.  Contains
// __syncthreads(), so every thread of the block must call it; all threads of
// a y-slice return that slice's reduced value.
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
  ReduceOp<T> r;
  // Each y-slice gets its own contiguous row of the scratch buffer.
  shared += threadIdx.y * blockDim.x;
  // Guard against a previous caller still reading the scratch space.
  __syncthreads();
  shared[threadIdx.x] = val;
  // NOTE: loop starts with __syncthreads()
  // Tree reduction: halve the active range each step.
  int offset = blockDim.x / 2;
  while (offset > 0) {
    __syncthreads();
    if (threadIdx.x < offset)
      shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
    offset /= 2;
  }
  __syncthreads();
  return shared[0];
}
// Spatial softmax forward: input viewed as outer x dim x inner with softmax
// taken over dim.  Grid x tiles the outer dimension; grid y together with
// threadIdx.y tiles the inner one; threadIdx.x cooperates on the reductions
// over dim when blockDim.x > 1 (in that case dynamic shared memory must hold
// blockDim.x * blockDim.y accscalar_t values for spatialBlockReduceX).
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward1(
    outscalar_t *output, scalar_t *input,
    uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  const uint32_t outer_stride = inner_size * dim_size;
  const uint32_t dim_stride = inner_size;
  for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
    const uint32_t outer_offset = outer_index * outer_stride;
    for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
      const uint32_t data_offset = outer_offset + inner_index;
      ////////////////////////////////////////////////////////////
      // These two blocks are really equivalent, but specializing on
      // blockDim.x == 1 makes the kernel faster when it's unused.
      // I didn't want to thread an extra template parameter, and nvcc
      // seems to be smart enough to hoist the if outside of the loops.
      ////////////////////////////////////////////////////////////
      if (blockDim.x > 1) {
        // Pass 1: maximum over dim (shift for numerical stability).
        accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
          const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
          max_input = Max<accscalar_t>()(max_input, value);
        }
        max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
        // Pass 2: sum of exp(x - max) over dim.
        accscalar_t sum = 0;
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
          sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
                 - max_input);
        sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
        // Pass 3: normalize each element through the epilogue.
        Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
          output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
      } else {
        // Same three passes without the shared-memory reductions
        // (blockDim.x == 1, so each thread owns a full dim slice).
        accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
          const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
          max_input = Max<accscalar_t>()(max_input, value);
        }
        accscalar_t sum = 0;
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
          sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
                 - max_input);
        Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
          output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
      }
    }
  }
}
// Spatial softmax backward: same outer x dim x inner layout and grid/block
// mapping as cunn_SpatialSoftMaxForward1.  Reduces sum(gradOutput) over dim
// for each (outer, inner) slice, then applies the backward Epilogue per
// element.  Dynamic shared memory is only used when blockDim.x > 1.
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward1(
    scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
    uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  const uint32_t outer_stride = inner_size * dim_size;
  const uint32_t dim_stride = inner_size;
  for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
    const uint32_t outer_offset = outer_index * outer_stride;
    for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
      const uint32_t data_offset = outer_offset + inner_index;
      // See the comment in forward kernel
      if (blockDim.x > 1) {
        // Cooperative path: partial sums per thread, combined across
        // threadIdx.x through shared memory.
        accscalar_t sum = 0;
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
          sum += gradOutput[data_offset + d * dim_stride];
        sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
        Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
        for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
          gradInput[data_offset + d * dim_stride] =
            epilogue(gradOutput[data_offset + d * dim_stride],
                    output[data_offset + d * dim_stride]);
        }
      } else {
        // Single-thread path: plain serial loops over dim.
        accscalar_t sum = 0;
        for (uint32_t d = 0; d < dim_size; d++)
          sum += gradOutput[data_offset + d * dim_stride];
        Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
        for (uint32_t d = 0; d < dim_size; d++) {
          gradInput[data_offset + d * dim_stride] =
            epilogue(gradOutput[data_offset + d * dim_stride],
                    output[data_offset + d * dim_stride]);
        }
      }
    }
  }
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
// Reduction functor for ilpReduce: folds one element into a running maximum.
template <typename T, typename AccumT>
struct MaxFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT acc, T v) const {
    return ::max(acc, (AccumT)v);
  }
};
// Reduction functor for ilpReduce: folds one element into a running sum.
template<typename T, typename AccumT>
struct AddFloat
{
  __device__ __forceinline__ AccumT operator()(AccumT acc, T v) const {
    return acc + v;
  }
};
// Reduction functor for ilpReduce: folds exp(v - max_k) into a running sum.
// max_k is the row maximum, subtracted for numerical stability.
template<typename T, typename AccumT>
struct SumExpFloat
{
  __device__ __forceinline__ SumExpFloat(AccumT v)
    : max_k(v) {}
  __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
    const AccumT shifted = v - max_k;
    return sum + std::exp(shifted);
  }
  const AccumT max_k;  // row maximum captured at construction
};
// Block-wide reduction through shared memory.  `smem` must hold blockDim.x
// AccumT values; every thread of the block must call this (it contains
// __syncthreads()), and all threads return the reduced value.
// NOTE(review): the per-warp stage indexes smem in C10_WARP_SIZE-sized rows,
// which presumes blockDim.x is a multiple of C10_WARP_SIZE — the launch
// helpers in this file produce such blocks; confirm before reusing elsewhere.
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
            const Reduction<AccumT>& r,
            AccumT defaultVal)
{
  // To avoid RaW races from chaining blockReduce calls together, we need a sync here
  __syncthreads();
  smem[threadIdx.x] = val;
  __syncthreads();
  AccumT warpVal = defaultVal;
  // First warp will perform per-warp reductions for the remaining warps
  // One mask bit per warp-sized row being reduced below.
  uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1;
  if (threadIdx.x < C10_WARP_SIZE) {
    int lane = threadIdx.x % C10_WARP_SIZE;
    if (lane < blockDim.x / C10_WARP_SIZE) {
      // Lane `lane` serially folds the C10_WARP_SIZE entries of row `lane`.
      #pragma unroll
      for (int i = 0; i < C10_WARP_SIZE; ++i) {
        warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]);
      }
#ifndef __HIP_PLATFORM_HCC__
      __syncwarp(mask);
#endif
      smem[lane] = warpVal;
    }
  }
  __syncthreads();
  // First thread will perform a reduction of the above per-warp reductions
  AccumT blockVal = defaultVal;
  if (threadIdx.x == 0) {
    for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
      blockVal = r(blockVal, smem[i]);
    }
    smem[0] = blockVal;
  }
  // Sync and broadcast
  __syncthreads();
  return smem[0];
}
// Per-thread strided reduction over `data[0..size)` with ILP-way unrolling:
// this thread folds elements data[threadIdx.x], data[threadIdx.x+blockDim.x],
// ... into `defaultVal` using `r`.  The main loop loads ILP elements into
// registers per iteration; the remainder is folded one element at a time.
// Returns only this thread's partial — combine across the block afterwards
// (e.g. with blockReduce).
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(T* data,
          int size,
          const Reduction<T, AccumT>& r,
          AccumT defaultVal)
{
  AccumT acc = defaultVal;
  const int tail = size % (ILP * blockDim.x);
  const int body_end = size - tail;
  int idx = threadIdx.x;
  // Unrolled body: batch the loads, then fold them.
  while (idx < body_end) {
    T buf[ILP];
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      buf[j] = data[idx + j * blockDim.x];
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      acc = r(acc, buf[j]);
    idx += blockDim.x * ILP;
  }
  // Remainder: one element per stride.
  for (; idx < size; idx += blockDim.x)
    acc = r(acc, data[idx]);
  return acc;
}
// Forward kernel for the non-spatial case (inner_size == 1).  One block per
// row of `classes` elements: block-wide max, block-wide sum of exp(x - max),
// then the Epilogue normalizes each element.  Dynamic shared memory must
// hold blockDim.x accscalar_t values for blockReduce.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward1(outscalar_t *output, scalar_t *input, int classes)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  // forward pointers to batch[blockIdx.x]
  // each block handles a sample in the mini-batch
  input += blockIdx.x * classes;
  output += blockIdx.x * classes;
  // find the max
  accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
      input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
  accscalar_t max_k = blockReduce<Max, accscalar_t>(
      sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
  // reduce all values
  accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
      input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
  accscalar_t sumAll = blockReduce<Add, accscalar_t>(
      sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
  Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
  // Main loop: each thread processes ILP elements per iteration; the tail of
  // classes % (ILP * blockDim.x) elements is finished one at a time below.
  int offset = threadIdx.x;
  int last = classes % (ILP * blockDim.x);
  for (; offset < classes - last; offset += blockDim.x * ILP) {
    scalar_t tmp[ILP];
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      tmp[j] = input[offset + j * blockDim.x];
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      output[offset + j * blockDim.x] = epilogue(tmp[j]);
  }
  for (; offset < classes; offset += blockDim.x)
    output[offset] = epilogue(input[offset]);
}
// Backward kernel for the non-spatial case (inner_size == 1).  One block per
// row of `classes` elements: block-wide sum of gradOutput, then the Epilogue
// produces the gradient per element.  Dynamic shared memory must hold
// blockDim.x accscalar_t values for blockReduce.
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward1(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  // Each block handles one sample of the mini-batch.
  gradInput += blockIdx.x * classes;
  output += blockIdx.x * classes;
  gradOutput += blockIdx.x * classes;
  // Use the ILP template parameter for the reduction's unroll factor (it was
  // hard-coded to 4 before, silently ignoring the caller's ILP and diverging
  // from cunn_SoftMaxForward1).
  accscalar_t threadSum = ilpReduce<AddFloat, ILP, outscalar_t, accscalar_t>(
      gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
  accscalar_t sum_k = blockReduce<Add, accscalar_t>(
      sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
  Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
  // Main loop: each thread processes ILP elements per iteration; the tail of
  // classes % (ILP * blockDim.x) elements is finished one at a time below.
  int offset = threadIdx.x;
  int last = classes % (ILP * blockDim.x);
  for (; offset < classes - last; offset += blockDim.x * ILP) {
    outscalar_t tmpGradOutput[ILP];
    outscalar_t tmpOutput[ILP];
    #pragma unroll
    for (int j = 0; j < ILP; ++j) {
      tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
      tmpOutput[j] = output[offset + j * blockDim.x];
    }
    #pragma unroll
    for (int j = 0; j < ILP; ++j)
      gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
  }
  for (; offset < classes; offset += blockDim.x)
    gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
// Host entry for softmax/log-softmax forward.  Flattens the input around
// `dim_` into outer x dim x inner and routes to one of three kernels:
//  - warp kernel (dispatch_softmax_forward1) for inner_size == 1 and small
//    rows (<= 1024 elements and <= 4096 bytes),
//  - per-row block kernel (cunn_SoftMaxForward1) for inner_size == 1 and
//    larger rows,
//  - spatial kernel (cunn_SpatialSoftMaxForward1) when inner_size > 1.
// half_to_float requests a Half input / Float output (Half input required).
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
torch::Tensor host_softmax1(const torch::Tensor & input_, const int64_t dim_, const bool half_to_float){
  if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only");
  auto input = input_.contiguous();
  torch::Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
  // Treat a 0-dim tensor as a 1-element vector.
  if (input.dim() == 0) input = input.view(1);
  int64_t dim = at::maybe_wrap_dim(dim_, input.dim());
  TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
  int64_t outer_size = 1;
  int64_t dim_size = input.size(dim);
  // Empty tensors skip the launch entirely and return the empty output.
  if (input.numel() > 0) {
    int64_t inner_size = 1;
    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    for (int64_t i = 0; i < dim; ++i)
      outer_size *= input.size(i);
    for (int64_t i = dim + 1; i < input.dim(); ++i)
      inner_size *= input.size(i);
    // This kernel spawns a block per each element in the batch.
    // XXX: it assumes that inner_size == 1
    if (inner_size == 1) {
      const int ILP = 2;
      dim3 grid(outer_size);
      dim3 block = SoftMax_getBlockSize1(ILP, dim_size);
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
        AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] {
          using accscalar_t = acc_type<scalar_t, true>;
          if (!half_to_float) {
            // Small rows: warp-per-row kernel; larger rows: block-per-row
            // kernel with shared-memory reductions.
            if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
              dispatch_softmax_forward1<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
                  output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
            } else {
              cunn_SoftMaxForward1<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
                <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
                  output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size
              );
            }
          } else {
            // Same split, but the kernels write accscalar_t (Float) output.
            if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
              dispatch_softmax_forward1<scalar_t, accscalar_t, accscalar_t, is_log_softmax>(
                  output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
            } else {
              cunn_SoftMaxForward1<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
                <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
                  output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size
              );
            }
          }
        });
      });
    // This kernel runs in a 2D grid, where each application along y dimension has a fixed
    // outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size.
    // Reductions over dim are done in a single-threaded manner.
    } else {
      uint32_t smem_size;
      dim3 grid, block;
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
        AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax", [&] {
          using accscalar_t = acc_type<scalar_t, true>;
          if (!half_to_float) {
            SpatialSoftMax_getLaunchSizes1<accscalar_t>(
                &cunn_SpatialSoftMaxForward1<scalar_t, accscalar_t, scalar_t, Epilogue>,
                outer_size, dim_size, inner_size,
                grid, block, smem_size);
            cunn_SpatialSoftMaxForward1<scalar_t, accscalar_t, scalar_t, Epilogue>
              <<<grid, block, smem_size, stream>>>(
                output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
            );
          } else {
            SpatialSoftMax_getLaunchSizes1<accscalar_t>(
                &cunn_SpatialSoftMaxForward1<scalar_t, accscalar_t, accscalar_t, Epilogue>,
                outer_size, dim_size, inner_size,
                grid, block, smem_size);
            cunn_SpatialSoftMaxForward1<scalar_t, accscalar_t, accscalar_t, Epilogue>
              <<<grid, block, smem_size, stream>>>(
                output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
            );
          }
        });
      });
    }
    // Catch launch errors from whichever kernel ran above.
    AT_CUDA_CHECK(cudaGetLastError());
  }
  return output;
}
// Host entry for softmax/log-softmax backward.  Flattens around `dim_` into
// outer x dim x inner and routes to the warp, block-per-row, or spatial
// backward kernel with the same size thresholds as host_softmax1.
// half_to_float means grad/output are Float while the returned gradient gI
// is Half.
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
torch::Tensor host_softmax_backward1(const torch::Tensor &grad_, const torch::Tensor &output_, int64_t dim_, bool half_to_float){
  int64_t dim = at::maybe_wrap_dim(dim_, grad_.dim());
  torch::Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  // Empty gradient: nothing to compute.
  if (grad_.numel() == 0) {
    return gI;
  }
  auto grad = grad_.contiguous();
  static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
  // Treat 0-dim tensors as 1-element vectors.
  if (grad.dim() == 0) grad = grad.view(1);
  TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
  auto output = output_.contiguous();
  if (output.dim() == 0) output = output.view(1);
  int64_t outer_size = 1;
  int64_t dim_size = output.size(dim);
  int64_t inner_size = 1;
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= output.size(i);
  for (int64_t i = dim + 1; i < output.dim(); ++i)
    inner_size *= output.size(i);
  // See descriptions of kernels above.
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  if (inner_size == 1) {
    const int ILP = 2;
    dim3 grid(outer_size);
    dim3 block = SoftMax_getBlockSize1(ILP, dim_size);
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        if (!half_to_float) {
          // Small rows: warp-per-row kernel; larger rows: block-per-row kernel.
          if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
            dispatch_softmax_backward1<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
                gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
          } else {
            cunn_SoftMaxBackward1<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
              <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
                gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size
            );
          }
        } else {
          // Mixed dtypes: grad/output are read as accscalar_t (Float).
          if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
            dispatch_softmax_backward1<accscalar_t, scalar_t, accscalar_t, is_log_softmax>(
                gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size);
          } else {
            cunn_SoftMaxBackward1<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
              <<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
                gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size
            );
          }
        }
      });
    });
  } else {
    uint32_t smem_size;
    dim3 grid, block;
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "host_softmax_backward", [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        if (!half_to_float) {
          SpatialSoftMax_getLaunchSizes1<accscalar_t>(
              &cunn_SpatialSoftMaxBackward1<scalar_t, accscalar_t, scalar_t, Epilogue>,
              outer_size, dim_size, inner_size,
              grid, block, smem_size);
          cunn_SpatialSoftMaxBackward1<scalar_t, accscalar_t, scalar_t, Epilogue>
            <<<grid, block, smem_size, stream>>>(
              gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(),
              outer_size, dim_size, inner_size
          );
        } else {
          SpatialSoftMax_getLaunchSizes1<accscalar_t>(
              &cunn_SpatialSoftMaxBackward1<scalar_t, accscalar_t, accscalar_t, Epilogue>,
              outer_size, dim_size, inner_size,
              grid, block, smem_size);
          cunn_SpatialSoftMaxBackward1<scalar_t, accscalar_t, accscalar_t, Epilogue>
            <<<grid, block, smem_size, stream>>>(
              gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(),
              outer_size, dim_size, inner_size
          );
        }
      });
    });
  }
  // Catch launch errors from whichever kernel ran above.
  AT_CUDA_CHECK(cudaGetLastError());
  return gI;
}
// Host entry point for the forward log-softmax CUDA path.
// Delegates to the shared host_softmax1 driver with the log-softmax
// epilogue and is_log_softmax = true.
torch::Tensor log_softmax_cuda1(const torch::Tensor &input, const int64_t dim, const bool half_to_float){
    torch::Tensor result = host_softmax1<LogSoftMaxForwardEpilogue1, true>(input, dim, half_to_float);
    return result;
}
// Host entry point for the log-softmax backward CUDA path.
// Mixed precision is detected from the dtypes: grad may be float while the
// forward input was half; only that exact combination is accepted.
torch::Tensor log_softmax_backward_cuda1(const torch::Tensor &grad, const torch::Tensor &output, int64_t dim, const torch::Tensor &input){
    const bool half_to_float = (grad.scalar_type() != input.scalar_type());
    if (half_to_float) {
        const bool supported_combo =
            grad.scalar_type() == ScalarType::Float &&
            input.scalar_type() == ScalarType::Half;
        AT_ASSERTM(supported_combo, "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
    }
    return host_softmax_backward1<LogSoftMaxBackwardEpilogue1, true>(grad, output, dim, half_to_float);
}
// Host entry point for the forward (plain) softmax CUDA path.
// Delegates to the shared host_softmax1 driver with the softmax epilogue
// and is_log_softmax = false.
torch::Tensor softmax_cuda1(const torch::Tensor &input, const int64_t dim, const bool half_to_float){
    auto out = host_softmax1<SoftMaxForwardEpilogue1, false>(input, dim, half_to_float);
    return out;
}
// Backward pass for (plain) softmax. `tmp` appears intended to already hold
// grad * output (see the commented-out derivation in the original source) —
// TODO confirm against the callers. It is consumed together with `output`
// along dimension `dim` by the shared backward driver.
torch::Tensor do_softmax_backward1(torch::Tensor &tmp, torch::Tensor &output, int64_t dim, bool half_to_float) {
    return host_softmax_backward1<SoftMaxBackwardEpilogue1, false>(tmp, output, dim, half_to_float);
}
#include "hipcommon.h"
#define _USE_MATH_DEFINES
#include <float.h>
#include "comm.h"
using namespace std;
#include "kernels_common.h"
#include "kernels_compact_storage.h"
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing. The user is allowed to specify
// the size of the input data in megabytes if they are not using a
// predefined size (i.e. the -s option).
//
// Arguments:
// op: the options parser / parameter database
//
// Programmer: Anthony Danalis
// Creation: February 04, 2011
// Returns: nothing
//
// ****************************************************************************
// Registers the QTC-specific command-line options with their defaults.
// See the header comment above for the author/date provenance.
void addBenchmarkSpecOptions(OptionParser &op){
    op.addOption("PointCount", OPT_INT,   "4096", "point count (default: 4096)");
    op.addOption("Threshold",  OPT_FLOAT, "1",    "cluster diameter threshold (default: 1)");
    op.addOption("SaveOutput", OPT_BOOL,  "",     "Save output results in files (default: false)");
    op.addOption("Verbose",    OPT_BOOL,  "",     "Print cluster cardinalities (default: false)");
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Calls single precision and, if viable, double precision QT-Clustering
// benchmark.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Anthony Danalis
// Creation: February 04, 2011
//
// ****************************************************************************
void runTest(const string& name, OptionParser& op);
// Benchmark entry point: all work is delegated to the single-precision
// QTC test driver.
void RunBenchmark(OptionParser &op){
    const string benchmarkName("QTC");
    runTest(benchmarkName, op);
}
// ****************************************************************************
// Function: calculate_participants
//
// Purpose:
// This function decides how many GPUs (up to the maximum requested by the user)
// and threadblocks per GPU will be used. It also returns the total number of
// thread-blocks across all GPUs and the number of thread-blocks that are in nodes
// before the current one.
// In the future, the behavior of this function should be decided based on
// auto-tuning instead of arbitrary decisions.
//
// Arguments:
// The point count, the number of nodes requested by the user, the rank, and the three
// variables that the function computes (passed by reference)
//
//
// Returns: nothing
//
// Programmer: Anthony Danalis
// Creation: May 25, 2011
//
// ****************************************************************************
// Decides how many nodes (GPUs) actually participate and how many thread
// blocks each one launches, given the remaining point count.
//
//   point_count  - points still to be clustered
//   node_count   - nodes requested by the user
//   cwrank       - this node's rank (used to distribute the remainder)
//   thread_block_count / total_thread_block_count / active_node_count -
//                  out-parameters computed by this function
void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){
    // Minimum number of points needed to keep one GPU saturated.
    const int saturation = SM_COUNT * GPU_MIN_SATURATION_FACTOR;

    // Shrink the node count when even (node_count-1) GPUs would be
    // under-saturated by the available work.
    int nodes = node_count;
    if( point_count <= (node_count - 1) * saturation ){
        nodes = (point_count + saturation - 1) / saturation;   // ceil division
    }

    const int full_blocks = SM_COUNT * OVR_SBSCR_FACTOR;
    int blocks_here;
    int blocks_total;
    if( point_count >= nodes * full_blocks ){
        // Plenty of work: every active node runs the full over-subscribed
        // block count.
        blocks_here  = full_blocks;
        blocks_total = full_blocks * nodes;
    }else{
        // Scarce work: one thread block per point, spread over the active
        // nodes; the first (point_count % nodes) ranks take one extra block.
        blocks_here = point_count / nodes;
        if( cwrank < point_count % nodes ){
            blocks_here++;
        }
        blocks_total = point_count;
    }

    *active_node_count        = nodes;
    *thread_block_count       = blocks_here;
    *total_thread_block_count = blocks_total;
}
// ****************************************************************************
// Function: runTest
//
// Purpose:
// This benchmark measures the performance of applying QT-clustering on
// single precision data.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Anthony Danalis
// Creation: February 04, 2011
//
// ****************************************************************************
// Runs the single-precision QT-clustering benchmark.
// Rank 0 chooses the distance-matrix representation; the choice is then
// broadcast so every rank passes the same matrix_type into QTC().
void runTest(const string& name, OptionParser& op)
{
    int matrix_type = 0x0;
    if( comm_get_rank() == 0 ){
        matrix_type |= GLOBAL_MEMORY;
        matrix_type |= COMPACT_STORAGE_MATRIX;
    }
    comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0);
    QTC(name, op, matrix_type);
}
////////////////////////////////////////////////////////////////////////////////
//
void QTC(const string& name, OptionParser& op, int matrix_type){
ofstream debug_out, seeds_out;
void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust;
void *indr_mtrx, *degrees;
int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output;
bool save_clusters = false;
bool be_verbose = false;
void *distance_matrix_gmem, *distance_matrix;
float *dist_source, *pnts;
float threshold = 1.0f;
int i, max_degree, thread_block_count, total_thread_block_count, active_node_count;
int cwrank=0, node_count=1, tpb, max_card, iter=0;
unsigned long int dst_matrix_elems, point_count, max_point_count;
point_count = op.getOptionInt("PointCount");
threshold = op.getOptionFloat("Threshold");
save_clusters = op.getOptionBool("SaveOutput");
be_verbose = op.getOptionBool("Verbose");
// TODO - only deal with this size-switch once
int def_size = op.getOptionInt("size");
switch( def_size ) {
case 1:
// size == 1 should match default values of PointCount,
// Threshold, TextureMem, and CompactStorage parameters.
// (i.e., -s 1 is the default)
point_count = 4*1024;
break;
case 2:
point_count = 8*1024;
break;
case 3:
point_count = 16*1024;
break;
case 4:
point_count = 16*1024;
break;
case 5:
point_count = 26*1024;
break;
default:
fprintf( stderr, "unsupported size %d given; terminating\n", def_size );
return;
}
cwrank = comm_get_rank();
node_count = comm_get_size();
if( cwrank == 0 ){
pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type);
}
comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0);
comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0);
dst_matrix_elems = point_count*max_degree;
if( cwrank != 0 ){ // For all nodes except zero, in a distributed run.
dist_source = (float*) malloc (sizeof(float)*dst_matrix_elems);
indr_mtrx_host = (int*) malloc (sizeof(int)*point_count*max_degree);
}
// If we need to print the actual clusters later on, we'll need to have all points in all nodes.
if( save_clusters ){
if( cwrank != 0 ){
pnts = (float *)malloc( 2*point_count*sizeof(float) );
}
comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0);
}
comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0);
comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0);
assert( max_degree > 0 );
calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count);
ungrpd_pnts_indr_host = (int*) malloc (sizeof(int)*point_count);
for(int i=0; i<point_count; i++){
ungrpd_pnts_indr_host[i] = i;
}
cardinalities = (int*) malloc (sizeof(int)*2);
output = (int*) malloc (sizeof(int)*max_degree);
allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float));
CHECK_CUDA_ERROR();
// This is the N*Delta indirection matrix
allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int));
allocDeviceBuffer(°rees, point_count*sizeof(int));
allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int));
allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char));
allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float));
allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char));
allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int));
allocDeviceBuffer(&result, point_count*sizeof(int));
#ifdef DEBUG
int* cardinalities_debug = (int*) malloc (sizeof(int)*thread_block_count*2);
#endif
// Copy to device, and record transfer time
copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float));
copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int));
copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int));
hipMemset(clustered_pnts_mask, 0, point_count*sizeof(char));
hipMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float));
tpb = ( point_count > THREADSPERBLOCK )? THREADSPERBLOCK : point_count;
hipLaunchKernelGGL(compute_degrees, dim3(thread_block_count), dim3(tpb), 0, 0, (int *)indr_mtrx, (int *)degrees, point_count, max_degree);
hipDeviceSynchronize();
CHECK_CUDA_ERROR();
// The names of the saved outputs, if enabled, are "p", "p_seeds", and "p."
if( 0 == cwrank ){
if( save_clusters ){
debug_out.open("p");
for(i=0; i<point_count; i++){
debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl;
}
debug_out.close();
seeds_out.open("p_seeds");
}
cout << "\nInitial ThreadBlockCount: " << thread_block_count;
cout << " PointCount: " << point_count;
cout << " Max degree: " << max_degree << "\n" << endl;
cout.flush();
}
max_point_count = point_count;
tpb = THREADSPERBLOCK;
distance_matrix = distance_matrix_gmem;
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel execution
do{
stringstream ss;
int winner_node=-1;
int winner_index=-1;
bool this_node_participates = true;
++iter;
calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count);
// If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs).
if( cwrank >= active_node_count ){
this_node_participates = false;
}
comm_update_communicator(cwrank, active_node_count);
if( !this_node_participates )
break;
cwrank = comm_get_rank();
////////////////////////////////////////////////////////////////////////////////////////////////
///////// ----------------- Main kernel ----------------- /////////
hipLaunchKernelGGL(QTC_device, dim3(thread_block_count), dim3(tpb), 0, 0, (float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask,
(int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr,
(float *)dist_to_clust, (int *)degrees, point_count, max_point_count,
max_degree, threshold, cwrank, active_node_count,
total_thread_block_count);
///////// ----------------- Main kernel ----------------- /////////
////////////////////////////////////////////////////////////////////////////////////////////////
hipDeviceSynchronize();
CHECK_CUDA_ERROR();
#ifdef DEBUG
printf("cardinalities\n");
copyFromDevice( cardinalities_debug, cardnl, 2*576*sizeof(int) );
for (int i = 0; i < 576*2; i++)
printf("%d %d\n", i, cardinalities_debug[i]);
#endif
if( thread_block_count > 1 ){
// We are reducing 128 numbers or less, so one thread should be sufficient.
hipLaunchKernelGGL(reduce_card_device, dim3(1), dim3(1), 0, 0, (int *)cardnl, thread_block_count);
hipDeviceSynchronize();
CHECK_CUDA_ERROR();
}
copyFromDevice( cardinalities, cardnl, 2*sizeof(int) );
max_card = cardinalities[0];
winner_index = cardinalities[1];
comm_barrier();
comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1);
if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero.
cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl;
}
hipLaunchKernelGGL(trim_ungrouped_pnts_indr_array, dim3(1), dim3(tpb), 0, 0, winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix,
(int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask,
(int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees,
point_count, max_point_count, max_degree, threshold);
hipDeviceSynchronize();
CHECK_CUDA_ERROR();
if( cwrank == winner_node){ // for non-parallel cases, these should both be zero.
if( save_clusters ){
ss << "p." << iter;
debug_out.open(ss.str().c_str());
}
copyFromDevice(output, (void *)result, max_card*sizeof(int) );
if( save_clusters ){
for(int i=0; i<max_card; i++){
debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl;
}
seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl;
debug_out.close();
}
}
hipLaunchKernelGGL(update_clustered_pnts_mask, dim3(1), dim3(tpb), 0, 0, (char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count);
hipDeviceSynchronize();
CHECK_CUDA_ERROR();
point_count -= max_card;
//break;
}while( max_card > 1 && point_count );
if( save_clusters ){
seeds_out.close();
}
//
////////////////////////////////////////////////////////////////////////////////
if( cwrank == 0){
cout << "QTC is complete. Clustering iteration count: " << iter << endl;
cout.flush();
}
free(dist_source);
free(indr_mtrx_host);
free(output);
free(cardinalities);
#ifdef DEBUG
free(cardinalities_debug);
#endif
freeDeviceBuffer(distance_matrix_gmem);
freeDeviceBuffer(indr_mtrx);
freeDeviceBuffer(Ai_mask);
freeDeviceBuffer(cardnl);
freeDeviceBuffer(result);
return;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Allocates `bytes` of device (GPU) memory and stores the pointer in
// *bufferp. Errors are surfaced through CHECK_CUDA_ERROR.
void
allocDeviceBuffer(void** bufferp, unsigned long bytes)
{
hipMalloc(bufferp, bytes);
CHECK_CUDA_ERROR();
}
void
freeDeviceBuffer(void* buffer)
{
hipFree(buffer);
}
// Synchronous host-to-device copy of `bytes` bytes; errors are surfaced
// through CHECK_CUDA_ERROR.
void
copyToDevice(void* to_device, void* from_host, unsigned long bytes)
{
hipMemcpy(to_device, from_host, bytes, hipMemcpyHostToDevice);
CHECK_CUDA_ERROR();
}
// Synchronous device-to-host copy of `bytes` bytes; errors are surfaced
// through CHECK_CUDA_ERROR.
void
copyFromDevice(void* to_host, void* from_device, unsigned long bytes)
{
hipMemcpy(to_host, from_device, bytes, hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
}
extern "C"
{
// Bilinear resampling of a single-channel float image from
// (inputWidth x inputHeight) to (outputWidth x outputHeight).
// One thread per output pixel; the flat id supports a 2D block grid
// (hence the blockIdx.y term). Output coordinates are mapped onto the
// input with ratio = (in - 1) / (out - 1), i.e. image corners align.
// NOTE(review): assumes outputWidth > 1 and outputHeight > 1 — a 1-pixel
// output dimension makes the ratio divide by zero; confirm callers never
// request that.
__global__ void BilinearResampleKernel(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
float iT, iB; // row-interpolated values: top and bottom
if (id < size)
{
//output point coordinates
int px = id % outputWidth;
int py = id / outputWidth;
float xRatio = (float)(inputWidth - 1) / (outputWidth - 1);
float yRatio = (float)(inputHeight - 1) / (outputHeight - 1);
//corresponding (fractional) coordinates in the original image
float x = xRatio * px;
float y = yRatio * py;
//corresponding integer (pixel) coordinates in the original image
int xL = (int)floor(x);
int xR = (int)ceil(x);
int yT = (int)floor(y);
int yB = (int)ceil(y);
//inverse distances to these points (i.e., the interpolation weights)
float dL = 1.0f - (x - xL);
float dR = 1.0f - (xR - x);
float dT = 1.0f - (y - yT);
float dB = 1.0f - (yB - y);
//values at those points
float topLeft = input[yT * inputWidth + xL];
float topRight = input[yT * inputWidth + xR];
float bottomLeft = input[yB * inputWidth + xL];
float bottomRight = input[yB * inputWidth + xR];
//linear interpolation in X (i.e., top and bottom pairs of points)
if (xL == xR) { //interpolated point corresponds exactly to one integer x-coordinate in the original image; either neighbour works
iT = topLeft;
iB = bottomLeft;
}
else {
iT = topLeft * dL + topRight * dR;
iB = bottomLeft * dL + bottomRight * dR;
}
//linear interpolation in Y (i.e., linear interpolation of those two points)
if (yT == yB) //interpolated point corresponds exactly to one integer y-coordinate in the original image; either neighbour works
{
output[py * outputWidth + px] = iT;
}
else {
output[py * outputWidth + px] = iT * dT + iB * dB;
}
}
}
// Resamples images so that each pixel in the input image corresponds to exactly N pixels in the output image (all N will take the value of that input pixel).
// Integer-factor upsampling: every input pixel is replicated into an
// (outputWidth/inputWidth) x (outputHeight/inputHeight) block of output
// pixels. Assumes the output dimensions are integer multiples of the
// input dimensions. One thread per output pixel.
__global__ void ExactResampleKernel_1toN(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    if (id < outputWidth * outputHeight)
    {
        // Destination pixel handled by this thread.
        int dstX = id % outputWidth;
        int dstY = id / outputWidth;
        // Integer replication factors along each axis.
        int factorX = outputWidth / inputWidth;
        int factorY = outputHeight / inputHeight;
        // Source pixel whose value is copied verbatim.
        int srcX = dstX / factorX;
        int srcY = dstY / factorY;
        output[dstY * outputWidth + dstX] = input[srcY * inputWidth + srcX];
    }
}
// Resamples images so that each pixel in the output image corresponds to exactly N pixels in the input image (their values are averaged).
// Integer-factor downsampling: each output pixel is the mean of the
// (inputWidth/outputWidth) x (inputHeight/outputHeight) block of input
// pixels that maps onto it. Assumes the input dimensions are integer
// multiples of the output dimensions. One thread per output pixel.
__global__ void ExactResampleKernel_Nto1(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    if (id < outputWidth * outputHeight)
    {
        // Destination pixel handled by this thread.
        int dstX = id % outputWidth;
        int dstY = id / outputWidth;
        int factorX = inputWidth / outputWidth;
        int factorY = inputHeight / outputHeight;
        // Accumulate column-by-column (x outer, y inner) to keep the
        // floating-point summation order identical to the original kernel.
        float acc = 0;
        for (int ox = 0; ox < factorX; ox++) {
            for (int oy = 0; oy < factorY; oy++) {
                int srcX = dstX * factorX + ox;
                int srcY = dstY * factorY + oy;
                acc += input[srcY * inputWidth + srcX];
            }
        }
        output[dstY * outputWidth + dstX] = acc / (float)(factorX * factorY);
    }
}
// Nearest-neighbour resampling: each output pixel copies the input pixel
// closest to its back-projected centre. One thread per output pixel.
__global__ void NNResampleKernel(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    if (id >= outputWidth * outputHeight)
        return;
    int dstX = id % outputWidth;
    int dstY = id / outputWidth;
    // Scale factors mapping output coordinates back into the input image.
    float scaleX = (float)(inputWidth - 1) / (outputWidth);
    float scaleY = (float)(inputHeight - 1) / (outputHeight);
    // +0.5f samples at the pixel centre before truncation.
    int srcX = (int) (scaleX * (dstX + .5f));
    int srcY = (int) (scaleY * (dstY + .5f));
    output[dstY * outputWidth + dstX] = input[srcY * inputWidth + srcX];
}
// Cuts a square sub-window out of `input` and writes it, resampled with
// truncating (nearest-to-floor) sampling, into `output`. The window is
// described by scalar parameters: centre (subImageX, subImageY) in
// normalized <-1, 1> coordinates and subImageDiameter as a fraction <0, 1>
// of the smaller input dimension. When safeBounds is set the window is
// clamped fully inside the input.
// NOTE(review): output pixels whose source falls outside the input are left
// unwritten, so `output` keeps whatever values it previously held there.
__global__ void CutSubImageKernel_SingleParams(float *input, float *output, float subImageX, float subImageY, float subImageDiameter, bool safeBounds,
int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
float subImgCX = subImageX; // <-1, 1>
float subImgCY = subImageY; // <-1, 1>
float subImgDiameter = subImageDiameter; // <0,1>
// Window diameter in pixels, clamped to [1, min(W,H)-1].
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
// Top-left corner of the window (centre minus half the diameter).
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
int px = id % outputWidth;
int py = id / outputWidth;
//
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
// Source offset inside the window (truncated, i.e. nearest-to-floor).
int x = (int)(xRatio * px);
int y = (int)(yRatio * py);
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
output[py * outputWidth + px] = input[(y + subImgY) * inputWidth + x + subImgX];
}
}
}
// Bilinear variant of the sub-image cut: the window is described by a
// device-side array subImageDefs ([0]=centre x in <-1,1>, [1]=centre y in
// <-1,1>, [2]=diameter as a fraction <0,1> of the smaller input dimension),
// and each output pixel is bilinearly interpolated from a 2x2 neighbourhood.
// When safeBounds is set the window is clamped fully inside the input.
// NOTE(review): the guard only checks (x+subImgX, y+subImgY); the +1
// neighbours read below can touch index inputWidth-1+1 / inputHeight-1+1 at
// the window's far edge — confirm the xRatio/yRatio mapping keeps them in
// range, or that safeBounds is always set by callers.
__global__ void BilinearResampleSubImageKernel(float *input, float *output, float* subImageDefs, bool safeBounds,
int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
float subImgCX = subImageDefs[0]; // <-1, 1>
float subImgCY = subImageDefs[1]; // <-1, 1>
float subImgDiameter = subImageDefs[2]; // <0,1>
// Window diameter in pixels, clamped to [1, min(W,H)-1].
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
// Top-left corner of the window (centre minus half the diameter).
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
int px = id % outputWidth;
int py = id / outputWidth;
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
// Integer part of the source position inside the window.
int x = (int) (xRatio * px);
int y = (int) (yRatio * py);
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
// X and Y distance difference (fractional parts, used as weights)
float xDist = (xRatio * px) - x;
float yDist = (yRatio * py) - y;
// Points of the 2x2 neighbourhood
float topLeft= input[(y + subImgY) * inputWidth + x + subImgX];
float topRight = input[(y + subImgY) * inputWidth + x + subImgX + 1];
float bottomLeft = input[(y + subImgY + 1) * inputWidth + x + subImgX];
float bottomRight = input[(y + subImgY + 1) * inputWidth + x + subImgX + 1];
float result =
topLeft * (1 - xDist) * (1 - yDist) +
topRight * xDist * (1 - yDist) +
bottomLeft * yDist * (1 - xDist) +
bottomRight * xDist * yDist;
output[py * outputWidth + px] = result;
}
}
}
/// Resamples the input for a set of sub-image proposals; each proposal produces one output tile.
// Batched version of BilinearResampleSubImageKernel: `numberSubImages`
// proposals are resampled in one launch, one output tile of
// outputWidth x outputHeight per proposal. Proposal k reads its window
// definition at subImageDefs[k*subImageDefsDim + {0,1,2}] (centre x, centre
// y in <-1,1>, diameter fraction in <0,1>) and writes its tile at offset
// k*outputWidth*outputHeight in `output`. outputSize is the total number of
// output elements across all tiles.
// NOTE(review): as in the single-proposal kernel, target pixels whose
// source falls outside the input are left unwritten, and the +1 neighbour
// reads at the window's far edge are only partially guarded — confirm.
__global__ void BilinearResampleSubImageKernel_ForManyProposals(const float *input, float *output, const float* subImageDefs, bool safeBounds,
int subImageDefsDim, int inputWidth, int inputHeight, int outputWidth, int outputHeight, int numberSubImages, int outputSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int px = id % outputWidth; // column in the single output image
int subim_id = id / outputWidth / outputHeight; // which proposal/tile it is
int py = (id / outputWidth) % outputHeight; // row in the single output image
if (id<outputSize)
{
float subImgCX = subImageDefs[0 + subim_id*subImageDefsDim]; // <-1, 1>
float subImgCY = subImageDefs[1 + subim_id*subImageDefsDim]; // <-1, 1>
float subImgDiameter = subImageDefs[2 + subim_id*subImageDefsDim]; // <0,1>
// Window diameter in pixels, clamped to [1, min(W,H)-1].
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
// Top-left corner of the window (centre minus half the diameter).
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
// Integer part of the source position inside the window.
int x = (int) (xRatio * px);
int y = (int) (yRatio * py);
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
//--- X and Y distance difference (fractional parts, used as weights)
float xDist = (xRatio * px) - x;
float yDist = (yRatio * py) - y;
//--- Points of the 2x2 neighbourhood
float topLeft= input[(y + subImgY) * inputWidth + x + subImgX];
float topRight = input[(y + subImgY) * inputWidth + x + subImgX + 1];
float bottomLeft = input[(y + subImgY + 1) * inputWidth + x + subImgX];
float bottomRight = input[(y + subImgY + 1) * inputWidth + x + subImgX + 1 ];
float result =
topLeft * (1 - xDist) * (1 - yDist) +
topRight * xDist * (1 - yDist) +
bottomLeft * yDist * (1 - xDist) +
bottomRight * xDist * yDist;
output[py * outputWidth + px + subim_id*outputWidth*outputHeight] = result;
}
}
}
// Reverse of the sub-image cut: bilinearly samples `opImage` and ADDS the
// result into a square window of `input`. The window is described by
// subImageDefs ([0]=centre x in <-1,1>, [1]=centre y in <-1,1>,
// [2]=diameter fraction in <0,1>); one thread per window pixel.
// There is no explicit `id < size` guard — the px/py bounds conditions
// below serve as the guard instead, so the launch must cover at least
// diameterPix*diameterPix threads.
// NOTE(review): unlike the cut kernels, maxDiameter here is min(W, H)
// (not min(W,H)-1) and diameterPix is not clamped from above, and the
// (x+1, y+1) reads from opImage are unguarded at its far edge — confirm
// callers keep the ratios in range.
__global__ void BilinearAddSubImageKernel(float *input, float *opImage, float* subImageDefs, int inputWidth, int inputHeight, int opImageWidth, int opImageHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
float subImgCX = subImageDefs[0]; // <-1, 1>
float subImgCY = subImageDefs[1]; // <-1, 1>
float subImgDiameter = subImageDefs[2]; // <0,1>
int maxDiameter = min(inputWidth, inputHeight);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
// Top-left corner of the destination window.
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
int px = id % diameterPix;
int py = id / diameterPix;
if (px + subImgX >= 0 && py + subImgY >= 0 &&
px + subImgX < inputWidth && py + subImgY < inputHeight &&
py < diameterPix )
{
// Map the window pixel back into opImage coordinates.
float xRatio = (float)(opImageWidth - 1) / (diameterPix);
float yRatio = (float)(opImageHeight - 1) / (diameterPix);
int x = (int) (xRatio * px);
int y = (int) (yRatio * py);
// X and Y distance difference (fractional parts, used as weights)
float xDist = (xRatio * px) - x;
float yDist = (yRatio * py) - y;
// Points of the 2x2 neighbourhood in opImage
float topLeft= opImage[y * opImageWidth + x];
float topRight = opImage[y * opImageWidth + x + 1];
float bottomLeft = opImage[(y + 1) * opImageWidth + x];
float bottomRight = opImage[(y + 1) * opImageWidth + x + 1];
float result =
topLeft * (1 - xDist) * (1 - yDist) +
topRight * xDist * (1 - yDist) +
bottomLeft * yDist * (1 - xDist) +
bottomRight * xDist * yDist;
input[(py + subImgY) * inputWidth + px + subImgX] += result;
}
}
// Blits `sprite` (spriteSize.x by spriteSize.y pixels) into `input`, with
// `position` giving the top-left corner (truncated to integer pixels).
// One thread per sprite pixel.
//
// Fixes over the previous version:
//  * position.x/position.y are truncated to int explicitly, so the
//    destination offset is computed in integer arithmetic instead of via an
//    implicit float->int conversion of the whole expression;
//  * the destination is clipped per-axis, so a sprite overlapping the left
//    or right image border is cropped instead of wrapping onto the
//    previous/next row (the old code only range-checked the flat offset).
__global__ void DrawSpriteKernel(float *input, int inputWidth, int inputHeight, float *sprite, float2 position, int2 spriteSize)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    int size = spriteSize.x * spriteSize.y;
    if (id >= size)
        return;
    // Pixel within the sprite handled by this thread.
    int px = id % spriteSize.x;
    int py = id / spriteSize.x;
    // Destination pixel in the target image.
    int destX = (int)position.x + px;
    int destY = (int)position.y + py;
    if (destX >= 0 && destX < inputWidth && destY >= 0 && destY < inputHeight)
    {
        input[destY * inputWidth + destX] = sprite[id];
    }
}
// Copies `input` into `output` shifted by (leftMargin, topMargin); output
// pixels whose source falls outside the input are set to fillValue.
// `size` is the total number of output elements; one thread per element.
__global__ void Crop2DKernel(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int size, int leftMargin, int topMargin, float fillValue)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    if (id >= size)
        return;
    // Source coordinates corresponding to this output pixel.
    int srcX = id % outputWidth - leftMargin;
    int srcY = id / outputWidth - topMargin;
    bool inside = srcX >= 0 && srcX < inputWidth && srcY >= 0 && srcY < inputHeight;
    output[id] = inside ? input[srcX + srcY * inputWidth] : fillValue;
}
//------------------------------------------------------------------------------------------------------------------------
// RETINA STUFF
//------------------------------------------------------------------------------------------------------------------------
// Decodes the subImageDefs triple ([0]=x in <-1,1>, [1]=y in <-1,1>,
// [2]=diameter fraction in <0,1>) into a pixel anchor `subImg` and a
// clamped pixel diameter `diameterPix`. When safeBounds is set the anchor
// is clamped so the window stays inside the input.
// NOTE(review): the "- diameterPix / 2" shift is commented out, so unlike
// the Cut/Bilinear sub-image kernels `subImg` here is the window CENTRE
// rather than its top-left corner — confirm this is intentional for the
// retina kernels below.
__device__ void EstimateParForSubsample(float* subImageDefs, bool safeBounds,
int inputWidth, int inputHeight,
int2 & subImg, int & diameterPix)
{
diameterPix = (int)( fminf( (float)inputWidth,(float)inputHeight ) * subImageDefs[2] ); // <0,1>
subImg.x = (int)((float)inputWidth * (subImageDefs[0] + 1) * 0.5f) ;//- diameterPix / 2;
subImg.y = (int)((float)inputHeight * (subImageDefs[1] + 1) * 0.5f);// - diameterPix / 2;
// Clamp the diameter to [1, min(W,H)-1].
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
if (safeBounds)
{
subImg.x = max(subImg.x, 1);
subImg.y = max(subImg.y, 1);
subImg.x = min(subImg.x, inputWidth - diameterPix - 1);
subImg.y = min(subImg.y, inputHeight - diameterPix - 1);
}
}
// One thread per retina sample point: looks up the input pixel under the
// point's mask position (retinaMask rows hold normalized x/y offsets,
// scaled by the window diameter and shifted by the window anchor) and
// records its value in `output`, counting the insertion in
// retinaDataInserted.
// NOTE(review): `output[id] = val` followed by `atomicAdd(output+id, val)`
// leaves 2*val in the bucket (each id is touched by exactly one thread
// here). One of the two writes looks redundant — confirm whether plain
// assignment or pure accumulation was intended before relying on the
// output scale.
__global__ void RetinaTransform_HaveAtLeastOneValueThere (float * subImageDefs,
float* input, int inputWidth, int inputHeight,
float* output,int outputDataSize,
float* retinaMask, int retinaDataSize, int retinaMaskColHint,
float* retinaDataInserted)
{
int id_retinaPoint = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int2 subImg;
int diameterPix;
bool safeBounds = 0;
// Decode the window anchor/diameter shared by all retina points.
EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix );
if (id_retinaPoint<outputDataSize)
{
output[id_retinaPoint] = 0; // default value
// Retina sample position in input-image pixel coordinates.
float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix);
float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix);
int x = subImg.x + x_mask;
int y = subImg.y + y_mask;
if (x<inputWidth && y<inputHeight && x>=0 && y>=0)
{
float val = input[x+y*inputWidth];
output[id_retinaPoint] = val;
atomicAdd(output + id_retinaPoint , val);
atomicAdd(retinaDataInserted + id_retinaPoint , 1);
}
}
}
// One thread per input pixel: finds the nearest retina sample point
// (retinaMask rows hold normalized x/y offsets, scaled by the window
// diameter and shifted by the window anchor) and atomically accumulates
// the pixel's value into that point's bucket; retinaDataInserted counts
// how many pixels landed in each bucket so the caller can normalize.
//
// Fixes over the previous version:
//  * the nearest-point search starts from index 0 with an FLT_MAX-sized
//    float sentinel — the old code used the double literal 999999.9 and
//    minIdx = 1, which silently credited bucket 1 whenever no squared
//    distance beat the sentinel.
__global__ void RetinaTransform_FillRetinaAtomic (float * subImageDefs,
float* input, int inputWidth, int inputHeight,
float* output,int outputDataSize,
float* retinaMask, int retinaDataSize, int retinaMaskColHint,
float* retinaDataInserted)
{
    int id_pxl = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    int2 subImg;
    int diameterPix;
    bool safeBounds = 0;
    // Pixel coordinates handled by this thread.
    int x = id_pxl % inputWidth;
    int y = id_pxl / inputWidth;
    // Decode the window anchor/diameter shared by all retina points.
    EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix );
    if (id_pxl < inputWidth*inputHeight)
    {
        float minDist = 3.402823466e38f;   // FLT_MAX, without needing <float.h>
        int minIdx = 0;
        // Linear scan over all retina points for the nearest one.
        for (int id_retinaPoint=0 ; id_retinaPoint<retinaDataSize ; id_retinaPoint++)
        {
            // Retina sample position in input-image pixel coordinates.
            float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix) + subImg.x;
            float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix) + subImg.y;
            float dist = (x-x_mask)*(x-x_mask) + (y-y_mask)*(y-y_mask);
            if (dist<minDist)
            {
                minDist = dist;
                minIdx = id_retinaPoint;
            }
        }
        atomicAdd(output + minIdx , input[id_pxl]);
        atomicAdd(retinaDataInserted + minIdx , 1);
    }
}
} | the_stack |
#include "fast_rnnt/csrc/mutual_information.h"
namespace fast_rnnt {
/*
Forward of mutual_information. Each thread block computes blocks of the 'p'
array of (s, t) shape equal to (BLOCK_SIZE, BLOCK_SIZE), e.g. (32, 32).
Thread-blocks loop over such blocks, but they might loop only once if there is
not that much data to process. We sequentially launch thread groups in
such a way that thread-blocks within a group do not depend on each other
(see the "iter" parameter). The blocks of the 'image' (i.e. of the p matrix)
that each group handles are arranged in a diagonal.
Template args:
scalar_t: the floating-point type, e.g. float, double; maybe eventually
half, although I think we don't support LogAdd for half yet.
BLOCK_SIZE: an integer power of two no greater than 32 (this limitation
is because we assume BLOCK_SIZE + 1 <= 64 in some data-loading
code).
Args:
px: Tensor of shape [B][S][T + 1], if !modified; [B][S][T] if modified;
may be interpreted as the log-odds ratio of
generating the next x in the sequence, i.e.
xy[b][s][t] is the log of
p(x_s | x_0..x_{s-1}, y_0..y_{s-1}) / p(x_s),
i.e. the log-prob of generating x_s given subsequences of lengths
(s, t), divided by the prior probability of generating x_s. (See
mutual_information.py for more info).
py: The log-odds ratio of generating the next y in the sequence.
Shape [B][S + 1][T]
p: This function writes to p[b][s][t] the mutual information between
sub-sequences of x and y of length s and t respectively, from the
b'th sequences in the batch. Its shape is [B][S + 1][T + 1].
Concretely, this function implements the following recursion,
in the case where s_begin == t_begin == 0:
p[b,0,0] = 0.0
if not `modified`:
p[b,s,t] = log_add(p[b,s-1,t] + px[b,s-1,t],
p[b,s,t-1] + py[b,s,t-1]) (eq. 0)
if `modified`:
p[b,s,t] = log_add(p[b,s-1,t-1] + px[b,s-1,t-1],
p[b,s,t-1] + py[b,s,t-1]) (eq. 0)
treating values with any -1 index as -infinity.
.. if `boundary` is set, we start from p[b,s_begin,t_begin]=0.0.
boundary: If set, a tensor of shape [B][4] of type int64_t, which
contains, where for each batch element b, boundary[b] equals
[s_begin, t_begin, s_end, t_end]
which are the beginning and end (i.e. one-past-the-last) of the
x and y sequences that we should process. Otherwise, must be
a tensor of shape [0][0] of type int64_t; the values will
default to (0, 0, S, T).
ans: a tensor `ans` of shape [B], where this function will set
ans[b] = p[b][s_end][t_end],
with s_end and t_end being (S, T) if `boundary` was specified,
and (boundary[b][2], boundary[b][3]) otherwise.
`ans` represents the mutual information between each pair of
sequences (i.e. x[b] and y[b], although the sequences are not
supplied directly to this function).
The block-dim and grid-dim must both be 1-dimensional, and the block-dim must
be at least 128.
*/
template <typename scalar_t,
          int BLOCK_SIZE> // e.g. BLOCK_SIZE == 16 or 32.
__global__ void mutual_information_kernel(
    // B, S, T + 1, i.e. batch, x_seq_length, y_seq_length + 1
    torch::PackedTensorAccessor32<scalar_t, 3> px,
    torch::PackedTensorAccessor32<scalar_t, 3> py, // B, S + 1, T.
    // B, S + 1, T + 1. This is an output.
    torch::PackedTensorAccessor32<scalar_t, 3> p,
    // B, 4; or 0, 0 if boundaries are the defaults (0, 0, S, T)
    torch::PackedTensorAccessor32<int64_t, 2> boundary,
    torch::PackedTensorAccessor32<scalar_t, 1> ans, // [B]
    int iter) { // This kernel is sequentially called with 'iter' = 0, 1, 2 and
                // so on, up to num_iters - 1 where num_iters = num_s_blocks +
                // num_t_blocks - 1 num_s_blocks = S / BLOCK_SIZE + 1
                // num_t_blocks = T / BLOCK_SIZE + 1
                // so that each group depends on the previous group...
  const int B = px.size(0), S = px.size(1), T = py.size(2);
  // "modified" topology is detected from px's last dimension: T if modified,
  // T + 1 otherwise.
  const bool modified = (px.size(2) == T);
  const int t_offset = (modified ? -1 : 0); // see CPU code to understand.
  // num_s_blocks and num_t_blocks are the number of blocks we need to cover the
  // array of size (S, T) with blocks of this size, in the s and t directions
  // respectively.
  // You can read the following expressions as simplifications of, for example,
  // num_s_blocks = ((S + 1) + BLOCK_SIZE - 1) / BLOCK_SIZE,
  // i.e. rounding-up division of (S + 1) by BLOCK_SIZE, and the same for (T +
  // 1).
  const int num_s_blocks = S / BLOCK_SIZE + 1;
  //, num_t_blocks = T / BLOCK_SIZE + 1;
  // num_blocks_this_iter is an upper bound on the number of blocks of size
  // (BLOCK_SIZE by BLOCK_SIZE) that might be active on this iteration (`iter`).
  // These iterations start from the bottom left of the image so that on iter ==
  // 0 we process only one block with block-index (0, 0) then on iter == 1 we
  // process block-indexes (1, 0) and (0, 1); and then on iter==2 we process (2,
  // 0), (1, 1) and (0, 2); and so on. We also will never have more than
  // `num_s_blocks` blocks (We'll never have more than num_t_blocks either, but
  // the numbering we use corresponds to s and not t, so when we hit the
  // num_t_blocks limit, the blocks with the lowest s indexes would just not be
  // active and we'll 'continue' in the loop below).
  int num_blocks_this_iter = min(iter + 1, num_s_blocks);
  // For the block with s_block_begin == 0 and t_block_begin == 0 (for
  // easy illustration), px_buf[s][t] will contain px[s - 1][t + t_offset]; or
  // -infinity. for out-of-range indexes into px. Likewise, py_buf[s][t] will
  // contain (py[s][t - 1]).
  __shared__ scalar_t px_buf[BLOCK_SIZE][BLOCK_SIZE],
      py_buf[BLOCK_SIZE][BLOCK_SIZE];
  // p_buf[s][t] == p[s+s_block_begin-1][t+t_block_begin-1]
  // 1st row/col of p_buf correspond to the previously computed blocks (lower
  // `iter`), or to negative indexes into p. So, for the origin block,
  // p_buf[s][t] corresponds to p[s - 1][t - 1]; or -inf for
  // out-of-range values.
  __shared__ scalar_t p_buf[BLOCK_SIZE + 1][BLOCK_SIZE + 1];
  // boundary_buf will be used to store the b'th row of `boundary` if we have
  // boundary information supplied; or (0, 0, S, T) otherwise.
  __shared__ int64_t boundary_buf[4];
  if (threadIdx.x == 0) {
    boundary_buf[0] = 0;
    boundary_buf[1] = 0;
    boundary_buf[2] = S;
    boundary_buf[3] = T;
  }
  // batch_block_iter iterates over batch elements (index b) and block
  // indexes in the range [0..num_blocks_this_iter-1], combining both
  // batch and block indexes.
  for (int batch_block_iter = blockIdx.x;
       batch_block_iter < B * num_blocks_this_iter;
       batch_block_iter += gridDim.x) {
    int block = batch_block_iter / B,
        b = batch_block_iter % B; // b is the index into the batch
    // Note: `block` can be no greater than `iter` because num_blocks_this_iter
    // <= iter + 1, i.e. iter >= num_blocks_this_iter - 1; and
    // block < num_blocks_this_iter, so iter - block >= 0.
    int s_block_begin = block * BLOCK_SIZE,
        t_block_begin = (iter - block) * BLOCK_SIZE;
    bool is_origin_block = (s_block_begin + t_block_begin == 0);
    __syncthreads();
    if (threadIdx.x < 4)
      boundary_buf[threadIdx.x] = boundary[b][threadIdx.x];
    __syncthreads();
    int s_begin = boundary_buf[0], t_begin = boundary_buf[1],
        s_end = boundary_buf[2], t_end = boundary_buf[3];
    s_block_begin += s_begin;
    t_block_begin += t_begin;
    // block_S and block_T are the actual sizes of this block (the block of `p`
    // that we will write), no greater than (BLOCK_SIZE, BLOCK_SIZE) but
    // possibly less than that if we are towards the end of the sequence. The
    // last element in the output matrix p that we need to write is (s_end,
    // t_end), i.e. the one-past-the-end index is (s_end + 1, t_end + 1).
    int block_S = min(BLOCK_SIZE, s_end + 1 - s_block_begin),
        block_T = min(BLOCK_SIZE, t_end + 1 - t_block_begin);
    if (block_S <= 0 || block_T <= 0)
      continue;
    // Load px_buf and py_buf.
    for (int i = threadIdx.x; i < BLOCK_SIZE * BLOCK_SIZE; i += blockDim.x) {
      int s_in_block = i / BLOCK_SIZE, t_in_block = i % BLOCK_SIZE,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin,
          t_off = t + t_offset;
      // comparing as unsigned int makes sure the index is nonnegative.
      // Caution: if s_begin > 0 or t_begin > 0 we may end up loading some px
      // and py values that are outside the proper boundaries that we need, but
      // the corresponding p_buf values will end up being 0 so this won't
      // matter.
      scalar_t this_px = -INFINITY;
      // Below, "&& t <= t_end" can be interpreted as:
      // "&& (modified ? t_off < t_end : t_off <= t_end)
      // [since px's last valid index is t_end - 1 if modified, else t_end.
      if (s > s_begin && s <= s_end && t_off >= t_begin && t <= t_end)
        this_px = px[b][s - 1][t_off];
      px_buf[s_in_block][t_in_block] = this_px;
      scalar_t this_py = -INFINITY;
      if (t > t_begin && t <= t_end && s <= s_end)
        this_py = py[b][s][t - 1];
      py_buf[s_in_block][t_in_block] = this_py;
    }
    // Load the 1st row and 1st column of p_buf.
    // This is the context from previously computed blocks of the
    // image. Remember: p_buf[s][t] will correspond to p[s + s_block_begin -
    // 1][t + t_block_begin - 1]
    if (threadIdx.x <= BLOCK_SIZE) {
      // s_in_p_buf and t_in_pbuf are simply the indexes into p_buf
      int s_in_p_buf = threadIdx.x, t_in_p_buf = 0,
          s = s_in_p_buf + s_block_begin - 1,
          t = t_in_p_buf + t_block_begin - 1;
      scalar_t this_p = -INFINITY;
      if (s >= s_begin && s <= s_end && t >= t_begin && t <= t_end)
        this_p = p[b][s][t];
      p_buf[s_in_p_buf][t_in_p_buf] = this_p;
    } else if (static_cast<unsigned int>(static_cast<int>(threadIdx.x) - 64) <=
               static_cast<unsigned int>(BLOCK_SIZE)) {
      // Another warp handles the other leg. Checking as unsigned
      // tests that threadIdx.x - 64 is both >= 0 and <= BLOCK_SIZE
      // (this is why the launch requires blockDim.x >= 128 and
      // BLOCK_SIZE + 1 <= 64).
      int s_in_p_buf = 0, t_in_p_buf = static_cast<int>(threadIdx.x) - 64,
          s = s_in_p_buf + s_block_begin - 1,
          t = t_in_p_buf + t_block_begin - 1;
      scalar_t this_p = -INFINITY;
      if (s >= s_begin && s <= s_end && t >= t_begin && t <= t_end)
        this_p = p[b][s][t];
      p_buf[s_in_p_buf][t_in_p_buf] = this_p;
    }
    __syncthreads();
    // from here to the next __syncthreads(), only the 1st warp should be active
    // so we shouldn't need to synchronize. (implicit within-warp
    // synchronization).
    if (threadIdx.x == 0) {
      // This if-statement is an optimization and modification of the loop below
      // for the value i == 0, i.e. inner-iteration == 0. The modification is
      // to set p_buf to 1.0 = exp(0.0) if this is the "origin block",
      // i.e. s == s_begin, t == t_begin. This corresponds to the
      // probability of the pair of sequences of length (0, 0).
      p_buf[1][1] =
          (is_origin_block ? 0.0
                           : LogAdd(
                                 // px_buf has t_offset applied.
                                 p_buf[0][1 + t_offset] + px_buf[0][0],
                                 p_buf[1][0] + py_buf[0][0]));
    }
    int s = threadIdx.x;
    for (int i = 1; i < block_S + block_T - 1; ++i) {
      __syncwarp();
      // i is the inner iteration, which corresponds to the (s + t) indexes of
      // the elements within the block that we write. So i == 0 writes
      // positions (s, t) == (0, 0) (but we treated i == 0 as a special case
      // above); i == 1 writes (0, 1) and (1, 0); i == 2 writes (0, 2), (1, 1)
      // and (2, 1); and so on. Note: not many threads participate in this
      // part, only up to BLOCK_SIZE at most. Unfortunately we couldn't figure
      // out a very meaningful way for more threads to do work, that looked like
      // it would really speed things up.
      // So this kernel does (2 * BLOCK_SIZE) iterations, which may seem a lot,
      // but we do at least do the I/O in an efficient way and keep the
      // inner loop simple and fast (e.g. no exp() or log()).
      int t = i - s;
      if (s < block_S &&
          static_cast<unsigned int>(t) < static_cast<unsigned int>(block_T)) {
        // p_buf is indexed by s + 1 and t + 1 because it has an extra initial
        // row and column for context from previous blocks. Taking into account
        // the way these buffers relate to the tensors p, px and py,
        // can be interpreted as follows,
        // writing sbb for s_block_begin and tbb for t_block_begin:
        //
        // p[b][s+sbb][t+tbb] = LogAdd(p[b][s+sbb-1][t+tbb] +
        //                             px[s+sbb-1][t+tbb],
        //                             p[b][s+sbb][t+tbb-1] +
        //                             py[s+sbb][t+tbb-1]
        //
        // where you can see that apart from the offsets of tbb and sbb, this is
        // the same as the recursion defined for p in
        // mutual_information.py:mutual_information_recursion(); and (eq. 0)
        // above.
        // note: px_buf has t_offset applied..
        p_buf[s + 1][t + 1] = LogAdd(p_buf[s][t + 1 + t_offset] + px_buf[s][t],
                                     p_buf[s + 1][t] + py_buf[s][t]);
        // We don't need to do __syncthreads() in this loop because all the
        // threads that are active are in the same warp. (However, in future,
        // if NVidia changes some things, we might need to sync here).
      }
    }
    __syncthreads();
    // Write out the data to p;
    for (int i = threadIdx.x; i < BLOCK_SIZE * BLOCK_SIZE; i += blockDim.x) {
      int s_in_block = i / BLOCK_SIZE, t_in_block = i % BLOCK_SIZE,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      if (s_in_block < block_S && t_in_block < block_T) {
        scalar_t this_p = p_buf[s_in_block + 1][t_in_block + 1];
        p[b][s][t] = this_p;
      }
    }
    __syncthreads();
    if (threadIdx.x == 0) {
      // Write `ans`, if this is the final (top-right) block in its sequence
      // Logically, the following equation corresponds to:
      //    ans[b] = p[b][s_end][t_end]
      if (s_block_begin + block_S - 1 == s_end &&
          t_block_begin + block_T - 1 == t_end) {
        // you could read block_S below as block_S - 1 + 1, meaning,
        // it's the last index in a block of size block_S, but the indexes into
        // p_buf have a "+ 1". Likewise for block_T.
        ans[b] = p_buf[block_S][block_T];
      }
    }
  }
}
// like exp(), but returns 0 if arg is inf/nan, or if result would be
// infinity or nan (note: this can happen for out-of-range elements
// when setting px_buf and py_buf if block_S != BLOCK_SIZE or
// block_T != BLOCK_SIZE, and it's a problem because even though
// out-of-range gradients are zero, if we multiply them by infinity
// we get NaN).
// Overflow/NaN-safe exponential: non-finite input, or a result that would be
// non-finite, maps to 0 (the test `v - v != 0` holds exactly for inf and NaN).
template <typename Real> __forceinline__ __device__ Real safe_exp(Real x) {
  if (x - x != 0)
    return 0; // x is +/-inf or NaN
  Real y = exp(x);
  // exp() may still overflow to +inf for large finite x.
  return (y - y != 0.0) ? Real(0) : y;
}
/*
Backward of mutual_information.
The forward pass is:
p[b,s,t] = log_add(p[b,s-1,t+t_offset] + px[b,s-1,t+t_offset],
p[b,s,t-1] + py[b,s,t-1]) (eq. 0)
where t_offset = (modified ? -1 : 0)
The backprop for the above, implemented in the obvious way, would be as
follows (note, we define term1 and term2 with offsets in the indexes, which
will be convenient later..):
term1(b,s-1,t+t_offset) =
exp(p[b,s-1,t+t_offset] + px[b,s-1,t+t_offset] - p[b,s,t]) (0a)
term2(b,s,t-1) = exp(p[b,s,t-1] + py[b,s,t-1] - p[b,s,t]) (0b)
p_grad[b,s-1,t+t_offset] += p_grad[b,s,t] * term1(b,s-1,t+t_offset) (1a)
px_grad[b,s-1,t+t_offset] += p_grad[b,s,t] * term1(b,s-1,t+t_offset) (1b)
p_grad[b,s,t-1] += p_grad[b,s,t] * term2(b,s,t-1) (1c)
py_grad[b,s,t-1] += p_grad[b,s,t] * term2(b,s,t-1) (1d)
   Adding 1 and -t_offset to the s and t indexes of (1a) and (1b), and
1 to the t index of (1c) and (1d), the equations become:
p_grad[b,s,t] += p_grad[b,s+1,t-t_offset] * term1(b,s,t) (2a)
px_grad[b,s,t] += p_grad[b,s+1,t-t_offset] * term1(b,s,t) (2b)
p_grad[b,s,t] += p_grad[b,s,t+1] * term2(b,s,t) (2c)
py_grad[b,s,t] += p_grad[b,s,t+1] * term2(b,s,t) (2d)
.. and replacing "+=" with "=", we can write:
p_grad[b,s,t] = p_grad[b,s+1,t-t_offset] * term1(b,s,t) + (3a)
p_grad[b,s,t+1] * term2(b,s,t)
px_grad[b,s,t] = p_grad[b,s+1,t-t_offset] * term1(b,s,t) (3b)
py_grad[b,s,t] = p_grad[b,s,t+1] * term2(b,s,t) (3c)
Writing the definitions of term1 and term2 in a more convenient way:
term1(b,s,t) = exp(p[b,s,t] + px[b,s,t] - p[b,s+1,t-t_offset]) (4a)
term2(b,s,t) = exp(p[b,s,t] + py[b,s,t] - p[b,s,t+1]) (4b)
The backward pass will be slightly different from the forward pass in terms of
how we store and index p (and p_grad), because for writing a particular block
of p_grad, we need context on the top and right instead of the bottom and
left. So there are offsets of 1.
*/
template <typename scalar_t, int BLOCK_SIZE>
__global__ void mutual_information_backward_kernel(
    torch::PackedTensorAccessor32<scalar_t, 3>
        px, // B, S, T + 1 if !modified; B, S, T if modified.
    torch::PackedTensorAccessor32<scalar_t, 3> py, // B, S + 1, T.
    // B, S + 1, T + 1. Produced in forward pass.
    torch::PackedTensorAccessor32<scalar_t, 3> p,
    // [B]. This is an input.
    torch::PackedTensorAccessor32<scalar_t, 1> ans_grad,
    torch::PackedTensorAccessor32<scalar_t, 3>
        p_grad, // B, S + 1, T + 1 if !modified; B, S, T if modified.
    torch::PackedTensorAccessor32<scalar_t, 3> px_grad, // B, S, T + 1.
    torch::PackedTensorAccessor32<scalar_t, 3> py_grad, // B, S + 1, T.
    // B, 4; or 0, 0 if boundaries are the defaults (0, 0, S, T)
    torch::PackedTensorAccessor32<int64_t, 2> boundary,
    int iter, // This kernel is sequentially called with 'iter' = num_iters
              // - 1, num_iters - 2, .. 0, where num_iters can be taken to
              // be any sufficiently large number but will actually be:
              // num_s_blocks + num_t_blocks - 1 where num_s_blocks = S /
              // BLOCK_SIZE + 1 and num_t_blocks = T / BLOCK_SIZE + 1
    bool overwrite_ans_grad) { // If overwrite_ans_grad == true, this function
                               // will overwrite ans_grad with a value which,
                               // if everything is working correctly, should be
                               // identical or very close to the value of
                               // ans_grad that was passed in.
  const int B = px.size(0), S = px.size(1), T = py.size(2);
  const bool modified = (px.size(2) == T);
  const int neg_t_offset = (modified ? 1 : 0);
  // For statements that are the same as the forward pass, we are omitting some
  // comments. We'll focus, in the comments, on differences from the forward
  // pass.
  const int num_s_blocks = S / BLOCK_SIZE + 1,
            // num_t_blocks = T / BLOCK_SIZE + 1,
      num_blocks_this_iter = min(iter + 1, num_s_blocks);
  // px_buf and py_buf are used temporarily to store the px and py values,
  // but then modified to store the "xderiv" and "yderiv" values defined
  // in (eq. 5) and (eq. 6) above. For out-of-range values, we'll write 0.0
  // here.
  // Initially (before xderiv/yderiv are written):
  //   px_buf[s][t] contains px[s+s_block_begin][t+t_block_begin];
  //   py_buf[s][t] contains py[s+s_block_begin][t+t_block_begin].
  // Later (see eq. 4 and eq. 5):
  //   px_buf[s][t] contains term1(b,ss,tt) ==
  //      exp(p[b][ss][tt] + px[b][ss][tt] - p[b][ss + 1][tt-t_offset]),
  //   py_buf[s][t] contains term2(b,ss,tt) ==
  //      exp(p[b][ss][tt] + py[b][ss][tt] - p[b][ss][tt + 1]),
  //   where ss == s + s_block_begin, tt = t + t_block_begin.
  // Unlike in the forward code, there is no offset of 1 in the indexes.
  __shared__ scalar_t px_buf[BLOCK_SIZE][BLOCK_SIZE],
      py_buf[BLOCK_SIZE][BLOCK_SIZE];
  // p_buf is initially used to store p, and then (after we are done putting
  // term1 and term2 into px_buf and py_buf) it is repurposed to store
  // p_grad.
  //
  // Unlike in the forward pass, p_buf has the same numbering as px_buf and
  // py_buf, it's not offset by 1: e.g., for the origin block, p_buf[0][0]
  // refers to p[0][0] and not p[-1][-1]. The p_buf block is larger by 1 than
  // the block for px_buf and py_buf; unlike in the forward pass, we store
  // context on the top and right, not the bottom and left, i.e. the elements at
  // (one past the largest indexes in the block).
  //
  // For out-of-range elements of p_buf, we'll put zero.
  __shared__ scalar_t p_buf[BLOCK_SIZE + 1][BLOCK_SIZE + 1];
  // boundary_buf will be used to store the b'th row of `boundary` if we have
  // boundary information supplied; or (0, 0, S, T) if not.
  __shared__ int64_t boundary_buf[4];
  if (threadIdx.x == 0) {
    boundary_buf[0] = 0;
    boundary_buf[1] = 0;
    boundary_buf[2] = S;
    boundary_buf[3] = T;
  }
  // batch_block_iter iterates over both batch elements (index b), and block
  // indexes in the range [0..num_blocks_this_iter-1]. The order here
  // doesn't matter, since there are no interdependencies between these
  // blocks (they are on a diagonal).
  for (int batch_block_iter = blockIdx.x;
       batch_block_iter < B * num_blocks_this_iter;
       batch_block_iter += gridDim.x) {
    int block = batch_block_iter / B, b = batch_block_iter % B;
    int s_block_begin = block * BLOCK_SIZE,
        t_block_begin = (iter - block) * BLOCK_SIZE;
    if (threadIdx.x < 4)
      boundary_buf[threadIdx.x] = boundary[b][threadIdx.x];
    __syncthreads();
    int s_begin = boundary_buf[0], t_begin = boundary_buf[1],
        s_end = boundary_buf[2], t_end = boundary_buf[3];
    s_block_begin += s_begin;
    t_block_begin += t_begin;
    // block_S and block_T are the actual sizes of this block, no greater than
    // (BLOCK_SIZE, BLOCK_SIZE) but possibly less than that if we are towards
    // the end of the sequence.
    // The last element of the output matrix p_grad we write is (s_end, t_end),
    // i.e. the one-past-the-end index of p_grad is (s_end + 1, t_end + 1).
    int block_S = min(BLOCK_SIZE, s_end + 1 - s_block_begin),
        block_T = min(BLOCK_SIZE, t_end + 1 - t_block_begin);
    if (block_S <= 0 || block_T <= 0)
      continue;
    // Load px_buf and py_buf. At this point we just set them to the px and py
    // for this block.
    for (int i = threadIdx.x; i < BLOCK_SIZE * BLOCK_SIZE; i += blockDim.x) {
      int s_in_block = i / BLOCK_SIZE, t_in_block = i % BLOCK_SIZE,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      // We let px and py default to -infinity if they are out of range, which
      // will cause xderiv and yderiv for out-of-range values to be zero, and
      // cause correct behavior in edge cases (for the top and right blocks).
      // The issue is that p and p_grad are of larger size than px and py.
      scalar_t this_px = -INFINITY;
      if (s < s_end && t <= t_end)
        this_px = px[b][s][t];
      px_buf[s_in_block][t_in_block] = this_px;
      scalar_t this_py = -INFINITY;
      if (s <= s_end && t < t_end)
        this_py = py[b][s][t];
      py_buf[s_in_block][t_in_block] = this_py;
    }
    __syncthreads();
    // load p.
    for (int i = threadIdx.x; i < (BLOCK_SIZE + 1) * (BLOCK_SIZE + 1);
         i += blockDim.x) {
      int s_in_block = i / (BLOCK_SIZE + 1), t_in_block = i % (BLOCK_SIZE + 1),
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      // Setting 0.0 for out-of-bounds elements of p, together with setting
      // -INFINITY for out-of-bounds elements of px_buf and py_buf, will
      // ensure that we do the right thing in top and right edge cases,
      // i.e. that no derivatives will be propagated from out-of-bounds points
      // because the corresponding xderiv and yderiv values will be zero.
      scalar_t this_p = 0.0;
      if (s <= s_end && t <= t_end)
        this_p = p[b][s][t];
      // if this_p is -inf, replace with large finite negative value, to avoid
      // NaN's below.
      // TODO: use a value that would work correctly in half precision
      if (this_p < -1.0e+30)
        this_p = -1.0e+30;
      p_buf[s_in_block][t_in_block] = this_p;
    }
    __syncthreads();
    // Set term1 and term2; see equations (4a) and (4b) above.
    for (int i = threadIdx.x; i < BLOCK_SIZE * BLOCK_SIZE; i += blockDim.x) {
      // We can apply this formula to the entire block even if we are processing
      // a partial block; we have ensured that x_buf and y_buf contain
      // -infinity, and p contains 0, for out-of-range elements, so we'll get
      // x_buf and y_buf containing 0 after applying the following formulas.
      int s = i / BLOCK_SIZE, t = i % BLOCK_SIZE;
      // Mathematically the following is doing:
      //  term1(b,s,t) = exp(p[b,s,t] + px[b,s,t] - p[b,s+1,t-t_offset])   (4a)
      //  (with an offset on the s and t indexes)
      // Use safe_exp() not exp(), as we could have (-inf) - (-inf) = nan, want
      // any finite number in this case as derivs would be zero.
      // Also want -inf->zero.
      px_buf[s][t] =
          safe_exp(p_buf[s][t] + px_buf[s][t] - p_buf[s + 1][t + neg_t_offset]);
      // Mathematically the following is doing:
      //  term2(b,s,t) = exp(p[b,s,t] + py[b,s,t] - p[b,s,t+1])      (4b)
      //  (with an offset on the s and t indexes)
      py_buf[s][t] = safe_exp(p_buf[s][t] + py_buf[s][t] - p_buf[s][t + 1]);
    }
    __syncthreads();
    // Load p_grad for the top and right elements in p_buf: i.e. for elements
    // p_buf[s][t] where s == block_S (exclusive-or) t == block_T.
    // These are the p_grad values computed by previous instances of this kernel
    // If this is one of the top or right blocks, some or all of the p_grad
    // values we'd be reading here will be out of range, and we use zeros
    // to ensure no gradient gets propagated from those positions.
    if (threadIdx.x <= block_S) {
      int s_in_block = threadIdx.x, t_in_block = block_T,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      p_buf[s_in_block][t_in_block] =
          (s <= s_end && t <= t_end ? p_grad[b][s][t] : 0.0);
    } else if (static_cast<unsigned int>(static_cast<int>(threadIdx.x) - 64) <
               static_cast<unsigned int>(block_T)) {
      // casting to unsigned before the comparison tests for both negative and
      // out-of-range values of (int)threadIdx.x - 64.
      int s_in_block = block_S, t_in_block = static_cast<int>(threadIdx.x) - 64,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      p_buf[s_in_block][t_in_block] =
          (s <= s_end && t <= t_end ? p_grad[b][s][t] : 0.0);
    }
    __syncthreads();
    // The highest-numbered value in p_buf that we need (corresponding,
    // of course, to p_grad), is:
    //   p_buf[block_S - 1][block_T - 1],
    // and the inner iteration number (i) on which we set this is the sum of
    // these indexes, i.e. (block_S - 1) + (block_T - 1).
    bool is_final_block = (s_block_begin + block_S == s_end + 1 &&
                           t_block_begin + block_T == t_end + 1);
    int first_iter = block_S + block_T - 2;
    if (is_final_block) {
      // The following statement corresponds to:
      //   p_grad[b][s_end][t_end] = ans_grad[b]
      // Normally this element of p_buf would be set by the first iteration of
      // the loop below, so if it's set this way we have to decrement first_iter
      // to prevent it from being overwritten.
      p_buf[block_S - 1][block_T - 1] = ans_grad[b];
      --first_iter;
    }
    {
      int s = threadIdx.x;
      // Anti-diagonal wavefront, run in reverse order relative to the
      // forward pass; only the first warp participates.
      for (int i = first_iter; i >= 0; --i) {
        __syncwarp();
        int t = i - s;
        if (s < block_S &&
            static_cast<unsigned int>(t) < static_cast<unsigned int>(block_T)) {
          // The following statement is really operating on the gradients;
          // it corresponds, with offsets of s_block_begin and t_block_begin
          // on the indexes, to equation (3a) above, i.e.:
          //  p_grad[b,s,t] =
          //        p_grad[b,s+1,t-t_offset] * term1(b,s,t) +     (3a)
          //        p_grad[b,s,t+1] * term2(b,s,t)
          p_buf[s][t] = (p_buf[s + 1][t + neg_t_offset] * px_buf[s][t] +
                         p_buf[s][t + 1] * py_buf[s][t]);
        }
      }
    }
    __syncthreads();
    // Write out p_grad, px_grad and py_grad.
    for (int i = threadIdx.x; i < BLOCK_SIZE * BLOCK_SIZE; i += blockDim.x) {
      int s_in_block = i / BLOCK_SIZE, t_in_block = i % BLOCK_SIZE,
          s = s_in_block + s_block_begin, t = t_in_block + t_block_begin;
      // s_end and t_end are the one-past-the-end of the (x,y) sequences, but
      // the one-past-the-end element of p_grad would be (s_end + 1, t_end + 1).
      if (t <= t_end && s <= s_end) {
        p_grad[b][s][t] = p_buf[s_in_block][t_in_block];
        if (s < s_end && t <= t_end - neg_t_offset) {
          // write px_grad, which is of shape [B][S][T + 1] if !modified,
          // [B][S][T] if modified. the condition "t <= t_end - neg_t_offset"
          // becomes "t <= t_end" if !modified, and "t <= t_end - 1" if
          // modified, keeping us within the bounds of px_grad.
          // From (eq. 3b):
          //  px_grad[b,s,t] = p_grad[b,s+1,t-t_offset] * term1(b,s,t)
          px_grad[b][s][t] = (p_buf[s_in_block + 1][t_in_block + neg_t_offset] *
                              px_buf[s_in_block][t_in_block]);
        }
        if (t < t_end) { // write py_grad, which is of shape [B][S + 1][T]
          // from (eq. 3c):
          //  py_grad[b,s,t] = p_grad[b,s,t+1] * term2(b,s,t)
          py_grad[b][s][t] = (p_buf[s_in_block][t_in_block + 1] *
                              py_buf[s_in_block][t_in_block]);
        }
      }
    }
    // p_buf[0][0] now holds the gradient w.r.t. p[b][s_begin][t_begin], which
    // should equal ans_grad[b] if the computation is self-consistent.
    if (threadIdx.x == 0 && s_block_begin == s_begin &&
        t_block_begin == t_begin && overwrite_ans_grad)
      ans_grad[b] = p_buf[0][0];
  }
}
// forward of mutual_information. See """... """ comment of
// `mutual_information` in mutual_information.py for documentation of the
// behavior of this function.
// Forward of mutual_information: fills `p` (shape [B][S+1][T+1]) with the
// dynamic-programming recursion over px/py and returns ans[b] =
// p[b][s_end][t_end]. See the """ ... """ comment of `mutual_information`
// in mutual_information.py for full documentation of the behavior.
//
// px: [B][S][T+1] (!modified) or [B][S][T] (modified); py: [B][S+1][T];
// opt_boundary: optional [B][4] int64 (s_begin, t_begin, s_end, t_end).
torch::Tensor MutualInformationCuda(torch::Tensor px, torch::Tensor py,
                                    torch::optional<torch::Tensor> opt_boundary,
                                    torch::Tensor p) {
  TORCH_CHECK(px.dim() == 3, "px must be 3-dimensional");
  TORCH_CHECK(py.dim() == 3, "py must be 3-dimensional.");
  TORCH_CHECK(p.dim() == 3, "p must be 3-dimensional.");
  TORCH_CHECK(px.device().is_cuda() && py.device().is_cuda() &&
                  p.device().is_cuda(),
              "inputs must be CUDA tensors");
  auto scalar_t = px.scalar_type();
  auto opts = torch::TensorOptions().dtype(scalar_t).device(px.device());
  const int B = px.size(0), S = px.size(1), T = py.size(2);
  // px.size(2) == T means "modified" topology; T + 1 is the standard one.
  TORCH_CHECK(px.size(2) == T || px.size(2) == T + 1);
  TORCH_CHECK(py.size(0) == B && py.size(1) == S + 1 && py.size(2) == T);
  TORCH_CHECK(p.size(0) == B && p.size(1) == S + 1 && p.size(2) == T + 1);
  // Default boundaries cover the full sequences: (0, 0, S, T) for every b.
  auto boundary = opt_boundary.value_or(
      torch::tensor({0, 0, S, T},
                    torch::dtype(torch::kInt64).device(px.device()))
          .reshape({1, 4})
          .expand({B, 4}));
  TORCH_CHECK(boundary.size(0) == B && boundary.size(1) == 4);
  TORCH_CHECK(boundary.device().is_cuda() && boundary.dtype() == torch::kInt64);
  torch::Tensor ans = torch::empty({B}, opts);
  // num_threads and num_blocks and BLOCK_SIZE can be tuned.
  // (however, num_threads may not be less than 128).
  const int num_threads = 128, num_blocks = 256, BLOCK_SIZE = 32;
  // The blocks cover the 'p' matrix, which is of size (B, S+1, T+1),
  // so dividing by BLOCK_SIZE rounding up we get e.g.
  // (S+1 + BLOCK_SIZE-1) / BLOCK_SIZE == S / BLOCK_SIZE + 1
  const int num_s_blocks = S / BLOCK_SIZE + 1,
            num_t_blocks = T / BLOCK_SIZE + 1,
            num_iters = num_s_blocks + num_t_blocks - 1;
  AT_DISPATCH_FLOATING_TYPES(
      px.scalar_type(), "mutual_information_cuda_stub", ([&] {
        // Each `iter` processes one anti-diagonal of blocks; consecutive
        // launches are ordered by the stream, giving the required dependency.
        for (int iter = 0; iter < num_iters; ++iter) {
          mutual_information_kernel<scalar_t, BLOCK_SIZE>
              <<<num_blocks, num_threads>>>(
                  px.packed_accessor32<scalar_t, 3>(),
                  py.packed_accessor32<scalar_t, 3>(),
                  p.packed_accessor32<scalar_t, 3>(),
                  boundary.packed_accessor32<int64_t, 2>(),
                  ans.packed_accessor32<scalar_t, 1>(), iter);
        }
        // Kernel launches are asynchronous and never report errors directly;
        // surface launch-configuration failures here instead of silently
        // returning garbage.
        cudaError_t err = cudaGetLastError();
        TORCH_CHECK(err == cudaSuccess, "mutual_information_kernel launch failed: ",
                    cudaGetErrorString(err));
      }));
  return ans;
}
// backward of mutual_information; returns (grad_px, grad_py)
// If overwrite_ans_grad == true, will overwrite ans_grad with a value which
// should be identical to the original ans_grad if the computation worked
// as it should.
// Backward of mutual_information; returns {px_grad, py_grad}.
// If overwrite_ans_grad == true, ans_grad will be overwritten with a value
// which should be (near-)identical to the original ans_grad if the
// computation worked as it should — useful as a self-consistency check.
std::vector<torch::Tensor>
MutualInformationBackwardCuda(torch::Tensor px, torch::Tensor py,
                              torch::optional<torch::Tensor> opt_boundary,
                              torch::Tensor p, torch::Tensor ans_grad,
                              bool overwrite_ans_grad) {
  TORCH_CHECK(px.dim() == 3, "px must be 3-dimensional");
  TORCH_CHECK(py.dim() == 3, "py must be 3-dimensional.");
  TORCH_CHECK(p.dim() == 3, "p must be 3-dimensional.");
  TORCH_CHECK(ans_grad.dim() == 1, "ans_grad must be 1-dimensional.");
  // BUG FIX: the message string was previously ANDed into the condition
  // (`... && "inputs must be CUDA tensors"`), so TORCH_CHECK received no
  // message; it must be a separate argument.
  TORCH_CHECK(px.device().is_cuda() && py.device().is_cuda() &&
                  p.device().is_cuda() && ans_grad.device().is_cuda(),
              "inputs must be CUDA tensors");
  auto scalar_t = px.scalar_type();
  auto opts = torch::TensorOptions().dtype(scalar_t).device(px.device());
  const int B = px.size(0), S = px.size(1), T = py.size(2);
  TORCH_CHECK(px.size(2) == T ||
              px.size(2) == T + 1); // modified case || not-modified case
  const bool modified = (px.size(2) == T);
  TORCH_CHECK(py.size(0) == B && py.size(1) == S + 1);
  TORCH_CHECK(p.size(0) == B && p.size(1) == S + 1 && p.size(2) == T + 1);
  // Default boundaries cover the full sequences: (0, 0, S, T) for every b.
  auto boundary = opt_boundary.value_or(
      torch::tensor({0, 0, S, T},
                    torch::dtype(torch::kInt64).device(px.device()))
          .reshape({1, 4})
          .expand({B, 4}));
  TORCH_CHECK(boundary.size(0) == B && boundary.size(1) == 4);
  TORCH_CHECK(boundary.device().is_cuda() && boundary.dtype() == torch::kInt64);
  TORCH_CHECK(ans_grad.size(0) == B);
  bool has_boundary = opt_boundary.has_value();
  // px/px_grad have last dim T when modified, T + 1 otherwise.
  int T1 = T + (modified ? 0 : 1);
  // With user-supplied boundaries, regions outside [s_begin,s_end] x
  // [t_begin,t_end] are never written by the kernel, so zero-initialize.
  torch::Tensor p_grad = torch::empty({B, S + 1, T + 1}, opts),
                px_grad = (has_boundary ? torch::zeros({B, S, T1}, opts)
                                        : torch::empty({B, S, T1}, opts)),
                py_grad = (has_boundary ? torch::zeros({B, S + 1, T}, opts)
                                        : torch::empty({B, S + 1, T}, opts));
  // num_threads and num_blocks and BLOCK_SIZE can be tuned.
  // (however, num_threads may not be less than 128).
  const int num_threads = 128, num_blocks = 256, BLOCK_SIZE = 32;
  // The blocks cover the 'p' matrix, which is of size (B, S+1, T+1),
  // so dividing by BLOCK_SIZE rounding up we get e.g.
  // (S+1 + BLOCK_SIZE-1) / BLOCK_SIZE == S / BLOCK_SIZE + 1
  const int num_s_blocks = S / BLOCK_SIZE + 1,
            num_t_blocks = T / BLOCK_SIZE + 1,
            num_iters = num_s_blocks + num_t_blocks - 1;
  AT_DISPATCH_FLOATING_TYPES(
      px.scalar_type(), "mutual_information_backward_stub", ([&] {
        // Backward sweeps the anti-diagonals in reverse order.
        for (int iter = num_iters - 1; iter >= 0; --iter) {
          mutual_information_backward_kernel<scalar_t, BLOCK_SIZE>
              <<<num_blocks, num_threads>>>(
                  px.packed_accessor32<scalar_t, 3>(),
                  py.packed_accessor32<scalar_t, 3>(),
                  p.packed_accessor32<scalar_t, 3>(),
                  ans_grad.packed_accessor32<scalar_t, 1>(),
                  p_grad.packed_accessor32<scalar_t, 3>(),
                  px_grad.packed_accessor32<scalar_t, 3>(),
                  py_grad.packed_accessor32<scalar_t, 3>(),
                  boundary.packed_accessor32<int64_t, 2>(), iter,
                  overwrite_ans_grad);
        }
        // Kernel launches are asynchronous and never report errors directly;
        // surface launch-configuration failures here.
        cudaError_t err = cudaGetLastError();
        TORCH_CHECK(err == cudaSuccess,
                    "mutual_information_backward_kernel launch failed: ",
                    cudaGetErrorString(err));
      }));
  return std::vector<torch::Tensor>({px_grad, py_grad});
}
} // namespace fast_rnnt
#ifndef _BISECT_UTIL_H_
#define _BISECT_UTIL_H_
// includes, project
#include <tests/Eigenvalues/config.h>
#include <tests/Eigenvalues/util.h>
////////////////////////////////////////////////////////////////////////////////
//! Compute the next lower power of two of n
//! @param  n  number for which the next lower power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
floorPow2(int n) {

    // A power of two (or zero) shares no set bits with its predecessor,
    // so it can be returned as-is.
    if ((n & (n - 1)) == 0)
        return n;

    // frexp() gives e with n = m * 2^e and 0.5 <= m < 1, hence
    // 2^(e-1) <= n < 2^e: the floor power of two is 1 << (e - 1).
    int e;
    frexp((float)n, &e);
    return 1 << (e - 1);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the next higher power of two of n
//! @param  n  number for which the next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
ceilPow2(int n) {

    // A power of two (or zero) shares no set bits with its predecessor,
    // so it can be returned as-is.
    if ((n & (n - 1)) == 0)
        return n;

    // frexp() gives e with n = m * 2^e and 0.5 <= m < 1, hence
    // 2^(e-1) <= n < 2^e: the ceiling power of two is 1 << e.
    int e;
    frexp((float)n, &e);
    return 1 << e;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute midpoint of interval [\a left, \a right] avoiding overflow if
//! possible
//! @param left left / lower limit of interval
//! @param right right / upper limit of interval
////////////////////////////////////////////////////////////////////////////////
__device__
inline float
computeMidpoint( const float left, const float right) {

    // If both limits have the same sign, step half the interval width from
    // 'left' so the intermediate sum cannot overflow; with mixed signs the
    // plain average is safe (and exact at the sign change).
    return (sign_f(left) == sign_f(right))
               ? left + (right - left) * 0.5f
               : (left + right) * 0.5f;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if interval converged and store appropriately
//! @param addr address where to store the information of the interval
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeInterval( unsigned int addr,
               float* s_left, float* s_right,
               T* s_left_count, T* s_right_count,
               float left, float right,
               S left_count, S right_count,
               float precision )
{
    // The eigenvalue counts are stored unconditionally.
    s_left_count[addr]  = left_count;
    s_right_count[addr] = right_count;

    // Convergence test: interval width at or below the absolute floor
    // MIN_ABS_INTERVAL, or below the relative tolerance derived from the
    // endpoint of larger magnitude.
    const float width     = abs( right - left);
    const float rel_limit = max( abs(left), abs(right)) * precision;

    if( width <= max( MIN_ABS_INTERVAL, rel_limit)) {
        // Converged: collapse the interval onto its midpoint; equal limits
        // later signal "converged" to the subdivision step.
        const float lambda = computeMidpoint( left, right);
        s_left[addr]  = lambda;
        s_right[addr] = lambda;
    }
    else {
        // Not yet converged: keep the current limits.
        s_left[addr]  = left;
        s_right[addr] = right;
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
// Count the eigenvalues of the symmetric tridiagonal matrix (g_d, g_s) that
// are smaller than x via the Sturm-sequence pivot recurrence of an LDL^T
// factorization of (T - x*I): each negative pivot 'delta' contributes one
// eigenvalue below x.
// NOTE(review): this variant loads the whole matrix in a single pass, so it
// appears to assume n <= blockDim.x -- confirm at call sites; larger
// matrices use computeNumSmallerEigenvalsLarge().
__device__
inline unsigned int
computeNumSmallerEigenvals( float* g_d, float* g_s, const unsigned int n,
                            const float x,
                            const unsigned int tid,
                            const unsigned int num_intervals_active,
                            float* s_d, float* s_s,
                            unsigned int converged
                          )
{
    float delta = 1.0f;
    unsigned int count = 0;

    // barrier before reloading s_d / s_s: previous users must be done
    __syncthreads();

    // read data into shared memory
    if( threadIdx.x < n) {
        s_d[threadIdx.x] = *(g_d + threadIdx.x);
        // NOTE(review): thread 0 reads g_s[-1] here -- this relies on the
        // caller passing a superdiagonal pointer with a valid (dummy)
        // element in front of it; verify against the host-side setup.
        s_s[threadIdx.x] = *(g_s + threadIdx.x - 1);
    }
    __syncthreads();

    // perform loop only for active threads
    if(( tid < num_intervals_active) && (0 == converged)) {
        // perform (optimized) Gaussian elimination to determine the number
        // of eigenvalues that are smaller than n
        for( unsigned int k = 0; k < n; ++k) {
            delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
            count += (delta < 0) ? 1 : 0;
        }
    } // end if thread currently processing an interval

    return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identified (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
// Same Sturm-sequence eigenvalue count as computeNumSmallerEigenvals(), but
// for matrices larger than one thread block: the diagonal / superdiagonal
// are streamed through shared memory in chunks of blockDim.x elements, and
// the pivot recurrence is carried across chunks in 'delta'.
__device__
inline unsigned int
computeNumSmallerEigenvalsLarge( float* g_d, float* g_s, const unsigned int n,
                                 const float x,
                                 const unsigned int tid,
                                 const unsigned int num_intervals_active,
                                 float* s_d, float* s_s,
                                 unsigned int converged
                               )
{
    float delta = 1.0f;
    unsigned int count = 0;
    // number of matrix elements not yet processed
    unsigned int rem = n;

    // do until whole diagonal and superdiagonal has been loaded and processed
    for( unsigned int i = 0; i < n; i += blockDim.x) {

        // barrier: previous chunk must be fully consumed before overwrite
        __syncthreads();

        // read new chunk of data into shared memory
        if( (i + threadIdx.x) < n) {
            s_d[threadIdx.x] = *(g_d + i + threadIdx.x);
            // NOTE(review): for i == 0, thread 0 reads g_s[-1]; this relies
            // on the caller providing a valid (dummy) element in front of
            // the superdiagonal array -- confirm against the setup code.
            s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1);
        }
        __syncthreads();

        if( tid < num_intervals_active) {
            // perform (optimized) Gaussian elimination to determine the number
            // of eigenvalues that are smaller than n
            // (min(rem, blockDim.x) limits the final, partial chunk)
            for( unsigned int k = 0; k < min(rem,blockDim.x); ++k) {
                delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
                // delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta;
                count += (delta < 0) ? 1 : 0;
            }
        } // end if thread currently processing an interval

        // NOTE(review): on the last partial chunk this unsigned subtraction
        // wraps around; harmless because the loop condition (i < n)
        // terminates before rem is used again.
        rem -= blockDim.x;
    }

    return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Store all non-empty intervals resulting from the subdivision of the interval
//! currently processed by the thread
//! @param addr base address for storing intervals
//! @param num_threads_active number of threads / intervals in current sweep
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param s_compaction_list_exc helper array for stream compaction,
//! s_compaction_list_exc[tid] = 1 when the
//! thread generated two child intervals
//! @param  is_active_second  marks if the thread has a second non-empty child interval
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeNonEmptyIntervals( unsigned int addr,
                        const unsigned int num_threads_active,
                        float* s_left, float* s_right,
                        T* s_left_count, T* s_right_count,
                        float left, float mid, float right,
                        const S left_count,
                        const S mid_count,
                        const S right_count,
                        float precision,
                        unsigned int& compact_second_chunk,
                        T* s_compaction_list_exc,
                        unsigned int &is_active_second )
{
    // A child interval is non-empty iff the eigenvalue counts at its two
    // endpoints differ.
    const bool left_child_valid  = (left_count != mid_count);
    const bool right_child_valid = (mid_count != right_count);

    if( left_child_valid && right_child_valid) {
        // Both halves contain eigenvalues: keep the left child in place and
        // flag the right child for stream compaction into the second chunk.
        storeInterval( addr, s_left, s_right, s_left_count, s_right_count,
                       left, mid, left_count, mid_count, precision);
        is_active_second = 1;
        s_compaction_list_exc[threadIdx.x] = 1;
        compact_second_chunk = 1;
    }
    else {
        // At most one half is populated, so no second child is generated.
        is_active_second = 0;
        s_compaction_list_exc[threadIdx.x] = 0;

        // Store whichever child interval is non-empty.
        if( left_child_valid) {
            storeInterval( addr, s_left, s_right, s_left_count, s_right_count,
                           left, mid, left_count, mid_count, precision);
        }
        else {
            storeInterval( addr, s_left, s_right, s_left_count, s_right_count,
                           mid, right, mid_count, right_count, precision);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Create indices for compaction, that is process \a s_compaction_list_exc
//! which is 1 for intervals that generated a second child and 0 otherwise
//! and create for each of the non-zero elements the index where the new
//! interval belongs to in a compact representation of all generated second
//! childs
//! @param s_compaction_list_exc list containing the flags which threads
//! generated two childs
//! @param num_threads_compaction number of threads to employ for compaction
////////////////////////////////////////////////////////////////////////////////
// Build compaction indices in place over s_compaction_list_exc, which holds
// 1 for threads that generated a second child and 0 otherwise.  After the
// two sweeps, each flagged thread can read the slot index of its compacted
// second child from the list (scan-style prefix sums).
// NOTE(review): the tree traversal appears to assume num_threads_compaction
// is a power of two -- confirm at call sites.
template<class T>
__device__
void
createIndicesCompaction( T* s_compaction_list_exc,
                         unsigned int num_threads_compaction ) {

    unsigned int offset = 1;
    const unsigned int tid = threadIdx.x;

    // up-sweep: build partial sums in a binary tree over the list
    // higher levels of scan tree
    for(int d = (num_threads_compaction >> 1); d > 0; d >>= 1) {

        __syncthreads();

        if (tid < d) {
            // indices of the two partial sums combined at this level
            unsigned int ai = offset*(2*tid+1);
            unsigned int bi = offset*(2*tid+2);
            ai = ai > 0 ? ai - 1 : 0;
            bi = bi > 0 ? bi - 1 : 0;
            s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
                                        + s_compaction_list_exc[ai];
        }

        offset <<= 1;
    }

    // down-sweep: distribute the partial sums so every element ends up with
    // its prefix sum
    // traverse down tree: first down to level 2 across
    for( int d = 2; d < num_threads_compaction; d <<= 1) {

        offset >>= 1;
        __syncthreads();

        if (tid < (d-1)) {
            unsigned int ai = offset*(tid+1) - 1;
            unsigned int bi = ai + (offset >> 1);
            s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
                                        + s_compaction_list_exc[ai];
        }
    }

    // all threads leave with a fully built index list
    __syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
//! Perform stream compaction for second child intervals
//! @param  s_left  shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param mid midpoint of current interval (left of new interval)
//! @param right upper limit of interval
//! @param mid_count eigenvalues less than \a mid
//! @param s_compaction_list list containing the indices where the data has
//! to be stored
//! @param num_threads_active number of active threads / intervals
//! @param  is_active_second  marks if the thread has a second non-empty child interval
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
compactIntervals( float* s_left, float* s_right,
                  T* s_left_count, T* s_right_count,
                  float mid, float right,
                  unsigned int mid_count, unsigned int right_count,
                  T* s_compaction_list,
                  unsigned int num_threads_active,
                  unsigned int is_active_second )
{
    const unsigned int tid = threadIdx.x;

    // Inactive threads and threads without a live second child do nothing.
    // (No barrier below, so the early return is safe.)
    if( tid >= num_threads_active || is_active_second != 1)
        return;

    // Second children are appended after the first num_threads_active
    // intervals, at the slot computed by the compaction scan.
    const unsigned int addr_w = num_threads_active + s_compaction_list[tid];

    // The second child spans [mid, right].
    s_left[addr_w]        = mid;
    s_right[addr_w]       = right;
    s_left_count[addr_w]  = mid_count;
    s_right_count[addr_w] = right_count;
}
///////////////////////////////////////////////////////////////////////////////
//! Store intervals that have already converged (w.r.t. the desired precision),
//! duplicating intervals that contain multiple eigenvalues
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval (updated if split is necessary)
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list_exc helper array for stream compaction, updated
//! at tid if split is necessary
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param num_threads_active number of active threads / intervals
///////////////////////////////////////////////////////////////////////////////
template<class T, class S>
__device__
void
storeIntervalConverged( float* s_left, float* s_right,
                        T* s_left_count, T* s_right_count,
                        float& left, float& mid, float& right,
                        S& left_count, S& mid_count, S& right_count,
                        T* s_compaction_list_exc,
                        unsigned int& compact_second_chunk,
                        const unsigned int num_threads_active )
{
    const unsigned int tid = threadIdx.x;

    // number of eigenvalues enclosed by the (converged) interval
    const unsigned int multiplicity = right_count - left_count;

    // The interval limits and the left count are re-stored unchanged in
    // either case.
    s_left[tid]       = left;
    s_right[tid]      = right;
    s_left_count[tid] = left_count;

    if( 1 == multiplicity) {
        // Simple eigenvalue: no split; clear the second-child slot.
        s_right_count[tid]                      = right_count;
        s_right_count[tid + num_threads_active] = 0;
        s_compaction_list_exc[tid]              = 0;
    }
    else {
        // Multiple eigenvalue: split so that roughly half the copies fall
        // left of mid; the right half becomes a second child interval.
        mid_count = left_count + (multiplicity >> 1);

        s_right_count[tid] = mid_count;
        mid = left;

        // mark that a second child interval exists
        s_right_count[tid + num_threads_active] = right_count;
        s_compaction_list_exc[tid]              = 1;
        compact_second_chunk = 1;
    }
}
template<class T, class S>
__device__
void
storeIntervalConverged( float* s_left, float* s_right,
                        T* s_left_count, T* s_right_count,
                        float& left, float& mid, float& right,
                        S& left_count, S& mid_count, S& right_count,
                        T* s_compaction_list_exc,
                        unsigned int& compact_second_chunk,
                        const unsigned int num_threads_active,
                        unsigned int &is_active_second)
{
    const unsigned int tid = threadIdx.x;

    // number of eigenvalues enclosed by the (converged) interval
    const unsigned int multiplicity = right_count - left_count;

    // The interval limits and the left count are re-stored unchanged in
    // either case.
    s_left[tid]       = left;
    s_right[tid]      = right;
    s_left_count[tid] = left_count;

    if( 1 == multiplicity) {
        // Simple eigenvalue: no split, no second child.
        s_right_count[tid]         = right_count;
        is_active_second           = 0;
        s_compaction_list_exc[tid] = 0;
    }
    else {
        // Multiple eigenvalue: split so that roughly half the copies fall
        // left of mid; the right half becomes a second child interval.
        mid_count = left_count + (multiplicity >> 1);

        s_right_count[tid] = mid_count;
        mid = left;

        // mark that a second child interval exists
        is_active_second           = 1;
        s_compaction_list_exc[tid] = 1;
        compact_second_chunk = 1;
    }
}
///////////////////////////////////////////////////////////////////////////////
//! Subdivide interval if active and not already converged
//! @param tid id of thread
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param num_threads_active number of active threads in warp
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param all_threads_converged shared memory flag if all threads are
//! converged
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
subdivideActiveInterval( const unsigned int tid,
                         float* s_left, float* s_right,
                         T* s_left_count, T* s_right_count,
                         const unsigned int num_threads_active,
                         float& left, float& right,
                         unsigned int& left_count, unsigned int& right_count,
                         float& mid, unsigned int& all_threads_converged )
{
    // Inactive threads leave all outputs untouched.
    // (No barrier below, so the early return is safe.)
    if( tid >= num_threads_active)
        return;

    // fetch this thread's interval from shared memory
    left        = s_left[tid];
    right       = s_right[tid];
    left_count  = s_left_count[tid];
    right_count = s_right_count[tid];

    if( left != right) {
        // not converged: subdivide at the midpoint
        mid = computeMidpoint( left, right);
        all_threads_converged = 0;
    }
    else if( (right_count - left_count) > 1) {
        // converged, but the interval encloses a multiple eigenvalue and
        // still has to be duplicated in storeIntervalConverged(), so keep
        // the sweep running
        all_threads_converged = 0;
    }
}
#endif // #ifndef _BISECT_UTIL_H_ | the_stack |
// number of samples in the input datamatrix.
// Fixed here to make static shared memory on a device
#define MAXSAMPLE 200
// Absolute Pearson correlation of genes k and i, computed separately over
// the samples flagged '1' in 'sample' (wid of them) and over the
// complementary samples (D - wid of them).  The two expression rows are
// also cached into the caller-provided scratch buffers genekj / geneij.
// Returns {r, n_r}: |correlation| inside the sample set and outside it.
// NOTE(review): assumes 0 < wid < D and non-constant rows in both groups,
// otherwise the divisions below produce inf/nan -- callers gate on
// wid > minsample; confirm that is sufficient.
__device__
struct pair_r compute(
  float *genekj,
  float *geneij,
  const char *sample,
  int wid,int k,int i,int D,
  const float *gene)
{
  struct pair_r rval = {0.f, 0.f};

  // group sums / means; "_c" denotes the complement group ('0' samples)
  float mx = 0.f, my = 0.f, mx_c = 0.f, my_c = 0.f;
  // centered second moments (sum of squares / cross products)
  float ssx = 0.f, ssy = 0.f, sxy = 0.f;
  float ssx_c = 0.f, ssy_c = 0.f, sxy_c = 0.f;
  int j;

  // First pass: cache both expression rows (row layout: D values followed
  // by the row mean at column D, hence the D+1 stride) and accumulate the
  // per-group sums of both genes.
  for (j = 0; j < D; j++) {
    genekj[j] = gene[k*(D+1)+j];
    geneij[j] = gene[i*(D+1)+j];
    if (sample[j] == '1') {
      mx += genekj[j];
      my += geneij[j];
    }
    else {
      mx_c += genekj[j];
      my_c += geneij[j];
    }
  }
  mx   /= wid;
  mx_c /= (D - wid);
  my   /= wid;
  my_c /= (D - wid);

  // Second pass: squared deviations and cross products per group.
  for (j = 0; j < D; j++) {
    if (sample[j] == '1') {
      ssx += (mx - genekj[j]) * (mx - genekj[j]);
      sxy += (mx - genekj[j]) * (my - geneij[j]);
      ssy += (my - geneij[j]) * (my - geneij[j]);
    }
    else {
      ssx_c += (mx_c - genekj[j]) * (mx_c - genekj[j]);
      sxy_c += (mx_c - genekj[j]) * (my_c - geneij[j]);
      ssy_c += (my_c - geneij[j]) * (my_c - geneij[j]);
    }
  }

  // Correlation magnitude inside the set (r) and in its complement (n_r).
  rval.r   = fabsf(sxy   / (sqrtf(ssx)   * sqrtf(ssy)));
  rval.n_r = fabsf(sxy_c / (sqrtf(ssx_c) * sqrtf(ssy_c)));
  return rval;
}
// One thread per seed gene k: pair k with every gene i > k, classify the
// samples into three correlation classes, and greedily grow ("augment") the
// largest condition-dependent bicluster seeded by that pair.
// Outputs (indexed by k): maxbc_* hold the best bicluster found; tmpbc_*
// are per-seed scratch buffers in global memory.
// NOTE(review): s_genekj / s_geneij / s_vect are per-block shared scratch
// with no synchronization between threads, so this kernel relies on being
// launched with one thread per block (as the host code does) -- confirm
// before changing the launch configuration.  Requires D <= MAXSAMPLE.
__global__ void compute_bicluster(
  const float *__restrict__ gene,
  const int n,
  const int maxbcn,
  const int D,
  const float thr,
  char *__restrict__ maxbc_sample,
  char *__restrict__ maxbc_data,
  float *__restrict__ maxbc_score,
  int *__restrict__ maxbc_datacount,
  int *__restrict__ maxbc_samplecount,
  char *__restrict__ tmpbc_sample,
  char *__restrict__ tmpbc_data)
{
  __shared__ float s_genekj[MAXSAMPLE];
  __shared__ float s_geneij[MAXSAMPLE];
  // three rows of MAXSAMPLE sample flags, one per correlation class
  __shared__ char s_vect[3*MAXSAMPLE];

  int k = blockIdx.x*blockDim.x + threadIdx.x;
  if (k < maxbcn) {
    float jcc, mean_k, mean_i;
    int i, j, l, vl, wid, wid_0, wid_1, wid_2, l_i, t_tot, t_dif;
    int dif, tot;
    struct pair_r rval;
    int tmpbc_datacount, tmpbc_samplecount;
    float genekj, geneij;

    // Initialize all scalar outputs so the host never reads garbage when
    // no bicluster is found for gene k (samplecount was left
    // uninitialized in the original code).
    maxbc_score[k] = 1.f;
    maxbc_datacount[k] = 0;
    maxbc_samplecount[k] = 0;

    // mean expression of gene k (precomputed, stored in column D)
    mean_k = gene[k*(D+1)+D];

    for (i = k+1; i < n; i++) // pair (k, i)
    {
      // mean expression of gene i
      mean_i = gene[i*(D+1)+D];

      wid_0 = 0; wid_1 = 0; wid_2 = 0;
      for (j = 0; j < D; j++)
      {
        genekj = gene[k*(D+1)+j];
        geneij = gene[i*(D+1)+j];

        // Classify sample j into one of three correlation classes.
        // BUG FIX: rows of s_vect are MAXSAMPLE elements apart -- matching
        // the reads below (s_vect + vl*MAXSAMPLE) -- not 3 apart as in the
        // original stride-3 writes, which overlapped the rows and left the
        // read positions unwritten.
        if ((genekj - mean_k) >= 0 && (geneij - mean_i) >= 0)
        { // i and k up-regulated: positive correlation
          s_vect[0*MAXSAMPLE+j] = '1';
          s_vect[1*MAXSAMPLE+j] = '0';
          s_vect[2*MAXSAMPLE+j] = '0';
          wid_0++;
        }
        else if ((genekj - mean_k) < 0 && (geneij - mean_i) < 0)
        { // i and k down-regulated: positive correlation
          s_vect[0*MAXSAMPLE+j] = '0';
          s_vect[1*MAXSAMPLE+j] = '1';
          s_vect[2*MAXSAMPLE+j] = '0';
          wid_1++;
        }
        else if ((genekj - mean_k) * (geneij - mean_i) < 0)
        { // one up-regulated, the other down-regulated: negative correlation
          s_vect[0*MAXSAMPLE+j] = '0';
          s_vect[1*MAXSAMPLE+j] = '0';
          s_vect[2*MAXSAMPLE+j] = '1';
          wid_2++;
        }
        else
        { // boundary case (one deviation exactly zero, the other negative):
          // the sample belongs to no class; clear all rows so stale flags
          // from a previous pair cannot leak into this iteration.
          s_vect[0*MAXSAMPLE+j] = '0';
          s_vect[1*MAXSAMPLE+j] = '0';
          s_vect[2*MAXSAMPLE+j] = '0';
        }
      }

      for (vl = 0; vl < 3; vl++)
      {
        dif = 0; tot = 0;

        // pick the sample-set width of class vl (proper else-if chain; the
        // original had a dangling `if (vl==2)` after the chain)
        if (vl == 0)
          wid = wid_0;
        else if (vl == 1)
          wid = wid_1;
        else
          wid = wid_2;

        // minimum samples required to form a bicluster module; default
        // minimum set to 10 in ccs.h
        if (wid > minsample) {
          rval = compute(s_genekj, s_geneij, s_vect + vl*MAXSAMPLE, wid, k, i, D, gene);
        }
        else {
          continue;
        }

        if (rval.r > thr)
        {
          tot++;
          if (rval.n_r > thr)
            dif++;

          // seed the candidate bicluster with genes k and i over class vl
          for (j = 0; j < D; j++)
            tmpbc_sample[k*D+j] = s_vect[vl*MAXSAMPLE+j];
          for (j = 0; j < n; j++)
            tmpbc_data[k*n+j] = '0';
          tmpbc_data[k*n+k] = '1';
          tmpbc_data[k*n+i] = '1';
          tmpbc_datacount = 2;
          tmpbc_samplecount = wid;

          // bicluster augmentation: add gene l only if it correlates above
          // thr with every gene already in the module over these samples
          for (l = 0; l < n; l++) {
            if (l != i && l != k) {
              t_tot = 0; t_dif = 0;
              for (l_i = 0; l_i < n; l_i++) {
                if (tmpbc_data[k*n+l_i] == '1') {
                  rval = compute(s_genekj, s_geneij, s_vect + vl*MAXSAMPLE, wid, l, l_i, D, gene);
                  if (rval.r > thr)
                    t_tot += 1;
                  else {
                    t_tot = 0;
                    break;
                  }
                  if (rval.n_r > thr)
                    t_dif += 1;
                }
              }
              if (t_tot > 0) {
                tmpbc_data[k*n+l] = '1';
                tmpbc_datacount += 1;
                tot += t_tot; dif += t_dif;
              }
            }
          } // end of augmentation

          // Compute Jaccard score
          if (tot > 0)
            jcc = (float)dif/tot;
          else
            jcc = 1.f;

          /* Select bicluster candidate as the largest (maxbc[k].datacount<tmpbc.datacount)
             of all condition dependent (jaccard score <0.01) bicluster for k. Minimum number of gene
             for a bicluster is set at 10. See the mingene at ccs.h */
          if (jcc < 0.01f && maxbc_datacount[k] < tmpbc_datacount && tmpbc_datacount > mingene)
          {
            maxbc_score[k] = jcc;
            for (j = 0; j < n; j++)
              maxbc_data[k*n+j] = tmpbc_data[k*n+j];
            for (j = 0; j < D; j++)
              maxbc_sample[k*D+j] = tmpbc_sample[k*D+j];
            maxbc_datacount[k] = tmpbc_datacount;
            maxbc_samplecount[k] = tmpbc_samplecount;
          }
        } // end of r > thr condition
      } // end of loop for vl
    } // end of i loop
  }
}
// Driver: parse options, read the expression matrix, run the bicluster
// kernel on the device (100 launches for benchmarking), copy the results
// back and print them.  Returns 0 on success; exits with 1 on bad usage.
int main(int argc, char *argv[])
{
  FILE *in, *out;
  struct gn *gene;
  char **Hd;
  char *infile, *outfile;
  int c, errflag;
  int maxbcn = MAXB;          // maximum number of biclusters to search
  int print_type = 0;         // output format selector
  int i, n, D;                // n = #genes (rows), D = #samples (columns)
  extern char *optarg;
  float thr;                  // correlation threshold theta
  struct bicl *bicluster;
  float overlap = 100.f;      // allowed overlap percentage

  infile = outfile = NULL;
  in = out = NULL;
  errflag = n = D = 0;
  thr = 0.f;

  // ---- command-line parsing ---------------------------------------------
  while ((c = getopt(argc, argv, "ht:m:i:p:o:g:?")) != -1)
  {
    switch(c)
    {
      case 'h': // help
        printUsage();
        exit(0);
      case 't': // threshold value
        thr = atof(optarg);
        break;
      case 'm': // maximum number of bicluster search
        maxbcn = atoi(optarg);
        break;
      case 'g': // allowed overlap percentage
        overlap = atof(optarg);
        break;
      case 'p': // output file format
        print_type = atoi(optarg);
        break;
      case 'i': // the input expression file
        infile = optarg;
        break;
      case 'o': // the output file
        outfile = optarg;
        break;
      case ':': /* -f or -o without operand */
        printf("Option -%c requires an operand\n", optopt);
        errflag++;
        break;
      case '?':
        fprintf(stderr,"Unrecognized option: -%c\n", optopt);
        errflag++;
    }
  }

  // ---- argument validation ----------------------------------------------
  if (thr == 0)
  {
    fprintf(stderr,"***** WARNING: Threshold Theta (corr coeff) "
            "value assumed to be ZERO (0)\n");
  }
  if (outfile == NULL)
  {
    fprintf(stderr,"***** WARNING: Output file assumed to be STDOUT\n");
    out = stdout;
  }
  else if ((out = fopen(outfile,"w")) == NULL) //write open bicluster file
  {
    fprintf(stderr,"***** ERROR: Unable to open Output file %s\n",outfile);
    errflag++;
  }
  if ((thr < 0) || (thr > 1))
  {
    fprintf(stderr,"***** ERROR: Threshold Theta (corr coeff) "
            "must be between 0.0-1.0\n");
    // BUG FIX: an out-of-range threshold was reported as an ERROR but the
    // program carried on; now it aborts like the other errors.
    errflag++;
  }
  if (infile == NULL)
  {
    fprintf(stderr,"***** ERROR: Input file not defined\n");
    if (out) fclose(out);
    errflag++;
  }
  else if ((in = fopen(infile,"r")) == NULL) //open gene file
  {
    fprintf(stderr,"***** ERROR: Unable to open Input %s\n", infile);
    if (out) fclose(out);
    errflag++;
  }
  if (errflag)
  {
    printUsage();
    exit(1);
  }

  // ---- host-side setup ---------------------------------------------------
  getmatrixsize(in,&n,&D);
  // FIX: close the sizing stream; readgene() reopens the file by name.
  fclose(in);
  in = NULL;
  printf("Number of rows=%d\tNumber of columns=%d\n",n,D);
  if (maxbcn > n) maxbcn = n;

  gene = (struct gn *)calloc(n,sizeof(struct gn));
  Hd = (char **)calloc(D+1,sizeof(char *));
  for (i = 0; i < n; i++)
    gene[i].x = (float *)calloc(D+1,sizeof(float));
  bicluster = (struct bicl *)calloc(maxbcn,sizeof(struct bicl));
  for (i = 0; i < maxbcn; i++)
  {
    bicluster[i].sample = (char *)calloc(D,sizeof(char));
    bicluster[i].data = (char *)calloc(n,sizeof(char));
  }

  // initialize the gene data
  readgene(infile,gene,Hd,n,D);

  // Timing covers allocation, transfers, 100 kernel launches and copy-back.
  clock_t start = clock();

  // ---- device buffers ----------------------------------------------------
  float *d_gene;
  hipMalloc((void**)&d_gene, sizeof(float) * n * (D+1));
  for (i = 0; i < n; i++) {
    hipMemcpy(d_gene+i*(D+1), gene[i].x, sizeof(float)*(D+1), hipMemcpyHostToDevice);
  }
  float *d_bc_score;
  hipMalloc((void**)&d_bc_score, sizeof(float)*maxbcn);
  int *d_bc_datacount;
  hipMalloc((void**)&d_bc_datacount, sizeof(int)*maxbcn);
  int *d_bc_samplecount;
  hipMalloc((void**)&d_bc_samplecount, sizeof(int)*maxbcn);
  char *d_bc_sample;
  hipMalloc((void**)&d_bc_sample, sizeof(char)*D*maxbcn);
  char *d_bc_sample_tmp;
  hipMalloc((void**)&d_bc_sample_tmp, sizeof(char)*D*maxbcn);
  char *d_bc_data;
  hipMalloc((void**)&d_bc_data, sizeof(char)*n*maxbcn);
  char *d_bc_data_tmp;
  hipMalloc((void**)&d_bc_data_tmp, sizeof(char)*n*maxbcn);

  // One thread per block, one block per seed gene (the kernel's shared
  // scratch buffers rely on this single-thread-per-block launch).
  dim3 blocks (1);
  dim3 grids (maxbcn);

  // 100 launches: benchmark loop
  for (i = 0; i < 100; i++) {
    hipLaunchKernelGGL(compute_bicluster, grids, blocks, 0, 0,
                       d_gene,
                       n,maxbcn,D,thr,
                       d_bc_sample,
                       d_bc_data,
                       d_bc_score,
                       d_bc_datacount,
                       d_bc_samplecount,
                       d_bc_sample_tmp,
                       d_bc_data_tmp);
  }

  // ---- copy results back --------------------------------------------------
  float *bicluster_temp_score = (float *)calloc(maxbcn,sizeof(float));
  hipMemcpy(bicluster_temp_score, d_bc_score, sizeof(float)*maxbcn, hipMemcpyDeviceToHost);
  int *bicluster_temp_datacount = (int *)calloc(maxbcn,sizeof(int));
  hipMemcpy(bicluster_temp_datacount, d_bc_datacount, sizeof(int)*maxbcn, hipMemcpyDeviceToHost);
  int *bicluster_temp_samplecount = (int *)calloc(maxbcn,sizeof(int));
  hipMemcpy(bicluster_temp_samplecount, d_bc_samplecount, sizeof(int)*maxbcn, hipMemcpyDeviceToHost);
  for (i = 0; i < maxbcn; i++) {
    // BUG FIX: read the kernel's result buffers (d_bc_sample / d_bc_data),
    // not the per-seed scratch buffers (d_bc_sample_tmp / d_bc_data_tmp)
    // that were copied here originally.
    hipMemcpy(bicluster[i].sample, d_bc_sample+D*i, sizeof(char)*D, hipMemcpyDeviceToHost);
    hipMemcpy(bicluster[i].data, d_bc_data+n*i, sizeof(char)*n, hipMemcpyDeviceToHost);
  }
  for (i = 0; i < maxbcn; i++) {
    bicluster[i].score = bicluster_temp_score[i];
    bicluster[i].datacount = bicluster_temp_datacount[i];
    bicluster[i].samplecount = bicluster_temp_samplecount[i];
  }

  printbicluster(out,gene,Hd,n,D,maxbcn,thr,bicluster,print_type,overlap);

  // ---- cleanup ------------------------------------------------------------
  hipFree(d_gene);
  hipFree(d_bc_score);
  hipFree(d_bc_datacount);
  hipFree(d_bc_samplecount);
  hipFree(d_bc_sample);
  hipFree(d_bc_sample_tmp);
  hipFree(d_bc_data);
  hipFree(d_bc_data_tmp);

  for (i = 0; i < n; i++) {
    free(gene[i].x);
    free(gene[i].id);
  }
  free(gene);
  for (i = 0; i < D+1; i++) free(Hd[i]);
  free(Hd);
  for (i = 0; i < maxbcn; i++) {
    free(bicluster[i].sample);
    free(bicluster[i].data);
  }
  free(bicluster_temp_score);
  free(bicluster_temp_datacount);
  free(bicluster_temp_samplecount);
  free(bicluster);

  clock_t end = clock() ;
  float elapsed_time = (end-start)/(float)CLOCKS_PER_SEC ;
  printf("Elapsed time = %f s\n",elapsed_time);
  if (print_type == 0) fprintf(out,"\n\nElapsed time= %f s\n",elapsed_time);
  if (out) fclose(out);
  return 0;
}
#include <cstdint>
#include <type_traits>
#include <c10/util/Exception.h>
#include <c10/util/TypeCast.h>
#include <c10/macros/Macros.h>
#include <ATen/core/Array.h>
#include <ATen/detail/FunctionTraits.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <thrust/tuple.h>
// References:
// https://devblogs.nvidia.com/cuda-pro-tip-increase-performance-with-vectorized-memory-access/
namespace at { namespace native { namespace memory {
namespace detail {
// What does the `static_unroll` do?
//
// We want to do something like:
//
// using args_t = typename traits::ArgsTuple;
// args_t args;
// #pragma unroll
// for (int i = 0; i < traits::arity; i++) {
// std::get<i>(args) = ....
// }
//
// but unfortunately the above code does not work because
// the template argument has to be a compile time constant
// so `static_unroll` is created to simulate `#pragma unroll`
// using template metaprogramming.
template<template<int i> typename func, int end, int current=0>
struct static_unroll {
template<typename... Args>
static inline C10_HOST_DEVICE void with_args(Args&&... args) {
func<current>::apply(std::forward<Args>(args)...);
static_unroll<func, end, current+1>::with_args(args...);
}
};
template<template<int i> typename func, int end>
struct static_unroll<func, end, end> {
template<typename... Args>
static inline C10_HOST_DEVICE void with_args(Args... args) {}
};
// helper structs to be used with static_unroll to load arguments
// one by one
template<int arg_index>
struct vectorized_load_helper {
template <typename args_t, typename policy_t>
static __device__ void apply(policy_t &self, args_t *args, int idx) {
using arg_t = std::tuple_element_t<arg_index, args_t>;
// `data` hold the data_ptr for tensors [output, input0, input1, ...], so we
// need a +1 offset to get the input
auto ptr = reinterpret_cast<arg_t *>(self.data[arg_index + 1]) + block_work_size() * idx;
auto args_accessor = [&args] __device__ (int thread_unroll_idx) -> arg_t & { return std::get<arg_index>(args[thread_unroll_idx]); };
self.load_single_arg(args_accessor, ptr);
}
};
template<int arg_index>
struct unroll_load_helper {
template <typename args_t, typename policy_t, typename offset_t, typename loader_t>
static __device__ void apply(policy_t &self, args_t *args, offset_t offset, loader_t loader, int j, int num_outputs) {
using arg_t = std::tuple_element_t<arg_index, args_t>;
// `data` hold the data_ptr for tensors [output, input0, input1, ...], so we
// need a +1 offset to get the input
std::get<arg_index>(args[j]) = loader.template load<arg_t>(self.data[arg_index + num_outputs], offset[arg_index], arg_index);
}
};
template <int current>
struct multi_outputs_store_helper {
template<int ntensors, int num_outputs, typename ...Args>
C10_HOST_DEVICE static void apply(
at::detail::Array<char*, ntensors> data,
at::detail::Array<uint32_t, num_outputs> offsets,
thrust::tuple<Args...> ret) {
using T = typename thrust::tuple_element<current, thrust::tuple<Args...>>::type;
T *to = reinterpret_cast<T *>(data[current]) + offsets[current];
*to = thrust::get<current>(ret);
}
};
} // namespace detail
struct LoadWithoutCast {
template<typename scalar_t>
__device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) {
return *(reinterpret_cast<scalar_t *>(base_ptr) + offset);
}
};
template <int N>
struct LoadWithCast {
using array_t = at::detail::Array<at::ScalarType, std::max<int>(N, 1)>;
using size_array_t = at::detail::Array<uint32_t, std::max<int>(N, 1)>;
array_t dtypes;
size_array_t element_sizes;
template<typename array_t_>
LoadWithCast(array_t_ dtypes) {
#pragma unroll
for (int i = 0; i < N; i++) {
this->dtypes[i] = dtypes[i];
element_sizes[i] = c10::elementSize(dtypes[i]);
}
}
template<typename scalar_t>
__device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) {
void *ptr = base_ptr + element_sizes[arg] * offset;
return c10::fetch_and_cast<scalar_t>(dtypes[arg], ptr);
}
};
struct StoreWithoutCast {
template<typename scalar_t>
__device__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
*(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
}
};
// Storer policy that converts scalar_t to a runtime destination dtype
// before writing, via cast_and_store.
struct StoreWithCast {
  at::ScalarType dtype;   // destination storage dtype
  uint32_t element_size;  // cached byte size of `dtype`
  StoreWithCast(at::ScalarType dtype): dtype(dtype), element_size(c10::elementSize(dtype)) {}
  template <typename scalar_t>
  __device__ void store(scalar_t value, char* base_ptr, uint32_t offset) {
    // `offset` counts elements of the destination dtype.
    char* dst = base_ptr + element_size * offset;
    c10::cast_and_store<scalar_t>(dtype, static_cast<void*>(dst), value);
  }
};
// aligned vector generates vectorized load/store on CUDA
// The alignas forces sizeof(scalar_t) * vec_size alignment, which lets the
// compiler emit a single wide (e.g. 128-bit) memory instruction for a
// load/store of the whole struct.
template<typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
scalar_t val[vec_size];
};
namespace policies {
// Scalar (non-vectorized) elementwise policy with bounds checking, used for
// the tail of the data or when vectorized access is not possible.
// Assumption:
// all tensors are contiguous, that is: stride == sizeof(type) for all tensors
template<typename data_t, typename inp_calc_t, typename out_calc_t, typename loader_t, typename storer_t, int num_outputs = 1>
struct unroll {
  data_t data;      // data[0..num_outputs) are outputs, the rest are inputs
  int remaining;    // number of elements this block still has to process
  inp_calc_t input_offset_calculator;
  out_calc_t output_offset_calculator;
  loader_t loader;
  storer_t storer;

  __device__ unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s):
    data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc), loader(l), storer(s) {}

  // True when this thread's thread_work_elem-th element is within bounds.
  __device__ inline bool check_inbounds(int thread_work_elem) {
    return ((threadIdx.x + thread_work_elem*num_threads()) < remaining);
  }

  // Gathers the arguments for up to thread_work_size() elements into args[].
  template<typename args_t>
  __device__ inline void load(args_t *args, int idx) {
    constexpr int arity = std::tuple_size<args_t>::value;
    int thread_idx = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < thread_work_size(); i++) {
      if (thread_idx >= remaining) {
        return;
      }
      int linear_idx = thread_idx + block_work_size() * idx;
      auto offset = input_offset_calculator.get(linear_idx);
      detail::static_unroll<detail::unroll_load_helper, arity>::with_args(*this, args, offset, loader, i, num_outputs);
      thread_idx += num_threads();
    }
  }

  // Scatters up to thread_work_size() results to the output tensor through
  // the storer (which handles any dtype conversion).
  template<typename scalar_t>
  __device__ inline void store(scalar_t *from, int idx) {
    int thread_idx = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < thread_work_size(); i++) {
      if (thread_idx >= remaining) {
        return;
      }
      int linear_idx = thread_idx + block_work_size() * idx;
      int offset = output_offset_calculator.get(linear_idx)[0];
      // The raw typed pointer previously computed outside this loop was never
      // used (the storer performs the write) -- removed as dead code.
      storer.store(from[i], data[0], offset);
      thread_idx += num_threads();
    }
  }
};
// Assumption:
// all tensors are contiguous, that is: stride == sizeof(type) for all tensors
// Note:
// Functions in vectorized policy does not do boundary check. It assumes the whole block
// has its job to do. So the reminders should be handled by the the caller manually.
template <int vec_size, typename data_t> // vec_size: number of scalars, can be 1, 2, or 4.
struct vectorized {
  static_assert(thread_work_size() % vec_size == 0, "The workload per thread must be a multiple of vec_size");
  static constexpr int loop_size = thread_work_size() / vec_size;
  data_t data; // data[0] is the output, the rest are inputs

  __device__ vectorized(data_t data) : data(data) {}

  // This policy assumes the whole block is in bounds, so always true.
  __device__ inline constexpr bool check_inbounds(int thread_work_elem) {
    return true;
  }

  // Loads thread_work_size() scalars of one argument using aligned vector
  // loads, writing each scalar through the accessor `to`.
  template<typename accessor_t, typename scalar_t>
  __device__ inline void load_single_arg(accessor_t to, scalar_t *from) {
    using vec_t = aligned_vector<scalar_t, vec_size>;
    vec_t *from_ = reinterpret_cast<vec_t *>(from);
    int thread_idx = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < loop_size; i++) {
      int index = thread_idx + i * num_threads();
      vec_t v = from_[index];
      #pragma unroll
      for (int j = 0; j < vec_size; j++) {
        to(vec_size * i + j) = v.val[j];
      }
    }
  }

  // Loads all arguments for this thread's elements into args[].
  template<typename args_t>
  __device__ inline void load(args_t *args, int idx) {
    constexpr int arity = std::tuple_size<args_t>::value;
    detail::static_unroll<detail::vectorized_load_helper, arity>::with_args(*this, args, idx);
  }

  // Stores thread_work_size() results using aligned vector stores.
  template<typename scalar_t>
  __device__ inline void store(scalar_t *from, int idx) {
    using vec_t = aligned_vector<scalar_t, vec_size>;
    scalar_t *to = reinterpret_cast<scalar_t *>(data[0]) + block_work_size() * idx;
    vec_t *to_ = reinterpret_cast<vec_t *>(to);
    int thread_idx = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < loop_size; i++) {
      int index = thread_idx + i * num_threads();
      vec_t v;
      // Pack scalars into the vector; unroll pragma added for consistency
      // with the symmetric inner loop in load_single_arg above.
      #pragma unroll
      for (int j = 0; j < vec_size; j++) {
        v.val[j] = from[vec_size * i + j];
      }
      to_[index] = v;
    }
  }
};
// Variant of `unroll` for kernels producing multiple outputs. The members
// and the check_inbounds/load methods are duplicated from `unroll` rather
// than inherited because of a compiler bug in cuda 10.2+.
template <typename data_t, typename inp_calc_t, typename out_calc_t, int num_outputs>
struct multi_outputs_unroll {
  data_t data;
  int remaining;
  inp_calc_t input_offset_calculator;
  out_calc_t output_offset_calculator;
  LoadWithoutCast loader;
  StoreWithoutCast storer;

  __device__ multi_outputs_unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc):
    data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc) {}

  // True when this thread's thread_work_elem-th element is within bounds.
  __device__ inline bool check_inbounds(int thread_work_elem) {
    return threadIdx.x + thread_work_elem * num_threads() < remaining;
  }

  // Gathers the arguments for up to thread_work_size() elements into args[].
  template<typename args_t>
  __device__ inline void load(args_t *args, int idx) {
    constexpr int arity = std::tuple_size<args_t>::value;
    int tid = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < thread_work_size(); i++) {
      if (tid >= remaining) {
        return;
      }
      const int linear_idx = tid + block_work_size() * idx;
      const auto offset = input_offset_calculator.get(linear_idx);
      detail::static_unroll<detail::unroll_load_helper, arity>::with_args(*this, args, offset, loader, i, num_outputs);
      tid += num_threads();
    }
  }

  // Scatters each element of the returned tuple to its own output tensor.
  template <typename return_t>
  __device__ inline void store(return_t *from, int idx) {
    int tid = threadIdx.x;
    #pragma unroll
    for (int i = 0; i < thread_work_size(); i++) {
      if (tid >= this->remaining) {
        return;
      }
      const int linear_idx = tid + block_work_size() * idx;
      auto offsets = this->output_offset_calculator.get(linear_idx);
      memory::detail::static_unroll<detail::multi_outputs_store_helper, num_outputs>::with_args(this->data, offsets, from[i]);
      tid += num_threads();
    }
  }
};
} // namespace policies
// This is only used in host, but it gets wrapped into templates that are
// C10_HOST_DEVICE, so it must be C10_HOST_DEVICE itself in order to compile.
// Returns the widest vector width (4, 2 or 1 scalars) whose alignment the
// given pointer satisfies for scalar_t.
template<typename scalar_t>
inline C10_HOST_DEVICE int can_vectorize_up_to(char *pointer) {
  const uint64_t address = reinterpret_cast<uint64_t>(pointer);
  constexpr int vec2_alignment = std::alignment_of<aligned_vector<scalar_t, 2>>::value;
  constexpr int vec4_alignment = std::alignment_of<aligned_vector<scalar_t, 4>>::value;
  if (address % vec4_alignment == 0) {
    return 4;
  }
  if (address % vec2_alignment == 0) {
    return 2;
  }
  return 1;
}
// Clamps `result` by the vectorization limit of the i-th *input* pointer.
// `pointers` holds the data_ptr for tensors [output, input0, input1, ...],
// hence the +1 offset to reach input i.
template<int i>
struct can_vectorize_up_to_helper {
  template <typename array_t, typename traits>
  static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits _) {
    using arg_t = typename traits::template arg<i>::type;
    const int limit = can_vectorize_up_to<arg_t>(pointers[i + 1]);
    result = std::min<int>(result, limit);
  }
};
// Returns the widest vector width usable across the output and every input
// of func_t, given their data pointers.
template<typename func_t, typename array_t>
inline int can_vectorize_up_to(array_t pointers) {
  using traits = function_traits<func_t>;
  using return_t = typename traits::result_type;
  constexpr int arity = traits::arity;
  // Start from the output's limit...
  int result = can_vectorize_up_to<return_t>(pointers[0]);
  // ...then clamp by each argument's limit. Argument types are only known
  // at compile time, so the loop over arguments is unrolled statically.
  detail::static_unroll<can_vectorize_up_to_helper, arity>::with_args(result, pointers, traits());
  return result;
}
// jitted version of the above
// See Note [Jiterator], this relies on the assumptions enumerated there
// (in particular: all inputs share common_type, so no per-argument types
// are needed and a plain runtime loop suffices).
template<typename result_type, typename common_type, int arity, typename array_t>
inline int jitted_can_vectorize_up_to(array_t pointers) {
  // Output limit first...
  int result = can_vectorize_up_to<result_type>(pointers[0]);
  // ...then clamp by each of the `arity` inputs.
  for (int i = 1; i < arity + 1; ++i) {
    result = std::min<int>(result, can_vectorize_up_to<common_type>(pointers[i]));
  }
  return result;
}
}}} // namespace at::native::memory
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "bvh/BVHNode.h"
#include "bvh/MBVHNode.h"
#include "CUDAIntersect.h"
#include "getShadingData.h"
#include "bsdf/bsdf.h"
#include "lights.h"
// Compile-time feature toggles and constants for the wavefront path tracer.
#define USE_WARP_PACKETS 1
// Traverse the top-level acceleration structure as a 4-wide MBVH (1) or a
// binary BVH (0); see intersect_scene / is_occluded.
#define USE_TOP_MBVH 1
// Same toggle for the per-mesh acceleration structures.
#define USE_MBVH 1
// Path flag bit marking the previous bounce as specular (stored in the low
// byte of pathOrigins[...].w).
#define IS_SPECULAR 1
#define MAX_IS_LIGHTS 16
// Enables MIS / next-event estimation in the shade kernel.
#define VARIANCE_REDUCTION 1
// Epsilon passed to the triangle intersection tests.
#define T_EPSILON 1e-6f
// Rounds `a` up to the next multiple of `b`; assumes `b` is a power of two.
// NOTE(review): the mask is 0x7fffffff - (b-1) instead of ~((b)-1), which
// also clears the sign bit -- fine while values stay below 2^31; confirm.
#define NEXTMULTIPLEOF(a, b) (((a) + ((b)-1)) & (0x7fffffff - ((b)-1)))
using namespace glm;
#ifndef __launch_bounds__ // Fix errors in IDE
// Host-side stub declarations so IDE parsers that lack the CUDA builtins can
// still index this file; presumably never active under a real nvcc build
// (where __launch_bounds__ is available) -- TODO confirm.
void __sincosf(float, float *, float *) {}
#define __launch_bounds__(x, y)
int __float_as_int(float x) { return int(x); }
uint __float_as_uint(float x) { return uint(x); }
float __uint_as_float(uint x) { return float(x); }
float __int_as_float(int x) { return float(x); }
// BUGFIX: the original `return T;` returned a *type*, not a value, which
// cannot compile when this shim path is active. Return a value-initialized T.
template <typename T, typename B> T atomicAdd(T *, B) { return T{}; }
template <typename T, int x> struct surface
{
};
// No-op stand-in for the CUDA surface write intrinsic.
template <typename T>
void surf2Dwrite(T value, surface<void, cudaSurfaceType2D> output, size_t stride, size_t y,
                 cudaSurfaceBoundaryMode mode)
{
}
#endif
// Device-side module state. `output` is the surface the final image is
// blitted into; the __constant__ symbols below are populated from the host
// via the set* helpers (cudaMemcpyToSymbol) before kernels launch.
surface<void, cudaSurfaceType2D> output;
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ CameraView *view;
__constant__ __device__ Counters *counters;
// Per-pixel running sum of radiance samples.
__constant__ __device__ glm::vec4 *accumulator;
// Element distance between the two halves of the double-buffered path arrays.
__constant__ __device__ uint stride;
__constant__ __device__ glm::vec4 *pathStates;
__constant__ __device__ glm::vec4 *pathOrigins;
__constant__ __device__ glm::vec4 *pathDirections;
__constant__ __device__ glm::vec4 *pathThroughputs;
// Sky map (sampled with Debevec's angular-probe formulas in the shade
// kernel) and its dimensions.
__constant__ __device__ glm::vec3 *skybox;
__constant__ __device__ uint skyboxWidth;
__constant__ __device__ uint skyboxHeight;
__constant__ __device__ uint scrWidth;
__constant__ __device__ uint scrHeight;
// Blue-noise ranking/scrambling tables (see blueNoiseSampler).
__constant__ __device__ uint *blueNoise;
// Upper bound applied to per-sample contributions via clampIntensity.
__constant__ __device__ float clampValue;
// Fire-and-forget shadow-ray connection records.
__constant__ __device__ PotentialContribution *connectData;
#ifndef MAT_CONSTANTS_H
#define MAT_CONSTANTS_H
__constant__ __device__ DeviceMaterial *materials;
__constant__ __device__ glm::vec4 *floatTextures;
__constant__ __device__ uint *uintTextures;
#endif
#ifndef LIGHTS_H
#define LIGHTS_H
__constant__ __device__ rfw::DeviceAreaLight *areaLights;
__constant__ __device__ rfw::DevicePointLight *pointLights;
__constant__ __device__ rfw::DeviceSpotLight *spotLights;
__constant__ __device__ rfw::DeviceDirectionalLight *directionalLights;
__constant__ __device__ rfw::LightCount lightCounts;
#endif
// Top-level acceleration structures and per-instance BVH descriptors.
__constant__ __device__ rfw::bvh::BVHNode *topLevelBVH;
__constant__ __device__ rfw::bvh::MBVHNode *topLevelMBVH;
__constant__ __device__ uint *topPrimIndices;
__constant__ __device__ InstanceBVHDescriptor *instances;
// Host-side setters: each uploads a device pointer into the matching
// __constant__ symbol above via cudaMemcpyToSymbol.
__host__ void setTopLevelBVH(rfw::bvh::BVHNode *ptr) { cudaMemcpyToSymbol(topLevelBVH, &ptr, sizeof(void *)); }
__host__ void setTopLevelMBVH(rfw::bvh::MBVHNode *ptr) { cudaMemcpyToSymbol(topLevelMBVH, &ptr, sizeof(void *)); }
__host__ void setTopPrimIndices(uint *ptr) { cudaMemcpyToSymbol(topPrimIndices, &ptr, sizeof(void *)); }
__host__ void setInstances(InstanceBVHDescriptor *ptr) { cudaMemcpyToSymbol(instances, &ptr, sizeof(void *)); }
__host__ void setCameraView(rfw::CameraView *ptr) { cudaMemcpyToSymbol(view, &ptr, sizeof(void *)); }
__host__ void setCounters(Counters *ptr) { cudaMemcpyToSymbol(counters, &ptr, sizeof(void *)); }
__host__ void setAccumulator(glm::vec4 *ptr) { cudaMemcpyToSymbol(accumulator, &ptr, sizeof(void *)); }
__host__ void setStride(uint s) { cudaMemcpyToSymbol(stride, &s, sizeof(void *)); }
// Host-side setters for path buffers, materials, textures, sky, blue noise,
// screen dimensions and lights; all forward to cudaMemcpyToSymbol.
__host__ void setPathStates(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathStates, &ptr, sizeof(void *)); }
__host__ void setPathOrigins(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathOrigins, &ptr, sizeof(void *)); }
__host__ void setPathDirections(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathDirections, &ptr, sizeof(void *)); }
__host__ void setPathThroughputs(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathThroughputs, &ptr, sizeof(void *)); }
__host__ void setPotentialContributions(PotentialContribution *ptr)
{
cudaMemcpyToSymbol(connectData, &ptr, sizeof(void *));
}
__host__ void setMaterials(DeviceMaterial *ptr) { cudaMemcpyToSymbol(materials, &ptr, sizeof(void *)); }
__host__ void setFloatTextures(glm::vec4 *ptr) { cudaMemcpyToSymbol(floatTextures, &ptr, sizeof(void *)); }
__host__ void setUintTextures(uint *ptr) { cudaMemcpyToSymbol(uintTextures, &ptr, sizeof(void *)); }
__host__ void setSkybox(glm::vec3 *ptr) { cudaMemcpyToSymbol(skybox, &ptr, sizeof(void *)); }
__host__ void setSkyDimensions(uint width, uint height)
{
cudaMemcpyToSymbol(skyboxWidth, &width, sizeof(uint));
cudaMemcpyToSymbol(skyboxHeight, &height, sizeof(uint));
}
// NOTE(review): this writes the same `instances` symbol as setInstances but
// takes a DeviceInstanceDescriptor* -- confirm the two descriptor types are
// intentionally aliased and callers never mix the two setters.
__host__ void setInstanceDescriptors(DeviceInstanceDescriptor *ptr)
{
cudaMemcpyToSymbol(instances, &ptr, sizeof(void *));
}
__host__ void setGeometryEpsilon(float value) { cudaMemcpyToSymbol(geometryEpsilon, &value, sizeof(float)); }
__host__ void setBlueNoiseBuffer(uint *ptr) { cudaMemcpyToSymbol(blueNoise, &ptr, sizeof(void *)); }
__host__ void setScreenDimensions(uint width, uint height)
{
cudaMemcpyToSymbol(scrWidth, &width, sizeof(uint));
cudaMemcpyToSymbol(scrHeight, &height, sizeof(uint));
}
__host__ void setLightCount(rfw::LightCount lightCount)
{
cudaMemcpyToSymbol(lightCounts, &lightCount, sizeof(rfw::LightCount));
}
__host__ void setAreaLights(rfw::DeviceAreaLight *als) { cudaMemcpyToSymbol(areaLights, &als, sizeof(void *)); }
__host__ void setPointLights(rfw::DevicePointLight *pls) { cudaMemcpyToSymbol(pointLights, &pls, sizeof(void *)); }
__host__ void setSpotLights(rfw::DeviceSpotLight *sls) { cudaMemcpyToSymbol(spotLights, &sls, sizeof(void *)); }
__host__ void setDirectionalLights(rfw::DeviceDirectionalLight *dls)
{
cudaMemcpyToSymbol(directionalLights, &dls, sizeof(void *));
}
__host__ void setClampValue(float value) { cudaMemcpyToSymbol(clampValue, &value, sizeof(float)); }
// Returns the runtime surface reference bound to `output`, so the host can
// bind the render target array to it.
// NOTE(review): cudaGetSurfaceReference / surface references are deprecated
// (removed in CUDA 12) -- migrate to surface objects when upgrading toolkits.
// The cudaGetSurfaceReference error code is also not checked here.
__host__ const surfaceReference *getOutputSurfaceReference()
{
const surfaceReference *ref;
cudaGetSurfaceReference(&ref, &output);
return ref;
}
// Resets all wavefront counters before the primary-ray (extend) stage.
// Expected launch: a single block; only thread 0 performs the writes.
__global__ void initCountersExtent(uint pathCount, uint sampleIndex)
{
    if (threadIdx.x > 0)
        return; // Only a single thread does the reset
    Counters &c = *counters;
    c.activePaths = pathCount;
    c.shaded = 0;        // Thread atomic for shade kernel
    c.extensionRays = 0; // Compaction counter for extension rays
    c.shadowRays = 0;    // Compaction counter for connections
    c.totalExtensionRays = pathCount;
    c.totalShadowRays = 0;
    c.sampleIndex = sampleIndex;
}
// Rolls the counters forward between bounces: the extension rays produced by
// the previous shade pass become the active paths of the next wavefront.
__global__ void initCountersSubsequent()
{
    if (threadIdx.x > 0)
        return; // Only a single thread does the update
    Counters &c = *counters;
    c.totalExtensionRays += c.extensionRays;
    c.activePaths = c.extensionRays; // Remaining active paths
    c.shaded = 0;                    // Thread atomic for shade kernel
    c.extensionRays = 0;             // Compaction counter for extension rays
    c.shadowRays = 0;
}
// Host wrapper: resets the counters for the primary-ray stage. Launches a
// single warp; only thread 0 writes (see initCountersExtent).
__host__ void InitCountersForExtend(unsigned int pathCount, uint sampleIndex)
{
initCountersExtent<<<1, 32>>>(pathCount, sampleIndex);
}
// Host wrapper: rolls the counters forward for subsequent bounces.
__host__ void InitCountersSubsequent() { initCountersSubsequent<<<1, 32>>>(); }
// Writes the accumulator into the output surface, scaled by `scale`
// (1 / sample count). Expected launch: 2D grid covering scrwidth x scrheight.
__global__ void blit_buffer(const uint scrwidth, const uint scrheight, const float scale)
{
    const int px = threadIdx.x + blockIdx.x * blockDim.x;
    const int py = threadIdx.y + blockIdx.y * blockDim.y;
    if (px >= scrwidth || py >= scrheight)
        return;
    const glm::vec4 color = accumulator[px + py * scrwidth] * scale;
    // Surface x-coordinates are in bytes, hence the sizeof(float4) factor.
    surf2Dwrite<glm::vec4>(color, output, px * sizeof(float4), py, cudaBoundaryModeClamp);
}
// Launches blit_buffer over the whole screen with 16x16 blocks, averaging
// the accumulator by sampleID, and returns the launch status.
__host__ cudaError blitBuffer(const unsigned int scrwidth, const unsigned int scrheight, const uint sampleID)
{
    const dim3 blockDim(16, 16, 1);
    // Ceil-divide the screen into 16x16 tiles.
    const dim3 gridDim(NEXTMULTIPLEOF(scrwidth, 16) / 16, NEXTMULTIPLEOF(scrheight, 16) / 16, 1);
    blit_buffer<<<gridDim, blockDim>>>(scrwidth, scrheight, 1.0f / float(sampleID));
    return cudaGetLastError();
}
// Returns a sample in (0, 1) from the 128x128 blue-noise ranking/scrambling
// tables stored in `blueNoise`, for the given pixel, sample index and
// sample dimension.
__device__ inline float blueNoiseSampler(int x, int y, int sampleIdx, int sampleDimension)
{
    // Wrap all arguments into the tables' valid ranges.
    x &= 127;
    y &= 127;
    sampleIdx &= 255;
    sampleDimension &= 255;
    // XOR the sample index with the optimized ranking key of this pixel/dim.
    const int ranked = sampleIdx ^ blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3];
    // Fetch the sequence value for the ranked index...
    int value = blueNoise[sampleDimension + ranked * 256];
    // ...and scramble it with the per-pixel optimized key.
    value ^= blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
    // Map the 8-bit value to a float strictly inside (0, 1).
    return (0.5f + value) * (1.0f / 256.0f);
}
// Traces a ray against the two-level scene: a top-level BVH/MBVH over
// instances, each referencing a per-mesh BVH/MBVH over triangles. On a hit,
// writes the instance id (*instID), primitive id (*primID), hit distance
// (*t) and triangle barycentrics; returns true iff anything was hit.
// USE_TOP_MBVH / USE_MBVH select 4-wide MBVH vs binary BVH traversal.
inline __device__ bool intersect_scene(const vec3 origin, const vec3 direction, int *instID, int *primID, float *t,
vec2 *barycentrics, float t_min = 1e-5f)
{
#if !USE_TOP_MBVH
// Binary top-level BVH traversal.
// NOTE(review): this disabled path references meshIndices / meshVertices /
// meshPrimIndices / meshBVHs / meshMBVHs, which are not declared in this
// file -- verify it still compiles before turning USE_TOP_MBVH off.
return intersect_bvh(origin, direction, t_min, t, instID, topLevelBVH, topPrimIndices, [&](uint instance) {
const InstanceBVHDescriptor &desc = instances[instance];
// Transform the ray into this instance's object space.
const vec3 new_origin = desc.inverse_transform * vec4(origin, 1);
const vec3 new_direction = desc.inverse_transform * vec4(direction, 0);
const uvec3 *indices = meshIndices[instance];
const vec4 *vertices = meshVertices[instance];
const uint *primIndices = meshPrimIndices[instance];
if (indices != nullptr) // Mesh with indices
{
#if !USE_MBVH
return intersect_bvh(
new_origin, new_direction, t_min, t, primID, meshBVHs[instance], primIndices, [&](uint triangleID) {
#else
return intersect_mbvh(new_origin, new_direction, t_min, t, primID, meshMBVHs[instance], primIndices, [&](uint triangleID) {
#endif
const uvec3 idx = indices[triangleID];
return intersect_triangle(new_origin, new_direction, t_min, t, vertices[idx.x], vertices[idx.y],
vertices[idx.z], barycentrics, T_EPSILON);
});
}
// Intersect mesh without indices
#if !USE_MBVH
return intersect_bvh(new_origin, new_direction, t_min, t, primID, meshBVHs[instance], primIndices,
[&](uint triangleID) {
#else
return intersect_mbvh(new_origin, new_direction, t_min, t, primID, meshMBVHs[instance], primIndices, [&](uint triangleID) {
#endif
// Non-indexed meshes store three consecutive vertices per triangle.
const uvec3 idx = uvec3(triangleID * 3) + uvec3(0, 1, 2);
return intersect_triangle(new_origin, new_direction, t_min, t, vertices[idx.x],
vertices[idx.y], vertices[idx.z], barycentrics, T_EPSILON);
});
});
#else
// 4-wide top-level MBVH traversal (the active configuration).
return intersect_mbvh(origin, direction, t_min, t, instID, topLevelMBVH, topPrimIndices, [&](uint instance) {
const InstanceBVHDescriptor &desc = instances[instance];
const mat4 inverse_transform = desc.inverse_transform;
// Transform the ray into this instance's object space.
const vec3 new_origin = vec3(inverse_transform * vec4(origin, 1));
const vec3 new_direction = vec3(inverse_transform * vec4(direction, 0));
const uvec3 *indices = desc.indices;
const vec4 *vertices = desc.vertices;
const uint *prim_indices = desc.bvh_indices;
if (indices != nullptr) // Mesh with indices
{
#if !USE_MBVH
return intersect_bvh(
new_origin, new_direction, t_min, t, primID, desc.bvh, prim_indices, [&](uint triangleID) {
#else
return intersect_mbvh(new_origin, new_direction, t_min, t, primID, desc.mbvh, prim_indices, [&](uint triangleID) {
#endif
const uvec3 idx = indices[triangleID];
return intersect_triangle(new_origin, new_direction, t_min, t, vertices[idx.x], vertices[idx.y],
vertices[idx.z], barycentrics, T_EPSILON);
});
}
// Intersect mesh without indices
#if !USE_MBVH
return intersect_bvh(new_origin, new_direction, t_min, t, primID, desc.bvh, prim_indices, [&](uint triangleID) {
#else
return intersect_mbvh(new_origin, new_direction, t_min, t, primID, desc.mbvh, prim_indices, [&](uint triangleID) {
#endif
// Non-indexed meshes store three consecutive vertices per triangle.
const uvec3 idx = uvec3(triangleID * 3) + uvec3(0, 1, 2);
return intersect_triangle(new_origin, new_direction, t_min, t, vertices[idx.x], vertices[idx.y],
vertices[idx.z], barycentrics, T_EPSILON);
});
});
#endif
}
// Shadow-ray query: returns true iff any geometry blocks the ray between
// t_min and t_max. Uses the *_shadow traversal variants, which only need a
// boolean answer; no hit attributes are recorded (t_max is taken by value).
__device__ bool is_occluded(const vec3 origin, const vec3 direction, float t_min, float t_max)
{
#if !USE_TOP_MBVH
// Binary top-level BVH traversal.
// NOTE(review): this disabled path references inverse_transforms /
// meshIndices / meshVertices / meshPrimIndices / meshBVHs / meshMBVHs,
// which are not declared in this file -- verify before enabling.
return intersect_bvh_shadow(origin, direction, t_min, t_max, topLevelBVH, topPrimIndices, [&](uint instance) {
const vec3 new_origin = inverse_transforms[instance] * vec4(origin, 1);
const vec3 new_direction = inverse_transforms[instance] * vec4(direction, 0);
const uvec3 *indices = meshIndices[instance];
const vec4 *vertices = meshVertices[instance];
const uint *primIndices = meshPrimIndices[instance];
if (indices != nullptr) // Mesh with indices
{
#if !USE_MBVH
return intersect_bvh_shadow(
new_origin, new_direction, t_min, t_max, meshBVHs[instance], primIndices, [&](uint triangleID) {
#else
return intersect_mbvh_shadow(new_origin, new_direction, t_min, t_max, meshMBVHs[instance], primIndices, [&](uint triangleID) {
#endif
const uvec3 idx = indices[triangleID];
return intersect_triangle(new_origin, new_direction, t_min, &t_max, vertices[idx.x],
vertices[idx.y], vertices[idx.z], T_EPSILON);
});
}
// Intersect mesh without indices
#if !USE_MBVH
return intersect_bvh_shadow(
new_origin, new_direction, t_min, t_max, meshBVHs[instance], primIndices, [&](uint triangleID) {
#else
return intersect_mbvh_shadow(new_origin, new_direction, t_min, t_max, meshMBVHs[instance], primIndices, [&](uint triangleID) {
#endif
// Non-indexed meshes store three consecutive vertices per triangle.
const uvec3 idx = uvec3(triangleID * 3) + uvec3(0, 1, 2);
return intersect_triangle(new_origin, new_direction, t_min, &t_max, vertices[idx.x], vertices[idx.y],
vertices[idx.z], T_EPSILON);
});
});
#else
// 4-wide top-level MBVH traversal (the active configuration).
return intersect_mbvh_shadow(origin, direction, t_min, t_max, topLevelMBVH, topPrimIndices, [&](uint instance) {
const InstanceBVHDescriptor &desc = instances[instance];
// Transform the ray into this instance's object space.
const vec3 new_origin = desc.inverse_transform * vec4(origin, 1);
const vec3 new_direction = desc.inverse_transform * vec4(direction, 0);
const uvec3 *indices = desc.indices;
const vec4 *vertices = desc.vertices;
const uint *primIndices = desc.bvh_indices;
if (indices != nullptr) // Mesh with indices
{
#if !USE_MBVH
return intersect_bvh_shadow(
new_origin, new_direction, t_min, t_max, desc.bvh, primIndices, [&](uint triangleID) {
#else
return intersect_mbvh_shadow(new_origin, new_direction, t_min, t_max, desc.mbvh, primIndices, [&](uint triangleID) {
#endif
const uvec3 idx = indices[triangleID];
return intersect_triangle(new_origin, new_direction, t_min, &t_max, vertices[idx.x],
vertices[idx.y], vertices[idx.z], T_EPSILON);
});
}
// Intersect mesh without indices
#if !USE_MBVH
return intersect_bvh_shadow(
new_origin, new_direction, t_min, t_max, desc.bvh, primIndices, [&](uint triangleID) {
#else
return intersect_mbvh_shadow(new_origin, new_direction, t_min, t_max, desc.mbvh, primIndices, [&](uint triangleID) {
#endif
// Non-indexed meshes store three consecutive vertices per triangle.
const uvec3 idx = uvec3(triangleID * 3) + uvec3(0, 1, 2);
return intersect_triangle(new_origin, new_direction, t_min, &t_max, vertices[idx.x], vertices[idx.y],
vertices[idx.z], T_EPSILON);
});
});
#endif
}
// Generates the camera ray for pixel `pathID` (flattened x + y * scrWidth):
// jitters the pixel position for anti-aliasing and samples a point on a
// 9-bladed polygonal lens aperture for depth of field. Writes ray origin *O
// and normalized direction *D.
inline __device__ void generatePrimaryRay(const uint pathID, glm::vec3 *O, glm::vec3 *D)
{
// Only consumed by the RandomFloat fallback in the #else branch below.
uint seed = WangHash(pathID * 16789 + counters->sampleIndex * 1791);
const int sx = pathID % scrWidth;
const int sy = pathID / scrWidth;
#if 1
// Blue-noise path: r0/r1 jitter the pixel, r2/r3 sample the aperture.
const float r0 = blueNoiseSampler(sx, sy, int(counters->sampleIndex), 0);
const float r1 = blueNoiseSampler(sx, sy, int(counters->sampleIndex), 1);
float r2 = blueNoiseSampler(sx, sy, int(counters->sampleIndex), 2);
float r3 = blueNoiseSampler(sx, sy, int(counters->sampleIndex), 3);
#else
const float r0 = RandomFloat(seed);
const float r1 = RandomFloat(seed);
float r2 = RandomFloat(seed);
float r3 = RandomFloat(seed);
#endif
// Pick one of the 9 aperture blades from r0, then rescale the remainder of
// r2 back into [0, 1) as a fresh sample within that blade.
const float blade = static_cast<int>(r0 * 9);
r2 = (r2 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
// 2*pi / 9 blades.
constexpr float piOver4point5 = 3.14159265359f / 4.5f;
__sincosf(blade * piOver4point5, &x1, &y1);
__sincosf((blade + 1.0f) * piOver4point5, &x2, &y2);
// Uniformly sample the triangle spanned by the two blade edge vertices by
// mirroring (r2, r3) into the lower-left triangle of the unit square.
if ((r2 + r3) > 1.0f)
{
r2 = 1.0f - r2;
r3 = 1.0f - r3;
}
const float xr = x1 * r2 + x2 * r3;
const float yr = y1 * r2 + y2 * r3;
// TODO: Calculate this on cpu
const vec3 right = view->p2 - view->p1;
const vec3 up = view->p3 - view->p1;
// Offset the origin across the lens by the scaled aperture sample.
*O = view->pos + view->aperture * (right * xr + up * yr);
// Jittered point on the image plane for this pixel.
const float u = (static_cast<float>(sx) + r0) * (1.0f / scrWidth);
const float v = (static_cast<float>(sy) + r1) * (1.0f / scrHeight);
const vec3 pointOnPixel = view->p1 + u * right + v * up;
*D = normalize(pointOnPixel - *O);
}
// 2D-dispatch variant of the ray intersection kernel (one thread per pixel
// of a width x height launch). Behavior per stage:
// - Primary:   generate + trace a camera ray for this pixel, record the hit.
// - Secondary: trace a previously queued extension ray.
// - Shadow:    trace an occlusion ray; if unoccluded, add the stored
//              potential contribution to the accumulator.
// NOTE(review): `count` is unused here; bounds come from width/height.
__global__ void intersect_rays(IntersectionStage stage, const uint pathLength, const uint width, const uint height,
const uint count)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const uint pathID = x + y * width;
if (stage == Primary)
{
// Path state is double-buffered: even bounces use half 0, odd bounces half 1.
const uint bufferIndex = pathLength % 2;
const uint bufferID = pathID + bufferIndex * stride;
float t = 1e34f;
int instID;
int primID;
vec2 bary;
vec3 O, D;
generatePrimaryRay(pathID, &O, &D);
// O.w packs (pathID << 8) | flags; flag bit 1 marks the path as specular.
pathOrigins[pathID] = vec4(O, __uint_as_float((pathID << 8) + 1 /* 1 == specular */));
pathDirections[pathID] = vec4(D, 0);
// Default is a miss: primIdx < 0 makes the shade kernel sample the sky.
vec4 result = vec4(0, 0, __int_as_float(-1), 0);
if (intersect_scene(O, D, &instID, &primID, &t, &bary, 1e-5f))
// x: barycentrics quantized to 2x16 bits, y: instance, z: prim, w: t.
result = vec4(__uint_as_float(uint(65535.0f * bary.x) | (uint(65535.0f * bary.y) << 16)),
__int_as_float(uint(instID)), __int_as_float(primID), t);
pathStates[bufferID] = result;
}
else if (stage == Secondary)
{
const uint bufferIndex = pathLength % 2;
const uint bufferID = pathID + bufferIndex * stride;
float t = 1e34f;
int instID;
int primID;
vec2 bary;
const vec4 O4 = pathOrigins[bufferID];
const vec4 D4 = pathDirections[bufferID];
const vec3 O = O4;
const vec3 D = D4;
vec4 result = vec4(0, 0, __int_as_float(-1), 0);
if (intersect_scene(O, D, &instID, &primID, &t, &bary, 1e-5f))
// `+` matches the `|` used in Primary: the two 16-bit fields never carry.
result = vec4(__uint_as_float(uint(65535.0f * bary.x) + (uint(65535.0f * bary.y) << 16)),
__int_as_float(uint(instID)), __int_as_float(primID), t);
pathStates[bufferID] = result;
}
else if (stage == Shadow)
{
const vec4 O4 = connectData[pathID].Origin;
const vec4 D4 = connectData[pathID].Direction;
const vec3 O = vec3(O4);
const vec3 D = vec3(D4);
// D4.w is the distance to the light; an occluded ray contributes nothing.
if (is_occluded(O, D, geometryEpsilon, D4.w))
return;
const vec4 contribution = connectData[pathID].Emission;
const uint pixelID = __float_as_uint(contribution.w);
accumulator[pixelID] += vec4(vec3(contribution), 1.0f);
}
}
// 1D-dispatch variant of the ray intersection kernel (one thread per path,
// `count` paths total). Behavior per stage:
// - Primary:   generate + trace the camera ray for pathID, record the hit.
// - Secondary: trace a previously queued extension ray.
// - Shadow:    trace an occlusion ray; if unoccluded, add the stored
//              potential contribution to the accumulator.
__global__ void intersect_rays(IntersectionStage stage, const uint pathLength, const uint count)
{
    const uint pathID = threadIdx.x + blockIdx.x * blockDim.x;
    if (pathID >= count)
        return;
    if (stage == Primary)
    {
        // Path state is double-buffered: even bounces use half 0, odd half 1.
        const uint bufferIndex = pathLength % 2;
        const uint bufferID = pathID + bufferIndex * stride;
        float t = 1e34f;
        int instID;
        int primID;
        vec2 bary;
        vec3 O, D;
        generatePrimaryRay(pathID, &O, &D);
        // O.w packs (pathID << 8) | flags; flag bit 1 marks the path specular.
        pathOrigins[pathID] = vec4(O, __uint_as_float((pathID << 8) + 1 /* 1 == specular */));
        pathDirections[pathID] = vec4(D, 0);
        // Default is a miss: primIdx < 0 makes the shade kernel sample the sky.
        vec4 result = vec4(0, 0, __int_as_float(-1), 0);
        if (intersect_scene(O, D, &instID, &primID, &t, &bary, 1e-5f))
            // x: barycentrics quantized to 2x16 bits, y: instance, z: prim, w: t.
            result = vec4(__uint_as_float(uint(65535.0f * bary.x) | (uint(65535.0f * bary.y) << 16)),
                          __int_as_float(uint(instID)), __int_as_float(primID), t);
        pathStates[bufferID] = result;
    }
    else if (stage == Secondary)
    {
        const uint bufferIndex = pathLength % 2;
        const uint bufferID = pathID + bufferIndex * stride;
        float t = 1e34f;
        int instID;
        int primID;
        vec2 bary;
        const vec4 O4 = pathOrigins[bufferID];
        const vec4 D4 = pathDirections[bufferID];
        const vec3 O = O4;
        const vec3 D = D4;
        vec4 result = vec4(0, 0, __int_as_float(-1), 0);
        if (intersect_scene(O, D, &instID, &primID, &t, &bary, 1e-5f))
            // Pack with `|` for consistency with the Primary stage (the value
            // is identical to the previous `+`: the 16-bit fields never carry).
            result = vec4(__uint_as_float(uint(65535.0f * bary.x) | (uint(65535.0f * bary.y) << 16)),
                          __int_as_float(uint(instID)), __int_as_float(primID), t);
        pathStates[bufferID] = result;
    }
    else if (stage == Shadow)
    {
        const vec4 O4 = connectData[pathID].Origin;
        const vec4 D4 = connectData[pathID].Direction;
        const vec3 O = vec3(O4);
        const vec3 D = vec3(D4);
        // D4.w is the distance to the light; an occluded ray contributes nothing.
        if (is_occluded(O, D, geometryEpsilon, D4.w))
            return;
        const vec4 contribution = connectData[pathID].Emission;
        const uint pixelID = __float_as_uint(contribution.w);
        accumulator[pixelID] += vec4(vec3(contribution), 1.0f);
    }
}
__global__ __launch_bounds__(128 /* Max block size */, 4 /* Min blocks per sm */) void shade_rays(const uint pathLength,
uint count)
{
const int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= counters->activePaths)
return;
const uint bufferIndex = pathLength % 2;
const uint nextBufferIndex = 1 - bufferIndex;
const vec4 hitData = pathStates[jobIndex + bufferIndex * stride];
const vec4 O4 = pathOrigins[jobIndex + bufferIndex * stride];
const vec4 D4 = pathDirections[jobIndex + bufferIndex * stride];
vec4 T4 = pathLength == 0 ? vec4(1.0f) : pathThroughputs[jobIndex + bufferIndex * stride];
uint flags = __float_as_uint(O4.w) & 0xFF;
vec3 throughput = vec3(T4);
const float bsdfPdf = T4.w;
const vec3 D = glm::vec3(D4);
const uint pathIndex = (__float_as_uint(O4.w) >> 8u);
const int primIdx = __float_as_int(hitData.z);
if (primIdx < 0)
{
// formulas by Paul Debevec, http://www.pauldebevec.com/Probes
const uint u = static_cast<uint>(static_cast<float>(skyboxWidth) * 0.5f *
(1.0f + atan2(D.x, -D.z) * glm::one_over_pi<float>()));
const uint v = static_cast<uint>(static_cast<float>(skyboxHeight) * acos(D.y) * glm::one_over_pi<float>());
const uint idx = u + v * skyboxWidth;
const vec3 skySample = idx < skyboxHeight * skyboxWidth ? skybox[idx] : vec3(0);
vec3 contribution = throughput * (1.0f / bsdfPdf) * vec3(skySample);
if (any(isnan(contribution)))
return;
clampIntensity(contribution, clampValue);
accumulator[pathIndex] += vec4(contribution, 0.0f);
return;
}
const vec3 O = glm::vec3(O4);
const vec3 I = O + D * hitData.w;
const uint uintBaryCentrics = __float_as_uint(hitData.x);
const int instanceIdx = __float_as_uint(hitData.y);
const InstanceBVHDescriptor &instance = instances[instanceIdx];
const DeviceTriangle &triangle = instance.triangles[primIdx];
const vec2 barycentrics =
vec2(float(uintBaryCentrics & 65535), float((uintBaryCentrics >> 16) & 65535)) * (1.0f / 65535.0f);
glm::vec3 N, iN, T, B;
const ShadingData shadingData = getShadingData(D, barycentrics.x, barycentrics.y, view->spreadAngle * hitData.w,
triangle, instanceIdx, N, iN, T, B, instance.normal_transform);
if (pathLength == 0 && pathIndex == counters->probeIdx)
{
counters->probedInstanceId = instanceIdx;
counters->probedPrimId = primIdx;
counters->probedDistance = hitData.w;
}
// Detect alpha in the shading code.
if (shadingData.flags & 1)
{
if (pathLength < MAX_PATH_LENGTH)
{
if (any(isnan(throughput)))
return;
const uint extensionRayIdx = atomicAdd(&counters->extensionRays, 1);
pathOrigins[extensionRayIdx + nextBufferIndex * stride] = vec4(I + D * geometryEpsilon, O4.w);
pathDirections[extensionRayIdx + nextBufferIndex * stride] = D4;
pathStates[extensionRayIdx + nextBufferIndex * stride] = T4;
// TODO: this never gets hit, fix this
}
return;
}
// Terminate path on light
if (shadingData.isEmissive()) /* r, g or b exceeds 1 */
{
const float DdotNL = -dot(D, N);
vec3 contribution = vec3(0);
if (DdotNL > 0)
{
if (pathLength == 0)
{
// Only camera rays will be treated special
contribution = shadingData.color;
}
#if VARIANCE_REDUCTION
else if (flags & IS_SPECULAR)
{
contribution = throughput * shadingData.color * (1.0f / bsdfPdf);
}
else
{
// Last vertex was not specular: apply MIS
const vec3 lastN = UnpackNormal(floatBitsToUint(D4.w));
const float lightPdf = CalculateLightPDF(D, hitData.w, triangle.getArea(), N);
const int triangleIdx = int(triangle.getLightTriangleIndex());
const float pickProb = LightPickProb(triangleIdx, O, lastN, I);
if ((bsdfPdf + lightPdf * pickProb) <= 0)
return;
contribution = throughput * shadingData.color * (1.0f / (bsdfPdf + lightPdf * pickProb));
}
#else
else
{
contribution = throughput * shadingData.color * (1.0f / bsdfPdf);
}
#endif
}
if (any(isnan(contribution)))
contribution = vec3(0);
clampIntensity(contribution, clampValue);
accumulator[pathIndex] += vec4(contribution, 0.0f);
return;
}
if (shadingData.getRoughness() < MIN_ROUGHNESS)
flags |= IS_SPECULAR; // Object was specular
else
flags &= ~IS_SPECULAR; // Object was not specular
uint seed = WangHash(pathIndex * 16789 + counters->samplesTaken * 1791 + pathLength * 720898027);
const float flip = (dot(D, N) > 0) ? -1.0f : 1.0f;
N *= flip; // Fix geometric normal
iN *= flip; // Fix interpolated normal
throughput *= 1.0f / bsdfPdf; // Apply postponed bsdf pdf
#if VARIANCE_REDUCTION
if ((flags & IS_SPECULAR) == 0 &&
(lightCounts.areaLightCount + lightCounts.pointLightCount + lightCounts.directionalLightCount +
lightCounts.spotLightCount) > 0) // Only cast shadow rays for non-specular objects
{
vec3 lightColor;
float r0, r1, pickProb, lightPdf = 0;
#if BLUENOISE
if (counters->samplesTaken < 256)
{
const int x = int(pathIndex % scrWidth);
const int y = int(pathIndex / scrWidth);
r0 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 4);
r1 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 5);
}
else
{
r0 = RandomFloat(seed);
r1 = RandomFloat(seed);
}
#else
r0 = RandomFloat(seed);
r1 = RandomFloat(seed);
#endif
vec3 L = RandomPointOnLight(r0, r1, I, iN, pickProb, lightPdf, lightColor) - I;
const float dist = length(L);
L *= 1.0f / dist;
const float NdotL = dot(L, iN);
if (NdotL > 0 && lightPdf > 0)
{
float shadowPdf;
const vec3 sampledBSDF = EvaluateBSDF(shadingData, iN, T, B, D * -1.0f, L, shadowPdf, seed);
if (shadowPdf > 0)
{
// calculate potential contribution
vec3 contribution = throughput * sampledBSDF * lightColor * (NdotL / (shadowPdf + lightPdf * pickProb));
clampIntensity(contribution, clampValue);
if (!any(isnan(contribution)))
{
// Add fire-and-forget shadow ray to the connections buffer
const uint shadowRayIdx = atomicAdd(&counters->shadowRays, 1); // compaction
connectData[shadowRayIdx].Origin = vec4(SafeOrigin(I, L, N, geometryEpsilon), 0);
connectData[shadowRayIdx].Direction = vec4(L, dist - 2.0f * geometryEpsilon);
connectData[shadowRayIdx].Emission = vec4(contribution, uintBitsToFloat(pathIndex));
}
}
}
}
#endif
if (pathLength >= MAX_PATH_LENGTH) // Early out in case we reached maximum path length
return;
vec3 R, bsdf;
float newBsdfPdf = 0.0f;
// float r3, r4;
//#if BLUENOISE // TODO
// if (counters->samplesTaken < 256) // Blue noise
// {
// const int x = int(pathIndex % scrWidth) & 127;
// const int y = int(pathIndex / scrWidth) & 127;
// r3 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 4);
// r4 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 5);
// }
// else
// {
// r3 = RandomFloat(seed);
// r4 = RandomFloat(seed);
// }
//#else
// r3 = RandomFloat(seed);
// r4 = RandomFloat(seed);
//#endif
bsdf = SampleBSDF(shadingData, iN, N, T, B, D * -1.0f, hitData.w, flip < 0, R, newBsdfPdf, seed);
throughput = throughput * 1.0f / SurvivalProbability(throughput) * bsdf * abs(dot(iN, R));
if (newBsdfPdf < 1e-6f || isnan(newBsdfPdf) || any(lessThan(throughput, vec3(0.0f))))
return; // Early out in case we have an invalid bsdf
const uint extensionRayIdx = atomicAdd(&counters->extensionRays, 1u); // Get compacted index for extension ray
pathOrigins[extensionRayIdx + nextBufferIndex * stride] =
vec4(SafeOrigin(I, R, N, geometryEpsilon), uintBitsToFloat((pathIndex << 8u) | flags));
pathDirections[extensionRayIdx + nextBufferIndex * stride] = vec4(R, uintBitsToFloat(PackNormal(iN)));
pathThroughputs[extensionRayIdx + nextBufferIndex * stride] = vec4(throughput, newBsdfPdf);
}
// Trace primary/extension rays over the full screen: one thread per pixel,
// grouped into 8x8 ray packets. Returns the launch status.
__host__ cudaError intersectRays(IntersectionStage stage, const uint pathLength, const uint width, const uint height)
{
	constexpr uint PACKET_WIDTH = 8;
	constexpr uint PACKET_HEIGHT = 8;
	// Round the screen up to whole packets so edge pixels are covered.
	const uint packetsX = NEXTMULTIPLEOF(width, PACKET_WIDTH) / PACKET_WIDTH;
	const uint packetsY = NEXTMULTIPLEOF(height, PACKET_HEIGHT) / PACKET_HEIGHT;
	intersect_rays<<<dim3(packetsX, packetsY), dim3(PACKET_WIDTH, PACKET_HEIGHT)>>>(stage, pathLength, width, height, width * height);
	return cudaGetLastError();
}
// Trace a compacted, linear ray buffer: 64-thread blocks, one thread per ray.
// Returns the launch status.
__host__ cudaError intersectRays(IntersectionStage stage, const uint pathLength, const uint count)
{
	constexpr uint BLOCK_SIZE = 64;
	const dim3 blockDim = dim3(BLOCK_SIZE);
	// Ceil-divide so a partially filled tail block still covers every ray.
	const dim3 gridDim = dim3(NEXTMULTIPLEOF(count, BLOCK_SIZE) / BLOCK_SIZE);
	intersect_rays<<<gridDim, blockDim>>>(stage, pathLength, count);
	return cudaGetLastError();
}
// Shade the compacted hit buffer: 128-thread blocks, one thread per path.
// Returns the launch status.
__host__ cudaError shadeRays(const uint pathLength, const uint count)
{
	constexpr uint BLOCK_SIZE = 128;
	const dim3 gridDim = dim3(NEXTMULTIPLEOF(count, BLOCK_SIZE) / BLOCK_SIZE);
	shade_rays<<<gridDim, dim3(BLOCK_SIZE)>>>(pathLength, count);
	return cudaGetLastError();
}
#include "common.cuh"
#include "common/include/conversion_kernels.cuh"
#include "operation/operation_interface.h"
namespace SparseOperationKit {
// Distributed COO -> CSR conversion operation.
// forward() keeps only the (value, row-index) pairs whose key is owned by this
// replica (key % global_gpu_count == global_replica_id), compacts them with
// cub::DeviceSelect::Flagged, and converts the surviving COO row indices to
// CSR row offsets with cusparseXcoo2csr.
template <typename KeyType, typename ValueType>
class CsrConversionDistributed : public Operation {
 public:
  explicit CsrConversionDistributed(ConstructionContext_t context)
      : Operation(context),
        resource_mgr_(context->get_resource_mgr()),
        slot_num_(context->get_slot_num()),
        max_nnz_(context->get_max_nnz()) {
    // Only reserve vector capacity here; the tensors themselves are created
    // later in allocate_forward_spaces().
    const size_t local_gpu_count = resource_mgr_->get_local_gpu_count();
    binary_flags_.reserve(local_gpu_count);
    cub_d_temp_storage_.reserve(local_gpu_count);
    cub_coo_indices_output_.reserve(local_gpu_count);
    cub_values_output_.reserve(local_gpu_count);
    cub_host_num_selected_.reserve(local_gpu_count);
    cub_dev_num_selected_.reserve(local_gpu_count);
    cusparse_csr_row_offsets_output_.reserve(local_gpu_count);
    csr_row_offsets_cast_.reserve(local_gpu_count);
  }

  // Allocate the per-GPU device and host buffers used by forward().
  void allocate_forward_spaces() override {
    const size_t global_batch_size = base_context()->get_global_batch_size();
    for (size_t dev_id = 0; dev_id < resource_mgr_->get_local_gpu_count(); ++dev_id) {
      auto &buffer = base_context()->get_buffer(dev_id);
      auto &host_buffer = base_context()->get_host_buffer(dev_id);
      {
        // One ownership flag per potential input element.
        Tensor2<bool> binary_flag;
        buffer->reserve({1, global_batch_size * slot_num_ * max_nnz_}, &binary_flag);
        binary_flags_.push_back(binary_flag);
      }
      {
        // COO row indices that survive the selection (int32, as required by
        // cusparseXcoo2csr).
        Tensor2<int32_t> cub_coo_indices_output;
        buffer->reserve({1, global_batch_size * slot_num_ * max_nnz_}, &cub_coo_indices_output);
        cub_coo_indices_output_.push_back(cub_coo_indices_output);
      }
      {
        // Keys (values) that survive the selection.
        Tensor2<KeyType> cub_values_output;
        buffer->reserve({1, global_batch_size * slot_num_ * max_nnz_}, &cub_values_output);
        cub_values_output_.push_back(cub_values_output);
      }
      {
        // Host-side copy of the selected count (nnz).
        Tensor2<size_t> cub_host_num_selected;
        host_buffer->reserve({1, 1}, &cub_host_num_selected);
        cub_host_num_selected_.push_back(cub_host_num_selected);
      }
      {
        // Device-side selected count written by cub::DeviceSelect::Flagged.
        Tensor2<size_t> cub_dev_num_selected;
        buffer->reserve({1, 1}, &cub_dev_num_selected);
        cub_dev_num_selected_.push_back(cub_dev_num_selected);
      }
      {
        // CSR row offsets: one entry per row plus the trailing total.
        Tensor2<int32_t> cusparse_csr_row_offset_output;
        buffer->reserve({1, global_batch_size * slot_num_ + 1}, &cusparse_csr_row_offset_output);
        cusparse_csr_row_offsets_output_.push_back(cusparse_csr_row_offset_output);
      }
      {
        // int64 copy of the row offsets for downstream consumers.
        Tensor2<int64_t> csr_row_offset_cast;
        buffer->reserve({1, global_batch_size * slot_num_ + 1}, &csr_row_offset_cast);
        csr_row_offsets_cast_.push_back(csr_row_offset_cast);
      }
      {
        // Query cub's temp-storage requirement for both Flagged calls made in
        // forward() (values and row indices) and keep the larger of the two.
        size_t size_0 = 0;
        CK_CUDA(cub::DeviceSelect::Flagged(
            (void *)nullptr, size_0, (KeyType *)nullptr, (bool *)nullptr, (KeyType *)nullptr,
            (size_t *)nullptr, static_cast<int32_t>(global_batch_size * slot_num_ * max_nnz_)));
        size_t size_1 = 0;
        CK_CUDA(cub::DeviceSelect::Flagged(
            (void *)nullptr, size_1, (int64_t *)nullptr, (bool *)nullptr, (int32_t *)nullptr,
            (size_t *)nullptr, static_cast<int32_t>(global_batch_size * slot_num_ * max_nnz_)));
        size_t size = (size_0 > size_1) ? size_0 : size_1;
        Tensor2<void> cub_d_temp_storage;
        buffer->reserve({size}, &cub_d_temp_storage);
        cub_d_temp_storage_.push_back(cub_d_temp_storage);
      }
    }  // for dev_id
  }

  void allocate_backward_spaces() override {
    // it does nothing
  }

  // Build this replica's CSR slice from the gathered COO inputs.
  // Inputs: "total_values", "total_row_indices", "host_total_num_elements".
  // Outputs: "replica_csr_values", "replica_row_offset", "replica_host_nnz".
  void forward(const Context_t &replica_context, const bool training) override {
    const size_t global_replica_id = replica_context->get_global_replica_id();
    const size_t local_replica_id = resource_mgr_->cal_local_id_from_global_id(global_replica_id);
    const auto &local_gpu = resource_mgr_->get_local_gpu(local_replica_id);
    const auto &stream = local_gpu->get_stream();
    const auto &total_values = replica_context->input("total_values");
    const auto &total_row_indices = replica_context->input("total_row_indices");
    const auto &host_total_num_elements = replica_context->input("host_total_num_elements");
    // reset internal buffers to zero before accumulating into them
    reset(local_replica_id);
    // generate binary vector: flag[i] == true iff this replica owns values[i]
    gen_binary_vector(global_replica_id,
                      /*values=*/total_values,
                      /*total_valid_num=*/host_total_num_elements,
                      /*binary_flag=*/binary_flags_[local_replica_id]);
    // choose values based on binary vector
    size_t total_valid_num = host_total_num_elements->GetPtrWithType<size_t>()[0];
    size_t size = cub_d_temp_storage_[local_replica_id].get_size_in_bytes();
    CK_CUDA(cub::DeviceSelect::Flagged(
        /*d_temp_storage=*/cub_d_temp_storage_[local_replica_id].get_ptr(),
        /*temp_storage_bytes=*/size,
        /*d_in=*/total_values->GetPtrWithType<KeyType>(),
        /*d_flags=*/binary_flags_[local_replica_id].get_ptr(),
        /*d_out=*/cub_values_output_[local_replica_id].get_ptr(),
        /*d_num_selected_out=*/cub_dev_num_selected_[local_replica_id].get_ptr(),
        /*num_items=*/total_valid_num, stream));
    // copy num_selected (nnz) to host; the synchronize below makes it safe to
    // read on the host and to pass to cusparseXcoo2csr further down
    CK_CUDA(cudaMemcpyAsync(cub_host_num_selected_[local_replica_id].get_ptr(),
                            cub_dev_num_selected_[local_replica_id].get_ptr(),
                            cub_dev_num_selected_[local_replica_id].get_size_in_bytes(),
                            cudaMemcpyDeviceToHost, stream));
    CK_CUDA(cudaStreamSynchronize(stream));
    // choose row_indices based on binary vector (same flags as the values, so
    // both outputs stay aligned element-for-element)
    CK_CUDA(cub::DeviceSelect::Flagged(
        /*d_temp_storage=*/cub_d_temp_storage_[local_replica_id].get_ptr(),
        /*temp_storage_bytes=*/size,
        /*d_in=*/total_row_indices->GetPtrWithType<int64_t>(),
        /*d_flags=*/binary_flags_[local_replica_id].get_ptr(),
        /*d_out=*/cub_coo_indices_output_[local_replica_id].get_ptr(),
        /*d_num_selected_out=*/cub_dev_num_selected_[local_replica_id].get_ptr(),
        /*num_items=*/total_valid_num, stream));
    // convert COO row_indices to CSR row_offsets.
    size_t rows_num = binary_flags_[local_replica_id].get_num_elements() / max_nnz_;
    CK_CUSPARSE(cusparseXcoo2csr(
        /*handle=*/local_gpu->get_cusparse(),
        /*cooRowInd=*/cub_coo_indices_output_[local_replica_id].get_ptr(),
        /*nnz=*/static_cast<int32_t>(cub_host_num_selected_[local_replica_id].get_ptr()[0]),
        /*m=*/rows_num,
        /*csrRowPtr=*/cusparse_csr_row_offsets_output_[local_replica_id].get_ptr(),
        CUSPARSE_INDEX_BASE_ZERO));
    // cast row_offset dtype from int32 to int64
    auto op = [] __device__(int value) { return static_cast<int64_t>(value); };
    transform_array<<<local_gpu->get_sm_count() * 2, 1024, 0, stream>>>(
        cusparse_csr_row_offsets_output_[local_replica_id].get_ptr(),
        csr_row_offsets_cast_[local_replica_id].get_ptr(), rows_num + 1, op);
    // set outputs
    replica_context->set_output("replica_csr_values", cub_values_output_[local_replica_id]);
    replica_context->set_output("replica_row_offset", csr_row_offsets_cast_[local_replica_id]);
    auto& host_nnz = replica_context->output("replica_host_nnz");
    host_nnz->GetPtrWithType<size_t>()[0] = static_cast<size_t>(
        cub_host_num_selected_[local_replica_id].get_ptr()[0]);
  }

  void backward(const Context_t &replica_context) override {
    // it does nothing
  }

 private:
  std::shared_ptr<ResourcesManager> resource_mgr_;
  const size_t slot_num_;   // slots (feature fields) per sample
  const size_t max_nnz_;    // max non-zeros per slot
  Tensors2<bool> binary_flags_;
  Tensors2<void> cub_d_temp_storage_;
  Tensors2<int32_t> cub_coo_indices_output_;
  Tensors2<KeyType> cub_values_output_;
  Tensors2<size_t> cub_host_num_selected_;
  Tensors2<size_t> cub_dev_num_selected_;
  Tensors2<int32_t> cusparse_csr_row_offsets_output_;
  Tensors2<int64_t> csr_row_offsets_cast_;  // always int64, cause coo-indices always int64

  // Zero all device-side scratch buffers on this replica's stream.
  void reset(const size_t local_replica_id) {
    const auto &stream = resource_mgr_->get_local_gpu(local_replica_id)->get_stream();
    CK_CUDA(cudaMemsetAsync(binary_flags_[local_replica_id].get_ptr(), 0,
                            binary_flags_[local_replica_id].get_size_in_bytes(), stream));
    CK_CUDA(cudaMemsetAsync(cub_coo_indices_output_[local_replica_id].get_ptr(), 0,
                            cub_coo_indices_output_[local_replica_id].get_size_in_bytes(), stream));
    CK_CUDA(cudaMemsetAsync(cub_values_output_[local_replica_id].get_ptr(), 0,
                            cub_values_output_[local_replica_id].get_size_in_bytes(), stream));
    CK_CUDA(cudaMemsetAsync(cusparse_csr_row_offsets_output_[local_replica_id].get_ptr(), 0,
                            cusparse_csr_row_offsets_output_[local_replica_id].get_size_in_bytes(),
                            stream));
    CK_CUDA(cudaMemsetAsync(csr_row_offsets_cast_[local_replica_id].get_ptr(), 0,
                            csr_row_offsets_cast_[local_replica_id].get_size_in_bytes(), stream));
  }

 public:
  // Launch the boolean_vector kernel to mark the elements this replica owns:
  // flag[i] = (global_replica_id == values[i] % global_gpu_count).
  void gen_binary_vector(const size_t global_replica_id, const std::shared_ptr<Tensor> values,
                         const std::shared_ptr<Tensor> total_valid_num,
                         Tensor2<bool> &binary_flag) {
    const size_t local_replica_id = resource_mgr_->cal_local_id_from_global_id(global_replica_id);
    const size_t global_gpu_count = resource_mgr_->get_global_gpu_count();
    const auto &local_gpu = resource_mgr_->get_local_gpu(local_replica_id);
    auto fn = [global_replica_id, global_gpu_count] __device__(KeyType value) -> bool {
      return (global_replica_id == value % global_gpu_count) ? true : false;
    };
    boolean_vector<<<local_gpu->get_sm_count() * 2, 1024, 0, local_gpu->get_stream()>>>(
        values->GetPtrWithType<KeyType>(), total_valid_num->GetPtrWithType<size_t>()[0], fn,
        binary_flag.get_ptr());
  }
};
// Register the conversion for every supported (key dtype, compute dtype) pair.
REGISTER_OPERATION_BUILDER("csr_conversion_distributed",
                           DataType::Int64,
                           DataType::Float32,
                           CsrConversionDistributed<int64_t, float>);
REGISTER_OPERATION_BUILDER("csr_conversion_distributed",
                           DataType::Int64,
                           DataType::Float16,
                           CsrConversionDistributed<int64_t, __half>);
REGISTER_OPERATION_BUILDER("csr_conversion_distributed",
                           DataType::Uint32,
                           DataType::Float32,
                           CsrConversionDistributed<uint32_t, float>);
REGISTER_OPERATION_BUILDER("csr_conversion_distributed",
                           DataType::Uint32,
                           DataType::Float16,
                           CsrConversionDistributed<uint32_t, __half>);
}  // namespace SparseOperationKit
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <cub/cub.cuh>
#include <math.h>
namespace MLCommon {
namespace Metrics {
// Strategy used to build the contingency matrix; getImplVersion() picks one
// based on the matrix size versus shared-memory / L2 capacity.
typedef enum {
  IMPL_NONE,         // no implementation selected
  SMEM_ATOMICS,      // per-block shared-memory histogram, then one global merge
  SORT_AND_GATOMICS  // radix-sort pairs first to improve atomic locality
  GLOBAL_ATOMICS,    // atomics straight into global memory
} ContingencyMatrixImplType;
// One thread per sample: bump the (groundTruth, predicted) cell of the square
// outMatWidth x outMatWidth matrix with a global-memory atomic. Labels are
// shifted by outIdxOffset (the minimum label).
template <typename T, typename OutT = int>
__global__ void devConstructContingencyMatrix(const T* groundTruth,
                                              const T* predicted,
                                              int nSamples,
                                              OutT* outMat,
                                              int outIdxOffset,
                                              int outMatWidth)
{
  const int sampleIdx = threadIdx.x + blockDim.x * blockIdx.x;
  if (sampleIdx >= nSamples) return;  // tail-block guard
  const T gt = groundTruth[sampleIdx];
  const T pd = predicted[sampleIdx];
  // row = ground truth, column = prediction
  raft::myAtomicAdd(outMat + (gt - outIdxOffset) * outMatWidth + pd - outIdxOffset, OutT(1));
}
// Launch the global-atomics kernel over all samples on the given stream.
template <typename T, typename OutT = int>
void computeCMatWAtomics(const T* groundTruth,
                         const T* predictedLabel,
                         int nSamples,
                         OutT* outMat,
                         int outIdxOffset,
                         int outDimN,
                         cudaStream_t stream)
{
  // The kernel uses no shared memory, so prefer a larger L1.
  CUDA_CHECK(cudaFuncSetCacheConfig(devConstructContingencyMatrix<T, OutT>, cudaFuncCachePreferL1));
  constexpr int blockSize = 128;
  const auto numBlocks = raft::ceildiv(nSamples, blockSize);
  devConstructContingencyMatrix<T, OutT><<<numBlocks, blockSize, 0, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  CUDA_CHECK(cudaGetLastError());
}
// Shared-memory variant: each block accumulates a private outMatWidth^2
// histogram in dynamic shared memory, then merges it into the global matrix
// with one atomic per counter. Requires outMatWidth^2 * sizeof(OutT) bytes of
// dynamic shared memory (guaranteed by getImplVersion()'s size check).
template <typename T, typename OutT = int>
__global__ void devConstructContingencyMatrixSmem(const T* groundTruth,
                                                  const T* predicted,
                                                  int nSamples,
                                                  OutT* outMat,
                                                  int outIdxOffset,
                                                  int outMatWidth)
{
  extern __shared__ char smem[];
  auto* sMemMatrix = reinterpret_cast<OutT*>(smem);
  // Zero the block-private histogram (shared memory is uninitialized).
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    sMemMatrix[smemIdx] = 0;
  }
  __syncthreads();
  int elementId = threadIdx.x + blockDim.x * blockIdx.x;
  if (elementId < nSamples) {
    T gt  = groundTruth[elementId];
    T pd  = predicted[elementId];
    // row = ground truth, column = prediction; labels shifted by outIdxOffset
    auto outputIdx = (gt - outIdxOffset) * outMatWidth + pd - outIdxOffset;
    raft::myAtomicAdd(sMemMatrix + outputIdx, OutT(1));
  }
  __syncthreads();
  // Merge the block-private histogram into the global matrix.
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    raft::myAtomicAdd(outMat + smemIdx, sMemMatrix[smemIdx]);
  }
}
// Launch the shared-memory-histogram kernel over all samples on the stream.
template <typename T, typename OutT = int>
void computeCMatWSmemAtomics(const T* groundTruth,
                             const T* predictedLabel,
                             int nSamples,
                             OutT* outMat,
                             int outIdxOffset,
                             int outDimN,
                             cudaStream_t stream)
{
  constexpr int blockSize = 128;
  const auto numBlocks = raft::ceildiv(nSamples, blockSize);
  // One block-private outDimN x outDimN histogram in dynamic shared memory.
  const size_t smemBytes = static_cast<size_t>(outDimN) * outDimN * sizeof(OutT);
  devConstructContingencyMatrixSmem<T, OutT><<<numBlocks, blockSize, smemBytes, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  CUDA_CHECK(cudaGetLastError());
}
// Sort-based path: radix-sort the (groundTruth, predicted) pairs by label so
// subsequent global atomics hit nearby addresses, then count with atomics.
// Workspace layout: [sorted keys | sorted values | cub temp storage], with
// each staging buffer 256-byte aligned (see getContingencyMatrixWorkspaceSize).
template <typename T, typename OutT = int>
void contingencyMatrixWSort(const T* groundTruth,
                            const T* predictedLabel,
                            int nSamples,
                            OutT* outMat,
                            T minLabel,
                            T maxLabel,
                            void* workspace,
                            size_t workspaceSize,
                            cudaStream_t stream)
{
  T* outKeys = reinterpret_cast<T*>(workspace);
  auto alignedBufferSz = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
  T* outValue = reinterpret_cast<T*>((size_t)workspace + alignedBufferSz);
  void* pWorkspaceCub = reinterpret_cast<void*>((size_t)workspace + 2 * alignedBufferSz);
  // Sort only the low bits needed to cover maxLabel. NOTE(review): when
  // maxLabel is an exact power of two its top bit is excluded, so the sort is
  // only approximate -- harmless, since the sort is purely a locality
  // optimization and correctness comes from the atomics afterwards.
  auto bitsToSort = log2<int>(maxLabel);
  if (!raft::isPo2(maxLabel)) ++bitsToSort;
  // we dont really need perfect sorting, should get by with some sort of
  // binning-reordering operation
  ///@todo: future work - explore "efficient" custom binning kernels vs cub sort
  CUDA_CHECK(cub::DeviceRadixSort::SortPairs(pWorkspaceCub,
                                             workspaceSize,
                                             groundTruth,
                                             outKeys,
                                             predictedLabel,
                                             outValue,
                                             nSamples,
                                             0,
                                             bitsToSort,
                                             stream));
  auto outDimM_N = int(maxLabel - minLabel + 1);
  computeCMatWAtomics<T, OutT>(outKeys, outValue, nSamples, outMat, minLabel, outDimM_N, stream);
}
// Pick the counting strategy for an outDimN x outDimN matrix: shared-memory
// histograms when the matrix fits in a block's shared memory, plain global
// atomics when it fits in L2, and sort-then-atomics otherwise.
template <typename OutT = int>
ContingencyMatrixImplType getImplVersion(OutT outDimN)
{
  int currDevice = 0;
  int l2CacheSize = 0;
  // no way to query this from CUDA APIs, value for CC 7.0, 3.0
  int maxBlocksResidentPerSM = 16;
  CUDA_CHECK(cudaGetDevice(&currDevice));
  CUDA_CHECK(cudaDeviceGetAttribute(&l2CacheSize, cudaDevAttrL2CacheSize, currDevice));
  auto maxSmemPerBlock = raft::getSharedMemPerBlock();
  // Budget for 8 resident blocks per SM on the shared-memory path: more blocks
  // would shrink the effective L1 and degrade performance.
  const OutT smemLimit =
    std::floor(std::sqrt(maxSmemPerBlock / (sizeof(OutT) * (maxBlocksResidentPerSM / 2))));
  const OutT l2Limit = std::floor(std::sqrt(l2CacheSize / sizeof(OutT)));
  if (outDimN <= smemLimit) return SMEM_ATOMICS;
  if (outDimN <= l2Limit) return GLOBAL_ATOMICS;
  return SORT_AND_GATOMICS;
}
/**
* @brief use this to allocate output matrix size
* size of matrix = (maxLabel - minLabel + 1)^2 * sizeof(int)
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param nSamples: number of elements in input array
* @param stream: cuda stream for execution
* @param minLabel: [out] calculated min value in input array
* @param maxLabel: [out] calculated max value in input array
*/
// Compute the min and max label in a single fused device pass on `stream`.
// Note: *min_max dereferences block the host until the reduction finishes.
template <typename T>
void getInputClassCardinality(
  const T* groundTruth, const int nSamples, cudaStream_t stream, T& minLabel, T& maxLabel)
{
  const thrust::device_ptr<const T> labels = thrust::device_pointer_cast(groundTruth);
  const auto extrema =
    thrust::minmax_element(thrust::cuda::par.on(stream), labels, labels + nSamples);
  minLabel = *extrema.first;
  maxLabel = *extrema.second;
}
/**
* @brief Calculate workspace size for running contingency matrix calculations
* @tparam T label type
* @tparam OutT output matrix type
* @param nSamples: number of elements in input array
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param stream: cuda stream for execution
* @param minLabel: Optional, min value in input array
* @param maxLabel: Optional, max value in input array
*/
template <typename T, typename OutT = int>
size_t getContingencyMatrixWorkspaceSize(int nSamples,
                                         const T* groundTruth,
                                         cudaStream_t stream,
                                         T minLabel = std::numeric_limits<T>::max(),
                                         T maxLabel = std::numeric_limits<T>::max())
{
  size_t workspaceSize = 0;
  // max() sentinels mean "not supplied": derive the label range from the data.
  // below is a redundant computation - can be avoided
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  auto outDimN = OutT(maxLabel - minLabel + 1);
  ContingencyMatrixImplType implVersion = getImplVersion<OutT>(outDimN);
  // Only the sort-based path needs scratch: two 256-byte-aligned staging
  // buffers (sorted keys and values) plus cub's radix-sort temp storage.
  if (implVersion == SORT_AND_GATOMICS) {
    void* pWorkspaceCub{};
    size_t tmpStorageBytes = 0;
    // no-op pointers to get workspace size
    T* pTmpUnused{};
    CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
      pWorkspaceCub, tmpStorageBytes, pTmpUnused, pTmpUnused, pTmpUnused, pTmpUnused, nSamples));
    // Must match the offsets computed in contingencyMatrixWSort.
    auto tmpStagingMemorySize = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
    tmpStagingMemorySize *= 2;
    workspaceSize = tmpStagingMemorySize + tmpStorageBytes;
  }
  return workspaceSize;
}
/**
 * @brief construct contingency matrix given input ground truth and prediction
* labels. Users should call function getInputClassCardinality to find
* and allocate memory for output. Similarly workspace requirements
* should be checked using function getContingencyMatrixWorkspaceSize
* @tparam T label type
* @tparam OutT output matrix type
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param predictedLabel: device 1-d array for prediction (num of columns)
* @param nSamples: number of elements in input array
 * @param outMat: output buffer for contingency matrix
* @param stream: cuda stream for execution
* @param workspace: Optional, workspace memory allocation
* @param workspaceSize: Optional, size of workspace memory
* @param minLabel: Optional, min value in input ground truth array
* @param maxLabel: Optional, max value in input ground truth array
*/
template <typename T, typename OutT = int>
void contingencyMatrix(const T* groundTruth,
                       const T* predictedLabel,
                       int nSamples,
                       OutT* outMat,
                       cudaStream_t stream,
                       void* workspace = nullptr,
                       size_t workspaceSize = 0,
                       T minLabel = std::numeric_limits<T>::max(),
                       T maxLabel = std::numeric_limits<T>::max())
{
  // assumptions:
  // output is not at par with scikit learn - output will be square matrix
  // always with numRows = numColumns = numOfClassesInTrueLabel
  // it is also assumed that true labels are monotonically increasing
  // if for some reason groundTruth completely skips some labels
  // eg: {0,1,2,5} instead of {0,1,2,3}.
  // Output matrix will still have empty rows for label value {3,4}
  // Users can use "make_monotonic" to convert their discontinuous input label
  // range to a monotonically increasing one //
  // this also serves as way to measure co-occurrence/joint counts for NLP tasks which
  // can be used to then compute pointwise mutual information and mutual information
  // max() sentinels mean "not supplied": derive the label range from the data.
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  auto outDimM_N = OutT(maxLabel - minLabel + 1);
  // Counters must start at zero; all three strategies only ever add.
  CUDA_CHECK(cudaMemsetAsync(outMat, 0, sizeof(OutT) * outDimM_N * outDimM_N, stream));
  ContingencyMatrixImplType implVersion = getImplVersion<OutT>(outDimM_N);
  switch (implVersion) {
    case SMEM_ATOMICS:
      // smem atomics and then single global mem atomics only works
      // when all label count can fit in smem for a block
      // helps when GLOBAL_ATOMICS performance blocked by atomic update
      // serialization -when very less labels ~10 labels
      computeCMatWSmemAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    case GLOBAL_ATOMICS:
      // launch kernel - global atomic ops per (groundTruth,predictedValue) pair
      computeCMatWAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    // more L2 thrashing if atomic OPs land in completely different mem
    // segment - when more labels
    case SORT_AND_GATOMICS:
      contingencyMatrixWSort<T, OutT>(groundTruth,
                                      predictedLabel,
                                      nSamples,
                                      outMat,
                                      minLabel,
                                      maxLabel,
                                      workspace,
                                      workspaceSize,
                                      stream);
      break;
    case IMPL_NONE: break;
  }
}
}; // namespace Metrics
};  // namespace MLCommon
#include <hip/hip_runtime.h>
#pragma once
// Forward declaration of the ECL-CC driver.
// Bug fix: the parameter types must match the definition below. nstat (the
// per-vertex labels) and the worklist are written by the kernels, so they
// cannot be const-qualified; the old const declaration named an overload that
// was never defined, breaking any caller compiled against this declaration.
inline void ecl_connected_components(const int nodes,
                                     const int edges,
                                     const int *d_nidx,
                                     const int *d_nlist,
                                     int *d_nstat,
                                     int *d_wl,
                                     const hipDeviceProp_t& deviceProp);
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <set>
static const int Device = 0;
static const int ThreadsPerBlock = 256;
#ifdef __HIP_PLATFORM_NVCC__
static const int warpsize = 32;
#else
static const int warpsize = 64;
#endif
static __device__ int topL, posL, topH, posH;
/* initialize with first smaller neighbor ID */
// Labels each vertex v with min(v, first neighbor smaller than v); also resets
// the global worklist cursors (low-degree entries grow from index 0 via
// topL/posL, high-degree entries from nodes-1 downward via topH/posH).
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void init(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat)
{
  // grid-stride loop over all vertices
  const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock;
  const int incr = gridDim.x * ThreadsPerBlock;
  for (int v = from; v < nodes; v += incr) {
    const int beg = nidx[v];
    const int end = nidx[v + 1];
    // scan neighbors until one smaller than v is found (m stays v otherwise)
    int m = v;
    int i = beg;
    while ((m == v) && (i < end)) {
      m = min(m, nlist[i]);
      i++;
    }
    nstat[v] = m;
  }
  // exactly one thread initializes the device-global worklist cursors
  if (from == 0) {topL = 0; posL = 0; topH = nodes - 1; posH = nodes - 1;}
}
/* intermediate pointer jumping */
// Returns the current representative (root) of idx's component and compresses
// the chain toward it along the way. NOTE(review): runs concurrently with
// atomicCAS hooks in the compute kernels; relies on ECL-CC's invariant that
// labels only ever decrease -- confirm against the ECL-CC paper before
// restructuring.
inline __device__ int representative(const int idx, int* const __restrict__ nstat)
{
  int curr = nstat[idx];
  if (curr != idx) {
    int next, prev = idx;
    while (curr > (next = nstat[curr])) {
      nstat[prev] = next;  // shortcut prev past curr to curr's parent
      prev = curr;
      curr = next;
    }
  }
  return curr;
}
/* process low-degree vertices at thread granularity and fill worklists */
// Degree <= 16: union this vertex with its smaller neighbors right here.
// Degree 17..352: push onto the front-growing worklist for compute2 (warps).
// Degree > 352: push onto the back-growing worklist for compute3 (blocks).
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void compute1(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, int* const __restrict__ wl)
{
  const int from = threadIdx.x + blockIdx.x * ThreadsPerBlock;
  const int incr = gridDim.x * ThreadsPerBlock;
  for (int v = from; v < nodes; v += incr) {
    const int vstat = nstat[v];
    // vertices already labeled with themselves have no smaller neighbor (init)
    if (v != vstat) {
      const int beg = nidx[v];
      const int end = nidx[v + 1];
      int deg = end - beg;
      if (deg > 16) {
        int idx;
        if (deg <= 352) {
          idx = atomicAdd(&topL, 1);   // medium degree: front of worklist
        } else {
          idx = atomicAdd(&topH, -1);  // high degree: back of worklist
        }
        wl[idx] = v;
      } else {
        // NOTE(review): this inner vstat deliberately shadows the outer const
        // vstat; from here on the freshly computed representative is used.
        int vstat = representative(v, nstat);
        for (int i = beg; i < end; i++) {
          const int nli = nlist[i];
          if (v > nli) {  // each edge is processed from one endpoint only
            int ostat = representative(nli, nstat);
            bool repeat;
            do {
              repeat = false;
              if (vstat != ostat) {
                // lock-free union: hook the larger root under the smaller one;
                // on CAS failure adopt the newer root value and retry
                int ret;
                if (vstat < ostat) {
                  if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) {
                    ostat = ret;
                    repeat = true;
                  }
                } else {
                  if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) {
                    vstat = ret;
                    repeat = true;
                  }
                }
              }
            } while (repeat);
          }
        }
      }
    }
  }
}
/* process medium-degree vertices at warp granularity */
// Each warp claims one worklist entry at a time (lane 0 increments posL and
// broadcasts the slot via shuffle); the lanes then stride over that vertex's
// adjacency list in parallel, running the same lock-free union as compute1.
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void compute2(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl)
{
  const int lane = threadIdx.x % warpsize;
  // lane 0 claims the next worklist slot; broadcast it to the whole warp
  // (masked __shfl_sync on NVCC, legacy __shfl on the AMD platform)
  int idx;
  if (lane == 0) idx = atomicAdd(&posL, 1);
#ifdef __HIP_PLATFORM_NVCC__
  idx = __shfl_sync(0xffffffff,idx, 0);
#else
  idx = __shfl(idx,0);
#endif
  while (idx < topL) {
    const int v = wl[idx];
    int vstat = representative(v, nstat);
    // lanes cooperatively walk v's adjacency list
    for (int i = nidx[v] + lane; i < nidx[v + 1]; i += warpsize) {
      const int nli = nlist[i];
      if (v > nli) {  // each edge is processed from one endpoint only
        int ostat = representative(nli, nstat);
        bool repeat;
        do {
          repeat = false;
          if (vstat != ostat) {
            // lock-free union: hook larger root under smaller, retry on races
            int ret;
            if (vstat < ostat) {
              if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) {
                ostat = ret;
                repeat = true;
              }
            } else {
              if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) {
                vstat = ret;
                repeat = true;
              }
            }
          }
        } while (repeat);
      }
    }
    // claim and broadcast the next worklist entry
    if (lane == 0) idx = atomicAdd(&posL, 1);
#ifdef __HIP_PLATFORM_NVCC__
    idx = __shfl_sync(0xffffffff,idx, 0);
#else
    idx = __shfl(idx,0);
#endif
  }
}
/* process high-degree vertices at block granularity */
// Each block claims one entry from the back-growing worklist at a time
// (thread 0 decrements posH and publishes the vertex through shared vB); the
// whole block then strides over that vertex's adjacency list, running the
// same lock-free union as compute1/compute2.
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void compute3(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat, const int* const __restrict__ wl)
{
  __shared__ int vB;  // vertex currently being processed by this block
  if (threadIdx.x == 0) {
    const int idx = atomicAdd(&posH, -1);
    vB = (idx > topH) ? wl[idx] : -1;  // -1 signals an exhausted worklist
  }
  __syncthreads();  // publish vB before everyone reads it
  while (vB >= 0) {
    const int v = vB;
    __syncthreads();  // all threads captured v; vB may be overwritten below
    int vstat = representative(v, nstat);
    // the block cooperatively walks v's adjacency list
    for (int i = nidx[v] + threadIdx.x; i < nidx[v + 1]; i += ThreadsPerBlock) {
      const int nli = nlist[i];
      if (v > nli) {  // each edge is processed from one endpoint only
        int ostat = representative(nli, nstat);
        bool repeat;
        do {
          repeat = false;
          if (vstat != ostat) {
            // lock-free union: hook larger root under smaller, retry on races
            int ret;
            if (vstat < ostat) {
              if ((ret = atomicCAS(&nstat[ostat], ostat, vstat)) != ostat) {
                ostat = ret;
                repeat = true;
              }
            } else {
              if ((ret = atomicCAS(&nstat[vstat], vstat, ostat)) != vstat) {
                vstat = ret;
                repeat = true;
              }
            }
          }
        } while (repeat);
      }
    }
    // thread 0 claims the next entry; barrier publishes it for the next pass
    if (threadIdx.x == 0) {
      const int idx = atomicAdd(&posH, -1);
      vB = (idx > topH) ? wl[idx] : -1;
    }
    __syncthreads();
  }
}
/* link all vertices to sink */
// Final pass: follow each vertex's label chain down to its component root and
// store the root directly, so every vertex ends up labeled with its
// representative. The write is skipped when the label is already flat.
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void flatten(const int nodes, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat)
{
  const int start = threadIdx.x + blockIdx.x * ThreadsPerBlock;
  const int step = gridDim.x * ThreadsPerBlock;
  for (int v = start; v < nodes; v += step) {
    const int first = nstat[v];
    int rep = first;
    int next;
    while (rep > (next = nstat[rep])) rep = next;  // chase labels to the root
    if (first != rep) nstat[v] = rep;
  }
}
// Run the full ECL-CC pipeline on already-populated device buffers:
// init labels -> low-degree pass (fills worklists) -> warp-granularity pass ->
// block-granularity pass -> flatten labels to component representatives.
// All launches go to the default stream, so they execute in order.
// NOTE(review): no hip error checks here and `edges` is unused by this driver;
// callers should verify with hipGetLastError()/hipDeviceSynchronize().
inline void ecl_connected_components(const int nodes,
                                     const int edges,
                                     const int *d_nidx,
                                     const int *d_nlist,
                                     int *d_nstat,
                                     int *d_wl,
                                     const hipDeviceProp_t& deviceProp)
{
  // size the grid to saturate the device at maximum residency
  const int SMs = deviceProp.multiProcessorCount;
  const int mTSM = deviceProp.maxThreadsPerMultiProcessor;
  const int blocks = SMs * mTSM / ThreadsPerBlock;
  hipLaunchKernelGGL(init, dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, d_nidx, d_nlist, d_nstat);
  hipLaunchKernelGGL(compute1, dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, d_nidx, d_nlist, d_nstat, d_wl);
  hipLaunchKernelGGL(compute2, dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, d_nidx, d_nlist, d_nstat, d_wl);
  hipLaunchKernelGGL(compute3, dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, d_nidx, d_nlist, d_nstat, d_wl);
  hipLaunchKernelGGL(flatten, dim3(blocks), dim3(ThreadsPerBlock), 0, 0, nodes, d_nidx, d_nlist, d_nstat);
}
#if 0
// RAII pair of HIP events for wall-clock timing on the default stream
// (constructor creates the events, destructor destroys them).
struct GPUTimer
{
  hipEvent_t beg, end;
  GPUTimer() {hipEventCreate(&beg); hipEventCreate(&end);}
  ~GPUTimer() {hipEventDestroy(beg); hipEventDestroy(end);}
  // record the start event
  void start() {hipEventRecord(beg, 0);}
  // record the end event, wait for it, and return the elapsed time in seconds
  double stop() {hipEventRecord(end, 0); hipEventSynchronize(end); float ms; hipEventElapsedTime(&ms, beg, end); return 0.001 * ms;}
};
// Standalone driver (disabled by the surrounding #if 0): copies the CSR graph
// to the device, runs the five-kernel ECL-CC pipeline while timing it, prints
// throughput, and copies the resulting labels back into nstat.
static void computeCC(const int nodes, const int edges, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat)
{
  hipSetDevice(Device);
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, Device);
  if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {fprintf(stderr, "ERROR: there is no CUDA capable device\n\n"); exit(-1);}
  const int SMs = deviceProp.multiProcessorCount;
  const int mTSM = deviceProp.maxThreadsPerMultiProcessor;
  printf("gpu: %s with %d SMs and %d mTpSM (%.1f MHz and %.1f MHz)\n", deviceProp.name, SMs, mTSM, deviceProp.clockRate * 0.001, deviceProp.memoryClockRate * 0.001);
  // device copies of the CSR index/adjacency arrays, labels, and worklist
  int* nidx_d;
  int* nlist_d;
  int* nstat_d;
  int* wl_d;
  if (hipSuccess != hipMalloc((void **)&nidx_d, (nodes + 1) * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nidx_d\n\n"); exit(-1);}
  if (hipSuccess != hipMalloc((void **)&nlist_d, edges * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nlist_d\n\n"); exit(-1);}
  if (hipSuccess != hipMalloc((void **)&nstat_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate nstat_d,\n\n"); exit(-1);}
  if (hipSuccess != hipMalloc((void **)&wl_d, nodes * sizeof(int))) {fprintf(stderr, "ERROR: could not allocate wl_d,\n\n"); exit(-1);}
  if (hipSuccess != hipMemcpy(nidx_d, nidx, (nodes + 1) * sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);}
  if (hipSuccess != hipMemcpy(nlist_d, nlist, edges * sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n\n"); exit(-1);}
  // none of the kernels use shared memory, so prefer a larger L1
  hipFuncSetCacheConfig(init, hipFuncCachePreferL1);
  hipFuncSetCacheConfig(compute1, hipFuncCachePreferL1);
  hipFuncSetCacheConfig(compute2, hipFuncCachePreferL1);
  hipFuncSetCacheConfig(compute3, hipFuncCachePreferL1);
  hipFuncSetCacheConfig(flatten, hipFuncCachePreferL1);
  const int blocks = SMs * mTSM / ThreadsPerBlock;
  GPUTimer timer;
  timer.start();
  init<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d);
  compute1<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d);
  compute2<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d);
  compute3<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d, wl_d);
  flatten<<<blocks, ThreadsPerBlock>>>(nodes, nidx_d, nlist_d, nstat_d);
  double runtime = timer.stop();
  printf("compute time: %.4f s\n", runtime);
  printf("throughput: %.3f Mnodes/s\n", nodes * 0.000001 / runtime);
  printf("throughput: %.3f Medges/s\n", edges * 0.000001 / runtime);
  if (hipSuccess != hipMemcpy(nstat, nstat_d, nodes * sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n\n"); exit(-1);}
  hipFree(wl_d);
  hipFree(nstat_d);
  hipFree(nlist_d);
  hipFree(nidx_d);
}
// Depth-first check that every node reachable from v carries component
// ID `id`. Visited nodes are marked with -1 so each is examined once;
// a mismatching ID aborts the process. Deep graphs may need extra stack.
static void verify(const int v, const int id, const int* const __restrict__ nidx, const int* const __restrict__ nlist, int* const __restrict__ nstat)
{
  if (nstat[v] < 0) return;  // already visited
  if (nstat[v] != id) {fprintf(stderr, "ERROR: found incorrect ID value\n\n"); exit(-1);}
  nstat[v] = -1;  // mark visited
  for (int e = nidx[v]; e < nidx[v + 1]; e++) {
    verify(nlist[e], id, nidx, nlist, nstat);
  }
}
// Entry point: loads an ECL graph, prints degree statistics, runs the GPU
// connected-components solver, then verifies the labeling on the CPU.
// Fixes: s1.size() (size_t) was printed with %d and compared to an int,
// which is undefined/implementation-defined on 64-bit targets; both sites
// now cast explicitly.
int main(int argc, char* argv[])
{
  printf("ECL-CC v1.0 (%s)\n", __FILE__);
  printf("Copyright 2017 Texas State University\n");
  if (argc != 2) {fprintf(stderr, "USAGE: %s input_file_name\n\n", argv[0]); exit(-1);}
  ECLgraph g = readECLgraph(argv[1]);
  int* nodestatus = NULL;
  // pinned allocation: nodestatus is the target of a device-to-host copy
  hipHostAlloc(&nodestatus, g.nodes * sizeof(int), hipHostAllocDefault);
  if (nodestatus == NULL) {fprintf(stderr, "ERROR: nodestatus - host memory allocation failed\n\n"); exit(-1);}
  printf("input graph: %d nodes and %d edges (%s)\n", g.nodes, g.edges, argv[1]);
  printf("average degree: %.2f edges per node\n", 1.0 * g.edges / g.nodes);
  int mindeg = g.nodes;
  int maxdeg = 0;
  for (int v = 0; v < g.nodes; v++) {
    int deg = g.nindex[v + 1] - g.nindex[v];
    mindeg = std::min(mindeg, deg);
    maxdeg = std::max(maxdeg, deg);
  }
  printf("minimum degree: %d edges\n", mindeg);
  printf("maximum degree: %d edges\n", maxdeg);
  computeCC(g.nodes, g.edges, g.nindex, g.nlist, nodestatus);
  std::set<int> s1;
  for (int v = 0; v < g.nodes; v++) {
    s1.insert(nodestatus[v]);
  }
  // cast: printing a size_t through %d is undefined behavior on LP64
  printf("number of connected components: %d\n", (int)s1.size());
  /* verification code (may need extra runtime stack space due to deep recursion) */
  // every edge must connect nodes with the same component ID
  for (int v = 0; v < g.nodes; v++) {
    for (int i = g.nindex[v]; i < g.nindex[v + 1]; i++) {
      if (nodestatus[g.nlist[i]] != nodestatus[v]) {fprintf(stderr, "ERROR: found adjacent nodes in different components\n\n"); exit(-1);}
    }
  }
  for (int v = 0; v < g.nodes; v++) {
    if (nodestatus[v] < 0) {fprintf(stderr, "ERROR: found negative component number\n\n"); exit(-1);}
  }
  // verify() marks reachable nodes with -1, so each component root is
  // encountered exactly once here
  std::set<int> s2;
  int count = 0;
  for (int v = 0; v < g.nodes; v++) {
    if (nodestatus[v] >= 0) {
      count++;
      s2.insert(nodestatus[v]);
      verify(v, nodestatus[v], g.nindex, g.nlist, nodestatus);
    }
  }
  if (s1.size() != s2.size()) {fprintf(stderr, "ERROR: number of components do not match\n\n"); exit(-1);}
  if ((int)s1.size() != count) {fprintf(stderr, "ERROR: component IDs are not unique\n\n"); exit(-1);}
  printf("all good\n\n");
  hipFreeHost(nodestatus);
  return 0;
}
#endif
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <memory>
namespace {
// Copies source[source_begin, source_end) into `target` starting at
// target_begin, writing through the target's existing device allocation.
// A nullable source forwards its validity bits via the validity iterator;
// a null-free source passes a constant "all valid" iterator instead.
template <typename T>
void in_place_copy_range(cudf::column_view const& source,
                         cudf::mutable_column_view& target,
                         cudf::size_type source_begin,
                         cudf::size_type source_end,
                         cudf::size_type target_begin,
                         rmm::cuda_stream_view stream)
{
  auto const d_source   = cudf::column_device_view::create(source, stream);
  auto const target_end = target_begin + (source_end - source_begin);
  if (source.has_nulls()) {
    auto value_first =
      cudf::detail::make_null_replacement_iterator<T>(*d_source, T()) + source_begin;
    auto valid_first = cudf::detail::make_validity_iterator(*d_source) + source_begin;
    cudf::detail::copy_range(value_first, valid_first, target, target_begin, target_end, stream);
  } else {
    cudf::detail::copy_range(d_source->begin<T>() + source_begin,
                             thrust::make_constant_iterator(true),  // dummy validity
                             target,
                             target_begin,
                             target_end,
                             stream);
  }
}
// Type-dispatch functor for cudf::type_dispatcher: routes in-place range
// copies to in_place_copy_range<T> for types whose device representation
// matches their logical type; all other types are rejected at runtime.
struct in_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::mutable_column_view& target;
// Enabled only for rep-layout-compatible types (fixed-width types whose
// storage layout equals the logical type).
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
void operator()(cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::cuda_stream_view stream)
{
in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream);
}
// Fallback overload: any type not covered above is unsupported in place.
template <typename T, typename... Args>
void operator()(Args&&...)
{
CUDF_FAIL("Unsupported type for in-place copy.");
}
};
// Type-dispatch functor for out-of-place range copies: clones the target
// column, then overwrites the requested range with data from `source`.
// Specializations for string_view and dictionary32 follow below.
struct out_of_place_copy_range_dispatch {
cudf::column_view const& source;
cudf::column_view const& target;
// Fixed-width path: copy-construct the target, add a null mask if the
// source range carries nulls that the clone cannot yet represent, then
// reuse the in-place kernel on the mutable clone.
template <typename T, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())>
std::unique_ptr<cudf::column> operator()(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto p_ret = std::make_unique<cudf::column>(target, stream, mr);
if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) {
p_ret->set_null_mask(
cudf::detail::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0);
}
if (source_end != source_begin) { // otherwise no-op
auto ret_view = p_ret->mutable_view();
in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream);
}
return p_ret;
}
// Fallback for types without a compatible rep layout (and without a
// dedicated specialization below).
template <typename T, typename... Args>
std::enable_if_t<not cudf::is_rep_layout_compatible<T>(), std::unique_ptr<cudf::column>>
operator()(Args...)
{
CUDF_FAIL("Unsupported type for out of place copy.");
}
};
// Strings specialization: variable-width data cannot be patched in place,
// so delegate to the strings copy_range implementation, which rebuilds the
// offsets/chars children for the affected range.
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto target_end = target_begin + (source_end - source_begin);
auto p_source_device_view = cudf::column_device_view::create(source, stream);
if (source.has_nulls()) {
// null entries are replaced by empty strings for the value iterator;
// the separate validity iterator preserves the actual null flags
return cudf::strings::detail::copy_range(
cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view,
cudf::string_view()) +
source_begin,
cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin,
cudf::strings_column_view(target),
target_begin,
target_end,
stream,
mr);
} else {
return cudf::strings::detail::copy_range(
p_source_device_view->begin<cudf::string_view>() + source_begin,
thrust::make_constant_iterator(true),
cudf::strings_column_view(target),
target_begin,
target_end,
stream,
mr);
}
}
// Dictionary specialization: unify the key sets of source and target, then
// perform the copy on the (fixed-width) indices children only.
template <>
std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>(
cudf::size_type source_begin,
cudf::size_type source_end,
cudf::size_type target_begin,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// check the keys in the source and target
cudf::dictionary_column_view const dict_source(source);
cudf::dictionary_column_view const dict_target(target);
CUDF_EXPECTS(dict_source.keys().type() == dict_target.keys().type(),
"dictionary keys must be the same type");
// combine keys so both dictionaries have the same set
auto target_matched =
cudf::dictionary::detail::add_keys(dict_target, dict_source.keys(), stream, mr);
auto const target_view = cudf::dictionary_column_view(target_matched->view());
auto source_matched = cudf::dictionary::detail::set_keys(dict_source, target_view.keys(), stream);
auto const source_view = cudf::dictionary_column_view(source_matched->view());
// build the new indices by calling in_place_copy_range on just the indices
auto const source_indices = source_view.get_indices_annotated();
// take ownership of the matched target's pieces; its indices become the
// scratch buffer the in-place copy writes into
auto target_contents = target_matched->release();
auto target_indices(std::move(target_contents.children.front()));
// wrap the indices buffer together with the parent's null mask so the
// copy kernel sees/updates validity of the dictionary elements
cudf::mutable_column_view new_indices(
target_indices->type(),
dict_target.size(),
target_indices->mutable_view().head(),
static_cast<cudf::bitmask_type*>(target_contents.null_mask->data()),
dict_target.null_count());
cudf::type_dispatcher(new_indices.type(),
in_place_copy_range_dispatch{source_indices, new_indices},
source_begin,
source_end,
target_begin,
stream);
auto null_count = new_indices.null_count();
// rebuild an owning indices column from the mutated buffer; the null mask
// is carried by the parent dictionary, so the child itself has none
auto indices_column =
std::make_unique<cudf::column>(new_indices.type(),
new_indices.size(),
std::move(*(target_indices->release().data.release())),
rmm::device_buffer{0, stream, mr},
0);
// take the keys from the matched column allocated using mr
auto keys_column(std::move(target_contents.children.back()));
// create column with keys_column and indices_column
return make_dictionary_column(std::move(keys_column),
std::move(indices_column),
std::move(*(target_contents.null_mask.release())),
null_count);
}
} // namespace
namespace cudf {
namespace detail {
// Overwrites target[target_begin, target_begin + n) with
// source[source_begin, source_end), n = source_end - source_begin, without
// reallocating. Only fixed-width types are supported; the target must be
// nullable whenever the source contains nulls. Throws cudf::logic_error on
// any violated precondition.
void copy_range_in_place(column_view const& source,
                         mutable_column_view& target,
                         size_type source_begin,
                         size_type source_end,
                         size_type target_begin,
                         rmm::cuda_stream_view stream)
{
  auto const num_elements = source_end - source_begin;
  CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true,
               "In-place copy_range does not support variable-sized types.");
  CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
                 (source_begin <= source_end) && (target_begin >= 0) &&
                 (target_begin <= target.size() - num_elements),
               "Range is out of bounds.");
  CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
  CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false),
               "target should be nullable if source has null values.");
  if (num_elements != 0) {  // empty range is a no-op
    in_place_copy_range_dispatch dispatcher{source, target};
    cudf::type_dispatcher<dispatch_storage_type>(
      target.type(), dispatcher, source_begin, source_end, target_begin, stream);
  }
}
// Out-of-place variant: returns a copy of `target` with
// [target_begin, target_begin + n) replaced by source[source_begin,
// source_end). Works for fixed-width, string, and dictionary columns via
// out_of_place_copy_range_dispatch. Throws cudf::logic_error on bad ranges
// or mismatched types.
std::unique_ptr<column> copy_range(column_view const& source,
                                   column_view const& target,
                                   size_type source_begin,
                                   size_type source_end,
                                   size_type target_begin,
                                   rmm::cuda_stream_view stream,
                                   rmm::mr::device_memory_resource* mr)
{
  auto const num_elements = source_end - source_begin;
  CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) &&
                 (source_begin <= source_end) && (target_begin >= 0) &&
                 (target_begin <= target.size() - num_elements),
               "Range is out of bounds.");
  CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch.");
  out_of_place_copy_range_dispatch dispatcher{source, target};
  return cudf::type_dispatcher<dispatch_storage_type>(
    target.type(), dispatcher, source_begin, source_end, target_begin, stream, mr);
}
} // namespace detail
// Public, NVTX-instrumented entry point; forwards to the detail
// implementation on the default stream.
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin)
{
CUDF_FUNC_RANGE();
return detail::copy_range_in_place(
source, target, source_begin, source_end, target_begin, rmm::cuda_stream_default);
}
// Public, NVTX-instrumented entry point; forwards to the detail
// implementation on the default stream with a caller-supplied resource.
std::unique_ptr<column> copy_range(column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::copy_range(
source, target, source_begin, source_end, target_begin, rmm::cuda_stream_default, mr);
}
} // namespace cudf
// -----------------------------------------------------------------------------------------
// NVEnc by rigaya
// -----------------------------------------------------------------------------------------
//
// The MIT License
//
// Copyright (c) 2014-2016 rigaya
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
// ------------------------------------------------------------------------------------------
#include <map>
#include <array>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <numeric>
#include <type_traits>
#define _USE_MATH_DEFINES
#include <cmath>
#include "convert_csp.h"
#include "NVEncFilterMpdecimate.h"
#include "NVEncParam.h"
#pragma warning (push)
#pragma warning (disable: 4819)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#pragma warning (pop)
#include "rgy_cuda_util_kernel.h"
#define MPDECIMATE_BLOCK_X (32)
#define MPDECIMATE_BLOCK_Y (8)
// Absolute per-pixel difference used by the block-diff kernel.
__device__ __inline__
int func_diff_pix(int a, int b) {
    return (a > b) ? a - b : b - a;
}
// Sums the absolute per-pixel difference between frames p0 and p1 for every
// 8x8 pixel block of one plane, writing one int per block into pDst.
// Launch layout: blockDim must be (MPDECIMATE_BLOCK_X, MPDECIMATE_BLOCK_Y);
// each thread covers 8 horizontal pixels via two 4-component vector loads,
// and the 8 vertical rows of a block are folded through shared memory by
// the ly == 0 thread row.
// NOTE(review): when the grid is wider than divCeil(width, 8) result
// columns, out-of-image threads still write a zero at block8x8X past the
// nominal block count - this appears to rely on pDst's pitch padding;
// confirm the tmp buffer pitch always covers MPDECIMATE_BLOCK_X ints.
template<typename Type4>
__global__ void kernel_block_diff(
const uint8_t *__restrict__ p0, const int p0_pitch,
const uint8_t *__restrict__ p1, const int p1_pitch,
const int width, const int height,
uint8_t *__restrict__ pDst, const int dst_pitch) {
const int lx = threadIdx.x; // thread count == MPDECIMATE_BLOCK_X
const int ly = threadIdx.y; // thread count == MPDECIMATE_BLOCK_Y
const int blockoffset_x = blockIdx.x * blockDim.x;
const int blockoffset_y = blockIdx.y * blockDim.y;
const int imgx = (blockoffset_x + lx) * 8;
const int imgy = (blockoffset_y + ly);
int diff = 0;
if (imgx < width && imgy < height) {
p0 += imgy * p0_pitch + imgx * sizeof(Type4::x);
p1 += imgy * p1_pitch + imgx * sizeof(Type4::x);
Type4 *ptrp0 = (Type4 *)p0;
Type4 *ptrp1 = (Type4 *)p1;
{
// first 4 pixels; per-component guards handle the right image edge
Type4 pix0 = ptrp0[0];
Type4 pix1 = ptrp1[0];
diff += func_diff_pix(pix0.x, pix1.x);
if (imgx + 1 < width) diff += func_diff_pix(pix0.y, pix1.y);
if (imgx + 2 < width) diff += func_diff_pix(pix0.z, pix1.z);
if (imgx + 3 < width) diff += func_diff_pix(pix0.w, pix1.w);
}
if (imgx + 4 < width) {
// second 4 pixels of this thread's 8-pixel span
Type4 pix0 = ptrp0[1];
Type4 pix1 = ptrp1[1];
diff += func_diff_pix(pix0.x, pix1.x);
if (imgx + 5 < width) diff += func_diff_pix(pix0.y, pix1.y);
if (imgx + 6 < width) diff += func_diff_pix(pix0.z, pix1.z);
if (imgx + 7 < width) diff += func_diff_pix(pix0.w, pix1.w);
}
}
// +1 column padding avoids shared-memory bank conflicts on the column read
__shared__ int tmp[MPDECIMATE_BLOCK_Y][MPDECIMATE_BLOCK_X +1];
tmp[ly][lx] = diff;
__syncthreads();
if (ly == 0) {
// fold the 8 row sums of this 8x8 block into the ly==0 accumulator
#pragma unroll
for (int i = 1; i < 8; i++) {
diff += tmp[i][lx];
}
const int block8x8X = blockoffset_x + lx;
const int block8x8Y = blockIdx.y;
pDst += block8x8Y * dst_pitch + block8x8X * sizeof(diff);
*(int *)pDst = diff;
}
}
// Launches kernel_block_diff for a single plane on `streamDiff`, writing
// one int of summed absolute difference per 8x8 block into `tmp`.
// Returns the launch status from cudaGetLastError().
template<typename Type4>
cudaError calc_block_diff_plane(const RGYFrameInfo *p0, const RGYFrameInfo *p1, RGYFrameInfo *tmp, cudaStream_t streamDiff) {
    dim3 blockSize(MPDECIMATE_BLOCK_X, MPDECIMATE_BLOCK_Y);
    // each thread spans 8 horizontal pixels, hence the * 8 in the x extent
    dim3 gridSize(divCeil(p0->width, blockSize.x * 8), divCeil(p0->height, blockSize.y));
    kernel_block_diff<Type4><<< gridSize, blockSize, 0, streamDiff >>> (
        (const uint8_t *)p0->ptr, p0->pitch,
        (const uint8_t *)p1->ptr, p1->pitch,
        p0->width, p0->height,
        (uint8_t *)tmp->ptr, tmp->pitch);
    return cudaGetLastError();
}
// Runs the block-diff kernel for every plane of the frame pair, stopping
// at the first launch failure and returning its error code.
template<typename Type4>
cudaError_t calc_block_diff_frame(const RGYFrameInfo *p0, const RGYFrameInfo *p1, RGYFrameInfo *tmp, cudaStream_t streamDiff) {
    const int planeCount = RGY_CSP_PLANES[p0->csp];
    for (int iplane = 0; iplane < planeCount; iplane++) {
        const auto planeP0 = getPlane(p0, (RGY_PLANE)iplane);
        const auto planeP1 = getPlane(p1, (RGY_PLANE)iplane);
        auto planeTmp = getPlane(tmp, (RGY_PLANE)iplane);
        const auto sts = calc_block_diff_plane<Type4>(&planeP0, &planeP1, &planeTmp, streamDiff);
        if (sts != cudaSuccess) {
            return sts;
        }
    }
    return cudaSuccess;
}
// Constructs an empty cache slot; buffers are allocated lazily in set().
// m_inFrameId == -1 marks the slot as unused.
NVEncFilterMpdecimateFrameData::NVEncFilterMpdecimateFrameData(std::shared_ptr<RGYLog> log) :
m_log(log),
m_inFrameId(-1),
m_buf(),
m_tmp() {
}
// Releases the cached frame buffer; m_tmp cleans itself up via its own
// destructor.
NVEncFilterMpdecimateFrameData::~NVEncFilterMpdecimateFrameData() {
m_buf.clear();
}
// Stores a copy of the input frame in this cache slot (asynchronously on
// `stream`) and tags the slot with the input frame ID. Device buffers are
// allocated on first use: m_buf holds the frame itself, m_tmp holds one
// 32-bit result per 8x8 block for the diff kernel.
RGY_ERR NVEncFilterMpdecimateFrameData::set(const RGYFrameInfo *pInputFrame, int inputFrameId, cudaStream_t stream) {
    m_inFrameId = inputFrameId;
    if (m_tmp.frameDev.ptr == nullptr) {
        m_tmp.alloc(divCeil(pInputFrame->width, 8), divCeil(pInputFrame->height, 8), RGY_CSP_YUV444_32);
    }
    if (m_buf.frame.ptr == nullptr) {
        m_buf.alloc(pInputFrame->width, pInputFrame->height, pInputFrame->csp);
    }
    copyFrameProp(&m_buf.frame, pInputFrame);
    const auto sts = m_buf.copyFrameAsync(pInputFrame, stream);
    if (sts == cudaSuccess) {
        return RGY_ERR_NONE;
    }
    m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("failed to set frame to data cache: %s.\n"), char_to_tstring(cudaGetErrorName(sts)).c_str());
    return RGY_ERR_CUDA;
}
// Launches the per-block diff between this frame and `ref` on streamDiff,
// then chains the device-to-host copy of the results onto streamTransfer
// via `eventTransfer` so the transfer overlaps with other work. The caller
// must synchronize streamTransfer before reading the host-side results.
RGY_ERR NVEncFilterMpdecimateFrameData::calcDiff(const NVEncFilterMpdecimateFrameData *ref,
cudaStream_t streamDiff, cudaEvent_t eventTransfer, cudaStream_t streamTransfer) {
// maps the frame's csp to the proper vector-load instantiation
static const std::map<RGY_CSP, decltype(calc_block_diff_frame<uchar4>)*> func_list = {
{ RGY_CSP_YV12, calc_block_diff_frame<uchar4> },
{ RGY_CSP_YV12_16, calc_block_diff_frame<ushort4> },
{ RGY_CSP_YUV444, calc_block_diff_frame<uchar4> },
{ RGY_CSP_YUV444_16, calc_block_diff_frame<ushort4> }
};
if (func_list.count(ref->m_buf.frame.csp) == 0) {
m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[ref->m_buf.frame.csp]);
return RGY_ERR_UNSUPPORTED;
}
auto cudaerr = func_list.at(ref->m_buf.frame.csp)(&m_buf.frame, &ref->get()->frame, &m_tmp.frameDev, streamDiff);
if (cudaerr != cudaSuccess) {
m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("failed to run calcDiff: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
return RGY_ERR_CUDA;
}
// make streamTransfer wait for the diff kernel before copying results
if ((cudaerr = cudaEventRecord(eventTransfer, streamDiff)) != cudaSuccess) {
m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("failed to cudaEventRecord in calcDiff: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
return RGY_ERR_CUDA;
}
if ((cudaerr = cudaStreamWaitEvent(streamTransfer, eventTransfer, 0)) != cudaSuccess) {
m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("failed to cudaStreamWaitEvent in calcDiff: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
return RGY_ERR_CUDA;
}
if ((cudaerr = m_tmp.copyDtoHAsync(streamTransfer)) != cudaSuccess) {
m_log->write(RGY_LOG_ERROR, RGY_LOGT_VPP, _T("failed to copyDtoHAsync in calcDiff: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
return RGY_ERR_CUDA;
}
return RGY_ERR_NONE;
}
// Decides whether this frame is "similar enough" to its reference to drop:
// returns false as soon as any 8x8 block's summed difference exceeds `hi`,
// or once more than width*height*factor blocks exceed `lo`.
// Fix: results for each plane are now read from the matching plane of the
// host result buffer via getPlane() - the device kernel writes its output
// per plane, but the previous code always read from the start of the
// buffer (i.e. plane 0) for every plane.
bool NVEncFilterMpdecimateFrameData::checkIfFrameCanbeDropped(const int hi, const int lo, const float factor) {
    const int threshold = (int)((float)m_tmp.frameHost.width * m_tmp.frameHost.height * factor + 0.5f);
    int loCount = 0;
    for (int iplane = 0; iplane < RGY_CSP_PLANES[m_buf.frame.csp]; iplane++) {
        const auto plane = getPlane(&m_buf.frame, (RGY_PLANE)iplane);
        // read this plane's block results (kernel writes per plane)
        const auto planeResult = getPlane(&m_tmp.frameHost, (RGY_PLANE)iplane);
        const int blockw = divCeil(plane.width, 8);
        const int blockh = divCeil(plane.height, 8);
        for (int j = 0; j < blockh; j++) {
            const int *ptrResult = (const int *)(planeResult.ptr + j * planeResult.pitch);
            for (int i = 0; i < blockw; i++) {
                const int result = ptrResult[i];
                if (result > hi) {
                    return false; // one block changed too much
                }
                if (result > lo) {
                    loCount++;
                    if (loCount > threshold) {
                        return false; // too many mildly-changed blocks
                    }
                }
            }
        }
    }
    return true;
}
// Empty cache; slots are created in init().
NVEncFilterMpdecimateCache::NVEncFilterMpdecimateCache() : m_inputFrames(0), m_frames() {
}
// Destroys all cached frame slots (and with them their device buffers).
NVEncFilterMpdecimateCache::~NVEncFilterMpdecimateCache() {
m_frames.clear();
}
// (Re)creates the cache with `bufCount` empty frame slots sharing the
// given logger; any previously cached frames are discarded.
void NVEncFilterMpdecimateCache::init(int bufCount, std::shared_ptr<RGYLog> log) {
    m_log = log;
    m_frames.clear();
    for (int n = bufCount; n > 0; n--) {
        m_frames.push_back(std::make_unique<NVEncFilterMpdecimateFrameData>(log));
    }
}
// Copies the input frame into a free cache slot, assigning it the next
// sequential input-frame ID.
RGY_ERR NVEncFilterMpdecimateCache::add(const RGYFrameInfo *pInputFrame, cudaStream_t stream) {
    return getEmpty()->set(pInputFrame, m_inputFrames++, stream);
}
// m_ref / m_target == -1 mean "no frame held yet"; CUDA resources are
// created in init().
NVEncFilterMpdecimate::NVEncFilterMpdecimate() : m_dropCount(0), m_ref(-1), m_target(-1), m_cache(), m_eventDiff(), m_streamDiff(), m_streamTransfer() {
m_sFilterName = _T("mpdecimate");
}
// Releases streams, events, buffers and the log file via close().
NVEncFilterMpdecimate::~NVEncFilterMpdecimate() {
close();
}
// Validates the mpdecimate parameters: output dimensions must be positive,
// lo/hi thresholds strictly positive, frac non-negative.
// Fix: the user-facing error messages read "must a positive value"
// (missing "be").
RGY_ERR NVEncFilterMpdecimate::checkParam(const std::shared_ptr<NVEncFilterParamMpdecimate> prm) {
    if (prm->frameOut.height <= 0 || prm->frameOut.width <= 0) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid frame size.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (prm->mpdecimate.lo <= 0) {
        AddMessage(RGY_LOG_ERROR, _T("\"lo\" must be a positive value.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (prm->mpdecimate.hi <= 0) {
        AddMessage(RGY_LOG_ERROR, _T("\"hi\" must be a positive value.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (prm->mpdecimate.frac < 0.0) {
        AddMessage(RGY_LOG_ERROR, _T("\"frac\" must be a positive value.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    return RGY_ERR_NONE;
}
// Initializes (or re-initializes, when the parameters changed) the filter:
// creates the two-slot frame cache, the CUDA events/streams that overlap
// the diff kernel with the device-to-host result transfer, and the
// optional drop-decision log file. Timestamp passthrough is disabled
// because this filter removes frames.
// Fix: removed the unused local `max_value` (computed but never read).
RGY_ERR NVEncFilterMpdecimate::init(shared_ptr<NVEncFilterParam> pParam, shared_ptr<RGYLog> pPrintMes) {
    RGY_ERR sts = RGY_ERR_NONE;
    m_pPrintMes = pPrintMes;
    auto prm = std::dynamic_pointer_cast<NVEncFilterParamMpdecimate>(pParam);
    if (!prm) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    // parameter check
    if ((sts = checkParam(prm)) != RGY_ERR_NONE) {
        return sts;
    }
    if (!m_pParam || std::dynamic_pointer_cast<NVEncFilterParamMpdecimate>(m_pParam)->mpdecimate != prm->mpdecimate) {
        m_cache.init(2, m_pPrintMes);
        auto cudaerr = cudaSuccess;
        m_eventDiff = std::unique_ptr<cudaEvent_t, cudaevent_deleter>(new cudaEvent_t(), cudaevent_deleter());
        if (cudaSuccess != (cudaerr = cudaEventCreateWithFlags(m_eventDiff.get(), cudaEventDisableTiming))) {
            AddMessage(RGY_LOG_ERROR, _T("failed to cudaEventCreateWithFlags: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
            return RGY_ERR_CUDA;
        }
        AddMessage(RGY_LOG_DEBUG, _T("cudaEventCreateWithFlags for m_eventDiff: Success.\n"));
        m_eventTransfer = std::unique_ptr<cudaEvent_t, cudaevent_deleter>(new cudaEvent_t(), cudaevent_deleter());
        if (cudaSuccess != (cudaerr = cudaEventCreateWithFlags(m_eventTransfer.get(), cudaEventDisableTiming))) {
            AddMessage(RGY_LOG_ERROR, _T("failed to cudaEventCreateWithFlags: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
            return RGY_ERR_CUDA;
        }
        AddMessage(RGY_LOG_DEBUG, _T("cudaEventCreateWithFlags for m_eventTransfer: Success.\n"));
        m_streamDiff = std::unique_ptr<cudaStream_t, cudastream_deleter>(new cudaStream_t(), cudastream_deleter());
        if (cudaSuccess != (cudaerr = cudaStreamCreateWithFlags(m_streamDiff.get(), 0/*cudaStreamNonBlocking*/))) {
            AddMessage(RGY_LOG_ERROR, _T("failed to cudaStreamCreateWithFlags: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
            return RGY_ERR_CUDA;
        }
        AddMessage(RGY_LOG_DEBUG, _T("cudaStreamCreateWithFlags for m_streamDiff: Success.\n"));
        m_streamTransfer = std::unique_ptr<cudaStream_t, cudastream_deleter>(new cudaStream_t(), cudastream_deleter());
        if (cudaSuccess != (cudaerr = cudaStreamCreateWithFlags(m_streamTransfer.get(), 0/*cudaStreamNonBlocking*/))) {
            AddMessage(RGY_LOG_ERROR, _T("failed to cudaStreamCreateWithFlags: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
            return RGY_ERR_CUDA;
        }
        AddMessage(RGY_LOG_DEBUG, _T("cudaStreamCreateWithFlags for m_streamTransfer: Success.\n"));
        prm->frameOut.pitch = prm->frameIn.pitch;
        m_fpLog.reset();
        if (prm->mpdecimate.log) {
            const tstring logfilename = prm->outfilename + _T(".mpdecimate.log.txt");
            m_fpLog = std::unique_ptr<FILE, fp_deleter>(_tfopen(logfilename.c_str(), _T("w")), fp_deleter());
            AddMessage(RGY_LOG_DEBUG, _T("Opened log file: %s.\n"), logfilename.c_str());
        }
        // this filter drops frames, so it must own timestamp handling
        m_nPathThrough &= (~(FILTER_PATHTHROUGH_TIMESTAMP));
        m_dropCount = 0;
        m_ref = -1;
        m_target = -1;
        setFilterInfo(pParam->print());
    }
    m_pParam = pParam;
    return sts;
}
// Human-readable summary of the mpdecimate parameters.
tstring NVEncFilterParamMpdecimate::print() const {
return mpdecimate.print();
}
// Decides whether `targetFrame` should be dropped. The `max` parameter
// limits runs: max > 0 caps consecutive drops, max < 0 enforces a minimum
// keep run (both tracked via the signed m_dropCount counter). The actual
// similarity test runs on the 8-bit-scaled hi/lo thresholds, shifted up to
// the frame's bit depth.
bool NVEncFilterMpdecimate::dropFrame(NVEncFilterMpdecimateFrameData *targetFrame) {
    auto prm = std::dynamic_pointer_cast<NVEncFilterParamMpdecimate>(m_pParam);
    if (!prm) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n"));
        return false;
    }
    const auto maxDrop = prm->mpdecimate.max;
    if (maxDrop > 0 && m_dropCount >= maxDrop) {
        return false; // consecutive-drop limit reached
    }
    if (maxDrop < 0 && (m_dropCount - 1) > maxDrop) {
        return false; // still inside the forced keep run
    }
    const int shift = RGY_CSP_BIT_DEPTH[targetFrame->get()->frame.csp] - 8;
    return targetFrame->checkIfFrameCanbeDropped(prm->mpdecimate.hi << shift, prm->mpdecimate.lo << shift, prm->mpdecimate.frac);
}
// Per-frame driver. Holds up to two cached frames: m_ref (last emitted
// frame) and m_target (candidate for dropping). Each call first resolves
// the pending candidate using the diff results computed on the previous
// call, then enqueues the new input and kicks off its async diff so the
// decision is ready next time. A null pInputFrame signals end-of-stream.
RGY_ERR NVEncFilterMpdecimate::run_filter(const RGYFrameInfo *pInputFrame, RGYFrameInfo **ppOutputFrames, int *pOutputFrameNum, cudaStream_t stream) {
RGY_ERR sts = RGY_ERR_NONE;
auto prm = std::dynamic_pointer_cast<NVEncFilterParamMpdecimate>(m_pParam);
if (!prm) {
AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n"));
return RGY_ERR_INVALID_PARAM;
}
if (pInputFrame->ptr == nullptr && m_ref < 0) {
// end of stream, nothing pending
*pOutputFrameNum = 0;
ppOutputFrames[0] = nullptr;
return sts;
}
if (m_ref < 0) {
// very first frame: cache it and emit it unconditionally
m_ref = m_cache.inframe();
auto err = m_cache.add(pInputFrame, stream);
if (err != RGY_ERR_NONE) {
AddMessage(RGY_LOG_ERROR, _T("failed to add frame to cache: %s.\n"), get_err_mes(err));
return err;
}
*pOutputFrameNum = 1;
ppOutputFrames[0] = &m_cache.get(m_ref)->frame;
if (m_fpLog) {
fprintf(m_fpLog.get(), " %8d: %10lld\n", m_ref, (long long)ppOutputFrames[0]->timestamp);
}
return sts;
}
if (m_target >= 0) {
// resolve the drop decision queued on the previous call
auto targetFrame = m_cache.frame(m_target);
// wait for the GPU->CPU transfer of the diff results to finish
cudaStreamSynchronize(*m_streamTransfer.get());
const bool drop = dropFrame(targetFrame) && pInputFrame->ptr != nullptr; // always output the final frame
if (m_fpLog) {
fprintf(m_fpLog.get(), "%s %8d: %10lld\n", (drop) ? "d" : " ", m_target, (long long)targetFrame->get()->frame.timestamp);
}
if (drop) {
// m_dropCount > 0 counts consecutive drops
targetFrame->reset();
m_target = -1;
m_dropCount = std::max(1, m_dropCount + 1);
*pOutputFrameNum = 0;
ppOutputFrames[0] = nullptr;
} else {
// emit the candidate; it becomes the new reference
// (m_dropCount < 0 counts consecutive keeps)
m_dropCount = std::min(-1, m_dropCount - 1);
m_cache.frame(m_ref)->reset();
m_ref = m_target;
m_target = -1;
*pOutputFrameNum = 1;
ppOutputFrames[0] = &targetFrame->get()->frame;
}
}
if (pInputFrame->ptr != nullptr) {
// cache the new input and start its diff against the reference;
// the result is consumed on the next call
m_target = m_cache.inframe();
auto err = m_cache.add(pInputFrame, stream);
if (err != RGY_ERR_NONE) {
AddMessage(RGY_LOG_ERROR, _T("failed to add frame to cache: %s.\n"), get_err_mes(err));
return err;
}
// order the diff stream after the frame upload on the main stream
cudaEventRecord(*m_eventDiff.get(), stream);
cudaStreamWaitEvent(*m_streamDiff.get(), *m_eventDiff.get(), 0);
err = m_cache.frame(m_target)->calcDiff(m_cache.frame(m_ref), *m_streamDiff.get(), *m_eventTransfer.get(), *m_streamTransfer.get());
if (err != RGY_ERR_NONE) {
AddMessage(RGY_LOG_ERROR, _T("failed to run calcDiff: %s.\n"), get_err_mes(err));
return err;
}
}
return RGY_ERR_NONE;
}
// Releases all filter resources; the unique_ptr deleters destroy the CUDA
// events/streams, and resetting m_fpLog closes the drop log file.
void NVEncFilterMpdecimate::close() {
m_pFrameBuf.clear();
m_eventDiff.reset();
m_streamDiff.reset();
m_streamTransfer.reset();
m_fpLog.reset();
}
#define SINC_KERNEL_RADIUS 3
#define SINC_KERNEL_SIZE SINC_KERNEL_RADIUS*2
/* *************************************************************** */
// Returns the smaller of two unsigned ints (host-side helper).
unsigned int min1(unsigned int a, unsigned int b)
{
    if (a < b) {
        return a;
    }
    return b;
}
/* *************************************************************** */
__device__ __constant__ float cIdentity[16];
// Matrix exponential of a 4x4 matrix (row-major float[16]).
// NOT IMPLEMENTED: the body is an empty placeholder; callers currently
// get `mat` back unchanged.
__device__ __inline__ void reg_mat44_expm_cuda(float* mat)
{
//todo
}
// Matrix logarithm of a 4x4 matrix (row-major float[16]).
// NOT IMPLEMENTED: the body is an empty placeholder; callers currently
// get `mat` back unchanged.
__device__ __inline__
void reg_mat44_logm_cuda(float* mat)
{
//todo
}
/* *************************************************************** */
// Affine transform: out = top 3 rows of (mat * [in.x, in.y, in.z, 1]),
// with mat stored row-major as DTYPE[16]. Products are accumulated in
// double precision before the final cast back to DTYPE.
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(DTYPE const* mat, DTYPE const* in, DTYPE *out)
{
    for (int r = 0; r < 3; ++r) {
        DTYPE const* row = &mat[r * 4];
        out[r] = (DTYPE)((double)row[0] * (double)in[0] +
                         (double)row[1] * (double)in[1] +
                         (double)row[2] * (double)in[2] +
                         (double)row[3]);
    }
}
/* *************************************************************** */
// Overload taking a float matrix with DTYPE vectors: out = top 3 rows of
// (mat * [in.x, in.y, in.z, 1]), accumulated in double precision.
template<class DTYPE>
__device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out)
{
    for (int r = 0; r < 3; ++r) {
        const float* row = &mat[r * 4];
        out[r] = (DTYPE)((double)row[0] * (double)in[0] +
                         (double)row[1] * (double)in[1] +
                         (double)row[2] * (double)in[2] +
                         (double)row[3]);
    }
}
/* *************************************************************** */
// floor() truncated to int, used for voxel index computation.
__device__ __inline__ int cuda_reg_floor(double a)
{
    return static_cast<int>(floor(a));
}
/* *************************************************************** */
// Fills basis[0..3] with cubic interpolation weights for a fractional
// offset `ratio` in [0,1) (Catmull-Rom-form coefficients; the four weights
// apply to samples at offsets -1, 0, +1, +2 and sum to 1).
template<class FieldTYPE>
__device__ __inline__ void interpolantCubicSpline(FieldTYPE ratio, FieldTYPE *basis)
{
if (ratio < 0.0)
ratio = 0.0; // clamp negative rounding error
double FF = (double) ratio * ratio;
basis[0] = (FieldTYPE) ((ratio * (((double)2.0 - ratio) * ratio - (double)1.0)) / (double)2.0);
basis[1] = (FieldTYPE) ((FF * ((double)3.0 * ratio - 5.0) + 2.0) / (double)2.0);
basis[2] = (FieldTYPE) ((ratio * (((double)4.0 - (double)3.0 * ratio) * ratio + (double)1.0)) / (double)2.0);
basis[3] = (FieldTYPE) ((ratio - (double)1.0) * FF / (double)2.0);
}
/* *************************************************************** */
// Loads the 4x4 identity into a row-major float[16].
__device__ __inline__
void reg_mat44_eye(float *mat) {
    for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 4; ++c) {
            mat[r * 4 + c] = (r == c) ? 1.f : 0.f;
        }
    }
}
/* *************************************************************** */
// Fills basis[0..SINC_KERNEL_SIZE-1] with sinc-windowed-sinc weights for a
// fractional offset `relative` in [0,1). The loop covers sample offsets
// -SINC_KERNEL_RADIUS .. SINC_KERNEL_RADIUS-1 (6 taps for radius 3); the
// weights are normalized to sum to 1.
__inline__ __device__ void interpWindowedSincKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; // clamp negative rounding error
int j = 0;
double sum = 0.;
for (int i = -SINC_KERNEL_RADIUS; i < SINC_KERNEL_RADIUS; ++i) {
double x = relative - (double) (i);
if (x == 0.0)
basis[j] = 1.0; // sinc(0) == 1
else if (abs(x) >= (double) (SINC_KERNEL_RADIUS))
basis[j] = 0; // outside the window
else {
double pi_x = M_PI * x;
// sinc(x) * sinc(x / radius), scaled; normalized below
basis[j] = (SINC_KERNEL_RADIUS) * sin(pi_x) * sin(pi_x / SINC_KERNEL_RADIUS) / (pi_x * pi_x);
}
sum += basis[j];
j++;
}
// normalize so the kernel preserves constant signals
for (int i = 0; i < SINC_KERNEL_SIZE; ++i)
basis[i] /= sum;
}
/* *************************************************************** */
// Double-precision variant of the cubic interpolation weights: fills
// basis[0..3] for a fractional offset `relative` in [0,1) (weights apply
// to samples at offsets -1, 0, +1, +2 and sum to 1).
__inline__ __device__ void interpCubicSplineKernel(double relative, double *basis)
{
if (relative < 0.0)
relative = 0.0; // clamp negative rounding error
double FF = relative * relative;
basis[0] = (relative * ((2.0 - relative) * relative - 1.0)) / 2.0;
basis[1] = (FF * (3.0 * relative - 5.0) + 2.0) / 2.0;
basis[2] = (relative * ((4.0 - 3.0 * relative) * relative + 1.0)) / 2.0;
basis[3] = (relative - 1.0) * FF / 2.0;
}
/* *************************************************************** */
// Linear interpolation weights: basis[0] for the left sample,
// basis[1] for the right, for a fractional offset in [0,1).
__inline__ __device__ void interpLinearKernel(double relative, double *basis)
{
    if (relative < 0.0)
        relative = 0.0; // clamp negative rounding error
    basis[0] = 1.0 - relative;
    basis[1] = relative;
}
/* *************************************************************** */
// Nearest-neighbour weights: exactly one of basis[0]/basis[1] is 1,
// picking the right sample when the fractional offset is >= 0.5.
__inline__ __device__ void interpNearestNeighKernel(double relative, double *basis)
{
    if (relative < 0.0)
        relative = 0.0; // clamp negative rounding error
    const bool pickRight = (relative >= 0.5);
    basis[0] = pickRight ? 0.0 : 1.0;
    basis[1] = pickRight ? 1.0 : 0.0;
}
/* *************************************************************** */
// Separable 2D interpolation: accumulates a kernel_size x kernel_size
// neighbourhood starting at `previous`, weighting columns by xBasis and
// rows by yBasis. Samples outside the fi_xyz.x * fi_xyz.y plane contribute
// paddingValue. zBasis is unused here (kept for interface symmetry with
// the 3D loop).
__inline__ __device__ double interpLoop2D(float* floatingIntensity,
double* xBasis,
double* yBasis,
double* zBasis,
int *previous,
uint3 fi_xyz,
float paddingValue,
unsigned int kernel_size)
{
    double intensity = 0.0;
    for (int b = 0; b < kernel_size; b++) {
        const int Y = previous[1] + b;
        const bool yInBounds = -1 < Y && Y < fi_xyz.y;
        double rowSum = 0.0;
        for (int a = 0; a < kernel_size; a++) {
            const int X = previous[0] + a;
            const bool xInBounds = -1 < X && X < fi_xyz.x;
            const double sample = (xInBounds && yInBounds)
                ? (double)floatingIntensity[Y * fi_xyz.x + X]
                : (double)paddingValue;
            rowSum += sample * xBasis[a];
        }
        intensity += rowSum * yBasis[b];
    }
    return intensity;
}
/* *************************************************************** */
__inline__ __device__ double interpLoop3D(float* floatingIntensity,
                                          double* xBasis,
                                          double* yBasis,
                                          double* zBasis,
                                          int *previous,
                                          uint3 fi_xyz,
                                          float paddingValue,
                                          unsigned int kernel_size)
{
    // Weighted gather over a kernel_size^3 neighbourhood whose lower corner is
    // `previous`; out-of-image samples contribute paddingValue instead.
    double intensity = (double)(0.0);
    for (int iz = 0; iz < kernel_size; iz++) {
        const int gz = previous[2] + iz;
        const bool insideZ = -1 < gz && gz < fi_xyz.z;
        double planeSum = 0.0;
        for (int iy = 0; iy < kernel_size; iy++) {
            const int gy = previous[1] + iy;
            const bool insideY = -1 < gy && gy < fi_xyz.y;
            double rowSum = 0.0;
            for (int ix = 0; ix < kernel_size; ix++) {
                const int gx = previous[0] + ix;
                const bool insideX = -1 < gx && gx < fi_xyz.x;
                const unsigned int idx = gz * fi_xyz.x * fi_xyz.y + gy * fi_xyz.x + gx;
                rowSum += (insideX && insideY && insideZ) ? floatingIntensity[idx] * xBasis[ix] : paddingValue * xBasis[ix];
            }
            planeSum += rowSum * yBasis[iy];
        }
        intensity += planeSum * zBasis[iz];
    }
    return intensity;
}
/* *************************************************************** */
// Resamples each 2D time/component volume of the floating image through the
// deformation field. One thread per warped voxel, grid-stride over
// voxelNumber.x (warped voxel count); voxelNumber.y is the floating voxel
// count. Voxels whose mask entry is <= -1 receive paddingValue.
// kernelType: 0 = nearest neighbour, 1 = linear, 4 = windowed sinc,
// anything else = cubic spline.
__global__ void ResampleImage2D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    int *maskPtr = &mask[0];
    // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in 32
    // bits and could wrap for very large grids.
    long index = (long)blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {
                // previous[2] is deliberately never touched in the 2D path;
                // interpLoop2D only reads previous[0] and previous[1].
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float)(deformationFieldPtrX[index]);
                world[1] = (float)(deformationFieldPtrY[index]);
                world[2] = 0.0f;
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                if (kernelType == 0) {
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 1) {
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                }
                else if (kernelType == 4) {
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    // Shift to the lower corner of the sinc support. Only the
                    // in-plane axes are adjusted (the previous[2] decrement
                    // here operated on an uninitialized value and is removed).
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                }
                else {
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    // Shift to the lower corner of the cubic support (2D only).
                    previous[0]--;
                    previous[1]--;
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    intensity = interpLoop2D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            resultIntensity[index] = (float)intensity;
        }
        index += (long)blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
// 3D counterpart of ResampleImage2D: one thread per warped voxel, grid-stride
// over voxelNumber.x (warped voxel count); voxelNumber.y is the floating voxel
// count. Masked-out voxels (mask <= -1) receive paddingValue.
// kernelType: 0 = nearest neighbour, 1 = linear, 4 = windowed sinc,
// anything else = cubic spline.
__global__ void ResampleImage3D(float* floatingImage,
                                float* deformationField,
                                float* warpedImage,
                                int *mask,
                                float* sourceIJKMatrix,
                                ulong2 voxelNumber,
                                uint3 fi_xyz,
                                uint2 wi_tu,
                                float paddingValue,
                                int kernelType)
{
    float *sourceIntensityPtr = (floatingImage);
    float *resultIntensityPtr = (warpedImage);
    float *deformationFieldPtrX = (deformationField);
    float *deformationFieldPtrY = &deformationFieldPtrX[voxelNumber.x];
    float *deformationFieldPtrZ = &deformationFieldPtrY[voxelNumber.x];
    int *maskPtr = &mask[0];
    // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in 32
    // bits and could wrap for very large grids.
    long index = (long)blockIdx.x * blockDim.x + threadIdx.x;
    while (index < voxelNumber.x) {
        for (unsigned int t = 0; t < wi_tu.x * wi_tu.y; t++) {
            float *resultIntensity = &resultIntensityPtr[t * voxelNumber.x];
            float *floatingIntensity = &sourceIntensityPtr[t * voxelNumber.y];
            double intensity = paddingValue;
            if (maskPtr[index] > -1) {
                int previous[3];
                float world[3], position[3];
                double relative[3];
                world[0] = (float) (deformationFieldPtrX[index]);
                world[1] = (float) (deformationFieldPtrY[index]);
                world[2] = (float) (deformationFieldPtrZ[index]);
                // real -> voxel; floating space
                reg_mat44_mul_cuda<float>(sourceIJKMatrix, world, position);
                previous[0] = cuda_reg_floor(position[0]);
                previous[1] = cuda_reg_floor(position[1]);
                previous[2] = cuda_reg_floor(position[2]);
                relative[0] = (double)(position[0]) - (double)(previous[0]);
                relative[1] = (double)(position[1]) - (double)(previous[1]);
                relative[2] = (double)(position[2]) - (double)(previous[2]);
                if (kernelType == 0) {
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpNearestNeighKernel(relative[0], xBasisIn);
                    interpNearestNeighKernel(relative[1], yBasisIn);
                    interpNearestNeighKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 1) {
                    double xBasisIn[2], yBasisIn[2], zBasisIn[2];
                    interpLinearKernel(relative[0], xBasisIn);
                    interpLinearKernel(relative[1], yBasisIn);
                    interpLinearKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 2);
                } else if (kernelType == 4) {
                    double xBasisIn[6], yBasisIn[6], zBasisIn[6];
                    // Shift to the lower corner of the sinc support.
                    previous[0] -= SINC_KERNEL_RADIUS;
                    previous[1] -= SINC_KERNEL_RADIUS;
                    previous[2] -= SINC_KERNEL_RADIUS;
                    interpWindowedSincKernel(relative[0], xBasisIn);
                    interpWindowedSincKernel(relative[1], yBasisIn);
                    interpWindowedSincKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 6);
                } else {
                    double xBasisIn[4], yBasisIn[4], zBasisIn[4];
                    // Shift to the lower corner of the cubic support.
                    previous[0]--;
                    previous[1]--;
                    previous[2]--;
                    interpCubicSplineKernel(relative[0], xBasisIn);
                    interpCubicSplineKernel(relative[1], yBasisIn);
                    interpCubicSplineKernel(relative[2], zBasisIn);
                    intensity = interpLoop3D(floatingIntensity, xBasisIn, yBasisIn, zBasisIn, previous, fi_xyz, paddingValue, 4);
                }
            }
            resultIntensity[index] = (float)intensity;
        }
        index += (long)blockDim.x * gridDim.x;
    }
}
/* *************************************************************** */
// Host-side wrapper: picks the launch configuration and dispatches the 2D or
// 3D resampling kernel depending on the floating image dimensionality.
// DTI-aware resampling (dti_timepoint / jacMat) is not implemented on CUDA
// and aborts with an error.
void launchResample(nifti_image *floatingImage,
                    nifti_image *warpedImage,
                    int interp,
                    float paddingValue,
                    bool *dti_timepoint,
                    mat33 *jacMat,
                    float **floatingImage_d,
                    float **warpedImage_d,
                    float **deformationFieldImage_d,
                    int **mask_d,
                    float **sourceIJKMatrix_d) {
    // Define the DTI indices if required
    if(dti_timepoint!=NULL || jacMat!=NULL){
        reg_print_fct_error("launchResample");
        reg_print_msg_error("The DTI resampling has not yet been implemented with the CUDA platform. Exit.");
        reg_exit();
    }
    long targetVoxelNumber = (long) warpedImage->nx * warpedImage->ny * warpedImage->nz;
    //the below lines need to be moved to cu common
    cudaDeviceProp prop;    // NOTE(review): queried but unused; kept pending the move above
    cudaGetDeviceProperties(&prop, 0);
    unsigned int maxThreads = 512;
    // Maximum grid x-dimension for compute capability < 3.0 is 65535
    // (the previous value, 65365, was a typo).
    unsigned int maxBlocks = 65535;
    // Ceil-divide the voxel count by the block size, then clamp to the grid limit;
    // the kernels use a grid-stride loop so a clamped grid is still correct.
    unsigned int blocks = (targetVoxelNumber % maxThreads) ? (targetVoxelNumber / maxThreads) + 1 : targetVoxelNumber / maxThreads;
    blocks = min1(blocks, maxBlocks);
    dim3 mygrid(blocks, 1, 1);
    dim3 myblocks(maxThreads, 1, 1);
    // .x = warped voxel count, .y = floating voxel count
    ulong2 voxelNumber = make_ulong2(warpedImage->nx * warpedImage->ny * warpedImage->nz, floatingImage->nx * floatingImage->ny * floatingImage->nz);
    uint3 fi_xyz = make_uint3(floatingImage->nx, floatingImage->ny, floatingImage->nz);
    uint2 wi_tu = make_uint2(warpedImage->nt, warpedImage->nu);
    if (floatingImage->nz > 1) {
        ResampleImage3D <<<mygrid, myblocks >>>(*floatingImage_d,
                                                *deformationFieldImage_d,
                                                *warpedImage_d,
                                                *mask_d,
                                                *sourceIJKMatrix_d,
                                                voxelNumber,
                                                fi_xyz,
                                                wi_tu,
                                                paddingValue,
                                                interp);
    }
    else{
        ResampleImage2D <<<mygrid, myblocks >>>(*floatingImage_d,
                                                *deformationFieldImage_d,
                                                *warpedImage_d,
                                                *mask_d,
                                                *sourceIJKMatrix_d,
                                                voxelNumber,
                                                fi_xyz,
                                                wi_tu,
                                                paddingValue,
                                                interp);
    }
#ifndef NDEBUG
    NR_CUDA_CHECK_KERNEL(mygrid, myblocks)
#else
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces it.
    NR_CUDA_SAFE_CALL(cudaDeviceSynchronize());
#endif
}
/* *************************************************************** */
// Uploads a 4x4 identity transformation to the cIdentity device constant.
void identityConst()
{
    // Stack storage instead of malloc/new: the original heap allocations were
    // never released (memory leak).
    float mat_h[16];
    mat44 final;
    // Set the current transformation to identity
    final.m[0][0] = final.m[1][1] = final.m[2][2] = final.m[3][3] = 1.0f;
    final.m[0][1] = final.m[0][2] = final.m[0][3] = 0.0f;
    final.m[1][0] = final.m[1][2] = final.m[1][3] = 0.0f;
    final.m[2][0] = final.m[2][1] = final.m[2][3] = 0.0f;
    final.m[3][0] = final.m[3][1] = final.m[3][2] = 0.0f;
    mat44ToCptr(final, mat_h);
    // Bug fix: the original passed &mat_h (the address of the pointer
    // variable), copying 64 bytes of garbage instead of the matrix data.
    cudaMemcpyToSymbol(cIdentity, mat_h, 16 * sizeof(float));
}
/* *************************************************************** */
#include "Inputs/cuda.h"
// CHECK-LABEL: @_Z24atomic32_op_singlethreadPiii
// CHECK: cmpxchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: cmpxchg weak i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as") monotonic monotonic, align 4
// CHECK: atomicrmw xchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw add i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw and i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw or i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw xor i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw min i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw max i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: load atomic i32, i32* {{%[0-9]+}} syncscope("singlethread-one-as") monotonic, align 4
// CHECK: store atomic i32 %{{.*}}, i32* %{{.*}} syncscope("singlethread-one-as") monotonic, align 4
// Codegen fixture: exercises every int32 __hip_atomic_* builtin (cmpxchg
// strong/weak, xchg, add, and, or, xor, min, max, load, store) at
// single-thread scope; the "singlethread-one-as" syncscopes are pinned by the
// CHECK lines above. Do not reorder operations.
__device__ int atomic32_op_singlethread(int *ptr, int val, int desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z25atomicu32_op_singlethreadPjjj
// CHECK: atomicrmw umin i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw umax i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread-one-as")
// Codegen fixture: unsigned 32-bit min/max must lower to umin/umax at
// single-thread scope (see CHECK lines above).
__device__ unsigned int atomicu32_op_singlethread(unsigned int *ptr, unsigned int val, unsigned int desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
return val;
}
// CHECK-LABEL: @_Z21atomic32_op_wavefrontPiii
// CHECK: cmpxchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: cmpxchg weak i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as") monotonic monotonic, align 4
// CHECK: atomicrmw xchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw add i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw and i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw or i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw xor i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw min i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw max i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: load atomic i32, i32* {{%[0-9]+}} syncscope("wavefront-one-as") monotonic, align 4
// CHECK: store atomic i32 %{{.*}}, i32* %{{.*}} syncscope("wavefront-one-as") monotonic, align 4
// Codegen fixture: int32 __hip_atomic_* builtins at wavefront scope; the
// "wavefront-one-as" syncscopes are pinned by the CHECK lines above.
__device__ int atomic32_op_wavefront(int *ptr, int val, int desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z22atomicu32_op_wavefrontPjjj
// CHECK: atomicrmw umin i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw umax i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront-one-as")
// Codegen fixture: unsigned 32-bit min/max must lower to umin/umax at
// wavefront scope (see CHECK lines above).
__device__ unsigned int atomicu32_op_wavefront(unsigned int *ptr, unsigned int val, unsigned int desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
return val;
}
// CHECK-LABEL: @_Z21atomic32_op_workgroupPiii
// CHECK: cmpxchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: cmpxchg weak i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as") monotonic monotonic, align 4
// CHECK: atomicrmw xchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw add i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw and i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw or i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw xor i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw min i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw max i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: store atomic i32 %{{.*}}, i32* %{{.*}} syncscope("workgroup-one-as") monotonic, align 4
// Codegen fixture: int32 __hip_atomic_* builtins at workgroup scope (no
// atomic load in this variant); the "workgroup-one-as" syncscopes are pinned
// by the CHECK lines above.
__device__ int atomic32_op_workgroup(int *ptr, int val, int desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z22atomicu32_op_workgroupPjjj
// CHECK: atomicrmw umin i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw umax i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup-one-as")
// Codegen fixture: unsigned 32-bit min/max must lower to umin/umax at
// workgroup scope (see CHECK lines above).
__device__ unsigned int atomicu32_op_workgroup(unsigned int *ptr, unsigned int val, unsigned int desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
return val;
}
// CHECK-LABEL: @_Z17atomic32_op_agentPiii
// CHECK: cmpxchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: cmpxchg weak i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as") monotonic monotonic, align 4
// CHECK: atomicrmw xchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw add i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw and i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw or i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw xor i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw min i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw max i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: store atomic i32 %{{.*}}, i32* %{{.*}} syncscope("agent-one-as") monotonic, align 4
// Codegen fixture: int32 __hip_atomic_* builtins at agent (device) scope; the
// "agent-one-as" syncscopes are pinned by the CHECK lines above.
__device__ int atomic32_op_agent(int *ptr, int val, int desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z18atomicu32_op_agentPjjj
// CHECK: atomicrmw umin i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw umax i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent-one-as")
// Codegen fixture: unsigned 32-bit min/max must lower to umin/umax at agent
// scope (see CHECK lines above).
__device__ unsigned int atomicu32_op_agent(unsigned int *ptr, unsigned int val, unsigned int desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
return val;
}
// CHECK-LABEL: @_Z18atomic32_op_systemPiii
// CHECK: cmpxchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: cmpxchg weak i32* {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as") monotonic monotonic, align 4
// CHECK: atomicrmw xchg i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw add i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw and i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw or i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw xor i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw min i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw max i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: load i32, i32* %{{.*}}, align 4
// CHECK: store atomic i32 %{{.*}}, i32* %{{.*}} syncscope("one-as") monotonic, align 4
// Codegen fixture: int32 __hip_atomic_* builtins at system scope; the
// "one-as" syncscopes are pinned by the CHECK lines above.
__device__ int atomic32_op_system(int *ptr, int val, int desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z19atomicu32_op_systemPjjj
// CHECK: atomicrmw umin i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw umax i32* {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("one-as")
// Codegen fixture: unsigned 32-bit min/max must lower to umin/umax at system
// scope (see CHECK lines above).
__device__ unsigned int atomicu32_op_system(unsigned int *ptr, unsigned int val, unsigned int desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
return val;
}
// CHECK-LABEL: @_Z24atomic64_op_singlethreadPxS_xx
// CHECK: cmpxchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: cmpxchg weak i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as") monotonic monotonic, align 8
// CHECK: atomicrmw xchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw add i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw and i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw or i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw xor i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw min i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw max i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("singlethread-one-as") monotonic, align 8
// Codegen fixture: 64-bit __hip_atomic_* builtins at single-thread scope
// (ptr2 is unused here); the "singlethread-one-as" syncscopes are pinned by
// the CHECK lines above.
__device__ long long atomic64_op_singlethread(long long *ptr, long long *ptr2, long long val, long long desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z25atomicu64_op_singlethreadPyS_yy
// CHECK: atomicrmw umin i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: atomicrmw umax i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread-one-as")
// CHECK: load atomic i64, i64* %{{.*}} syncscope("singlethread-one-as") monotonic, align 8
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("singlethread-one-as") monotonic, align 8
// Codegen fixture: unsigned 64-bit umin/umax plus atomic load/store at
// single-thread scope (ptr2 is unused); see CHECK lines above.
__device__ unsigned long long atomicu64_op_singlethread(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
return val;
}
// CHECK-LABEL: @_Z21atomic64_op_wavefrontPxS_xx
// CHECK: cmpxchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: cmpxchg weak i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as") monotonic monotonic, align 8
// CHECK: atomicrmw xchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw add i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw and i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw or i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw xor i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw min i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw max i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: load atomic i64, i64* {{%[0-9]+}} syncscope("wavefront-one-as") monotonic, align 8
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("wavefront-one-as") monotonic, align 8
// Codegen fixture: 64-bit __hip_atomic_* builtins at wavefront scope (ptr2 is
// unused); the "wavefront-one-as" syncscopes are pinned by the CHECK lines above.
__device__ long long atomic64_op_wavefront(long long *ptr, long long *ptr2, long long val, long long desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z22atomicu64_op_wavefrontPyS_yy
// CHECK: atomicrmw umin i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: atomicrmw umax i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront-one-as")
// CHECK: load atomic i64, i64* {{%[0-9]+}} syncscope("wavefront-one-as") monotonic, align 8
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("wavefront-one-as") monotonic, align 8
// Codegen fixture: unsigned 64-bit umin/umax plus atomic load/store at
// wavefront scope (ptr2 is unused); see CHECK lines above.
__device__ unsigned long long atomicu64_op_wavefront(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
return val;
}
// CHECK-LABEL: @_Z21atomic64_op_workgroupPxS_xx
// CHECK: cmpxchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: cmpxchg weak i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as") monotonic monotonic, align 8
// CHECK: atomicrmw xchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw add i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw and i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw or i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw xor i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw min i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw max i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("workgroup-one-as") monotonic, align 8
// Codegen fixture: 64-bit __hip_atomic_* builtins at workgroup scope (no
// atomic load in this variant; ptr2 is unused); the "workgroup-one-as"
// syncscopes are pinned by the CHECK lines above.
__device__ long long atomic64_op_workgroup(long long *ptr, long long *ptr2, long long val, long long desired) {
bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
return flag ? val : desired;
}
// CHECK-LABEL: @_Z22atomicu64_op_workgroupPyS_yy
// CHECK: atomicrmw umin i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: atomicrmw umax i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup-one-as")
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("workgroup-one-as") monotonic, align 8
// Codegen fixture: unsigned 64-bit umin/umax plus atomic store at workgroup
// scope (ptr2 is unused); see CHECK lines above.
__device__ unsigned long long atomicu64_op_workgroup(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
__hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
return val;
}
// CHECK-LABEL: @_Z17atomic64_op_agentPxS_xx
// CHECK: cmpxchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: cmpxchg weak i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as") monotonic monotonic, align 8
// CHECK: atomicrmw xchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw add i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw and i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw or i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw xor i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw min i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw max i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("agent-one-as") monotonic, align 8
// Codegen test: same signed 64-bit builtin sequence as the workgroup variant,
// but at agent scope — the CHECK lines above expect syncscope("agent-one-as")
// on every operation, in source order. Do not reorder or change the signature
// (the mangled CHECK-LABEL @_Z17atomic64_op_agentPxS_xx encodes it).
__device__ long long atomic64_op_agent(long long *ptr, long long *ptr2, long long val, long long desired) {
  // Strong then weak compare-exchange; `val` doubles as the expected-value slot.
  bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  // Read-modify-write builtins: xchg, add, and, or, xor, min, max.
  val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  // Atomic store; returning flag/val keeps the results live so nothing is elided.
  __hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  return flag ? val : desired;
}
// CHECK-LABEL: @_Z18atomicu64_op_agentPyS_yy
// CHECK: atomicrmw umin i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: atomicrmw umax i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent-one-as")
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("agent-one-as") monotonic, align 8
// Codegen test: unsigned 64-bit fetch_min/fetch_max must lower to `umin`/`umax`
// at agent scope (see CHECK lines above). `ptr2`/`desired` are unused but keep
// the mangled CHECK-LABEL (@_Z18atomicu64_op_agentPyS_yy) stable.
__device__ unsigned long long atomicu64_op_agent(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
  val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  __hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
  return val;
}
// CHECK-LABEL: @_Z18atomic64_op_systemPxS_xx
// CHECK: cmpxchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: cmpxchg weak i64* {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as") monotonic monotonic, align 8
// CHECK: atomicrmw xchg i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw add i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw and i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw or i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw xor i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw min i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw max i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: load i64, i64* %{{.*}}, align 8
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("one-as") monotonic, align 8
// Codegen test: signed 64-bit builtins at system scope — CHECK lines above
// expect syncscope("one-as"). This variant additionally exercises
// __hip_atomic_load (matched as a plain `load i64` by the CHECK above the
// store). Statement order and signature are fixed by the CHECK lines and the
// mangled CHECK-LABEL @_Z18atomic64_op_systemPxS_xx.
__device__ long long atomic64_op_system(long long *ptr, long long *ptr2, long long val, long long desired) {
  // Strong then weak compare-exchange; `val` doubles as the expected-value slot.
  bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  flag = __hip_atomic_compare_exchange_weak(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  // Read-modify-write builtins: xchg, add, and, or, xor, min, max.
  val = __hip_atomic_exchange(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_add(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_and(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_or(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  // Atomic load followed by atomic store; results kept live via the return.
  val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  __hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  return flag ? val : desired;
}
// CHECK-LABEL: @_Z19atomicu64_op_systemPyS_yy
// CHECK: atomicrmw umin i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: atomicrmw umax i64* {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("one-as")
// CHECK: load i64, i64* %{{.*}}, align 8
// CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} syncscope("one-as") monotonic, align 8
// Codegen test: unsigned 64-bit fetch_min/fetch_max must lower to `umin`/`umax`
// at system scope, plus an atomic load/store pair (see CHECK lines above).
// `ptr2`/`desired` are unused but keep the mangled CHECK-LABEL
// (@_Z19atomicu64_op_systemPyS_yy) stable.
__device__ unsigned long long atomicu64_op_system(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
  val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  val = __hip_atomic_load(ptr, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  __hip_atomic_store(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
  return val;
}
#include <cooperative_groups.h>
#include <thrust/distance.h>
#include <thrust/functional.h>
#include <cub/cub.cuh>
#include <memory>
#include <cuco/allocator.hpp>
#include <cuco/probe_sequences.cuh>
#include <cuco/traits.hpp>
#if defined(CUDART_VERSION) && (CUDART_VERSION >= 11000) && defined(__CUDA_ARCH__) && \
(__CUDA_ARCH__ >= 700)
#define CUCO_HAS_CUDA_BARRIER
#endif
// cg::memcpy_async is supported for CUDA 11.1 and up
#if defined(CUDART_VERSION) && (CUDART_VERSION >= 11100)
#define CUCO_HAS_CG_MEMCPY_ASYNC
#endif
#if defined(CUCO_HAS_CUDA_BARRIER)
#include <cuda/barrier>
#endif
#include <cuco/detail/error.hpp>
#include <cuco/detail/prime.hpp>
#include <cuco/detail/static_multimap/kernels.cuh>
namespace cuco {
/**
* @brief A GPU-accelerated, unordered, associative container of key-value
* pairs that supports equivalent keys.
*
* Allows constant time concurrent inserts or concurrent find operations from threads in device
* code. Concurrent insert/find is allowed only when
* `static_multimap<Key, Value>::supports_concurrent_insert_find()` is true.
*
* Current limitations:
* - Requires keys and values where `cuco::is_bitwise_comparable_v<T>` is true
* - Comparisons against the "sentinel" values will always be done with bitwise comparisons
* Therefore, the objects must have unique, bitwise object representations (e.g., no padding bits).
* - Does not support erasing keys
* - Capacity is fixed and will not grow automatically
* - Requires the user to specify sentinel values for both key and mapped value
* to indicate empty slots
* - Concurrent insert/find is only supported when `static_multimap<Key,
 * Value>::supports_concurrent_insert_find()` is true
*
* The `static_multimap` supports two types of operations:
* - Host-side "bulk" operations
* - Device-side "singular" operations
*
* The host-side bulk operations include `insert`, `contains`, `count`, `retrieve` and their
* variants. These APIs should be used when there are a large number of keys to insert or lookup in
* the map. For example, given a range of keys specified by device-accessible iterators, the bulk
* `insert` function will insert all keys into the map.
*
* The singular device-side operations allow individual threads to perform
* independent operations (e.g. `insert`, etc.) from device code. These
* operations are accessed through non-owning, trivially copyable "view" types:
* `device_view` and `device_mutable_view`. The `device_view` class is an
* immutable view that allows only non-modifying operations such as `count` or
* `contains`. The `device_mutable_view` class only allows `insert` operations.
* The two types are separate to prevent erroneous concurrent insert/find
* operations.
*
* By default, when querying for a Key `k` in operations like `count` or `retrieve`, if `k` is not
* present in the map, it will not contribute to the output. Query APIs with the `_outer` suffix
* will include non-matching keys in the output. See the relevant API documentation for more
* information.
*
 * Typical associative container query APIs like `retrieve` look up values solely by key, e.g.,
* `count` for a Key `k` will count all values whose associated key `k'` matches `k` as determined
* by `key_equal(k, k')`. In some cases, one may want to consider both key _and_ value when
* determining if a key-value pair should contribute to the output. `static_multimap` supports this
* use case with APIs prefixed with `pair_`, e.g., `pair_count` is given a key-value pair
* `{k,v}` and only counts key-value pairs, `{k', v'}`, in the map where `pair_equal({k,v}, {k',
* v'})` is true. See the relevant API documentation for more information.
*
* Example:
* \code{.cpp}
* int empty_key_sentinel = -1;
* int empty_value_sentinel = -1;
*
* // Constructs a multimap with 100,000 slots using -1 and -1 as the empty key/value
* // sentinels. Note the capacity is chosen knowing we will insert 50,000 keys,
 * // for a load factor of 50%.
* static_multimap<int, int> m{100'000, empty_key_sentinel, empty_value_sentinel};
*
* // Create a sequence of pairs {{0,0}, {1,1}, ... {i,i}}
 * thrust::device_vector<thrust::pair<int,int>> pairs(50'000);
* thrust::transform(thrust::make_counting_iterator(0),
* thrust::make_counting_iterator(pairs.size()),
* pairs.begin(),
 * []__device__(auto i){ return thrust::make_pair(i,i); });
*
*
* // Inserts all pairs into the map
* m.insert(pairs.begin(), pairs.end());
*
* // Get a `device_view` and passes it to a kernel where threads may perform
* // `contains/count/retrieve` lookups
* kernel<<<...>>>(m.get_device_view());
* \endcode
*
*
* @tparam Key Type used for keys. Requires `cuco::is_bitwise_comparable_v<Key>`
* @tparam Value Type of the mapped values. Requires `cuco::is_bitwise_comparable_v<Value>`
* @tparam Scope The scope in which multimap operations will be performed by
* individual threads
* @tparam ProbeSequence Probe sequence chosen between `cuco::detail::linear_probing`
* and `cuco::detail::double_hashing`. (see `detail/probe_sequences.cuh`)
* @tparam Allocator Type of allocator used for device storage
*/
template <typename Key,
typename Value,
cuda::thread_scope Scope = cuda::thread_scope_device,
typename Allocator = cuco::cuda_allocator<char>,
class ProbeSequence =
cuco::double_hashing<8, detail::MurmurHash3_32<Key>, detail::MurmurHash3_32<Key>>>
class static_multimap {
static_assert(
cuco::is_bitwise_comparable_v<Key>,
"Key type must have unique object representations or have been explicitly declared as safe for "
"bitwise comparison via specialization of cuco::is_bitwise_comparable_v<Key>.");
static_assert(
cuco::is_bitwise_comparable_v<Value>,
"Value type must have unique object representations or have been explicitly declared as safe "
"for bitwise comparison via specialization of cuco::is_bitwise_comparable_v<Value>.");
static_assert(
std::is_base_of_v<cuco::detail::probe_sequence_base<ProbeSequence::cg_size>, ProbeSequence>,
"ProbeSequence must be a specialization of either cuco::double_hashing or "
"cuco::linear_probing");
public:
using value_type = cuco::pair_type<Key, Value>;
using key_type = Key;
using mapped_type = Value;
using atomic_key_type = cuda::atomic<key_type, Scope>;
using atomic_mapped_type = cuda::atomic<mapped_type, Scope>;
using pair_atomic_type = cuco::pair_type<atomic_key_type, atomic_mapped_type>;
using atomic_ctr_type = cuda::atomic<std::size_t, Scope>;
using allocator_type = Allocator;
using slot_allocator_type =
typename std::allocator_traits<Allocator>::rebind_alloc<pair_atomic_type>;
using counter_allocator_type =
typename std::allocator_traits<Allocator>::rebind_alloc<atomic_ctr_type>;
using probe_sequence_type = detail::probe_sequence<ProbeSequence, Key, Value, Scope>;
static_multimap(static_multimap const&) = delete;
static_multimap& operator=(static_multimap const&) = delete;
static_multimap(static_multimap&&) = default;
static_multimap& operator=(static_multimap&&) = default;
~static_multimap() = default;
/**
* @brief Indicate if concurrent insert/find is supported for the key/value types.
*
* @return Boolean indicating if concurrent insert/find is supported.
*/
__host__ __device__ __forceinline__ static constexpr bool
supports_concurrent_insert_find() noexcept
{
  // Concurrent insert/find is tied to packability: a pair that packs into a
  // single word can presumably be read/written as one atomic unit.
  constexpr bool packable = cuco::detail::is_packable<value_type>();
  return packable;
}
/**
* @brief The size of the CUDA cooperative thread group.
*
* @return The CG size.
*/
__host__ __device__ __forceinline__ static constexpr uint32_t cg_size() noexcept
{
  // Forward the cooperative-group size chosen by the probe sequence.
  constexpr uint32_t group_size = ProbeSequence::cg_size;
  return group_size;
}
/**
* @brief Construct a statically-sized map with the specified initial capacity,
* sentinel values and CUDA stream.
*
* The capacity of the map is fixed. Insert operations will not automatically
* grow the map. Attempting to insert more unique keys than the capacity of
* the map results in undefined behavior.
*
* Performance begins to degrade significantly beyond a load factor of ~70%.
* For best performance, choose a capacity that will keep the load factor
* below 70%. E.g., if inserting `N` unique keys, choose a capacity of
* `N * (1/0.7)`.
*
* The `empty_key_sentinel` and `empty_value_sentinel` values are reserved and
* undefined behavior results from attempting to insert any key/value pair
* that contains either.
*
* @param capacity The total number of slots in the map
* @param empty_key_sentinel The reserved key value for empty slots
* @param empty_value_sentinel The reserved mapped value for empty slots
* @param stream CUDA stream used to initialize the map
* @param alloc Allocator used for allocating device storage
*/
static_multimap(std::size_t capacity,
Key empty_key_sentinel,
Value empty_value_sentinel,
cudaStream_t stream = 0,
Allocator const& alloc = Allocator{});
/**
* @brief Inserts all key/value pairs in the range `[first, last)`.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @param first Beginning of the sequence of key/value pairs
* @param last End of the sequence of key/value pairs
* @param stream CUDA stream used for insert
*/
template <typename InputIt>
void insert(InputIt first, InputIt last, cudaStream_t stream = 0);
/**
* @brief Inserts key/value pairs in the range `[first, first + n)` if `pred`
* of the corresponding stencil returns true.
*
* The key/value pair `*(first + i)` is inserted if `pred( *(stencil + i) )` returns true.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @tparam StencilIt Device accessible random access iterator whose value_type is
* convertible to Predicate's argument type
* @tparam Predicate Unary predicate callable whose return type must be convertible to `bool` and
* argument type is convertible from `std::iterator_traits<StencilIt>::value_type`.
* @param first Beginning of the sequence of key/value pairs
* @param last End of the sequence of key/value pairs
* @param stencil Beginning of the stencil sequence
* @param pred Predicate to test on every element in the range `[stencil, stencil +
* std::distance(first, last))`
* @param stream CUDA stream used for insert
*/
template <typename InputIt, typename StencilIt, typename Predicate>
void insert_if(
InputIt first, InputIt last, StencilIt stencil, Predicate pred, cudaStream_t stream = 0);
/**
* @brief Indicates whether the keys in the range `[first, last)` are contained in the map.
*
* Stores `true` or `false` to `(output + i)` indicating if the key `*(first + i)` exists in the
* map.
*
* @tparam InputIt Device accessible input iterator whose `value_type` is
* convertible to the map's `key_type`
* @tparam OutputIt Device accessible output iterator whose `value_type` is convertible from
* `bool`
* @tparam KeyEqual Binary callable type used to compare two keys for equality
* @param first Beginning of the sequence of keys
* @param last End of the sequence of keys
* @param output_begin Beginning of the output sequence indicating whether each key is present
* @param stream CUDA stream used for contains
* @param key_equal The binary function to compare two keys for equality
*/
template <typename InputIt, typename OutputIt, typename KeyEqual = thrust::equal_to<key_type>>
void contains(InputIt first,
InputIt last,
OutputIt output_begin,
cudaStream_t stream = 0,
KeyEqual key_equal = KeyEqual{}) const;
/**
* @brief Counts the occurrences of keys in `[first, last)` contained in the multimap.
*
* For each key, `k = *(first + i)`, counts all matching keys, `k'`, as determined by
* `key_equal(k, k')` and returns the sum of all matches for all keys.
*
 * @tparam InputIt Device accessible input iterator whose `value_type` is convertible to `key_type`
* @tparam KeyEqual Binary callable
* @param first Beginning of the sequence of keys to count
* @param last End of the sequence of keys to count
* @param stream CUDA stream used for count
* @param key_equal Binary function to compare two keys for equality
* @return The sum of total occurrences of all keys in `[first, last)`
*/
template <typename InputIt, typename KeyEqual = thrust::equal_to<key_type>>
std::size_t count(InputIt first,
InputIt last,
cudaStream_t stream = 0,
KeyEqual key_equal = KeyEqual{}) const;
/**
* @brief Counts the occurrences of keys in `[first, last)` contained in the multimap.
*
* For each key, `k = *(first + i)`, counts all matching keys, `k'`, as determined by
* `key_equal(k, k')` and returns the sum of all matches for all keys. If `k` does not have any
* matches, it contributes 1 to the final sum.
*
 * @tparam InputIt Device accessible input iterator whose `value_type` is convertible to `key_type`
* @tparam KeyEqual Binary callable
* @param first Beginning of the sequence of keys to count
* @param last End of the sequence of keys to count
* @param stream CUDA stream used for count_outer
* @param key_equal Binary function to compare two keys for equality
* @return The sum of total occurrences of all keys in `[first, last)` where keys without matches
* are considered to have a single occurrence.
*/
template <typename InputIt, typename KeyEqual = thrust::equal_to<key_type>>
std::size_t count_outer(InputIt first,
InputIt last,
cudaStream_t stream = 0,
KeyEqual key_equal = KeyEqual{}) const;
/**
* @brief Counts the occurrences of key/value pairs in `[first, last)` contained in the multimap.
*
* For key-value pair, `kv = *(first + i)`, counts all matching key-value pairs, `kv'`, as
* determined by `pair_equal(kv, kv')` and returns the sum of all matches for all key-value pairs.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @tparam PairEqual Binary callable
* @param first Beginning of the sequence of pairs to count
* @param last End of the sequence of pairs to count
* @param pair_equal Binary function to compare two pairs for equality
* @param stream CUDA stream used for pair_count
* @return The sum of total occurrences of all pairs in `[first, last)`
*/
template <typename InputIt, typename PairEqual>
std::size_t pair_count(InputIt first,
InputIt last,
PairEqual pair_equal,
cudaStream_t stream = 0) const;
/**
* @brief Counts the occurrences of key/value pairs in `[first, last)` contained in the multimap.
*
* For key-value pair, `kv = *(first + i)`, counts all matching key-value pairs, `kv'`, as
* determined by `pair_equal(kv, kv')` and returns the sum of all matches for all key-value pairs.
* if `kv` does not have any matches, it contributes 1 to the final sum.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @tparam PairEqual Binary callable
* @param first Beginning of the sequence of pairs to count
* @param last End of the sequence of pairs to count
* @param pair_equal Binary function to compare two pairs for equality
* @param stream CUDA stream used for pair_count_outer
* @return The sum of total occurrences of all pairs in `[first, last)` where a key-value pair
* without a match is considered to have a single occurrence
*/
template <typename InputIt, typename PairEqual>
std::size_t pair_count_outer(InputIt first,
InputIt last,
PairEqual pair_equal,
cudaStream_t stream = 0) const;
/**
* @brief Retrieves all the values corresponding to all keys in the range `[first, last)`.
*
* If key `k = *(first + i)` exists in the map, copies `k` and all associated values to
* unspecified locations in `[output_begin, output_end)`. Else, does nothing.
*
* Behavior is undefined if the size of the output range exceeds `std::distance(output_begin,
* output_end)`. Use `count()` to determine the size of the output range.
*
* @tparam InputIt Device accessible input iterator whose `value_type` is
* convertible to the map's `key_type`
* @tparam OutputIt Device accessible output iterator whose `value_type` is
* constructible from the map's `value_type`
* @tparam KeyEqual Binary callable type
* @param first Beginning of the sequence of keys
* @param last End of the sequence of keys
* @param output_begin Beginning of the sequence of key/value pairs retrieved for each key
* @param stream CUDA stream used for retrieve
* @param key_equal The binary function to compare two keys for equality
* @return The iterator indicating the last valid key/value pairs in the output
*/
template <typename InputIt, typename OutputIt, typename KeyEqual = thrust::equal_to<key_type>>
OutputIt retrieve(InputIt first,
InputIt last,
OutputIt output_begin,
cudaStream_t stream = 0,
KeyEqual key_equal = KeyEqual{}) const;
/**
* @brief Retrieves all the matches corresponding to all keys in the range `[first, last)`.
*
* If key `k = *(first + i)` exists in the map, copies `k` and all associated values to
* unspecified locations in `[output_begin, output_end)`. Else, copies `k` and
* `empty_value_sentinel`.
*
* Behavior is undefined if the size of the output range exceeds `std::distance(output_begin,
* output_end)`. Use `count_outer()` to determine the size of the output range.
*
* @tparam InputIt Device accessible input iterator whose `value_type` is
* convertible to the map's `key_type`
* @tparam OutputIt Device accessible output iterator whose `value_type` is
* constructible from the map's `value_type`
* @tparam KeyEqual Binary callable type
* @param first Beginning of the sequence of keys
* @param last End of the sequence of keys
* @param output_begin Beginning of the sequence of key/value pairs retrieved for each key
* @param stream CUDA stream used for retrieve_outer
* @param key_equal The binary function to compare two keys for equality
* @return The iterator indicating the last valid key/value pairs in the output
*/
template <typename InputIt, typename OutputIt, typename KeyEqual = thrust::equal_to<key_type>>
OutputIt retrieve_outer(InputIt first,
InputIt last,
OutputIt output_begin,
cudaStream_t stream = 0,
KeyEqual key_equal = KeyEqual{}) const;
/**
* @brief Retrieves all pairs matching the input probe pair in the range `[first, last)`.
*
* The `pair_` prefix indicates that the input data type is convertible to the map's
* `value_type`. If pair_equal(*(first + i), slot[j]) returns true, then *(first+i) is
* stored to `probe_output_begin`, and slot[j] is stored to `contained_output_begin`.
*
* Behavior is undefined if the size of the output range exceeds
* `std::distance(probe_output_begin, probe_output_end)` (or
* `std::distance(contained_output_begin, contained_output_end)`). Use
* `pair_count()` to determine the size of the output range.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
* `InputIt`s `value_type`.
* @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
* the map's `value_type`.
* @tparam PairEqual Binary callable type
* @param first Beginning of the sequence of pairs
* @param last End of the sequence of pairs
* @param probe_output_begin Beginning of the sequence of the matched probe pairs
* @param contained_output_begin Beginning of the sequence of the matched contained pairs
* @param pair_equal The binary function to compare two pairs for equality
* @param stream CUDA stream used for pair_retrieve
* @return Pair of iterators pointing to the last elements in the output
*/
template <typename InputIt, typename OutputIt1, typename OutputIt2, typename PairEqual>
std::pair<OutputIt1, OutputIt2> pair_retrieve(InputIt first,
InputIt last,
OutputIt1 probe_output_begin,
OutputIt2 contained_output_begin,
PairEqual pair_equal,
cudaStream_t stream = 0) const;
/**
* @brief Retrieves all pairs matching the input probe pair in the range `[first, last)`.
*
* The `pair_` prefix indicates that the input data type is convertible to the map's `value_type`.
* If pair_equal(*(first + i), slot[j]) returns true, then *(first+i) is stored to
* `probe_output_begin`, and slot[j] is stored to `contained_output_begin`. If *(first+i) doesn't
* have matches in the map, copies *(first + i) in `probe_output_begin` and a pair of
* `empty_key_sentinel` and `empty_value_sentinel` in `contained_output_begin`.
*
* Behavior is undefined if the size of the output range exceeds
* `std::distance(probe_output_begin, probe_output_end)` (or
* `std::distance(contained_output_begin, contained_output_end)`). Use
* `pair_count()` to determine the size of the output range.
*
* @tparam InputIt Device accessible random access input iterator where
* `std::is_convertible<std::iterator_traits<InputIt>::value_type,
* static_multimap<K, V>::value_type>` is `true`
* @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
* `InputIt`s `value_type`.
* @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
* the map's `value_type`.
* @tparam PairEqual Binary callable type
* @param first Beginning of the sequence of pairs
* @param last End of the sequence of pairs
* @param probe_output_begin Beginning of the sequence of the matched probe pairs
* @param contained_output_begin Beginning of the sequence of the matched contained pairs
* @param pair_equal The binary function to compare two pairs for equality
* @param stream CUDA stream used for pair_retrieve_outer
* @return Pair of iterators pointing to the last elements in the output
*/
template <typename InputIt, typename OutputIt1, typename OutputIt2, typename PairEqual>
std::pair<OutputIt1, OutputIt2> pair_retrieve_outer(InputIt first,
InputIt last,
OutputIt1 probe_output_begin,
OutputIt2 contained_output_begin,
PairEqual pair_equal,
cudaStream_t stream = 0) const;
private:
/**
* @brief Indicates if vector-load is used.
*
* Users have no explicit control on whether vector-load is used.
*
* @return Boolean indicating if vector-load is used.
*/
static constexpr bool uses_vector_load() noexcept
{
  // Vector-load is enabled exactly when the key/value pair is packable;
  // users have no explicit control over this choice.
  constexpr bool packable = cuco::detail::is_packable<value_type>();
  return packable;
}
/**
* @brief Returns the number of pairs loaded with each vector-load
*/
static constexpr uint32_t vector_width() noexcept
{
  // Delegated to the probe sequence.
  return ProbeSequence::vector_width();
}
/**
* @brief Returns the warp size.
*/
static constexpr uint32_t warp_size() noexcept
{
  // NVIDIA warps are 32 threads wide on all current architectures.
  return 32u;
}
/**
* @brief Custom deleter for unique pointer of device counter.
*/
struct counter_deleter {
  // Reference to the allocator that owns the counter's storage.
  counter_allocator_type& allocator;

  counter_deleter(counter_allocator_type& alloc) : allocator{alloc} {}
  counter_deleter(counter_deleter const&) = default;

  // Return the single counter slot to the allocator.
  void operator()(atomic_ctr_type* counter) { allocator.deallocate(counter, 1); }
};
/**
* @brief Custom deleter for unique pointer of slots.
*/
struct slot_deleter {
  // Allocator owning the slot storage, and the number of slots to return.
  slot_allocator_type& allocator;
  size_t& capacity;

  slot_deleter(slot_allocator_type& alloc, size_t& cap) : allocator{alloc}, capacity{cap} {}
  slot_deleter(slot_deleter const&) = default;

  // Return the whole slot array (`capacity` elements) to the allocator.
  void operator()(pair_atomic_type* slots) { allocator.deallocate(slots, capacity); }
};
class device_view_impl_base;
class device_mutable_view_impl;
class device_view_impl;
/**
 * @brief Common base for the owning map's non-owning view types.
 *
 * All real work is delegated to `impl_` (a `device_view_impl` or
 * `device_mutable_view_impl`); this base only stores it and forwards the
 * shared accessors.
 *
 * @tparam ViewImpl The implementation type held and forwarded to
 */
template <typename ViewImpl>
class device_view_base {
 protected:
  // Import member type definitions from `static_multimap`.
  // NOTE(review): the self-referential aliases (`using value_type =
  // value_type;` etc.) appear to re-declare the enclosing class's names so
  // that derived views can refer to them; the right-hand side presumably
  // resolves to the outer `static_multimap` alias at this point of lookup —
  // do not "simplify" these.
  using value_type = value_type;
  using key_type = Key;
  using mapped_type = Value;
  using pair_atomic_type = pair_atomic_type;
  using iterator = pair_atomic_type*;
  using const_iterator = pair_atomic_type const*;
  using probe_sequence_type = probe_sequence_type;

  /**
   * @brief Constructs a view over the first `capacity` slots of `slots`,
   * forwarding everything to the wrapped implementation object.
   *
   * @param slots Pointer to beginning of initialized slots array
   * @param capacity The number of slots viewed by this object
   * @param empty_key_sentinel The reserved key value for empty slots
   * @param empty_value_sentinel The reserved mapped value for empty slots
   */
  __host__ __device__ device_view_base(pair_atomic_type* slots,
                                       std::size_t capacity,
                                       Key empty_key_sentinel,
                                       Value empty_value_sentinel) noexcept
    : impl_{slots, capacity, empty_key_sentinel, empty_value_sentinel}
  {
  }

 public:
  /**
   * @brief Gets slots array.
   *
   * @return Slots array
   */
  __device__ __forceinline__ pair_atomic_type* get_slots() noexcept { return impl_.get_slots(); }
  /**
   * @brief Gets slots array.
   *
   * @return Slots array
   */
  __device__ __forceinline__ pair_atomic_type const* get_slots() const noexcept
  {
    return impl_.get_slots();
  }
  /**
   * @brief Gets the maximum number of elements the hash map can hold.
   *
   * @return The maximum number of elements the hash map can hold
   */
  __host__ __device__ __forceinline__ std::size_t get_capacity() const noexcept
  {
    return impl_.get_capacity();
  }
  /**
   * @brief Gets the sentinel value used to represent an empty key slot.
   *
   * @return The sentinel value used to represent an empty key slot
   */
  __host__ __device__ __forceinline__ Key get_empty_key_sentinel() const noexcept
  {
    return impl_.get_empty_key_sentinel();
  }
  /**
   * @brief Gets the sentinel value used to represent an empty value slot.
   *
   * @return The sentinel value used to represent an empty value slot
   */
  __host__ __device__ __forceinline__ Value get_empty_value_sentinel() const noexcept
  {
    return impl_.get_empty_value_sentinel();
  }

 protected:
  // The wrapped implementation object every accessor above forwards to.
  ViewImpl impl_;
};  // class device_view_base
public:
/**
* @brief Mutable, non-owning view-type that may be used in device code to
* perform singular inserts into the map.
*
* `device_mutable_view` is trivially-copyable and is intended to be passed by
* value.
*
* Example:
* \code{.cpp}
* cuco::static_multimap<int,int> m{100'000, -1, -1};
*
* // Inserts a sequence of pairs {{0,0}, {1,1}, ... {i,i}}
* thrust::for_each(thrust::make_counting_iterator(0),
* thrust::make_counting_iterator(50'000),
* [map = m.get_device_mutable_view()]
* __device__ (auto i) mutable {
* map.insert(thrust::make_pair(i,i));
* });
* \endcode
*/
class device_mutable_view : public device_view_base<device_mutable_view_impl> {
 public:
  using view_base_type = device_view_base<device_mutable_view_impl>;
  // Member types re-exported from the common view base.
  using value_type = typename view_base_type::value_type;
  using key_type = typename view_base_type::key_type;
  using mapped_type = typename view_base_type::mapped_type;
  using iterator = typename view_base_type::iterator;
  using const_iterator = typename view_base_type::const_iterator;
  /**
   * @brief Construct a mutable view of the first `capacity` slots of the
   * slots array pointed to by `slots`.
   *
   * @param slots Pointer to beginning of initialized slots array
   * @param capacity The number of slots viewed by this object
   * @param empty_key_sentinel The reserved value for keys to represent empty
   * slots
   * @param empty_value_sentinel The reserved value for mapped values to
   * represent empty slots
   */
  __host__ __device__ device_mutable_view(pair_atomic_type* slots,
                                          std::size_t capacity,
                                          Key empty_key_sentinel,
                                          Value empty_value_sentinel) noexcept
    : view_base_type{slots, capacity, empty_key_sentinel, empty_value_sentinel}
  {
  }
  /**
   * @brief Inserts the specified key/value pair into the map.
   *
   * Declaration only; the definition is presumably provided out-of-line in an
   * implementation header — TODO confirm.
   *
   * @param g The Cooperative Group that performs the insert
   * @param insert_pair The pair to insert
   * @return void.
   */
  __device__ __forceinline__ void insert(
    cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
    value_type const& insert_pair) noexcept;

 private:
  // Bring the base's implementation object into scope for member definitions.
  using device_view_base<device_mutable_view_impl>::impl_;
};  // class device mutable view
/**
* @brief Non-owning view-type that may be used in device code to
* perform singular find and contains operations for the map.
*
* `device_view` is trivially-copyable and is intended to be passed by
* value.
*
*/
class device_view : public device_view_base<device_view_impl> {
public:
using view_base_type = device_view_base<device_view_impl>;
using value_type = typename view_base_type::value_type;
using key_type = typename view_base_type::key_type;
using mapped_type = typename view_base_type::mapped_type;
using iterator = typename view_base_type::iterator;
using const_iterator = typename view_base_type::const_iterator;
/**
 * @brief Construct a view of the first `capacity` slots of the
 * slots array pointed to by `slots`.
 *
 * @param slots Pointer to beginning of initialized slots array
 * @param capacity The number of slots viewed by this object
 * @param empty_key_sentinel The reserved value for keys to represent empty
 * slots
 * @param empty_value_sentinel The reserved value for mapped values to
 * represent empty slots
 */
__host__ __device__ device_view(pair_atomic_type* slots,
std::size_t capacity,
Key empty_key_sentinel,
Value empty_value_sentinel) noexcept
: view_base_type{slots, capacity, empty_key_sentinel, empty_value_sentinel}
{
}
/**
 * @brief Makes a copy of given `device_view` using non-owned memory.
 *
 * This function is intended to be used to create shared memory copies of small static maps,
 * although global memory can be used as well.
 *
 * @tparam CG The type of the cooperative thread group
 * @param g The cooperative thread group used to copy the slots
 * @param source_device_view `device_view` to copy from
 * @param memory_to_use Array large enough to support `capacity` elements. Object does not
 * take the ownership of the memory
 * @return Copy of passed `device_view`
 */
template <typename CG>
__device__ __forceinline__ static device_view make_copy(
CG g, pair_atomic_type* const memory_to_use, device_view source_device_view) noexcept;
/**
 * @brief Flushes per-CG buffer into the output sequence.
 *
 * A given CUDA Cooperative Group, `g`, loads `num_outputs` key-value pairs from `output_buffer`
 * and writes them into global memory in a coalesced fashion. CG-wide `memcpy_async` is used if
 * `CUCO_HAS_CG_MEMCPY_ASYNC` is defined and `thrust::is_contiguous_iterator_v<OutputIt>`
 * returns true. All threads of `g` must be active due to implicit CG-wide synchronization
 * during flushing.
 *
 * @tparam CG Cooperative Group type
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt Device accessible output iterator whose `value_type` is
 * constructible from the map's `value_type`
 * @param g The Cooperative Group used to flush output buffer
 * @param num_outputs Number of valid output in the buffer
 * @param output_buffer Buffer of the key/value pair sequence
 * @param num_matches Size of the output sequence
 * @param output_begin Beginning of the output sequence of key/value pairs
 */
template <typename CG, typename atomicT, typename OutputIt>
__device__ __forceinline__ void flush_output_buffer(CG const& g,
uint32_t const num_outputs,
value_type* output_buffer,
atomicT* num_matches,
OutputIt output_begin) noexcept;
/**
 * @brief Flushes per-CG buffer into the output sequences.
 *
 * A given CUDA Cooperative Group, `g`, loads `num_outputs` elements from `probe_output_buffer`
 * and `num_outputs` elements from `contained_output_buffer`, then writes them into global
 * memory started from `probe_output_begin` and `contained_output_begin` respectively. All
 * threads of `g` must be active due to implicit CG-wide synchronization during flushing.
 *
 * @tparam CG Cooperative Group type
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
 * `InputIt`s `value_type`.
 * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
 * the map's `value_type`.
 * @param g The Cooperative Group used to flush output buffer
 * @param num_outputs Number of valid output in the buffer
 * @param probe_output_buffer Buffer of the matched probe pair sequence
 * @param contained_output_buffer Buffer of the matched contained pair sequence
 * @param num_matches Size of the output sequence
 * @param probe_output_begin Beginning of the output sequence of the matched probe pairs
 * @param contained_output_begin Beginning of the output sequence of the matched contained
 * pairs
 */
template <typename CG, typename atomicT, typename OutputIt1, typename OutputIt2>
__device__ __forceinline__ void flush_output_buffer(CG const& g,
uint32_t const num_outputs,
value_type* probe_output_buffer,
value_type* contained_output_buffer,
atomicT* num_matches,
OutputIt1 probe_output_begin,
OutputIt2 contained_output_begin) noexcept;
/**
 * @brief Indicates whether the key `k` exists in the map.
 *
 * If the key `k` was inserted into the map, `contains` returns
 * true. Otherwise, it returns false. Uses the CUDA Cooperative Groups API to
 * to leverage multiple threads to perform a single `contains` operation. This provides a
 * significant boost in throughput compared to the non Cooperative Group
 * `contains` at moderate to high load factors.
 *
 * @tparam KeyEqual Binary callable type
 * @param g The Cooperative Group used to perform the contains operation
 * @param k The key to search for
 * @param key_equal The binary callable used to compare two keys
 * for equality
 * @return A boolean indicating whether the key/value pair
 * containing `k` was inserted
 */
template <typename KeyEqual = thrust::equal_to<key_type>>
__device__ __forceinline__ bool contains(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
Key const& k,
KeyEqual key_equal = KeyEqual{}) noexcept;
/**
 * @brief Counts the occurrence of a given key contained in multimap.
 *
 * For a given key, `k`, counts all matching keys, `k'`, as determined by `key_equal(k, k')` and
 * returns the sum of all matches for `k`.
 *
 * @tparam KeyEqual Binary callable type
 * @param g The Cooperative Group used to perform the count operation
 * @param k The key to search for
 * @param key_equal The binary callable used to compare two keys
 * for equality
 * @return Number of matches found by the current thread
 */
template <typename KeyEqual = thrust::equal_to<key_type>>
__device__ __forceinline__ std::size_t count(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
Key const& k,
KeyEqual key_equal = KeyEqual{}) noexcept;
/**
 * @brief Counts the occurrence of a given key contained in multimap. If no
 * matches can be found for a given key, the corresponding occurrence is 1.
 *
 * For a given key, `k`, counts all matching keys, `k'`, as determined by `key_equal(k, k')` and
 * returns the sum of all matches for `k`. If `k` does not have any matches, returns 1.
 *
 * @tparam KeyEqual Binary callable type
 * @param g The Cooperative Group used to perform the count operation
 * @param k The key to search for
 * @param key_equal The binary callable used to compare two keys
 * for equality
 * @return Number of matches found by the current thread
 */
template <typename KeyEqual = thrust::equal_to<key_type>>
__device__ __forceinline__ std::size_t count_outer(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
Key const& k,
KeyEqual key_equal = KeyEqual{}) noexcept;
/**
 * @brief Counts the occurrence of a given key/value pair contained in multimap.
 *
 * For a given pair, `p`, counts all matching pairs, `p'`, as determined by `pair_equal(p, p')`
 * and returns the sum of all matches for `p`.
 *
 * @tparam PairEqual Binary callable type
 * @param g The Cooperative Group used to perform the pair_count operation
 * @param pair The pair to search for
 * @param pair_equal The binary callable used to compare two pairs
 * for equality
 * @return Number of matches found by the current thread
 */
template <typename PairEqual>
__device__ __forceinline__ std::size_t pair_count(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
value_type const& pair,
PairEqual pair_equal) noexcept;
/**
 * @brief Counts the occurrence of a given key/value pair contained in multimap.
 * If no matches can be found for a given key, the corresponding occurrence is 1.
 *
 * For a given pair, `p`, counts all matching pairs, `p'`, as determined by `pair_equal(p, p')`
 * and returns the sum of all matches for `p`. If `p` does not have any matches, returns 1.
 *
 * @tparam PairEqual Binary callable type
 * @param g The Cooperative Group used to perform the pair_count operation
 * @param pair The pair to search for
 * @param pair_equal The binary callable used to compare two pairs
 * for equality
 * @return Number of matches found by the current thread
 */
template <typename PairEqual>
__device__ __forceinline__ std::size_t pair_count_outer(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& g,
value_type const& pair,
PairEqual pair_equal) noexcept;
/**
 * @brief Retrieves all the matches of a given key contained in multimap with per-flushing-CG
 * shared memory buffer.
 *
 * For key `k` existing in the map, copies `k` and all associated values to unspecified
 * locations in `[output_begin, output_end)`.
 *
 * @tparam buffer_size Size of the output buffer
 * @tparam FlushingCG Type of Cooperative Group used to flush output buffer
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt Device accessible output iterator whose `value_type` is
 * constructible from the map's `value_type`
 * @tparam KeyEqual Binary callable type
 * @param flushing_cg The Cooperative Group used to flush output buffer
 * @param probing_cg The Cooperative Group used to retrieve
 * @param k The key to search for
 * @param flushing_cg_counter Pointer to flushing_cg counter
 * @param output_buffer Shared memory buffer of the key/value pair sequence
 * @param num_matches Size of the output sequence
 * @param output_begin Beginning of the output sequence of key/value pairs
 * @param key_equal The binary callable used to compare two keys
 * for equality
 */
template <uint32_t buffer_size,
typename FlushingCG,
typename atomicT,
typename OutputIt,
typename KeyEqual = thrust::equal_to<key_type>>
__device__ __forceinline__ void retrieve(
FlushingCG const& flushing_cg,
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
Key const& k,
uint32_t* flushing_cg_counter,
value_type* output_buffer,
atomicT* num_matches,
OutputIt output_begin,
KeyEqual key_equal = KeyEqual{}) noexcept;
/**
 * @brief Retrieves all the matches of a given key contained in multimap with per-flushing-CG
 * shared memory buffer.
 *
 * For key `k` existing in the map, copies `k` and all associated values to unspecified
 * locations in `[output_begin, output_end)`. If `k` does not have any matches, copies `k` and
 * `empty_value_sentinel()` into the output.
 *
 * @tparam buffer_size Size of the output buffer
 * @tparam FlushingCG Type of Cooperative Group used to flush output buffer
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt Device accessible output iterator whose `value_type` is
 * constructible from the map's `value_type`
 * @tparam KeyEqual Binary callable type
 * @param flushing_cg The Cooperative Group used to flush output buffer
 * @param probing_cg The Cooperative Group used to retrieve
 * @param k The key to search for
 * @param flushing_cg_counter Pointer to flushing_cg counter
 * @param output_buffer Shared memory buffer of the key/value pair sequence
 * @param num_matches Size of the output sequence
 * @param output_begin Beginning of the output sequence of key/value pairs
 * @param key_equal The binary callable used to compare two keys
 * for equality
 */
template <uint32_t buffer_size,
typename FlushingCG,
typename atomicT,
typename OutputIt,
typename KeyEqual = thrust::equal_to<key_type>>
__device__ __forceinline__ void retrieve_outer(
FlushingCG const& flushing_cg,
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
Key const& k,
uint32_t* flushing_cg_counter,
value_type* output_buffer,
atomicT* num_matches,
OutputIt output_begin,
KeyEqual key_equal = KeyEqual{}) noexcept;
/**
 * @brief Retrieves all the matches of a given pair
 *
 * For pair `p` with `n = pair_count(cg, p, pair_equal)` matching pairs, if `pair_equal(p,
 * slot)` returns true, stores `probe_key_begin[j] = p.first`, `probe_val_begin[j] = p.second`,
 * `contained_key_begin[j] = slot.first`, and `contained_val_begin[j] = slot.second` for an
 * unspecified value of `j` where `0 <= j < n`.
 *
 * Concurrent reads or writes to any of the output ranges results in undefined behavior.
 *
 * Behavior is undefined if the extent of any of the output ranges is less than `n`.
 *
 * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
 * `pair`'s `Key` type.
 * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
 * `pair`'s `Value` type.
 * @tparam OutputIt3 Device accessible output iterator whose `value_type` is constructible from
 * the map's `key_type`.
 * @tparam OutputIt4 Device accessible output iterator whose `value_type` is constructible from
 * the map's `mapped_type`.
 * @tparam PairEqual Binary callable type
 * @param probing_cg The Cooperative Group used to retrieve
 * @param pair The pair to search for
 * @param probe_key_begin Beginning of the output sequence of the matched probe keys
 * @param probe_val_begin Beginning of the output sequence of the matched probe values
 * @param contained_key_begin Beginning of the output sequence of the matched contained keys
 * @param contained_val_begin Beginning of the output sequence of the matched contained values
 * @param pair_equal The binary callable used to compare two pairs for equality
 */
template <typename OutputIt1,
typename OutputIt2,
typename OutputIt3,
typename OutputIt4,
typename PairEqual>
__device__ __forceinline__ void pair_retrieve(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
value_type const& pair,
OutputIt1 probe_key_begin,
OutputIt2 probe_val_begin,
OutputIt3 contained_key_begin,
OutputIt4 contained_val_begin,
PairEqual pair_equal) noexcept;
/**
 * @brief Retrieves all the matches of a given pair contained in multimap with per-flushing-CG
 * shared memory buffer.
 *
 * For pair `p`, if pair_equal(p, slot[j]) returns true, copies `p` to unspecified locations
 * in `[probe_output_begin, probe_output_end)` and copies slot[j] to unspecified locations in
 * `[contained_output_begin, contained_output_end)`.
 *
 * @tparam buffer_size Size of the output buffer
 * @tparam FlushingCG Type of Cooperative Group used to flush output buffer
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
 * `InputIt`s `value_type`.
 * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
 * the map's `value_type`.
 * @tparam PairEqual Binary callable type
 * @param flushing_cg The Cooperative Group used to flush output buffer
 * @param probing_cg The Cooperative Group used to retrieve
 * @param pair The pair to search for
 * @param warp_counter Pointer to the flushing CG counter
 * @param probe_output_buffer Buffer of the matched probe pair sequence
 * @param contained_output_buffer Buffer of the matched contained pair sequence
 * @param num_matches Size of the output sequence
 * @param probe_output_begin Beginning of the output sequence of the matched probe pairs
 * @param contained_output_begin Beginning of the output sequence of the matched contained
 * pairs
 * @param pair_equal The binary callable used to compare two pairs for equality
 */
template <uint32_t buffer_size,
typename FlushingCG,
typename atomicT,
typename OutputIt1,
typename OutputIt2,
typename PairEqual>
__device__ __forceinline__ void pair_retrieve(
FlushingCG const& flushing_cg,
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
value_type const& pair,
uint32_t* warp_counter,
value_type* probe_output_buffer,
value_type* contained_output_buffer,
atomicT* num_matches,
OutputIt1 probe_output_begin,
OutputIt2 contained_output_begin,
PairEqual pair_equal) noexcept;
/**
 * @brief Retrieves all the matches of a given pair
 *
 * For pair `p` with `n = pair_count_outer(cg, p, pair_equal)` matching pairs, if `pair_equal(p,
 * slot)` returns true, stores `probe_key_begin[j] = p.first`, `probe_val_begin[j] = p.second`,
 * `contained_key_begin[j] = slot.first`, and `contained_val_begin[j] = slot.second` for an
 * unspecified value of `j` where `0 <= j < n`. If `p` does not have any matches, stores
 * `probe_key_begin[0] = p.first`, `probe_val_begin[0] = p.second`, `contained_key_begin[0] =
 * empty_key_sentinel`, and `contained_val_begin[0] = empty_value_sentinel`.
 *
 * Concurrent reads or writes to any of the output ranges results in undefined behavior.
 *
 * Behavior is undefined if the extent of any of the output ranges is less than `n`.
 *
 * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
 * `pair`'s `Key` type.
 * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
 * `pair`'s `Value` type.
 * @tparam OutputIt3 Device accessible output iterator whose `value_type` is constructible from
 * the map's `key_type`.
 * @tparam OutputIt4 Device accessible output iterator whose `value_type` is constructible from
 * the map's `mapped_type`.
 * @tparam PairEqual Binary callable type
 * @param probing_cg The Cooperative Group used to retrieve
 * @param pair The pair to search for
 * @param probe_key_begin Beginning of the output sequence of the matched probe keys
 * @param probe_val_begin Beginning of the output sequence of the matched probe values
 * @param contained_key_begin Beginning of the output sequence of the matched contained keys
 * @param contained_val_begin Beginning of the output sequence of the matched contained values
 * @param pair_equal The binary callable used to compare two pairs for equality
 */
template <typename OutputIt1,
typename OutputIt2,
typename OutputIt3,
typename OutputIt4,
typename PairEqual>
__device__ __forceinline__ void pair_retrieve_outer(
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
value_type const& pair,
OutputIt1 probe_key_begin,
OutputIt2 probe_val_begin,
OutputIt3 contained_key_begin,
OutputIt4 contained_val_begin,
PairEqual pair_equal) noexcept;
/**
 * @brief Retrieves all the matches of a given pair contained in multimap with per-flushing-CG
 * shared memory buffer.
 *
 * For pair `p`, if pair_equal(p, slot[j]) returns true, copies `p` to unspecified locations
 * in `[probe_output_begin, probe_output_end)` and copies slot[j] to unspecified locations in
 * `[contained_output_begin, contained_output_end)`. If `p` does not have any matches, copies
 * `p` and a pair of `empty_key_sentinel` and `empty_value_sentinel` into the output.
 *
 * @tparam buffer_size Size of the output buffer
 * @tparam FlushingCG Type of Cooperative Group used to flush output buffer
 * @tparam atomicT Type of atomic storage
 * @tparam OutputIt1 Device accessible output iterator whose `value_type` is constructible from
 * `InputIt`s `value_type`.
 * @tparam OutputIt2 Device accessible output iterator whose `value_type` is constructible from
 * the map's `value_type`.
 * @tparam PairEqual Binary callable type
 * @param flushing_cg The Cooperative Group used to flush output buffer
 * @param probing_cg The Cooperative Group used to retrieve
 * @param pair The pair to search for
 * @param flushing_cg_counter Pointer to the flushing CG counter
 * @param probe_output_buffer Buffer of the matched probe pair sequence
 * @param contained_output_buffer Buffer of the matched contained pair sequence
 * @param num_matches Size of the output sequence
 * @param probe_output_begin Beginning of the output sequence of the matched probe pairs
 * @param contained_output_begin Beginning of the output sequence of the matched contained
 * pairs
 * @param pair_equal The binary callable used to compare two pairs for equality
 */
template <uint32_t buffer_size,
typename FlushingCG,
typename atomicT,
typename OutputIt1,
typename OutputIt2,
typename PairEqual>
__device__ __forceinline__ void pair_retrieve_outer(
FlushingCG const& flushing_cg,
cooperative_groups::thread_block_tile<ProbeSequence::cg_size> const& probing_cg,
value_type const& pair,
uint32_t* flushing_cg_counter,
value_type* probe_output_buffer,
value_type* contained_output_buffer,
atomicT* num_matches,
OutputIt1 probe_output_begin,
OutputIt2 contained_output_begin,
PairEqual pair_equal) noexcept;
private:
using device_view_base<device_view_impl>::impl_;
}; // class device_view
/**
 * @brief Return the raw (non-atomic) pointer of the hash map slots.
 */
value_type* raw_slots() noexcept
{
// Unsafe access to the slots stripping away their atomic-ness to allow non-atomic access.
// TODO: to be replace by atomic_ref when it's ready
value_type* raw = reinterpret_cast<value_type*>(slots_.get());
return raw;
}
/**
 * @brief Return the raw (non-atomic) pointer of the hash map slots (const overload).
 */
value_type const* raw_slots() const noexcept
{
// Unsafe access to the slots stripping away their atomic-ness to allow non-atomic access.
// TODO: to be replace by atomic_ref when it's ready
value_type const* raw = reinterpret_cast<value_type const*>(slots_.get());
return raw;
}
/**
 * @brief Gets the maximum number of elements the hash map can hold.
 *
 * @return The maximum number of elements the hash map can hold
 */
std::size_t get_capacity() const noexcept
{
return capacity_;
}
/**
 * @brief Gets the number of elements in the hash map.
 *
 * Defined out-of-line (see static_multimap.inl).
 *
 * @param stream CUDA stream used to get the number of inserted elements
 * @return The number of elements in the map
 */
std::size_t get_size(cudaStream_t stream = 0) const noexcept;
/**
 * @brief Gets the load factor of the hash map.
 *
 * Defined out-of-line (see static_multimap.inl).
 *
 * @param stream CUDA stream used to get the load factor
 * @return The load factor of the hash map
 */
float get_load_factor(cudaStream_t stream = 0) const noexcept;
/**
 * @brief Gets the sentinel value used to represent an empty key slot.
 *
 * @return The sentinel value used to represent an empty key slot
 */
Key get_empty_key_sentinel() const noexcept
{
return empty_key_sentinel_;
}
/**
 * @brief Gets the sentinel value used to represent an empty value slot.
 *
 * @return The sentinel value used to represent an empty value slot
 */
Value get_empty_value_sentinel() const noexcept
{
return empty_value_sentinel_;
}
/**
 * @brief Constructs a device_view object based on the members of the `static_multimap`
 * object.
 *
 * @return A device_view object based on the members of the `static_multimap` object
 */
device_view get_device_view() const noexcept
{
return device_view{slots_.get(), capacity_, empty_key_sentinel_, empty_value_sentinel_};
}
/**
 * @brief Constructs a device_mutable_view object based on the members of the
 * `static_multimap` object
 *
 * @return A device_mutable_view object based on the members of the `static_multimap` object
 */
device_mutable_view get_device_mutable_view() const noexcept
{
return device_mutable_view{slots_.get(), capacity_, empty_key_sentinel_, empty_value_sentinel_};
}
private:
// Owned storage and configuration of the multimap.
std::size_t capacity_{}; ///< Total number of slots
Key empty_key_sentinel_{}; ///< Key value that represents an empty slot
Value empty_value_sentinel_{}; ///< Initial value of empty slot
slot_allocator_type slot_allocator_{}; ///< Allocator used to allocate slots
counter_allocator_type counter_allocator_{}; ///< Allocator used to allocate counters
counter_deleter delete_counter_; ///< Custom counter deleter
slot_deleter delete_slots_; ///< Custom slots deleter
std::unique_ptr<atomic_ctr_type, counter_deleter> d_counter_{}; ///< Preallocated device counter
std::unique_ptr<pair_atomic_type, slot_deleter> slots_{}; ///< Pointer to flat slots storage
}; // class static_multimap
} // namespace cuco
#include <cuco/detail/static_multimap/device_view_impl.inl>
#include <cuco/detail/static_multimap/static_multimap.inl>
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include <assert.h>
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include "bits/datamex.hpp"
#include "bits/mexutils.h"
// Global verbosity level; raised by the 'Verbose' option, consumed by LOG().
static int verbosity = 0 ;
/* option codes */
enum {
opt_num_threads = 0,
opt_prefetch,
opt_resize,
opt_pack,
opt_gpu,
opt_verbose,
opt_subtract_average,
opt_crop_size,
opt_crop_location,
opt_crop_anisotropy,
opt_flip,
opt_contrast,
opt_saturation,
opt_brightness,
opt_interpolation,
} ;
/* options: {name, number of arguments, option code} */
VLMXOption options [] = {
{"NumThreads", 1, opt_num_threads },
{"Prefetch", 0, opt_prefetch },
{"Verbose", 0, opt_verbose },
{"Resize", 1, opt_resize },
{"Pack", 0, opt_pack },
{"GPU", 0, opt_gpu },
{"SubtractAverage", 1, opt_subtract_average },
{"CropAnisotropy", 1, opt_crop_anisotropy },
{"CropSize", 1, opt_crop_size },
{"CropLocation", 1, opt_crop_location },
{"Flip", 0, opt_flip },
{"Brightness", 1, opt_brightness },
{"Contrast", 1, opt_contrast },
{"Saturation", 1, opt_saturation },
{"Interpolation", 1, opt_interpolation },
{0, 0, 0 }
} ;
/* indexes of MEX input arguments */
enum {
IN_FILENAMES = 0, IN_END
} ;
/* indexes of MEX output arguments */
enum {
OUT_IMAGES = 0, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
// RAII logger: callers stream into getStream() and the accumulated
// message is printed (with a trailing newline) when the Logger object
// goes out of scope.
class Logger
{
public:
Logger() { }
~Logger()
{
// Emit the whole message at once on destruction.
printf("%s\n", stringStream.str().c_str()) ;
//fflush(stdout) ;
}
std::ostringstream & getStream() { return stringStream ; }
protected:
std::ostringstream stringStream ;
private:
// Non-copyable: copying would duplicate the output on destruction.
Logger(const Logger&) ;
Logger& operator= (const Logger&) ;
} ;
}
/* Unconditionally log an error message (prefixed with the function name).
   Bug fix: the prefix previously said "[info]", mislabelling errors. */
#define LOGERROR \
vl::Logger().getStream() \
<<"[error] "<<__func__<<"::"
/* Log an informational message only when the global verbosity is >= level. */
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
/* ---------------------------------------------------------------- */
/* Batch */
/* ---------------------------------------------------------------- */
// A Batch owns a list of image-loading work items and coordinates a pool of
// reader threads (see ReaderTask, declared friend below) that borrow items,
// decode/resize images, and return them. Optionally packs all images into a
// single (possibly GPU) array.
class Batch
{
public:
// One image-loading task: file name, decode state, output geometry,
// augmentation parameters, and the destination CPU/GPU arrays.
struct Item
{
enum State {
prefetch,
fetch,
ready
} state ;
Batch const & batch ;
std::string name ;
vl::ImageShape shape ;
mxArray * array ;
vl::ErrorCode error ;
char errorMessage [512] ;
bool borrowed ;
vl::MexTensor cpuArray ;
vl::MexTensor gpuArray ;
int index ;
size_t outputWidth ;
size_t outputHeight ;
size_t outputNumChannels ;
size_t cropWidth ;
size_t cropHeight ;
size_t cropOffsetX ;
size_t cropOffsetY ;
bool flip ;
vl::impl::ImageResizeFilter::FilterType filterType ;
float brightnessShift [3] ;
float contrastShift ;
float saturationShift ;
Item(Batch const & batch) ;
mxArray * relinquishArray() ;
} ;
enum ResizeMethod {
noResize,
resizeShortestSide,
fixedSize
} ;
enum PackingMethod {
individualArrays,
singleArray
};
enum CropLocation {
cropCenter,
cropRandom
} ;
Batch(vl::MexContext & context) ;
~Batch() ;
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode registerItem(std::string const & name) ;
size_t getNumberOfItems() const ;
Item * getItem(int index) ;
void clear() ;
void sync() const ;
vl::ErrorCode prefetch() ;
mxArray * relinquishArray() ;
void setGpuMode(bool gpu) ;
void setPackingMethod(PackingMethod method) ;
void setResizeMethod(ResizeMethod method, int height, int width) ;
void setAverage(double average []) ;
void setAverageImage(float const * image) ;
void setColorDeviation(double brightness [], double contrast, double saturation) ;
void setFlipMode(bool x) ;
void setCropAnisotropy(double minAnisotropy, double maxAnisotropy) ;
void setCropSize(double minSize, double maxSize) ;
void setCropLocation(CropLocation location) ;
void setFilterType(vl::impl::ImageResizeFilter::FilterType type) ;
PackingMethod getPackingMethod() const ;
Item * borrowNextItem() ;
void returnItem(Item * item) ;
private:
vl::MexContext & context ;
// mutex protects all mutable state below; the two condition variables
// coordinate worker threads (borrow) and the main thread (completion).
tthread::mutex mutable mutex ;
tthread::condition_variable mutable waitNextItemToBorrow ;
tthread::condition_variable mutable waitCompletion ;
bool quit ;
typedef std::vector<Item*> items_t ;
items_t items ;
int nextItem ;
int numReturnedItems ;
enum PackingMethod packingMethod ;
enum ResizeMethod resizeMethod ;
int resizeHeight ;
int resizeWidth ;
bool gpuMode ;
double average [3] ;
float * averageImage ;
double contrastDeviation ;
double saturationDeviation ;
double brightnessDeviation [9] ;
double minCropAnisotropy ;
double maxCropAnisotropy ;
double minCropSize ;
double maxCropSize ;
CropLocation cropLocation ;
bool flipMode ;
vl::impl::ImageResizeFilter::FilterType filterType ;
vl::MexTensor cpuPack ;
vl::MexTensor gpuPack ;
friend class ReaderTask ;
int gpuDevice ;
#if ENABLE_GPU
bool cudaStreamInitialized ;
cudaStream_t cudaStream ;
float * cpuPinnedPack ;
size_t cpuPinnedPackSize ;
#endif
} ;
// Construct an item bound to its parent batch. All members are initialized
// in declaration order; arrays are tied to the batch's MEX context.
Batch::Item::Item(Batch const & batch)
: state(ready),
batch(batch),
array(NULL),
error(vl::VLE_Success),
borrowed(false),
cpuArray(batch.context),
gpuArray(batch.context),
flip(false)
{
// Bug fix: the arguments were previously swapped as
// memset(errorMessage, sizeof(errorMessage), 0), which clears zero bytes
// and leaves the buffer uninitialized. memset is (ptr, value, count).
memset(errorMessage, 0, sizeof(errorMessage)) ;
}
// Hand over ownership of the decoded image array (GPU or CPU, depending on
// the batch's mode) to the caller.
mxArray * Batch::Item::relinquishArray()
{
return batch.gpuMode ? gpuArray.relinquish() : cpuArray.relinquish() ;
}
// Hand over ownership of the packed array (GPU or CPU, depending on the
// current mode) to the caller.
mxArray * Batch::relinquishArray()
{
return gpuMode ? gpuPack.relinquish() : cpuPack.relinquish() ;
}
// Construct an idle batch (quit=true until init() is called). Members are
// listed in declaration order to match actual initialization order.
Batch::Batch(vl::MexContext & context)
: context(context),
quit(true),
nextItem(0),
numReturnedItems(0),
packingMethod(individualArrays),
resizeMethod(noResize),
gpuMode(false),
averageImage(NULL),
cpuPack(context),
gpuPack(context),
gpuDevice(-1)
#if ENABLE_GPU
// Bug fix: cudaStreamInitialized was never initialized, but init() reads it
// (to decide whether to destroy the stream) before it is ever assigned —
// an uninitialized read that could destroy a garbage stream handle.
, cudaStreamInitialized(false),
cpuPinnedPack(NULL),
cpuPinnedPackSize(0)
#endif
{ }
// Destructor: release resources and wake any waiting worker threads.
Batch::~Batch() { finalize() ; }
// Number of items registered in the current batch.
size_t Batch::getNumberOfItems() const
{
size_t const numItems = items.size() ;
return numItems ;
}
// Access the item at the given position (no bounds checking, as before).
Batch::Item * Batch::getItem(int index)
{
Item * item = items[index] ;
return item ;
}
// Reset the batch to a clean default state, releasing any state left over
// from a previous batch. Returns vl::VLE_Success.
vl::ErrorCode Batch::init()
{
finalize() ;
LOG(2)<<"beginning batch" ;
quit = false ;
nextItem = 0 ;
numReturnedItems = 0 ;
// Restore defaults
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
contrastDeviation = 0. ;
saturationDeviation = 0. ;
memset(average, 0, sizeof(average)) ;
// Bug fix: previously averageImage was set to NULL without freeing the
// buffer allocated by setAverageImage(), leaking it on every re-init.
if (averageImage) {
free(averageImage) ;
averageImage = NULL ;
}
cropLocation = cropCenter ;
minCropSize = 1. ;
maxCropSize = 1. ;
minCropAnisotropy = 1. ;
maxCropAnisotropy = 1. ;
flipMode = false ;
filterType = vl::impl::ImageResizeFilter::kBilinear ;
packingMethod = individualArrays ;
resizeMethod = noResize ;
gpuMode = false ;
gpuDevice = -1 ;
#if ENABLE_GPU
// Destroy any stream created for the previous batch.
if (cudaStreamInitialized) {
cudaStreamDestroy(cudaStream) ;
cudaStreamInitialized = false ;
}
#endif
return vl::VLE_Success ;
}
// Tear down the current batch: wait for workers to return items, release
// pinned host memory, and signal the quit flag so blocked workers wake up.
void Batch::finalize()
{
LOG(2)<<"finalizing batch" ;
// Clear current batch (blocks until all borrowed items are returned).
clear() ;
// Release memory
#if ENABLE_GPU
if (cpuPinnedPack) {
cudaFreeHost(cpuPinnedPack) ;
cpuPinnedPack = 0 ;
cpuPinnedPackSize = 0 ;
}
#endif
// Signal waiting threads that we are quitting
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
quit = true ;
waitNextItemToBorrow.notify_all() ;
}
}
// Called by reader threads: block until there is an item to work on (or the
// batch is quitting) and return it marked as borrowed. Returns NULL when the
// batch is shutting down, which tells the worker to exit its loop.
Batch::Item * Batch::borrowNextItem()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (true) {
if (quit) { return NULL ; }
if (nextItem < items.size()) {
Item * item = items[nextItem] ;
// Only hand out items that still need work (prefetch or fetch state).
// NOTE(review): if items[nextItem] were already 'ready' this loop would
// wait without advancing nextItem; presumably unreachable because
// registerItem()/prefetch() set states before notifying — confirm.
if (item->state != Item::ready) {
item->borrowed = true ;
nextItem ++ ;
return item ;
}
}
waitNextItemToBorrow.wait(mutex) ;
}
}
// Called by reader threads when done with an item: mark it ready and wake
// waiters. When the last item of a packed GPU batch is returned, also start
// the asynchronous host-to-device copy of the whole pinned pack.
void Batch::returnItem(Batch::Item * item)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
numReturnedItems ++ ;
if (item->state == Item::fetch &&
numReturnedItems == items.size() &&
packingMethod == singleArray &&
gpuMode) {
#if ENABLE_GPU
LOG(2) << "push to GPU the pack" ;
cudaError_t cerror ;
// Async copy on the batch stream; completion is awaited in sync().
cerror = cudaMemcpyAsync (gpuPack.getMemory(),
cpuPinnedPack,
gpuPack.getNumElements() * sizeof(float),
cudaMemcpyHostToDevice,
cudaStream) ;
if (cerror != cudaSuccess) {
item->error = vl::VLE_Cuda ;
// NOTE(review): "cudaMemcpyAsnyc" is a typo in this runtime message.
snprintf(item->errorMessage, sizeof(item->errorMessage),
"cudaMemcpyAsnyc : '%s'", cudaGetErrorString(cerror)) ;
}
#endif
}
item->borrowed = false ;
item->state = Batch::Item::ready ;
waitCompletion.notify_all() ;
}
// Set (or clear, when image == NULL) the average image subtracted from every
// fetched picture. The image must be resizeHeight x resizeWidth x 3 and is
// copied, so the caller keeps ownership of its buffer. Requires the batch to
// be in fixedSize resize mode when a non-NULL image is supplied.
void Batch::setAverageImage(float const * image)
{
  // Bug fix: the original leaked the previously stored image when a new one
  // was supplied; always release the old buffer first.
  if (averageImage) {
    free(averageImage) ;
    averageImage = NULL ;
  }
  if (image == NULL) {
    return ;
  }
  assert (resizeMethod == fixedSize) ;
  size_t numBytes = sizeof(float) * resizeHeight * resizeWidth * 3 ;
  averageImage = (float*)malloc(numBytes) ;
  // Guard against allocation failure instead of memcpy-ing into NULL.
  if (averageImage) {
    memcpy(averageImage, image, numBytes) ;
  }
}
// Delete all items, waiting first for worker threads to return any borrowed
// ones, and reset the batch cursor. Also drops the average image.
void Batch::clear()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Stop threads from getting more tasks. After this any call to borrowItem() by a worker will
// stop in a waiting state. Thus, we simply wait for all of them to return their items.
nextItem = (int)items.size() ;
// Wait for all thread to return their items
for (int i = 0 ; i < items.size() ; ++i) {
while (items[i]->borrowed) {
waitCompletion.wait(mutex) ;
}
}
for (int i = 0 ; i < items.size() ; ++i) {
delete items[i] ;
}
items.clear() ;
// Clear average image
setAverageImage(NULL) ;
// At the end of the current (empty) list
nextItem = 0 ;
numReturnedItems = 0 ;
}
// Block until every item has been processed and returned by the workers,
// then (in GPU mode) until the asynchronous CUDA copies on the batch stream
// have completed.
void Batch::sync() const
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Wait for threads to complete work for all items.
// Note that it is not enough to check that threads are all in a
// "done" state as this does not mean that all work has been done yet.
// Instead, we look at the number of items returned.
while (numReturnedItems < items.size()) {
waitCompletion.wait(mutex) ;
}
if (gpuMode) {
#if ENABLE_GPU
cudaError_t cerror ;
cerror = cudaStreamSynchronize(cudaStream) ;
if (cerror != cudaSuccess) {
LOGERROR << "CUDA error while synchronizing a stream: '" << cudaGetErrorString(cerror) << '\'' ;
}
#endif
}
}
// Append a new item for image <name>. The item starts in the prefetch state
// so that a reader thread first probes the image shape.
vl::ErrorCode Batch::registerItem(std::string const & name)
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  Item * newItem = new Item(*this) ;
  newItem->index = (int)items.size() ;
  newItem->name = name ;
  newItem->state = Item::prefetch ;
  items.push_back(newItem) ;
  return vl::VLE_Success ;
}
// Enable or disable GPU mode. When enabling, record the current CUDA device
// and lazily create a non-blocking stream used for all async copies.
void Batch::setGpuMode(bool gpu)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
#if ENABLE_GPU
if (gpu) {
cudaGetDevice(&gpuDevice) ;
if (!cudaStreamInitialized) {
cudaError_t cerror ;
// Non-blocking: does not synchronize with the legacy default stream.
cerror = cudaStreamCreateWithFlags(&cudaStream, cudaStreamNonBlocking) ;
if (cerror != cudaSuccess) {
// On failure gpuMode is still set below; later copies will also fail.
LOGERROR
<< "CUDA error while creating a stream '"
<< cudaGetErrorString(cerror) << '\"' ;
} else {
cudaStreamInitialized = true ;
}
}
}
#endif
gpuMode = gpu ;
}
// Select how images are resized and record the target geometry.
void Batch::setResizeMethod(Batch::ResizeMethod method, int height, int width)
{
  this->resizeMethod = method ;
  this->resizeHeight = height ;
  this->resizeWidth = width ;
}
// Choose between one MATLAB array per image and a single packed 4D array.
void Batch::setPackingMethod(Batch::PackingMethod method)
{
  assert(method == individualArrays || method == singleArray) ;
  this->packingMethod = method ;
}
// Current packing method (individual arrays or one packed array).
Batch::PackingMethod Batch::getPackingMethod() const
{
  return this->packingMethod ;
}
// Store the per-channel average color subtracted from every image; copies
// exactly sizeof(this->average) bytes from the caller's array.
void Batch::setAverage(double average [])
{
  double const * source = average ;
  ::memcpy(this->average, source, sizeof(this->average)) ;
}
// Record the random color-jitter parameters: a 3x3 brightness deviation
// matrix plus scalar contrast and saturation deviations.
void Batch::setColorDeviation(double brightness [], double contrast, double saturation)
{
  contrastDeviation = contrast ;
  saturationDeviation = saturation ;
  ::memcpy(brightnessDeviation, brightness, sizeof(brightnessDeviation)) ;
}
// Select the resampling filter used when images are resized.
void Batch::setFilterType(vl::impl::ImageResizeFilter::FilterType type)
{
  this->filterType = type ;
}
void Batch::setFlipMode(bool x)
{
flipMode = x ;
}
// Set the admissible crop anisotropy (aspect-ratio jitter) range.
// Requires 0 <= minAnisotropy <= 1 and minAnisotropy <= maxAnisotropy.
void Batch::setCropAnisotropy(double minAnisotropy, double maxAnisotropy)
{
  assert(0.0 <= minAnisotropy && minAnisotropy <= 1.0) ;
  assert(minAnisotropy <= maxAnisotropy) ;
  minCropAnisotropy = minAnisotropy ;
  maxCropAnisotropy = maxAnisotropy ;
}
// Set the admissible relative crop size range; both bounds must lie in
// [0,1] with minSize <= maxSize.
void Batch::setCropSize(double minSize, double maxSize)
{
  assert(0.0 <= minSize && minSize <= 1.0) ;
  assert(0.0 <= maxSize && maxSize <= 1.0) ;
  assert(minSize <= maxSize) ;
  minCropSize = minSize ;
  maxCropSize = maxSize ;
}
// Choose whether crops are centered or drawn at a random location.
void Batch::setCropLocation(CropLocation location)
{
  assert(location == cropCenter || location == cropRandom) ;
  this->cropLocation = location ;
}
//void Batch::getItemTransformation(Item * item)
//{
//
//}
// Second phase of the batch pipeline: after workers have probed every image
// shape, allocate output storage and draw all the random augmentation
// parameters (crop geometry, flip, color jitter) for each item, then flip
// every item into the 'fetch' state and wake the workers again.
vl::ErrorCode Batch::prefetch()
{
// Prod and then wait for reader threads to initialize the shape of the images
// and then perform the required allocations.
waitNextItemToBorrow.notify_all() ;
sync() ;
// In packing mode, preallocate all memory here.
if (packingMethod == singleArray) {
assert(resizeMethod == fixedSize) ;
vl::TensorShape shape(resizeHeight, resizeWidth, 3, getNumberOfItems()) ;
if (gpuMode) {
#if ENABLE_GPU
gpuPack.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
gpuPack.makePersistent() ;
size_t memSize = shape.getNumElements() * sizeof(float) ;
// Grow the pinned staging buffer only when it is too small.
if (cpuPinnedPackSize < memSize) {
if (cpuPinnedPack) {
cudaFreeHost(cpuPinnedPack) ;
}
cudaMallocHost(&cpuPinnedPack, memSize) ;
cpuPinnedPackSize = memSize ;
}
#endif
} else {
cpuPack.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
cpuPack.makePersistent() ;
}
}
// Get ready to reprocess all items.
nextItem = 0 ;
numReturnedItems = 0 ;
for (int i = 0 ; i < getNumberOfItems() ; ++ i) {
Batch::Item * item = getItem(i) ;
if (item->error == vl::VLE_Success) {
if (verbosity >= 2) {
mexPrintf("%20s: %d x %d x %d\n", item->name.c_str(), item->shape.width, item->shape.height, item->shape.depth) ;
}
} else {
mexPrintf("%20s: error '%s'\n", item->name.c_str(), item->errorMessage) ;
}
// Determine the shape of (height and width) of the output image. This is either
// the same as the input image, or with a fixed size for the shortest side,
// or a fixed size for both sides.
int outputHeight ;
int outputWidth ;
double cropHeight ;
double cropWidth ;
int dx ;
int dy ;
switch (resizeMethod) {
case noResize:
outputHeight = (int)item->shape.height ;
outputWidth = (int)item->shape.width ;
break ;
case resizeShortestSide: {
// resizeHeight holds the single requested side length in this mode.
double scale1 = (double)resizeHeight / item->shape.width ;
double scale2 = (double)resizeHeight / item->shape.height ;
double scale = std::max(scale1, scale2) ;
outputHeight = (int)std::max(1.0, round(scale * item->shape.height)) ;
outputWidth = (int)std::max(1.0, round(scale * item->shape.width)) ;
break ;
}
case fixedSize:
outputHeight = resizeHeight ;
outputWidth = resizeWidth ;
break ;
}
// Determine the aspect ratio of the crop in the input image.
{
double anisotropyRatio = 1.0 ;
if (minCropAnisotropy == 0 || maxCropAnisotropy == 0) {
// Stretch crop to have the same shape as the input.
double inputAspect = (double)item->shape.width / item->shape.height ;
double outputAspect = (double)outputWidth / outputHeight ;
anisotropyRatio = inputAspect / outputAspect ;
} else {
// Sample log-uniformly in [minCropAnisotropy, maxCropAnisotropy].
double z = (double)rand() / RAND_MAX ;
double a = log(maxCropAnisotropy) ;
double b = log(minCropAnisotropy) ;
anisotropyRatio = exp(z * (b - a) + a) ;
}
cropWidth = outputWidth * sqrt(anisotropyRatio) ;
cropHeight = outputHeight / sqrt(anisotropyRatio) ;
}
// Determine the crop size.
{
double scale = std::min(item->shape.width / cropWidth,
item->shape.height / cropHeight) ;
double z = (double)rand() / RAND_MAX ;
#if 1
// Sample uniformly in crop *area* rather than in side length.
double a = maxCropSize * maxCropSize ;
double b = minCropSize * minCropSize ;
double size = sqrt(z * (b - a) + a) ;
#else
double a = maxCropSize ;
double b = minCropSize ;
double size = z * (b - a) + a ;
#endif
cropWidth *= scale * size ;
cropHeight *= scale * size ;
}
int cropWidth_i = (int)std::min(round(cropWidth), (double)item->shape.width) ;
int cropHeight_i = (int)std::min(round(cropHeight), (double)item->shape.height) ;
// Determine the crop location.
{
dx = (int)item->shape.width - cropWidth_i ;
dy = (int)item->shape.height - cropHeight_i ;
switch (cropLocation) {
case cropCenter:
dx /= 2 ;
dy /= 2 ;
break ;
case cropRandom:
dx = rand() % (dx + 1) ;
dy = rand() % (dy + 1) ;
break ;
default:
LOGERROR << "cropLocation not set" ;
}
}
// Save.
item->outputWidth = outputWidth ;
item->outputHeight = outputHeight ;
item->outputNumChannels = (packingMethod == individualArrays) ? item->shape.depth : 3 ;
item->cropWidth = cropWidth_i ;
item->cropHeight = cropHeight_i ;
item->cropOffsetX = dx ;
item->cropOffsetY = dy ;
item->flip = flipMode && (rand() > RAND_MAX/2) ;
item->filterType = filterType ;
// Color processing.
item->saturationShift = (float)(1. + saturationDeviation * (2.*(double)rand()/RAND_MAX - 1.)) ;
item->contrastShift = (float)(1. + contrastDeviation * (2.*(double)rand()/RAND_MAX - 1.)) ;
{
int numChannels = (int)item->outputNumChannels ;
double w [3] ;
for (int i = 0 ; i < numChannels ; ++i) { w[i] = vl::randn() ; }
for (int i = 0 ; i < numChannels ; ++i) {
item->brightnessShift[i] = 0.f ;
for (int j = 0 ; j < numChannels ; ++j) {
// NOTE(review): this uses w[i] in every term, i.e. shift[i] =
// w[i] * sum_j B[i+3j]; if brightnessDeviation is meant as a mixing
// matrix applied to the random vector w, w[j] may have been
// intended — confirm against the upstream implementation.
item->brightnessShift[i] += (float)(brightnessDeviation[i + 3*j] * w[i]) ;
}
}
}
LOG(2)
<< "input (" << item->shape.width << " x " << item->shape.height << " x " << item->shape.depth << ") "
<< "output (" << item->outputWidth << " x " << item->outputHeight << " x " << item->outputNumChannels << ") "
<< "crop (" << item->cropWidth << " x " << item->cropHeight << ") "
<< "offset (" << item->cropOffsetX << ", " << item->cropOffsetY << ")" ;
if (packingMethod == individualArrays) {
vl::TensorShape shape(outputHeight, outputWidth, item->outputNumChannels, 1) ;
item->cpuArray.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
item->cpuArray.makePersistent() ;
if (gpuMode) {
item->gpuArray.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
item->gpuArray.makePersistent() ;
}
}
// Ready to fetch
item->state = Item::fetch ;
}
// Notify that we are ready to fetch
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
waitNextItemToBorrow.notify_all() ;
}
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* ReaderTask */
/* ---------------------------------------------------------------- */
// One image-reader worker: owns a thread, a vl::ImageReader, and two grow-only
// scratch buffers, and processes Batch items until the batch quits.
class ReaderTask
{
public:
ReaderTask() ;
~ReaderTask() { finalize() ; }
vl::ErrorCode init(Batch * batch, int index) ;
void finalize() ;
private:
// Reader identity and collaborators.
int index ;
Batch * batch ;
tthread::thread * thread ;
vl::ImageReader * reader ;
// Static trampoline passed to tthread; forwards to entryPoint().
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
// Scratch buffer accessor (slot 0: decoded image, slot 1: resize temp).
void * getBuffer(int index, size_t size) ;
// Device last selected on this thread (compared against batch->gpuDevice).
int gpuDevice ;
private:
// Non-copyable.
ReaderTask(ReaderTask const &) ;
ReaderTask & operator= (ReaderTask const &) ;
struct Buffer {
void * memory ;
size_t size ;
} buffers [2] ;
} ;
void ReaderTask::threadEntryPoint(void * thing)
{
((ReaderTask*)thing)->entryPoint() ;
}
// Construct an idle reader task with empty scratch buffers.
// Bug fix: 'index' and 'gpuDevice' were previously left uninitialized;
// entryPoint() compares gpuDevice against the batch's device before any
// cudaGetDevice() call, so reading it was undefined behavior.
ReaderTask::ReaderTask()
: index(-1), batch(NULL), thread(NULL), reader(NULL), gpuDevice(-1)
{
memset(buffers, 0, sizeof(buffers)) ;
}
// Return a scratch buffer of at least <size> bytes for slot <index> (0 or 1),
// growing the allocation as needed. The memory is owned by the task and
// released in finalize(). Returns NULL if allocation fails.
void * ReaderTask::getBuffer(int index, size_t size)
{
  if (buffers[index].size < size) {
    if (buffers[index].memory) {
      free(buffers[index].memory) ;
    }
    buffers[index].memory = malloc(size) ;
    // Robustness fix: on allocation failure record a zero size so a later
    // call retries instead of handing out NULL while claiming capacity.
    buffers[index].size = buffers[index].memory ? size : 0 ;
  }
  return buffers[index].memory ;
}
// Worker main loop: repeatedly borrow an item from the batch and either probe
// its shape ('prefetch' state) or decode, crop, resize, color-jitter and
// optionally upload it ('fetch' state), until borrowNextItem() returns NULL.
void ReaderTask::entryPoint()
{
// NOTE(review): "task staring" is a typo in this log string ("starting").
LOG(2) << "reader " << index << " task staring" ;
while (true) {
#if ENABLE_GPU
// Make sure this thread's CUDA device matches the batch's device.
if (batch->gpuMode && batch->gpuDevice != gpuDevice) {
LOG(2) << "reader " << index << " setting GPU device" ;
cudaSetDevice(batch->gpuDevice) ;
cudaGetDevice(&gpuDevice) ;
}
#endif
Batch::Item * item = batch->borrowNextItem() ;
LOG(3) << "borrowed " << item ;
if (item == NULL) { break ; }
if (item->error != vl::VLE_Success) {
batch->returnItem(item) ;
continue ;
}
switch (item->state) {
case Batch::Item::prefetch: {
// Phase 1: only read the image header to learn its dimensions.
item->error = reader->readShape(item->shape, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
}
break ;
}
case Batch::Item::fetch: {
// Get the CPU buffer that will hold the pixels.
float * outputPixels;
if (batch->getPackingMethod() == Batch::individualArrays) {
outputPixels = (float*)item->cpuArray.getMemory() ;
} else {
if (batch->gpuMode) {
#if ENABLE_GPU
outputPixels = batch->cpuPinnedPack ;
#else
// NOTE(review): only the message is set here, not item->error — the
// item will look successful to callers; confirm whether intended.
snprintf(item->errorMessage, sizeof(item->errorMessage), "GPU support not compiled.") ;
break;
#endif
} else {
outputPixels = (float*)batch->cpuPack.getMemory() ;
}
// Offset into the packed array for this item's slice.
outputPixels += item->outputHeight*item->outputWidth*3*item->index ;
}
// Read full image.
float * inputPixels = (float*)getBuffer(0,
item->shape.height *
item->shape.width *
item->shape.depth * sizeof(float)) ;
item->error = reader->readPixels(inputPixels, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
break ;
}
// Crop.
float * temp = (float*)getBuffer(1,
item->outputHeight *
item->shape.width *
item->shape.depth * sizeof(float)) ;
// Two separable passes; the second call presumably operates on the
// transposed intermediate so the "vertical" resizer performs the
// horizontal pass — TODO confirm against imageResizeVertical's contract.
vl::impl::imageResizeVertical(temp, inputPixels,
item->outputHeight,
item->shape.height,
item->shape.width,
item->shape.depth,
item->cropHeight,
item->cropOffsetY,
false, // flip
item->filterType) ;
vl::impl::imageResizeVertical(outputPixels, temp,
item->outputWidth,
item->shape.width,
item->outputHeight,
item->shape.depth,
item->cropWidth,
item->cropOffsetX,
item->flip,
item->filterType) ;
// Postprocess colors.
{
size_t inputNumChannels = item->shape.depth ;
size_t K = item->outputNumChannels ;
size_t n = item->outputHeight*item->outputWidth ;
if (batch->averageImage) {
// If there is an average image, then subtract it now.
// Grayscale images are expanded here to color if needed.
// Withouth an average image,
// they are expanded later.
for (size_t k = inputNumChannels ; k < K ; ++k) {
::memcpy(outputPixels + n*k, outputPixels, sizeof(float) * n) ;
}
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(batch->context,
n * item->outputNumChannels,
-1.0f,
batch->averageImage, 1,
outputPixels, 1) ;
inputNumChannels = K ;
}
// NOTE(review): dv/channels are sized 3 but indexed up to
// inputNumChannels = shape.depth; assumes depth <= 3 — TODO confirm.
float dv [3] ;
float * channels [3] ;
for (int k = 0 ; k < K ; ++k) {
channels[k] = outputPixels + n * k ;
}
// Per-channel additive shift: brightness jitter minus the scalar
// average, plus a contrast-dependent term based on the channel mean.
for (int k = 0 ; k < inputNumChannels ; ++k) {
dv[k] = item->brightnessShift[k] - batch->average[k] ;
if (item->contrastShift != 1.) {
double mu = 0. ;
float const * pixel = channels[k] ;
float const * end = channels[k] + n ;
while (pixel != end) { mu += (double)(*pixel++) ; }
mu /= (double)n ;
dv[k] += (float)((1.0 - (double)item->contrastShift) * mu) ;
}
}
// Mix the shifts across channels according to the saturation jitter.
{
float mu = 0.f ;
for (int k = 0 ; k < inputNumChannels ; ++k) {
mu += dv[k] ;
}
float a = item->saturationShift ;
float b = (1. - item->saturationShift) / inputNumChannels ;
for (int k = 0 ; k < inputNumChannels ; ++k) {
dv[k] = a * dv[k] + b * mu ;
}
}
// Apply contrast/saturation/shift in one pass over the pixels.
{
float const * end = channels[0] + n ;
float v [3] ;
if (K == 3 && inputNumChannels == 3) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1.f - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] ; mu += v[0] ;
v[1] = *channels[1] ; mu += v[1] ;
v[2] = *channels[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu + dv[0] ;
*channels[1]++ = a * v[1] + b * mu + dv[1] ;
*channels[2]++ = a * v[2] + b * mu + dv[2] ;
}
} else if (K == 3 && inputNumChannels == 1) {
// Grayscale expanded to three identical channels on the fly.
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1.f - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] ; mu += v[0] ;
v[1] = *channels[0] ; mu += v[1] ;
v[2] = *channels[0] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu + dv[0] ;
*channels[1]++ = a * v[1] + b * mu + dv[0] ;
*channels[2]++ = a * v[2] + b * mu + dv[0] ;
}
} else {
float const a = item->contrastShift ;
while (channels[0] != end) {
float v = *channels[0] ;
*channels[0]++ = a * v + dv[0] ;
}
}
}
}
// Copy to GPU.
if (batch->getPackingMethod() == Batch::individualArrays && batch->gpuMode) {
#if ENABLE_GPU
cudaError_t cerror ;
cerror = cudaMemcpyAsync (item->gpuArray.getMemory(),
outputPixels,
item->gpuArray.getNumElements() * sizeof(float),
cudaMemcpyHostToDevice,
batch->cudaStream) ;
if (cerror != cudaSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"CUDA error while copying memory from host to device: '%s'", cudaGetErrorString(cerror)) ;
break ;
}
#endif
}
break ;
}
case Batch::Item::ready:
break ;
}
batch->returnItem(item) ;
}
LOG(2) << "reader " << index << " task quitting" ;
}
void ReaderTask::finalize()
{
LOG(2)<<"finalizing reader " << index ;
if (thread) {
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
for (int i = 0 ; i < sizeof(buffers)/sizeof(Buffer) ; ++i) {
if (buffers[i].memory) {
free(buffers[i].memory) ;
buffers[i].memory = NULL ;
buffers[i].size = 0 ;
}
}
if (reader) {
delete reader ;
reader = NULL ;
}
index = -1 ;
batch = NULL ;
}
// Bind this task to a batch and start its worker thread.
// Bug fix: the original created the thread *before* allocating the image
// reader, so the new thread could dereference a NULL 'reader' if it won the
// race and borrowed an item immediately. Allocate the reader first.
vl::ErrorCode ReaderTask::init(Batch * batch, int index)
{
  finalize() ;
  this->batch = batch ;
  this->index = index ;
  reader = new vl::ImageReader() ;
  thread = new tthread::thread(threadEntryPoint, this) ;
  return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
// Global, persistent state shared across mexFunction calls: the MEX context,
// the single batch being prefetched/fetched, and the pool of reader threads.
vl::MexContext context ;
Batch batch(context) ;
bool batchIsInitialized = false ;
typedef std::vector<ReaderTask*> readers_t ;
readers_t readers ;
void atExit()
{
if (batchIsInitialized) {
batch.finalize() ;
batchIsInitialized = false ;
}
for (int r = 0 ; r < readers.size() ; ++r) {
readers[r]->finalize() ;
delete readers[r] ;
}
readers.clear() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
// MEX entry point: parse options, (re)create the reader pool, register the
// requested file names, run (or just prefetch) the batch, and marshal the
// decoded images back to MATLAB.
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
bool gpuMode = false ;
int requestedNumThreads = (int)readers.size() ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
// Option defaults, overwritten by the parsing loop below.
Batch::PackingMethod packingMethod = Batch::individualArrays ;
Batch::ResizeMethod resizeMethod = Batch::noResize ;
int resizeWidth = -1 ;
int resizeHeight = -1 ;
vl::ErrorCode error ;
double average [3] = {0.} ;
vl::MexTensor averageImage(context) ;
double brightnessDeviation [9] = {0.} ;
double saturationDeviation = 0. ;
double contrastDeviation = 0. ;
bool flipMode = false ;
Batch::CropLocation cropLocation = Batch::cropCenter ;
double minCropSize = 1.0, maxCropSize = 1.0 ;
double minCropAnisotropy = 1.0, maxCropAnisotropy = 1.0 ;
vl::impl::ImageResizeFilter::FilterType filterType = vl::impl::ImageResizeFilter::kBilinear ;
verbosity = 0 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "There is less than one argument.") ;
}
// Parse name/value options.
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_pack :
packingMethod = Batch::singleArray ;
break ;
case opt_gpu :
// NOTE(review): the rest of the file tests #if ENABLE_GPU (value),
// while this tests #ifndef (definedness); an ENABLE_GPU defined to 0
// would slip past this check — confirm the build always defines it 1.
#ifndef ENABLE_GPU
vlmxError(VLMXE_IllegalArgument, "Not compiled with GPU support.") ;
#endif
gpuMode = true ;
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
case opt_resize :
if (!vlmxIsPlainVector(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "RESIZE is not a plain vector.") ;
}
// One element: resize the shortest side; two: fixed output size.
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMethod = Batch::resizeShortestSide ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[0] ;
break ;
case 2 :
resizeMethod = Batch::fixedSize ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
vlmxError(VLMXE_IllegalArgument, "RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
vlmxError(VLMXE_IllegalArgument, "An element of RESIZE is smaller than one.") ;
}
break ;
case opt_brightness: {
if (!vlmxIsPlainMatrix(optarg, -1, -1)) {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS is not a plain matrix.") ;
}
size_t n = mxGetNumberOfElements(optarg) ;
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
// Scalar or 3-vector fill the diagonal of the 3x3 deviation matrix;
// 9 elements supply the full matrix.
if (n == 1) {
double x = mxGetPr(optarg)[0] ;
brightnessDeviation[0] = x;
brightnessDeviation[3] = x;
brightnessDeviation[8] = x;
} else if (n == 3) {
double const* x = mxGetPr(optarg) ;
brightnessDeviation[0] = x[0];
brightnessDeviation[3] = x[1];
brightnessDeviation[8] = x[2];
} else if (n == 9) {
memcpy(brightnessDeviation, mxGetPr(optarg), sizeof(brightnessDeviation)) ;
} else {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS does not have 1, 3, or 9 elements.") ;
}
break ;
}
case opt_saturation: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not in the [0,1] range..") ;
}
saturationDeviation = x ;
break ;
}
case opt_contrast: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not in the [0,1] range..") ;
}
contrastDeviation = x ;
break ;
}
case opt_crop_anisotropy: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY is not a plain scalar or vector with two components.") ;
}
// A scalar sets both bounds (the min() clamps the second index to 0).
minCropAnisotropy = mxGetPr(optarg)[0] ;
maxCropAnisotropy = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropAnisotropy < 0.0 || minCropAnisotropy > maxCropAnisotropy) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY values are not in the legal range.") ;
}
break ;
}
case opt_crop_size: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE is not a plain scalar or vector with two components.") ;
}
minCropSize = mxGetPr(optarg)[0] ;
maxCropSize = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropSize < 0.0 || minCropSize > maxCropSize || maxCropSize > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE values are not in the legal range.") ;
}
break ;
}
case opt_crop_location: {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION is not a string") ;
}
if (vlmxCompareToStringI(optarg, "random") == 0) {
cropLocation = Batch::cropRandom ;
} else if (vlmxCompareToStringI(optarg, "center") == 0) {
cropLocation = Batch::cropCenter ;
} else {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION value unknown.") ;
}
break ;
}
case opt_subtract_average: {
// Either a 1- or 3-element vector (per-channel scalar average) or a
// full SINGLE average image.
if (vlmxIsVector(optarg,1) || vlmxIsVector(optarg, 3)) {
size_t n = mxGetNumberOfElements(optarg) ;
switch (mxGetClassID(optarg)) {
case mxSINGLE_CLASS: {
float * x = (float*)mxGetData(optarg) ;
average[0] = x[std::min((size_t)0,n-1)] ;
average[1] = x[std::min((size_t)1,n-1)] ;
average[2] = x[std::min((size_t)2,n-1)] ;
break ;
}
case mxDOUBLE_CLASS: {
double * x = mxGetPr(optarg) ;
average[0] = (float)x[std::min((size_t)0,n-1)] ;
average[1] = (float)x[std::min((size_t)1,n-1)] ;
average[2] = (float)x[std::min((size_t)2,n-1)] ;
break ;
}
default:
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not SINGLE or DOUBLE vector.") ;
}
} else {
if (mxGetClassID(optarg) != mxSINGLE_CLASS ||
mxGetNumberOfDimensions(optarg) > 3) {
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not a SINGLE image of a compatible shape.") ;
}
averageImage.init(optarg) ;
}
break ;
}
case opt_flip: {
flipMode = true ;
break ;
}
case opt_interpolation: {
if (!vlmxIsString(optarg,-1)) {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a string.") ;
}
if (vlmxIsEqualToStringI(optarg, "box")) {
filterType = vl::impl::ImageResizeFilter::kBox ;
} else if (vlmxIsEqualToStringI(optarg, "bilinear")) {
filterType = vl::impl::ImageResizeFilter::kBilinear ;
} else if (vlmxIsEqualToStringI(optarg, "bicubic")) {
filterType = vl::impl::ImageResizeFilter::kBicubic ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos2")) {
filterType = vl::impl::ImageResizeFilter::kLanczos2 ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos3")) {
filterType = vl::impl::ImageResizeFilter::kLanczos3 ;
} else {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a supported method.") ;
}
break ;
}
}
}
// Cross-option validation: an average image requires a fixed output size
// matching its geometry.
if (averageImage) {
if (resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "Cannot subtract an average image unless RESIZE is used to set the size of the output.") ;
}
if (averageImage.getNumDimensions() != 3 ||
averageImage.getHeight() != resizeHeight ||
averageImage.getWidth() != resizeWidth ||
averageImage.getDepth() !=3) {
vlmxError(VLMXE_IllegalArgument, "The average image is not a RESIZEHEIGHT x RESIZEWIDTH x 3 array.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
if (!mxIsCell(in[IN_FILENAMES])) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES is not a cell array of strings.") ;
}
// If the requested number of threads changes, finalize everything
requestedNumThreads = (std::max)(requestedNumThreads, 1) ;
if (readers.size() != requestedNumThreads) {
atExit() ; // Delete threads and current batch
}
// Prepare batch.
if (!batchIsInitialized) {
error = batch.init() ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not initialize a batch structure") ;
}
batchIsInitialized = true ;
}
// Prepare reader tasks.
for (size_t r = readers.size() ; r < requestedNumThreads ; ++r) {
readers.push_back(new ReaderTask()) ;
vl::ErrorCode error = readers[r]->init(&batch, r) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not create the requested number of threads") ;
}
}
// Extract filenames as strings.
bool sameAsPrefeteched = true ;
std::vector<std::string> filenames ;
for (int i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filenameArray = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filenameArray,-1)) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES contains an entry that is not a string.") ;
}
char filename [512] ;
mxGetString (filenameArray, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
sameAsPrefeteched &= (i < batch.getNumberOfItems() && batch.getItem(i)->name == filenames[i]) ;
}
// If the list of names is not the same as the prefetched ones,
// start a new cycle.
if (!sameAsPrefeteched) {
batch.clear() ;
// Check compatibility of options
if (packingMethod == Batch::singleArray && resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "PACK must be used in combination with resizing to a fixed size.") ;
}
if (verbosity >= 2) {
mexPrintf("vl_imreadjpeg: gpu mode: %s\n", gpuMode?"yes":"no") ;
mexPrintf("vl_imreadjpeg: crop anisotropy: [%.1g, %.1g]\n",
minCropAnisotropy, maxCropAnisotropy) ;
mexPrintf("vl_imreadjpeg: crop size: [%.1g, %.1g]\n",
minCropSize, maxCropSize) ;
mexPrintf("vl_imreadjpeg: num_threads: %d requested %d readers\n",
requestedNumThreads, readers.size());
}
// Configure the batch from the parsed options and start prefetching.
batch.setResizeMethod(resizeMethod, resizeHeight, resizeWidth) ;
batch.setPackingMethod(packingMethod) ;
batch.setGpuMode(gpuMode) ;
batch.setFlipMode(flipMode) ;
batch.setCropLocation(cropLocation) ;
batch.setCropAnisotropy(minCropAnisotropy, maxCropAnisotropy) ;
batch.setCropSize(minCropSize, maxCropSize) ;
batch.setColorDeviation(brightnessDeviation,
contrastDeviation,
saturationDeviation) ;
batch.setAverage(average) ;
if (averageImage) {
batch.setAverageImage((float const*)averageImage.getMemory()) ;
}
batch.setFilterType(filterType) ;
for (int i = 0 ; i < filenames.size() ; ++ i) {
batch.registerItem(filenames[i]) ;
}
batch.prefetch() ;
}
// Done if prefetching only.
if (prefetch) { return ; }
// Return result.
batch.sync() ;
switch (batch.getPackingMethod()) {
case Batch::singleArray: {
// One cell containing the whole packed array.
mwSize dims [] = {1,1} ;
out[OUT_IMAGES] = mxCreateCellArray(2, dims) ;
mxSetCell(out[OUT_IMAGES], 0, batch.relinquishArray()) ;
break ;
}
case Batch::individualArrays:
// One cell per image, mirroring the shape of the input cell array.
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
for (int i = 0 ; i < batch.getNumberOfItems() ; ++i) {
Batch::Item * item = batch.getItem(i) ;
if (item->error != vl::VLE_Success) {
vlmxWarning(VLMXE_Execution, "could not read image '%s' because '%s'",
item->name.c_str(),
item->errorMessage) ;
} else {
mxSetCell(out[OUT_IMAGES], i, item->relinquishArray()) ;
}
}
break ;
}
// Finalize.
batch.clear() ;
}
// Construct the FEM solver state directly from a tetrahedral mesh.
FEM3D::FEM3D(TetMesh* meshPtr)
{
  initializeWithTetMesh(meshPtr) ;
}
// Upload the mesh to the device in structure-of-arrays form: one index
// vector per tetrahedron corner and one coordinate vector per component.
// Sets nv (vertex count) and ne (tetrahedron count) as a side effect.
void FEM3D::initializeWithTetMesh(TetMesh* meshPtr)
{
  nv = meshPtr->vertices.size();
  ne = meshPtr->tets.size();

  // Stage the connectivity on the host.
  IdxVector_h corner0(ne), corner1(ne), corner2(ne), corner3(ne);
  for(int e = 0; e < ne; e++)
  {
    corner0[e] = meshPtr->tets[e][0];
    corner1[e] = meshPtr->tets[e][1];
    corner2[e] = meshPtr->tets[e][2];
    corner3[e] = meshPtr->tets[e][3];
  }

  // Stage the vertex coordinates on the host.
  Vector_h_CG coordX(nv), coordY(nv), coordZ(nv);
  for(int v = 0; v < nv; v++)
  {
    coordX[v] = meshPtr->vertices[v][0];
    coordY[v] = meshPtr->vertices[v][1];
    coordZ[v] = meshPtr->vertices[v][2];
  }

  // Copy everything to the device-side members.
  d_tri0 = corner0;
  d_tri1 = corner1;
  d_tri2 = corner2;
  d_tri3 = corner3;
  d_vx = coordX;
  d_vy = coordY;
  d_vz = coordZ;

  // Release the host staging storage.
  corner0.resize(0);
  corner1.resize(0);
  corner2.resize(0);
  corner3.resize(0);
  coordX.resize(0);
  coordY.resize(0);
  coordZ.resize(0);
}
// Gamma function Gamma(x) (Zhang & Jin style series evaluation).
// Returns 1e308 as an overflow/pole flag when x > 171 or x is a
// non-positive integer. Positive integers use Gamma(n) = (n-1)!.
double compute_gamma_3d(double x)
{
  // Series coefficients for 1/Gamma(z) on |z| <= 1.
  static double const coeff[25] = {
    1.0,
    0.5772156649015329,
    -0.6558780715202538,
    -0.420026350340952e-1,
    0.1665386113822915,
    -0.421977345555443e-1,
    -0.9621971527877e-2,
    0.7218943246663e-2,
    -0.11651675918591e-2,
    -0.2152416741149e-3,
    0.1280502823882e-3,
    -0.201348547807e-4,
    -0.12504934821e-5,
    0.1133027232e-5,
    -0.2056338417e-6,
    0.6116095e-8,
    0.50020075e-8,
    -0.11812746e-8,
    0.1043427e-9,
    0.77823e-11,
    -0.36968e-11,
    0.51e-12,
    -0.206e-13,
    -0.54e-14,
    0.14e-14
  };

  if(x > 171.0) return 1e308; // overflow flag

  if(x == (int)x)
  {
    if(x <= 0.0) return 1e308; // poles at 0, -1, -2, ...
    double factorial = 1.0;    // Gamma(n) = (n-1)!
    for(int i = 2; i < x; i++) factorial *= i;
    return factorial;
  }

  // Reduce the argument into (0,1] so the series applies, remembering the
  // product of the stripped factors in 'reduction'.
  double z;
  double reduction = 1.0;
  if(fabs(x) > 1.0)
  {
    z = fabs(x);
    int m = (int)z;
    for(int k = 1; k <= m; k++) reduction *= (z - k);
    z -= m;
  }
  else
  {
    z = x;
  }

  // Horner evaluation of the series for 1/Gamma(z).
  double gr = coeff[24];
  for(int k = 23; k >= 0; k--) gr = gr * z + coeff[k];
  double ga = 1.0 / (gr * z);

  if(fabs(x) > 1.0)
  {
    ga *= reduction;
    if(x < 0.0)
    {
      // Reflection formula for negative non-integer arguments.
      ga = -M_PI / (x * ga * sin(M_PI * x));
    }
  }
  return ga;
}
void FEM3D::JacobiPoly(int degree, Vector_h_CG x, int alpha, int beta, Vector_h_CG &y)
{
int s = x.size();
if(degree == 0)
{
y.resize(s);
for(int i = 0; i < s; i++)
{
y[i] = 1.0;
}
}
else if(degree == 1)
{
y.resize(s);
for(int i = 0; i < s; i++)
{
y[i] = 0.5 * (alpha - beta + (alpha + beta + 2.0) * x[i]);
}
}
else
{
double degm1 = degree - 1.0;
double tmp = 2.0 * degm1 + alpha + beta;
double a1 = 2.0 * (degm1 + 1)*(degm1 + alpha + beta + 1) * tmp;
double a2 = (tmp + 1)*(alpha * alpha - beta * beta);
double a3 = tmp * (tmp + 1.0)*(tmp + 2.0);
double a4 = 2.0 * (degm1 + alpha)*(degm1 + beta)*(tmp + 2.0);
Vector_h_CG poly1, poly2;
JacobiPoly(degree - 1, x, alpha, beta, poly1);
JacobiPoly(degree - 2, x, alpha, beta, poly2);
int plolysize = poly1.size();
y.resize(plolysize);
for(int i = 0; i < plolysize; i++)
{
y[i] = ((a2 + a3 * x[i]) * poly1[i] - a4 * poly2[i]) / a1;
}
}
}
void FEM3D::JacobiPolyDerivative(int degree, Vector_h_CG &x, int alpha, int beta, Vector_h_CG &y)
{
int s = x.size();
if(degree == 0)
{
y.resize(s);
for(int i = 0; i < s; i++)
{
y[i] = 0.0;
}
}
else
{
Vector_h_CG poly;
JacobiPoly(degree - 1, x, alpha + 1, beta + 1, poly);
y.resize(poly.size());
for(int i = 0; i < poly.size(); i++)
{
y[i] = 0.5 * (alpha + beta + degree + 1) * poly[i];
}
}
//y = 0.5*(alpha+beta+degree+1)*JacobiPoly(degree-1,x,alpha+1,beta+1);
}
// Computes the 'degree' roots of the Jacobi polynomial P_degree^(alpha,beta)
// by Newton iteration with polynomial deflation: each root is seeded from a
// Chebyshev-like angle estimate (averaged with the previous root after the
// first), and the Newton step is corrected by the sum of 1/(r - z[i]) over the
// roots already found so the iteration converges to a new root.
void FEM3D::JacobiGZeros(int degree, int alpha, int beta, Vector_h_CG &z)
{
z.resize(degree);
if(degree == 0)
{
// Nothing to compute (the loop below is empty for degree == 0).
for(int i = 0; i < degree; i++)
{
z[i] = 0.0;
}
return;
}
int maxit = 60;
double EPS = 1.0e-6;
// Angular spacing of the initial Chebyshev-style guesses.
double dth = double(PI) / (2.0 * degree);
double rlast = 0.0;
double one = 1.0;
double two = 2.0;
// Single-point work vectors: JacobiPoly/JacobiPolyDerivative evaluate at r[0].
Vector_h_CG r;
Vector_h_CG poly, pder;
r.resize(1);
poly.resize(1);
pder.resize(1);
double sum = 0;
double delr;
for(int k = 0; k < degree; k++)
{
// Initial guess; average with the previous root for k > 0.
r[0] = -cos((two * k + one) * dth);
if(k)
r[0] = 0.5 * (r[0] + rlast);
for(int j = 0; j < maxit; j++)
{
JacobiPoly(degree, r, alpha, beta, poly);
JacobiPolyDerivative(degree, r, alpha, beta, pder);
// Deflation term: repel the iterate from roots already found.
sum = 0.0;
for(int i = 0; i < k; i++)
sum = sum + one / (r[0] - z[i]);
delr = -poly[0] / (pder[0] - sum * poly[0]);
r[0] = r[0] + delr;
if(fabs(delr) < EPS)
break;
}
z[k] = r[0];
rlast = r[0];
}
}
// Gauss-Lobatto-Jacobi quadrature: fills Z with the 'degree' nodes (endpoints
// -1 and +1 plus the zeros of P_{degree-2}^(alpha+1,beta+1)) and 'weight' with
// the corresponding weights built from Gamma-function factors.
// NOTE(review): for degree == 1 the weight is set to 0.0 while JacobiGRZW uses
// 2.0 for its one-point rule -- confirm this degenerate case is intended
// (DEGREE > 1 in practice, so the branch may never run).
void FEM3D::JacobiGLZW(Vector_h_CG& Z, Vector_h_CG& weight, int degree, int alpha, int beta)
{
Z.resize(degree);
weight.resize(degree);
double fac = 0;
if(degree == 1)
{
Z[0] = 0.0;
weight[0] = 0.0;
}
else
{
int apb = alpha + beta;
// Fixed endpoints; interior nodes come from the shifted Jacobi zeros.
Z[0] = -1;
Z[degree - 1] = 1;
Vector_h_CG tmppoly;
JacobiGZeros(degree - 2, alpha + 1, beta + 1, tmppoly);
for(int i = 1; i < degree - 1; i++)
{
Z[i] = tmppoly[i - 1];
}
//Z(2:degree-1) = JacobiGZeros(degree-2,alpha+one,beta+one);
// weight temporarily holds P_{degree-1}^(alpha,beta)(Z).
JacobiPoly(degree - 1, Z, alpha, beta, weight);
Matrix_ell_d_CG::value_type tmp1 = pow(Matrix_ell_d_CG::value_type(2), Matrix_ell_d_CG::value_type(apb + 1));
Matrix_ell_d_CG::value_type tmp2 = compute_gamma_3d(alpha + degree);
fac = tmp1 * tmp2 * compute_gamma_3d(beta + degree);
fac = fac / ((degree - 1) * compute_gamma_3d(degree) * compute_gamma_3d(alpha + beta + degree + 1));
for(int j = 0; j < degree; j++)
{
weight[j] = Matrix_ell_d_CG::value_type(fac) / (weight[j] * weight[j]);
}
//weight = fac./(w.*w);
// Endpoint corrections.
weight[0] = weight[0]*(beta + 1);
weight[degree - 1] = weight[degree - 1]*(alpha + 1);
}
}
// Gauss-Radau-Jacobi quadrature with the left endpoint included: Z[0] = -1 and
// the remaining nodes are the zeros of P_{degree-1}^(alpha,beta+1); weights are
// built from Gamma-function factors with a (1 - Z) Radau correction.
void FEM3D::JacobiGRZW(Vector_h_CG& Z, Vector_h_CG& weight, int degree, int alpha, int beta)
{
Z.resize(degree);
weight.resize(degree);
Matrix_ell_d_CG::value_type fac = 0;
if(degree == 1)
{
// One-point rule: midpoint with total weight 2.
Z[0] = 0.0;
weight[0] = 2.0;
}
else
{
//one = 1.0;
int apb = alpha + beta;
//two = 2.0;
// Fixed left endpoint; interior nodes from the shifted Jacobi zeros.
Z[0] = -1;
Vector_h_CG tmpPoly;
JacobiGZeros(degree - 1, alpha, beta + 1, tmpPoly);
for(int i = 1; i < degree; i++)
{
Z[i] = tmpPoly[i - 1];
}
//Z(2:degree-1) = JacobiGZeros(degree-1,alpha+one,beta+one);
// weight temporarily holds P_{degree-1}^(alpha,beta)(Z).
JacobiPoly(degree - 1, Z, alpha, beta, weight);
Matrix_ell_d_CG::value_type tmp = compute_gamma_3d(alpha + degree);
fac = pow(Matrix_ell_d_CG::value_type(2), Matrix_ell_d_CG::value_type(apb)) * tmp * compute_gamma_3d(beta + degree);
fac = fac / (compute_gamma_3d(degree)*(beta + degree) * compute_gamma_3d(apb + degree + 1));
for(int j = 0; j < degree; j++)
{
weight[j] = Matrix_ell_d_CG::value_type(fac)*(1 - Z[j]) / (weight[j] * weight[j]);
}
// Endpoint correction.
weight[0] = weight[0]*(beta + 1);
}
}
void FEM3D::Transform2StdTetSpace(const Vector_h_CG &z_x, const Vector_h_CG &z_y, const Vector_h_CG &z_z, CGType(*VecXYZ)[DEGREE][DEGREE][3])
{
int nx = z_x.size();
int ny = z_y.size();
int nz = z_z.size();
CGType cx, cy, cz;
for(int i = 0; i < nx; i++)
{
cx = z_x[i];
for(int j = 0; j < ny; j++)
{
cy = z_y[j];
for(int k = 0; k < nz; k++)
{
cz = z_z[k];
VecXYZ[i][j][k][0] = (1 + cx)*0.5 * (1 - cy) * 0.5 * (1 - cz) * 0.5;
VecXYZ[i][j][k][1] = (1 + cy) * 0.5 * (1 - cz) * 0.5;
VecXYZ[i][j][k][2] = (1 + cz) * 0.5;
}
}
}
}
// Evaluates the four linear tetrahedral basis functions at every quadrature
// point: phi_s(p) = c0 + c1*x + c2*y + c3*z with coefficients from coefmat[s].
void FEM3D::EvalBasisTet(CGType(*coefmat)[4], const CGType(*VecXYZ)[DEGREE][DEGREE][3], CGType(*phi)[DEGREE][DEGREE][4])
{
    for(int basis = 0; basis < 4; basis++)
    {
        const CGType* c = coefmat[basis];
        for(int i = 0; i < DEGREE; i++)
        {
            for(int j = 0; j < DEGREE; j++)
            {
                for(int k = 0; k < DEGREE; k++)
                {
                    const CGType px = VecXYZ[i][j][k][0];
                    const CGType py = VecXYZ[i][j][k][1];
                    const CGType pz = VecXYZ[i][j][k][2];
                    phi[i][j][k][basis] = c[0] + c[1] * px + c[2] * py + c[3] * pz;
                }
            }
        }
    }
}
// Tensor-product quadrature over the DEGREE^3 grid:
//   sum_{i,j,k} fx[i][j][k] * w_x[i] * w_y[j] * w_z[k],
// accumulated innermost (z) first so each 1D weight is applied once per slab.
CGType FEM3D::Integration_Quadrilateral_3d(Matrix_ell_d_CG::value_type(*fx)[DEGREE][DEGREE],
    Vector_h_CG &w_x, Vector_h_CG &w_y, Vector_h_CG &w_z)
{
    Matrix_ell_d_CG::value_type total = 0;
    for (int i = 0; i < DEGREE; i++)
    {
        Matrix_ell_d_CG::value_type planeSum = 0.0;
        for (int j = 0; j < DEGREE; j++)
        {
            Matrix_ell_d_CG::value_type lineSum = 0.0;
            for (int k = 0; k < DEGREE; k++)
            {
                lineSum += fx[i][j][k] * w_z[k];
            }
            planeSum += lineSum * w_y[j];
        }
        total += planeSum * w_x[i];
    }
    return total;
}
// Computes the 10 unique entries (upper triangle, row-major order) of the
// symmetric 4x4 local mass matrix: the integral of phi_k * phi_g over the
// reference tet via the tensor-product quadrature weights.
// phi is flattened as [basis][i][j][k] (see assemble); integralMass must
// already hold at least 10 entries.
void FEM3D::IntegrationInTet(Vector_h_CG &phi, Vector_h_CG &weight_x, Vector_h_CG &weight_y, Vector_h_CG &weight_z, Vector_h_CG &integralMass)
{
CGType integrandMass[DEGREE][DEGREE][DEGREE];
int cnt = 0;
for(int k = 0; k < 4; k++)
{
for(int g = k; g < 4; g++)
{
// Pointwise product phi_k * phi_g at every quadrature node.
for(int p = 0; p < DEGREE; p++)
{
for(int q = 0; q < DEGREE; q++)
{
for(int r = 0; r < DEGREE; r++)
{
integrandMass[p][q][r] = phi[k * DEGREE * DEGREE * DEGREE + p * DEGREE * DEGREE + q * DEGREE + r] * phi[g * DEGREE * DEGREE * DEGREE + p * DEGREE * DEGREE + q * DEGREE + r];
}
}
}
integralMass[cnt++] = Integration_Quadrilateral_3d(integrandMass, weight_x, weight_y, weight_z);
}
}
}
//void FEM3D::IntegrationForce(Vector_h_CG &phi, Vector_h_CG &weight_x, Vector_h_CG &weight_y, Vector_h_CG &weight_z, Vector_h_CG &integralForce)
//{
// CGType integrandForce[DEGREE][DEGREE][DEGREE];
// int cnt = 0;
// for(int k = 0; k < 4; k++)
// {
// for(int g = k; g < 4; g++)
// {
// for(int p = 0; p < DEGREE; p++)
// {
// for(int q = 0; q < DEGREE; q++)
// {
// for(int r = 0; r < DEGREE; r++)
// {
// integrandForce[p][q][r] = phi[k * DEGREE * DEGREE * DEGREE + p * DEGREE * DEGREE + q * DEGREE + r] * phi[g * DEGREE * DEGREE * DEGREE + p * DEGREE * DEGREE + q * DEGREE + r];
// }
// }
// }
// integralForce[cnt++] = Integration_Quadrilateral_3d(integrandForce, weight_x, weight_y, weight_z);
// }
// }
//}
// Assembles the FEM system for linear tetrahedra: builds the 1D Jacobi
// quadrature rules (Lobatto in x, Radau with alpha=1/2 in y/z, whose weights
// absorb the collapsed-coordinate Jacobian), maps nodes onto the reference
// tet, evaluates the four P1 basis functions, precomputes the 10 unique local
// mass integrals, and dispatches per-element assembly into A and b.
void FEM3D::assemble(TetMesh* meshPtr, Matrix_ell_d_CG &A, Vector_d_CG &b, bool isdevice)
{
int degree_x = DEGREE;
int degree_y = DEGREE;
int degree_z = DEGREE;
Vector_h_CG z_x, z_y, z_z;
Vector_h_CG weight_x, weight_y, weight_z;
JacobiGLZW(z_x, weight_x, degree_x, 0, 0);
JacobiGRZW(z_y, weight_y, degree_y, 1, 0);
JacobiGRZW(z_z, weight_z, degree_z, 2, 0);
// Fold the constant Jacobian factors into the y/z weights.
// NOTE(review): a single loop rescales both vectors, implicitly assuming
// degree_y == degree_z (both are DEGREE here).
for(int i = 0; i < degree_y; i++)
{
weight_y[i] /= 2;
weight_z[i] /= 4;
}
CGType qdTet[DEGREE][DEGREE][DEGREE][3];
Transform2StdTetSpace(z_x, z_y, z_z, qdTet);
// Coefficients of the P1 basis: 1-x-y-z, x, y, z.
CGType coefmatBaseTet[4][4] = {
{1, -1, -1, -1},
{0, 1, 0, 0},
{0, 0, 1, 0},
{0, 0, 0, 1}
};
CGType phiTet[DEGREE][DEGREE][DEGREE][4];
EvalBasisTet(coefmatBaseTet, qdTet, phiTet);
// Flatten to the [basis][i][j][k] layout expected by IntegrationInTet.
Vector_h_CG phi(DEGREE * DEGREE * DEGREE * 4);
for(int l = 0; l < 4; l++)
for(int i = 0; i < DEGREE; i++)
for(int j = 0; j < DEGREE; j++)
for(int k = 0; k < DEGREE; k++)
phi[l * DEGREE * DEGREE * DEGREE + i * DEGREE * DEGREE + j * DEGREE + k] = phiTet[i][j][k][l];
Vector_h_CG integrandMass(10);
IntegrationInTet(phi, weight_x, weight_y, weight_z, integrandMass);
// NOTE(review): these raw pointers are computed but never used below.
Matrix_ell_d_CG::value_type * tmp_w_x = thrust::raw_pointer_cast(&weight_x[0]);
Matrix_ell_d_CG::value_type* tmp_w_y = thrust::raw_pointer_cast(&weight_y[0]);
Matrix_ell_d_CG::value_type* tmp_w_z = thrust::raw_pointer_cast(&weight_z[0]);
IdxVector_d matlabels = meshPtr->matlabels;
Vector_d_CG integrandMass_d = integrandMass;
perform_element_loop_3d(d_vx, d_vy, d_vz, d_tri0,
d_tri1, d_tri2, d_tri3, A, b, phi, weight_x,
weight_y, weight_z, matlabels, integrandMass_d, isdevice);
phi.clear();
}
#include <octree_slam/sensor/image_kernels.h>
// CUDA / OpenGL Dependencies
#include <cuda_gl_interop.h>
// Thrust Dependencies
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
namespace octree_slam {
namespace sensor {
// Low-precision PI (not referenced in the code shown here).
#define PI 3.14159
// Tunables for the depth-map bilateral filter (see bilateralFilter below).
int BILATERAL_KERNEL_SIZE = 7;
float BILATERAL_SIGMA_DEPTH = 40.0f; //in mm
float BILATERAL_SIGMA_SPATIAL = 4.5f;
// RGB -> luminance weights used by colorToIntensity.
float3 INTENSITY_RATIO = { 0.299f, 0.587f, 0.114f }; //These are taken from Kintinuous
// Back-projects a raw depth image (millimeters) into a 3D point per pixel via
// the pinhole model, with the principal point at the image center.  Invalid
// depths (0, or beyond 15 m) become INFINITY sentinel vertices.
// NOTE(review): img_size.x/width is integer division, so the scaling is only
// exact when img_size is an integer multiple of the map size -- confirm.
__global__ void generateVertexMapKernel(const uint16_t* depth_pixels, glm::vec3* vertex_map, const int width, const int height, const glm::vec2 focal_length, const int2 img_size) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//Don't do anything if the index is out of bounds
if (idx >= width*height) {
return;
}
//Compute the x/y coords of this thread
int x = idx % width;
int y = idx / width;
//Get the depth value for this pixel from global memory once
int depth = depth_pixels[idx];
//Handle no-measurements
if (depth == 0 || depth > 15000) {
vertex_map[idx] = glm::vec3(INFINITY, INFINITY, INFINITY);
return;
}
//Conversion from millimeters to meters
const float milli = 0.001f;
//Compute the point coordinates (y axis points up in camera space)
vertex_map[idx].x = ((img_size.x/width)*x - img_size.x/2) * (float) depth / focal_length.x * milli;
vertex_map[idx].y = (img_size.y/2 - (img_size.y/height)*y) * (float) depth / focal_length.y * milli;
vertex_map[idx].z = depth*milli;
}
// Host wrapper: one thread per depth pixel, 256 threads per block, blocking.
extern "C" void generateVertexMap(const uint16_t* depth_pixels, glm::vec3* vertex_map, const int width, const int height, const glm::vec2 focal_length, const int2 img_size) {
  const int threadsPerBlock = 256;
  const float numPixels = (float)width * (float)height;
  generateVertexMapKernel<<<ceil(numPixels / 256.0f), threadsPerBlock>>>(depth_pixels, vertex_map, width, height, focal_length, img_size);
  cudaDeviceSynchronize();
}
// Reduction functor: component-wise minimum of two points.  A zero lhs is
// treated as the "uninitialized" identity, and points with any non-finite
// component are skipped (they are invalid-depth sentinels upstream).
struct min_vec3 : public thrust::binary_function<glm::vec3, glm::vec3, glm::vec3>{
  __host__ __device__ glm::vec3 operator() (const glm::vec3& lhs, const glm::vec3& rhs) {
    glm::vec3 result;
    if (lhs == glm::vec3(0.0f)) {
      result = rhs;
    // BUG FIX: rhs.z was tested twice and rhs.y never, letting points with a
    // non-finite y component corrupt the reduction.
    } else if (!isfinite(rhs.x) || !isfinite(rhs.y) || !isfinite(rhs.z)) {
      result = lhs;
    } else {
      result.x = min(rhs.x, lhs.x);
      result.y = min(rhs.y, lhs.y);
      result.z = min(rhs.z, lhs.z);
    }
    return result;
  }
};
// Reduction functor: component-wise maximum of two points.  A zero lhs is
// treated as the "uninitialized" identity, and points with any non-finite
// component are skipped (they are invalid-depth sentinels upstream).
struct max_vec3 : public thrust::binary_function<glm::vec3, glm::vec3, glm::vec3>{
  __host__ __device__ glm::vec3 operator() (const glm::vec3& lhs, const glm::vec3& rhs) {
    glm::vec3 result;
    if (lhs == glm::vec3(0.0f)) {
      result = rhs;
    // BUG FIX: rhs.z was tested twice and rhs.y never, letting points with a
    // non-finite y component corrupt the reduction.
    } else if (!isfinite(rhs.x) || !isfinite(rhs.y) || !isfinite(rhs.z)) {
      result = lhs;
    } else {
      result.x = max(rhs.x, lhs.x);
      result.y = max(rhs.y, lhs.y);
      result.z = max(rhs.z, lhs.z);
    }
    return result;
  }
};
// Grows bbox to enclose num_points device-resident points via two thrust
// reductions.  The incoming bbox.bbox0/bbox1 serve as the initial values; the
// functors treat a zero vector as "uninitialized" and skip non-finite points.
extern "C" void computePointCloudBoundingBox(glm::vec3* points, const int num_points, BoundingBox& bbox) {
//Use thrust max/min to get the bounding box
thrust::device_ptr<glm::vec3> t_pts = thrust::device_pointer_cast<glm::vec3>(points);
bbox.bbox0 = thrust::reduce(t_pts, t_pts + num_points, bbox.bbox0, min_vec3());
bbox.bbox1 = thrust::reduce(t_pts, t_pts + num_points, bbox.bbox1, max_vec3());
}
// Estimates a per-pixel surface normal as the (negated, normalized) cross
// product of the +x and +y neighbor differences in the vertex map.  Pixels on
// the right/bottom edge get INFINITY sentinel normals.
// NOTE(review): neighbors carrying INFINITY sentinels (invalid depth) will
// propagate non-finite normals here -- confirm downstream consumers filter
// them.
__global__ void generateNormalMapKernel(const glm::vec3* vertex_map, glm::vec3* normal_map, const int width, const int height) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//Don't do anything if the index is out of bounds
if (idx >= width*height) {
return;
}
//Compute the x/y coords of this thread
int x = idx % width;
int y = idx / width;
//Don't do anything for the edges, fill in invalid normals
if (x == (width-1) || y == (height-1)) {
normal_map[idx] = glm::vec3(INFINITY, INFINITY, INFINITY);
return;
}
//Get the center point from global memory once
glm::vec3 center = vertex_map[idx];
//Compute two vectors within the surface (locally)
glm::vec3 v1 = vertex_map[idx + 1] - center;
glm::vec3 v2 = vertex_map[idx + width] - center;
//Compute the normal (negated so it faces the camera)
glm::vec3 normal = glm::normalize(-glm::cross(v1, v2));
//Store the result in global memory
normal_map[idx] = normal;
}
// Host wrapper: one thread per pixel, 256-thread blocks, blocking.
extern "C" void generateNormalMap(const glm::vec3* vertex_map, glm::vec3* normal_map, const int width, const int height) {
  const int threadsPerBlock = 256;
  generateNormalMapKernel<<<ceil((float)width * (float)height / 256.0f), threadsPerBlock>>>(vertex_map, normal_map, width, height);
  cudaDeviceSynchronize();
}
//New and improved bilateral filter based on kinfu_remake
// Edge-preserving smoothing of a raw depth image.  sig_spat and sig_dep are
// the precomputed 0.5/sigma^2 factors (see bilateralFilter), so the weight is
// exp(-(dist_px^2/(2*sigma_spatial^2) + dist_depth^2/(2*sigma_depth^2))).
__global__ void bilateralKernel(const uint16_t* depth_in, uint16_t* filtered_out, const uint2 dims, const int kernel_size, const float sig_spat, const float sig_dep) {
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Don't do anything if the index is out of bounds
if (idx >= dims.x*dims.y) {
return;
}
int x = idx % dims.x;
int y = idx / dims.x;
int value = depth_in[y*dims.x + x];
// Clamp the kernel window to the image border (exclusive upper bounds).
int tx = min(x - kernel_size / 2 + kernel_size, dims.x - 1);
int ty = min(y - kernel_size / 2 + kernel_size, dims.y - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max(y - kernel_size / 2, 0); cy < ty; ++cy)
{
for (int cx = max(x - kernel_size / 2, 0); cx < tx; ++cx)
{
int depth = depth_in[cy*dims.x + cx];
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf(-(space2 * sig_spat + color2 * sig_dep));
sum1 += depth * weight;
sum2 += weight;
}
}
// All weights are positive, so sum2 > 0 for any non-degenerate window.
filtered_out[y*dims.x + x] = __float2int_rn(sum1 / sum2);
}
// Host wrapper for bilateralKernel: precomputes the 0.5/sigma^2 Gaussian
// factors once and launches one thread per pixel (256/block), blocking.
extern "C" void bilateralFilter(const uint16_t* depth_in, uint16_t* filtered_out, const int width, const int height) {
  //Use the bilateral filter kernel on the inputs
  uint2 dims = make_uint2(width, height);
  float spatial = 0.5f / (BILATERAL_SIGMA_SPATIAL * BILATERAL_SIGMA_SPATIAL);
  // FIX: use a float literal (was the double literal 0.5, inconsistent with
  // the spatial term above and forcing a double-precision division).
  float depth = 0.5f / (BILATERAL_SIGMA_DEPTH * BILATERAL_SIGMA_DEPTH);
  bilateralKernel<<<ceil((float)width * (float)height/256.0f), 256>>>(depth_in, filtered_out, dims, BILATERAL_KERNEL_SIZE, spatial, depth);
  cudaDeviceSynchronize();
}
// Converts an 8-bit RGB image to normalized grayscale intensity in [0,1].
// intensity_ratio.x/y/z weight the R/G/B channels (the module default is the
// BT.601 luma triple 0.299/0.587/0.114).
__global__ void colorToIntensityKernel(const Color256* color_in, float* intensity_out, const int size, const float3 intensity_ratio) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  //Don't do anything if the index is out of bounds
  if (idx >= size) {
    return;
  }
  // BUG FIX: the green weight (intensity_ratio.y) was applied to the blue
  // channel (.b appeared twice and .g never), skewing the luminance.
  intensity_out[idx] = color_in[idx].r/255.0f * intensity_ratio.x + color_in[idx].g/255.0f * intensity_ratio.y
      + color_in[idx].b/255.0f * intensity_ratio.z;
}
// Host wrapper: one thread per pixel in 256-thread blocks, blocking.
extern "C" void colorToIntensity(const Color256* color_in, float* intensity_out, const int size) {
  const int threadsPerBlock = 256;
  colorToIntensityKernel<<<ceil((float)size/256.0f), threadsPerBlock>>>(color_in, intensity_out, size, INTENSITY_RATIO);
  cudaDeviceSynchronize();
}
// Applies a 4x4 transform to every vertex in place; w = 1 so the translation
// component is picked up.
__global__ void transformVertexMapKernel(glm::vec3* vertex, const glm::mat4 trans, const int size) {
  const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
  // Guard the grid tail.
  if (i < size) {
    vertex[i] = glm::vec3(trans * glm::vec4(vertex[i], 1.0f));
  }
}
// Host wrapper: transforms size vertices in place, one thread per vertex.
extern "C" void transformVertexMap(glm::vec3* vertex_map, const glm::mat4 &trans, const int size) {
  transformVertexMapKernel<<<ceil((float)size / 256.0f), 256>>>(vertex_map, trans, size);
  // FIX: block until completion, consistent with every other host wrapper in
  // this file (callers here read the map on the host right after).
  cudaDeviceSynchronize();
}
// Rotates every normal by the transform's linear part (w = 0 drops the
// translation).  NOTE(review): this is only correct for rigid transforms --
// with scale/shear the inverse-transpose plus renormalization would be
// required; confirm trans is always rigid here.
__global__ void transformNormalMapKernel(glm::vec3* normal, const glm::mat4 trans, const int size) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//Don't do anything if the index is out of bounds
if (idx >= size) {
return;
}
normal[idx] = glm::vec3(trans * glm::vec4(normal[idx], 0.0f));
}
// Host wrapper: rotates size normals in place, one thread per normal.
extern "C" void transformNormalMap(glm::vec3* normal_map, const glm::mat4 &trans, const int size) {
  transformNormalMapKernel<<<ceil((float)size / 256.0f), 256>>>(normal_map, trans, size);
  // FIX: block until completion, consistent with the other host wrappers in
  // this file.
  cudaDeviceSynchronize();
}
// Edge-preserving 2x depth downsample.  width/height are the OUTPUT
// (half-resolution) dimensions; data_in rows have stride 2*width.  Each
// output pixel averages the full-res pixels in a 5x5 window around (2y, 2x)
// whose value lies within sigma_depth of the window center.
template <class T>
__global__ void subsampleDepthKernel(const T* data_in, T* data_out, const int width, const int height, const float sigma_depth) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  //Don't do anything if the index is out of bounds
  if (idx >= width*height) {
    return;
  }
  //Compute the x/y coords of this thread
  int x = idx % width;
  int y = idx / width;
  const int D = 5;
  // Center sample in the full-res image: row 2y (stride 2*width), column 2x.
  float center = data_in[4*y*width + 2*x];
  // Window bounds (exclusive), clamped to the full-res image.
  int tx = min(2 * x - D / 2 + D, 2*width - 1);
  int ty = min(2 * y - D / 2 + D, 2*height - 1);
  float sum = 0;
  float count = 0;
  for (int cy = max(0, 2 * y - D / 2); cy < ty; ++cy) {
    for (int cx = max(0, 2 * x - D / 2); cx < tx; ++cx) {
      float val = data_in[2*cy*width + cx];
      if (abs(val - center) < sigma_depth) {
        sum += val;
        ++count;
      }
    }
  }
  // BUG FIX: this was "(T) (count == 0) ? 0 : sum / count" -- the cast binds
  // to the comparison, not the conditional, so it produced the intended
  // values only by accident.  Make the intent explicit.
  data_out[y*width + x] = (count == 0) ? (T)0 : (T)(sum / count);
}
// Halves the resolution of a depth image "in place" using the edge-preserving
// kernel above; a scratch buffer is needed because the kernel reads full-res
// while writing half-res over the same layout.
// NOTE(review): allocates/frees device memory on every call -- a persistent
// scratch buffer would be cheaper if this runs per frame.
template <class T>
void subsampleDepth(T* data, const int width, const int height) {
//Create new memory space (this can't actually be done in place)
T* data_new;
CUDA_CHECK -- no, kept as-is below
cudaMalloc((void**)&data_new, width*height*sizeof(T)/4);
// ceil(w*h/1024) blocks x 256 threads == ~w*h/4 threads, one per output pixel.
subsampleDepthKernel<<<ceil((float)width * (float)height/1024.0f), 256>>>(data, data_new, width/2, height/2, BILATERAL_SIGMA_DEPTH*3.0f);
cudaDeviceSynchronize();
//Copy into the input
cudaMemcpy(data, data_new, width*height*sizeof(T)/4, cudaMemcpyDeviceToDevice);
//Free the temporary memory slot
cudaFree(data_new);
}
//Declare types to generate symbols
template void subsampleDepth<uint16_t>(uint16_t* data, const int width, const int height);
template void subsampleDepth<float>(float* data, const int width, const int height);
// Nearest-neighbor 2x downsample.  width/height are the OUTPUT dimensions;
// each output pixel copies full-res pixel (2*row, 2*col), whose rows have
// stride 2*width.
template <class T>
__global__ void subsampleKernel(const T* data_in, T* data_out, const int width, const int height) {
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  // Guard the grid tail.
  if (idx >= width*height) {
    return;
  }
  const int col = idx % width;
  const int row = idx / width;
  data_out[row*width + col] = data_in[4 * row*width + 2 * col];
}
// Halves the resolution of an image "in place" via a temporary device buffer
// (the kernel cannot safely read and write the same memory region).
template <class T>
void subsample(T* data, const int width, const int height) {
  const size_t outBytes = width*height*sizeof(T) / 4;
  T* scratch;
  cudaMalloc((void**)&scratch, outBytes);
  subsampleKernel<<<width*height / 1024 + 1, 256>>>(data, scratch, width / 2, height / 2);
  cudaDeviceSynchronize();
  // Copy the half-resolution result back over the input buffer.
  cudaMemcpy(data, scratch, outBytes, cudaMemcpyDeviceToDevice);
  cudaFree(scratch);
}
//Declare types to generate symbols
template void subsample<Color256>(Color256* data, const int width, const int height);
template void subsample<float>(float* data, const int width, const int height);
} // namespace sensor
} // namespace octree_slam | the_stack |
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
//#include <dirent.h>
#include <boost/lexical_cast.hpp>
#include <boost/filesystem.hpp>
#include <boost/algorithm/string.hpp>
#include "caffe/layers/DenseBlock_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/util/cudnn.hpp"
namespace caffe {
// Stubbed directory-existence check: the dirent-based probe is commented out,
// so this currently reports every path as existing.
bool dirExists_cu(string dirStr) {
/* const char* dirCStr = dirStr.c_str();
DIR* dir = opendir(dirCStr);
if (ENOENT == errno){
return false;
}
closedir(dir);*/
return true;
}
// Stubbed helper: originally created the parent directories of fileName via
// boost::filesystem (body commented out); currently a no-op.
void tryCreateDirectory_cu(string fileName) {
/* vector<string> strVec;
boost::split(strVec,fileName,boost::is_any_of("/"));
string newStr="";
for (int i=0;i<strVec.size()-1;++i){
newStr += strVec[i] + (i==strVec.size()-2?"":"/");
}
boost::filesystem::path dirToCreate(newStr);
if (!dirExists_cu(newStr)){
boost::filesystem::create_directories(dirToCreate);
}*/
}
// Integer -> decimal string (boost::lexical_cast is kept to match the file's
// existing dependencies).
string itos_cu(int i) {
  return boost::lexical_cast<string>(i);
}
// Scatter copy: numChunks contiguous chunks of chunkSize_input elements from
// inPtr_gpu are copied device-to-device to outPtr_gpu at a (possibly larger)
// stride of chunkStride_output elements.
template <typename Dtype>
void gpu_copy_one_to_many(const Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_input, int chunkStride_output) {
  for (int c = 0; c < numChunks; ++c) {
    CUDA_CHECK(cudaMemcpy(outPtr_gpu + c * chunkStride_output,
                          inPtr_gpu + c * chunkSize_input,
                          chunkSize_input * sizeof(Dtype),
                          cudaMemcpyDeviceToDevice));
  }
}
// Gather copy: numChunks chunks of chunkSize_output elements are read from
// inPtr_gpu at a stride of chunkStride_input elements and packed contiguously
// into outPtr_gpu, device-to-device.
template <typename Dtype>
void gpu_copy_many_to_one(Dtype* inPtr_gpu, Dtype* outPtr_gpu, int numChunks, int chunkSize_output, int chunkStride_input) {
  for (int c = 0; c < numChunks; ++c) {
    CUDA_CHECK(cudaMemcpy(outPtr_gpu + c * chunkSize_output,
                          inPtr_gpu + c * chunkStride_input,
                          chunkSize_output * sizeof(Dtype),
                          cudaMemcpyDeviceToDevice));
  }
}
// Debug helper: copies numValues elements from device memory and prints them
// to stdout, comma separated.
template <typename Dtype>
void print_gpuPtr(Dtype* gpuPtr, int numValues) {
  Dtype* cpuPtr = new Dtype[numValues];
  // FIX: check the copy like the rest of the file does.
  CUDA_CHECK(cudaMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), cudaMemcpyDeviceToHost));
  for (int i = 0; i < numValues; ++i) {
    std::cout << cpuPtr[i] << ",";
  }
  std::cout << std::endl;
  // FIX: the staging buffer was leaked on every call.
  delete[] cpuPtr;
}
// Copies numValues elements from device memory and writes them, comma
// separated, to fileName (attempting to create parent directories first).
template <typename Dtype>
void log_gpuPtr(Dtype* gpuPtr, int numValues, string fileName) {
  Dtype* cpuPtr = new Dtype[numValues];
  // FIX: check the copy like the rest of the file does.
  CUDA_CHECK(cudaMemcpy(cpuPtr, gpuPtr, numValues * sizeof(Dtype), cudaMemcpyDeviceToHost));
  const char* fileName_cstr = fileName.c_str();
  tryCreateDirectory_cu(fileName_cstr);
  std::ofstream outWriter(fileName_cstr, std::ofstream::out);
  for (int i = 0; i < numValues; ++i) {
    outWriter << cpuPtr[i] << ",";
  }
  outWriter << std::endl;
  // FIX: the staging buffer was leaked on every call.
  delete[] cpuPtr;
}
// Debug dump of the DenseBlock's internal GPU state to text files under
// dir/gpu_<logId>/.  With logDynamic set, writes the activation buffers for
// transition TIdx (gradients when logDiff, data otherwise); with logDynamic
// unset, writes the learned parameters and batch-norm statistics for every
// transition.  Blob index layout (stride numTransition): 0 filters, 1 scalers,
// 2 biases, 3 global means, 4 global variances, then 5-9 the bottleneck (BC)
// counterparts when useBC.
template <typename Dtype>
void DenseBlockLayer<Dtype>::logInternal_gpu(string dir, int TIdx, bool logDynamic, bool logDiff) {
string localDir = dir + "/gpu_" + itos_cu(this->logId) + "/";
if (logDynamic) {
// Concatenated feature-map buffer: N x (init + growth*numTransition) x H x W.
int postBufferSize = this->N * (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
// Bottleneck buffer: N x 4*growthRate x H x W.
int quadGBufferSize = N * 4 * growthRate*H*W;
if (logDiff) {
//postConv_grad_gpu
log_gpuPtr<Dtype>(this->postConv_grad_gpu, postBufferSize, localDir + "postConv_grad_gpu_transition" + itos_cu(TIdx));
//postBN_grad_gpu
log_gpuPtr<Dtype>(this->postBN_grad_gpu, postBufferSize, localDir + "postBN_grad_gpu_transition" + itos_cu(TIdx));
//postReLU_grad_gpu
log_gpuPtr<Dtype>(this->postReLU_grad_gpu, postBufferSize, localDir + "postReLU_grad_gpu_transition" + itos_cu(TIdx));
//BC
if (useBC) {
//postConv_4G_grad
log_gpuPtr<Dtype>(this->postConv_4G_grad, quadGBufferSize, localDir + "postConv_4G_grad_transition" + itos_cu(TIdx));
//postBN_4G_grad
log_gpuPtr<Dtype>(this->postBN_4G_grad, quadGBufferSize, localDir + "postBN_4G_grad_transition" + itos_cu(TIdx));
//postReLU_4G_grad
log_gpuPtr<Dtype>(this->postReLU_4G_grad, quadGBufferSize, localDir + "postReLU_4G_grad_transition" + itos_cu(TIdx));
}
}
else {
//postConv_data_gpu
log_gpuPtr<Dtype>(this->postConv_data_gpu, postBufferSize, localDir + "postConv_data_gpu_transition" + itos_cu(TIdx));
//postBN_data_gpu
log_gpuPtr<Dtype>(this->postBN_data_gpu, postBufferSize, localDir + "postBN_data_gpu_transition" + itos_cu(TIdx));
//postReLU_data_gpu
log_gpuPtr<Dtype>(this->postReLU_data_gpu, postBufferSize, localDir + "postReLU_data_gpu_transition" + itos_cu(TIdx));
if (useBC) {
//postConv_4G (shared buffer vs. one buffer per transition)
if (BC_ultra_spaceEfficient) {
log_gpuPtr<Dtype>(this->postConv_4G, quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
else {
log_gpuPtr<Dtype>(this->postConv_4GVec[TIdx], quadGBufferSize, localDir + "postConv_4G_data_transition" + itos_cu(TIdx));
}
//postBN_4G
log_gpuPtr<Dtype>(this->postBN_4G, quadGBufferSize, localDir + "postBN_4G_data_transition" + itos_cu(TIdx));
//postReLU_4G
log_gpuPtr<Dtype>(this->postReLU_4G, quadGBufferSize, localDir + "postReLU_4G_data_transition" + itos_cu(TIdx));
}
}
}
else {
for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
// Channels feeding this transition grow by growthRate per transition.
int numChannel_moreWide = this->initChannel + this->growthRate * transitionIdx;
int numChannel_quadG = 4 * growthRate;
//global Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data(), numChannel_moreWide, localDir + "globalVariance_gpu_transition" + itos_cu(transitionIdx));
//ResultSaveMean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveMean_gpu_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_gpu[transitionIdx], numChannel_moreWide, localDir + "ResultSaveInvVariance_gpu_transition" + itos_cu(transitionIdx));
if (useBC) {
//global BC Mean/Variance
log_gpuPtr<Dtype>(this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "globalVar_BC_transition" + itos_cu(transitionIdx));
//ResultSave BC Mean/InvVariance
log_gpuPtr<Dtype>(this->ResultSaveMean_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveMean_BC_transition" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->ResultSaveInvVariance_BC[transitionIdx], numChannel_quadG, localDir + "ResultSaveInvVariance_BC_transition" + itos_cu(transitionIdx));
}
//Filter_data/grad_gpu
// With BC the 3x3 conv always sees 4*growthRate inputs; without BC it sees
// the full concatenated channel count.
int filterSize;
if (useBC) {
filterSize = 4 * growthRate*growthRate * 3 * 3;
}
else {
filterSize = (this->initChannel + this->growthRate*transitionIdx) * this->growthRate * 3 * 3;
}
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_data(), filterSize, localDir + "Filter_data_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx]->mutable_gpu_diff(), filterSize, localDir + "Filter_grad_gpu_" + itos_cu(transitionIdx));
//Scaler_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Scaler_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Scaler_data_gpu_" + itos_cu(transitionIdx));
//Bias_data/grad_gpu
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_diff(), numChannel_moreWide, localDir + "Bias_grad_gpu_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[transitionIdx + 2 * this->numTransition]->mutable_gpu_data(), numChannel_moreWide, localDir + "Bias_data_gpu_" + itos_cu(transitionIdx));
if (useBC) {
//BC Filter (1x1 bottleneck convolution)
int filterBC_size = (initChannel + growthRate*transitionIdx) * 4 * growthRate * 1 * 1;
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data(), filterBC_size, localDir + "Filter_data_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff(), filterBC_size, localDir + "Filter_grad_BC_" + itos_cu(transitionIdx));
//BC scaler
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Scaler_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Scaler_data_BC_" + itos_cu(transitionIdx));
//BC bias
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(), numChannel_quadG, localDir + "Bias_grad_BC_" + itos_cu(transitionIdx));
log_gpuPtr<Dtype>(this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(), numChannel_quadG, localDir + "Bias_data_BC_" + itos_cu(transitionIdx));
}
}
}
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::GPU_Initialization() {
//std::cout<<"Pre DeviceSet"<<std::endl;
//CUDA_CHECK(cudaSetDevice(1));
//std::cout<<"Post DeviceSet"<<std::endl;
//GPU intermediate ptrs
#if 1
int bufferSize_byte = this->N*(this->initChannel + this->growthRate*this->numTransition)*this->H*this->W * sizeof(Dtype);
CUDA_CHECK(cudaMalloc(&this->postConv_data_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(cudaMalloc(&this->postDropout_data_gpu, bufferSize_byte));
}
CUDA_CHECK(cudaMalloc(&this->postBN_data_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postReLU_data_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postConv_grad_gpu, bufferSize_byte));
if (useDropout) {
CUDA_CHECK(cudaMalloc(&this->postDropout_grad_gpu, bufferSize_byte));
}
CUDA_CHECK(cudaMalloc(&this->postBN_grad_gpu, bufferSize_byte));
CUDA_CHECK(cudaMalloc(&this->postReLU_grad_gpu, bufferSize_byte));
cudaMemset(this->postConv_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postBN_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postReLU_data_gpu, 0, bufferSize_byte);
cudaMemset(this->postConv_grad_gpu, 0, bufferSize_byte);
cudaMemset(this->postBN_grad_gpu, 0, bufferSize_byte);
cudaMemset(this->postReLU_grad_gpu, 0, bufferSize_byte);
#endif
//workspace
CUDA_CHECK(cudaMalloc(&this->workspace, this->workspace_size_bytes));
cudaMemset(this->workspace, 0, this->workspace_size_bytes);
CUDA_CHECK(cudaMalloc(&this->workspace2, this->workspace_size_bytes));
cudaMemset(this->workspace2, 0, this->workspace_size_bytes);
//handles and descriptors
//cudnn handle
this->cudnnHandlePtr = new cudnnHandle_t;
cudaPrimalStream = new cudaStream_t;
CUDNN_CHECK(cudnnCreate(this->cudnnHandlePtr));
CUDA_CHECK(cudaStreamCreate(cudaPrimalStream));
//CUDNN_CHECK(cudnnSetStream(*cudnnHandlePtr,*cudaPrimalStream));
int extraHandle_num = 3;
for (int i = 0; i < extraHandle_num; ++i) {
cudnnHandle_t* localHandle = new cudnnHandle_t;
cudaStream_t* localStream = new cudaStream_t;
CUDNN_CHECK(cudnnCreate(localHandle));
CUDA_CHECK(cudaStreamCreate(localStream));
CUDNN_CHECK(cudnnSetStream(*localHandle, *localStream));
extraHandles.push_back(localHandle);
extraStreams.push_back(localStream);
}
//ReLU Activation Descriptor
this->ReLUDesc = new cudnnActivationDescriptor_t;
cudnn::createActivationDescriptor<Dtype>(ReLUDesc, CUDNN_ACTIVATION_RELU);
//conv_y global tensor descriptor
this->tensorDescriptor_conv_y = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y);
#if 1
cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, this->N, this->growthRate, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
#endif
//BC
int quadG_numValues = 4 * N*growthRate*H*W;
int quadG_numBytes = quadG_numValues * sizeof(Dtype);
if (useBC) {
#if 1
CUDA_CHECK(cudaMalloc(&postBN_4G, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postBN_4G_grad, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postReLU_4G, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postReLU_4G_grad, quadG_numBytes));
CUDA_CHECK(cudaMalloc(&postConv_4G_grad, quadG_numBytes));
cudaMemset(postBN_4G, 0, quadG_numBytes);
cudaMemset(postBN_4G_grad, 0, quadG_numBytes);
cudaMemset(postReLU_4G, 0, quadG_numBytes);
cudaMemset(postReLU_4G_grad, 0, quadG_numBytes);
cudaMemset(postConv_4G_grad, 0, quadG_numBytes);
if (BC_ultra_spaceEfficient) {
CUDA_CHECK(cudaMalloc(&postConv_4G, quadG_numBytes));
cudaMemset(postConv_4G, 0, quadG_numBytes);
}
#endif
quadG_tensorDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_tensorDesc);
#if 1
cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, N, 4 * growthRate, H, W, 4 * growthRate*H*W, H*W, W, 1);
#endif
quadG_paramDesc = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(quadG_paramDesc);
cudnn::setTensor4dDesc<Dtype>(quadG_paramDesc, 1, 4 * growthRate, 1, 1, 4 * growthRate, 1, 1, 1);
convBC_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(convBC_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*convBC_Descriptor, 0, 0, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
}
//per transition variables
for (int i = 0; i < this->numTransition; ++i) {
//Result Running/Saving Mean/Variance/InvVariance
int localChannel = this->initChannel + i * this->growthRate;
Dtype* local_SaveMean;
Dtype* local_SaveInvVar;
CUDA_CHECK(cudaMalloc(&local_SaveMean, localChannel * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&local_SaveInvVar, localChannel * sizeof(Dtype)));
cudaMemset(local_SaveMean, 0, localChannel * sizeof(Dtype));
cudaMemset(local_SaveInvVar, 0, localChannel * sizeof(Dtype));
this->ResultSaveMean_gpu.push_back(local_SaveMean);
this->ResultSaveInvVariance_gpu.push_back(local_SaveInvVar);
//conv_x descriptor
int conv_x_channels = this->initChannel + this->growthRate * i;
cudnnTensorDescriptor_t * wide_Desc_local_x = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(wide_Desc_local_x);
#if 1
cudnn::setTensor4dDesc<Dtype>(wide_Desc_local_x, this->N, conv_x_channels, this->H, this->W, (this->numTransition*this->growthRate + this->initChannel)*this->H*this->W, this->H*this->W, this->W, 1);
this->tensorDescriptorVec_conv_x.push_back(wide_Desc_local_x);
#endif
//filter Descriptor for Convolution
if (!useBC) {
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, conv_x_channels, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
}
else {
//3*3 convolution filter desc
cudnnFilterDescriptor_t * localFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localFilterDesc, growthRate, 4 * growthRate, 3, 3);
this->filterDescriptorVec.push_back(localFilterDesc);
//1*1 convolution filter desc
cudnnFilterDescriptor_t * localBottleneckFilterDesc = new cudnnFilterDescriptor_t;
cudnn::createFilterDesc<Dtype>(localBottleneckFilterDesc, 4 * growthRate, conv_x_channels, 1, 1);
this->BC_filterDescriptorVec.push_back(localBottleneckFilterDesc);
}
//BN channel-wise Descriptor
int channelsBefore_self = initChannel + growthRate*i;
cudnnTensorDescriptor_t * BNparam = new cudnnTensorDescriptor_t;
cudnn::createTensor4dDesc<Dtype>(BNparam);
cudnn::setTensor4dDesc<Dtype>(BNparam, 1, channelsBefore_self, 1, 1);
this->tensorDescriptor_BN.push_back(BNparam);
//Dropout Ptr and Descriptor
if (useDropout) {
size_t * sizeState = new size_t[1];
size_t * sizeReserve = new size_t[1];
CUDNN_CHECK(cudnnDropoutGetStatesSize((*cudnnHandlePtr), sizeState));
CUDNN_CHECK(cudnnDropoutGetReserveSpaceSize(*tensorDescriptor_conv_y, sizeReserve));
dropout_reserveSize.push_back(sizeReserve[0]);
dropout_stateSize.push_back(sizeState[0]);
void* localStatePtr;
void* localReservePtr;
CUDA_CHECK(cudaMalloc(&localStatePtr, sizeState[0]));
CUDA_CHECK(cudaMalloc(&localReservePtr, sizeReserve[0]));
dropout_state_gpu.push_back(localStatePtr);
dropout_reserve_gpu.push_back(localReservePtr);
cudnnDropoutDescriptor_t* localDropoutDesc = new cudnnDropoutDescriptor_t;
cudnnCreateDropoutDescriptor(localDropoutDesc);
cudnnSetDropoutDescriptor(*localDropoutDesc, *cudnnHandlePtr, dropoutAmount, localStatePtr, sizeState[0], DB_randomSeed);
dropoutDescriptorVec.push_back(localDropoutDesc);
DB_randomSeed += 1;
}
//BC
if (useBC && (!BC_ultra_spaceEfficient)) {
Dtype* local_BC4G;
CUDA_CHECK(cudaMalloc(&local_BC4G, quadG_numValues * sizeof(Dtype)));
cudaMemset(local_BC4G, 0, quadG_numBytes);
postConv_4GVec.push_back(local_BC4G);
}
if (useBC) {
Dtype* BC_tmpMeanLocal;
Dtype* BC_tmpVarLocal;
int numChannel_BC = 4 * growthRate;
int byteChannel_BC = numChannel_BC * sizeof(Dtype);
CUDA_CHECK(cudaMalloc(&BC_tmpMeanLocal, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&BC_tmpVarLocal, numChannel_BC * sizeof(Dtype)));
cudaMemset(BC_tmpMeanLocal, 0, byteChannel_BC);
cudaMemset(BC_tmpVarLocal, 0, byteChannel_BC);
BC_MeanInfVec.push_back(BC_tmpMeanLocal);
BC_VarInfVec.push_back(BC_tmpVarLocal);
Dtype* BC_localSaveMean;
Dtype* BC_localSaveInvVar;
CUDA_CHECK(cudaMalloc(&BC_localSaveMean, numChannel_BC * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&BC_localSaveInvVar, numChannel_BC * sizeof(Dtype)));
cudaMemset(BC_localSaveMean, 0, byteChannel_BC);
cudaMemset(BC_localSaveInvVar, 0, byteChannel_BC);
ResultSaveMean_BC.push_back(BC_localSaveMean);
ResultSaveInvVariance_BC.push_back(BC_localSaveInvVar);
}
}
//Conv Descriptor
this->conv_Descriptor = new cudnnConvolutionDescriptor_t;
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(this->conv_Descriptor));
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(*this->conv_Descriptor, 1, 1, 1, 1, 1, 1, CUDNN_CONVOLUTION, cudnn::dataType<Dtype>::type));
//Mean and Var tmp
int totalNumChannel = this->initChannel + this->growthRate * this->numTransition;
CUDA_CHECK(cudaMalloc(&this->Mean_tmp, totalNumChannel * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&this->Var_tmp, totalNumChannel * sizeof(Dtype)));
//Convolution Algorithms
for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
cudnnTensorDescriptor_t conv_x_desc;
cudnnTensorDescriptor_t conv_y_desc;
cudnnFilterDescriptor_t conv_w_desc;
cudnnTensorDescriptor_t BC_x_desc;
cudnnTensorDescriptor_t BC_y_desc;
cudnnFilterDescriptor_t BC_w_desc;
if (useBC) {
conv_x_desc = *(quadG_tensorDesc);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
BC_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
BC_y_desc = *(quadG_tensorDesc);
BC_w_desc = *(BC_filterDescriptorVec[transitionIdx]);
}
else {
conv_x_desc = *(tensorDescriptorVec_conv_x[transitionIdx]);
conv_y_desc = *(tensorDescriptor_conv_y);
conv_w_desc = *(filterDescriptorVec[transitionIdx]);
}
//Conv Fwd Algo
cudnnConvolutionFwdAlgo_t* conv_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_w_desc, *conv_Descriptor, conv_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_FwdAlgo_local
));
conv_FwdAlgoVec.push_back(conv_FwdAlgo_local);
//Conv Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* conv_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
conv_x_desc, conv_y_desc, *conv_Descriptor, conv_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdFilter_local
));
conv_BwdFilterAlgoVec.push_back(conv_BwdFilter_local);
//Conv Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* conv_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
conv_w_desc, conv_y_desc, *conv_Descriptor, conv_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, conv_BwdData_local
));
conv_BwdDataAlgoVec.push_back(conv_BwdData_local);
//BC Convolution
if (useBC) {
//BC Fwd Algo
cudnnConvolutionFwdAlgo_t* BC_FwdAlgo_local = new cudnnConvolutionFwdAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_w_desc, *convBC_Descriptor, BC_y_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_FwdAlgo_local
));
BC_FwdAlgoVec.push_back(BC_FwdAlgo_local);
//BC Bwd Filter Algo
cudnnConvolutionBwdFilterAlgo_t* BC_BwdFilter_local = new cudnnConvolutionBwdFilterAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardFilterAlgorithm(
*cudnnHandlePtr,
BC_x_desc, BC_y_desc, *convBC_Descriptor, BC_w_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdFilter_local
));
BC_BwdFilterAlgoVec.push_back(BC_BwdFilter_local);
//BC Bwd Data Algo
cudnnConvolutionBwdDataAlgo_t* BC_BwdData_local = new cudnnConvolutionBwdDataAlgo_t;
CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm(
*(this->extraHandles[0]),
BC_w_desc, BC_y_desc, *convBC_Descriptor, BC_x_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_bytes, BC_BwdData_local
));
BC_BwdDataAlgoVec.push_back(BC_BwdData_local);
}
}
}
// Zero-fill a device buffer holding `count` elements of type Dtype.
// cudaMemset writes bytes; an all-zero byte pattern is numeric zero for both
// integral and IEEE-754 floating-point element types, so this is a valid reset.
template <typename Dtype>
void cleanupBuffer(Dtype* ptr_gpu, int count) {
  const size_t num_bytes = static_cast<size_t>(count) * sizeof(Dtype);
  cudaMemset(ptr_gpu, 0, num_bytes);
}
template <typename Dtype>
void DenseBlockLayer<Dtype>::LoopEndCleanup_gpu() {
  // Zero every per-iteration intermediate buffer so the next forward/backward
  // pass starts from a clean slate. Total working size per buffer:
  // N x (initChannel + numTransition*growthRate) x H x W values.
  const int bufferCount =
      this->N * (this->initChannel + this->growthRate * this->numTransition) *
      this->H * this->W;
  cleanupBuffer(this->postConv_data_gpu, bufferCount);
  cleanupBuffer(this->postConv_grad_gpu, bufferCount);
  if (useDropout) {
    cleanupBuffer(this->postDropout_data_gpu, bufferCount);
    cleanupBuffer(this->postDropout_grad_gpu, bufferCount);
  }
  cleanupBuffer(this->postBN_data_gpu, bufferCount);
  cleanupBuffer(this->postBN_grad_gpu, bufferCount);
  cleanupBuffer(this->postReLU_data_gpu, bufferCount);
  cleanupBuffer(this->postReLU_grad_gpu, bufferCount);
  if (useBC) {
    // Bottleneck gradient buffers hold 4*growthRate channels.
    const int bottleneckCount = N * 4 * growthRate * H * W;
    cleanupBuffer(postConv_4G_grad, bottleneckCount);
    cleanupBuffer(postBN_4G_grad, bottleneckCount);
    cleanupBuffer(postReLU_4G_grad, bottleneckCount);
  }
}
// Re-seed every per-transition dropout descriptor.
// cudnnSetDropoutDescriptor re-initializes the RNG state buffer in place, so
// advancing DB_randomSeed per transition yields fresh, independent dropout
// masks on the next forward pass.
// Fix: removed leftover debug logging that streamed the *address* of the
// vector slot (`&(dropout_state_gpu[i])`, not the device pointer) to stdout
// on every invocation.
template <typename Dtype>
void DenseBlockLayer<Dtype>::resetDropoutDesc() {
  for (int transitionIdx = 0; transitionIdx < numTransition; ++transitionIdx) {
    CUDNN_CHECK(cudnnSetDropoutDescriptor(
      *(dropoutDescriptorVec[transitionIdx]),
      *(this->cudnnHandlePtr),
      dropoutAmount,
      dropout_state_gpu[transitionIdx],
      dropout_stateSize[transitionIdx],
      DB_randomSeed
    ));
    // Distinct seed per transition -> independent mask sequences.
    DB_randomSeed++;
  }
}
// Empty kernel launched on the (legacy) default stream between the
// filter-gradient work (cudnnHandlePtr) and the data-gradient work
// (extraHandles[0]) in Backward_gpu. NOTE(review): this acts as a
// cross-stream barrier only under legacy default-stream semantics, where the
// default stream implicitly synchronizes with all other blocking streams —
// confirm the build does not compile with --default-stream per-thread.
__global__ void sync_streams() {}
// Forward pass of the dense block. The bottom blob is strided-copied into a
// shared concatenation buffer, then each transition runs
// BN -> ReLU -> (optional bottleneck 1x1 conv + BN + ReLU) -> 3x3 conv
// (-> dropout in TRAIN), appending its growthRate new channels into the
// buffer. Finally the whole working buffer is copied to the top blob.
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  const vector<Blob<Dtype>*>& top) {
#if 0
  if (!this->gpuInited) {
    //std::cout<<"Initializing GPU local"<<std::endl;
    this->GPU_Initialization();
    this->gpuInited = true;
    //std::cout<< "GPUInited"<< std::endl;
  }
#endif
  clock_t begin_fwd = std::clock();//timer
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  //copy to bottom_data to buffer with stride
  // Each sample occupies (initChannel + numTransition*growthRate)*H*W values
  // in the working buffer; only the first initChannel*H*W are filled here.
  int chunkSize_copy_init = this->initChannel * this->H * this->W;
  int chunkStride_copy = (this->initChannel + this->growthRate * this->numTransition) * this->H * this->W;
  if ((this->phase_ == TRAIN) && useDropout) {
    gpu_copy_one_to_many<Dtype>(bottom_data, this->postDropout_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
  }
  else {
    gpu_copy_one_to_many<Dtype>(bottom_data, this->postConv_data_gpu, this->N, chunkSize_copy_init, chunkStride_copy);
  }
  int work_n = this->N * (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
  //work in the buffer, transition by transition
  for (int transitionIdx = 0; transitionIdx < this->numTransition; ++transitionIdx) {
    //BN Fwd
    Dtype* BN_x_ptr;
    if (this->phase_ == TRAIN && useDropout) {
      BN_x_ptr = this->postDropout_data_gpu;
    }
    else {
      BN_x_ptr = this->postConv_data_gpu;
    }
    Dtype* BN_y_ptr = this->postBN_data_gpu;
    // blobs_ layout used below: [0..nT) conv filters, [nT..2nT) BN scale,
    // [2nT..3nT) BN bias, [3nT..4nT) global mean, [4nT..5nT) global variance
    // (then, when useBC, the bottleneck filters/BN blobs at 5nT..10nT).
    Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
    Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
    cudnnTensorDescriptor_t * BN_paramDesc = tensorDescriptor_BN[transitionIdx];
    int numChannels = initChannel + growthRate*transitionIdx;
    Dtype* local_MeanInf = this->Mean_tmp;
    Dtype* local_VarInf = this->Var_tmp;
    if (this->phase_ == TEST) {
      CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
        *(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
        *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
        *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
        *BN_paramDesc,
        this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
        this->blobs_[2 * this->numTransition + transitionIdx]->gpu_data(),
        BN_globalMean, BN_globalVar, CUDNN_BN_MIN_EPSILON)
      );
    }
    else {
      Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
      Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
      // exponentialAverageFactor = Dtype(1): cuDNN writes the plain batch
      // statistics into local_MeanInf/local_VarInf; the running-average
      // update is then applied manually via caffe_gpu_axpby below.
      CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
        *(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
        *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
        *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
        *BN_paramDesc,
        this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
        this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
        Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
        batchMean, batchInvVar)
      );
      //update global Mean/Var manually
      //Mean:
      caffe_gpu_axpby(numChannels, EMA_decay, local_MeanInf, Dtype(1.0 - EMA_decay), BN_globalMean);
      //Var:
      caffe_gpu_axpby(numChannels, EMA_decay, local_VarInf, Dtype(1.0 - EMA_decay), BN_globalVar);
    }
    //ReLU
    Dtype* ReLU_x_ptr = this->postBN_data_gpu;
    Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
    CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
      cudnn::dataType<Dtype>::one,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
      cudnn::dataType<Dtype>::zero,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
    );
    if (useBC) {
      //Convolution 1*1 kernel
      Dtype* conv_x_4G = postReLU_data_gpu;
      Dtype* conv_y_4G;
      if (BC_ultra_spaceEfficient) {
        // Single shared 4G buffer, overwritten each transition; Backward_gpu
        // recomputes it rather than storing one copy per transition.
        conv_y_4G = postConv_4G;
      }
      else {
        conv_y_4G = postConv_4GVec[transitionIdx];
      }
      //CONV_ALGO
      CUDNN_CHECK(cudnnConvolutionForward(*(cudnnHandlePtr),
        cudnn::dataType<Dtype>::one,
        *this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
        *(BC_filterDescriptorVec[transitionIdx]),
        this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
        *(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
        workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
        *quadG_tensorDesc, conv_y_4G
      ));
      //std::cout<<"BC Fwd Conv Done"<<std::endl;
      //BN 4G Fwd
      Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
      Dtype* BN_y_4G = postBN_4G;
      Dtype* BN_BC_globalMean = this->blobs_[8 * numTransition + transitionIdx]->mutable_gpu_data();
      Dtype* BN_BC_globalVar = this->blobs_[9 * numTransition + transitionIdx]->mutable_gpu_data();
      Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
      Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
      //std::cout<<"BC Fwd BN Prepared"<<std::endl;
      if (this->phase_ == TEST) {
        CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
          *cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
          cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
          *quadG_tensorDesc, BN_x_4G,
          *quadG_tensorDesc, BN_y_4G,
          *quadG_paramDesc,
          this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
          this->blobs_[7 * numTransition + transitionIdx]->gpu_data(),
          BN_BC_globalMean, BN_BC_globalVar, CUDNN_BN_MIN_EPSILON)
        );
      }
      else {
        Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
        Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
        CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
          *cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
          cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
          *quadG_tensorDesc, BN_x_4G,
          *quadG_tensorDesc, BN_y_4G,
          *quadG_paramDesc,
          this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
          this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
          Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
          BC_batchMean, BC_batchInvVar
        ));
        // Manual EMA update for the bottleneck BN running statistics.
        caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_MeanInf, Dtype(1.0 - EMA_decay), BN_BC_globalMean);
        caffe_gpu_axpby(4 * growthRate, EMA_decay, localBC_VarInf, Dtype(1.0 - EMA_decay), BN_BC_globalVar);
      }
      //std::cout<<"BC Fwd BN Done"<<std::endl;
      //ReLU 4G Fwd
      Dtype* ReLU_BC_x = postBN_4G;
      Dtype* ReLU_BC_y = postReLU_4G;
      CUDNN_CHECK(cudnnActivationForward(*cudnnHandlePtr, *ReLUDesc,
        cudnn::dataType<Dtype>::one,
        *quadG_tensorDesc, ReLU_BC_x,
        cudnn::dataType<Dtype>::zero,
        *quadG_tensorDesc, ReLU_BC_y
      ));
      //std::cout<<"BC Fwd ReLU Done"<<std::endl;
    }
    //Convolution
    // The new growthRate channels are appended after the delayChannel
    // channels already present in the concatenation buffer.
    int delayChannel = this->initChannel + this->growthRate * transitionIdx;
    Dtype* conv_x_local;
    cudnnTensorDescriptor_t* conv_x_localDesc;
    if (useBC) {
      conv_x_local = postReLU_4G;
      conv_x_localDesc = quadG_tensorDesc;
    }
    else {
      conv_x_local = postReLU_data_gpu;
      conv_x_localDesc = tensorDescriptorVec_conv_x[transitionIdx];
    }
    Dtype* conv_y_local = this->postConv_data_gpu + delayChannel * this->H * this->W;
    //CONV_ALGO
    CUDNN_CHECK(cudnnConvolutionForward(*(this->cudnnHandlePtr),
      cudnn::dataType<Dtype>::one,
      *conv_x_localDesc, conv_x_local,
      *(filterDescriptorVec[transitionIdx]),
      this->blobs_[transitionIdx]->gpu_data(),
      *conv_Descriptor, *conv_FwdAlgoVec[transitionIdx],
      workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
      *(tensorDescriptor_conv_y), conv_y_local
    )
    );
    //Dropout
    if ((this->phase_ == TRAIN) && useDropout) {
      Dtype* dropout_x_local = postConv_data_gpu + delayChannel*H*W;
      Dtype* dropout_y_local = postDropout_data_gpu + delayChannel*H*W;
      CUDNN_CHECK(cudnnDropoutForward(*(this->cudnnHandlePtr),
        *(dropoutDescriptorVec[transitionIdx]),
        *tensorDescriptor_conv_y, dropout_x_local,
        *tensorDescriptor_conv_y, dropout_y_local,
        dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
      ));
    }
    //this->logInternal_gpu("TClogFwd",transitionIdx,true,false);
  }
  //deploy top data
  if ((this->phase_ == TRAIN) && useDropout) {
    cudaMemcpy(top[0]->mutable_gpu_data(), postDropout_data_gpu, work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
  }
  else {
    cudaMemcpy(top[0]->mutable_gpu_data(), postConv_data_gpu, work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
  }
  //clock_t end_fwd = std::clock();
  //double elapsed_fwd = double(end_fwd - begin_fwd) / CLOCKS_PER_SEC;
  //std::cout<<"elapsed fwd gpu:"<<elapsed_fwd<<std::endl;
  //this->logInternal_gpu("TClogFwd",-1,false,false);
}
// Backward pass of the dense block, walking the transitions in reverse.
// Because postBN/postReLU buffers are shared across transitions, each
// backward step first RE-RUNS the BN and ReLU forward (and the bottleneck
// forward when useBC) to reconstruct the activations for this transition
// before computing gradients. Filter-gradient and data-gradient convolutions
// run on different cuDNN handles/streams; the empty sync_streams kernel on
// the default stream is used as the barrier between them.
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
  const vector<bool>& propagate_down,
  const vector<Blob<Dtype>*>& bottom) {
#if 0
  if (!this->gpuInited) {
    this->GPU_Initialization();
    this->gpuInited = true;
  }
#endif
  //clock_t begin_bwd = std::clock();
  //assuming buffers store already computed value, always propagate down
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  int work_n = N * (initChannel + growthRate*numTransition) * H * W;
  //deploy top diff
  if (useDropout) {
    cudaMemcpy(postDropout_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
  }
  else {
    cudaMemcpy(postConv_grad_gpu, top[0]->mutable_gpu_diff(), work_n * sizeof(Dtype), cudaMemcpyDeviceToDevice);
  }
  //Backward, transition by transition
  for (int transitionIdx = this->numTransition - 1; transitionIdx >= 0; --transitionIdx) {
    int channelsBefore_self = this->initChannel + transitionIdx * this->growthRate;
    //Using BN & ReLU Fwd to generate corresponding postBN,postReLU data for this transition
    //BN Fwd
    Dtype* BN_x_ptr;
    if (useDropout) {
      BN_x_ptr = postDropout_data_gpu;
    }
    else {
      BN_x_ptr = postConv_data_gpu;
    }
    Dtype* BN_y_ptr = postBN_data_gpu;
    Dtype* BN_globalMean = this->blobs_[3 * this->numTransition + transitionIdx]->mutable_gpu_data();
    Dtype* BN_globalVar = this->blobs_[4 * this->numTransition + transitionIdx]->mutable_gpu_data();
    cudnnTensorDescriptor_t* BN_paramDesc = tensorDescriptor_BN[transitionIdx];
    Dtype* local_MeanInf = Mean_tmp;
    Dtype* local_VarInf = Var_tmp;
    Dtype* batchMean = this->ResultSaveMean_gpu[transitionIdx];
    Dtype* batchInvVar = this->ResultSaveInvVariance_gpu[transitionIdx];
    // NOTE(review): re-running ForwardTraining here overwrites
    // batchMean/batchInvVar; on identical input this should reproduce the
    // forward pass's saved statistics — confirm the input buffers are not
    // mutated between forward and backward.
    CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
      *(this->cudnnHandlePtr), CUDNN_BATCHNORM_SPATIAL,
      cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_ptr,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_y_ptr,
      *BN_paramDesc,
      this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_data(),
      this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_data(),
      Dtype(1), local_MeanInf, local_VarInf, CUDNN_BN_MIN_EPSILON,
      batchMean, batchInvVar)
    );
    /*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
    *(this->cudnnHandlePtr),CUDNN_BATCHNORM_SPATIAL,
    cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
    *(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_x_ptr,
    *(this->tensorDescriptorVec_conv_x[transitionIdx]),BN_y_ptr,
    *BN_paramDesc,
    this->blobs_[this->numTransition+transitionIdx]->gpu_data(),
    this->blobs_[2*this->numTransition+transitionIdx]->gpu_data(),
    local_MeanInf,local_VarInf,CUDNN_BN_MIN_EPSILON)
    );*/
    //ReLU Fwd
    Dtype* ReLU_x_ptr = this->postBN_data_gpu;
    Dtype* ReLU_y_ptr = this->postReLU_data_gpu;
    CUDNN_CHECK(cudnnActivationForward(*(this->cudnnHandlePtr), *ReLUDesc,
      cudnn::dataType<Dtype>::one,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_ptr,
      cudnn::dataType<Dtype>::zero,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_ptr)
    );
    if (useBC) {
      //Fwd phase
      //If BC Ultra SpaceEfficient, then need convolution Fwd 1*1
      //CONV_ALGO
      if (BC_ultra_spaceEfficient) {
        // The shared 4G buffer was overwritten by later transitions during
        // forward, so recompute the 1x1 bottleneck output for this one.
        Dtype* conv_x_4G = postReLU_data_gpu;
        Dtype* conv_y_4G = postConv_4G;
        CUDNN_CHECK(cudnnConvolutionForward(*cudnnHandlePtr,
          cudnn::dataType<Dtype>::one,
          *this->tensorDescriptorVec_conv_x[transitionIdx], conv_x_4G,
          *(BC_filterDescriptorVec[transitionIdx]),
          this->blobs_[5 * numTransition + transitionIdx]->gpu_data(),
          *(convBC_Descriptor), *BC_FwdAlgoVec[transitionIdx],
          workspace, workspace_size_bytes, cudnn::dataType<Dtype>::zero,
          *quadG_tensorDesc, conv_y_4G
        ));
      }
      //cudnnHandle_t * localFwdHandle = BC_ultra_spaceEfficient?cudnnHandlePtr:extraHandles[0];//TODO
      cudnnHandle_t * localFwdHandle = cudnnHandlePtr;
      //BC BN Fwd reconstruction
      Dtype* BN_x_4G = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
      Dtype* BN_y_4G = postBN_4G;
      Dtype* localBC_MeanInf = BC_MeanInfVec[transitionIdx];
      Dtype* localBC_VarInf = BC_VarInfVec[transitionIdx];
      Dtype* BC_batchMean = ResultSaveMean_BC[transitionIdx];
      Dtype* BC_batchInvVar = ResultSaveInvVariance_BC[transitionIdx];
      CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
        *cudnnHandlePtr, CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::zero,
        *quadG_tensorDesc, BN_x_4G,
        *quadG_tensorDesc, BN_y_4G,
        *quadG_paramDesc,
        this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_data(),
        this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_data(),
        Dtype(1), localBC_MeanInf, localBC_VarInf, CUDNN_BN_MIN_EPSILON,
        BC_batchMean, BC_batchInvVar
      ));
      /*CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
      *localFwdHandle,CUDNN_BATCHNORM_SPATIAL,
      cudnn::dataType<Dtype>::one,cudnn::dataType<Dtype>::zero,
      *quadG_tensorDesc,BN_x_4G,
      *quadG_tensorDesc,BN_y_4G,
      *quadG_paramDesc,
      this->blobs_[6*numTransition+transitionIdx]->gpu_data(),
      this->blobs_[7*numTransition+transitionIdx]->gpu_data(),
      localBC_MeanInf,localBC_VarInf,CUDNN_BN_MIN_EPSILON
      ));*/
      //BC ReLU Fwd reconstruction
      Dtype* ReLU_BC_x = postBN_4G;
      Dtype* ReLU_BC_y = postReLU_4G;
      CUDNN_CHECK(cudnnActivationForward(*localFwdHandle, *ReLUDesc,
        cudnn::dataType<Dtype>::one,
        *quadG_tensorDesc, ReLU_BC_x,
        cudnn::dataType<Dtype>::zero,
        *quadG_tensorDesc, ReLU_BC_y
      ));
    }
    //CUDA_CHECK(cudaStreamSynchronize(*(extraStreams[0])));
    //sync_streams<<<1, 1>>>();
    //Now do Bwd
    //Dropout
    if (useDropout) {
      Dtype* dropout_dy_ptr = postDropout_grad_gpu + channelsBefore_self*H*W;
      Dtype* dropout_dx_ptr = postConv_grad_gpu + channelsBefore_self*H*W;
      CUDNN_CHECK(cudnnDropoutBackward(*(this->cudnnHandlePtr),
        *(dropoutDescriptorVec[transitionIdx]),
        *tensorDescriptor_conv_y, dropout_dy_ptr,
        *tensorDescriptor_conv_y, dropout_dx_ptr,
        dropout_reserve_gpu[transitionIdx], dropout_reserveSize[transitionIdx]
      ));
    }
    //Conv
    Dtype* filterGrad_local = this->blobs_[transitionIdx]->mutable_gpu_diff();
    Dtype* filterData_local = this->blobs_[transitionIdx]->mutable_gpu_data();
    Dtype* conv_x_local = useBC ? postReLU_4G : postReLU_data_gpu;
    Dtype* conv_dy_local = postConv_grad_gpu + channelsBefore_self * this->H * this->W;
    Dtype* conv_dx_local = useBC ? postReLU_4G_grad : postReLU_grad_gpu;
    cudnnTensorDescriptor_t * conv_x_localDesc = useBC ? quadG_tensorDesc : tensorDescriptorVec_conv_x[transitionIdx];
    //Conv w.r.t. filter
    //CONV_ALGO
    // beta = one: filter gradients accumulate across the backward pass.
    CUDNN_CHECK(cudnnConvolutionBackwardFilter(*(this->cudnnHandlePtr),
      cudnn::dataType<Dtype>::one,
      *conv_x_localDesc, conv_x_local,
      *(this->tensorDescriptor_conv_y), conv_dy_local,
      *(this->conv_Descriptor), *conv_BwdFilterAlgoVec[transitionIdx],
      this->workspace, this->workspace_size_bytes,
      cudnn::dataType<Dtype>::one,
      *(this->filterDescriptorVec[transitionIdx]), filterGrad_local
    )
    );
    //Conv w.r.t. x
    //CONV_ALGO
    // Runs on the secondary handle/stream; sync_streams below joins the two.
    CUDNN_CHECK(cudnnConvolutionBackwardData(*(this->extraHandles[0]),
      cudnn::dataType<Dtype>::one,
      *(this->filterDescriptorVec[transitionIdx]), filterData_local,
      *(this->tensorDescriptor_conv_y), conv_dy_local,
      *(this->conv_Descriptor), *conv_BwdDataAlgoVec[transitionIdx],
      this->workspace2, this->workspace_size_bytes,
      cudnn::dataType<Dtype>::zero,
      *conv_x_localDesc, conv_dx_local
    )
    );
    sync_streams << <1, 1 >> > ();
    if (useBC) {
      //BC ReLU Bwd
      Dtype* BC_ReLU_y_local = postReLU_4G;
      Dtype* BC_ReLU_dy_local = postReLU_4G_grad;
      Dtype* BC_ReLU_x_local = postBN_4G;
      Dtype* BC_ReLU_dx_local = postBN_4G_grad;
      CUDNN_CHECK(cudnnActivationBackward(*cudnnHandlePtr, *ReLUDesc,
        cudnn::dataType<Dtype>::one,
        *quadG_tensorDesc, BC_ReLU_y_local,
        *quadG_tensorDesc, BC_ReLU_dy_local,
        *quadG_tensorDesc, BC_ReLU_x_local,
        cudnn::dataType<Dtype>::zero,
        *quadG_tensorDesc, BC_ReLU_dx_local
      ));
      //BC BN Bwd
      Dtype* BC_BN_x_local = BC_ultra_spaceEfficient ? postConv_4G : postConv_4GVec[transitionIdx];
      Dtype* BC_BN_dx_local = postConv_4G_grad;
      Dtype* BC_BN_dy_local = postBN_4G_grad;
      Dtype* BC_saveMean_local = ResultSaveMean_BC[transitionIdx];
      Dtype* BC_saveInvVar_local = ResultSaveInvVariance_BC[transitionIdx];
      // CUDNN_CHECK(
      // NOTE(review): the returned status `sta` is never inspected, so a
      // failure here goes unnoticed — consider restoring the CUDNN_CHECK
      // wrapper that was commented out.
      cudnnStatus_t sta =
        cudnnBatchNormalizationBackward(
          *cudnnHandlePtr,
          CUDNN_BATCHNORM_SPATIAL,
          cudnn::dataType<Dtype>::one,
          cudnn::dataType<Dtype>::zero,
#if CUDNN_VERSION >= 4005
          cudnn::dataType<Dtype>::one,
          cudnn::dataType<Dtype>::one,
#endif
          *quadG_tensorDesc,
          BC_BN_x_local,
          *quadG_tensorDesc,
          BC_BN_dy_local,
          *quadG_tensorDesc,
          BC_BN_dx_local,
          *quadG_paramDesc,
          this->blobs_[6 * numTransition + transitionIdx]->gpu_data(),
          this->blobs_[6 * numTransition + transitionIdx]->mutable_gpu_diff(),
          this->blobs_[7 * numTransition + transitionIdx]->mutable_gpu_diff(),
          CUDNN_BN_MIN_EPSILON,
          BC_saveMean_local,
          BC_saveInvVar_local
        );
      //);
      //BC Conv 1*1 Bwd
      Dtype* BC_filterGrad = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_diff();
      Dtype* BC_filterData = this->blobs_[5 * numTransition + transitionIdx]->mutable_gpu_data();
      Dtype* BC_conv_x_local = postReLU_data_gpu;
      Dtype* BC_conv_dy_local = postConv_4G_grad;
      Dtype* BC_conv_dx_local = postReLU_grad_gpu;
      //Conv Bwd w.r.t. filter
      //CONV_ALGO
      CUDNN_CHECK(cudnnConvolutionBackwardFilter(*cudnnHandlePtr,
        cudnn::dataType<Dtype>::one,
        *tensorDescriptorVec_conv_x[transitionIdx], BC_conv_x_local,
        *quadG_tensorDesc, BC_conv_dy_local,
        *convBC_Descriptor, *BC_BwdFilterAlgoVec[transitionIdx],
        workspace, workspace_size_bytes,
        cudnn::dataType<Dtype>::one,
        *BC_filterDescriptorVec[transitionIdx], BC_filterGrad
      ));
      //Conv Bwd w.r.t. data
      //CONV_ALGO
      CUDNN_CHECK(cudnnConvolutionBackwardData(*(extraHandles[0]),
        cudnn::dataType<Dtype>::one,
        *BC_filterDescriptorVec[transitionIdx], BC_filterData,
        *quadG_tensorDesc, BC_conv_dy_local,
        *convBC_Descriptor, *BC_BwdDataAlgoVec[transitionIdx],
        workspace2, workspace_size_bytes,
        cudnn::dataType<Dtype>::zero,
        *tensorDescriptorVec_conv_x[transitionIdx], BC_conv_dx_local
      ));
      sync_streams << <1, 1 >> > ();
    }
    //ReLU Bwd
    Dtype* ReLU_y_local = postReLU_data_gpu;
    Dtype* ReLU_x_local = postBN_data_gpu;
    Dtype* ReLU_dy_local = postReLU_grad_gpu;
    Dtype* ReLU_dx_local = postBN_grad_gpu;
    CUDNN_CHECK(cudnnActivationBackward(*(this->cudnnHandlePtr), *ReLUDesc,
      cudnn::dataType<Dtype>::one,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_y_local,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dy_local,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_x_local,
      cudnn::dataType<Dtype>::zero,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), ReLU_dx_local)
    );
    //BN Bwd
    Dtype* BN_x_local;
    Dtype* BN_dx_local;
    if (useDropout) {
      BN_x_local = this->postDropout_data_gpu;
      BN_dx_local = this->postDropout_grad_gpu;
    }
    else {
      BN_x_local = this->postConv_data_gpu;
      BN_dx_local = this->postConv_grad_gpu;
    }
    Dtype* BN_dy_local = this->postBN_grad_gpu;
    Dtype* saveMean_local = this->ResultSaveMean_gpu[transitionIdx];
    Dtype* saveInvVar_local = this->ResultSaveInvVariance_gpu[transitionIdx];
    //CUDNN_CHECK(
    // NOTE(review): status also unchecked here (CUDNN_CHECK commented out).
    // beta = one for dx: the BN input gradient accumulates into the shared
    // concatenation gradient buffer used by earlier transitions.
    cudnnBatchNormalizationBackward(*(this->cudnnHandlePtr),
      CUDNN_BATCHNORM_SPATIAL,
      cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#if CUDNN_VERSION >= 4005
      cudnn::dataType<Dtype>::one, cudnn::dataType<Dtype>::one,
#endif
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_x_local,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dy_local,
      *(this->tensorDescriptorVec_conv_x[transitionIdx]), BN_dx_local,
      *BN_paramDesc,
      this->blobs_[this->numTransition + transitionIdx]->gpu_data(),
      this->blobs_[this->numTransition + transitionIdx]->mutable_gpu_diff(),
      this->blobs_[2 * this->numTransition + transitionIdx]->mutable_gpu_diff(),
      CUDNN_BN_MIN_EPSILON, saveMean_local, saveInvVar_local
    );
    //);
    //this->logInternal_gpu("TClogBwd",transitionIdx,true,false);
    //this->logInternal_gpu("TClogBwd",transitionIdx,true,true);
  }
  //deploy buffer to bottom diff
  //this->logInternal_gpu("TClogBwd",-1,false,false);
  int chunkSize_copy_init = this->initChannel * this->H * this->W;
  int chunkStride_copy = (this->initChannel + this->numTransition * this->growthRate) * this->H * this->W;
  if (useDropout) {
    gpu_copy_many_to_one(postDropout_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
    //this->resetDropoutDesc();
  }
  else {
    gpu_copy_many_to_one(postConv_grad_gpu, bottom_diff, this->N, chunkSize_copy_init, chunkStride_copy);
  }
  int numTotalChannels = initChannel + growthRate*numTransition;
  cleanupBuffer(this->Mean_tmp, numTotalChannels);
  cleanupBuffer(this->Var_tmp, numTotalChannels);
  this->LoopEndCleanup_gpu();
  //clock_t end_bwd = std::clock();
  //double elapsed_bwd = double(end_bwd - begin_bwd) / CLOCKS_PER_SEC;
  //std::cout<<"elapsed bwd time:"<<elapsed_bwd<<std::endl;
}
// Public entry point (for tests/benchmarks) that simply delegates to the
// protected Forward_gpu implementation.
template <typename Dtype>
void DenseBlockLayer<Dtype>::Forward_gpu_public(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
this->Forward_gpu(bottom, top);
}
// Public entry point (for tests/benchmarks) that simply delegates to the
// protected Backward_gpu implementation.
template <typename Dtype>
void DenseBlockLayer<Dtype>::Backward_gpu_public(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
this->Backward_gpu(top, propagate_down, bottom);
}
// Free-then-reallocate a device buffer of `size` bytes and zero-fill it.
// *p may be null on entry (cudaFree(nullptr) is a no-op); on return *p points
// to freshly allocated, zeroed device memory.
// Fix: the cudaFree/cudaMemset return codes were silently discarded — a
// failure would leave a sticky error that surfaces at an unrelated later
// call. All three runtime calls are now checked, matching the CUDA_CHECK
// convention used throughout this file.
template <typename Dtype>
void ReallocCudaMem(Dtype** p, int size)
{
  CUDA_CHECK(cudaFree(*p));
  *p = 0;
  CUDA_CHECK(cudaMalloc(p, size));
  CUDA_CHECK(cudaMemset(*p, 0, size));
}
// Resize working buffers and cuDNN descriptors for a new input shape
// (h, w, newn), given the previous shape (oldh, oldw, oldn). Buffers are only
// reallocated when the required byte count grows; descriptors are always
// rewritten for the new shape.
// Fix: quadG_numValues_old was computed with `newn` instead of `oldn`, so
// when the batch size grew, the previous 4G-buffer capacity was overestimated
// and the required reallocation could be skipped, leaving the bottleneck
// buffers too small (out-of-bounds device writes).
// NOTE(review): sizes accumulate in int and could overflow for very large
// N*C*H*W; interface kept unchanged here — consider size_t internally.
template <typename Dtype>
void DenseBlockLayer<Dtype>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn)
{
  int bufferSize_byte_old = oldn*(this->initChannel + this->growthRate*this->numTransition)*oldh*oldw * sizeof(Dtype);
  int bufferSize_byte_new = newn*(this->initChannel + this->growthRate*this->numTransition)*h*w * sizeof(Dtype);
  if (bufferSize_byte_new > bufferSize_byte_old)
  {
    int bufferSize_byte = bufferSize_byte_new;
    ReallocCudaMem(&this->postConv_data_gpu, bufferSize_byte);
    if (useDropout) {
      ReallocCudaMem(&this->postDropout_data_gpu, bufferSize_byte);
    }
    ReallocCudaMem(&this->postBN_data_gpu, bufferSize_byte);
    ReallocCudaMem(&this->postReLU_data_gpu, bufferSize_byte);
    ReallocCudaMem(&this->postConv_grad_gpu, bufferSize_byte);
    if (useDropout)
    {
      ReallocCudaMem(&this->postDropout_grad_gpu, bufferSize_byte);
    }
    ReallocCudaMem(&this->postBN_grad_gpu, bufferSize_byte);
    ReallocCudaMem(&this->postReLU_grad_gpu, bufferSize_byte);
  }
  cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptor_conv_y, newn, this->growthRate, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
  // Old capacity must be computed from the OLD batch size (bug fix: was newn).
  int quadG_numValues_old = 4 * oldn*growthRate*oldh*oldw;
  int quadG_numValues = 4 * newn*growthRate*h*w;
  int quadG_numBytes = quadG_numValues * sizeof(Dtype);
  if (quadG_numValues > quadG_numValues_old)
  {
    if (useBC)
    {
      ReallocCudaMem(&postBN_4G, quadG_numBytes);
      ReallocCudaMem(&postBN_4G_grad, quadG_numBytes);
      ReallocCudaMem(&postReLU_4G, quadG_numBytes);
      ReallocCudaMem(&postReLU_4G_grad, quadG_numBytes);
      ReallocCudaMem(&postConv_4G_grad, quadG_numBytes);
      if (BC_ultra_spaceEfficient) {
        ReallocCudaMem(&postConv_4G, quadG_numBytes);
      }
    }
  }
  if (useBC)
  {
    cudnn::setTensor4dDesc<Dtype>(quadG_tensorDesc, newn, 4 * growthRate, h, w, 4 * growthRate*h*w, h*w, w, 1);
  }
  for (int i = 0; i < this->numTransition; ++i)
  {
    int conv_x_channels = this->initChannel + this->growthRate * i;
    cudnn::setTensor4dDesc<Dtype>(this->tensorDescriptorVec_conv_x[i], newn, conv_x_channels, h, w, (this->numTransition*this->growthRate + this->initChannel)*h*w, h*w, w, 1);
  }
}
// Explicit instantiations for the two precisions Caffe builds (float/double),
// plus the standard layer macro for Forward_gpu/Backward_gpu.
template void DenseBlockLayer<float>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<double>::reshape_gpu_data(int oldh, int oldw, int oldn, int h, int w, int newn);
template void DenseBlockLayer<float>::GPU_Initialization();
template void DenseBlockLayer<double>::GPU_Initialization();
INSTANTIATE_LAYER_GPU_FUNCS(DenseBlockLayer);
} // namespace caffe
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/unpooling.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/variable.hpp>
namespace nbla {
// Forward 1D unpooling (nearest-neighbor upsampling along one spatial axis).
// Each thread owns one index of the output inner slice (osize elements); the
// do/while then walks the remaining `outer_size` outer elements by advancing
// src/dst by the per-element inner sizes. `outer_size` is a per-thread copy,
// so decrementing it is safe.
// channel_last == true : inner slice is (W, C); false: inner slice is W only.
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_forward_1d(const int osize, T *dst, const T *src,
                            int outer_size, const int iinner_size,
                            const int oinner_size, const int istride,
                            const int ostride, const int kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Decompose the flat output index into (spatial, channel).
    auto ond_index = device_flat_to_2d(oidx, ostride);
    auto oc = channel_last ? ond_index.y : 0;
    auto ow = ond_index.x;
    // Every input element is replicated `kernel` times in the output.
    auto iw = ow / kernel;
    auto ind_index = make_int2(iw, oc);
    auto iidx = device_2d_to_flat(ind_index, istride);
    do {
      dst[oidx] = src[iidx];
      src += iinner_size;
      dst += oinner_size;
    } while (--outer_size);
  }
}
// Forward 2D unpooling; same scheme as the 1D kernel, with an (H, W) spatial
// decomposition and a per-axis kernel size (kernel.x for H, kernel.y for W).
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_forward_2d(const int osize, T *dst, const T *src,
                            int outer_size, const int iinner_size,
                            const int oinner_size, const int2 istride,
                            const int2 ostride, const int2 kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Flat output index -> (h, w[, c]) using the output strides.
    auto ond_index = device_flat_to_3d(oidx, ostride);
    auto oc = channel_last ? ond_index.z : 0;
    auto oh = ond_index.x;
    auto ow = ond_index.y;
    // Map each output pixel back to the input pixel it replicates.
    auto ih = oh / kernel.x;
    auto iw = ow / kernel.y;
    auto ind_index = make_int3(ih, iw, oc);
    auto iidx = device_3d_to_flat(ind_index, istride);
    // Walk all outer (batch/channel) elements for this inner position.
    do {
      dst[oidx] = src[iidx];
      src += iinner_size;
      dst += oinner_size;
    } while (--outer_size);
  }
}
// Forward 3D unpooling; same scheme as the 1D/2D kernels, with a (D, H, W)
// spatial decomposition and per-axis kernel sizes (x=D, y=H, z=W).
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_forward_3d(const int osize, T *dst, const T *src,
                            int outer_size, const int iinner_size,
                            const int oinner_size, const int3 istride,
                            const int3 ostride, const int3 kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Flat output index -> (d, h, w[, c]) using the output strides.
    auto ond_index = device_flat_to_4d(oidx, ostride);
    auto oc = channel_last ? ond_index.w : 0;
    auto od = ond_index.x;
    auto oh = ond_index.y;
    auto ow = ond_index.z;
    // Map each output voxel back to the input voxel it replicates.
    auto id = od / kernel.x;
    auto ih = oh / kernel.y;
    auto iw = ow / kernel.z;
    auto ind_index = make_int4(id, ih, iw, oc);
    auto iidx = device_4d_to_flat(ind_index, istride);
    // Walk all outer (batch/channel) elements for this inner position.
    do {
      dst[oidx] = src[iidx];
      src += iinner_size;
      dst += oinner_size;
    } while (--outer_size);
  }
}
// Backward 1D unpooling: scatter-add each output gradient (src) into the
// input-gradient buffer (dst). atomic_add is required because `kernel`
// different output positions map onto the same input position, and threads
// collide on it. Note src/dst roles are swapped w.r.t. the forward kernel.
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_backward_1d(const int osize, T *dst, const T *src,
                             int outer_size, const int iinner_size,
                             const int oinner_size, const int istride,
                             const int ostride, const int kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Same index mapping as the forward pass.
    auto ond_index = device_flat_to_2d(oidx, ostride);
    auto oc = channel_last ? ond_index.y : 0;
    auto ow = ond_index.x;
    auto iw = ow / kernel;
    auto ind_index = make_int2(iw, oc);
    auto iidx = device_2d_to_flat(ind_index, istride);
    do {
      atomic_add(dst + iidx, src[oidx]);
      src += oinner_size;
      dst += iinner_size;
    } while (--outer_size);
  }
}
// Backward 2D unpooling: scatter-add output gradients (src) into the input
// gradient (dst). kernel.x*kernel.y output positions collide per input
// position, hence atomic_add.
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_backward_2d(const int osize, T *dst, const T *src,
                             int outer_size, const int iinner_size,
                             const int oinner_size, const int2 istride,
                             const int2 ostride, const int2 kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Same index mapping as the forward pass.
    auto ond_index = device_flat_to_3d(oidx, ostride);
    auto oc = channel_last ? ond_index.z : 0;
    auto oh = ond_index.x;
    auto ow = ond_index.y;
    auto ih = oh / kernel.x;
    auto iw = ow / kernel.y;
    auto ind_index = make_int3(ih, iw, oc);
    auto iidx = device_3d_to_flat(ind_index, istride);
    do {
      atomic_add(dst + iidx, src[oidx]);
      src += oinner_size;
      dst += iinner_size;
    } while (--outer_size);
  }
}
// Backward 3D unpooling: scatter-add output gradients (src) into the input
// gradient (dst). kernel.x*kernel.y*kernel.z output positions collide per
// input position, hence atomic_add.
template <typename T, bool channel_last = false>
__global__ void
kernel_unpooling_backward_3d(const int osize, T *dst, const T *src,
                             int outer_size, const int iinner_size,
                             const int oinner_size, const int3 istride,
                             const int3 ostride, const int3 kernel) {
  NBLA_CUDA_KERNEL_LOOP(oidx, osize) {
    // Same index mapping as the forward pass.
    auto ond_index = device_flat_to_4d(oidx, ostride);
    auto oc = channel_last ? ond_index.w : 0;
    auto od = ond_index.x;
    auto oh = ond_index.y;
    auto ow = ond_index.z;
    auto id = od / kernel.x;
    auto ih = oh / kernel.y;
    auto iw = ow / kernel.z;
    auto ind_index = make_int4(id, ih, iw, oc);
    auto iidx = device_4d_to_flat(ind_index, istride);
    do {
      atomic_add(dst + iidx, src[oidx]);
      src += oinner_size;
      dst += iinner_size;
    } while (--outer_size);
  }
}
// Forward pass: dispatches to the 1D/2D/3D unpooling kernel based on the
// kernel rank. For each rank it derives the spatial (and, for channel-last,
// channel) extents from the trailing axes of the input/output shapes, builds
// flat strides for the inner slice, and launches one thread per output inner
// element; all leading axes are folded into `outer_size`.
template <typename T>
void UnpoolingCuda<T>::forward_impl(const Variables &inputs,
                                    const Variables &outputs) {
  cuda_set_device(this->device_);
  auto x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
  auto y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
  auto size = outputs[0]->size();
  auto ndim = inputs[0]->ndim();
  auto kdim = this->kernel_.size();
  auto ishape = inputs[0]->shape();
  auto oshape = outputs[0]->shape();
  if (kdim == 1) {
    // Trailing axes are (W, C) for channel-last, (C, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    // Inner slice = one outer element's worth of output values.
    auto osize = this->channel_last_ ? oc * ow : ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * iw : iw;
    auto oinner_size = osize;
    auto istride = this->channel_last_ ? (ic) : 1;
    auto ostride = this->channel_last_ ? (oc) : 1;
    auto kernel = this->kernel_[0];
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_forward_1d<Tc, true>
                           : kernel_unpooling_forward_1d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, y, x, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else if (kdim == 2) {
    // Trailing axes are (H, W, C) for channel-last, (C, H, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 3];
    auto oh = this->channel_last_ ? oshape[ndim - 3] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 3];
    auto ih = this->channel_last_ ? ishape[ndim - 3] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    auto osize = this->channel_last_ ? oc * oh * ow : oh * ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * ih * iw : ih * iw;
    auto oinner_size = osize;
    auto istride =
        this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
    auto ostride =
        this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
    auto kernel = make_int2(this->kernel_[0], this->kernel_[1]);
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_forward_2d<Tc, true>
                           : kernel_unpooling_forward_2d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, y, x, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else if (kdim == 3) {
    // Trailing axes are (D, H, W, C) for channel-last, (C, D, H, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 4];
    auto od = this->channel_last_ ? oshape[ndim - 4] : oshape[ndim - 3];
    auto oh = this->channel_last_ ? oshape[ndim - 3] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 4];
    auto id = this->channel_last_ ? ishape[ndim - 4] : ishape[ndim - 3];
    auto ih = this->channel_last_ ? ishape[ndim - 3] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    auto osize = this->channel_last_ ? oc * od * oh * ow : od * oh * ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * id * ih * iw : id * ih * iw;
    auto oinner_size = osize;
    auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic)
                                       : make_int3(ih * iw, iw, 1);
    auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc)
                                       : make_int3(oh * ow, ow, 1);
    auto kernel =
        make_int3(this->kernel_[0], this->kernel_[1], this->kernel_[2]);
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_forward_3d<Tc, true>
                           : kernel_unpooling_forward_3d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, y, x, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else {
    NBLA_ERROR(error_code::value, "1D, 2D, 3D unpooling are supported.");
  }
}
// Backward pass: mirrors forward_impl's shape/stride setup, but launches the
// scatter-add backward kernels with dx as destination and dy as source.
// NOTE(review): `accum[0]` is never consulted here — the kernels always
// atomic_add into dx (cast with write_only=false), i.e. they accumulate onto
// whatever dx already holds. Verify that the caller zeroes the input gradient
// when accum[0] is false; otherwise stale gradients would leak in.
template <typename T>
void UnpoolingCuda<T>::backward_impl(const Variables &inputs,
                                     const Variables &outputs,
                                     const vector<bool> &propagate_down,
                                     const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  cuda_set_device(this->device_);
  auto dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, false);
  auto dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
  auto size = outputs[0]->size();
  auto ndim = inputs[0]->ndim();
  auto kdim = this->kernel_.size();
  auto ishape = inputs[0]->shape();
  auto oshape = outputs[0]->shape();
  if (kdim == 1) {
    // Trailing axes are (W, C) for channel-last, (C, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    auto osize = this->channel_last_ ? oc * ow : ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * iw : iw;
    auto oinner_size = osize;
    auto istride = this->channel_last_ ? (ic) : 1;
    auto ostride = this->channel_last_ ? (oc) : 1;
    auto kernel = this->kernel_[0];
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_backward_1d<Tc, true>
                           : kernel_unpooling_backward_1d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, dx, dy, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else if (kdim == 2) {
    // Trailing axes are (H, W, C) for channel-last, (C, H, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 3];
    auto oh = this->channel_last_ ? oshape[ndim - 3] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 3];
    auto ih = this->channel_last_ ? ishape[ndim - 3] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    auto osize = this->channel_last_ ? oc * oh * ow : oh * ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * ih * iw : ih * iw;
    auto oinner_size = osize;
    auto istride =
        this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
    auto ostride =
        this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
    auto kernel = make_int2(this->kernel_[0], this->kernel_[1]);
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_backward_2d<Tc, true>
                           : kernel_unpooling_backward_2d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, dx, dy, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else if (kdim == 3) {
    // Trailing axes are (D, H, W, C) for channel-last, (C, D, H, W) otherwise.
    auto oc = this->channel_last_ ? oshape[ndim - 1] : oshape[ndim - 4];
    auto od = this->channel_last_ ? oshape[ndim - 4] : oshape[ndim - 3];
    auto oh = this->channel_last_ ? oshape[ndim - 3] : oshape[ndim - 2];
    auto ow = this->channel_last_ ? oshape[ndim - 2] : oshape[ndim - 1];
    auto ic = this->channel_last_ ? ishape[ndim - 1] : ishape[ndim - 4];
    auto id = this->channel_last_ ? ishape[ndim - 4] : ishape[ndim - 3];
    auto ih = this->channel_last_ ? ishape[ndim - 3] : ishape[ndim - 2];
    auto iw = this->channel_last_ ? ishape[ndim - 2] : ishape[ndim - 1];
    auto osize = this->channel_last_ ? oc * od * oh * ow : od * oh * ow;
    auto outer_size = size / osize;
    auto iinner_size = this->channel_last_ ? ic * id * ih * iw : id * ih * iw;
    auto oinner_size = osize;
    auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic)
                                       : make_int3(ih * iw, iw, 1);
    auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc)
                                       : make_int3(oh * ow, ow, 1);
    auto kernel =
        make_int3(this->kernel_[0], this->kernel_[1], this->kernel_[2]);
    auto cuda_kernel = this->channel_last_
                           ? kernel_unpooling_backward_3d<Tc, true>
                           : kernel_unpooling_backward_3d<Tc, false>;
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(cuda_kernel, osize, dx, dy, outer_size,
                                   iinner_size, oinner_size, istride, ostride,
                                   kernel);
  } else {
    NBLA_ERROR(error_code::value, "Only 1D, 2D, 3D unpooling are supported.");
  }
}
} // namespace nbla
#include <indexing/construction/detail/utilities.cuh>
#include <utility/point_to_nearest_polyline.cuh>
#include <cuspatial/error.hpp>
#include <cuspatial/spatial_join.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <limits>
#include <memory>
namespace cuspatial {
namespace detail {
namespace {
// For the polyline/quadrant pair at `poly_index`, returns:
//   first  - the rank of this pair among all pairs that share the same
//            quadrant (i.e. the same quad_offset value), and
//   second - the total number of pairs in that quadrant.
// Implemented as an equal-range search around position `poly_index` via
// sequential lower/upper_bound — assumes `quad_offsets` is sorted (it comes
// from the quadtree join output; TODO confirm at the call site).
template <typename QuadOffsetsIter>
inline __device__ std::pair<uint32_t, uint32_t> get_local_poly_index_and_count(
  uint32_t const poly_index, QuadOffsetsIter quad_offsets, QuadOffsetsIter quad_offsets_end)
{
  auto const lhs_end     = quad_offsets;
  auto const rhs_end     = quad_offsets_end;
  auto const quad_offset = quad_offsets[poly_index];
  auto const lhs =
    thrust::lower_bound(thrust::seq, lhs_end, quad_offsets + poly_index, quad_offset);
  auto const rhs =
    thrust::upper_bound(thrust::seq, quad_offsets + poly_index, rhs_end, quad_offset);
  return std::make_pair(
    // local_poly_index
    static_cast<uint32_t>(thrust::distance(lhs, quad_offsets + poly_index)),
    // num_polys_in_quad
    static_cast<uint32_t>(thrust::distance(lhs, rhs)));
}
// Maps a flat enumeration index over all (quadrant-poly pair, point) tests
// into a (point index, polyline-pair index) pair, "transposed" so that
// consecutive global indices yield values grouped by point rather than by
// polyline. This ordering is what allows the caller to min-reduce distances
// per point with a single reduce_by_key and no materialized sort.
template <typename QuadOffsetsIter, typename QuadLengthsIter>
inline __device__ std::pair<uint32_t, uint32_t> get_transposed_point_and_pair_index(
  uint32_t const global_index,
  uint32_t const* point_offsets,
  uint32_t const* point_offsets_end,
  QuadOffsetsIter quad_offsets,
  QuadOffsetsIter quad_offsets_end,
  QuadLengthsIter quad_lengths)
{
  // Which quadrant/poly pair this test belongs to, and the point's rank
  // within that quadrant.
  // uint32_t quad_poly_index, local_point_index;
  auto const [quad_poly_index, local_point_index] =
    get_quad_poly_and_local_point_indices(global_index, point_offsets, point_offsets_end);
  // This pair's rank among the pairs sharing its quadrant, and how many
  // pairs that quadrant has.
  // uint32_t local_poly_index, num_polys_in_quad;
  auto const [local_poly_index, num_polys_in_quad] =
    get_local_poly_index_and_count(quad_poly_index, quad_offsets, quad_offsets_end);
  auto const quad_point_offset      = quad_offsets[quad_poly_index];
  auto const num_points_in_quad     = quad_lengths[quad_poly_index];
  auto const quad_poly_offset       = quad_poly_index - local_poly_index;
  auto const quad_poly_point_start  = local_poly_index * num_points_in_quad;
  auto const transposed_point_start = quad_poly_point_start + local_point_index;
  // div/mod transpose the (poly, point) matrix of this quadrant.
  return std::make_pair(
    // transposed point index
    (transposed_point_start / num_polys_in_quad) + quad_point_offset,
    // transposed polyline index
    (transposed_point_start % num_polys_in_quad) + quad_poly_offset);
}
// Device functor: for a flat enumeration index over all point/polyline tests,
// produces a (point index, polyline index, min distance) triple. Designed to
// be wrapped in a transform_iterator so the triples are generated on demand,
// in point-major order, without materializing any intermediate buffer.
template <typename T, typename PointIter, typename QuadOffsetsIter, typename QuadLengthsIter>
struct compute_point_poly_indices_and_distances {
  PointIter points;                             // (x, y) zipped, pre-sorted by quadtree order
  uint32_t const* point_offsets;                // local per-pair point offsets (inclusive scan)
  uint32_t const* point_offsets_end;
  QuadOffsetsIter quad_offsets;                 // per-pair quadrant point offsets
  QuadOffsetsIter quad_offsets_end;
  QuadLengthsIter quad_lengths;                 // per-pair quadrant point counts
  uint32_t const* poly_indices;                 // pair index -> polyline id
  cudf::column_device_view const poly_offsets;
  cudf::column_device_view const poly_points_x;
  cudf::column_device_view const poly_points_y;
  inline __device__ thrust::tuple<uint32_t, uint32_t, T> operator()(uint32_t const global_index)
  {
    // Recover the point and pair this enumeration index refers to.
    auto const [point_id, poly_id] = get_transposed_point_and_pair_index(
      global_index, point_offsets, point_offsets_end, quad_offsets, quad_offsets_end, quad_lengths);
    T x{}, y{};
    thrust::tie(x, y)   = points[point_id];
    auto const poly_idx = poly_indices[poly_id];
    auto const distance =
      point_to_poly_line_distance<T>(x, y, poly_idx, poly_offsets, poly_points_x, poly_points_y);
    return thrust::make_tuple(point_id, poly_idx, distance);
  }
};
// Type-dispatched implementation of quadtree_point_to_nearest_polyline.
// Returns a 3-column table: (point index, nearest polyline index, distance).
struct compute_quadtree_point_to_nearest_polyline {
  // Non-floating-point coordinate types are rejected.
  template <typename T, typename... Args>
  std::enable_if_t<!std::is_floating_point<T>::value, std::unique_ptr<cudf::table>> operator()(
    Args&&...)
  {
    CUDF_FAIL("Non-floating point operation is not supported");
  }
  template <typename T>
  std::enable_if_t<std::is_floating_point<T>::value, std::unique_ptr<cudf::table>> operator()(
    cudf::table_view const& poly_quad_pairs,
    cudf::table_view const& quadtree,
    cudf::column_view const& point_indices,
    cudf::column_view const& point_x,
    cudf::column_view const& point_y,
    cudf::column_view const& poly_offsets,
    cudf::column_view const& poly_points_x,
    cudf::column_view const& poly_points_y,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr)
  {
    // Wrapped in an IIFE so `local_point_offsets` is freed on return
    auto const [point_idxs, poly_idxs, distances, num_distances] = [&]() {
      auto num_poly_quad_pairs = poly_quad_pairs.num_rows();
      auto poly_indices        = poly_quad_pairs.column(0).begin<uint32_t>();
      // Gather per-pair quadrant lengths/offsets (quadtree cols 3 and 4)
      // through the pair's quadrant index (pairs col 1).
      auto quad_lengths        = thrust::make_permutation_iterator(
        quadtree.column(3).begin<uint32_t>(), poly_quad_pairs.column(1).begin<uint32_t>());
      auto quad_offsets = thrust::make_permutation_iterator(
        quadtree.column(4).begin<uint32_t>(), poly_quad_pairs.column(1).begin<uint32_t>());
      // Compute a "local" set of zero-based point offsets from number of points in each quadrant
      // Use `num_poly_quad_pairs + 1` as the length so that the last element produced by
      // `inclusive_scan` is the total number of points to be tested against any polyline.
      rmm::device_uvector<uint32_t> local_point_offsets(num_poly_quad_pairs + 1, stream);
      thrust::inclusive_scan(rmm::exec_policy(stream),
                             quad_lengths,
                             quad_lengths + num_poly_quad_pairs,
                             local_point_offsets.begin() + 1);
      // Ensure local point offsets starts at 0
      uint32_t init{0};
      local_point_offsets.set_element_async(0, init, stream);
      // The last element is the total number of points to test against any polyline.
      auto num_point_poly_pairs = local_point_offsets.back_element(stream);
      // Enumerate the point X/Ys using the sorted `point_indices` (from quadtree construction)
      auto point_xys_iter = thrust::make_permutation_iterator(
        thrust::make_zip_iterator(point_x.begin<T>(), point_y.begin<T>()),
        point_indices.begin<uint32_t>());
      //
      // Compute the combination of point and polyline index pairs. For each polyline/quadrant pair,
      // enumerate pairs of (point_index, polyline_index) for each point in each quadrant, and
      // calculate the minimum distance between each point/poly pair.
      //
      // In Python pseudocode:
      // ```
      // pp_pairs_and_dist = []
      // for polyline, quadrant in pq_pairs:
      //   for point in quadrant:
      //     pp_pairs_and_dist.append((point, polyline, min_distance(point, polyline)))
      // ```
      //
      // However, the above psuedocode produces values in an order such that the distance
      // from a point to each polyline cannot be reduced with `thrust::reduce_by_key`:
      // ```
      // point | polyline | distance
      //   0   |    0     |   10.0
      //   1   |    0     |   30.0
      //   2   |    0     |   20.0
      //   0   |    1     |   30.0
      //   1   |    1     |   20.0
      //   2   |    1     |   10.0
      // ```
      //
      // In order to use `thrust::reduce_by_key` to compute the minimum distance from a point to
      // the polylines in its quadrant, the above table needs to be sorted by `point` instead of
      // `polyline`:
      // ```
      // point | polyline | distance
      //   0   |    0     |   10.0
      //   0   |    1     |   30.0
      //   1   |    0     |   30.0
      //   1   |    1     |   20.0
      //   2   |    0     |   20.0
      //   2   |    1     |   10.0
      // ```
      //
      // A naive approach would be to allocate memory for the above three columns, sort the
      // columns by `point`, then use `thrust::reduce_by_key` to compute the min distances.
      //
      // The sizes of the intermediate buffers required can easily grow beyond available
      // device memory, so a better approach is to use a Thrust iterator to yield values
      // in the sorted order on demand instead, which is what we're doing here.
      //
      auto all_point_poly_indices_and_distances = thrust::make_transform_iterator(
        thrust::make_counting_iterator(0u),
        compute_point_poly_indices_and_distances<T,
                                                 decltype(point_xys_iter),
                                                 decltype(quad_offsets),
                                                 decltype(quad_lengths)>{
          point_xys_iter,
          local_point_offsets.begin(),
          local_point_offsets.end(),
          quad_offsets,
          quad_offsets + num_poly_quad_pairs,
          quad_lengths,
          poly_indices,
          *cudf::column_device_view::create(poly_offsets, stream),
          *cudf::column_device_view::create(poly_points_x, stream),
          *cudf::column_device_view::create(poly_points_y, stream)});
      // Keys iterator for reduce_by_key: the point id of each triple.
      auto all_point_indices =
        thrust::make_transform_iterator(all_point_poly_indices_and_distances,
                                        [] __device__(auto const& x) { return thrust::get<0>(x); });
      // Allocate vectors for the distances min reduction
      rmm::device_uvector<uint32_t> point_idxs(point_x.size(), stream);
      rmm::device_uvector<uint32_t> poly_idxs(point_x.size(), stream);
      rmm::device_uvector<T> distances(point_x.size(), stream);
      // Fill distances with 0
      CUDA_TRY(cudaMemsetAsync(distances.data(), 0, distances.size() * sizeof(T), stream.value()));
      // Reduce the intermediate point/polyline indices to lists of point/polyline index pairs and
      // distances, selecting the polyline index closest to each point.
      auto const num_distances =
        thrust::distance(point_idxs.begin(),
                         thrust::reduce_by_key(
                           rmm::exec_policy(stream),
                           // point indices in
                           all_point_indices,
                           all_point_indices + num_point_poly_pairs,
                           all_point_poly_indices_and_distances,
                           // point indices out
                           point_idxs.begin(),
                           // point/polyline indices and distances out
                           thrust::make_zip_iterator(
                             thrust::make_discard_iterator(), poly_idxs.begin(), distances.begin()),
                           // comparator
                           thrust::equal_to<uint32_t>(),
                           // binop to select the point/polyline pair with the smallest distance
                           // NOTE(review): a genuine 0.0 distance (point lying exactly on a
                           // polyline) is treated as "no value" and loses to any non-zero
                           // distance — confirm this sentinel convention is intended.
                           [] __device__(auto const& lhs, auto const& rhs) {
                             T const& d_lhs = thrust::get<2>(lhs);
                             T const& d_rhs = thrust::get<2>(rhs);
                             // If lhs distance is 0, choose rhs
                             if (d_lhs == T{0}) { return rhs; }
                             // if rhs distance is 0, choose lhs
                             if (d_rhs == T{0}) { return lhs; }
                             // If distances to lhs/rhs are the same, choose poly with smallest id
                             if (d_lhs == d_rhs) {
                               auto const& i_lhs = thrust::get<1>(lhs);
                               auto const& i_rhs = thrust::get<1>(rhs);
                               return i_lhs < i_rhs ? lhs : rhs;
                             }
                             // Otherwise choose poly with smallest distance
                             return d_lhs < d_rhs ? lhs : rhs;
                           })
                           .first);
      return std::make_tuple(
        std::move(point_idxs), std::move(poly_idxs), std::move(distances), num_distances);
    }();
    // Allocate output columns for the point and polyline index pairs and their distances
    auto point_index_col = make_fixed_width_column<uint32_t>(point_x.size(), stream, mr);
    auto poly_index_col  = make_fixed_width_column<uint32_t>(point_x.size(), stream, mr);
    auto distance_col    = make_fixed_width_column<T>(point_x.size(), stream, mr);
    // Note: no need to resize `point_idxs`, `poly_idxs`, or `distances` if we set the end iterator
    // to `point_poly_idxs_and_distances + num_distances`.
    auto point_poly_idxs_and_distances =
      thrust::make_zip_iterator(point_idxs.begin(), poly_idxs.begin(), distances.begin());
    // scatter the values from their positions after reduction into their output positions
    thrust::scatter(rmm::exec_policy(stream),
                    point_poly_idxs_and_distances,
                    point_poly_idxs_and_distances + num_distances,
                    point_idxs.begin(),
                    thrust::make_zip_iterator(point_index_col->mutable_view().begin<uint32_t>(),
                                              poly_index_col->mutable_view().begin<uint32_t>(),
                                              distance_col->mutable_view().template begin<T>()));
    std::vector<std::unique_ptr<cudf::column>> cols{};
    cols.reserve(3);
    cols.push_back(std::move(point_index_col));
    cols.push_back(std::move(poly_index_col));
    cols.push_back(std::move(distance_col));
    return std::make_unique<cudf::table>(std::move(cols));
  }
};
} // namespace
// Detail entry point: selects the floating-point instantiation of the
// nearest-polyline functor according to the point coordinate dtype and
// forwards every argument unchanged.
std::unique_ptr<cudf::table> quadtree_point_to_nearest_polyline(
  cudf::table_view const& poly_quad_pairs,
  cudf::table_view const& quadtree,
  cudf::column_view const& point_indices,
  cudf::column_view const& point_x,
  cudf::column_view const& point_y,
  cudf::column_view const& poly_offsets,
  cudf::column_view const& poly_points_x,
  cudf::column_view const& poly_points_y,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  compute_quadtree_point_to_nearest_polyline dispatch_target{};
  auto result = cudf::type_dispatcher(point_x.type(),
                                      dispatch_target,
                                      poly_quad_pairs,
                                      quadtree,
                                      point_indices,
                                      point_x,
                                      point_y,
                                      poly_offsets,
                                      poly_points_x,
                                      poly_points_y,
                                      stream,
                                      mr);
  return result;
}
} // namespace detail
// Public API: validates inputs, short-circuits empty inputs to an empty
// 3-column result, then forwards to the detail implementation on the
// default stream. Output columns: point index (UINT32), nearest polyline
// index (UINT32), distance (same dtype as point_x).
std::unique_ptr<cudf::table> quadtree_point_to_nearest_polyline(
  cudf::table_view const& poly_quad_pairs,
  cudf::table_view const& quadtree,
  cudf::column_view const& point_indices,
  cudf::column_view const& point_x,
  cudf::column_view const& point_y,
  cudf::column_view const& poly_offsets,
  cudf::column_view const& poly_points_x,
  cudf::column_view const& poly_points_y,
  rmm::mr::device_memory_resource* mr)
{
  // Structural validation of the join/quadtree tables.
  CUSPATIAL_EXPECTS(poly_quad_pairs.num_columns() == 2,
                    "a quadrant-polyline table must have 2 columns");
  CUSPATIAL_EXPECTS(quadtree.num_columns() == 5, "a quadtree table must have 5 columns");
  // Size and dtype consistency of point and polyline columns.
  CUSPATIAL_EXPECTS(point_indices.size() == point_x.size() && point_x.size() == point_y.size(),
                    "number of points must be the same for both x and y columns");
  CUSPATIAL_EXPECTS(poly_points_x.size() == poly_points_y.size(),
                    "numbers of vertices must be the same for both x and y columns");
  CUSPATIAL_EXPECTS(poly_points_x.size() >= 2 * poly_offsets.size(),
                    "all polylines must have at least two vertices");
  CUSPATIAL_EXPECTS(poly_points_x.type() == poly_points_y.type(),
                    "polyline columns must have the same data type");
  CUSPATIAL_EXPECTS(point_x.type() == point_y.type(), "point columns must have the same data type");
  CUSPATIAL_EXPECTS(point_x.type() == poly_points_x.type(),
                    "points and polylines must have the same data type");
  // Nothing to test against: return an empty, correctly-typed result.
  if (poly_quad_pairs.num_rows() == 0 || quadtree.num_rows() == 0 || point_indices.size() == 0 ||
      poly_offsets.size() == 0) {
    std::vector<std::unique_ptr<cudf::column>> cols{};
    cols.reserve(3);
    cols.push_back(cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}));
    cols.push_back(cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32}));
    cols.push_back(cudf::make_empty_column(point_x.type()));
    return std::make_unique<cudf::table>(std::move(cols));
  }
  return detail::quadtree_point_to_nearest_polyline(poly_quad_pairs,
                                                    quadtree,
                                                    point_indices,
                                                    point_x,
                                                    point_y,
                                                    poly_offsets,
                                                    poly_points_x,
                                                    poly_points_y,
                                                    rmm::cuda_stream_default,
                                                    mr);
}
} // namespace cuspatial
#define NVBIO_CUDA_DEBUG
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <algorithm>
#include <crc/crc.h>
#include <nvbio/basic/console.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/bnt.h>
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/packedstream.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/dna.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/fmindex/bwt.h>
#include <nvbio/fasta/fasta.h>
#include <nvbio/io/fmindex/fmindex.h>
#include <nvbio/sufsort/sufsort.h>
#include "filelist.h"
// PAC File Type
enum PacType { BPAC = 0, WPAC = 1 };
using namespace nvbio;
// ASCII -> nucleotide code lookup (BWA-style "nst_nt4" table):
// 'A'/'a' -> 0, 'C'/'c' -> 1, 'G'/'g' -> 2, 'T'/'t' -> 3, '-' -> 5,
// every other byte (including 'N') -> 4, i.e. ambiguous.
unsigned char nst_nt4_table[256] = {
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 5 /*'-'*/, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  3, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 0, 4, 1,  4, 4, 4, 2,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  3, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,
    4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4,  4, 4, 4, 4
};
// Random-base-pair generators, used to substitute ambiguous symbols ('N')
// with a random 2-bit base. The generator backend is selected by GENERATOR;
// drand48() is unavailable on Windows, hence the WIN32 fallback to rand().
// NOTE(review): GENERATOR is not defined anywhere in this chunk; if it is
// undefined both macros compare equal (0) and the rand() branch is taken —
// confirm the build system defines it.
#define RAND 0
#define RAND48 1
#if (GENERATOR == RAND) || ((GENERATOR == RAND48) && defined(WIN32))
// generate random base pairs using rand()
inline void srand_bp(const unsigned int s) { srand(s); }
inline float frand() { return float(rand()) / float(RAND_MAX); }
// uniform in {0,1,2,3}; the `& 3` guards the frand()==1.0 edge case
inline uint8 rand_bp() { return uint8( frand() * 4 ) & 3; }
#elif (GENERATOR == RAND48)
// generate random base pairs using rand48()
inline void srand_bp(const unsigned int s) { srand48(s); }
inline uint8 rand_bp() { return uint8( drand48() * 4 ) & 3; }
#endif
// FASTA visitor that only tallies sizes: total number of sequence symbols
// (m_size) and number of reads (m_reads). Used as a first counting pass so
// buffers can be sized before the real copy pass (see Writer).
struct Counter
{
    uint64 m_size;   // total symbols seen across all reads
    uint32 m_reads;  // number of reads (sequences) seen

    Counter() : m_size(0), m_reads(0) {}

    // Visitor hooks: only begin_read()/read() do any work.
    void begin_read()        { ++m_reads; }
    void end_read()          {}
    void id(const uint8 c)   {}           // header characters are ignored
    void read(const uint8 c) { ++m_size; }
};
// FASTA visitor that writes the parsed sequence into a packed stream while
// building the BNT (BWA name/annotation) metadata: per-read annotations,
// ambiguity ("hole") runs for non-ACGT symbols, and symbol frequencies.
// Ambiguous symbols are replaced by random bases (rand_bp) in the stream.
template <typename stream_type>
struct Writer
{
    // stream:   destination packed stream (indexable, 2-bit symbols)
    // reads:    number of reads, used to pre-size the annotation arrays
    // max_size: hard cap on symbols written to `stream` (extra symbols still
    //           update l_pac/annotation lengths but are not stored)
    Writer(stream_type stream, const uint32 reads, const uint64 max_size) :
        m_max_size(max_size), m_size(0), m_stream( stream )
    {
        m_bntseq.seed = 11;
        m_bntseq.anns_data.resize( reads );
        m_bntseq.anns_info.resize( reads );
        // seed the RNG used to substitute ambiguous bases
        srand_bp( m_bntseq.seed );
        for (uint32 i = 0; i < 4; ++i)
            m_freq[i] = 0;
    }
    // Start a new read: initialize its annotation record at the current offset.
    void begin_read()
    {
        BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
        ann_data.len    = 0;
        ann_data.gi     = 0;
        ann_data.offset = m_size;
        ann_data.n_ambs = 0;

        BNTAnnInfo& ann_info = m_bntseq.anns_info[ m_bntseq.n_seqs ];
        ann_info.anno = "null";

        m_lasts = 0;
    }
    void end_read()
    {
        m_bntseq.n_seqs++;
    }
    // Append one character of the read's header/id line to its name.
    void id(const uint8 c)
    {
        m_bntseq.anns_info[ m_bntseq.n_seqs ].name.push_back(char(c));
    }
    // Consume one sequence symbol `s` (raw ASCII).
    void read(const uint8 s)
    {
        if (m_size < m_max_size)
        {
            const uint8 c = nst_nt4_table[s];
            // non-ACGT symbols are stored as a random base
            const uint8 sc = c < 4 ? c : rand_bp();

            m_stream[ m_size ] = sc;

            // keep track of the symbol frequencies
            ++m_freq[sc];

            if (c >= 4) // we have an N
            {
                // NOTE: holes are extended only while the *same* raw symbol
                // repeats (m_lasts == s); a run of differing ambiguity codes
                // opens a new hole per code.
                if (m_lasts == s) // contiguous N
                {
                    // increment length of the last hole
                    ++m_bntseq.ambs.back().len;
                }
                else
                {
                    // beginning of a new hole
                    BNTAmb amb;
                    amb.len    = 1;
                    amb.offset = m_size;
                    amb.amb    = s;

                    m_bntseq.ambs.push_back( amb );

                    ++m_bntseq.anns_data[ m_bntseq.n_seqs ].n_ambs;
                    ++m_bntseq.n_holes;
                }
            }
            // save last symbol
            m_lasts = s;

            // update sequence length
            BNTAnnData& ann_data = m_bntseq.anns_data[ m_bntseq.n_seqs ];
            ann_data.len++;
        }
        // l_pac and m_size advance even past max_size, so the caller can
        // detect truncation by comparing them against m_max_size.
        m_bntseq.l_pac++;
        m_size++;
    }

    uint64      m_max_size;   // symbol-storage cap
    uint64      m_size;       // symbols consumed so far (may exceed cap)
    stream_type m_stream;     // packed output stream
    BNTSeq      m_bntseq;     // BNT metadata under construction
    uint8       m_lasts;      // last raw symbol (for hole-run detection)
    uint32      m_freq[4];    // per-base (A,C,G,T) counts of stored symbols
};
template <typename StreamType>
bool save_stream(FILE* output_file, const uint64 seq_words, const StreamType* stream)
{
for (uint64 words = 0; words < seq_words; words += 1024)
{
const uint32 n_words = (uint32)nvbio::min( uint64(1024u), uint64(seq_words - words) );
if (fwrite( stream + words, sizeof(StreamType), n_words, output_file ) != n_words)
return false;
}
return true;
}
//
// .wpac file
//
//
// .wpac file
//
// Word-packed PAC: a uint64 sequence length followed by the 2-bit packed
// sequence stored as uint32 words (16 bases per word).
void save_wpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
    log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);

    // number of uint32 words needed for seq_length 2-bit symbols
    const uint32 seq_words = util::divide_ri( seq_length, 16 );

    FILE* output_file = fopen( pac_name, "wb" );
    if (output_file == NULL)
    {
        log_error(stderr, "  could not open output file \"%s\"!\n", pac_name );
        exit(1);
    }

    // write the sequence length as a uint64
    const uint64 len = seq_length;
    fwrite( &len, sizeof(len), 1u, output_file );

    // save the uint32 stream
    if (save_stream( output_file, seq_words, string_storage ) == false)
    {
        log_error(stderr, "  writing failed!\n");
        exit(1);
    }
    fclose( output_file );
    log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac file
//
//
// .pac file
//
// Byte-packed PAC (BWA-compatible): the 2-bit packed sequence stored as
// bytes (4 bases per byte), followed by the trailer that makes the file
// size always l_pac/4 + 1 + 1 (a pad byte when l_pac % 4 == 0, then a byte
// holding l_pac % 4).
void save_bpac(const uint32 seq_length, const uint32* string_storage, const char* pac_name)
{
    // big-endian 2-bit streams over uint32 words (source) and bytes (dest)
    typedef PackedStream<const uint32*,uint8,2,true,int64> stream_type;
    typedef PackedStream<      uint8*, uint8,2,true,int64> pac_stream_type;

    log_info(stderr, "\nwriting \"%s\"... started\n", pac_name);

    const uint32 bps_per_byte = 4u;
    const uint64 seq_bytes    = (seq_length + bps_per_byte - 1u) / bps_per_byte;

    FILE* output_file = fopen( pac_name, "wb" );
    if (output_file == NULL)
    {
        log_error(stderr, "  could not open output file \"%s\"!\n", pac_name );
        exit(1);
    }

    // copy the uint32 packed stream into a uint8 pac stream
    thrust::host_vector<uint8> pac_storage( seq_bytes );
    pac_stream_type pac_string( nvbio::plain_view( pac_storage ) );
    stream_type     string( string_storage );

    // symbol-by-symbol repack from word layout to byte layout
    for (uint32 i = 0; i < seq_length; ++i)
        pac_string[i] = string[i];

    // save the uint8 stream
    if (save_stream( output_file, seq_bytes, nvbio::raw_pointer( pac_storage ) ) == false)
    {
        log_error(stderr, "  writing failed!\n");
        exit(1);
    }
    // the following code makes the pac file size always (l_pac/4+1+1)
    if (seq_length % 4 == 0)
    {
        const uint8 ct = 0;
        fwrite( &ct, 1, 1, output_file );
    }
    {
        const uint8 ct = seq_length % 4;
        fwrite( &ct, 1, 1, output_file );
    }
    fclose( output_file );
    log_info(stderr, "writing \"%s\"... done\n", pac_name);
}
//
// .pac | .wpac file
//
// Dispatch to the proper packed-sequence writer for the requested PacType:
// byte-packed (.pac) or word-packed (.wpac).
void save_pac(const uint32 seq_length, const uint32* string_storage, const char* pac_name, const PacType pac_type)
{
    if (pac_type != BPAC)
        save_wpac( seq_length, string_storage, pac_name );
    else
        save_bpac( seq_length, string_storage, pac_name );
}
//
// .bwt file
//
// Write a .bwt file: the primary suffix index, the 4 cumulative symbol
// frequencies, then the packed BWT word stream.
// (seq_length is part of the call signature but not stored in this file.)
void save_bwt(const uint32 seq_length, const uint32 seq_words, const uint32 primary, const uint32* cumFreq, const uint32* h_bwt_storage, const char* bwt_name)
{
    log_info(stderr, "\nwriting \"%s\"... started\n", bwt_name);

    FILE* fp = fopen( bwt_name, "wb" );
    if (fp == NULL)
    {
        log_error(stderr, " could not open output file \"%s\"!\n", bwt_name );
        exit(1);
    }

    // header: primary index + cumulative symbol frequencies
    fwrite( &primary, sizeof(uint32), 1, fp );
    fwrite( cumFreq,  sizeof(uint32), 4, fp );

    // payload: the packed BWT words
    if (save_stream( fp, seq_words, h_bwt_storage ) == false)
    {
        log_error(stderr, " writing failed!\n");
        exit(1);
    }

    fclose( fp );
    log_info(stderr, "writing \"%s\"... done\n", bwt_name);
}
//
// .sa file
//
// Write a .sa (sampled suffix array) file: the primary index, the 4
// cumulative symbol frequencies, the sampling interval, the sequence length,
// and the ssa_len-1 sampled entries (entry 0 corresponds to the primary
// suffix and is skipped).
void save_ssa(const uint32 seq_length, const uint32 sa_intv, const uint32 ssa_len, const uint32 primary, const uint32* cumFreq, const uint32* h_ssa, const char* sa_name)
{
    log_info(stderr, "\nwriting \"%s\"... started\n", sa_name);
    FILE* output_file = fopen( sa_name, "wb" );
    if (output_file == NULL)
    {
        log_error(stderr, " could not open output file \"%s\"!\n", sa_name );
        exit(1);
    }
    fwrite( &primary, sizeof(uint32), 1u, output_file );
    // cumFreq is already a pointer to the 4-entry frequency table: pass it
    // directly. The previous '&cumFreq' wrote the pointer's own bytes (plus
    // adjacent stack memory) instead of the frequencies — cf. save_bwt above,
    // which writes 'cumFreq' correctly.
    fwrite( cumFreq, sizeof(uint32), 4u, output_file );
    fwrite( &sa_intv, sizeof(uint32), 1u, output_file );
    fwrite( &seq_length, sizeof(uint32), 1u, output_file );
    // skip entry 0 (the primary suffix) and store the remaining samples
    fwrite( &h_ssa[1], sizeof(uint32), ssa_len-1, output_file );
    fclose( output_file );
    log_info(stderr, "writing \"%s\"... done\n", sa_name);
}
//
// Build the forward and reverse FM-index companions for a set of FASTA
// inputs: the packed sequence (.pac/.wpac), the BWTs (.bwt/.rbwt), the
// sampled suffix arrays (.sa/.rsa), plus the .bns metadata via save_bns().
//
// Fatal I/O or consistency errors terminate through exit(1); CUDA and
// allocation failures inside the BWT construction are caught and logged.
// Returns 0 on (possibly partial) completion.
//
int build(
    const char* input_name,
    const char* output_name,
    const char* pac_name,
    const char* rpac_name,
    const char* bwt_name,
    const char* rbwt_name,
    const char* sa_name,
    const char* rsa_name,
    const uint64 max_length,
    const PacType pac_type,
    const bool compute_crc)
{
    std::vector<std::string> sortednames;
    list_files(input_name, sortednames);
    uint32 n_inputs = (uint32)sortednames.size();
    log_info(stderr, "\ncounting bps... started\n");
    // count entire sequence length
    Counter counter;
    for (uint32 i = 0; i < n_inputs; ++i)
    {
        log_info(stderr, " counting \"%s\"\n", sortednames[i].c_str());
        FASTA_inc_reader fasta( sortednames[i].c_str() );
        if (fasta.valid() == false)
        {
            log_error(stderr, " unable to open file\n");
            exit(1);
        }
        while (fasta.read( 1024, counter ) == 1024);
    }
    log_info(stderr, "counting bps... done\n");
    // clamp the total sequence length to the user-requested maximum
    const uint64 seq_length = nvbio::min( (uint64)counter.m_size, (uint64)max_length );
    const uint32 bps_per_word = sizeof(uint32)*4u;
    const uint64 seq_words = (seq_length + bps_per_word - 1u) / bps_per_word;
    log_info(stderr, "\nstats:\n");
    log_info(stderr, " reads : %u\n", counter.m_reads );
    log_info(stderr, " sequence length : %llu bps (%.1f MB)\n",
        seq_length,
        float(seq_words*sizeof(uint32))/float(1024*1024));
    log_info(stderr, " buffer size : %.1f MB\n",
        2*seq_words*sizeof(uint32)/1.0e6f );
    // sampled suffix-array interval and entry count
    const uint32 sa_intv = nvbio::io::FMIndexData::SA_INT;
    const uint32 ssa_len = (seq_length + sa_intv) / sa_intv;
    // allocate the actual storage
    thrust::host_vector<uint32> h_string_storage( seq_words+1 );
    thrust::host_vector<uint32> h_bwt_storage( seq_words+1 );
    thrust::host_vector<uint32> h_ssa( ssa_len );
    typedef PackedStream<const uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> const_stream_type;
    typedef PackedStream< uint32*,uint8,io::FMIndexData::BWT_BITS,io::FMIndexData::BWT_BIG_ENDIAN> stream_type;
    stream_type h_string( nvbio::plain_view( h_string_storage ) );
    uint32 cumFreq[4] = { 0, 0, 0, 0 };
    log_info(stderr, "\nbuffering bps... started\n");
    // read all files
    {
        Writer<stream_type> writer( h_string, counter.m_reads, seq_length );
        for (uint32 i = 0; i < n_inputs; ++i)
        {
            log_info(stderr, " buffering \"%s\"\n", sortednames[i].c_str());
            FASTA_inc_reader fasta( sortednames[i].c_str() );
            if (fasta.valid() == false)
            {
                log_error(stderr, " unable to open file!\n");
                exit(1);
            }
            while (fasta.read( 1024, writer ) == 1024);
        }
        save_bns( writer.m_bntseq, output_name );
        // compute the cumulative symbol frequencies
        cumFreq[0] = writer.m_freq[0];
        cumFreq[1] = writer.m_freq[1] + cumFreq[0];
        cumFreq[2] = writer.m_freq[2] + cumFreq[1];
        cumFreq[3] = writer.m_freq[3] + cumFreq[2];
        // sanity check: total frequency must match the buffered length
        if (cumFreq[3] != seq_length)
        {
            log_error(stderr, " mismatching symbol frequencies!\n");
            log_error(stderr, " (%u, %u, %u, %u)\n", cumFreq[0], cumFreq[1], cumFreq[2], cumFreq[3]);
            exit(1);
        }
    }
    log_info(stderr, "buffering bps... done\n");
    if (compute_crc)
    {
        const uint32 crc = crcCalc( h_string, uint32(seq_length) );
        log_info(stderr, " crc: %u\n", crc);
    }
    try
    {
        BWTParams params;
        uint32 primary;
        thrust::device_vector<uint32> d_string_storage( h_string_storage );
        thrust::device_vector<uint32> d_bwt_storage( seq_words+1 );
        const_stream_type d_string( nvbio::plain_view( d_string_storage ) );
        stream_type d_bwt( nvbio::plain_view( d_bwt_storage ) );
        Timer timer;
        log_info(stderr, "\nbuilding forward BWT... started\n");
        timer.start();
        {
            StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
                seq_length, // string length
                d_string, // string
                sa_intv, // SSA sampling interval
                d_bwt, // output bwt iterator
                nvbio::plain_view( h_ssa ) ); // output ssa iterator
            // fixed: '&params' had been mangled into the mojibake '¶ms'
            cuda::blockwise_suffix_sort(
                seq_length,
                d_string,
                output,
                &params );
            // remove the dollar symbol
            output.remove_dollar();
            primary = output.primary();
        }
        timer.stop();
        log_info(stderr, "building forward BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
        log_info(stderr, " primary: %u\n", primary);
        // save everything to disk
        {
            // copy to the host
            thrust::copy( d_bwt_storage.begin(),
                          d_bwt_storage.begin() + seq_words,
                          h_bwt_storage.begin() );
            if (compute_crc)
            {
                const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
                const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
                log_info(stderr, " crc: %u\n", crc);
            }
            save_pac( seq_length, nvbio::plain_view( h_string_storage ), pac_name, pac_type );
            save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), bwt_name );
            save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), sa_name );
        }
        // reverse the string in h_string_storage
        {
            // reuse the bwt storage to build the reverse
            uint32* h_rbase_stream = nvbio::plain_view( h_bwt_storage );
            stream_type h_rstring( h_rbase_stream );
            // reverse the string
            for (uint32 i = 0; i < seq_length; ++i)
                h_rstring[i] = h_string[ seq_length - i - 1u ];
            // and now swap the vectors
            h_bwt_storage.swap( h_string_storage );
            h_string = stream_type( nvbio::plain_view( h_string_storage ) );
            // and copy back the new string to the device
            d_string_storage = h_string_storage;
        }
        log_info(stderr, "\nbuilding reverse BWT... started\n");
        timer.start();
        {
            StringBWTSSAHandler<const_stream_type,stream_type,uint32*> output(
                seq_length, // string length
                d_string, // string
                sa_intv, // SSA sampling interval
                d_bwt, // output bwt iterator
                nvbio::plain_view( h_ssa ) ); // output ssa iterator
            // fixed: '&params' had been mangled into the mojibake '¶ms'
            cuda::blockwise_suffix_sort(
                seq_length,
                d_string,
                output,
                &params );
            // remove the dollar symbol
            output.remove_dollar();
            primary = output.primary();
        }
        timer.stop();
        log_info(stderr, "building reverse BWT... done: %um:%us\n", uint32(timer.seconds()/60), uint32(timer.seconds())%60);
        log_info(stderr, " primary: %u\n", primary);
        // save everything to disk
        {
            // copy to the host
            thrust::copy( d_bwt_storage.begin(),
                          d_bwt_storage.begin() + seq_words,
                          h_bwt_storage.begin() );
            if (compute_crc)
            {
                const_stream_type h_bwt( nvbio::plain_view( h_bwt_storage ) );
                const uint32 crc = crcCalc( h_bwt, uint32(seq_length) );
                log_info(stderr, " crc: %u\n", crc);
            }
            save_pac( seq_length, nvbio::plain_view( h_string_storage ), rpac_name, pac_type );
            save_bwt( seq_length, seq_words, primary, cumFreq, nvbio::plain_view( h_bwt_storage ), rbwt_name );
            save_ssa( seq_length, sa_intv, ssa_len, primary, cumFreq, nvbio::plain_view( h_ssa ), rsa_name );
        }
    }
    // catch by const reference to avoid slicing/copying the exception objects
    catch (const nvbio::cuda_error& e)
    {
        log_error(stderr, "caught a nvbio::cuda_error exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const nvbio::bad_alloc& e)
    {
        log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const nvbio::logic_error& e)
    {
        log_error(stderr, "caught a nvbio::logic_error exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const nvbio::runtime_error& e)
    {
        log_error(stderr, "caught a nvbio::runtime_error exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const std::bad_alloc& e)
    {
        log_error(stderr, "caught a std::bad_alloc exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const std::logic_error& e)
    {
        log_error(stderr, "caught a std::logic_error exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (const std::runtime_error& e)
    {
        log_error(stderr, "caught a std::runtime_error exception:\n");
        log_error(stderr, " %s\n", e.what());
    }
    catch (...)
    {
        log_error(stderr,"unknown exception caught!\n");
        exit(1);
    }
    return 0;
}
int main(int argc, char* argv[])
{
crcInit();
if (argc < 2)
{
log_info(stderr, "please specify input and output file names, e.g:\n");
log_info(stderr, " nvBWT [options] myinput.*.fa output-prefix\n");
log_info(stderr, " options:\n");
log_info(stderr, " -v | --verbosity select verbosity\n");
log_info(stderr, " -m | --max-length clamp input to max_length\n");
log_info(stderr, " -b | --byte-packing output byte packed .pac\n");
log_info(stderr, " -w | --word-packing output word packed .wpac\n");
log_info(stderr, " -c | --crc compute crcs\n");
log_info(stderr, " -d | --device cuda device\n");
exit(0);
}
const char* file_names[2] = { NULL, NULL };
uint64 max_length = uint64(-1);
PacType pac_type = BPAC;
bool crc = false;
int cuda_device = -1;
uint32 n_files = 0;
for (int32 i = 1; i < argc; ++i)
{
const char* arg = argv[i];
if ((strcmp( arg, "-m" ) == 0) ||
(strcmp( arg, "--max-length" ) == 0))
{
max_length = atoi( argv[++i] );
}
else if ((strcmp( argv[i], "-v" ) == 0) ||
(strcmp( argv[i], "-verbosity" ) == 0) ||
(strcmp( argv[i], "--verbosity" ) == 0))
{
set_verbosity( Verbosity( atoi( argv[++i] ) ) );
}
else if ((strcmp( arg, "-b" ) == 0) ||
(strcmp( arg, "--byte-packing" ) == 0))
{
pac_type = BPAC;
}
else if ((strcmp( arg, "-w" ) == 0) ||
(strcmp( arg, "--word-packing" ) == 0))
{
pac_type = WPAC;
}
else if ((strcmp( arg, "-c" ) == 0) ||
(strcmp( arg, "--crc" ) == 0))
{
crc = true;
}
else if ((strcmp( arg, "-d" ) == 0) ||
(strcmp( arg, "--device" ) == 0))
{
cuda_device = atoi( argv[++i] );
}
else
file_names[ n_files++ ] = argv[i];
}
const char* input_name = file_names[0];
const char* output_name = file_names[1];
std::string pac_string = std::string( output_name ) + (pac_type == BPAC ? ".pac" : ".wpac");
const char* pac_name = pac_string.c_str();
std::string rpac_string = std::string( output_name ) + (pac_type == BPAC ? ".rpac" : ".rwpac");
const char* rpac_name = rpac_string.c_str();
std::string bwt_string = std::string( output_name ) + ".bwt";
const char* bwt_name = bwt_string.c_str();
std::string rbwt_string = std::string( output_name ) + ".rbwt";
const char* rbwt_name = rbwt_string.c_str();
std::string sa_string = std::string( output_name ) + ".sa";
const char* sa_name = sa_string.c_str();
std::string rsa_string = std::string( output_name ) + ".rsa";
const char* rsa_name = rsa_string.c_str();
log_info(stderr, "max length : %lld\n", max_length);
log_info(stderr, "input : \"%s\"\n", input_name);
log_info(stderr, "output : \"%s\"\n", output_name);
try
{
int device_count;
cudaGetDeviceCount(&device_count);
cuda::check_error("cuda-check");
log_verbose(stderr, " cuda devices : %d\n", device_count);
// inspect and select cuda devices
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
cudaDeviceProp best_device_prop;
cudaGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, device );
log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor);
log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount);
log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000);
log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f);
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
cudaSetDevice( cuda_device );
}
size_t free, total;
cudaMemGetInfo(&free, &total);
NVBIO_CUDA_DEBUG_STATEMENT( log_info(stderr,"device mem : total: %.1f GB, free: %.1f GB\n", float(total)/float(1024*1024*1024), float(free)/float(1024*1024*1024)) );
cuda::check_error("cuda-memory-check");
return build( input_name, output_name, pac_name, rpac_name, bwt_name, rbwt_name, sa_name, rsa_name, max_length, pac_type, crc );
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (thrust::system::system_error e)
{
log_error(stderr, "caught a thrust::system_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
return 1;
}
catch (...)
{
log_error(stderr, "caught an unknown exception!\n");
return 1;
}
} | the_stack |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include "../utils.h"
// numerical fuzz used throughout the rasterizer to keep divisions finite
// (note: a double literal — it participates in scalar_t arithmetic below)
#define eps 1e-7
// One case of the scalar-type dispatch: binds TYPE_NAME to the concrete C++
// type and 'block_size' to the per-type CUDA block size, then invokes the
// trailing functor.
#define PRIVATE_CASE_TYPE_AND_VAL(ENUM_TYPE, TYPE, TYPE_NAME, VAL, ...) \
case ENUM_TYPE: { \
using TYPE_NAME = TYPE; \
const int block_size = VAL; \
return __VA_ARGS__(); \
}
// Dispatch on an at::ScalarType: float kernels get 1024-thread blocks, double
// kernels 512 (presumably halved for register/shared-memory pressure — TODO
// confirm). Unsupported dtypes raise an ATen error.
#define DISPATCH_INPUT_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) \
[&] { \
switch(TYPE) \
{ \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Float, float, TYPE_NAME, 1024, __VA_ARGS__) \
PRIVATE_CASE_TYPE_AND_VAL(at::ScalarType::Double, double, TYPE_NAME, 512, __VA_ARGS__) \
default: \
AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \
} \
}()
namespace kaolin {
// Rasterize a packed batch of triangle meshes.
//
// For each pixel of each image, sweep the faces of the corresponding mesh in
// tiles of BLOCK_SIZE (staging their bounding boxes in shared memory), select
// the covering face with the largest interpolated z, and write:
//   - selected_face_idx: winning face index relative to the mesh's first face
//     (untouched where no face covers the pixel),
//   - output_weights:    the pixel's three barycentric weights,
//   - interpolated_features: barycentric interpolation of face_features.
//
// Launch layout: blockIdx.x/threadIdx.x tile the width*height pixels of one
// image, blockIdx.y strides over the batch. The bbox staging loop assumes
// blockDim.x == BLOCK_SIZE (the launcher passes the same constant for both).
// Faces of all meshes are packed back-to-back; first_idx_face_per_mesh[b] and
// [b+1] bound mesh b's face range.
template<typename scalar_t, int BLOCK_SIZE>
__global__ void packed_rasterize_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_z,
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const scalar_t* __restrict__ face_features,
const int64_t* __restrict__ first_idx_face_per_mesh,
int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ output_weights,
scalar_t* __restrict__ interpolated_features,
int batch_size,
int height,
int width,
int num_faces,
int num_features,
float multiplier) {
// staging area for one tile of face bounding boxes (xmin, ymin, xmax, ymax)
__shared__ scalar_t shm_pointsbbox[BLOCK_SIZE][4];
for (int bidx = blockIdx.y; bidx < batch_size; bidx += gridDim.y) {
for (int pixel_idx = blockIdx.x * blockDim.x + threadIdx.x;
pixel_idx < width * height;
pixel_idx += gridDim.x * blockDim.x) {
const int wididx = pixel_idx % width;
const int heiidx = (pixel_idx - wididx) / width;
const int first_id_faces = first_idx_face_per_mesh[bidx];
const int last_id_faces = first_idx_face_per_mesh[bidx + 1];
// running best (largest-z, i.e. nearest) face for this pixel
scalar_t max_z0 = -INFINITY;
int max_face_idx = -1;
scalar_t max_w0 = 0.;
scalar_t max_w1 = 0.;
scalar_t max_w2 = 0.;
bool is_active_pixel = heiidx < height;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + pixel_idx;
const int totalidx3 = totalidx1 * 3;
const int totalidxd = totalidx1 * num_features;
// pixel-center coordinate in the same multiplier-scaled space as
// face_vertices_image (x grows rightwards, y upwards)
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
// sweep the mesh's faces one BLOCK_SIZE tile at a time
for (int start_face_idx = first_id_faces;
start_face_idx < last_id_faces;
start_face_idx += BLOCK_SIZE) {
const int remaining_faces = last_id_faces - start_face_idx;
const int num_faces_this_iter = remaining_faces > BLOCK_SIZE ? BLOCK_SIZE : remaining_faces;
__syncthreads();
// cooperative load of 4*BLOCK_SIZE bbox floats; all threads take part,
// inactive pixels only drop out after the barrier below
#pragma unroll
for (int ii = 0; ii < 4; ii++) {
const int _start_idx = start_face_idx * 4 + threadIdx.x + ii * blockDim.x;
if (_start_idx < (last_id_faces * 4)) {
shm_pointsbbox[((threadIdx.x - (threadIdx.x % 4) + ii * blockDim.x) / 4)][threadIdx.x % 4] = \
face_bboxes[_start_idx];
}
}
__syncthreads();
if (!(is_active_pixel)) {
continue;
}
for (int ii = 0; ii < num_faces_this_iter; ii++) {
int face_idx = ii + start_face_idx;
// will this pixel be influenced by this face?
scalar_t xmin = shm_pointsbbox[ii][0];
scalar_t ymin = shm_pointsbbox[ii][1];
scalar_t xmax = shm_pointsbbox[ii][2];
scalar_t ymax = shm_pointsbbox[ii][3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
const int shift1 = face_idx;
const int shift3 = shift1 * 3;
const int shift6 = shift1 * 6;
//const int shift9 = shift1 * 9;
// if this pixel is covered by this face, then we check its depth and weights
scalar_t ax = face_vertices_image[shift6 + 0];
scalar_t ay = face_vertices_image[shift6 + 1];
scalar_t bx = face_vertices_image[shift6 + 2];
scalar_t by = face_vertices_image[shift6 + 3];
scalar_t cx = face_vertices_image[shift6 + 4];
scalar_t cy = face_vertices_image[shift6 + 5];
// replace with other variables
scalar_t m = bx - ax;
scalar_t p = by - ay;
scalar_t n = cx - ax;
scalar_t q = cy - ay;
scalar_t s = x0 - ax;
scalar_t t = y0 - ay;
// solve the 2x2 barycentric system by Cramer's rule
// (eps keeps the division finite for degenerate triangles):
// m * w1 + n * w2 = s
// p * w1 + q * w2 = t
scalar_t k1 = s * q - n * t;
scalar_t k2 = m * t - s * p;
scalar_t k3 = m * q - n * p;
scalar_t w1 = k1 / (k3 + eps);
scalar_t w2 = k2 / (k3 + eps);
scalar_t w0 = 1 - w1 - w2; // TODO(cfujitsang): 1. instead of 1 (but would change values)
// does not lie inside the triangle;
// the -eps tolerance absorbs small numerical shifts at the boundaries
if (w0 < -eps || w1 < -eps || w2 < -eps) {
continue;
}
// if it is perspective, then this way has a little error
// because face plane may not be parallel to the image plane
// but let's ignore it first
scalar_t az = face_vertices_z[shift3 + 0];
scalar_t bz = face_vertices_z[shift3 + 1];
scalar_t cz = face_vertices_z[shift3 + 2];
scalar_t z0 = w0 * az + w1 * bz + w2 * cz;
// it will be filled by a nearer face
if (z0 <= max_z0) {
continue;
}
max_z0 = z0;
max_face_idx = face_idx;
max_w0 = w0;
max_w1 = w1;
max_w2 = w2;
}
}
// commit the winning face (if any) for this pixel
if (max_face_idx > -1) {
// face index, relative to this mesh's first face
selected_face_idx[totalidx1] = max_face_idx - first_id_faces;
const int shift3d = max_face_idx * 3 * num_features;
// barycentric weights
output_weights[totalidx3 + 0] = max_w0;
output_weights[totalidx3 + 1] = max_w1;
output_weights[totalidx3 + 2] = max_w2;
// interpolate the per-vertex features
for (int d = 0; d < num_features; d++) {
scalar_t r0 = face_features[shift3d + d];
scalar_t r1 = face_features[shift3d + num_features + d];
scalar_t r2 = face_features[shift3d + num_features + num_features + d];
interpolated_features[totalidxd + d] = max_w0 * r0 + max_w1 * r1 + max_w2 * r2;
}
}
}
}
}
// Host-side launcher: reads the launch geometry off the output tensor,
// dispatches on the input scalar type (which also fixes the CUDA block size),
// and launches the packed rasterization kernel with one thread per pixel.
void packed_rasterize_forward_cuda_kernel_launcher(
at::Tensor face_vertices_z,
at::Tensor face_vertices_image,
at::Tensor face_bboxes,
at::Tensor face_features,
at::Tensor num_face_per_mesh,
at::Tensor selected_face_idx,
at::Tensor output_weights,
at::Tensor interpolated_features,
float multiplier) {
  // the interpolated features tensor is (batch, height, width, num_features)
  const int batch_size   = interpolated_features.size(0);
  const int height       = interpolated_features.size(1);
  const int width        = interpolated_features.size(2);
  const int num_features = interpolated_features.size(3);
  const int num_faces    = face_vertices_z.size(1);
  const int num_pixels   = height * width;
  DISPATCH_INPUT_TYPES(face_vertices_z.scalar_type(), scalar_t,
    "packed_rasterize_forward_cuda_kernel", [&] {
      // enough x-blocks to cover every pixel of one image
      const dim3 threads(block_size, 1, 1);
      const dim3 blocks(num_pixels / block_size + 1, 1, 1);
      packed_rasterize_forward_cuda_kernel<scalar_t, block_size><<<blocks, threads>>>(
          face_vertices_z.data_ptr<scalar_t>(),
          face_vertices_image.data_ptr<scalar_t>(),
          face_bboxes.data_ptr<scalar_t>(),
          face_features.data_ptr<scalar_t>(),
          num_face_per_mesh.data_ptr<int64_t>(),
          selected_face_idx.data_ptr<int64_t>(),
          output_weights.data_ptr<scalar_t>(),
          interpolated_features.data_ptr<scalar_t>(),
          batch_size, height, width, num_faces, num_features, multiplier);
    });
}
// Soft silhouette (probability map) kernel, one thread per (batch, row, col)
// pixel.
//
// Pixels already covered by a face (selected_face_idx >= 0) get probability 1.
// For uncovered pixels, up to knum candidate faces whose bbox contains the
// pixel each contribute prob = exp(-sigmainv * d^2 / multiplier^2), where d^2
// is the squared distance from the pixel to the face boundary (nearest of the
// three edges / three vertices); contributions combine as
// improb = 1 - prod_k (1 - prob_k).
//
// probface / probcase / probdis record, per pixel and slot k, the 1-based
// face index, the 1-based nearest-primitive id (1-3: edges, 4-6: vertices)
// and the face's probability — presumably consumed by the backward pass
// (TODO confirm against the caller).
template<typename scalar_t>
__global__ void generate_soft_mask_cuda_kernel(
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ pointsbbox2_bxfx4,
const int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ probface_bxhxwxk,
scalar_t* __restrict__ probcase_bxhxwxk,
scalar_t* __restrict__ probdis_bxhxwxk,
scalar_t* __restrict__ improb_bxhxwx1,
int bnum, int height, int width, int fnum,
int knum, float multiplier, float sigmainv) {
// recover (bidx, heiidx, wididx) from the flat thread index:
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= bnum || heiidx >= height || wididx >= width) {
return;
}
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// which face it belongs to?
// face begins from 1
// convert it into int, use round!
int fidxint = selected_face_idx[totalidx1];
// covered pixels are fully inside the silhouette
// maybe we can search its neighbour
if (fidxint >= 0) {
improb_bxhxwx1[totalidx1] = 1.0;
}
// pixels not covered by any faces
else {
// pixel coordinate (same multiplier-scaled space as face_vertices_image)
scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1);
int kid = 0;
// note: this loop variable shadows the outer 'fidxint' read above
for (int fidxint = 0; fidxint < fnum; fidxint++) {
// which face it belongs to
const int shift1 = bidx * fnum + fidxint;
const int shift4 = shift1 * 4;
const int shift6 = shift1 * 6;
///////////////////////////////////////////////////////////////
// can this pixel be influenced by this face?
scalar_t xmin = pointsbbox2_bxfx4[shift4 + 0];
scalar_t ymin = pointsbbox2_bxfx4[shift4 + 1];
scalar_t xmax = pointsbbox2_bxfx4[shift4 + 2];
scalar_t ymax = pointsbbox2_bxfx4[shift4 + 3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
//////////////////////////////////////////////////////////
// pdis[0..2]: squared perpendicular distances to the 3 edges,
// pdis[3..5]: squared distances to the 3 vertices
scalar_t pdis[6];
// perdis
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((i + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// edge line in implicit form: ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
// (x3, y3) is the foot of the perpendicular on the edge line;
// if it falls outside the segment, the edge distance is invalidated
scalar_t x3 = B * B * x0 - A * B * y0 - A * C;
scalar_t y3 = A * A * y0 - A * B * x0 - B * C;
x3 = x3 / (down + eps);
y3 = y3 / (down + eps);
scalar_t direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
if (direct > 0) {
// projection outside the segment: use a sentinel larger than any
// reachable squared distance so a vertex distance wins instead
pdis[i] = 4 * multiplier * multiplier;
} else {
// perpendicular distance
pdis[i] = up * up / (down + eps);
}
}
////////////////////////////////////////////////////////////
// point distance
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
pdis[i + 3] = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
}
// pick the nearest primitive (edge or vertex)
int edgeid = 0;
scalar_t dissquare = pdis[0];
for (int i = 1; i < 6; i++) {
if (dissquare > pdis[i]) {
dissquare = pdis[i];
edgeid = i;
}
}
// gaussian falloff of the (normalized) squared distance
scalar_t z = sigmainv * dissquare / multiplier / multiplier;
scalar_t prob = exp(-z);
// record this face's contribution (1-based ids)
probface_bxhxwxk[totalidxk + kid] = fidxint + 1.0;
probcase_bxhxwxk[totalidxk + kid] = edgeid + 1.0;
probdis_bxhxwxk[totalidxk + kid] = prob;
kid++;
if (kid >= knum)
break;
}
// combine the k contributions: 1 - prod(1 - p_k)
scalar_t allprob = 1.0;
for (int i = 0; i < kid; i++) {
scalar_t prob = probdis_bxhxwxk[totalidxk + i];
allprob *= (1.0 - prob);
}
// final result
allprob = 1.0 - allprob;
improb_bxhxwx1[totalidx1] = allprob;
}
}
// Host-side launcher for the soft-mask kernel: one thread per pixel across
// the entire batch, block size chosen by the scalar-type dispatch.
void generate_soft_mask_cuda_kernel_launcher(
at::Tensor face_vertices_image,
at::Tensor face_bboxes,
at::Tensor selected_face_idx,
at::Tensor probface_bxhxwxk,
at::Tensor probcase_bxhxwxk,
at::Tensor probdis_bxhxwxk,
at::Tensor improb_bxhxwx1,
float multiplier,
float sigmainv) {
  const int batch_size = face_vertices_image.size(0);
  const int num_faces  = face_vertices_image.size(1);
  const int height     = selected_face_idx.size(1);
  const int width      = selected_face_idx.size(2);
  const int knum       = probface_bxhxwxk.size(3);
  const int num_pixels = batch_size * height * width;
  DISPATCH_INPUT_TYPES(face_vertices_image.scalar_type(), scalar_t,
    "generate_soft_mask_cuda_kernel", [&] {
      // enough blocks to cover every pixel of every image
      const dim3 threads(block_size, 1, 1);
      const dim3 blocks(num_pixels / block_size + 1, 1, 1);
      generate_soft_mask_cuda_kernel<scalar_t><<<blocks, threads>>>(
          face_vertices_image.data_ptr<scalar_t>(),
          face_bboxes.data_ptr<scalar_t>(),
          selected_face_idx.data_ptr<int64_t>(),
          probface_bxhxwxk.data_ptr<scalar_t>(),
          probcase_bxhxwxk.data_ptr<scalar_t>(),
          probdis_bxhxwxk.data_ptr<scalar_t>(),
          improb_bxhxwx1.data_ptr<scalar_t>(),
          batch_size, height, width, num_faces, knum, multiplier, sigmainv);
    });
}
} // namespace kaolin
#undef PRIVATE_CASE_TYPE_AND_VAL
#undef DISPATCH_INPUT_TYPES
// Approximate Earth-Mover (optimal transport) matching between point sets
// xyz1 (b x n x 3) and xyz2 (b x m x 3), writing the soft assignment matrix
// 'match' (b x m x n, element [l*n+k] couples xyz2 point l with xyz1 point k).
//
// The algorithm runs a fixed schedule of relaxation rounds with an
// increasingly sharp kernel exp(level * d^2), level = -4^j for j = 7..-1
// (the j==-2 / level==0 branch is unreachable with the current loop bound;
// the commented-out loop above it included that final uniform pass).
// Each round: (1) compute per-left-point ratios from the remaining left mass,
// (2) consume right mass against them, (3) accumulate the transported mass
// into 'match' and shrink the remaining left mass.
//
// 'temp' is per-block scratch of (n+m)*2 floats, partitioned into
// remainL[n], remainR[m], ratioL[n], ratioR[m].
// NOTE(review): multiL/multiR use integer division (n/m, m/n), so
// non-divisible sizes truncate — this matches the reference implementation,
// but confirm if exact mass balance is required.
__global__ void approxmatchkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
// shared staging buffer for tiles of (x, y, z, weight) quadruples
const int Block=1024;
__shared__ float buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// reset the match matrix and the available mass on both sides
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
//for (int j=7;j>=-2;j--){
for (int j=7;j>-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
// phase 1: ratioL[k] = remainL[k] / sum_l exp(level*d(k,l)^2)*remainR[l]
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*3+l0*3+l*3+0];
float y2=xyz2[i*m*3+l0*3+l*3+1];
float z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
float w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=1e-9f;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
// phase 2: consume right-side mass against the left ratios, capping the
// consumption at the available remainR[l]
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*4+0];
float y1=buf[k*4+1];
float z1=buf[k*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float sumr=0;
for (int k=0;k<n;k++){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
// phase 3: accumulate the transported mass into 'match' and shrink the
// remaining left mass accordingly
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
// NOTE(review): ratioL[k] is read before the k<n guard below — for
// out-of-range k this reads adjacent scratch memory; verify upstream
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*4+0];
float y2=buf[l*4+1];
float z2=buf[l*4+2];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1=xyz1[i*n*3+k*3+0];
float y1=xyz1[i*n*3+k*3+1];
float z1=xyz1[i*n*3+k*3+2];
float suml=0;
for (int l=0;l<m;l++){
float x2=xyz2[i*m*3+l*3+0];
float y2=xyz2[i*m*3+l*3+1];
float z2=xyz2[i*m*3+l*3+2];
float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
// Per-batch matching cost:
//   out[i] = sum_{j<n, k<m} match[i, k, j] * ||xyz1[i, j] - xyz2[i, k]||
// xyz2 is staged through shared memory in tiles of 256 points; each thread
// accumulates a partial sum which is then combined with a block-wide
// butterfly reduction in 'allsum' (sized for blockDim.x <= 512).
// One block per batch element (strided over gridDim.x).
__global__ void matchcostkernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=256;
__shared__ float buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
// tile xyz2 into shared memory
for (int k0=0;k0<m;k0+=Block){
int endk=min(m,k0+Block);
for (int k=threadIdx.x;k<(endk-k0)*3;k+=blockDim.x){
buf[k]=xyz2[i*m*3+k0*3+k];
}
__syncthreads();
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
for (int k=0;k<endk-k0;k++){
//float x2=xyz2[(i*m+k)*3+0]-x1;
//float y2=xyz2[(i*m+k)*3+1]-y1;
//float z2=xyz2[(i*m+k)*3+2]-z1;
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=sqrtf(x2*x2+y2*y2+z2*z2);
subsum+=match[i*n*m+(k0+k)*n+j]*d;
}
}
__syncthreads();
}
// block-wide butterfly reduction of the per-thread partial sums
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
//void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
// matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
//}
// Gradient of the matching cost with respect to xyz2:
//   grad2[i,k,:] = sum_j match[i,k,j] * (xyz2[i,k,:]-xyz1[i,j,:]) / dist(j,k)
// blockIdx.x grid-strides over the b batches; blockIdx.y partitions the m
// xyz2 points into contiguous [kbeg,kend) ranges, so distinct blocks write
// disjoint rows of grad2.  Each thread accumulates a strided subset of the
// n xyz1 points; the block then tree-reduces the (x,y,z) partials in
// shared memory.  Requires blockDim.x <= 256 (capacity of sum_grad).
__global__ void matchcostgrad2kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
    __shared__ float sum_grad[256*3];   // per-thread (x,y,z) partial gradients
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        int kbeg=m*blockIdx.y/gridDim.y;
        int kend=m*(blockIdx.y+1)/gridDim.y;
        for (int k=kbeg;k<kend;k++){
            float x2=xyz2[(i*m+k)*3+0];
            float y2=xyz2[(i*m+k)*3+1];
            float z2=xyz2[(i*m+k)*3+2];
            float subsumx=0,subsumy=0,subsumz=0;
            for (int j=threadIdx.x;j<n;j+=blockDim.x){
                float x1=x2-xyz1[(i*n+j)*3+0];
                float y1=y2-xyz1[(i*n+j)*3+1];
                float z1=z2-xyz1[(i*n+j)*3+2];
                // rsqrtf argument is clamped so coincident points do not
                // divide by zero.
                float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
                subsumx+=x1*d;
                subsumy+=y1*d;
                subsumz+=z1*d;
            }
            sum_grad[threadIdx.x*3+0]=subsumx;
            sum_grad[threadIdx.x*3+1]=subsumy;
            sum_grad[threadIdx.x*3+2]=subsumz;
            // Interleaved tree reduction; the leading __syncthreads in each
            // step also guards the writes above.
            for (int j=1;j<blockDim.x;j<<=1){
                __syncthreads();
                int j1=threadIdx.x;
                int j2=threadIdx.x+j;
                if ((j1&j)==0 && j2<blockDim.x){
                    sum_grad[j1*3+0]+=sum_grad[j2*3+0];
                    sum_grad[j1*3+1]+=sum_grad[j2*3+1];
                    sum_grad[j1*3+2]+=sum_grad[j2*3+2];
                }
            }
            if (threadIdx.x==0){
                grad2[(i*m+k)*3+0]=sum_grad[0];
                grad2[(i*m+k)*3+1]=sum_grad[1];
                grad2[(i*m+k)*3+2]=sum_grad[2];
            }
            __syncthreads();            // sum_grad is reused for the next k
        }
    }
}
// Gradient of the matching cost with respect to xyz1:
//   grad1[i,l,:] = sum_k match[i,k,l] * (xyz1[i,l,:]-xyz2[i,k,:]) / dist(l,k)
// Blocks grid-stride over the b batches and threads stride over the n
// points of xyz1; each thread owns one output row at a time, so no
// synchronization is needed.  The rsqrtf argument is clamped so coincident
// points do not divide by zero.
__global__ void matchcostgrad1kernel(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
    for (int batch=blockIdx.x; batch<b; batch+=gridDim.x){
        for (int j=threadIdx.x; j<n; j+=blockDim.x){
            const float px=xyz1[batch*n*3+j*3+0];
            const float py=xyz1[batch*n*3+j*3+1];
            const float pz=xyz1[batch*n*3+j*3+2];
            float gx=0.f, gy=0.f, gz=0.f;
            for (int k=0; k<m; k++){
                const float dx=px-xyz2[batch*m*3+k*3+0];
                const float dy=py-xyz2[batch*m*3+k*3+1];
                const float dz=pz-xyz2[batch*m*3+k*3+2];
                const float scale=match[batch*n*m+k*n+j]*rsqrtf(fmaxf(dx*dx+dy*dy+dz*dz,1e-20f));
                gx+=dx*scale;
                gy+=dy*scale;
                gz+=dz*scale;
            }
            grad1[batch*n*3+j*3+0]=gx;
            grad1[batch*n*3+j*3+1]=gy;
            grad1[batch*n*3+j*3+2]=gz;
        }
    }
}
//void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad2){
// matchcostgrad<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
//}
/*void AddGPUKernel(Dtype *in_a, Dtype *in_b, Dtype *out_c, int N,
cudaStream_t stream)*/
// temp: TensorShape{b,(n+m)*2}
// Host launcher for the approximate-matching kernel.
// `temp` is device scratch sized b*(n+m)*2 floats (see comment above).
// Throws std::runtime_error if the kernel launch is rejected.
void approxmatch(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp, cudaStream_t stream){
    approxmatchkernel
        <<<32, 512, 0, stream>>>(b,n,m,xyz1,xyz2,match,temp);
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
        throw std::runtime_error(Formatter()
            << "CUDA kernel failed : " << std::to_string(status));
}
// Host launcher for the matching-cost kernel; fills out[0..b) with the
// per-batch transport cost.  Throws std::runtime_error on launch failure.
void matchcost(int b,int n,int m,const float * xyz1,const float * xyz2,float * match, float * out, cudaStream_t stream){
    matchcostkernel<<<32,512,0,stream>>>(b,n,m,xyz1,xyz2,match,out);
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
        throw std::runtime_error(Formatter()
            << "CUDA kernel failed : " << std::to_string(status));
}
// Host launcher for both matching-cost gradient kernels (w.r.t. xyz1 and
// xyz2) on the same stream.  Throws std::runtime_error on launch failure.
void matchcostgrad(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2, cudaStream_t stream){
    matchcostgrad1kernel<<<32,512,0,stream>>>(b,n,m,xyz1,xyz2,match,grad1);
    matchcostgrad2kernel<<<dim3(32,32),256,0,stream>>>(b,n,m,xyz1,xyz2,match,grad2);
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
        throw std::runtime_error(Formatter()
            << "CUDA kernel failed : " << std::to_string(status));
}
#define LBANN_SCATTER_LAYER_INSTANTIATE
#include "lbann/layers/transform/scatter.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
using Dim2 = gpu_lib::array<size_t, 2>;
using Dim3 = gpu_lib::array<size_t, 3>;
/** @brief Kernel for scattering a 3D tensor
 *
 *  output(k,indices(k,j),i) += values(k,j,i) if axis == 0 (has_row_vectors)
 *  output(k,j,indices(k,i)) += values(k,j,i) if axis == 1
 *
 *  Contributions are accumulated with atomic adds, so duplicate indices sum;
 *  indices outside [0, bounds) are silently dropped.  Threads grid-stride
 *  over (batch, row, column) via the z/y/x launch dimensions.
 *
 *  Block dimensions: bdimx x bdimy x bdimz
 *
 *  Grid dimensions: (num_columns_input_mat / bdimx) x (num_rows / bdimy) x mb_size /bdimz
 */
template <typename T, bool has_row_vectors>
__global__ void scatter3d_kernel(
  const T* __restrict__ indices,
  Dim2 indices_strides,
  const T* __restrict__ values,
  Dim3 values_dims,
  Dim3 values_strides,
  T* __restrict__ output,
  Dim3 output_dims,
  Dim3 output_strides) {
  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = gridDim.x * blockDim.x;
  const size_t nthreadsy = gridDim.y * blockDim.y;
  const size_t nthreadsz = gridDim.z * blockDim.z;
  auto mini_batch_size = output_dims[0];
  auto num_rows = values_dims[1];
  auto num_value_columns = values_dims[2];
  // Valid scatter range: output rows when axis == 0, output columns otherwise.
  auto bounds = has_row_vectors? output_dims[1] : output_dims[2];
  for (size_t batch = gidz; batch < mini_batch_size; batch+=nthreadsz){
    for(size_t row = gidy; row < num_rows; row+=nthreadsy){
      for (size_t i = gidx; i < num_value_columns; i+=nthreadsx){
        // Along axis 0 the index array is addressed by row, otherwise by column.
        const auto axis = has_row_vectors? row: i;
        const auto index_offest = axis*indices_strides[1];
        // Indices arrive as floating-point data; floor them to integers.
        const auto ind = static_cast<El::Int>(
          gpu_lib::floor(
            indices[batch*indices_strides[0] + index_offest]));
        if (0<=ind && ind < static_cast<El::Int>(bounds)){
          const auto output_axis_1 = has_row_vectors? ind : static_cast<El::Int>(row);
          const auto output_axis_2 = has_row_vectors? static_cast<El::Int>(i): ind;
          const auto output_offset = output_axis_1 * output_strides[1] + output_axis_2 * output_strides[2];
          const auto& x = values[batch*values_strides[0] + row*values_strides[1] + i*values_strides[2]];
          auto &y = output[batch*output_strides[0] + output_offset];
          // Atomic: several (row, i) pairs may map to the same output entry.
          gpu_lib::atomic_add(&y, x);
        }
      }
    }
  }
}
/** @brief Kernel for gathering a 3D tensor
 *
 *  output(k,j,i) = values(k,indices(k,j),i) if axis == 0 (has_row_vectors)
 *  output(k,j,i) = values(k,j,indices(k,i)) if axis == 1
 *
 *  Indices outside [0, bounds) yield zeros in the output.  Threads
 *  grid-stride over (batch, row, column) via the z/y/x launch dimensions.
 *
 *  Block dimensions: bdimx x bdimy x bdimz
 *
 *  Grid dimensions: (num_columns_output_mat / bdimx) x (num_rows / bdimy) x mb_size /bdimz
 */
template <typename T, bool has_row_vectors>
__global__ void gather3d_kernel(
  const T* __restrict__ indices,
  Dim2 indices_strides,
  const T* __restrict__ values,
  Dim3 values_dims,
  Dim3 values_strides,
  T* __restrict__ output,
  Dim3 output_dims,
  Dim3 output_strides) {
  // Global thread coordinates and grid-wide strides
  const size_t x0 = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t y0 = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t z0 = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t xstep = gridDim.x * blockDim.x;
  const size_t ystep = gridDim.y * blockDim.y;
  const size_t zstep = gridDim.z * blockDim.z;
  const auto n_batches = output_dims[0];
  const auto n_rows = output_dims[1];
  const auto n_cols = output_dims[2];
  // Valid index range: rows of `values` when gathering along axis 0,
  // columns of `values` otherwise.
  const auto bounds = has_row_vectors ? values_dims[1] : values_dims[2];
  for (size_t batch = z0; batch < n_batches; batch += zstep) {
    for (size_t row = y0; row < n_rows; row += ystep) {
      for (size_t col = x0; col < n_cols; col += xstep) {
        // Along axis 0 the index array is addressed by row, otherwise by column.
        const auto which = has_row_vectors ? row : col;
        // Indices arrive as floating-point data; floor them to integers.
        const auto ind = static_cast<El::Int>(
          gpu_lib::floor(
            indices[batch*indices_strides[0] + which*indices_strides[1]]));
        auto& y = output[batch*output_strides[0]
                         + row*output_strides[1]
                         + col*output_strides[2]];
        if (0 <= ind && ind < static_cast<El::Int>(bounds)) {
          const auto vrow = has_row_vectors ? ind : static_cast<El::Int>(row);
          const auto vcol = has_row_vectors ? static_cast<El::Int>(col) : ind;
          y = values[batch*values_strides[0]
                     + vrow*values_strides[1]
                     + vcol*values_strides[2]];
        }
        else {
          y = T{0.f};
        }
      }
    }
  }
}
} // namespace <anon>
// Forward pass: scatter the value matrix (input 0) into the activations at
// the positions given by the (floored) index matrix (input 1).  Duplicate
// indices accumulate; out-of-range indices are dropped by the kernel.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void scatter_layer<TensorDataType, Layout, Device>::fp_compute() {

  // Local matrices
  const auto& local_values = this->get_local_prev_activations(0);
  const auto& local_indices = this->get_local_prev_activations(1);
  auto& local_output = this->get_local_activations();

  const auto& input_dims_ = this->get_input_dims();
  const auto& output_dims_ = this->get_output_dims();

  std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
  std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());

  const size_t local_mini_batch_size = local_indices.Width();

  // A 2D input scatters rows (axis 0) or columns (axis 1); a 1D input is
  // treated as a single row of values.
  const bool is_2D = input_dims.size()>1;
  const bool has_row_vectors = (is_2D && m_scatter_axis == 0);

  const size_t values_size = is_2D ? input_dims[1] : this->get_input_size(0);
  const size_t output_size = is_2D ? this->get_output_dims()[1] : this->get_output_size();

  const size_t num_rows = is_2D ? input_dims[0] : 1;
  const size_t num_output_rows = has_row_vectors ? this->get_output_dims()[0]: num_rows;

  // Row stride within one sample; 0 for 1D data (single row).
  const size_t value_stride_2 = is_2D ? values_size : 0;
  const size_t output_stride_2 = is_2D ? output_size : 0;

  // Scatter into output matrix
  El::Zero(local_output);
  if (!local_values.IsEmpty()) {
    // Order all three matrices' streams before the launch.
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
                                       gpu::get_sync_info(local_values),
                                       gpu::get_sync_info(local_indices));

    constexpr size_t block_size_x = 32;
    constexpr size_t block_size_y = 8;

    dim3 block_dims, grid_dims;
    block_dims.x = block_size_x;
    block_dims.y = block_size_y;
    block_dims.z = 1;

    // x covers value columns, y covers rows, z covers the mini-batch.
    grid_dims.x = (values_size + block_dims.x - 1) / block_dims.x;
    grid_dims.y = (num_rows + block_dims.y - 1) / block_dims.y;
    grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
    gpu_lib::clip_grid_dims(grid_dims);

    // The axis choice is a template parameter so the kernel branch is
    // resolved at compile time.
    if (has_row_vectors)
    {
      hydrogen::gpu::LaunchKernel(
        scatter3d_kernel<TensorDataType, true>,
        grid_dims, block_dims, 0, multisync,
        local_indices.LockedBuffer(),
        Dim2{static_cast<size_t>(local_indices.LDim()), 1},
        local_values.LockedBuffer(),
        Dim3{local_mini_batch_size, num_rows, values_size},
        Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
        local_output.Buffer(),
        Dim3{local_mini_batch_size, num_output_rows, output_size},
        Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
    }else{
      hydrogen::gpu::LaunchKernel(
        scatter3d_kernel<TensorDataType, false>,
        grid_dims, block_dims, 0, multisync,
        local_indices.LockedBuffer(),
        Dim2{static_cast<size_t>(local_indices.LDim()), 1},
        local_values.LockedBuffer(),
        Dim3{local_mini_batch_size, num_rows, values_size},
        Dim3{static_cast<size_t>(local_values.LDim()), value_stride_2, 1},
        local_output.Buffer(),
        Dim3{local_mini_batch_size, num_output_rows, output_size},
        Dim3{static_cast<size_t>(local_output.LDim()), output_stride_2, 1});
    }
  }
}
// Backward pass: the gradient w.r.t. the values is a gather of the output
// gradient at the scatter indices; the gradient w.r.t. the indices is zero
// (the mapping is piecewise constant in the index values).
template <typename TensorDataType, data_layout Layout, El::Device Device>
void scatter_layer<TensorDataType, Layout, Device>::bp_compute() {

  // Local matrices
  const auto& local_indices = this->get_local_prev_activations(1);
  const auto& local_output_grad = this->get_local_prev_error_signals();
  auto& local_values_grad = this->get_local_error_signals(0);
  auto& local_indices_grad = this->get_local_error_signals(1);

  const auto& input_dims_ = this->get_input_dims();
  const auto& output_dims_ = this->get_output_dims();

  std::vector<size_t> input_dims(input_dims_.begin(), input_dims_.end());
  std::vector<size_t> output_dims(output_dims_.begin(), output_dims_.end());

  const size_t local_mini_batch_size = local_indices.Width();

  // Same axis/shape bookkeeping as fp_compute.
  const bool is_2D = input_dims.size()>1;
  const bool has_row_vectors = (is_2D && m_scatter_axis == 0);

  const size_t values_size = (is_2D) ? input_dims[1] : this->get_input_size(0);
  const size_t output_size = (is_2D) ? this->get_output_dims()[1] : this->get_output_size();

  const size_t num_rows = (is_2D) ? input_dims[0] : 1;
  const size_t num_output_rows = has_row_vectors ? this->get_output_dims()[0]: num_rows;

  // Row stride within one sample; 0 for 1D data (single row).
  const size_t value_stride_2 = (is_2D) ? values_size : 0;
  const size_t output_stride_2 = (is_2D) ? output_size : 0;

  // Zero out gradient w.r.t. indices
  El::Zero(local_indices_grad);

  // Gather into gradient w.r.t. values
  if (!local_values_grad.IsEmpty()) {
    // Order all three matrices' streams before the launch.
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_values_grad),
                                       gpu::get_sync_info(local_output_grad),
                                       gpu::get_sync_info(local_indices));

    constexpr size_t block_size_x = 32;
    constexpr size_t block_size_y = 8;

    dim3 block_dims, grid_dims;
    block_dims.x = block_size_x;
    block_dims.y = block_size_y;
    block_dims.z = 1;

    grid_dims.x = (num_rows + block_dims.x - 1) / block_dims.x;
    grid_dims.y = (values_size + block_dims.y - 1) / block_dims.y;
    grid_dims.z = (local_mini_batch_size + block_dims.z - 1) / block_dims.z;
    gpu_lib::clip_grid_dims(grid_dims);

    // The axis choice is a template parameter so the kernel branch is
    // resolved at compile time.
    if (has_row_vectors){
      hydrogen::gpu::LaunchKernel(
        gather3d_kernel<TensorDataType, true>,
        grid_dims, block_dims, 0, multisync,
        local_indices.LockedBuffer(),
        Dim2{static_cast<size_t>(local_indices.LDim()), 1},
        local_output_grad.LockedBuffer(),
        Dim3{local_mini_batch_size, num_output_rows, output_size},
        Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
        local_values_grad.Buffer(),
        Dim3{local_mini_batch_size, num_rows, values_size},
        Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
    }else{
      hydrogen::gpu::LaunchKernel(
        gather3d_kernel<TensorDataType, false>,
        grid_dims, block_dims, 0, multisync,
        local_indices.LockedBuffer(),
        Dim2{static_cast<size_t>(local_indices.LDim()), 1},
        local_output_grad.LockedBuffer(),
        Dim3{local_mini_batch_size, num_output_rows, output_size},
        Dim3{static_cast<size_t>(local_output_grad.LDim()), output_stride_2, 1},
        local_values_grad.Buffer(),
        Dim3{local_mini_batch_size, num_rows, values_size},
        Dim3{static_cast<size_t>(local_values_grad.LDim()), value_stride_2, 1});
    }
  }
}
#define PROTO(T) \
template class scatter_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann | the_stack |
#include <cfloat>

#include "_reg_common_gpu.h"
#include "_reg_tools_gpu.h"
#include "_reg_tools_kernels.cu"
/* *************************************************************** */
/* *************************************************************** */
/* Restricts the voxel-wise NMI gradient onto the control-point grid,
 * scaled by `weight`.  The voxel gradient is read through a texture and
 * one float4 is written per control point. */
void reg_voxelCentric2NodeCentric_gpu(nifti_image *targetImage,
                                      nifti_image *controlPointImage,
                                      float4 **voxelNMIGradientArray_d,
                                      float4 **nodeNMIGradientArray_d,
                                      float weight)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    const int nodeNumber = controlPointImage->nx * controlPointImage->ny * controlPointImage->nz;
    const int voxelNumber = targetImage->nx * targetImage->ny * targetImage->nz;
    const int3 targetImageDim = make_int3(targetImage->nx, targetImage->ny, targetImage->nz);
    const int3 gridSize = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);

    // Spacing ratio between the control-point grid and the voxel grid
    float3 ratio = make_float3(controlPointImage->dx / targetImage->dx,
                               controlPointImage->dy / targetImage->dy,
                               controlPointImage->dz / targetImage->dz);
    // Collapse the third dimension for 2D images
    if(gridSize.z==1) ratio.z=0;

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_TargetImageDim,&targetImageDim,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&gridSize,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNodeRatio,&ratio,sizeof(float3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&weight,sizeof(float)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *voxelNMIGradientArray_d, voxelNumber*sizeof(float4)))

    // Square grid large enough to cover every control point
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_voxelCentric2NodeCentric));
    const dim3 blockDims(NR_BLOCK->Block_reg_voxelCentric2NodeCentric,1,1);
    const dim3 gridDims(gridEdge,gridEdge,1);
    reg_voxelCentric2NodeCentric_kernel <<< gridDims, blockDims >>> (*nodeNMIGradientArray_d);
    NR_CUDA_CHECK_KERNEL(gridDims,blockDims)
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
}
/* *************************************************************** */
/* *************************************************************** */
/* Reorients the node-based NMI gradient from voxel space to real space.
 * The first three rows of the affine matrix are uploaded as float4 values
 * and read by the kernel through a texture. */
void reg_convertNMIGradientFromVoxelToRealSpace_gpu( mat44 *sourceMatrix_xyz,
                                                     nifti_image *controlPointImage,
                                                     float4 **nodeNMIGradientArray_d)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    const int nodeNumber = controlPointImage->nx * controlPointImage->ny * controlPointImage->nz;
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NodeNumber,&nodeNumber,sizeof(int)))

    // Stage the matrix rows in pinned host memory, push them to the device,
    // then release the host copy.
    float4 *matrixRows_h;
    NR_CUDA_SAFE_CALL(cudaMallocHost(&matrixRows_h, 3*sizeof(float4)))
    for(int r=0; r<3; r++)
        matrixRows_h[r] = make_float4(sourceMatrix_xyz->m[r][0],
                                      sourceMatrix_xyz->m[r][1],
                                      sourceMatrix_xyz->m[r][2],
                                      sourceMatrix_xyz->m[r][3]);
    float4 *matrixRows_d;
    NR_CUDA_SAFE_CALL(cudaMalloc(&matrixRows_d, 3*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(matrixRows_d, matrixRows_h, 3*sizeof(float4), cudaMemcpyHostToDevice))
    NR_CUDA_SAFE_CALL(cudaFreeHost((void *)matrixRows_h))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0, matrixTexture, matrixRows_d, 3*sizeof(float4)))

    // Square grid large enough to cover every control point
    const unsigned int gridEdge =
        (unsigned int)ceil(sqrtf((float)nodeNumber/(float)NR_BLOCK->Block_reg_convertNMIGradientFromVoxelToRealSpace));
    const dim3 gridDims(gridEdge,gridEdge,1);
    const dim3 blockDims(NR_BLOCK->Block_reg_convertNMIGradientFromVoxelToRealSpace,1,1);
    _reg_convertNMIGradientFromVoxelToRealSpace_kernel <<< gridDims, blockDims >>> (*nodeNMIGradientArray_d);
    NR_CUDA_CHECK_KERNEL(gridDims,blockDims)
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(matrixTexture))
    NR_CUDA_SAFE_CALL(cudaFree(matrixRows_d))
}
/* *************************************************************** */
/* *************************************************************** */
/* Applies a separable Gaussian smoothing to a float4 device image, one
 * axis at a time (dims 1..3), skipping axes disabled in smoothXYZ or of
 * extent 1.  A positive sigma is expressed in real-world units (divided by
 * the axis pixdim); a negative sigma is interpreted voxel-wise via its
 * absolute value.  smoothXYZ may be NULL, meaning "smooth every axis".
 *
 * BUG FIX: the copy into c_VoxelNumber previously used sizeof(int3)
 * (12 bytes) although voxelNumber is a single integer, reading past the
 * variable and overflowing the symbol; every other call site in this file
 * uses sizeof(int). */
void reg_gaussianSmoothing_gpu( nifti_image *image,
                                float4 **imageArray_d,
                                float sigma,
                                bool smoothXYZ[8])
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    const unsigned int voxelNumber = image->nx * image->ny * image->nz;
    const int3 imageDim = make_int3(image->nx, image->ny, image->nz);

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageDim, &imageDim,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber, &voxelNumber,sizeof(int)))

    // Default to smoothing every axis when no selection is provided
    bool axisToSmooth[8];
    if(smoothXYZ==NULL){
        for(int i=0; i<8; i++) axisToSmooth[i]=true;
    }
    else{
        for(int i=0; i<8; i++) axisToSmooth[i]=smoothXYZ[i];
    }

    for(int n=1; n<4; n++){
        if(axisToSmooth[n]==true && image->dim[n]>1){
            float currentSigma;
            if(sigma>0) currentSigma=sigma/image->pixdim[n];
            else currentSigma=fabs(sigma); // voxel based if negative value
            int radius=(int)ceil(currentSigma*3.0f);
            if(radius>0){
                // Build a normalised 1D Gaussian kernel on the host
                int kernelSize = 1+radius*2;
                float *kernel_h;
                NR_CUDA_SAFE_CALL(cudaMallocHost(&kernel_h, kernelSize*sizeof(float)))
                float kernelSum=0;
                for(int i=-radius; i<=radius; i++){
                    kernel_h[radius+i]=(float)(exp( -((float)i*(float)i)/(2.0*currentSigma*currentSigma)) /
                                               (currentSigma*2.506628274631));
                    // 2.506... = sqrt(2*pi)
                    kernelSum += kernel_h[radius+i];
                }
                for(int i=0; i<kernelSize; i++)
                    kernel_h[i] /= kernelSum;

                float *kernel_d;
                NR_CUDA_SAFE_CALL(cudaMalloc(&kernel_d, kernelSize*sizeof(float)))
                NR_CUDA_SAFE_CALL(cudaMemcpy(kernel_d, kernel_h, kernelSize*sizeof(float), cudaMemcpyHostToDevice))
                NR_CUDA_SAFE_CALL(cudaFreeHost(kernel_h))

                // Convolve into a scratch image, then copy back over the input
                float4 *smoothedImage;
                NR_CUDA_SAFE_CALL(cudaMalloc(&smoothedImage,voxelNumber*sizeof(float4)))
                NR_CUDA_SAFE_CALL(cudaBindTexture(0, convolutionKernelTexture, kernel_d, kernelSize*sizeof(float)))
                NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *imageArray_d, voxelNumber*sizeof(float4)))

                unsigned int Grid_reg_ApplyConvolutionWindow;
                dim3 B,G;
                switch(n){
                case 1:
                    Grid_reg_ApplyConvolutionWindow =
                        (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongX));
                    B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongX,1,1);
                    G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                    _reg_ApplyConvolutionWindowAlongX_kernel <<< G, B >>> (smoothedImage, kernelSize);
                    NR_CUDA_CHECK_KERNEL(G,B)
                    break;
                case 2:
                    Grid_reg_ApplyConvolutionWindow =
                        (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongY));
                    B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongY,1,1);
                    G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                    _reg_ApplyConvolutionWindowAlongY_kernel <<< G, B >>> (smoothedImage, kernelSize);
                    NR_CUDA_CHECK_KERNEL(G,B)
                    break;
                case 3:
                    Grid_reg_ApplyConvolutionWindow =
                        (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongZ));
                    B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongZ,1,1);
                    G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                    _reg_ApplyConvolutionWindowAlongZ_kernel <<< G, B >>> (smoothedImage, kernelSize);
                    NR_CUDA_CHECK_KERNEL(G,B)
                    break;
                }
                NR_CUDA_SAFE_CALL(cudaUnbindTexture(convolutionKernelTexture))
                NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
                NR_CUDA_SAFE_CALL(cudaFree(kernel_d))
                NR_CUDA_SAFE_CALL(cudaMemcpy(*imageArray_d, smoothedImage, voxelNumber*sizeof(float4), cudaMemcpyDeviceToDevice))
                NR_CUDA_SAFE_CALL(cudaFree(smoothedImage))
            }
        }
    }
}
/* *************************************************************** */
/* Smooths a float4 device image with a cubic B-spline kernel, one axis at
 * a time.  For each axis n the kernel radius is 2*spacingVoxel[n] and the
 * B-spline coefficients are evaluated at |it|/spacingVoxel[n].
 *
 * BUG FIX: the coefficient previously divided by spacingVoxel[0] for every
 * axis, although the radius for axis n is derived from spacingVoxel[n];
 * with anisotropic spacing this produced a kernel inconsistent with its
 * own support.  The axis' own spacing is now used. */
void reg_smoothImageForCubicSpline_gpu( nifti_image *image,
                                        float4 **imageArray_d,
                                        float *spacingVoxel)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);

    const int voxelNumber = image->nx * image->ny * image->nz;
    const int3 imageDim = make_int3(image->nx, image->ny, image->nz);

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageDim, &imageDim,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber, &voxelNumber,sizeof(int)))

    for(int n=0; n<3; n++){
        if(spacingVoxel[n]>0 && image->dim[n+1]>1){
            // Build a normalised cubic B-spline kernel on the host
            int radius = static_cast<int>(reg_ceil(2.0*spacingVoxel[n]));
            int kernelSize = 1+radius*2;
            float *kernel_h;
            NR_CUDA_SAFE_CALL(cudaMallocHost(&kernel_h, kernelSize*sizeof(float)))
            float coeffSum=0;
            for(int it=-radius; it<=radius; it++){
                // Normalised distance along the current axis
                float coeff = fabsf((float)it/spacingVoxel[n]);
                if(coeff<1.0) kernel_h[it+radius] = (float)(2.0/3.0 - coeff*coeff + 0.5*coeff*coeff*coeff);
                else if (coeff<2.0) kernel_h[it+radius] = (float)(-(coeff-2.0)*(coeff-2.0)*(coeff-2.0)/6.0);
                else kernel_h[it+radius]=0;
                coeffSum += kernel_h[it+radius];
            }
            for(int it=0;it<kernelSize;it++) kernel_h[it] /= coeffSum;

            float *kernel_d;
            NR_CUDA_SAFE_CALL(cudaMalloc(&kernel_d, kernelSize*sizeof(float)))
            NR_CUDA_SAFE_CALL(cudaMemcpy(kernel_d, kernel_h, kernelSize*sizeof(float), cudaMemcpyHostToDevice))
            NR_CUDA_SAFE_CALL(cudaFreeHost(kernel_h))
            NR_CUDA_SAFE_CALL(cudaBindTexture(0, convolutionKernelTexture, kernel_d, kernelSize*sizeof(float)))

            // Convolve into a scratch image, then copy back over the input
            float4 *smoothedImage_d;
            NR_CUDA_SAFE_CALL(cudaMalloc(&smoothedImage_d,voxelNumber*sizeof(float4)))
            NR_CUDA_SAFE_CALL(cudaBindTexture(0, gradientImageTexture, *imageArray_d, voxelNumber*sizeof(float4)))

            unsigned int Grid_reg_ApplyConvolutionWindow;
            dim3 B,G;
            switch(n){
            case 0:
                Grid_reg_ApplyConvolutionWindow =
                    (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongX));
                B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongX,1,1);
                G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                _reg_ApplyConvolutionWindowAlongX_kernel <<< G, B >>> (smoothedImage_d, kernelSize);
                NR_CUDA_CHECK_KERNEL(G,B)
                break;
            case 1:
                Grid_reg_ApplyConvolutionWindow =
                    (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongY));
                B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongY,1,1);
                G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                _reg_ApplyConvolutionWindowAlongY_kernel <<< G, B >>> (smoothedImage_d, kernelSize);
                NR_CUDA_CHECK_KERNEL(G,B)
                break;
            case 2:
                Grid_reg_ApplyConvolutionWindow =
                    (unsigned int)ceil(sqrtf((float)voxelNumber/(float)NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongZ));
                B=dim3(NR_BLOCK->Block_reg_ApplyConvolutionWindowAlongZ,1,1);
                G=dim3(Grid_reg_ApplyConvolutionWindow,Grid_reg_ApplyConvolutionWindow,1);
                _reg_ApplyConvolutionWindowAlongZ_kernel <<< G, B >>> (smoothedImage_d, kernelSize);
                NR_CUDA_CHECK_KERNEL(G,B)
                break;
            }
            NR_CUDA_SAFE_CALL(cudaUnbindTexture(convolutionKernelTexture))
            NR_CUDA_SAFE_CALL(cudaUnbindTexture(gradientImageTexture))
            NR_CUDA_SAFE_CALL(cudaFree(kernel_d))
            NR_CUDA_SAFE_CALL(cudaMemcpy(*imageArray_d, smoothedImage_d, voxelNumber*sizeof(float4), cudaMemcpyDeviceToDevice))
            NR_CUDA_SAFE_CALL(cudaFree(smoothedImage_d))
        }
    }
}
/* *************************************************************** */
/* Multiplies every float4 of a device array of length `num` by `value`. */
void reg_multiplyValue_gpu(int num, float4 **array_d, float value)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&num,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&value,sizeof(float)))
    // Square grid covering all num elements
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)num/(float)NR_BLOCK->Block_reg_arithmetic));
    const dim3 G(gridEdge,gridEdge,1);
    const dim3 B(NR_BLOCK->Block_reg_arithmetic,1,1);
    reg_multiplyValue_kernel_float4<<<G,B>>>(*array_d);
    NR_CUDA_CHECK_KERNEL(G,B)
}
/* *************************************************************** */
/* Adds `value` to every float4 of a device array of length `num`. */
void reg_addValue_gpu(int num, float4 **array_d, float value)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&num,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&value,sizeof(float)))
    // Square grid covering all num elements
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)num/(float)NR_BLOCK->Block_reg_arithmetic));
    const dim3 G(gridEdge,gridEdge,1);
    const dim3 B(NR_BLOCK->Block_reg_arithmetic,1,1);
    reg_addValue_kernel_float4<<<G,B>>>(*array_d);
    NR_CUDA_CHECK_KERNEL(G,B)
}
/* *************************************************************** */
/* Element-wise multiplication of two float4 device arrays of length `num`. */
void reg_multiplyArrays_gpu(int num, float4 **array1_d, float4 **array2_d)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&num,sizeof(int)))
    // Square grid covering all num elements
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)num/(float)NR_BLOCK->Block_reg_arithmetic));
    const dim3 G(gridEdge,gridEdge,1);
    const dim3 B(NR_BLOCK->Block_reg_arithmetic,1,1);
    reg_multiplyArrays_kernel_float4<<<G,B>>>(*array1_d,*array2_d);
    NR_CUDA_CHECK_KERNEL(G,B)
}
/* *************************************************************** */
/* Element-wise addition of two float4 device arrays of length `num`. */
void reg_addArrays_gpu(int num, float4 **array1_d, float4 **array2_d)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&num,sizeof(int)))
    // Square grid covering all num elements
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)num/(float)NR_BLOCK->Block_reg_arithmetic));
    const dim3 G(gridEdge,gridEdge,1);
    const dim3 B(NR_BLOCK->Block_reg_arithmetic,1,1);
    reg_addArrays_kernel_float4<<<G,B>>>(*array1_d,*array2_d);
    NR_CUDA_CHECK_KERNEL(G,B)
}
/* *************************************************************** */
/* Fills an int device array of length `num` via the mask-filling kernel. */
void reg_fillMaskArray_gpu(int num, int **array1_d)
{
    // Block sizes were configured in _reg_common_gpu.h - cudaCommon_setCUDACard
    NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&num,sizeof(int)))
    // Square grid covering all num elements
    const unsigned int gridEdge = (unsigned int)ceil(sqrtf((float)num/(float)NR_BLOCK->Block_reg_arithmetic));
    const dim3 G(gridEdge,gridEdge,1);
    const dim3 B(NR_BLOCK->Block_reg_arithmetic,1,1);
    reg_fillMaskArray_kernel<<<G,B>>>(*array1_d);
    NR_CUDA_CHECK_KERNEL(G,B)
}
/* *************************************************************** */
/* Sums `size` device-resident floats with a Thrust reduction. */
float reg_sumReduction_gpu(float *array_d,int size)
{
    thrust::device_ptr<float> first(array_d);
    thrust::device_ptr<float> last = first + size;
    return thrust::reduce(first, last, 0.f, thrust::plus<float>());
}
/* *************************************************************** */
/* Returns the maximum of `size` device-resident floats.
 * BUG FIX: the reduction was previously seeded with 0.f, which silently
 * clamped all-negative inputs to zero; -FLT_MAX is the identity element
 * for a max reduction. */
float reg_maxReduction_gpu(float *array_d,int size)
{
    thrust::device_ptr<float> dptr(array_d);
    return thrust::reduce(dptr, dptr+size, -FLT_MAX, thrust::maximum<float>());
}
/* *************************************************************** */
/* Returns the minimum of `size` device-resident floats.
 * BUG FIX: the reduction was previously seeded with 0.f, so any array of
 * strictly positive values wrongly reduced to zero; FLT_MAX is the
 * identity element for a min reduction. */
float reg_minReduction_gpu(float *array_d,int size)
{
    thrust::device_ptr<float> dptr(array_d);
    return thrust::reduce(dptr, dptr+size, FLT_MAX, thrust::minimum<float>());
}
/* *************************************************************** */
#endif | the_stack |
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation(TetMesh* meshPtr, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx, int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize)
{
// Getting the neighbors for the mesh
meshPtr->need_neighbors();
// Vertex count:
int nn = meshPtr->vertices.size();
// Counting up edges for adjacency:
int edgeCount = 0;
for (int vIt = 0; vIt < nn; vIt++)
{
edgeCount += meshPtr->neighbors[vIt].size();
}
//Allocating storage for array values of adjacency
int* xadj = new int[nn+1];
int* adjncy = new int[edgeCount];
// filling the arrays:
xadj[0] = 0;
int idx = 0;
// Populating the arrays:
for(int i = 1; i < nn + 1; i++)
{
xadj[i] = xadj[i-1] + meshPtr->neighbors[i-1].size();
for(int j =0; j < meshPtr->neighbors[i-1].size(); j++)
{
adjncy[idx++] = meshPtr->neighbors[i-1][j];
}
}
// Calling the other override to finish:
computePermutation(nn, xadj, adjncy, permutation, ipermutation, aggregateIdx, partitionIdx, partitionlabel, nnout, xadjout, adjncyout, metissize);
// Freeing up memories:
delete [] xadj;
delete [] adjncy;
}
// Builds a CSR adjacency structure from the triangle mesh's per-vertex
// neighbor lists and delegates to the graph-based computePermutation overload.
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation(TriMesh* meshPtr, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx,int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize)
{
   // Make sure the neighbor lists are available
   meshPtr->need_neighbors();
   // Vertex count
   const int nn = meshPtr->vertices.size();
   // Total (directed) edge count = sum of the neighbor-list lengths
   int edgeCount = 0;
   for (int v = 0; v < nn; v++)
      edgeCount += meshPtr->neighbors[v].size();
   // CSR arrays: xadj holds row offsets, adjncy the concatenated neighbor ids
   int* xadj = new int[nn + 1];
   int* adjncy = new int[edgeCount];
   xadj[0] = 0;
   int offset = 0;
   for (int v = 0; v < nn; v++)
   {
      for (int j = 0; j < meshPtr->neighbors[v].size(); j++)
         adjncy[offset++] = meshPtr->neighbors[v][j];
      xadj[v + 1] = offset;
   }
   // Delegate to the graph-based overload
   computePermutation(nn, xadj, adjncy, permutation, ipermutation, aggregateIdx, partitionIdx, partitionlabel, nnout, xadjout, adjncyout, metissize);
   // Release the temporary CSR arrays
   delete [] xadj;
   delete [] adjncy;
}
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation(int nn, int* xadj, int* adjncy, IdxVector_h &permutation, IdxVector_h &ipermutation, IdxVector_h &aggregateIdx, IdxVector_h &partitionIdx, int* partitionlabel, int* nnout, int* &xadjout, int* &adjncyout, int metissize)
{
  // Partitions the CSR graph (nn vertices, xadj/adjncy) into blocks with
  // METIS, aggregates each block independently with a distance-2 MIS, and
  // produces:
  //   permutation / ipermutation   - forward / inverse vertex reordering
  //   aggregateIdx / partitionIdx  - CSR-style offsets of aggregates/blocks
  //   partitionlabel               - block id of each original vertex
  //   nnout / xadjout / adjncyout  - coarse graph of aggregates (malloc'd)
  //Get block aggregation
  int nparts, edgecut;
  int *npart = (int*)malloc(nn * sizeof(int));
  nparts = (nn / metissize);
  if (nparts < 2)
    nparts = 2;
  int options[10], pnumflag=0, wgtflag=0;
  for(int i=0; i<10; i++)
    options[i] = 0;
  METIS_PartGraphKway(&nn, xadj, adjncy, NULL, NULL, &wgtflag, &pnumflag, &nparts, options, &edgecut, npart);
  // Finding partitions that have vertices assigned:
  std::vector<int> realParts;
  realParts.resize(nn);
  for (int i=0; i<nn; i++)
  {
    realParts[i] = npart[i];
  }
  std::sort(realParts.begin(), realParts.end());
  // Scanning for gaps (empty partitions) in the sorted array
  std::vector<int> empties;
  if (realParts[0] > 0)
    for (int i = 0; i < realParts[0]; i++)
      empties.push_back(i);
  for (int i = 1; i < nn; i++)
  {
    if (realParts[i] != realParts[i-1])
    {
      if (realParts[i] > realParts[i-1] + 1)
      {
        // BUGFIX: was `j = realParts[i] + 1; j < realParts[i]`, a loop
        // that never executed, so empty partitions were never recorded.
        // The gap lies between the two adjacent sorted values:
        for (int j = realParts[i-1] + 1; j < realParts[i]; j++)
          empties.push_back(j);
      }
    }
  }
  // Re-numbering the npart array to close the gaps
  for (int i = 0; i < empties.size(); i++)
  {
    for (int j = 0; j < nn; j++)
    {
      if(npart[j] > empties[i])
        npart[j]--;
    }
    // Later gap indices shift down by one as well:
    for (int j = i; j < empties.size(); j++)
    {
      empties[j]--;
    }
  }
  // Getting the actual partition count (max id minus removed gaps):
  int partCount = *(realParts.end() - 1) - empties.size() + 1;
  //Building a structure of sub-graphs to aggregate:
  std::vector< std::vector<int> > blocks;
  blocks.resize(partCount);
  for (int i = 0; i < nn; i++)
    blocks[npart[i]].push_back(i);
  // Creating the sub graphs for each block
  // subgraphs[n][0] = pointer to xadj, [1] = pointer to adjncy [2]= pointer to npart [3]= number of aggregates
  int aggregateCount = 0;
  std::vector< std::vector<int *> > subGraphs(partCount);
  for (int bIt = 0; bIt < blocks.size(); bIt++)
  {
    // Resizing to hold all the pointers
    subGraphs[bIt].resize(4);
    // Storing counts for array sizing
    int adjacencySize = 0;
    // Temporary vector to hold adjacency;
    std::vector< std::vector<int> > adjacency(blocks[bIt].size());
    // For every vertex add its in-block neighbors to the adjacency list:
    for (int vIt = 0; vIt < blocks[bIt].size(); vIt++)
    {
      int start = xadj[blocks[bIt][vIt]];
      int end = xadj[blocks[bIt][vIt] + 1];
      for (int nIt = start; nIt < end; nIt++)
      {
        // Checking if the neighbor is within block:
        int neighbor = adjncy[nIt];
        if (npart[neighbor] == bIt)
        {
          int localNeighbor = -1;
          // Find the local index of the neighbor (linear scan):
          for (int i = 0; i < blocks[bIt].size(); i++)
          {
            if (blocks[bIt][i] == neighbor)
              localNeighbor = i;
          }
          adjacency[vIt].push_back(localNeighbor);
          adjacencySize++;
        }
      }
    }
    // Now allocate the arrays:
    // The xadj array
    subGraphs[bIt][0] = (int *)malloc((blocks[bIt].size() + 1) * sizeof(int));
    // The adjncy array
    subGraphs[bIt][1] = (int *)malloc((adjacencySize) * sizeof(int));
    // The npart array
    subGraphs[bIt][2] = (int *)malloc((blocks[bIt].size()) * sizeof(int));
    // The number of aggregates
    subGraphs[bIt][3] = (int *)malloc(sizeof(int));
    // Populating the arrays from the adjacency vector:
    subGraphs[bIt][0][0] = 0;
    int idx = 0;
    for(int i = 1; i < blocks[bIt].size() + 1; i++)
    {
      subGraphs[bIt][0][i] = subGraphs[bIt][0][i-1] + adjacency[i-1].size();
      for(int j =0; j < adjacency[i-1].size(); j++)
      {
        subGraphs[bIt][1][idx++] = adjacency[i-1][j];
      }
    }
    // Checking if the block's subgraph is connected (BFS per component):
    std::queue<int> toCheck;
    std::vector<int> visited(blocks[bIt].size());
    for (int i=0; i < blocks[bIt].size(); i++)
      visited[i] = -1;
    int nextRoot = 0;
    int componentID = 0;
    bool connected = true;
    bool completed = false;
    while (!completed)
    {
      toCheck.push(nextRoot);
      visited[nextRoot] = componentID;
      while (!toCheck.empty()){
        int currentV = toCheck.front();
        toCheck.pop();
        for (int nIt = subGraphs[bIt][0][currentV]; nIt < subGraphs[bIt][0][currentV + 1]; nIt++)
        {
          if(visited[subGraphs[bIt][1][nIt]] == -1)
          {
            visited[subGraphs[bIt][1][nIt]] = componentID;
            toCheck.push(subGraphs[bIt][1][nIt]);
          }
        }
      }
      // Any unvisited vertex starts a new component:
      completed = true;
      for (int i = 0; i < blocks[bIt].size(); i++)
        if(visited[i] < 0)
        {
          connected = false;
          componentID++;
          nextRoot = i;
          completed = false;
          break;
        }
    }
    if (!connected)
    {
      std::cout << "Block: " << bIt << " is an unconnected graph:\n";
      for (int i = 0; i < blocks[bIt].size(); i++)
        std::cout << visited[i] << ", ";
      std::cout << "\n";
    }
    // Calling the mis_subroutine to partition
    aggregateGraphMIS(blocks[bIt].size(), subGraphs[bIt][0], subGraphs[bIt][1], subGraphs[bIt][2], subGraphs[bIt][3]);
    aggregateCount += subGraphs[bIt][3][0];
  }
  // Running a sanity check on the partitionings:
  for (int bIt=0; bIt < blocks.size(); bIt++)
  {
    for (int vIt=0; vIt < blocks[bIt].size(); vIt++)
    {
      if (subGraphs[bIt][2][vIt] < 0)
        std::cout << "There is a problem with block: " << bIt << " of " << blocks.size() << " vertex: " << vIt << " in partition: " << subGraphs[bIt][2][vIt] << "?\n";
    }
  }
  // Now that every block has been aggregated generate the permutation matrices
  aggregateIdx.resize(aggregateCount + 1);
  partitionIdx.resize(blocks.size() + 1);
  *nnout = aggregateCount;
  int* aggregatelabel = new int[nn];
  int currentPosition = 0;
  int aggregatePosition = 0;
  for (int bIt = 0; bIt < blocks.size(); bIt++)
  {
    partitionIdx[bIt] = aggregatePosition;
    for (int aIt = 0; aIt < subGraphs[bIt][3][0]; aIt++)
    {
      aggregateIdx[aggregatePosition] = currentPosition;
      // Find every vertex in the aggregate
      for (int i = 0; i < blocks[bIt].size(); i++)
      {
        if (subGraphs[bIt][2][i] == aIt)
        {
          int globalVertex = blocks[bIt][i];
          permutation[globalVertex] = currentPosition;
          ipermutation[currentPosition] = globalVertex;
          partitionlabel[globalVertex] = bIt;
          aggregatelabel[globalVertex] = aggregatePosition;
          currentPosition++;
        }
      }
      aggregatePosition++;
    }
  }
  aggregateIdx[aggregateCount] = nn;
  partitionIdx[blocks.size()] = aggregateCount;
  // Finding the adjacency for the graph of aggregates:
  std::vector< std::vector <int> > aggregateAdjacency(aggregateCount);
  int edgeCount = 0;
  for (int aIt = 0; aIt < aggregateCount; aIt++)
  {
    // A set keeps neighbor aggregates unique and sorted:
    std::set<int> partEdges;
    int begin = aggregateIdx[aIt];
    int end = aggregateIdx[aIt + 1];
    for (int vIt = begin; vIt < end; vIt++)
    {
      // Getting the original id of the vertex
      int originalID = ipermutation[vIt];
      // Examining all neighbors of the vertex
      for (int nIt = xadj[originalID]; nIt < xadj[originalID + 1]; nIt++)
      {
        if (aggregatelabel[ adjncy[nIt] ] != aggregatelabel[originalID])
          partEdges.insert(aggregatelabel[ adjncy[nIt] ]);
      }
    }
    for ( std::set<int>::iterator i=partEdges.begin(); i != partEdges.end(); i++)
    {
      aggregateAdjacency[aIt].push_back(*i);
      edgeCount++;
    }
  }
  // Allocate storage for the xadjout and adjncyout arrays:
  xadjout = (int*)malloc((aggregateCount + 1) * sizeof(int));
  adjncyout = (int*)malloc((edgeCount) * sizeof(int));
  // Populate the outgoing arrays
  xadjout[0] = 0;
  int idx = 0;
  for(int aIt = 1; aIt < aggregateCount + 1; aIt++)
  {
    xadjout[aIt] = xadjout[aIt-1] + aggregateAdjacency[aIt-1].size();
    for(int nIt = 0; nIt < aggregateAdjacency[aIt-1].size(); nIt ++)
    {
      adjncyout[idx++] = aggregateAdjacency[aIt-1][nIt];
    }
  }
  // Freeing up memory:
  for (int i = 0; i < subGraphs.size(); i++)
    for (int j = 0; j < 4; j++)
      free(subGraphs[i][j]);
  // BUGFIX: npart was malloc'd above and leaked:
  free(npart);
  delete [] aggregatelabel;
  // And Done.
  return;
}
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::aggregateGraphMIS(int n, int *adjIndexes, int *adjacency, int *partition, int *partCount)
{
  // Aggregates the CSR graph (n, adjIndexes, adjacency) by growing
  // partitions around a distance-2 maximal independent set: each MIS root
  // claims itself and its direct neighbors; remaining vertices are then
  // attached to the nearest / most-adjacent partition.
  // Outputs: partition[i] = aggregate id of vertex i,
  //          *partCount   = number of aggregates created.
  // Creating a graph with edges for every 2-path in original:
  std::vector< std::vector<int> > inducedAdj(n);
  // Every Vertex
  for (int i=0; i<n; i++) {
    // All neighbors
    for (int j=adjIndexes[i]; j<adjIndexes[i+1]; j++) {
      // All neighbors of neighbors
      int neighbor = adjacency[j];
      for (int jj = adjIndexes[neighbor]; jj < adjIndexes[neighbor +1]; jj++) {
        // Checking if this vertex is the original or a distance one
        int vertex = adjacency[jj];
        bool tooClose = false;
        if (vertex != i) {
          // Checking against distance one
          for (int ii = adjIndexes[i]; ii < adjIndexes[i+1]; ii++) {
            if (adjacency[ii] == vertex) {
              tooClose = true;
              break;
            }
          }
        }
        else {
          tooClose = true;
        }
        // If vertex is two away and not 1 or 0 then add to adjacency:
        if (!tooClose) {
          inducedAdj[i].push_back(vertex);
        }
      }
    }
  }
  // Clearing partitions:
  for (int i = 0; i < n; i++) {
    partition[i] = -1;
  }
  // Picking a better maximal independent set:
  // mis[i]: 1 = in MIS, 0 = excluded, -1 = undecided.
  std::vector<int> mis(n, -1);
  std::vector<int> rootDistance(n, -1);
  bool incomplete = true;
  int nextVertex = 0;
  int curPart = 0;
  do {
    while (incomplete) {
      incomplete = false;
      mis[nextVertex] = 1;
      rootDistance[nextVertex] = 0;
      partition[nextVertex] = curPart;
      // Marking adjacent(squared) nodes as not in the mis:
      for (int i = 0; i < inducedAdj[nextVertex].size(); i++) {
        mis[inducedAdj[nextVertex][i]] = 0;
      }
      // Marking adjacent nodes as in the same partition:
      for (int i = adjIndexes[nextVertex]; i < adjIndexes[nextVertex + 1]; i++) {
        partition[ adjacency[i] ] = curPart;
        rootDistance[ adjacency[i] ] = 1;
      }
      curPart++;
      // Getting a list of potential next nodes:
      std::vector<int> potentialNodes;
      for (int i = 0; i < n; i++) {
        // For every node known to be outside MIS:
        if (mis[i] == 0) {
          for (int j = adjIndexes[i]; j < adjIndexes[i+1]; j++) {
            // If a neighbor of an outsider has not been treated add it:
            if (mis[ adjacency[j] ] == -1) {
              potentialNodes.push_back(adjacency[j]);
            }
          }
        }
      }
      // If there are potential nodes pick the one that occurs most often,
      // i.e. the one adjacent to the most already-excluded nodes:
      if (potentialNodes.size() > 0)
      {
        incomplete = true;
        std::sort(potentialNodes.begin(), potentialNodes.end());
        int occurs = 0;
        int maxOccur = 0;
        int curNode = potentialNodes[0];
        nextVertex = curNode;
        for (int i = 0; i < potentialNodes.size(); i++)
        {
          if (potentialNodes[i] == curNode)
          {
            occurs++;
          }
          else
          {
            // If this node has the most occurences seen, set it as next
            if (maxOccur < occurs)
            {
              nextVertex = curNode;
              maxOccur = occurs;
            }
            // Reset the counters
            occurs = 1;
            curNode = potentialNodes[i];
          }
        }
        // The final run of equal values is not followed by a mismatch:
        if (maxOccur < occurs)
        {
          nextVertex = curNode;
        }
      }
    }
    // Setting the partCount:
    *partCount = curPart;
    // Adding unpartitioned nodes to best partition for them:
    for (int i = 0; i < n; i++) {
      if (partition[i] == -1) {
        int adjSize = adjIndexes[i + 1] - adjIndexes[i];
        // BUGFIX: an isolated vertex (no neighbors) used to read
        // adjRootDist[0] out of bounds below; leave it unassigned so the
        // scan at the end promotes it to the root of a new partition.
        if (adjSize == 0)
          continue;
        int * adjParts = new int[ adjSize ];
        int * adjRootDist = new int[ adjSize ];
        // Getting adjacent partitions:
        for (int j = 0; j < adjSize; j++) {
          int adjacentNodePart = partition[ adjacency[adjIndexes[i] + j] ];
          adjParts[j] = adjacentNodePart;
          // Getting the distance of the adjacent node to the root of its partition:
          if (adjacentNodePart == -1) {
            adjRootDist[j] = 1000;
          }
          else {
            adjRootDist[j] = rootDistance[ adjacency[adjIndexes[i] + j] ];
          }
        }
        // Finding the smallest partition distance:
        int smallestDistance = adjRootDist[0];
        for (int j = 0; j<adjSize; j++) {
          if (smallestDistance > adjRootDist[j]) {
            smallestDistance = adjRootDist[j];
          }
        }
        // Finding most adjacent partition:
        int addToPart = -1;
        int adjCount = 0;
        for (int j = 0; j < adjSize; j++) {
          if (adjParts[j] > -1 && adjRootDist[j] == smallestDistance) {
            int curCount = 1;
            int curPart = adjParts[j];
            for (int jj = j + 1; jj < adjSize; jj++) {
              if (adjParts[jj] == adjParts[j]) {
                curCount++;
              }
            }
            if (curCount > adjCount) {
              adjCount = curCount;
              addToPart = curPart;
            }
          }
        }
        // Adding the node to best part found:
        partition[i] = addToPart;
        rootDistance[i] = smallestDistance + 1;
        // BUGFIX: arrays allocated with new[] must be freed with delete[]
        // (plain delete on a new[] pointer is undefined behavior).
        delete [] adjParts;
        delete [] adjRootDist;
      }
    }
    // If there are unassigned nodes set the first one as a new root
    // (This should only happen if there were non-connected graphs supplied)
    nextVertex = -1;
    for (int vIt = 0; vIt < n; vIt++)
    {
      if (partition[vIt] == -1){
        nextVertex = vIt;
        incomplete = true;
        break;
      }
    }
  }while (nextVertex != -1);
}
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation_d(IdxVector_d &adjIndexesIn,
IdxVector_d &adjacencyIn, IdxVector_d &permutation, IdxVector_d &ipermutation,
IdxVector_d &aggregateIdx, IdxVector_d &partitionIdx, IdxVector_d &partitionLabel,
IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut, int aggregation_type,
int parameters, int part_max_size, bool verbose)
{
// Device-side (CSR input) overload: intentionally an unimplemented no-op
// stub; only the host-side computePermutation overloads do real work here.
}
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation_d(TriMesh *meshPtr,
IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx,
IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut,
int aggregation_type, int parameters, int part_max_size, bool verbose)
{
// Device-side TriMesh overload: intentionally an unimplemented no-op stub.
}
template <class Matrix, class Vector>
void MIS_Aggregator<Matrix, Vector>::computePermutation_d(TetMesh *meshPtr,
IdxVector_d &permutation, IdxVector_d &ipermutation, IdxVector_d &aggregateIdx,
IdxVector_d &partitionIdx, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut,
IdxVector_d &adjacencyOut, int aggregation_type, int parameters, int part_max_size, bool verbose)
{
// Device-side TetMesh overload: intentionally an unimplemented no-op stub.
}
/****************************************
* Explicit instantiations
***************************************/
template class MIS_Aggregator<Matrix_h, Vector_h>;
template class MIS_Aggregator<Matrix_d, Vector_d>; | the_stack |
#include <gunrock/app/pr/pr_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
// Registers the command-line parameters specific to this bandwidth/latency
// test; all are optional with the defaults given below. Multi-value
// parameters are swept by Switch_Parameters in main_struct::operator().
cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(parameters.Use<int>(
      "num-elements",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      1024 * 1024 * 100, "number of elements per GPU to test on", __FILE__,
      __LINE__));
  GUARD_CU(parameters.Use<int>(
      "for-size",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      3276800, "number of operations to perform per repeat", __FILE__,
      __LINE__));
  GUARD_CU(parameters.Use<int>(
      "num-repeats",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      100, "number of times to repeat the operations", __FILE__, __LINE__));
  GUARD_CU(parameters.Use<int>(
      "device",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0,
      "the devices to run on", __FILE__, __LINE__));
  GUARD_CU(parameters.Use<int>(
      "rand-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "rand seed to generate random numbers; default is time(NULL)", __FILE__,
      __LINE__));
  GUARD_CU(parameters.Use<std::string>(
      "access-type",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "All", "Memory access type, <Random | Regular | All>", __FILE__,
      __LINE__));
  GUARD_CU(parameters.Use<std::string>(
      "operation",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      "All", "Operations to test, <Read | Write | Update | All>", __FILE__,
      __LINE__));
  GUARD_CU(parameters.Use<std::string>(
      "bandwidth-latency",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      "All", "Test type, <Bandwidth | Latency | All>", __FILE__, __LINE__));
  GUARD_CU(parameters.Use<int>(
      "num-runs",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      2, "how many times to repeat the testing", __FILE__, __LINE__));
  GUARD_CU(parameters.Use<bool>(
      "use-UVM",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      false, "Whether to include UVM test", __FILE__, __LINE__));
  return retval;
}
using BWLFlag = uint32_t;
// Bit flags selecting the test configuration. Each group occupies its own
// nibble so a mask (the *_BASE values) can isolate it.
enum : BWLFlag {
  OPERATION_BASE = 0x0F,  // mask for the memory-operation group
  READ = 0x01,
  WRITE = 0x02,
  UPDATE = 0x04,
  ACCESS_BASE = 0xF0,     // mask for the access-pattern group
  RANDOM = 0x10,
  REGULAR = 0x20,
  BL_BASE = 0xF00,        // mask for the measurement-type group
  BANDWIDTH = 0x100,
  LATENCY = 0x200,
};
// Test routines
// RNG types used to fill the random index arrays:
typedef std::mt19937 Engine;
typedef std::uniform_real_distribution<float> Distribution;
// Runs one measurement pass: every GPU accesses each peer GPU in turn
// (peer_offset 0 .. num_devices-1), then host/UVM memory
// (peer_offset == num_devices, only if --use-UVM), then all GPUs at once
// (peer_offset == num_devices + 1), timing the chosen operation
// (Read / Write / Update) with the chosen access pattern (Regular / Random),
// and finally prints a bandwidth (GB/s) or latency (us) table.
// Expects the per-device arrays / streams / peer tables prepared by
// main_struct::operator(); per-thread errors are reported via retvals.
template <typename GraphT, typename ArrayT, typename ArrayT2>
cudaError_t Test_BWL(
    util::Parameters &parameters, GraphT &graph,
    util::Array1D<typename GraphT::SizeT, typename GraphT::VertexT>
        *gpu_elements,
    util::Array1D<typename GraphT::SizeT, typename GraphT::VertexT>
        *gpu_results,
    ArrayT &host_elements, ArrayT &host_results, ArrayT2 &all_elements,
    ArrayT2 &all_results, cudaStream_t *gpu_streams, int **peer_accessables,
    float **timings, cudaError_t *retvals) {
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  cudaError_t retval = cudaSuccess;

  auto devices = parameters.template Get<std::vector<int>>("device");
  int num_devices = devices.size();
  // BUGFIX: these flags were previously left uninitialized, which is
  // undefined behavior whenever the corresponding string matches none of
  // the branches below; default to Read / Regular / Bandwidth instead.
  BWLFlag operation_flag = READ, access_flag = REGULAR, bl_flag = BANDWIDTH;
  std::string operation_str = parameters.template Get<std::string>("operation");
  std::string access_str = parameters.template Get<std::string>("access-type");
  std::string bl_str =
      parameters.template Get<std::string>("bandwidth-latency");
  bool use_UVM = parameters.template Get<bool>("use-UVM");
  uint32_t num_elements = parameters.template Get<uint32_t>("num-elements");
  if (operation_str == "Read")
    operation_flag = READ;
  else if (operation_str == "Write")
    operation_flag = WRITE;
  else if (operation_str == "Update")
    operation_flag = UPDATE;
  if (access_str == "Random")
    access_flag = RANDOM;
  else if (access_str == "Regular")
    access_flag = REGULAR;
  if (bl_str == "Bandwidth")
    bl_flag = BANDWIDTH;
  else if (bl_str == "Latency")
    bl_flag = LATENCY;
  uint32_t for_size = parameters.template Get<uint32_t>("for-size");
  uint32_t num_repeats = parameters.template Get<uint32_t>("num-repeats");

  for (int peer_offset = 0; peer_offset <= num_devices + 1; peer_offset++) {
#pragma omp parallel num_threads(num_devices)
    {
      do {
        int thread_num = omp_get_thread_num();
        auto device_idx = devices[thread_num];
        auto &retval = retvals[thread_num];
        auto &stream = gpu_streams[thread_num];
        int peer = (thread_num + peer_offset) % num_devices;
        auto elements = gpu_elements[thread_num].GetPointer(util::DEVICE);
        auto &results = gpu_results[thread_num];
        auto peer_elements = gpu_elements[peer].GetPointer(util::DEVICE);
        auto peer_results = gpu_results[peer].GetPointer(util::DEVICE);
        auto &all_element = all_elements[thread_num];
        auto &all_result = all_results[thread_num];
        float elapsed = -1;
#pragma omp barrier
        // peer >= num_devices encodes the pseudo targets: host
        // (== num_devices) and all-to-all (== num_devices + 1).
        if (peer_offset >= num_devices)
          peer = peer_offset;
        else if (peer_accessables[thread_num][peer] == 0)
          break;  // no P2P access between this device pair
        if (peer_offset == num_devices && !use_UVM) break;

        util::CpuTimer cpu_timer;
        cpu_timer.Start();
        if (peer_offset <= num_devices) {
          // Single-target test: peer GPU memory or host (UVM) memory.
          if (operation_flag == READ) {
            VertexT *sources =
                ((peer_offset == num_devices) ? (host_elements + 0)
                                              : peer_elements);
            retval = results.ForAll(
                [elements, sources, num_elements, access_flag,
                 num_repeats] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    result[pos] = sources[new_pos];
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          } else if (operation_flag == WRITE) {
            VertexT *targets =
                ((peer_offset == num_devices) ? (host_results + 0)
                                              : peer_results);
            retval = results.ForAll(
                [elements, targets, num_elements, access_flag,
                 num_repeats] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    targets[new_pos] = new_pos;
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          } else if (operation_flag == UPDATE) {
            VertexT *targets =
                ((peer_offset == num_devices) ? (host_results + 0)
                                              : peer_results);
            retval = results.ForAll(
                [elements, targets, num_elements, access_flag,
                 num_repeats] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    targets[new_pos] += 1;
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          }
        } else {  // All to all: targets interleaved across all devices
          if (operation_flag == READ) {
            retval = results.ForAll(
                [elements, num_elements, access_flag, num_repeats, all_element,
                 num_devices] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    result[pos] = all_element[new_pos % num_devices][new_pos];
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          } else if (operation_flag == WRITE) {
            retval = results.ForAll(
                [elements, num_elements, all_result, num_devices, access_flag,
                 num_repeats] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    all_result[new_pos % num_devices][new_pos] = new_pos;
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          } else if (operation_flag == UPDATE) {
            retval = results.ForAll(
                [elements, num_elements, all_result, num_devices, access_flag,
                 num_repeats] __host__ __device__(VertexT * result,
                                                  const SizeT &pos) {
                  for (int i = 0; i < num_repeats; i++) {
                    VertexT new_pos = pos + i * 65536;
                    new_pos = new_pos % num_elements;
                    if (access_flag == RANDOM) new_pos = elements[pos];
                    all_result[new_pos % num_devices][new_pos] += 1;
                  }
                },
                (bl_flag == LATENCY) ? 1 : for_size, util::DEVICE, stream,
                1280);
          }
        }
        if (retval) break;
        retval =
            util::GRError(cudaStreamSynchronize(stream),
                          "cudaStreamSynchronize failed", __FILE__, __LINE__);
        cpu_timer.Stop();
        elapsed = cpu_timer.ElapsedMillis();
        timings[thread_num][peer] = elapsed;
      } while (false);
    }
  }
  for (int i = 0; i < num_devices; i++)
    if (retvals[i]) return retvals[i];

  // Print the result table: one row per GPU, one column per target.
  std::string title = access_str + " " + operation_str + " " + bl_str;
  if (bl_flag == BANDWIDTH)
    title = title + " (GB/s)";
  else if (bl_flag == LATENCY)
    title = title + " (us)";
  std::cout << title << std::endl;
  for (int i = 0; i < num_devices; i++)
    std::cout << (i == 0 ? "Peer" : "") << "\t" << devices[i];
  std::cout << "\tHost\tAll2All" << std::endl;
  for (int gpu = 0; gpu < num_devices; gpu++) {
    std::cout << "GPU " << gpu;
    for (int peer = 0; peer <= num_devices + 1; peer++) {
      std::cout << "\t";
      if (peer_accessables[gpu][peer] == 0 ||
          (!use_UVM && peer == num_devices)) {
        std::cout << "--";
        continue;
      }
      auto elapsed = timings[gpu][peer];
      if (bl_flag == BANDWIDTH) {
        // Bytes moved = for_size * num_repeats * sizeof(VertexT);
        // elapsed is in milliseconds, hence the trailing * 1000.
        std::cout << 1.0 / 1024 / 1024 / 1024 * for_size * num_repeats *
                         sizeof(VertexT) / elapsed * 1000;
      }
      if (bl_flag == LATENCY) {
        // elapsed (ms) over num_repeats single-element ops, in microseconds.
        std::cout << elapsed / num_repeats * 1000;
      }
    }
    std::cout << std::endl;
  }
  return retval;
}
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
  /**
   * @brief the actual main function, after type switching
   * @tparam VertexT Type of vertex identifier
   * @tparam SizeT Type of graph size, i.e. type of edge identifier
   * @tparam ValueT Type of edge values
   * @param parameters Command line parameters
   * @param v,s,val Place holders for type deduction
   * \return cudaError_t error message(s), if any
   */
  template <typename VertexT,  // Use int as the vertex identifier
            typename SizeT,    // Use int as the graph size type
            typename ValueT>   // Use int as the value type
  cudaError_t
  operator()(util::Parameters &parameters, VertexT v, SizeT s, ValueT val) {
    typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_COO>
        GraphT;
    cudaError_t retval = cudaSuccess;
    GraphT graph;
    // Parameters swept by Switch_Parameters below:
    std::vector<std::string> switches{"num-elements", "for-size",
                                      "num-repeats", "access-type",
                                      "operation", "bandwidth-latency"};
    // Expand "All" into the concrete value lists:
    if (parameters.Get<std::string>("access-type") == "All")
      parameters.Set("access-type", "Random,Regular");
    if (parameters.Get<std::string>("operation") == "All")
      parameters.Set("operation", "Read,Write,Update");
    if (parameters.Get<std::string>("bandwidth-latency") == "All")
      parameters.Set("bandwidth-latency", "Bandwidth,Latency");

    // Buffers are sized to the largest values being swept:
    auto num_elements =
        parameters.template Get<std::vector<SizeT>>("num-elements");
    SizeT max_elements = 0;
    for (auto num_element : num_elements)
      if (max_elements < num_element) max_elements = num_element;
    auto for_sizes = parameters.template Get<std::vector<SizeT>>("for-size");
    SizeT max_for_size = 0;
    for (auto for_size : for_sizes)
      if (max_for_size < for_size) max_for_size = for_size;

    int rand_seed = parameters.template Get<int>("rand-seed");
    auto devices = parameters.template Get<std::vector<int>>("device");
    int num_devices = devices.size();
    // Per-device state, indexed by thread number:
    cudaError_t *retvals = new cudaError_t[num_devices];
    util::Array1D<SizeT, VertexT> *gpu_elements =
        new util::Array1D<SizeT, VertexT>[num_devices];
    util::Array1D<SizeT, VertexT> *gpu_results =
        new util::Array1D<SizeT, VertexT>[num_devices];
    util::Array1D<SizeT, VertexT *> *all_elements =
        new util::Array1D<SizeT, VertexT *>[num_devices];
    util::Array1D<SizeT, VertexT *> *all_results =
        new util::Array1D<SizeT, VertexT *>[num_devices];
    cudaStream_t *gpu_streams = new cudaStream_t[num_devices];
    if (!util::isValid(rand_seed)) rand_seed = time(NULL);
    int **peer_accessables = new int *[num_devices + 1];
    float **timings = new float *[num_devices + 1];

    // util::Array1D<SizeT, VertexT, util::PINNED> host_elements;
    // util::Array1D<SizeT, VertexT, util::PINNED> host_results;
    // host_elements.SetName("host_elements");
    // host_results .SetName("host_results");
    // GUARD_CU(host_elements.Allocate(max_elements, util::HOST));
    // GUARD_CU(host_results .Allocate(max_elements, util::HOST));
    VertexT *host_elements = NULL;
    VertexT *host_results = NULL;
    // BUGFIX: error messages previously said "cudaMallocHost failed"
    // although the calls are cudaMallocManaged.
    GUARD_CU2(cudaMallocManaged((void **)(&host_elements),
                                (long long)max_elements * sizeof(VertexT)),
              "cudaMallocManaged failed");
    GUARD_CU2(cudaMallocManaged((void **)(&host_results),
                                (long long)max_elements * sizeof(VertexT)),
              "cudaMallocManaged failed");

    // Fill host_elements with random indices in [0, max_elements):
    Engine engine_(rand_seed + 11 * num_devices);
    Distribution distribution_(0.0, 1.0);
    for (SizeT i = 0; i < max_elements; i++) {
      host_elements[i] = distribution_(engine_) * max_elements;
      if (host_elements[i] >= max_elements) host_elements[i] -= max_elements;
    }
    util::PrintMsg("num_devices = " + std::to_string(num_devices));
    util::PrintMsg("rand-seed = " + std::to_string(rand_seed));

    // Per-device setup: stream, P2P access, UVM advice, random index array.
#pragma omp parallel num_threads(num_devices)
    {
      do {
        int thread_num = omp_get_thread_num();
        auto device_idx = devices[thread_num];
        auto &retval = retvals[thread_num];
        auto &elements = gpu_elements[thread_num];
        auto &results = gpu_results[thread_num];
        auto &stream = gpu_streams[thread_num];
        auto &peer_accessable = peer_accessables[thread_num];
        auto &timing = timings[thread_num];
        peer_accessable = new int[num_devices + 10];
        timing = new float[num_devices + 10];
        // Assume accessible until cudaDeviceCanAccessPeer says otherwise:
        for (int i = 0; i < num_devices + 10; i++) {
          peer_accessable[i] = 1;
        }
        util::PrintMsg("using device[" + std::to_string(thread_num) + "] " +
                       std::to_string(device_idx));
        retval = util::GRError(cudaSetDevice(device_idx),
                               "cudaSetDevice failed.", __FILE__, __LINE__);
        if (retval) break;
        retval = util::GRError(
            cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking),
            "cudaStreamCreateWithFlags failed.", __FILE__, __LINE__);
        if (retval) break;
        if (thread_num == 0) {
          retval = util::GRError(
              cudaMemAdvise(host_elements + 0,
                            ((long long)max_elements) * sizeof(VertexT),
                            cudaMemAdviseSetReadMostly, device_idx),
              "cudaMemAdvise failed", __FILE__, __LINE__);
          if (retval) break;
        }
        retval = util::GRError(
            cudaMemAdvise(host_elements + 0,
                          (long long)max_elements * sizeof(VertexT),
                          cudaMemAdviseSetAccessedBy, device_idx),
            "cudaMemAdvise failed", __FILE__, __LINE__);
        if (retval) break;
        retval = util::GRError(
            cudaMemAdvise(host_results + 0,
                          (long long)max_elements * sizeof(VertexT),
                          cudaMemAdviseSetAccessedBy, device_idx),
            "cudaMemAdvise failed", __FILE__, __LINE__);
        if (retval) break;

        // Enable P2P access to every reachable peer; record failures:
        for (int peer_offset = 1; peer_offset < num_devices; peer_offset++) {
          int peer = devices[(thread_num + peer_offset) % num_devices];
          int peer_access_avail = 0;
          retval = util::GRError(
              cudaDeviceCanAccessPeer(&peer_access_avail, device_idx, peer),
              "cudaDeviceCanAccessPeer failed", __FILE__, __LINE__);
          if (retval) break;
          if (peer_access_avail) {
            retval = util::GRError(cudaDeviceEnablePeerAccess(peer, 0),
                                   "cudaDeviceEnablePeerAccess failed",
                                   __FILE__, __LINE__);
            if (retval) break;
          } else {
            peer_accessable[peer] = 0;
          }
          if (retval) break;
        }
        if (retval) break;

        elements.SetName("elements[" + std::to_string(thread_num) + "]");
        retval = elements.Allocate(max_elements, util::DEVICE | util::HOST);
        if (retval) break;
        results.SetName("results[" + std::to_string(thread_num) + "]");
        retval =
            results.Allocate(max(max_elements, max_for_size), util::DEVICE);
        if (retval) break;

        // Per-device random indices for the Random access pattern:
        Engine engine(rand_seed + 11 * thread_num);
        Distribution distribution(0.0, 1.0);
        for (SizeT i = 0; i < max_elements; i++) {
          elements[i] = distribution(engine) * max_elements;
          if (elements[i] >= max_elements) elements[i] -= max_elements;
        }
        retval =
            elements.Move(util::HOST, util::DEVICE, max_elements, 0, stream);
        if (retval) break;
        retval =
            util::GRError(cudaStreamSynchronize(stream),
                          "cudaStreamSynchonorize failed", __FILE__, __LINE__);
        if (retval) break;
      } while (false);
    }
    for (int i = 0; i < num_devices; i++)
      if (retvals[i]) return retvals[i];

    // Gather per-device pointer tables for the all-to-all test; a device's
    // own pointers stand in for unreachable peers.
#pragma omp parallel num_threads(num_devices)
    {
      do {
        int thread_num = omp_get_thread_num();
        auto device_idx = devices[thread_num];
        auto &retval = retvals[thread_num];
        retval = util::GRError(cudaSetDevice(device_idx),
                               "cudaSetDevice failed.", __FILE__, __LINE__);
        if (retval) break;
        auto &all_element = all_elements[thread_num];
        auto &all_result = all_results[thread_num];
        retval = all_element.Allocate(num_devices, util::HOST | util::DEVICE);
        if (retval) break;
        retval = all_result.Allocate(num_devices, util::HOST | util::DEVICE);
        if (retval) break;
        for (int i = 0; i < num_devices; i++) {
          if (peer_accessables[thread_num][i] == 0) {
            all_element[i] = gpu_elements[thread_num].GetPointer(util::DEVICE);
            all_result[i] = gpu_results[thread_num].GetPointer(util::DEVICE);
            continue;
          }
          all_element[i] = gpu_elements[i].GetPointer(util::DEVICE);
          all_result[i] = gpu_results[i].GetPointer(util::DEVICE);
        }
        retval = all_element.Move(util::HOST, util::DEVICE);
        if (retval) break;
        retval = all_result.Move(util::HOST, util::DEVICE);
        if (retval) break;
      } while (false);
    }
    for (int i = 0; i < num_devices; i++)
      if (retvals[i]) return retvals[i];

    // Run the sweep num-runs times:
    int num_runs = parameters.template Get<int>("num-runs");
    for (int i = 0; i < num_runs; i++) {
      GUARD_CU(app::Switch_Parameters(
          parameters, graph, switches,
          [devices, retvals, gpu_elements, gpu_results, host_elements,
           host_results, gpu_streams, peer_accessables, timings, all_elements,
           all_results](util::Parameters &parameters, GraphT &graph) {
            return Test_BWL(parameters, graph, gpu_elements, gpu_results,
                            host_elements, host_results, all_elements,
                            all_results, gpu_streams, peer_accessables, timings,
                            retvals);
          }));
    }

    // Cleanup:
    for (int d = 0; d < num_devices; d++) {
      GUARD_CU2(cudaSetDevice(devices[d]), "cudaSetDevice failed");
      GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
      GUARD_CU(gpu_elements[d].Release());
      GUARD_CU(gpu_results[d].Release());
      // BUGFIX: the pointer tables, streams and per-device host arrays
      // were previously leaked:
      GUARD_CU(all_elements[d].Release());
      GUARD_CU(all_results[d].Release());
      GUARD_CU2(cudaStreamDestroy(gpu_streams[d]), "cudaStreamDestroy failed");
      delete[] peer_accessables[d];
      delete[] timings[d];
    }
    // BUGFIX: the managed host buffers and host-side arrays were leaked:
    GUARD_CU2(cudaFree(host_elements), "cudaFree failed");
    GUARD_CU2(cudaFree(host_results), "cudaFree failed");
    delete[] retvals;
    delete[] gpu_elements;
    delete[] gpu_results;
    delete[] all_elements;
    delete[] all_results;
    delete[] gpu_streams;
    delete[] peer_accessables;
    delete[] timings;
    return retval;
  }
};
// Entry point: registers parameters, parses the command line, and
// dispatches to main_struct::operator() for each enabled type combination.
int main(int argc, char **argv) {
  cudaError_t retval = cudaSuccess;
  // NOTE(review): the parameter-set label says "test pr" while this file
  // tests memory bandwidth/latency -- looks copy-pasted; confirm intent.
  util::Parameters parameters("test pr");
  GUARD_CU(graphio::UseParameters(parameters));
  GUARD_CU(app::UseParameters_test(parameters));
  GUARD_CU(UseParameters(parameters));
  GUARD_CU(parameters.Parse_CommandLine(argc, argv));
  if (parameters.Get<bool>("help")) {
    parameters.Print_Help();
    return cudaSuccess;
  }
  GUARD_CU(parameters.Check_Required());
  // Instantiate main_struct for the enabled vertex/size/value types:
  return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
                           app::SIZET_U32B | // app::SIZET_U64B |
                           app::VALUET_F32B | // app::VALUET_F64B |
                           app::DIRECTED | app::UNDIRECTED>(parameters,
                                                            main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: | the_stack |
#pragma once
#include <Cuda/Common/UtilsCuda.h>
#include <Cuda/Common/JacobianCuda.h>
#include <Cuda/Common/ReductionCuda.h>
#include <Cuda/Container/ArrayCudaDevice.cuh>
#include <Cuda/Container/Array2DCudaDevice.cuh>
#include "RegistrationCuda.h"
namespace open3d {
namespace cuda {
__global__
void ComputeColorGradientKernel(
    RegistrationCudaDevice estimation,
    CorrespondenceSetCudaDevice corres_for_gradient) {
    /* One thread per correspondence: delegate the per-point color-gradient
     * estimation for colored ICP to the device-side helper. */
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < corres_for_gradient.indices_.size()) {
        estimation.ComputePointwiseColorGradient(i, corres_for_gradient);
    }
}
void RegistrationCudaKernelCaller::ComputeColorGradient(
    RegistrationCuda &estimation,
    CorrespondenceSetCuda &corres_for_color_gradient) {
    /* Launch one thread per target point.
     * NOTE(review): the grid is sized by the number of target points while
     * the kernel guards on the correspondence count -- presumably
     * |correspondences| <= |target points|; confirm with the caller. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(
        DIV_CEILING(estimation.target_.points_.size(), THREAD_1D_UNIT));
    ComputeColorGradientKernel<<<blocks, threads>>>(
        *estimation.device_, *corres_for_color_gradient.device_);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
/* Block-wide reduction of the colored-ICP normal equations into
 * estimation.results_:
 *   [0..20]  upper triangle of JtJ,
 *   [21..26] Jtr,
 *   [27]     sum of squared residuals.
 * One thread per correspondence; THREAD_1D_UNIT threads per block. */
__global__
void BuildLinearSystemForColoredICPKernel(
    RegistrationCudaDevice estimation) {
    __shared__ float local_sum0[THREAD_1D_UNIT];
    __shared__ float local_sum1[THREAD_1D_UNIT];
    __shared__ float local_sum2[THREAD_1D_UNIT];

    const int tid = threadIdx.x;
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;

    /* Fix: the original returned early for out-of-range idx, so threads in
     * the tail block skipped the __syncthreads() calls below. A barrier in
     * divergent control flow is undefined behavior; instead, out-of-range
     * threads stay alive and contribute zeros to the reduction. */
    const bool valid = idx < estimation.correspondences_.indices_.size();

    Vector6f jacobian_I, jacobian_G, Jtr;
    float residual_I = 0, residual_G = 0;
    HessianCuda<6> JtJ;

    if (valid) {
        int source_idx = estimation.correspondences_.indices_[idx];
        int target_idx = estimation.correspondences_.matrix_(0, source_idx);
        estimation.ComputePointwiseColoredJacobianAndResidual(
            source_idx, target_idx, jacobian_I, jacobian_G,
            residual_I, residual_G);
        ComputeJtJAndJtr(jacobian_I, jacobian_G, residual_I, residual_G,
                         JtJ, Jtr);
    }

    /** Reduce Sum JtJ: 21 entries, 3 per pass **/
    for (size_t i = 0; i < 21; i += 3) {
        local_sum0[tid] = valid ? JtJ(i + 0) : 0;
        local_sum1[tid] = valid ? JtJ(i + 1) : 0;
        local_sum2[tid] = valid ? JtJ(i + 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(i + 0), local_sum0[0]);
            atomicAdd(&estimation.results_.at(i + 1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(i + 2), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum Jtr: 6 entries, 3 per pass **/
    const int OFFSET1 = 21;
    for (size_t i = 0; i < 6; i += 3) {
        local_sum0[tid] = valid ? Jtr(i + 0) : 0;
        local_sum1[tid] = valid ? Jtr(i + 1) : 0;
        local_sum2[tid] = valid ? Jtr(i + 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(i + 0 + OFFSET1), local_sum0[0]);
            atomicAdd(&estimation.results_.at(i + 1 + OFFSET1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(i + 2 + OFFSET1), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum rmse (residuals are zero for invalid threads) **/
    const int OFFSET2 = 27;
    {
        local_sum0[tid] = residual_I * residual_I + residual_G * residual_G;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(0 + OFFSET2), local_sum0[0]);
        }
        __syncthreads();
    }
}
void RegistrationCudaKernelCaller::BuildLinearSystemForColoredICP(
    RegistrationCuda &registration) {
    /* One thread per correspondence; tail threads are guarded in the
     * kernel. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(DIV_CEILING(
        registration.correspondences_.indices_.size(), THREAD_1D_UNIT));
    BuildLinearSystemForColoredICPKernel<<<blocks, threads>>>(
        *registration.device_);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
/* Block-wide reduction of the point-to-plane ICP normal equations into
 * estimation.results_:
 *   [0..20]  upper triangle of JtJ,
 *   [21..26] Jtr,
 *   [27]     sum of squared residuals.
 * One thread per correspondence; THREAD_1D_UNIT threads per block. */
__global__
void BuildLinearSystemForPointToPlaneICPKernel(
    RegistrationCudaDevice estimation) {
    __shared__ float local_sum0[THREAD_1D_UNIT];
    __shared__ float local_sum1[THREAD_1D_UNIT];
    __shared__ float local_sum2[THREAD_1D_UNIT];

    const int tid = threadIdx.x;
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;

    /* Fix: no early return on out-of-range idx -- every thread of the block
     * must reach every __syncthreads() below (a barrier inside divergent
     * control flow is undefined behavior). Invalid threads contribute
     * zeros. */
    const bool valid = idx < estimation.correspondences_.indices_.size();

    Vector6f jacobian, Jtr;
    float residual = 0;
    HessianCuda<6> JtJ;

    if (valid) {
        int source_idx = estimation.correspondences_.indices_[idx];
        int target_idx = estimation.correspondences_.matrix_(0, source_idx);
        estimation.ComputePointwisePointToPlaneJacobianAndResidual(
            source_idx, target_idx, jacobian, residual);
        ComputeJtJAndJtr(jacobian, residual, JtJ, Jtr);
    }

    /** Reduce Sum JtJ: 21 entries, 3 per pass **/
#pragma unroll 1
    for (size_t i = 0; i < 21; i += 3) {
        local_sum0[tid] = valid ? JtJ(i + 0) : 0;
        local_sum1[tid] = valid ? JtJ(i + 1) : 0;
        local_sum2[tid] = valid ? JtJ(i + 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(i + 0), local_sum0[0]);
            atomicAdd(&estimation.results_.at(i + 1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(i + 2), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum Jtr: 6 entries, 3 per pass **/
    const int OFFSET1 = 21;
#pragma unroll 1
    for (size_t i = 0; i < 6; i += 3) {
        local_sum0[tid] = valid ? Jtr(i + 0) : 0;
        local_sum1[tid] = valid ? Jtr(i + 1) : 0;
        local_sum2[tid] = valid ? Jtr(i + 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(i + 0 + OFFSET1), local_sum0[0]);
            atomicAdd(&estimation.results_.at(i + 1 + OFFSET1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(i + 2 + OFFSET1), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum rmse (residual is zero for invalid threads) **/
    const int OFFSET2 = 27;
    {
        local_sum0[tid] = residual * residual;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(0 + OFFSET2), local_sum0[0]);
        }
        __syncthreads();
    }
}
void RegistrationCudaKernelCaller::BuildLinearSystemForPointToPlaneICP(
    RegistrationCuda &registration) {
    /* One thread per correspondence; tail threads are guarded in the
     * kernel. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(DIV_CEILING(
        registration.correspondences_.indices_.size(), THREAD_1D_UNIT));
    BuildLinearSystemForPointToPlaneICPKernel<<<blocks, threads>>>(
        *registration.device_);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
/* Block-wide reduction of corresponding point sums into
 * estimation.results_:
 *   [0..2] sum of source points, [3..5] sum of target points
 * (used to compute the centroids for point-to-point ICP).
 * One thread per correspondence; THREAD_1D_UNIT threads per block. */
__global__
void ComputeSumForPointToPointICPKernel(
    RegistrationCudaDevice estimation) {
    __shared__ float local_sum0[THREAD_1D_UNIT];
    __shared__ float local_sum1[THREAD_1D_UNIT];
    __shared__ float local_sum2[THREAD_1D_UNIT];

    const int tid = threadIdx.x;
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;

    /* Fix: no early return on out-of-range idx -- every thread of the block
     * must reach every __syncthreads() below (a barrier inside divergent
     * control flow is undefined behavior). Invalid threads contribute
     * zeros. */
    const bool valid = idx < estimation.correspondences_.indices_.size();

    float sx = 0, sy = 0, sz = 0;
    float tx = 0, ty = 0, tz = 0;
    if (valid) {
        int source_idx = estimation.correspondences_.indices_[idx];
        int target_idx = estimation.correspondences_.matrix_(0, source_idx);
        Vector3f &source = estimation.source_.points_[source_idx];
        Vector3f &target = estimation.target_.points_[target_idx];
        sx = source(0); sy = source(1); sz = source(2);
        tx = target(0); ty = target(1); tz = target(2);
    }

    /** Reduce Sum source: results_[0..2] **/
    const int OFFSET1 = 0;
    {
        local_sum0[tid] = sx;
        local_sum1[tid] = sy;
        local_sum2[tid] = sz;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(0 + OFFSET1), local_sum0[0]);
            atomicAdd(&estimation.results_.at(1 + OFFSET1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(2 + OFFSET1), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum target: results_[3..5] **/
    const int OFFSET2 = 3;
    {
        local_sum0[tid] = tx;
        local_sum1[tid] = ty;
        local_sum2[tid] = tz;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(0 + OFFSET2), local_sum0[0]);
            atomicAdd(&estimation.results_.at(1 + OFFSET2), local_sum1[0]);
            atomicAdd(&estimation.results_.at(2 + OFFSET2), local_sum2[0]);
        }
        __syncthreads();
    }
}
void RegistrationCudaKernelCaller::ComputeSumForPointToPointICP(
    RegistrationCuda &registration) {
    /* One thread per correspondence; tail threads are guarded in the
     * kernel. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(DIV_CEILING(
        registration.correspondences_.indices_.size(), THREAD_1D_UNIT));
    ComputeSumForPointToPointICPKernel<<<blocks, threads>>>(
        *registration.device_);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
/* Block-wide reduction of the point-to-point ICP statistics into
 * estimation.results_:
 *   [0..8]  3x3 cross-covariance Sigma (row major),
 *   [9]     sum of squared source deviations,
 *   [10]    sum of squared residuals.
 * One thread per correspondence; THREAD_1D_UNIT threads per block. */
__global__
void BuildLinearSystemForPointToPointICPKernel(
    RegistrationCudaDevice estimation,
    Vector3f mean_source, Vector3f mean_target) {
    __shared__ float local_sum0[THREAD_1D_UNIT];
    __shared__ float local_sum1[THREAD_1D_UNIT];
    __shared__ float local_sum2[THREAD_1D_UNIT];

    const int tid = threadIdx.x;
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;

    /* Fix: no early return on out-of-range idx -- every thread of the block
     * must reach every __syncthreads() below (a barrier inside divergent
     * control flow is undefined behavior). Invalid threads contribute
     * zeros. */
    const bool valid = idx < estimation.correspondences_.indices_.size();

    Matrix3f Sigma;
    float sigma_source2 = 0, rmse = 0;
    if (valid) {
        int source_idx = estimation.correspondences_.indices_[idx];
        int target_idx = estimation.correspondences_.matrix_(0, source_idx);
        estimation.ComputePointwisePointToPointSigmaAndResidual(
            source_idx, target_idx, mean_source, mean_target,
            Sigma, sigma_source2, rmse);
    }

    /** Reduce Sum Sigma: one row (3 entries) per pass **/
    for (size_t i = 0; i < 3; i++) {
        local_sum0[tid] = valid ? Sigma(i, 0) : 0;
        local_sum1[tid] = valid ? Sigma(i, 1) : 0;
        local_sum2[tid] = valid ? Sigma(i, 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(3 * i + 0), local_sum0[0]);
            atomicAdd(&estimation.results_.at(3 * i + 1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(3 * i + 2), local_sum2[0]);
        }
        __syncthreads();
    }

    /** Reduce Sum sigma_source2 and rmse (zero for invalid threads) **/
    const int OFFSET3 = 9;
    {
        local_sum0[tid] = sigma_source2;
        local_sum1[tid] = rmse;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(0 + OFFSET3), local_sum0[0]);
            atomicAdd(&estimation.results_.at(1 + OFFSET3), local_sum1[0]);
        }
        __syncthreads();
    }
}
void RegistrationCudaKernelCaller::BuildLinearSystemForPointToPointICP(
    RegistrationCuda &registration,
    const Vector3f &mean_source, const Vector3f &mean_target) {
    /* One thread per correspondence; tail threads are guarded in the
     * kernel. The centroids are passed by value to the device. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(DIV_CEILING(
        registration.correspondences_.indices_.size(), THREAD_1D_UNIT));
    BuildLinearSystemForPointToPointICPKernel<<<blocks, threads>>>(
        *registration.device_, mean_source, mean_target);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
/* Block-wide reduction of the information-matrix entries (upper triangle of
 * JtJ, 21 floats) into estimation.results_[0..20].
 * One thread per correspondence; THREAD_1D_UNIT threads per block. */
__global__
void ComputeInformationMatrixKernel(RegistrationCudaDevice estimation) {
    __shared__ float local_sum0[THREAD_1D_UNIT];
    __shared__ float local_sum1[THREAD_1D_UNIT];
    __shared__ float local_sum2[THREAD_1D_UNIT];

    const int tid = threadIdx.x;
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    CorrespondenceSetCudaDevice &corres = estimation.correspondences_;

    /* Fix: no early return on out-of-range idx -- every thread of the block
     * must reach every __syncthreads() below (a barrier inside divergent
     * control flow is undefined behavior). Invalid threads contribute
     * zeros. */
    const bool valid = idx < corres.indices_.size();

    Vector6f jacobian_x, jacobian_y, jacobian_z;
    HessianCuda<6> JtJ;
    if (valid) {
        int source_idx = corres.indices_[idx];
        int target_idx = corres.matrix_(0, source_idx);
        Vector3f &point = estimation.target_.points_[target_idx];
        estimation.ComputePixelwiseInformationJacobian(point,
                                                       jacobian_x,
                                                       jacobian_y,
                                                       jacobian_z);
        ComputeJtJ(jacobian_x, jacobian_y, jacobian_z, JtJ);
    }

    /** Reduce Sum JtJ: 21 entries, 3 per pass **/
    for (size_t i = 0; i < 21; i += 3) {
        local_sum0[tid] = valid ? JtJ(i + 0) : 0;
        local_sum1[tid] = valid ? JtJ(i + 1) : 0;
        local_sum2[tid] = valid ? JtJ(i + 2) : 0;
        __syncthreads();

        BlockReduceSum<float, THREAD_1D_UNIT>(tid, local_sum0, local_sum1,
                                              local_sum2);
        if (tid == 0) {
            atomicAdd(&estimation.results_.at(i + 0), local_sum0[0]);
            atomicAdd(&estimation.results_.at(i + 1), local_sum1[0]);
            atomicAdd(&estimation.results_.at(i + 2), local_sum2[0]);
        }
        __syncthreads();
    }
}
void RegistrationCudaKernelCaller::ComputeInformationMatrix(
    RegistrationCuda &estimation) {
    /* One thread per correspondence; tail threads are guarded in the
     * kernel. */
    const dim3 threads(THREAD_1D_UNIT);
    const dim3 blocks(DIV_CEILING(
        estimation.correspondences_.indices_.size(), THREAD_1D_UNIT));
    ComputeInformationMatrixKernel<<<blocks, threads>>>(
        *estimation.device_);
    CheckCuda(cudaDeviceSynchronize());
    CheckCuda(cudaGetLastError());
}
} // cuda
} // open3d
/*
 * Convolutional filter application for few-color images (numColors <= 3,
 * one group).
 *
 * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to
 * B_X * imgsPerThread images:
 *   threadIdx.x -> image, threadIdx.y -> filter
 *   blockIdx.x  -> image batch of B_X * imgsPerThread
 *   blockIdx.y  -> (module, filter batch of B_Y * filtersPerThread)
 *
 * images:  (numColors, imgPixels, numImages) with stride imgStride
 * filters: (numColors, filterPixels, numFilters) if conv,
 *          (numModules, numColors, filterPixels, numFilters) otherwise
 * targets: (numFilters, numModules, numImages)
 *
 * scale selects targets = scaleTargets*targets + scaleOutputs*out vs. plain
 * scaled write; checkImgBounds guards the image tail when numImages is not
 * a multiple of B_X * imgsPerThread.
 */
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
                                     const int numImages, const int numFilters,
                                     const int imgSize, const int filterSize, const int paddingStart,
                                     const int moduleStride,
                                     const int numModulesX, const int imgStride,
                                     const float scaleTargets, const float scaleOutputs,
                                     const bool conv) {
    __shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSize * imgSize;
    const int filterPixels = filterSize * filterSize;

    // blockIdx.y packs both the output module and the filter batch.
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = blockIdx.y % blocksPerModule;

    const int tidx = threadIdx.y * B_X + threadIdx.x;

    // Top-left corner of this module's receptive field (before padding).
    const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;

    // Flattened thread id re-mapped for the cooperative filter tile load.
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;

    images += myImgIdx;
    filters += filtersPerThread * B_Y * blockFilterIdx
             + shFilterLoadY * numFilters + shFilterLoadX;
    if (!conv) {
        // Locally-connected (non-shared) filters: one filter bank per module.
        filters += moduleIdx * numColors * filterPixels * numFilters;
    }

    targets += moduleIdx * numImages
             + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesX * numModulesX
             + myImgIdx;

    // Per-thread partial sums, one per (filter, image) pair.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }

    // March over the filter window B_Y pixels at a time.
    for (int p = 0; p < filterPixels; p += B_Y) {
        /*
         * Load B_Y pixels from B_Y*filtersPerThread filters
         */
        if (shFilterLoadY < B_Y) {
            #pragma unroll
            for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                if (p + p2 + shFilterLoadY < filterPixels) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
                    }
                } else {
                    // Past the end of the filter: zero-fill so the inner
                    // product below needs no bounds checks.
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                    }
                }
            }
        }

        /*
         * Load B_Y pixels from B_X*imgsPerThread images
         */
        const int pixIdx = p + threadIdx.y;
        if (pixIdx < filterPixels) {
            const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
            const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize;
            if (y >= 0 && y< imgSize && x >= 0 && x < imgSize) {
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSize + x) + i * B_X];
                        }
                    } else {
                        // Image index past numImages: zero-fill.
                        #pragma unroll
                        for (int c = 0; c < numColors; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            } else { // Padding
                #pragma unroll
                for (int i = 0; i < imgsPerThread; i++) {
                    #pragma unroll
                    for (int c = 0; c < numColors; c++) {
                        shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                    }
                }
            }
        }
        __syncthreads();  // tiles fully written before any thread reads

        // Accumulate the filter/image inner products over this tile.
        #pragma unroll
        for (int i = 0; i < B_Y*numColors; i++) {
            #pragma unroll
            for(int f = 0; f < filtersPerThread; f++) {
                #pragma unroll
                for(int g = 0; g < imgsPerThread; g++) {
                    prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                }
            }
        }
        __syncthreads();  // done reading before next iteration overwrites
    }

    if (scale) {
        // Blend with the existing target values.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite targets with the (scaled) fresh results.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images.
* threadIdx.x determines image
* threadIdx.y determines filter
*
* blockIdx.x determines image batch of B_X * imgsPerThread
* blockIdx.y determines filter batch of B_Y * filtersPerThread
*
* images: (numImgColors, imgPixels, numImages) with stride given
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
*
* targets: (numFilters, numModules, numImages)
*
* B_Y one of 4, 8, 16
* B_X one of 16, 32
* imgsPerThread one of 1, 2, 4
* filtersPerThread one of 1, 2, 4, 8
* colorCache: how many colors to put into shmem
*
* numFilters should be divisible by B_Y * filtersPerThread
* numImages should be divisible by B_X * imgsPerThread
* numFilterColors should be divisible by colorCache.
* numImgColors must be even.
* numFilters must be divisible by numGroups.
*
* The imgSize here is the size of the actual image without the padding.
*
*/
// Grouped/many-color variant: see the block comment above for the full
// contract. Compared to filterActs_YxX_color, colors are processed
// colorCache at a time in an extra outer loop, and filter groups restrict
// which image colors each filter sees.
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache,
          bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
                                      const int numImages, const int numFilters,
                                      const int imgSize, const int filterSize, const int paddingStart,
                                      const int moduleStride,
                                      const int numModulesX, const int imgStride, const int numImgColors,
                                      const int numGroups,
                                      const float scaleTargets, const float scaleOutputs,
                                      const bool conv) {
    __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
    __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
    const int imgPixels = imgSize * imgSize;
    const int filterPixels = filterSize * filterSize;
    const int numFilterColors = numImgColors / numGroups;

    // blockIdx.y packs both the output module and the filter batch.
    const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
    const int moduleIdx = blockIdx.y / blocksPerModule;
    const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);

    // Which filter group this block belongs to, and hence which slice of
    // image colors it reads.
    const int numFiltersPerGroup = numFilters / numGroups;
    const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;

    const int numModules = numModulesX * numModulesX;
    const int blockColorIdx = numFilterColors * blockGroupIdx;

    const int tidx = threadIdx.y * B_X + threadIdx.x;

    // Top-left corner of this module's receptive field (padding folded in;
    // may be negative, handled by the bounds check below).
    const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
    const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;

    // Flattened thread id re-mapped for the cooperative filter tile load.
    const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
    const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
    const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;

    images += blockColorIdx * imgPixels * imgStride + myImgIdx;
    filters += blockFilterIdx
             + shFilterLoadY * numFilters + shFilterLoadX;
    if (!conv) {
        // Locally-connected (non-shared) filters: one filter bank per module.
        filters += moduleIdx * numFilterColors * filterPixels * numFilters;
    }

    targets += moduleIdx * numImages
             + (blockFilterIdx + threadIdx.y) * numImages * numModules
             + myImgIdx;

    // Per-thread partial sums, one per (filter, image) pair.
    float prod[filtersPerThread][imgsPerThread];
    #pragma unroll
    for(int f = 0; f < filtersPerThread; f++) {
        #pragma unroll
        for(int g = 0; g < imgsPerThread; g++) {
            prod[f][g] = 0;
        }
    }
    //    __shared__ int imgPos[]
    for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
        for (int p = 0; p < filterPixels; p += B_Y) {
            /*
             * Load B_Y pixels from B_Y*filtersPerThread filters
             */
            if (shFilterLoadY < B_Y) {
                #pragma unroll
                for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
                    if (p + p2 + shFilterLoadY < filterPixels) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
                        }
                    } else {
                        // Past the end of the filter: zero-fill so the inner
                        // product below needs no bounds checks.
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
                        }
                    }
                }
            }

            /*
             * Load B_Y pixels from B_X*imgsPerThread images
             */
            const int pixIdx = p + threadIdx.y;
            if (pixIdx < filterPixels) {
                const int x = imgLoadModPosX + pixIdx % filterSize;
                const int y = imgLoadModPosY + pixIdx / filterSize;
                if (y >= 0 && y < imgSize && x >= 0 && x < imgSize) {
                    float* m = &images[imgStride * (oc * imgPixels + y * imgSize + x)];
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
                            }
                        } else {
                            // Image index past numImages: zero-fill.
                            #pragma unroll
                            for (int c = 0; c < colorCache; c++) {
                                shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                            }
                        }
                    }
                } else { // Padding
                    #pragma unroll
                    for (int i = 0; i < imgsPerThread; i++) {
                        #pragma unroll
                        for (int c = 0; c < colorCache; c++) {
                            shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
                        }
                    }
                }
            }
            __syncthreads();  // tiles fully written before any thread reads

            // Accumulate the filter/image inner products over this tile.
            #pragma unroll
            for (int i = 0; i < B_Y*colorCache; i++) {
                #pragma unroll
                for(int f = 0; f < filtersPerThread; f++) {
                    #pragma unroll
                    for(int g = 0; g < imgsPerThread; g++) {
                        prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
                    }
                }
            }
            __syncthreads();  // done reading before next iteration overwrites
        }
    }

    if (scale) {
        // Blend with the existing target values.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
                }
            }
        }
    } else {
        // Overwrite targets with the (scaled) fresh results.
        #pragma unroll
        for (int g = 0; g < imgsPerThread; g++) {
            if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
                #pragma unroll
                for (int f = 0; f < filtersPerThread; f++) {
                    targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
                }
            }
        }
    }
}
/*
* images: (groups, coloursPerGroup, height, width, images)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, rows, cols, groups, numFiltersPerGroup)
*
* targets: (numFilters, numModules, numImages)
*/
int _filterActs(
int numGroups,
int numImgColorsPerGroup,
int numImgRows,
int numImgCols,
int numImages,
int numModulesX,
int numModulesY,
int numFilterRows,
int numFilterCols,
int numFiltersPerGroup,
float * imageptr,
float * filterptr,
float * targetptr,
int paddingStart,
int moduleStride,
int imgStride, // step from start of img 0 to start of img 1
float scaleTargets,
float scaleOutput,
bool conv) {
int numImgColors = numGroups * numImgColorsPerGroup;
int numFilterColors = numImgColorsPerGroup;
int numModules = numModulesX * numModulesY;
int imgSize = numImgRows;
int imgPixels = numImgRows * numImgCols;
int numFilters = numFiltersPerGroup * numGroups;
int filterPixels = numFilterRows * numFilterCols;
int filterSize = numFilterRows;
//XXX: asserts should be turned to ifs and returns
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 2 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
assert(imgSize * imgSize == imgPixels);
assert(filterSize * filterSize == filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 8))
: dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 4));
dim3 threads(32, 4);
bool checkImgBounds = numImages % 128 != 0;
if (numModulesX != numModulesY) { return 1; }
if (numImgRows != numImgCols) { return 2; }
if (numFilterRows != numFilterCols) { return 3; }
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, true >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, false >
<<<blocks, threads>>>(imageptr, filterptr,
targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true >
<<<blocks, threads>>>(imageptr, filterptr, targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true >
<<<blocks, threads>>>(imageptr, filterptr, targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false >
<<<blocks, threads>>>(imageptr, filterptr, targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false >
<<<blocks, threads>>>(imageptr, filterptr, targetptr,
numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
// Scale-and-accumulate path for the multi-color ("sparse") kernel.
// Bug fix: the cudaFuncSetCacheConfig calls previously named the
// <..., false, ...> (no-scale) instantiations while launching the
// <..., true, ...> (scale) ones, so the kernels actually launched never
// received the PreferShared cache configuration.  Template arguments now
// match the launched instantiations.
if (checkImgBounds) {
    if (numFiltersPerGroup % 32 == 0) {
        cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared);
        filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true >
            <<<blocks, threads>>>(imageptr, filterptr, targetptr,
                                  numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared);
        filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true >
            <<<blocks, threads>>>(imageptr, filterptr, targetptr,
                                  numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
} else {
    if (numFiltersPerGroup % 32 == 0) {
        cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared);
        filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false >
            <<<blocks, threads>>>(imageptr, filterptr, targetptr,
                                  numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    } else {
        cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared);
        filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false >
            <<<blocks, threads>>>(imageptr, filterptr, targetptr,
                                  numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
    }
}
}
}
{ // new scope permits new vars
    // Kernel launches return no status directly; fetch the deferred error here.
    // Bug fix: the status was previously compared against CUBLAS_STATUS_SUCCESS,
    // a cublasStatus_t constant, instead of cudaSuccess.  Both happen to be 0,
    // but mixing the two enums is incorrect and fragile.
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        PyErr_Format(PyExc_RuntimeError, "filterActs failed (%s)", cudaGetErrorString(err));
        return -1;
    }
}
return 0;
} | the_stack |
#include "cuNVSM/data.h"
#include "cuNVSM/cuda_utils.h"
namespace TextEntity {
// Allocates pinned (page-locked) host buffers for one training batch:
// batch_size_ rows of window_size_ feature ids and feature weights, plus a
// per-instance label and weight.  Pinned memory enables fast H2D copies.
Batch::Batch(const size_t batch_size, const size_t window_size)
    : batch_size_(batch_size), window_size_(window_size),
      features_(nullptr), feature_weights_(nullptr), labels_(nullptr), weights_(nullptr),
      num_instances_(0) {
  CHECK_GT(batch_size_, 0);
  CHECK_GT(window_size_, 0);
  // One row of window_size_ feature ids per instance.
  CCE(cudaHostAlloc(&features_,
                    batch_size_ * window_size_ * sizeof(FeaturesType),
                    cudaHostAllocDefault));
  // Matching per-feature weights.
  CCE(cudaHostAlloc(&feature_weights_,
                    batch_size_ * window_size_ * sizeof(WeightType),
                    cudaHostAllocDefault));
  // One label (object id) per instance.
  CCE(cudaHostAlloc(&labels_,
                    batch_size_ * sizeof(LabelsType),
                    cudaHostAllocDefault));
  // One instance weight per instance.
  CCE(cudaHostAlloc(&weights_,
                    batch_size_ * sizeof(WeightType),
                    cudaHostAllocDefault));
  clear();  // start with zero instances
}
// Convenience constructor: batch geometry comes from the training config.
Batch::Batch(const lse::TrainConfig& train_config)
    : Batch(train_config.batch_size(), train_config.window_size()) {}

// Move constructor: initialize all pointers to null, then swap with the
// source; `other` is left in the all-null state the destructor recognizes.
Batch::Batch(Batch&& other)
    : batch_size_(other.batch_size_),
      window_size_(other.window_size_),
      features_(nullptr), feature_weights_(nullptr), labels_(nullptr), weights_(nullptr),
      num_instances_(0) {
  swap(&other);
}
// Releases the pinned host buffers, unless this instance was moved-from.
Batch::~Batch() {
  // This happens when the move constructor has been invoked: all four
  // pointers are null and there is nothing to free.
  if (features_ == nullptr && feature_weights_ == nullptr && labels_ == nullptr && weights_ == nullptr) {
    return;
  }
  CCE(cudaFreeHost(features_));
  features_ = nullptr;
  CCE(cudaFreeHost(feature_weights_));
  feature_weights_ = nullptr;
  CCE(cudaFreeHost(labels_));
  labels_ = nullptr;
  CCE(cudaFreeHost(weights_));
  weights_ = nullptr;
}
// Resets the batch to contain no instances; buffers keep their capacity.
void Batch::clear() {
  num_instances_ = 0;
}

// True once no further instance fits into this batch.
bool Batch::full() const {
  DCHECK_LE(num_instances_, batch_size_);
  return batch_size_ == num_instances_;
}

// True while the batch holds no instances at all.
bool Batch::empty() const {
  return 0 == num_instances_;
}
// Exchanges buffers and bookkeeping with another Batch of identical geometry.
void Batch::swap(BatchInterface* const other) {
  Batch* const rhs = dynamic_cast<Batch*>(other);
  CHECK_NOTNULL(rhs);
  CHECK_EQ(batch_size_, rhs->batch_size_);
  CHECK_EQ(window_size_, rhs->window_size_);
  // Exchange the pinned-memory pointers rather than copying their contents.
  std::swap(features_, rhs->features_);
  std::swap(feature_weights_, rhs->feature_weights_);
  std::swap(labels_, rhs->labels_);
  std::swap(weights_, rhs->weights_);
  // Exchange the instance counter as well.
  std::swap(num_instances_, rhs->num_instances_);
}
// Appends one training instance (a window of feature ids, optional per-feature
// weights, a label and an instance weight) to `batch`.  When the batch is
// already full, the instance is parked in overflow_buffer_ so it can be
// replayed into a later batch.
// NOTE(review): `feature_weights` is taken by value, which copies the vector
// on every call; the declaration in the header presumably matches, so it
// cannot be changed here alone — consider const& upstream.
void DataSource::push_instance(
    const std::vector<WordIdxType>& features,
    const std::vector<FLOATING_POINT_TYPE> feature_weights,
    const ObjectIdxType object_id,
    const FLOATING_POINT_TYPE weight,
    Batch* const batch) {
  if (batch->full()) {
    overflow_buffer_.push_back(std::make_tuple(features, feature_weights, object_id, weight));
  } else {
    DCHECK_EQ(features.size(), batch->window_size());
    // Copy the feature window into this instance's row of the pinned buffer.
    std::copy(features.begin(), features.end(),
              &batch->features_[batch->num_instances_ * batch->window_size()]);
    if (!feature_weights.empty()) {
      CHECK_EQ(feature_weights.size(), features.size());
      std::copy(feature_weights.begin(), feature_weights.end(),
                &batch->feature_weights_[batch->num_instances_ * batch->window_size()]);
    } else {
      // No explicit weights supplied: default every weight in the row to 1.0.
      std::fill(&batch->feature_weights_[batch->num_instances_ * batch->window_size()],
                &batch->feature_weights_[(batch->num_instances_ + 1) * batch->window_size()],
                static_cast<FLOATING_POINT_TYPE>(1.0));
    }
    batch->labels_[batch->num_instances_] = object_id;
    batch->weights_[batch->num_instances_] = weight;
    ++batch->num_instances_;
  }
}
// Streams a human-readable dump of the batch (buffer addresses and the live
// contents of the first num_instances_ rows) for debugging/logging.
std::ostream& operator<<(std::ostream& os, const Batch& batch) {
  os << "Window size: " << batch.window_size_ << std::endl;
  os << "Batch size: " << batch.batch_size_ << std::endl;
  os << "Number of instances: " << batch.num_instances_ << std::endl;
  os << "Features (" << batch.features_ << "): ";
  // Features are laid out row-major: num_instances_ rows of window_size_ ids.
  for (size_t i = 0; i < batch.num_instances_ * batch.window_size_; ++i)
    os << batch.features_[i] << " ";
  os << std::endl;
  os << "Labels (" << batch.labels_ << "): ";
  for (size_t i = 0; i < batch.num_instances_; ++i)
    os << batch.labels_[i] << " ";
  os << std::endl;
  os << "Weights (" << batch.weights_ << "): ";
  for (size_t i = 0; i < batch.num_instances_; ++i)
    os << batch.weights_[i] << " ";
  os << std::endl;
  return os;
}
} // namespace TextEntity
namespace RepresentationSimilarity {
// TODO(cvangysel): some of the logic below is the same for both Batch types; merge?
// Allocates pinned host buffers for batch_size_ entity pairs (two object ids
// per instance) and their per-pair weights.
Batch::Batch(const size_t batch_size)
    : batch_size_(batch_size), features_(nullptr), weights_(nullptr), num_instances_(0) {
  CHECK_GT(batch_size_, 0);
  // Two object ids per instance, hence the factor of 2.
  CCE(cudaHostAlloc(&features_,
                    batch_size_ * 2 * sizeof(ObjectIdxType),
                    cudaHostAllocDefault));
  CCE(cudaHostAlloc(&weights_,
                    batch_size_ * sizeof(WeightType),
                    cudaHostAllocDefault));
  clear();  // start with zero instances
}

// Convenience constructor: batch size comes from the training config.
Batch::Batch(const lse::TrainConfig& train_config)
    : Batch(train_config.batch_size()) {}

// Move constructor: initialize pointers to null, then swap with the source;
// `other` is left in the all-null state the destructor recognizes.
Batch::Batch(Batch&& other)
    : batch_size_(other.batch_size_), features_(nullptr), weights_(nullptr), num_instances_(0) {
  swap(&other);
}
// Releases the pinned host buffers, unless this instance was moved-from.
Batch::~Batch() {
  // This happens when the move constructor has been invoked: both pointers
  // are null and there is nothing to free.
  if (features_ == nullptr && weights_ == nullptr) {
    return;
  }
  CCE(cudaFreeHost(features_));
  features_ = nullptr;
  CCE(cudaFreeHost(weights_));
  weights_ = nullptr;
}

// Resets the batch to contain no instances; buffers keep their capacity.
void Batch::clear() {
  num_instances_ = 0;
}
// True once no further pair fits into this batch.
bool Batch::full() const {
  DCHECK_LE(num_instances_, batch_size_);
  return batch_size_ == num_instances_;
}

// Exchanges buffers and bookkeeping with another Batch of the same size.
void Batch::swap(BatchInterface* const other) {
  Batch* const rhs = dynamic_cast<Batch*>(other);
  CHECK_NOTNULL(rhs);
  CHECK_EQ(batch_size_, rhs->batch_size_);
  // Exchange the pinned-memory pointers rather than copying their contents.
  std::swap(features_, rhs->features_);
  std::swap(weights_, rhs->weights_);
  // Exchange the instance counter as well.
  std::swap(num_instances_, rhs->num_instances_);
}

// True while the batch holds no pairs at all.
bool Batch::empty() const {
  return 0 == num_instances_;
}
// Streams a human-readable dump of the batch for debugging/logging.
std::ostream& operator<<(std::ostream& os, const Batch& batch) {
  os << "Batch size: " << batch.batch_size_ << std::endl;
  os << "Number of instances: " << batch.num_instances_ << std::endl;
  os << "Features (" << batch.features_ << "): ";
  // Two object ids per instance.
  for (size_t i = 0; i < batch.num_instances_ * 2; ++i)
    os << batch.features_[i] << " ";
  // Bug fix: terminate the features line.  Previously the "Weights" header
  // was appended to it; the TextEntity::Batch printer does emit this newline.
  os << std::endl;
  os << "Weights (" << batch.weights_ << "): ";
  for (size_t i = 0; i < batch.num_instances_; ++i)
    os << batch.weights_[i] << " ";
  os << std::endl;
  return os;
}
// Parses whitespace-separated "<entity_a> <entity_b> <weight>" lines from
// `file`, resolving entity identifiers through `identifiers_map`.  Lines that
// fail to parse or reference unknown entities are logged and skipped.
// Returns a heap-allocated vector; ownership passes to the caller.
std::vector<InstanceT>* LoadSimilarities(
    std::istream& file,
    const IdentifiersMapT& identifiers_map) {
  CHECK(file.good());
  CHECK(!identifiers_map.empty());
  std::vector<InstanceT>* const data = new std::vector<InstanceT>;
  std::string line;
  while (file.good() && std::getline(file, line)) {
    std::istringstream iss(line);
    std::string first_entity_id;
    std::string second_entity_id;
    WeightType weight;
    // Bug fix: verify all three fields extract successfully.  Previously a
    // short or malformed line left `weight` uninitialized and could still
    // push a pair built from indeterminate data.
    if (!(iss >> first_entity_id >> second_entity_id >> weight)) {
      LOG(WARNING) << "Unable to parse similarity line '" << line << "'; "
                   << "skipping.";
      continue;
    }
    if (!contains_key(identifiers_map, first_entity_id)) {
      LOG(WARNING) << "Entity '" << first_entity_id << "' not found; "
                   << "skipping pair.";
      continue;
    }
    if (!contains_key(identifiers_map, second_entity_id)) {
      LOG(WARNING) << "Entity '" << second_entity_id << "' not found; "
                   << "skipping pair.";
      continue;
    }
    data->push_back(std::make_tuple(
        identifiers_map.at(first_entity_id),
        identifiers_map.at(second_entity_id),
        weight));
  }
  return data;
}
// Convenience overload: opens `path` and delegates to the stream version
// (which CHECKs that the stream is readable).
std::vector<InstanceT>* LoadSimilarities(
    const std::string& path,
    const IdentifiersMapT& identifiers_map) {
  CHECK(!path.empty());
  // Open directly via the constructor instead of a separate open() call.
  std::ifstream file(path);
  return LoadSimilarities(file, identifiers_map);
}
// Loads similarity pairs from `path` (resolving ids via identifiers_map) and
// hands the resulting heap-allocated vector to the delegated constructor.
DataSource::DataSource(const std::string& path,
                       const IdentifiersMapT& identifiers_map,
                       RNG* const rng)
    : DataSource(LoadSimilarities(path, identifiers_map), rng) {}

// Adopts `data` and shuffles an initial epoch via reset().
// NOTE(review): `data` comes from `new` in LoadSimilarities — confirm that
// data_ is an owning (smart) pointer in the header, otherwise this leaks.
DataSource::DataSource(const std::vector<InstanceT>* const data,
                       RNG* const rng)
    : data_(data), rng_(rng) {
  reset();
}
// Starts a new epoch: rebuilds instance_order_ as a random permutation of
// the indices [0, data_->size()).
void DataSource::reset() {
  if (!instance_order_.empty()) {
    LOG(WARNING) << "Resetting instance generator while there are still instances to consume.";
    instance_order_.clear();
  }
  // Fill with 0, 1, ..., n-1, then shuffle with the caller-supplied RNG.
  instance_order_.resize(data_->size());
  std::iota(instance_order_.begin(), instance_order_.end(), 0);
  LOG(INFO) << "Shuffling " << instance_order_.size() << " instance pointers.";
  std::shuffle(instance_order_.begin(), instance_order_.end(), *rng_);
}
// Fills `batch` (which must be empty) with up to batch_size_ pairs taken
// from the front of the shuffled instance order.
void DataSource::next(Batch* const batch) {
  CHECK(batch->empty());
  while (!instance_order_.empty() && !batch->full()) {
    const InstanceT& pair = data_->at(instance_order_.front());
    // Each instance occupies two consecutive feature slots.
    const size_t base = 2 * batch->num_instances_;
    batch->features_[base] = std::get<0>(pair);
    batch->features_[base + 1] = std::get<1>(pair);
    batch->weights_[batch->num_instances_] = std::get<2>(pair);
    instance_order_.pop_front();
    ++batch->num_instances_;
  }
  CHECK_LE(batch->num_instances_, batch->batch_size_);
}

// True while the current epoch still has unconsumed pairs.
bool DataSource::has_next() const {
  return !instance_order_.empty();
}
// Fraction of the current epoch consumed so far, in [0, 1].
float32 DataSource::progress() const {
  // Robustness fix: an empty data set would previously divide 0/0 and
  // return NaN; report the epoch as fully consumed instead.
  if (data_->empty()) {
    return 1.0f;
  }
  // Use a float literal (was the double literal 1.0) in this float function.
  return 1.0f - (
      static_cast<float32>(instance_order_.size()) /
      static_cast<float32>(data_->size()));
}
} // namespace RepresentationSimilarity
#define NSPEEDS 9
#define LOCALSIZEX 128
#define LOCALSIZEY 1
/* dump output files for verification */
#define FINALSTATEFILE "final_state.dat"
#define AVVELSFILE "av_vels.dat"
/* struct to hold the parameter values */
/* Simulation parameters read from the input parameter file. */
typedef struct
{
  int nx;            /* no. of cells in x-direction */
  int ny;            /* no. of cells in y-direction */
  int maxIters;      /* no. of iterations */
  int reynolds_dim;  /* dimension for Reynolds number */
  float density;     /* density per link */
  float accel;       /* density redistribution */
  float omega;       /* relaxation parameter */
} t_param;

/* Per-cell distribution values, one entry per D2Q9 lattice direction. */
typedef struct
{
  float speeds[NSPEEDS];
} t_speed;
/*
** function prototypes
*/
/* load params, allocate memory, load obstacles & initialise fluid particle densities */
int initialise(const char* paramfile, const char* obstaclefile,
t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
int** obstacles_ptr, float** av_vels_ptr);
/*
** The main calculation methods.
** timestep calls, in order, the functions:
** accelerate_flow(), propagate(), rebound() & collision()
*/
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels);
/* finalise, including freeing up allocated memory */
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
int* obstacles_ptr, float* av_vels_ptr);
/* Sum all the densities in the grid.
** The total should remain constant from one timestep to the next. */
float total_density(const t_param params, t_speed* cells);
/* compute average velocity */
float av_velocity(const t_param params, t_speed* cells, int* obstacles);
/* calculate Reynolds number */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles);
/* utility functions */
void die(const char* message, const int line, const char* file);
void usage(const char* exe);
/* Returns true when x is strictly greater than y.
 * Simplified: the previous `x > y ? 1 : 0` ternary was redundant for a
 * bool-returning function. */
__device__ bool
isGreater(const float x, const float y)
{
  return x > y;
}
/* Fused D2Q9-BGK timestep kernel: performs accelerate_flow (restricted to
 * row ny-2), propagate, rebound and collision in a single pass, reading the
 * pre-step distributions from the Speed* arrays and writing post-step values
 * to the Tmp* arrays (one array per lattice direction, SoA layout).
 * It also reduces each thread block's velocity norms and open-cell counts
 * into Partial_Sum / Partial_Sum2 at timestep slot `tt`.
 *
 * Launch layout: one thread per lattice cell on an nx x ny grid with
 * (LOCALSIZEX, LOCALSIZEY) blocks.  There is no bounds guard on ii/jj, so
 * nx and ny must be exact multiples of the block dimensions.
 */
__global__ void
d2q9_bgk( float* Speed0A,
          const float* Speed1A,
          const float* Speed2A,
          const float* Speed3A,
          const float* Speed4A,
          const float* Speed5A,
          const float* Speed6A,
          const float* Speed7A,
          const float* Speed8A,
          float* Tmp0A,
          float* Tmp1A,
          float* Tmp2A,
          float* Tmp3A,
          float* Tmp4A,
          float* Tmp5A,
          float* Tmp6A,
          float* Tmp7A,
          float* Tmp8A,
          const int* ObstaclesA,
          float* Partial_Sum,
          int* Partial_Sum2,
          const float densityaccel,
          const float omega,
          const int nx,
          const int ny,
          const int tt)
{
  //setup local memory (per-block scratch for the reduction at the end)
  __shared__ int local_sum2[LOCALSIZEX*LOCALSIZEY];
  __shared__ float local_sum[LOCALSIZEX*LOCALSIZEY];
  /* get column and row indices */
  const int ii = blockIdx.x * blockDim.x + threadIdx.x;
  const int jj = blockIdx.y * blockDim.y + threadIdx.y;
  /* lattice constants: sound-speed terms and direction weights */
  const float c_sq_inv = 3.f;
  const float c_sq = 1.f/c_sq_inv; /* square of speed of sound */
  const float temp1 = 4.5f;
  const float w1 = 1.f/9.f;
  const float w0 = 4.f * w1; /* weighting factor */
  const float w2 = 1.f/36.f; /* weighting factor */
  /* acceleration increments applied to axis/diagonal speeds on row ny-2 */
  const float w11 = densityaccel * w1;
  const float w21 = densityaccel * w2;
  /* determine indices of axis-direction neighbours
  ** respecting periodic boundary conditions (wrap around) */
  const int y_n = (jj + 1) % ny;
  const int x_e = (ii + 1) % nx;
  const int y_s = (jj == 0) ? (jj + ny - 1) : (jj - 1);
  const int x_w = (ii == 0) ? (ii + nx - 1) : (ii - 1);
  /* propagate densities from neighbouring cells, following
  ** appropriate directions of travel and writing into
  ** scratch space grid.  The ternaries fold the accelerate_flow step in:
  ** when the source cell lies on row ny-2, is not an obstacle, and the
  ** subtraction would not drive any westward speed negative, the flow
  ** acceleration (+/- w11, w21) is applied during the gather. */
  float tmp_s0 = Speed0A[ii + jj*nx];
  float tmp_s1 = (jj == ny-2 && (!ObstaclesA[x_w + jj*nx] && isGreater((Speed3A[x_w + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + jj*nx] - w21) , 0.f))) ? Speed1A[x_w + jj*nx]+w11 : Speed1A[x_w + jj*nx];
  float tmp_s2 = Speed2A[ii + y_s*nx];
  float tmp_s3 = (jj == ny-2 && (!ObstaclesA[x_e + jj*nx] && isGreater((Speed3A[x_e + jj*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + jj*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + jj*nx] - w21) , 0.f))) ? Speed3A[x_e + jj*nx]-w11 : Speed3A[x_e + jj*nx];
  float tmp_s4 = Speed4A[ii + y_n*nx];
  float tmp_s5 = (y_s == ny-2 && (!ObstaclesA[x_w + y_s*nx] && isGreater((Speed3A[x_w + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_s*nx] - w21) , 0.f))) ? Speed5A[x_w + y_s*nx]+w21 : Speed5A[x_w + y_s*nx];
  float tmp_s6 = (y_s == ny-2 && (!ObstaclesA[x_e + y_s*nx] && isGreater((Speed3A[x_e + y_s*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_s*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_s*nx] - w21) , 0.f))) ? Speed6A[x_e + y_s*nx]-w21 : Speed6A[x_e + y_s*nx];
  float tmp_s7 = (y_n == ny-2 && (!ObstaclesA[x_e + y_n*nx] && isGreater((Speed3A[x_e + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_e + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_e + y_n*nx] - w21) , 0.f))) ? Speed7A[x_e + y_n*nx]-w21 : Speed7A[x_e + y_n*nx];
  float tmp_s8 = (y_n == ny-2 && (!ObstaclesA[x_w + y_n*nx] && isGreater((Speed3A[x_w + y_n*nx] - w11) , 0.f) && isGreater((Speed6A[x_w + y_n*nx] - w21) , 0.f) && isGreater((Speed7A[x_w + y_n*nx] - w21) , 0.f))) ? Speed8A[x_w + y_n*nx]+w21 : Speed8A[x_w + y_n*nx];
  /* compute local density total */
  float local_density = tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8;
  const float local_density_recip = 1.f/(local_density);
  /* compute x velocity component */
  float u_x = (tmp_s1
               + tmp_s5
               + tmp_s8
               - tmp_s3
               - tmp_s6
               - tmp_s7)
              * local_density_recip;
  /* compute y velocity component */
  float u_y = (tmp_s2
               + tmp_s5
               + tmp_s6
               - tmp_s4
               - tmp_s8
               - tmp_s7)
              * local_density_recip;
  /* velocity squared (shared -(u^2)/(2 c_sq) term of the equilibria) */
  const float temp2 = - (u_x * u_x + u_y * u_y)/(2.f * c_sq);
  /* equilibrium densities */
  float d_equ[NSPEEDS];
  /* zero velocity density: weight w0 */
  d_equ[0] = w0 * local_density
             * (1.f + temp2);
  /* axis speeds: weight w1 */
  d_equ[1] = w1 * local_density * (1.f + u_x * c_sq_inv
                                   + (u_x * u_x) * temp1
                                   + temp2);
  d_equ[2] = w1 * local_density * (1.f + u_y * c_sq_inv
                                   + (u_y * u_y) * temp1
                                   + temp2);
  d_equ[3] = w1 * local_density * (1.f - u_x * c_sq_inv
                                   + (u_x * u_x) * temp1
                                   + temp2);
  d_equ[4] = w1 * local_density * (1.f - u_y * c_sq_inv
                                   + (u_y * u_y) * temp1
                                   + temp2);
  /* diagonal speeds: weight w2 */
  d_equ[5] = w2 * local_density * (1.f + (u_x + u_y) * c_sq_inv
                                   + ((u_x + u_y) * (u_x + u_y)) * temp1
                                   + temp2);
  d_equ[6] = w2 * local_density * (1.f + (-u_x + u_y) * c_sq_inv
                                   + ((-u_x + u_y) * (-u_x + u_y)) * temp1
                                   + temp2);
  d_equ[7] = w2 * local_density * (1.f + (-u_x - u_y) * c_sq_inv
                                   + ((-u_x - u_y) * (-u_x - u_y)) * temp1
                                   + temp2);
  d_equ[8] = w2 * local_density * (1.f + (u_x - u_y) * c_sq_inv
                                   + ((u_x - u_y) * (u_x - u_y)) * temp1
                                   + temp2);
  /* rebound-or-collide, branchless: obstacle cells swap opposite-direction
  ** pairs (bounce-back), open cells relax toward equilibrium with omega */
  float tmp;
  int expression = ObstaclesA[ii + jj*nx];
  tmp_s0 = expression ? tmp_s0 : (tmp_s0 + omega * (d_equ[0] - tmp_s0));
  tmp = tmp_s1;
  tmp_s1 = expression ? tmp_s3 : (tmp_s1 + omega * (d_equ[1] - tmp_s1));
  tmp_s3 = expression ? tmp : (tmp_s3 + omega * (d_equ[3] - tmp_s3));
  tmp = tmp_s2;
  tmp_s2 = expression ? tmp_s4 : (tmp_s2 + omega * (d_equ[2] - tmp_s2));
  tmp_s4 = expression ? tmp : (tmp_s4 + omega * (d_equ[4] - tmp_s4));
  tmp = tmp_s5;
  tmp_s5 = expression ? tmp_s7 : (tmp_s5 + omega * (d_equ[5] - tmp_s5));
  tmp_s7 = expression ? tmp : (tmp_s7 + omega * (d_equ[7] - tmp_s7));
  tmp = tmp_s6;
  tmp_s6 = expression ? tmp_s8 : (tmp_s6 + omega * (d_equ[6] - tmp_s6));
  tmp_s8 = expression ? tmp : (tmp_s8 + omega * (d_equ[8] - tmp_s8));
  /* local density total (reciprocal, reused for both velocity components) */
  local_density = 1.f/(tmp_s0 + tmp_s1 + tmp_s2 + tmp_s3 + tmp_s4 + tmp_s5 + tmp_s6 + tmp_s7 + tmp_s8);
  /* x-component of velocity */
  u_x = (tmp_s1
         + tmp_s5
         + tmp_s8
         - tmp_s3
         - tmp_s6
         - tmp_s7)
        * local_density;
  /* compute y velocity component */
  u_y = (tmp_s2
         + tmp_s5
         + tmp_s6
         - tmp_s4
         - tmp_s7
         - tmp_s8)
        * local_density;
  /* scatter the post-collision distributions to the output grid */
  Tmp0A[ii + jj*nx] = tmp_s0;
  Tmp1A[ii + jj*nx] = tmp_s1;
  Tmp2A[ii + jj*nx] = tmp_s2;
  Tmp3A[ii + jj*nx] = tmp_s3;
  Tmp4A[ii + jj*nx] = tmp_s4;
  Tmp5A[ii + jj*nx] = tmp_s5;
  Tmp6A[ii + jj*nx] = tmp_s6;
  Tmp7A[ii + jj*nx] = tmp_s7;
  Tmp8A[ii + jj*nx] = tmp_s8;
  /* per-block reduction of |u| and open-cell count */
  int local_idi = threadIdx.x;
  int local_idj = threadIdx.y;
  int local_sizei = blockDim.x;
  int local_sizej = blockDim.y;
  /* accumulate the norm of x- and y- velocity components */
  local_sum[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : hypotf(u_x,u_y);
  /* increase counter of inspected cells */
  local_sum2[local_idi + local_idj*local_sizei] = (ObstaclesA[ii + jj*nx]) ? 0 : 1 ;
  __syncthreads();
  int group_id = blockIdx.x;
  int group_id2 = blockIdx.y;
  int group_size = gridDim.x;
  int group_size2 = gridDim.y;
  /* thread (0,0) serially folds the block's shared-memory partials and
  ** stores them in this block's slot for timestep tt */
  if(local_idi == 0 && local_idj == 0){
    float sum = 0.0f;
    int sum2 = 0;
    for(int i = 0; i<local_sizei*local_sizej; i++){
      sum += local_sum[i];
      sum2 += local_sum2[i];
    }
    Partial_Sum[group_id+group_id2*group_size+tt*group_size*group_size2] = sum;
    Partial_Sum2[group_id+group_id2*group_size+tt*group_size*group_size2] = sum2;
  }
}
/*
 * Host driver: loads the parameter and obstacle files, unpacks the AoS cell
 * grid into nine SoA device arrays, runs maxIters fused LBM timesteps on the
 * GPU (ping-ponging between the speeds/tmp_speeds buffers), reduces the
 * per-block partial sums into av_vels on the host, then packs the results
 * back and writes the output files.
 *
 * NOTE(review): CUDA API return codes are not checked anywhere in this
 * driver, and nx/ny are assumed to be exact multiples of
 * LOCALSIZEX/LOCALSIZEY — confirm both against the input decks used.
 */
int main(int argc, char* argv[])
{
  char* paramfile = NULL;    /* input parameter file */
  char* obstaclefile = NULL; /* input obstacle file */
  t_param params;            /* struct to hold parameter values */
  t_speed* cells = NULL;     /* grid containing fluid densities */
  t_speed* tmp_cells = NULL; /* scratch space */
  int* obstaclesHost = NULL; /* grid indicating which cells are blocked */
  float* av_vels = NULL;     /* a record of the av. velocity computed for each timestep */
  struct timeval timstr;     /* structure to hold elapsed time */
  double tic, toc;           /* floating point numbers to calculate elapsed wallclock time */

  /* parse the command line */
  if (argc != 3)
  {
    usage(argv[0]);
  }
  else
  {
    paramfile = argv[1];
    obstaclefile = argv[2];
  }

  /* initialise our data structures and load values from file */
  /* Bug fix: "&params" had been mangled into the mojibake "¶ms"
   * (HTML-entity corruption of "&para" + "ms"), which does not compile. */
  initialise(paramfile, obstaclefile, &params, &cells,
             &tmp_cells, &obstaclesHost, &av_vels);

  /* declare host arrays: one SoA plane per lattice direction */
  int Ny = params.ny;
  int Nx = params.nx;
  int MaxIters = params.maxIters;
  float *speedsHostS0 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS1 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS2 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS3 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS4 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS5 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS6 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS7 = (float*) malloc (sizeof(float)*Ny*Nx);
  float *speedsHostS8 = (float*) malloc (sizeof(float)*Ny*Nx);
  /* per-block partial reductions, one slot per block per timestep */
  float *tot_up = (float*) malloc (sizeof(float) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);
  int *tot_cellsp = (int*) malloc (sizeof(int) * (Ny/LOCALSIZEY) * (Nx/LOCALSIZEX) * MaxIters);

  /* unpack the AoS cell grid into the SoA host planes */
  for (int jj = 0; jj < Ny; jj++)
  {
    for (int ii = 0; ii < Nx; ii++)
    {
      speedsHostS0[ii + jj*Nx] = cells[ii + jj*Nx].speeds[0];
      speedsHostS1[ii + jj*Nx] = cells[ii + jj*Nx].speeds[1];
      speedsHostS2[ii + jj*Nx] = cells[ii + jj*Nx].speeds[2];
      speedsHostS3[ii + jj*Nx] = cells[ii + jj*Nx].speeds[3];
      speedsHostS4[ii + jj*Nx] = cells[ii + jj*Nx].speeds[4];
      speedsHostS5[ii + jj*Nx] = cells[ii + jj*Nx].speeds[5];
      speedsHostS6[ii + jj*Nx] = cells[ii + jj*Nx].speeds[6];
      speedsHostS7[ii + jj*Nx] = cells[ii + jj*Nx].speeds[7];
      speedsHostS8[ii + jj*Nx] = cells[ii + jj*Nx].speeds[8];
    }
  }

  /* start timer */
  gettimeofday(&timstr, NULL);
  tic = timstr.tv_sec + (timstr.tv_usec / 1000000.0);

  /* allocate device buffers and upload the initial state */
  float *speeds0, *speeds1, *speeds2, *speeds3, *speeds4,
        *speeds5, *speeds6, *speeds7, *speeds8;
  float *tmp_speeds0, *tmp_speeds1, *tmp_speeds2, *tmp_speeds3, *tmp_speeds4,
        *tmp_speeds5, *tmp_speeds6, *tmp_speeds7, *tmp_speeds8;
  cudaMalloc((void**)&speeds0, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds0, speedsHostS0, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds1, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds1, speedsHostS1, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds2, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds2, speedsHostS2, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds3, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds3, speedsHostS3, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds4, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds4, speedsHostS4, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds5, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds5, speedsHostS5, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds6, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds6, speedsHostS6, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds7, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds7, speedsHostS7, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&speeds8, sizeof(float)*Ny*Nx);
  cudaMemcpy(speeds8, speedsHostS8, sizeof(float)*Ny*Nx, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&tmp_speeds0, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds1, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds2, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds3, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds4, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds5, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds6, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds7, sizeof(float)*Ny*Nx);
  cudaMalloc((void**)&tmp_speeds8, sizeof(float)*Ny*Nx);
  int *obstacles, *partial_sum2;
  float *partial_sum;
  cudaMalloc((void**)&obstacles, sizeof(int)*Ny*Nx);
  cudaMalloc((void**)&partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
  cudaMalloc((void**)&partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters);
  cudaMemcpy(obstacles, obstaclesHost, sizeof(int)*Ny*Nx, cudaMemcpyHostToDevice);

  /* parameters for kernel */
  float omega = params.omega;
  float densityaccel = params.density*params.accel;
  dim3 grids(Nx/LOCALSIZEX, Ny/LOCALSIZEY);
  dim3 threads(LOCALSIZEX, LOCALSIZEY);

  /* main loop: launch one fused timestep per iteration and ping-pong the
   * read/write buffer sets afterwards */
  for (int tt = 0; tt < MaxIters; tt++){
    d2q9_bgk<<<grids, threads>>>(
        speeds0,
        speeds1,
        speeds2,
        speeds3,
        speeds4,
        speeds5,
        speeds6,
        speeds7,
        speeds8,
        tmp_speeds0,
        tmp_speeds1,
        tmp_speeds2,
        tmp_speeds3,
        tmp_speeds4,
        tmp_speeds5,
        tmp_speeds6,
        tmp_speeds7,
        tmp_speeds8,
        obstacles,
        partial_sum,
        partial_sum2,
        densityaccel,
        omega,
        Nx,
        Ny,
        tt );
    /* swap the buffers (host-side pointer swap only; no data moves) */
    float* speed_tmp = speeds0;
    speeds0 = tmp_speeds0;
    tmp_speeds0 = speed_tmp;
    speed_tmp = speeds1;
    speeds1 = tmp_speeds1;
    tmp_speeds1 = speed_tmp;
    speed_tmp = speeds2;
    speeds2 = tmp_speeds2;
    tmp_speeds2 = speed_tmp;
    speed_tmp = speeds3;
    speeds3 = tmp_speeds3;
    tmp_speeds3 = speed_tmp;
    speed_tmp = speeds4;
    speeds4 = tmp_speeds4;
    tmp_speeds4 = speed_tmp;
    speed_tmp = speeds5;
    speeds5 = tmp_speeds5;
    tmp_speeds5 = speed_tmp;
    speed_tmp = speeds6;
    speeds6 = tmp_speeds6;
    tmp_speeds6 = speed_tmp;
    speed_tmp = speeds7;
    speeds7 = tmp_speeds7;
    tmp_speeds7 = speed_tmp;
    speed_tmp = speeds8;
    speeds8 = tmp_speeds8;
    tmp_speeds8 = speed_tmp;
  }

  /* download results; the blocking cudaMemcpy also synchronizes with the
   * asynchronously launched kernels */
  cudaMemcpy(tot_up, partial_sum, sizeof(float)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, cudaMemcpyDeviceToHost);
  cudaMemcpy(tot_cellsp, partial_sum2, sizeof(int)*(Ny/LOCALSIZEY)*(Nx/LOCALSIZEX)*MaxIters, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS0, speeds0, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS1, speeds1, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS2, speeds2, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS3, speeds3, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS4, speeds4, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS5, speeds5, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS6, speeds6, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS7, speeds7, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaMemcpy(speedsHostS8, speeds8, sizeof(float)*Ny*Nx, cudaMemcpyDeviceToHost);
  cudaFree(speeds0);
  cudaFree(speeds1);
  cudaFree(speeds2);
  cudaFree(speeds3);
  cudaFree(speeds4);
  cudaFree(speeds5);
  cudaFree(speeds6);
  cudaFree(speeds7);
  cudaFree(speeds8);
  cudaFree(tmp_speeds0);
  cudaFree(tmp_speeds1);
  cudaFree(tmp_speeds2);
  cudaFree(tmp_speeds3);
  cudaFree(tmp_speeds4);
  cudaFree(tmp_speeds5);
  cudaFree(tmp_speeds6);
  cudaFree(tmp_speeds7);
  cudaFree(tmp_speeds8);
  cudaFree(obstacles);
  cudaFree(partial_sum2);
  cudaFree(partial_sum);

  /* fold the per-block partials into one average velocity per timestep */
  float tot_u = 0;
  int tot_cells = 0;
  for (int tt = 0; tt < params.maxIters; tt++){
    tot_u = 0;
    tot_cells = 0;
    for(int i = 0; i < params.nx/LOCALSIZEX*params.ny/LOCALSIZEY; i++){
      tot_u += tot_up[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
      tot_cells += tot_cellsp[i+tt*params.nx/LOCALSIZEX*params.ny/LOCALSIZEY];
    }
    av_vels[tt] = tot_u/tot_cells;
  }

  /* end timer */
  gettimeofday(&timstr, NULL);
  toc = timstr.tv_sec + (timstr.tv_usec / 1000000.0);

  /* pack the SoA planes back into the AoS cell grid for output */
  for (int jj = 0; jj < params.ny; jj++)
  {
    for (int ii = 0; ii < params.nx; ii++)
    {
      cells[ii + jj*params.nx].speeds[0] = speedsHostS0[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[1] = speedsHostS1[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[2] = speedsHostS2[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[3] = speedsHostS3[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[4] = speedsHostS4[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[5] = speedsHostS5[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[6] = speedsHostS6[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[7] = speedsHostS7[ii + jj*params.nx];
      cells[ii + jj*params.nx].speeds[8] = speedsHostS8[ii + jj*params.nx];
    }
  }

  /* write final values and free memory */
  printf("==done==\n");
  printf("Reynolds number:\t\t%.12E\n", calc_reynolds(params, cells, obstaclesHost));
  printf("Elapsed time:\t\t\t%.6lf (s)\n", toc - tic);
  write_values(params, cells, obstaclesHost, av_vels);
  finalise(cells, tmp_cells, obstaclesHost, av_vels);
  free(speedsHostS0);
  free(speedsHostS1);
  free(speedsHostS2);
  free(speedsHostS3);
  free(speedsHostS4);
  free(speedsHostS5);
  free(speedsHostS6);
  free(speedsHostS7);
  free(speedsHostS8);
  free(tot_up);
  free(tot_cellsp);
  return EXIT_SUCCESS;
}
/* Computes the mean velocity magnitude over all non-obstacle cells
 * (CPU reference implementation, used for the Reynolds number). */
float av_velocity(const t_param params, t_speed* cells, int* obstacles)
{
  float tot_u = 0.f;   /* accumulated |u| over open cells */
  int   tot_cells = 0; /* number of open cells inspected */

  /* loop over all non-blocked cells */
  for (int jj = 0; jj < params.ny; jj++)
  {
    for (int ii = 0; ii < params.nx; ii++)
    {
      const int idx = ii + jj*params.nx;
      /* ignore occupied cells */
      if (obstacles[idx]) continue;

      const float* s = cells[idx].speeds;

      /* local density total */
      float local_density = 0.f;
      for (int kk = 0; kk < NSPEEDS; kk++)
      {
        local_density += s[kk];
      }

      /* x- and y-velocity components from the D2Q9 first moments */
      const float u_x = (s[1] + s[5] + s[8] - (s[3] + s[6] + s[7])) / local_density;
      const float u_y = (s[2] + s[5] + s[6] - (s[4] + s[7] + s[8])) / local_density;

      /* accumulate the norm of the velocity and count the cell */
      tot_u += sqrtf((u_x * u_x) + (u_y * u_y));
      ++tot_cells;
    }
  }

  return tot_u / (float)tot_cells;
}
/* Set up the simulation: read the parameter file, allocate the two cell
** grids, the obstacle map and the per-timestep average-velocity record,
** seed the initial densities, and read the obstacle list.
**
** paramfile      parameter file: nx, ny, maxIters, reynolds_dim,
**                density, accel, omega — one value per line
** obstaclefile   obstacle file: "xx yy blocked" triples, one per line
** params         out: parsed parameter struct
** cells_ptr      out: main grid (ny*nx t_speed cells, row-major)
** tmp_cells_ptr  out: scratch grid of the same shape
** obstacles_ptr  out: obstacle map (1 = blocked, 0 = open)
** av_vels_ptr    out: float array of length maxIters
**
** Dies (exits via die()) on any I/O or allocation failure.
*/
int initialise(const char* paramfile, const char* obstaclefile,
               t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr,
               int** obstacles_ptr, float** av_vels_ptr)
{
  char message[1024]; /* message buffer */
  FILE* fp;           /* file pointer */
  int xx, yy;         /* generic array indices */
  int blocked;        /* indicates whether a cell is blocked by an obstacle */
  int retval;         /* to hold return value for checking */

  /* open the parameter file */
  fp = fopen(paramfile, "r");
  if (fp == NULL)
  {
    sprintf(message, "could not open input parameter file: %s", paramfile);
    die(message, __LINE__, __FILE__);
  }

  /* read in the parameter values, validating each fscanf conversion */
  retval = fscanf(fp, "%d\n", &(params->nx));
  if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->ny));
  if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->maxIters));
  if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->reynolds_dim));
  if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__);
  retval = fscanf(fp, "%f\n", &(params->density));
  if (retval != 1) die("could not read param file: density", __LINE__, __FILE__);
  retval = fscanf(fp, "%f\n", &(params->accel));
  if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__);
  retval = fscanf(fp, "%f\n", &(params->omega));
  if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__);

  /* and close up the file */
  fclose(fp);

  /* Allocate the grids as contiguous 1D arrays, indexed row-major as
  ** ii + jj*nx, so that memory stays contiguous while being treated as
  ** a 2D array of t_speed structs. */

  /* main grid */
  *cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
  if (*cells_ptr == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);

  /* 'helper' grid, used as scratch space */
  *tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed) * (params->ny * params->nx));
  if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);

  /* the map of obstacles */
  *obstacles_ptr = (int*) malloc (sizeof(int) * params->ny * params->nx);
  if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);

  /* initialise densities: D2Q9 lattice weights scaled by the fluid density */
  float w0 = params->density * 4.f / 9.f; /* rest particle */
  float w1 = params->density / 9.f;       /* axis directions */
  float w2 = params->density / 36.f;      /* diagonals */

  for (int jj = 0; jj < params->ny; jj++)
  {
    for (int ii = 0; ii < params->nx; ii++)
    {
      t_speed* cell = &(*cells_ptr)[ii + jj*params->nx];
      /* centre */
      cell->speeds[0] = w0;
      /* axis directions */
      cell->speeds[1] = w1;
      cell->speeds[2] = w1;
      cell->speeds[3] = w1;
      cell->speeds[4] = w1;
      /* diagonals */
      cell->speeds[5] = w2;
      cell->speeds[6] = w2;
      cell->speeds[7] = w2;
      cell->speeds[8] = w2;
    }
  }

  /* first set all cells in obstacle array to zero */
  for (int jj = 0; jj < params->ny; jj++)
  {
    for (int ii = 0; ii < params->nx; ii++)
    {
      (*obstacles_ptr)[ii + jj*params->nx] = 0;
    }
  }

  /* open the obstacle data file */
  fp = fopen(obstaclefile, "r");
  if (fp == NULL)
  {
    sprintf(message, "could not open input obstacles file: %s", obstaclefile);
    die(message, __LINE__, __FILE__);
  }

  /* read-in the blocked cells list */
  while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF)
  {
    /* some checks */
    if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__);
    if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__);
    if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__);
    if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__);
    /* assign to array */
    (*obstacles_ptr)[xx + yy*params->nx] = blocked;
  }

  /* and close the file */
  fclose(fp);

  /* allocate space to hold a record of the average velocities computed
  ** at each timestep */
  *av_vels_ptr = (float*)malloc(sizeof(float) * params->maxIters);
  /* BUG FIX: this allocation was previously the only unchecked one;
  ** check it like every other allocation above */
  if (*av_vels_ptr == NULL) die("cannot allocate memory for av_vels", __LINE__, __FILE__);

  return EXIT_SUCCESS;
}
/* Release every host allocation made by initialise().
** free(NULL) is a no-op, so this is safe to call with unset pointers.
** Always returns EXIT_SUCCESS. */
int finalise(t_speed* cells_ptr, t_speed* tmp_cells_ptr,
             int* obstacles_ptr, float* av_vels_ptr)
{
  free(av_vels_ptr);
  free(obstacles_ptr);
  free(tmp_cells_ptr);
  free(cells_ptr);
  return EXIT_SUCCESS;
}
/* Reynolds number of the flow: mean velocity magnitude times the
** characteristic dimension, divided by the kinematic viscosity derived
** from the BGK relaxation parameter omega. */
float calc_reynolds(const t_param params, t_speed* cells, int* obstacles)
{
  const float viscosity = 1.f / 6.f * (2.f / params.omega - 1.f);
  const float mean_u    = av_velocity(params, cells, obstacles);

  return mean_u * params.reynolds_dim / viscosity;
}
/* Sum every speed component of every cell in the grid — useful as a
** conservation check, since total density should stay constant.
** Iterates cells in row-major order, matching the original nested
** (jj, ii) loops, so the float accumulation order is unchanged. */
float total_density(const t_param params, t_speed* cells)
{
  float total = 0.f; /* accumulator */
  const int n_cells = params.nx * params.ny;

  for (int c = 0; c < n_cells; c++)
  {
    for (int kk = 0; kk < NSPEEDS; kk++)
    {
      total += cells[c].speeds[kk];
    }
  }

  return total;
}
/* Write the final simulation state (one line per cell: coordinates,
** velocity components, speed, pressure, obstacle flag) to FINALSTATEFILE,
** and the per-timestep average velocities to AVVELSFILE.
**
** params     simulation parameters
** cells      final grid state, row-major, indexed ii + jj*nx
** obstacles  obstacle map (1 = blocked)
** av_vels    average velocity per timestep, length params.maxIters
**
** Dies on file-open failure; returns EXIT_SUCCESS otherwise.
*/
int write_values(const t_param params, t_speed* cells, int* obstacles, float* av_vels)
{
  FILE* fp;                     /* file pointer */
  const float c_sq = 1.f / 3.f; /* sq. of speed of sound */
  float local_density;          /* per grid cell sum of densities */
  float pressure;               /* fluid pressure in grid cell */
  float u_x;                    /* x-component of velocity in grid cell */
  float u_y;                    /* y-component of velocity in grid cell */
  float u;                      /* norm--root of summed squares--of u_x and u_y */

  fp = fopen(FINALSTATEFILE, "w");
  if (fp == NULL)
  {
    die("could not open file output file", __LINE__, __FILE__);
  }

  for (int jj = 0; jj < params.ny; jj++)
  {
    for (int ii = 0; ii < params.nx; ii++)
    {
      const int idx = ii + jj*params.nx; /* row-major cell index, hoisted */

      /* an occupied cell: zero velocity, rest-state pressure */
      if (obstacles[idx])
      {
        u_x = u_y = u = 0.f;
        pressure = params.density * c_sq;
      }
      /* no obstacle */
      else
      {
        const float* s = cells[idx].speeds;

        local_density = 0.f;
        for (int kk = 0; kk < NSPEEDS; kk++)
        {
          local_density += s[kk];
        }

        /* compute x velocity component */
        u_x = (s[1] + s[5] + s[8]
               - (s[3] + s[6] + s[7]))
              / local_density;
        /* compute y velocity component */
        u_y = (s[2] + s[5] + s[6]
               - (s[4] + s[7] + s[8]))
              / local_density;
        /* compute norm of velocity */
        u = sqrtf((u_x * u_x) + (u_y * u_y));
        /* compute pressure */
        pressure = local_density * c_sq;
      }

      /* BUG FIX: the obstacle flag was previously written with a
      ** transposed index (ii * params.nx + jj) — wrong cell, and out of
      ** bounds whenever nx != ny. Use the same row-major index as the
      ** rest of the program. */
      fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", ii, jj, u_x, u_y, u, pressure, obstacles[idx]);
    }
  }

  fclose(fp);

  fp = fopen(AVVELSFILE, "w");
  if (fp == NULL)
  {
    die("could not open file output file", __LINE__, __FILE__);
  }

  for (int ii = 0; ii < params.maxIters; ii++)
  {
    fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]);
  }

  fclose(fp);

  return EXIT_SUCCESS;
}
/* Print an error message, tagged with its source location, to stderr,
** flush the stream, and terminate the program with a failure status.
** Never returns. */
void die(const char* message, const int line, const char* file)
{
  FILE* const out = stderr;
  fprintf(out, "Error at line %d of file %s:\n", line, file);
  fprintf(out, "%s\n", message);
  fflush(out); /* make sure the diagnostic is visible before exiting */
  exit(EXIT_FAILURE);
}
/* Report the expected command line to stderr and terminate with a
** failure status. Never returns. */
void usage(const char* exe)
{
  fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
  exit(EXIT_FAILURE);
}
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "3.0.4"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Copy the first part of user declarations. */
#line 15 "bison.y" /* yacc.c:339 */
#include "lex.yy.c"
#include "cm.h"
#include "operators.h"
#line 76 "bison.cu" /* yacc.c:339 */
# ifndef YY_NULLPTR
# if defined __cplusplus && 201103L <= __cplusplus
# define YY_NULLPTR nullptr
# else
# define YY_NULLPTR 0
# endif
# endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Debug traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
#if YYDEBUG
extern int yydebug;
#endif
/* Token type. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
NONEQUAL = 268,
OR = 269,
XOR = 270,
AND = 271,
DISTINCT = 272,
IN = 273,
IS = 274,
LIKE = 275,
REGEXP = 276,
NOT = 277,
BETWEEN = 278,
COMPARISON = 279,
SHIFT = 280,
MOD = 281,
FROM = 282,
DELETE = 283,
LOAD = 284,
FILTER = 285,
BY = 286,
JOIN = 287,
STORE = 288,
INTO = 289,
GROUP = 290,
SELECT = 291,
AS = 292,
ORDER = 293,
ASC = 294,
DESC = 295,
COUNT = 296,
USING = 297,
SUM = 298,
AVG = 299,
MIN = 300,
MAX = 301,
LIMIT = 302,
ON = 303,
BINARY = 304,
YEAR = 305,
MONTH = 306,
DAY = 307,
CAST_TO_INT = 308,
LEFT = 309,
RIGHT = 310,
OUTER = 311,
SEMI = 312,
ANTI = 313,
SORT = 314,
SEGMENTS = 315,
PRESORTED = 316,
PARTITION = 317,
INSERT = 318,
WHERE = 319,
DISPLAY = 320,
CASE = 321,
WHEN = 322,
THEN = 323,
ELSE = 324,
END = 325,
SHOW = 326,
TABLES = 327,
TABLE = 328,
DESCRIBE = 329,
DROP = 330,
CREATE = 331,
INDEX = 332,
INTERVAL = 333,
APPEND = 334,
NO = 335,
ENCODING = 336
};
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
/* Semantic-value type for grammar symbols; which member is active
   depends on the token/rule that produced the value.
   NOTE(review): this file is Bison-generated output — to change these
   members, edit the %union in bison.y and regenerate. */
union YYSTYPE
{
#line 25 "bison.y" /* yacc.c:355 */
long long int intval;  /* integer-valued tokens/rules — presumably INTNUM etc.; confirm in bison.y */
double floatval;       /* floating-point values — presumably APPROXNUM/DECIMAL1; confirm in bison.y */
char *strval;          /* string/name values */
int subtok;            /* sub-token discriminator — presumably for multi-operator tokens like COMPARISON */
#line 202 "bison.cu" /* yacc.c:355 */
};
typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif
extern YYSTYPE yylval;
int yyparse (void);
/* Copy the second part of user declarations. */
#line 219 "bison.cu" /* yacc.c:358 */
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#else
typedef signed char yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if defined YYENABLE_NLS && YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
# endif
# endif
# ifndef YY_
# define YY_(Msgid) Msgid
# endif
#endif
#ifndef YY_ATTRIBUTE
# if (defined __GNUC__ \
&& (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
|| defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
# else
# define YY_ATTRIBUTE(Spec) /* empty */
# endif
#endif
#ifndef YY_ATTRIBUTE_PURE
# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
#endif
#ifndef YY_ATTRIBUTE_UNUSED
# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
#endif
#if !defined _Noreturn \
&& (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
# if defined _MSC_VER && 1200 <= _MSC_VER
# define _Noreturn __declspec (noreturn)
# else
# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(E) ((void) (E))
#else
# define YYUSE(E) /* empty */
#endif
#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
/* Suppress an incorrect diagnostic about yylval being uninitialized. */
# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
_Pragma ("GCC diagnostic push") \
_Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
_Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
_Pragma ("GCC diagnostic pop")
#else
# define YY_INITIAL_VALUE(Value) Value
#endif
#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
# define YY_IGNORE_MAYBE_UNINITIALIZED_END
#endif
#ifndef YY_INITIAL_VALUE
# define YY_INITIAL_VALUE(Value) /* Nothing. */
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
/* Use EXIT_SUCCESS as a witness for stdlib.h. */
# ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's 'empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined EXIT_SUCCESS \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined EXIT_SUCCESS
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined EXIT_SUCCESS
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
/* A type that is properly aligned for any stack member; used by
   YYSTACK_BYTES/YYSTACK_RELOCATE below to size and relocate the
   parser's state stack and value stack within one allocation.
   NOTE(review): Bison-generated skeleton code — do not hand-edit. */
union yyalloc
{
yytype_int16 yyss_alloc; /* state-stack element */
YYSTYPE yyvs_alloc;      /* semantic-value-stack element */
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large to enough to hold all stacks, each with
N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
# define YYCOPY_NEEDED 1
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (0)
#endif
#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
/* Copy COUNT objects from SRC to DST. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(Dst, Src, Count) \
__builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
# else
# define YYCOPY(Dst, Src, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(Dst)[yyi] = (Src)[yyi]; \
} \
while (0)
# endif
# endif
#endif /* !YYCOPY_NEEDED */
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 23
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 837
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 99
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 14
/* YYNRULES -- Number of rules. */
#define YYNRULES 101
/* YYNSTATES -- Number of states. */
#define YYNSTATES 303
/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
by yylex, with out-of-bounds checking. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 336
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
as returned by yylex, without out-of-bounds checking. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 23, 2, 2, 2, 33, 27, 2,
92, 93, 31, 29, 95, 30, 94, 32, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 98, 91,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 35, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 96, 26, 97, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 24, 25,
28, 34, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90
};
#if YYDEBUG
/* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
0, 128, 128, 129, 133, 136, 138, 140, 142, 144,
146, 148, 150, 152, 154, 156, 158, 160, 162, 164,
166, 168, 174, 175, 176, 177, 178, 179, 180, 181,
182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
192, 193, 194, 195, 196, 197, 198, 202, 203, 204,
205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
215, 216, 217, 219, 220, 221, 225, 226, 229, 232,
236, 237, 238, 242, 243, 247, 248, 251, 253, 256,
260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
270, 271, 272, 273, 274, 275, 277, 280, 282, 285,
286, 287
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || 0
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL",
"NONEQUAL", "OR", "XOR", "AND", "DISTINCT", "IN", "IS", "LIKE", "REGEXP",
"NOT", "'!'", "BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", "'+'",
"'-'", "'*'", "'/'", "'%'", "MOD", "'^'", "FROM", "DELETE", "LOAD",
"FILTER", "BY", "JOIN", "STORE", "INTO", "GROUP", "SELECT", "AS",
"ORDER", "ASC", "DESC", "COUNT", "USING", "SUM", "AVG", "MIN", "MAX",
"LIMIT", "ON", "BINARY", "YEAR", "MONTH", "DAY", "CAST_TO_INT", "LEFT",
"RIGHT", "OUTER", "SEMI", "ANTI", "SORT", "SEGMENTS", "PRESORTED",
"PARTITION", "INSERT", "WHERE", "DISPLAY", "CASE", "WHEN", "THEN",
"ELSE", "END", "SHOW", "TABLES", "TABLE", "DESCRIBE", "DROP", "CREATE",
"INDEX", "INTERVAL", "APPEND", "NO", "ENCODING", "';'", "'('", "')'",
"'.'", "','", "'{'", "'}'", "':'", "$accept", "stmt_list", "stmt",
"select_stmt", "expr", "opt_group_list", "expr_list", "load_list",
"val_list", "opt_val_list", "opt_where", "join_list", "opt_limit",
"sort_def", YY_NULLPTR
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[NUM] -- (External) token number corresponding to the
(internal) symbol number NUM (which must be that of a token). */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 33, 278, 279, 124, 38, 280, 43,
45, 42, 47, 37, 281, 94, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
336, 59, 40, 41, 46, 44, 123, 125, 58
};
# endif
#define YYPACT_NINF -186
#define yypact_value_is_default(Yystate) \
(!!((Yystate) == (-186)))
#define YYTABLE_NINF -1
#define yytable_value_is_error(Yytable_value) \
(!!((Yytable_value) == (-1)))
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
static const yytype_int16 yypact[] =
{
107, -5, -18, 22, -1, 34, -36, 45, -30, -73,
93, -38, -186, -22, 76, 38, 87, 44, -186, -186,
108, 111, 114, -186, 7, -186, 117, 132, 200, 133,
69, 143, 103, 58, -186, 104, 109, -186, 118, 120,
51, -186, -186, -186, -186, -186, -186, 259, 259, 259,
-186, 78, 80, 82, 83, 88, 90, 91, 92, 96,
110, 259, 703, -31, 128, 259, -48, 200, 182, 197,
198, 121, 259, -186, -186, -186, 207, 199, 209, 787,
255, 255, 259, 259, 259, 259, 259, 259, 259, 259,
259, 259, 344, 259, 259, 259, 259, 259, 2, 259,
293, 259, 259, 259, 259, 259, 259, 259, 212, 215,
259, 259, 749, 134, 214, 169, 170, -25, 136, 140,
144, 236, 749, 147, -186, 149, 367, 390, 413, 436,
459, 482, 505, 528, 551, 626, -186, 749, 771, 307,
668, 787, -186, 239, 803, 79, 676, 164, 124, 124,
-186, -186, -186, -186, -186, -29, 726, 94, -186, -186,
245, -186, 191, -27, 247, 169, 252, 253, 165, -186,
172, -186, -186, -186, -186, -186, -186, -186, -186, -186,
259, -186, -2, 178, 268, 233, -39, -37, 237, -186,
230, 273, 259, 186, -27, 221, 251, -186, -186, -186,
-62, 201, 248, 289, 597, -186, 238, 259, 300, 264,
265, 303, 267, 276, 320, -186, -186, -186, 169, -186,
285, 324, -186, 325, 326, 241, 250, 259, 259, -186,
274, 340, 345, 304, 346, 358, 308, -186, 362, -186,
277, 278, 259, 361, 574, 649, 259, 327, 331, 259,
332, 333, 259, 322, 355, -186, 749, -45, 10, -186,
-186, 649, 259, 259, 649, 259, 259, 649, 354, 403,
-186, 259, 319, 405, -186, 649, 649, -186, 649, 649,
-186, 406, 318, 749, 341, 321, -186, -186, -186, -186,
-186, 412, -186, -186, 357, 430, 323, 431, 424, 435,
359, 450, -186
};
/* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
Performed when YYTABLE does not specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 0, 0, 0, 0, 0, 17, 13,
0, 0, 0, 1, 0, 2, 0, 0, 0, 0,
0, 0, 0, 0, 18, 0, 0, 3, 0, 0,
22, 25, 26, 27, 29, 28, 24, 0, 0, 0,
72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 96, 0, 0, 0,
0, 0, 0, 7, 34, 35, 0, 0, 0, 41,
59, 60, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 77, 15, 0, 0, 96, 0, 0, 0, 0,
0, 0, 79, 0, 23, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 64, 54, 55, 56,
57, 53, 66, 0, 62, 0, 61, 58, 47, 48,
49, 50, 51, 52, 70, 68, 0, 75, 78, 8,
0, 97, 0, 98, 0, 96, 0, 0, 0, 46,
0, 36, 37, 38, 39, 40, 42, 43, 44, 45,
0, 67, 22, 0, 0, 0, 0, 0, 0, 5,
68, 0, 0, 0, 98, 0, 0, 11, 14, 16,
0, 0, 0, 0, 0, 63, 0, 0, 0, 0,
0, 0, 0, 0, 0, 9, 71, 76, 96, 12,
0, 0, 20, 0, 0, 0, 33, 0, 0, 69,
0, 0, 0, 0, 0, 0, 0, 10, 0, 101,
0, 0, 0, 0, 0, 80, 0, 0, 0, 0,
0, 0, 0, 99, 0, 21, 73, 0, 0, 65,
88, 84, 0, 0, 85, 0, 0, 87, 0, 0,
6, 0, 32, 0, 91, 83, 81, 93, 86, 82,
95, 0, 0, 74, 0, 0, 92, 89, 94, 90,
100, 0, 31, 30, 0, 0, 0, 0, 0, 0,
0, 0, 19
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-186, -186, 447, 295, -28, 269, 391, -186, -185, -186,
-186, -34, -114, 282
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 10, 11, 12, 157, 189, 63, 257, 158, 159,
73, 190, 116, 197
};
/* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule whose
number is the opposite. If YYTABLE_NINF, syntax error. */
static const yytype_int16 yytable[] =
{
62, 162, 208, 113, 211, 109, 13, 217, 114, 13,
142, 164, 184, 21, 22, 185, 26, 27, 14, 79,
80, 81, 229, 28, 143, 29, 15, 209, 210, 212,
213, 222, 223, 92, 186, 187, 188, 112, 17, 62,
115, 195, 16, 196, 122, 18, 74, 75, 270, 19,
271, 199, 20, 25, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 110, 137, 138, 139, 140, 141,
110, 144, 146, 147, 148, 149, 150, 151, 152, 153,
30, 31, 156, 182, 41, 42, 43, 44, 45, 46,
76, 32, 77, 23, 78, 33, 47, 1, 37, 74,
75, 48, 49, 272, 237, 273, 93, 94, 95, 96,
97, 1, 34, 98, 99, 35, 2, 92, 36, 100,
38, 3, 101, 102, 103, 104, 105, 106, 107, 51,
2, 52, 53, 54, 55, 3, 39, 64, 56, 57,
58, 59, 65, 76, 2, 77, 66, 78, 67, 3,
68, 4, 204, 5, 60, 104, 105, 106, 107, 6,
72, 69, 7, 8, 9, 4, 70, 5, 111, 71,
82, 61, 83, 6, 84, 85, 7, 8, 9, 4,
86, 5, 87, 88, 89, 118, 91, 6, 90, 192,
7, 8, 9, 102, 103, 104, 105, 106, 107, 244,
245, 119, 120, 124, 40, 41, 42, 43, 44, 45,
46, 260, 123, 121, 256, 125, 154, 47, 261, 155,
161, 264, 48, 49, 267, 114, 160, 274, 163, 165,
277, 50, 166, 280, 275, 276, 167, 278, 279, 168,
169, 286, 287, 283, 288, 289, 170, 181, 193, 194,
51, 198, 52, 53, 54, 55, 200, 201, 202, 56,
57, 58, 59, 40, 41, 42, 43, 44, 45, 46,
203, 205, 206, 207, 185, 60, 47, 216, 214, 218,
100, 48, 49, 101, 102, 103, 104, 105, 106, 107,
220, 221, 61, 226, 225, 228, 224, 40, 41, 42,
43, 44, 45, 46, 230, 231, 232, 233, 234, 51,
47, 52, 53, 54, 55, 48, 49, 235, 56, 57,
58, 59, 96, 97, 236, 238, 98, 99, 239, 240,
241, 246, 100, 242, 60, 101, 102, 103, 104, 105,
106, 107, 243, 51, 247, 52, 53, 54, 55, 248,
250, 61, 56, 57, 58, 59, 93, 94, 95, 96,
97, 249, 251, 98, 99, 252, 253, 258, 60, 100,
254, 255, 101, 102, 103, 104, 105, 106, 107, 93,
94, 95, 96, 97, 262, 145, 98, 99, 263, 265,
266, 269, 100, 268, 281, 101, 102, 103, 104, 105,
106, 107, 93, 94, 95, 96, 97, 282, 284, 98,
99, 285, 290, 291, 293, 100, 294, 297, 101, 102,
103, 104, 105, 106, 107, 93, 94, 95, 96, 97,
295, 292, 98, 99, 296, 298, 299, 136, 100, 300,
183, 101, 102, 103, 104, 105, 106, 107, 93, 94,
95, 96, 97, 301, 302, 98, 99, 24, 117, 215,
171, 100, 0, 0, 101, 102, 103, 104, 105, 106,
107, 93, 94, 95, 96, 97, 219, 0, 98, 99,
0, 0, 0, 172, 100, 0, 0, 101, 102, 103,
104, 105, 106, 107, 93, 94, 95, 96, 97, 0,
0, 98, 99, 0, 0, 0, 173, 100, 0, 0,
101, 102, 103, 104, 105, 106, 107, 93, 94, 95,
96, 97, 0, 0, 98, 99, 0, 0, 0, 174,
100, 0, 0, 101, 102, 103, 104, 105, 106, 107,
93, 94, 95, 96, 97, 0, 0, 98, 99, 0,
0, 0, 175, 100, 0, 0, 101, 102, 103, 104,
105, 106, 107, 93, 94, 95, 96, 97, 0, 0,
98, 99, 0, 0, 0, 176, 100, 0, 0, 101,
102, 103, 104, 105, 106, 107, 93, 94, 95, 96,
97, 0, 0, 98, 99, 0, 0, 0, 177, 100,
0, 0, 101, 102, 103, 104, 105, 106, 107, 93,
94, 95, 96, 97, 0, 0, 98, 99, 0, 0,
0, 178, 100, 0, 0, 101, 102, 103, 104, 105,
106, 107, 0, 0, 0, 0, 0, 0, 93, 94,
95, 96, 97, 0, 179, 98, 99, 0, 0, 0,
0, 100, 0, 259, 101, 102, 103, 104, 105, 106,
107, 93, 94, 95, 96, 97, 0, 0, 98, 99,
0, 0, 0, 0, 100, 227, 0, 101, 102, 103,
104, 105, 106, 107, 97, 0, 0, 98, 99, 0,
184, 0, 0, 100, 0, 0, 101, 102, 103, 104,
105, 106, 107, 180, 101, 102, 103, 104, 105, 106,
107, 0, 186, 187, 188, 93, 94, 95, 96, 97,
0, 0, 98, 99, 0, 0, 0, 0, 100, 0,
0, 101, 102, 103, 104, 105, 106, 107, 93, 94,
95, 96, 97, 0, 0, 98, 99, 0, 0, 108,
0, 100, 0, 0, 101, 102, 103, 104, 105, 106,
107, 93, 94, 95, 96, 97, 0, 0, 98, 99,
0, 0, 191, 0, 100, 0, 0, 101, 102, 103,
104, 105, 106, 107, 94, 95, 96, 97, 0, 0,
98, 99, 0, 0, 0, 0, 100, 0, 0, 101,
102, 103, 104, 105, 106, 107, 98, 99, 0, 0,
0, 0, 100, 0, 0, 101, 102, 103, 104, 105,
106, 107, -1, -1, 0, 0, 0, 0, 100, 0,
0, 101, 102, 103, 104, 105, 106, 107
};
static const yytype_int16 yycheck[] =
{
28, 115, 41, 51, 41, 36, 11, 192, 56, 11,
8, 36, 41, 86, 87, 44, 38, 39, 36, 47,
48, 49, 207, 45, 22, 47, 4, 66, 67, 66,
67, 93, 94, 61, 63, 64, 65, 65, 4, 67,
88, 68, 43, 70, 72, 81, 48, 49, 93, 4,
95, 165, 82, 91, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 95, 93, 94, 95, 96, 97,
95, 99, 100, 101, 102, 103, 104, 105, 106, 107,
4, 43, 110, 4, 5, 6, 7, 8, 9, 10,
92, 4, 94, 0, 96, 51, 17, 4, 91, 48,
49, 22, 23, 93, 218, 95, 12, 13, 14, 15,
16, 4, 4, 19, 20, 4, 37, 145, 4, 25,
3, 42, 28, 29, 30, 31, 32, 33, 34, 50,
37, 52, 53, 54, 55, 42, 4, 4, 59, 60,
61, 62, 73, 92, 37, 94, 3, 96, 45, 42,
92, 72, 180, 74, 75, 31, 32, 33, 34, 80,
40, 57, 83, 84, 85, 72, 57, 74, 40, 51,
92, 92, 92, 80, 92, 92, 83, 84, 85, 72,
92, 74, 92, 92, 92, 3, 76, 80, 92, 95,
83, 84, 85, 29, 30, 31, 32, 33, 34, 227,
228, 4, 4, 4, 4, 5, 6, 7, 8, 9,
10, 245, 5, 92, 242, 6, 4, 17, 246, 4,
6, 249, 22, 23, 252, 56, 92, 261, 58, 93,
264, 31, 92, 267, 262, 263, 92, 265, 266, 3,
93, 275, 276, 271, 278, 279, 97, 8, 3, 58,
50, 4, 52, 53, 54, 55, 4, 4, 93, 59,
60, 61, 62, 4, 5, 6, 7, 8, 9, 10,
98, 93, 4, 40, 44, 75, 17, 4, 41, 93,
25, 22, 23, 28, 29, 30, 31, 32, 33, 34,
69, 40, 92, 4, 46, 57, 95, 4, 5, 6,
7, 8, 9, 10, 4, 41, 41, 4, 41, 50,
17, 52, 53, 54, 55, 22, 23, 41, 59, 60,
61, 62, 15, 16, 4, 40, 19, 20, 4, 4,
4, 57, 25, 92, 75, 28, 29, 30, 31, 32,
33, 34, 92, 50, 4, 52, 53, 54, 55, 4,
4, 92, 59, 60, 61, 62, 12, 13, 14, 15,
16, 57, 4, 19, 20, 57, 4, 6, 75, 25,
93, 93, 28, 29, 30, 31, 32, 33, 34, 12,
13, 14, 15, 16, 57, 92, 19, 20, 57, 57,
57, 36, 25, 71, 40, 28, 29, 30, 31, 32,
33, 34, 12, 13, 14, 15, 16, 4, 89, 19,
20, 6, 6, 95, 93, 25, 4, 94, 28, 29,
30, 31, 32, 33, 34, 12, 13, 14, 15, 16,
73, 90, 19, 20, 4, 4, 12, 93, 25, 4,
145, 28, 29, 30, 31, 32, 33, 34, 12, 13,
14, 15, 16, 94, 4, 19, 20, 10, 67, 190,
93, 25, -1, -1, 28, 29, 30, 31, 32, 33,
34, 12, 13, 14, 15, 16, 194, -1, 19, 20,
-1, -1, -1, 93, 25, -1, -1, 28, 29, 30,
31, 32, 33, 34, 12, 13, 14, 15, 16, -1,
-1, 19, 20, -1, -1, -1, 93, 25, -1, -1,
28, 29, 30, 31, 32, 33, 34, 12, 13, 14,
15, 16, -1, -1, 19, 20, -1, -1, -1, 93,
25, -1, -1, 28, 29, 30, 31, 32, 33, 34,
12, 13, 14, 15, 16, -1, -1, 19, 20, -1,
-1, -1, 93, 25, -1, -1, 28, 29, 30, 31,
32, 33, 34, 12, 13, 14, 15, 16, -1, -1,
19, 20, -1, -1, -1, 93, 25, -1, -1, 28,
29, 30, 31, 32, 33, 34, 12, 13, 14, 15,
16, -1, -1, 19, 20, -1, -1, -1, 93, 25,
-1, -1, 28, 29, 30, 31, 32, 33, 34, 12,
13, 14, 15, 16, -1, -1, 19, 20, -1, -1,
-1, 93, 25, -1, -1, 28, 29, 30, 31, 32,
33, 34, -1, -1, -1, -1, -1, -1, 12, 13,
14, 15, 16, -1, 93, 19, 20, -1, -1, -1,
-1, 25, -1, 79, 28, 29, 30, 31, 32, 33,
34, 12, 13, 14, 15, 16, -1, -1, 19, 20,
-1, -1, -1, -1, 25, 78, -1, 28, 29, 30,
31, 32, 33, 34, 16, -1, -1, 19, 20, -1,
41, -1, -1, 25, -1, -1, 28, 29, 30, 31,
32, 33, 34, 77, 28, 29, 30, 31, 32, 33,
34, -1, 63, 64, 65, 12, 13, 14, 15, 16,
-1, -1, 19, 20, -1, -1, -1, -1, 25, -1,
-1, 28, 29, 30, 31, 32, 33, 34, 12, 13,
14, 15, 16, -1, -1, 19, 20, -1, -1, 46,
-1, 25, -1, -1, 28, 29, 30, 31, 32, 33,
34, 12, 13, 14, 15, 16, -1, -1, 19, 20,
-1, -1, 46, -1, 25, -1, -1, 28, 29, 30,
31, 32, 33, 34, 13, 14, 15, 16, -1, -1,
19, 20, -1, -1, -1, -1, 25, -1, -1, 28,
29, 30, 31, 32, 33, 34, 19, 20, -1, -1,
-1, -1, 25, -1, -1, 28, 29, 30, 31, 32,
33, 34, 19, 20, -1, -1, -1, -1, 25, -1,
-1, 28, 29, 30, 31, 32, 33, 34
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM.
NOTE(review): this table is machine-generated by Bison from
bison.y (see the "#line ... bison.y" markers later in this file);
regenerate from the grammar rather than hand-editing the numbers. */
static const yytype_uint8 yystos[] =
{
0, 4, 37, 42, 72, 74, 80, 83, 84, 85,
100, 101, 102, 11, 36, 4, 43, 4, 81, 4,
82, 86, 87, 0, 101, 91, 38, 39, 45, 47,
4, 43, 4, 51, 4, 4, 4, 91, 3, 4,
4, 5, 6, 7, 8, 9, 10, 17, 22, 23,
31, 50, 52, 53, 54, 55, 59, 60, 61, 62,
75, 92, 103, 105, 4, 73, 3, 45, 92, 57,
57, 51, 40, 109, 48, 49, 92, 94, 96, 103,
103, 103, 92, 92, 92, 92, 92, 92, 92, 92,
92, 76, 103, 12, 13, 14, 15, 16, 19, 20,
25, 28, 29, 30, 31, 32, 33, 34, 46, 36,
95, 40, 103, 51, 56, 88, 111, 105, 3, 4,
4, 92, 103, 5, 4, 6, 103, 103, 103, 103,
103, 103, 103, 103, 103, 103, 93, 103, 103, 103,
103, 103, 8, 22, 103, 92, 103, 103, 103, 103,
103, 103, 103, 103, 4, 4, 103, 103, 107, 108,
92, 6, 111, 58, 36, 93, 92, 92, 3, 93,
97, 93, 93, 93, 93, 93, 93, 93, 93, 93,
77, 8, 4, 102, 41, 44, 63, 64, 65, 104,
110, 46, 95, 3, 58, 68, 70, 112, 4, 111,
4, 4, 93, 98, 103, 93, 4, 40, 41, 66,
67, 41, 66, 67, 41, 104, 4, 107, 93, 112,
69, 40, 93, 94, 95, 46, 4, 78, 57, 107,
4, 41, 41, 4, 41, 41, 4, 111, 40, 4,
4, 4, 92, 92, 103, 103, 57, 4, 4, 57,
4, 4, 57, 4, 93, 93, 103, 106, 6, 79,
110, 103, 57, 57, 103, 57, 57, 103, 71, 36,
93, 95, 93, 95, 110, 103, 103, 110, 103, 103,
110, 40, 4, 103, 89, 6, 110, 110, 110, 110,
6, 95, 90, 93, 4, 73, 4, 94, 4, 12,
4, 94, 4
};
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.
Used after each reduction to look up the goto transition
(see yypgoto/yydefgoto use in yyparse).  Generated by Bison
from bison.y -- do not hand-edit. */
static const yytype_uint8 yyr1[] =
{
0, 99, 100, 100, 101, 102, 102, 102, 102, 102,
102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
102, 102, 103, 103, 103, 103, 103, 103, 103, 103,
103, 103, 103, 103, 103, 103, 103, 103, 103, 103,
103, 103, 103, 103, 103, 103, 103, 103, 103, 103,
103, 103, 103, 103, 103, 103, 103, 103, 103, 103,
103, 103, 103, 103, 103, 103, 103, 103, 104, 104,
105, 105, 105, 106, 106, 107, 107, 108, 108, 109,
110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
110, 110, 110, 110, 110, 110, 111, 111, 112, 112,
112, 112
};
/* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN.
Read in yyreduce (yylen = yyr2[yyn]) to know how many stack
entries to pop.  Generated by Bison from bison.y -- do not
hand-edit. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 5, 6, 8,
9, 7, 8, 2, 7, 5, 7, 2, 3, 22,
8, 10, 1, 3, 1, 1, 1, 1, 1, 1,
11, 11, 9, 6, 2, 2, 4, 4, 4, 4,
4, 2, 4, 4, 4, 4, 4, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 2,
2, 3, 3, 5, 3, 8, 3, 4, 0, 3,
3, 5, 1, 1, 3, 1, 3, 0, 1, 2,
4, 6, 6, 6, 5, 5, 6, 5, 5, 7,
7, 6, 7, 6, 7, 6, 0, 2, 0, 4,
7, 3
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY) \
{ \
yychar = (Token); \
yylval = (Value); \
YYPOPSTACK (yylen); \
yystate = *yyssp; \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (0)
/* Error token number */
#define YYTERROR 1
#define YYERRCODE 256
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (0)
/* This macro is provided for backward compatibility. */
#ifndef YY_LOCATION_PRINT
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
#endif
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (0)
/*----------------------------------------.
| Print this symbol's value on YYOUTPUT. |
`----------------------------------------*/
/* Debug helper: print the semantic value of symbol YYTYPE to
YYOUTPUT.  Prints nothing unless the grammar defined a YYPRINT
macro; compiled only when YYDEBUG is set. */
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
{
FILE *yyo = yyoutput;
YYUSE (yyo);  /* silence unused-variable warning when YYPRINT is absent */
if (!yyvaluep)
return;
# ifdef YYPRINT
/* Only terminals have a user-printable value here (yytoknum maps
internal token numbers back to the external ones). */
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# endif
YYUSE (yytype);
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/* Debug helper: print symbol YYTYPE as
"token NAME (value)" or "nterm NAME (value)" to YYOUTPUT. */
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
{
/* Symbols below YYNTOKENS are terminals; the rest are nonterminals. */
YYFPRINTF (yyoutput, "%s %s (",
yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
/* Debug helper: dump the parser state stack, bottom to top, on one
   stderr line ("Stack now 0 4 17 ...").  Compiled only when
   YYDEBUG is set. */
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
{
  YYFPRINTF (stderr, "Stack now");
  while (yybottom <= yytop)
    {
      int state_num = *yybottom;
      YYFPRINTF (stderr, " %d", state_num);
      ++yybottom;
    }
  YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (0)
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
/* Debug helper: announce that rule YYRULE is about to be reduced and
print the yynrhs right-hand-side symbols currently on top of the
stacks.  YYSSP/YYVSP point at the stack tops. */
static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
{
unsigned long int yylno = yyrline[yyrule];  /* grammar-source line of the rule */
int yynrhs = yyr2[yyrule];                  /* number of RHS symbols to show */
int yyi;
/* yyrule - 1: convert internal rule number to the user-visible one. */
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
/* yystos maps each stacked state to its accessing symbol. */
yy_symbol_print (stderr,
yystos[yyssp[yyi + 1 - yynrhs]],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyssp, yyvsp, Rule); \
} while (0)
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
/* Portable strlen replacement used when the glibc strlen cannot be
   assumed: count the characters of YYSTR up to (not including) the
   terminating NUL. */
static YYSIZE_T
yystrlen (const char *yystr)
{
  YYSIZE_T yylen = 0;
  while (yystr[yylen] != '\0')
    ++yylen;
  return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
/* Portable stpcpy replacement: copy YYSRC (including its NUL) into
   YYDEST and return the address of the terminating '\0' written to
   YYDEST. */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
  char *dst = yydest;
  const char *src = yysrc;
  for (;;)
    {
      char c = *src++;
      *dst = c;
      if (c == '\0')
        return dst;
      ++dst;
    }
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
/* (See the block comment above.)  Copies YYSTR into YYRES with
Bison's double-quote stripping, or, when YYRES is null, returns the
length the copy would have.  Kept verbatim: the switch relies on a
deliberate fall-through and out-of-order case labels. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
/* Quoted names ("foo") get their quotes stripped unless they contain
a character that makes the unquoted form ambiguous. */
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
/* A backslash only stays quote-stripped if it escapes another
backslash; anything else forces the quoted form. */
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
/* Closing quote: terminate the stripped copy and report its
length (excluding the NUL). */
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
/* Unquoted (or unstrippable) names are copied/measured as-is. */
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
about the unexpected token YYTOKEN for the state stack whose top is
YYSSP.
Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
not large enough to hold the message. In that case, also set
*YYMSG_ALLOC to the required number of bytes. Return 2 if the
required number of bytes is too large to store. */
/* (See the block comment above for the return-code contract.)
Builds a verbose "syntax error, unexpected X, expecting Y ..."
message from the parser tables.  Code kept verbatim -- it is
tightly coupled to yypact/yycheck/yytable and the *yymsg_alloc
retry protocol in yyparse. */
static int
yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
yytype_int16 *yyssp, int yytoken)
{
YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
YYSIZE_T yysize = yysize0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
/* Internationalized format string. */
const char *yyformat = YY_NULLPTR;
/* Arguments of yyformat. */
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
/* Number of reported tokens (one for the "unexpected", one per
"expected"). */
int yycount = 0;
/* There are many possibilities here to consider:
- If this state is a consistent state with a default action, then
the only way this function was invoked is if the default action
is an error action. In that case, don't check for expected
tokens because there are none.
- The only way there can be no lookahead present (in yychar) is if
this state is a consistent state with a default action. Thus,
detecting the absence of a lookahead is sufficient to determine
that there is no unexpected or expected token to report. In that
case, just report a simple "syntax error".
- Don't assume there isn't a lookahead just because this state is a
consistent state with a default action. There might have been a
previous inconsistent state, consistent state with a non-default
action, or user semantic action that manipulated yychar.
- Of course, the expected token list depends on states to have
correct lookahead information, and it depends on the parser not
to perform extra reductions after fetching a lookahead from the
scanner and before detecting a syntax error. Thus, state merging
(from LALR or IELR) and default reductions corrupt the expected
token list. However, the list is correct for canonical LR with
one exception: it will still contain any token that will not be
accepted due to an error action in a later state.
*/
if (yytoken != YYEMPTY)
{
/* Scan the current state's row of the action table to collect the
tokens that would NOT produce an error -- these are "expected". */
int yyn = yypact[*yyssp];
yyarg[yycount++] = yytname[yytoken];
if (!yypact_value_is_default (yyn))
{
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. In other words, skip the first -YYN actions for
this state because they are default actions. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yyx;
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
&& !yytable_value_is_error (yytable[yyx + yyn]))
{
/* Too many expected tokens: fall back to reporting only the
unexpected one. */
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
break;
}
yyarg[yycount++] = yytname[yyx];
{
YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
if (! (yysize <= yysize1
&& yysize1 <= YYSTACK_ALLOC_MAXIMUM))
return 2;
yysize = yysize1;
}
}
}
}
/* Pick the format string matching the number of collected tokens. */
switch (yycount)
{
# define YYCASE_(N, S) \
case N: \
yyformat = S; \
break
YYCASE_(0, YY_("syntax error"));
YYCASE_(1, YY_("syntax error, unexpected %s"));
YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
# undef YYCASE_
}
{
YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
return 2;
yysize = yysize1;
}
/* Buffer too small: ask the caller to grow it and call again. */
if (*yymsg_alloc < yysize)
{
*yymsg_alloc = 2 * yysize;
if (! (yysize <= *yymsg_alloc
&& *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
*yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
return 1;
}
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
{
char *yyp = *yymsg;
int yyi = 0;
while ((*yyp = *yyformat) != '\0')
if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyformat += 2;
}
else
{
yyp++;
yyformat++;
}
}
return 0;
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/* Discard symbol YYTYPE with semantic value *YYVALUEP, logging it
under YYMSG (or "Deleting") when YYDEBUG tracing is on.  This
grammar declares no %destructor, so the YYUSE calls only suppress
unused-parameter warnings. */
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
/* NOTE(review): yylocationp is not declared here; it is only the
Location argument of YY_SYMBOL_PRINT, whose expansion (defined
earlier in this file) never uses that argument, so the token is
discarded by the preprocessor. */
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
YYUSE (yytype);
YY_IGNORE_MAYBE_UNINITIALIZED_END
}
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*----------.
| yyparse. |
`----------*/
int
yyparse (void)
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
'yyss': related to states.
'yyvs': related to semantic values.
Refer to the stacks through separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken = 0;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yyssp = yyss = yyssa;
yyvsp = yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yypact_value_is_default (yyn))
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = yylex ();
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yytable_value_is_error (yyn))
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
'$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
#line 133 "bison.y" /* yacc.c:1646 */
{ emit("STMT"); }
#line 1603 "bison.cu" /* yacc.c:1646 */
break;
case 5:
#line 137 "bison.y" /* yacc.c:1646 */
{ emit_select((yyvsp[-6].strval), (yyvsp[-1].strval), (yyvsp[0].intval)); }
#line 1609 "bison.cu" /* yacc.c:1646 */
break;
case 6:
#line 139 "bison.y" /* yacc.c:1646 */
{ emit_load((yyvsp[-11].strval), (yyvsp[-8].strval), (yyvsp[-1].intval), (yyvsp[-5].strval)); }
#line 1615 "bison.cu" /* yacc.c:1646 */
break;
case 7:
#line 141 "bison.y" /* yacc.c:1646 */
{ emit_filter((yyvsp[-4].strval), (yyvsp[-1].strval));}
#line 1621 "bison.cu" /* yacc.c:1646 */
break;
case 8:
#line 143 "bison.y" /* yacc.c:1646 */
{ emit_order((yyvsp[-5].strval), (yyvsp[-2].strval), (yyvsp[0].intval));}
#line 1627 "bison.cu" /* yacc.c:1646 */
break;
case 9:
#line 145 "bison.y" /* yacc.c:1646 */
{ emit_join((yyvsp[-7].strval),(yyvsp[-2].strval),(yyvsp[-1].intval),0,-1); }
#line 1633 "bison.cu" /* yacc.c:1646 */
break;
case 10:
#line 147 "bison.y" /* yacc.c:1646 */
{ emit_store((yyvsp[-7].strval),(yyvsp[-5].strval),(yyvsp[-2].strval)); }
#line 1639 "bison.cu" /* yacc.c:1646 */
break;
case 11:
#line 149 "bison.y" /* yacc.c:1646 */
{ emit_store_binary((yyvsp[-5].strval),(yyvsp[-3].strval),0); }
#line 1645 "bison.cu" /* yacc.c:1646 */
break;
case 12:
#line 151 "bison.y" /* yacc.c:1646 */
{ emit_store_binary((yyvsp[-6].strval),(yyvsp[-4].strval),1); }
#line 1651 "bison.cu" /* yacc.c:1646 */
break;
case 13:
#line 153 "bison.y" /* yacc.c:1646 */
{ emit_describe_table((yyvsp[0].strval));}
#line 1657 "bison.cu" /* yacc.c:1646 */
break;
case 14:
#line 155 "bison.y" /* yacc.c:1646 */
{ emit_insert((yyvsp[-4].strval), (yyvsp[0].strval));}
#line 1663 "bison.cu" /* yacc.c:1646 */
break;
case 15:
#line 157 "bison.y" /* yacc.c:1646 */
{ emit_delete((yyvsp[-2].strval));}
#line 1669 "bison.cu" /* yacc.c:1646 */
break;
case 16:
#line 159 "bison.y" /* yacc.c:1646 */
{ emit_display((yyvsp[-5].strval), (yyvsp[-2].strval));}
#line 1675 "bison.cu" /* yacc.c:1646 */
break;
case 17:
#line 161 "bison.y" /* yacc.c:1646 */
{ emit_show_tables();}
#line 1681 "bison.cu" /* yacc.c:1646 */
break;
case 18:
#line 163 "bison.y" /* yacc.c:1646 */
{ emit_drop_table((yyvsp[0].strval));}
#line 1687 "bison.cu" /* yacc.c:1646 */
break;
case 19:
#line 165 "bison.y" /* yacc.c:1646 */
{ emit_create_bitmap_index((yyvsp[-19].strval), (yyvsp[-17].strval), (yyvsp[-15].strval), (yyvsp[-13].strval), (yyvsp[-4].strval), (yyvsp[0].strval));}
#line 1693 "bison.cu" /* yacc.c:1646 */
break;
case 20:
#line 167 "bison.y" /* yacc.c:1646 */
{ emit_create_index((yyvsp[-5].strval), (yyvsp[-3].strval), (yyvsp[-1].strval));}
#line 1699 "bison.cu" /* yacc.c:1646 */
break;
case 21:
#line 169 "bison.y" /* yacc.c:1646 */
{ emit_create_interval((yyvsp[-7].strval), (yyvsp[-5].strval), (yyvsp[-3].strval), (yyvsp[-1].strval));}
#line 1705 "bison.cu" /* yacc.c:1646 */
break;
case 22:
#line 174 "bison.y" /* yacc.c:1646 */
{ emit_name((yyvsp[0].strval)); }
#line 1711 "bison.cu" /* yacc.c:1646 */
break;
case 23:
#line 175 "bison.y" /* yacc.c:1646 */
{ emit_fieldname((yyvsp[-2].strval), (yyvsp[0].strval)); }
#line 1717 "bison.cu" /* yacc.c:1646 */
break;
case 24:
#line 176 "bison.y" /* yacc.c:1646 */
{ emit("USERVAR %s", (yyvsp[0].strval)); }
#line 1723 "bison.cu" /* yacc.c:1646 */
break;
case 25:
#line 177 "bison.y" /* yacc.c:1646 */
{ emit_string((yyvsp[0].strval)); }
#line 1729 "bison.cu" /* yacc.c:1646 */
break;
case 26:
#line 178 "bison.y" /* yacc.c:1646 */
{ emit_number((yyvsp[0].intval)); }
#line 1735 "bison.cu" /* yacc.c:1646 */
break;
case 27:
#line 179 "bison.y" /* yacc.c:1646 */
{ emit_decimal((yyvsp[0].strval)); }
#line 1741 "bison.cu" /* yacc.c:1646 */
break;
case 28:
#line 180 "bison.y" /* yacc.c:1646 */
{ emit_float((yyvsp[0].floatval)); }
#line 1747 "bison.cu" /* yacc.c:1646 */
break;
case 29:
#line 181 "bison.y" /* yacc.c:1646 */
{ emit("BOOL %d", (yyvsp[0].intval)); }
#line 1753 "bison.cu" /* yacc.c:1646 */
break;
case 30:
#line 182 "bison.y" /* yacc.c:1646 */
{ emit_vardecimal((yyvsp[-10].strval), (yyvsp[-8].intval), (yyvsp[-5].strval), (yyvsp[-3].intval), (yyvsp[-1].intval));}
#line 1759 "bison.cu" /* yacc.c:1646 */
break;
case 31:
#line 183 "bison.y" /* yacc.c:1646 */
{ emit_varchar((yyvsp[-10].strval), (yyvsp[-8].intval), (yyvsp[-5].strval), (yyvsp[-3].intval), "", "", "N");}
#line 1765 "bison.cu" /* yacc.c:1646 */
break;
case 32:
#line 184 "bison.y" /* yacc.c:1646 */
{ emit_varchar((yyvsp[-8].strval), (yyvsp[-6].intval), (yyvsp[-3].strval), (yyvsp[-1].intval), "", "", "");}
#line 1771 "bison.cu" /* yacc.c:1646 */
break;
case 33:
#line 185 "bison.y" /* yacc.c:1646 */
{ emit_var((yyvsp[-5].strval), (yyvsp[-3].intval), (yyvsp[0].strval), "", "");}
#line 1777 "bison.cu" /* yacc.c:1646 */
break;
case 34:
#line 186 "bison.y" /* yacc.c:1646 */
{ emit_var_asc((yyvsp[-1].strval));}
#line 1783 "bison.cu" /* yacc.c:1646 */
break;
case 35:
#line 187 "bison.y" /* yacc.c:1646 */
{ emit_var_desc((yyvsp[-1].strval));}
#line 1789 "bison.cu" /* yacc.c:1646 */
break;
case 36:
#line 188 "bison.y" /* yacc.c:1646 */
{ emit_count(); }
#line 1795 "bison.cu" /* yacc.c:1646 */
break;
case 37:
#line 189 "bison.y" /* yacc.c:1646 */
{ emit_sum(); }
#line 1801 "bison.cu" /* yacc.c:1646 */
break;
case 38:
#line 190 "bison.y" /* yacc.c:1646 */
{ emit_average(); }
#line 1807 "bison.cu" /* yacc.c:1646 */
break;
case 39:
#line 191 "bison.y" /* yacc.c:1646 */
{ emit_min(); }
#line 1813 "bison.cu" /* yacc.c:1646 */
break;
case 40:
#line 192 "bison.y" /* yacc.c:1646 */
{ emit_max(); }
#line 1819 "bison.cu" /* yacc.c:1646 */
break;
case 41:
#line 193 "bison.y" /* yacc.c:1646 */
{ emit_distinct(); }
#line 1825 "bison.cu" /* yacc.c:1646 */
break;
case 42:
#line 194 "bison.y" /* yacc.c:1646 */
{ emit_year(); }
#line 1831 "bison.cu" /* yacc.c:1646 */
break;
case 43:
#line 195 "bison.y" /* yacc.c:1646 */
{ emit_month(); }
#line 1837 "bison.cu" /* yacc.c:1646 */
break;
case 44:
#line 196 "bison.y" /* yacc.c:1646 */
{ emit_day(); }
#line 1843 "bison.cu" /* yacc.c:1646 */
break;
case 45:
#line 197 "bison.y" /* yacc.c:1646 */
{ emit_cast(); }
#line 1849 "bison.cu" /* yacc.c:1646 */
break;
case 46:
#line 198 "bison.y" /* yacc.c:1646 */
{ emit_string_grp((yyvsp[-3].strval), (yyvsp[-1].strval)); }
#line 1855 "bison.cu" /* yacc.c:1646 */
break;
case 47:
#line 202 "bison.y" /* yacc.c:1646 */
{ emit_add(); }
#line 1861 "bison.cu" /* yacc.c:1646 */
break;
case 48:
#line 203 "bison.y" /* yacc.c:1646 */
{ emit_minus(); }
#line 1867 "bison.cu" /* yacc.c:1646 */
break;
case 49:
#line 204 "bison.y" /* yacc.c:1646 */
{ emit_mul(); }
#line 1873 "bison.cu" /* yacc.c:1646 */
break;
case 50:
#line 205 "bison.y" /* yacc.c:1646 */
{ emit_div(); }
#line 1879 "bison.cu" /* yacc.c:1646 */
break;
case 51:
#line 206 "bison.y" /* yacc.c:1646 */
{ emit("MOD"); }
#line 1885 "bison.cu" /* yacc.c:1646 */
break;
case 52:
#line 207 "bison.y" /* yacc.c:1646 */
{ emit("MOD"); }
#line 1891 "bison.cu" /* yacc.c:1646 */
break;
case 53:
#line 208 "bison.y" /* yacc.c:1646 */
{ emit_and(); }
#line 1897 "bison.cu" /* yacc.c:1646 */
break;
case 54:
#line 209 "bison.y" /* yacc.c:1646 */
{ emit_eq(); }
#line 1903 "bison.cu" /* yacc.c:1646 */
break;
case 55:
#line 210 "bison.y" /* yacc.c:1646 */
{ emit_neq(); }
#line 1909 "bison.cu" /* yacc.c:1646 */
break;
case 56:
#line 211 "bison.y" /* yacc.c:1646 */
{ emit_or(); }
#line 1915 "bison.cu" /* yacc.c:1646 */
break;
case 57:
#line 212 "bison.y" /* yacc.c:1646 */
{ emit("XOR"); }
#line 1921 "bison.cu" /* yacc.c:1646 */
break;
case 58:
#line 213 "bison.y" /* yacc.c:1646 */
{ emit("SHIFT %s", (yyvsp[-1].subtok)==1?"left":"right"); }
#line 1927 "bison.cu" /* yacc.c:1646 */
break;
case 59:
#line 214 "bison.y" /* yacc.c:1646 */
{ emit("NOT"); }
#line 1933 "bison.cu" /* yacc.c:1646 */
break;
case 60:
#line 215 "bison.y" /* yacc.c:1646 */
{ emit("NOT"); }
#line 1939 "bison.cu" /* yacc.c:1646 */
break;
case 61:
#line 216 "bison.y" /* yacc.c:1646 */
{ emit_cmp((yyvsp[-1].subtok)); }
#line 1945 "bison.cu" /* yacc.c:1646 */
break;
case 62:
#line 217 "bison.y" /* yacc.c:1646 */
{ emit_cmp(7); }
#line 1951 "bison.cu" /* yacc.c:1646 */
break;
case 63:
#line 219 "bison.y" /* yacc.c:1646 */
{ emit("CMPSELECT %d", (yyvsp[-3].subtok)); }
#line 1957 "bison.cu" /* yacc.c:1646 */
break;
case 64:
#line 220 "bison.y" /* yacc.c:1646 */
{emit("EXPR");}
#line 1963 "bison.cu" /* yacc.c:1646 */
break;
case 65:
#line 221 "bison.y" /* yacc.c:1646 */
{ emit_case(); }
#line 1969 "bison.cu" /* yacc.c:1646 */
break;
case 66:
#line 225 "bison.y" /* yacc.c:1646 */
{ emit("ISBOOL %d", (yyvsp[0].intval)); }
#line 1975 "bison.cu" /* yacc.c:1646 */
break;
case 67:
#line 226 "bison.y" /* yacc.c:1646 */
{ emit("ISBOOL %d", (yyvsp[0].intval)); emit("NOT"); }
#line 1981 "bison.cu" /* yacc.c:1646 */
break;
case 68:
#line 229 "bison.y" /* yacc.c:1646 */
{ /* nil */
(yyval.intval) = 0;
}
#line 1989 "bison.cu" /* yacc.c:1646 */
break;
case 69:
#line 232 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = (yyvsp[0].intval);}
#line 1995 "bison.cu" /* yacc.c:1646 */
break;
case 70:
#line 236 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_sel_name((yyvsp[0].strval));}
#line 2001 "bison.cu" /* yacc.c:1646 */
break;
case 71:
#line 237 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = (yyvsp[-4].intval) + 1; emit_sel_name((yyvsp[0].strval));}
#line 2007 "bison.cu" /* yacc.c:1646 */
break;
case 72:
#line 238 "bison.y" /* yacc.c:1646 */
{ emit_sel_name("*");}
#line 2013 "bison.cu" /* yacc.c:1646 */
break;
case 73:
#line 242 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; }
#line 2019 "bison.cu" /* yacc.c:1646 */
break;
case 74:
#line 243 "bison.y" /* yacc.c:1646 */
{(yyval.intval) = (yyvsp[-2].intval) + 1; }
#line 2025 "bison.cu" /* yacc.c:1646 */
break;
case 75:
#line 247 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; }
#line 2031 "bison.cu" /* yacc.c:1646 */
break;
case 76:
#line 248 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1 + (yyvsp[0].intval); }
#line 2037 "bison.cu" /* yacc.c:1646 */
break;
case 77:
#line 251 "bison.y" /* yacc.c:1646 */
{ /* nil */
(yyval.intval) = 0;
}
#line 2045 "bison.cu" /* yacc.c:1646 */
break;
case 79:
#line 256 "bison.y" /* yacc.c:1646 */
{ emit("FILTER BY"); }
#line 2051 "bison.cu" /* yacc.c:1646 */
break;
case 80:
#line 260 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'I');}
#line 2057 "bison.cu" /* yacc.c:1646 */
break;
case 81:
#line 261 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), '3');}
#line 2063 "bison.cu" /* yacc.c:1646 */
break;
case 82:
#line 262 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), '4');}
#line 2069 "bison.cu" /* yacc.c:1646 */
break;
case 83:
#line 263 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), '1');}
#line 2075 "bison.cu" /* yacc.c:1646 */
break;
case 84:
#line 264 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'S');}
#line 2081 "bison.cu" /* yacc.c:1646 */
break;
case 85:
#line 265 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'R');}
#line 2087 "bison.cu" /* yacc.c:1646 */
break;
case 86:
#line 266 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), '2');}
#line 2093 "bison.cu" /* yacc.c:1646 */
break;
case 87:
#line 267 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'O');}
#line 2099 "bison.cu" /* yacc.c:1646 */
break;
case 88:
#line 268 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'I'); }
#line 2105 "bison.cu" /* yacc.c:1646 */
break;
case 89:
#line 269 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), '3'); }
#line 2111 "bison.cu" /* yacc.c:1646 */
break;
case 90:
#line 270 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), '4'); }
#line 2117 "bison.cu" /* yacc.c:1646 */
break;
case 91:
#line 271 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'L'); }
#line 2123 "bison.cu" /* yacc.c:1646 */
break;
case 92:
#line 272 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), '1'); }
#line 2129 "bison.cu" /* yacc.c:1646 */
break;
case 93:
#line 273 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'R'); }
#line 2135 "bison.cu" /* yacc.c:1646 */
break;
case 94:
#line 274 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'R'); }
#line 2141 "bison.cu" /* yacc.c:1646 */
break;
case 95:
#line 275 "bison.y" /* yacc.c:1646 */
{ (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'O'); }
#line 2147 "bison.cu" /* yacc.c:1646 */
break;
case 96:
#line 277 "bison.y" /* yacc.c:1646 */
{ /* nil */
(yyval.intval) = 0;
}
#line 2155 "bison.cu" /* yacc.c:1646 */
break;
case 97:
#line 280 "bison.y" /* yacc.c:1646 */
{ emit_limit((yyvsp[0].intval)); }
#line 2161 "bison.cu" /* yacc.c:1646 */
break;
case 98:
#line 282 "bison.y" /* yacc.c:1646 */
{ /* nil */
(yyval.intval) = 0;
}
#line 2169 "bison.cu" /* yacc.c:1646 */
break;
case 99:
#line 285 "bison.y" /* yacc.c:1646 */
{ emit_sort((yyvsp[0].strval), 0); }
#line 2175 "bison.cu" /* yacc.c:1646 */
break;
case 100:
#line 286 "bison.y" /* yacc.c:1646 */
{ emit_sort((yyvsp[-3].strval), (yyvsp[0].intval)); }
#line 2181 "bison.cu" /* yacc.c:1646 */
break;
case 101:
#line 287 "bison.y" /* yacc.c:1646 */
{ emit_presort((yyvsp[0].strval)); }
#line 2187 "bison.cu" /* yacc.c:1646 */
break;
#line 2191 "bison.cu" /* yacc.c:1646 */
default: break;
}
/* User semantic actions sometimes alter yychar, and that requires
that yytoken be updated with the new translation. We take the
approach of translating immediately before every use of yytoken.
One alternative is translating here after every semantic action,
but that translation would be missed if the semantic action invokes
YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
incorrect destructor might then be invoked immediately. In the
case of YYERROR or YYBACKUP, subsequent parser actions might lead
to an incorrect destructor call or verbose syntax error message
before the lookahead is translated. */
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now 'shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*--------------------------------------.
| yyerrlab -- here on detecting error. |
`--------------------------------------*/
yyerrlab:
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
yyssp, yytoken)
{
char const *yymsgp = YY_("syntax error");
int yysyntax_error_status;
yysyntax_error_status = YYSYNTAX_ERROR;
if (yysyntax_error_status == 0)
yymsgp = yymsg;
else if (yysyntax_error_status == 1)
{
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
if (!yymsg)
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
yysyntax_error_status = 2;
}
else
{
yysyntax_error_status = YYSYNTAX_ERROR;
yymsgp = yymsg;
}
}
yyerror (yymsgp);
if (yysyntax_error_status == 2)
goto yyexhaustedlab;
}
# undef YYSYNTAX_ERROR
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule whose action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (!yypact_value_is_default (yyn))
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
YY_IGNORE_MAYBE_UNINITIALIZED_END
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined yyoverflow || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
{
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
yytoken = YYTRANSLATE (yychar);
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
}
/* Do not reclaim the symbols of the rule whose action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
return yyresult;
}
#line 289 "bison.y" /* yacc.c:1906 */
// Parse-phase flag shared with the bison semantic actions:
// 0 during the first (scan) pass that collects referenced columns/vars,
// 1 during the second (execution) pass.  Set in execute_file()/alenkaExecute().
bool scan_state;
// Reset to 0 before each parse pass; presumably incremented by the
// per-statement grammar actions — TODO confirm against the .y actions.
unsigned int statement_count;
// Wall-clock timestamp captured at the start of the execution pass,
// stored as time(0)*1000 (i.e. whole seconds scaled to milliseconds).
time_t curr_time;
// Runs Alenka either in batch mode (parse and execute the SQL script named
// by the last argument) or in interactive mode (read statements from stdin).
// Recognized flags:
//   -l <MB>       load/process limit in MB (default 1000, i.e. 1GB)
//   -v            verbose timing output
//   -delta        enable delta storage option
//   -ssd          enable ssd storage option
//   -precision N  decimal precision
//   -i            interactive shell
//   -s <script>   execute a single scripted statement interactively, then exit
// Both modes use a two-pass scheme: pass 1 parses with scan_state==0 to
// collect referenced variables, pass 2 re-parses with scan_state==1 to
// execute.  Returns 0 on completion; exits the process on fatal I/O or
// parse errors in batch mode.
int execute_file(int ac, char **av)
{
    bool just_once = 0;
    string script;

    // Defaults; overridden by the command-line switches below.
    process_count = 1000000000; //1GB by default
    verbose = 0;
    ssd = 0;
    delta = 0;
    total_buffer_size = 0;
    hash_seed = 100;

    for (int i = 1; i < ac; i++) {
        // BUGFIX: flags that take a value now verify the value exists
        // (i + 1 < ac) instead of reading past the end of av[].
        if(strcmp(av[i],"-l") == 0 && i + 1 < ac) {
            process_count = 1000000*atoff(av[i+1]);
        }
        else if(strcmp(av[i],"-v") == 0) {
            verbose = 1;
        }
        else if(strcmp(av[i],"-delta") == 0) {
            delta = 1;
        }
        else if(strcmp(av[i],"-ssd") == 0) {
            ssd = 1;
        }
        else if(strcmp(av[i],"-precision") == 0 && i + 1 < ac) {
            prs = atoi(av[i+1]);
        }
        else if(strcmp(av[i],"-i") == 0) {
            interactive = 1;
            break;
        }
        else if(strcmp(av[i],"-s") == 0 && i + 1 < ac) {
            just_once = 1;
            interactive = 1;
            script = av[i+1];
        };
    };

    load_col_data(data_dict, "data.dictionary");
    tot_disk = 0;

    if (!interactive) {
        // Pass 1: scan the script for referenced columns/vars.
        if((yyin = fopen(av[ac-1], "r")) == nullptr) {
            perror(av[ac-1]);
            exit(1);
        };
        if(yyparse()) {
            printf("SQL scan parse failed\n");
            exit(1);
        };
        fclose(yyin);   // BUGFIX: pass-1 handle was leaked when yyin was reassigned

        scan_state = 1;
        std::clock_t start1 = std::clock();
        load_vars();

        statement_count = 0;
        clean_queues();
        filter_var.clear();

        // Pass 2: re-open and execute.
        if((yyin = fopen(av[ac-1], "r")) == nullptr) {   // BUGFIX: was unchecked
            perror(av[ac-1]);
            exit(1);
        };
        PROC_FLUSH_BUF ( yyin );
        curr_time = time(0)*1000;

        if(!yyparse()) {
            if(verbose)
                cout << "SQL scan parse worked " << endl;
        }
        else
            cout << "SQL scan parse failed" << endl;
        fclose(yyin);

        for (auto it=varNames.begin() ; it != varNames.end(); ++it ) {
            (*it).second->free();
        };

        if(verbose) {
            cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
            cout<< "disk time " << ( tot_disk / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
        };
    }
    else {
        //context = CreateCudaDevice(0, nullptr, verbose);
        if(!just_once)
            getline(cin, script);

        while (script != "exit" && script != "EXIT") {
            used_vars.clear();

            // Pass 1: scan.
            yy_scan_string(script.c_str());
            scan_state = 0;
            statement_count = 0;
            clean_queues();
            if(yyparse()) {
                printf("SQL scan parse failed \n");
                getline(cin, script);
                continue;
            };

            // Pass 2: execute.
            scan_state = 1;
            load_vars();
            statement_count = 0;
            clean_queues();
            filter_var.clear();
            yy_scan_string(script.c_str());

            std::clock_t start1 = std::clock();
            curr_time = time(0)*1000;
            if(!yyparse()) {
                if(verbose)
                    cout << "SQL scan parse worked " << endl;
            };
            for (auto it=varNames.begin() ; it != varNames.end(); ++it ) {
                (*it).second->free();
            };
            varNames.clear();

            if(verbose) {
                cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
            };
            if(!just_once)
                getline(cin, script);
            else
                script = "exit";
        };

        // Release pinned host buffers and cached index structures.
        while(!buffer_names.empty()) {
            cudaFreeHost(buffers[buffer_names.front()]);
            buffer_sizes.erase(buffer_names.front());
            buffers.erase(buffer_names.front());
            buffer_names.pop();
        };
        for(auto it = index_buffers.begin(); it != index_buffers.end();it++) {
            cudaFreeHost(it->second);
        };
        // BUGFIX: idx_vals.clear() was previously called INSIDE this loop,
        // invalidating the iterators mid-iteration (undefined behavior).
        // Free every device pointer first, then clear the map once.
        for(auto it = idx_vals.begin(); it != idx_vals.end();it++) {
            cudaFree(it->second);
        };
        idx_vals.clear();
    };

    if(save_dict) {
        save_col_data(data_dict,"data.dictionary");
    };

    if(alloced_sz) {
        cudaFree(alloced_tmp);
        alloced_sz = 0;
    };
    if(scratch.size()) {
        scratch.resize(0);
        scratch.shrink_to_fit();
    };
    if(rcol_dev.size()) {
        rcol_dev.resize(0);
        rcol_dev.shrink_to_fit();
    };
    return 0;
}
//external c global to report errors
//char alenka_err[4048];
// Parses and executes a single SQL statement string, using the same
// two-pass scheme as execute_file(): pass 1 (scan_state==0) collects the
// referenced variables, pass 2 (scan_state==1) executes.  Returns the
// yyparse() result of the first pass (0 on success, non-zero on parse
// failure); note the second pass is attempted even if the first failed,
// matching the original control flow.
int alenkaExecute(char *s)
{
    YY_BUFFER_STATE bp;

    total_buffer_size = 0;
    scan_state = 0;
    load_col_data(data_dict, "data.dictionary");

    // Unconditional init avoids a maybe-uninitialized read pattern;
    // 'start' is still only consulted when verbose is set.
    std::clock_t start = std::clock();

    // Pass 1: scan.
    bp = yy_scan_string(s);
    yy_switch_to_buffer(bp);
    int ret = yyparse();
    //printf("execute: returned [%d]\n", ret);
    if(!ret) {
        if(verbose)
            cout << "SQL scan parse worked" << endl;
    }
    yy_delete_buffer(bp);   // BUGFIX: the first scan buffer was never freed (leak per call)

    // Pass 2: execute.
    scan_state = 1;
    load_vars();
    statement_count = 0;
    clean_queues();

    bp = yy_scan_string(s);
    yy_switch_to_buffer(bp);
    if(!yyparse()) {
        if(verbose)
            cout << "SQL scan parse worked " << endl;
    }
    else
        cout << "SQL scan parse failed" << endl;
    yy_delete_buffer(bp);

    // Clear Vars
    for (auto it=varNames.begin() ; it != varNames.end(); ++it ) {
        (*it).second->free();
    };
    varNames.clear();

    if(verbose)
        cout<< "statement time " << ( ( std::clock() - start ) / (double)CLOCKS_PER_SEC ) << endl;

    if(save_dict)
        save_col_data(data_dict,"data.dictionary");

    return ret;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.