// NOTE: dataset extraction artifact removed (stray "text | kind" table header).
#include "flowfilter/gpu/util.h"
#include "flowfilter/gpu/error.h"
#include "flowfilter/gpu/propagation.h"
#include "flowfilter/gpu/device/propagation_k.h"
#include "flowfilter/gpu/device/misc_k.h"
namespace flowfilter {
namespace gpu {
/**
 * \brief Default constructor. Creates an unconfigured propagator.
 *
 * setInputFlow(), setIterations() and configure() must be called
 * before compute() can run.
 */
FlowPropagator::FlowPropagator() :
    Stage() {

    // stage starts unconfigured, with no input flow attached
    __configured = false;
    __inputFlowSet = false;
    __invertInputFlow = false;

    // no iterations scheduled yet, hence a zero time step;
    // default border width is 3 pixels
    __dt = 0.0f;
    __iterations = 0;
    __border = 3;
}
/**
 * \brief Creates a configured propagator for inputFlow running the given
 *      number of propagation iterations.
 *
 * \param inputFlow  2-channel float flow field to propagate.
 * \param iterations number of propagation iterations (must be > 0).
 */
FlowPropagator::FlowPropagator(GPUImage inputFlow,
    const int iterations) :
    FlowPropagator() {
    // Delegate default member initialization to the default constructor
    // (consistent with LaxWendroffPropagator), then wire the inputs.
    setInputFlow(inputFlow);
    setIterations(iterations);
    configure();
}
// Destructor. GPU buffers and textures are released by their own
// destructors (RAII), so there is nothing to do here.
FlowPropagator::~FlowPropagator() {
    // nothing to do...
}
/**
 * \brief Allocates GPU work buffers and textures and sets the kernel grid.
 *
 * Requires setInputFlow() to have been called; throws std::exception
 * otherwise. Must be called before compute().
 */
void FlowPropagator::configure() {

    if(!__inputFlowSet) {
        std::cerr << "ERROR: FlowPropagator::configure(): input flow has not been set" << std::endl;
        throw std::exception();
    }

    int height = __inputFlow.height();
    int width = __inputFlow.width();

    // wrap __inputFlow in a texture
    __inputFlowTexture = GPUTexture(__inputFlow, cudaChannelFormatKindFloat);

    // ping-pong buffers (2 float channels each): _X receives the result of
    // the X-propagation kernel, _Y the result of the Y-propagation kernel
    __propagatedFlow_X = GPUImage(height, width, 2, sizeof(float));
    __propagatedFlowTexture_X = GPUTexture(__propagatedFlow_X, cudaChannelFormatKindFloat);
    __propagatedFlow_Y = GPUImage(height, width, 2, sizeof(float));
    __propagatedFlowTexture_Y = GPUTexture(__propagatedFlow_Y, cudaChannelFormatKindFloat);

    // configure block and grid sizes
    __block = dim3(32, 32, 1);
    configureKernelGrid(height, width, __block, __grid);

    __configured = true;
}
/**
 * \brief Runs __iterations propagation steps on the input flow.
 *
 * Each iteration is an X-propagation followed by a Y-propagation,
 * ping-ponging between __propagatedFlow_X and __propagatedFlow_Y.
 * The final result is left in __propagatedFlow_Y (see getPropagatedFlow()).
 *
 * NOTE(review): unlike the other error paths in this class (which throw),
 * an unconfigured stage calls exit(-1) here.
 */
void FlowPropagator::compute() {

    startTiming();

    if(!__configured) {
        std::cerr << "ERROR: FlowPropagator::compute() stage not configured." << std::endl;
        exit(-1);
    }

    //#######################
    // First Iteration
    //#######################
    if(__invertInputFlow) {
        // invert __inputFlow and write it to __propagatedFlow_Y
        scalarProductF2_k<<<__grid, __block, 0, __stream>>>(
            __inputFlow.wrap<float2>(), -1.0f,
            __propagatedFlow_Y.wrap<float2>());

        // propagate in X using inverted flow written in __propagatedFlow_Y
        flowPropagateX_k<<<__grid, __block, 0, __stream>>>(
            __propagatedFlowTexture_Y.getTextureObject(),
            __propagatedFlow_X.wrap<float2>(), __dt, __border);
    } else {
        // Iterate in X using __inputFlow directly
        flowPropagateX_k<<<__grid, __block, 0, __stream>>>(
            __inputFlowTexture.getTextureObject(),
            __propagatedFlow_X.wrap<float2>(), __dt, __border);
    }

    // first iteration in Y
    flowPropagateY_k<<<__grid, __block, 0, __stream>>>(
        __propagatedFlowTexture_X.getTextureObject(),
        __propagatedFlow_Y.wrap<float2>(), __dt, __border);

    //#######################
    // Rest of iterations
    //#######################
    for(int n = 0; n < __iterations - 1; n ++) {

        // take as input __propagatedFlowY
        flowPropagateX_k<<<__grid, __block, 0, __stream>>>(
            __propagatedFlowTexture_Y.getTextureObject(),
            __propagatedFlow_X.wrap<float2>(), __dt, __border);

        flowPropagateY_k<<<__grid, __block, 0, __stream>>>(
            __propagatedFlowTexture_X.getTextureObject(),
            __propagatedFlow_Y.wrap<float2>(), __dt, __border);
    }

    stopTiming();
}
/**
 * \brief Sets the number of propagation iterations and derives the
 *      time step __dt = 1/N.
 *
 * \param N iterations, must be strictly positive.
 * \throws std::exception if N <= 0.
 */
void FlowPropagator::setIterations(const int N) {

    if(N <= 0) {
        // FIX: message previously said "less than zero" although the
        // condition also rejects N == 0.
        std::cerr << "ERROR: FlowPropagator::setIterations(): iterations should be greater than zero: "
            << N << std::endl;
        throw std::exception();
    }

    __iterations = N;
    __dt = 1.0f / float(__iterations);
}
// Returns the configured number of propagation iterations.
int FlowPropagator::getIterations() const {
    return __iterations;
}
// Returns the propagation time step (1 / iterations, see setIterations()).
float FlowPropagator::getDt() const {
    return __dt;
}
/**
 * \brief Sets the border width (in pixels) left untouched by propagation.
 *
 * \param border non-negative border width.
 * \throws std::exception if border < 0.
 */
void FlowPropagator::setBorder(const int border) {

    if(border < 0) {
        // FIX: "greater of equal" typo in the original message.
        std::cerr << "ERROR: FlowPropagator::setBorder(): border should be greater or equal to zero: "
            << border << std::endl;
        throw std::exception();
    }
    __border = border;
}
// Returns the configured border width in pixels.
int FlowPropagator::getBorder() const {
    return __border;
}
/**
 * \brief Sets the input optical-flow field.
 *
 * The image must have 2 channels of 4-byte (float) elements.
 *
 * \throws std::exception if the pixel layout is not 2 x float.
 */
void FlowPropagator::setInputFlow(flowfilter::gpu::GPUImage inputFlow) {

    // validate pixel layout: 2 channels, 4 bytes per element
    const int depth = inputFlow.depth();
    const int itemSize = inputFlow.itemSize();

    if(depth != 2) {
        std::cerr << "ERROR: FlowPropagator::setInputFlow(): input flow should have depth 2: "
            << depth << std::endl;
        throw std::exception();
    }

    if(itemSize != 4) {
        std::cerr << "ERROR: FlowPropagator::setInputFlow(): input flow should have item size 4: "
            << itemSize << std::endl;
        throw std::exception();
    }

    __inputFlow = inputFlow;
    __inputFlowSet = true;
}
// Returns the propagated flow field. compute() leaves the final
// result in __propagatedFlow_Y.
GPUImage FlowPropagator::getPropagatedFlow() {
    return __propagatedFlow_Y;
}
// If true, compute() multiplies the input flow by -1 before propagating.
void FlowPropagator::setInvertInputFlow(const bool invert) {
    __invertInputFlow = invert;
}
// Returns whether the input flow is inverted before propagation.
bool FlowPropagator::getInvertInputFlow() const {
    return __invertInputFlow;
}
//###############################################
// FlowPropagatorPayload
//###############################################
/**
 * \brief Default constructor. Creates an unconfigured payload propagator.
 *
 * setInputFlow(), setScalarPayload(), setVectorPayload(), setIterations()
 * and configure() must be called before compute().
 */
FlowPropagatorPayload::FlowPropagatorPayload() :
    Stage() {

    // stage starts unconfigured with no inputs attached
    __configured = false;
    __inputFlowSet = false;
    __scalarPayloadSet = false;
    __vectorPayloadSet = false;

    // no iterations scheduled yet, hence a zero time step;
    // default border width is 3 pixels
    __dt = 0.0f;
    __iterations = 0;
    __border = 3;
}
/**
 * \brief Creates a configured payload propagator.
 *
 * \param inputFlow     2-channel float flow field.
 * \param scalarPayload 1-channel float field propagated along the flow.
 * \param vectorPayload 2-channel float field propagated along the flow.
 * \param iterations    number of propagation iterations (must be > 0).
 */
FlowPropagatorPayload::FlowPropagatorPayload(GPUImage inputFlow,
    GPUImage scalarPayload,
    GPUImage vectorPayload,
    const int iterations) :
    FlowPropagatorPayload() {
    // Delegate default member initialization to the default constructor
    // (consistent with LaxWendroffPropagator), then wire the inputs.
    setInputFlow(inputFlow);
    setScalarPayload(scalarPayload);
    setVectorPayload(vectorPayload);
    setIterations(iterations);
    configure();
}
// Destructor. GPU buffers and textures are released by their own
// destructors (RAII), so there is nothing to do here.
FlowPropagatorPayload::~FlowPropagatorPayload() {
    // nothing to do
}
/**
 * \brief Allocates GPU buffers/textures for flow and both payloads and
 *      sets the kernel grid.
 *
 * Requires input flow, scalar payload and vector payload to have been set;
 * throws std::exception otherwise. Must be called before compute().
 */
void FlowPropagatorPayload::configure() {

    if(!__inputFlowSet) {
        std::cerr << "ERROR: FlowPropagatorPayload::configure(): input flow has not been set" << std::endl;
        throw std::exception();
    }

    if(!__scalarPayloadSet) {
        std::cerr << "ERROR: FlowPropagatorPayload::configure(): input scalar payload has not been set" << std::endl;
        throw std::exception();
    }

    if(!__vectorPayloadSet) {
        std::cerr << "ERROR: FlowPropagatorPayload::configure(): input vector payload has not been set" << std::endl;
        throw std::exception();
    }

    int height = __inputFlow.height();
    int width = __inputFlow.width();

    //##################
    // flow
    //##################
    // input texture plus X/Y ping-pong buffers (2 float channels)
    __inputFlowTexture = GPUTexture(__inputFlow, cudaChannelFormatKindFloat);
    __propagatedFlow_X = GPUImage(height, width, 2, sizeof(float));
    __propagatedFlowTexture_X = GPUTexture(__propagatedFlow_X, cudaChannelFormatKindFloat);
    __propagatedFlow_Y = GPUImage(height, width, 2, sizeof(float));
    __propagatedFlowTexture_Y = GPUTexture(__propagatedFlow_Y, cudaChannelFormatKindFloat);

    //##################
    // scalar payload
    //##################
    // same ping-pong scheme with a single float channel
    __inputScalarTexture = GPUTexture(__inputScalar, cudaChannelFormatKindFloat);
    __propagatedScalar_X = GPUImage(height, width, 1, sizeof(float));
    __propagatedScalarTexture_X = GPUTexture(__propagatedScalar_X, cudaChannelFormatKindFloat);
    __propagatedScalar_Y = GPUImage(height, width, 1, sizeof(float));
    __propagatedScalarTexture_Y = GPUTexture(__propagatedScalar_Y, cudaChannelFormatKindFloat);

    //##################
    // vector payload
    //##################
    // same ping-pong scheme with two float channels
    __inputVectorTexture = GPUTexture(__inputVector, cudaChannelFormatKindFloat);
    __propagatedVector_X = GPUImage(height, width, 2, sizeof(float));
    __propagatedVectorTexture_X = GPUTexture(__propagatedVector_X, cudaChannelFormatKindFloat);
    __propagatedVector_Y = GPUImage(height, width, 2, sizeof(float));
    __propagatedVectorTexture_Y = GPUTexture(__propagatedVector_Y, cudaChannelFormatKindFloat);

    // configure block and grid sizes
    __block = dim3(32, 32, 1);
    configureKernelGrid(height, width, __block, __grid);

    __configured = true;
}
/**
 * \brief Runs __iterations propagation steps on flow plus payloads.
 *
 * Each iteration propagates flow, scalar payload and vector payload
 * jointly in X, then in Y, ping-ponging between the _X and _Y buffers.
 * Final results are left in the _Y buffers (see the getters).
 */
void FlowPropagatorPayload::compute() {

    startTiming();

    if(!__configured) {
        // FIX: message previously reported "FlowPropagator::compute()",
        // the wrong class name.
        std::cerr << "ERROR: FlowPropagatorPayload::compute() stage not configured." << std::endl;
        exit(-1);
    }

    // First iteration takes as input __inputFlow
    flowPropagatePayloadX_k<<<__grid, __block, 0, __stream>>>(
        __inputFlowTexture.getTextureObject(),
        __propagatedFlow_X.wrap<float2>(),
        __inputScalarTexture.getTextureObject(),
        __propagatedScalar_X.wrap<float>(),
        __inputVectorTexture.getTextureObject(),
        __propagatedVector_X.wrap<float2>(), __dt, __border);

    flowPropagatePayloadY_k<<<__grid, __block, 0, __stream>>>(
        __propagatedFlowTexture_X.getTextureObject(),
        __propagatedFlow_Y.wrap<float2>(),
        __propagatedScalarTexture_X.getTextureObject(),
        __propagatedScalar_Y.wrap<float>(),
        __propagatedVectorTexture_X.getTextureObject(),
        __propagatedVector_Y.wrap<float2>(), __dt, __border);

    // Rest of iterations
    for(int n = 0; n < __iterations - 1; n ++) {

        // take as input __propagatedFlowY
        flowPropagatePayloadX_k<<<__grid, __block, 0, __stream>>>(
            __propagatedFlowTexture_Y.getTextureObject(),
            __propagatedFlow_X.wrap<float2>(),
            __propagatedScalarTexture_Y.getTextureObject(),
            __propagatedScalar_X.wrap<float>(),
            __propagatedVectorTexture_Y.getTextureObject(),
            __propagatedVector_X.wrap<float2>(), __dt, __border);

        flowPropagatePayloadY_k<<<__grid, __block, 0, __stream>>>(
            __propagatedFlowTexture_X.getTextureObject(),
            __propagatedFlow_Y.wrap<float2>(),
            __propagatedScalarTexture_X.getTextureObject(),
            __propagatedScalar_Y.wrap<float>(),
            __propagatedVectorTexture_X.getTextureObject(),
            __propagatedVector_Y.wrap<float2>(), __dt, __border);
    }

    stopTiming();
}
/**
 * \brief Sets the number of propagation iterations and derives the
 *      time step __dt = 1/N.
 *
 * \param N iterations, must be strictly positive.
 * \throws std::exception if N <= 0.
 */
void FlowPropagatorPayload::setIterations(const int N) {

    if(N <= 0) {
        // FIX: message previously said "less than zero" although the
        // condition also rejects N == 0.
        std::cerr << "ERROR: FlowPropagatorPayload::setIterations(): iterations should be greater than zero: "
            << N << std::endl;
        throw std::exception();
    }

    __iterations = N;
    __dt = 1.0f / float(__iterations);
}
// Returns the configured number of propagation iterations.
int FlowPropagatorPayload::getIterations() const {
    return __iterations;
}
// Returns the propagation time step (1 / iterations, see setIterations()).
float FlowPropagatorPayload::getDt() const {
    return __dt;
}
/**
 * \brief Sets the border width (in pixels) left untouched by propagation.
 *
 * \param border non-negative border width.
 * \throws std::exception if border < 0.
 */
void FlowPropagatorPayload::setBorder(const int border) {

    if(border < 0) {
        // FIX: "greater of equal" typo in the original message.
        std::cerr << "ERROR: FlowPropagatorPayload::setBorder(): border should be greater or equal to zero: "
            << border << std::endl;
        throw std::exception();
    }
    __border = border;
}
// Returns the configured border width in pixels.
int FlowPropagatorPayload::getBorder() const {
    return __border;
}
//#########################
// Stage inputs
//#########################
/**
 * \brief Sets the input optical-flow field.
 *
 * The image must have 2 channels of 4-byte (float) elements.
 *
 * \throws std::exception if the pixel layout is not 2 x float.
 */
void FlowPropagatorPayload::setInputFlow(GPUImage inputFlow) {

    // FIX: messages previously reported "FlowPropagator::", the wrong class.
    if(inputFlow.depth() != 2) {
        std::cerr << "ERROR: FlowPropagatorPayload::setInputFlow(): input flow should have depth 2: "
            << inputFlow.depth() << std::endl;
        throw std::exception();
    }

    if(inputFlow.itemSize() != 4) {
        std::cerr << "ERROR: FlowPropagatorPayload::setInputFlow(): input flow should have item size 4: "
            << inputFlow.itemSize() << std::endl;
        throw std::exception();
    }

    __inputFlow = inputFlow;
    __inputFlowSet = true;
}
/**
 * \brief Sets the scalar payload propagated alongside the flow.
 *
 * The image must have 1 channel of 4-byte (float) elements and match
 * the input flow's height and width (set the flow first).
 *
 * \throws std::exception on layout or shape mismatch.
 */
void FlowPropagatorPayload::setScalarPayload(GPUImage scalarPayload) {

    // FIX: messages previously used the wrong method name
    // ("setscalarPayload") and described the scalar payload as
    // "input flow ... depth 2" while the check enforces depth 1.
    if(scalarPayload.depth() != 1) {
        std::cerr << "ERROR: FlowPropagatorPayload::setScalarPayload(): scalar payload should have depth 1: "
            << scalarPayload.depth() << std::endl;
        throw std::exception();
    }

    if(scalarPayload.itemSize() != 4) {
        std::cerr << "ERROR: FlowPropagatorPayload::setScalarPayload(): scalar payload should have item size 4: "
            << scalarPayload.itemSize() << std::endl;
        throw std::exception();
    }

    // check size with respect to __inputFlow
    if(scalarPayload.height() != __inputFlow.height() ||
        scalarPayload.width() != __inputFlow.width()) {
        // FIX: added the missing space ("shapedoes" -> "shape does").
        std::cerr << "ERROR: FlowPropagatorPayload::setScalarPayload(): scalar field shape " <<
            "does not match with input flow" << std::endl;
        throw std::exception();
    }

    __inputScalar = scalarPayload;
    __scalarPayloadSet = true;
}
/**
 * \brief Sets the vector payload propagated alongside the flow.
 *
 * The image must have 2 channels of 4-byte (float) elements and match
 * the input flow's height and width (set the flow first).
 *
 * \throws std::exception on layout or shape mismatch.
 */
void FlowPropagatorPayload::setVectorPayload(GPUImage vectorPayload) {

    // FIX: messages previously used the wrong method name
    // ("setvectorPayload") and described the payload as "input flow"
    // / "scalar field".
    if(vectorPayload.depth() != 2) {
        std::cerr << "ERROR: FlowPropagatorPayload::setVectorPayload(): vector payload should have depth 2: "
            << vectorPayload.depth() << std::endl;
        throw std::exception();
    }

    if(vectorPayload.itemSize() != 4) {
        std::cerr << "ERROR: FlowPropagatorPayload::setVectorPayload(): vector payload should have item size 4: "
            << vectorPayload.itemSize() << std::endl;
        throw std::exception();
    }

    // check size with respect to __inputFlow
    if(vectorPayload.height() != __inputFlow.height() ||
        vectorPayload.width() != __inputFlow.width()) {
        // FIX: added the missing space ("shapedoes" -> "shape does").
        std::cerr << "ERROR: FlowPropagatorPayload::setVectorPayload(): vector field shape " <<
            "does not match with input flow" << std::endl;
        throw std::exception();
    }

    __inputVector = vectorPayload;
    __vectorPayloadSet = true;
}
//#########################
// Stage outputs
//#########################
// Returns the propagated flow. compute() leaves the final result
// in __propagatedFlow_Y.
GPUImage FlowPropagatorPayload::getPropagatedFlow() {
    return __propagatedFlow_Y;
}
// Returns the propagated scalar payload. compute() leaves the final
// result in __propagatedScalar_Y.
GPUImage FlowPropagatorPayload::getPropagatedScalar() {
    return __propagatedScalar_Y;
}
// Returns the propagated vector payload. compute() leaves the final
// result in __propagatedVector_Y.
GPUImage FlowPropagatorPayload::getPropagatedVector() {
    return __propagatedVector_Y;
}
//###############################################
// LaxWendroffPropagator
//###############################################
/**
 * \brief Default constructor. Creates an unconfigured Lax-Wendroff stage.
 *
 * setInputFlow(), setInputImage() and configure() must be called
 * before compute().
 */
LaxWendroffPropagator::LaxWendroffPropagator() {

    // stage starts unconfigured with no inputs attached
    __configured = false;
    __inputFlowSet = false;
    __inputImageSet = false;

    // no iterations scheduled yet, hence a zero time step
    __dt = 0.0f;
    __iterations = 0;
}
/**
 * \brief Creates a configured Lax-Wendroff propagator for the given
 *      flow and image.
 *
 * NOTE(review): unlike the other propagator stages, this constructor does
 * not set iterations; __iterations stays 0 and __dt stays 0.0f (from the
 * delegated default constructor) until setIterations() is called.
 */
LaxWendroffPropagator::LaxWendroffPropagator(GPUImage inputFlow,
    GPUImage inputImage) :
    LaxWendroffPropagator() {

    setInputFlow(inputFlow);
    setInputImage(inputImage);
    configure();
}
// Destructor. GPU buffers and textures are released by their own
// destructors (RAII), so there is nothing to do here.
LaxWendroffPropagator::~LaxWendroffPropagator() {
    // nothing to do
}
/**
 * \brief Allocates GPU buffers/textures and sets the kernel grid.
 *
 * Requires input flow and input image to have been set; throws
 * std::exception otherwise. Must be called before compute().
 */
void LaxWendroffPropagator::configure() {

    if(!__inputFlowSet) {
        std::cerr << "ERROR: LaxWendroffPropagator::configure(): input flow has not been set" << std::endl;
        throw std::exception();
    }

    if(!__inputImageSet) {
        std::cerr << "ERROR: LaxWendroffPropagator::configure(): input image has not been set" << std::endl;
        throw std::exception();
    }

    int height = __inputFlow.height();
    int width = __inputFlow.width();

    //##################
    // flow
    //##################
    // the flow is read-only here, so only a texture wrap is needed
    __inputFlowTexture = GPUTexture(__inputFlow, cudaChannelFormatKindFloat);

    //##################
    // input image
    //##################
    // X/Y ping-pong buffers sized like the input image (same channel count)
    __inputImageTexture = GPUTexture(__inputImage, cudaChannelFormatKindFloat);
    __propagatedImage_X = GPUImage(height, width, __inputImage.depth(), sizeof(float));
    __propagatedImageTexture_X = GPUTexture(__propagatedImage_X, cudaChannelFormatKindFloat);
    __propagatedImage_Y = GPUImage(height, width, __inputImage.depth(), sizeof(float));
    __propagatedImageTexture_Y = GPUTexture(__propagatedImage_Y, cudaChannelFormatKindFloat);

    // configure block and grid sizes
    __block = dim3(32, 32, 1);
    configureKernelGrid(height, width, __block, __grid);

    __configured = true;
}
/**
 * \brief Runs __iterations Lax-Wendroff propagation steps on the image.
 *
 * Each iteration propagates along Y then along X, ping-ponging between
 * __propagatedImage_Y and __propagatedImage_X. The final result is left
 * in __propagatedImage_X (see getPropagatedImage()). The kernel template
 * is instantiated for float (depth 1) or float4 (depth 4) images; other
 * depths are rejected earlier by setInputImage().
 */
void LaxWendroffPropagator::compute() {

    startTiming();

    if(!__configured) {
        std::cerr << "ERROR: LaxWendroffPropagator::compute(): stage not configured" << std::endl;
        throw std::exception();
    }

    if(__inputImage.depth() == 1) {

        // first iteration: read from the input image texture
        LaxWendroffY_k<float><<<__grid, __block, 0, __stream>>>(
            __inputFlowTexture.getTextureObject(),
            __inputImageTexture.getTextureObject(),
            __propagatedImage_Y.wrap<float>(),
            __dt);

        LaxWendroffX_k<float><<<__grid, __block, 0, __stream>>>(
            __inputFlowTexture.getTextureObject(),
            __propagatedImageTexture_Y.getTextureObject(),
            __propagatedImage_X.wrap<float>(),
            __dt);

        // remaining iterations: read from the previous X result
        for(int k = 0; k < __iterations -1; k ++) {

            LaxWendroffY_k<float><<<__grid, __block, 0, __stream>>>(
                __inputFlowTexture.getTextureObject(),
                __propagatedImageTexture_X.getTextureObject(),
                __propagatedImage_Y.wrap<float>(),
                __dt);

            LaxWendroffX_k<float><<<__grid, __block, 0, __stream>>>(
                __inputFlowTexture.getTextureObject(),
                __propagatedImageTexture_Y.getTextureObject(),
                __propagatedImage_X.wrap<float>(),
                __dt);
        }

    } else if(__inputImage.depth() == 4) {

        // same scheme as above, instantiated for 4-channel images
        LaxWendroffY_k<float4><<<__grid, __block, 0, __stream>>>(
            __inputFlowTexture.getTextureObject(),
            __inputImageTexture.getTextureObject(),
            __propagatedImage_Y.wrap<float4>(),
            __dt);

        LaxWendroffX_k<float4><<<__grid, __block, 0, __stream>>>(
            __inputFlowTexture.getTextureObject(),
            __propagatedImageTexture_Y.getTextureObject(),
            __propagatedImage_X.wrap<float4>(),
            __dt);

        // remaining iterations
        for(int k = 0; k < __iterations -1; k ++) {

            LaxWendroffY_k<float4><<<__grid, __block, 0, __stream>>>(
                __inputFlowTexture.getTextureObject(),
                __propagatedImageTexture_X.getTextureObject(),
                __propagatedImage_Y.wrap<float4>(),
                __dt);

            LaxWendroffX_k<float4><<<__grid, __block, 0, __stream>>>(
                __inputFlowTexture.getTextureObject(),
                __propagatedImageTexture_Y.getTextureObject(),
                __propagatedImage_X.wrap<float4>(),
                __dt);
        }
    }

    stopTiming();
}
/**
 * \brief Sets the number of propagation iterations and derives the
 *      time step __dt = 1/N.
 *
 * \param N iterations, must be strictly positive.
 * \throws std::exception if N <= 0.
 */
void LaxWendroffPropagator::setIterations(const int N) {

    if(N <= 0) {
        // FIX: message previously said "less than zero" although the
        // condition also rejects N == 0.
        std::cerr << "ERROR: LaxWendroffPropagator::setIterations(): iterations should be greater than zero: "
            << N << std::endl;
        throw std::exception();
    }

    __iterations = N;
    __dt = 1.0f / float(__iterations);
}
// Returns the configured number of propagation iterations.
int LaxWendroffPropagator::getIterations() const {
    return __iterations;
}
// Returns the propagation time step (1 / iterations, see setIterations()).
float LaxWendroffPropagator::getDt() const {
    return __dt;
}
/**
 * \brief Sets the input optical-flow field.
 *
 * The image must have 2 channels of 4-byte (float) elements.
 *
 * \throws std::exception if the pixel layout is not 2 x float.
 */
void LaxWendroffPropagator::setInputFlow(GPUImage inputFlow) {

    // validate pixel layout: 2 channels, 4 bytes per element
    const int depth = inputFlow.depth();
    const int itemSize = inputFlow.itemSize();

    if(depth != 2) {
        std::cerr << "ERROR: LaxWendroffPropagator::setInputFlow(): input flow should have depth 2: "
            << depth << std::endl;
        throw std::exception();
    }

    if(itemSize != 4) {
        std::cerr << "ERROR: LaxWendroffPropagator::setInputFlow(): input flow should have item size 4: "
            << itemSize << std::endl;
        throw std::exception();
    }

    __inputFlow = inputFlow;
    __inputFlowSet = true;
}
/**
 * \brief Sets the image propagated along the flow.
 *
 * The image must have 1 or 4 channels of 4-byte (float) elements and
 * match the input flow's height and width (set the flow first).
 *
 * \throws std::exception on layout or shape mismatch.
 */
void LaxWendroffPropagator::setInputImage(GPUImage img) {

    if(img.depth() != 1 && img.depth() != 4) {
        // FIX: message previously said "input flow" for the image check.
        std::cerr << "ERROR: LaxWendroffPropagator::setInputImage(): input image should have depth 1 or 4, got: "
            << img.depth() << std::endl;
        throw std::exception();
    }

    if(img.itemSize() != 4) {
        std::cerr << "ERROR: LaxWendroffPropagator::setInputImage(): input image should have item size 4: "
            << img.itemSize() << std::endl;
        throw std::exception();
    }

    // check size with respect to __inputFlow
    if(img.height() != __inputFlow.height() ||
        img.width() != __inputFlow.width()) {
        // FIX: added the missing space ("shapedoes" -> "shape does").
        std::cerr << "ERROR: LaxWendroffPropagator::setInputImage(): image shape " <<
            "does not match with input flow" << std::endl;
        throw std::exception();
    }

    __inputImage = img;
    __inputImageSet = true;
}
// Returns the input flow field this stage propagates along.
GPUImage LaxWendroffPropagator::getFlow() {
    return __inputFlow;
}
// Returns the propagated image. compute() ends each iteration with the
// X-propagation kernel, so the final result lives in __propagatedImage_X.
GPUImage LaxWendroffPropagator::getPropagatedImage() {
    return __propagatedImage_X;
}
}; // namespace gpu
}; // namespace flowfilter
#include <assert.h>
#include <cuda_fp16.h>
#include <cfloat>
#include <limits>
#include <stdint.h>
#include <cuda_fp16.h>
#include <c10/macros/Macros.h>
namespace {
// Returns the smallest exponent e such that (1 << e) >= value.
int log2_ceil(int value) {
    int exponent = 0;
    while ((1 << exponent) < value) {
        ++exponent;
    }
    return exponent;
}
// Binary addition functor used as the reduction operator in warp_reduce.
template<typename T>
struct Add {
    __device__ __forceinline__ T operator()(T a, T b) const {
        return a + b;
    }
};
// Binary maximum functor used as the reduction operator in warp_reduce.
template<typename T>
struct Max {
    __device__ __forceinline__ T operator()(T a, T b) const {
        return a < b ? b : a;
    }
};
// Reduces sum[0..WARP_BATCH) across the WARP_SIZE lanes of a warp using
// XOR shuffles with halving offsets; every lane ends up with the reduced
// value for each of its WARP_BATCH accumulators. ReduceOp selects the
// combining operation (Add or Max above). WARP_SIZE must be a power of two.
template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce(acc_t* sum) {
    ReduceOp<acc_t> r;
    #pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        #pragma unroll
        for (int i = 0; i < WARP_BATCH; ++i) {
            // exchange partial results with the lane at distance `offset`
            acc_t b = WARP_SHFL_XOR(sum[i], offset, WARP_SIZE);
            sum[i] = r(sum[i], b);
        }
    }
}
// The softmax_warp_* methods perform softmax forward and backward propagation on samples spanning the fast dimension.
// Each sample contains element_count scalar elements. element_count can be any integer value <= 1024.
// The template arguments have the following meaning:
// One "WARP" works on one "BATCH". One "BATCH" contains "WARP_BATCH" samples.
// WARP_BATCH is equal to 1 when element_count is large, and > 1 when element_count is small.
// A "WARP" contains "C10_WARPS_SIZE" threads, these treads are guaranteed to belong to the same warp.
// This is important because it means only __shfl_ instructions are required for reductions.
// Note that this means WARP_SIZE must be a power of two and <= architecture warp size.
// CUDA warp size is 32 for all existing GPU architectures, but there is no guarantee this will not change for future arch.
// ROCm warp size is 64 for all currently ROCm-supported GPU architectures, but this may change for future archs.
// is_log_softmax is a flag indicating whether SoftMax or LogSoftMax should be computed.
// The template can be instantiated with any floating point type for the type arguments input_t, output_t and acc_t.
// This allows SoftMax to be fused with a cast immediately following the SoftMax.
// For instance:
// input_t=half, acc_t=float, output_t=half => read half tensor, float accumulators, write half tensor.
// input_t=half, acc_t=float, output_t=float => read half tensor, float accumulators, write float tensor.
// input_t_float, acc_t=float, output_t=half => read float tensor, float accumulators, write half tensor.
// Forward softmax / log-softmax over the fast dimension: one warp handles
// WARP_BATCH rows of `element_count` elements each, entirely in registers.
// See the block comment above for the template-parameter contract.
//
// NOTE(review): rows within a warp are addressed with i*element_count
// after the pointers are offset by first_batch * stride — this assumes
// stride == element_count for consecutive rows; confirm with callers.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_forward(output_t *dst, const input_t *src, int batch_size, int stride, int element_count)
{
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_forward_kernel.
    constexpr int next_power_of_two = 1 << log2_elements;
    constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
    constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
    constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;

    int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;

    // batch_size might not be a multiple of WARP_BATCH. Check how
    // many batches have to computed within this WARP.
    int local_batches = batch_size - first_batch;
    if (local_batches > WARP_BATCH)
        local_batches = WARP_BATCH;

    // there might be multiple batches per warp. compute the index within the batch
    int local_idx = threadIdx.x;

    src += first_batch * stride + local_idx;
    dst += first_batch * stride + local_idx;

    // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
    // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
    // the nested loops.
    // This should have no impact on performance because the loops are unrolled anyway.

    // load data from global memory; out-of-range slots get -inf so they
    // contribute nothing to the max or the exp-sum
    acc_t elements[WARP_BATCH][WARP_ITERATIONS];
    for (int i = 0; i < WARP_BATCH; ++i) {
        int batch_element_count = (i >= local_batches) ? 0 : element_count;
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < batch_element_count) {
                elements[i][it] = src[i*element_count+it*WARP_SIZE];
            } else {
                elements[i][it] = -std::numeric_limits<acc_t>::infinity();
            }
        }
    }

    // compute max_value (per-thread, then across the warp) for the
    // numerically stable exp(x - max) formulation
    acc_t max_value[WARP_BATCH];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        max_value[i] = elements[i][0];
        #pragma unroll
        for (int it = 1; it < WARP_ITERATIONS; ++it) {
            max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);

    // accumulate exp(x - max); for the plain-softmax path the exponentials
    // are kept in `elements` for the final division
    acc_t sum[WARP_BATCH] { 0.0f };
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            if (is_log_softmax) {
                sum[i] += std::exp(elements[i][it] - max_value[i]);
            } else {
                elements[i][it] = std::exp(elements[i][it] - max_value[i]);
                sum[i] += elements[i][it];
            }
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

    // store result: log-softmax writes x - max - log(sum),
    // softmax writes exp(x - max) / sum
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        if (i >= local_batches)
            break;
        if (is_log_softmax) sum[i] = std::log(sum[i]);
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < element_count) {
                if (is_log_softmax) {
                    dst[i*element_count+it*WARP_SIZE] = elements[i][it] - max_value[i] - sum[i];
                } else {
                    dst[i*element_count+it*WARP_SIZE] = elements[i][it] / sum[i];
                }
            } else {
                break;
            }
        }
    }
}
// Backward pass for warp softmax / log-softmax. One warp handles
// WARP_BATCH rows; grad and output are loaded into registers, the
// per-row gradient sum is reduced across the warp, and the gradient
// is written as grad - exp(output)*sum (log-softmax) or
// grad - output*sum (softmax).
//
// NOTE(review): for the non-log path the reduction sums raw `grad`;
// the softmax Jacobian needs sum(grad * output), so this assumes the
// caller passes grad already multiplied by output — confirm with the
// calling convention.
template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax>
__global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *output, int batch_size, int stride, int element_count)
{
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_backward_kernel.
    constexpr int next_power_of_two = 1 << log2_elements;
    constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
    constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
    constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;

    int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;

    // batch_size might not be a multiple of WARP_BATCH. Check how
    // many batches have to computed within this WARP.
    int local_batches = batch_size - first_batch;
    if (local_batches > WARP_BATCH)
        local_batches = WARP_BATCH;

    // there might be multiple batches per warp. compute the index within the batch
    int local_idx = threadIdx.x % WARP_SIZE;

    // the first element to process by the current thread
    int thread_offset = first_batch * stride + local_idx;
    grad += thread_offset;
    output += thread_offset;
    gradInput += thread_offset;

    // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
    // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
    // the nested loops.
    // This should have no impact on performance because the loops are unrolled anyway.

    // load data from global memory; out-of-range slots get zero so they
    // contribute nothing to the gradient sum
    acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS];
    acc_t output_reg[WARP_BATCH][WARP_ITERATIONS];
    for (int i = 0; i < WARP_BATCH; ++i) {
        int batch_element_count = (i >= local_batches) ? 0 : element_count;
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < batch_element_count) {
                grad_reg[i][it] = grad[i*element_count+it*WARP_SIZE];
                output_reg[i][it] = output[i*element_count+it*WARP_SIZE];
            } else {
                grad_reg[i][it] = acc_t(0);
                output_reg[i][it] = acc_t(0);
            }
        }
    }

    // per-row sum of the gradient terms, reduced across the warp
    acc_t sum[WARP_BATCH];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        sum[i] = grad_reg[i][0];
        #pragma unroll
        for (int it = 1; it < WARP_ITERATIONS; ++it) {
            sum[i] += grad_reg[i][it];
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

    // store result
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        if (i >= local_batches)
            break;
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            int element_index = local_idx + it * WARP_SIZE;
            if (element_index < element_count) {
                // compute gradients
                if (is_log_softmax) {
                    gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]);
                } else {
                    gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - output_reg[i][it] * sum[i]);
                }
            }
        }
    }
}
} // end of anonymous namespace
// Host-side launcher: derives the kernel geometry from softmax_elements
// and instantiates softmax_warp_forward for the matching log2_elements.
// Supports 0 < softmax_elements <= 1024; zero elements is a no-op.
template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_softmax_forward(output_t *dst, const input_t *src, int softmax_elements, int softmax_elements_stride, int batch_count)
{
    TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
    if (softmax_elements == 0) {
        return;
    }

    const int log2_elements = log2_ceil(softmax_elements);
    const int next_power_of_two = 1 << log2_elements;

    // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
    const int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

    // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
    const int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

    // use 128 threads per block to maximimize gpu utilization
    constexpr int threads_per_block = 128;

    const int warps_per_block = threads_per_block / warp_size;
    const int batches_per_block = warps_per_block * batches_per_warp;
    const int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
    dim3 threads(warp_size, warps_per_block, 1);

    // One case per power of two up to 1024; the macro keeps the launch
    // boilerplate in a single place.
    #define LAUNCH_SOFTMAX_WARP_FORWARD(L2E)                                       \
        case L2E:                                                                  \
            softmax_warp_forward<input_t, output_t, acc_t, L2E, is_log_softmax>    \
                <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(        \
                    dst, src, batch_count, softmax_elements_stride, softmax_elements); \
            break;

    switch (log2_elements) {
        LAUNCH_SOFTMAX_WARP_FORWARD(0)   // 1
        LAUNCH_SOFTMAX_WARP_FORWARD(1)   // 2
        LAUNCH_SOFTMAX_WARP_FORWARD(2)   // 4
        LAUNCH_SOFTMAX_WARP_FORWARD(3)   // 8
        LAUNCH_SOFTMAX_WARP_FORWARD(4)   // 16
        LAUNCH_SOFTMAX_WARP_FORWARD(5)   // 32
        LAUNCH_SOFTMAX_WARP_FORWARD(6)   // 64
        LAUNCH_SOFTMAX_WARP_FORWARD(7)   // 128
        LAUNCH_SOFTMAX_WARP_FORWARD(8)   // 256
        LAUNCH_SOFTMAX_WARP_FORWARD(9)   // 512
        LAUNCH_SOFTMAX_WARP_FORWARD(10)  // 1024
        default:
            break;
    }
    #undef LAUNCH_SOFTMAX_WARP_FORWARD
}
// Host-side dispatcher for the warp-level softmax backward kernel.
// Computes grad_input from the incoming gradient `grad` and the saved forward
// `output` for batch_count rows of softmax_elements values each (rows are
// softmax_elements_stride elements apart). The kernel is templated on
// log2(next-power-of-two of the element count), which must be a compile-time
// constant -- hence the explicit switch over log2_elements below.
template<typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_softmax_backward(output_t *grad_input, const input_t *grad, const input_t *output, int softmax_elements, int softmax_elements_stride, int batch_count)
{
TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 );
if (softmax_elements == 0) {
// Nothing to do for empty rows.
return;
} else {
int log2_elements = log2_ceil(softmax_elements);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
// Round up so every batch row is covered by some block.
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
softmax_warp_backward<input_t, output_t, acc_t, 0, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 1: // 2
softmax_warp_backward<input_t, output_t, acc_t, 1, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 2: // 4
softmax_warp_backward<input_t, output_t, acc_t, 2, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 3: // 8
softmax_warp_backward<input_t, output_t, acc_t, 3, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 4: // 16
softmax_warp_backward<input_t, output_t, acc_t, 4, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 5: // 32
softmax_warp_backward<input_t, output_t, acc_t, 5, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 6: // 64
softmax_warp_backward<input_t, output_t, acc_t, 6, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 7: // 128
softmax_warp_backward<input_t, output_t, acc_t, 7, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 8: // 256
softmax_warp_backward<input_t, output_t, acc_t, 8, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 9: // 512
softmax_warp_backward<input_t, output_t, acc_t, 9, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
case 10: // 1024
softmax_warp_backward<input_t, output_t, acc_t, 10, is_log_softmax>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, batch_count, softmax_elements_stride, softmax_elements);
break;
// next_power_of_two > 1024 is excluded by the assert above.
default:
break;
}
}
}
} | the_stack |
#include "../Observers/ColorScaleObserverSingle.cu"
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
// PHYSICS PART ----------------------------
// Clears the per-cell force accumulator (3 floats per cell: x, y, z)
// before a new physics step.
__global__ void SetForcesToZeroKernel(
    float *force,
    int maxCells
)
{
    // Flatten the 2D grid of 1D blocks into one linear component index.
    const int component = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;

    // One thread per force component.
    if (component >= maxCells * 3)
        return;

    force[component] = 0.0f;
}
// Accumulates zero-rest-length spring forces on each active cell: for every
// neighbour n marked in connectionMatrix, adds springStrength * (pos[n] - pos)
// to the cell's force vector (a plain Hookean pull toward the neighbour).
//
// activityFlag     - 1 for living cells; dead cells are skipped entirely
// connectionMatrix - maxCells x maxCells adjacency matrix, 1 = connected
// force            - accumulator, 3 floats per cell (zeroed elsewhere)
//
// Fix: the original computed distanceSquared and sqrtf(distance) for every
// connected neighbour but never used either value; the dead computation
// (one sqrt per edge) has been removed. Output is unchanged.
__global__ void SpringKernel(
    int *activityFlag,
    int *connectionMatrix,
    float *pointsCoordinates,
    float springStrength,
    float *force,
    int maxCells
)
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
        + blockDim.x*blockIdx.x //blocks preceding current block
        + threadIdx.x;

    if (threadId >= maxCells || activityFlag[threadId] != 1)
        return;

    const float x = pointsCoordinates[threadId * 3];
    const float y = pointsCoordinates[threadId * 3 + 1];
    const float z = pointsCoordinates[threadId * 3 + 2];

    for (int n = 0; n < maxCells; n++)
    {
        if (connectionMatrix[threadId * maxCells + n] != 1)
            continue;

        const float dX = pointsCoordinates[n * 3]     - x;
        const float dY = pointsCoordinates[n * 3 + 1] - y;
        const float dZ = pointsCoordinates[n * 3 + 2] - z;

        // Coincident points contribute no force; skip them (matches the
        // original guard).
        if (dX != 0 || dY != 0 || dZ != 0)
        {
            force[threadId * 3]     += springStrength * dX;
            force[threadId * 3 + 1] += springStrength * dY;
            force[threadId * 3 + 2] += springStrength * dZ;
        }
    }
}
// Accumulates pairwise interaction forces between all living cells.
// Force magnitude is repulsion / d^2; its sign flips at d == repulsionDistance
// via copysignf(log(d/repulsionDistance)): cells closer than repulsionDistance
// are pushed apart, cells farther away are pulled together. O(maxCells) loop
// per thread, so O(maxCells^2) total work.
__global__ void RepulsionKernel(
float repulsion,
float repulsionDistance,
float *force,
float *pointsCoordinates,
int *activityFlag,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
// Dead cells neither feel nor (as targets of other threads) exert force.
if(activityFlag[threadId] == 1)
{
float x,y,z;
float nX, nY, nZ;
float dX, dY, dZ;
float fX, fY, fZ;
float overallForce;
float distanceSquared, distance;
int n;
x = pointsCoordinates[threadId * 3];
y = pointsCoordinates[threadId * 3 + 1];
z = pointsCoordinates[threadId * 3 + 2];
for(n = 0; n < maxCells; n++)
{
if(activityFlag[n] == 1 && n != threadId)
{
nX = pointsCoordinates[n * 3];
nY = pointsCoordinates[n * 3 + 1];
nZ = pointsCoordinates[n * 3 + 2];
dX = nX - x;
dY = nY - y;
dZ = nZ - z;
// Coincident cells are skipped: d == 0 would divide by zero below.
if(dX != 0 || dY != 0 || dZ != 0)
{
distanceSquared = dX * dX + dY * dY + dZ * dZ;
distance = sqrtf(distanceSquared);
// log(d/repulsionDistance) is negative when too close, positive when
// far; copysignf transfers that sign onto the magnitude, and the
// leading minus plus the `+= -f` below makes "too close" repel.
overallForce = -copysignf( repulsion, logf(distance/repulsionDistance)) / distanceSquared;
//overallForce = ((distance > repulsionDistance) * ( -repulsion) + (distance <= repulsionDistance) * repulsion ) / distanceSquared;
// (dX/distance, dY/distance, dZ/distance) is the unit vector toward n.
fX = overallForce * dX / distance;
fY = overallForce * dY / distance;
fZ = overallForce * dZ / distance;
force[threadId * 3] += -fX;
force[threadId * 3 + 1] += -fY;
force[threadId * 3 + 2] += - fZ;
}
}
}
}
}
}
// Integrates positions by one explicit Euler step: each coordinate component
// is advanced by forceFactor times the accumulated force on that component.
__global__ void UseForceKernel(
    float *force,
    float forceFactor,
    float *pointsCoordinates,
    int maxCells
)
{
    // One thread per scalar component (3 per cell).
    const int component = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (component >= maxCells * 3)
        return;

    pointsCoordinates[component] += forceFactor * force[component];
}
// Computes the centroid of all living cells with a single-threaded sequential
// reduction: only thread 0 of the whole launch does any work. Writes
// {x, y, z} into centerOfGravity.
//
// Fix: the original divided by livingCells unconditionally, producing
// NaN/Inf in centerOfGravity when no cell is alive. Now the previous
// contents are left untouched in that case.
__global__ void CenterOfGravityKernel(
    float *pointsCoordinates,
    float *centerOfGravity,
    int *activityFlag,
    int maxCells
)
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
        + blockDim.x*blockIdx.x //blocks preceding current block
        + threadIdx.x;

    if (threadId < 1)
    {
        float xSum = 0.00f, ySum = 0.00f, zSum = 0.00f;
        int livingCells = 0;

        for (int c = 0; c < maxCells; c++)
        {
            if (activityFlag[c] == 1)
            {
                xSum += pointsCoordinates[c * 3];
                ySum += pointsCoordinates[c * 3 + 1];
                zSum += pointsCoordinates[c * 3 + 2];
                livingCells++;
            }
        }

        // Guard against an empty population: 0/0 would poison the center
        // with NaN and propagate into every kernel that reads it.
        if (livingCells > 0)
        {
            centerOfGravity[0] = xSum / (float)livingCells;
            centerOfGravity[1] = ySum / (float)livingCells;
            centerOfGravity[2] = zSum / (float)livingCells;
        }
    }
}
// GRAPHICS PART ---------------------------
// data preparation for the observer
// Fills the first `count` texels of the observer pixel buffer with zero.
__global__ void ZeroTextureKernel(
    unsigned int *texture,
    int count
)
{
    const int texel = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (texel >= count)
        return;

    texture[texel] = 0;
}
// Copies every cell's (x, y, z) into the vertex buffer starting at
// dataVertexOffset. Inactive cells are written with the caller-supplied
// placeholder position (xNonValid, yNonValid, zNonValid) so they are
// parked out of sight instead of being drawn at a stale location.
__global__ void CopyPointsCoordinatesKernel(
    float *pointsCoordinates,
    int *activityFlag,
    float xNonValid,
    float yNonValid,
    float zNonValid,
    float *dataVertex,
    int dataVertexOffset,
    int maxCells
)
{
    const int cell = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (cell >= maxCells)
        return;

    const bool alive = activityFlag[cell] != 0;
    const int src = cell * 3;
    const int dst = dataVertexOffset + cell * 3;

    dataVertex[dst]     = alive ? pointsCoordinates[src]     : xNonValid;
    dataVertex[dst + 1] = alive ? pointsCoordinates[src + 1] : yNonValid;
    dataVertex[dst + 2] = alive ? pointsCoordinates[src + 2] : zNonValid;
}
// Emits one line segment (two xyz endpoints = 6 floats) per cell connection.
// One thread per connectionMatrix entry; only the upper triangle (to > from)
// is emitted, so each undirected edge appears exactly once. Segments are
// packed densely via an atomic counter in connectionCount[0]; the final
// counter value is the number of segments written. Segment order is
// nondeterministic (atomic race), which is fine for line rendering.
// NOTE(review): assumes connectionCount[0] starts at 0 -- confirm the caller
// resets it before each launch.
__global__ void CopyConnectionsCoordinatesKernel(
int *connectionMatrix,
float *pointsCoordinates,
float *vertexData,
int *connectionCount,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells * maxCells)
{
if(connectionMatrix[threadId] == 1)
{
// Decode the flat matrix index into (from, to) cell ids.
int from = threadId / maxCells;
int to = threadId % maxCells;
if(to > from)
{
//int vertexDataOffset = maxCells * 3;
int vertexDataOffset = 0;
// Reserve a unique output slot for this segment.
int connIdx = atomicAdd( &connectionCount[0], 1);
vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3];
vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2];
vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3];
vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2];
}
}
}
}
// Builds a three-quad "billboard cross" per cell: three mutually perpendicular
// textured quads centred on the cell position. Buffer layout in vertexData:
// starting at quadOffset there are maxCells * 36 position floats
// (3 quads * 4 vertices * 3 components), immediately followed by
// maxCells * 24 texture floats (3 quads * 4 vertices * 2 components).
// Each cell samples its own textureWidth-wide column of the shared texture
// strip (u is offset by threadId). Dead cells collapse to a point
// (halfSide == 0) so they are invisible.
__global__ void ComputeQuadsKernel(
float *pointsCoordinates,
float *vertexData,
int quadOffset,
float textureSide,
int *activityFlag,
int textureWidth,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
float halfSide = 0.50f * textureSide;
if(activityFlag[threadId] == 0)
{
halfSide = 0.00f;
}
// Texture coords start right after all position data (maxCells * 36 floats).
int textureOffset = quadOffset + maxCells * 4 * 3 * 3;
float textureAbsLength = (float)(maxCells * textureWidth);
// vertical x-alligned
vertexData[quadOffset + threadId * 36] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 1] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 2] = z;
vertexData[textureOffset + threadId * 24] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 1] = 0.00f;
vertexData[quadOffset + threadId * 36 + 3] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 4] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 5] = z;
vertexData[textureOffset + threadId * 24 + 2] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 3] = 1.00f;
vertexData[quadOffset + threadId * 36 + 6] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 7] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 8] = z;
vertexData[textureOffset + threadId * 24 + 4] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 5] = 1.00f;
vertexData[quadOffset + threadId * 36 + 9] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 10] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 11] = z;
vertexData[textureOffset + threadId * 24 + 6] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 7] = 0.00f;
// horizontal
vertexData[quadOffset + threadId * 36 + 12] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 13] = y;
vertexData[quadOffset + threadId * 36 + 14] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 8] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 9] = 1.00f;
vertexData[quadOffset + threadId * 36 + 15] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 16] = y;
vertexData[quadOffset + threadId * 36 + 17] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 10] = (float)(threadId * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 11] = 0.00f;
vertexData[quadOffset + threadId * 36 + 18] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 19] = y;
vertexData[quadOffset + threadId * 36 + 20] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 12] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 13] = 0.00f;
vertexData[quadOffset + threadId * 36 + 21] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 22] = y;
vertexData[quadOffset + threadId * 36 + 23] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 14] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 15] = 1.00f;
// vertical z-alligned
vertexData[quadOffset + threadId * 36 + 24] = x;
vertexData[quadOffset + threadId * 36 + 25] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 26] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 16] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 17] = 1.00f;
vertexData[quadOffset + threadId * 36 + 27] = x;
vertexData[quadOffset + threadId * 36 + 28] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 29] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 18] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 19] = 1.00f;
vertexData[quadOffset + threadId * 36 + 30] = x;
vertexData[quadOffset + threadId * 36 + 31] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 32] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 20] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 21] = 0.00f;
vertexData[quadOffset + threadId * 36 + 33] = x;
vertexData[quadOffset + threadId * 36 + 34] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 35] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 22] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 23] = 0.00f;
}
}
// Per-face vertex offset mask for cube generation: 6 faces x 4 vertices x
// 3 components = 72 floats; each entry is multiplied by halfSide to offset
// a vertex from the cube centre (see CubeCoordinatesKernel / ComputeCubes2Kernel).
__constant__ float operationMaskConstant[72];
// Texture (u, v) coordinate table: 6 faces x 4 vertices x 2 components = 48
// floats, consumed by CubeTextureKernel / ComputeCubes2Kernel.
__constant__ float cubeTexCoordinatesConstant[48];
// Expands each cell into cube face vertices: one thread per output float
// (maxCells * 6 faces * 4 vertices * 3 components = maxCells * 72).
// Vertex offsets come from constant memory (operationMaskConstant); the
// cubeOperation argument is the legacy global-memory table, kept only for
// call-site compatibility and no longer read. Dead cells get halfSide == 0,
// collapsing their cube to a point.
//
// Fix: removed the unused local `textureOffset` (dead computation left over
// from an older variant). Output is unchanged.
__global__ void CubeCoordinatesKernel(
    float *vertexData,
    float *cubeOperation,
    int quadOffset,
    int *activityFlag,
    float cubeSize,
    float *pointsCoordinates,
    int maxCells
)
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
        + blockDim.x*blockIdx.x //blocks preceding current block
        + threadIdx.x;

    if (threadId < maxCells * 72)
    {
        int cellId = threadId / 72;        // owning cell
        int sideId = (threadId / 12) % 6;  // which of the 6 cube faces
        int pointId = threadId % 12;       // float index within the face (4 verts * 3 comps)
        int coordId = threadId % 3;        // x / y / z component

        // Inactive cells collapse to zero-size cubes.
        float halfSide = (activityFlag[cellId] == 1) * 0.50f * cubeSize;

        vertexData[quadOffset + cellId * 72 + sideId * 12 + pointId] =
            pointsCoordinates[cellId * 3 + coordId]
            + operationMaskConstant[sideId * 12 + pointId] * halfSide;
    }
}
// Writes per-face texture coordinates for each cell's cube: one thread per
// output float (maxCells * 6 faces * 4 vertices * 2 components = 48/cell).
// Even-indexed entries are u coordinates, offset by the cell index so each
// cell samples its own textureWidth-wide column of the shared texture strip;
// odd-indexed entries are plain v coordinates. Values come from constant
// memory (cubeTexCoordinatesConstant); the cubeTexCoordinates argument is
// the legacy global-memory table, kept only for call-site compatibility.
//
// Fix: removed the unused locals `coordId` and `halfSide` (the latter also
// performed a dead global load of activityFlag). Output is unchanged.
__global__ void CubeTextureKernel(
    float *vertexData,
    int texCoorOffset,
    float *cubeTexCoordinates,
    float cubeSize,
    float textureWidth,
    int *activityFlag,
    int maxCells
)
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
        + blockDim.x*blockIdx.x //blocks preceding current block
        + threadIdx.x;

    if (threadId < maxCells * 48)
    {
        int cellId = threadId / 48;        // owning cell
        float fCellId = (float)cellId;
        int sideId = (threadId / 8) % 6;   // which of the 6 cube faces
        int pointId = threadId % 8;        // float index within the face (4 verts * 2 comps)
        float textureAbsLength = (float)maxCells * textureWidth;

        // Branch-free select: even pointId -> scaled u, odd pointId -> raw v.
        vertexData[texCoorOffset + cellId * 48 + sideId * 8 + pointId] =
            (pointId % 2 == 0) * (((fCellId + cubeTexCoordinatesConstant[sideId * 8 + pointId]) * textureWidth) / textureAbsLength)
            + (pointId % 2 == 1) * (cubeTexCoordinatesConstant[sideId * 8 + pointId]);
    }
}
// Draws a highlight quad (slightly larger than the cell cube) under each
// winner cell; only the bottom face is emitted.
// NOTE(review): every thread whose cell has winner == 1.0f writes to the
// SAME 12-float slot at vertexOffset -- with more than one winner the
// writes race and the last one wins. Presumably at most one winner exists
// per step; confirm with the caller.
__global__ void WinnersKernel(
float *winner,
float *vertexData,
int vertexOffset,
float *pointsCoordinates,
float cubeSize,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
if(winner[threadId] == 1.00f)
{
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
// Highlight is 20% larger than the cell cube so it sticks out.
float side = 1.2f * cubeSize;
float halfSize = 0.50f * side;
// bottom side
vertexData[vertexOffset] = x - halfSize;
vertexData[vertexOffset + 1] = y - halfSize;
vertexData[vertexOffset + 2] = z - halfSize;
vertexData[vertexOffset + 3] = x - halfSize;
vertexData[vertexOffset + 4] = y - halfSize;
vertexData[vertexOffset + 5] = z + halfSize;
vertexData[vertexOffset + 6] = x + halfSize;
vertexData[vertexOffset + 7] = y - halfSize;
vertexData[vertexOffset + 8] = z + halfSize;
vertexData[vertexOffset + 9] = x + halfSize;
vertexData[vertexOffset + 10] = y - halfSize;
vertexData[vertexOffset + 11] = z - halfSize;
}
}
}
// Builds a textured cube per cell, one thread per cube FACE
// (maxCells * 6 threads). Per cell: 72 position floats starting at
// quadOffset, then 48 texture floats starting at quadOffset + maxCells * 72.
// Vertex offsets and (u, v) pairs come from the constant-memory tables
// operationMaskConstant / cubeTexCoordinatesConstant; the cubeOperation and
// cubeTexCoordinates arguments are legacy global-memory versions of those
// tables and are no longer read (see the retained commented-out block below).
// Dead cells get halfSide == 0, collapsing their cube to a point.
__global__ void ComputeCubes2Kernel(
float *pointsCoordinates,
float *vertexData,
int quadOffset,
float cubeSide,
float *cubeOperation,
float *cubeTexCoordinates,
int *activityFlag,
float textureWidth,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells * 6)
{
int cellId = threadId / 6;
float fCellId = (float)cellId;
int sideId = threadId % 6;
float x = pointsCoordinates[cellId * 3];
float y = pointsCoordinates[cellId * 3 + 1];
float z = pointsCoordinates[cellId * 3 + 2];
// Inactive cells collapse to zero-size cubes.
float halfSide = (activityFlag[cellId] == 1) * 0.50f * cubeSide;
// Texture coords start right after all position data (maxCells * 72 floats).
int textureOffset = quadOffset + maxCells * 4 * 6 * 3;
float textureAbsLength = (float)maxCells * textureWidth;
vertexData[quadOffset + cellId * 72 + 12*sideId] = x + operationMaskConstant[12*sideId] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 1] = y + operationMaskConstant[12*sideId + 1] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 2] = z + operationMaskConstant[12*sideId + 2] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 3] = x + operationMaskConstant[12*sideId + 3] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 4] = y + operationMaskConstant[12*sideId + 4] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 5] = z + operationMaskConstant[12*sideId + 5] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 6] = x + operationMaskConstant[12*sideId + 6] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 7] = y + operationMaskConstant[12*sideId + 7] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 8] = z + operationMaskConstant[12*sideId + 8] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 9] = x + operationMaskConstant[12*sideId + 9] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 10] = y + operationMaskConstant[12*sideId + 10] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 11] = z + operationMaskConstant[12*sideId + 11] * halfSide;
// u coordinates are shifted by the cell index so each cell samples its own
// textureWidth-wide column; v coordinates are taken verbatim from the table.
vertexData[textureOffset + cellId * 48 + 8 * sideId] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8])* textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 1] = cubeTexCoordinatesConstant[sideId * 8 + 1];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 2] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 2]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 3] = cubeTexCoordinatesConstant[sideId * 8 + 3];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 4] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 4]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 5] = cubeTexCoordinatesConstant[sideId * 8 + 5];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 6] = ((fCellId + cubeTexCoordinatesConstant[sideId * 8 + 6]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 7] = cubeTexCoordinatesConstant[sideId * 8 + 7];
/*
vertexData[quadOffset + cellId * 72 + 12*sideId] = x + cubeOperation[12*sideId] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 1] = y + cubeOperation[12*sideId + 1] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 2] = z + cubeOperation[12*sideId + 2] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 3] = x + cubeOperation[12*sideId + 3] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 4] = y + cubeOperation[12*sideId + 4] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 5] = z + cubeOperation[12*sideId + 5] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 6] = x + cubeOperation[12*sideId + 6] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 7] = y + cubeOperation[12*sideId + 7] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 8] = z + cubeOperation[12*sideId + 8] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 9] = x + cubeOperation[12*sideId + 9] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 10] = y + cubeOperation[12*sideId + 10] * halfSide;
vertexData[quadOffset + cellId * 72 + 12*sideId + 11] = z + cubeOperation[12*sideId + 11] * halfSide;
vertexData[textureOffset + cellId * 48 + 8 * sideId] = ((fCellId + cubeTexCoordinates[sideId * 8])* textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 1] = cubeTexCoordinates[sideId * 8 + 1];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 2] = ((fCellId + cubeTexCoordinates[sideId * 8 + 2]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 3] = cubeTexCoordinates[sideId * 8 + 3];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 4] = ((fCellId + cubeTexCoordinates[sideId * 8 + 4]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 5] = cubeTexCoordinates[sideId * 8 + 5];
vertexData[textureOffset + cellId * 48 + 8 * sideId + 6] = ((fCellId + cubeTexCoordinates[sideId * 8 + 6]) * textureWidth) / textureAbsLength;
vertexData[textureOffset + cellId * 48 + 8 * sideId + 7] = cubeTexCoordinates[sideId * 8 + 7];
*/
}
}
// Builds a full textured cube (6 explicit quads) per cell, one thread per
// CELL. Per cell: 72 position floats starting at quadOffset, then 48 texture
// floats starting at quadOffset + maxCells * 72. Each cell samples its own
// textureWidth-wide column of the shared texture strip; dead cells collapse
// to a point (halfSide == 0). This is the hand-unrolled predecessor of
// ComputeCubes2Kernel, which derives the same output from constant-memory
// tables instead of literal offsets.
__global__ void ComputeCubesKernel(
float *pointsCoordinates,
float *vertexData,
int quadOffset,
float cubeSide,
int *activityFlag,
int textureWidth,
int maxCells
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
float halfSide = 0.50f * cubeSide;
if(activityFlag[threadId] == 0)
{
halfSide = 0.00f;
}
// Texture coords start right after all position data (maxCells * 72 floats).
int textureOffset = quadOffset + maxCells * 4 * 6 * 3;
float textureAbsLength = (float)(maxCells * textureWidth);
// BOTTOM SIDE
vertexData[quadOffset + threadId * 72] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 1] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 2] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 3] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 4] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 5] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 6] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 7] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 8] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 9] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 10] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 11] = z + halfSide;
vertexData[textureOffset + threadId * 48] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 1] = 0.00f;
vertexData[textureOffset + threadId * 48 + 2] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 3] = 1.00f;
vertexData[textureOffset + threadId * 48 + 4] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 5] = 1.00f;
vertexData[textureOffset + threadId * 48 + 6] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 7] = 0.00f;
// FRONT SIDE
vertexData[quadOffset + threadId * 72 + 12] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 13] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 14] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 15] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 16] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 17] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 18] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 19] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 20] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 21] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 22] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 23] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 8] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 9] = 0.00f;
vertexData[textureOffset + threadId * 48 + 10] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 11] = 1.00f;
vertexData[textureOffset + threadId * 48 + 12] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 13] = 1.00f;
vertexData[textureOffset + threadId * 48 + 14] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 15] = 0.00f;
// LEFT SIDE
vertexData[quadOffset + threadId * 72 + 24] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 25] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 26] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 27] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 28] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 29] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 30] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 31] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 32] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 33] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 34] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 35] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 16] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 17] = 0.00f;
vertexData[textureOffset + threadId * 48 + 18] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 19] = 1.00f;
vertexData[textureOffset + threadId * 48 + 20] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 21] = 1.00f;
vertexData[textureOffset + threadId * 48 + 22] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 23] = 0.00f;
// BACK SIDE
vertexData[quadOffset + threadId * 72 + 36] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 37] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 38] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 39] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 40] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 41] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 42] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 43] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 44] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 45] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 46] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 47] = z - halfSide;
// u coordinates are mirrored on this face (threadId + 1 first).
vertexData[textureOffset + threadId * 48 + 24] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 25] = 0.00f;
vertexData[textureOffset + threadId * 48 + 26] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 27] = 1.00f;
vertexData[textureOffset + threadId * 48 + 28] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 29] = 1.00f;
vertexData[textureOffset + threadId * 48 + 30] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 31] = 0.00f;
// RIGHT SIDE
vertexData[quadOffset + threadId * 72 + 48] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 49] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 50] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 51] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 52] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 53] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 54] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 55] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 56] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 57] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 58] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 59] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 32] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 33] = 0.00f;
vertexData[textureOffset + threadId * 48 + 34] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 35] = 1.00f;
vertexData[textureOffset + threadId * 48 + 36] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 37] = 1.00f;
vertexData[textureOffset + threadId * 48 + 38] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 39] = 0.00f;
// UPPER SIDE
vertexData[quadOffset + threadId * 72 + 60] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 61] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 62] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 63] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 64] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 65] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 66] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 67] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 68] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 69] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 70] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 71] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 40] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 41] = 1.00f;
vertexData[textureOffset + threadId * 48 + 42] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 43] = 0.00f;
vertexData[textureOffset + threadId * 48 + 44] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 45] = 0.00f;
vertexData[textureOffset + threadId * 48 + 46] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 47] = 1.00f;
}
}
// Renders each cell's reference vector into its column of the observer
// texture as a grayscale strip (saturation == 0 makes the hue irrelevant).
// One thread per element; referenceVector values are mapped from [-1, 1]
// to [0, 1] brightness by (v + 1) * 0.5.
// NOTE(review): the clamp is fminf(1, fmaxf(-1, ...)) -- the lower bound of
// -1 lets inputs below -3 reach hsva_to_uint_rgba with a negative value;
// the bound was probably meant to be 0. Confirm before changing.
__global__ void CopyAndProcessTextureKernel(
float *referenceVector,
int referenceVectorSize,
int textureWidth,
int textureFieldWidth,
unsigned int *pixels,
int maxCells,
int count
)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < count)
{
// Locate this element's pixel inside its cell's textureWidth-wide column.
int cellId = threadId / referenceVectorSize;
int pixelXPos = (threadId - cellId * referenceVectorSize) % textureWidth;
int pixelYPos = (threadId - cellId * referenceVectorSize) / textureWidth;
float hue = 1;
float saturation = 0;
float value = fminf(1, fmaxf(-1, (referenceVector[threadId] + 1) * 0.50f));
pixels[pixelYPos * textureFieldWidth + cellId * textureWidth + pixelXPos] = hsva_to_uint_rgba(hue, saturation ,value, 1.00f);
//pixels[pixelYPos * textureFieldWidth + cellId * textureWidth + pixelXPos] = 0xFF000000 + (int)(referenceVector[threadId] * 255.00f);
}
}
} | the_stack |
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <float.h>
#include "SegmentDefs.cu"
#include "VisionMath.cu"
extern "C"
{
// 4-neighborhood offsets (left, up, right, down) used when enforcing
// connectivity of superpixel labels; indexed in lockstep: (dx4[i], dy4[i]).
const int dx4[4] = {-1, 0, 1, 0};
const int dy4[4] = { 0, -1, 0, 1};
//--- now kernels that do the job...
// Seeds every SLIC cluster center from the single pixel at the middle of its
// image tile. One thread per cluster: gridDim.x indexes tile rows, blockDim.x
// indexes tile columns.
__global__ void kInitClusterCenters( float4* floatBuffer, int nWidth, int nHeight, SLICClusterCenter* vSLICCenterList )
{
    const int tileW = nWidth / blockDim.x;
    const int tileH = nHeight / gridDim.x;
    const int centerIdx = blockIdx.x * blockDim.x + threadIdx.x;

    // Geometric center of this tile in image coordinates.
    float2 seedXY;
    seedXY.x = threadIdx.x * tileW + (float)tileW / 2.0;
    seedXY.y = blockIdx.x * tileH + (float)tileH / 2.0;

    // Sample the pixel at the tile center to seed the color component.
    const int tileOrigin = blockIdx.x * tileH * nWidth + threadIdx.x * tileW;
    const int seedOffset = tileOrigin + tileH / 2 * nWidth + tileW / 2;
    const float4 seedColor = floatBuffer[seedOffset];

    vSLICCenterList[centerIdx].lab = seedColor;
    vSLICCenterList[centerIdx].xy = seedXY;
    vSLICCenterList[centerIdx].nPoints = 0;
}
// One assignment step of SLIC k-means. blockIdx.y enumerates clusters;
// (blockIdx.x, threadIdx.y) tile the cluster's image patch in rows and
// threadIdx.x in columns. The 3x3 neighborhood of cluster centers is staged
// in shared memory, then each pixel is assigned to the nearest of those
// centers under the SLIC distance d = d_lab + invWeight * d_xy.
// When bLabelImg is set, maskBuffer receives the winning cluster index.
__global__ void kIterateKmeans( int* maskBuffer, float4* floatBuffer,
int nWidth, int nHeight, int nSegs, int nClusterIdxStride,
SLICClusterCenter* vSLICCenterList, int listSize,
bool bLabelImg, float weight)
{
//for reading cluster centers: the 3x3 ring of centers around this block's cluster
__shared__ float4 fShareLab[3][3];
__shared__ float2 fShareXY[3][3];
//pixel index
// NOTE(review): pixelUpdateList/pixelUpdateIdx are written below but never
// read after the trailing __syncthreads() -- presumably leftover from an
// in-kernel center-update scheme; confirm before relying on them.
__shared__ SLICClusterCenter pixelUpdateList[MAX_BLOCK_SIZE];
__shared__ float2 pixelUpdateIdx[MAX_BLOCK_SIZE];
// This block's cluster and its (row, col) position in the cluster grid.
int clusterIdx=blockIdx.y;
int blockCol=clusterIdx%nClusterIdxStride;
int blockRow=clusterIdx/nClusterIdxStride;
//int upperBlockHeight=blockDim.y*gridDim.x;
int lowerBlockHeight=blockDim.y;
int blockWidth=blockDim.x;
// Cluster tiles are square: blockWidth pixels on a side.
int upperBlockHeight=blockWidth;
int innerBlockHeightIdx=lowerBlockHeight*blockIdx.x+threadIdx.y;
// M is the SLIC compactness weight; invWeight = (M/S)^2 scales the spatial term.
float M=weight;
float invWeight=1/((blockWidth/M)*(blockWidth/M));
// Linear offset of this thread's pixel in the image.
int offsetBlock = (blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight)*nWidth+blockCol*blockWidth;
int offset=offsetBlock+threadIdx.x+threadIdx.y*nWidth;
// Valid 3x3 neighbor index range, clipped at the borders of the cluster grid.
int rBegin=(blockRow>0)?0:1;
int rEnd=(blockRow+1>(gridDim.y/nClusterIdxStride-1))?1:2;
int cBegin=(blockCol>0)?0:1;
int cEnd=(blockCol+1>(nClusterIdxStride-1))?1:2;
// The first 3x3 threads stage the neighboring cluster centers into shared memory.
if (threadIdx.x<3 && threadIdx.y<3) {
if (threadIdx.x>=cBegin && threadIdx.x<=cEnd && threadIdx.y>=rBegin && threadIdx.y<=rEnd) {
int cmprIdx=(blockRow+threadIdx.y-1)*nClusterIdxStride+(blockCol+threadIdx.x-1);
fShareLab[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].lab;
fShareXY[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].xy;
}
}
__syncthreads();
// NOTE(review): these early returns mean not every thread reaches the trailing
// __syncthreads() -- a divergence hazard on some architectures; confirm.
if (innerBlockHeightIdx>=blockWidth)
return;
if (offset>=nWidth*nHeight)
return;
// finding the nearest center for current pixel
float fY=blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight+threadIdx.y;
float fX=blockCol*blockWidth+threadIdx.x;
if (fY<nHeight && fX<nWidth)
{
float4 fPoint=floatBuffer[offset];
float minDis=9999;
int nearestCenter=-1;
int nearestR, nearestC;
// Scan the (clipped) 3x3 neighborhood of candidate centers.
for (int r=rBegin;r<=rEnd;r++)
{
for (int c=cBegin;c<=cEnd;c++)
{
int cmprIdx=(blockRow+r-1)*nClusterIdxStride+(blockCol+c-1);
//compute SLIC distance: squared color distance ...
float fDab=(fPoint.x-fShareLab[r][c].x)*(fPoint.x-fShareLab[r][c].x)
+(fPoint.y-fShareLab[r][c].y)*(fPoint.y-fShareLab[r][c].y)
+(fPoint.z-fShareLab[r][c].z)*(fPoint.z-fShareLab[r][c].z);
//fDab=sqrt(fDab);
// ... plus squared spatial distance weighted by (M/S)^2.
float fDxy=(fX-fShareXY[r][c].x)*(fX-fShareXY[r][c].x)
+(fY-fShareXY[r][c].y)*(fY-fShareXY[r][c].y);
//fDxy=sqrt(fDxy);
float fDis=fDab+invWeight*fDxy;
if (fDis<minDis)
{
minDis=fDis;
nearestCenter=cmprIdx;
nearestR=r;
nearestC=c;
}
}
}
if (nearestCenter>-1) {
// Record the winning assignment in shared memory (currently unconsumed, see above).
int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
if(pixelIdx < MAX_BLOCK_SIZE) {
pixelUpdateList[pixelIdx].lab=fPoint;
pixelUpdateList[pixelIdx].xy.x=fX;
pixelUpdateList[pixelIdx].xy.y=fY;
pixelUpdateIdx[pixelIdx].x=nearestC;
pixelUpdateIdx[pixelIdx].y=nearestR;
}
if (bLabelImg)
maskBuffer[offset]=nearestCenter;
}
}
else {
// Out-of-image pixel: mark its shared-memory slot as invalid.
int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
if(pixelIdx < MAX_BLOCK_SIZE) {
pixelUpdateIdx[pixelIdx].x=-1;
pixelUpdateIdx[pixelIdx].y=-1;
}
}
__syncthreads();
}
// Recomputes each cluster center as the mean color and position of the
// pixels currently labeled with it. One thread per cluster; each thread
// serially scans a window of one tile in every direction around the current
// center. Clusters with no assigned pixels keep their previous state.
__global__ void kUpdateClusterCenters( float4* floatBuffer,int* maskBuffer, int nWidth, int nHeight, int nSegs, SLICClusterCenter* vSLICCenterList, int listSize)
{
int blockWidth=nWidth/blockDim.x;
int blockHeight=nHeight/gridDim.x;
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
// NOTE(review): offsetBlock is computed but never used.
int offsetBlock = threadIdx.x * blockWidth+ blockIdx.x * blockHeight * nWidth;
float2 crntXY=vSLICCenterList[clusterIdx].xy;
// Running sums for the mean color and position.
float4 avLab;
float2 avXY;
int nPoints=0;
avLab.x=0;
avLab.y=0;
avLab.z=0;
avXY.x=0;
avXY.y=0;
// Search window around the current center, clamped to the image bounds
// (note the float center coordinates are truncated on assignment to int).
int yBegin=0 < (crntXY.y - blockHeight) ? (crntXY.y - blockHeight) : 0;
int yEnd= nHeight > (crntXY.y + blockHeight) ? (crntXY.y + blockHeight) : (nHeight-1);
int xBegin=0 < (crntXY.x - blockWidth) ? (crntXY.x - blockWidth) : 0;
int xEnd= nWidth > (crntXY.x + blockWidth) ? (crntXY.x + blockWidth) : (nWidth-1);
//update to cluster centers: accumulate all pixels labeled with this cluster
for (int i = yBegin; i < yEnd ; i++)
{
for (int j = xBegin; j < xEnd; j++)
{
int offset=j + i * nWidth;
float4 fPixel=floatBuffer[offset];
int pIdx=maskBuffer[offset];
if (pIdx==clusterIdx)
{
avLab.x+=fPixel.x;
avLab.y+=fPixel.y;
avLab.z+=fPixel.z;
avXY.x+=j;
avXY.y+=i;
nPoints++;
}
}
}
// No member pixels: leave the previous center untouched.
if(nPoints == 0)
return;
avLab.x/=nPoints;
avLab.y/=nPoints;
avLab.z/=nPoints;
avXY.x/=nPoints;
avXY.y/=nPoints;
vSLICCenterList[clusterIdx].lab=avLab;
vSLICCenterList[clusterIdx].xy=avXY;
vSLICCenterList[clusterIdx].nPoints=nPoints;
}
//=======================================================
// create descriptors
///------------------------------------------------------
/// Add how edges around look like...
// Appends edge-image samples to a segment's feature descriptor: the edge
// response at the segment center and at its four diagonal neighbors at
// distance ngh_max, each followed by its absolute value (10 floats, written
// at dim_desc offsets 5..14).
//
// NOTE(review): samples are read at (x +/- ngh_max, y +/- ngh_max) without
// bounds checks, so centers closer than ngh_max pixels to the image border
// read out of bounds -- callers must guarantee a margin (TODO confirm).
__device__ void Desc_get_hists_stats_for_each_segment (float2 xy, int id, float* edge_im , float* feat_desc, int dim_desc, int width){
	const int dim_id_start = 5;
	const int ngh_max = 5;
	const int cx = (int)xy.x;
	const int cy = (int)xy.y;
	// Base of this segment's descriptor slot, past the first dim_id_start entries
	// (hoisted: the original recomputed id*dim_desc+dim_id_start on every line).
	float* d = feat_desc + id*dim_desc + dim_id_start;
	d[0] = edge_im[cx + cy*width];
	d[1] = abs(d[0]);
	d[2] = edge_im[(cx + ngh_max) + (cy + ngh_max)*width];
	d[3] = abs(d[2]);
	d[4] = edge_im[(cx + ngh_max) + (cy - ngh_max)*width];
	d[5] = abs(d[4]);
	d[6] = edge_im[(cx - ngh_max) + (cy + ngh_max)*width];
	d[7] = abs(d[6]);
	d[8] = edge_im[(cx - ngh_max) + (cy - ngh_max)*width];
	d[9] = abs(d[8]);
}
// m_kernel_desc.Run(devSLICCCenter.DevicePointer, Owner.features_xy, Owner.features_desc , Owner.nSegs , 2 , Owner.dim_feat_desc); /// fill image with average color
// Builds per-superpixel features from the cluster centers: feat_xy receives
// (x, y, point count) and feat_desc receives the center's color components
// plus the distance to the previous frame's descriptor (a temporal change
// measure) at offset 3.
//
// NOTE(review): feat_xy is written at indices 0..2, so dim_xy must be >= 3;
// the example call in the comment above passes 2 -- confirm against callers.
__global__ void Desc (SLICClusterCenter* vSLICCenterList , float* feat_xy , float* feat_desc , int size , int dim_xy , int dim_desc , int width , int height){//, float* edge_im){
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id<size){
float2 xy = vSLICCenterList[id].xy;
float4 lab = vSLICCenterList[id].lab;
feat_xy[id*dim_xy+0] = xy.x;// (xy.x-width/2) / (width/2);
feat_xy[id*dim_xy+1] = xy.y;// (xy.y-height/2) / (height/2);
feat_xy[id*dim_xy+2] = (int)vSLICCenterList[id].nPoints;// 0.2;// id;
//--- old desc: keep a copy so the temporal distance can be computed below
float desc_prev[3];
desc_prev[0] = feat_desc[id*dim_desc+0];
desc_prev[1] = feat_desc[id*dim_desc+1];
desc_prev[2] = feat_desc[id*dim_desc+2];
feat_desc[id*dim_desc+0] = lab.x/1;
feat_desc[id*dim_desc+1] = lab.y/1;
feat_desc[id*dim_desc+2] = lab.z/1;
// Distance between the previous and current color descriptor.
feat_desc[id*dim_desc+3] = Dist_between_two_vec(&desc_prev[0] , feat_desc+id*dim_desc , 3);
}
}
// Element-wise copy of an int buffer into a float buffer, one thread per element.
__global__ void Copy_intMat2Float(float* A , int* B , int size){
    const int idx = blockDim.x * blockIdx.y * gridDim.x
                  + blockDim.x * blockIdx.x
                  + threadIdx.x;
    if (idx >= size)
        return;
    A[idx] = (float)B[idx];
}
//=======================================================
// From image to float4 buffer
///------------------------------------------------------
// Converts a single-channel (grayscale) image to XYZ color space, one thread
// per pixel. The gray value stands in for R, G and B, so the result is the
// standard linear RGB->XYZ transform applied to (v, v, v).
__global__ void kBw2XYZ(float* im , float4* outputImg , int size)
{
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < size) {
		const float v = im[id];
		float4 fPixel;
		fPixel.x = v*0.412453 + v*0.357580 + v*0.180423;   // X
		fPixel.y = v*0.212671 + v*0.715160 + v*0.072169;   // Y
		fPixel.z = v*0.019334 + v*0.119193 + v*0.950227;   // Z
		fPixel.w = 0.0f;   // fix: w was left uninitialized before being written out
		outputImg[id] = fPixel;
	}
}
// Converts planar RGB images to XYZ color space (linear RGB->XYZ matrix),
// one thread per pixel.
__global__ void kRgb2XYZ(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < size) {
		float _b= imB[id];
		float _g= imG[id];
		float _r= imR[id];
		float4 fPixel;
		fPixel.x = _r*0.412453 +_g*0.357580 +_b*0.180423;   // X
		fPixel.y = _r*0.212671 +_g*0.715160 +_b*0.072169;   // Y
		fPixel.z = _r*0.019334 +_g*0.119193 +_b*0.950227;   // Z
		fPixel.w = 0.0f;   // fix: w was left uninitialized before being written out
		outputImg[id] = fPixel;
	}
}
// Converts planar RGB images to CIE Lab, one thread per pixel.
// Pipeline: linear RGB -> XYZ (D65), normalize by the reference white, apply
// the CIE transfer f(t) (cube root above the 0.008856 threshold, linear
// 7.787*t + 16/116 below), then
//   L = 116*f(y) - 16,  a = 500*(f(x) - f(y)),  b = 200*(f(y) - f(z)).
__global__ void kRgb2LAB(float* imR , float* imG , float* imB , float4* outputImg , int size)
{
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < size) {
		float _b= imB[id];
		float _g= imG[id];
		float _r= imR[id];
		float x=_r*0.412453 +_g*0.357580 +_b*0.180423;
		float y=_r*0.212671 +_g*0.715160 +_b*0.072169;
		float z=_r*0.019334 +_g*0.119193 +_b*0.950227;
		float l,a,b;
		// Normalize by the D65 reference white (the Y white point is 1.0).
		x/=0.950456;
		z/=1.088754;
		float y3=exp(log(y)/3.0);   // cbrt(y)
		// fix: L must branch on the *original* y; the old code overwrote y
		// first, which made the low-luminance (903.3*y) branch unreachable.
		l = y>0.008856 ? (116.0*y3-16.0) : 903.3*y;
		x = x>0.008856 ? exp(log(x)/3.0) : (7.787*x+0.13793);
		y = y>0.008856 ? y3 : 7.787*y+0.13793;
		// fix: the old expression `z/=exp(log(z)/3.0)` computed z/cbrt(z)
		// (= z^(2/3)) instead of cbrt(z).
		z = z>0.008856 ? exp(log(z)/3.0) : (7.787*z+0.13793);
		a=(x-y)*500.0;
		b=(y-z)*200.0;
		float4 fPixel;
		fPixel.x=l;
		fPixel.y=a;
		fPixel.z=b;
		fPixel.w=0.0f;   // fix: w was uninitialized before being stored
		outputImg[id]=fPixel;
	}
}
} | the_stack |
#include <df/camera/poly3.h>
#include <df/surface/marchingCubesTables.h>
#include <df/util/cudaHelpers.h>
#include <df/util/eigenHelpers.h>
#include <df/voxel/color.h>
#include <df/voxel/probability.h>
#include <df/voxel/compositeVoxel.h>
#include <df/voxel/tsdf.h>
#include <df/transform/rigid.h>
#include <thrust/device_ptr.h>
#include <thrust/binary_search.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
namespace df {
//extern texture<VertexCountT, 1, cudaReadModeElementType> vertexCountByVoxelCodeTex;
//extern texture<VertexIndexT, 1, cudaReadModeElementType> vertexIndicesByVoxelCodeTex;
// 1D lookup-table textures filled and bound by MarchingCubesMemoryManager below.
texture<VertexCountT, 1, cudaReadModeElementType> vertexCountByVoxelCodeTex;
// fix: this texture is bound to VertexIndexT data (vertexIndicesByVoxelCodeTable)
// and fetched as indices, so its element type must be VertexIndexT, not
// VertexCountT (cf. the commented-out extern declarations above).
texture<VertexIndexT, 1, cudaReadModeElementType> vertexIndicesByVoxelCodeTex;
// RAII owner of the device-side marching cubes lookup tables. The constructor
// uploads the per-voxel-code vertex-count and vertex-index tables and binds
// them to the 1D textures above; the destructor unbinds the textures and
// frees the device buffers. Intended to be instantiated exactly once, as a
// function-local static (see initMarchingCubesTables).
class MarchingCubesMemoryManager {
public:

    MarchingCubesMemoryManager() {

        cudaMalloc(&vertexCountData_, 256*sizeof(VertexCountT));
        cudaMemcpy(vertexCountData_, vertexCountByVoxelCodeTable, 256*sizeof(VertexCountT), cudaMemcpyHostToDevice);
//        cudaChannelFormatDesc vertexCountChannelDesc = cudaCreateChannelDesc(8*sizeof(VertexCountT), 0, 0, 0, cudaChannelFormatKindUnsigned);
        cudaBindTexture(0, vertexCountByVoxelCodeTex, vertexCountData_, 256*sizeof(VertexCountT)); //vertexCountChannelDesc);

        cudaMalloc(&vertexIndexData_, 256*16*sizeof(VertexIndexT));
        cudaMemcpy(vertexIndexData_, vertexIndicesByVoxelCodeTable, 256*16*sizeof(VertexIndexT), cudaMemcpyHostToDevice);
        cudaBindTexture(0, vertexIndicesByVoxelCodeTex, vertexIndexData_, 256*16*sizeof(VertexIndexT));

        cudaDeviceSynchronize();
        CheckCudaDieOnError();
    }

    ~MarchingCubesMemoryManager() {
        cudaUnbindTexture(vertexCountByVoxelCodeTex);
        cudaFree(vertexCountData_);
        cudaUnbindTexture(vertexIndicesByVoxelCodeTex);
        cudaFree(vertexIndexData_);
    }

    // fix: this class owns raw device pointers -- a copy would double-free
    // them in ~MarchingCubesMemoryManager, so copying is forbidden.
    MarchingCubesMemoryManager(const MarchingCubesMemoryManager &) = delete;
    MarchingCubesMemoryManager & operator=(const MarchingCubesMemoryManager &) = delete;

private:

    VertexCountT * vertexCountData_;   // device table: vertices emitted per voxel code
    VertexIndexT * vertexIndexData_;   // device table: edge indices per voxel code (16 each)

};
// Lazily uploads and binds the marching cubes lookup tables; safe to call
// multiple times, since the function-local static is constructed exactly once.
void initMarchingCubesTables() {
// The manager's destructor releases the tables at program exit.
static MarchingCubesMemoryManager manager;
}
// Reads the TSDF value at voxel (x,y,z). A voxel whose accumulated weight is
// below weightThreshold is treated as unobserved: missingData is set and 0 is
// returned instead of the stored value.
template <typename Scalar,
          typename VoxelT>
inline __device__ Scalar sampleVoxelGrid(const Tensor<3,VoxelT,DeviceResident> voxelGrid,
                                         const int x, const int y, const int z,
                                         const Scalar weightThreshold,
                                         bool & missingData) {

    const VoxelT & v = voxelGrid(x,y,z);
    if (v.template weight<TsdfVoxel>() >= weightThreshold) {
        return v.template value<TsdfVoxel>();
    }
    missingData = true;
    return Scalar(0);
}
// Unchecked TSDF read at voxel (x,y,z) -- unlike the overload above, no
// weight threshold is applied.
template <typename VoxelT>
inline __device__ auto sampleVoxelGrid(const Tensor<3,VoxelT,DeviceResident> voxelGrid,
                                       const int x, const int y, const int z) -> decltype(voxelGrid(0,0,0).template value<TsdfVoxel>()) {
    return voxelGrid(x,y,z).template value<TsdfVoxel>();
}
//
// Classifies every voxel for marching cubes: builds the 8-bit case code from
// the signs of the TSDF at the voxel's eight cube corners and records how
// many triangle vertices that case emits. Voxels on the +x/+y/+z boundary
// faces, or with any under-weighted (unobserved) corner, emit nothing.
template <typename Scalar,
typename VoxelT>
__global__ void classifyVoxelsKernel(const Tensor<3,VoxelT,DeviceResident> voxelGrid,
const Scalar weightThreshold,
//Tensor<3,uint,DeviceResident> voxelCodes, // TODO: experiment with data size
Tensor<3,uint,DeviceResident> vertexCounts) {

const uint x = threadIdx.x + blockDim.x * blockIdx.x;
const uint y = threadIdx.y + blockDim.y * blockIdx.y;
const uint z = threadIdx.z + blockDim.z * blockIdx.z;

if ((x < voxelGrid.dimensionSize(0)) && (y < voxelGrid.dimensionSize(1)) && (z < voxelGrid.dimensionSize(2))) {

uint voxelCode, numVertices;

if ((x == (voxelGrid.dimensionSize(0) - 1)) || (y == (voxelGrid.dimensionSize(1) - 1)) || (z == (voxelGrid.dimensionSize(2) - 1))) {

// cannot do bilinear interpolation on these vertices (the +1 corner
// samples below would fall outside the grid)
voxelCode = 0;
numVertices = 0;

} else {

bool missingData = false;

// Sample the TSDF at the 8 cube corners, in the standard marching
// cubes corner order.
Scalar centerVals[8];
centerVals[0] = sampleVoxelGrid(voxelGrid, x, y, z, weightThreshold, missingData);
centerVals[1] = sampleVoxelGrid(voxelGrid, x + 1, y, z, weightThreshold, missingData);
centerVals[2] = sampleVoxelGrid(voxelGrid, x + 1, y + 1, z, weightThreshold, missingData);
centerVals[3] = sampleVoxelGrid(voxelGrid, x, y + 1, z, weightThreshold, missingData);
centerVals[4] = sampleVoxelGrid(voxelGrid, x, y, z + 1, weightThreshold, missingData);
centerVals[5] = sampleVoxelGrid(voxelGrid, x + 1, y, z + 1, weightThreshold, missingData);
centerVals[6] = sampleVoxelGrid(voxelGrid, x + 1, y + 1, z + 1, weightThreshold, missingData);
centerVals[7] = sampleVoxelGrid(voxelGrid, x, y + 1, z + 1, weightThreshold, missingData);

if (missingData) {

// Any unobserved corner disqualifies the voxel.
voxelCode = 0;
numVertices = 0;

} else {
//                printf("8 valid\n");

// One bit per corner: set when the corner is inside the surface.
voxelCode = uint(centerVals[0] < Scalar(0));
voxelCode += uint(centerVals[1] < Scalar(0)) << 1;
voxelCode += uint(centerVals[2] < Scalar(0)) << 2;
voxelCode += uint(centerVals[3] < Scalar(0)) << 3;
voxelCode += uint(centerVals[4] < Scalar(0)) << 4;
voxelCode += uint(centerVals[5] < Scalar(0)) << 5;
voxelCode += uint(centerVals[6] < Scalar(0)) << 6;
voxelCode += uint(centerVals[7] < Scalar(0)) << 7;

//                printf("vertex code %d\n",voxelCode);

// TODO: try constant memory as well
numVertices = tex1Dfetch(vertexCountByVoxelCodeTex, voxelCode);

}

}

//        voxelCodes(x,y,z) = voxelCode;
vertexCounts(x,y,z) = numVertices;

//        if (numVertices > 0) {
//            atomicAdd(validVoxelCount,1);
//            printf("%d\n",numVertices);
//        }

}

}
// Maps any nonzero vertex count to 1 (flags voxels that emit geometry, so a
// scan over the flags yields compacted indices).
struct Binarizer {
    inline __host__ __device__ uint operator()(const uint & val) {
        return static_cast<uint>(val != 0u);
    }
};
// Stream compaction: for every voxel that emits at least one vertex, writes
// its linear grid index into validVoxelIndices at the position given by the
// exclusive scan of the validity flags.
__global__ void computeValidVoxelIndicesKernel(const Tensor<3,uint,DeviceResident> vertexCounts,
const Tensor<3,uint,DeviceResident> validVoxelScanResult,
Tensor<1,uint,DeviceResident> validVoxelIndices) {

const uint x = threadIdx.x + blockDim.x * blockIdx.x;
const uint y = threadIdx.y + blockDim.y * blockIdx.y;
const uint z = threadIdx.z + blockDim.z * blockIdx.z;

//    const uint i = threadIdx.x + blockDim.x * blockIdx.x;

//    if (threadIdx.x == 0) {
//        printf("%d m\n",blockIdx.x);
//    }

if ( (x < vertexCounts.dimensionSize(0)) && (y < vertexCounts.dimensionSize(1)) && (z < vertexCounts.dimensionSize(2))) {

if (vertexCounts(x,y,z) > 0) {

//            printf("%d,%d,%d valid \n",x,y,z);

//            atomicAdd(nValid,1);

// Linear index of this voxel and its compacted output slot.
const uint i = x + vertexCounts.dimensionSize(0)*(y + vertexCounts.dimensionSize(1)*z);

const uint compactedIndex = validVoxelScanResult(x,y,z);

//            if (compactedIndex < 50) {
//                printf("%d (%d,%d,%d) valid -> %d\n",i,x,y,z,compactedIndex);
//            }

validVoxelIndices(compactedIndex) = i;

}

}

}
// Linearly interpolates the zero crossing of the TSDF along the edge from
// voxelCenterA (value valueA) to voxelCenterB (value valueB). When the two
// values straddle zero the interpolation parameter lies in [0,1].
template <typename Scalar>
inline __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> computeVertex(const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> & voxelCenterA,
                                                                           const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> & voxelCenterB,
                                                                           const Scalar valueA, const Scalar valueB) {

    const Scalar zeroCrossing = ( -valueA ) / ( valueB - valueA );
    const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> edge = voxelCenterB - voxelCenterA;
    return voxelCenterA + zeroCrossing * edge;
}
// Emits triangle vertices for the previously-identified valid voxels. Each
// thread handles one valid voxel: it interpolates the surface crossing on
// all 12 cube edges into shared memory, recomputes the marching cubes case
// code, and copies the vertices selected by the case's edge-index table into
// the output array at the offset given by the vertex-count scan.
template <typename Scalar, typename VoxelT>
__global__ void computeTrianglesKernel(const Tensor<1,uint,DeviceResident> validVoxelIndices,
const Tensor<3,uint,DeviceResident> vertexCountScanResult,
const Tensor<3,VoxelT,DeviceResident> voxelGrid,
//const Tensor<3,uint,DeviceResident> voxelCodes,
Tensor<2,Scalar,DeviceResident> vertices) {

typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;

const uint i = threadIdx.x + blockDim.x * blockIdx.x;

if (i < validVoxelIndices.dimensionSize(0)) {

const uint index = validVoxelIndices(i);

//        if (vertexCountScanResult.data()[index] < 5) {
//            printf("%d: %d !!\n",index,vertexCountScanResult.data()[index]);
//        }

//        printf("%d: %d\n",i,index);

// Unpack the linear voxel index back into (x,y,z).
const uint x = index % voxelGrid.dimensionSize(0);
const uint y = (index / voxelGrid.dimensionSize(0)) % voxelGrid.dimensionSize(1);
const uint z = index / (voxelGrid.dimensionSize(0)*voxelGrid.dimensionSize(1));

//        if (vertexCountScanResult.data()[index] < 5) {
//            printf("%d: %d %d %d $\n",index,x,y,z);
//        }

// Cube corner positions and their TSDF values, in standard marching
// cubes corner order (same order as classifyVoxelsKernel).
Vec3 vertexCenters[8];
vertexCenters[0] = Vec3(x, y, z );
vertexCenters[1] = Vec3(x + 1, y, z );
vertexCenters[2] = Vec3(x + 1, y + 1, z );
vertexCenters[3] = Vec3(x, y + 1, z );
vertexCenters[4] = Vec3(x, y, z + 1);
vertexCenters[5] = Vec3(x + 1, y, z + 1);
vertexCenters[6] = Vec3(x + 1, y + 1, z + 1);
vertexCenters[7] = Vec3(x, y + 1, z + 1);

Scalar centerVals[8];
centerVals[0] = sampleVoxelGrid(voxelGrid, x, y, z );
centerVals[1] = sampleVoxelGrid(voxelGrid, x + 1, y, z );
centerVals[2] = sampleVoxelGrid(voxelGrid, x + 1, y + 1, z );
centerVals[3] = sampleVoxelGrid(voxelGrid, x, y + 1, z );
centerVals[4] = sampleVoxelGrid(voxelGrid, x, y, z + 1);
centerVals[5] = sampleVoxelGrid(voxelGrid, x + 1, y, z + 1);
centerVals[6] = sampleVoxelGrid(voxelGrid, x + 1, y + 1, z + 1);
centerVals[7] = sampleVoxelGrid(voxelGrid, x, y + 1, z + 1);

static constexpr int maxVertsPerVoxel = 12;
// TODO: Richard's code uses 32 --- why? there's enough memory for up to 256
// NOTE(review): numThreads must equal the launch block size (256 in
// extractSurface); the shared buffer below is sized and strided by it.
static constexpr int numThreads = 256;

// TODO: make dynamic? will there be a performance hit?
__shared__ char s[maxVertsPerVoxel*numThreads*sizeof(Vec3)];

// avoids constructor issues
Vec3 * potentialVertexList = reinterpret_cast<Vec3 *>(&s[0]);

// One candidate vertex per cube edge (12 edges), strided by thread.
// TODO: why strided like this? is it faster the other way?
// TODO: use a fancy dispatch mechanism to compute only necessary verts?
potentialVertexList[threadIdx.x + 0*numThreads] = computeVertex(vertexCenters[0],vertexCenters[1],centerVals[0],centerVals[1]);
potentialVertexList[threadIdx.x + 1*numThreads] = computeVertex(vertexCenters[1],vertexCenters[2],centerVals[1],centerVals[2]);
potentialVertexList[threadIdx.x + 2*numThreads] = computeVertex(vertexCenters[2],vertexCenters[3],centerVals[2],centerVals[3]);
potentialVertexList[threadIdx.x + 3*numThreads] = computeVertex(vertexCenters[3],vertexCenters[0],centerVals[3],centerVals[0]);
potentialVertexList[threadIdx.x + 4*numThreads] = computeVertex(vertexCenters[4],vertexCenters[5],centerVals[4],centerVals[5]);
potentialVertexList[threadIdx.x + 5*numThreads] = computeVertex(vertexCenters[5],vertexCenters[6],centerVals[5],centerVals[6]);
potentialVertexList[threadIdx.x + 6*numThreads] = computeVertex(vertexCenters[6],vertexCenters[7],centerVals[6],centerVals[7]);
potentialVertexList[threadIdx.x + 7*numThreads] = computeVertex(vertexCenters[7],vertexCenters[4],centerVals[7],centerVals[4]);
potentialVertexList[threadIdx.x + 8*numThreads] = computeVertex(vertexCenters[0],vertexCenters[4],centerVals[0],centerVals[4]);
potentialVertexList[threadIdx.x + 9*numThreads] = computeVertex(vertexCenters[1],vertexCenters[5],centerVals[1],centerVals[5]);
potentialVertexList[threadIdx.x + 10*numThreads] = computeVertex(vertexCenters[2],vertexCenters[6],centerVals[2],centerVals[6]);
potentialVertexList[threadIdx.x + 11*numThreads] = computeVertex(vertexCenters[3],vertexCenters[7],centerVals[3],centerVals[7]);

__syncthreads();

// Recompute the case code instead of reading a stored one.
// TODO: recompute?
//const uint voxelCode = voxelCodes(x,y,z);
uint voxelCode;
voxelCode = uint(centerVals[0] < Scalar(0));
voxelCode += uint(centerVals[1] < Scalar(0)) << 1;
voxelCode += uint(centerVals[2] < Scalar(0)) << 2;
voxelCode += uint(centerVals[3] < Scalar(0)) << 3;
voxelCode += uint(centerVals[4] < Scalar(0)) << 4;
voxelCode += uint(centerVals[5] < Scalar(0)) << 5;
voxelCode += uint(centerVals[6] < Scalar(0)) << 6;
voxelCode += uint(centerVals[7] < Scalar(0)) << 7;

const uint numVertices = tex1Dfetch(vertexCountByVoxelCodeTex, voxelCode);

//        if (vertexCountScanResult.data()[index] < 5) {
//            printf("%d: %d ?!\n",index,voxelCode);
//            printf("%d: %d ??\n",index,numVertices);
//        }

// Gather the case's selected edge vertices into the compacted output.
for (uint v = 0; v < numVertices; ++v) {

const uint vertexIndex = tex1Dfetch(vertexIndicesByVoxelCodeTex, voxelCode*16 + v);

const uint outputIndex = vertexCountScanResult.data()[index] + v;

Eigen::Map<Vec3> map(&vertices(0,outputIndex));

map = potentialVertexList[threadIdx.x + vertexIndex*numThreads];

//            if (outputIndex < 10) {
//                printf("%d: %f %f %f\n",outputIndex,map(0),map(1),map(2));
//            }

}

}

}
// Extracts a triangle soup from the TSDF voxel grid via marching cubes.
// Pipeline:
//   1. classify every voxel -> number of vertices it will emit
//   2. exclusive scan of the counts -> per-voxel output offset
//   3. binarize + scan the counts -> compacted slot per geometry voxel
//   4. gather the linear indices of the geometry-bearing voxels
//   5. emit triangle vertices for those voxels only
// `vertices` is resized to 3 x numVertices; every 3 consecutive columns form
// one triangle. Voxels with corner weight below weightThreshold emit nothing.
template <typename Scalar,
          typename VoxelT>
void extractSurface(ManagedTensor<2, Scalar, DeviceResident> & vertices,
                    const VoxelGrid<Scalar,VoxelT,DeviceResident> & voxelGrid,
                    const Scalar weightThreshold) {

    std::cout << "threshold: " << weightThreshold << std::endl;

    // TODO: ideas to make this faster
    //
    // 1. the extra storage for whether or not each voxel contains geometry is probably wasteful
    //    and only done so we can use thrust::exclusive_scan. this could perhaps be made faster
    //    with a custom implementation of the exclusive_scan
    //
    // 2. Richard's code claims that recalculating the voxel code is faster than storing it in
    //    global memory. this should also be investigated

    // fix: these scratch tensors were function-local statics sized by the
    // first call's grid dimensions; a later call with a larger grid would
    // have written out of bounds. Allocate them per call instead.
    ManagedTensor<3,uint,DeviceResident> dVertexCounts(voxelGrid.dimensions());

    {
        dim3 block(16,16,4);
        // fix: round the grid up so voxel counts that are not multiples of the
        // block size are still fully covered (the plain integer division used
        // before silently skipped trailing voxels; the kernel bounds-checks,
        // so the extra threads are harmless).
        dim3 grid(intDivideAndCeil(voxelGrid.size(0),block.x),
                  intDivideAndCeil(voxelGrid.size(1),block.y),
                  intDivideAndCeil(voxelGrid.size(2),block.z));

        classifyVoxelsKernel<<<grid,block>>>(voxelGrid.grid(),weightThreshold,
                                             dVertexCounts);
    }

    cudaDeviceSynchronize();
    CheckCudaDieOnError();

    // Per-voxel output offsets.
    ManagedTensor<3,uint,DeviceResident> vertexCountScanResult(voxelGrid.dimensions());

    thrust::exclusive_scan(thrust::device_ptr<uint>(dVertexCounts.data()),
                           thrust::device_ptr<uint>(dVertexCounts.data() + dVertexCounts.count()),
                           thrust::device_ptr<uint>(vertexCountScanResult.data()));

    // Total vertex count = last exclusive-scan element + last input element.
    uint numVertices;
    cudaMemcpy(&numVertices,vertexCountScanResult.data() + vertexCountScanResult.count()-1,sizeof(uint),cudaMemcpyDeviceToHost);
    uint lastNumVertices;
    cudaMemcpy(&lastNumVertices,dVertexCounts.data() + dVertexCounts.count()-1,sizeof(uint),cudaMemcpyDeviceToHost);
    numVertices += lastNumVertices;

    printf("%d vertices\n",numVertices);

    // Flag geometry-bearing voxels and scan the flags for compaction slots.
    ManagedTensor<3,uint,DeviceResident> validVoxelScanResult(voxelGrid.dimensions());

    thrust::transform(thrust::device_ptr<uint>(dVertexCounts.data()),
                      thrust::device_ptr<uint>(dVertexCounts.data() + dVertexCounts.count()),
                      thrust::device_ptr<uint>(validVoxelScanResult.data()),
                      Binarizer());

    thrust::exclusive_scan(thrust::device_ptr<uint>(validVoxelScanResult.data()),
                           thrust::device_ptr<uint>(validVoxelScanResult.data() + validVoxelScanResult.count()),
                           thrust::device_ptr<uint>(validVoxelScanResult.data()));

    uint numValidVoxels;
    cudaMemcpy(&numValidVoxels,validVoxelScanResult.data() + validVoxelScanResult.count()-1,sizeof(uint),cudaMemcpyDeviceToHost);
    // The exclusive scan does not count the last voxel; add it if it is valid.
    if (lastNumVertices > 0) {
        ++numValidVoxels;
    }

    Eigen::Matrix<uint,1,1> validVoxelIndicesDim(numValidVoxels);
    ManagedTensor<1,uint,DeviceResident> validVoxelIndices(validVoxelIndicesDim);

    {
        dim3 block(16,16,4);
        dim3 grid(intDivideAndCeil(voxelGrid.size(0),block.x),
                  intDivideAndCeil(voxelGrid.size(1),block.y),
                  intDivideAndCeil(voxelGrid.size(2),block.z));

        computeValidVoxelIndicesKernel<<<grid,block>>>(dVertexCounts,
                                                       validVoxelScanResult,
                                                       validVoxelIndices);
    }

    cudaDeviceSynchronize();
    CheckCudaDieOnError();

    Eigen::Matrix<uint,2,1> verticesDim(3,numVertices);
    vertices.resize(verticesDim);

    {
        // NOTE: must match numThreads inside computeTrianglesKernel.
        const uint nThreads = 256;
        computeTrianglesKernel<<<intDivideAndCeil(numValidVoxels,nThreads),nThreads>>>(validVoxelIndices,
                                                                                      vertexCountScanResult,
                                                                                      voxelGrid.grid(),
                                                                                      vertices);
    }

    cudaDeviceSynchronize();
    CheckCudaDieOnError();

}
// De-duplicates the marching cubes vertex soup: sorts a copy of the vertices,
// removes duplicates, and computes for each original vertex the index of its
// unique representative via binary search. Returns the number of unique
// vertices; only the first numUniqueVertices columns of weldedVertices are
// meaningful afterwards, and `indices` maps original -> welded positions.
template <typename Scalar>
uint weldVertices(const Tensor<2,Scalar,DeviceResident> & vertices,
Tensor<2,Scalar,DeviceResident> & weldedVertices,
ManagedTensor<1,int,DeviceResident> & indices) {

typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;

assert(vertices.dimensionSize(0) == 3);

const uint numVertices = vertices.dimensionSize(1);

// Work on a copy so the original (triangle-ordered) soup is preserved.
weldedVertices.copyFrom(vertices);

thrust::device_ptr<Vec3> weldedVertexPointer(reinterpret_cast<Vec3 *>(weldedVertices.data()));

// Sort, then collapse exact duplicates.
thrust::sort( weldedVertexPointer, weldedVertexPointer + numVertices, VecLess<Scalar,3>() );

thrust::device_ptr<Vec3> endOfUniqueVertices =
thrust::unique(weldedVertexPointer, weldedVertexPointer + numVertices, VecEqual<Scalar,3>() );

const uint numUniqueVertices = thrust::distance(weldedVertexPointer, endOfUniqueVertices);

// For each original vertex, find its position in the sorted unique list.
thrust::device_ptr<int> indexPointer(indices.data());

thrust::device_ptr<const Vec3> originalVertexPointer(reinterpret_cast<const Vec3 *>(vertices.data()));

thrust::lower_bound(weldedVertexPointer, endOfUniqueVertices,
originalVertexPointer, originalVertexPointer + numVertices,
indexPointer, VecLess<Scalar,3>() );

return numUniqueVertices;

}
// compute colors of vertices
// Colors each surface vertex by the semantic label of the cell it falls in:
// the vertex is mapped from grid to world coordinates, then into a cubic
// label grid of dimension^3 cells spanning [min_val, max_val]. class_colors
// is laid out channel-major: class_colors[channel * num_classes + label].
// Vertices outside the label grid are painted white.
// NOTE(review): std::round in device code requires a CUDA toolchain that
// maps it to the device intrinsic -- confirm.
template <typename Scalar>
__global__ void computeColorsKernel(const Tensor<2, Scalar, DeviceResident> vertices,
int* labels,
unsigned char* class_colors,
Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> min_val,
Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> max_val,
Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> offset,
Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> scale,
Tensor<2, unsigned char, DeviceResident> colors, int dimension, int num_classes)
{
const uint i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < vertices.dimensionSize(1))
{
// 3D point: grid -> world coordinates
Scalar X = vertices(0, i) * scale(0) + offset(0);
Scalar Y = vertices(1, i) * scale(1) + offset(1);
Scalar Z = vertices(2, i) * scale(2) + offset(2);

// voxel grid cell size per axis
Scalar step_x = (max_val(0) - min_val(0)) / dimension;
Scalar step_y = (max_val(1) - min_val(1)) / dimension;
Scalar step_z = (max_val(2) - min_val(2)) / dimension;

// grid location of the vertex
int x = std::round((X - min_val(0)) / step_x);
int y = std::round((Y - min_val(1)) / step_y);
int z = std::round((Z - min_val(2)) / step_z);

if (x >= 0 && x < dimension && y >= 0 && y < dimension && z >= 0 && z < dimension)
{
int label = labels[x * dimension * dimension + y * dimension + z];
colors(0, i) = class_colors[0 * num_classes + label];
colors(1, i) = class_colors[1 * num_classes + label];
colors(2, i) = class_colors[2 * num_classes + label];
}
else
{
// Outside the label grid: default to white.
colors(0, i) = 255;
colors(1, i) = 255;
colors(2, i) = 255;
}
}
}
template <typename Scalar, typename VoxelT>
void computeColors(const Tensor<2, Scalar, DeviceResident> & vertices, int* labels,
unsigned char* class_colors, const VoxelGrid<Scalar, VoxelT, DeviceResident> & voxelGrid,
Tensor<2, unsigned char, DeviceResident> & colors, int dimension, int num_classes)
{
const uint numVertices = vertices.dimensionSize(1);
const uint nThreads = 256;
computeColorsKernel<<<intDivideAndCeil(numVertices, nThreads), nThreads>>>(vertices, labels, class_colors, voxelGrid.min(), voxelGrid.max(),
voxelGrid.gridToWorldOffset(), voxelGrid.gridToWorldScale(), colors, dimension, num_classes);
}
// extract labels from voxel grid
// Projects each depth pixel into the TSDF volume and reads the most probable
// semantic class from the probability voxel, writing it into the label image.
// Pixels with no depth, or whose back-projection falls outside the grid, get
// label 0.
// NOTE(review): the class count is hard-coded to 10 here even though
// class_colors carries a class dimension -- confirm against callers.
// NOTE(review): label_colors is unconditionally set to black; the per-class
// color lookup is commented out below.
template <typename TransformerT,
typename DepthCameraModelT,
typename DepthT>
__global__ void computeLabelsKernel(const typename TransformerT::DeviceModule transformer,
const DepthCameraModelT depthCameraModel,
const DeviceTensor2<DepthT> depthMap,
const VoxelGrid<float, CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>, DeviceResident> voxelGrid,
DeviceTensor2<int> labels,
DeviceTensor2<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > label_colors,
const DeviceTensor1<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > class_colors, int width, int height)
{
typedef Eigen::Matrix<int,2,1,Eigen::DontAlign> Vec2i;

// One thread per pixel of the depth map.
const uint index = threadIdx.x + blockDim.x * blockIdx.x;
const int x = index % width;
const int y = index / width;

if (x < width && y < height)
{
const Vec2i loc(x, y);
DepthT depth = depthMap(loc);
if (depth > 0)
{
// backprojection: pixel -> camera ray -> world -> voxel grid
const Eigen::Matrix<float,2,1> point2d(x, y);
const Eigen::Matrix<float,3,1> liveCoord = depthCameraModel.unproject(point2d, depth);
const Eigen::Matrix<float,3,1> worldCoord = transformer.transformLiveToWorld(liveCoord);
const Eigen::Matrix<float,3,1> gridCoord = voxelGrid.worldToGrid(worldCoord);

int X = int(gridCoord(0));
int Y = int(gridCoord(1));
int Z = int(gridCoord(2));
if (X >= 0 && X < voxelGrid.size(0) && Y >= 0 && Y < voxelGrid.size(1) && Z >= 0 && Z < voxelGrid.size(2))
{
// argmax over the per-class probabilities stored in the voxel
CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel> voxel = voxelGrid(X, Y, Z);
Eigen::Matrix<float,10,1,Eigen::DontAlign> prob;
prob = voxel.value<ProbabilityVoxel>();
int label = -1;
float max_prob = -1;
for (int i = 0; i < 10; i++)
{
if (prob(i) > max_prob)
{
max_prob = prob(i);
label = i;
}
}
labels(loc) = label;
}
else
labels(loc) = 0;
}
else
labels(loc) = 0;

// set color
// label_colors(loc) = class_colors(labels(loc));
label_colors(loc)(0) = 0;
label_colors(loc)(1) = 0;
label_colors(loc)(2) = 0;
}
}
template <typename TransformerT,
typename DepthCameraModelT,
typename DepthT>
void computeLabels(const TransformerT & transformer,
const DepthCameraModelT & depthCameraModel,
const DeviceTensor2<DepthT> & depthMap,
const VoxelGrid<float, CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>, DeviceResident> & voxelGrid,
DeviceTensor2<int> & labels,
DeviceTensor2<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > & label_colors,
const DeviceTensor1<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > & class_colors)
{
const uint width = depthMap.width();
const uint height = depthMap.height();
const uint output_size = width * height;
const uint nThreads = 256;
computeLabelsKernel<TransformerT,DepthCameraModelT,DepthT><<<intDivideAndCeil(output_size, nThreads), nThreads>>>
(transformer.deviceModule(), depthCameraModel, depthMap, voxelGrid, labels, label_colors, class_colors, width, height);
}
// instances
// --- explicit template instantiations --------------------------------------
// extractSurface for every voxel composition used by the application:
template void extractSurface(ManagedTensor<2,float,DeviceResident> &,
const VoxelGrid<float,CompositeVoxel<float,TsdfVoxel>,DeviceResident> &,
const float);

template void extractSurface(ManagedTensor<2,float,DeviceResident> &,
const VoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ColorVoxel>,DeviceResident> &,
const float);

template void extractSurface(ManagedTensor<2,float,DeviceResident> &,
const VoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>,DeviceResident> &,
const float);

// Vertex welding (single-precision only).
template uint weldVertices(const Tensor<2,float,DeviceResident> &,
Tensor<2,float,DeviceResident> &,
ManagedTensor<1,int,DeviceResident> &);

// Per-vertex semantic coloring for the same three voxel compositions.
template void computeColors(const Tensor<2, float, DeviceResident> &, int*,
unsigned char*, const VoxelGrid<float, CompositeVoxel<float,TsdfVoxel>, DeviceResident> &,
Tensor<2, unsigned char, DeviceResident> &, int, int);

template void computeColors(const Tensor<2, float, DeviceResident> &, int*,
unsigned char*, const VoxelGrid<float, CompositeVoxel<float,TsdfVoxel,ColorVoxel>, DeviceResident> &,
Tensor<2, unsigned char, DeviceResident> &, int, int);

template void computeColors(const Tensor<2, float, DeviceResident> &, int*,
unsigned char*, const VoxelGrid<float, CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>, DeviceResident> &,
Tensor<2, unsigned char, DeviceResident> &, int, int);

// Label extraction for the rigid transformer / Poly3 camera configuration.
template void computeLabels(const RigidTransformer<float> &,
const Poly3CameraModel<float> &,
const DeviceTensor2<float> &,
const VoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>, DeviceResident> &,
DeviceTensor2<int> &, DeviceTensor2<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > &,
const DeviceTensor1<Eigen::Matrix<unsigned char,3,1,Eigen::DontAlign> > &);
} // namespace df
#include <cuda_runtime_api.h>
#include <visionaray/cuda/cast.h>
#include <visionaray/math/unorm.h>
#include <visionaray/math/vector.h>
#include <gtest/gtest.h>
using namespace visionaray;
//-------------------------------------------------------------------------------------------------
// Test casts between CUDA and Visionaray vector types (e.g. float3 <-> vector<3, float>)
//
// Casts CUDA builtin vector types (char2 ... float4) to the corresponding
// visionaray::vector<N, T> and verifies every component survives unchanged.
// Component values exercise the extremes (min/max) of each element type.
TEST(CastCU, CudaToVisionaray)
{
    // vec2 -----------------------------------------------

    char2 c2 = make_char2(-96, 127);
    auto cc2 = cuda::cast<vector<2, char>>(c2);
    EXPECT_EQ(c2.x, cc2.x);
    EXPECT_EQ(c2.y, cc2.y);

    uchar2 uc2 = make_uchar2(96, 255);
    auto uuc2 = cuda::cast<vector<2, unsigned char>>(uc2);
    EXPECT_EQ(uc2.x, uuc2.x);
    EXPECT_EQ(uc2.y, uuc2.y);

    short2 s2 = make_short2(-4711, 32767);
    auto ss2 = cuda::cast<vector<2, short>>(s2);
    EXPECT_EQ(s2.x, ss2.x);
    EXPECT_EQ(s2.y, ss2.y);

    ushort2 us2 = make_ushort2(4711, 65535);
    auto uus2 = cuda::cast<vector<2, unsigned short>>(us2);
    EXPECT_EQ(us2.x, uus2.x);
    EXPECT_EQ(us2.y, uus2.y);

    int2 i2 = make_int2(-1024, 2147483647);
    auto ii2 = cuda::cast<vector<2, int>>(i2);
    EXPECT_EQ(i2.x, ii2.x);
    EXPECT_EQ(i2.y, ii2.y);

    uint2 ui2 = make_uint2(1024, 4294967295);
    auto uui2 = cuda::cast<vector<2, unsigned int>>(ui2);
    EXPECT_EQ(ui2.x, uui2.x);
    EXPECT_EQ(ui2.y, uui2.y);

    float2 f2 = make_float2(0.0f, 4711.0f);
    auto ff2 = cuda::cast<vector<2, float>>(f2);
    EXPECT_FLOAT_EQ(f2.x, ff2.x);
    EXPECT_FLOAT_EQ(f2.y, ff2.y);

    // vec3 -----------------------------------------------

    char3 c3 = make_char3(-96, 0, 127);
    auto cc3 = cuda::cast<vector<3, char>>(c3);
    EXPECT_EQ(c3.x, cc3.x);
    EXPECT_EQ(c3.y, cc3.y);
    EXPECT_EQ(c3.z, cc3.z);

    uchar3 uc3 = make_uchar3(0, 96, 255);
    auto uuc3 = cuda::cast<vector<3, unsigned char>>(uc3);
    EXPECT_EQ(uc3.x, uuc3.x);
    EXPECT_EQ(uc3.y, uuc3.y);
    EXPECT_EQ(uc3.z, uuc3.z);

    short3 s3 = make_short3(-4711, 0, 32767);
    auto ss3 = cuda::cast<vector<3, short>>(s3);
    EXPECT_EQ(s3.x, ss3.x);
    EXPECT_EQ(s3.y, ss3.y);
    EXPECT_EQ(s3.z, ss3.z);

    ushort3 us3 = make_ushort3(0, 4711, 65535);
    auto uus3 = cuda::cast<vector<3, unsigned short>>(us3);
    EXPECT_EQ(us3.x, uus3.x);
    EXPECT_EQ(us3.y, uus3.y);
    EXPECT_EQ(us3.z, uus3.z);

    int3 i3 = make_int3(-1024, 0, 2147483647);
    auto ii3 = cuda::cast<vector<3, int>>(i3);
    EXPECT_EQ(i3.x, ii3.x);
    EXPECT_EQ(i3.y, ii3.y);
    EXPECT_EQ(i3.z, ii3.z);

    uint3 ui3 = make_uint3(0, 1024, 4294967295);
    auto uui3 = cuda::cast<vector<3, unsigned int>>(ui3);
    EXPECT_EQ(ui3.x, uui3.x);
    EXPECT_EQ(ui3.y, uui3.y);
    EXPECT_EQ(ui3.z, uui3.z);

    float3 f3 = make_float3(0.0f, 4711.0f, 65535.0f);
    auto ff3 = cuda::cast<vector<3, float>>(f3);
    EXPECT_FLOAT_EQ(f3.x, ff3.x);
    EXPECT_FLOAT_EQ(f3.y, ff3.y);
    EXPECT_FLOAT_EQ(f3.z, ff3.z);

    // vec4 -----------------------------------------------

    char4 c4 = make_char4(-128, -96, 0, 127);
    auto cc4 = cuda::cast<vector<4, char>>(c4);
    EXPECT_EQ(c4.x, cc4.x);
    EXPECT_EQ(c4.y, cc4.y);
    EXPECT_EQ(c4.z, cc4.z);
    EXPECT_EQ(c4.w, cc4.w);

    uchar4 uc4 = make_uchar4(0, 32, 96, 255);
    auto uuc4 = cuda::cast<vector<4, unsigned char>>(uc4);
    EXPECT_EQ(uc4.x, uuc4.x);
    EXPECT_EQ(uc4.y, uuc4.y);
    EXPECT_EQ(uc4.z, uuc4.z);
    EXPECT_EQ(uc4.w, uuc4.w);

    short4 s4 = make_short4(-32768, -4711, 0, 32767);
    auto ss4 = cuda::cast<vector<4, short>>(s4);
    EXPECT_EQ(s4.x, ss4.x);
    EXPECT_EQ(s4.y, ss4.y);
    EXPECT_EQ(s4.z, ss4.z);
    EXPECT_EQ(s4.w, ss4.w);

    ushort4 us4 = make_ushort4(0, 1024, 4711, 65535);
    auto uus4 = cuda::cast<vector<4, unsigned short>>(us4);
    EXPECT_EQ(us4.x, uus4.x);
    EXPECT_EQ(us4.y, uus4.y);
    EXPECT_EQ(us4.z, uus4.z);
    EXPECT_EQ(us4.w, uus4.w);

    // INT_MIN spelled as -2147483647 - 1: the literal 2147483648 does not
    // fit in int, so -2147483648 would be a negated long, not an int literal.
    int4 i4 = make_int4(-2147483647 - 1, -1024, 0, 2147483647);
    auto ii4 = cuda::cast<vector<4, int>>(i4);
    EXPECT_EQ(i4.x, ii4.x);
    EXPECT_EQ(i4.y, ii4.y);
    EXPECT_EQ(i4.z, ii4.z);
    EXPECT_EQ(i4.w, ii4.w);

    uint4 ui4 = make_uint4(0, 1024, 4711, 4294967295);
    auto uui4 = cuda::cast<vector<4, unsigned int>>(ui4);
    EXPECT_EQ(ui4.x, uui4.x);
    EXPECT_EQ(ui4.y, uui4.y);
    EXPECT_EQ(ui4.z, uui4.z);
    EXPECT_EQ(ui4.w, uui4.w);

    float4 f4 = make_float4(0.0f, 4711.0f, 65535.0f, 4294967295.0f);
    auto ff4 = cuda::cast<vector<4, float>>(f4);
    EXPECT_FLOAT_EQ(f4.x, ff4.x);
    EXPECT_FLOAT_EQ(f4.y, ff4.y);
    EXPECT_FLOAT_EQ(f4.z, ff4.z);
    EXPECT_FLOAT_EQ(f4.w, ff4.w);
}
// Inverse direction of CudaToVisionaray: casts visionaray::vector<N, T> to
// the CUDA builtin vector types and verifies every component survives
// unchanged, again using min/max boundary values per element type.
TEST(CastCU, VisionarayToCuda)
{
    // vec2 -----------------------------------------------

    vector<2, char> c2(-96, 127);
    char2 cc2 = cuda::cast<char2>(c2);
    EXPECT_EQ(c2.x, cc2.x);
    EXPECT_EQ(c2.y, cc2.y);

    vector<2, unsigned char> uc2(96, 255);
    uchar2 uuc2 = cuda::cast<uchar2>(uc2);
    EXPECT_EQ(uc2.x, uuc2.x);
    EXPECT_EQ(uc2.y, uuc2.y);

    vector<2, short> s2(-4711, 32767);
    short2 ss2 = cuda::cast<short2>(s2);
    EXPECT_EQ(s2.x, ss2.x);
    EXPECT_EQ(s2.y, ss2.y);

    vector<2, unsigned short> us2(4711, 65535);
    ushort2 uus2 = cuda::cast<ushort2>(us2);
    EXPECT_EQ(us2.x, uus2.x);
    EXPECT_EQ(us2.y, uus2.y);

    vector<2, int> i2(-1024, 2147483647);
    int2 ii2 = cuda::cast<int2>(i2);
    EXPECT_EQ(i2.x, ii2.x);
    EXPECT_EQ(i2.y, ii2.y);

    vector<2, unsigned int> ui2(1024, 4294967295);
    uint2 uui2 = cuda::cast<uint2>(ui2);
    EXPECT_EQ(ui2.x, uui2.x);
    EXPECT_EQ(ui2.y, uui2.y);

    vector<2, float> f2(0.0f, 4711.0f);
    float2 ff2 = cuda::cast<float2>(f2);
    EXPECT_FLOAT_EQ(f2.x, ff2.x);
    EXPECT_FLOAT_EQ(f2.y, ff2.y);

    // vec3 -----------------------------------------------

    vector<3, char> c3(-96, 0, 127);
    char3 cc3 = cuda::cast<char3>(c3);
    EXPECT_EQ(c3.x, cc3.x);
    EXPECT_EQ(c3.y, cc3.y);
    EXPECT_EQ(c3.z, cc3.z);

    vector<3, unsigned char> uc3(0, 96, 255);
    uchar3 uuc3 = cuda::cast<uchar3>(uc3);
    EXPECT_EQ(uc3.x, uuc3.x);
    EXPECT_EQ(uc3.y, uuc3.y);
    EXPECT_EQ(uc3.z, uuc3.z);

    vector<3, short> s3(-4711, 0, 32767);
    short3 ss3 = cuda::cast<short3>(s3);
    EXPECT_EQ(s3.x, ss3.x);
    EXPECT_EQ(s3.y, ss3.y);
    EXPECT_EQ(s3.z, ss3.z);

    vector<3, unsigned short> us3(0, 4711, 65535);
    ushort3 uus3 = cuda::cast<ushort3>(us3);
    EXPECT_EQ(us3.x, uus3.x);
    EXPECT_EQ(us3.y, uus3.y);
    EXPECT_EQ(us3.z, uus3.z);

    vector<3, int> i3(-1024, 0, 2147483647);
    int3 ii3 = cuda::cast<int3>(i3);
    EXPECT_EQ(i3.x, ii3.x);
    EXPECT_EQ(i3.y, ii3.y);
    EXPECT_EQ(i3.z, ii3.z);

    vector<3, unsigned int> ui3(0, 1024, 4294967295);
    uint3 uui3 = cuda::cast<uint3>(ui3);
    EXPECT_EQ(ui3.x, uui3.x);
    EXPECT_EQ(ui3.y, uui3.y);
    EXPECT_EQ(ui3.z, uui3.z);

    vector<3, float> f3(0.0f, 4711.0f, 65535.0f);
    float3 ff3 = cuda::cast<float3>(f3);
    EXPECT_FLOAT_EQ(f3.x, ff3.x);
    EXPECT_FLOAT_EQ(f3.y, ff3.y);
    EXPECT_FLOAT_EQ(f3.z, ff3.z);

    // vec4 -----------------------------------------------

    vector<4, char> c4(-128, -96, 0, 127);
    char4 cc4 = cuda::cast<char4>(c4);
    EXPECT_EQ(c4.x, cc4.x);
    EXPECT_EQ(c4.y, cc4.y);
    EXPECT_EQ(c4.z, cc4.z);
    EXPECT_EQ(c4.w, cc4.w);

    vector<4, unsigned char> uc4(0, 32, 96, 255);
    uchar4 uuc4 = cuda::cast<uchar4>(uc4);
    EXPECT_EQ(uc4.x, uuc4.x);
    EXPECT_EQ(uc4.y, uuc4.y);
    EXPECT_EQ(uc4.z, uuc4.z);
    EXPECT_EQ(uc4.w, uuc4.w);

    vector<4, short> s4(-32768, -4711, 0, 32767);
    short4 ss4 = cuda::cast<short4>(s4);
    EXPECT_EQ(s4.x, ss4.x);
    EXPECT_EQ(s4.y, ss4.y);
    EXPECT_EQ(s4.z, ss4.z);
    EXPECT_EQ(s4.w, ss4.w);

    vector<4, unsigned short> us4(0, 1024, 4711, 65535);
    ushort4 uus4 = cuda::cast<ushort4>(us4);
    EXPECT_EQ(us4.x, uus4.x);
    EXPECT_EQ(us4.y, uus4.y);
    EXPECT_EQ(us4.z, uus4.z);
    EXPECT_EQ(us4.w, uus4.w);

    // INT_MIN spelled as -2147483647 - 1: the literal 2147483648 does not
    // fit in int, so -2147483648 would be a negated long, not an int literal.
    vector<4, int> i4(-2147483647 - 1, -1024, 0, 2147483647);
    int4 ii4 = cuda::cast<int4>(i4);
    EXPECT_EQ(i4.x, ii4.x);
    EXPECT_EQ(i4.y, ii4.y);
    EXPECT_EQ(i4.z, ii4.z);
    EXPECT_EQ(i4.w, ii4.w);

    vector<4, unsigned int> ui4(0, 1024, 4711, 4294967295);
    uint4 uui4 = cuda::cast<uint4>(ui4);
    EXPECT_EQ(ui4.x, uui4.x);
    EXPECT_EQ(ui4.y, uui4.y);
    EXPECT_EQ(ui4.z, uui4.z);
    EXPECT_EQ(ui4.w, uui4.w);

    vector<4, float> f4(0.0f, 4711.0f, 65535.0f, 4294967295.0f);
    float4 ff4 = cuda::cast<float4>(f4);
    EXPECT_FLOAT_EQ(f4.x, ff4.x);
    EXPECT_FLOAT_EQ(f4.y, ff4.y);
    EXPECT_FLOAT_EQ(f4.z, ff4.z);
    EXPECT_FLOAT_EQ(f4.w, ff4.w);
}
// Casts visionaray::vector<N, unorm<B>> (normalized fixed-point) to the CUDA
// unsigned integer vector types and verifies the raw bit patterns match the
// source integer arrays.
// NOTE(review): building the unorm vectors via reinterpret_cast from plain
// integer arrays assumes vector<N, unorm<B>> is layout-compatible with B-bit
// unsigned integers; this type-punning technically violates strict aliasing —
// presumably fine for these trivially-copyable types, but std::memcpy would
// be the safe spelling. Verify against the unorm<B> definition.
TEST(CastCU, NormToCuda)
{
    // vec2 -----------------------------------------------

    unsigned char arr8_2[2] = { 96, 255 };
    vector<2, unorm<8>> un8_2 = *reinterpret_cast<vector<2, unorm<8>>*>(arr8_2);
    uchar2 uun8_2 = cuda::cast<uchar2>(un8_2);
    EXPECT_EQ(arr8_2[0], uun8_2.x);
    EXPECT_EQ(arr8_2[1], uun8_2.y);

    unsigned short arr16_2[2] = { 4711, 65535 };
    vector<2, unorm<16>> un16_2 = *reinterpret_cast<vector<2, unorm<16>>*>(arr16_2);
    ushort2 uun16_2 = cuda::cast<ushort2>(un16_2);
    EXPECT_EQ(arr16_2[0], uun16_2.x);
    EXPECT_EQ(arr16_2[1], uun16_2.y);

    unsigned int arr32_2[2] = { 1024, 4294967295 };
    vector<2, unorm<32>> un32_2 = *reinterpret_cast<vector<2, unorm<32>>*>(arr32_2);
    uint2 uun32_2 = cuda::cast<uint2>(un32_2);
    EXPECT_EQ(arr32_2[0], uun32_2.x);
    EXPECT_EQ(arr32_2[1], uun32_2.y);

    // vec3 -----------------------------------------------

    unsigned char arr8_3[3] = { 0, 96, 255 };
    vector<3, unorm<8>> un8_3 = *reinterpret_cast<vector<3, unorm<8>>*>(arr8_3);
    uchar3 uun8_3 = cuda::cast<uchar3>(un8_3);
    EXPECT_EQ(arr8_3[0], uun8_3.x);
    EXPECT_EQ(arr8_3[1], uun8_3.y);
    EXPECT_EQ(arr8_3[2], uun8_3.z);

    unsigned short arr16_3[3] = { 0, 4711, 65535 };
    vector<3, unorm<16>> un16_3 = *reinterpret_cast<vector<3, unorm<16>>*>(arr16_3);
    ushort3 uun16_3 = cuda::cast<ushort3>(un16_3);
    EXPECT_EQ(arr16_3[0], uun16_3.x);
    EXPECT_EQ(arr16_3[1], uun16_3.y);
    EXPECT_EQ(arr16_3[2], uun16_3.z);

    unsigned int arr32_3[3] = { 0, 1024, 4294967295 };
    vector<3, unorm<32>> un32_3 = *reinterpret_cast<vector<3, unorm<32>>*>(arr32_3);
    uint3 uun32_3 = cuda::cast<uint3>(un32_3);
    EXPECT_EQ(arr32_3[0], uun32_3.x);
    EXPECT_EQ(arr32_3[1], uun32_3.y);
    EXPECT_EQ(arr32_3[2], uun32_3.z);

    // vec4 -----------------------------------------------

    unsigned char arr8_4[4] = { 0, 32, 96, 255 };
    vector<4, unorm<8>> un8_4 = *reinterpret_cast<vector<4, unorm<8>>*>(arr8_4);
    uchar4 uun8_4 = cuda::cast<uchar4>(un8_4);
    EXPECT_EQ(arr8_4[0], uun8_4.x);
    EXPECT_EQ(arr8_4[1], uun8_4.y);
    EXPECT_EQ(arr8_4[2], uun8_4.z);
    EXPECT_EQ(arr8_4[3], uun8_4.w);

    unsigned short arr16_4[4] = { 0, 1024, 4711, 65535 };
    vector<4, unorm<16>> un16_4 = *reinterpret_cast<vector<4, unorm<16>>*>(arr16_4);
    ushort4 uun16_4 = cuda::cast<ushort4>(un16_4);
    EXPECT_EQ(arr16_4[0], uun16_4.x);
    EXPECT_EQ(arr16_4[1], uun16_4.y);
    EXPECT_EQ(arr16_4[2], uun16_4.z);
    EXPECT_EQ(arr16_4[3], uun16_4.w);

    unsigned int arr32_4[4] = { 0, 1024, 4711, 4294967295 };
    vector<4, unorm<32>> un32_4 = *reinterpret_cast<vector<4, unorm<32>>*>(arr32_4);
    uint4 uun32_4 = cuda::cast<uint4>(un32_4);
    EXPECT_EQ(arr32_4[0], uun32_4.x);
    EXPECT_EQ(arr32_4[1], uun32_4.y);
    EXPECT_EQ(arr32_4[2], uun32_4.z);
    EXPECT_EQ(arr32_4[3], uun32_4.w);
}
/**
 * \file dnn/src/cuda/conv_bias/quint4x4x32_wmma/wmma_conv_integer_u4_fhxfw.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include <stdio.h>
#include "src/cuda/utils.cuh"
#include "wmma_conv_integer_u4.cuh"
#if __CUDA_ARCH__ >= 730
using namespace nvcuda;
using namespace wmma::experimental::precision;
#endif
using namespace megdnn;
using namespace cuda;
using namespace wmma_conv_integer_subbyte;
namespace wmma_conv_integer_subbyte_fhxfw {
//! Compile-time tiling configuration for the u4 convolution kernel.
//! WARPS_W_  : warps tiling the output-width dimension of a thread block
//! WARPS_OC_ : warps tiling the output-channel dimension of a thread block
//! OUT_CHANNELS_PER_WARP_ : WMMA_M-wide output-channel tiles owned by a warp
//! OH_PER_WARP_ : output rows produced per warp
//! IC_UNROLL_SIZE_ : IC_BLK-sized input-channel groups consumed per slice
template <
        int WARPS_W_, int WARPS_OC_, int OUT_CHANNELS_PER_WARP_, int OH_PER_WARP_,
        int IC_UNROLL_SIZE_>
struct BlockConfig {
    static int const WARPS_W = WARPS_W_;
    static int const WARPS_OC = WARPS_OC_;
    static int const OUT_CHANNELS_PER_WARP = OUT_CHANNELS_PER_WARP_;
    static int const OH_PER_WARP = OH_PER_WARP_;
    static int const IC_UNROLL_SIZE = IC_UNROLL_SIZE_;
    // total input channels processed per main-loop slice
    static int const IC_BLKS = IC_BLK * IC_UNROLL_SIZE;
    static int const WARPS_PER_BLOCK = WARPS_W * WARPS_OC;
};
//! Derived sizes for the input-data staging buffer in shared memory.
//! All byte offsets are halved later because two 4-bit values share a byte.
template <typename ConvConfig, typename BlockConfig>
struct DataCount {
    // input width touched by one block row of outputs plus the filter halo
    static int const LANE_SIZE =
            BlockConfig::WARPS_W * WMMA_M * ConvConfig::SW + ConvConfig::FW - 1;
    // input rows touched per channel slice (outputs * stride + filter halo)
    static int const LANES_PER_SLICE =
            BlockConfig::OH_PER_WARP * ConvConfig::SH + ConvConfig::FH - 1;
    static int const LANES_PER_BLOCK =
            LANES_PER_SLICE * IC_BLK * BlockConfig::IC_UNROLL_SIZE;
    // ceil-divide the block's lanes across its warps
    static int const LANES_PER_WARP =
            (LANES_PER_BLOCK + BlockConfig::WARPS_PER_BLOCK - 1) /
            BlockConfig::WARPS_PER_BLOCK;
    // extra column padding, presumably to stagger shared-memory banks
    static int const SMEM_SKEW = (BlockConfig::IC_UNROLL_SIZE % 2 == 0) * SKEW;
    static int const SMEM_DATA_COL =
            (IC_BLK * BlockConfig::IC_UNROLL_SIZE * 8 + SMEM_SKEW) / 2;
    static int const SMEM_DATA_STRIDE = SMEM_DATA_COL * 2;
    static int const SMEM_DATA_ROW = LANE_SIZE * LANES_PER_SLICE;
};
//! Derived sizes for the filter staging buffer in shared memory and the
//! per-warp register cache used while loading it.
template <typename ConvConfig, typename BlockConfig>
struct FilterCount {
    static int const OUT_CHANNELS_PER_BLOCK =
            WMMA_M * BlockConfig::WARPS_OC * BlockConfig::OUT_CHANNELS_PER_WARP;
    static int const SMEM_FILTER_ROW = OUT_CHANNELS_PER_BLOCK;
    // extra column padding, presumably to stagger shared-memory banks
    static int const SMEM_SKEW =
            ((ConvConfig::FH * ConvConfig::FW * BlockConfig::IC_UNROLL_SIZE) % 2 == 0) *
            SKEW;
    // byte count is halved: two u4 weights per byte
    static int const SMEM_FILTER_COL =
            (BlockConfig::IC_BLKS * ConvConfig::FH * ConvConfig::FW * 8 + SMEM_SKEW) /
            2;
    static int const SMEM_FILTER_STRIDE = SMEM_FILTER_COL * 2;
    // ceil-divide rows (output channels) and columns (32-bit words) across
    // the block's warps / a warp's lanes
    static int const REG_FILTER_ROW =
            (SMEM_FILTER_ROW + BlockConfig::WARPS_PER_BLOCK - 1) /
            BlockConfig::WARPS_PER_BLOCK;
    static int const REG_FILTER_COL =
            (BlockConfig::IC_BLKS * ConvConfig::FH * ConvConfig::FW + WARP_SIZE - 1) /
            WARP_SIZE;
};
#if __CUDA_ARCH__ >= 730
//! Stages the input feature map from global to shared memory, one channel
//! slice at a time, through a per-thread register cache so the global loads
//! of the next slice can overlap computation on the current one.
//! Data is u4-packed: addresses are divided by 2 because two values share a
//! byte, and each 32-bit copy_t load moves 8 values.
template <typename ConvConfig_, typename BlockConfig_>
struct ConvDataGlobal2ShareMemVisitor {
    typedef int32_t copy_t;
    uint8_t* smem;           // destination staging buffer in shared memory
    const uint8_t* g_ptr;    // source position in the global input tensor
    int ci_stride, hi_stride;  // element strides per channel group / input row
    int b_ih, b_iw;          // block origin in input coordinates (may be < 0)
    int IH, IW;              // input height / width, for boundary checks
    copy_t zero;             // packed zero-point used for out-of-bounds taps
    const int warp_x = threadIdx.x / WARP_SIZE;
    const int warp_y = threadIdx.y;
    const int tid_in_warp = threadIdx.x % WARP_SIZE;
    const int warp_id = (warp_y * BlockConfig_::WARPS_W + warp_x);
    copy_t reg_cache[DataCount<ConvConfig_, BlockConfig_>::LANES_PER_WARP];
    __device__ ConvDataGlobal2ShareMemVisitor(
            uint8_t* smem, const uint8_t* g_ptr, int IH, int IW, int b_ih, int b_iw,
            copy_t zero)
            : smem{smem},
              g_ptr{g_ptr},
              b_ih{b_ih},
              b_iw{b_iw},
              IH{IH},
              IW{IW},
              zero{zero} {
        // 8 channels are interleaved per spatial position
        ci_stride = 8 * IH * IW;
        hi_stride = 8 * IW;
    }
    // Load this warp's share of the slice into registers; positions that fall
    // outside the input (padding) are filled with the packed zero-point.
    __device__ __forceinline__ void copy() {
        int col = (tid_in_warp << 3);
        // read input from global memory without boundary check
#pragma unroll
        for (int i = 0; i < DataCount<ConvConfig_, BlockConfig_>::LANES_PER_WARP; ++i) {
            int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id;
            int ci_idx = row / DataCount<ConvConfig_, BlockConfig_>::LANES_PER_SLICE;
            int hi_idx = row -
                         ci_idx * DataCount<ConvConfig_, BlockConfig_>::LANES_PER_SLICE;
            bool bounds = ((b_iw + tid_in_warp) >= 0) && ((b_iw + tid_in_warp) < IW) &&
                          ((b_ih + hi_idx) >= 0) && ((b_ih + hi_idx) < IH);
            if (bounds) {
                copy_t val = *(copy_t*)(&g_ptr
                                                [(ci_idx * ci_stride +
                                                  hi_idx * hi_stride + col) /
                                                 2]);
                reg_cache[i] = val;
            } else {
                reg_cache[i] = zero;
            }
        }
    }
    // Write the register cache into shared memory; rows beyond LANE_SIZE in a
    // warp are excess lanes and are dropped.
    __device__ __forceinline__ void commit() {
#pragma unroll
        for (int i = 0; i < DataCount<ConvConfig_, BlockConfig_>::LANES_PER_WARP; ++i) {
            if (tid_in_warp < DataCount<ConvConfig_, BlockConfig_>::LANE_SIZE) {
                int row = i * BlockConfig_::WARPS_PER_BLOCK + warp_id;
                int ci_idx =
                        row / DataCount<ConvConfig_, BlockConfig_>::LANES_PER_SLICE;
                int hi_idx =
                        row -
                        ci_idx * DataCount<ConvConfig_, BlockConfig_>::LANES_PER_SLICE;
                int y = hi_idx * DataCount<ConvConfig_, BlockConfig_>::LANE_SIZE +
                        tid_in_warp;
                int x = ci_idx * 8;
                *(copy_t*)(get_smem_ptr(y, x)) = reg_cache[i];
            }
        }
    }
    // (y, x) are element coordinates; /2 converts to the packed byte address.
    __device__ __forceinline__ uint8_t* get_smem_ptr(int y, int x) {
        return &smem
                [(y * DataCount<ConvConfig_, BlockConfig_>::SMEM_DATA_STRIDE + x) / 2];
    }
    // Advance the global pointer to the next input-channel slice.
    __device__ __forceinline__ void inc_stage() {
        g_ptr += BlockConfig_::IC_BLKS * ci_stride / 2;
    }
};
//! Stages the u4 filter weights from global to shared memory through a
//! register cache, rearranging them so each (spatial tap, channel group)
//! lands at the layout expected by wmma::load_matrix_sync. Tail positions
//! past the remaining input/output channels are zero-filled.
template <typename ConvConfig_, typename BlockConfig_>
struct ConvFilterGlobal2ShareMemVisitor {
    uint8_t* smem;           // destination staging buffer in shared memory
    const uint8_t* g_ptr;    // source position in the global filter tensor
    int co_stride, co_remain;  // bytes per output channel; channels left in tail
    int idx;                 // input-channel groups still to be consumed
    const int warp_x = threadIdx.x / WARP_SIZE;
    const int warp_y = threadIdx.y;
    const int tid_in_warp = threadIdx.x % WARP_SIZE;
    const int warp_id = (warp_y * BlockConfig_::WARPS_W + warp_x);
    typedef int32_t copy_t;
    copy_t reg_cache[FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_ROW]
                    [FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_COL];
    __device__ ConvFilterGlobal2ShareMemVisitor(
            uint8_t* smem, const uint8_t* g_ptr, int co_stride, int co_remain, int idx)
            : smem{smem},
              g_ptr{g_ptr},
              co_stride{co_stride},
              co_remain{co_remain},
              idx{idx} {}
    // Load this warp's share of the filter slice into registers; y indexes an
    // output channel, x a 32-bit word of packed weights. Out-of-range
    // positions (channel tails) are zeroed so they contribute nothing.
    __device__ __forceinline__ void copy() {
        int ci_remain = idx < BlockConfig_::IC_BLKS ? idx : BlockConfig_::IC_BLKS;
#pragma unroll
        for (int i = 0; i < FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_ROW;
             ++i) {
#pragma unroll
            for (int j = 0; j < FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_COL;
                 ++j) {
                int y = BlockConfig_::WARPS_PER_BLOCK * i + warp_id;
                int x = WARP_SIZE * j + tid_in_warp;
                bool valid =
                        (x < ci_remain * ConvConfig_::FH * ConvConfig_::FW) &&
                        (y <
                         FilterCount<
                                 ConvConfig_, BlockConfig_>::OUT_CHANNELS_PER_BLOCK) &&
                        (y < co_remain);
                if (valid) {
                    copy_t val = *(copy_t*)(&g_ptr[y * co_stride + x * 4]);
                    reg_cache[i][j] = val;
                } else {
                    reg_cache[i][j] = 0;
                }
            }
        }
    }
    // Scatter the register cache into shared memory, regrouping channel
    // blocks (x -> ci_outer/ci_inner) so a spatial tap's IC_BLK channels are
    // contiguous.
    __device__ __forceinline__ void commit() {
#pragma unroll
        for (int i = 0; i < FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_ROW;
             ++i) {
#pragma unroll
            for (int j = 0; j < FilterCount<ConvConfig_, BlockConfig_>::REG_FILTER_COL;
                 ++j) {
                int y = BlockConfig_::WARPS_PER_BLOCK * i + warp_id;
                int x = WARP_SIZE * j + tid_in_warp;
                int spatial_idx = x % (ConvConfig_::FH * ConvConfig_::FW);
                int ci_blk = x / (ConvConfig_::FH * ConvConfig_::FW);
                int ci_inner_blk = (ci_blk & 0x3);
                int ci_outer_blk = (ci_blk >> 2);
                int s_x = ci_outer_blk * IC_BLK * ConvConfig_::FH * ConvConfig_::FW +
                          spatial_idx * IC_BLK + ci_inner_blk;
                bool bounds =
                        (y <
                         FilterCount<
                                 ConvConfig_, BlockConfig_>::OUT_CHANNELS_PER_BLOCK) &&
                        (x < BlockConfig_::IC_BLKS * ConvConfig_::FH * ConvConfig_::FW);
                if (bounds)
                    *(copy_t*)get_smem_ptr(y, s_x * 8) = reg_cache[i][j];
            }
        }
    }
    // (y, x) are element coordinates; /2 converts to the packed byte address.
    __device__ __forceinline__ uint8_t* get_smem_ptr(int y, int x) {
        return &smem
                [(y * FilterCount<ConvConfig_, BlockConfig_>::SMEM_FILTER_STRIDE + x) /
                 2];
    }
    // Advance to the next input-channel slice of the filter.
    __device__ __forceinline__ void inc_stage() {
        idx -= BlockConfig_::IC_BLKS;
        g_ptr += BlockConfig_::IC_BLKS * ConvConfig_::FH * ConvConfig_::FW * 4;
    }
};
//! Loads one (spatial tap, channel group) worth of staged data and filter
//! values from shared memory into the warp's WMMA fragments.
//! data_spatial_idx / filter_spatial_idx select the tap; ic_blk selects the
//! WMMA_K-wide channel group within the current slice.
template <typename ConvConfig_, typename BlockConfig_>
__device__ inline void load_share_mem(
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major>
                data_frag[BlockConfig_::OH_PER_WARP],
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major>
                filter_frag[BlockConfig_::OUT_CHANNELS_PER_WARP],
        ConvDataGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_data_visitor,
        ConvFilterGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_filter_visitor,
        int data_spatial_idx, int filter_spatial_idx, int ic_blk) {
    const int warp_y = threadIdx.y;
    uint8_t* __restrict__ s_ptr_data =
            gbl2smem_data_visitor.get_smem_ptr(data_spatial_idx, ic_blk * WMMA_K);
    uint8_t* __restrict__ s_ptr_filter = gbl2smem_filter_visitor.get_smem_ptr(
            warp_y * WMMA_M, ic_blk * WMMA_K * ConvConfig_::FH * ConvConfig_::FW +
                                     filter_spatial_idx * WMMA_K);
    // one data fragment per output row owned by the warp; rows are LANE_SIZE
    // apart in the staging buffer (byte offsets halved for u4 packing)
#pragma unroll
    for (int i = 0; i < BlockConfig_::OH_PER_WARP; ++i) {
        wmma::load_matrix_sync(
                data_frag[i],
                s_ptr_data +
                        i * DataCount<ConvConfig_, BlockConfig_>::LANE_SIZE *
                                DataCount<ConvConfig_, BlockConfig_>::SMEM_DATA_STRIDE /
                                2,
                DataCount<ConvConfig_, BlockConfig_>::SMEM_DATA_STRIDE);
    }
    // one filter fragment per output-channel tile owned by the warp; tiles of
    // different warps are interleaved WMMA_M * WARPS_OC rows apart
#pragma unroll
    for (int j = 0; j < BlockConfig_::OUT_CHANNELS_PER_WARP; ++j) {
        wmma::load_matrix_sync(
                filter_frag[j],
                s_ptr_filter +
                        j * WMMA_M * BlockConfig_::WARPS_OC *
                                FilterCount<
                                        ConvConfig_, BlockConfig_>::SMEM_FILTER_STRIDE /
                                2,
                FilterCount<ConvConfig_, BlockConfig_>::SMEM_FILTER_STRIDE);
    }
}
//! Accumulates one matrix-multiply step: every (filter tile, data tile) pair
//! is multiplied and added into its int32 accumulator fragment.
template <size_t OUT_CHANNELS_PER_WARP, size_t OH_PER_WARP>
__device__ inline void calc(
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major>
                data_frag[OH_PER_WARP],
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major>
                filter_frag[OUT_CHANNELS_PER_WARP],
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t>
                acc_frag[OUT_CHANNELS_PER_WARP][OH_PER_WARP]) {
#pragma unroll
    for (int i = 0; i < OUT_CHANNELS_PER_WARP; ++i) {
#pragma unroll
        for (int j = 0; j < OH_PER_WARP; ++j) {
            // acc += filter * data (int32 accumulate of u4 operands)
            wmma::mma_sync(
                    acc_frag[i][j], filter_frag[i], data_frag[j], acc_frag[i][j]);
        }
    }
}
//! Consumes one input-channel slice: iterates over every (channel group,
//! filter tap) of the slice, multiply-accumulating into acc_frag.
//! Fragments are double-buffered (index loop_count % 2): while one buffer is
//! fed to the tensor cores, the next tap is loaded into the other. When this
//! is not the last slice, the next slice's global->register copy is issued up
//! front and committed to shared memory at the end, overlapping the math.
template <bool last_slice, typename ConvConfig_, typename BlockConfig_>
__device__ void consume_slice(
        ConvDataGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_data_visitor,
        ConvFilterGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_filter_visitor,
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major>
                data_frag[2][BlockConfig_::OH_PER_WARP],
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major>
                filter_frag[2][BlockConfig_::OUT_CHANNELS_PER_WARP],
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t>
                acc_frag[BlockConfig_::OUT_CHANNELS_PER_WARP]
                        [BlockConfig_::OH_PER_WARP]) {
    if (!last_slice) {
        // start fetching the next slice into registers while we compute
        gbl2smem_data_visitor.inc_stage();
        gbl2smem_filter_visitor.inc_stage();
        gbl2smem_data_visitor.copy();
        gbl2smem_filter_visitor.copy();
    }
    int data_spatial_idx_base = threadIdx.x / WARP_SIZE * WMMA_N;
    int loop_count = 0;
#pragma unroll
    for (; loop_count <
           BlockConfig_::IC_UNROLL_SIZE * ConvConfig_::FH * ConvConfig_::FW - 1;
         loop_count++) {
        // compute on the current buffer ...
        calc<BlockConfig_::OUT_CHANNELS_PER_WARP, BlockConfig_::OH_PER_WARP>(
                data_frag[loop_count % 2], filter_frag[loop_count % 2], acc_frag);
        // ... while loading tap (loop_count + 1) into the other buffer
        int filter_spatial_idx = (loop_count + 1) % (ConvConfig_::FH * ConvConfig_::FW);
        int ic_blk = (loop_count + 1) / (ConvConfig_::FH * ConvConfig_::FW);
        int fh = filter_spatial_idx / ConvConfig_::FW;
        int fw = filter_spatial_idx % ConvConfig_::FW;
        int data_spatial_idx = data_spatial_idx_base +
                               fh * DataCount<ConvConfig_, BlockConfig_>::LANE_SIZE +
                               fw;
        load_share_mem<ConvConfig_, BlockConfig_>(
                data_frag[(loop_count + 1) % 2], filter_frag[(loop_count + 1) % 2],
                gbl2smem_data_visitor, gbl2smem_filter_visitor, data_spatial_idx,
                filter_spatial_idx, ic_blk);
    }
    // final tap of the slice (nothing left to prefetch)
    calc<BlockConfig_::OUT_CHANNELS_PER_WARP, BlockConfig_::OH_PER_WARP>(
            data_frag[(loop_count % 2)], filter_frag[(loop_count % 2)], acc_frag);
    if (!last_slice) {
        // publish the next slice to shared memory and prime buffer 0 with its
        // first tap; syncthreads brackets the shared-memory overwrite
        __syncthreads();
        gbl2smem_data_visitor.commit();
        gbl2smem_filter_visitor.commit();
        __syncthreads();
        load_share_mem<ConvConfig_, BlockConfig_>(
                data_frag[0], filter_frag[0], gbl2smem_data_visitor,
                gbl2smem_filter_visitor, data_spatial_idx_base, 0, 0);
    }
}
#if 0
// Simpler, non-pipelined variant of consume_slice: a single fragment buffer,
// loaded immediately before each mma. Disabled via the surrounding `#if 0`;
// apparently kept for reference / debugging.
template <bool last_slice, typename ConvConfig_, typename BlockConfig_>
__device__ void consume_slice_no_reg_cache(
        ConvDataGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_data_visitor,
        ConvFilterGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_>&
                gbl2smem_filter_visitor,
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4,
                wmma::col_major>
                data_frag[BlockConfig_::OH_PER_WARP],
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4,
                wmma::row_major>
                filter_frag[BlockConfig_::OUT_CHANNELS_PER_WARP],
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t>
                acc_frag[BlockConfig_::OUT_CHANNELS_PER_WARP]
                        [BlockConfig_::OH_PER_WARP]) {
    if (!last_slice) {
        // prefetch the next slice into registers while computing this one
        gbl2smem_data_visitor.inc_stage();
        gbl2smem_filter_visitor.inc_stage();
        gbl2smem_data_visitor.copy();
        gbl2smem_filter_visitor.copy();
    }
    int data_spatial_idx_base = threadIdx.x / WARP_SIZE * WMMA_N;
    int loop_count = 0;
#pragma unroll
    for (; loop_count <
           BlockConfig_::IC_UNROLL_SIZE * ConvConfig_::FH * ConvConfig_::FW;
         loop_count++) {
        int filter_spatial_idx =
                (loop_count + 0) % (ConvConfig_::FH * ConvConfig_::FW);
        int ic_blk = (loop_count + 0) / (ConvConfig_::FH * ConvConfig_::FW);
        int fh = filter_spatial_idx / ConvConfig_::FW;
        int fw = filter_spatial_idx % ConvConfig_::FW;
        int data_spatial_idx =
                data_spatial_idx_base +
                fh * DataCount<ConvConfig_, BlockConfig_>::LANE_SIZE + fw;
        // load the tap, then multiply-accumulate (no overlap)
        load_share_mem<ConvConfig_, BlockConfig_>(
                data_frag, filter_frag, gbl2smem_data_visitor,
                gbl2smem_filter_visitor, data_spatial_idx, filter_spatial_idx,
                ic_blk);
        calc<BlockConfig_::OUT_CHANNELS_PER_WARP, BlockConfig_::OH_PER_WARP>(
                data_frag, filter_frag, acc_frag);
    }
    if (!last_slice) {
        // publish the prefetched slice to shared memory
        __syncthreads();
        gbl2smem_data_visitor.commit();
        gbl2smem_filter_visitor.commit();
        __syncthreads();
    }
}
#endif
//! Quantized-u4 convolution kernel (WMMA / tensor cores).
//! Grid mapping: blockIdx.x enumerates (output-row tile, output-col tile),
//! blockIdx.y an output-channel tile, blockIdx.z the batch index.
//! Input and filter are u4-packed (two values per byte), hence the /2 on
//! every byte offset; 8 channels are interleaved per spatial position.
template <typename ConvConfig_, typename BlockConfig_>
__global__ void convolution_template_device_u4(
        const uint8_t* __restrict__ data, const uint8_t* __restrict__ filter,
        int32_t* __restrict__ out, int N, int IH, int IW, int OH, int OW, int PH,
        int PW, int IC, int OC, int32_t zero) {
    constexpr size_t IC_BLKS = BlockConfig_::IC_BLKS;
    constexpr size_t OUT_CHANNELS_PER_BLOCK =
            FilterCount<ConvConfig_, BlockConfig_>::OUT_CHANNELS_PER_BLOCK;
    const int blocks_per_row = (OW + WMMA_N * BlockConfig_::WARPS_W - 1) /
                               (WMMA_N * BlockConfig_::WARPS_W);
    const int bidx = blockIdx.x;
    const int bidy = blockIdx.y;
    const int bidz = blockIdx.z;
    // block origin in output coordinates
    const int b_oh = bidx / blocks_per_row * BlockConfig_::OH_PER_WARP;
    const int b_ow = bidx % blocks_per_row * (WMMA_N * BlockConfig_::WARPS_W);
    const int warp_x = threadIdx.x / WARP_SIZE;
    const int warp_y = threadIdx.y;
    // this warp's first output channel / column
    const int oc_start = bidy * OUT_CHANNELS_PER_BLOCK + warp_y * WMMA_M;
    const int ow_start = b_ow + warp_x * WMMA_N;
    // block origin in input coordinates (padding may make these negative)
    const int b_ih = b_oh * ConvConfig_::SH - PH;
    const int b_iw = b_ow * ConvConfig_::SW - PW;
    const uint8_t* __restrict__ g_ptr_data =
            data + bidz * IC * IH * IW / 2 + (b_ih * IW + b_iw) * 8 / 2;
    const uint8_t* __restrict__ g_ptr_filter =
            filter +
            bidy * OUT_CHANNELS_PER_BLOCK * ConvConfig_::FH * ConvConfig_::FW * IC / 2;
    const int co_remain = OC - bidy * OUT_CHANNELS_PER_BLOCK;
    int32_t* __restrict__ g_ptr_out = out + bidz * OC * OH * OW + oc_start * OH * OW +
                                      (b_oh * OW + ow_start) * WMMA_M;
    const int icb = IC / 8;  // input channels in groups of 8
    __shared__ uint8_t smem_data[DataCount<ConvConfig_, BlockConfig_>::SMEM_DATA_ROW]
                                [DataCount<ConvConfig_, BlockConfig_>::SMEM_DATA_COL];
    __shared__ uint8_t
            smem_filter[FilterCount<ConvConfig_, BlockConfig_>::SMEM_FILTER_ROW]
                       [FilterCount<ConvConfig_, BlockConfig_>::SMEM_FILTER_COL];
    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int32_t>
            acc_frag[BlockConfig_::OUT_CHANNELS_PER_WARP][BlockConfig_::OH_PER_WARP];
    // double-buffered operand fragments (see consume_slice)
    wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, u4, wmma::col_major>
            data_frag[2][BlockConfig_::OH_PER_WARP];
    wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, u4, wmma::row_major>
            filter_frag[2][BlockConfig_::OUT_CHANNELS_PER_WARP];
    ConvDataGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_> gbl2smem_data_visitor{
            smem_data[0], g_ptr_data, IH, IW, b_ih, b_iw, zero};
    ConvFilterGlobal2ShareMemVisitor<ConvConfig_, BlockConfig_> gbl2smem_filter_visitor{
            smem_filter[0], g_ptr_filter, IC / 2 * ConvConfig_::FH * ConvConfig_::FW,
            co_remain, icb};
    // zero the accumulators
#pragma unroll
    for (int i = 0; i < BlockConfig_::OUT_CHANNELS_PER_WARP; ++i) {
#pragma unroll
        for (int j = 0; j < BlockConfig_::OH_PER_WARP; ++j) {
            wmma::fill_fragment(acc_frag[i][j], 0);
        }
    }
    // prologue: stage the first slice and load its first tap
    gbl2smem_data_visitor.copy();
    gbl2smem_filter_visitor.copy();
    gbl2smem_data_visitor.commit();
    gbl2smem_filter_visitor.commit();
    __syncthreads();
    load_share_mem<ConvConfig_, BlockConfig_>(
            data_frag[0], filter_frag[0], gbl2smem_data_visitor,
            gbl2smem_filter_visitor, warp_x * WMMA_N, 0, 0);
    // main loop over input-channel slices; the last one skips the prefetch
    int ic_blocks = (icb + IC_BLKS - 1) / IC_BLKS - 1;
#pragma unroll
    for (int ci_blk = 0; ci_blk < ic_blocks; ci_blk++) {
        consume_slice<false, ConvConfig_, BlockConfig_>(
                gbl2smem_data_visitor, gbl2smem_filter_visitor, data_frag, filter_frag,
                acc_frag);
    }
    consume_slice<true, ConvConfig_, BlockConfig_>(
            gbl2smem_data_visitor, gbl2smem_filter_visitor, data_frag, filter_frag,
            acc_frag);
    // store the int32 accumulators to global memory, tile by tile
#pragma unroll
    for (int i = 0; i < BlockConfig_::OUT_CHANNELS_PER_WARP; ++i) {
#pragma unroll
        for (int j = 0; j < BlockConfig_::OH_PER_WARP; ++j) {
            if (b_oh + j < OH && oc_start + i * BlockConfig_::WARPS_OC * WMMA_M < OC &&
                ow_start < OW) {
                wmma::store_matrix_sync(
                        &g_ptr_out
                                [i * BlockConfig_::WARPS_OC * WMMA_M * OH * OW +
                                 j * OW * WMMA_M],
                        acc_frag[i][j], WMMA_M, wmma::mem_col_major);
            }
        }
    }
}
#else
// Fallback selected by the surrounding `#if __CUDA_ARCH__ >= 730` guard:
// for older architectures the kernel compiles to an empty body so the
// host-side launch code still links.
template <typename ConvConfig_, typename BlockConfig_>
__global__ void convolution_template_device_u4(
        const uint8_t* __restrict__ /* data */,
        const uint8_t* __restrict__ /* filter */, int32_t* __restrict__ /* out */,
        int /* N */, int /* IH */, int /* IW */, int /* OH */, int /* OW */,
        int /* PH */, int /* PW */, int /* IC */, int /* OC */, int32_t /* zero */) {}
#endif
} // namespace wmma_conv_integer_subbyte_fhxfw
using namespace wmma_conv_integer_subbyte_fhxfw;
namespace {

//! Computes the launch geometry and launches the u4 convolution kernel for
//! one compile-time (filter, stride, block-tiling) configuration. Factored
//! out of the dispatch function below, which previously carried three
//! near-identical copies of this code.
template <
        int FH, int FW, int SH, int SW, int WARPS_W, int WARPS_OC,
        int OUT_CHANNELS_PER_WARP, int OH_PER_WARP, int IC_UNROLL_SIZE>
void launch_wmma_conv_u4(
        const uint8_t* d_data, const uint8_t* d_filter, int32_t* d_out,
        int batch_size, int hi, int wi, int ho, int wo, int ph, int pw, int ci,
        int co, int32_t zero, cudaStream_t stream) {
    // each block covers WMMA_N * WARPS_W output columns and OH_PER_WARP rows
    int blocks_per_row = (wo + WMMA_N * WARPS_W - 1) / (WMMA_N * WARPS_W);
    int blocks_per_col = (ho + OH_PER_WARP - 1) / (OH_PER_WARP);
    // each block produces WMMA_M * WARPS_OC * OUT_CHANNELS_PER_WARP channels
    int blocks_per_out_channel =
            (co + WMMA_M * WARPS_OC * OUT_CHANNELS_PER_WARP - 1) /
            (WMMA_M * WARPS_OC * OUT_CHANNELS_PER_WARP);
    dim3 blockDim(WARP_SIZE * WARPS_W, WARPS_OC, 1);
    dim3 gridDim(blocks_per_row * blocks_per_col, blocks_per_out_channel,
                 batch_size);
    convolution_template_device_u4<
            ConvConfig<FH, FW, SH, SW>,
            BlockConfig<
                    WARPS_W, WARPS_OC, OUT_CHANNELS_PER_WARP, OH_PER_WARP,
                    IC_UNROLL_SIZE>><<<gridDim, blockDim, 0, stream>>>(
            d_data, d_filter, d_out, batch_size, hi, wi, ho, wo, ph, pw, ci, co,
            zero);
}

}  // anonymous namespace

//! Host-side dispatcher: selects a kernel specialization from the runtime
//! filter shape / stride and launches it. Shapes other than 3x3, 5x5 or 7x7
//! with unit stride fall through without launching anything.
void megdnn::cuda::wmma_conv_integer_subbyte::_do_wmma_conv_integer_subbyte_fhxfw(
        const uint8_t* d_data, const uint8_t* d_filter, int32_t* d_out, int batch_size,
        int hi, int wi, int ho, int wo, int ph, int pw, int ci, int co, int fh, int fw,
        int sh, int sw, uint8_t zp_data, cudaStream_t stream) {
    cuda_check(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
    cuda_check(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
    // replicate the 4-bit zero point into both nibbles of a byte, then into
    // all four bytes of a 32-bit word; the unsigned multiply produces the
    // same bit pattern as the previous shift/or chain but never shifts a set
    // bit into the sign position of a signed int
    zp_data = (zp_data << 4) | zp_data;
    int32_t zero = static_cast<int32_t>(zp_data * 0x01010101u);
    if (fh == 3 && fw == 3 && sh == 1 && sw == 1) {
        // FH, FW, SH, SW, warps_w, warps_oc, oc_per_warp, oh_per_warp, ic_unroll
        launch_wmma_conv_u4<3, 3, 1, 1, 2, 4, 2, 8, 2>(
                d_data, d_filter, d_out, batch_size, hi, wi, ho, wo, ph, pw, ci,
                co, zero, stream);
    } else if (fh == 5 && fw == 5 && sh == 1 && sw == 1) {
        launch_wmma_conv_u4<5, 5, 1, 1, 2, 4, 2, 8, 1>(
                d_data, d_filter, d_out, batch_size, hi, wi, ho, wo, ph, pw, ci,
                co, zero, stream);
    } else if (fh == 7 && fw == 7 && sh == 1 && sw == 1) {
        launch_wmma_conv_u4<7, 7, 1, 1, 2, 2, 2, 4, 1>(
                d_data, d_filter, d_out, batch_size, hi, wi, ho, wo, ph, pw, ci,
                co, zero, stream);
    }
    after_kernel_launch();
}
// vim: syntax=cpp.doxygen
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace sd {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
// Check whether the padding required to map (iH,iW) -> (oH,oW) with the given
// kernel/stride/dilation is asymmetric. cuDNN supports only symmetric padding,
// so in the asymmetric case `input` is replaced by a newly allocated array
// enlarged by one along the affected spatial dim(s), with the original data
// copied into the leading sub-view; `gradI` (if given) is reallocated with the
// matching enlarged shape.
// NOTE(review): the replacement arrays are heap-allocated and returned through
// the reference parameters — ownership passes to the caller; the previous
// pointers are not freed here, presumably still owned by the caller. Verify.
void checkConv2dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iH, const int iW,
const int oH, const int oW,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW) {
// total padding (leading + trailing) required along H and W
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
// asymmetric when the implied trailing pad (sum - leading) differs from pH/pW
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
// index of the H dim within the layout (H is followed by W at +1)
const int iHposition = isNCHW ? 2 : 1;
if(isPHasymm)
newShape[iHposition] += 1;
if(isPWasymm)
newShape[iHposition + 1] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
// copy original data into the leading sub-view of the enlarged array
if(isNCHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
// 3d analogue of checkConv2dCUDNNPadAsymmetric: if the padding required to map
// (iD,iH,iW) -> (oD,oH,oW) is asymmetric along any spatial axis, replace
// `input` with an enlarged copy (one extra element along each asymmetric axis)
// and reallocate `gradI` to the matching shape. Ownership of the new arrays
// passes to the caller through the reference parameters.
void checkConv3dCUDNNPadAsymmetric(NDArray* &input, NDArray* &gradI,
const int iD, const int iH, const int iW,
const int oD, const int oH, const int oW,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const bool isNCDHW) {
// total padding (leading + trailing) required along D, H and W
const auto pDsum = ((oD - 1) * sD + ((kD - 1) * dD + 1) - iD);
const auto pHsum = ((oH - 1) * sH + ((kH - 1) * dH + 1) - iH);
const auto pWsum = ((oW - 1) * sW + ((kW - 1) * dW + 1) - iW);
// asymmetric when the implied trailing pad differs from the leading pad
const bool isPDasymm = pD != (pDsum - pD);
const bool isPHasymm = pH != (pHsum - pH);
const bool isPWasymm = pW != (pWsum - pW);
if(!isPDasymm && !isPHasymm && !isPWasymm)
return;
std::vector<Nd4jLong> newShape = input->getShapeAsVector();
// index of the D dim within the layout (H and W follow at +1/+2)
const int iDposition = isNCDHW ? 2 : 1;
if(isPDasymm)
newShape[iDposition] += 1;
if(isPHasymm)
newShape[iDposition + 1] += 1;
if(isPWasymm)
newShape[iDposition + 2] += 1;
NDArray* newInput = new NDArray(input->ordering(), newShape, input->dataType(), input->getContext());
// copy original data into the leading sub-view of the enlarged array
if(isNCDHW)
(*newInput)({0,0, 0,0, 0,input->sizeAt(2), 0,input->sizeAt(3), 0,input->sizeAt(4)}).assign(input);
else
(*newInput)({0,0, 0,input->sizeAt(1), 0,input->sizeAt(2), 0,input->sizeAt(3), 0,0}).assign(input);
input = newInput;
if(gradI != nullptr)
gradI = new NDArray(gradI->ordering(), newShape, gradI->dataType(), gradI->getContext());
}
//////////////////////////////////////////////////////////////////////////
void pooling2dCUDNN(const LaunchContext* context,
const NDArray* input, NDArray* output,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const bool isNCHW, const cudnnPoolingMode_t mode) {
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: can't set stream for cuDNN", err);
cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1 && input->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
else
err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1 && output->ordering() == 'c')
err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW);
else
err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1));
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err);
// description of pooling
cudnnPoolingDescriptor_t pooling;
cudnnCreatePoolingDescriptor(&pooling);
err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnSetPooling2dDescriptor failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input});
// run calculation
err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
if (err != 0) throw sd::cuda_exception::build("pooling2dCUDNN: cudnnPoolingForward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("pooling2dCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
// Backward 2d pooling via cuDNN: computes gradI (gradient w.r.t. input) from
// `input` and `gradO`. Runs on the context's CUDA stream and synchronizes
// before returning; throws sd::cuda_exception on any cuDNN/CUDA failure.
// NOTE: dilation args dH/dW are ignored (cuDNN 2d pooling has no dilation).
// NOTE(review): for CUDNN_POOLING_MAX, cuDNN's pooling-backward expects the
// forward-pass output as its `y` argument, but here gradO is passed for both
// y and dy (compare the explicit forward-pass WAR in pooling3dBpCUDNN) —
// verify correctness of max mode against cuDNN docs.
void pooling2dBpCUDNN(const LaunchContext* context,
                      const NDArray* input, const NDArray* gradO,
                      NDArray* gradI,
                      const int kH, const int kW,
                      const int sH, const int sW,
                      const int pH, const int pW,
                      const int dH, const int dW,
                      const bool isNCHW, const cudnnPoolingMode_t mode) {

    int bS, iC, iH, iW, oC, oH, oW;                       // batch size, input channels, input height/width, output channels, output height/width
    int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding dimension indexes
    ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);

    auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
    cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
    if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: can't set stream for cuDNN", err);

    cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;

    // shared descriptor for input and gradI (same shape/strides assumed)
    cudnnTensorDescriptor_t x;
    cudnnCreateTensorDescriptor(&x);
    if(input->ews() == 1 && input->ordering() == 'c')
        err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW);
    else
        err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1));
    if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input/gradI failed", err);

    // gradO descriptor
    cudnnTensorDescriptor_t dz;
    cudnnCreateTensorDescriptor(&dz);
    if(gradO->ews() == 1 && gradO->ordering() == 'c')
        err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW);
    else
        err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1));
    if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err);

    // pooling descriptor (kernel / padding / stride)
    cudnnPoolingDescriptor_t pooling;
    cudnnCreatePoolingDescriptor(&pooling);
    err = cudnnSetPooling2dDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, kH, kW, pH, pW, sH, sW);
    if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnSetPooling2dDescriptor failed", err);

    // scaling parameters; must match the computation precision (<=4 bytes -> fp32, else fp64)
    const float  alpha32(1), beta32(0);
    const double alpha64(1), beta64(0);
    const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
    const void* beta  = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32)  : reinterpret_cast<const void*>(&beta64);

    NDArray::prepareSpecialUse({gradI}, {input, gradO});

    // run calculation for gradI
    err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
    if (err != 0) throw sd::cuda_exception::build("pooling2dBpCUDNN: cudnnPoolingBackward failed", err);

    auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
    if (cudaErr != 0)
        throw cuda_exception::build("pooling2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);

    // FIX: release cuDNN descriptors — they were previously leaked on every call.
    // (Exception paths above still leak; RAII wrappers would be needed for full safety.)
    cudnnDestroyTensorDescriptor(x);
    cudnnDestroyTensorDescriptor(dz);
    cudnnDestroyPoolingDescriptor(pooling);

    NDArray::registerSpecialUse({gradI}, {input, gradO});
}
//////////////////////////////////////////////////////////////////////////
// Forward 3d pooling (max or average, selected by `mode`) via cuDNN Nd
// descriptors. Writes the result into `output`; runs on the context's CUDA
// stream and synchronizes before returning. Throws sd::cuda_exception on any
// cuDNN/CUDA failure.
// NOTE: the dilation arguments dD/dH/dW are accepted for signature symmetry
// but are ignored — cuDNN's pooling descriptor has no dilation parameter.
void pooling3dCUDNN(const LaunchContext* context,
                    const NDArray* input, NDArray* output,
                    const int kD, const int kH, const int kW,
                    const int sD, const int sH, const int sW,
                    const int pD, const int pH, const int pW,
                    const int dD, const int dH, const int dW,
                    const bool isNCDHW, const cudnnPoolingMode_t mode) {

    auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
    cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
    if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: can't set stream for cuDNN", err);

    const int numDims = 5;

    int bS, iC, iD, iH, iW, oC, oD, oH, oW;         // batch size, input channels, input depth/height/width, output channels, output depth/height/width
    int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding dimension indexes
    ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);

    const int pSizes[] = {pD, pH, pW};
    const int sSizes[] = {sD, sH, sW};
    const int kSizes[] = {kD, kH, kW};

    const int xShape[] = {bS, iC, iD, iH, iW};
    const int zShape[] = {bS, oC, oD, oH, oW};

    const int xStrides[] = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
    const int zStrides[] = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3), (int)output->strideAt(4)};

    cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;

    // input descriptor: compact Nd form for contiguous c-order arrays,
    // otherwise describe the actual strides explicitly
    cudnnTensorDescriptor_t x;
    cudnnCreateTensorDescriptor(&x);
    if(input->ews() == 1 && input->ordering() == 'c')
        err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
    else
        err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
    if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);

    // output descriptor
    cudnnTensorDescriptor_t z;
    cudnnCreateTensorDescriptor(&z);
    if(output->ews() == 1 && output->ordering() == 'c')
        err = cudnnSetTensorNdDescriptorEx(z, format, cudnnDataType(output->dataType()), numDims, zShape);
    else
        err = cudnnSetTensorNdDescriptor(z, cudnnDataType(output->dataType()), numDims, zShape, zStrides);
    if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);

    // pooling descriptor over the 3 spatial dims (numDims - 2)
    cudnnPoolingDescriptor_t pooling;
    cudnnCreatePoolingDescriptor(&pooling);
    err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
    if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnSetPoolingNdDescriptor failed", err);

    // scaling parameters; must match the computation precision (<=4 bytes -> fp32, else fp64)
    const float  alpha32(1), beta32(0);
    const double alpha64(1), beta64(0);
    const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
    const void* beta  = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32)  : reinterpret_cast<const void*>(&beta64);

    NDArray::prepareSpecialUse({output}, {input});

    // run calculation
    err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, z, output->specialBuffer());
    if (err != 0) throw sd::cuda_exception::build("pooling3dCUDNN: cudnnPoolingForward failed", err);

    auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
    if (cudaErr != 0)
        throw cuda_exception::build("pooling3dCUDNN: cudaStreamSynchronize failed !", cudaErr);

    // FIX: release cuDNN descriptors — they were previously leaked on every call.
    // (Exception paths above still leak; RAII wrappers would be needed for full safety.)
    cudnnDestroyTensorDescriptor(x);
    cudnnDestroyTensorDescriptor(z);
    cudnnDestroyPoolingDescriptor(pooling);

    NDArray::registerSpecialUse({output}, {input});
}
//////////////////////////////////////////////////////////////////////////
// Backward 3d pooling via cuDNN: computes gradI (gradient w.r.t. input) from
// `input` and `gradO`. For max pooling, cuDNN's backward pass requires the
// forward-pass output as an argument, so the forward pass is recomputed into a
// temporary first (WAR). Runs on the context's CUDA stream and synchronizes
// before returning; throws sd::cuda_exception on any cuDNN/CUDA failure.
// NOTE: dilation args dD/dH/dW are ignored (cuDNN pooling has no dilation).
void pooling3dBpCUDNN(const LaunchContext* context,
                      const NDArray* input, const NDArray* gradO,
                      NDArray* gradI,
                      const int kD, const int kH, const int kW,
                      const int sD, const int sH, const int sW,
                      const int pD, const int pH, const int pW,
                      const int dD, const int dH, const int dW,
                      const bool isNCDHW, const cudnnPoolingMode_t mode) {

    auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
    cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
    if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: can't set stream for cuDNN", err);

    const int numDims = 5;

    int bS, iC, iD, iH, iW, oC, oD, oH, oW;         // batch size, input channels, input depth/height/width, output channels, output depth/height/width
    int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding dimension indexes
    ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, 0, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, indIOioC, indIOioD, indWiC, indWoC, indWkD);

    const int pSizes[] = {pD, pH, pW};
    const int sSizes[] = {sD, sH, sW};
    const int kSizes[] = {kD, kH, kW};

    const int xShape[]  = {bS, iC, iD, iH, iW};
    const int dzShape[] = {bS, oC, oD, oH, oW};

    const int xStrides[]  = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3), (int)input->strideAt(4)};
    const int dzStrides[] = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3), (int)gradO->strideAt(4)};

    cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;

    // shared descriptor for input and gradI (same shape/strides assumed)
    cudnnTensorDescriptor_t x;
    cudnnCreateTensorDescriptor(&x);
    if(input->ews() == 1 && input->ordering() == 'c')
        err = cudnnSetTensorNdDescriptorEx(x, format, cudnnDataType(input->dataType()), numDims, xShape);
    else
        err = cudnnSetTensorNdDescriptor(x, cudnnDataType(input->dataType()), numDims, xShape, xStrides);
    if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input/gradI failed", err);

    // gradO descriptor
    cudnnTensorDescriptor_t dz;
    cudnnCreateTensorDescriptor(&dz);
    if(gradO->ews() == 1 && gradO->ordering() == 'c')
        err = cudnnSetTensorNdDescriptorEx(dz, format, cudnnDataType(gradO->dataType()), numDims, dzShape);
    else
        err = cudnnSetTensorNdDescriptor(dz, cudnnDataType(gradO->dataType()), numDims, dzShape, dzStrides);
    if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);

    // pooling descriptor over the 3 spatial dims (numDims - 2)
    cudnnPoolingDescriptor_t pooling;
    cudnnCreatePoolingDescriptor(&pooling);
    err = cudnnSetPoolingNdDescriptor(pooling, mode, CUDNN_PROPAGATE_NAN, numDims - 2, kSizes, pSizes, sSizes);
    if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnSetPoolingNdDescriptor failed", err);

    // scaling parameters; must match the computation precision (<=4 bytes -> fp32, else fp64)
    const float  alpha32(1), beta32(0);
    const double alpha64(1), beta64(0);
    const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
    const void* beta  = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32)  : reinterpret_cast<const void*>(&beta64);

    // cudnn max-pooling backward api requires the ff output as one of its input arguments
    if(mode == CUDNN_POOLING_MAX) {

        NDArray temp(gradO);

        NDArray::prepareSpecialUse({gradI}, {input, gradO, &temp});

        // re-run the forward calculation into the temporary
        err = cudnnPoolingForward(*handle, pooling, alpha, x, input->specialBuffer(), beta, dz, temp.specialBuffer());
        // FIX: error messages previously named the wrong function
        if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnPoolingForward failed", err);

        // run bp calculation for gradI
        err = cudnnPoolingBackward(*handle, pooling, alpha, dz, temp.specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
        if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnPoolingBackward failed", err);

        NDArray::registerSpecialUse({gradI}, {input, gradO, &temp});
    }
    else {

        NDArray::prepareSpecialUse({gradI}, {input, gradO});

        // run bp calculation for gradI (average pooling does not need the ff output)
        err = cudnnPoolingBackward(*handle, pooling, alpha, dz, gradO->specialBuffer(), dz, gradO->specialBuffer(), x, input->specialBuffer(), beta, x, gradI->specialBuffer());
        if (err != 0) throw sd::cuda_exception::build("pooling3dBpCUDNN: cudnnPoolingBackward failed", err);

        NDArray::registerSpecialUse({gradI}, {input, gradO});
    }

    auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
    if (cudaErr != 0)
        throw cuda_exception::build("pooling3dBpCUDNN: cudaStreamSynchronize failed !", cudaErr);

    // FIX: release cuDNN descriptors — they were previously leaked on every call.
    // (Exception paths above still leak; RAII wrappers would be needed for full safety.)
    cudnnDestroyTensorDescriptor(x);
    cudnnDestroyTensorDescriptor(dz);
    cudnnDestroyPoolingDescriptor(pooling);
}
}
}
} | the_stack |
* \brief This file wraps CUFFT functionality into the Bifrost C++ API.
*/
/*
TODO: Implicitly padded/cropped transforms using load callback
*/
#include <bifrost/fft.h>
#include "assert.hpp"
#include "utils.hpp"
#include "cuda.hpp"
#include "trace.hpp"
#include "fft_kernels.h"
#include "ShapeIndexer.cuh"
#include "ArrayIndexer.cuh"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>
#include <cufft.h>
#include <cufftXt.h>
// Implementation of a Bifrost FFT plan: owns a cuFFT handle plus the metadata
// required to run batched, strided, optionally fftshifted transforms over
// BFarray inputs/outputs (see init()/execute() below).
class BFfft_impl {
cufftHandle _handle;    // owned cuFFT plan (created in ctor, destroyed in dtor)
bool        _real_in;   // true for real-input transforms (R2C/D2Z)
bool        _real_out;  // true for real-output transforms (C2R/Z2D)
int         _nbit;      // precision: 32 (fp32) or 64 (fp64)
BFdtype     _itype;     // input dtype the plan was built for
BFdtype     _otype;     // output dtype the plan was built for
int         _batch_shape[BF_MAX_DIMS]; // shape of the non-transformed (batch) dims, 1 along transform axes
size_t      _workspace_size;           // bytes of cuFFT work area required
std::vector<int> _axes;                // transform axes (normalized, non-negative)
bool        _do_fftshift;              // apply fftshift via load callback
bool        _using_load_callback;      // set by set_fft_load_callback()
thrust::device_vector<char>         _dv_tmp_storage;   // fallback work area when caller passes none
thrust::device_vector<CallbackData> _dv_callback_data; // device-side copy of callback params
// pinned host staging buffer for async H2D copy of CallbackData
typedef thrust::cuda::experimental::pinned_allocator<CallbackData> pinned_allocator_type;
thrust::host_vector<CallbackData, pinned_allocator_type> _hv_callback_data;
// Execute one batch slice; assumes descriptors already validated by execute()
BFstatus execute_impl(BFarray const* in,
BFarray const* out,
BFbool inverse,
void* tmp_storage,
size_t tmp_storage_size);
// No copy-assign
BFfft_impl(BFfft_impl const& );
BFfft_impl& operator=(BFfft_impl const& );
public:
BFfft_impl();
~BFfft_impl();
BFstatus init(BFarray const* in,
BFarray const* out,
int rank,
int const* axes,
bool do_fftshift,
size_t* tmp_storage_size);
BFstatus execute(BFarray const* in,
BFarray const* out,
BFbool inverse,
void* tmp_storage,
size_t tmp_storage_size);
};
// Create the underlying cuFFT plan handle; it is configured later in init().
BFfft_impl::BFfft_impl() {
BF_CHECK_CUFFT_EXCEPTION( cufftCreate(&_handle) );
}
// Release the cuFFT plan; return value deliberately ignored (dtor must not throw).
BFfft_impl::~BFfft_impl() {
cufftDestroy(_handle);
}
// Configure the cuFFT plan for the given input/output arrays.
//   in/out           : arrays defining dtypes, shapes and strides of the transform
//   rank             : number of transform dimensions (1..BF_MAX_DIMS)
//   axes             : transform axes (may be NULL => last `rank` axes; negatives allowed)
//   do_fftshift      : apply an fftshift via cuFFT load callback
//   tmp_storage_size : if non-NULL, receives the required work-area size in bytes
// Returns BF_STATUS_SUCCESS or an error status via the BF_ASSERT/BF_CHECK macros.
BFstatus BFfft_impl::init(BFarray const* in,
BFarray const* out,
int rank,
int const* axes,
bool do_fftshift,
size_t* tmp_storage_size) {
BF_TRACE();
BF_ASSERT(rank > 0 && rank <= BF_MAX_DIMS, BF_STATUS_INVALID_ARGUMENT);
BF_ASSERT(rank <= in->ndim, BF_STATUS_INVALID_ARGUMENT);
//BF_ASSERT(
// TODO: More assertions...
_real_in  = !BF_DTYPE_IS_COMPLEX( in->dtype);
_real_out = !BF_DTYPE_IS_COMPLEX(out->dtype);
// Normalize the axes list: default to trailing axes, wrap negatives
int mutable_axes[BF_MAX_DIMS];
for( int d=0; d<rank; ++d ) {
// Default to last 'rank' axes
mutable_axes[d] = axes ? axes[d] : in->ndim-rank+d;
// Allow negative axis numbers
if( mutable_axes[d] < 0 ) {
mutable_axes[d] += in->ndim;
}
}
axes = mutable_axes;
// Validate per-dim shape compatibility and record the batch shape
for( int d=0; d<in->ndim; ++d ) {
long ilength = in->shape[d];
long olength = out->shape[d];
if( (!_real_in && !_real_out) ||
d != axes[rank-1] ) {
BF_ASSERT(ilength == olength,
BF_STATUS_INVALID_SHAPE);
} else if( !_real_out ) {
// Special case for last dim of R2C transforms
BF_ASSERT(olength == ilength/2+1,
BF_STATUS_INVALID_SHAPE);
} else {
// Special case for last dim of C2R transforms
BF_ASSERT(ilength == olength/2+1,
BF_STATUS_INVALID_SHAPE);
}
// Initialize batch shape to data shape
_batch_shape[d] = _real_in ? ilength : olength;
}
// Compute transform shape and strides
#if CUDA_VERSION >= 7500
typedef long long int_array_type; // 64-bit plan API available
#else
typedef int int_array_type;
#endif
int_array_type shape[BF_MAX_DIMS];
int_array_type inembed[BF_MAX_DIMS];
int_array_type onembed[BF_MAX_DIMS];
for( int d=0; d<rank; ++d ) {
long ilength = in->shape[axes[d]];
long olength = out->shape[axes[d]];
shape[d] = _real_in ? ilength : olength;
if( d > 0 ) {
// embed dims derive from stride ratios, so strides must divide evenly
BF_ASSERT( in->strides[axes[d-1]] % in->strides[axes[d]] == 0,
BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT(out->strides[axes[d-1]] % out->strides[axes[d]] == 0,
BF_STATUS_UNSUPPORTED_STRIDE);
// Note: These implicitly span the batch dims where necessary
inembed[d] = in->strides[axes[d-1]] / in->strides[axes[d]];
onembed[d] = out->strides[axes[d-1]] / out->strides[axes[d]];
} else {
inembed[d] = in->shape[axes[d]];
onembed[d] = out->shape[axes[d]];
}
// This is not a batch dim, so exclude it from _batch_shape
_batch_shape[axes[d]] = 1;
}
// Element strides along the innermost transform axis (in elements, not bytes)
int itype_nbyte = BF_DTYPE_NBYTE( in->dtype);
int otype_nbyte = BF_DTYPE_NBYTE(out->dtype);
int istride_bytes = in->strides[axes[rank-1]];
BF_ASSERT(istride_bytes % itype_nbyte == 0,
BF_STATUS_UNSUPPORTED_STRIDE);
int istride = istride_bytes / itype_nbyte;
int ostride_bytes = out->strides[axes[rank-1]];
BF_ASSERT(ostride_bytes % otype_nbyte == 0,
BF_STATUS_UNSUPPORTED_STRIDE);
int ostride = ostride_bytes / otype_nbyte;
// Use longest batch dim as cuFFT batch parameter
int batch_dim;
bool fastest_dim_is_batch_dim = axes[rank-1] != in->ndim-1;
if( (_real_in || _real_out) && fastest_dim_is_batch_dim ) {
// Set the inner dim as the kernel batch, as a WAR for CUFFT requiring
// complex-aligned memory.
batch_dim = in->ndim-1;
} else {
// Otherwise use the largest batch dim as the kernel batch for best
// performance.
batch_dim = argmax_last(_batch_shape, in->ndim);
}
long batch = _batch_shape[batch_dim];
_batch_shape[batch_dim] = 1; // remaining batch dims are looped over in execute()
long idist = in->strides[batch_dim] / itype_nbyte;
long odist = out->strides[batch_dim] / otype_nbyte;
// Precision and transform type are derived from the output dtype
bool fp64 = (out->dtype == BF_DTYPE_F64 ||
out->dtype == BF_DTYPE_CF64);
_nbit = fp64 ? 64 : 32;
_itype = in->dtype;
_otype = out->dtype;
cufftType type;
if( !_real_in && !_real_out ) { type = fp64 ? CUFFT_Z2Z : CUFFT_C2C; }
else if( _real_in && !_real_out ) { type = fp64 ? CUFFT_D2Z : CUFFT_R2C; }
else if( !_real_in && _real_out ) { type = fp64 ? CUFFT_Z2D : CUFFT_C2R; }
else {
// real -> real is not a valid FFT combination
BF_FAIL("Complex input and/or output",
BF_STATUS_INVALID_DTYPE);
}
// Work area is managed manually (see execute_impl), not by cuFFT
BF_CHECK_CUFFT( cufftSetAutoAllocation(_handle, false) );
#if CUDA_VERSION >= 7500
BF_CHECK_CUFFT( cufftMakePlanMany64(_handle,
rank, shape,
inembed, istride, idist,
onembed, ostride, odist,
type,
batch,
&_workspace_size) );
#else
BF_CHECK_CUFFT( cufftMakePlanMany (_handle,
rank, shape,
inembed, istride, idist,
onembed, ostride, odist,
type,
batch,
&_workspace_size) );
#endif
_axes.assign(axes, axes+rank);
_do_fftshift = do_fftshift;
// Single CallbackData record, staged in pinned host memory and copied to
// device before each execution (see execute_impl)
_dv_callback_data.resize(1);
_hv_callback_data.resize(1);
CallbackData* callback_data = thrust::raw_pointer_cast(&_dv_callback_data[0]);
BF_CHECK( set_fft_load_callback(in->dtype, _nbit, _handle, _do_fftshift,
callback_data, &_using_load_callback) );
if( tmp_storage_size ) {
*tmp_storage_size = _workspace_size;
}
return BF_STATUS_SUCCESS;
}
// Execute the planned transform on one batch slice.
//   tmp_storage : cuFFT work area; if NULL an internal device buffer is
//                 (re)allocated to _workspace_size
//   inverse     : transform direction for C2C/Z2Z (ignored for R2C/C2R,
//                 whose direction is fixed by the plan type)
// Also stages the load-callback parameters (fftshift / alignment offset) to
// the device before launching the transform.
BFstatus BFfft_impl::execute_impl(BFarray const* in,
BFarray const* out,
BFbool inverse,
void* tmp_storage,
size_t tmp_storage_size) {
BF_ASSERT( in->dtype == _itype, BF_STATUS_INVALID_DTYPE);
BF_ASSERT(out->dtype == _otype, BF_STATUS_INVALID_DTYPE);
if( !tmp_storage ) {
// Caller supplied no work area: fall back to an internally owned buffer
BF_TRY(_dv_tmp_storage.resize(_workspace_size));
tmp_storage = thrust::raw_pointer_cast(&_dv_tmp_storage[0]);
} else {
BF_ASSERT(tmp_storage_size >= _workspace_size,
BF_STATUS_INSUFFICIENT_STORAGE);
}
BF_CHECK_CUFFT( cufftSetWorkArea(_handle, tmp_storage) );
void* idata = in->data;
void* odata = out->data;
// TODO: This sync is needed to ensure that the previous h2d copy of
// h_callback_data has finished before we overwrite it.
// We could potentially use a CUDA event as a lighter-weight
// solution.
cudaStreamSynchronize(g_cuda_stream);
CallbackData* h_callback_data = &_hv_callback_data[0];
// WAR for CUFFT insisting that pointer be aligned to sizeof(cufftComplex)
int alignment = (_nbit == 32 ?
sizeof(cufftComplex) :
sizeof(cufftDoubleComplex));
// TODO: To support f32 input that is not aligned to 8 bytes, we need to
// use a load callback. However, we don't know this during init,
// so we really need to set the callback here instead. Not sure
// how expensive it is to set the callback.
if( _using_load_callback ) {
// Round idata down to complex alignment; the callback re-applies the offset
h_callback_data->ptr_offset = (uintptr_t)idata % sizeof(cufftComplex);
*(char**)&idata -= h_callback_data->ptr_offset;
}
// Set callback data needed for applying fftshift
h_callback_data->inverse = _real_out || (!_real_in && inverse);
h_callback_data->do_fftshift = _do_fftshift;
h_callback_data->ndim = _axes.size();
for( int d=0; d<h_callback_data->ndim; ++d ) {
h_callback_data->shape[d] = in->shape[_axes[d]];
int itype_nbyte = BF_DTYPE_NBYTE(in->dtype);
h_callback_data->istrides[d] = in->strides[_axes[d]] / itype_nbyte;
h_callback_data->inembed[d] =
(_axes[d] > 0 ?
in->strides[_axes[d]-1] / in->strides[_axes[d]] :
in->shape[_axes[d]]);
}
// Async H2D copy of the callback parameters; next call syncs before reuse
CallbackData* d_callback_data = thrust::raw_pointer_cast(&_dv_callback_data[0]);
cudaMemcpyAsync(d_callback_data, h_callback_data, sizeof(CallbackData),
cudaMemcpyHostToDevice, g_cuda_stream);
BF_ASSERT((uintptr_t)idata % alignment == 0, BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT((uintptr_t)odata % alignment == 0, BF_STATUS_UNSUPPORTED_STRIDE);
// Dispatch to the cuFFT exec call matching the plan's type and precision
if( !_real_in && !_real_out ) {
int direction = inverse ? CUFFT_INVERSE : CUFFT_FORWARD;
if( _nbit == 32 ) {
BF_CHECK_CUFFT( cufftExecC2C(_handle, (cufftComplex*)idata, (cufftComplex*)odata, direction) );
} else if( _nbit == 64 ) {
BF_CHECK_CUFFT( cufftExecZ2Z(_handle, (cufftDoubleComplex*)idata, (cufftDoubleComplex*)odata, direction) );
} else {
BF_FAIL("Supported data types", BF_STATUS_UNSUPPORTED_DTYPE);
}
} else if( _real_in && !_real_out ) {
if( _nbit == 32 ) {
BF_CHECK_CUFFT( cufftExecR2C(_handle, (cufftReal*)idata, (cufftComplex*)odata) );
} else if( _nbit == 64 ) {
BF_CHECK_CUFFT( cufftExecD2Z(_handle, (cufftDoubleReal*)idata, (cufftDoubleComplex*)odata) );
} else {
BF_FAIL("Supported data types", BF_STATUS_UNSUPPORTED_DTYPE);
}
} else if( !_real_in && _real_out ) {
if( _nbit == 32 ) {
BF_CHECK_CUFFT( cufftExecC2R(_handle, (cufftComplex*)idata, (cufftReal*)odata) );
} else if( _nbit == 64 ) {
BF_CHECK_CUFFT( cufftExecZ2D(_handle, (cufftDoubleComplex*)idata, (cufftDoubleReal*)odata) );
} else {
BF_FAIL("Supported data types", BF_STATUS_UNSUPPORTED_DTYPE);
}
} else {
BF_FAIL("Valid data types", BF_STATUS_INVALID_DTYPE);
}
return BF_STATUS_SUCCESS;
}
// Execute the planned transform over all batch slices: iterates over the
// residual batch dims recorded in _batch_shape (those not folded into the
// cuFFT batch parameter) and runs execute_impl() on each slice.
BFstatus BFfft_impl::execute(BFarray const* in,
BFarray const* out,
BFbool inverse,
void* tmp_storage,
size_t tmp_storage_size) {
BF_TRACE();
BF_TRACE_STREAM(g_cuda_stream);
BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE);
BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE);
// TODO: More assertions
cudaStream_t stream = g_cuda_stream;
// Note: It appears that all transforms from the same plan must be executed
// on the same stream to avoid race conditions (use of workspace?).
BF_CHECK_CUFFT( cufftSetStream(_handle, stream) );
ShapeIndexer<BF_MAX_DIMS> shape_indexer(_batch_shape, in->ndim);
for( long i=0; i<shape_indexer.size(); ++i ) {
auto inds = shape_indexer.at(i);
// Per-slice views share shape/strides with the originals; only the
// data pointers are offset to the current batch index
BFarray batch_in  = *in;
BFarray batch_out = *out;
batch_in.data  = array_get_pointer( in, inds);
batch_out.data = array_get_pointer(out, inds);
BFstatus ret = this->execute_impl(&batch_in, &batch_out,
inverse,
tmp_storage, tmp_storage_size);
if( ret != BF_STATUS_SUCCESS ) {
return ret;
}
}
return BF_STATUS_SUCCESS;
}
// Allocate a new FFT plan object; on failure *plan_ptr is set to NULL and an
// error status is returned (via BF_TRY_RETURN_ELSE).
BFstatus bfFftCreate(BFfft* plan_ptr) {
BF_TRACE();
BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER);
BF_TRY_RETURN_ELSE(*plan_ptr = new BFfft_impl(),
*plan_ptr = 0);
}
// C-API wrapper: validate handle/pointers, then configure the plan for the
// given arrays and transform axes (see BFfft_impl::init for semantics).
BFstatus bfFftInit(BFfft plan,
BFarray const* in,
BFarray const* out,
int rank,
int const* axes,
BFbool apply_fftshift,
size_t* tmp_storage_size) {
BF_TRACE();
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
BF_ASSERT(in, BF_STATUS_INVALID_POINTER);
BF_ASSERT(out, BF_STATUS_INVALID_POINTER);
return plan->init(in, out, rank, axes, apply_fftshift, tmp_storage_size);
}
// in, out = complex, complex => [i]fft
// in, out = real, complex => rfft
// in, out = complex, real => irfft
// in, out = real, real => ERROR
// tmp_storage: if NULL, the library will allocate temporary storage automatically
// C-API wrapper: validate handle/pointers, then run the transform
// (see BFfft_impl::execute; dtype combination determines fft/rfft/irfft).
BFstatus bfFftExecute(BFfft plan,
BFarray const* in,
BFarray const* out,
BFbool inverse,
void* tmp_storage,
size_t tmp_storage_size) {
BF_TRACE();
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
BF_ASSERT(in, BF_STATUS_INVALID_POINTER);
BF_ASSERT(out, BF_STATUS_INVALID_POINTER);
return plan->execute(in, out, inverse, tmp_storage, tmp_storage_size);
}
// Destroy a plan created by bfFftCreate (frees the cuFFT handle via the dtor).
BFstatus bfFftDestroy(BFfft plan) {
BF_TRACE();
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
delete plan;
return BF_STATUS_SUCCESS;
}
// Layout of the quantization scale tensors (input_low/input_range) relative
// to the data tensor; determined by get_scale_type() below.
enum class ScaleType
{
SINGLE_SCALE,           // one scale for the whole tensor
PER_WEIGHT_CHANNEL,     // per-channel along dim 0 (Nx1x1x1) — weight quantization
PER_ACTIVATION_CHANNEL  // per-channel along dim 1 (1xCx1x1) — activation quantization
};
// Classify the quantizer scale layout by comparing the shapes of `input` and
// the scale tensors `input_low`/`input_range` (which must match each other):
//   SINGLE_SCALE           — scalar/flat single scale for the whole tensor
//   PER_WEIGHT_CHANNEL     — scales along dim 0 (Nx1x1x1)
//   PER_ACTIVATION_CHANNEL — scales along dim 1 (1xCx1x1)
// Raises (via TORCH_CHECK) if the scale shapes are inconsistent with each
// other, with `input`, or are not flat along a single dimension.
ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range)
{
    TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality");
    uint64_t scale_dim = input_range.dim();
    // FIX: unsigned loop index to match scale_dim (was signed/unsigned comparison)
    for (uint64_t i = 0; i < scale_dim; i++)
    {
        TORCH_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes");
    }

    uint64_t scale_count = input_range.numel();

    if (scale_dim > 0)
    {
        // For (NxCxHxW) input/output tensors, it is assumed that input_range is
        // either (1) for single-scale quantization, or (Nx1x1x1) for
        // per-channel scale weights quantization, or (1xCx1x1) for per-channel
        // activation quantization
        if (input_range.size(0) > 1)
        {
            TORCH_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different");
            TORCH_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat");
            return ScaleType::PER_WEIGHT_CHANNEL;
        }
        else if (scale_dim >= 2 and input_range.size(1) > 1)
        {
            TORCH_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different");
            TORCH_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat");
            return ScaleType::PER_ACTIVATION_CHANNEL;
        }
        // For (1x1x1x1) input/output tensors, it is assumed that input_range
        // should be PER_WEIGHT_CHANNEL
        if (scale_count == 1)
            return ScaleType::PER_WEIGHT_CHANNEL;
    }
    return ScaleType::SINGLE_SCALE;
}
namespace {
// Fake-quantizes a single value: clamps *input into the representable range
// [*input_low, *input_low + *input_range], snaps it onto a uniform grid of
// `levels` points over that range, and writes the de-quantized result to
// *output. Pointers are used for the scalars so the same code serves both
// broadcast and per-channel scale layouts.
template <typename scalar_t>
__device__ void fakeQuantize(
        scalar_t* __restrict__ output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ input_low,
        const scalar_t* __restrict__ input_range,
        const scalar_t levels
) {
    const scalar_t low = (*input_low);
    const scalar_t range = (*input_range);
    // Quantization steps per unit of input range.
    const scalar_t scale = (levels - 1) / range;
    // Clamp first, then round to the nearest grid point.
    const scalar_t clamped = min(max((*input), low), low + range);
    (*output) = round((clamped - low) * scale) / scale + low;
}
// Element-wise forward fake-quantization: one thread per output element.
// The flat element index is mapped onto the (broadcast) scale tensors via
// contiguous_elements_per_scale (how many consecutive elements share one
// scale entry) and scale_count (number of scale entries).
template <typename scalar_t>
__global__ void q_cuda_forward_kernel(
        scalar_t* __restrict__ output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ input_low,
        const scalar_t* __restrict__ input_range,
        const scalar_t levels,
        const uint64_t size,
        const uint64_t contiguous_elements_per_scale,
        const uint64_t scale_count) {
    const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        // "Scales" are derived from input_low/input_range
        uint64_t scale_idx = static_cast<uint64_t>(idx / contiguous_elements_per_scale) % scale_count;
        fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels);
    }
}
// Computes the three gradient contributions for one element of the
// fake-quantize backward pass (straight-through estimator):
//   - val_grad_input:       gradient w.r.t. the input value
//   - val_grad_input_low:   gradient w.r.t. the range's lower bound
//   - val_grad_input_range: gradient w.r.t. the range's width
// Inputs clamped below range_low or above range_high pass no gradient to
// the input; in-range inputs pass grad_output through unchanged and feed
// the relative quantization error into the range gradient.
// `val_low_grad` is the level_low/level_high factor computed by callers
// and applied to the range gradient on the low-clamp side.
template <typename scalar_t>
__device__ void calcGrad(
        scalar_t* __restrict__ val_grad_input,
        scalar_t* __restrict__ val_grad_input_low,
        scalar_t* __restrict__ val_grad_input_range,
        const scalar_t* __restrict__ grad_output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ output,
        const scalar_t range_low,
        const scalar_t range_high,
        const scalar_t reverted_range,
        const scalar_t val_low_grad) {
    // Default all three contributions to zero; branches below overwrite.
    *val_grad_input_range = 0;
    *val_grad_input_low = 0;
    *val_grad_input = 0;
    if ((*input) < range_low) {
        // Clamped from below.
        (*val_grad_input_range) = val_low_grad * (*grad_output);
        (*val_grad_input_low) = (*grad_output);
    } else if ((*input) > range_high) {
        // Clamped from above.
        (*val_grad_input_range) = (*grad_output);
        (*val_grad_input_low) = (*grad_output);
    } else {
        // In range: range gradient is the normalized quantization error
        // (output - input) / input_range; input gradient passes through.
        (*val_grad_input_range) = (*grad_output) * (((*output) - (*input)) * reverted_range);
        (*val_grad_input) = (*grad_output);
    }
}
// Backward kernel for the SINGLE_SCALE case: a single scalar input_low and
// input_range shared by all `size` elements.
// grad_input is written per element; the scalar grad_input_low and
// grad_input_range are sums over all elements, accumulated per thread in a
// grid-stride loop and then combined block- and grid-wide by
// reduce_with_shared_memory (defined elsewhere) using the dev_tmp_* staging
// buffers and the dev_last_block_counter_* completion counters.
template <typename scalar_t>
__global__ void q_single_scale_cuda_backward_kernel(
        scalar_t* __restrict__ grad_input,
        scalar_t* __restrict__ grad_input_low,
        scalar_t* __restrict__ grad_input_range,
        scalar_t* __restrict__ dev_tmp_range,
        scalar_t* __restrict__ dev_tmp_low,
        int32_t* __restrict__ dev_last_block_counter_range,
        int32_t* __restrict__ dev_last_block_counter_low,
        const scalar_t* __restrict__ grad_output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ input_low,
        const scalar_t* __restrict__ input_range,
        const scalar_t levels,
        const scalar_t level_low,
        const scalar_t level_high,
        const size_t size) {
    const uint16_t tidx = threadIdx.x;
    const uint32_t bidx = blockIdx.x;
    const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
    const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x;
    // Per-thread partial sums for the two scalar gradients.
    scalar_t sum_range = 0, sum_low = 0;
    scalar_t output, val_grad_input_range, val_grad_input_low;
    // Factor applied to the range gradient when clamped from below.
    scalar_t alpha = level_low / level_high;
    // Hoist the scalar range parameters out of the loop.
    scalar_t range_low = (*input_low);
    scalar_t range_high = (*input_low) + (*input_range);
    scalar_t reverted_range = 1 / (*input_range);
    // Grid-stride loop over all elements.
    for (size_t i = gtidx; i < size; i += grid_size) {
        fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
        calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
                           (input + i), &output, range_low, range_high, reverted_range, alpha);
        sum_range += val_grad_input_range;
        sum_low += val_grad_input_low;
    }
    // Block-level then grid-level reduction of the partial sums into the
    // 1-element grad_input_range / grad_input_low outputs.
    __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    reduce_with_shared_memory<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, gridDim.x);
    reduce_with_shared_memory<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, gridDim.x);
}
// Backward kernel for PER_WEIGHT_CHANNEL scales: each scale owns a block of
// `elements_per_scale` contiguous elements. The launch grid is 2D:
// blockIdx.x selects the scale (channel); blockIdx.y indexes the blocks
// cooperating on that channel, so each channel runs its own independent
// reduction over its slice of dev_tmp_* / dev_last_block_counter_*.
template <typename scalar_t>
__global__ void q_scale_per_weight_channel_cuda_backward_kernel(
        scalar_t* __restrict__ grad_input,
        scalar_t* __restrict__ grad_input_low,
        scalar_t* __restrict__ grad_input_range,
        scalar_t* __restrict__ dev_tmp_range,
        scalar_t* __restrict__ dev_tmp_low,
        int32_t* __restrict__ dev_last_block_counter_range,
        int32_t* __restrict__ dev_last_block_counter_low,
        const scalar_t* __restrict__ grad_output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ input_low,
        const scalar_t* __restrict__ input_range,
        const scalar_t levels,
        const scalar_t level_low,
        const scalar_t level_high,
        const size_t elements_per_scale) {
    const uint16_t tidx = threadIdx.x;
    const uint32_t scale_idx = blockIdx.x;
    const uint32_t per_scale_block_idx = blockIdx.y;
    const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
    const uint32_t total_blocks_per_scale = gridDim.y;
    const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
    // Applying scale data offsets: after this, every pointer below refers to
    // this channel's scalar scale entry / staging slice / output slot.
    input_low += scale_idx;
    input_range += scale_idx;
    dev_tmp_low += scale_idx * total_blocks_per_scale;
    dev_tmp_range += scale_idx * total_blocks_per_scale;
    dev_last_block_counter_low += scale_idx;
    dev_last_block_counter_range += scale_idx;
    grad_input_low += scale_idx;
    grad_input_range += scale_idx;
    // Element-data offset: this channel's contiguous slice of the tensors.
    const size_t offset_for_scaled_quantized_elements = scale_idx * elements_per_scale;
    input += offset_for_scaled_quantized_elements;
    grad_input += offset_for_scaled_quantized_elements;
    grad_output += offset_for_scaled_quantized_elements;
    // Per-thread partial sums for this channel's two scale gradients.
    scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
    scalar_t output, val_grad_input_range, val_grad_input_low;
    scalar_t alpha = level_low / level_high;
    scalar_t range_low = (*input_low);
    scalar_t range_high = (*input_low) + (*input_range);
    scalar_t reverted_range = 1 / (*input_range);
    // Stride over this channel's elements with all of its threads.
    for (size_t i = per_scale_tidx; i < elements_per_scale; i += total_threads_per_scale) {
        fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
        calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
                           (input + i), &output, range_low, range_high, reverted_range, alpha);
        per_thread_grad_sum_range += val_grad_input_range;
        per_thread_grad_sum_low += val_grad_input_low;
    }
    // Combine partial sums within this channel's blocks, then across them
    // (see reduce_with_shared_memory, defined elsewhere).
    __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
    reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
// Backward kernel for PER_ACTIVATION_CHANNEL scales (scales along dim 1).
// Unlike the per-weight-channel case, a channel's elements are NOT
// contiguous: each batch entry contributes a `contiguous_elements_per_scale`
// run, with consecutive runs of one channel spaced `leading_channel_offset`
// apart. Grid layout matches the per-weight kernel: blockIdx.x = channel,
// blockIdx.y = cooperating block within that channel.
template <typename scalar_t>
__global__ void q_scale_per_activation_channel_cuda_backward_kernel(
        scalar_t* __restrict__ grad_input,
        scalar_t* __restrict__ grad_input_low,
        scalar_t* __restrict__ grad_input_range,
        scalar_t* __restrict__ dev_tmp_range,
        scalar_t* __restrict__ dev_tmp_low,
        int32_t* __restrict__ dev_last_block_counter_range,
        int32_t* __restrict__ dev_last_block_counter_low,
        const scalar_t* __restrict__ grad_output,
        const scalar_t* __restrict__ input,
        const scalar_t* __restrict__ input_low,
        const scalar_t* __restrict__ input_range,
        const scalar_t levels,
        const scalar_t level_low,
        const scalar_t level_high,
        const int64_t total_elements_per_scale,
        const int64_t contiguous_elements_per_scale,
        const int64_t scale_count,
        const int64_t leading_channel_offset) {
    const uint16_t tidx = threadIdx.x;
    const uint32_t scale_idx = blockIdx.x;
    const uint32_t per_scale_block_idx = blockIdx.y;
    const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
    const uint32_t total_blocks_per_scale = gridDim.y;
    const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
    // Applying scale data offsets: point everything at this channel's scale
    // entry, staging slice, and output slot.
    input_low += scale_idx;
    input_range += scale_idx;
    dev_tmp_low += scale_idx * total_blocks_per_scale;
    dev_tmp_range += scale_idx * total_blocks_per_scale;
    dev_last_block_counter_low += scale_idx;
    dev_last_block_counter_range += scale_idx;
    grad_input_low += scale_idx;
    grad_input_range += scale_idx;
    // Per-thread partial sums for this channel's two scale gradients.
    scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
    scalar_t output, val_grad_input_range, val_grad_input_low;
    scalar_t alpha = level_low / level_high;
    scalar_t range_low = (*input_low);
    scalar_t range_high = (*input_low) + (*input_range);
    scalar_t reverted_range = 1 / (*input_range);
    // The blocks of values belonging to one and the same scale here are interleaved with a period
    // equal to contiguous_elements_per_scale. Will apply an offset to the beginning of the first
    // block of values belonging to the current scale of the thread block, and then, in the for loop, map
    // a contiguously changing loop iteration index into a value-block-skipping offset calculation pattern.
    const size_t initial_offset = scale_idx * contiguous_elements_per_scale;
    input += initial_offset;
    grad_input += initial_offset;
    grad_output += initial_offset;
    for (uint64_t i = per_scale_tidx; i < total_elements_per_scale; i += total_threads_per_scale) {
        // Map the dense per-channel index i onto the strided memory layout.
        size_t additional_offset = (i / contiguous_elements_per_scale) * leading_channel_offset + (i % contiguous_elements_per_scale);
        fakeQuantize<scalar_t>(&output, (input + additional_offset), input_low, input_range, levels);
        calcGrad<scalar_t>((grad_input + additional_offset), &val_grad_input_low, &val_grad_input_range, (grad_output + additional_offset),
                           (input + additional_offset), &output, range_low, range_high, reverted_range, alpha);
        per_thread_grad_sum_range += val_grad_input_range;
        per_thread_grad_sum_low += val_grad_input_low;
    }
    // Combine partial sums within and across this channel's blocks
    // (see reduce_with_shared_memory, defined elsewhere).
    __shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    __shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
    reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
    reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
}
// Host-side launcher for the fake-quantization forward pass.
// Determines how many consecutively stored elements share one scale entry
// from the detected scale layout, then dispatches the element-wise kernel
// over all floating dtypes (including half). Returns a tensor shaped like
// `input` containing the fake-quantized values.
at::Tensor q_cuda_forward(
        at::Tensor input,
        at::Tensor input_low,
        at::Tensor input_range,
        int levels) {
    at::DeviceGuard guard(input.device());
    const auto total_elements = input.numel();
    const uint64_t scale_count = input_range.numel();
    const ScaleType scale_type = get_scale_type(input, input_low, input_range);
    // Number of consecutively stored elements sharing a single scale entry.
    uint64_t contiguous_elements_per_scale;
    if (scale_type == ScaleType::PER_ACTIVATION_CHANNEL) {
        // Scale count should be equal to 1-st input tensor dimension
        contiguous_elements_per_scale = total_elements / (input.size(0) * scale_count);
    } else if (scale_type == ScaleType::PER_WEIGHT_CHANNEL) {
        // Scale count should be equal to 0-th input tensor dimension
        contiguous_elements_per_scale = total_elements / scale_count;
    } else {
        // Single scale: the whole tensor shares one entry.
        contiguous_elements_per_scale = total_elements;
    }
    auto output = at::empty_like(input);
    PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_cuda_forward", ([&] {
        q_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(total_elements), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
            output.data_ptr<scalar_t>(),
            input.data_ptr<scalar_t>(),
            input_low.data_ptr<scalar_t>(),
            input_range.data_ptr<scalar_t>(),
            levels,
            total_elements,
            contiguous_elements_per_scale,
            scale_count);
    }));)
    return output;
}
// Host-side launcher for the SINGLE_SCALE backward pass.
// Returns {grad_input, grad_input_low, grad_input_range}; the last two are
// 1-element tensors produced by an inter-block reduction staged in the
// dev_tmp_* buffers, with the zero-initialized dev_last_block_counter_*
// tensors used by reduce_with_shared_memory to track block completion.
std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output,
        at::Tensor input,
        at::Tensor input_low,
        at::Tensor input_range,
        int levels,
        int level_low,
        int level_high) {
    at::DeviceGuard guard(input.device());
    const auto size = input.numel();
    auto grad_input = at::empty_like(grad_output);
    auto grad_input_range = at::empty({1}, grad_output.options());
    auto grad_input_low = at::empty({1}, grad_output.options());
    // Cap the block count so the per-block staging buffers stay bounded.
    auto grid_size = std::min(GET_BLOCKS(size), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE);
    auto dev_tmp_range = at::empty({grid_size}, grad_output.options());
    auto dev_tmp_low = at::empty({grid_size}, grad_output.options());
    // Completion counters must start at zero for the reduction protocol.
    auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
    auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
    PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] {
        q_single_scale_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
            grad_input.data_ptr<scalar_t>(),
            grad_input_low.data_ptr<scalar_t>(),
            grad_input_range.data_ptr<scalar_t>(),
            dev_tmp_range.data_ptr<scalar_t>(),
            dev_tmp_low.data_ptr<scalar_t>(),
            dev_last_block_counter_range.data_ptr<int32_t>(),
            dev_last_block_counter_low.data_ptr<int32_t>(),
            grad_output.data_ptr<scalar_t>(),
            input.data_ptr<scalar_t>(),
            input_low.data_ptr<scalar_t>(),
            input_range.data_ptr<scalar_t>(),
            levels,
            level_low,
            level_high,
            size);
    }));)
    return {grad_input, grad_input_low, grad_input_range};
}
// Host-side launcher for the PER_WEIGHT_CHANNEL backward pass: one scale per
// dim-0 slice of `input`, each channel reduced independently on a 2D grid
// (x = channel, y = blocks cooperating on that channel).
// Returns {grad_input, grad_input_low, grad_input_range}, the latter two
// shaped like input_range.
// Fix: the AT_DISPATCH label previously read "q_single_scale_cuda_backward"
// (copy-paste from the single-scale launcher), which mislabelled profiler
// traces and dispatch error messages for this code path.
std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output,
        at::Tensor input,
        at::Tensor input_low,
        at::Tensor input_range,
        int levels,
        int level_low,
        int level_high) {
    at::DeviceGuard guard(input.device());
    const auto scale_count = input_range.size(0);
    const auto elements_per_scale = input.numel() / scale_count;
    auto grad_input = at::empty_like(grad_output);
    auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
    auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
    dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
    // Per-channel staging buffers and zero-initialized completion counters
    // for the inter-block reduction (see reduce_with_shared_memory).
    auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
    auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
    auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
    auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
    PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_weight_channel_cuda_backward", ([&] {
        q_scale_per_weight_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
            grad_input.data_ptr<scalar_t>(),
            grad_input_low.data_ptr<scalar_t>(),
            grad_input_range.data_ptr<scalar_t>(),
            dev_tmp_range.data_ptr<scalar_t>(),
            dev_tmp_low.data_ptr<scalar_t>(),
            dev_last_block_counter_range.data_ptr<int32_t>(),
            dev_last_block_counter_low.data_ptr<int32_t>(),
            grad_output.data_ptr<scalar_t>(),
            input.data_ptr<scalar_t>(),
            input_low.data_ptr<scalar_t>(),
            input_range.data_ptr<scalar_t>(),
            levels,
            level_low,
            level_high,
            elements_per_scale);
    }));
    )
    return {grad_input, grad_input_low, grad_input_range};
}
// Host-side launcher for the PER_ACTIVATION_CHANNEL backward pass: one scale
// per dim-1 slice of `input`. A channel's elements are strided across batch
// entries, so the kernel also needs the per-batch run length
// (contiguous_elements_per_scale) and the distance between consecutive runs
// of the same channel (leading_channel_offset).
// Returns {grad_input, grad_input_low, grad_input_range}, the latter two
// shaped like input_range.
std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output,
        at::Tensor input,
        at::Tensor input_low,
        at::Tensor input_range,
        int levels,
        int level_low,
        int level_high) {
    at::DeviceGuard guard(input.device());
    const auto scale_count = input_range.size(1);
    const auto total_elements_per_scale = input.numel() / scale_count;
    const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0));
    const auto leading_channel_offset = input.numel() / input.size(0);
    auto grad_input = at::empty_like(grad_output);
    auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
    auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
    dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
    // Per-channel staging buffers and zero-initialized completion counters
    // for the inter-block reduction (see reduce_with_shared_memory).
    auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
    auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
    auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
    auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
    PROFILE(
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_activation_channel_cuda_backward", ([&] {
        q_scale_per_activation_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
            grad_input.data_ptr<scalar_t>(),
            grad_input_low.data_ptr<scalar_t>(),
            grad_input_range.data_ptr<scalar_t>(),
            dev_tmp_range.data_ptr<scalar_t>(),
            dev_tmp_low.data_ptr<scalar_t>(),
            dev_last_block_counter_range.data_ptr<int32_t>(),
            dev_last_block_counter_low.data_ptr<int32_t>(),
            grad_output.data_ptr<scalar_t>(),
            input.data_ptr<scalar_t>(),
            input_low.data_ptr<scalar_t>(),
            input_range.data_ptr<scalar_t>(),
            levels,
            level_low,
            level_high,
            total_elements_per_scale,
            contiguous_elements_per_scale,
            scale_count,
            leading_channel_offset);
    }));
    )
    return {grad_input, grad_input_low, grad_input_range};
}
// Entry point for the fake-quantize backward pass: detects the scale layout
// and forwards to the matching specialized launcher. Always returns
// {grad_input, grad_input_low, grad_input_range}.
std::vector<at::Tensor> q_cuda_backward(
        at::Tensor grad_output,
        at::Tensor input,
        at::Tensor input_low,
        at::Tensor input_range,
        int levels,
        int level_low,
        int level_high) {
    at::DeviceGuard guard(input.device());
    const ScaleType scale_type = get_scale_type(input, input_low, input_range);
    if (scale_type == ScaleType::PER_ACTIVATION_CHANNEL) {
        return q_scale_per_activation_channel_cuda_backward(
            grad_output,
            input,
            input_low,
            input_range,
            levels,
            level_low,
            level_high);
    }
    if (scale_type == ScaleType::PER_WEIGHT_CHANNEL) {
        return q_scale_per_weight_channel_cuda_backward(
            grad_output,
            input,
            input_low,
            input_range,
            levels,
            level_low,
            level_high);
    }
    // ScaleType::SINGLE_SCALE (and any future layout) falls back here.
    return q_single_scale_cuda_backward(
        grad_output,
        input,
        input_low,
        input_range,
        levels,
        level_low,
        level_high);
}
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <cuda_runtime_api.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n/2 threads
- only works for power-of-2 arrays
This version adds multiple elements per thread sequentially. This reduces
the overall cost of the algorithm while keeping the work complexity O(n) and
the step complexity O(log n). (Brent's Theorem optimization)
See the CUDA SDK "reduction" sample for more information.
*/
// Reduces the per-thread partial sums of one thread block into sdata[0].
// Each 32-thread tile first tree-reduces its own 32-element slice of the
// (volatile) shared array; thread 0 of the block then sums the head element
// of every slice sequentially. All threads see the result after the final
// block-wide sync.
template <unsigned int blockSize>
__device__ void reduceBlock(volatile float *sdata, float mySum,
                            const unsigned int tid, cg::thread_block cta) {
  cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
  sdata[tid] = mySum;
  cg::sync(tile32);

  const int VEC = 32;
  const int vid = tid & (VEC - 1);  // lane index within the 32-wide tile

  float beta = mySum;
  float temp;

  // Tree reduction within each 32-wide slice of shared memory.
  for (int i = VEC / 2; i > 0; i >>= 1) {
    if (vid < i) {
      temp = sdata[tid + i];
      beta += temp;
      sdata[tid] = beta;
    }
    cg::sync(tile32);
  }
  cg::sync(cta);

  if (cta.thread_rank() == 0) {
    beta = 0;
    // Sum the head element of each 32-wide slice.
    for (int i = 0; i < blockDim.x; i += VEC) {
      beta += sdata[i];
    }
    sdata[0] = beta;
  }
  cg::sync(cta);
}
// First reduction level: every thread strides through global memory
// accumulating a private sum (two loads per step when possible — the
// Brent's-theorem optimization described above), then the block total is
// formed in dynamic shared memory and written to g_odata[blockIdx.x].
template <unsigned int blockSize, bool nIsPow2>
__device__ void reduceBlocks(const float *g_idata, float *g_odata,
                             unsigned int n, cg::thread_block cta) {
  extern __shared__ float sdata[];

  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  unsigned int tid = threadIdx.x;
  unsigned int i = blockIdx.x * (blockSize * 2) + threadIdx.x;
  unsigned int gridSize = blockSize * 2 * gridDim.x;
  float mySum = 0;

  // we reduce multiple elements per thread. The number is determined by the
  // number of active thread blocks (via gridDim). More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  while (i < n) {
    mySum += g_idata[i];

    // ensure we don't read out of bounds -- this is optimized away for powerOf2
    // sized arrays
    if (nIsPow2 || i + blockSize < n) mySum += g_idata[i + blockSize];

    i += gridSize;
  }

  // do reduction in shared mem
  reduceBlock<blockSize>(sdata, mySum, tid, cta);

  // write result for this block to global mem
  if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Multi-pass reduction kernel: one launch reduces g_idata to one partial
// sum per block in g_odata; combining those partials is left to the caller
// (e.g. by further passes over g_odata).
template <unsigned int blockSize, bool nIsPow2>
__global__ void reduceMultiPass(const float *g_idata, float *g_odata,
                                unsigned int n) {
  // Handle to thread block group
  cg::thread_block cta = cg::this_thread_block();
  reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n, cta);
}
// Global variable used by reduceSinglePass to count how many blocks have
// finished
__device__ unsigned int retirementCount = 0;
// Host-side setter for the device symbol `retirementCount` used by
// reduceSinglePass to detect the last block to finish. Copies retCnt
// (reinterpreted as unsigned int) to the device and returns the
// cudaMemcpyToSymbol status.
cudaError_t setRetirementCount(int retCnt) {
  return cudaMemcpyToSymbol(retirementCount, &retCnt, sizeof(unsigned int), 0,
                            cudaMemcpyHostToDevice);
}
// This reduction kernel reduces an arbitrary size array in a single kernel
// invocation It does so by keeping track of how many blocks have finished.
// After each thread block completes the reduction of its own block of data, it
// "takes a ticket" by atomically incrementing a global counter. If the ticket
// value is equal to the number of thread blocks, then the block holding the
// ticket knows that it is the last block to finish. This last block is
// responsible for summing the results of all the other blocks.
//
// In order for this to work, we must be sure that before a block takes a
// ticket, all of its memory transactions have completed. This is what
// __threadfence() does -- it blocks until the results of all outstanding memory
// transactions within the calling thread are visible to all other threads.
//
// For more details on the reduction algorithm (notably the multi-pass
// approach), see the "reduction" sample in the CUDA SDK.
template <unsigned int blockSize, bool nIsPow2>
__global__ void reduceSinglePass(const float *g_idata, float *g_odata,
                                 unsigned int n) {
  // Handle to thread block group
  cg::thread_block cta = cg::this_thread_block();

  //
  // PHASE 1: Process all inputs assigned to this block
  //

  reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n, cta);

  //
  // PHASE 2: Last block finished will process all partial sums
  //

  if (gridDim.x > 1) {
    const unsigned int tid = threadIdx.x;
    // amLast is shared so thread 0's ticket result is visible block-wide.
    __shared__ bool amLast;
    extern float __shared__ smem[];

    // wait until all outstanding memory instructions in this thread are
    // finished
    __threadfence();

    // Thread 0 takes a ticket
    if (tid == 0) {
      unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
      // If the ticket ID is equal to the number of blocks, we are the last
      // block!
      amLast = (ticket == gridDim.x - 1);
    }

    cg::sync(cta);

    // The last block sums the results of all other blocks
    if (amLast) {
      int i = tid;
      float mySum = 0;

      while (i < gridDim.x) {
        mySum += g_odata[i];
        i += blockSize;
      }

      reduceBlock<blockSize>(smem, mySum, tid, cta);

      if (tid == 0) {
        g_odata[0] = smem[0];

        // reset retirement count so that next run succeeds
        retirementCount = 0;
      }
    }
  }
}
// Returns true iff x is a power of two. Zero is explicitly rejected: the
// bare bit trick ((x & (x - 1)) == 0) also accepts 0, which is not a power
// of two and would wrongly select the pow2-specialized kernels for an
// empty input.
bool isPow2(unsigned int x) { return x != 0 && ((x & (x - 1)) == 0); }
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////

// Maps the runtime thread count onto a compile-time block-size template
// argument for reduceMultiPass. Templating on nIsPow2 removes the original
// wholesale duplication of this switch for the pow2/non-pow2 variants.
template <bool nIsPow2>
static void launchReduceMultiPass(int size, int threads, dim3 dimGrid,
                                  dim3 dimBlock, int smemSize, float *d_idata,
                                  float *d_odata) {
  switch (threads) {
    case 512:
      reduceMultiPass<512, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 256:
      reduceMultiPass<256, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 128:
      reduceMultiPass<128, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 64:
      reduceMultiPass<64, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 32:
      reduceMultiPass<32, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 16:
      reduceMultiPass<16, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 8:
      reduceMultiPass<8, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 4:
      reduceMultiPass<4, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 2:
      reduceMultiPass<2, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 1:
      reduceMultiPass<1, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;
  }
}

// Launches one multi-pass reduction step over `size` floats: d_odata
// receives one partial sum per block. `threads` must be one of the
// power-of-two block sizes handled above.
extern "C" void reduce(int size, int threads, int blocks, float *d_idata,
                       float *d_odata) {
  dim3 dimBlock(threads, 1, 1);
  dim3 dimGrid(blocks, 1, 1);
  // Blocks of <= 32 threads need 2x the shared floats: reduceBlock reads
  // sdata[tid + i] past blockDim.x during the in-tile tree reduction.
  int smemSize =
      (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);

  // choose which of the optimized versions of reduction to launch
  if (isPow2(size)) {
    launchReduceMultiPass<true>(size, threads, dimGrid, dimBlock, smemSize,
                                d_idata, d_odata);
  } else {
    launchReduceMultiPass<false>(size, threads, dimGrid, dimBlock, smemSize,
                                 d_idata, d_odata);
  }
}
// Maps the runtime thread count onto a compile-time block-size template
// argument for the single-pass reduction kernel. Templating on nIsPow2
// removes the original wholesale duplication of this switch for the
// pow2/non-pow2 variants.
template <bool nIsPow2>
static void launchReduceSinglePass(int size, int threads, dim3 dimGrid,
                                   dim3 dimBlock, int smemSize,
                                   float *d_idata, float *d_odata) {
  switch (threads) {
    case 512:
      reduceSinglePass<512, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 256:
      reduceSinglePass<256, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 128:
      reduceSinglePass<128, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 64:
      reduceSinglePass<64, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 32:
      reduceSinglePass<32, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 16:
      reduceSinglePass<16, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 8:
      reduceSinglePass<8, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 4:
      reduceSinglePass<4, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 2:
      reduceSinglePass<2, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;

    case 1:
      reduceSinglePass<1, nIsPow2>
          <<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
      break;
  }
}

// Reduces `size` floats to a single sum in d_odata[0] with ONE kernel
// launch (last-block-standing protocol; see the comment above the
// reduceSinglePass kernel). `threads` must be one of the power-of-two
// block sizes handled above.
extern "C" void reduceSinglePass(int size, int threads, int blocks,
                                 float *d_idata, float *d_odata) {
  dim3 dimBlock(threads, 1, 1);
  dim3 dimGrid(blocks, 1, 1);
  int smemSize = threads * sizeof(float);

  // choose which of the optimized versions of reduction to launch
  if (isPow2(size)) {
    launchReduceSinglePass<true>(size, threads, dimGrid, dimBlock, smemSize,
                                 d_idata, d_odata);
  } else {
    launchReduceSinglePass<false>(size, threads, dimGrid, dimBlock, smemSize,
                                  d_idata, d_odata);
  }
}
#endif // #ifndef _REDUCE_KERNEL_H_ | the_stack |
* @author Istvan Reguly
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#ifdef KNL
#include <hbwmalloc.h>
#else
#define hbw_malloc malloc
#define hbw_free free
#endif
// Cell-centric "full matrix" storage: the per-material fields (rho, t, p)
// are dense sizex*sizey*Nmats arrays indexed as (i + sizex*j)*Nmats + mat,
// so every cell carries an entry for every material even when absent
// (see initialise_field_rand / initialise_field_static).
struct full_data
{
	int sizex;  // grid extent in x
	int sizey;  // grid extent in y
	int Nmats;  // number of materials
	double * __restrict__ rho;          // per-cell, per-material density
	double * __restrict__ rho_mat_ave;  // NOTE(review): presumably material-averaged density -- confirm against compute kernels
	double * __restrict__ p;            // per-cell, per-material pressure
	double * __restrict__ Vf;           // NOTE(review): presumably volume fraction -- confirm
	double * __restrict__ t;            // per-cell, per-material temperature
	double * __restrict__ V;            // per-cell quantity (semantics defined by kernels elsewhere)
	double * __restrict__ x;            // per-cell coordinate field
	double * __restrict__ y;            // per-cell coordinate field
	double * __restrict__ n;            // semantics defined by kernels elsewhere
	double * __restrict__ rho_ave;      // per-cell averaged density (material-independent)
};
// Compact (sparse) storage for mixed-material grids: *_compact arrays hold
// one entry per cell, while the *_compact_list arrays hold the per-material
// entries of multi-material cells. imaterial / matids / nextfrac / mmc_*
// carry the indexing structure that links cells to their material entries;
// the exact encoding is defined by the kernels that fill them (not visible
// in this file section).
struct compact_data
{
	int sizex;  // grid extent in x
	int sizey;  // grid extent in y
	int Nmats;  // number of materials
	double * __restrict__ rho_compact;               // per-cell value for pure cells
	double * __restrict__ rho_compact_list;          // per-material entries of mixed cells
	double * __restrict__ rho_mat_ave_compact;
	double * __restrict__ rho_mat_ave_compact_list;
	double * __restrict__ p_compact;
	double * __restrict__ p_compact_list;
	double * __restrict__ Vf_compact_list;
	double * __restrict__ t_compact;
	double * __restrict__ t_compact_list;
	double * __restrict__ V;   // per-cell fields shared with the full layout
	double * __restrict__ x;
	double * __restrict__ y;
	double * __restrict__ n;
	double * __restrict__ rho_ave_compact;
	int * __restrict__ imaterial;   // per-cell material id / mixed-cell marker
	int * __restrict__ matids;      // material ids of the list entries
	int * __restrict__ nextfrac;    // next-entry links for mixed-cell chains
	int * __restrict__ mmc_index;   // multi-material cell indexing arrays
	int * __restrict__ mmc_i;
	int * __restrict__ mmc_j;
	int mm_len;     // length of the mixed-material lists
	int mmc_cells;  // number of multi-material cells
};
extern void full_matrix_cell_centric(full_data cc);
extern void full_matrix_material_centric(full_data cc, full_data mc);
extern bool full_matrix_check_results(full_data cc, full_data mc);
extern void compact_cell_centric(full_data cc, compact_data ccc, double &a1, double &a2, double &a3, int argc, char** argv);
extern bool compact_check_results(full_data cc, compact_data ccc);
// Initializes rho/t/p with a random multi-material layout. Each cell gets
// 1-4 distinct materials: prob2/prob3/prob4 are the probabilities of a cell
// containing at least 2/3/4 materials respectively (thresholds are
// cumulative on a single uniform draw r). Materials are drawn from the
// quarter of the material set assigned to the cell's quarter of the domain.
// Fix vs. original: the re-roll loops for m3 and m4 used `&&`, which only
// re-rolled when the candidate matched EVERY previously chosen material, so
// duplicate materials could be selected for one cell; `||` matches the
// evident intent of the m2 loop (re-roll until distinct from all previous).
// m2/m3/m4 are also initialized so the `||` comparisons are well-defined
// even when an earlier branch was skipped.
void initialise_field_rand(full_data cc, double prob2, double prob3, double prob4) {
	//let's use a morton space filling curve here
	srand(0);  // fixed seed: layouts are reproducible across runs
	double prob1 = 1.0-prob2-prob3-prob4;
#ifdef DEBUG
	printf("Random layout %g %g %g %g\n", prob1, prob2, prob3, prob4);
#endif

	for (int n = 0; n < cc.sizex*cc.sizey; n++) {
		int i = n%cc.sizex;//n & 0xAAAA;
		int j = n/cc.sizex;//n & 0x5555;
		double r = (double)rand()/(double)RAND_MAX;
		// First material, drawn from this domain quarter's material range.
		int m = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
		// -1 == "not chosen": can never collide with a valid material index.
		int m2 = -1, m3 = -1, m4 = -1;
		cc.rho[(i+cc.sizex*j)*cc.Nmats+m] = 1.0;
		cc.t[(i+cc.sizex*j)*cc.Nmats+m] = 1.0;
		cc.p[(i+cc.sizex*j)*cc.Nmats+m] = 1.0;
		if (r >= prob1) {
			// Second material: re-roll until distinct from m.
			m2 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			while (m2 == m)
				m2 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			cc.rho[(i+cc.sizex*j)*cc.Nmats+m2] = 1.0;
			cc.t[(i+cc.sizex*j)*cc.Nmats+m2] = 1.0;
			cc.p[(i+cc.sizex*j)*cc.Nmats+m2] = 1.0;
		}
		if (r >= 1.0-prob4-prob3) {
			// Third material: re-roll until distinct from BOTH m and m2.
			m3 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			while (m3 == m || m3 == m2)
				m3 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			cc.rho[(i+cc.sizex*j)*cc.Nmats+m3] = 1.0;
			cc.t[(i+cc.sizex*j)*cc.Nmats+m3] = 1.0;
			cc.p[(i+cc.sizex*j)*cc.Nmats+m3] = 1.0;
		}
		if (r >= 1.0-prob4) {
			// Fourth material: re-roll until distinct from all previous.
			m4 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			while (m4 == m || m4 == m2 || m4 == m3)
				m4 = (double)rand()/(double)RAND_MAX * cc.Nmats/4 + (cc.Nmats/4)*(n/(cc.sizex*cc.sizey/4));
			cc.rho[(i+cc.sizex*j)*cc.Nmats+m4] = 1.0;
			cc.t[(i+cc.sizex*j)*cc.Nmats+m4] = 1.0;
			cc.p[(i+cc.sizex*j)*cc.Nmats+m4] = 1.0;
		}
	}
}
// Static initialisation of the material fields: lays the Nmats materials out
// as concentric rectangular bands (the top half of the mesh uses materials
// [0, Nmats/2), the bottom half uses [Nmats/2, Nmats)), with small overlaps
// between neighbouring bands so that multi-material (mixed) cells appear.
// Writes 1.0 into cc.rho / cc.t / cc.p for every (cell, material) pair covered.
// Layout convention (as everywhere in this file): index (i + sizex*j)*Nmats + mat.
void initialise_field_static(full_data cc) {
  //Pure cells and simple overlaps
  int sizex = cc.sizex;
  int sizey = cc.sizey;
  int Nmats = cc.Nmats;
  int width = sizex/Nmats;   // width of one material band
  // Overlap extents grow with mesh size; both are 0 for meshes up to 1000x1000.
  int overlap_i = std::max(0.0,ceil((double)sizey/1000.0)-1);
  int overlap_j = std::max(0.0,floor((double)sizex/1000.0)-1);
  //Top half: material `mat` forms an upside-down U shape (left leg, right leg, top bar).
  for (int mat = 0; mat < cc.Nmats/2; mat++) {
    #pragma omp parallel for
    for (int j = mat*width; j < sizey/2+overlap_j; j++) {
      // Left vertical leg; the -(mat>0) terms extend one column into the
      // previous band to create mixed cells.
      for (int i = mat*width-(mat>0)-(mat>0)*overlap_i; i < (mat+1)*width; i++) { //+1 for overlap
        cc.rho[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
      // Right vertical leg (mirror image).
      for (int i = sizex-mat*width-1+(mat>0)*overlap_i; i >= sizex-(mat+1)*width-1; i--) { //+1 for overlap
        cc.rho[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
    }
    // Horizontal top bar joining the two legs.
    #pragma omp parallel for
    for (int j = mat*width-(mat>0)-(mat>0)*overlap_j; j < (mat+1)*width; j++) { //+1 for overlap
      for (int i = mat*width-(mat>0)-(mat>0)*overlap_i; i < sizex-mat*width; i++) {
        cc.rho[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
    }
  }
  //Bottom half: same U shapes mirrored vertically, using materials mat+Nmats/2.
  for (int mat = 0; mat < cc.Nmats/2; mat++) {
    #pragma omp parallel for
    for (int j = sizey/2-1-overlap_j; j < sizey-mat*width; j++) {
      for (int i = mat*width-(mat>0)-(mat>0)*overlap_i; i < (mat+1)*width; i++) { //+1 for overlap
        cc.rho[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
      }
      for (int i = sizex-mat*width-1+(mat>0)*overlap_i; i >= sizex-(mat+1)*width-1; i--) { //+1 for overlap
        cc.rho[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
      }
    }
    // Horizontal bottom bar.
    #pragma omp parallel for
    for (int j = sizey-mat*width-1+(mat>0)*overlap_j; j >= sizey-(mat+1)*width-(mat<(cc.Nmats/2-1)); j--) { //+1 for overlap
      for (int i = mat*width; i < sizex-mat*width; i++) {
        cc.rho[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.t[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
        cc.p[(i+sizex*j)*cc.Nmats+mat+cc.Nmats/2] = 1.0;
      }
    }
  }
  //Fill in corners
  // Seed extra 2-/3-/4-material cells around the band junctions at the
  // horizontal midline (j ~ sizey/2) so all multi-material cases occur.
  #pragma omp parallel for
  for (int mat = 1; mat < cc.Nmats/2; mat++) {
    for (int j = sizey/2-3; j < sizey/2-1;j++)
      for (int i = 2; i < 5+overlap_i; i++) {
        //x neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;
        //y neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;
        //x-y neighbour material
        // NOTE(review): below, rho uses material index `+mat` while t and p
        // use `+mat-1` (and vice versa on the next line). Looks deliberate
        // (the cleanup loop at the end of this function mirrors it), but
        // worth confirming against the intended material map.
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;
      }
    for (int j = sizey/2; j < sizey/2+2+overlap_j;j++)
      for (int i = 2; i < 5; i++) {
        //x neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+cc.Nmats/2+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+cc.Nmats/2+mat] = 1.0;
        //y neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
  }
  // Bottom-half junctions: `only_8` limits how many cells receive the full
  // 4-material treatment (keeps the global 2/3/4-material cell counts fixed).
  int only_8 = 0;
  for (int mat = cc.Nmats/2+1; mat < cc.Nmats; mat++) {
    for (int j = sizey/2-3; j < sizey/2-1;j++)
      for (int i = 2; i < 5; i++) {
        //x neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;
        //y neighbour material
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
    for (int j = sizey/2; j < sizey/2+2;j++)
      for (int i = 2; i < 4; i++) {
        if (i < 3 && only_8<6) {
          //y neighbour material
          cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;
          cc.rho[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;
        }
        if (i==2 && only_8==0) {
          //x-y neighbour material
          // NOTE(review): same mixed mat / mat-1 index pattern as in the
          // top-half corner fill above — assumed intentional.
          cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;
          cc.rho[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 1.0;
        }
        //x neighbour material
        // Stop adding x-neighbour overlaps after the quota is reached in the
        // last 8 material bands.
        if (mat >= cc.Nmats-8 && j==sizey/2+1 && i==3) if (only_8++>=4) {
          break;
        }
        cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats+mat-1] = 1.0;
        cc.rho[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.t[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;cc.p[(mat*width-i+sizex*j)*cc.Nmats+mat] = 1.0;
      }
  }
  // Undo a handful of the x-y overlaps created above (sets them back to 0.0)
  // for the first four bands past the midpoint.
  #pragma omp parallel for
  for (int mat=cc.Nmats/2+1; mat < cc.Nmats/2+5; mat++) {
    int i = 2; int j = sizey/2+1;
    cc.rho[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat] = 0.0;cc.t[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 0.0;cc.p[(mat*width+i-2+sizex*j)*cc.Nmats-cc.Nmats/2+mat-1] = 0.0;
  }
}
// Reads per-cell material volume fractions from "volfrac.dat" (a 1000x1000
// template grid) and replicates each template cell onto an sx*sy block of the
// sizex*sizey mesh, which must therefore be an integer multiple of 1000 in
// each dimension.  For every material with a non-zero volume fraction the
// state fields rho/t/p are initialised to 1.0 and Vf is set to the read value.
// Exits with an error message on open failure, read failure, or a
// material-count / mesh-size mismatch.
void initialise_field_file(full_data cc) {
  int sizex = cc.sizex;
  int sizey = cc.sizey;
  int Nmats = cc.Nmats;
  int status;
  FILE *fp;
  fp = fopen("volfrac.dat", "r");
  if (!fp) {
    fprintf(stderr, "unable to read volume fractions from file \"%s\"\n",
        "volfrac.dat");
    exit(-1);
  }
  int nmats;
  status = fscanf(fp, "%d", &nmats);
  // BUG FIX: fscanf returns the number of items assigned (0 on a matching
  // failure, EOF on end-of-file); the original `status < 0` let matching
  // failures slip through with nmats uninitialised. Same fix applied to every
  // fscanf below.
  if (status != 1) {
    printf("error in read at line %d\n",__LINE__);
    exit(1);
  }
  if (nmats != Nmats) {
    printf("Error, invalid Nmats: %d!=%d\n", nmats, Nmats);
    exit(1);
  }
  if (sizex%1000 != 0 || sizey%1000!=0) {
    printf("size needs to be an integer multiple of 1000x1000: %dx%d\n", sizex, sizey);
    exit(1);
  }
  // Replication factors: each template cell covers an sx*sy block.
  int sx = sizex/1000;
  int sy = sizey/1000;
  // Second header value; re-read into nmats and otherwise unused
  // (presumably a duplicate count in the file format — TODO confirm).
  status = fscanf(fp, "%d", &nmats);
  if (status != 1) {
    printf("error in read at line %d\n",__LINE__);
    exit(1);
  }
  // Clear all volume fractions before filling from the file.
  for (int j = 0; j < sizey; j++)
    for (int i = 0; i < sizex; i++)
      for (int m = 0; m < nmats; m++)
        cc.Vf[(i+sizex*j)*Nmats+m] = 0.0;
  char matname[256];
  for (int m = 0; m < nmats; m++){
    // BUG FIX: bounded field width — a bare "%s" could overflow matname[256].
    status = fscanf(fp, "%255s", matname); // read and discard material names
    if (status != 1) {
      printf("error in read at line %d\n",__LINE__);
      exit(1);
    }
  }
  // Template grid is a fixed 1000x1000; replicate each entry over sx*sy cells.
  for (int j = 0; j < 1000; j++)
    for (int i = 0; i < 1000; i++)
      for (int m = 0; m < nmats; m++) {
        double volfrac;
        status = fscanf(fp, "%lf", &(volfrac));
        if (status != 1) {
          printf("error in read at line %d\n",__LINE__);
          exit(1);
        }
        if (volfrac > 0.0) {
          for (int jj = 0; jj < sy; jj++)
            for (int ii = 0; ii < sx; ii++) {
              cc.Vf[(i*sx+ii+sizex*(j*sy+jj))*Nmats+m] = volfrac;
              cc.rho[(i*sx+ii+sizex*(j*sy+jj))*Nmats+m] = 1.0;
              cc.t[(i*sx+ii+sizex*(j*sy+jj))*Nmats+m] = 1.0;
              cc.p[(i*sx+ii+sizex*(j*sy+jj))*Nmats+m] = 1.0;
            }
        }
      }
  fclose(fp);
}
// Benchmark driver.  Builds a sizex*sizey mesh with Nmats materials in three
// storage layouts — cell-centric full matrix (cc), material-centric full
// matrix (mc) and cell-centric compact (ccc: pure cells inline, mixed cells in
// a linked/CSR side list) — initialises the fields, runs the kernels, and
// verifies the compact results against the full-matrix reference.
// Usage: prog [sizex [sizey [prob2 prob3 prob4]]]; with 5+ args the random
// initialiser is used, otherwise fields are read from "volfrac.dat".
int main(int argc, char** argv) {
  int sizex = 1000;
  if (argc > 1)
    sizex = atoi(argv[1]);
  int sizey = 1000;
  if (argc > 2)
    sizey = atoi(argv[2]);
  int ncells = sizex*sizey;
  int Nmats = 50;
  full_data cc;
  full_data mc;
  compact_data ccc;
  cc.sizex = sizex;
  mc.sizex = sizex;
  ccc.sizex = sizex;
  cc.sizey = sizey;
  mc.sizey = sizey;
  ccc.sizey = sizey;
  cc.Nmats = Nmats;
  mc.Nmats = Nmats;
  ccc.Nmats = Nmats;
  //Allocate the four state variables for all Nmats materials and all cells
  //density
  cc.rho = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(cc.rho, 0, Nmats*ncells*sizeof(double));
  //average density in neighbourhood
  cc.rho_mat_ave = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(cc.rho_mat_ave, 0, Nmats*ncells*sizeof(double));
  //pressure
  cc.p = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(cc.p, 0, Nmats*ncells*sizeof(double));
  //Fractional volume
  cc.Vf = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(cc.Vf, 0, Nmats*ncells*sizeof(double));
  //temperature
  cc.t = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(cc.t, 0, Nmats*ncells*sizeof(double));
  // Buffers for material-centric representation (filled by conversion below,
  // so no memset needed except for the output-accumulating rho_mat_ave)
  //density
  mc.rho = (double*)malloc(Nmats*ncells*sizeof(double));
  //average density in neighbouring cells
  mc.rho_mat_ave = (double*)malloc(Nmats*ncells*sizeof(double));
  memset(mc.rho_mat_ave, 0, Nmats*ncells*sizeof(double));
  //pressure
  mc.p = (double*)malloc(Nmats*ncells*sizeof(double));
  //Fractional volume
  mc.Vf = (double*)malloc(Nmats*ncells*sizeof(double));
  //temperature
  mc.t = (double*)malloc(Nmats*ncells*sizeof(double));
  //Allocate per-cell only datasets
  cc.V = (double*)malloc(ncells*sizeof(double));
  cc.x = (double*)malloc(ncells*sizeof(double));
  cc.y = (double*)malloc(ncells*sizeof(double));
  //Allocate per-material only datasets
  cc.n = (double*)malloc(Nmats*sizeof(double)); // number of moles
  //Allocate output datasets
  cc.rho_ave = (double*)malloc(ncells*sizeof(double));
  mc.rho_ave = (double*)malloc(ncells*sizeof(double));
  ccc.rho_ave_compact = (double*)hbw_malloc(ncells*sizeof(double));
  // Cell-centric compact storage (high-bandwidth memory)
  ccc.rho_compact = (double*)hbw_malloc(ncells*sizeof(double));
  ccc.rho_mat_ave_compact = (double*)hbw_malloc(ncells*sizeof(double));
  memset(ccc.rho_mat_ave_compact, 0, ncells*sizeof(double));
  ccc.p_compact = (double*)hbw_malloc(ncells*sizeof(double));
  ccc.t_compact = (double*)hbw_malloc(ncells*sizeof(double));
  int *nmats = (int*)hbw_malloc(ncells*sizeof(int));
  ccc.imaterial = (int*)hbw_malloc(ncells*sizeof(int));
  // Side-list capacity: default sized from the static/file layout's known
  // mixed-cell counts per 1000x1000 tile; for the random initialiser it is
  // derived from the requested probabilities with a 10% safety margin.
  double mul = ceil((double)sizex/1000.0) * ceil((double)sizey/1000.0);
  int list_size = mul * 49000 * 2 + 600 * 3 + 400 * 4;
  if (argc>=6)
    list_size = (double(sizex*sizey)*atof(argv[3])*2+double(sizex*sizey)*atof(argv[4])*3+double(sizex*sizey)*atof(argv[5])*4)*1.1;
  //plain linked list
  ccc.nextfrac = (int*)hbw_malloc(list_size*sizeof(int));
  int *frac2cell = (int*)hbw_malloc(list_size*sizeof(int));
  ccc.matids = (int*)hbw_malloc(list_size*sizeof(int));
  //CSR list
  ccc.mmc_index = (int *)hbw_malloc(list_size*sizeof(int)); //CSR mapping for mix cell idx -> compact list position
  ccc.mmc_i = (int *)hbw_malloc(list_size*sizeof(int)); // mixed cell -> physical cell i coord
  ccc.mmc_j = (int *)hbw_malloc(list_size*sizeof(int)); // mixed cell -> physical cell j coord
  ccc.mmc_cells = 0;
  ccc.Vf_compact_list = (double*)hbw_malloc(list_size*sizeof(double));
  ccc.rho_compact_list = (double*)hbw_malloc(list_size*sizeof(double));
  ccc.rho_mat_ave_compact_list = (double*)hbw_malloc(list_size*sizeof(double));
  memset(ccc.rho_mat_ave_compact_list, 0, list_size*sizeof(double));
  ccc.t_compact_list = (double*)hbw_malloc(list_size*sizeof(double));
  ccc.p_compact_list = (double*)hbw_malloc(list_size*sizeof(double));
  int imaterial_multi_cell;
  //Initialise geometry arrays: unit square, uniform cells
  double dx = 1.0/sizex;
  double dy = 1.0/sizey;
  for (int j = 0; j < sizey; j++) {
    for (int i = 0; i < sizex; i++) {
      cc.V[i+j*sizex] = dx*dy;
      cc.x[i+j*sizex] = dx*i;
      cc.y[i+j*sizex] = dy*j;
    }
  }
  for (int mat = 0; mat < Nmats; mat++) {
    cc.n[mat] = 1.0; // dummy value
  }
  //These are the same throughout: share the geometry buffers between layouts
  ccc.V = mc.V = cc.V;
  ccc.x = mc.x = cc.x;
  ccc.y = mc.y = cc.y;
  ccc.n = mc.n = cc.n;
  if (argc>=6) initialise_field_rand(cc, atof(argv[3]), atof(argv[4]), atof(argv[5]));
  else initialise_field_file(cc);
  //else initialise_field_static(cc);
  // BUG FIX: the original wrote `FILE *f = fopen(...)` inside the if below,
  // which declared a NEW `f` local to that statement and left the outer `f`
  // uninitialised for all the later fprintf/fclose calls (undefined behaviour
  // whenever print_to_file is enabled).
  FILE *f = NULL;
  int print_to_file = 0;
  if (print_to_file==1)
    f = fopen("map.txt","w");
  //Compute fractions and count cells by their number of materials
  int cell_counts_by_mat[4] = {0,0,0,0};
  ccc.mmc_cells = 0;
  for (int j = 0; j < sizey; j++) {
    for (int i = 0; i < sizex; i++) {
      int count = 0;
      for (int mat = 0; mat < Nmats; mat++) {
        count += cc.rho[(i+sizex*j)*Nmats+mat]!=0.0;
      }
      if (count == 0) {
        // Repair empty cells so every cell has at least one material.
        printf("Error: no materials in cell %d %d\n",i,j);
        int mat = 1;
        cc.rho[(i+sizex*j)*Nmats+mat] = 1.0;cc.t[(i+sizex*j)*Nmats+mat] = 1.0;cc.p[(i+sizex*j)*Nmats+mat] = 1.0; cc.Vf[(i+sizex*j)*Nmats+mat] = 1.0;
        mc.rho[ncells*mat + i+sizex*j] = 1.0;mc.t[ncells*mat + i+sizex*j] = 1.0;mc.p[ncells*mat + i+sizex*j] = 1.0; mc.Vf[ncells*mat + i+sizex*j] = 1.0;
        count = 1;
      }
      if (count > 1) ccc.mmc_cells++;
      cell_counts_by_mat[count-1]++; // NOTE: assumes count <= 4 materials per cell
      if (print_to_file==1) {
        if (i!=0) fprintf(f,", %d",count);
        else fprintf(f,"%d",count);
      }
      if (argc>=6) //Only if rand - file read has Volfrac already
        for (int mat = 0; mat < Nmats; mat++) {
          if (cc.rho[(i+sizex*j)*Nmats+mat]!=0.0) cc.Vf[(i+sizex*j)*Nmats+mat]=1.0/count;
        }
    }
    if (print_to_file==1)
      fprintf(f,"\n");
  }
#ifdef DEBUG
  printf("Pure cells %d, 2-materials %d, 3 materials %d, 4 materials %d: MMC cells %d\n",
      cell_counts_by_mat[0],cell_counts_by_mat[1],cell_counts_by_mat[2],cell_counts_by_mat[3], ccc.mmc_cells);
#endif
  if (cell_counts_by_mat[1]*2+cell_counts_by_mat[2]*3+cell_counts_by_mat[3]*4 >= list_size) {
    printf("ERROR: list_size too small\n");
    exit(-1);
  }
  if (print_to_file==1)
    fclose(f);
  // Convert representation to material-centric (using extra buffers)
#pragma omp parallel for
  for (int j = 0; j < sizey; j++) {
    for (int i = 0; i < sizex; i++) {
      for (int mat = 0; mat < Nmats; mat++) {
        mc.rho[ncells*mat + i+sizex*j] = cc.rho[(i+sizex*j)*Nmats+mat];
        mc.p[ncells*mat + i+sizex*j] = cc.p[(i+sizex*j)*Nmats+mat];
        mc.Vf[ncells*mat + i+sizex*j] = cc.Vf[(i+sizex*j)*Nmats+mat];
        mc.t[ncells*mat + i+sizex*j] = cc.t[(i+sizex*j)*Nmats+mat];
      }
    }
  }
  // Copy data from cell-centric full matrix storage to cell-centric compact
  // storage: pure cells store their values inline; mixed cells get entries in
  // the linked/CSR side lists.
  imaterial_multi_cell = 0;
  ccc.mmc_cells = 0;
  for (int j = 0; j < sizey; j++) {
    for (int i = 0; i < sizex; i++) {
      int mat_indices[4] = { -1, -1, -1, -1 };
      int matindex = 0;
      int count = 0;
      for (int mat = 0; mat < Nmats; mat++) {
        if (cc.rho[(i+sizex*j)*Nmats+mat]!=0.0) {
          mat_indices[matindex++] = mat;
          count += 1;
        }
      }
      if (count == 0) {
        printf("Error: no materials in cell %d %d\n",i,j);
        int mat = 1;
        cc.rho[(i+sizex*j)*Nmats+mat] = 1.0;cc.t[(i+sizex*j)*Nmats+mat] = 1.0;cc.p[(i+sizex*j)*Nmats+mat] = 1.0; cc.Vf[(i+sizex*j)*Nmats+mat] = 1.0;
        mc.rho[ncells*mat + i+sizex*j] = 1.0;mc.t[ncells*mat + i+sizex*j] = 1.0;mc.p[ncells*mat + i+sizex*j] = 1.0; mc.Vf[ncells*mat + i+sizex*j] = 1.0;
        count = 1;
      }
      if (count == 1) {
        int mat = mat_indices[0];
        ccc.rho_compact[i+sizex*j] = cc.rho[(i+sizex*j)*Nmats+mat];
        ccc.p_compact[i+sizex*j] = cc.p[(i+sizex*j)*Nmats+mat];
        ccc.t_compact[i+sizex*j] = cc.t[(i+sizex*j)*Nmats+mat];
        nmats[i+sizex*j] = -1;
        // NOTE: HACK: we index materials from zero, but zero can be a list index
        ccc.imaterial[i+sizex*j] = mat + 1;
      }
      else { // count > 1
        nmats[i+sizex*j] = count;
        // note the minus sign, it needs to be negative
#ifdef LINKED
        ccc.imaterial[i+sizex*j] = -imaterial_multi_cell;
#else
        ccc.imaterial[i+sizex*j] = -ccc.mmc_cells;
#endif
        ccc.mmc_index[ccc.mmc_cells] = imaterial_multi_cell;
        ccc.mmc_i[ccc.mmc_cells] = i;
        ccc.mmc_j[ccc.mmc_cells] = j;
        ccc.mmc_cells++;
        for (int list_idx = imaterial_multi_cell; list_idx < imaterial_multi_cell + count; ++list_idx) {
          // if last iteration
          if (list_idx == imaterial_multi_cell + count - 1)
            ccc.nextfrac[list_idx] = -1;
          else // not last
            ccc.nextfrac[list_idx] = list_idx + 1;
          frac2cell[list_idx] = i+sizex*j;
          int mat = mat_indices[list_idx - imaterial_multi_cell];
          ccc.matids[list_idx] = mat;
          ccc.Vf_compact_list[list_idx] = cc.Vf[(i+sizex*j)*Nmats+mat];
          ccc.rho_compact_list[list_idx] = cc.rho[(i+sizex*j)*Nmats+mat];
          ccc.p_compact_list[list_idx] = cc.p[(i+sizex*j)*Nmats+mat];
          ccc.t_compact_list[list_idx] = cc.t[(i+sizex*j)*Nmats+mat];
        }
        imaterial_multi_cell += count;
      }
    }
  }
  ccc.mmc_index[ccc.mmc_cells] = imaterial_multi_cell; // CSR sentinel
  ccc.mm_len = imaterial_multi_cell;
  // Reference computation on the full-matrix layout
  full_matrix_cell_centric(cc);
  /*  full_matrix_material_centric(cc, mc);
  // Check results
  if (!full_matrix_check_results(cc, mc)) {
    goto end;
  }*/
  // BUG FIX: parenthesised so MIN composes safely inside larger expressions
  // (the original `(a)<(b)?(a):(b)` mis-binds under surrounding operators).
#define MIN(a,b) ((a)<(b)?(a):(b))
  double a1,a2,a3;
  // Warm-up run, then 10 timed runs keeping the best (minimum) times.
  compact_cell_centric(cc, ccc, a1,a2,a3, argc, argv);
  double t1=100, t2=100, t3=100;
  for (int i = 0; i < 10; i++) {
    a1=a2=a3=0.0;
    compact_cell_centric(cc, ccc, a1,a2,a3, argc, argv);
    /*    t1+=a1;
    t2+=a2;
    t3+=a3;*/
    t1 = MIN(t1,a1*10.0);
    t2 = MIN(t2,a2*10.0);
    t3 = MIN(t3,a3*10.0);
  }
  //printf("%g %g %g\n", t1/10.0,t2/10.0,t3/10.0);
  // Bytes-moved models for the three kernels (used for bandwidth estimates).
  int cell_mat_count = 1*cell_counts_by_mat[0] + 2*cell_counts_by_mat[1]
                     + 3*cell_counts_by_mat[2] + 4*cell_counts_by_mat[3];
  //Alg 1:
  size_t alg1 = 0;
  //read imaterial (sizex*sizey)*sizeof(int)
  alg1 += (sizex*sizey)*sizeof(int);
  //read Vf (cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg1 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
#ifdef FUSED
  //write rho_ave_compact (sizex*sizey)*sizeof(double)
  alg1 += (sizex*sizey)*sizeof(double);
  //read V (sizex*sizey)*sizeof(double)
  alg1 += (sizex*sizey)*sizeof(double);
  //read rho_compact+list cell_mat_count*sizeof(double)
  alg1 += cell_mat_count*sizeof(double);
  //LINKED - read nextfrac (cell_mat_count - cell_counts_by_mat[0])*sizeof(int)
#ifdef LINKED
  // BUG FIX: nextfrac is an int array (see its comment above) — the original
  // counted sizeof(double) here.
  alg1 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(int);
  //CSR - read mmc_index (ccc.mmc_cells+1) * sizeof(int)
#else
  alg1 += (ccc.mmc_cells+1) * sizeof(int);
#endif
#else
  //write rho_ave_compact (sizex*sizey+ccc.mmc_cells)*sizeof(double)
  alg1 += (sizex*sizey+ccc.mmc_cells)*sizeof(double);
  //read V (sizex*sizey+ccc.mmc_cells)*sizeof(double)
  alg1 += (sizex*sizey+ccc.mmc_cells)*sizeof(double);
  //read rho_compact+list (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg1 += (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
  //CSR - read mmc_index (ccc.mmc_cells+1) * sizeof(int)
  alg1 += (ccc.mmc_cells+1) * sizeof(int);
  //CSR - read mmc_i&j (ccc.mmc_cells) * 2 * sizeof(int)
  alg1 += (ccc.mmc_cells) * 2 * sizeof(int);
#endif
  //Alg2
  size_t alg2 = 0;
  //read imaterial (sizex*sizey)*sizeof(int)
  alg2 += (sizex*sizey)*sizeof(int);
  //read Vf (cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg2 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
  //read matids (cell_mat_count - cell_counts_by_mat[0])*sizeof(int)
  alg2 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(int);
#ifdef FUSED
  //read rho_compact+list cell_mat_count*sizeof(double)
  alg2 += cell_mat_count*sizeof(double);
  //read t_compact+list cell_mat_count*sizeof(double)
  alg2 += cell_mat_count*sizeof(double);
  //read p_compact+list cell_mat_count*sizeof(double)
  alg2 += cell_mat_count*sizeof(double);
  //read n Nmats*sizeof(double)
  alg2 += Nmats*sizeof(double);
  //LINKED - read nextfrac (cell_mat_count - cell_counts_by_mat[0])*sizeof(int)
#ifdef LINKED
  // BUG FIX: sizeof(int), as documented above (was sizeof(double)).
  alg2 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(int);
  //CSR - read mmc_index (ccc.mmc_cells+1) * sizeof(int)
#else
  alg2 += (ccc.mmc_cells+1) * sizeof(int);
#endif
#else //FUSED
  //read rho_compact+list (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg2 += (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
  //read t_compact+list (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg2 += (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
  //read p_compact+list (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double)
  alg2 += (sizex*sizey+cell_mat_count - cell_counts_by_mat[0])*sizeof(double);
  //CSR - read mmc_index (ccc.mmc_cells+1) * sizeof(int)
  alg2 += (ccc.mmc_cells+1) * sizeof(int);
  //read n Nmats*sizeof(double)
  alg2 += Nmats*sizeof(double);
#endif
  //Alg3
  size_t alg3 = 0;
  //read x & y
  alg3 += 2*sizex*sizey*sizeof(double);
  //read imaterial (sizex*sizey)*sizeof(int)
  alg3 += (sizex*sizey)*sizeof(int);
  //write rho_mat_ave_compact+list cell_mat_count*sizeof(double)
  alg3 += cell_mat_count*sizeof(double);
  //read matids (cell_mat_count - cell_counts_by_mat[0])*sizeof(int)
  alg3 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(int);
  //read rho_compact+list cell_mat_count*sizeof(double)
  alg3 += cell_mat_count*sizeof(double);
  //LINKED - read nextfrac (cell_mat_count - cell_counts_by_mat[0])*sizeof(int)
#ifdef LINKED
  // BUG FIX: sizeof(int), as documented above (was sizeof(double)).
  alg3 += (cell_mat_count - cell_counts_by_mat[0])*sizeof(int);
  //CSR - read mmc_index (ccc.mmc_cells+1) * sizeof(int)
#else
  alg3 += (ccc.mmc_cells+1) * sizeof(int);
#endif
  //printf("%g %g %g\n", alg1*10.0/t1/1e9, alg1*10.0/t2/1e9, alg3*10.0/t3/1e9);
  // Check results
  if (!compact_check_results(cc, ccc))
  {
    goto end;
  }
end:
  free(mc.rho); free(mc.p); free(mc.Vf); free(mc.t);
  free(cc.rho_mat_ave); free(mc.rho_mat_ave); hbw_free(ccc.rho_mat_ave_compact); hbw_free(ccc.rho_mat_ave_compact_list);
  free(cc.rho); free(cc.p); free(cc.Vf); free(cc.t);
  free(cc.V); free(cc.x); free(cc.y);
  free(cc.n);
  free(cc.rho_ave); free(mc.rho_ave); hbw_free(ccc.rho_ave_compact);
  hbw_free(ccc.rho_compact); hbw_free(ccc.p_compact); hbw_free(ccc.t_compact);
  hbw_free(nmats); hbw_free(ccc.imaterial);
  hbw_free(ccc.nextfrac); hbw_free(frac2cell); hbw_free(ccc.matids);
  hbw_free(ccc.Vf_compact_list); hbw_free(ccc.rho_compact_list);
  hbw_free(ccc.t_compact_list); hbw_free(ccc.p_compact_list);
  return 0;
}
// Per-thread indexing into the LMEM stack: wraps logical slot `i` into the
// circular stack of LMEM_STACK_SIZE << SHIFT entries, interleaved so that
// consecutive threads of a block access consecutive addresses.
template<int SHIFT>
__forceinline__ __device__ int ACCS(const int i)
{
  const int wrap_mask = (LMEM_STACK_SIZE << SHIFT) - 1;
  const int slot      = i & wrap_mask;
  return slot * blockDim.x + threadIdx.x;
}
#define BTEST(x) (-(int)(x))
texture<float4, 1, cudaReadModeElementType> texNodeSize;
texture<float4, 1, cudaReadModeElementType> texNodeCenter;
texture<float4, 1, cudaReadModeElementType> texMultipole;
texture<float4, 1, cudaReadModeElementType> texBody;
// Addition operator pack used by the generic scan routines below.
// Provides the identity element, the binary op, its inverse, and a
// branch-free mask (returns b when flag is true, 0 otherwise; valid for
// integral T only, since it relies on bitwise AND with -1/0).
template<class T>
struct ADDOP {
  __device__ static inline T identity()          { return static_cast<T>(0); }
  __device__ static inline T apply(T a, T b)     { return static_cast<T>(a + b); }
  __device__ static inline T unapply(T a, T b)   { return static_cast<T>(a - b); }
  __device__ static inline T mask(bool flag, T b){ return static_cast<T>(-(int)(flag) & b); }
};
// Intra-warp inclusive scan (Hillis-Steele over log2(32)=5 steps): after the
// call, ptr[idx] holds the OP-combination of the warp's elements up to and
// including lane `idx & 31`. Correctness relies on `volatile` shared memory
// and implicit warp-synchronous execution (32-wide warps, pre-Volta style).
template<class OP, class T>
// __device__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx = threadIdx.x) {
__device__ __forceinline__ T inclusive_scan_warp(volatile T *ptr, T mysum, const unsigned int idx ) {
  const unsigned int lane = idx & 31;  // lane index within the warp
  if (lane >=  1) ptr[idx] = mysum = OP::apply(ptr[idx -  1], mysum);
  if (lane >=  2) ptr[idx] = mysum = OP::apply(ptr[idx -  2], mysum);
  if (lane >=  4) ptr[idx] = mysum = OP::apply(ptr[idx -  4], mysum);
  if (lane >=  8) ptr[idx] = mysum = OP::apply(ptr[idx -  8], mysum);
  if (lane >= 16) ptr[idx] = mysum = OP::apply(ptr[idx - 16], mysum);
  return ptr[idx];
}
// Non-template overload of the intra-warp inclusive scan, specialised for
// int addition. Same warp-synchronous Hillis-Steele scheme as the template
// version above.
__device__ __forceinline__ int inclusive_scan_warp(volatile int *ptr, int mysum, const unsigned int idx) {
  const unsigned int lane = idx & 31;  // lane index within the warp
  if (lane >=  1) ptr[idx] = mysum = ptr[idx -  1] + mysum;
  if (lane >=  2) ptr[idx] = mysum = ptr[idx -  2] + mysum;
  if (lane >=  4) ptr[idx] = mysum = ptr[idx -  4] + mysum;
  if (lane >=  8) ptr[idx] = mysum = ptr[idx -  8] + mysum;
  if (lane >= 16) ptr[idx] = mysum = ptr[idx - 16] + mysum;
  return ptr[idx];
}
// Block-wide inclusive scan of one value per thread (v0), using shared
// memory `ptr` (at least blockDim.x elements). Returns this thread's
// inclusive prefix; ptr[idx] holds the same value on exit.
// Classic two-level scheme: scan within warps, scan the per-warp totals with
// warp 0, then add the warp-prefix back in.
template<class OP, class T>
__device__ __inline__ T inclusive_scan_block(volatile T *ptr, const T v0, const unsigned int idx) {
  const unsigned int lane   = idx & 31;
  const unsigned int warpid = idx >> 5;
  // step 0: Write the value from the thread to the memory
  ptr[idx] = v0;
  T mysum = v0;
  __syncthreads();
  // step 1: Intra-warp scan in each warp
  // T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
  T val = inclusive_scan_warp(ptr, mysum, idx);
  __syncthreads();
  // step 2: Collect per-warp results: lane 31 of each warp writes its warp's
  // total into ptr[warpid] (the front of the shared buffer).
  if (lane == 31) ptr[warpid] = ptr[idx];
  __syncthreads();
  mysum = ptr[idx];
  // step 3: Use 1st warp to scan per-warp results
  if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
  __syncthreads();
  // step 4: Accumulate results from Steps 1 and 3 — each warp adds the
  // inclusive total of all preceding warps.
  if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
  __syncthreads();
  // Step 5: Write and return the final result
  ptr[idx] = val;
  __syncthreads();
  return val; //ptr[blockDim.x - 1];
}
// Block-wide inclusive scan over values already resident in shared memory
// `ptr` (one element per thread). Identical algorithm to the (ptr, v0, idx)
// overload above, except the input is read from ptr[idx] instead of being
// passed in. Returns this thread's inclusive prefix.
template<class OP, class T>
// __device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx = threadIdx.x) {
__device__ T inclusive_scan_block(volatile T *ptr, const unsigned int idx) {
  const unsigned int lane   = idx & 31;
  const unsigned int warpid = idx >> 5;
  T mysum = ptr[idx];
  __syncthreads();
  // step 1: Intra-warp scan in each warp
  T val = inclusive_scan_warp<OP, T>(ptr, mysum, idx);
  __syncthreads();
  // step 2: Collect per-warp results (lane 31 holds each warp's total)
  if (lane == 31) ptr[warpid] = ptr[idx];
  __syncthreads();
  mysum = ptr[idx];
  // step 3: Use 1st warp to scan per-warp results
  if (warpid == 0) inclusive_scan_warp<OP, T>(ptr,mysum, idx);
  __syncthreads();
  // step 4: Accumulate results from Steps 1 and 3
  if (warpid > 0) val = OP::apply(ptr[warpid - 1], val);
  __syncthreads();
  // Step 5: Write and return the final result
  ptr[idx] = val;
  __syncthreads();
  return val; //ptr[blockDim.x - 1];
}
// Inclusive scan over an N-element shared/global buffer, processed in
// blockDim.x-sized chunks: each chunk is block-scanned, then offset by the
// running total `y` of all previous chunks. Returns the grand total.
// NOTE(review): the loop always scans a full blockDim.x window starting at p,
// so this appears to assume N is a multiple of blockDim.x (or that the buffer
// is padded) — TODO confirm at the call sites.
template<class OP, class T>
// __device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx = threadIdx.x) {
__device__ T inclusive_scan_array(volatile T *ptr_global, const int N, const unsigned int idx) {
  T y = OP::identity();          // running total of previously scanned chunks
  volatile T *ptr = ptr_global;
  for (int p = 0; p < N; p += blockDim.x) {
    ptr = &ptr_global[p];
    inclusive_scan_block<OP, T>(ptr, idx);
    ptr[idx] = OP::apply(ptr[idx], y);   // add offset from earlier chunks
    __syncthreads();
    y = ptr[blockDim.x - 1];             // new running total
    __syncthreads();
  }
  return y;
}
#ifdef INDSOFT
// Softened interaction factors for the multipole expansion:
// returns {D0, D1, D2, D3} = {1/r, -1/r^3, +3/r^5, -15/r^7} (softened),
// with every component multiplied by `selfGrav` (pass 0 to null out a
// self-interaction without branching).
// This variant uses individual softening: the combined softening length is
// epseff = epsP + epsQ. Outside epseff the plain Newtonian form is used;
// inside, a piecewise polynomial (spline-softened) kernel is evaluated.
// NOTE(review): the polynomial coefficients are taken as-is from the
// original; they have not been re-derived here.
__device__ float4 get_D04(float ds2, float epsP, float epsQ, int selfGrav = 1) {
  // float eps = fmaxf(epsP, epsQ);
  float epseff = epsP + epsQ;
  float ids, ids2, ids3;
  if(ds2 >= (epseff*epseff))
  {
    // Separation beyond the softening length: Newtonian 1/r.
    ids = rsqrtf(ds2);
    //if(isnan(ids)) ids = 0;  not needed if we use non-zero softening
    ids = ids*selfGrav;   //Prevent selfGravity, instead of using if-statement
    ids3 = ids*ids*ids;
  }
  else
  {
    //these two lines are faster than a real sqrt
    float dist = ds2*selfGrav*rsqrtf(ds2);   //Gives NaN is ds is 0
    if(isnan(dist)) dist = 0.0f;
    //float dist = sqrtf(ds2);  //Slower than the two lines above
    //assert(!isnan(dist));
    float epseffi = 1.f/epseff;
    float rhinv   = dist*epseffi;   // normalised separation r/epseff in [0,1)
    if(rhinv  < 0.5f)
    {
      // Inner spline segment (r < epseff/2)
      ids3 = 4.f/3.f + (rhinv*rhinv)*(4.f*rhinv-4.8f);
      ids  = 1.4f - (rhinv*rhinv)*(8.f/3.f+(rhinv*rhinv)*(3.2f*rhinv-4.8f));
    }
    else
    {
      // Outer spline segment (epseff/2 <= r < epseff)
      ids3 = 8.f/3.f-6.f*rhinv+4.8f*(rhinv*rhinv)-4.f/3.f*(rhinv*rhinv*rhinv)-1.f/120.f/(rhinv*rhinv*rhinv);
      ids  = 1.6f-1/(30.f*rhinv)-(rhinv*rhinv)*(16.f/3.f+rhinv*(-8.f+rhinv*(4.8f-rhinv*16.f/15.f)));
    }//end if rhin < 0.5
    // Rescale from the normalised coordinate back to physical units.
    ids  = ids*2.f*epseffi;
    ids3 = ids3*8.f*(epseffi*epseffi*epseffi);
    //Self gravity prevention (and NaN prevention)
    ids  *= selfGrav;
    ids3 *= selfGrav;
  } //end dist >= epseff
  ids2 = ids*ids;
  float ids5 = ids3*ids2;
  float ids7 = ids5*ids2;
  return (float4){ids, -ids3, +3.0f*ids5, -15.0f*ids7};
}
#else
// Plummer-softened variant: ds2 is expected to already include eps^2 (the
// caller adds it), so a plain reciprocal square root suffices.
// Returns {1/r, -1/r^3, +3/r^5, -15/r^7}, zeroed when selfGrav == 0.
__device__ float4 get_D04(float ds2, int selfGrav = 1) {
#if 1
  float ids = rsqrtf(ds2);   //Does not work with zero-softening
  if(isnan(ids)) ids = 0;    //This does work with zero-softening, few percent performance drop
  //float ids = (1.0f / sqrtf(ds2)) * selfGrav;  Slower in Pre CUDA4.1
  ids *= selfGrav;
#else
  const float ids = (ds2 > 0.0f) ? rsqrtf(ds2) : 0.0f;
#endif
  const float ids2 = ids*ids;
  float ids3 = ids *ids2;
  float ids5 = ids3*ids2;
  float ids7 = ids5*ids2;
  return (float4){ids, -ids3, +3.0f*ids5, -15.0f*ids7};
}  // 9 flops
#endif
#ifdef INDSOFT
// Accumulates the point-mass (monopole) contribution of particle j onto
// particle i's acceleration: acc.w -= m_j*D0 (potential), acc.{x,y,z} +=
// m_j*D1*dr. `ds2` is returned by reference so the caller can track the
// nearest neighbour; `selfGrav` (0/1) nulls out self-interaction.
// INDSOFT variant: per-particle softening lengths eps2P (particle i) and
// epsQ (particle j) are combined inside get_D04.
__device__ float4 add_acc(float4 acc,  float4 pos,
    float massj, float3 posj, float epsQ,
    float &ds2, float eps2P, int selfGrav) {
#else
// Accumulates the point-mass (monopole) contribution of particle j onto
// particle i's acceleration: acc.w -= m_j*D0 (potential), acc.{x,y,z} +=
// m_j*D1*dr. `ds2` is returned by reference (includes eps2 in this variant);
// `selfGrav` (0/1) nulls out self-interaction.
__device__ float4 add_acc(float4 acc, float4 pos,
    float massj, float3 posj,
    float &ds2, float eps2, int selfGrav) {
#endif
  // Separation vector i -> j (pos_i - pos_j).
  float3 dr = {pos.x - posj.x,
               pos.y - posj.y,
               pos.z - posj.z};
#ifdef INDSOFT
  ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
  float4 D04 = get_D04(ds2, eps2P, epsQ, selfGrav);
#else
  // Plummer softening: fold eps2 directly into the squared distance.
  ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
  float4 D04 = get_D04(ds2, selfGrav);
#endif
  float D0 = D04.x*massj;   // m/r   (potential term)
  float D1 = D04.y*massj;   // -m/r^3 (force term)
  acc.w -= D0;
  acc.x += D1*dr.x;
  acc.y += D1*dr.y;
  acc.z += D1*dr.z;
  return acc;
}
#ifdef INDSOFT
// Accumulates a tree node's monopole + quadrupole contribution onto particle
// i's acceleration/potential. `com` is the node's centre of mass, `mass` its
// total mass; Q0 = (q11,q22,q33) holds the diagonal and Q1 = (q12,q13,q23)
// the off-diagonal quadrupole moments.
// INDSOFT variant: node softening epsNode is combined with the particle's
// eps2 inside get_D04.
__device__ float4 add_acc(float4 acc, float4 pos,
    float mass, float3 com,
    float3 Q0, float3 Q1, float epsNode, float eps2) {
#else
// Accumulates a tree node's monopole + quadrupole contribution onto particle
// i's acceleration/potential. `com` is the node's centre of mass, `mass` its
// total mass; Q0 = (q11,q22,q33) holds the diagonal and Q1 = (q12,q13,q23)
// the off-diagonal quadrupole moments. Plummer softening via eps2.
__device__ float4 add_acc(float4 acc, float4 pos,
    float mass, float3 com,
    float3 Q0, float3 Q1, float eps2) {
#endif
  // Separation vector particle -> node centre of mass.
  float3 dr = {pos.x - com.x,
               pos.y - com.y,
               pos.z - com.z};
#ifdef INDSOFT
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
  float4 D04 = get_D04(ds2, epsNode, eps2);
#else
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2;
  float4 D04 = get_D04(ds2);
#endif
  // Mass-scaled derivatives of the softened 1/r kernel.
  float D0 = D04.x*mass;
  float D1 = D04.y*mass;
  float D2 = D04.z*mass;
  float D3 = D04.w*mass;
  float oct_q11 = Q0.x;
  float oct_q22 = Q0.y;
  float oct_q33 = Q0.z;
  float oct_q12 = Q1.x;
  float oct_q13 = Q1.y;
  float oct_q23 = Q1.z;
  float Qii = oct_q11 + oct_q22 + oct_q33;          // trace of Q
  // Contraction Q_ij * r_i * r_j.
  float QijRiRj =
    (oct_q11*dr.x*dr.x + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z) +
    2.0f*(oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z);
  //2.0f was
  //volatile float QijRiRj_1 =  oct_q12*dr.y*dr.x + oct_q13*dr.z*dr.x + oct_q23*dr.y*dr.z;
  //QijRiRj = dr.x*dr.x;// + oct_q22*dr.y*dr.y + oct_q33*dr.z*dr.z; //oct_q11;//*
  // Potential: monopole + quadrupole terms.
  acc.w -= D0 + 0.5f*D1*Qii + 0.5f*D2*QijRiRj;
  // Acceleration: radial part C01a*dr plus the Q.dr correction.
  float C01a = D1 + 0.5f*D2*Qii + 0.5f*D3*QijRiRj;
  acc.x += C01a*dr.x + D2*(oct_q11*dr.x + oct_q12*dr.y + oct_q13*dr.z);
  acc.y += C01a*dr.y + D2*(oct_q12*dr.x + oct_q22*dr.y + oct_q23*dr.z);
  acc.z += C01a*dr.z + D2*(oct_q13*dr.x + oct_q23*dr.y + oct_q33*dr.z);
  return acc;
}
//Minimum distance opening criteria
// Returns true when the node must be opened: the squared minimum distance
// between the group's AABB (groupCenter/groupSize half-extents) and the
// node's AABB (nodeCenter/nodeSize half-extents) is compared against the
// pre-squared opening criterion stored in nodeCenter.w.
#ifdef INDSOFT
__device__ bool split_node_grav_md(float4 nodeCenter, float4 nodeSize, float4 groupCenter, float4 groupSize,
    float group_eps, float node_eps)
#else
__device__ bool split_node_grav_md(float4 nodeCenter, float4 nodeSize, float4 groupCenter, float4 groupSize)
#endif
{
  //Compute the distance between the group and the cell
  // Per-axis gap between the two boxes (negative when they overlap).
  float3 dr = {fabs(groupCenter.x - nodeCenter.x) - (groupSize.x + nodeSize.x),
               fabs(groupCenter.y - nodeCenter.y) - (groupSize.y + nodeSize.y),
               fabs(groupCenter.z - nodeCenter.z) - (groupSize.z + nodeSize.z)};
  // Branch-free clamp to zero: dr = max(dr, 0).
  dr.x += fabs(dr.x); dr.x *= 0.5f;
  dr.y += fabs(dr.y); dr.y *= 0.5f;
  dr.z += fabs(dr.z); dr.z *= 0.5f;
  //Distance squared, no need to do sqrt since opening criteria has been squared
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
#ifdef INDSOFT
  // Following Inti's idea: always open when within the combined softening
  // length, which avoids many unnecessary openings elsewhere.
  if(ds2 <= ((group_eps + node_eps ) * (group_eps + node_eps) ))   return true;
#endif
  return (ds2 <= fabs(nodeCenter.w));
}
//Improved Barnes Hut criterium
// Returns true when the node must be opened. Like split_node_grav_md, but
// measures the squared minimum distance from the group's AABB to the node's
// centre of mass (nodeCOM.xyz) rather than to the node's box, and compares it
// against the pre-squared criterion in nodeCOM.w.
#ifdef INDSOFT
__device__ bool split_node_grav_impbh(float4 nodeCOM, float4 groupCenter, float4 groupSize,
    float group_eps, float node_eps)
#else
__device__ bool split_node_grav_impbh(float4 nodeCOM, float4 groupCenter, float4 groupSize)
#endif
{
  //Compute the distance between the group and the cell
  // Per-axis distance from the COM to the group's box surface
  // (negative when the COM lies inside the box along that axis).
  float3 dr = {fabs(groupCenter.x - nodeCOM.x) - (groupSize.x),
               fabs(groupCenter.y - nodeCOM.y) - (groupSize.y),
               fabs(groupCenter.z - nodeCOM.z) - (groupSize.z)};
  // Branch-free clamp to zero: dr = max(dr, 0).
  dr.x += fabs(dr.x); dr.x *= 0.5f;
  dr.y += fabs(dr.y); dr.y *= 0.5f;
  dr.z += fabs(dr.z); dr.z *= 0.5f;
  //Distance squared, no need to do sqrt since opening criteria has been squared
  float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
#ifdef INDSOFT
  // Extra test: always open when within the combined softening length.
  if(ds2 <= ((group_eps + node_eps ) * (group_eps + node_eps) ))   return true;
#endif
  return (ds2 <= fabs(nodeCOM.w));
}
// Copies a float4 out of volatile-qualified (shared) storage into a plain
// register value, component by component.
__device__ float4 get_float4(float4 const volatile &v)
{
  float4 out;
  out.x = v.x;
  out.y = v.y;
  out.z = v.z;
  out.w = v.w;
  return out;
}
#define TEXTURES
#define OLDPREFIX
#define DOGRAV
/*
 * Tree-walk + force evaluation for one group of bodies.
 *
 * Walks the octree breadth-first, level by level, deciding per node whether to
 * open it (children pushed to the LMEM stack), approximate it (multipole
 * evaluation, APPROX section) or open its leaf bodies (direct particle-particle
 * interactions, DIRECT section). Interaction lists are compacted in shared
 * memory via warp/block prefix sums before being evaluated in batches of DIM.
 *
 * DIM2/SHIFT   : log2 of the block size / LMEM stack stride selector.
 * ngb          : out — id of the nearest neighbour found (direct pass only).
 * apprCount /
 * direCount    : in/out — interaction counters; apprCount is set to -1 and the
 *                function returns early when the LMEM stack overflows, so the
 *                caller can retry with the larger emergency buffer.
 * Returns the accumulated acceleration+potential for this thread's body
 * (reduced over the ty dimension at the end; ty==0 holds the full sum).
 *
 * NOTE(review): shmem is carved into heavily overlapping aliases below
 * (approx/direct/nodes/prefix vs node_mon*/node_oct* vs body_list/sh_*);
 * the statement and __syncthreads() order is load-bearing — do not reorder.
 */
template<int DIM2, int SHIFT>
__device__ float4 approximate_gravity(int DIM2x, int DIM2y,
                                      int tid, int tx, int ty,
                                      int body_i, float4 pos_i,
                                      real4 group_pos,
                                      float eps2,
                                      uint2 node_begend,
                                      real4 *multipole_data,
                                      real4 *body_pos,
                                      int *shmem,
                                      int *lmem,
                                      int &ngb,
                                      int &apprCount, int &direCount,
                                      volatile float4 *boxSizeInfo,
                                      float4 groupSize,
                                      volatile float4 *boxCenterInfo,
                                      float group_eps,
                                      real4 *body_vel) {
  float4 acc_i = {0.0f, 0.0f, 0.0f, 0.0f};
  ngb = -1;
  float ds2_min = 1.0e10f;

  /*********** set necessary thread constants **********/
  const int DIMx = 1 << DIM2x;
  const int DIMy = 1 << DIM2y;
  const int DIM  = 1 << DIM2;
  const int offs = ty << DIM2x;

  /*********** shared memory distribution **********/
  //                         begin,    end,   size
  // -----------------------
  int *approx = (int*)&shmem [     0];     //  0*DIM,  2*DIM,  2*DIM
  int *direct = (int*)&approx[ 2*DIM];     //  2*DIM,  3*DIM,  1*DIM
  int *nodes  = (int*)&direct[   DIM];     //  3*DIM, 13*DIM, 10*DIM
  int *prefix = (int*)&nodes [10*DIM];     // 13*DIM, 15*DIM,  2*DIM

  float  *node_mon0 = (float* )&nodes    [DIM]; //  4*DIM,  5*DIM, 1*DIM
  float3 *node_mon1 = (float3*)&node_mon0[DIM]; //  5*DIM,  8*DIM, 3*DIM
  float3 *node_oct0 = (float3*)&node_mon1[DIM]; //  8*DIM, 11*DIM, 3*DIM
  float3 *node_oct1 = (float3*)&node_oct0[DIM]; // 11*DIM, 14*DIM, 3*DIM

  int    *body_list = (int*   )&nodes    [  DIM]; //  4*DIM,  8*DIM, 4*DIM
  float  *sh_mass   = (float* )&body_list[4*DIM]; //  8*DIM,  9*DIM, 1*DIM
  float3 *sh_pos    = (float3*)&sh_mass  [  DIM]; //  9*DIM, 12*DIM  3*DIM

  float  *sh_pot = sh_mass;
  float3 *sh_acc = sh_pos;
  int    *sh_jid = (int*  )&sh_pos[DIM];

  float  *sh_ds2 = (float*)&sh_acc[DIM];
  int    *sh_ngb = (int*  )&sh_ds2[DIM];

#ifdef INDSOFT
  //This works with shmem of dim x 15
  float *sh_eps   = (float*)&sh_jid[DIM];   //JB Partially overwrites the prefix part
  float *node_eps = (float*)&approx[2*DIM]; //JB 11*DIM, 14*DIM, 3*DIM
#endif

  /*********** stack **********/
  int *nstack = lmem;

  /*********** begin tree-walk **********/
  int n_approx = 0;
  int n_direct = 0;

  for (int root_node = node_begend.x; root_node < node_begend.y; root_node += DIM) {
    int n_nodes0 = min(node_begend.y - root_node, DIM);
    int n_stack0 = 0;
    int n_stack_pre = 0;

    { nstack[ACCS<SHIFT>(n_stack0)] = root_node + tid; n_stack0++; }

    /*********** walk each level **********/
    while (n_nodes0 > 0) {
      int n_nodes1 = 0;
      int n_offset = 0;

      int n_stack1 = n_stack0;
      int c_stack0 = n_stack_pre;

      /*********** walk a level **********/
      while (c_stack0 < n_stack0) {
        /***
         **** --> fetch the list of nodes from LMEM
         ***/
        bool use_node = tid < n_nodes0;
        { prefix[tid] = nstack[ACCS<SHIFT>(c_stack0)]; c_stack0++; }
        __syncthreads();

        int node = prefix[min(tid, n_nodes0 - 1)];

        if (n_nodes0 > 0) { //Work around pre 4.1 compiler bug
          n_nodes0 -= DIM;
        }

        /***
         **** --> process each of the nodes in the list in parallel
         ***/
#ifndef TEXTURES
        float4 nodeSize = get_float4(boxSizeInfo[node]);   //Fetch the size of the box. Size.w = child info
        float4 node_pos = get_float4(boxCenterInfo[node]); //Fetch the center of the box. center.w = opening info
#else
        float4 nodeSize = tex1Dfetch(texNodeSize, node);
        float4 node_pos = tex1Dfetch(texNodeCenter, node);
#endif

        int node_data = __float_as_int(nodeSize.w);

#ifdef INDSOFT
        //Very inefficient this but for testing I have to live with it...
        float node_eps_val = multipole_data[node*3 + 1].w;
#endif

        //Check if a cell has to be opened
#ifdef IMPBH
        //Improved barnes hut method
#ifndef TEXTURES
        float4 nodeCOM = multipole_data[node*3];
#else
        float4 nodeCOM = tex1Dfetch(texMultipole, node*3);
#endif

        nodeCOM.w = node_pos.w;

#ifdef INDSOFT
        bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize, group_eps, node_eps_val);
#else
        bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize);
#endif
#else
        //Minimum distance method
#ifdef INDSOFT
        bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize, group_eps, node_eps_val); //Check if node should be split
#else
        bool split = split_node_grav_md(node_pos, nodeSize, group_pos, groupSize); //Check if node should be split
#endif
#endif

        bool leaf = node_pos.w <= 0; //Small AND equal incase of a 1 particle cell //Check if it is a leaf
        //  split = true;

        uint mask   = BTEST((split && !leaf) && use_node);   // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero
        int  child  =   node_data & 0x0FFFFFFF;              //Index to the first child of the node
        int  nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has

        /***
         **** --> calculate prefix
         ***/
        int *prefix0 = &prefix[  0];
        int *prefix1 = &prefix[DIM];

#ifdef OLDPREFIX
        int n_total = calc_prefix<DIM2>(prefix, tid, nchild);
        prefix[tid] += n_offset - nchild;
        __syncthreads();
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix, nchild, tid); // inclusive scan to compute memory offset of each child
        int n_total = prefix[blockDim.x - 1];                       // fetch total number of children, i.e. offset of the last child -1
        __syncthreads();                                            // thread barrier to make sure that warps completed their jobs

        prefix[tid] += n_offset - nchild;                           // convert inclusive into exclusive scan for referencing purpose
        __syncthreads();                                            // thread barrier
#endif

        for (int i = n_offset; i < n_offset + n_total; i += DIM) //nullify part of the array that will be filled with children
          nodes[tid + i] = 0;                                    //but do not touch those parts which has already been filled
        __syncthreads();                                         //Thread barrier to make sure all warps finished writing data

        bool flag = (split && !leaf) && use_node;  //Flag = use_node + split + not_a_leaf; Use only non_leaf nodes that are to be split
        if (flag) nodes[prefix[tid]] = child;      //Thread with the node that is about to be split
        __syncthreads();                           //writes the first child in the array of nodes

        /*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/
        if (flag && nodes[prefix[tid] + 1] == 0) nodes[prefix[tid] + 1] = child + 1; __syncthreads();
        if (flag && nodes[prefix[tid] + 2] == 0) nodes[prefix[tid] + 2] = child + 2; __syncthreads();
        if (flag && nodes[prefix[tid] + 3] == 0) nodes[prefix[tid] + 3] = child + 3; __syncthreads();
        if (flag && nodes[prefix[tid] + 4] == 0) nodes[prefix[tid] + 4] = child + 4; __syncthreads();
        if (flag && nodes[prefix[tid] + 5] == 0) nodes[prefix[tid] + 5] = child + 5; __syncthreads();
        if (flag && nodes[prefix[tid] + 6] == 0) nodes[prefix[tid] + 6] = child + 6; __syncthreads();
        if (flag && nodes[prefix[tid] + 7] == 0) nodes[prefix[tid] + 7] = child + 7; __syncthreads();

        n_offset += n_total; //Increase the offset in the array by the number of newly added nodes

        /***
         **** --> save list of nodes to LMEM
         ***/

        /*** if half of shared memory or more is filled with the the nodes, dump these into slowmem stack ***/
        while (n_offset >= DIM) {
          n_offset -= DIM;
          const int offs1 = ACCS<SHIFT>(n_stack1);
          nstack[offs1] = nodes[n_offset + tid]; n_stack1++;
          n_nodes1 += DIM;

          if ((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
          {
            //We overwrote our current stack
            apprCount = -1; return acc_i;
          }
        }

        __syncthreads();

        /******************************/
        /******************************/
        /*****     EVALUATION     *****/
        /******************************/
        /******************************/
#if 1
        /***********************************/
        /******       APPROX          ******/
        /***********************************/

#ifdef OLDPREFIX
        n_total = calc_prefix<DIM2>(prefix, tid, 1 - (split || !use_node));
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix, 1 - (split || !use_node), tid);
        n_total = prefix[blockDim.x - 1];
#endif

        //  n_total = calc_prefix<DIM2>(prefix, tid, !split && use_node);  // for some unkown reason this does not work right on the GPU
        if (!split && use_node) approx[n_approx + prefix[tid] - 1] = node;
        __syncthreads();
        n_approx += n_total;

        while (n_approx >= DIM) {
          n_approx -= DIM;
          int address = (approx[n_approx + tid] << 1) + approx[n_approx + tid];
#ifndef TEXTURES
          float4 monopole  = multipole_data[address    ];
          float4 octopole0 = multipole_data[address + 1];
          float4 octopole1 = multipole_data[address + 2];
#else
          float4 monopole  = tex1Dfetch(texMultipole, address);
          float4 octopole0 = tex1Dfetch(texMultipole, address + 1);
          float4 octopole1 = tex1Dfetch(texMultipole, address + 2);
#endif

          node_mon0[tid] = monopole.w;
          node_mon1[tid] = (float3){monopole.x,  monopole.y,  monopole.z};
          node_oct0[tid] = (float3){octopole0.x, octopole0.y, octopole0.z};
          node_oct1[tid] = (float3){octopole1.x, octopole1.y, octopole1.z};

#ifdef INDSOFT
          float temp = node_eps[tid]; //Backup value in the shmem into register
          node_eps[tid] = octopole0.w;
#endif
          __syncthreads();

#pragma unroll
          for (int i = 0; i < DIMx; i++)
          {
            apprCount++;
#ifdef DOGRAV
#ifdef INDSOFT
            acc_i = add_acc(acc_i, pos_i,
                            node_mon0[offs + i], node_mon1[offs + i],
                            node_oct0[offs + i], node_oct1[offs + i], node_eps[offs + i], eps2);
#else
            acc_i = add_acc(acc_i, pos_i,
                            node_mon0[offs + i], node_mon1[offs + i],
                            node_oct0[offs + i], node_oct1[offs + i], eps2);
#endif
#endif
          }
          __syncthreads();

#ifdef INDSOFT
          node_eps[tid] = temp; //Restore original value in shmem
          __syncthreads();
#endif
        }
        __syncthreads();
#endif

#if 1
        /***********************************/
        /******       DIRECT          ******/
        /***********************************/

        int *sh_body = &approx[DIM];

        flag = split && leaf && use_node;                                //flag = split + leaf + use_node
        int jbody = node_data & BODYMASK;                                //the first body in the leaf
        int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag

        body_list[tid] = direct[tid];                                    //copy list of bodies from previous pass to body_list
        sh_body  [tid] = jbody;                                          //store the leafs first body id into shared memory

        // step 1
#ifdef OLDPREFIX
        calc_prefix<DIM2>(prefix0, tid, flag);
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix0, (int)flag, tid);  // inclusive scan on flags to construct array
#endif
        if (flag) prefix1[prefix0[tid] - 1] = tid;                       //with tids whose leaves have to be opened
        __syncthreads();                                                 //thread barrier, make sure all warps completed the job

        // step 2
#ifdef OLDPREFIX
        int n_bodies = calc_prefix<DIM2>(prefix0, tid, nbody);
#else
        inclusive_scan_block<ADDOP<int>, int>(prefix0, nbody, tid);      // inclusive scan to compute memory offset for each body
        int n_bodies = prefix0[blockDim.x - 1];                          //Total number of bides extract from the leaves
        __syncthreads();                                                 // thread barrier to make sure that warps completed their jobs
#endif

        direct [tid] = prefix0[tid];  //Store a copy of inclusive scan in direct
        prefix0[tid] -= nbody;        //convert inclusive int oexclusive scan
        prefix0[tid] += 1;            //add unity, since later prefix0[tid] == 0 used to check barrier

        int nl_pre = 0;               //Number of leaves that have already been processed

#define NJMAX (DIM*4)
        while (n_bodies > 0) {
          int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bides to be extracted does not exceed
                                                    //the amount of allocated shared memory

          // step 0                                            //nullify part of the body_list that will be filled with bodies
          for (int i = n_direct; i < n_direct + nb; i += DIM){ //from the leaves that are being processed
            body_list[i + tid] = 0;
          }
          __syncthreads();

          //step 1:
          if (flag && (direct[tid] <= nb) && (prefix0[tid] > 0)) //make sure that the thread indeed carries a leaf
            body_list[n_direct + prefix0[tid] - 1] = 1;          //whose bodies will be extracted
          __syncthreads();

          //step 2:
#ifdef OLDPREFIX
          int nl = calc_prefix<DIM2>(nb, &body_list[n_direct], tid);
#else
          int nl = inclusive_scan_array<ADDOP<int>, int>         // inclusive scan to compute number of leaves to process
            (&body_list[n_direct], nb, tid);                     // to make sure that there is enough shared memory for bodies
#endif
          nb = direct[prefix1[nl_pre + nl - 1]];                 // number of bodies stored in these leaves

          // step 3:
          for (int i = n_direct; i < n_direct + nb; i += DIM) {  //segmented fill of the body_list
            int j = prefix1[nl_pre + body_list[i + tid] - 1];    // compute the first body in shared j-body array
            body_list[i + tid] = (i + tid - n_direct) -          //add to the index of the first j-body in a child
              (prefix0[j] - 1) + sh_body[j];                     //the index of the first child in body_list array
          }
          __syncthreads();

          /**************************************************
           *  example of what is accomplished in steps 0-4   *
           *       ---------------------------               *
           * step 0: body_list = 000000000000000000000       *
           * step 1: body_list = 100010001000000100100       *
           * step 2: body_list = 111122223333333444555       *
           * step 3: body_list = 012301230123456012012       *
           *         assuming that sh_body[j] = 0            *
           ***************************************************/

          n_bodies -= nb;                          //subtract from n_bodies number of bodies that have been extracted
          nl_pre += nl;                            //increase the number of leaves that where processed
          direct [tid] -= nb;                      //subtract the number of extracted bodies in this pass
          prefix0[tid] = max(prefix0[tid] - nb, 0); //same here, but do not let the number be negative (GT200 bug!?)
          n_direct += nb;                          //increase the number of bodies to be procssed

          while (n_direct >= DIM) {
            n_direct -= DIM;

            float4 posj = body_pos[body_list[n_direct + tid]];
            //         float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]);
            sh_mass[tid] = posj.w;
            sh_pos [tid] = (float3){posj.x, posj.y, posj.z};
            sh_jid [tid] = body_list[n_direct + tid];

#ifdef INDSOFT
            float temp = sh_eps [tid];                             //Store the value from Shmem into a register
            sh_eps [tid] = body_vel[body_list[n_direct + tid]].w;  //Load the softening
#endif
            __syncthreads();
#pragma unroll
            for (int j = 0; j < DIMx; j++)
            {
              int selfGrav = (body_i != sh_jid[offs + j]);
              //    if (body_i != sh_jid[offs + j])    //If statement replaced by multiplication
              {
                float ds2 = 1.0e10f;
                direCount++;
#ifdef DOGRAV
#ifdef INDSOFT
                acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], sh_eps[offs + j], ds2, eps2, selfGrav);
#else
                acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], ds2, eps2, selfGrav);
#endif
#endif
                ds2 += (!selfGrav)*1.0e10f;
                if (ds2 < ds2_min) {
                  ngb     = sh_jid[offs + j];
                  ds2_min = ds2;
                }
              }
            }

#ifdef INDSOFT
            //Restore the shmem value after all threads have used it
            __syncthreads();
            sh_eps[tid] = temp;
#endif
            __syncthreads();
          }

        }
        direct[tid] = body_list[tid];
        __syncthreads();
#endif
      } //end lvl

      n_nodes1 += n_offset;
      if (n_offset > 0)
      {
        nstack[ACCS<SHIFT>(n_stack1)] = nodes[tid]; n_stack1++;
        if ((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT))
        {
          //We overwrote our current stack
          apprCount = -1; return acc_i;
        }
      }
      __syncthreads();

      /***
       **** --> copy nodes1 to nodes0: done by reassigning the pointers
       ***/
      n_nodes0 = n_nodes1;

      n_stack_pre = n_stack0;
      n_stack0 = n_stack1;

    }//end while levels
  }//end for

  // Flush the partially-filled APPROX buffer (fewer than DIM entries remain).
  if (n_approx > 0)
  {
#ifdef INDSOFT
    float temp = node_eps[tid];
#endif
    if (tid < n_approx) {
      int address = (approx[tid] << 1) + approx[tid];
#ifndef TEXTURES
      float4 monopole  = multipole_data[address    ];
      float4 octopole0 = multipole_data[address + 1];
      float4 octopole1 = multipole_data[address + 2];
#else
      float4 monopole  = tex1Dfetch(texMultipole, address);
      float4 octopole0 = tex1Dfetch(texMultipole, address + 1);
      float4 octopole1 = tex1Dfetch(texMultipole, address + 2);
#endif

      node_mon0[tid] = monopole.w;
      node_mon1[tid] = (float3){monopole.x,  monopole.y,  monopole.z};
      node_oct0[tid] = (float3){octopole0.x, octopole0.y, octopole0.z};
      node_oct1[tid] = (float3){octopole1.x, octopole1.y, octopole1.z};

#ifdef INDSOFT
      node_eps[tid] = octopole0.w;
#endif
    } else {
      //Set non-active memory locations to zero
      node_mon0[tid] = 0.0f;
      node_mon1[tid] = (float3){1.0e10f, 1.0e10f, 1.0e10f};
      node_oct0[tid] = (float3){0.0f, 0.0f, 0.0f};
      node_oct1[tid] = (float3){0.0f, 0.0f, 0.0f};

#ifdef INDSOFT
      node_eps[tid] = 0.01f;
#endif
    }
    __syncthreads();

#pragma unroll
    for (int i = 0; i < DIMx; i++)
    {
      apprCount++;
#ifdef DOGRAV
#ifdef INDSOFT
      acc_i = add_acc(acc_i, pos_i,
                      node_mon0[offs + i], node_mon1[offs + i],
                      node_oct0[offs + i], node_oct1[offs + i], node_eps[offs + i], eps2);
#else
      acc_i = add_acc(acc_i, pos_i,
                      node_mon0[offs + i], node_mon1[offs + i],
                      node_oct0[offs + i], node_oct1[offs + i], eps2);
#endif
#endif
    }
    __syncthreads();

#ifdef INDSOFT
    node_eps[tid] = temp;
    __syncthreads();
#endif
  } //if n_approx > 0

  // Flush the partially-filled DIRECT buffer.
  if (n_direct > 0)
  {
    if (tid < n_direct) {
      float4 posj = body_pos[direct[tid]];
      //     float4 posj = tex1Dfetch(texBody, direct[tid]);
      sh_mass[tid] = posj.w;
      sh_pos [tid] = (float3){posj.x, posj.y, posj.z};
      sh_jid [tid] = direct[tid];
#ifdef INDSOFT
      sh_eps [tid] = body_vel[direct[tid]].w;
#endif
    } else {
      sh_mass[tid] = 0.0f;
      sh_pos [tid] = (float3){1.0e10f, 1.0e10f, 1.0e10f};
      sh_jid [tid] = -1;
#ifdef INDSOFT
      sh_eps [tid] = 0.01f;
#endif
    }
    __syncthreads();

#pragma unroll
    for (int j = 0; j < DIMx; j++) {
      if ((sh_jid[offs + j] >= 0)) {
        int selfGrav = (body_i != sh_jid[offs + j]);
        float ds2 = 1.0e10f;
        direCount++;
#ifdef DOGRAV
#ifdef INDSOFT
        acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], sh_eps[offs + j], ds2, eps2, selfGrav);
#else
        acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], ds2, eps2, selfGrav);
#endif
#endif
        ds2 += (!selfGrav)*1.0e10f;
        if (ds2 < ds2_min) {
          ngb     = sh_jid[offs + j];
          ds2_min = ds2;
        }
      }
    }
    __syncthreads();
  }

  /***
   **** --> reduce data between threads
   ***/
  sh_pot[tid] = acc_i.w;
  sh_acc[tid] = (float3){acc_i.x, acc_i.y, acc_i.z};
  sh_ds2[tid] = ds2_min;
  sh_ngb[tid] = ngb;
  __syncthreads();

  if (ty == 0) {
#pragma unroll
    for (int i = 1; i < DIMy; i++) {
      int idx = (i << DIM2x) + tx;
      acc_i.w += sh_pot[idx];
      acc_i.x += sh_acc[idx].x;
      acc_i.y += sh_acc[idx].y;
      acc_i.z += sh_acc[idx].z;
      if (sh_ds2[idx] < ds2_min) {
        ds2_min = sh_ds2[idx];
        ngb     = sh_ngb[idx];
      }
    }
  }
  __syncthreads();

  //Sum the interaction counters
  sh_ds2[tid] = direCount;
  sh_ngb[tid] = apprCount;
  __syncthreads();

  if (ty == 0) {
#pragma unroll
    for (int i = 1; i < DIMy; i++){
      int idx = (i << DIM2x) + tx;
      direCount += sh_ds2[idx];
      apprCount += sh_ngb[idx];
    }
  }
  __syncthreads();

  return acc_i;
}
/*
 * Kernel entry point: each block repeatedly grabs the next active group via an
 * atomic work counter, walks the tree with approximate_gravity(), and writes
 * the resulting acceleration / nearest-neighbour / interaction counts.
 *
 * On LMEM stack overflow (apprCount < 0) the block serializes on a global
 * spin-lock stored at active_inout[n_bodies+1] and redoes the walk with the
 * single oversized stack buffer at the end of MEM_BUF (SHIFT=8 template arg).
 */
extern "C" __global__ void
__launch_bounds__(NTHREAD)
dev_approximate_gravity(const int n_active_groups,
                        int    n_bodies,
                        float  eps2,
                        uint2  node_begend,
                        int    *active_groups,
                        real4  *body_pos,
                        real4  *multipole_data,
                        float4 *acc_out,
                        int    *ngb_out,
                        int    *active_inout,
                        int2   *interactions,
                        float4 *boxSizeInfo,
                        float4 *groupSizeInfo,
                        float4 *boxCenterInfo,
                        float4 *groupCenterInfo,
                        real4  *body_vel,
                        int    *MEM_BUF) {
  //    int     grpOffset){

  const int blockDim2 = NTHREAD2;
  __shared__ int shmem[15*(1 << blockDim2)];
  //    __shared__ int shmem[24*(1 << blockDim2)]; is possible on FERMI
  //    int             lmem[LMEM_STACK_SIZE];

  /*********** check if this block is linked to a leaf **********/
  int bid = gridDim.x * blockIdx.y + blockIdx.x;

  while (true)
  {
    // Thread 0 grabs the next group index; broadcast through shared memory.
    if (threadIdx.x == 0)
    {
      bid = atomicAdd(&active_inout[n_bodies], 1);
      shmem[0] = bid;
    }
    __syncthreads();

    bid = shmem[0];

    if (bid >= n_active_groups) return;

    int tid = threadIdx.y * blockDim.x + threadIdx.x;

    int grpOffset = 0;

    //   volatile int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE];
    //   int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE];
    int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x];

    /*********** set necessary thread constants **********/
    real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]];
    int   groupData    = __float_as_int(curGroupSize.w);
    uint  body_i       =   groupData & CRITMASK;                 // first body of the group
    uint  nb_i         = ((groupData & INVCMASK) >> CRITBIT) + 1; // number of bodies in the group

    real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]];

    //   if(tid == 0)
    //   printf("[%f %f %f %f ] \n [%f %f %f %f ] %d %d \n",
    //          curGroupSize.x, curGroupSize.y, curGroupSize.z, curGroupSize.w,
    //          group_pos.x, group_pos.y, group_pos.z, group_pos.w, body_i, nb_i);

    // Split the block into a DIMx x DIMy layout with at least 16 threads in x.
    int DIM2x = 0;
    while (((nb_i - 1) >> DIM2x) > 0) DIM2x++;

    DIM2x = max(DIM2x,4);
    int DIM2y = blockDim2 - DIM2x;

    int tx = tid & ((1 << DIM2x) - 1);
    int ty = tid >> DIM2x;

    body_i += tx%nb_i;

    //float4 pos_i = tex1Dfetch(bodies_pos_ref, body_i);   // texture read: 4 floats
    float4 pos_i = body_pos[body_i];

    int ngb_i;

    float4 acc_i = {0.0f, 0.0f, 0.0f, 0.0f};

#ifdef INDSOFT
    eps2 = body_vel[body_i].w;
    float group_eps = eps2;

    volatile float *reduc = (float*) &shmem[0];
    reduc[threadIdx.x] = eps2;

    //Find the maximum softening value for the particles in this group
    __syncthreads();
    // do reduction in shared mem
    if (blockDim.x >= 512) if (tid < 256) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 256]);} __syncthreads();
    if (blockDim.x >= 256) if (tid < 128) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 128]);} __syncthreads();
    if (blockDim.x >= 128) if (tid < 64)  {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 64]);}  __syncthreads();
    if (blockDim.x >= 64)  if (tid < 32)  { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 32]);}
    if (blockDim.x >= 32)  if (tid < 16)  { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 16]);}

    if (tid < 8)
    {
      reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 8]);
      reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 4]);
      reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 2]);
      reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 1]);
    }
    __syncthreads();

    group_eps = reduc[0];
#else
    float group_eps = 0;
#endif

    int apprCount = 0;
    int direCount = 0;

    acc_i = approximate_gravity<blockDim2, 0>( DIM2x, DIM2y, tid, tx, ty,
                                               body_i, pos_i, group_pos,
                                               eps2, node_begend,
                                               multipole_data, body_pos,
                                               shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
                                               group_eps, body_vel);
    if (apprCount < 0)
    {
      //Try to get access to the big stack, only one block per time is allowed
      if (threadIdx.x == 0)
      {
        int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep
        int waitCounter  = 0;
        while (res != 0)
        {
          //Sleep
          for (int i=0; i < (1024); i++)
          {
            waitCounter += 1;
          }
          //Test again
          shmem[0] = waitCounter;
          res = atomicExch(&active_inout[n_bodies+1], 1);
        }
      }

      __syncthreads();

      lmem = &MEM_BUF[gridDim.x*LMEM_STACK_SIZE*blockDim.x]; //Use the extra large buffer
      apprCount = direCount = 0;
      acc_i = approximate_gravity<blockDim2, 8>( DIM2x, DIM2y, tid, tx, ty,
                                                 body_i, pos_i, group_pos,
                                                 eps2, node_begend,
                                                 multipole_data, body_pos,
                                                 shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo,
                                                 group_eps, body_vel);

      lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x]; //Back to normal location

      if (threadIdx.x == 0)
      {
        atomicExch(&active_inout[n_bodies+1], 0); //Release the lock
      }
    }//end if apprCount < 0

    if (tid < nb_i) {
      acc_out     [body_i] = acc_i;
      ngb_out     [body_i] = ngb_i;
      active_inout[body_i] = 1;
      interactions[body_i].x = apprCount;
      interactions[body_i].y = direCount ;
    }
  }     //end while
}
namespace xlib {
namespace detail {
// Primary template, intentionally undefined: only the explicit
// specializations below (int, unsigned, float, double, int64_t, uint64_t)
// provide segmented warp reductions.
template<int WARP_SZ, typename T>
struct WarpSegReduceHelper;
// int specialization. The WARP_REDUCE_* macros (defined earlier in this file,
// #undef'd below) expand to inline-PTX shuffle-based segmented reductions;
// 'value' is reduced in place across lanes up to 'max_lane'. The array
// overloads reduce SIZE independent values per lane in one pass.
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, int> {

    static const unsigned member_mask = xlib::member_mask<WARP_SZ>();

    __device__ __forceinline__
    static void add(int& value, int max_lane) {
        WARP_REDUCE_32BIT(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void min(int& value, int max_lane) {
        WARP_REDUCE_32BIT(min, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void max(int& value, int max_lane) {
        WARP_REDUCE_32BIT(max, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[2], int max_lane) {
        WARP_REDUCE_GEN2(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[3], int max_lane) {
        WARP_REDUCE_GEN3(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[4], int max_lane) {
        WARP_REDUCE_GEN4(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[5], int max_lane) {
        WARP_REDUCE_GEN5(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[6], int max_lane) {
        WARP_REDUCE_GEN6(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[7], int max_lane) {
        WARP_REDUCE_GEN7(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[8], int max_lane) {
        WARP_REDUCE_GEN8(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[9], int max_lane) {
        WARP_REDUCE_GEN9(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[10], int max_lane) {
        WARP_REDUCE_GEN10(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[11], int max_lane) {
        WARP_REDUCE_GEN11(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[12], int max_lane) {
        WARP_REDUCE_GEN12(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[13], int max_lane) {
        WARP_REDUCE_GEN13(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[14], int max_lane) {
        WARP_REDUCE_GEN14(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[15], int max_lane) {
        WARP_REDUCE_GEN15(add, s32, r, max_lane)
    }

    __device__ __forceinline__
    static void add(int (&value)[16], int max_lane) {
        WARP_REDUCE_GEN16(add, s32, r, max_lane)
    }
};
//------------------------------------------------------------------------------
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, unsigned> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(unsigned& value, int max_lane) {
WARP_REDUCE_32BIT(add, u32, r, max_lane)
}
__device__ __forceinline__
static void min(unsigned& value, int max_lane) {
WARP_REDUCE_32BIT(min, u32, r, max_lane)
}
__device__ __forceinline__
static void max(unsigned& value, int max_lane) {
WARP_REDUCE_32BIT(max, u32, r, max_lane)
}
};
//------------------------------------------------------------------------------
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, float> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(float& value, int max_lane) {
WARP_REDUCE_32BIT(add, f32, f, max_lane)
}
__device__ __forceinline__
static void min(float& value, int max_lane) {
WARP_REDUCE_32BIT(min, f32, f, max_lane)
}
__device__ __forceinline__
static void max(float& value, int max_lane) {
WARP_REDUCE_32BIT(max, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[2], int max_lane) {
WARP_REDUCE_GEN2(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[3], int max_lane) {
WARP_REDUCE_GEN3(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[4], int max_lane) {
WARP_REDUCE_GEN4(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[5], int max_lane) {
WARP_REDUCE_GEN5(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[6], int max_lane) {
WARP_REDUCE_GEN6(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[7], int max_lane) {
WARP_REDUCE_GEN7(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[8], int max_lane) {
WARP_REDUCE_GEN8(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[9], int max_lane) {
WARP_REDUCE_GEN9(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[10], int max_lane) {
WARP_REDUCE_GEN10(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[11], int max_lane) {
WARP_REDUCE_GEN11(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[12], int max_lane) {
WARP_REDUCE_GEN12(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[13], int max_lane) {
WARP_REDUCE_GEN13(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[14], int max_lane) {
WARP_REDUCE_GEN14(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[15], int max_lane) {
WARP_REDUCE_GEN15(add, f32, f, max_lane)
}
__device__ __forceinline__
static void add(float (&value)[16], int max_lane) {
WARP_REDUCE_GEN16(add, f32, f, max_lane)
}
};
//------------------------------------------------------------------------------
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, double> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(double& value, int max_lane) {
WARP_REDUCE_64BIT(add, f64, d, max_lane)
}
__device__ __forceinline__
static void min(double& value, int max_lane) {
WARP_REDUCE_64BIT(min, f64, d, max_lane)
}
__device__ __forceinline__
static void max(double& value, int max_lane) {
WARP_REDUCE_64BIT(max, f64, d, max_lane)
}
};
//------------------------------------------------------------------------------
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, int64_t> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(int64_t& value, int max_lane) {
WARP_REDUCE_64BIT(add, s64, l, max_lane)
}
__device__ __forceinline__
static void min(int64_t& value, int max_lane) {
WARP_REDUCE_64BIT(min, s64, l, max_lane)
}
__device__ __forceinline__
static void max(int64_t& value, int max_lane) {
WARP_REDUCE_64BIT(max, s64, l, max_lane)
}
};
//------------------------------------------------------------------------------
template<int WARP_SZ>
struct WarpSegReduceHelper<WARP_SZ, uint64_t> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(uint64_t& value, int max_lane) {
WARP_REDUCE_64BIT(add, u64, l, max_lane)
}
__device__ __forceinline__
static void min(uint64_t& value, int max_lane) {
WARP_REDUCE_64BIT(min, u64, l, max_lane)
}
__device__ __forceinline__
static void max(uint64_t& value, int max_lane) {
WARP_REDUCE_64BIT(max, u64, l, max_lane)
}
};
// The reduction macros are implementation details of the specializations
// above; undefine them so they cannot leak into including translation units.
#undef WARP_REDUCE_32BIT
#undef WARP_REDUCE_64BIT
#undef WARP_REDUCE_GEN1
#undef WARP_REDUCE_GEN2
#undef WARP_REDUCE_GEN3
#undef WARP_REDUCE_GEN4
#undef WARP_REDUCE_GEN5
#undef WARP_REDUCE_GEN6
#undef WARP_REDUCE_GEN7
#undef WARP_REDUCE_GEN8
#undef WARP_REDUCE_GEN9
#undef WARP_REDUCE_GEN10
#undef WARP_REDUCE_GEN11
#undef WARP_REDUCE_GEN12
#undef WARP_REDUCE_GEN13
#undef WARP_REDUCE_GEN14
#undef WARP_REDUCE_GEN15
#undef WARP_REDUCE_GEN16
} // namespace detail
//==============================================================================
//==============================================================================
// Segmented in-place warp add of a scalar; 'max_lane' is the last lane of
// this thread's segment. Dispatches to the type-specific helper.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::add(T& value, int max_lane) {
    detail::WarpSegReduceHelper<WARP_SZ, T>::add(value, max_lane);
}
// Segmented in-place warp add of SIZE values per lane (array overload).
template<int WARP_SZ>
template<typename T, int SIZE>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::add(T (&value)[SIZE], int max_lane) {
    detail::WarpSegReduceHelper<WARP_SZ, T>::add(value, max_lane);
}
// Segmented in-place warp minimum of a scalar.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::min(T& value, int max_lane) {
    detail::WarpSegReduceHelper<WARP_SZ, T>::min(value, max_lane);
}
// Segmented in-place warp maximum of a scalar.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::max(T& value, int max_lane) {
    detail::WarpSegReduceHelper<WARP_SZ, T>::max(value, max_lane);
}
//==============================================================================
// Segmented warp add that stores the segment result through 'pointer' on the
// pivot lane, leaving 'value' untouched.
//
// Fix: the original passed the const parameter straight to the helper, which
// takes a mutable T& — ill-formed at instantiation (binding const T& to T&)
// and it would clobber the caller's value if it compiled. Reduce on a local
// copy instead, mirroring the atomicAdd variant below.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::add(const T& value, R* pointer,
                                       bool pivot, int max_lane) {
    auto value_tmp = value;
    detail::WarpSegReduceHelper<WARP_SZ, T>::add(value_tmp, max_lane);
    if (pivot)
        *pointer = value_tmp;
}
// Segmented warp minimum; writes the segment result through 'pointer' on the
// pivot lane.
//
// Fix: reduce on a local copy — the helper mutates its argument, so the
// original's const 'value' could not legally bind to the helper's T&.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::min(const T& value, R* pointer,
                                       bool pivot, int max_lane) {
    auto value_tmp = value;
    detail::WarpSegReduceHelper<WARP_SZ, T>::min(value_tmp, max_lane);
    if (pivot)
        *pointer = value_tmp;
}
// Segmented warp maximum; writes the segment result through 'pointer' on the
// pivot lane.
//
// Fix: reduce on a local copy — the helper mutates its argument, so the
// original's const 'value' could not legally bind to the helper's T&.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::max(const T& value, R* pointer,
                                       bool pivot, int max_lane) {
    auto value_tmp = value;
    detail::WarpSegReduceHelper<WARP_SZ, T>::max(value_tmp, max_lane);
    if (pivot)
        *pointer = value_tmp;
}
//==============================================================================
// Segmented warp add whose pivot-lane result is combined into *pointer; a
// plain store is used when the atomic is provably unnecessary, otherwise
// xlib::atomic::add.
// NOTE(review): the fast-path guard compares max_lane against
// xlib::WARP_SIZE here (and in atomicMin), but atomicMax below uses
// WARP_SIZE - 1 — one of the two is likely wrong; verify against
// xlib::max_lane's return convention.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicAdd(const T& value, R* pointer, bool pivot, int max_lane) {
    auto value_tmp = value;
    WarpSegmentedReduce<WARP_SZ>::add(value_tmp, max_lane);
    if (pivot) {
        if (lane_id() != 0 && max_lane == xlib::WARP_SIZE)
            *pointer = value_tmp;
        else
            xlib::atomic::add(value_tmp, pointer);
    }
}
// Segmented warp minimum combined into *pointer on the pivot lane (plain
// store on the fast path, xlib::atomic::min otherwise).
// NOTE(review): fast-path guard uses max_lane == xlib::WARP_SIZE, whereas
// atomicMax uses WARP_SIZE - 1 — confirm which comparison matches
// xlib::max_lane's convention.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicMin(const T& value, R* pointer, bool pivot, int max_lane) {
    auto value_tmp = value;
    WarpSegmentedReduce::min(value_tmp, max_lane);
    if (pivot) {
        if (lane_id() != 0 && max_lane == xlib::WARP_SIZE)
            *pointer = value_tmp;
        else
            xlib::atomic::min(value_tmp, pointer);
    }
}
// Segmented warp maximum combined into *pointer on the pivot lane (plain
// store on the fast path, xlib::atomic::max otherwise).
// NOTE(review): this guard uses max_lane == xlib::WARP_SIZE - 1 while
// atomicAdd/atomicMin above use WARP_SIZE — the three should agree; verify
// against xlib::max_lane's return convention.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicMax(const T& value, R* pointer, bool pivot, int max_lane) {
    auto value_tmp = value;
    WarpSegmentedReduce::max(value_tmp, max_lane);
    if (pivot) {
        if (lane_id() != 0 && max_lane == xlib::WARP_SIZE - 1)
            *pointer = value_tmp;
        else
            xlib::atomic::max(value_tmp, pointer);
    }
}
//==============================================================================
//==============================================================================
// Segmented add driven by a head mask: derive the last lane of this thread's
// segment from `mask`, then run the in-register segmented reduction on `value`.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::mask_add(T& value, unsigned mask) {
    detail::WarpSegReduceHelper<WARP_SZ, T>
        ::add(value, xlib::max_lane<WARP_SZ>(mask));
}
// Segmented min driven by a head mask: derive the last lane of this thread's
// segment from `mask`, then run the in-register segmented reduction on `value`.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::mask_min(T& value, unsigned mask) {
    detail::WarpSegReduceHelper<WARP_SZ, T>
        ::min(value, xlib::max_lane<WARP_SZ>(mask));
}
// Segmented max driven by a head mask: derive the last lane of this thread's
// segment from `mask`, then run the in-register segmented reduction on `value`.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>::mask_max(T& value, unsigned mask) {
    detail::WarpSegReduceHelper<WARP_SZ, T>
        ::max(value, xlib::max_lane<WARP_SZ>(mask));
}
//==============================================================================
// Mask-driven segmented add committed to memory. `mask` marks segment-head
// lanes. Only head lanes (lanemask_eq() & mask) store; a head that is not
// lane 0 and is followed by another head in this warp (lanemask_gt() & mask)
// owns a segment fully contained in the warp and can use a plain store;
// otherwise the segment presumably continues into an adjacent warp, so an
// atomic add is used to merge contributions.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicAdd(const T& value, R* pointer, unsigned mask) {
    auto value_tmp = value;                                   // reduce a mutable copy
    int max_lane = xlib::max_lane<WARP_SZ>(mask);
    WarpSegmentedReduce<WARP_SZ>::add(value_tmp, max_lane);
    if (lanemask_eq() & mask) {                               // this lane is a segment head
        if (lane_id() != 0 && lanemask_gt() & mask)
            *pointer = value_tmp;                             // segment closed inside this warp
        else
            xlib::atomic::add(value_tmp, pointer);            // may merge with other warps
    }
}
// Mask-driven segmented min committed to memory; same head/store policy as
// the mask-driven atomicAdd above: plain store only for a non-lane-0 head
// whose segment is closed by a later head within this warp, atomic min
// otherwise.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicMin(const T& value, R* pointer, unsigned mask) {
    auto value_tmp = value;                                   // reduce a mutable copy
    int max_lane = xlib::max_lane<WARP_SZ>(mask);
    WarpSegmentedReduce::min(value_tmp, max_lane);
    if (lanemask_eq() & mask) {                               // this lane is a segment head
        if (lane_id() != 0 && lanemask_gt() & mask)
            *pointer = value_tmp;                             // segment closed inside this warp
        else
            xlib::atomic::min(value_tmp, pointer);            // may merge with other warps
    }
}
// Mask-driven segmented max committed to memory; same head/store policy as
// the mask-driven atomicAdd above: plain store only for a non-lane-0 head
// whose segment is closed by a later head within this warp, atomic max
// otherwise.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicMax(const T& value, R* pointer, unsigned mask) {
    auto value_tmp = value;                                   // reduce a mutable copy
    int max_lane = xlib::max_lane<WARP_SZ>(mask);
    WarpSegmentedReduce::max(value_tmp, max_lane);
    if (lanemask_eq() & mask) {                               // this lane is a segment head
        if (lane_id() != 0 && lanemask_gt() & mask)
            *pointer = value_tmp;                             // segment closed inside this warp
        else
            xlib::atomic::max(value_tmp, pointer);            // may merge with other warps
    }
}
//==============================================================================
//==============================================================================
// Segmented add with a caller-provided store policy: the segment is reduced
// into a private copy, which is then either written directly (`normal_store`)
// or combined atomically (`atomic_store`). A plain store takes precedence
// when both flags are set; neither flag means no write at all.
template<int WARP_SZ>
template<typename T, typename R>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::atomicAdd(const T& value, R* pointer, bool normal_store, bool atomic_store,
            int max_lane) {
    auto partial = value;
    WarpSegmentedReduce<WARP_SZ>::add(partial, max_lane);
    if (normal_store)
        *pointer = partial;
    else if (atomic_store)
        xlib::atomic::add(partial, pointer);
}
// Segmented "conditional" add implemented in PTX. Each butterfly step
// shuffles a value down by 2^STEP lanes; when the shuffle source is still
// inside this thread's segment (shfl predicate p, bounded by `max_lane`),
// the shuffled value is accumulated into `right` if `predicate` is non-zero
// and into `left` otherwise — see the equivalent C code kept below.
// NOTE(review): the "+r" constraints and add.s32 hard-code 32-bit signed
// operands, so despite the template parameter this is only valid when T is
// (convertible to) a 32-bit int.
template<int WARP_SZ>
template<typename T>
__device__ __forceinline__
void WarpSegmentedReduce<WARP_SZ>
::conditional_add(T& left, T& right, int predicate, int max_lane) {
    const unsigned member_mask = xlib::member_mask<WARP_SZ>();
    _Pragma("unroll")
    for (int STEP = 0; STEP < xlib::Log2<WARP_SZ>::value; STEP++) {
        asm(
            "{\n\t\t"
            ".reg .s32 r1;\n\t\t"
            ".reg .pred p, q, s;\n\t\t"
            "shfl.sync.down.b32 r1|p, %0, %2, %3, %4;\n\t\t"   // r1 = left from lane+2^STEP; p = in-segment
            "setp.ne.and.b32 s|q, %5, 0, p;\n\t\t"             // s = p && predicate, q = p && !predicate
            "@s add.s32 %1, r1, %1;\n\t\t"                     // predicate set:  right += r1
            "@q add.s32 %0, r1, %0;\n\t"                       // predicate clear: left  += r1
            "}"
            : "+r"(left), "+r"(right) : "r"(1 << STEP),
              "r"(max_lane), "r"(member_mask), "r"(predicate));
        /*int tmp = __shfl_down_sync(member_mask, left, 1 << STEP);
        if (xlib::lane_id() + (1 << STEP) <= max_lane) {
            if (predicate)
                right += tmp;
            else
                left += tmp;
        }*/
    }
}
} // namespace xlib | the_stack |
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <helpers/TAD.h>
#include <helpers/shape.h>
#include <loops/summarystatsreduce.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/float16.h>
#include <types/types.h>
using namespace simdOps;
namespace functions {
namespace summarystats {
// Device-side trampoline into SummaryStatsReduce<X,Z>::transform for a given
// op number.
// NOTE(review): xRank, zRank and postProcessOrNot are accepted but never
// forwarded; `biasCorrected` is passed in the slot transform() declares as
// `int postProcessOrNot` — confirm this implicit bool->int hand-off is the
// intended contract with the host launchers.
template <typename X, typename Z>
void SD_KERNEL summaryStatsReduceT(int op, void const* dx, sd::LongType const* xShapeInfo, int xRank, void* extraParams,
                                   void* z, sd::LongType const* zShapeInfo, int zRank, int* dimension,
                                   int dimensionLength, int postProcessOrNot, bool biasCorrected, int* allocationBuffer,
                                   void* reductionBuffer, sd::LongType const* tadOnlyShapeInfo,
                                   sd::LongType const* tadOffsets) {
  functions::summarystats::SummaryStatsReduce<X, Z>::transform(
      op, dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, biasCorrected, allocationBuffer,
      reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
/**
*
* @param sPartialsRef
* @param tid
* @param extraParams
*/
/**
 * Tree-reduces the per-thread SummaryStatsData partials held in shared
 * memory down to sPartials[0].
 *
 * @param sPartials    shared-memory array of per-thread partial statistics
 * @param tid          this thread's index within the block
 * @param numElements  number of valid entries in sPartials
 * @param vextraParams opaque extra-parameter block, reinterpreted as Z*
 */
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::aggregatePartials(SummaryStatsData<X>* sPartials, sd::LongType tid,
                                                           sd::LongType numElements, void* vextraParams) {
  // start the shared memory loop on the next power of 2 less
  // than the block size. If block size is not a power of 2,
  // accumulate the intermediate sums in the remainder range.
  auto extraParams = static_cast<Z*>(vextraParams);
  sd::LongType floorPow2 = blockDim.x;

  if (floorPow2 & (floorPow2 - 1)) {
    // round blockDim.x down to a power of two by clearing low set bits
    while (floorPow2 & (floorPow2 - 1)) {
      floorPow2 &= floorPow2 - 1;
    }

    // fold the "remainder" threads into the power-of-two range first
    if (tid >= floorPow2) {
      SummaryStatsData<X> prev = sPartials[tid - floorPow2];
      SummaryStatsData<X> curr = sPartials[tid];
      sPartials[tid - floorPow2] = update(prev, curr, extraParams);
    }
    __syncthreads();
  }

  // classic halving tree reduction over the power-of-two range
  for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
    if (tid < activeThreads && tid + activeThreads < numElements) {
      SummaryStatsData<X> curr = sPartials[tid];
      SummaryStatsData<X> next = sPartials[tid + activeThreads];

      sPartials[tid] = update(curr, next, extraParams);
    }
    __syncthreads();
  }
};
/**
 * NOTE(review): the parameter list documented below describes an older
 * interface (n / dx / xVectorInfo / gpuInformation / problemDefinition) and
 * no longer matches the transform() overload that follows; treat it as
 * historical context only and rely on the actual signature.
 *
 * @param n n is the number of
 *        elements to loop through
 * @param dx the data to operate on
 * @param xVectorInfo the meta data for the vector:
 *                              0 is the offset
 *                              1 is the increment/stride
 *                              2 is the real length of the buffer (n and dx.length won't always be the same)
 *                              3 is the element wise stride for the buffer
 *                              4 is the number of elements it takes to get to the next row/column/tensor
 * @param gpuInformation
 *                              0 is the block size
 *                              1 is the grid size
 *                              2 is the shared memory size
 * @param problemDefinition
 *                          0 is the number of elements per vector
 *                          1 is the number of vectors
 */
/**
 * Device-side summary-statistics reduction.
 *
 * Two modes, selected at runtime:
 *  - TAD mode (resultScalar == 0): each block reduces one or more TADs
 *    (sub-arrays described by tadOnlyShapeInfo/tadOffsets) and writes one
 *    value per TAD into z.
 *  - Scalar mode (resultScalar == 1): the whole input is reduced to z[0];
 *    when more than one block is launched, per-block partials are staged in
 *    reductionBuffer and the last block to finish combines them.
 *
 * NOTE(review): callers pass `biasCorrected` in the `postProcessOrNot` slot
 * (see summaryStatsReduceT) — confirm OpType::getValue expects that.
 *
 * Fixes vs previous revision: removed the duplicated assignment
 * `z[0] = z[0] = ...` in the single-block scalar path and the unused local
 * `numElements`.
 */
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void SummaryStatsReduce<X, Z>::transform(void const* vx, sd::LongType const* xShapeInfo, void* vextraParams,
                                                   void* vz, sd::LongType const* zShapeInfo, int* dimension,
                                                   int dimensionLength, int postProcessOrNot, int* allocationBuffer,
                                                   void* vreductionBuffer, sd::LongType const* tadOnlyShapeInfo,
                                                   sd::LongType const* tadOffsets) {
  auto dx = static_cast<X const*>(vx);
  auto z = static_cast<Z*>(vz);
  auto extraParams = static_cast<Z*>(vextraParams);
  auto reductionBuffer = static_cast<Z*>(vreductionBuffer);

  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ volatile int resultScalar;

  __shared__ int xElementWiseStride;

  // shared memory space for storing intermediate results
  __shared__ SummaryStatsData<X> sPartials[SD_CUDA_BLOCK_SIZE];
  Z startingVal = startingValue(dx);

  SummaryStatsData<X> val;
  val.initWithValue(startingVal);
  val.n = 0;
  sPartials[threadIdx.x] = val;

  // length for the tad (write-only below; kept for parity with older code)
  __shared__ volatile int xLength;

  __shared__ volatile int resultLength;

  SummaryStatsData<X> reduction;
  reduction.initWithValue(0.0);
  reduction.n = 0;
  if (threadIdx.x == 0) {
    // thread 0 decides whether this is a full-array (scalar) reduction
    if (zShapeInfo != nullptr)
      resultLength = shape::length(zShapeInfo);
    else
      resultLength = 1;

    if (dimensionLength == 1) {
      if (resultLength == 1 && (dimension == nullptr || dimension[0] == SD_MAX_DIMENSION))
        resultScalar = 1;
      else
        resultScalar = 0;
    } else
      resultScalar = 0;

    if (resultLength == 1) resultScalar = 1;

    auto xStride = shape::stride(xShapeInfo);
    auto xOrder = shape::order(xShapeInfo);

    if (dimension != nullptr && (dimension[0] != SD_MAX_DIMENSION && dimensionLength == 1)) {
      xElementWiseStride = xStride[dimension[0]];
    } else {
      xElementWiseStride = shape::elementWiseStride(xShapeInfo);
    }

    xLength = shape::length(xShapeInfo);
  }
  __syncthreads();
  if (!resultScalar) {
    // ---- TAD mode: one output element per TAD --------------------------
    __shared__ int tadLength;
    __shared__ int tadEWS;
    __shared__ int numTads;

    if (threadIdx.x == 0) {
      tadLength = shape::length(tadOnlyShapeInfo);  // shape::tadLength(xShapeInfo, dimension, dimensionLength);
      tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
      numTads = shape::length(xShapeInfo) / tadLength;
    }
    __syncthreads();

    if (tadEWS == 0) {
      // no element-wise stride: resolve each offset through the TAD shape info
      for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
        auto tadOffsetForBlock = tadOffsets[r];

        val.initWithValue(startingVal);
        val.n = 0;
        sPartials[threadIdx.x] = val;

        for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
          auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
          SummaryStatsData<X> indexVal2;
          indexVal2.initWithValue(dx[xOffset]);

          sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
        }
        __syncthreads();
        aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);

        __syncthreads();
        if (threadIdx.x == 0) {
          z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]);
        }
        __syncthreads();
      }
    } else {
      // strided fast path within each TAD
      for (int i = blockIdx.x; i < numTads; i += gridDim.x) {
        auto tadOffsetForBlock = tadOffsets[i];

        val.initWithValue(startingVal);
        val.n = 0;
        sPartials[threadIdx.x] = val;

        for (int x = threadIdx.x; x < tadLength; x += blockDim.x) {
          auto indexX = tadOffsetForBlock + x * tadEWS;
          SummaryStatsData<X> indexVal2;
          indexVal2.initWithValue(dx[indexX]);
          sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams);
        }
        __syncthreads();
        aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLength), extraParams);

        __syncthreads();
        if (threadIdx.x == 0) {
          z[i] = OpType::getValue(postProcessOrNot,
                                  sPartials[threadIdx.x]);  // postProcess(sPartials[0],tadLength ,extraParams);
        }
      }
    }
  } else if (resultScalar) {
    // ---- Scalar mode: reduce the entire input to z[0] ------------------
    __shared__ int n;
    if (threadIdx.x == 0) {
      xElementWiseStride = shape::elementWiseStride(xShapeInfo);
      n = shape::length(xShapeInfo);
    }
    __syncthreads();

    if (xElementWiseStride >= 1) {
      // grid-stride loop over the strided buffer
      for (sd::LongType i = tid; i < n; i += (blockDim.x * gridDim.x)) {
        SummaryStatsData<X> indexVal2;
        indexVal2.initWithValue(dx[i * xElementWiseStride]);
        reduction = update(reduction, indexVal2, extraParams);
      }
    } else {
      // generic path: compute each offset from the shape info
      for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) {
        auto offset = shape::getIndexOffset(i, xShapeInfo);
        SummaryStatsData<X> indexVal2;
        indexVal2.initWithValue(dx[offset]);
        reduction = update(reduction, indexVal2, extraParams);
      }
    }
    sPartials[threadIdx.x] = reduction;

    __syncthreads();
    aggregatePartials<OpType>(sPartials, threadIdx.x, blockDim.x, extraParams);
    __syncthreads();

    if (gridDim.x > 1) {
      // Multi-block scalar reduction: stage per-block partials, then let the
      // last block to arrive (ticket at tc[16384], an upstream convention —
      // assumes reductionBuffer is at least that large) combine them.
      __shared__ bool amLast;
      unsigned int* tc = (unsigned int*)reductionBuffer;
      tid = threadIdx.x;
      if (threadIdx.x == 0) {
        SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;
        pBuffer[blockIdx.x] = sPartials[0];
      }
      __threadfence();
      __syncthreads();

      if (tid == 0) {
        unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
        amLast = (ticket == gridDim.x - 1);
      }

      __syncthreads();

      if (amLast) {
        tc[16384] = 0;  // reset the ticket counter for the next launch
        SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*)reductionBuffer;

        Z startingVal = startingValue(dx);

        SummaryStatsData<X> val;
        val.initWithValue(startingVal);
        val.n = 0;
        sPartials[threadIdx.x] = val;

        for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
          sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams);
        }

        __syncthreads();
        aggregatePartials<OpType>(sPartials, threadIdx.x, gridDim.x, extraParams);

        __syncthreads();
        if (tid == 0) {
          z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
        }
      }
    } else {
      // Single block: the block-level partial is already the final answer.
      if (tid == 0) {
        unsigned int* tc = (unsigned*)reductionBuffer;
        tc[16384] = 0;
        z[0] = OpType::getValue(postProcessOrNot, sPartials[0]);
      }
    }
  }
};
// Runtime dispatcher: routes opNum to the matching OpType instantiation of
// the templated transform above via the SUMMARY_STATS_OPS list.
template <typename X, typename Y>
SD_DEVICE void SummaryStatsReduce<X, Y>::transform(const int opNum, void const* dx, sd::LongType const* xShapeInfo,
                                                   void* extraParams, void* z, sd::LongType const* zShapeInfo,
                                                   int* dimension, int dimensionLength, int postProcessOrNot,
                                                   int* allocationBuffer, void* reductionBuffer,
                                                   sd::LongType const* tadOnlyShapeInfo,
                                                   sd::LongType const* tadOffsets) {
  DISPATCH_BY_OPNUM_TT(transform,
                       PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot,
                              allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets),
                       SUMMARY_STATS_OPS);
};
// Host-side launcher for the scalar (whole-array) summary-statistics
// reduction. Blocks on completion because the caller expects the scalar to
// be ready on return.
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduceScalar(
    dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
    sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
    sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
    bool biasCorrected, void* reductionBuffer) {
  if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum);

  auto input = static_cast<X const*>(vx);
  auto output = reinterpret_cast<Z*>(vz);
  auto params = static_cast<Z*>(vextraParams);
  auto reduction = reinterpret_cast<Z*>(reductionBuffer);

  // dimension == nullptr / dimensionLength == 1 selects the scalar path
  summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
      opNum, input, xShapeInfo, shape::rank(hxShapeInfo), params, output, zShapeInfo, shape::rank(hzShapeInfo),
      nullptr, 1, 1, biasCorrected, nullptr, reduction, tadShapeInfo, tadOffsets);

  // this is blocking method since method should return scalar
  sd::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed");
}
// Host-side launcher for the full-array summary-statistics reduction
// (asynchronous; errors surfaced via DEBUG_KERNEL).
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
    dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
    sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
    sd::LongType const* hzShapeInfo, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets,
    bool biasCorrected, void* reductionBuffer) {
  if (sd::Environment::getInstance().isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum);

  auto input = static_cast<X const*>(vx);
  auto output = static_cast<Z*>(vz);
  auto params = static_cast<Z*>(vextraParams);
  auto reduction = reinterpret_cast<Z*>(reductionBuffer);

  // dimension == nullptr / dimensionLength == 1 selects the scalar path
  summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
      opNum, input, xShapeInfo, shape::rank(hxShapeInfo), params, output, zShapeInfo, shape::rank(hzShapeInfo),
      nullptr, 1, 1, biasCorrected, nullptr, reduction, tadShapeInfo, tadOffsets);

  DEBUG_KERNEL(stream, opNum);
}
// Host-side launcher for the dimension-wise (TAD) summary-statistics
// reduction: one output element per TAD along `dimension`.
template <typename X, typename Z>
SD_HOST void SummaryStatsReduce<X, Z>::execSummaryStatsReduce(
    dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo,
    sd::LongType const* hxShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo,
    sd::LongType const* hzShapeInfo, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo,
    sd::LongType const* tadOffsets, bool biasCorrected, void* reductionBuffer) {
  if (sd::Environment::getInstance().isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum);

  auto input = static_cast<X const*>(vx);
  auto output = static_cast<Z*>(vz);
  auto params = static_cast<Z*>(vextraParams);

  summaryStatsReduceT<X, Z><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
      opNum, input, xShapeInfo, shape::rank(hxShapeInfo), params, output, zShapeInfo, shape::rank(hzShapeInfo),
      dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo,
      tadOffsets);

  DEBUG_KERNEL(stream, opNum);
}
BUILD_DOUBLE_TEMPLATE(template class SummaryStatsReduce, , SD_COMMON_TYPES, SD_FLOAT_TYPES);
} // namespace summarystats
} // namespace functions | the_stack |
* COMPILATION TIP
* nvcc main_draft1.cu ../grid2d/grid2d.cu ../grid2d/sysparam.cu ../dynam/XORMRGgens.cu ../dynam/metropolis.cu ../common/gridsetup.cu -o main
*
* */
#include "../grid2d/grid2d.h" // Spins2d (struct)
#include "../grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_ptr, constTransProb
#include "../dynam/metropolis.h" // initialize_allup
#include "../common/gridsetup.h" // get_maxGridSize()
#include <algorithm>  // std::min
#include <chrono>
#include <iostream>
/**
 * Energy change (in units of J) for flipping the spin at stencil position
 * (s_x, s_y) of the shared-memory tile `temp`:
 *
 *     Delta E = 2 * s_center * (sum of the 4 nearest neighbours)
 *
 * `temp` has row pitch S_x and already contains the halo ring, so the
 * +/-1 neighbour reads never leave the tile. S_y and RAD are part of the
 * established call signature but are not needed by the computation.
 */
__device__ int calcintDeltaE(int* temp, const unsigned int S_x, const unsigned int S_y,
	const unsigned int s_x, const unsigned int s_y, const int RAD)
{
	const int center = temp[s_x + s_y * S_x];
	const int neighbours =
		  temp[(s_x + 1) + s_y * S_x]
		+ temp[(s_x - 1) + s_y * S_x]
		+ temp[s_x + (s_y + 1) * S_x]
		+ temp[s_x + (s_y - 1) * S_x];

	return 2 * center * neighbours;
};
/**
 * One checkerboard Metropolis pass over the portion of the spin lattice
 * covered by this thread block (with grid-stride loops so a smaller thread
 * grid can still cover the full Lx x Ly lattice).
 *
 * @param tg        the calling thread block (used only for barriers)
 * @param Sptr      global spin lattice (+1/-1 per site)
 * @param transprob table of acceptance probabilities, indexed by
 *                  (integer Delta E) + 8
 * @param temp      shared-memory tile: block plus halo of width RAD
 * @param Lx, Ly    lattice extents
 * @param J         coupling constant (scales the returned energy change)
 * @param state     per-thread cuRAND states
 * @return accumulated (E, M) changes produced by this thread's accepted flips
 *
 * Fix vs previous revision: the cuRAND state copied into `localState` is now
 * written back to `state[k]` after use — without that, every call replayed
 * the same random sequence (standard cuRAND usage: copy to local, copy back).
 */
__device__ Sysparam spinflips(cg::thread_group & tg, int* Sptr, float * transprob,
	int* temp, size_t Lx, size_t Ly, const float J, curandState *state)
{
	Sysparam results_sysparams { 0.f, 0.f, 0.f };

	const int RAD = 1; // "radius" of "halo" cells, of width 1 (in this case)

	// global thread coordinates and shared-tile geometry
	unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ;
	unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ;

	unsigned int S_x = static_cast<int>(blockDim.x + 2*RAD);
	unsigned int S_y = static_cast<int>(blockDim.y + 2*RAD);

	unsigned int s_x = threadIdx.x + RAD; // s_x = 1,2,...S_x-2
	unsigned int s_y = threadIdx.y + RAD; // s_y = 1,2,...S_y-2

	// grid-stride loops: account for lattice sites not "covered" by the launched grid
	for (unsigned int l_y=k_y,idxy=0; l_y < Ly; idxy++, l_y += blockDim.y *gridDim.y) {
		for (unsigned int l_x=k_x, idxx=0; l_x < Lx; idxx++, l_x += blockDim.x*gridDim.x ) {

			int lx =0; // lx gives back global index on lattice grid of spins
			int ly =0; // ly gives back global index on lattice grid of spins

			// cooperatively load the block's tile plus halo into shared memory,
			// wrapping with % Lx / % Ly for periodic boundary conditions
			for (int i = threadIdx.x; i<S_x; i+=static_cast<int>(blockDim.x) ) {
				for (int j = threadIdx.y; j <S_y; j+= static_cast<int>(blockDim.y) ) {
					lx = i + static_cast<int>(blockDim.x*blockIdx.x);
					ly = j + static_cast<int>(blockDim.y*blockIdx.y);

					temp[i+j*S_x] =
						static_cast<float>(
							Sptr[ (lx+idxx*gridDim.x*blockDim.x) % Lx +
								blockDim.x * gridDim.x * ( (ly + idxy*gridDim.y*blockDim.y) % Ly ) ] );
				}
			}

			// NOTE(review): given the loop guards above, this condition can never
			// hold here; it is kept defensively. Beware also that a thread
			// returning before tg.sync() while others reach the barrier would be
			// a divergence hazard — confirm launch geometry rules this out.
			if ( l_x >= Lx || l_y >= Ly) {
				return results_sysparams;
			}

			tg.sync();

			// global index k (row pitch is the full thread-grid width)
			size_t k = l_x + gridDim.x*blockDim.x * l_y;

			/* Copy state to local memory for efficiency */
			curandState localState = state[k];

			// "checkerboard" update: Delta E depends on nearest neighbours, so
			// even- and odd-ranked threads must flip in separate phases
			if ( ( tg.thread_rank() % 2) == 0)
			{
				// effectively, pick a random spin (uniform distribution)
				if ( curand_uniform(&localState) < (1.f/ ((float) Lx*Ly)) )
				{
					// nearest-neighbour sum computed entirely in shared memory
					int intdeltaE = calcintDeltaE(temp, S_x,S_y,s_x,s_y,RAD);

					// roll dice, see if we transition or not, given transprob
					if ( curand_uniform(&localState) <= transprob[intdeltaE +8] )
					{
						// Accept! Single-site update; no need to reload the tile.
						Sptr[ k] = temp[ s_x + s_y * S_x] *= -1; // flip 1 spin and accept new spin config

						results_sysparams.E += ((float) intdeltaE) * J ;
						results_sysparams.M += 2.f*((float) temp[s_x+s_y*S_x]) ;
					}
				}
			}
			tg.sync();

			// odd-ranked threads' phase
			if ( (tg.thread_rank() % 2) == 1)
			{
				if (curand_uniform(&localState) < (1.f/ ((float) Lx*Ly)) )
				{
					int intdeltaE = calcintDeltaE(temp, S_x,S_y,s_x,s_y,RAD);

					if ( curand_uniform(&localState) <= transprob[intdeltaE +8] )
					{
						// Accept!
						Sptr[ k] = temp[ s_x + s_y * S_x] *= -1; // flip 1 spin and accept new spin config

						results_sysparams.E += ((float) intdeltaE) * J ;
						results_sysparams.M += 2.f*((float) temp[s_x+s_y*S_x]) ;
					}
				}
			}

			/* Write the advanced RNG state back so subsequent calls draw fresh
			 * numbers (fix: was missing, replaying the same sequence). */
			state[k] = localState;
		}
	} // END of grid-stride loops covering lattice sites beyond the thread grid

	return results_sysparams ;
};
/**
 * Kernel wrapper around spinflips(): runs one checkerboard Metropolis pass
 * and accumulates each block's (E, M) changes into the global Sysparam.
 *
 * The previous revision branched on the parity of the flattened block index
 * j = blockIdx.x + gridDim.x*blockIdx.y, but both branches executed exactly
 * the same statements, so the parity split is dropped here (behaviour
 * unchanged: every block took exactly one of the two identical branches).
 */
__global__ void metropolis_kernel(int* Sptr, Sysparam* sysparams, float* transprob, size_t Lx, size_t Ly, const float J,
	curandState *state)
{
	extern __shared__ int temp[];

	auto block = cg::this_thread_block();

	Sysparam delta = spinflips(block, Sptr, transprob, temp, Lx, Ly, J, state);

	// fold this block's contributions into the global system parameters
	atomicAdd(&(sysparams->E), delta.E);
	atomicAdd(&(sysparams->M), delta.M);
};
/**
* @fn metropolis
* @brief "driver" function for Metropolis algorithm, single-spin flip scheme for 2-dim. Ising model
* */
/**
 * @fn metropolis
 * @brief "driver" function for Metropolis algorithm, single-spin flip scheme
 *        for the 2-dim. Ising model: sizes the launch and issues one kernel.
 *
 * @param spins2d       lattice description (extents, coupling J, device spins)
 * @param sysParams     device-side (E, M, T) accumulator
 * @param transProbs    device-side acceptance-probability table
 * @param MAXGRIDSIZES  per-dimension device grid limits
 * @param M_is          thread-block dimensions
 * @param devStates     pre-initialized cuRAND states (curand_init is slow,
 *                      so it is done once from the host)
 * @param trials        currently unused (a single launch is issued); kept
 *                      for interface compatibility with callers
 *
 * Cleanup vs previous revision: removed the unused quarter-grid locals
 * (N_x_4th / N_y_4th / N_is_4th) and the commented-out sanity-check block.
 */
void metropolis(Spins2d& spins2d, Sysparam_ptr& sysParams,TransProb_ptr& transProbs,
	const std::array<int,3> MAXGRIDSIZES,const dim3 M_is, devStatesXOR & devStates, const unsigned int trials) {

	size_t Lx = spins2d.L_is[0]; // lattice extent in x
	size_t Ly = spins2d.L_is[1]; // lattice extent in y
	const float J = spins2d.J;

	unsigned int RAD = 1; // width of the "halo" ring around each thread tile

	/* ========== (thread) grid,block dims ========== */
	// clamp the launched grid to the device limit in each dimension
	unsigned long MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y;
	unsigned int N_y = std::min( MAX_BLOCKS_y, ((Ly + M_is.y - 1)/ M_is.y));
	unsigned long MAX_BLOCKS_x = (MAXGRIDSIZES[0] + M_is.x - 1)/ M_is.x;
	unsigned int N_x = std::min( MAX_BLOCKS_x, ((Lx + M_is.x - 1)/ M_is.x));
	dim3 N_is { N_x,N_y };

	// shared-memory tile: thread block plus halo of width RAD on every side
	int sharedBytes = (M_is.x+2*RAD)*(M_is.y + 2*RAD)* sizeof(int);
	/* ========== END of (thread) grid,block dims ========== */

	metropolis_kernel<<< N_is,M_is,sharedBytes>>>( spins2d.S.get(), sysParams.d_sysparams.get(),
		(transProbs.d_transProb.get()->transProb).data(), Lx,Ly, J, devStates.devStates.get());
}
namespace cg = cooperative_groups; // this should go with metropolis.h, initialize_allup_kernel
/**
 * Entry point: sets up a 1024x1024 Ising lattice, initializes all spins up,
 * runs one Metropolis pass, and prints (E, M, T) before and after.
 *
 * Cleanup vs previous revision: qualified `min` as `std::min`, removed unused
 * locals (the *_small debug objects, avgs_ptr, L, MAX_BLOCKS_y) and the
 * commented-out cudaMemcpyToSymbol experiments, and added an explicit return.
 */
int main(int argc, char* argv[]) {

	// temperature-sweep parameters (the sweep itself is not implemented here yet)
	constexpr const float initial_temp = 1.f; // typically 1.
	constexpr const float final_temp = 3.f;   // typically 3.
	constexpr const float tempstep = 0.05f;   // typically 0.05

	// number of spins, related to 2-dim. grid size Lx x Ly
	std::array<size_t, 2> L_is { 1<<10, 1<<10 }; // 1<<10 = 1024

	Spins2d spins = {L_is};

	std::cout << " L : " << spins.L_is[0]*spins.L_is[1] << std::endl;

	Sysparam_ptr sysparams_ptr = { initial_temp } ;
	TransProb_ptr transprob_ptr = { initial_temp , 1.f } ;

	/* sanity check: print the acceptance-probability table */
	TransProb h_TransProb_out ;
	cudaMemcpy(&h_TransProb_out, transprob_ptr.d_transProb.get(), 1*sizeof(TransProb), cudaMemcpyDeviceToHost); // possible error have to be of same type
	for (unsigned int idx=0; idx<17; idx++) { std::cout << h_TransProb_out.transProb[idx] << " "; }
	std::cout << std::endl;

	/* ***** (thread) grid,block dims ***** */
	size_t MAXGRIDSIZE = get_maxGridSize();
	auto MAXGRIDSIZES = get_maxGridSizes();
	std::cout << " MAXGRIDSIZE : " << MAXGRIDSIZE << std::endl;

	// (thread) block dims., remember max. no. threads per block is 1024, as of compute capability 5.2
	dim3 M_is { 1<<5, 1<<5 };

	// (thread) grid, block dims for curandstates and other 1-dim. arrays
	unsigned int M_x = 1<<8; // 2^8 = 256
	unsigned long MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
	unsigned int N_x = std::min( MAX_BLOCKS, (( spins.L_is[0]*spins.L_is[1] + M_x - 1)/ M_x));
	/* ***** END of (thread) grid,block dims ***** */

	initialize_allup(spins,sysparams_ptr, MAXGRIDSIZES, M_is);

	/* sanity check: E, M, T right after initialization */
	Sysparam h_sysparams_out ;
	cudaMemcpy(&h_sysparams_out, sysparams_ptr.d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type
	std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
		<< h_sysparams_out.T << std::endl;

	// since curand_init calls are slow, do it once for the grid from the host main code
	devStatesXOR devstatesXOR = { spins.L_is[0]*spins.L_is[1], N_x,M_x };

	metropolis(spins,sysparams_ptr,transprob_ptr,MAXGRIDSIZES,M_is,devstatesXOR,1);

	cudaMemcpy(&h_sysparams_out, sysparams_ptr.d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type
	std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
		<< h_sysparams_out.T << std::endl;

	return 0;
}
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <mex.h>
#include "kernels.cx"
#define UseCudaOnDoubles USE_DOUBLE_PRECISION
///////////////////////////////////////
///// CONV ////////////////////////////
///////////////////////////////////////
// thread kernel: computation of gammai = sum_j k(xi,yj)betaj for index i given by thread id.
// thread kernel: computation of gammai = sum_j k(xi,yj)betaj for index i given by thread id.
// Tiled gradient kernel: thread i owns point xi / signal fi / vector alphai
// and accumulates gammai by sweeping all (yj, gj, betaj) in shared-memory
// tiles of blockDim.x entries each.
template < typename TYPE, int DIMPOINT, int DIMSIG, int DIMVECT >
__global__ void DXfsimplex_on_device(TYPE ooSigmax2, TYPE ooSigmaf2,
        TYPE *x, TYPE *y,
        TYPE *f, TYPE *g,
        TYPE *alpha, TYPE *beta,
        TYPE *gamma,
        int nx, int ny) {

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // the following line does not work with nvcc 3.0 (it is a bug; it works with anterior and posterior versions)
    // extern __shared__ TYPE SharedData[];  // shared data will contain x and alpha data for the block
    // here is the bug fix (see http://forums.nvidia.com/index.php?showtopic=166905)
    extern __shared__ char SharedData_char[];
    TYPE* const SharedData = reinterpret_cast<TYPE*>(SharedData_char);
    // end of bug fix

    // per-thread copies of this thread's point, signal, vector and accumulator
    TYPE xi[DIMPOINT], fi[DIMSIG], alphai[DIMPOINT], gammai[DIMPOINT];
    if(i<nx) { // we will compute gammai only if i is in the range

        // load xi from device global memory
        for(int k=0; k<DIMPOINT; k++)
            xi[k] = x[i*DIMPOINT+k];
        for(int k=0; k<DIMSIG; k++)
            fi[k] = f[i*DIMSIG+k];
        for(int k=0; k<DIMVECT; k++)
            alphai[k] = alpha[i*DIMVECT+k];
        for(int k=0; k<DIMPOINT; k++)
            gammai[k] = 0.0f;
    }

    // sweep the y-side data one blockDim.x-sized tile at a time
    for(int jstart = 0, tile = 0; jstart < ny; jstart += blockDim.x, tile++) {

        int j = tile * blockDim.x + threadIdx.x;
        if(j<ny) { // we load yj and betaj from device global memory only if j<ny
            // each tile entry is laid out as [point | signal | vector]
            int inc = DIMPOINT + DIMSIG + DIMVECT;
            for(int k=0; k<DIMPOINT; k++)
                SharedData[threadIdx.x*inc+k] = y[j*DIMPOINT+k];
            for(int k=0; k<DIMSIG; k++)
                SharedData[threadIdx.x*inc+DIMPOINT+k] = g[j*DIMSIG+k];
            for(int k=0; k<DIMVECT; k++)
                SharedData[threadIdx.x*inc+DIMPOINT+DIMSIG+k] = beta[j*DIMVECT+k];
        }
        __syncthreads();  // tile fully loaded before any thread reads it

        if(i<nx) { // we compute gammai only if needed
            TYPE *yj, *gj, *betaj;
            yj = SharedData;
            gj = SharedData + DIMPOINT;
            betaj = SharedData + DIMPOINT + DIMSIG;
            int inc = DIMPOINT + DIMSIG + DIMVECT;
            for(int jrel = 0; jrel < blockDim.x && jrel<ny-jstart; jrel++, yj+=inc, gj +=inc, betaj+=inc) {

                // distance between points and signals
                TYPE dist2_geom = sq_dist<TYPE,DIMPOINT>(xi,yj);
                TYPE dist2_sig  = sq_dist<TYPE,DIMSIG>(fi,gj);

                // Angles between normals
                TYPE norm2Xix = 0.0f, norm2Xiy = 0.0f;
                for(int k=0; k<DIMVECT; k++) {
                    norm2Xix += alphai[k]*alphai[k];
                    norm2Xiy += betaj[k]*betaj[k];
                }

                // kernel weight: |alpha||beta| * dK_geom(d2x) * K_sig(d2f)
                TYPE s = sqrt(norm2Xix * norm2Xiy) * dKernel_geom1(dist2_geom,ooSigmax2) * Kernel_sig1(dist2_sig,ooSigmaf2);

                // gradient contribution along (xi - yj)
                for (int k = 0; k < DIMPOINT; k++) {
                    gammai[k] += 2 * ( xi[k] - yj[k] ) *s;
                }
            }
        }
        __syncthreads();  // protect the tile from being overwritten early
    }

    // Save the result in global memory.
    if(i<nx) {
        for (int k = 0; k < DIMPOINT; k++) {
            gamma[i*DIMPOINT+k] = gammai[k];
        }
    }
}
///////////////////////////////////////////////////
template < typename TYPE >
int dXfsimplex(TYPE ooSigmax2,TYPE ooSigmaf2,
TYPE* x_h, TYPE* y_h,
TYPE* f_h, TYPE* g_h,
TYPE* alpha_h, TYPE* beta_h,
TYPE* gamma_h,
int dimPoint, int dimSig, int dimVect, int nx, int ny) {
// Data on the device.
TYPE* x_d;
TYPE* y_d;
TYPE* f_d;
TYPE* g_d;
TYPE* alpha_d;
TYPE* beta_d;
TYPE* gamma_d;
// Allocate arrays on device.
cudaMalloc((void**)&x_d, sizeof(TYPE)*(nx*dimPoint));
cudaMalloc((void**)&y_d, sizeof(TYPE)*(ny*dimPoint));
cudaMalloc((void**)&f_d, sizeof(TYPE)*(nx*dimSig));
cudaMalloc((void**)&g_d, sizeof(TYPE)*(ny*dimSig));
cudaMalloc((void**)&alpha_d, sizeof(TYPE)*(nx*dimVect));
cudaMalloc((void**)&beta_d, sizeof(TYPE)*(ny*dimVect));
cudaMalloc((void**)&gamma_d, sizeof(TYPE)*nx*dimPoint);
// Send data from host to device.
cudaMemcpy(x_d, x_h, sizeof(TYPE)*(nx*dimPoint), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y_h, sizeof(TYPE)*(ny*dimPoint), cudaMemcpyHostToDevice);
cudaMemcpy(f_d, f_h, sizeof(TYPE)*(nx*dimSig), cudaMemcpyHostToDevice);
cudaMemcpy(g_d, g_h, sizeof(TYPE)*(ny*dimSig), cudaMemcpyHostToDevice);
cudaMemcpy(alpha_d, alpha_h, sizeof(TYPE)*(nx*dimVect), cudaMemcpyHostToDevice);
cudaMemcpy(beta_d, beta_h, sizeof(TYPE)*(ny*dimVect), cudaMemcpyHostToDevice);
// Compute on device.
dim3 blockSize;
blockSize.x = CUDA_BLOCK_SIZE; // number of threads in each block
dim3 gridSize;
gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1);
if(dimPoint==1 && dimSig==1 && dimVect==1)
DXfsimplex_on_device<TYPE,1,1,1><<<gridSize,blockSize,blockSize.x*(dimVect+dimSig+dimPoint)*sizeof(TYPE)>>>
(ooSigmax2,ooSigmaf2, x_d, y_d, f_d, g_d, alpha_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==2 && dimSig==1 && dimVect==1)
DXfsimplex_on_device<TYPE,2,1,1><<<gridSize,blockSize,blockSize.x*(dimVect+dimSig+dimPoint)*sizeof(TYPE)>>>
(ooSigmax2,ooSigmaf2, x_d, y_d, f_d, g_d, alpha_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==3 && dimSig==1 && dimVect==1)
DXfsimplex_on_device<TYPE,3,1,1><<<gridSize,blockSize,blockSize.x*(dimVect+dimSig+dimPoint)*sizeof(TYPE)>>>
(ooSigmax2,ooSigmaf2, x_d, y_d, f_d, g_d, alpha_d, beta_d, gamma_d, nx, ny);
else if(dimPoint==4 && dimSig==1 && dimVect==1)
DXfsimplex_on_device<TYPE,4,1,1><<<gridSize,blockSize,blockSize.x*(dimVect+dimSig+dimPoint)*sizeof(TYPE)>>>
(ooSigmax2,ooSigmaf2, x_d, y_d, f_d, g_d, alpha_d, beta_d, gamma_d, nx, ny);
else {
printf("dXfsimplex error: dimensions of Gauss kernel not implemented in cuda\n");
cudaFree(x_d);
cudaFree(y_d);
cudaFree(f_d);
cudaFree(g_d);
cudaFree(alpha_d);
cudaFree(beta_d);
cudaFree(gamma_d);
return(-1);
}
// block until the device has completed
cudaDeviceSynchronize();
// Send data from device to host.
cudaMemcpy(gamma_h, gamma_d, sizeof(TYPE)*nx*dimPoint,cudaMemcpyDeviceToHost);
// Free memory.
cudaFree(x_d);
cudaFree(y_d);
cudaFree(f_d);
cudaFree(g_d);
cudaFree(beta_d);
cudaFree(gamma_d);
cudaFree(alpha_d);
return 0;
}
///////////////////////////////////////////////////
// Exit hook registered with mexAtExit(): tears down the CUDA context so that
// MATLAB can exit (or the MEX file be recompiled) without crashing.
void ExitFcn(void) {
    cudaDeviceReset();
}
//////////////////////////////////////////////////////////////////
///////////////// MEX ENTRY POINT ////////////////////////////////
//////////////////////////////////////////////////////////////////
/* The gateway function.
 *
 * plhs[0]  <- gamma : dimpoint x nx gradient (double)
 * prhs[0..7] -> x (dimpoint x nx), y (dimpoint x ny),
 *               f (signal on x), g (signal on y),
 *               alpha (dimvect x nx), beta (dimvect x ny),
 *               sigmax (positive scalar), sigmaf (positive scalar)
 */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray *prhs[])
{
    // Register an exit function to prevent a crash at matlab exit or recompiling.
    mexAtExit(ExitFcn);
    /* check for proper number of arguments */
    if(nrhs != 8)
        mexErrMsgTxt("8 inputs required.");
    // FIX: was 'nlhs < 1 | nlhs > 1' (bitwise OR); an exact comparison states
    // the intent directly and avoids the bitwise/logical confusion.
    if(nlhs != 1)
        mexErrMsgTxt("One output required.");
    //////////////////////////////////////////////////////////////
    // Input arguments
    //////////////////////////////////////////////////////////////
    int argu = -1;
    //----- the first input argument: x--------------//
    argu++;
    /* create a pointer to the input vectors srcs */
    double *x = mxGetPr(prhs[argu]);
    /* input sources */
    int dimpoint = mxGetM(prhs[argu]); //mrows
    int nx = mxGetN(prhs[argu]); //ncols
    //----- the second input argument: y--------------//
    argu++;
    /* create a pointer to the input vectors trgs */
    double *y = mxGetPr(prhs[argu]);
    /* get the dimensions of the input targets */
    int ny = mxGetN(prhs[argu]); //ncols
    /* check to make sure the first dimension is dimpoint */
    if( mxGetM(prhs[argu])!=dimpoint ) {
        mexErrMsgTxt("Input y must have same number of rows as x.");
    }
    //----- the third input argument: f--------------//
    argu++;
    /* create a pointer to the input vectors srcs */
    double *f = mxGetPr(prhs[argu]);
    /* get dimension of the signal */
    int dimsig = mxGetM(prhs[argu]); //mrows
    /* total element count must be nx, which forces a 1 x nx (or nx x 1) vector */
    if( mxGetM(prhs[argu])*mxGetN(prhs[argu])!=nx ) {
        mexErrMsgTxt("Input f must be a vector with the same number of columns as x.");
    }
    //----- the fourth input argument: g--------------//
    argu++;
    /* create a pointer to the input vectors trgs */
    double *g = mxGetPr(prhs[argu]);
    /* total element count must be ny (same vector shape constraint as f) */
    if( mxGetM(prhs[argu])*mxGetN(prhs[argu])!=ny ) {
        mexErrMsgTxt("Input g must be a vector with the same number of columns as y.");
    }
    //------ the fifth input argument: alpha---------------//
    argu++;
    /* create a pointer to the input vectors wts */
    double *alpha = mxGetPr(prhs[argu]);
    /* get the dimensions of the input weights */
    int dimvect = mxGetM(prhs[argu]);
    /* check to make sure the second dimension is nx */
    if( mxGetN(prhs[argu])!=nx ) {
        mexErrMsgTxt("Input alpha must have same number of columns as x.");
    }
    //------ the sixth input argument: beta---------------//
    argu++;
    /* create a pointer to the input vectors wts */
    double *beta = mxGetPr(prhs[argu]);
    /* beta must pair up with alpha vector-wise */
    if (dimvect != mxGetM(prhs[argu])) {
        mexErrMsgTxt("Input beta must have the same number of row as alpha");
    }
    /* check to make sure the second dimension is ny */
    if( mxGetN(prhs[argu])!=ny ) {
        mexErrMsgTxt("Input beta must have same number of columns as y.");
    }
    //----- the seventh input argument: sigmax-------------//
    argu++;
    /* check to make sure the input argument is a scalar */
    if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) ||
        mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) {
        mexErrMsgTxt("Input sigmax must be a scalar.");
    }
    /* get the input sigma */
    double sigmax = mxGetScalar(prhs[argu]);
    if (sigmax <= 0.0)
        mexErrMsgTxt("Input sigma must be a positive number.");
    // FIX: use a double literal (was 1.0f) for the double-precision reciprocal.
    double oosigmax2 = 1.0/(sigmax*sigmax);
    //----- the eighth input argument: sigmaf-------------//
    argu++;
    /* check to make sure the input argument is a scalar */
    if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) ||
        mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) {
        mexErrMsgTxt("Input sigmaf must be a scalar.");
    }
    /* get the input sigma */
    double sigmaf = mxGetScalar(prhs[argu]);
    if (sigmaf <= 0.0) {
        mexErrMsgTxt("Input sigmaf must be a positive number.");
    }
    double oosigmaf2 = 1.0/(sigmaf*sigmaf);
    //////////////////////////////////////////////////////////////
    // Output arguments
    //////////////////////////////////////////////////////////////
    /* set the output pointer to the output result(vector) */
    plhs[0] = mxCreateDoubleMatrix(dimpoint,nx,mxREAL);
    /* create a C pointer to a copy of the output result(vector)*/
    double *gamma = mxGetPr(plhs[0]);
#if UseCudaOnDoubles
    dXfsimplex<double>(oosigmax2,oosigmaf2,x,y,f,g,alpha,beta,gamma,dimpoint,dimsig,dimvect,nx,ny);
#else
    // Convert every input to single precision before calling the GPU driver.
    // Explicit casts document the intentional double -> float narrowing.
    float *x_f = new float[nx*dimpoint];
    for(int i=0; i<nx*dimpoint; i++)
        x_f[i] = static_cast<float>(x[i]);
    float *y_f = new float[ny*dimpoint];
    for(int i=0; i<ny*dimpoint; i++)
        y_f[i] = static_cast<float>(y[i]);
    float *f_f = new float[nx*dimsig];
    for(int i=0; i<nx*dimsig; i++)
        f_f[i] = static_cast<float>(f[i]);
    float *g_f = new float[ny*dimsig];
    for(int i=0; i<ny*dimsig; i++)
        g_f[i] = static_cast<float>(g[i]);
    float *alpha_f = new float[nx*dimvect];
    for(int i=0; i<nx*dimvect; i++)
        alpha_f[i] = static_cast<float>(alpha[i]);
    float *beta_f = new float[ny*dimvect];
    for(int i=0; i<ny*dimvect; i++)
        beta_f[i] = static_cast<float>(beta[i]);
    // function calls;
    float *gamma_f = new float[nx*dimpoint];
    dXfsimplex<float>(oosigmax2,oosigmaf2,x_f,y_f,f_f,g_f,alpha_f,beta_f,gamma_f,dimpoint,dimsig,dimvect,nx,ny);
    // Widen the result back to the double output buffer.
    for(int i=0; i<nx*dimpoint; i++)
        gamma[i] = gamma_f[i];
    delete [] x_f;
    delete [] y_f;
    delete [] f_f;
    delete [] g_f;
    delete [] alpha_f;
    delete [] beta_f;
    delete [] gamma_f;
#endif
    return;
}
#include <stdio.h>
// 1D cubic-spline smoothing weights: side tap (1/6), centre tap (2/3) and the
// boundary normaliser L + C (5/6) used at the two ends of each line.
#define COEFF_L 0.16666666f
#define COEFF_C 0.66666666f
#define COEFF_B 0.83333333f
// Problem sizes and current metric values, written by the host into constant
// memory before the kernels below are launched.
__device__ __constant__ int c_VoxelNumber;
__device__ __constant__ int3 c_ImageSize;
// Bins: Need 4 values for max 4 channels.
__device__ __constant__ int c_firstTargetBin;
__device__ __constant__ int c_secondTargetBin;
__device__ __constant__ int c_firstResultBin;
__device__ __constant__ int c_secondResultBin;
__device__ __constant__ float c_NormalisedJE;
__device__ __constant__ float c_NMI;
__device__ __constant__ int c_ActiveVoxelNumber;
// First reference/warped channel, its spatial gradient, the (log-)histogram
// buffer, a generic gradient image and the active-voxel mask.
texture<float, 3, cudaReadModeElementType> firstreferenceImageTexture;
texture<float, 1, cudaReadModeElementType> firstwarpedImageTexture;
texture<float4, 1, cudaReadModeElementType> firstwarpedImageGradientTexture;
texture<float, 1, cudaReadModeElementType> histogramTexture;
texture<float4, 1, cudaReadModeElementType> gradientImageTexture;
texture<int, 1, cudaReadModeElementType> maskTexture;
/// Added for the multichannel stuff. We currently only support 2 target and 2 source channels.
/// So we need another texture for the second target and source channel respectively.
texture<float, 3, cudaReadModeElementType> secondreferenceImageTexture;
texture<float, 1, cudaReadModeElementType> secondwarpedImageTexture;
texture<float4, 1, cudaReadModeElementType> secondwarpedImageGradientTexture;
/* *************************************************************** */
// Value of the cubic B-spline basis at x. The basis is even with support
// (-2, 2): 2/3 + (x/2 - 1)x^2 on [0,1), -(x-2)^3/6 on [1,2), 0 elsewhere.
__device__ float GetBasisSplineValue(float x)
{
    x = fabsf(x);
    if(x >= 2.0f)
        return 0.0f;
    if(x < 1.0f)
        return 2.0f/3.0f + (0.5f*x-1.0f)*x*x;
    x -= 2.0f;
    return -x*x*x/6.0f;
}
/* *************************************************************** */
// First derivative of the cubic B-spline basis at ori. The magnitude is
// computed on |ori| and the sign of the input restores the odd symmetry.
__device__ float GetBasisSplineDerivativeValue(float ori)
{
    float x = fabsf(ori);
    if(x >= 2.0f)
        return 0.0f;
    if(x < 1.0f)
        return (1.5f*x-2.0f)*ori;
    x -= 2.0f;
    float value = -0.5f * x * x;
    return (ori < 0.0f) ? -value : value;
}
/* *************************************************************** */
// Per-voxel NMI gradient, 2D single-channel case, Parzen-window scheme.
// One thread per active voxel; the resulting float4 gradient (z and w stay 0)
// is written into voxelNMIGradientArray_d at the voxel's full-image index.
__global__ void reg_getVoxelBasedNMIGradientUsingPW2D_kernel(float4 *voxelNMIGradientArray_d)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ActiveVoxelNumber){
        // The mask maps the dense active-voxel index to the full-image index.
        const int targetIndex = tex1Dfetch(maskTexture,tid);
        int tempIndex=targetIndex;
        const int y = tempIndex/c_ImageSize.x;
        const int x = tempIndex - y*c_ImageSize.x;
        // Normalised-coordinate 3D fetch; a single slice is read at depth 0.5.
        float referenceImageValue = tex3D(firstreferenceImageTexture,
                                          ((float)x+0.5f)/(float)c_ImageSize.x,
                                          ((float)y+0.5f)/(float)c_ImageSize.y,
                                          0.5f);
        float warpedImageValue = tex1Dfetch(firstwarpedImageTexture,targetIndex);
        float4 warpedImageGradient = tex1Dfetch(firstwarpedImageGradientTexture,tid);
        float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        // No computation is performed if any of the points is part of the background.
        // The two is added because the image is resampled between 2 and bin+2:
        // if 64 bins are used the histogram will have 68 bins and the image
        // will be between 2 and 65. The self-comparisons filter out NaNs.
        if( referenceImageValue>0.0f &&
            warpedImageValue>0.0f &&
            referenceImageValue<c_firstTargetBin &&
            warpedImageValue<c_firstResultBin &&
            referenceImageValue==referenceImageValue &&
            warpedImageValue==warpedImageValue){
            // referenceImageValue = floor(referenceImageValue);
            // warpedImageValue = floor(warpedImageValue);
            float2 resDeriv = make_float2(
                warpedImageGradient.x,
                warpedImageGradient.y);
            // Skip voxels whose spatial gradient contains NaNs.
            if(resDeriv.x==resDeriv.x &&
               resDeriv.y==resDeriv.y){
                float jointEntropyDerivative_X = 0.0f;
                float warpedEntropyDerivative_X = 0.0f;
                float referenceEntropyDerivative_X = 0.0f;
                float jointEntropyDerivative_Y = 0.0f;
                float warpedEntropyDerivative_Y = 0.0f;
                float referenceEntropyDerivative_Y = 0.0f;
                // 4x4 Parzen-window support around the (reference, warped) bin pair.
                for(int r=static_cast<int>(referenceImageValue)-1; r<static_cast<int>(referenceImageValue)+3; ++r){
                    if(-1<r && r<c_firstTargetBin){
                        for(int w=static_cast<int>(warpedImageValue)-1; w<static_cast<int>(warpedImageValue)+3; ++w){
                            if(-1<w && w<c_firstResultBin){
                                // Spline window on the reference axis times its
                                // derivative on the warped axis.
                                float commonValue =
                                    GetBasisSplineValue(referenceImageValue-(float)r) *
                                    GetBasisSplineDerivativeValue(warpedImageValue-(float)w);
                                // Joint term followed by the two marginal terms,
                                // all read from the packed histogram buffer.
                                float jointLog = tex1Dfetch(histogramTexture, w*c_firstResultBin+r);
                                float targetLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+r);
                                float resultLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+c_firstTargetBin+w);
                                float temp = commonValue * resDeriv.x;
                                jointEntropyDerivative_X += temp * jointLog;
                                referenceEntropyDerivative_X += temp * targetLog;
                                warpedEntropyDerivative_X += temp * resultLog;
                                temp = commonValue * resDeriv.y;
                                jointEntropyDerivative_Y += temp * jointLog;
                                referenceEntropyDerivative_Y += temp * targetLog;
                                warpedEntropyDerivative_Y += temp * resultLog;
                            } // O<t<bin
                        } // t
                    } // 0<r<bin
                } // r
                float NMI= c_NMI;
                // (Marc) I removed the normalisation by the voxel number as each gradient has to be normalised in the same way
                gradValue.x = (referenceEntropyDerivative_X + warpedEntropyDerivative_X - NMI * jointEntropyDerivative_X) / c_NormalisedJE;
                gradValue.y = (referenceEntropyDerivative_Y + warpedEntropyDerivative_Y - NMI * jointEntropyDerivative_Y) / c_NormalisedJE;
            }
        }
        voxelNMIGradientArray_d[targetIndex]=gradValue;
    }
    return;
}
/* *************************************************************** */
// Per-voxel NMI gradient, 3D single-channel case; identical scheme to the 2D
// kernel above with a third spatial component added to the accumulation.
__global__ void reg_getVoxelBasedNMIGradientUsingPW3D_kernel(float4 *voxelNMIGradientArray_d)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ActiveVoxelNumber){
        // Dense active-voxel index -> full-image index -> (x, y, z).
        const int targetIndex = tex1Dfetch(maskTexture,tid);
        int tempIndex=targetIndex;
        const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
        tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
        const int y = tempIndex/c_ImageSize.x;
        const int x = tempIndex - y*c_ImageSize.x;
        float referenceImageValue = tex3D(firstreferenceImageTexture,
                                          ((float)x+0.5f)/(float)c_ImageSize.x,
                                          ((float)y+0.5f)/(float)c_ImageSize.y,
                                          ((float)z+0.5f)/(float)c_ImageSize.z);
        float warpedImageValue = tex1Dfetch(firstwarpedImageTexture,targetIndex);
        float4 warpedImageGradient = tex1Dfetch(firstwarpedImageGradientTexture,tid);
        float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        // No computation is performed if any of the points is part of the background.
        // The two is added because the image is resampled between 2 and bin+2:
        // if 64 bins are used the histogram will have 68 bins and the image
        // will be between 2 and 65. The self-comparisons filter out NaNs.
        if( referenceImageValue>0.0f &&
            warpedImageValue>0.0f &&
            referenceImageValue<c_firstTargetBin &&
            warpedImageValue<c_firstResultBin &&
            referenceImageValue==referenceImageValue &&
            warpedImageValue==warpedImageValue){
            // referenceImageValue = floor(referenceImageValue);
            // warpedImageValue = floor(warpedImageValue);
            float3 resDeriv = make_float3(
                warpedImageGradient.x,
                warpedImageGradient.y,
                warpedImageGradient.z);
            // Skip voxels whose spatial gradient contains NaNs.
            if( resDeriv.x==resDeriv.x &&
                resDeriv.y==resDeriv.y &&
                resDeriv.z==resDeriv.z){
                float jointEntropyDerivative_X = 0.0f;
                float warpedEntropyDerivative_X = 0.0f;
                float referenceEntropyDerivative_X = 0.0f;
                float jointEntropyDerivative_Y = 0.0f;
                float warpedEntropyDerivative_Y = 0.0f;
                float referenceEntropyDerivative_Y = 0.0f;
                float jointEntropyDerivative_Z = 0.0f;
                float warpedEntropyDerivative_Z = 0.0f;
                float referenceEntropyDerivative_Z = 0.0f;
                // 4x4 Parzen-window support around the (reference, warped) bin pair.
                for(int r=static_cast<int>(referenceImageValue)-1; r<static_cast<int>(referenceImageValue)+3; ++r){
                    if(-1<r && r<c_firstTargetBin){
                        for(int w=static_cast<int>(warpedImageValue)-1; w<static_cast<int>(warpedImageValue)+3; ++w){
                            if(-1<w && w<c_firstResultBin){
                                float commonValue =
                                    GetBasisSplineValue(referenceImageValue-(float)r) *
                                    GetBasisSplineDerivativeValue(warpedImageValue-(float)w);
                                // Joint term followed by the two marginal terms.
                                float jointLog = tex1Dfetch(histogramTexture, w*c_firstResultBin+r);
                                float targetLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+r);
                                float resultLog = tex1Dfetch(histogramTexture, c_firstTargetBin*c_firstResultBin+c_firstTargetBin+w);
                                float temp = commonValue * resDeriv.x;
                                jointEntropyDerivative_X += temp * jointLog;
                                referenceEntropyDerivative_X += temp * targetLog;
                                warpedEntropyDerivative_X += temp * resultLog;
                                temp = commonValue * resDeriv.y;
                                jointEntropyDerivative_Y += temp * jointLog;
                                referenceEntropyDerivative_Y += temp * targetLog;
                                warpedEntropyDerivative_Y += temp * resultLog;
                                temp = commonValue * resDeriv.z;
                                jointEntropyDerivative_Z += temp * jointLog;
                                referenceEntropyDerivative_Z += temp * targetLog;
                                warpedEntropyDerivative_Z += temp * resultLog;
                            } // O<t<bin
                        } // t
                    } // 0<r<bin
                } // r
                float NMI= c_NMI;
                // (Marc) I removed the normalisation by the voxel number as each gradient has to be normalised in the same way
                gradValue.x = (referenceEntropyDerivative_X + warpedEntropyDerivative_X - NMI * jointEntropyDerivative_X) / c_NormalisedJE;
                gradValue.y = (referenceEntropyDerivative_Y + warpedEntropyDerivative_Y - NMI * jointEntropyDerivative_Y) / c_NormalisedJE;
                gradValue.z = (referenceEntropyDerivative_Z + warpedEntropyDerivative_Z - NMI * jointEntropyDerivative_Z) / c_NormalisedJE;
            }
        }
        voxelNMIGradientArray_d[targetIndex]=gradValue;
    }
    return;
}
/* *************************************************************** */
// Multichannel NMI gradient. Hardcoded for 2x2 NMI channels.
// The 4D joint histogram is indexed as (t1, t2, r1, r2) with t1 fastest; the
// marginals are appended after the num_probabilities joint entries.
__global__ void reg_getVoxelBasedNMIGradientUsingPW2x2_kernel(float4 *voxelNMIGradientArray_d)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_ActiveVoxelNumber){
        // Dense active-voxel index -> full-image index -> (x, y, z).
        const int targetIndex = tex1Dfetch(maskTexture,tid);
        int tempIndex=targetIndex;
        const int z = tempIndex/(c_ImageSize.x*c_ImageSize.y);
        tempIndex -= z*c_ImageSize.x*c_ImageSize.y;
        const int y = tempIndex/c_ImageSize.x;
        const int x = tempIndex - y*c_ImageSize.x;
        // x/y: the two reference channels; z/w: the two warped channels.
        float4 voxelValues = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        voxelValues.x = tex3D(firstreferenceImageTexture,
                              ((float)x+0.5f)/(float)c_ImageSize.x,
                              ((float)y+0.5f)/(float)c_ImageSize.y,
                              ((float)z+0.5f)/(float)c_ImageSize.z);
        voxelValues.y = tex3D(secondreferenceImageTexture,
                              ((float)x+0.5f)/(float)c_ImageSize.x,
                              ((float)y+0.5f)/(float)c_ImageSize.y,
                              ((float)z+0.5f)/(float)c_ImageSize.z);
        voxelValues.z = tex1Dfetch(firstwarpedImageTexture,targetIndex);
        voxelValues.w = tex1Dfetch(secondwarpedImageTexture,targetIndex);
        float4 firstwarpedImageGradient = tex1Dfetch(firstwarpedImageGradientTexture,tid);
        float4 secondwarpedImageGradient = tex1Dfetch(secondwarpedImageGradientTexture,tid);
        float4 gradValue = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        // Could remove some tests (which are not really needed) to reduce register
        // count. They should be put in again at some point for completeness and
        // generality. The self-comparisons filter out NaN intensities.
        if (voxelValues.x == voxelValues.x &&
            voxelValues.y == voxelValues.y &&
            voxelValues.z == voxelValues.z &&
            voxelValues.w == voxelValues.w &&
            voxelValues.x >= 0.0f &&
            voxelValues.y >= 0.0f &&
            voxelValues.z >= 0.0f &&
            voxelValues.w >= 0.0f &&
            voxelValues.x < c_firstTargetBin &&
            voxelValues.y < c_secondTargetBin &&
            voxelValues.z < c_firstResultBin &&
            voxelValues.w < c_secondResultBin)
        {
            // Truncate the four intensities to their containing bin.
            voxelValues.x = (float)((int)voxelValues.x);
            voxelValues.y = (float)((int)voxelValues.y);
            voxelValues.z = (float)((int)voxelValues.z);
            voxelValues.w = (float)((int)voxelValues.w);
            // Skip voxels whose spatial gradients contain NaNs.
            if( firstwarpedImageGradient.x==firstwarpedImageGradient.x &&
                firstwarpedImageGradient.y==firstwarpedImageGradient.y &&
                firstwarpedImageGradient.z==firstwarpedImageGradient.z &&
                secondwarpedImageGradient.x==secondwarpedImageGradient.x &&
                secondwarpedImageGradient.y==secondwarpedImageGradient.y &&
                secondwarpedImageGradient.z==secondwarpedImageGradient.z)
            {
                float jointEntropyDerivative_X = 0.0f;
                float warpedEntropyDerivative_X = 0.0f;
                float referenceEntropyDerivative_X = 0.0f;
                float jointEntropyDerivative_Y = 0.0f;
                float warpedEntropyDerivative_Y = 0.0f;
                float referenceEntropyDerivative_Y = 0.0f;
                float jointEntropyDerivative_Z = 0.0f;
                float warpedEntropyDerivative_Z = 0.0f;
                float referenceEntropyDerivative_Z = 0.0f;
                float jointLog, targetLog, resultLog, temp;
                // relative_pos holds the four integer bin coordinates of the
                // current window tap (stored in a float4 for register economy).
                float4 relative_pos = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
                float s_x, s_y, s_z, s_w;
                float common_target_value = 0.0f;
                int target_flat_index, result_flat_index, total_target_entries, num_probabilities;
                // 3^4 Parzen-window support around the 4D bin coordinate.
                for (int i=-1; i<2; ++i) {
                    relative_pos.x = (int)(voxelValues.x+i);
                    if (-1<relative_pos.x && relative_pos.x<c_firstTargetBin) {
                        for (int j=-1; j<2; ++j) {
                            relative_pos.y = (int)(voxelValues.y+j);
                            if (-1<relative_pos.y && relative_pos.y<c_secondTargetBin) {
                                // Spline weights on the two reference axes.
                                s_x = GetBasisSplineValue(relative_pos.x-voxelValues.x);
                                s_y = GetBasisSplineValue(relative_pos.y-voxelValues.y);
                                common_target_value = s_x * s_y;
                                for (int k=-1; k<2; ++k) {
                                    relative_pos.z = (int)(voxelValues.z+k);
                                    if (-1<relative_pos.z && relative_pos.z<c_firstResultBin) {
                                        s_x = GetBasisSplineDerivativeValue(relative_pos.z-voxelValues.z);
                                        s_w = GetBasisSplineValue(relative_pos.z-voxelValues.z);
                                        for (int l=-1; l<2; ++l) {
                                            relative_pos.w = (int)(voxelValues.w+l);
                                            if (-1<relative_pos.w && relative_pos.w<c_secondResultBin) {
                                                target_flat_index = relative_pos.x + relative_pos.y * c_firstTargetBin;
                                                result_flat_index = relative_pos.z + relative_pos.w * c_firstResultBin;
                                                total_target_entries = c_firstTargetBin * c_secondTargetBin;
                                                num_probabilities = total_target_entries * c_firstResultBin * c_secondResultBin;
                                                jointLog = tex1Dfetch(histogramTexture, target_flat_index + (result_flat_index * total_target_entries));
                                                targetLog = tex1Dfetch(histogramTexture, num_probabilities + target_flat_index);
                                                resultLog = tex1Dfetch(histogramTexture, num_probabilities + total_target_entries + result_flat_index);
                                                // Contribution from floating images. These arithmetic operations use
                                                // a lot of registers. Need to look into whether this can be reduced somehow.
                                                s_y = GetBasisSplineValue(relative_pos.w-voxelValues.w);
                                                s_z = GetBasisSplineDerivativeValue(relative_pos.w-voxelValues.w);
                                                temp = (s_x * firstwarpedImageGradient.x * s_y) +
                                                       (s_z * secondwarpedImageGradient.x * s_w);
                                                temp *= common_target_value;
                                                jointEntropyDerivative_X -= temp * jointLog;
                                                referenceEntropyDerivative_X -= temp * targetLog;
                                                warpedEntropyDerivative_X -= temp * resultLog;
                                                temp = (s_x * firstwarpedImageGradient.y * s_y) +
                                                       (s_z * secondwarpedImageGradient.y * s_w);
                                                temp *= common_target_value;
                                                jointEntropyDerivative_Y -= temp * jointLog;
                                                referenceEntropyDerivative_Y -= temp * targetLog;
                                                warpedEntropyDerivative_Y -= temp * resultLog;
                                                temp = (s_x * firstwarpedImageGradient.z * s_y) +
                                                       (s_z * secondwarpedImageGradient.z * s_w);
                                                temp *= common_target_value;
                                                jointEntropyDerivative_Z -= temp * jointLog;
                                                referenceEntropyDerivative_Z -= temp * targetLog;
                                                warpedEntropyDerivative_Z -= temp * resultLog;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                gradValue.x = (referenceEntropyDerivative_X + warpedEntropyDerivative_X - c_NMI * jointEntropyDerivative_X) / c_NormalisedJE;
                gradValue.y = (referenceEntropyDerivative_Y + warpedEntropyDerivative_Y - c_NMI * jointEntropyDerivative_Y) / c_NormalisedJE;
                gradValue.z = (referenceEntropyDerivative_Z + warpedEntropyDerivative_Z - c_NMI * jointEntropyDerivative_Z) / c_NormalisedJE;
            }
        }
        voxelNMIGradientArray_d[targetIndex]=gradValue;
    }
}
/* *************************************************************** */
// Smooths the 4D joint histogram along the first-target axis. One thread per
// (t2, r1, r2) line; each line of c_firstTargetBin contiguous entries is
// convolved with [COEFF_L, COEFF_C, COEFF_L], the two end points being
// renormalised by COEFF_B (= L + C) to compensate the missing neighbour.
__global__ void reg_smoothJointHistogramX_kernel(float *tempHistogram)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
        // The starting index is computed
        unsigned int startingPoint=tid*c_firstTargetBin;
        unsigned int finishPoint=startingPoint+c_firstTargetBin;
        // The first point is computed
        tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
                                        tex1Dfetch(histogramTexture, startingPoint+1) * COEFF_L) / COEFF_B;
        // The middle points are computed
        for(unsigned int i=startingPoint+1; i<finishPoint-1; ++i){
            tempHistogram[i] = tex1Dfetch(histogramTexture, i-1) * COEFF_L +
                               tex1Dfetch(histogramTexture, i) * COEFF_C +
                               tex1Dfetch(histogramTexture, i+1) * COEFF_L;
        }
        // The last point is computed
        tempHistogram[finishPoint-1] = (tex1Dfetch(histogramTexture, finishPoint-2) * COEFF_L +
                                        tex1Dfetch(histogramTexture, finishPoint-1) * COEFF_C) / COEFF_B;
    }
    return;
}
// Smooths the joint histogram along the second-target axis. One thread per
// (t1, r1, r2) line; taps along the line are c_firstTargetBin entries apart.
__global__ void reg_smoothJointHistogramY_kernel(float *tempHistogram)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstTargetBin*c_firstResultBin*c_secondResultBin){
        // The starting index is computed: the folded (t1, r1, r2) coordinates
        // in tid are expanded to skip over the second-target axis extent.
        unsigned int startingPoint=tid + c_firstTargetBin*(c_secondTargetBin-1)*(c_firstResultBin*(int)(tid/(c_firstTargetBin*c_firstResultBin)) +
                                   (int)(tid/c_firstTargetBin - c_firstResultBin * (int)(tid/(c_firstTargetBin*c_firstResultBin))));
        unsigned int increment = c_firstTargetBin;
        unsigned int finishPoint=startingPoint+increment*c_secondTargetBin;
        // The first point is computed (boundary renormalised by COEFF_B)
        tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
                                        tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
        // The middle points are computed
        for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
            tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
                               tex1Dfetch(histogramTexture, i) * COEFF_C +
                               tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
        }
        // The last point is computed
        tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
                                                tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
    }
    return;
}
// Smooths the joint histogram along the first-result axis. One thread per
// (t1, t2, r2) line; taps are firstTargetBin*secondTargetBin entries apart.
__global__ void reg_smoothJointHistogramZ_kernel(float *tempHistogram)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstTargetBin*c_secondTargetBin*c_secondResultBin){
        // The starting index is computed: skip over the first-result extent for
        // every full (t1, t2) plane already consumed by tid.
        unsigned int startingPoint=tid+c_firstTargetBin*c_secondTargetBin*(c_firstResultBin-1)*(int)(tid/(c_firstTargetBin*c_secondTargetBin));
        unsigned int increment = c_firstTargetBin*c_secondTargetBin;
        unsigned int finishPoint=startingPoint+increment*c_firstResultBin;
        // The first point is computed (boundary renormalised by COEFF_B)
        tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
                                        tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
        // The middle points are computed
        for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
            tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
                               tex1Dfetch(histogramTexture, i) * COEFF_C +
                               tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
        }
        // The last point is computed
        tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
                                                tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
    }
    return;
}
// Smooths the joint histogram along the second-result axis. One thread per
// (t1, t2, r1) line; taps are firstTargetBin*secondTargetBin*firstResultBin
// entries apart. The second-result axis is the slowest-varying one, so the
// line simply starts at tid.
__global__ void reg_smoothJointHistogramW_kernel(float *tempHistogram)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
        // The starting index is computed
        unsigned int startingPoint=tid;
        unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
        // FIX: the end of the line must be relative to its start. The previous
        // code used 'increment*c_secondResultBin' alone, so every thread with
        // tid > 0 read the wrong taps for -- and wrote its last smoothed value
        // into -- thread 0's line, leaving its own last entry unwritten. The
        // corrected form matches the X/Y/Z kernels above.
        unsigned int finishPoint=startingPoint+increment*c_secondResultBin;
        // The first point is computed (boundary renormalised by COEFF_B)
        tempHistogram[startingPoint] = (tex1Dfetch(histogramTexture, startingPoint) * COEFF_C +
                                        tex1Dfetch(histogramTexture, startingPoint+increment) * COEFF_L) / COEFF_B;
        // The middle points are computed
        for(unsigned int i=startingPoint+increment; i<finishPoint-increment; i+=increment){
            tempHistogram[i] = tex1Dfetch(histogramTexture, i-increment) * COEFF_L +
                               tex1Dfetch(histogramTexture, i) * COEFF_C +
                               tex1Dfetch(histogramTexture, i+increment) * COEFF_L;
        }
        // The last point is computed
        tempHistogram[finishPoint-increment] = (tex1Dfetch(histogramTexture, finishPoint-2*increment) * COEFF_L +
                                                tex1Dfetch(histogramTexture, finishPoint-increment) * COEFF_C) / COEFF_B;
    }
    return;
}
/// Kernels for marginalisation along the different axes
// Sums the histogram over the first-target axis. One thread per remaining
// (t2, r1, r2) coordinate; Kahan compensated summation limits the float
// rounding error accumulated over the line.
__global__ void reg_marginaliseTargetX_kernel(float *babyHisto)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_secondTargetBin*c_firstResultBin*c_secondResultBin){
        // The line is contiguous: c_firstTargetBin entries starting here.
        unsigned int startingPoint=tid*c_firstTargetBin;
        unsigned int finishPoint=startingPoint+c_firstTargetBin;
        float sum=tex1Dfetch(histogramTexture, startingPoint);
        // c carries the running Kahan compensation term.
        float c=0.f,Y,t;
        for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
            Y = tex1Dfetch(histogramTexture, i) - c;
            t = sum + Y;
            c = (t-sum)-Y;
            sum=t;
        }
        babyHisto[tid]=sum;
    }
}
// Second marginalisation step: sums the already X-marginalised histogram over
// the second-target axis, leaving one value per (r1, r2). Kahan summation.
__global__ void reg_marginaliseTargetXY_kernel(float *babyHisto)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstResultBin*c_secondResultBin){
        // After the X pass the second-target axis is contiguous.
        unsigned int startingPoint=tid*c_secondTargetBin;
        unsigned int finishPoint=startingPoint+c_secondTargetBin;
        float sum=tex1Dfetch(histogramTexture, startingPoint);
        // c carries the running Kahan compensation term.
        float c=0.f,Y,t;
        for(unsigned int i=startingPoint+1; i<finishPoint; ++i){
            Y = tex1Dfetch(histogramTexture, i) - c;
            t = sum + Y;
            c = (t-sum)-Y;
            sum=t;
        }
        babyHisto[tid]=sum;
    }
}
// Sums the histogram over the second-result (slowest) axis, leaving one value
// per (t1, t2, r1). Strided reads, Kahan compensated summation.
__global__ void reg_marginaliseResultX_kernel(float *babyHisto)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstTargetBin*c_secondTargetBin*c_firstResultBin){
        unsigned int startingPoint = tid;
        float sum=tex1Dfetch(histogramTexture, startingPoint);
        // increment by a the cube
        unsigned int increment = c_firstTargetBin*c_secondTargetBin*c_firstResultBin;
        // c carries the running Kahan compensation term.
        float c=0.f,Y,t;
        for (unsigned int i = 1; i < c_secondResultBin; ++i)
        {
            Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
            t = sum + Y;
            c = (t-sum)-Y;
            sum=t;
        }
        babyHisto[tid]=sum;
    }
}
// Final result-side marginalisation: sums over the first-result axis, leaving
// one value per (t1, t2). Strided reads, Kahan compensated summation.
__global__ void reg_marginaliseResultXY_kernel(float *babyHisto)
{
    const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    if(tid<c_firstTargetBin*c_secondTargetBin){
        unsigned int startingPoint=tid;
        float sum=tex1Dfetch(histogramTexture, startingPoint);
        // increment by the plane.
        unsigned int increment = c_firstTargetBin*c_secondTargetBin;
        // c carries the running Kahan compensation term.
        float c=0.f,Y,t;
        for (unsigned int i = 1; i < c_firstResultBin; ++i)
        {
            Y = tex1Dfetch(histogramTexture, startingPoint + i *increment) - c;
            t = sum + Y;
            c = (t-sum)-Y;
            sum=t;
        }
        babyHisto[tid]=sum;
    }
}
#endif | the_stack |
#include <common/types.h>
#include <cub/cub.cuh>
#include <cuda/cub_iterator.cuh>
#include <cuda/cudafuncs.h>
#include <map>
namespace sqaod_cuda {
namespace sq = sqaod;
// Segmented sum on the device: reduces nSegments segments (located through an
// OffIt offset iterator, each segLen elements long) read through InIt, writing
// one value of type V per segment through OutIt. configure() selects a kernel
// variant for the problem shape; operator() dispatches through it.
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
struct DeviceSegmentedSumType : sq::NullBase {
    typedef DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen> SelfType;
    DeviceSegmentedSumType(Device &device, DeviceStream *devStream);
    DeviceSegmentedSumType(DeviceStream *devStream);
    virtual ~DeviceSegmentedSumType() WAR_VC_NOTHROW;
    // Select the sum method for (segLen, nSegments). useTempStorage presumably
    // switches between the pre-allocated and on-demand temporary buffers
    // (d_tempStoragePreAlloc_ vs d_tempStorage_) -- confirm in the definition.
    void configure(sq::SizeType segLen, sq::SizeType nSegments, bool useTempStorage);
    // Run the configured segmented sum.
    void operator()(InIt in, OutIt out, OffIt segOffset);
    /*
     * private members
     */
    // Pointer-to-member type of the kernel-launching methods below.
    typedef void (SelfType::*SumMethod)(InIt in, OutIt out, OffIt segOffset);
    template<int ITEMS_PER_THREAD> void
    segmentedSum_32(InIt in, OutIt out, OffIt segOffset);
    template<int ITEMS_PER_THREAD> void
    segmentedSum_64(InIt in, OutIt out, OffIt segOffset);
    template<int BLOCK_DIM, int ITEMS_PER_THREAD> void
    segmentedSum_Block(InIt in, OutIt out, OffIt segOffset);
    template<int N_REDUCE_THREADS, int ITEMS_PER_THREAD, int OUTPUT_PER_SEG> void
    segmentedSum_2step(InIt in, OutIt out, OffIt segOffset);
    // Register a method under the key base * nItems for later lookup.
    void reg(int base, int nItems, SumMethod method) {
        methodMap_[base * nItems] = method;
    }
    void registerMethods();
    sq::SizeType segLen_;
    sq::SizeType nSegments_;
    V *d_tempStorage_;
    V *d_tempStoragePreAlloc_;
    sq::SizeType tempStorageSize_;
    DeviceStream *devStream_;
    DeviceObjectAllocator *devAlloc_;
    typedef std::map<sq::SizeType, SumMethod> MethodMap;
    MethodMap methodMap_;
    SumMethod sumMethod_;
    cudaStream_t stream_;
};
/*
 * Kernels
 */
// Threads per warp; used to size per-warp shared storage and strides below.
enum { WARP_SIZE = 32 };
// Generic fallback: a scalar accumulator reduces to itself.
template<class V> __device__ __forceinline__
V reduce(const V &v) {
    return v;
}
// Horizontal sum of a float4 accumulator (pairwise for balanced rounding).
__device__ __forceinline__
float reduce(const float4 &v4) {
    return (v4.x + v4.y) + (v4.z + v4.w);
}
// Horizontal sum of a double4 accumulator (pairwise for balanced rounding).
__device__ __forceinline__
double reduce(const double4 &v4) {
    return (v4.x + v4.y) + (v4.z + v4.w);
}
// Component-wise addition of two float4 values.
__device__ __forceinline__
float4 operator+(const float4 &lhs, const float4 &rhs) {
    float4 r;
    r.x = lhs.x + rhs.x;
    r.y = lhs.y + rhs.y;
    r.z = lhs.z + rhs.z;
    r.w = lhs.w + rhs.w;
    return r;
}
// Component-wise addition of two double4 values.
__device__ __forceinline__
double4 operator+(const double4 &lhs, const double4 &rhs) {
    double4 r;
    r.x = lhs.x + rhs.x;
    r.y = lhs.y + rhs.y;
    r.z = lhs.z + rhs.z;
    r.w = lhs.w + rhs.w;
    return r;
}
// Sums the first ITEMS entries of v (ITEMS in [1, 8], compile-time constant).
// The explicitly pairwise grouping is deliberate: it fixes the floating-point
// association, so keep the order when modifying this switch.
template<int ITEMS, class V>
__device__ __forceinline__ static
V sumArray(const V *v) {
    /* Max 8 */
    switch (ITEMS) {
    case 1:
        return v[0];
    case 2:
        return v[0] + v[1];
    case 3:
        return v[0] + v[1] + v[2];
    case 4:
        return (v[0] + v[1]) + (v[2] + v[3]);
    case 5:
        return (v[0] + v[1]) + (v[2] + v[3]) + v[4];
    case 6:
        return (v[0] + v[1]) + (v[2] + v[3]) + (v[4] + v[5]);
    case 7:
        return ((v[0] + v[1]) + (v[2] + v[3])) + ((v[4] + v[5]) + v[6]);
    case 8:
        return ((v[0] + v[1]) + (v[2] + v[3])) + ((v[4] + v[5]) + (v[6] + v[7]));
    default:
        break;
    }
    // Unreachable for the supported ITEMS values.
    return V();
}
/* size <= 32 */
// Segmented sum where each sub-segment is reduced by a single warp: every
// thread strided-loads ITEMS_PER_THREAD items (only the last load is guarded
// against segLen), reduces them locally, then the warp combines the partials
// with cub::WarpReduce. Lane 0 writes one partial per sub-segment, giving
// OUTPUT_PER_SEG outputs per segment.
template<int BLOCK_DIM, int ITEMS_PER_THREAD, int OUTPUT_PER_SEG, class InIt, class OutIt, class OffIt>
__global__ static void
segmentedSumKernel_32(InIt in, OutIt out,
                      OffIt segOffset, sq::SizeType segLen, sq::SizeType nSegments) {
    typedef typename std::iterator_traits<OutIt>::value_type V;
    typedef typename std::iterator_traits<InIt>::value_type Vin;
    typedef typename std::iterator_traits<OffIt>::value_type OffsetT;
    // Global sub-segment handled by this thread's warp, its parent segment,
    // and this thread's starting position inside the segment.
    int iSubSegment = BLOCK_DIM / WARP_SIZE * blockIdx.x + threadIdx.x / WARP_SIZE;
    int iSegment = iSubSegment / OUTPUT_PER_SEG;
    int iSegIdx = (iSubSegment % OUTPUT_PER_SEG) * WARP_SIZE + threadIdx.x % WARP_SIZE;
    enum { INPUT_STRIDE = WARP_SIZE * OUTPUT_PER_SEG };
    V sum = V();
    if (iSegment < nSegments) {
        OffsetT segBegin = segOffset[iSegment];
        Vin v[ITEMS_PER_THREAD];
        // All but the last load are unguarded: segLen is assumed to cover
        // INPUT_STRIDE * (ITEMS_PER_THREAD - 1) elements -- TODO confirm the
        // configure() dispatch guarantees this.
#pragma unroll
        for (int idx = 0; idx < ITEMS_PER_THREAD - 1; ++idx) {
            int inIdx = INPUT_STRIDE * idx + iSegIdx;
            v[idx] = in[segBegin + inIdx];
        }
        int lastSegIdx = iSegIdx + INPUT_STRIDE * (ITEMS_PER_THREAD - 1);
        v[ITEMS_PER_THREAD - 1] = (lastSegIdx < segLen) ? in[segBegin + lastSegIdx] : Vin();
        sum = reduce(sumArray<ITEMS_PER_THREAD>(v));
        typedef cub::WarpReduce<V> WarpReduce;
        __shared__ typename WarpReduce::TempStorage temp_storage[BLOCK_DIM / WARP_SIZE];
        /* FIX: temp_storage has BLOCK_DIM / WARP_SIZE slots per block, but it
         * was indexed with the *global* sub-segment index (iSubSegment), which
         * runs past the array bound for every block with blockIdx.x > 0 --
         * an out-of-bounds shared-memory access. Index with the warp id
         * within the block instead. */
        sum = WarpReduce(temp_storage[threadIdx.x / WARP_SIZE]).Sum(sum);
        if ((threadIdx.x % warpSize) == 0)
            out[iSubSegment] = sum;
    }
}
/* Segmented sum where 64 threads (two warps) cooperate per sub-segment.
 * Each thread strided-loads ITEMS_PER_THREAD inputs (only the last load is
 * range-checked), reduces them in registers, each warp reduces via
 * cub::WarpReduce, and the two per-warp partials of a sub-segment are added
 * by the sub-segment's first thread.  Layout assumes BLOCK_DIM is a multiple
 * of N_REDUCE_THREADS (e.g. 128 / 64 -> two sub-segments per block). */
template<int BLOCK_DIM, int ITEMS_PER_THREAD, int OUTPUT_PER_SEG,
         class InIt, class OutIt, class OffIt>
__global__ static void
segmentedSumKernel_64(InIt in, OutIt out, OffIt segOffset, sq::SizeType segLen,
                      sq::SizeType nSegments) {
    enum {
        N_REDUCE_THREADS = 64,
        WARPS_IN_BLOCK = BLOCK_DIM / WARP_SIZE,
        INPUT_STRIDE = N_REDUCE_THREADS * OUTPUT_PER_SEG,
    };
    typedef typename std::iterator_traits<OutIt>::value_type V;
    typedef typename std::iterator_traits<InIt>::value_type VIn;
    typedef typename std::iterator_traits<OffIt>::value_type OffsetT;
    int iSubSegment = (BLOCK_DIM / N_REDUCE_THREADS) * blockIdx.x + (threadIdx.x / N_REDUCE_THREADS);
    int iSegment = iSubSegment / OUTPUT_PER_SEG;
    int warpId = threadIdx.x / WARP_SIZE;
    /* NOTE(review): this computes warpId / (BLOCK_DIM / N_REDUCE_THREADS); for the
     * BLOCK_DIM = 128 configuration used below it happens to equal the sub-segment
     * index within the block — confirm if other BLOCK_DIMs are ever enabled. */
    int iSubSegIdxInBlock = warpId / (BLOCK_DIM / N_REDUCE_THREADS);
    int iSegIdx = (iSubSegment % OUTPUT_PER_SEG) * N_REDUCE_THREADS + threadIdx.x % N_REDUCE_THREADS;
    V sum = V();
    if (iSegment < nSegments) {
        OffsetT segBegin = segOffset[iSegment];
        VIn v[ITEMS_PER_THREAD];
#pragma unroll
        for (int idx = 0; idx < ITEMS_PER_THREAD - 1; ++idx) {
            int inIdx = INPUT_STRIDE * idx + iSegIdx;
            v[idx] = in[segBegin + inIdx];
        }
        /* only the final load may fall past the end of the segment */
        int lastSegIdx = iSegIdx + INPUT_STRIDE * (ITEMS_PER_THREAD - 1);
        v[ITEMS_PER_THREAD - 1] = (lastSegIdx < segLen) ? in[segBegin + lastSegIdx] : VIn();
        sum = reduce(sumArray<ITEMS_PER_THREAD>(v));
        typedef cub::WarpReduce<V> WarpReduce;
        __shared__ typename WarpReduce::TempStorage temp_storage[WARPS_IN_BLOCK];
        __shared__ V partialSum[WARPS_IN_BLOCK];
        sum = WarpReduce(temp_storage[warpId]).Sum(sum);
        if (threadIdx.x % WARP_SIZE == 0)
            partialSum[warpId] = sum;
        __syncthreads();
        /* first thread of each 64-thread group combines its two warp partials */
        if ((threadIdx.x % N_REDUCE_THREADS) == 0) {
            int shMemOffset = iSubSegIdxInBlock * 2;
            sum = partialSum[shMemOffset] + partialSum[shMemOffset + 1];
            out[iSubSegment] = sum;
        }
    }
}
/* Segmented sum with one thread block per segment.  Each thread strided-loads
 * ITEMS_PER_THREAD inputs (only the last load is range-checked against
 * segLen), reduces them in registers, then cub::BlockReduce combines the
 * block and thread 0 writes the segment total. */
template<int BLOCK_DIM, int ITEMS_PER_THREAD, class InIt, class OutIt, class OffIt>
__global__ static void
segmentedSumKernel_Block(InIt in, OutIt out,
                         OffIt segOffset, int segLen, sq::SizeType nSegments) {
    typedef typename std::iterator_traits<OutIt>::value_type V;
    typedef typename std::iterator_traits<InIt>::value_type Vin;
    typedef typename std::iterator_traits<OffIt>::value_type OffsetT;
    int iSegment = blockIdx.x;
    int iSegIdx = threadIdx.x;
    V sum = V();
    if (iSegment < nSegments) {
        OffsetT segBegin = segOffset[iSegment];
        Vin v[ITEMS_PER_THREAD];
#pragma unroll
        for (int idx = 0; idx < ITEMS_PER_THREAD - 1; ++idx)
            v[idx] = in[segBegin + iSegIdx + BLOCK_DIM * idx];
        /* only the final load may fall past the end of the segment */
        int lastSegIdx = iSegIdx + BLOCK_DIM * (ITEMS_PER_THREAD - 1);
        v[ITEMS_PER_THREAD - 1] = (lastSegIdx < segLen) ? in[segBegin + lastSegIdx] : Vin();
        sum = reduce(sumArray<ITEMS_PER_THREAD>(v));
        typedef cub::BlockReduce<V, BLOCK_DIM> BlockReduce;
        __shared__ typename BlockReduce::TempStorage temp_storage;
        sum = BlockReduce(temp_storage).Sum(sum);
        if (threadIdx.x == 0)
            out[iSegment] = sum;
    }
}
/* First step of the two-step sum for long segments: OUTPUT_PER_SEG blocks
 * cooperate on one segment, each producing one partial sum at
 * out[blockIdx.x].  Inputs are read with stride BLOCK_DIM * OUTPUT_PER_SEG;
 * only the last load per thread is range-checked against segLen.  The
 * partials are later reduced by a second kernel launch. */
template<int BLOCK_DIM, int ITEMS_PER_THREAD, int OUTPUT_PER_SEG,
         class InIt, class OutIt, class OffIt>
__global__ static void
segmentedSumKernel_Striped(InIt in, OutIt out,
                           OffIt segOffset, int segLen, sq::SizeType nSegments) {
    typedef typename std::iterator_traits<OutIt>::value_type V;
    typedef typename std::iterator_traits<InIt>::value_type Vin;
    typedef typename std::iterator_traits<OffIt>::value_type OffsetT;
    int iSegment = blockIdx.x / OUTPUT_PER_SEG;
    int iSegBlock = blockIdx.x % OUTPUT_PER_SEG;
    int iSegIdx = BLOCK_DIM * iSegBlock + threadIdx.x;
    enum { INPUT_STRIDE = BLOCK_DIM * OUTPUT_PER_SEG };
    V sum = V();
    if (iSegment < nSegments) {
        OffsetT segBegin = segOffset[iSegment];
        Vin v[ITEMS_PER_THREAD];
#pragma unroll
        for (int idx = 0; idx < ITEMS_PER_THREAD - 1; ++idx) {
            int inIdx = INPUT_STRIDE * idx + iSegIdx;
            v[idx] = in[segBegin + inIdx];
        }
        /* only the final load may fall past the end of the segment */
        int lastSegIdx = iSegIdx + INPUT_STRIDE * (ITEMS_PER_THREAD - 1);
        v[ITEMS_PER_THREAD - 1] = (lastSegIdx < segLen) ? in[segBegin + lastSegIdx] : Vin();
        sum = reduce(sumArray<ITEMS_PER_THREAD>(v));
        typedef cub::BlockReduce<V, BLOCK_DIM> BlockReduce;
        __shared__ typename BlockReduce::TempStorage temp_storage;
        sum = BlockReduce(temp_storage).Sum(sum);
        if (threadIdx.x == 0)
            out[blockIdx.x] = sum;
    }
}
/*
* host kernel callers
*/
/* Launch segmentedSumKernel_32: one warp per segment, four segments per
 * 128-thread block.  Uses cudaLaunchKernel instead of the <<<>>> syntax
 * (the disabled #if 0 branch keeps the equivalent triple-chevron launch for
 * reference — presumably to allow compilation of this TU by a non-NVCC host
 * compiler; confirm).  NOTE(review): the launch result is not checked. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
template<int ITEMS_PER_THREAD> inline void
DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
segmentedSum_32(InIt in, OutIt out, OffIt segOffset) {
    enum { BLOCK_DIM = 128 };
    dim3 blockDim(BLOCK_DIM);
    dim3 gridDim(divru(nSegments_, 4));
#if 0
    segmentedSumKernel_32<BLOCK_DIM, ITEMS_PER_THREAD, 1>
            <<<gridDim, blockDim, 0, stream_>>>(in, out, segOffset, segLen_, nSegments_);
#else
    void *func = (void*)segmentedSumKernel_32<BLOCK_DIM, ITEMS_PER_THREAD, 1, InIt, OutIt, OffIt>;
    void *args[] = {(void*)&in, (void*)&out, (void*)&segOffset, (void*)&segLen_, (void*)&nSegments_, NULL};
    cudaLaunchKernel(func, gridDim, blockDim, args, 0, stream_);
#endif
    DEBUG_SYNC;
}
/* Launch segmentedSumKernel_64: 64 threads per segment, two segments per
 * 128-thread block.  See segmentedSum_32 for the cudaLaunchKernel rationale.
 * NOTE(review): the launch result is not checked. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
template<int ITEMS_PER_THREAD> inline
void DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
segmentedSum_64(InIt in, OutIt out, OffIt segOffset) {
    enum { BLOCK_DIM = 128 };
    dim3 blockDim(BLOCK_DIM);
    dim3 gridDim(divru(nSegments_, 2));
#if 0
    segmentedSumKernel_64<BLOCK_DIM, ITEMS_PER_THREAD, 1>
            <<<gridDim, blockDim, 0, stream_>>>(in, out, segOffset, segLen_, nSegments_);
#else
    void *func = (void*)segmentedSumKernel_64<BLOCK_DIM, ITEMS_PER_THREAD, 1, InIt, OutIt, OffIt>;
    void *args[] = {(void*)&in, (void*)&out, (void*)&segOffset, (void*)&segLen_, (void*)&nSegments_, NULL};
    cudaLaunchKernel(func, gridDim, blockDim, args, 0, stream_);
#endif
    DEBUG_SYNC;
}
/* Launch segmentedSumKernel_Block: one BLOCK_DIM-thread block per segment.
 * See segmentedSum_32 for the cudaLaunchKernel rationale.
 * NOTE(review): the launch result is not checked. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
template<int BLOCK_DIM, int ITEMS_PER_THREAD> inline
void DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
segmentedSum_Block(InIt in, OutIt out, OffIt segOffset) {
    dim3 blockDim(BLOCK_DIM);
    dim3 gridDim(nSegments_);
#if 0
    segmentedSumKernel_Block<BLOCK_DIM, ITEMS_PER_THREAD>
            <<<gridDim, blockDim, 0, stream_>>>(in, out, segOffset, segLen_, nSegments_);
#else
    void *func = (void*)segmentedSumKernel_Block<BLOCK_DIM, ITEMS_PER_THREAD, InIt, OutIt, OffIt>;
    void *args[] = {(void*)&in, (void*)&out, (void*)&segOffset, (void*)&segLen_, (void*)&nSegments_, NULL};
    cudaLaunchKernel(func, gridDim, blockDim, args, 0, stream_);
#endif
    DEBUG_SYNC;
}
/* Two-step sum for long segments.  Step 1: OUTPUT_PER_SEG partial sums per
 * segment are written to d_tempStorage_, using the warp / 64-thread / striped
 * block kernel depending on N_REDUCE_THREADS.  Step 2: the OUTPUT_PER_SEG
 * partials of each segment are reduced with a single-item warp pass, reading
 * the temp storage through a Linear offset iterator. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen>
template<int N_REDUCE_THREADS, int ITEMS_PER_THREAD, int OUTPUT_PER_SEG> inline
void DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
segmentedSum_2step(InIt in, OutIt out, OffIt segOffset) {
    if (N_REDUCE_THREADS == 32) {
        enum { BLOCK_DIM = 128 };
        dim3 blockDim(BLOCK_DIM);
        dim3 gridDim(divru(nSegments_ * OUTPUT_PER_SEG, 4));
        segmentedSumKernel_32<BLOCK_DIM, ITEMS_PER_THREAD, OUTPUT_PER_SEG>
                <<<gridDim, blockDim, 0, stream_>>>(in, d_tempStorage_, segOffset, segLen_, nSegments_);
        DEBUG_SYNC;
    }
    else if (N_REDUCE_THREADS == 64) {
        enum { BLOCK_DIM = 128 };
        dim3 blockDim(BLOCK_DIM);
        dim3 gridDim(divru(nSegments_ * OUTPUT_PER_SEG, 2));
        segmentedSumKernel_64<BLOCK_DIM, ITEMS_PER_THREAD, OUTPUT_PER_SEG>
                <<<gridDim, blockDim, 0, stream_>>>(in, d_tempStorage_, segOffset, segLen_, nSegments_);
        DEBUG_SYNC;
    }
    else {
        enum { BLOCK_DIM = N_REDUCE_THREADS };
        dim3 blockDim(BLOCK_DIM);
        dim3 gridDim(nSegments_ * OUTPUT_PER_SEG);
        segmentedSumKernel_Striped<BLOCK_DIM, ITEMS_PER_THREAD, OUTPUT_PER_SEG>
                <<<gridDim, blockDim, 0, stream_>>>(in, d_tempStorage_, segOffset, segLen_, nSegments_);
        DEBUG_SYNC;
    }
    /* step 2: reduce the OUTPUT_PER_SEG partials of each segment */
    enum { BLOCK_DIM_32 = 128 };
    dim3 blockDim(BLOCK_DIM_32);
    dim3 gridDim(divru(nSegments_, BLOCK_DIM_32 / WARP_SIZE));
    segmentedSumKernel_32<BLOCK_DIM_32, 1, 1>
            <<<gridDim, blockDim, 0, stream_>>>(d_tempStorage_, out, Linear(OUTPUT_PER_SEG), OUTPUT_PER_SEG, nSegments_);
    DEBUG_SYNC;
}
/*
* host methods
*/
/* Construct bound to a Device: falls back to the device's default stream when
 * devStream is NULL, and takes the device's object allocator for optional
 * temp-storage pre-allocation.
 * Fix: d_tempStorage_ and tempStorageSize_ were left uninitialized here
 * (the DeviceStream* constructor initializes d_tempStorage_); operator()
 * reads tempStorageSize_, so zero-initialize both. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline
DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
DeviceSegmentedSumType(Device &device, DeviceStream *devStream) {
    d_tempStoragePreAlloc_ = NULL;
    d_tempStorage_ = NULL;
    tempStorageSize_ = 0;
    segLen_ = 0;
    nSegments_ = 0;
    if (devStream == NULL)
        devStream = device.defaultStream();
    devStream_ = devStream;
    devAlloc_ = device.objectAllocator();
    stream_ = NULL;
    registerMethods();
    if (devStream_ != NULL)
        stream_ = devStream_->getCudaStream();
}
/* Construct bound to a stream only (no Device, so no object allocator —
 * configure() must be called with useTempStorage == true for long segments).
 * Fix: the devStream parameter was ignored — the original set
 * devStream_ = NULL and then tested `if (devStream_ != NULL)`, dead code
 * that left stream_ NULL regardless of the argument.  Store the parameter
 * so the caller's stream is actually used.  tempStorageSize_ is also
 * zero-initialized, matching the other constructor. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline
DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
DeviceSegmentedSumType(DeviceStream *devStream){
    d_tempStoragePreAlloc_ = NULL;
    d_tempStorage_ = NULL;
    devStream_ = devStream;
    devAlloc_ = NULL;
    segLen_ = 0;
    nSegments_ = 0;
    tempStorageSize_ = 0;
    stream_ = NULL;
    registerMethods();
    if (devStream_ != NULL)
        stream_ = devStream_->getCudaStream();
}
/* Release the pre-allocated temp storage, if any.  Per-call temp storage
 * obtained from devStream_ in operator() is not released here — presumably
 * the stream allocator owns it; confirm. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline
DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
~DeviceSegmentedSumType() WAR_VC_NOTHROW {
    if (d_tempStoragePreAlloc_ != NULL)
        devAlloc_->deallocate(d_tempStoragePreAlloc_);
}
/* Run the segmented sum selected by configure().  Temp storage comes from the
 * pre-allocation when present, otherwise from the stream allocator per call
 * (requires a non-NULL devStream_ when tempStorageSize_ != 0). */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline
void DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
operator()(InIt in, OutIt out, OffIt segOffset) {
    if (d_tempStoragePreAlloc_ != NULL)
        d_tempStorage_ = d_tempStoragePreAlloc_;
    else if (tempStorageSize_ != 0)
        devStream_->allocate(&d_tempStorage_, tempStorageSize_);
    (this->*sumMethod_)(in, out, segOffset);
}
/* Choose the kernel for (segLen, nSegments).  segLen is divided by vecLen
 * (vectorized loads), then the method map is searched for the smallest
 * registered capacity >= segLen_.  Segments longer than 4096 need the
 * two-step path and 32 partials per segment of temp storage.
 * NOTE(review): the 4096 threshold tests the raw segLen while the method is
 * chosen from segLen_ (= segLen / vecLen) — confirm this asymmetry is
 * intended for vecLen > 1.  Also, reconfiguring with useTempStorage == false
 * overwrites d_tempStoragePreAlloc_ without freeing the previous allocation. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline void
DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::configure(sq::SizeType segLen, sq::SizeType nSegments, bool useTempStorage) {
    segLen_ = sq::divru(segLen, vecLen);
    nSegments_ = nSegments;
    /* choose kernel */
    typename MethodMap::iterator it = methodMap_.lower_bound(segLen_);
    throwErrorIf(it == methodMap_.end(), "Segment length (%d) not supported.", segLen_);
    sumMethod_ = it->second;
    d_tempStorage_ = NULL;
    tempStorageSize_ = 0;
    if (4096 < segLen) {
        tempStorageSize_ = 32 * nSegments_;
        if (!useTempStorage)
            devAlloc_->allocate(&d_tempStoragePreAlloc_, tempStorageSize_);
    }
}
/*
* Method registration
*/
/* Populate the method map.  reg(base, nItems, method) keys the entry by the
 * method's capacity base * nItems (see reg()); configure() then picks the
 * entry via lower_bound, i.e. the smallest capacity that covers segLen_.
 * Coverage: warp kernels up to 256, 64-thread kernels to 512, block kernels
 * to 4096, then the two-step path to 65536.  The commented-out registrations
 * are capacity ranges already covered by a cheaper kernel. */
template<class V, class InIt, class OutIt, class OffIt, int vecLen> inline
void DeviceSegmentedSumType<V, InIt, OutIt, OffIt, vecLen>::
registerMethods() {
    reg(32, 1, &SelfType::segmentedSum_32<1>);
    reg(32, 2, &SelfType::segmentedSum_32<2>);
    reg(32, 3, &SelfType::segmentedSum_32<3>);
    reg(32, 4, &SelfType::segmentedSum_32<4>);
    reg(32, 5, &SelfType::segmentedSum_32<5>);
    reg(32, 6, &SelfType::segmentedSum_32<6>);
    reg(32, 7, &SelfType::segmentedSum_32<7>);
    reg(32, 8, &SelfType::segmentedSum_32<8>);
    //reg(64, 1, &SelfType::segmentedSum_64<1>);
    //reg(64, 2, &SelfType::segmentedSum_64<2>);
    //reg(64, 3, &SelfType::segmentedSum_64<3>);
    //reg(64, 4, &SelfType::segmentedSum_64<4>);
    reg(64, 5, &SelfType::segmentedSum_64<5>);
    reg(64, 6, &SelfType::segmentedSum_64<6>);
    reg(64, 7, &SelfType::segmentedSum_64<7>);
    reg(64, 8, &SelfType::segmentedSum_64<8>);
    //reg(128, 1, &SelfType::segmentedSum_Block<128, 1>);
    //reg(128, 2, &SelfType::segmentedSum_Block<128, 2>);
    //reg(128, 3, &SelfType::segmentedSum_Block<128, 3>);
    //reg(128, 4, &SelfType::segmentedSum_Block<128, 4>);
    reg(128, 5, &SelfType::segmentedSum_Block<128, 5>);
    reg(128, 6, &SelfType::segmentedSum_Block<128, 6>);
    reg(128, 7, &SelfType::segmentedSum_Block<128, 7>);
    reg(128, 8, &SelfType::segmentedSum_Block<128, 8>);
    //reg(256, 1, &SelfType::segmentedSum_Block<256, 1>);
    //reg(256, 2, &SelfType::segmentedSum_Block<256, 2>);
    //reg(256, 3, &SelfType::segmentedSum_Block<256, 3>);
    //reg(256, 4, &SelfType::segmentedSum_Block<256, 4>);
    reg(256, 5, &SelfType::segmentedSum_Block<256, 5>);
    reg(256, 6, &SelfType::segmentedSum_Block<256, 6>);
    reg(256, 7, &SelfType::segmentedSum_Block<256, 7>);
    reg(256, 8, &SelfType::segmentedSum_Block<256, 8>);
    //reg(512, 1, &SelfType::segmentedSum_Block<512, 1>);
    //reg(512, 2, &SelfType::segmentedSum_Block<512, 2>);
    //reg(512, 3, &SelfType::segmentedSum_Block<512, 3>);
    //reg(512, 4, &SelfType::segmentedSum_Block<512, 4>);
    reg(512, 5, &SelfType::segmentedSum_Block<512, 5>);
    reg(512, 6, &SelfType::segmentedSum_Block<512, 6>);
    reg(512, 7, &SelfType::segmentedSum_Block<512, 7>);
    reg(512, 8, &SelfType::segmentedSum_Block<512, 8>);
    reg(1024, 5, &SelfType::segmentedSum_2step<32, 5, 32>);
    reg(1024, 6, &SelfType::segmentedSum_2step<32, 6, 32>);
    reg(1024, 7, &SelfType::segmentedSum_2step<32, 7, 32>);
    reg(1024, 8, &SelfType::segmentedSum_2step<32, 8, 32>);
    reg(2048, 5, &SelfType::segmentedSum_2step<64, 5, 32>);
    reg(2048, 6, &SelfType::segmentedSum_2step<64, 6, 32>);
    reg(2048, 7, &SelfType::segmentedSum_2step<64, 7, 32>);
    reg(2048, 8, &SelfType::segmentedSum_2step<64, 8, 32>);
#if 0
    reg(4096, 5, &SelfType::segmentedSum_2step<128, 5, 32>);
    reg(4096, 6, &SelfType::segmentedSum_2step<128, 6, 32>);
    reg(4096, 7, &SelfType::segmentedSum_2step<128, 7, 32>);
    reg(4096, 8, &SelfType::segmentedSum_2step<128, 8, 32>);
    reg(8192, 5, &SelfType::segmentedSum_2step<256, 5, 32>);
    reg(8192, 6, &SelfType::segmentedSum_2step<256, 6, 32>);
    reg(8192, 7, &SelfType::segmentedSum_2step<256, 7, 32>);
    reg(8192, 8, &SelfType::segmentedSum_2step<256, 8, 32>);
#endif
}
/* Segmented sum over the rows of a device matrix: each row (stride apart in
 * memory) is one segment, summed into one output element. */
template<class V, class OutIt>
struct DeviceBatchedSum : public DeviceSegmentedSumType<V, V*, OutIt, Linear, 1> {
    typedef DeviceSegmentedSumType<V, V*, OutIt, Linear, 1> Base;
    using Base::sumMethod_;
    DeviceBatchedSum(Device &device, DeviceStream *devStream = NULL)
            : Base(device, devStream) { }
    DeviceBatchedSum(DeviceStream *devStream) : Base(devStream) { }
    /* Row offsets are generated by a Linear iterator over the matrix stride. */
    void operator()(const DeviceMatrixType<V> &d_x, OutIt out) {
        Base::operator()(d_x.d_data, out, Linear(d_x.stride));
    }
};
}
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/io/class_io/trianglemesh_io.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/filesystem.h"
#define TINYOBJLOADER_IMPLEMENTATION
#include "tiny_obj_loader.h"
namespace cupoch {
namespace io {
bool ReadTriangleMeshFromOBJ(const std::string& filename,
geometry::TriangleMesh& mesh,
bool print_progress) {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string warn;
std::string err;
std::string mtl_base_path =
utility::filesystem::GetFileParentDirectory(filename);
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err,
filename.c_str(), mtl_base_path.c_str());
if (!warn.empty()) {
utility::LogWarning("Read OBJ failed: {}", warn);
}
if (!err.empty()) {
utility::LogWarning("Read OBJ failed: {}", err);
}
if (!ret) {
return false;
}
HostTriangleMesh host_mesh;
// copy vertex and data
for (size_t vidx = 0; vidx < attrib.vertices.size(); vidx += 3) {
tinyobj::real_t vx = attrib.vertices[vidx + 0];
tinyobj::real_t vy = attrib.vertices[vidx + 1];
tinyobj::real_t vz = attrib.vertices[vidx + 2];
host_mesh.vertices_.push_back(Eigen::Vector3f(vx, vy, vz));
}
for (size_t vidx = 0; vidx < attrib.colors.size(); vidx += 3) {
tinyobj::real_t r = attrib.colors[vidx + 0];
tinyobj::real_t g = attrib.colors[vidx + 1];
tinyobj::real_t b = attrib.colors[vidx + 2];
host_mesh.vertex_colors_.push_back(Eigen::Vector3f(r, g, b));
}
// resize normal data and create bool indicator vector
host_mesh.vertex_normals_.resize(host_mesh.vertices_.size());
std::vector<bool> normals_indicator(host_mesh.vertices_.size(), false);
// copy face data and copy normals data
// append face-wise uv data
for (size_t s = 0; s < shapes.size(); s++) {
size_t index_offset = 0;
for (size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++) {
int fv = shapes[s].mesh.num_face_vertices[f];
if (fv != 3) {
utility::LogWarning(
"Read OBJ failed: facet with number of vertices not "
"equal to 3");
return false;
}
Eigen::Vector3i facet;
for (int v = 0; v < fv; v++) {
tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v];
int vidx = idx.vertex_index;
facet(v) = vidx;
if (!attrib.normals.empty() && !normals_indicator[vidx] &&
(3 * idx.normal_index + 2) < int(attrib.normals.size())) {
tinyobj::real_t nx =
attrib.normals[3 * idx.normal_index + 0];
tinyobj::real_t ny =
attrib.normals[3 * idx.normal_index + 1];
tinyobj::real_t nz =
attrib.normals[3 * idx.normal_index + 2];
host_mesh.vertex_normals_[vidx](0) = nx;
host_mesh.vertex_normals_[vidx](1) = ny;
host_mesh.vertex_normals_[vidx](2) = nz;
normals_indicator[vidx] = true;
}
if (!attrib.texcoords.empty() &&
2 * idx.texcoord_index + 1 < int(attrib.texcoords.size())) {
tinyobj::real_t tx =
attrib.texcoords[2 * idx.texcoord_index + 0];
tinyobj::real_t ty =
attrib.texcoords[2 * idx.texcoord_index + 1];
host_mesh.triangle_uvs_.push_back(Eigen::Vector2f(tx, ty));
}
}
host_mesh.triangles_.push_back(facet);
index_offset += fv;
}
}
// if not all normals have been set, then remove the vertex normals
bool all_normals_set =
std::accumulate(normals_indicator.begin(), normals_indicator.end(),
true, [](bool a, bool b) { return a && b; });
if (!all_normals_set) {
host_mesh.vertex_normals_.clear();
}
// if not all triangles have corresponding uvs, then remove uvs
if (3 * host_mesh.triangles_.size() != host_mesh.triangle_uvs_.size()) {
host_mesh.triangle_uvs_.clear();
}
mesh.Clear();
host_mesh.ToDevice(mesh);
// Now we assert only one shape is stored, we only select the first
// diffuse material
for (auto& material : materials) {
if (!material.diffuse_texname.empty()) {
mesh.texture_ = *(io::CreateImageFromFile(mtl_base_path +
material.diffuse_texname)
->FlipVertical());
break;
}
}
return true;
}
/// Write a triangle mesh to a Wavefront OBJ file, plus a companion .mtl file
/// (and a .png texture when present) in the same directory, named after the
/// OBJ file's stem.  Vertex normals / colors / per-face uvs are emitted only
/// when both requested and present in the mesh.  Returns true whenever the
/// OBJ itself was written, even if the mtl/texture writes fail (warnings are
/// logged).  write_ascii and compressed are accepted for interface symmetry
/// but not used by the OBJ format.
/// NOTE(review): the header says "Created by Open3D" although this is the
/// cupoch fork, and the final "map_Kd" line has no trailing newline —
/// runtime output, so left untouched here.
bool WriteTriangleMeshToOBJ(const std::string& filename,
                            const geometry::TriangleMesh& mesh,
                            bool write_ascii /* = false*/,
                            bool compressed /* = false*/,
                            bool write_vertex_normals /* = true*/,
                            bool write_vertex_colors /* = true*/,
                            bool write_triangle_uvs /* = true*/,
                            bool print_progress) {
    std::string object_name = utility::filesystem::GetFileNameWithoutExtension(
            utility::filesystem::GetFileNameWithoutDirectory(filename));
    std::ofstream file(filename.c_str(), std::ios::out | std::ios::binary);
    if (!file) {
        utility::LogWarning("Write OBJ failed: unable to open file.");
        return false;
    }
    // download the device mesh once; everything below reads host copies
    HostTriangleMesh host_mesh;
    host_mesh.FromDevice(mesh);
    if (mesh.HasTriangleNormals()) {
        utility::LogWarning("Write OBJ can not include triangle normals.");
    }
    file << "# Created by Open3D " << std::endl;
    file << "# object name: " << object_name << std::endl;
    file << "# number of vertices: " << host_mesh.vertices_.size() << std::endl;
    file << "# number of triangles: " << host_mesh.triangles_.size()
         << std::endl;
    // always write material name in obj file, regardless of uvs or textures
    file << "mtllib " << object_name << ".mtl" << std::endl;
    file << "usemtl " << object_name << std::endl;
    utility::ConsoleProgressBar progress_bar(
            host_mesh.vertices_.size() + host_mesh.triangles_.size(),
            "Writing OBJ: ", print_progress);
    // only emit attributes the mesh actually has
    write_vertex_normals = write_vertex_normals && mesh.HasVertexNormals();
    write_vertex_colors = write_vertex_colors && mesh.HasVertexColors();
    for (size_t vidx = 0; vidx < host_mesh.vertices_.size(); ++vidx) {
        const Eigen::Vector3f& vertex = host_mesh.vertices_[vidx];
        file << "v " << vertex(0) << " " << vertex(1) << " " << vertex(2);
        if (write_vertex_colors) {
            const Eigen::Vector3f& color = host_mesh.vertex_colors_[vidx];
            file << " " << color(0) << " " << color(1) << " " << color(2);
        }
        file << std::endl;
        if (write_vertex_normals) {
            const Eigen::Vector3f& normal = host_mesh.vertex_normals_[vidx];
            file << "vn " << normal(0) << " " << normal(1) << " " << normal(2)
                 << std::endl;
        }
        ++progress_bar;
    }
    // we are less strict and allows writing to uvs without known material
    // potentially this will be useful for exporting conformal map generation
    write_triangle_uvs = write_triangle_uvs && mesh.HasTriangleUvs();
    // we don't compress uvs into vertex-wise representation.
    // loose triangle-wise representation is provided
    if (write_triangle_uvs) {
        for (auto& uv : host_mesh.triangle_uvs_) {
            file << "vt " << uv(0) << " " << uv(1) << std::endl;
        }
    }
    // faces: OBJ indices are 1-based; uv index of corner c of triangle t is
    // 3 * t + c + 1 because uvs were written per face corner above
    for (size_t tidx = 0; tidx < host_mesh.triangles_.size(); ++tidx) {
        const Eigen::Vector3i& triangle = host_mesh.triangles_[tidx];
        if (write_vertex_normals && write_triangle_uvs) {
            file << "f ";
            file << triangle(0) + 1 << "/" << 3 * tidx + 1 << "/"
                 << triangle(0) + 1 << " ";
            file << triangle(1) + 1 << "/" << 3 * tidx + 2 << "/"
                 << triangle(1) + 1 << " ";
            file << triangle(2) + 1 << "/" << 3 * tidx + 3 << "/"
                 << triangle(2) + 1 << std::endl;
        } else if (!write_vertex_normals && write_triangle_uvs) {
            file << "f ";
            file << triangle(0) + 1 << "/" << 3 * tidx + 1 << " ";
            file << triangle(1) + 1 << "/" << 3 * tidx + 2 << " ";
            file << triangle(2) + 1 << "/" << 3 * tidx + 3 << std::endl;
        } else if (write_vertex_normals && !write_triangle_uvs) {
            file << "f " << triangle(0) + 1 << "//" << triangle(0) + 1 << " "
                 << triangle(1) + 1 << "//" << triangle(1) + 1 << " "
                 << triangle(2) + 1 << "//" << triangle(2) + 1 << std::endl;
        } else {
            file << "f " << triangle(0) + 1 << " " << triangle(1) + 1 << " "
                 << triangle(2) + 1 << std::endl;
        }
        ++progress_bar;
    }
    // end of writing obj.
    //////
    //////
    // start to write to mtl and texture
    std::string parent_dir =
            utility::filesystem::GetFileParentDirectory(filename);
    std::string mtl_filename = parent_dir + object_name + ".mtl";
    std::string tex_filename = parent_dir + object_name + ".png";
    // write standard material info to mtl file
    std::ofstream mtl_file(mtl_filename.c_str(), std::ios::out);
    if (!mtl_file) {
        utility::LogWarning(
                "Write OBJ successful, but failed to write material file.");
        return true;
    }
    mtl_file << "# Created by Open3D " << std::endl;
    mtl_file << "# object name: " << object_name << std::endl;
    mtl_file << "newmtl " << object_name << std::endl;
    mtl_file << "Ka 1.000 1.000 1.000" << std::endl;
    mtl_file << "Kd 1.000 1.000 1.000" << std::endl;
    mtl_file << "Ks 0.000 0.000 0.000" << std::endl;
    if (write_triangle_uvs && mesh.HasTexture()) {
        if (!io::WriteImage(tex_filename, *mesh.texture_.FlipVertical())) {
            utility::LogWarning(
                    "Write OBJ successful, but failed to write texture "
                    "file.");
            return true;
        }
        mtl_file << "map_Kd " << object_name << ".png";
    }
    return true;
}
} // namespace io
}  // namespace cupoch
#pragma once
#include <math/vector.h>
#include <math/matrix.h>
#include <meta_utils.h>
#include <utils.h>
#include <cstdint>
#include "config.h"
/* Default rasterizer-identification policy: one rasterizer per CUDA block,
 * identified by blockIdx.x.  Plugged into TileSpace's RasterizerId parameter. */
struct BlockRasterizerId
{
    __device__
    static int rasterizer()
    {
        return blockIdx.x;
    }
};
/* Compile-time description of a screen-space tiling hierarchy for a tiled
 * rasterizer: the x_max * y_max pixel frame is divided into bins of
 * bin_size_x * bin_size_y tiles, each tile being a stamp of
 * stamp_width * stamp_height pixels.  Bins are assigned to NUM_BLOCKS
 * rasterizers along diagonals ((x + y) % NUM_BLOCKS).  All members are
 * static utilities; RasterizerId supplies the identity of the calling
 * rasterizer.
 * Fix: localTileHitY was the only non-static member function of this
 * otherwise all-static class (its twin localTileHitX is static); it is now
 * static, which is backward compatible for any existing instance-based call. */
template <int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId = BlockRasterizerId>
class TileSpace
{
protected:
    /* bin extents in pixels */
    static constexpr int bin_width = bin_size_x * stamp_width;
    static constexpr int bin_height = bin_size_y * stamp_height;
    static constexpr int num_bins_x = x_max / bin_width;
    static constexpr int num_bins_y = y_max / bin_height;
    //static_assert(bin_size_x*bin_size_y%WARP_SIZE == 0, "ERROR: bin size must be a multiple of the warpsize");
    static_assert(stamp_width*stamp_height%WARP_SIZE == 0, "ERROR: stamp size must be a multiple of the warpsize");
    __device__
    static int rasterizer()
    {
        return RasterizerId::rasterizer();
    }
public:
    static constexpr int TilesPerBinX = bin_size_x;
    static constexpr int TilesPerBinY = bin_size_y;
    static constexpr int StampsPerTileX = stamp_width;
    static constexpr int StampsPerTileY = stamp_height;
    //typedef CoverageMask<bin_size_x, bin_size_y> BinCoverageMask;
    //typedef CoverageMask<stamp_width, stamp_height> StampCoverageMask;
    /* queue id of the calling rasterizer (one queue per rasterizer) */
    __device__
    static int MyQueue()
    {
        return rasterizer();
    }
    __device__
    static int num_rasterizers()
    {
        return NUM_BLOCKS;
    }
    /* bin coordinates containing pixel (x, y) */
    __device__
    static int2 bin(int x, int y)
    {
        return make_int2(x / bin_width, y / bin_height);
    }
    /* pixel bounds of bin column i / row j */
    __device__
    static int left(int i)
    {
        return i * bin_width;
    }
    __device__
    static int top(int j)
    {
        return j * bin_height;
    }
    __device__
    static int right(int i)
    {
        return left(i) + bin_width;
    }
    __device__
    static int bottom(int j)
    {
        return top(j) + bin_height;
    }
    /* diagonal bin-to-rasterizer assignment */
    __device__
    static int rasterizer(int x, int y)
    {
        return (y + x) % NUM_BLOCKS;
    }
    __device__
    static int rasterizer(int2 b)
    {
        return (b.y + b.x) % NUM_BLOCKS;
    }
    /* pixel rectangle (left, top, right, bottom) covered by a bin */
    __device__
    static int4 binBounds(int2 bin)
    {
        return make_int4(bin.x * bin_width, bin.y * bin_height, (bin.x + 1) * bin_width, (bin.y + 1)* bin_height);
    }
    /* pixel rectangle covered by a tile within a bin (2d or linear tile id) */
    __device__
    static int4 tileBounds(int2 bin, int2 tile)
    {
        int x = left(bin.x) + tile.x * stamp_width;
        int y = top(bin.y) + tile.y * stamp_height;
        return make_int4(x, y, x + stamp_width, y + stamp_height);
    }
    __device__
    static int4 tileBounds(int2 bin, int tile)
    {
        int x = left(bin.x) + (tile % bin_size_x) * stamp_width;
        int y = top(bin.y) + (tile / bin_size_x) * stamp_height;
        return make_int4(x, y, x + stamp_width, y + stamp_height);
    }
    /* global tile coordinates of a bin-local tile */
    __device__
    static int2 tileCoords(int2 bin, int2 tile)
    {
        return make_int2(bin.x * bin_size_x + tile.x,
                         bin.y * bin_size_y + tile.y);
    }
    /* bin-local tile column hit by continuous x, clamped via the
     * float round-trip to [-1, upper].
     * NOTE(review): the clamp upper bound is stamp_width here (and
     * stamp_height below) while TransformedBin::tileFromX clamps to
     * bin_size_x — confirm whether the tile-count bound was intended. */
    __device__
    static int localTileHitX(int2 bin, float x)
    {
        int l = left(bin.x);
        float diff = (x - l) / static_cast<float>(stamp_width);
        int h = 1.0f + max(-1.0f, min(static_cast<float>(stamp_width), diff));
        return h - 1;
    }
    __device__
    static int relativeTileHitX(int2 bin, int x)
    {
        int l = left(bin.x);
        int diff = (x - l) / stamp_width;
        return diff;
    }
    /* fix: now static, matching localTileHitX and the rest of the class */
    __device__
    static int localTileHitY(int2 bin, float y)
    {
        int t = top(bin.y);
        float diff = (y - t) / static_cast<float>(stamp_height);
        int h = 1.0f + max(-1.0f, min(static_cast<float>(stamp_height), diff));
        return h - 1;
    }
    __device__
    static int relativeTileHitY(int2 bin, int y)
    {
        int t = top(bin.y);
        int diff = (y - t) / stamp_height;
        return diff;
    }
    /* pixel offset of x/y relative to a tile's origin */
    __device__
    static int relativeStampHitX(int2 bin, int2 tile, int x)
    {
        int l = left(bin.x) + tile.x * stamp_width;
        int diff = (x - l);
        return diff;
    }
    __device__
    static int relativeStampHitY(int2 bin, int2 tile, int y)
    {
        int t = top(bin.y) + tile.y * stamp_height;
        int diff = (y - t);
        return diff;
    }
    /* call f(rowTopPixelY, rowIndex) for every tile row of a bin, optionally
     * bottom-to-top (flipY) and starting startoffset rows down */
    template <typename F>
    __device__
    static void traverseTileRows(int2 bin, F f, bool flipY = false, int startoffset = 0)
    {
        int i_flip = flipY;
        int flipperY = 1 - 2 * static_cast<int>(flipY);
        int t = top(bin.y) + startoffset * stamp_height;
        int y = t + i_flip * bin_height;
#pragma unroll
        for (int i = 0; i < bin_size_y; ++i, y += flipperY * stamp_height)
            f(y, i);
    }
    /* A bin mapped through CoordTransform (e.g. into clip space): tile lookup
     * and row traversal in the transformed coordinate system.  The origin is
     * offset by half a pixel (left-0.5, top-0.5). */
    template<class CoordTransform>
    class TransformedBin
    {
        math::float2 start;
        math::float2 stamp_size;
        math::float2 inv_stamp_size;
    public:
        __device__
        TransformedBin(int left, int top) :
            start(CoordTransform::point(math::float2(left-0.5f, top-0.5f))),
            stamp_size(CoordTransform::vec(math::float2(stamp_width, stamp_height))),
            inv_stamp_size(1.0f / stamp_size.x, 1.0f / stamp_size.y)
        {}
        template <typename F>
        __device__
        void traverseTileRows(F f, bool flipY = false, float startoffset = 0.0f)
        {
            float f_flip = flipY;
            float flipperY = 1.0f - 2.0f * static_cast<float>(flipY);
            float t = start.y + startoffset * stamp_size.y;
            float y = t + f_flip * stamp_size.y * bin_size_y;
            float step = flipperY * stamp_size.y;
#pragma unroll
            for (int i = 0; i < bin_size_y; ++i, y += step)
                f(y, i);
        }
        /* tile column/row hit by a transformed coordinate, clamped to
         * [-1, bin_size] via the float round-trip */
        __device__
        int tileFromX(float x)
        {
            float diff = (x - start.x)* inv_stamp_size.x;
            int h = 1.0f + max(-1.0f, min(static_cast<float>(bin_size_x), diff));
            return h - 1;
        }
        __device__
        int tileFromY(float y)
        {
            float diff = (y - start.y)* inv_stamp_size.y;
            int h = 1.0f + max(-1.0f, min(static_cast<float>(bin_size_y), diff));
            return h - 1;
        }
    };
    template<class CoordTransform>
    __device__
    static TransformedBin<CoordTransform> transformBin(int2 bin)
    {
        return TransformedBin<CoordTransform>(left(bin.x), top(bin.y));
    }
    /* A tile mapped through CoordTransform: per-fragment lookup and stamp-row
     * traversal in the transformed coordinate system. */
    template<class CoordTransform>
    class TransformedTile
    {
        math::float2 start;
        math::float2 fragment_size;
        math::float2 inv_fragment_size;
    public:
        __device__
        TransformedTile(int left, int top) :
            start(CoordTransform::point(math::float2(left, top))),
            fragment_size(CoordTransform::vec(math::float2(1.0f, 1.0f))),
            inv_fragment_size(1.0f / fragment_size.x, 1.0f / fragment_size.y)
        {}
        template <typename F>
        __device__
        void traverseStampsRows(F f, float startoffset = 0.0f)
        {
            float y = start.y + startoffset * fragment_size.y;
            float step = fragment_size.y;
#pragma unroll
            for (int i = 0; i < stamp_height; ++i, y += step)
                f(y, i);
        }
        /* fragment column/row hit by a transformed coordinate, clamped to
         * [-1, stamp size] via the float round-trip */
        __device__
        int stampFromX(float x, float addoffset = 0)
        {
            float diff = (x - start.x)* inv_fragment_size.x + addoffset;
            int h = 1.0f + max(-1.0f, min(static_cast<float>(stamp_width), diff));
            return h - 1;
        }
        __device__
        int stampFromY(float y, float addoffset = 0)
        {
            float diff = (y - start.y)* inv_fragment_size.y + addoffset;
            int h = 1.0f + max(-1.0f, min(static_cast<float>(stamp_height), diff));
            return h - 1;
        }
    };
    template<class CoordTransform>
    __device__
    static TransformedTile<CoordTransform> transformTile(int2 bin, int2 tile)
    {
        return TransformedTile<CoordTransform>(left(bin.x) + tile.x * stamp_width, top(bin.y) + tile.y * stamp_height);
    }
    /* transformed (left, top, right, bottom) corners of a tile */
    template<class CoordTransform>
    __device__
    static math::float4 tileCoords(int2 bin, int2 tile)
    {
        auto lt = CoordTransform::point({ static_cast<float>(left(bin.x) + tile.x * stamp_width), static_cast<float>(top(bin.y) + tile.y * stamp_height) });
        auto rb = CoordTransform::point({ static_cast<float>(left(bin.x) + tile.x * stamp_width + stamp_width), static_cast<float>(top(bin.y) + tile.y * stamp_height + stamp_height) });
        return { lt.x, lt.y, rb.x, rb.y };
    }
    /* call f(linearTileId, tileCoord, pixelBounds) for every tile of a bin
     * from a single thread */
    template <typename F>
    __device__
    static void traverseTiles(int2 bin, F f)
    {
        int2 p = make_int2(left(bin.x), top(bin.y));
        for (int j = 0; j < bin_size_y; ++j)
            for (int i = 0; i < bin_size_x; ++i)
            {
                int x = p.x + i * stamp_width;
                int y = p.y + j * stamp_height;
                f(bin_size_x*j + i, make_int2(i, j), make_int4(x, y, x + stamp_width, y + stamp_height));
            }
    }
    /* warp-cooperative tile traversal: lane l handles tiles l, l+32, ... */
    template <typename F>
    __device__
    static void traverseTilesWarp(int2 bin, F f)
    {
        int2 p = make_int2(left(bin.x), top(bin.y));
#pragma unroll
        for (int toffset = 0; toffset < bin_size_x * bin_size_y; toffset += WARP_SIZE)
        {
            int tile = toffset + laneid();
            int j = tile / bin_size_x;
            int i = tile % bin_size_x;
            int x = p.x + i * stamp_width;
            int y = p.y + j * stamp_height;
            f(tile, make_int4(x, y, x + stamp_width, y + stamp_height));
        }
    }
    /* warp-cooperative fragment traversal over a tile given by its pixel origin */
    template <typename F>
    __device__
    static void traverseStampWarp(int2 tile, F f)
    {
#pragma unroll
        for (int sample = 0; sample < stamp_width * stamp_height; sample += WARP_SIZE)
        {
            int s = sample + laneid();
            int j = s / stamp_width;
            int i = s % stamp_width;
            int x = tile.x + i;
            int y = tile.y + j;
            f(x, y);
        }
    }
    /* warp-cooperative fragment traversal over a bin-local tile; `part`
     * counts the WARP_SIZE-sized passes */
    template <typename F>
    __device__
    static void traverseStampsWarp(int2 bin, int2 tile, F f)
    {
        int px = left(bin.x) + tile.x * stamp_width;
        int py = top(bin.y) + tile.y * stamp_height;
        int part = 0;
#pragma unroll
        for (int toffset = 0; toffset < stamp_width * stamp_height; toffset += WARP_SIZE)
        {
            int stamp = toffset + laneid();
            int j = stamp / stamp_width;
            int i = stamp % stamp_width;
            int x = px + i;
            int y = py + j;
            f(stamp, px, py, x, y, part);
            ++part;
        }
    }
    /* debug color derived from the rasterizer id (base-5 digit per channel) */
    __device__
    static uchar4 myRasterizerColor()
    {
        return make_uchar4(rasterizer() % 5 * 50, rasterizer() / 5 % 5 * 50, rasterizer() / 25 % 5 * 50, 255);
    }
};
/* Primary template, intentionally empty: bin-traversal strategies are provided
 * by the per-PATTERNTECHNIQUE specializations below. */
template <PATTERNTECHNIQUE technique, int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId = BlockRasterizerId>
class PatternTileSpace {};
// DIAGONAL pattern specialization: bins are distributed over rasterizers along
// diagonals of the bin grid (via the base-class rasterizer(int2) mapping).
// Provides closed-form counting/enumeration of this rasterizer's bins inside
// an inclusive bounding box [start, end].
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::DIAGONAL, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
public:
// Number of bins inside the inclusive box [start, end] owned by this
// rasterizer.
__device__
static int numHitBinsForMyRasterizer(const int2 start, const int2 end)
{
const int r = rasterizer();
// inclusive box extents
const int w = end.x - start.x + 1;
const int h = end.y - start.y + 1;
// every full NUM_BLOCKS-wide (resp. -tall) span contributes exactly one
// hit per orthogonal line
int full_elements_x = w / NUM_BLOCKS;
int full_elements_y = h / NUM_BLOCKS;
// remainder strips on the right and on the top of the box
int right = w - full_elements_x * NUM_BLOCKS;
int top = h - full_elements_y * NUM_BLOCKS;
int full_sums = full_elements_x * h + full_elements_y * right;
// offset of this rasterizer relative to the box origin's id
int start_offset = (NUM_BLOCKS + r + 1 - rasterizer(start)) % NUM_BLOCKS;
// hits from the (at most two) diagonals crossing the right/top remainder
int first_hit_diag = min(right, start_offset) - min(right, max(start_offset - top, 0));
int second_hit_diag = right - min(right, start_offset + NUM_BLOCKS - top);
int res = full_sums + first_hit_diag + second_hit_diag;
return res;
}
// Return the i-th hit bin (0-based) of this rasterizer inside the inclusive
// box. Precondition: i < numHitBinsForMyRasterizer(start, end). Hits are
// ordered in three regions: full bands, first clipped diagonal, remaining
// diagonals of the top strip.
__device__
static int2 getHitBinForMyRasterizer(int i, const int2 start, const int2 end)
{
const int r = rasterizer();
const int w = end.x - start.x + 1;
const int h = end.y - start.y + 1;
int full_elements_x = w / NUM_BLOCKS;
int full_elements_y = h / NUM_BLOCKS;
int right = w - full_elements_x * NUM_BLOCKS;
int top = h - full_elements_y * NUM_BLOCKS;
int start_offset = (NUM_BLOCKS + r + 1 - rasterizer(start)) % NUM_BLOCKS;
int x, y;
// region 0: rows covered by whole NUM_BLOCKS-tall bands
int region0bound = full_elements_y * (full_elements_x * NUM_BLOCKS + right);
// region 1: first (clipped) diagonal inside the top strip
int region1bound = region0bound + min(start_offset, top);
if (i < region0bound)
{
x = i % (full_elements_x * NUM_BLOCKS + right);
int row = i / (full_elements_x * NUM_BLOCKS + right);
int yoffset = (NUM_BLOCKS - start_offset + x) % NUM_BLOCKS;
y = (row + 1) * NUM_BLOCKS - yoffset - 1;
}
else if (i < region1bound)
{
x = i + start_offset - region1bound;
y = full_elements_y * NUM_BLOCKS + start_offset - x - 1;
}
else
{
// region 2: remaining diagonals, one group per NUM_BLOCKS columns
int k = i - region1bound;
int x_block = k / top;
int x_local = k - x_block * top;
x = (x_block + 1) * NUM_BLOCKS + start_offset - top + x_local;
y = full_elements_y * NUM_BLOCKS + top - 1 - x_local;
}
return make_int2(start.x + x, start.y + y);
}
// Call f(id) for every rasterizer with at least one bin in [start, end].
// Ids wrap around NUM_BLOCKS starting at rasterizer(start); returns the
// number of distinct rasterizers touched.
template <typename F>
__device__
static unsigned int traverseRasterizers(int2 start, int2 end, F f)
{
int r = rasterizer(start);
// number of distinct diagonals crossed by the box
int num = end.x - start.x + end.y - start.y + 1;
int endr = min(r + num, NUM_BLOCKS);
for (int i = r; i < endr; ++i)
f(i);
// wrapped-around remainder of the id range
for (int i = 0; i < min(r + num - endr, r); ++i)
f(i);
return min(num, NUM_BLOCKS);
// Legacy per-bin walk kept for reference:
//unsigned int c = 0;
//for (int b = start.y; b <= end.y && c < NUM_BLOCKS; ++b, ++c)
//{
// f(r);
// r = (r + 1) >= NUM_BLOCKS ? 0 : r + 1;
//}
//for (int b = start.x + 1; b <= end.x && c < NUM_BLOCKS; ++b, ++c)
//{
// f(r);
// r = (r + 1) >= NUM_BLOCKS ? 0 : r + 1;
//}
//return c;
}
};
// OFFSET pattern specialization: the rasterizer assignment shifts by a
// fractional amount per bin row — PARTIAL (== OFFSET_PARAMETER) sub-steps per
// full block. Variables with an "_s" suffix are in scaled units (coordinates
// multiplied by PARTIAL, i.e. fixed point with PARTIAL sub-steps per bin).
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::OFFSET, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
private:
// sub-steps the assignment shifts per row
static constexpr int PARTIAL = OFFSET_PARAMETER;
// total id range in scaled ("_s") units
static constexpr int num_rasterizers_s = NUM_BLOCKS*PARTIAL;
// default horizontal shift of the hit window from one row to the next
static constexpr int def_shift_per_line = (NUM_BLOCKS + PARTIAL - 1) / PARTIAL;
public:
// Count bins of the inclusive box [bb_from, end] owned by this rasterizer.
__device__
//static int numHitBinsForMyRasterizer(const int2 start, const int2 end)
static int numHitBinsForMyRasterizer(const int2 bb_from, const int2 end)
{
//maybe rename later
// exclusive upper bound of the box
const int2 bb_to = { end.x + 1, end.y + 1 };
//
const int rasterizer_id = rasterizer();
const int w = bb_to.x - bb_from.x;
const int h = bb_to.y - bb_from.y;
// full NUM_BLOCKS-wide spans: one hit per span per row
const int full_sets_per_line = w / NUM_BLOCKS;
const int found_full = full_sets_per_line * h;
// remaining partial strip, in scaled units
const int bb_from_s = (bb_from.x + full_sets_per_line * NUM_BLOCKS)*PARTIAL;
const int bb_max_s = bb_to.x * PARTIAL - 1;
const int rasterizer_id_s = rasterizer_id*PARTIAL;
// first scaled column of this rasterizer's slanted hit line in the strip
const int step = (bb_from_s - rasterizer_id_s + (NUM_BLOCKS + 1)*NUM_BLOCKS - 1) / NUM_BLOCKS - NUM_BLOCKS;
int part_start_column_s = rasterizer_id_s + step*NUM_BLOCKS;
// NOTE(review): num_bins_y comes from the TileSpace base class — confirm.
int part_start_row = (-step + num_bins_y * PARTIAL + 1);
// hits possible per PARTIAL-row repetition of the pattern
int possible_per_rep = ((bb_max_s - part_start_column_s + NUM_BLOCKS) / NUM_BLOCKS);
// count hits from part_start_row down to bottom resp. top edge of the box
// and take the difference
int lines_big = part_start_row - bb_from.y;
int lines_small = part_start_row - bb_to.y;
int seq_reps_big = lines_big / PARTIAL;
int seq_reps_small = lines_small / PARTIAL;
int found_rem_big = (seq_reps_big*possible_per_rep + min(possible_per_rep, lines_big - seq_reps_big*PARTIAL));
int found_rem_small = (seq_reps_small*possible_per_rep + min(possible_per_rep, lines_small - seq_reps_small*PARTIAL));
int found_remaining = found_rem_big - found_rem_small;
return (found_full + found_remaining);
}
// Return the i-th hit bin (0-based) for this rasterizer inside the box.
// Ordering: hits inside the full NUM_BLOCKS-wide spans first, then the hits
// of the remaining partial strip. Precondition: i < numHitBins...().
__device__
static int2 getHitBinForMyRasterizer(int i, const int2 bb_from, const int2 end)
{
//maybe rename later
const int2 bb_to = { end.x + 1, end.y + 1 };
//
const int rasterizer_id = rasterizer();
int bb_from_s = bb_from.x * PARTIAL;
int bb_max_s = bb_to.x * PARTIAL - 1;
int rasterizer_id_s = rasterizer_id*PARTIAL;
int w = bb_to.x - bb_from.x;
int h = bb_to.y - bb_from.y;
int full_sets_per_line = w / NUM_BLOCKS;
int found_full = full_sets_per_line * h;
int X = 0;
int Y = 0;
if (i < found_full)
{
// hit inside the full spans: locate the row, then the column
int y_times = i / full_sets_per_line;
Y = bb_to.y - y_times - 1;
int step_s = (rasterizer_id_s + (num_rasterizers_s - Y*NUM_BLOCKS) - bb_from_s) % num_rasterizers_s;
int x_off = i - (bb_to.y - 1 - Y)*full_sets_per_line;
X = bb_from.x + step_s / PARTIAL + x_off*NUM_BLOCKS;
}
else
{
// hit inside the partial strip (same construction as in
// numHitBinsForMyRasterizer, inverted to coordinates)
bb_from_s = bb_from_s + (full_sets_per_line*NUM_BLOCKS*PARTIAL);
int step = (bb_from_s - rasterizer_id_s + (NUM_BLOCKS + 1)*NUM_BLOCKS - 1) / NUM_BLOCKS - NUM_BLOCKS;
int part_start_column_s = rasterizer_id_s + step*NUM_BLOCKS;
int part_start_row = (-step + num_bins_y * PARTIAL + 1);
int possible_per_rep = ((bb_max_s - part_start_column_s + NUM_BLOCKS) / NUM_BLOCKS);
int lines_small = part_start_row - bb_to.y;
int seq_reps_small = lines_small / PARTIAL;
int found_rem_small = (seq_reps_small*possible_per_rep + min(possible_per_rep, lines_small - seq_reps_small*PARTIAL));
int higher_id = found_rem_small + (i - found_full);
int rel_seq = higher_id / possible_per_rep;
int found_in_seqs = rel_seq*possible_per_rep;
int base_y = part_start_row - rel_seq*PARTIAL;
int id_in_seq = higher_id - found_in_seqs;
Y = base_y - id_in_seq - 1;
X = (part_start_column_s + id_in_seq*NUM_BLOCKS) / PARTIAL;
}
return make_int2(X, Y);
}
// Call f(id) once per rasterizer hit inside the box; returns the number of
// calls. Walks the rows, sliding the window of visible ids by
// def_shift_per_line per line with a rounding correction.
template <typename F>
__device__
//static unsigned int traverseRasterizers(int2 start, int2 end, F f)
static unsigned int traverseRasterizers(int2 b_from, int2 b_end, F f)
{
//maybe rename later
const int2 b_to = { b_end.x + 1, b_end.y + 1 };
//
const int rasterizer_id = rasterizer();
const int h = b_to.y - b_from.y;
// accumulated shift in scaled units, and its rounded-up block equivalent
int shift = b_from.y * NUM_BLOCKS;
int def_shift = (shift + PARTIAL - 1) / PARTIAL;
int def_shift_s = PARTIAL*def_shift;
int rt = (b_from.x + def_shift) % NUM_BLOCKS;
int startx = b_from.x, endx = b_from.x + NUM_BLOCKS;
int start = startx, end = min(endx, b_to.x);
unsigned int first_element = rt, element, found = 0;
for (int row = 0; row < h && start < end; row++)
{
element = first_element + (start - b_from.x);
for (int i = start; i < end; i++)
{
// wrap element into [0, NUM_BLOCKS): element is unsigned, so
// element - NUM_BLOCKS underflows to a huge value while
// element < NUM_BLOCKS, and min() then keeps the original
element = min(element, element - NUM_BLOCKS);
f(element);
element++;
found++;
}
// slide the window to the next row
startx = end - def_shift_per_line;
endx = endx - def_shift_per_line;
first_element = first_element + def_shift_per_line;
shift = shift + NUM_BLOCKS;
def_shift_s = def_shift_s + def_shift_per_line*PARTIAL;
int shift_compare = (def_shift_s - PARTIAL);
if (shift <= shift_compare)
{
// rounding correction: the accumulated per-line shift overtook the
// exact scaled shift, so re-align the window by one bin
def_shift_s = shift_compare;
startx = startx + 1;
endx = endx + 1;
first_element = first_element - 1;
}
start = max(startx, b_from.x);
end = min(endx, b_to.x);
}
return found;
}
};
// OFFSET_SHIFT pattern specialization: fully analytic hit counting and
// enumeration for a layout built from "short diagonals" (SD columns) and
// "long diagonals" (LD columns) that repeats every LINES_PER_REP rows.
// The Phase / PhaseState machinery decomposes one repetition into
// intro / big / outro / small phases and accumulates hit counts per phase
// in closed form.
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::OFFSET_SHIFT, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
static constexpr int PARTIAL = OFFSET_PARAMETER;
// short / long diagonal widths; LD absorbs the remainder of (NUM_BLOCKS+1)/PARTIAL
static constexpr int SD = (NUM_BLOCKS + 1) / PARTIAL;
static constexpr int LD = SD + (NUM_BLOCKS + 1) - (SD * PARTIAL);
static constexpr int SD_LD_DIFF = LD - SD;
static constexpr int LD_SD_DIFF = 2 * SD - LD;
// vertical period of the pattern and the line spans of each block type
static constexpr int LINES_PER_REP = ((PARTIAL * SD) + SD_LD_DIFF) * PARTIAL - PARTIAL;
static constexpr int LINES_PER_BLOCK = SD * PARTIAL - 1;
static constexpr int LINES_PER_BIG_BLOCK = LD * PARTIAL - 1;
static constexpr int LINES_PER_INTRO_BLOCK = SD_LD_DIFF*PARTIAL - 1;
// large bias used to keep modulo arguments non-negative
static constexpr int BIG = 10000;
static constexpr int BIG_NUM_R = NUM_BLOCKS * BIG;
static constexpr int BIG_NUM_SD = SD * BIG;
static constexpr int num_rasterizers_s = NUM_BLOCKS*PARTIAL;
static constexpr int def_shift_per_line = (NUM_BLOCKS + PARTIAL - 1) / PARTIAL;
// Hit geometry of one phase of the repeating pattern.
struct Phase
{
int in_block;  // hits contributed by one whole block of this phase
int in_line;   // hits per line at the start of the phase
int left;      // leading diagonals that still carry in_line hits
};
// Search cursor over the phase sequence.
struct PhaseState
{
Phase phase;   // phase the cursor latched onto
bool intro;    // true if that phase is the intro phase
int lines;     // line offset into the phase
int sum;       // accumulated hits; negative encoding once latched
};
public:
// x (at or right of x) of the first bin in row y owned by rasterizer id.
__inline__ __device__ static int firstInLine(int x, int y, int id)
{
int l_id = rrasterizer(math::int2(x, y));
return x + (id - l_id) + (l_id > id ? NUM_BLOCKS : 0);
}
// Highest pattern-restart row at or below point.y for rasterizer id.
__inline__ __device__ static int firstZeroBelow(math::int2 point, int id)
{
int shift = ((BIG * NUM_BLOCKS) + (id - point.x)) % NUM_BLOCKS;
int y = -(shift - SD) * PARTIAL;
if (y > point.y)
{ y -= LINES_PER_REP; }
return y;
}
// Hits remaining from the cursor's position to the end of its phase.
__inline__ __device__ static int cap(PhaseState& state)
{
if (state.intro)
{
// intro phase: the per-line hit count decays twice — after the first
// state.phase.left diagonals and again after SD more
int sum = 0;
int diags_seen = state.lines / PARTIAL;
int l = diags_seen;
int limit = min(l, state.phase.left);
sum += limit * state.phase.in_line;
l -= limit;
limit = min(SD, l);
sum += limit * max(0, state.phase.in_line - 1);
l -= limit;
sum += l * max(0, state.phase.in_line - 2);
int max_for_line = max(0, state.phase.in_line - (diags_seen < state.phase.left ? 0 : 1) - (diags_seen < state.phase.left + SD ? 0 : 1));
int lines_seen = state.lines - diags_seen * PARTIAL;
sum += min(lines_seen, max_for_line);
return state.sum - sum;
}
else
{
// regular phase: whole blocks, then whole diagonals, then single lines
int blox = (state.lines - 1) / LINES_PER_BLOCK;
state.lines -= blox * LINES_PER_BLOCK;
int diags_seen = state.lines / PARTIAL;
int in_diags = min(state.phase.left, diags_seen) * state.phase.in_line + max(0, diags_seen - state.phase.left) * (state.phase.in_line - 1);
int lines_seen = state.lines - diags_seen * PARTIAL;
int in_lines = min(lines_seen, state.phase.in_line - (diags_seen < state.phase.left ? 0 : 1));
return state.sum - (blox * state.phase.in_block + in_diags + in_lines);
}
}
// Derive the four phases (small / big / intro / outro) for a strip of width
// dim, plus how many blocks of each kind one repetition contains.
__inline__ __device__ static void computePhases(int dim, Phase& small_phase, Phase& big_phase, Phase& intro_phase, Phase& outro_phase, int& blocks_w_small, int& blocks_w_big, int& outro_exists)
{
int smalls_exist = (dim > SD ? 1 : 0);
small_phase.in_line = min(PARTIAL, (dim + SD - 1) / SD);
small_phase.left = dim - (small_phase.in_line - 1) * SD;
small_phase.in_block = min(SD, small_phase.left) * small_phase.in_line + max(0, min(dim, SD) - small_phase.left) * (small_phase.in_line - 1);
int bigs_exist = (dim > LD ? 1 : 0);
big_phase.in_line = max(1, small_phase.in_line - (SD_LD_DIFF < small_phase.left ? 0 : 1));
big_phase.left = !bigs_exist * dim + bigs_exist * (dim - SD * (big_phase.in_line - 1) - SD_LD_DIFF);
big_phase.in_block = big_phase.left * big_phase.in_line + (SD - big_phase.left) * (big_phase.in_line - 1);
blocks_w_big = bigs_exist * (big_phase.in_line - 1);
blocks_w_small = max(1, PARTIAL - blocks_w_big - 1);
intro_phase.in_line = small_phase.in_line - (small_phase.left < SD ? 1 : 0);
intro_phase.left = smalls_exist * min(SD, dim - SD * intro_phase.in_line);
intro_phase.in_block = min(SD_LD_DIFF, intro_phase.left) * intro_phase.in_line + max(0, SD_LD_DIFF - intro_phase.left) * max(0, intro_phase.in_line - 1);
outro_exists = (PARTIAL - blocks_w_big > 1 ? 1 : 0);
outro_phase.in_line = big_phase.in_line;
outro_phase.left = !smalls_exist * dim + smalls_exist * min(SD, dim - SD * (outro_phase.in_line - 1));
outro_phase.in_block = outro_exists * (outro_phase.left * outro_phase.in_line + (SD - outro_phase.left) * (outro_phase.in_line - 1));
}
// Advance the cursor past max_blocks blocks of `phase`, or latch onto the
// phase when `condition` holds. Once latched, state.sum goes negative and
// subsequent calls become no-ops.
__inline__ __device__ static void updateState(bool condition, PhaseState& state, int max_blocks, Phase& phase, bool intro, int lines, int value)
{
if (state.sum >= 0)
{
if (condition)
{
state.phase = phase;
state.intro = intro;
state.lines = lines;
state.sum = -value - 1;
}
else
{
state.sum += max_blocks * phase.in_block;
}
}
}
// Step both the start and end cursors across one phase span of y_add lines.
__inline__ __device__ static void findStartAndFinish(PhaseState& start, PhaseState& end, int max_blocks, Phase& phase, int bb_from_y, int bb_end_y, int& limit_y, int y_add, bool intro = false)
{
updateState(bb_from_y <= (limit_y + y_add), start, max_blocks, phase, intro, bb_from_y - limit_y, start.sum);
updateState(bb_end_y <= (limit_y + y_add), end, max_blocks, phase, intro, bb_end_y - limit_y, end.sum);
limit_y += y_add;
}
// Convert a latched cursor (phase + remaining index) back into bin
// coordinates; bb_x is the strip origin, bin_id the target rasterizer.
__inline__ __device__ static int2 findSumIndexInPhase(PhaseState& state, int bb_x, int bin_id)
{
int id = (-state.sum) - 1;
int blox = (state.phase.in_block == 0 ? 0 : id / state.phase.in_block);
int rem = (id - (blox*state.phase.in_block));
int t = 0, diags = 0;
if (state.phase.in_line > 0) //full diags
{
t = min(state.phase.left, rem / state.phase.in_line);
diags += t;
rem -= t * state.phase.in_line;
if (t == state.phase.left && --state.phase.in_line) //not so full diags
{
t = min(SD, rem / state.phase.in_line);
diags += t;
rem -= t * state.phase.in_line;
if (t == SD && --state.phase.in_line) //less full diags
{
t = rem / state.phase.in_line;
diags += t;
rem -= t * state.phase.in_line;
}
}
}
int y = state.lines + (blox * SD + diags) * PARTIAL - blox + rem;
return make_int2(firstInLine(bb_x, y, bin_id), y);
}
// Step only the start cursor across one phase span of y_add lines.
__inline__ __device__ static void findStart(PhaseState& start, int max_blocks, Phase& phase, int bb_from_y, int& limit_y, int y_add, bool intro = false)
{
updateState(bb_from_y <= (limit_y + y_add), start, max_blocks, phase, intro, bb_from_y - limit_y, start.sum);
limit_y += y_add;
}
// Latch the cursor onto the phase containing the i-th hit.
__inline__ __device__ static void findIndexAfterStart(PhaseState& target, int max_blocks, Phase& phase, int i, int& limit_y, int y_add, bool intro = false)
{
updateState(i < (target.sum + max_blocks * phase.in_block), target, max_blocks, phase, intro, target.lines + limit_y, i - target.sum);
limit_y += y_add;
}
// "One if bigger": (a > b) as 0/1 without a branch.
static __forceinline__ __device__ int OIB(int a, int b)
{
//return (a > b ? 1 : 0);
return !max(0, b - a + 1);
}
// Ceiling division for positive operands.
static __forceinline__ __device__ int DIVUP(int a, int b)
{
return (a + b - 1) / b;
}
// Pattern id of a bin under the OFFSET_SHIFT layout; the BIG bias keeps the
// modulo argument non-negative.
__inline__ __device__ static int rrasterizer(math::int2 point)
{
int shift = point.y / PARTIAL;
int in_line = (point.y - (shift*PARTIAL));
int rem = in_line * SD;
int id = ((BIG * NUM_BLOCKS) + (point.x - shift - rem)) % NUM_BLOCKS;
return id;
}
// Count bins of the inclusive box [from, end] owned by this rasterizer.
__device__ static int numHitBinsForMyRasterizer(const int2 from, const int2 end)
{
////
// exclusive upper bound of the box
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
////
const int id = TileSpace::rasterizer();
math::int2 dim = bb_end - bb_from;
// tiny boxes (up to 2x2): test the corners directly
if (dim.x <= 2 && dim.y <= 2)
{
int c_id;
c_id = rrasterizer(bb_from);
if (c_id == id)
{ return 1; }
int horiz = (bb_from.x != bb_end.x - 1);
int verti = (bb_from.y != bb_end.y - 1);
if (horiz && (c_id + 1 == id || c_id + 1 == id + NUM_BLOCKS))
{ return 1; }
if (verti)
{
math::int2 t_pos = math::int2(from.x, end.y);
c_id = rrasterizer(t_pos);
if (c_id == id)
{ return 1; }
if (horiz && (c_id + 1 == id || c_id + 1 == id + NUM_BLOCKS))
{ return 1; }
}
return 0;
}
// full NUM_BLOCKS-wide spans: one hit per row each
int full = dim.x / NUM_BLOCKS;
int sum = full * (bb_end.y - bb_from.y);
bb_from.x = bb_from.x + full * NUM_BLOCKS;
dim.x = bb_end.x - bb_from.x;
if (dim.x > 0)
{
// remaining partial strip: walk the phase sequence analytically
Phase small_phase, big_phase, intro_phase, outro_phase;
int blocks_w_small, blocks_w_big, outro_exists;
computePhases(dim.x, small_phase, big_phase, intro_phase, outro_phase, blocks_w_small, blocks_w_big, outro_exists);
int blocks_per_rep = intro_phase.in_block + outro_phase.in_block + blocks_w_small * small_phase.in_block + blocks_w_big * big_phase.in_block;
// rebase both box edges onto the pattern's restart row
int start_y = firstZeroBelow(bb_from, id);
PhaseState start;
bb_from.y = bb_from.y - start_y;
start.sum = (bb_from.y < LINES_PER_REP ? 0 : blocks_per_rep);
bb_from.y -= (bb_from.y < LINES_PER_REP ? 0 : LINES_PER_REP);
PhaseState end;
bb_end.y = bb_end.y - start_y;
end.sum = (bb_end.y < LINES_PER_REP ? 0 : blocks_per_rep);
bb_end.y -= (bb_end.y < LINES_PER_REP ? 0 : LINES_PER_REP);
int limit_y = 0;
findStartAndFinish(start, end, 1, intro_phase, bb_from.y, bb_end.y, limit_y, LINES_PER_INTRO_BLOCK, true); //Intro
findStartAndFinish(start, end, blocks_w_big, big_phase, bb_from.y, bb_end.y, limit_y, blocks_w_big * LINES_PER_BLOCK); //Bigs
if (outro_exists)
{ findStartAndFinish(start, end, 1, outro_phase, bb_from.y, bb_end.y, limit_y, LINES_PER_BLOCK); } //Outro
findStartAndFinish(start, end, blocks_w_small, small_phase, bb_from.y, bb_end.y, limit_y, blocks_w_small * LINES_PER_BLOCK); //Smalls
// hits between the two cursors
sum += cap(start) - cap(end);
}
return sum;
}
// Return the i-th hit bin (0-based) for this rasterizer inside the box.
// Precondition: i < numHitBinsForMyRasterizer(from, end).
__device__ static int2 getHitBinForMyRasterizer(int i, const int2 from, const int2 end)
{
////
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
////
const int bin_id = TileSpace::rasterizer();
math::int2 dim = bb_end - bb_from;
// tiny boxes: return the matching corner directly
if (dim.x <= 2 && dim.y <= 2)
{
int id;
id = rrasterizer(bb_from);
if (id == bin_id)
{ return make_int2(from.x, from.y); }
if (id + 1 == bin_id || id + 1 == bin_id + NUM_BLOCKS)
{ return make_int2(from.x + 1, bb_from.y); }
else
{
math::int2 t_pos = math::int2(from.x, end.y);
id = rrasterizer(t_pos);
if (id == bin_id)
{ return make_int2(t_pos.x, t_pos.y); }
return make_int2(end.x, end.y);
}
}
int full = dim.x / NUM_BLOCKS;
int in_full_lines = full * (bb_end.y - bb_from.y);
if (i < in_full_lines)
{
// hit inside the full spans: row-major indexing
int y_l = i / full;
int x_l = i - (y_l * full);
int y = bb_from.y + y_l;
int x = firstInLine(bb_from.x, y, bin_id) + x_l * NUM_BLOCKS;
return make_int2(x, y);
}
else
{
// hit inside the partial strip: locate the phase, then invert it
bb_from.x = bb_from.x + full * NUM_BLOCKS;
dim.x = bb_end.x - bb_from.x;
Phase small_phase, big_phase, intro_phase, outro_phase;
int blocks_w_small, blocks_w_big, outro_exists;
computePhases(dim.x, small_phase, big_phase, intro_phase, outro_phase, blocks_w_small, blocks_w_big, outro_exists);
int blocks_per_rep = intro_phase.in_block + outro_phase.in_block + blocks_w_small * small_phase.in_block + blocks_w_big * big_phase.in_block;
PhaseState start, target;
start.sum = target.sum = 0;
target.lines = firstZeroBelow(bb_from, bin_id); //bring target lines directly to screen space
bb_from.y = bb_from.y - target.lines;
if (bb_from.y >= LINES_PER_REP)
{
start.sum = blocks_per_rep;
bb_from.y -= LINES_PER_REP;
}
int limit_y = 0;
findStart(start, 1, intro_phase, bb_from.y, limit_y, LINES_PER_INTRO_BLOCK, true); //Intro
findStart(start, blocks_w_big, big_phase, bb_from.y, limit_y, blocks_w_big * LINES_PER_BLOCK); //Bigs
if (outro_exists)
{ findStart(start, 1, outro_phase, bb_from.y, limit_y, LINES_PER_BLOCK); } //Outro
findStart(start, blocks_w_small, small_phase, bb_from.y, limit_y, blocks_w_small * LINES_PER_BLOCK); //Smalls
int relative_i = -cap(start) - 1 + (i - in_full_lines);
if (relative_i >= blocks_per_rep)
{
target.sum = blocks_per_rep;
target.lines += LINES_PER_REP;
}
limit_y = 0;
findIndexAfterStart(target, 1, intro_phase, relative_i, limit_y, LINES_PER_INTRO_BLOCK, true);
findIndexAfterStart(target, blocks_w_big, big_phase, relative_i, limit_y, blocks_w_big * LINES_PER_BLOCK); //Bigs
if (outro_exists)
{ findIndexAfterStart(target, 1, outro_phase, relative_i, limit_y, LINES_PER_BLOCK); }
findIndexAfterStart(target, blocks_w_small, small_phase, relative_i, limit_y, blocks_w_small * LINES_PER_BLOCK); //Smalls
return findSumIndexInPhase(target, bb_from.x, bin_id);
}
}
// Call f(id) for every rasterizer with at least one bin in the box; returns
// the number of distinct ids. Wide boxes trivially hit all rasterizers;
// narrow ones are resolved with a 64-bit id bitmask.
template <typename F>
__device__ static unsigned int traverseRasterizers(int2 from, int2 end, F f)
{
////
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
////
math::int2 dim = bb_end - bb_from;
int lower_diags = bb_from.y / PARTIAL;
int upper_diags = lower_diags + 1;
int end_diags = bb_end.y / PARTIAL;
int fulls = max(0, end_diags - upper_diags);
if ((dim.x >= NUM_BLOCKS) || ((dim.x >= SD) && (PARTIAL * SD + fulls > NUM_BLOCKS)))
{
// box spans every rasterizer
for (int i = 0; i < NUM_BLOCKS; i++)
{ f(i); }
return NUM_BLOCKS;
}
else
{
int lower_y = lower_diags * PARTIAL;
int upper_y = end_diags * PARTIAL;
// collect hit ids as bits, relative to a reference id
uint64_t in_line = (1ULL << dim.x) - 1;
uint64_t combined = 0x0;
if (fulls > 0)
{
for (int shift = 1; shift <= PARTIAL*SD; shift += SD)
{ combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift)); }
for (int shift = 1; shift <= min(fulls - 1, LD); shift++)
{ combined |= (combined << 1) | (combined >> (NUM_BLOCKS - 1)); }
}
if (end_diags != lower_diags)
{
for (int i = 0, shift = fulls + 1; i < bb_end.y - upper_y; i++, shift += SD)
{ combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift)); }
}
if (lower_diags != upper_diags)
{
for (int i = (bb_from.y - lower_y), shift = (bb_from.y - lower_y) * SD; (i < PARTIAL) && (i + lower_y < bb_end.y); i++, shift += SD)
{ combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift)); }
}
combined &= (1ULL << NUM_BLOCKS) - 1;
int seen = 0;
int reference = rrasterizer(math::int2(end.x, lower_y));
while (int current_id = __ffsll(combined))
{
combined -= (1ULL << current_id - 1);
current_id = reference - (current_id - 1);
current_id += (current_id < 0 ? NUM_BLOCKS : 0);
f(current_id);
seen++;
}
return seen;
}
}
};
// OFFSET_SHIFT_SLIM: register-lean variant of the OFFSET_SHIFT specialization.
// Replaces the Phase machinery with branch-free arithmetic (OIB/DIVUP and
// boolean-to-int tricks) while implementing the same interface.
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::OFFSET_SHIFT_SLIM, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
// same pattern constants as the OFFSET_SHIFT specialization
static constexpr int PARTIAL = OFFSET_PARAMETER;
static constexpr int SD = (NUM_BLOCKS + 1) / PARTIAL;
static constexpr int LD = SD + (NUM_BLOCKS + 1) - (SD * PARTIAL);
static constexpr int SD_LD_DIFF = LD - SD;
static constexpr int LD_SD_DIFF = 2 * SD - LD;
static constexpr int LINES_PER_REP = ((PARTIAL * SD) + SD_LD_DIFF) * PARTIAL - PARTIAL;
static constexpr int LINES_PER_BLOCK = SD * PARTIAL - 1;
static constexpr int LINES_PER_BIG_BLOCK = LD * PARTIAL - 1;
static constexpr int LINES_PER_INTRO_BLOCK = SD_LD_DIFF*PARTIAL - 1;
// large bias used to keep modulo arguments non-negative
static constexpr int BIG = 10000;
static constexpr int BIG_NUM_R = NUM_BLOCKS * BIG;
static constexpr int BIG_NUM_SD = SD * BIG;
static constexpr int num_rasterizers_s = NUM_BLOCKS*PARTIAL;
static constexpr int def_shift_per_line = (NUM_BLOCKS + PARTIAL - 1) / PARTIAL;
public:
// "One if bigger": (a > b) as 0/1 without a branch.
static __forceinline__ __device__ int OIB(int a, int b)
{
//return (a > b ? 1 : 0);
return !max(0, b - a + 1);
}
// Ceiling division for positive operands.
static __forceinline__ __device__ int DIVUP(int a, int b)
{
return (a + b - 1) / b;
}
// Pattern id of a bin (same mapping as OFFSET_SHIFT::rrasterizer).
__inline__ __device__ static int rrasterizer(math::int2 point)
{
int shift = point.y / PARTIAL;
int in_line = (point.y - (shift*PARTIAL));
int rem = in_line * SD;
int id = ((BIG * NUM_BLOCKS) + (point.x - shift - rem)) % NUM_BLOCKS;
return id;
}
// Count bins of the inclusive box [from, end] owned by this rasterizer,
// using a branch-free closed form.
__device__ static int numHitBinsForMyRasterizer(const int2 from, const int2 end)
{
////
// exclusive upper bound of the box
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
////
int id = rasterizer();
int dimx = bb_end.x - bb_from.x;
int dimy = bb_end.y - bb_from.y;
// full NUM_BLOCKS-wide spans: one hit per row each
int full = dimx / NUM_BLOCKS;
int sum = full * (bb_end.y - bb_from.y);
bb_from.x += full * NUM_BLOCKS;
dimx = bb_end.x - bb_from.x;
// position of this rasterizer's hit pattern relative to the strip origin
int shift = bb_from.y / PARTIAL;
int in_line = (bb_from.y - (shift*PARTIAL));
int step = ((BIG_NUM_R)+id - (bb_from.x - shift - in_line * SD)) % NUM_BLOCKS;
int shifts_left = step / SD;
shifts_left -= OIB(shifts_left, in_line) & OIB(shifts_left * SD + SD_LD_DIFF, step);
int left_x = (bb_from.x + step) - (shifts_left * SD + (shifts_left > in_line) * SD_LD_DIFF);
int till_zero = (PARTIAL - in_line) + shifts_left;
// `in`: hits per line; till_in/till_out: block counts until that changes
int in = DIVUP(bb_end.x - left_x, SD);
till_zero -= OIB(till_zero, PARTIAL) * PARTIAL;
in -= OIB(in, till_zero) & OIB(in * SD - LD_SD_DIFF + 1, (bb_end.x - left_x));
int blox = DIVUP(dimy + shifts_left, PARTIAL);
int till_in = SD + !(till_zero - PARTIAL) * SD_LD_DIFF - (left_x - bb_from.x) - 1;
int till_out = !!in * (bb_end.x - (left_x + (in - 1) * SD + OIB(in, till_zero) * SD_LD_DIFF)) + !in*(till_in + dimx);
int one_on_top = OIB(blox, till_in) * OIB(till_in + dimx + 1, blox);
// correct for the partially covered first and last blocks
int remaining_lines = (bb_from.y - shifts_left + blox * PARTIAL) - bb_end.y;
int in_last_block = !!remaining_lines * (in - OIB(blox, till_out) + OIB(blox, till_in));
int in_last_line = one_on_top * (!!in_last_block) + max(0, remaining_lines - (PARTIAL - in_last_block + one_on_top));
sum += blox * in - max(0, blox - till_out) + max(0, blox - till_in);
sum -= min(in, shifts_left) + in_last_line;
return sum;
}
// Return the i-th hit bin (0-based) for this rasterizer.
// Precondition: i < numHitBinsForMyRasterizer(from, end).
__device__ static int2 getHitBinForMyRasterizer(int i, const int2 from, const int2 end)
{
//////
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
int2 res;
//////
int id = rasterizer();
int dimx = bb_end.x - bb_from.x;
int dimy = bb_end.y - bb_from.y;
int full = dimx / NUM_BLOCKS;
int in_full_lines = full * (bb_end.y - bb_from.y);
if (i < in_full_lines)
{
// hit inside the full spans: row-major indexing
int y_l = i / full;
int x_l = i - (y_l * full);
res.y = bb_from.y + y_l;
int shifts = res.y / PARTIAL;
int offsets = res.y - shifts * PARTIAL;
int step = (id + shifts + offsets * SD - bb_from.x + BIG_NUM_R) % NUM_BLOCKS;
res.x = bb_from.x + step + x_l * NUM_BLOCKS;
}
else
{
// hit inside the partial strip (mirrors numHitBinsForMyRasterizer)
bb_from.x += full * NUM_BLOCKS;
dimx = bb_end.x - bb_from.x;
res.x = dimx;
res.y = dimx;
int shift = bb_from.y / PARTIAL;
int in_line = (bb_from.y - (shift*PARTIAL));
int step = (BIG_NUM_R + id - (bb_from.x - shift - in_line * SD)) % NUM_BLOCKS;
int shifts_left = step / SD;
shifts_left -= OIB(shifts_left, in_line) & OIB(shifts_left * SD + SD_LD_DIFF, step);
int left_x = bb_from.x + step - (shifts_left * SD + (shifts_left > in_line) * SD_LD_DIFF);
int till_zero = (PARTIAL - in_line) + shifts_left;
int in = DIVUP(bb_end.x - left_x, SD);
till_zero -= OIB(till_zero, PARTIAL) * PARTIAL;
in -= OIB(in, till_zero) & OIB(in * SD - LD_SD_DIFF, end.x - left_x);
i += min(in, shifts_left) - in_full_lines;
int till_in = SD + !(till_zero - PARTIAL) * SD_LD_DIFF - (left_x - bb_from.x) - 1;
int till_out = !!in * (bb_end.x - left_x - (in - 1) * SD - OIB(in, till_zero) * SD_LD_DIFF) + !in * (till_in + dimx);
// step through up to three regions with constant hits-per-block
int v = min(till_in, till_out), block = 0, line = in;
if (i >= v * line)
{
i -= v * line;
block = v;
line = in + !(till_in - block) - !(till_out - block);
v = max(till_in, till_out) - v;
if (i >= v * line)
{
i -= v * line;
block += v;
line = in + !max(0, till_in - block) - !max(0, till_out - block);
}
}
int b = i / line;
i -= b * line;
int top = !(i - line + 1) & OIB(block + 1, till_in);
block += b;
res.x = top * (bb_from.x - till_in + block) + !top * (left_x + block + i*SD + OIB(i + 1, till_zero) * SD_LD_DIFF);
res.y = bb_from.y - shifts_left + block * PARTIAL + !top * i + top * (PARTIAL - 1);
}
return res;
}
// Call f(id) for every rasterizer with at least one bin in the box; same
// algorithm as in the OFFSET_SHIFT specialization.
template <typename F>
__device__ static unsigned int traverseRasterizers(int2 from, int2 end, F f)
{
////
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
////
math::int2 dim = bb_end - bb_from;
int lower_diags = bb_from.y / PARTIAL;
int upper_diags = lower_diags + 1;
int end_diags = bb_end.y / PARTIAL;
int fulls = max(0, end_diags - upper_diags);
if ((dim.x >= NUM_BLOCKS) || ((dim.x >= SD) && (PARTIAL * SD + fulls > NUM_BLOCKS)))
{
// box spans every rasterizer
for (int i = 0; i < NUM_BLOCKS; i++)
{
f(i);
}
return NUM_BLOCKS;
}
else
{
int lower_y = lower_diags * PARTIAL;
int upper_y = end_diags * PARTIAL;
// collect hit ids as bits relative to a reference id
uint64_t in_line = (1ULL << dim.x) - 1;
uint64_t combined = 0x0;
if (fulls > 0)
{
for (int shift = 1; shift <= PARTIAL*SD; shift += SD)
{
combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift));
}
for (int shift = 1; shift <= min(fulls - 1, LD); shift++)
{
combined |= (combined << 1) | (combined >> (NUM_BLOCKS - 1));
}
}
if (end_diags != lower_diags)
{
for (int i = 0, shift = fulls + 1; i < bb_end.y - upper_y; i++, shift += SD)
{
combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift));
}
}
if (lower_diags != upper_diags)
{
for (int i = (bb_from.y - lower_y), shift = (bb_from.y - lower_y) * SD; (i < PARTIAL) && (i + lower_y < bb_end.y); i++, shift += SD)
{
combined |= (in_line << shift) | (in_line >> (NUM_BLOCKS - shift));
}
}
combined &= (1ULL << NUM_BLOCKS) - 1;
int seen = 0;
int reference = rrasterizer(math::int2(end.x, lower_y));
while (int current_id = __ffsll(combined))
{
combined -= (1ULL << current_id - 1);
current_id = reference - (current_id - 1);
current_id += (current_id < 0 ? NUM_BLOCKS : 0);
f(current_id);
seen++;
}
return seen;
}
}
};
// DIAGONAL_ITERATIVE: brute-force reference implementation of the DIAGONAL
// pattern. Scans every bin of the bounding box and queries the base-class
// rasterizer(int2) mapping directly — intended for validation/fallback, not
// performance.
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::DIAGONAL_ITERATIVE, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
public:
// Count bins of the inclusive box [from, end] owned by this rasterizer.
__device__ static int numHitBinsForMyRasterizer(const int2 from, const int2 end)
{
// convert the inclusive end to exclusive bounds
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
int id = rasterizer();
int sum = 0;
for (int i = bb_from.y; i < bb_end.y; i++)
{
for (int j = bb_from.x; j < bb_end.x; j++)
{
if (id == rasterizer(make_int2(j, i)))
{
sum++;
}
}
}
return sum;
}
// Return the i-th (0-based, row-major) hit bin for this rasterizer.
// Precondition: i < numHitBinsForMyRasterizer(from, end).
__device__ static int2 getHitBinForMyRasterizer(int i, const int2 from, const int2 end)
{
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
int id = rasterizer();
int sum = 0;
for (int y = bb_from.y; y < bb_end.y; y++)
{
for (int x = bb_from.x; x < bb_end.x; x++)
{
if (id == rasterizer(make_int2(x, y)))
{
sum++;
}
if (sum == i + 1)
{
return make_int2(x, y);
}
}
}
// BUGFIX: the original fell off the end of this non-void function when i
// was out of range (undefined behavior). Defensive fallback: return the
// last bin of the box; the precondition makes this unreachable.
return make_int2(end.x, end.y);
}
// Invoke f(id) once per distinct rasterizer present in the box; returns the
// number of distinct ids seen.
template <typename F>
__device__ static unsigned int traverseRasterizers(int2 from, int2 end, F f)
{
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
math::int2 dim = bb_end - bb_from;
uint64_t v = 0x0ULL;
uint64_t one = 0x1ULL;
for (int i = 0; i < dim.y; i++)
{
for (int j = 0; j < dim.x; j++)
{
int id = rasterizer(make_int2(bb_from.x + j, bb_from.y + i));
v |= (one << id);
}
}
int seen = 0;
// BUGFIX: v is a 64-bit mask, so use __ffsll — __ffs scans only the low
// 32 bits and would drop rasterizer ids >= 32 (the other specializations
// already use __ffsll).
while (int current_id = __ffsll(v))
{
v -= (one << (current_id - 1));
f(current_id - 1);
seen++;
}
return seen;
}
};
// OFFSET_SHIFT_ITERATIVE: brute-force reference implementation of the
// OFFSET_SHIFT pattern. Scans every bin of the bounding box and evaluates
// the pattern id (rrasterizer) directly — intended for validation/fallback.
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::OFFSET_SHIFT_ITERATIVE, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
: public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
static constexpr int PARTIAL = OFFSET_PARAMETER;
// short / long diagonal widths of the OFFSET_SHIFT pattern
static constexpr int SD = (NUM_BLOCKS + 1) / PARTIAL;
static constexpr int LD = SD + (NUM_BLOCKS + 1) - (SD * PARTIAL);
static constexpr int SD_LD_DIFF = LD - SD;
// large bias keeping the modulo argument non-negative
static constexpr int BIG = 10000;
static constexpr int BIG_NUM_R = NUM_BLOCKS * BIG;
// Pattern id of a bin under the OFFSET_SHIFT layout.
__inline__ __device__ static int rrasterizer(math::int2 point)
{
int shift = point.y / PARTIAL;
int in_line = (point.y - (shift*PARTIAL));
int rem = in_line * SD;
int id = ((BIG * NUM_BLOCKS) + (point.x - shift - rem)) % NUM_BLOCKS;
return id;
}
public:
// Count bins of the inclusive box [from, end] owned by this rasterizer.
__device__ static int numHitBinsForMyRasterizer(const int2 from, const int2 end)
{
// convert the inclusive end to exclusive bounds
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
int id = rasterizer();
int sum = 0;
for (int i = bb_from.y; i < bb_end.y; i++)
{
for (int j = bb_from.x; j < bb_end.x; j++)
{
if (id == rrasterizer(math::int2(j, i)))
{
sum++;
}
}
}
return sum;
}
// Return the i-th (0-based, row-major) hit bin for this rasterizer.
// Precondition: i < numHitBinsForMyRasterizer(from, end).
__device__ static int2 getHitBinForMyRasterizer(int i, const int2 from, const int2 end)
{
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
int id = rasterizer();
int sum = 0;
for (int y = bb_from.y; y < bb_end.y; y++)
{
for (int x = bb_from.x; x < bb_end.x; x++)
{
if (id == rrasterizer(math::int2(x, y)))
{
sum++;
}
if (sum == i + 1)
{
return make_int2(x, y);
}
}
}
// BUGFIX: the original fell off the end of this non-void function when i
// was out of range (undefined behavior). Defensive fallback: return the
// last bin of the box; the precondition makes this unreachable.
return make_int2(end.x, end.y);
}
// Invoke f(id) once per distinct rasterizer present in the box; returns the
// number of distinct ids seen.
template <typename F>
__device__ static unsigned int traverseRasterizers(int2 from, int2 end, F f)
{
math::int2 bb_from(from.x, from.y);
math::int2 bb_end(end.x + 1, end.y + 1);
math::int2 dim = bb_end - bb_from;
uint64_t v = 0x0ULL;
uint64_t one = 0x1ULL;
for (int i = 0; i < dim.y; i++)
{
for (int j = 0; j < dim.x; j++)
{
int id = rrasterizer(math::int2(bb_from.x + j, bb_from.y + i));
v |= (one << id);
}
}
int seen = 0;
// BUGFIX: v is a 64-bit mask, so use __ffsll — __ffs scans only the low
// 32 bits and would drop rasterizer ids >= 32 (the other specializations
// already use __ffsll).
while (int current_id = __ffsll(v))
{
v -= (one << (current_id - 1));
f(current_id - 1);
seen++;
}
return seen;
}
};
// Tile-space specialization assigning bins to rasterizers with a plain
// offset pattern (OFFSET_ITERATIVE); coverage queries iterate over every bin
// of the query rectangle.
template < int NUM_BLOCKS, int x_max, int y_max, int bin_size_x, int bin_size_y, int stamp_width, int stamp_height, class RasterizerId>
class PatternTileSpace <PATTERNTECHNIQUE::OFFSET_ITERATIVE, NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
    : public TileSpace<NUM_BLOCKS, x_max, y_max, bin_size_x, bin_size_y, stamp_width, stamp_height, RasterizerId>
{
    static constexpr int PARTIAL = OFFSET_PARAMETER;  // offset denominator
    static constexpr int SD = (NUM_BLOCKS + 1) / PARTIAL;
    static constexpr int LD = SD + (NUM_BLOCKS + 1) - (SD * PARTIAL);
    static constexpr int SD_LD_DIFF = LD - SD;
    static constexpr int BIG = 10000;
    static constexpr int BIG_NUM_R = NUM_BLOCKS * BIG;

    // Map a bin coordinate to the id of the rasterizer that owns it.
    __inline__ __device__ static int rrasterizer(math::int2 point)
    {
        int id = ((point.x*PARTIAL + point.y*NUM_BLOCKS) / PARTIAL) % NUM_BLOCKS;
        return id;
    }
public:
    // Count the bins inside the inclusive rectangle [from, end] that belong
    // to the calling rasterizer (id supplied by the TileSpace base).
    __device__ static int numHitBinsForMyRasterizer(const int2 from, const int2 end)
    {
        ////
        math::int2 bb_from(from.x, from.y);
        math::int2 bb_end(end.x + 1, end.y + 1);  // exclusive upper bounds
        ////
        int id = rasterizer();
        int sum = 0;
        for (int i = bb_from.y; i < bb_end.y; i++)
        {
            for (int j = bb_from.x; j < bb_end.x; j++)
            {
                if (id == rrasterizer(math::int2(j, i)))
                {
                    sum++;
                }
            }
        }
        return sum;
    }

    // Return the i-th (0-based) bin inside [from, end] owned by the calling
    // rasterizer, scanning in row-major order.
    __device__ static int2 getHitBinForMyRasterizer(int i, const int2 from, const int2 end)
    {
        ////
        math::int2 bb_from(from.x, from.y);
        math::int2 bb_end(end.x + 1, end.y + 1);
        ////
        int id = rasterizer();
        int sum = 0;
        for (int y = bb_from.y; y < bb_end.y; y++)
        {
            for (int x = bb_from.x; x < bb_end.x; x++)
            {
                if (id == rrasterizer(math::int2(x, y)))
                {
                    sum++;
                }
                if (sum == i + 1)
                {
                    return make_int2(x, y);
                }
            }
        }
        // Fix: the original fell off the end of this non-void function when i
        // was out of range (undefined behavior); return a sentinel instead.
        return make_int2(-1, -1);
    }

    // Invoke f(id) once per distinct rasterizer id owning a bin inside
    // [from, end]; returns the number of ids visited.
    // NOTE(review): the 64-bit mask is drained with 32-bit __ffs() -- ids
    // >= 32 would be skipped; confirm NUM_BLOCKS <= 32 or use __ffsll().
    template <typename F>
    __device__ static unsigned int traverseRasterizers(int2 from, int2 end, F f)
    {
        ////
        math::int2 bb_from(from.x, from.y);
        math::int2 bb_end(end.x + 1, end.y + 1);
        ////
        math::int2 dim = bb_end - bb_from;
        uint64_t v = 0x0ULL;
        uint64_t one = 0x1ULL;
        for (int i = 0; i < dim.y; i++)
        {
            for (int j = 0; j < dim.x; j++)
            {
                int id = rrasterizer(math::int2(bb_from.x + j, bb_from.y + i));
                v |= (one << id);
            }
        }
        int seen = 0;
        while (int current_id = __ffs(v))
        {
            v -= (one << (current_id - 1));
            f(current_id - 1);
            seen++;
        }
        return seen;
    }
};
#endif // INCLUDED_CURE_OFFSET_SHIFT_BIN_TILE_SPACE
#define NUM_THREADS 64
// #define RADIUS 32
__global__ void se3_build_forward_kernel(
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> attention,
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> transforms,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights,
    const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> intrinsics,
    torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> Hx,
    torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> bx,
    int radius)
{
  /* Dense transform layer aggregation step.
     One thread per output pixel (h1, w1).  Each thread accumulates a 6x6
     Gauss-Newton system (H = J^T W J, b = J^T W r) over all source pixels
     (h2, w2) within Chebyshev distance `radius`, weighted by the attention
     map.  Source pixel data is staged through shared memory in tiles of
     NUM_THREADS.
     Inputs:
       attention:  [B, H, W, H, W]
       transforms: read as 12 contiguous HxW planes (k*dim stride below),
                   i.e. the rows of a per-pixel 3x4 [R|t] matrix.
                   NOTE(review): the original note said [B, H, W, 4, 4],
                   which does not match this indexing -- confirm layout.
       points:     [B, 3, H, W]
       targets:    [B, 3, H, W]  (3 channels are read below: u, v, inv-depth)
       weights:    [B, 3, H, W]  (3 channels are read below)
       intrinsics: [B, 4]  (fx, fy, cx, cy)
     Outputs:
       Hx: [B, 6, 6, H, W]
       bx: [B, 6, 1, H, W]
  */
  int batch_id = blockIdx.x; // batch_index
  int tx = threadIdx.x;
  int ix = blockIdx.y * NUM_THREADS + tx; // image_index
  int ht = attention.size(1);
  int wd = attention.size(2);
  int dim = ht * wd;
  // Destination pixel coordinates handled by this thread.
  int h1 = ix / wd;
  int w1 = ix % wd;
  // Raw planar views: channel k of pixel j lives at index [j + k*dim].
  const float* Gdata = transforms[batch_id].data();
  const float* Xdata = points[batch_id].data();
  const float* rdata = targets[batch_id].data();
  const float* wdata = weights[batch_id].data();
  // Intrinsics broadcast through shared memory; consumed only after the
  // __syncthreads() below.
  __shared__ float fx, fy, cx, cy;
  if (tx == 0) {
    fx = intrinsics[batch_id][0];
    fy = intrinsics[batch_id][1];
    cx = intrinsics[batch_id][2];
    cy = intrinsics[batch_id][3];
  }
  // This pixel's 3x4 transform [R|t] kept in registers.
  float G[12];
  if (ix < dim) {
    for (int k=0; k<12; k++)
      G[k] = Gdata[ix + k*dim];
  }
  // linear system accumulators
  float H[6][6];
  float b[6];
  for (int ii=0; ii<6; ii++) {
    b[ii] = 0.0f;
    for (int jj=0; jj<6; jj++) {
      H[ii][jj] = 0.0f;
    }
  }
  // jacobians of (u, v, inverse depth) w.r.t. the 6 se3 generators
  float Ju[6];
  float Jv[6];
  float Jz[6];
  // Shared-memory tiles of source-pixel points, targets and weights.
  __shared__ float X0[3][NUM_THREADS];
  __shared__ float rvec[3][NUM_THREADS];
  __shared__ float wvec[3][NUM_THREADS];
  __syncthreads();
  for (int i=0; i<dim; i+=NUM_THREADS) {
    // load in data: one source pixel per thread into the shared tile
    int jx = i + tx;
    if (jx < dim) {
      X0[0][tx] = Xdata[jx+0*dim];
      X0[1][tx] = Xdata[jx+1*dim];
      X0[2][tx] = Xdata[jx+2*dim];
      rvec[0][tx] = rdata[jx+0*dim];
      rvec[1][tx] = rdata[jx+1*dim];
      rvec[2][tx] = rdata[jx+2*dim];
      wvec[0][tx] = wdata[jx+0*dim];
      wvec[1][tx] = wdata[jx+1*dim];
      wvec[2][tx] = wdata[jx+2*dim];
    }
    __syncthreads();
    // Accumulate the contribution of every source pixel in this tile.
    for (int j=0; j<NUM_THREADS; j++) {
      jx = i + j;
      if (ix<dim && jx<dim) {
        int h2 = jx / wd;
        int w2 = jx % wd;
        // Chebyshev-distance gate between destination and source pixels.
        int r = max(abs(h1-h2), abs(w1-w2));
        if (r > radius)
          continue;
        // Attention-scaled confidence weights for this pixel pair.
        float w = attention[batch_id][h1][w1][h2][w2];
        float wu = w * wvec[0][j];
        float wv = w * wvec[1][j];
        float wz = w * wvec[2][j];
        // Transform the source point into this pixel's frame.
        float X1, Y1, Z1;
        X1 = G[0]*X0[0][j] + G[1]*X0[1][j] + G[2]*X0[2][j] + G[3];
        Y1 = G[4]*X0[0][j] + G[5]*X0[1][j] + G[6]*X0[2][j] + G[7];
        Z1 = G[8]*X0[0][j] + G[9]*X0[1][j] + G[10]*X0[2][j] + G[11];
        // NOTE(review): points with Z1 < 0.1 are re-assigned 0.001 and then
        // rejected by the Z1 < 0.1 test below -- net effect is skipping them.
        if (Z1 < 0.1) Z1 = 0.001;
        // residual vectors: reprojection (u, v) and inverse-depth errors
        float ru = rvec[0][j] - (fx * (X1 / Z1) + cx);
        float rv = rvec[1][j] - (fy * (Y1 / Z1) + cy);
        float rz = rvec[2][j] - (1.0 / Z1);
        // Gross-outlier / behind-camera gate.
        if (abs(ru) > 250 || abs(rv) > 250 || Z1 < 0.1) {
          continue;
        }
        float d = 1.f/Z1;
        float d2 = d*d;
        // x-jacobians
        Ju[0] = fx * d;
        Ju[1] = fx * 0.0;
        Ju[2] = fx * (-X1*d2);
        Ju[3] = fx * (-X1*Y1*d2);
        Ju[4] = fx * (1 + X1*X1*d2);
        Ju[5] = fx * (-Y1*d);
        // y-jacobians
        Jv[0] = fy * 0.0;
        Jv[1] = fy * d;
        Jv[2] = fy * (-Y1*d2);
        Jv[3] = fy * -1 * (1+Y1*Y1*d2);
        Jv[4] = fy * X1*Y1*d2;
        Jv[5] = fy * X1*d;
        // z-jacobians
        Jz[0] = 0.0;
        Jz[1] = 0.0;
        Jz[2] = -d2;
        Jz[3] = d * Y1;
        Jz[4] = -d * X1;
        Jz[5] = 0.0;
        // Rank-3 update of the linear system.
        for (int ii=0; ii<6; ii++) {
          b[ii] += wu*ru*Ju[ii] + wv*rv*Jv[ii] + wz*rz*Jz[ii];
          for (int jj=0; jj<6; jj++) {
            H[ii][jj] += wu*Ju[ii]*Ju[jj] + wv*Jv[ii]*Jv[jj] + wz*Jz[ii]*Jz[jj];
          }
        }
      }
    }
    __syncthreads();
  }
  // Write this pixel's accumulated system to global memory.
  if (ix < dim) {
    for (int ii=0; ii<6; ii++) {
      bx[batch_id][ii][0][h1][w1] = b[ii];
      for (int jj=0; jj<6; jj++) {
        Hx[batch_id][ii][jj][h1][w1] = H[ii][jj];
      }
    }
  }
}
__global__ void se3_build_backward_kernel(
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> attention,
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> transforms,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> points,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets,
    const torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights,
    const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> intrinsics,
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> Hx_grad,
    const torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> bx_grad,
    torch::PackedTensorAccessor32<float,5,torch::RestrictPtrTraits> attention_grad,
    torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> targets_grad,
    torch::PackedTensorAccessor32<float,4,torch::RestrictPtrTraits> weights_grad,
    int radius)
{
  /* Backward pass of se3_build_forward_kernel: given the gradients of the
     per-pixel linear system (Hx_grad, bx_grad), accumulate gradients with
     respect to the attention map, the target residuals and the confidence
     weights.  One thread per source pixel (h2, w2); destination-pixel data
     (transforms and system gradients) is staged through shared memory.
     NOTE(review): the residual gate here is 50 px while the forward kernel
     uses 250 px -- pairs with residuals in (50, 250] contribute forward but
     get zero gradient here; confirm this is intended. */
  int batch_id = blockIdx.x; // batch_index
  int tx = threadIdx.x;
  int ix = blockIdx.y * NUM_THREADS + tx; // image_index
  int ht = attention.size(1);
  int wd = attention.size(2);
  int dim = ht * wd;
  int h2 = ix / wd;
  int w2 = ix % wd;
  // Planar views: channel k of pixel j lives at index [j + k*dim].
  const float* Gdata = transforms[batch_id].data();
  const float* Hdata = Hx_grad[batch_id].data();
  const float* bdata = bx_grad[batch_id].data();
  // Intrinsics broadcast through shared memory; consumed after the
  // __syncthreads() below.
  __shared__ float fx, fy, cx, cy;
  if (tx == 0) {
    fx = intrinsics[batch_id][0];
    fy = intrinsics[batch_id][1];
    cx = intrinsics[batch_id][2];
    cy = intrinsics[batch_id][3];
  }
  // Fix: threads of the last block can have ix >= dim, which makes h2 >= ht.
  // The original loaded points/targets/weights (and stored the gradients at
  // the end) through that out-of-range index -- out-of-bounds global memory
  // accesses whenever ht*wd is not a multiple of NUM_THREADS.  These threads
  // must still run the loop so __syncthreads() stays uniform, so only the
  // per-thread global loads and stores are guarded.
  float X0[3] = {0.0f, 0.0f, 0.0f};
  float target_u = 0.0f, target_v = 0.0f, target_z = 0.0f;
  float wu = 0.0f, wv = 0.0f, wz = 0.0f;
  if (ix < dim) {
    X0[0] = points[batch_id][0][h2][w2];
    X0[1] = points[batch_id][1][h2][w2];
    X0[2] = points[batch_id][2][h2][w2];
    target_u = targets[batch_id][0][h2][w2];
    target_v = targets[batch_id][1][h2][w2];
    target_z = targets[batch_id][2][h2][w2];
    wu = weights[batch_id][0][h2][w2];
    wv = weights[batch_id][1][h2][w2];
    wz = weights[batch_id][2][h2][w2];
  }
  // jacobians of (u, v, inverse depth) w.r.t. the 6 se3 generators
  float Ju[6], Jv[6], Jz[6];
  // gradient accumulators for this source pixel
  float diff_ru = 0.0f;
  float diff_rv = 0.0f;
  float diff_rz = 0.0f;
  float diff_wu = 0.0f;
  float diff_wv = 0.0f;
  float diff_wz = 0.0f;
  // Shared tiles of destination-pixel transforms and system gradients.
  __shared__ float Gs[12][NUM_THREADS];
  __shared__ float H_grad[36][NUM_THREADS];
  __shared__ float b_grad[6][NUM_THREADS];
  __syncthreads();
  for (int i=0; i<dim; i+=NUM_THREADS) {
    // Cooperative load: one destination pixel per thread.
    int jx = i + tx;
    if (jx < dim) {
      for (int k=0; k<12; k++)
        Gs[k][tx] = Gdata[jx + k*dim];
      for (int k=0; k<36; k++)
        H_grad[k][tx] = Hdata[jx + k*dim];
      for (int k=0; k<6; k++)
        b_grad[k][tx] = bdata[jx + k*dim];
    }
    __syncthreads();
    for (int j=0; j<NUM_THREADS; j++) {
      jx = i + j;
      if (ix<dim && jx<dim) {
        int h1 = jx / wd;
        int w1 = jx % wd;
        // Chebyshev-distance gate, mirroring the forward kernel.
        int r = max(abs(h1-h2), abs(w1-w2));
        if (r > radius)
          continue;
        float w = attention[batch_id][h1][w1][h2][w2];
        float diff_w = 0.0f;  // gradient w.r.t. this attention entry
        // Transform the source point into the destination pixel's frame.
        float X1, Y1, Z1;
        X1 = Gs[0][j]*X0[0] + Gs[1][j]*X0[1] + Gs[2][j]*X0[2] + Gs[3][j];
        Y1 = Gs[4][j]*X0[0] + Gs[5][j]*X0[1] + Gs[6][j]*X0[2] + Gs[7][j];
        Z1 = Gs[8][j]*X0[0] + Gs[9][j]*X0[1] + Gs[10][j]*X0[2] + Gs[11][j];
        if (Z1 < 0.1) Z1 = 0.001;
        // residual vectors (must match the forward computation)
        float ru = target_u - (fx * (X1 / Z1) + cx);
        float rv = target_v - (fy * (Y1 / Z1) + cy);
        float rz = target_z - (1.0 / Z1);
        if (abs(ru) > 50 || abs(rv) > 50 || Z1 < 0.1) {
          continue;
        }
        float d = 1.f/Z1;
        float d2 = d*d;
        // x-jacobians
        Ju[0] = fx * d;
        Ju[1] = fx * 0.0;
        Ju[2] = fx * (-X1*d2);
        Ju[3] = fx * (-X1*Y1*d2);
        Ju[4] = fx * (1 + X1*X1*d2);
        Ju[5] = fx * (-Y1*d);
        // y-jacobians
        Jv[0] = fy * 0.0;
        Jv[1] = fy * d;
        Jv[2] = fy * (-Y1*d2);
        Jv[3] = fy * -1 * (1+Y1*Y1*d2);
        Jv[4] = fy * X1*Y1*d2;
        Jv[5] = fy * X1*d;
        // z-jacobians
        Jz[0] = 0.0;
        Jz[1] = 0.0;
        Jz[2] = -d2;
        Jz[3] = d * Y1;
        Jz[4] = -d * X1;
        Jz[5] = 0.0;
        for (int ii=0; ii<6; ii++) {
          // residual gradients
          diff_ru += w*wu*Ju[ii]*b_grad[ii][j];
          diff_rv += w*wv*Jv[ii]*b_grad[ii][j];
          diff_rz += w*wz*Jz[ii]*b_grad[ii][j];
          // weights gradients
          diff_wu += w*ru*Ju[ii]*b_grad[ii][j];
          diff_wv += w*rv*Jv[ii]*b_grad[ii][j];
          diff_wz += w*rz*Jz[ii]*b_grad[ii][j];
          // embedding weight
          diff_w += (wu*ru*Ju[ii] + wv*rv*Jv[ii] + wz*rz*Jz[ii]) * b_grad[ii][j];
          for (int jj=0; jj<6; jj++) {
            diff_wu += w*Ju[ii]*Ju[jj]*H_grad[6*ii+jj][j];
            diff_wv += w*Jv[ii]*Jv[jj]*H_grad[6*ii+jj][j];
            diff_wz += w*Jz[ii]*Jz[jj]*H_grad[6*ii+jj][j];
            diff_w += (wu*Ju[ii]*Ju[jj] + wv*Jv[ii]*Jv[jj] + wz*Jz[ii]*Jz[jj])*H_grad[6*ii+jj][j];
          }
        }
        attention_grad[batch_id][h1][w1][h2][w2] = diff_w;
      }
    }
    __syncthreads();
  }
  // Fix (see note above): only in-range threads may write their gradients.
  if (ix < dim) {
    targets_grad[batch_id][0][h2][w2] = diff_ru;
    targets_grad[batch_id][1][h2][w2] = diff_rv;
    targets_grad[batch_id][2][h2][w2] = diff_rz;
    weights_grad[batch_id][0][h2][w2] = diff_wu;
    weights_grad[batch_id][1][h2][w2] = diff_wv;
    weights_grad[batch_id][2][h2][w2] = diff_wz;
  }
}
// Host wrapper for the forward aggregation: allocates the per-pixel linear
// system tensors and launches se3_build_forward_kernel with one block row
// per batch element and one thread per pixel.
// Returns {H, b} with shapes [B, 6, 6, H, W] and [B, 6, 1, H, W].
std::vector<torch::Tensor> se3_build_cuda(
    torch::Tensor attention,
    torch::Tensor transforms,
    torch::Tensor points,
    torch::Tensor targets,
    torch::Tensor weights,
    torch::Tensor intrinsics,
    int radius)
{
  const int num_batch = attention.size(0);
  const int height = attention.size(1);
  const int width = attention.size(2);
  const int num_pixels = height * width;
  // Grid: batches along x, ceil(num_pixels / NUM_THREADS) blocks along y.
  const dim3 blocks(num_batch, (num_pixels + NUM_THREADS - 1) / NUM_THREADS);
  auto options = attention.options();
  torch::Tensor H = torch::zeros({num_batch, 6, 6, height, width}, options);
  torch::Tensor b = torch::zeros({num_batch, 6, 1, height, width}, options);
  se3_build_forward_kernel<<<blocks, NUM_THREADS>>>(
    attention.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    transforms.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    points.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    intrinsics.packed_accessor32<float,2,torch::RestrictPtrTraits>(),
    H.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    b.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    radius);
  return {H, b};
}
// Host wrapper for the backward pass: allocates zero-initialized gradient
// tensors and launches se3_build_backward_kernel with the same grid layout
// as the forward wrapper.
// Returns {attention_grad, targets_grad, weights_grad}.
std::vector<torch::Tensor> se3_build_backward_cuda(
    torch::Tensor attention,
    torch::Tensor transforms,
    torch::Tensor points,
    torch::Tensor targets,
    torch::Tensor weights,
    torch::Tensor intrinsics,
    torch::Tensor H_grad,
    torch::Tensor b_grad,
    int radius)
{
  const int num_batch = attention.size(0);
  const int height = attention.size(1);
  const int width = attention.size(2);
  const int num_pixels = height * width;
  // Grid: batches along x, ceil(num_pixels / NUM_THREADS) blocks along y.
  const dim3 blocks(num_batch, (num_pixels + NUM_THREADS - 1) / NUM_THREADS);
  torch::Tensor attention_grad = torch::zeros_like(attention);
  torch::Tensor targets_grad = torch::zeros_like(targets);
  torch::Tensor weights_grad = torch::zeros_like(weights);
  se3_build_backward_kernel<<<blocks, NUM_THREADS>>>(
    attention.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    transforms.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    points.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    targets.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    weights.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    intrinsics.packed_accessor32<float,2,torch::RestrictPtrTraits>(),
    H_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    b_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    attention_grad.packed_accessor32<float,5,torch::RestrictPtrTraits>(),
    targets_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    weights_grad.packed_accessor32<float,4,torch::RestrictPtrTraits>(),
    radius);
  return {attention_grad, targets_grad, weights_grad};
}
#pragma once
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/multithread_utils.cuh>
namespace gunrock {
namespace util {
namespace scan {
// In-place Blelloch (work-efficient) exclusive scan of 2*blockDim.x elements
// held in shared memory.  After the up-sweep, the last thread stores the
// block total into Sum[Sum_Offset] (skipped when Sum_Offset == -1) and
// clears the root so the down-sweep yields an exclusive prefix sum.
// Block_N is the sweep depth, i.e. log2(2 * blockDim.x).
template <typename _SizeT, int Block_N>
__device__ __forceinline__ void ScanLoop(_SizeT* s_Buffer, _SizeT* Sum,
                                         _SizeT Sum_Offset) {
  _SizeT Step = 1;
  // Up-sweep (reduce) phase.
  // Fix: the pragma was misspelled "unrool" (here and below), so the
  // compiler silently ignored it and the loop was never unrolled.
#pragma unroll
  for (int i = 0; i < Block_N; i++) {
    _SizeT k = threadIdx.x * Step * 2 + Step - 1;
    if (k + Step < blockDim.x * 2) s_Buffer[k + Step] += s_Buffer[k];
    Step *= 2;
    __syncthreads();
  }  // for i
  // Record the block total, then clear the root for the down-sweep.
  if (threadIdx.x == blockDim.x - 1) {
    if (Sum_Offset != -1) Sum[Sum_Offset] = s_Buffer[blockDim.x * 2 - 1];
    s_Buffer[blockDim.x * 2 - 1] = 0;
  }  // if
  __syncthreads();
  Step /= 2;
  // Down-sweep phase.
#pragma unroll
  for (int i = Block_N - 1; i >= 0; i--) {
    _SizeT k = threadIdx.x * Step * 2 + Step - 1;
    if (k + Step < blockDim.x * 2) {
      _SizeT t = s_Buffer[k];
      s_Buffer[k] = s_Buffer[k + Step];
      s_Buffer[k + Step] += t;
    }
    Step /= 2;
    __syncthreads();
  }  // for i
}
// Level-0 pass of the multi-row scan.  Each thread handles two elements.
// For every partition y in [0, M): build a 0/1 flag (element selected AND
// assigned to partition y), block-scan the flags (ScanLoop), write each
// element's local rank into Buffer and the block total into
// Sum[y * N_Next + block].
template <typename _VertexId, typename _SizeT, int Block_N>
__global__ void Step0(const _SizeT N, const _SizeT M, const _SizeT N_Next,
                      const _VertexId* const Select, const int* const Splict,
                      _SizeT* Buffer, _SizeT* Sum) {
  extern __shared__ _SizeT s_Buffer[];
  int Splict0 = -1, Splict1 = -1;  // -1 = element not selected
  _SizeT x =
      (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * 2 + threadIdx.x;
  if (x - threadIdx.x >= N) return;  // whole block past the end
  if (x < N)
    if (Select[x] != 0) Splict0 = Splict[x];
  if (x + blockDim.x < N)
    if (Select[x + blockDim.x] != 0) Splict1 = Splict[x + blockDim.x];
  // One block-wide scan per partition row y.
  for (int y = 0; y < M; y++) {
    if (y == Splict0)
      s_Buffer[threadIdx.x] = 1;
    else
      s_Buffer[threadIdx.x] = 0;
    if (y == Splict1)
      s_Buffer[threadIdx.x + blockDim.x] = 1;
    else
      s_Buffer[threadIdx.x + blockDim.x] = 0;
    __syncthreads();
    // Blocks past N_Next have no slot in Sum (-1 suppresses the write).
    if (x / blockDim.x / 2 < N_Next)
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, y * N_Next + x / blockDim.x / 2);
    else
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, -1);
    // Keep only the rank of the partition each element belongs to.
    if (y == Splict0) Buffer[x] = s_Buffer[threadIdx.x];
    if (y == Splict1)
      Buffer[x + blockDim.x] = s_Buffer[threadIdx.x + blockDim.x];
  }  // for y
}  // Step0
// Variant of Step0 for key arrays: every element is implicitly selected and
// its partition comes from an indirect lookup, Splict[Keys[x]].
template <typename _VertexId, typename _SizeT, int Block_N>
__global__ void Step0b(const _SizeT N, const _SizeT M, const _SizeT N_Next,
                       const _VertexId* const Keys, const int* const Splict,
                       _SizeT* Buffer, _SizeT* Sum) {
  extern __shared__ _SizeT s_Buffer[];
  int Splict0 = -1, Splict1 = -1;
  _SizeT x =
      (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * 2 + threadIdx.x;
  if (x - threadIdx.x >= N) return;  // whole block past the end
  if (x < N) Splict0 = Splict[Keys[x]];
  if (x + blockDim.x < N) Splict1 = Splict[Keys[x + blockDim.x]];
  // One block-wide scan per partition row y (see Step0).
  for (int y = 0; y < M; y++) {
    if (y == Splict0)
      s_Buffer[threadIdx.x] = 1;
    else
      s_Buffer[threadIdx.x] = 0;
    if (y == Splict1)
      s_Buffer[threadIdx.x + blockDim.x] = 1;
    else
      s_Buffer[threadIdx.x + blockDim.x] = 0;
    __syncthreads();
    if (x / blockDim.x / 2 < N_Next)
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, y * N_Next + x / blockDim.x / 2);
    else
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, -1);
    if (y == Splict0) Buffer[x] = s_Buffer[threadIdx.x];
    if (y == Splict1)
      Buffer[x + blockDim.x] = s_Buffer[threadIdx.x + blockDim.x];
  }
}
// Variant of Step0 where a key may belong to several partitions: key K spans
// Splict[Offsets[K] .. Offsets[K+1]).  An element's rank for partition y is
// stored partition-major at Buffer[x + y * N].
// NOTE(review): K1 is initialized to 1 rather than 0 -- harmless, since it
// is only read under the x + blockDim.x < N guard where it is overwritten.
template <typename _VertexId, typename _SizeT, int Block_N>
__global__ void Step0d(const _SizeT N, const _SizeT M, const _SizeT N_Next,
                       const _VertexId* const Keys, const _SizeT* const Offsets,
                       const int* const Splict, _SizeT* Buffer, _SizeT* Sum) {
  extern __shared__ _SizeT s_Buffer[];
  _VertexId K0 = 0, K1 = 1;
  bool mark0 = false, mark1 = false;  // does the element hit partition y?
  _SizeT x =
      (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * 2 + threadIdx.x;
  if (x - threadIdx.x >= N) return;  // whole block past the end
  if (x < N) K0 = Keys[x];
  if (x + blockDim.x < N) K1 = Keys[x + blockDim.x];
  for (int y = 0; y < M; y++) {
    s_Buffer[threadIdx.x] = 0;
    mark0 = false;
    s_Buffer[threadIdx.x + blockDim.x] = 0;
    mark1 = false;
    // Linear search of each key's partition list for partition y.
    if (x < N) {
      for (_SizeT i = Offsets[K0]; i < Offsets[K0 + 1]; i++)
        if (y == Splict[i]) {
          s_Buffer[threadIdx.x] = 1;
          mark0 = true;
          break;
        }
    }
    if (x + blockDim.x < N) {
      for (_SizeT i = Offsets[K1]; i < Offsets[K1 + 1]; i++)
        if (y == Splict[i]) {
          s_Buffer[threadIdx.x + blockDim.x] = 1;
          mark1 = true;
          break;
        }
    }
    __syncthreads();
    if (x / blockDim.x / 2 < N_Next)
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, y * N_Next + x / blockDim.x / 2);
    else
      ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, -1);
    // Partition-major layout: row y occupies Buffer[y*N .. y*N + N).
    if (mark0) Buffer[x + y * N] = s_Buffer[threadIdx.x];
    if (mark1)
      Buffer[x + blockDim.x + y * N] = s_Buffer[threadIdx.x + blockDim.x];
  }
}
// Intermediate scan level: block-scans the per-block sums produced by the
// previous level for every partition row y, writing the new block totals to
// Sum[y * gridDim.x + block] for the next level.
template <typename _SizeT, int Block_N>
__global__ void Step1(const _SizeT N, _SizeT* Buffer, _SizeT* Sum) {
  extern __shared__ _SizeT s_Buffer[];
  _SizeT y = blockIdx.y * blockDim.y + threadIdx.y;  // partition row
  _SizeT x = blockIdx.x * blockDim.x * 2 + threadIdx.x;
  // Out-of-range lanes contribute zeros so the block scan stays valid.
  if (x >= N)
    s_Buffer[threadIdx.x] = 0;
  else
    s_Buffer[threadIdx.x] = Buffer[y * N + x];
  if (x + blockDim.x >= N)
    s_Buffer[threadIdx.x + blockDim.x] = 0;
  else
    s_Buffer[threadIdx.x + blockDim.x] = Buffer[y * N + x + blockDim.x];
  __syncthreads();
  ScanLoop<_SizeT, Block_N>(s_Buffer, Sum, y * gridDim.x + blockIdx.x);
  if (x < N) Buffer[y * N + x] = s_Buffer[threadIdx.x];
  if (x + blockDim.x < N)
    Buffer[y * N + x + blockDim.x] = s_Buffer[threadIdx.x + blockDim.x];
}  // Step1
// Down-propagation: add the scanned per-block totals (Sum) back into every
// element of the corresponding block for each partition row.
template <typename _SizeT>
__global__ void Step2(const _SizeT N, const _SizeT* Sum, _SizeT* Buffer) {
  const _SizeT col = blockIdx.x * blockDim.x + threadIdx.x;
  const _SizeT row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= N) return;
  Buffer[row * N + col] += Sum[row * gridDim.x + blockIdx.x];
}  // Step2
// Single-thread exclusive prefix sum over the partition lengths:
// Offset[r] = sum of Length[0 .. r-1], with Offset[0] = 0.
template <typename _SizeT>
__global__ void Step2_5(_SizeT* Length, _SizeT* Offset, _SizeT Num_Rows) {
  _SizeT running = 0;
  Offset[0] = 0;
  for (int r = 0; r < Num_Rows; r++) {
    running += Length[r];
    Offset[r + 1] = running;
  }
}
// Final scatter: combine an element's block-local rank (Buffer), its
// partition's base offset (Offset) and the scanned block totals (Sum) into
// the element's final position.  Unselected elements get a -1 sentinel.
template <typename _VertexId, typename _SizeT, bool EXCLUSIVE>
__global__ void Step3(const _SizeT N, const _SizeT N_Next,
                      const _VertexId* const Select, const int* const Splict,
                      const _SizeT* const Offset, const _SizeT* const Sum,
                      const _SizeT* const Buffer, _SizeT* Result) {
  const _SizeT block_linear = blockIdx.x + blockIdx.y * gridDim.x;
  const _SizeT x = block_linear * blockDim.x + threadIdx.x;
  if (x >= N) return;
  if (Select[x] == 0) {
    Result[x] = -1;
    return;
  }
  const int part = Splict[x];
  _SizeT rank = Buffer[x] + Offset[part];
  if (block_linear > 0) rank += Sum[part * N_Next + block_linear];
  if (!EXCLUSIVE) rank += 1;
  Result[x] = rank;
}  // Step3
// Scatter with key indirection: element x's key selects its partition, and
// the scan rank places Convertion[key] into Result.  Associate arrays are
// copied only for non-local partitions (splict > 0), compacted relative to
// the start of partition 1 (Offset[1]).
template <typename _VertexId, typename _SizeT, bool EXCLUSIVE>
__global__ void Step3b(const _SizeT N, const _SizeT N_Next,
                       const _SizeT num_associate, const _VertexId* const Key,
                       const int* const Splict,
                       const _VertexId* const Convertion,
                       const _SizeT* const Offset, const _SizeT* const Sum,
                       const _SizeT* const Buffer, _VertexId* Result,
                       _VertexId** associate_in, _VertexId** associate_out) {
  const _SizeT block_linear = blockIdx.x + blockIdx.y * gridDim.x;
  const _SizeT x = block_linear * blockDim.x + threadIdx.x;
  if (x >= N) return;
  const _VertexId key = Key[x];
  const _SizeT remote_base = Offset[1];
  const _SizeT part = Splict[key];
  _SizeT dst = Buffer[x] + Offset[part];
  if (block_linear > 0) dst += Sum[part * N_Next + block_linear];
  if (!EXCLUSIVE) dst += 1;
  Result[dst] = Convertion[key];
  if (part > 0) {
    for (int a = 0; a < num_associate; a++)
      associate_out[a][dst - remote_base] = associate_in[a][key];
  }
}
// Like Step3b, but with separate vertex- and value-typed associate arrays
// whose counts are runtime parameters.
template <typename _VertexId, typename _SizeT, typename _Value, bool EXCLUSIVE>
__global__ void Step3c(
    const _SizeT N, const _SizeT N_Next, const _SizeT num_vertex_associate,
    const _SizeT num_value__associate, const _VertexId* const Key,
    const int* const Splict, const _VertexId* const Convertion,
    const _SizeT* const Offset, const _SizeT* const Sum,
    const _SizeT* const Buffer, _VertexId* Result,
    _VertexId** vertex_associate_in, _VertexId** vertex_associate_out,
    _Value** value__associate_in, _Value** value__associate_out) {
  const _SizeT block_linear = blockIdx.x + blockIdx.y * gridDim.x;
  const _SizeT x = block_linear * blockDim.x + threadIdx.x;
  if (x >= N) return;
  const _VertexId key = Key[x];
  const _SizeT remote_base = Offset[1];  // start of the first remote partition
  const _SizeT part = Splict[key];
  _SizeT dst = Buffer[x] + Offset[part];
  if (block_linear > 0) dst += Sum[part * N_Next + block_linear];
  if (!EXCLUSIVE) dst += 1;
  Result[dst] = Convertion[key];
  if (part > 0) {
    for (int a = 0; a < num_vertex_associate; a++)
      vertex_associate_out[a][dst - remote_base] = vertex_associate_in[a][key];
    for (int a = 0; a < num_value__associate; a++)
      value__associate_out[a][dst - remote_base] = value__associate_in[a][key];
  }
}
// Compile-time-count overload of Step3c: the associate-array counts are
// template parameters so both copy loops can be fully unrolled.
template <typename _VertexId, typename _SizeT, typename _Value, bool EXCLUSIVE,
          _SizeT num_vertex_associate, _SizeT num_value__associate>
__global__ void Step3c(const _SizeT N, const _SizeT N_Next,
                       const _VertexId* const Key, const int* const Splict,
                       const _VertexId* const Convertion,
                       const _SizeT* const Offset, const _SizeT* const Sum,
                       const _SizeT* const Buffer, _VertexId* Result,
                       _VertexId** vertex_associate_in,
                       _VertexId** vertex_associate_out,
                       _Value** value__associate_in,
                       _Value** value__associate_out) {
  const _SizeT block_linear = blockIdx.x + blockIdx.y * gridDim.x;
  const _SizeT x = block_linear * blockDim.x + threadIdx.x;
  if (x >= N) return;
  const _VertexId key = Key[x];
  const _SizeT remote_base = Offset[1];  // start of the first remote partition
  const _SizeT part = Splict[key];
  _SizeT dst = Buffer[x] + Offset[part];
  if (block_linear > 0) dst += Sum[part * N_Next + block_linear];
  if (!EXCLUSIVE) dst += 1;
  Result[dst] = Convertion[key];
  if (part > 0) {
#pragma unroll
    for (int a = 0; a < num_vertex_associate; a++)
      vertex_associate_out[a][dst - remote_base] = vertex_associate_in[a][key];
#pragma unroll
    for (int a = 0; a < num_value__associate; a++)
      value__associate_out[a][dst - remote_base] = value__associate_in[a][key];
  }
}
// Multi-partition scatter: each key owns the range
// [In_Offset[key], In_Offset[key+1]) of (Splict, Convertion) entries, and
// each entry is placed independently using the partition-major ranks in
// Buffer (element x, partition p at Buffer[x + p * N]).
template <typename _VertexId, typename _SizeT, typename _Value, bool EXCLUSIVE,
          _SizeT num_vertex_associate, _SizeT num_value__associate>
__global__ void Step3d(const _SizeT N, const _SizeT N_Next,
                       const _VertexId* const Key, const int* const Splict,
                       const _VertexId* const Convertion,
                       const _SizeT* const In_Offset,
                       const _SizeT* const Out_Offset, const _SizeT* const Sum,
                       const _SizeT* const Buffer, _VertexId* Result,
                       _VertexId** vertex_associate_in,
                       _VertexId** vertex_associate_out,
                       _Value** value__associate_in,
                       _Value** value__associate_out) {
  const _SizeT block_linear = blockIdx.x + blockIdx.y * gridDim.x;
  const _SizeT x = block_linear * blockDim.x + threadIdx.x;
  if (x >= N) return;
  const _VertexId key = Key[x];
  const _SizeT remote_base = Out_Offset[1];
  for (_SizeT e = In_Offset[key]; e < In_Offset[key + 1]; e++) {
    const _SizeT part = Splict[e];
    _SizeT dst = Buffer[x + part * N] + Out_Offset[part];
    if (block_linear > 0) dst += Sum[part * N_Next + block_linear];
    if (!EXCLUSIVE) dst += 1;
    Result[dst] = Convertion[e];
    if (part > 0) {
#pragma unroll
      for (int a = 0; a < num_vertex_associate; a++)
        vertex_associate_out[a][dst - remote_base] = vertex_associate_in[a][key];
#pragma unroll
      for (int a = 0; a < num_value__associate; a++)
        value__associate_out[a][dst - remote_base] = value__associate_in[a][key];
    }
  }
}
// Multi-row (segmented) scan helper used for graph-partitioning traversal:
// scans N elements split across Num_Rows partitions in one pass hierarchy.
template <typename VertexId, // Type of the select array
          typename SizeT, // Type of counters
          bool EXCLUSIVE = true, // Whether or not this is an exclusive scan
          SizeT BLOCK_SIZE = 256, // Size of element to process by a block
          SizeT BLOCK_N = 8, // log2(BLOCKSIZE)
          typename Value = VertexId> // Type of values
struct MultiScan {
  // NOTE(review): the Scan* methods below declare locals with these same
  // names, shadowing the members -- confirm whether the members are dead or
  // used by code outside this view.
  SizeT* History_Size;   // element count at each scan level
  SizeT** d_Buffer;      // per-level device scan buffers
  SizeT* h_Offset1;      // host-side partition offsets
  SizeT* d_Offset1;      // device-side partition offsets
  SizeT Max_Elements, Max_Rows;
  // Default-construct with all caches empty.
  MultiScan() {
    Max_Elements = 0;
    Max_Rows = 0;
    History_Size = NULL;
    d_Buffer = NULL;
    h_Offset1 = NULL;
    d_Offset1 = NULL;
  }
  // Multi-level segmented scan: Step0 builds per-partition ranks at level 0,
  // Step1 scans the per-block sums at each higher level, Step2 propagates
  // the totals back down, and Step3 combines ranks with the sub-array
  // offsets into final positions in d_Result.
  // NOTE(review): the locals History_Size/d_Buffer shadow the members of the
  // same names, and unlike Scan_with_Keys there is no early return for
  // Num_Elements <= 0 -- confirm both are intended.
  __host__ void Scan(
      const SizeT Num_Elements, const SizeT Num_Rows,
      const VertexId* const d_Select, // Selection flag, 1 = Selected
      const int* const d_Splict,     // Spliction mark
      const SizeT* const d_Offset,   // Offset of each sub-array
      SizeT* d_Length,               // Length of each sub-array
      SizeT* d_Result)               // The scan result
  {
    SizeT* History_Size = new SizeT[40];  // element count per level
    SizeT** d_Buffer = new SizeT*[40];    // device buffer per level
    SizeT Current_Size = Num_Elements;
    int Current_Level = 0;
    dim3 Block_Size, Grid_Size;
    for (int i = 0; i < 40; i++) d_Buffer[i] = NULL;
    // Level 0 writes ranks directly into the caller-owned result buffer.
    d_Buffer[0] = d_Result;
    History_Size[0] = Current_Size;
    History_Size[1] = Current_Size / BLOCK_SIZE;
    if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
    util::GRError(
        cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1] * Num_Rows),
        "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
    // Upward pass: scan each level until a single total per row remains.
    while (Current_Size > 1) {
      Block_Size = dim3(BLOCK_SIZE / 2, 1, 1);
      if (Current_Level == 0) {
        Grid_Size = dim3(History_Size[1] / 32, 32, 1);
        if ((History_Size[1] % 32) != 0) Grid_Size.x++;
        Step0<VertexId, SizeT, BLOCK_N>
            <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
                Current_Size, Num_Rows, History_Size[1], d_Select, d_Splict,
                d_Buffer[0], d_Buffer[1]);
        cudaDeviceSynchronize();
        util::GRError("Step0 failed", __FILE__, __LINE__);
      } else {
        Grid_Size = dim3(History_Size[Current_Level + 1], Num_Rows, 1);
        Step1<SizeT, BLOCK_N>
            <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
                Current_Size, d_Buffer[Current_Level],
                d_Buffer[Current_Level + 1]);
        cudaDeviceSynchronize();
        util::GRError("Step1 failed", __FILE__, __LINE__);
      }
      Current_Level++;
      Current_Size = History_Size[Current_Level];
      // Allocate the next level's buffer if another pass is needed.
      if (Current_Size > 1) {
        History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
        if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
        util::GRError(
            cudaMalloc(
                &(d_Buffer[Current_Level + 1]),
                sizeof(SizeT) * History_Size[Current_Level + 1] * Num_Rows),
            "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
      }
    } // while Current_Size>1
    // Top level now holds each partition's total element count.
    util::GRError(
        cudaMemcpy(d_Length, d_Buffer[Current_Level], sizeof(SizeT) * Num_Rows,
                   cudaMemcpyDeviceToDevice),
        "cudaMemcpy d_Length failed", __FILE__, __LINE__);
    Current_Level--;
    // Downward pass: add each level's scanned totals into the level below.
    while (Current_Level > 1) {
      Block_Size = dim3(BLOCK_SIZE, 1, 1);
      Grid_Size = dim3(History_Size[Current_Level], Num_Rows, 1);
      Step2<SizeT><<<Grid_Size, Block_Size>>>(History_Size[Current_Level - 1],
                                              d_Buffer[Current_Level],
                                              d_Buffer[Current_Level - 1]);
      cudaDeviceSynchronize();
      util::GRError("Step2 failed", __FILE__, __LINE__);
      Current_Level--;
    } // while Current_Level>1
    // Final scatter into d_Result.
    Block_Size = dim3(BLOCK_SIZE, 1, 1);
    Grid_Size = dim3(History_Size[1] / 32, 32, 1);
    if ((History_Size[1] % 32) != 0) Grid_Size.x++;
    Step3<VertexId, SizeT, EXCLUSIVE><<<Grid_Size, Block_Size>>>(
        Num_Elements, History_Size[1], d_Select, d_Splict, d_Offset,
        d_Buffer[1], d_Buffer[0], d_Result);
    cudaDeviceSynchronize();
    util::GRError("Step3 failed", __FILE__, __LINE__);
    // Free scratch levels; d_Buffer[0] aliases the caller-owned d_Result and
    // must not be freed, hence the loop starts at 1.
    for (int i = 1; i < 40; i++)
      if (d_Buffer[i] != NULL) {
        util::GRError(cudaFree(d_Buffer[i]), "cudaFree d_Buffer failed",
                      __FILE__, __LINE__);
        d_Buffer[i] = NULL;
      }
    delete[] d_Buffer;
    d_Buffer = NULL;
    delete[] History_Size;
    History_Size = NULL;
  } // Scan
  // Key-indirected variant of Scan: elements are keys whose partition is
  // Splict[key]; results are scattered as Convertion[key] into d_Result, and
  // associate arrays are copied for remote partitions (see Step0b/Step3b).
  // The partition offsets are computed on the host from d_Length.
  // NOTE(review): the locals History_Size/d_Buffer/h_Offset1/d_Offset1 shadow
  // the members of the same names -- confirm intended.
  __host__ void Scan_with_Keys(const SizeT Num_Elements, const SizeT Num_Rows,
                               const SizeT Num_Associate,
                               const VertexId* const d_Keys, VertexId* d_Result,
                               const int* const d_Splict, // Spliction mark
                               const VertexId* const d_Convertion,
                               SizeT* d_Length, // Length of each sub-array
                               VertexId** d_Associate_in,
                               VertexId** d_Associate_out) // The scan result
  {
    if (Num_Elements <= 0) return;  // nothing to scan
    SizeT* History_Size = new SizeT[40];  // element count per level
    SizeT** d_Buffer = new SizeT*[40];    // device buffer per level
    SizeT Current_Size = Num_Elements;
    int Current_Level = 0;
    dim3 Block_Size, Grid_Size;
    SizeT* h_Offset1 = new SizeT[Num_Rows + 1];  // host partition offsets
    SizeT* d_Offset1;
    util::GRError(
        cudaMalloc((void**)&d_Offset1, sizeof(SizeT) * (Num_Rows + 1)),
        "cudaMalloc d_Offset1 failed", __FILE__, __LINE__);
    for (int i = 0; i < 40; i++) d_Buffer[i] = NULL;
    History_Size[0] = Current_Size;
    History_Size[1] = Current_Size / BLOCK_SIZE;
    if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
    // Unlike Scan(), level 0 gets its own buffer (d_Result holds VertexId
    // output, not the SizeT ranks).
    util::GRError(cudaMalloc(&(d_Buffer[0]), sizeof(SizeT) * History_Size[0]),
                  "cudaMalloc d_Buffer[0] failed", __FILE__, __LINE__);
    util::GRError(
        cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1] * Num_Rows),
        "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
    // Upward pass; the "|| Current_Level == 0" guarantees at least one pass
    // even when Num_Elements == 1.
    while (Current_Size > 1 || Current_Level == 0) {
      Block_Size = dim3(BLOCK_SIZE / 2, 1, 1);
      if (Current_Level == 0) {
        Grid_Size = dim3(History_Size[1] / 32, 32, 1);
        if ((History_Size[1] % 32) != 0) Grid_Size.x++;
        Step0b<VertexId, SizeT, BLOCK_N>
            <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
                History_Size[0], Num_Rows, History_Size[1], d_Keys, d_Splict,
                d_Buffer[0], d_Buffer[1]);
        cudaDeviceSynchronize();
        util::GRError("Step0b failed", __FILE__, __LINE__);
      } else {
        Grid_Size = dim3(History_Size[Current_Level + 1], Num_Rows, 1);
        Step1<SizeT, BLOCK_N>
            <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
                Current_Size, d_Buffer[Current_Level],
                d_Buffer[Current_Level + 1]);
        cudaDeviceSynchronize();
        util::GRError("Step1 failed", __FILE__, __LINE__);
      }
      Current_Level++;
      Current_Size = History_Size[Current_Level];
      // Allocate the next level's buffer if another pass is needed.
      if (Current_Size > 1) {
        History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
        if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
        util::GRError(
            cudaMalloc(
                &(d_Buffer[Current_Level + 1]),
                sizeof(SizeT) * History_Size[Current_Level + 1] * Num_Rows),
            "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
      }
    } // while Current_Size>1
    // Top level holds each partition's total element count.
    util::GRError(
        cudaMemcpy(d_Length, d_Buffer[Current_Level], sizeof(SizeT) * Num_Rows,
                   cudaMemcpyDeviceToDevice),
        "cudaMemcpy d_Length failed", __FILE__, __LINE__);
    Current_Level--;
    // Downward pass: add each level's scanned totals into the level below.
    while (Current_Level > 1) {
      Block_Size = dim3(BLOCK_SIZE, 1, 1);
      Grid_Size = dim3(History_Size[Current_Level], Num_Rows, 1);
      Step2<SizeT><<<Grid_Size, Block_Size>>>(History_Size[Current_Level - 1],
                                              d_Buffer[Current_Level],
                                              d_Buffer[Current_Level - 1]);
      cudaDeviceSynchronize();
      util::GRError("Step2 failed", __FILE__, __LINE__);
      Current_Level--;
    } // while Current_Level>1
    Block_Size = dim3(BLOCK_SIZE, 1, 1);
    Grid_Size = dim3(History_Size[1] / 32, 32, 1);
    // Host-side exclusive prefix sum of the partition lengths.
    h_Offset1[0] = 0;
    util::GRError(cudaMemcpy(&(h_Offset1[1]), d_Length,
                             sizeof(SizeT) * Num_Rows, cudaMemcpyDeviceToHost),
                  "cudaMemcpy h_Offset1 failed", __FILE__, __LINE__);
    for (int i = 0; i < Num_Rows; i++) h_Offset1[i + 1] += h_Offset1[i];
    util::GRError(
        cudaMemcpy(d_Offset1, h_Offset1, sizeof(SizeT) * (Num_Rows + 1),
                   cudaMemcpyHostToDevice),
        "cudaMemcpy d_Offset1 failed", __FILE__, __LINE__);
    if ((History_Size[1] % 32) != 0) Grid_Size.x++;
    // Final scatter of Convertion[key] (and associates) into d_Result.
    Step3b<VertexId, SizeT, EXCLUSIVE><<<Grid_Size, Block_Size>>>(
        Num_Elements, History_Size[1], Num_Associate, d_Keys, d_Splict,
        d_Convertion, d_Offset1, d_Buffer[1], d_Buffer[0], d_Result,
        d_Associate_in, d_Associate_out);
    cudaDeviceSynchronize();
    util::GRError("Step3b failed", __FILE__, __LINE__);
    // All levels (including 0) were allocated here, so free them all.
    for (int i = 0; i < 40; i++)
      if (d_Buffer[i] != NULL) {
        util::GRError(cudaFree(d_Buffer[i]), "cudaFree d_Buffer failed",
                      __FILE__, __LINE__);
        d_Buffer[i] = NULL;
      }
    util::GRError(cudaFree(d_Offset1), "cudaFree d_Offset1 failed", __FILE__,
                  __LINE__);
    d_Offset1 = NULL;
    delete[] h_Offset1;
    h_Offset1 = NULL;
    delete[] d_Buffer;
    d_Buffer = NULL;
    delete[] History_Size;
    History_Size = NULL;
  } // Scan_with_Keys
// Multi-split scan over device-resident keys (blocking, per-call-alloc variant).
// For every input element, computes its offset inside the sub-array selected by
// d_Splict via a multi-level block reduction (Step0b/Step1), propagates block
// offsets back down (Step2), then Step3c scatters the converted keys together
// with their vertex / value associate payloads. Scratch buffers are allocated
// and freed on every call; use Init() + Scan_with_dKeys2() to reuse buffers.
//
// Num_Elements          number of input keys (no-op when <= 0)
// Num_Rows              number of split targets (sub-arrays)
// Num_Vertex_Associate  number of vertex payload arrays to scatter
// Num_Value__Associate  number of value payload arrays to scatter
// d_Keys                input keys (device)
// d_Result              scattered output keys (device)
// d_Splict              per-element sub-array assignment
// d_Convertion          per-element key conversion table
// d_Length              output: length of each sub-array (device)
// d_*_Associate_in/out  payload arrays moved alongside the keys
__host__ void Scan_with_dKeys(
    const SizeT Num_Elements, const SizeT Num_Rows,
    const SizeT Num_Vertex_Associate, const SizeT Num_Value__Associate,
    const VertexId* const d_Keys, VertexId* d_Result,
    const int* const d_Splict,  // Spliction mark
    const VertexId* const d_Convertion,
    SizeT* d_Length,  // Length of each sub-array
    VertexId** d_Vertex_Associate_in, VertexId** d_Vertex_Associate_out,
    Value** d_Value__Associate_in,
    Value** d_Value__Associate_out)  // The scan result
{
  if (Num_Elements <= 0) return;
  // 40 levels of reduction (each shrinking by BLOCK_SIZE) is ample headroom.
  SizeT* History_Size = new SizeT[40];
  SizeT** d_Buffer = new SizeT*[40];
  SizeT Current_Size = Num_Elements;
  int Current_Level = 0;
  dim3 Block_Size, Grid_Size;
  SizeT* h_Offset1 = new SizeT[Num_Rows + 1];
  SizeT* d_Offset1;
  util::GRError(
      cudaMalloc((void**)&d_Offset1, sizeof(SizeT) * (Num_Rows + 1)),
      "cudaMalloc d_Offset1 failed", __FILE__, __LINE__);
  for (int i = 0; i < 40; i++) d_Buffer[i] = NULL;
  History_Size[0] = Current_Size;
  History_Size[1] = Current_Size / BLOCK_SIZE;
  if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
  util::GRError(cudaMalloc(&(d_Buffer[0]), sizeof(SizeT) * History_Size[0]),
                "cudaMalloc d_Buffer[0] failed", __FILE__, __LINE__);
  util::GRError(
      cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1] * Num_Rows),
      "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
  // Upsweep: one reduction level per iteration until each row's total fits
  // in a single element.
  while (Current_Size > 1 || Current_Level == 0) {
    Block_Size = dim3(BLOCK_SIZE / 2, 1, 1);
    if (Current_Level == 0) {
      Grid_Size = dim3(History_Size[1] / 32, 32, 1);
      if ((History_Size[1] % 32) != 0) Grid_Size.x++;
      Step0b<VertexId, SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
              History_Size[0], Num_Rows, History_Size[1], d_Keys, d_Splict,
              d_Buffer[0], d_Buffer[1]);
      cudaDeviceSynchronize();
      util::GRError("Step0b failed", __FILE__, __LINE__);
    } else {
      Grid_Size = dim3(History_Size[Current_Level + 1], Num_Rows, 1);
      Step1<SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
              Current_Size, d_Buffer[Current_Level],
              d_Buffer[Current_Level + 1]);
      cudaDeviceSynchronize();
      util::GRError("Step1 failed", __FILE__, __LINE__);
    }
    Current_Level++;
    Current_Size = History_Size[Current_Level];
    if (Current_Size > 1) {
      History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
      if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
      util::GRError(
          cudaMalloc(
              &(d_Buffer[Current_Level + 1]),
              sizeof(SizeT) * History_Size[Current_Level + 1] * Num_Rows),
          "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
    }
  }  // while Current_Size>1
  // The top level now holds each sub-array's total length.
  util::GRError(
      cudaMemcpy(d_Length, d_Buffer[Current_Level], sizeof(SizeT) * Num_Rows,
                 cudaMemcpyDeviceToDevice),
      "cudaMemcpy d_Length failed", __FILE__, __LINE__);
  // Downsweep: push block offsets back toward the leaf level.
  Current_Level--;
  while (Current_Level > 1) {
    Block_Size = dim3(BLOCK_SIZE, 1, 1);
    Grid_Size = dim3(History_Size[Current_Level], Num_Rows, 1);
    Step2<SizeT><<<Grid_Size, Block_Size>>>(History_Size[Current_Level - 1],
                                            d_Buffer[Current_Level],
                                            d_Buffer[Current_Level - 1]);
    cudaDeviceSynchronize();
    util::GRError("Step2 failed", __FILE__, __LINE__);
    Current_Level--;
  }  // while Current_Level>1
  Block_Size = dim3(BLOCK_SIZE, 1, 1);
  Grid_Size = dim3(History_Size[1] / 32, 32, 1);
  // Host-side exclusive prefix-sum over the per-row lengths gives each
  // sub-array's base offset in the packed output.
  h_Offset1[0] = 0;
  util::GRError(cudaMemcpy(&(h_Offset1[1]), d_Length,
                           sizeof(SizeT) * Num_Rows, cudaMemcpyDeviceToHost),
                "cudaMemcpy h_Offset1 failed", __FILE__, __LINE__);
  for (int i = 0; i < Num_Rows; i++) h_Offset1[i + 1] += h_Offset1[i];
  util::GRError(
      cudaMemcpy(d_Offset1, h_Offset1, sizeof(SizeT) * (Num_Rows + 1),
                 cudaMemcpyHostToDevice),
      "cudaMemcpy d_Offset1 failed", __FILE__, __LINE__);
  if ((History_Size[1] % 32) != 0) Grid_Size.x++;
  // Final scatter of keys plus vertex / value payloads.
  Step3c<VertexId, SizeT, Value, EXCLUSIVE><<<Grid_Size, Block_Size>>>(
      Num_Elements, History_Size[1], Num_Vertex_Associate,
      Num_Value__Associate, d_Keys, d_Splict, d_Convertion, d_Offset1,
      d_Buffer[1], d_Buffer[0], d_Result, d_Vertex_Associate_in,
      d_Vertex_Associate_out, d_Value__Associate_in, d_Value__Associate_out);
  cudaDeviceSynchronize();
  // Fixed: this launch is Step3c, but the message previously said "Step3b".
  util::GRError("Step3c failed", __FILE__, __LINE__);
  // Release all per-call scratch memory.
  for (int i = 0; i < 40; i++)
    if (d_Buffer[i] != NULL) {
      util::GRError(cudaFree(d_Buffer[i]), "cudaFree d_Buffer failed",
                    __FILE__, __LINE__);
      d_Buffer[i] = NULL;
    }
  util::GRError(cudaFree(d_Offset1), "cudaFree d_Offset1 failed", __FILE__,
                __LINE__);
  d_Offset1 = NULL;
  delete[] h_Offset1;
  h_Offset1 = NULL;
  delete[] d_Buffer;
  d_Buffer = NULL;
  delete[] History_Size;
  History_Size = NULL;
}  // Scan_with_dKeys
// template < SizeT Num_Vertex_Associate,
// SizeT Num_Value__Associate>
// Pre-allocates the multi-level scratch buffers for problems of up to
// Max_Elements keys split into up to Max_Rows sub-arrays, so that
// Scan_with_dKeys2() can run without any per-call cudaMalloc/cudaFree.
// Walks the same level-size recurrence as the scan itself (each level is
// ceil(previous / BLOCK_SIZE)) and allocates one device buffer per level.
// Pair with Release() to free everything allocated here.
__host__ void Init(const SizeT Max_Elements, const SizeT Max_Rows)
// const SizeT Num_Vertex_Associate,
// const SizeT Num_Value__Associate,
// const VertexId* const d_Keys,
// VertexId* d_Result,
// const int* const d_Splict, // Spliction mark
// const VertexId* const d_Convertion,
// SizeT* d_Length, // Length of each sub-array
// VertexId** d_Vertex_Associate_in,
// VertexId** d_Vertex_Associate_out,
// Value** d_Value__Associate_in,
// Value** d_Value__Associate_out) // The scan result
{
  // Remember capacity so Scan_with_dKeys2 can detect when to re-Init.
  this->Max_Elements = Max_Elements;
  this->Max_Rows = Max_Rows;
  // 40 levels of reduction (each shrinking by BLOCK_SIZE) is ample headroom.
  History_Size = new SizeT[40];
  d_Buffer = new SizeT*[40];
  SizeT Current_Size = Max_Elements;
  int Current_Level = 0;
  dim3 Block_Size, Grid_Size;  // unused here; kept for parity with the scans
  h_Offset1 = new SizeT[Max_Rows + 1];
  util::GRError(
      cudaMalloc((void**)&d_Offset1, sizeof(SizeT) * (Max_Rows + 1)),
      "cudaMalloc d_Offset1 failed", __FILE__, __LINE__);
  for (int i = 0; i < 40; i++) d_Buffer[i] = NULL;
  History_Size[0] = Current_Size;
  History_Size[1] = Current_Size / BLOCK_SIZE;  // ceil(size / BLOCK_SIZE)
  if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
  util::GRError(cudaMalloc(&(d_Buffer[0]), sizeof(SizeT) * History_Size[0]),
                "cudaMalloc d_Buffer[0] failed", __FILE__, __LINE__);
  util::GRError(
      cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1] * Max_Rows),
      "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
  // Allocate one buffer per reduction level; no kernels are launched here.
  while (Current_Size > 1 || Current_Level == 0) {
    Current_Level++;
    Current_Size = History_Size[Current_Level];
    if (Current_Size > 1) {
      History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
      if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
      util::GRError(
          cudaMalloc(
              &(d_Buffer[Current_Level + 1]),
              sizeof(SizeT) * History_Size[Current_Level + 1] * Max_Rows),
          "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
    }
  }  // while Current_Size>1
}
// Frees every device buffer and host bookkeeping array allocated by Init().
// Safe to call when individual buffer slots were never allocated (NULL).
void Release() {
  for (int level = 0; level < 40; level++) {
    if (d_Buffer[level] == NULL) continue;
    util::GRError(cudaFree(d_Buffer[level]), "cudaFree d_Buffer failed",
                  __FILE__, __LINE__);
    d_Buffer[level] = NULL;
  }
  util::GRError(cudaFree(d_Offset1), "cudaFree d_Offset1 failed", __FILE__,
                __LINE__);
  d_Offset1 = NULL;
  delete[] h_Offset1;
  h_Offset1 = NULL;
  delete[] d_Buffer;
  d_Buffer = NULL;
  delete[] History_Size;
  History_Size = NULL;
}  // Release
// Multi-split scan over device-resident keys (stream-based variant).
// Same algorithm as Scan_with_dKeys, but:
//   - reuses the scratch buffers allocated by Init(), growing them via
//     Release()/Init() when the problem exceeds the recorded capacity;
//   - launches every kernel on the caller-supplied stream and only
//     synchronizes that stream at the end;
//   - computes the per-row base offsets on the device (Step2_5) instead of
//     round-tripping the lengths through the host.
// The payload-array counts are compile-time template parameters here.
template <SizeT Num_Vertex_Associate, SizeT Num_Value__Associate>
__host__ void Scan_with_dKeys2(const SizeT Num_Elements, const SizeT Num_Rows,
                               // const SizeT Num_Vertex_Associate,
                               // const SizeT Num_Value__Associate,
                               const VertexId* const d_Keys,
                               VertexId* d_Result,
                               const int* const d_Splict,  // Spliction mark
                               const VertexId* const d_Convertion,
                               SizeT* d_Length,  // Length of each sub-array
                               VertexId** d_Vertex_Associate_in,
                               VertexId** d_Vertex_Associate_out,
                               Value** d_Value__Associate_in,
                               Value** d_Value__Associate_out,
                               cudaStream_t stream = 0)  // The scan result
{
  // Empty input: just zero the per-row lengths and return.
  if (Num_Elements <= 0) {
    util::MemsetKernel<<<128, 1, 0, stream>>>(d_Length, 0, Num_Rows);
    cudaStreamSynchronize(stream);
    return;
  }
  // SizeT *History_Size = new SizeT[40];
  // SizeT **d_Buffer = new SizeT*[40];
  SizeT Current_Size = Num_Elements;
  int Current_Level = 0;
  dim3 Block_Size, Grid_Size;
  // SizeT *h_Offset1 = new SizeT[Num_Rows+1];
  // SizeT *d_Offset1;
  // Grow the pre-allocated scratch space if this problem exceeds capacity.
  if (Num_Elements > Max_Elements || Num_Rows > Max_Rows) {
    // NOTE(review): %d assumes SizeT is int — confirm for 64-bit SizeT builds.
    printf("Scanner expended: %d,%d -> %d,%d \n", Max_Elements, Max_Rows,
           Num_Elements, Num_Rows);
    fflush(stdout);
    Release();
    Init(Num_Elements, Num_Rows);
  }
  // util::GRError(cudaMalloc((void**)&d_Offset1, sizeof(SizeT)*(Num_Rows+1)),
  // "cudaMalloc d_Offset1 failed", __FILE__, __LINE__);
  // for (int i=0;i<40;i++) d_Buffer[i]=NULL;
  History_Size[0] = Current_Size;
  History_Size[1] = Current_Size / BLOCK_SIZE;  // ceil(size / BLOCK_SIZE)
  if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
  // util::GRError(cudaMalloc(&(d_Buffer[0]), sizeof(SizeT) *
  // History_Size[0]),
  //  "cudaMalloc d_Buffer[0] failed", __FILE__, __LINE__);
  // util::GRError(cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1]
  // * Num_Rows),
  //  "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
  // Upsweep: one reduction level per iteration, all on `stream`.
  while (Current_Size > 1 || Current_Level == 0) {
    Block_Size = dim3(BLOCK_SIZE / 2, 1, 1);
    if (Current_Level == 0) {
      Grid_Size = dim3(History_Size[1] / 32, 32, 1);
      if ((History_Size[1] % 32) != 0) Grid_Size.x++;
      Step0b<VertexId, SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE, stream>>>(
              History_Size[0], Num_Rows, History_Size[1], d_Keys, d_Splict,
              d_Buffer[0], d_Buffer[1]);
      // cudaDeviceSynchronize();
      // util::GRError("Step0b failed", __FILE__, __LINE__);
    } else {
      Grid_Size = dim3(History_Size[Current_Level + 1], Num_Rows, 1);
      Step1<SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE, stream>>>(
              Current_Size, d_Buffer[Current_Level],
              d_Buffer[Current_Level + 1]);
      // cudaDeviceSynchronize();
      // util::GRError("Step1 failed", __FILE__, __LINE__);
    }
    Current_Level++;
    Current_Size = History_Size[Current_Level];
    if (Current_Size > 1) {
      History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
      if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
      // util::GRError(cudaMalloc(&(d_Buffer[Current_Level+1]),
      // sizeof(SizeT)*History_Size[Current_Level+1]*Num_Rows),
      //  "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
    }
  }  // while Current_Size>1
  // util::GRError(cudaMemcpy(d_Length, d_Buffer[Current_Level], sizeof(SizeT)
  // * Num_Rows, cudaMemcpyDeviceToDevice),
  // "cudaMemcpy d_Length failed", __FILE__, __LINE__);
  // Top level holds each sub-array's total length; copy it on-device.
  MemsetCopyVectorKernel<<<128, 1, 0, stream>>>(
      d_Length, d_Buffer[Current_Level], Num_Rows);
  // Downsweep: push block offsets back toward the leaf level.
  Current_Level--;
  while (Current_Level > 1) {
    Block_Size = dim3(BLOCK_SIZE, 1, 1);
    Grid_Size = dim3(History_Size[Current_Level], Num_Rows, 1);
    Step2<SizeT><<<Grid_Size, Block_Size, 0, stream>>>(
        History_Size[Current_Level - 1], d_Buffer[Current_Level],
        d_Buffer[Current_Level - 1]);
    // cudaDeviceSynchronize();
    // util::GRError("Step2 failed", __FILE__, __LINE__);
    Current_Level--;
  }  // while Current_Level>1
  // Compute d_Offset1 from d_Length on the device (replaces the host-side
  // prefix sum kept in the comment block below).
  Step2_5<<<1, 1, 0, stream>>>(d_Length, d_Offset1, Num_Rows);
  Block_Size = dim3(BLOCK_SIZE, 1, 1);
  Grid_Size = dim3(History_Size[1] / 32, 32, 1);
  /*h_Offset1[0]=0;
  util::GRError(cudaMemcpy(&(h_Offset1[1]), d_Length, sizeof(SizeT)*Num_Rows,
  cudaMemcpyDeviceToHost), "cudaMemcpy h_Offset1 failed", __FILE__, __LINE__);
  for (int i=0;i<Num_Rows;i++) h_Offset1[i+1]+=h_Offset1[i];
  util::GRError(cudaMemcpy(d_Offset1, h_Offset1, sizeof(SizeT)*(Num_Rows+1),
  cudaMemcpyHostToDevice), "cudaMemcpy d_Offset1 failed", __FILE__, __LINE__);
  */
  if ((History_Size[1] % 32) != 0) Grid_Size.x++;
  // Final scatter of keys plus vertex / value payloads.
  Step3c<VertexId, SizeT, Value, EXCLUSIVE, Num_Vertex_Associate,
         Num_Value__Associate><<<Grid_Size, Block_Size, 0, stream>>>(
      Num_Elements, History_Size[1],
      // Num_Vertex_Associate,
      // Num_Value__Associate,
      d_Keys, d_Splict, d_Convertion, d_Offset1, d_Buffer[1], d_Buffer[0],
      d_Result, d_Vertex_Associate_in, d_Vertex_Associate_out,
      d_Value__Associate_in, d_Value__Associate_out);
  cudaStreamSynchronize(stream);
  // cudaDeviceSynchronize();
  // util::GRError("Step3b failed", __FILE__, __LINE__);
  // for (int i=0;i<40;i++)
  // if (d_Buffer[i]!=NULL)
  //{
  //  util::GRError(cudaFree(d_Buffer[i]),
  //  "cudaFree d_Buffer failed", __FILE__, __LINE__);
  //  d_Buffer[i]=NULL;
  //}
  // util::GRError(cudaFree(d_Offset1),"cudaFree d_Offset1 failed", __FILE__,
  // __LINE__); d_Offset1=NULL; delete[] h_Offset1; h_Offset1 = NULL;
  // delete[] d_Buffer; d_Buffer = NULL;
  // delete[] History_Size; History_Size = NULL;
}  // Scan_with_dKeys2
// Backward multi-split scan: like Scan_with_dKeys, but the leaf step
// (Step0d) additionally consumes a per-element d_Offset table, and the final
// scatter is Step3d. Scratch buffers are allocated and freed on every call.
// Note d_Buffer[0] is sized History_Size[0] * Num_Rows here (one slot per
// row), unlike the forward variant.
//
// Num_Elements  number of input keys (no-op when <= 0)
// Num_Rows      number of split targets (sub-arrays)
// d_Keys        input keys (device)
// d_Offset      extra per-element offset table consumed by Step0d / Step3d
// d_Result      scattered output keys (device)
// d_Splict      per-element sub-array assignment
// d_Convertion  per-element key conversion table
// d_Length      output: length of each sub-array (device)
template <SizeT Num_Vertex_Associate, SizeT Num_Value__Associate>
__host__ void Scan_with_dKeys_Backward(
    const SizeT Num_Elements, const SizeT Num_Rows,
    // const SizeT Num_Vertex_Associate,
    // const SizeT Num_Value__Associate,
    const VertexId* const d_Keys, const SizeT* const d_Offset,
    VertexId* d_Result,
    const int* const d_Splict,  // Spliction mark
    const VertexId* const d_Convertion,
    SizeT* d_Length,  // Length of each sub-array
    VertexId** d_Vertex_Associate_in, VertexId** d_Vertex_Associate_out,
    Value** d_Value__Associate_in,
    Value** d_Value__Associate_out)  // The scan result
{
  if (Num_Elements <= 0) return;
  // 40 levels of reduction (each shrinking by BLOCK_SIZE) is ample headroom.
  SizeT* History_Size = new SizeT[40];
  SizeT** d_Buffer = new SizeT*[40];
  SizeT Current_Size = Num_Elements;
  int Current_Level = 0;
  dim3 Block_Size, Grid_Size;
  SizeT* h_Offset1 = new SizeT[Num_Rows + 1];
  SizeT* d_Offset1;
  util::GRError(
      cudaMalloc((void**)&d_Offset1, sizeof(SizeT) * (Num_Rows + 1)),
      "cudaMalloc d_Offset1 failed", __FILE__, __LINE__);
  for (int i = 0; i < 40; i++) d_Buffer[i] = NULL;
  History_Size[0] = Current_Size;
  History_Size[1] = Current_Size / BLOCK_SIZE;
  if ((History_Size[0] % BLOCK_SIZE) != 0) History_Size[1]++;
  util::GRError(
      cudaMalloc(&(d_Buffer[0]), sizeof(SizeT) * History_Size[0] * Num_Rows),
      "cudaMalloc d_Buffer[0] failed", __FILE__, __LINE__);
  util::GRError(
      cudaMalloc(&(d_Buffer[1]), sizeof(SizeT) * History_Size[1] * Num_Rows),
      "cudaMalloc d_Buffer[1] failed", __FILE__, __LINE__);
  // Upsweep: one reduction level per iteration.
  while (Current_Size > 1 || Current_Level == 0) {
    Block_Size = dim3(BLOCK_SIZE / 2, 1, 1);
    if (Current_Level == 0) {
      Grid_Size = dim3(History_Size[1] / 32, 32, 1);
      if ((History_Size[1] % 32) != 0) Grid_Size.x++;
      Step0d<VertexId, SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
              History_Size[0], Num_Rows, History_Size[1], d_Keys, d_Offset,
              d_Splict, d_Buffer[0], d_Buffer[1]);
      cudaDeviceSynchronize();
      // Fixed: this launch is Step0d, but the message previously said "Step0b".
      util::GRError("Step0d failed", __FILE__, __LINE__);
    } else {
      Grid_Size = dim3(History_Size[Current_Level + 1], Num_Rows, 1);
      Step1<SizeT, BLOCK_N>
          <<<Grid_Size, Block_Size, sizeof(SizeT) * BLOCK_SIZE>>>(
              Current_Size, d_Buffer[Current_Level],
              d_Buffer[Current_Level + 1]);
      cudaDeviceSynchronize();
      util::GRError("Step1 failed", __FILE__, __LINE__);
    }
    Current_Level++;
    Current_Size = History_Size[Current_Level];
    if (Current_Size > 1) {
      History_Size[Current_Level + 1] = Current_Size / BLOCK_SIZE;
      if ((Current_Size % BLOCK_SIZE) != 0) History_Size[Current_Level + 1]++;
      util::GRError(
          cudaMalloc(
              &(d_Buffer[Current_Level + 1]),
              sizeof(SizeT) * History_Size[Current_Level + 1] * Num_Rows),
          "cudaMalloc d_Buffer failed", __FILE__, __LINE__);
    }
  }  // while Current_Size>1
  // The top level now holds each sub-array's total length.
  util::GRError(
      cudaMemcpy(d_Length, d_Buffer[Current_Level], sizeof(SizeT) * Num_Rows,
                 cudaMemcpyDeviceToDevice),
      "cudaMemcpy d_Length failed", __FILE__, __LINE__);
  // Downsweep: push block offsets back toward the leaf level.
  Current_Level--;
  while (Current_Level > 1) {
    Block_Size = dim3(BLOCK_SIZE, 1, 1);
    Grid_Size = dim3(History_Size[Current_Level], Num_Rows, 1);
    Step2<SizeT><<<Grid_Size, Block_Size>>>(History_Size[Current_Level - 1],
                                            d_Buffer[Current_Level],
                                            d_Buffer[Current_Level - 1]);
    cudaDeviceSynchronize();
    util::GRError("Step2 failed", __FILE__, __LINE__);
    Current_Level--;
  }  // while Current_Level>1
  Block_Size = dim3(BLOCK_SIZE, 1, 1);
  Grid_Size = dim3(History_Size[1] / 32, 32, 1);
  // Host-side exclusive prefix-sum over the per-row lengths gives each
  // sub-array's base offset in the packed output.
  h_Offset1[0] = 0;
  util::GRError(cudaMemcpy(&(h_Offset1[1]), d_Length,
                           sizeof(SizeT) * Num_Rows, cudaMemcpyDeviceToHost),
                "cudaMemcpy h_Offset1 failed", __FILE__, __LINE__);
  for (int i = 0; i < Num_Rows; i++) h_Offset1[i + 1] += h_Offset1[i];
  util::GRError(
      cudaMemcpy(d_Offset1, h_Offset1, sizeof(SizeT) * (Num_Rows + 1),
                 cudaMemcpyHostToDevice),
      "cudaMemcpy d_Offset1 failed", __FILE__, __LINE__);
  // for (int k=0;k<Num_Rows;k++)
  //  util::cpu_mt::PrintGPUArray<SizeT,
  //  SizeT>("Buffer1",d_Buffer[1]+k*History_Size[1],History_Size[1]);
  // util::cpu_mt::PrintGPUArray<SizeT,
  // SizeT>("Buffer0",d_Buffer[0],History_Size[0]);
  if ((History_Size[1] % 32) != 0) Grid_Size.x++;
  // Final scatter of keys plus vertex / value payloads.
  Step3d<VertexId, SizeT, Value, EXCLUSIVE, Num_Vertex_Associate,
         Num_Value__Associate><<<Grid_Size, Block_Size>>>(
      Num_Elements, History_Size[1],
      // Num_Vertex_Associate,
      // Num_Value__Associate,
      d_Keys, d_Splict, d_Convertion, d_Offset, d_Offset1, d_Buffer[1],
      d_Buffer[0], d_Result, d_Vertex_Associate_in, d_Vertex_Associate_out,
      d_Value__Associate_in, d_Value__Associate_out);
  cudaDeviceSynchronize();
  // Fixed: this launch is Step3d, but the message previously said "Step3b".
  util::GRError("Step3d failed", __FILE__, __LINE__);
  // Release all per-call scratch memory.
  for (int i = 0; i < 40; i++)
    if (d_Buffer[i] != NULL) {
      util::GRError(cudaFree(d_Buffer[i]), "cudaFree d_Buffer failed",
                    __FILE__, __LINE__);
      d_Buffer[i] = NULL;
    }
  util::GRError(cudaFree(d_Offset1), "cudaFree d_Offset1 failed", __FILE__,
                __LINE__);
  d_Offset1 = NULL;
  delete[] h_Offset1;
  h_Offset1 = NULL;
  delete[] d_Buffer;
  d_Buffer = NULL;
  delete[] History_Size;
  History_Size = NULL;
}  // Scan_with_dKeys_Backward
}; // struct MultiScan
} // namespace scan
} // namespace util
} // namespace gunrock | the_stack |
This example shows how to run convolution kernels using functions and data structures
provided by CUTLASS using tensor cores; which we run on a NVIDIA Turing GPU.
Writing a single high performance convolution kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of implicit gemm kernel. When used properly, the kernels can hit peak performance
of GPU easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp
and thread-block level, they compute on their own tile-size with higher level of tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how tensors are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set tensors will be used to compute
output of convolution.
First, we setup the data types of the input tensor A, weights' tensor B and output tensor C along
with alpha, beta as the equation for convolution is C = alpha * Conv(A, B) + beta * C. In CUTLASS,
the kernels first compute Conv(A, B) and leave the rest of the computation to end of the kernel as
alpha * X + beta * C is a simple element-wise operation on X (Conv(A, B)) and C. We call this as
epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to
ElementComputeEpilogue = float. We want to use MMA instructions on Turing and they support 4-bit
signed integer. But int4b_t is not fully supported by Nvidia software stack, so CUTLASS introduces
cutlass::int4b_t. We use the data type for elements in input tensor A and B as cutlass::int4b_t. We
convey this to CUTLASS kernel by initializing template variables ElementAccumulator (int32_t),
ElementComputeEpilogue (float), ElementInputA (cutlass::int4b_t), ElementInputB (cutlass::int4b_t),
ElementOutput (int32_t). Communicating just the data type is not enough. As the data is laid out
linearly in memory, we have to convey the layout of tensors. We do that by initializing template
variables LayoutInputA, LayoutInputB and LayoutOutput to TensorNHWC cutlass variable. Next, we setup
rules to compute alpha * X + beta * C, which is called the epilogue of the kernel. We initialize template
variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of
elements per vector memory access (32), data type of accumulator (int32_t) and data type of
computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x128,
64x64x128, 8x8x32 (MxNxK) respectively. When passed to instantiate CUTLASS Implicit GEMM kernel, it
internally deduces the amount of threads needed per thread-block, amount of shared memory, storing
data in a bank-conflict-free manner, and a ton of other variables required to compose, initialize and
launch a high performance Implicit GEMM kernel. This is the beauty of CUTLASS, it relieves developer
from understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a threadblock. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma pipeline.
tensor in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with single pipeline is, each stage is synchronous which means, each stage has to wait
until the previous finished executing. There are stages in the pipeline which do not have fixed
latency, for example, the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) tensor in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) tensor in global
memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
(9) output to global memory
This way, you can hide the second global memory load latency by doing computation on already loaded
input data.
There are few more template variables initialized such as, which threadblock tile of output matrix
is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS Implicit GEMM
kernel using cutlass::conv::device::ImplicitGemm template.
The next step is to initialize physical data, instantiate and initialize the CUTLASS kernel and run it.
We use CUTLASS utilities to initialize, fill, and compare tensors, as they are simple and don't get
in the way of learning CUTLASS.
Once all the tensors are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64,
R = 3, S = 3, C = 128 ), padding, strides, dilation, tensors, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to initialize the CUTLASS kernel; then the kernel is launched.
In this example, we later on launch a reference convolution kernel (from CUTLASS utilities) to
compare if the output from CUTLASS kernel is same as the reference implicit GEMM kernel.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = int32_t;                 // Data type of accumulator
using ElementComputeEpilogue = float;               // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::int4b_t;             // Data type of elements in input tensor
using ElementInputB = cutlass::int4b_t;             // Data type of elements in input tensor
using ElementOutput = cutlass::int4b_t;             // Data type of elements in output tensor
// All three tensors use the NHWC (channels-last) layout.
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number (Sm75 = Turing)
using SmArch = cutlass::arch::Sm75;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 128>;  // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;  // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;  // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 2;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationClamp<
    ElementOutput,  // Data type of output matrix.
    8,              // The number of elements per vectorized
                    // memory access. This becomes the vector width of
                    // math instructions in the epilogue too.
                    // NOTE(review): 8 int4b elements = 32 bits per access;
                    // confirm against the kernel's expected output alignment.
    ElementAccumulator,       // Data type of accumulator
    ElementComputeEpilogue>;  // Data type for alpha/beta in linear combination
// Compose the full implicit-GEMM forward-convolution kernel from the
// properties chosen above.
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
    ElementInputA, LayoutInputA,
    ElementInputB, LayoutInputB,
    ElementOutput, LayoutOutput,
    ElementAccumulator,
    MMAOp,
    SmArch,
    ThreadblockShape,
    WarpShape,
    InstructionShape,
    EpilogueOp,
    SwizzleThreadBlock,
    NumStages,
    cutlass::arch::OpMultiplyAddSaturate,       // saturating int multiply-add
    cutlass::conv::IteratorAlgorithm::kAnalytic // analytic (non-precomputed) iterators
    >::Kernel;
// Device-level adapter used to launch the kernel from host code.
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
// Command-line options for the example: problem geometry, epilogue scalars,
// and run-mode flags, plus helpers to validate sizes and compute derived
// quantities (output extents, GFLOP/s).
struct Options {
  bool help;                           // print usage and exit
  cutlass::Tensor4DCoord input_size;   // activation extents (N, H, W, C)
  cutlass::Tensor4DCoord filter_size;  // filter extents (K, R, S, C)
  // Padding stored as 4 coords: n/h are the two H pads, w/c the two W pads
  // (see output_size(), which sums n+h for H and w+c for W).
  cutlass::Tensor4DCoord padding;
  cutlass::MatrixCoord conv_stride;    // (stride_h, stride_w)
  cutlass::MatrixCoord dilation;       // (dilation_h, dilation_w)
  bool reference_check;                // run host reference comparison
  bool measure_performance;            // time the kernel
  int iterations;                      // profiling iterations
  bool save_workspace;                 // dump tensors to a file
  ElementComputeEpilogue alpha;        // epilogue scalar alpha
  ElementComputeEpilogue beta;         // epilogue scalar beta
  bool benchmark;                      // sweep several layer configurations
  std::string tag;                     // optional label for the results table
  Options():
    help(false),
    input_size(1, 32, 32, 32),
    filter_size(32, 3, 3, 32),
    padding(1, 1, 1, 1),
    conv_stride(1, 1),
    dilation(1, 1),
    reference_check(false),
    measure_performance(true),
    iterations(20),
    save_workspace(false),
    alpha(1),
    beta(0),
    benchmark(false) { }
  // Verify the problem size is compatible with the CUTLASS Convolution implementation.
  bool valid() {
    //
    // CUTLASS attempts to load 128b vectors of int4b_t elements. Consequently,
    // all pointers, strides, and tensor extents must be divisible by 32 elements.
    //
    int const kAlignment = 32;
    if ((input_size.c() % kAlignment) ||
        (filter_size.n() % kAlignment)) {
      // misaligned tensors
      return false;
    }
    // Invalid padding: only "same"-style padding of half the filter extent
    // is supported by this example.
    if ((padding.h() != filter_size.h() / 2) ||
        (padding.w() != filter_size.w() / 2)) {
      return false;
    }
    return true;
  }
  /// Updates input and filter sizes, recomputing "same" padding from the
  /// new filter extents (all four pad slots: two for H, two for W).
  void update(
    cutlass::Tensor4DCoord input_size,
    cutlass::Tensor4DCoord filter_size) {
    this->input_size = input_size;
    this->filter_size = filter_size;
    padding.n() = filter_size.h() / 2;
    padding.h() = filter_size.h() / 2;
    padding.w() = filter_size.w() / 2;
    padding.c() = filter_size.w() / 2;
  }
  // Parses the command line. Filter C is forced to match input C; filters
  // other than 3x3 are coerced to 1x1 with zero padding.
  void parse(int argc, char const **args) {
    cutlass::CommandLine cmd(argc, args);
    if (cmd.check_cmd_line_flag("help")) {
      help = true;
    }
    if (cmd.check_cmd_line_flag("ref-check")) {
      reference_check = true;
    }
    if (cmd.check_cmd_line_flag("perf-check")) {
      measure_performance = true;
    }
    if (cmd.check_cmd_line_flag("save-workspace")) {
      save_workspace = true;
    }
    if (cmd.check_cmd_line_flag("benchmark")) {
      benchmark = true;
    }
    cmd.get_cmd_line_argument("n", input_size.n());
    cmd.get_cmd_line_argument("h", input_size.h());
    cmd.get_cmd_line_argument("w", input_size.w());
    cmd.get_cmd_line_argument("c", input_size.c());
    cmd.get_cmd_line_argument("k", filter_size.n());
    cmd.get_cmd_line_argument("r", filter_size.h());
    cmd.get_cmd_line_argument("s", filter_size.w());
    filter_size.c() = input_size.c();  // channels must match the input
    cmd.get_cmd_line_argument("alpha", alpha);
    cmd.get_cmd_line_argument("beta", beta);
    cmd.get_cmd_line_argument("iterations", iterations);
    cmd.get_cmd_line_argument("tag", tag);
    if (filter_size.h() == 3 && filter_size.w() == 3) {
      padding = {1, 1, 1, 1};
    }
    else {
      filter_size.h() = 1;
      filter_size.w() = 1;
      padding = {0, 0, 0, 0};
    }
  }
  /// Prints the usage statement.
  std::ostream & print_usage(std::ostream &out) const {
    out << "09_turing_tensorop_conv2dfprop example\n\n"
      << "  This example uses Turing's Tensor Core operators on int4 data types to compute\n"
      << "  forward convolution on tensors of layout NHWC.\n\n"
      << "Options:\n\n"
      << "  --help               If specified, displays this usage statement.\n\n"
      << "  --n <int>            Input tensor extent N\n"
      << "  --h <int>            Input tensor extent H\n"
      << "  --w <int>            Input tensor extent W\n"
      << "  --c <int>            Input tensor extent C\n"
      << "  --k <int>            Filter extent K\n"
      << "  --r <int>            Filter extent R\n"
      << "  --s <int>            Filter extent S\n\n"
      << "  --alpha <float>      Epilogue scalar alpha\n"
      << "  --beta <float>       Epilogue scalar beta\n\n"
      << "  --ref-check          If set (true), reference check on the host is computed\n"
      << "  --perf-check         If set (true), performance is measured.\n"
      << "  --benchmark          If set (true), performance benchmarking on several layers and batch-size.\n"
      << "  --iterations <int>   Number of profiling iterations to perform.\n"
      << "  --save-workspace     If set, workspace is written to a text file.\n"
      << "  --tag <string>       String to replicate across the first column in the results table\n";
    out << "\n\nExamples:\n\n"
      << "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop  --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
      << "$ ./examples/09_turing_tensorop_conv2dfprop/09_turing_tensorop_conv2dfprop  --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
    return out;
  }
  /// Computes the output tensor size (NPQK) from input extents, padding,
  /// filter extents, and stride (standard convolution output formula).
  cutlass::Tensor4DCoord output_size() const {
    return cutlass::Tensor4DCoord(
      input_size.n(),
      (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
      (input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
      filter_size.n());
  }
  /// Compute performance in GFLOP/s
  double gflops(double runtime_s) const {
    // Number of multiply-adds = NPQK * CRS
    int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
    // Two flops per multiply-add
    return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
  }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark: allocates and initializes tensors, executes the CUTLASS
/// implicit-GEMM convolution once, optionally verifies against a host
/// reference, optionally dumps tensors to disk, and optionally times a series
/// of repeated launches.
Result profile_convolution(Options const &options) {

  Result result;

  //
  // Allocate host-device tensors using the CUTLASS Utilities.
  //
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size);
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size);
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size());
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_c(options.output_size());

  //
  // Initialize tensors
  //

  // Fill tensor A on host with uniform-distribution random data in [-8, 7]
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(),
      1,
      ElementInputA(7),
      ElementInputA(-8),
      0);

  // Fill tensor B on host with uniform-distribution random data in [-8, 7]
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(),
      1,
      ElementInputB(7),
      ElementInputB(-8),
      0);

  // Fill tensor C on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_c.host_view());

  // Fill tensor C for reference on host with zeros
  cutlass::reference::host::TensorFill(
      tensor_ref_c.host_view());

  // Copy data from host to GPU
  tensor_a.sync_device();
  tensor_b.sync_device();
  tensor_c.sync_device();
  tensor_ref_c.sync_device();

  //
  // Define arguments for CUTLASS Convolution
  //

  // mode (kCrossCorrelation or kConvolution)
  cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;

  // Split K dimension into 1 partitions
  int split_k_slices = 1;

  // Construct Conv2dProblemSize with user defined output size
  cutlass::conv::Conv2dProblemSize problem_size(
      options.input_size,
      options.filter_size,
      options.padding,
      options.conv_stride,
      options.dilation,
      options.output_size(),
      mode,
      split_k_slices);

  // Construct ImplicitGemm::Argument structure with conv2d problem size,
  // data pointers, and epilogue values.
  // tensor_c is used both as source (C) and destination (D) operand.
  typename ImplicitGemm::Arguments arguments{
      problem_size,
      tensor_a.device_ref(),
      tensor_b.device_ref(),
      tensor_c.device_ref(),
      tensor_c.device_ref(),
      {options.alpha, options.beta},
  };

  //
  // Initialize CUTLASS Convolution
  //
  ImplicitGemm implicit_gemm_op;

  size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);

  result.status = implicit_gemm_op.can_implement(arguments);
  CUTLASS_CHECK(result.status);

  result.status = implicit_gemm_op.initialize(arguments, workspace.get());
  CUTLASS_CHECK(result.status);

  //
  // Launch initialized CUTLASS kernel
  //
  result.status = implicit_gemm_op();
  CUTLASS_CHECK(result.status);

  //
  // Optional reference check
  //
  if (options.reference_check) {
    std::cout << "Verification on host...\n";

    // Compute with reference implementation
    cutlass::reference::host::Conv2dFprop<
        ElementInputA,
        LayoutInputA,
        ElementInputB,
        LayoutInputB,
        ElementOutput,
        LayoutOutput,
        ElementComputeEpilogue,
        ElementAccumulator,
        cutlass::NumericConverterClamp<ElementOutput, ElementComputeEpilogue>
    >(
        problem_size,
        tensor_a.host_ref(),
        tensor_b.host_ref(),
        tensor_c.host_ref(),
        tensor_ref_c.host_ref(),
        options.alpha,
        options.beta
    );

    // Check if output from CUTLASS kernel and reference kernel are equal or not
    tensor_c.sync_host();

    bool passed = cutlass::reference::host::TensorEquals(
        tensor_c.host_view(),
        tensor_ref_c.host_view());

    if (!passed) {
      result.reference_check = cutlass::Status::kErrorInternal;
      std::cout << "ERROR - results miscompared.\n";
    }
    else {
      result.reference_check = cutlass::Status::kSuccess;
      std::cout << "Passed.\n";
    }
  }
  else {
    result.reference_check = cutlass::Status::kInvalid;
  }

  // Optionally dump input/output tensors to a text file for inspection.
  if (options.save_workspace) {

    std::stringstream ss;

    ss << "09_tensor_conv_workspace_conv2dfprop_"
       << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
       << "_"
       << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
       << ".dat";

    std::ofstream output_workspace(ss.str());

    output_workspace
        << "Input = \n" << tensor_a.host_view() << "\n\n"
        << "Filters = \n" << tensor_b.host_view() << "\n\n";

    if (options.reference_check) {
      output_workspace << "Reference = \n" << tensor_ref_c.host_view() << "\n\n";
    }

    output_workspace << "Computed = \n" << tensor_c.host_view() << std::endl;

    std::cout << "Results written to '" << ss.str() << "'." << std::endl;
  }

  //
  // Performance measurement
  //
  if (options.measure_performance) {

    cudaEvent_t events[2] = {nullptr, nullptr};

    // Releases any events created so far (idempotent). The original code
    // leaked both events on every early error return below.
    auto destroy_events = [&events]() {
      for (auto &event : events) {
        if (event != nullptr) {
          (void)cudaEventDestroy(event);
          event = nullptr;
        }
      }
    };

    for (auto & event : events) {
      result.error = cudaEventCreate(&event);
      if (result.error != cudaSuccess) {
        std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
        destroy_events();
        return result;
      }
    }

    // Record an event at the start of a series of convolution operations.
    result.error = cudaEventRecord(events[0]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      destroy_events();
      return result;
    }

    // Launch a sequence of implicit GEMM operations on the device
    for (int iteration = 0; iteration < options.iterations; ++iteration) {
      result.status = implicit_gemm_op();
      CUTLASS_CHECK(result.status);
    }

    // Record an event when the convolutions have been launched.
    result.error = cudaEventRecord(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
      destroy_events();
      return result;
    }

    // Wait for work on the device to complete.
    result.error = cudaEventSynchronize(events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
      destroy_events();
      return result;
    }

    // Measure elapsed runtime
    float runtime_ms = 0;
    result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
    if (result.error != cudaSuccess) {
      std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
      destroy_events();
      return result;
    }

    // Average runtime per iteration and resulting GFLOPs.
    result.runtime_ms = double(runtime_ms) / double(options.iterations);
    result.gflops = options.gflops(result.runtime_ms / 1000.0);

    // Cleanup
    destroy_events();
  }

  return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point: validates toolkit/device requirements, parses options, then
// either runs a fixed benchmark suite or profiles a single problem size.
int main(int argc, char const **args) {

  // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2.
  //
  // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples.
  if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
    std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
    // Returning 0 (not an error) so CI treats an unsupported toolkit as a skip.
    return 0;
  }

  // Require compute capability >= 7.5 (Turing) on device 0.
  cudaDeviceProp props;
  CUDA_CHECK(cudaGetDeviceProperties(&props, 0));

  if (!(props.major > 7 || (props.major == 7 && props.minor >= 5))) {
    std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75."
              << std::endl;
    return 0;
  }

  Options options;
  options.parse(argc, args);

  if (options.help) {
    options.print_usage(std::cout) << std::endl;
    return 0;
  }

  if (options.benchmark) {
    // Benchmark several layers

    int batch_sizes[] = {1, 32, 64, 128, 256, 512};

    // ResNet-style layer shapes: spatial extent, input/output channels,
    // filter height/width.
    struct Benchmark {
      int h, w, c, k, r, s;
    } layers[] = {
      {56, 56, 64, 256, 1, 1},
      {56, 56, 64, 64, 1, 1},
      {56, 56, 64, 64, 3, 3},
      {56, 56, 256, 64, 1, 1},
      {56, 56, 256, 512, 1, 1},
      {56, 56, 256, 128, 1, 1},
      {28, 28, 128, 128, 3, 3},
      {28, 28, 128, 512, 1, 1},
      {28, 28, 512, 128, 1, 1},
      {28, 28, 512, 1024, 1, 1},
      {28, 28, 512, 256, 1, 1},
      {14, 14, 256, 256, 3, 3},
      {14, 14, 256, 1024, 1, 1},
      {14, 14, 1024, 256, 1, 1},
      {14, 14, 1024, 2048, 1, 1},
      {14, 14, 1024, 512, 1, 1},
      {7, 7, 512, 512, 3, 3},
    };

    Result::print_header(std::cout, options) << std::endl;

    int idx = 1;

    for (auto const &layer : layers) {
      for (auto N : batch_sizes) {
        // Rebuild the problem for this (batch size, layer) pair and profile it.
        options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});

        Result result = profile_convolution(options);
        result.print(std::cout, idx, options) << std::endl;
      }
      // NOTE(review): idx advances once per *layer*, so every batch size of
      // the same layer shares a "conv_<idx>" label -- confirm this is the
      // intended grouping for the CSV output.
      ++idx;
    }
  }
  else {

    // Execute one problem size
    if (!options.valid()) {
      std::cerr << "Invalid problem." << std::endl;
      return -1;
    }

    Result result = profile_convolution(options);

    Result::print_header(std::cout, options) << std::endl;
    result.print(std::cout, 1, options) << std::endl;
  }

  return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// NOTE(review): file-scope using-directive is an anti-pattern, but it is kept
// because the rest of this translation unit relies on it.
using namespace std;

// Fixed-width aliases used throughout the census kernels.
typedef uint8_t uint8;
typedef unsigned int uint32;
typedef unsigned long long int uint64;

// Kernel launch / tiling parameters.
#define STREAM_BLOCK 16
#define BLOCK_SIZE 32            // 2-D block edge for argmin-style kernels
#define BLOCK_D_SIZE 64          // tile edge used by the axis-swap kernel
#define INTEGRAL_BLOCK_SIZE 8
#define XDIM_MAX_THREADS 1024    // maximum threads per block along x
#define XDIM_H_THREADS 512       // half of XDIM_MAX_THREADS
#define XDIM_Q_THREADS 256       // quarter of XDIM_MAX_THREADS
#define SHARED_MEMORY 49152      // 48 KB -- shared memory budget in bytes
#define INIT_BLOCK 8
// Computes a 7(rows) x 9(cols) census transform of `image` and writes one
// 63-bit signature per pixel into `census`.
//
// One block handles one image row band; the block slides across the row in
// overlapping chunks of blockDim.x columns, staging a 7-pixel vertical slice
// per column in shared memory. Threads within 9 columns of the chunk's right
// edge only stage data (they lack a full neighborhood), so chunks overlap by
// the window width.
__global__ void CensusTransformKernel(const float* image, uint64* census ,int rows, int cols ){

    // 7 vertical pixels staged per participating column.
    __shared__ float cens_slice[7*XDIM_MAX_THREADS/2];

    const int Row = blockIdx.x;   // top row of this block's 7-row band
    int Col = threadIdx.x;        // column cursor, advanced chunk by chunk
    const int wr = 7/2;           // half window height
    const int wc = 9/2;           // half window width

    const int steps = (ceil((double)cols/blockDim.x));  // chunks to cover the row

    for(int i=0; i<steps; i++){

        // Stage the 7-pixel vertical slice under this thread's column.
        if(Col < cols){
            for(int wh=0; wh<7; wh++){
                cens_slice[threadIdx.x*7+wh] = image[(Row+wh)*cols + Col ];
            }
        }

        __syncthreads();

        if( Row < rows-7 && Col < cols-9 && threadIdx.x<blockDim.x-9 ){

            // NOTE(review): the center value is truncated to uint8 while the
            // neighbors are compared as float -- presumably the input holds
            // 8-bit intensities stored in floats; confirm upstream.
            uint8 center = cens_slice[(threadIdx.x+wc)*7+wr];

            uint64 censtrans =0;
            for(int ww=0; ww<9;ww++){
                for ( int wh=0; wh<7;wh++){
                    // Set the bit when the neighbor is brighter than the center.
                    // BUGFIX: the shift amount reaches 62, so the shifted
                    // constant must be 64-bit; the original `1 << (...)`
                    // shifted a 32-bit int (undefined behavior, bits lost
                    // for wh*9+ww >= 31).
                    if( (center < cens_slice[(threadIdx.x+ww)*7+wh]) )
                        censtrans ^= 1ULL << (wh*9+ww);
                }
            }

            // Store the signature at the window's center pixel.
            census[ (Row+wr)*cols + (Col+wc) ] = censtrans;
        }

        // Advance with a 9-column overlap so every output column is produced.
        Col += blockDim.x - 9;
        __syncthreads();
    }
}
// For each pixel and each disparity d in [0, ndisp), computes the Hamming
// distance between the left census signature and the right signature shifted
// by d, writing the result into the cost volume `cost` (layout: disparity-
// major, i.e. cost[(d*rows + row)*cols + col]).
//
// Dynamic shared memory holds, for blocks other than the first, an `ndisp`-
// wide halo of right-image signatures to the left of the block, followed by
// the blockDim.x signatures under the block itself.
__global__ void CensusSADKernel(uint64* censusl, uint64* censusr, float* cost, int rows, int cols, int ndisp){

    extern __shared__ uint64 censr_slice_sm[];

    uint64 censl_slice =0;

    const int Row = blockIdx.y;                              // image row (band top)
    const int Col =blockIdx.x*blockDim.x + threadIdx.x;      // global column
    const int wr = 7/2;                                      // census half-height
    const int wc = 9/2;                                      // census half-width

    // Offset into shared memory where this block's own columns start:
    // blocks after the first reserve the leading ndisp slots for the halo.
    int threaddispl = 0;
    if(blockIdx.x >0){
        threaddispl=ndisp;
    }

    // Load the left halo of right-image signatures.
    // NOTE(review): the destination index `Col-cols` and the source offset
    // `Col-(ndisp-blockDim.x)` look inconsistent with the halo layout
    // described above (the index can go negative for early blocks) --
    // verify against the original repository before relying on this path.
    if(blockIdx.x > 0 && threadIdx.x < ndisp && (int)(Col -(ndisp-blockDim.x)) >=0){
        censr_slice_sm[Col-cols] = censusr[(Row+wr)*cols + (Col -(ndisp-blockDim.x)+wc) ];
    }

    // Load this thread's left signature (register) and its right signature
    // into the block-local portion of shared memory.
    if(Col<cols){
        censl_slice = censusl[(Row+wr)*cols + (Col+wc) ];
        censr_slice_sm[threaddispl+ threadIdx.x ] = censusr[(Row+wr)*cols + (Col+wc) ];
    }

    __syncthreads();

    if(Row < rows-7 && Col < cols-9){
        #pragma unroll
        for (int d=0; d< ndisp; d++){

            // Shared-memory index of the right signature at disparity d.
            const int dind = threaddispl+threadIdx.x-d;
            // NOTE(review): guard is `> 0`, so index 0 is never used --
            // possibly intended to be `>= 0`; confirm.
            if(dind >0){
                // Hamming distance via 64-bit popcount of the XOR.
                cost [ (d*rows+(Row+wr))*cols + (Col+wc) ]= (float)__popcll(censl_slice ^ censr_slice_sm[threaddispl+threadIdx.x-d ]);
            }
        }
    }
}
// Prints the command-line help text for the census CUDA tool, one line per
// supported argument.
void usage(void){
    static const char* const help_lines[] = {
        "Census fixed window CUDA implementation",
        "Arguments",
        "-l:\t\t Left image | File containing names of the left images",
        "-r:\t\t Right image | File containing the names of the right images",
        "-ndisp:\t\t Number of Disparities",
        "-dopost:\t Default false. If set, activates sgm cost optimization",
        "-list:\t\t Default is single file. If set, left and right files should be lists of images.",
        "-out:\t\t Output directory for disparity images.",
        "-out_type:\t Output image type. Supports pgm|pfm|png|disp(uint16 png format).",
        "-postconf:\t Optional configuration file for post-processing.",
        "-h:\t\t Prints this help",
    };
    for (const char* const line : help_lines) {
        std::cout << line << std::endl;
    }
}
int main(int argc, char* argv[]){
string leftfile;
string rightfile;
string out=string(".");
string out_t=string("disp");
int ndisp=256;
bool post=false;
bool single=true;
int argsassigned = 0;
int required=0;
postparams params;
//sgm params
params.pi1=30;
params.pi2=150;
params.tau_so=1;
params.alpha1=2;
params.sgm_q1=3;
params.sgm_q2=2;
params.alpha2=6;
params.sigma = 5.99;
params.kernel_size=5;
int direction =-1;
for(int i=0; i<argc; i++){
if( !strcmp(argv[i], "-l") ){
leftfile = string(argv[++i]);
argsassigned++;
required++;
}else if( !strcmp(argv[i],"-r") ){
rightfile = string(argv[++i]);
argsassigned++;
required++;
}else if( !strcmp(argv[i],"-ndisp") ){
ndisp= atoi(argv[++i]);
argsassigned++;
required++;
}else if( !strcmp(argv[i], "-dopost") ){
post= true;
argsassigned++;
}else if(!strcmp(argv[i],"-list")){
single=false;
argsassigned++;
}else if(!strcmp(argv[i],"-out")){
out=string(argv[++i]);
argsassigned++;
}else if(!strcmp(argv[i],"-out_type")){
out_t=string(argv[++i]);
argsassigned++;
}else if(!strcmp(argv[i],"-postconf")){
parseConf(params ,string(argv[++i]));
argsassigned++;
}else if(!strcmp(argv[i],"-h")){
usage();
return 0;
}
}
if(argsassigned == 0){
usage();
return 0;
}
if(argsassigned ==1){
leftfile = string("../../leftimg.txt");
rightfile = string("../../rightimg.txt");
}
else if( required < 3 ){
usage();
return 0;
}
std::vector<string> limg;
std::vector<string> rimg;
if (single){
limg.push_back(leftfile);
rimg.push_back(rightfile);
}else{
limg = getImages(leftfile);
rimg = getImages(rightfile);
}
imgio* imgutil = new imgio();
imgutil->read_image_meta(limg[0].c_str());
//######################### Allocate memory on the device ###########################################//
float* imgl;
size_t ibytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float);
cudaMallocHost( (void**) &imgl, ibytes );
float* imgr;
cudaMallocHost( (void**) &imgr, ibytes );
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
float* cost_d;
size_t bytes = imgutil->getWidth()*imgutil->getHeight()*ndisp*sizeof(float);
cudaMalloc( (void**) &cost_d, bytes );
float* post_cost_d;
cudaMalloc( (void**) &post_cost_d, bytes );
uint64 *census_l_d;
cudaMalloc(&census_l_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(uint64));
cudaMemsetAsync(census_l_d, 0,imgutil->getWidth()*imgutil->getHeight()*sizeof(uint64),stream1);
uint64 *census_r_d;
cudaMalloc(&census_r_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(uint64));
cudaMemsetAsync(census_r_d, 0,imgutil->getWidth()*imgutil->getHeight()*sizeof(uint64),stream2);
float* disp_h;
size_t dbytes = imgutil->getWidth()*imgutil->getHeight()*sizeof(float);
cudaMallocHost( (void**) &disp_h, dbytes );
float * disp_d;
cudaMalloc(&disp_d, dbytes);
float * disp_tmp;
cudaMalloc(&disp_tmp, dbytes);
float* imgl_d;
cudaMalloc(&imgl_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float));
float* imgr_d;
cudaMalloc(&imgr_d, imgutil->getWidth()*imgutil->getHeight()*sizeof(float));
dim3 dimBlockCens(XDIM_MAX_THREADS/2);
dim3 dimGridCens(imgutil->getHeight()-7);
int threads = XDIM_MAX_THREADS;
dim3 dimBlock(threads);
dim3 dimGrid(ceil((float) imgutil->getWidth() /threads),imgutil->getHeight()-7);
dim3 swapBlock(BLOCK_D_SIZE,16,1);
dim3 swapGrid(ceil((float)imgutil->getWidth()*imgutil->getHeight()/BLOCK_D_SIZE),ceil((float)ndisp/BLOCK_D_SIZE));
dim3 argBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 argGrid(ceil((float) imgutil->getWidth() / BLOCK_SIZE),ceil( (float)imgutil->getHeight()/ BLOCK_SIZE));
int width = imgutil->getWidth();
int height = imgutil->getHeight();
int size1 = height*ndisp;
int size2 = width*ndisp;
float * tmp_d;
cudaMalloc(&tmp_d, width*ndisp*sizeof(float));
cudaMemsetAsync(tmp_d,0 , width*ndisp*sizeof(float),stream1);
float* left_cross;
cudaMalloc(&left_cross, 4*height*width*sizeof(float));
cudaMemsetAsync(left_cross,0 , 4*height*width*sizeof(float),stream2);
float* right_cross;
cudaMalloc(&right_cross, 4*height*width*sizeof(float));
cudaMemsetAsync(right_cross,0 , 4*height*width*sizeof(float),stream1);
int kr = ceil(params.sigma*3);
int ks = kr*2+1;
float * kernel = (float*)calloc(ks*ks,sizeof(float));
for (int i=0; i<ks; i++){
for(int j=0; j<ks; j++){
int y= (i-1)-kr;
int x= (j-1)-kr;
kernel[i*ks+j] = exp( -(x*x+y*y)/(2*params.sigma*params.sigma) );
}
}
float *kernel_d;
cudaMalloc(&kernel_d, ks*ks*sizeof(float));
cudaMemcpyAsync( kernel_d, kernel, ks*ks*sizeof(float), cudaMemcpyHostToDevice,stream2);
//#######################################################################################################################//
for(size_t i=0; i<limg.size(); i++){
imgutil->read_image(limg[i],imgl);
imgutil->read_image(rimg[i],imgr);
cudaMemcpyAsync( imgl_d, imgl, width*height*sizeof(float), cudaMemcpyHostToDevice,stream1);
cudaMemcpyAsync( imgr_d, imgr, width*height*sizeof(float), cudaMemcpyHostToDevice,stream2);
cudaMemsetAsync(cost_d,90 , height*width*ndisp*sizeof(float),stream1);
CensusTransformKernel<<<dimGridCens, dimBlockCens,0, stream1>>>(imgl_d,census_l_d,height, width);
CensusTransformKernel<<<dimGridCens, dimBlockCens,0, stream2>>>(imgr_d,census_r_d,height, width);
CensusSADKernel<<<dimGrid, dimBlock,(threads+ndisp)*sizeof(uint64)>>>(census_l_d,census_r_d,cost_d,height, width,ndisp);
if(post){
swap_axis<<< swapGrid, swapBlock >>>( cost_d, post_cost_d,height,width,ndisp );
cudaMemset(cost_d,0 , height*width*ndisp*sizeof(float));
for (int step = 0; step < width; step++) {
sgm_loop<0><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>(
imgl_d,
imgr_d,
post_cost_d,
cost_d,
tmp_d,
params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction,
height,
width,
ndisp,
step);
}
for (int step = 0; step < width; step++) {
sgm_loop<1><<<(size1 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>(
imgl_d,
imgr_d,
post_cost_d,
cost_d,
tmp_d,
params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction,
height,
width,
ndisp,
step);
}
for (int step = 0; step < height; step++) {
sgm_loop<2><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>(
imgl_d,
imgr_d,
post_cost_d,
cost_d,
tmp_d,
params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction,
height,
width,
ndisp,
step);
}
for (int step = 0; step < height; step++) {
sgm_loop<3><<<(size2 - 1) / ndisp + 1, ndisp,2*ndisp*sizeof(float)>>>(
imgl_d,
imgr_d,
post_cost_d,
cost_d,
tmp_d,
params.pi1, params.pi2, params.tau_so, params.alpha1, params.sgm_q1, params.sgm_q2, direction,
height,
width,
ndisp,
step);
}
argmin<<<argGrid, argBlock>>>( disp_d, cost_d, height, width,ndisp );
subpixel_enchancement<<<(height*width - 1) / TB + 1, TB>>>(
disp_d,
cost_d,
disp_tmp,
height*width,
height*width,
ndisp);
median2d<<<(height*width - 1) / TB + 1, TB>>>(
disp_tmp,
disp_d,
height*width,
height,
width,
params.kernel_size / 2);
mean2d<<<(height*width - 1) / TB + 1, TB>>>(
disp_d,
kernel_d,
disp_tmp,
height*width,
ks / 2,
height,
width,
params.alpha2);
}else{
argmin_d<<<argGrid, argBlock>>>( disp_tmp, cost_d, height, width,ndisp );
}
cudaMemcpy( disp_h, disp_tmp, height*width*sizeof(float), cudaMemcpyDeviceToHost );
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
imgutil->write_image(out + string("/") +limg[i].substr(limg[i].find_last_of("/")+1) ,disp_h,out_t);
}
cudaFreeHost(imgl);
cudaFreeHost(imgr);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaFree(left_cross);
cudaFree(right_cross);
cudaFree(tmp_d);
cudaFreeHost(imgl);
cudaFreeHost(imgr);
cudaFreeHost(disp_h);
cudaFree(disp_d);
cudaFree(disp_tmp);
cudaFree(imgl_d);
cudaFree(imgr_d);
cudaFree(cost_d);
cudaFree(post_cost_d);
cudaFree(census_l_d);
cudaFree(census_r_d);
delete imgutil;
return 0;
} | the_stack |
* bisection.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include "config.h"
#include "structs.h"
#include "matlab.h"
#include "util.h"
#include "gerschgorin.h"
#include "bisect_small.cuh"
#include "bisect_large.cuh"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
bool runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: runs the eigenvalue test suite and exits with a status code
// reflecting whether it passed.
int main(int argc, char **argv) {
  printf("Starting eigenvalues\n");

  const bool passed = runTest(argc, argv);
  printf("Test %s\n", passed ? "Succeeded!" : "Failed!");

  exit(passed ? EXIT_SUCCESS : EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize the input data to the algorithm
//! @param input handles to the input data
//! @param exec_path path where executable is run (argv[0])
//! @param mat_size size of the matrix
//! @param user_defined 1 if the matrix size has been requested by the user,
//! 0 if the default size
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize the input data to the algorithm: allocates host buffers for the
//! diagonal (a) and superdiagonal (b) of the tridiagonal matrix, fills them
//! either with random values or from the bundled data files, and mirrors them
//! into device memory.
//! @param input handles to the input data
//! @param exec_path path where executable is run (argv[0])
//! @param mat_size size of the matrix
//! @param user_defined 1 if the matrix size has been requested by the user,
//!                     0 if the default size
////////////////////////////////////////////////////////////////////////////////
void initInputData(InputData &input, char *exec_path,
                   const unsigned int mat_size,
                   const unsigned int user_defined) {
  // allocate memory
  input.a = (float *)malloc(sizeof(float) * mat_size);
  input.b = (float *)malloc(sizeof(float) * mat_size);

  if (1 == user_defined) {
    // initialize diagonal and superdiagonal entries with random values
    // in (-1, 1); fixed seed keeps runs reproducible
    srand(278217421);

    // srand( clock());
    for (unsigned int i = 0; i < mat_size; ++i) {
      input.a[i] = (float)(2.0 * (((double)rand() / (double)RAND_MAX) - 0.5));
      input.b[i] = (float)(2.0 * (((double)rand() / (double)RAND_MAX) - 0.5));
    }

    // the first element of s is used as padding on the device (thus the
    // whole vector is copied to the device but the kernels are launched
    // with (s+1) as start address
    input.b[0] = 0.0f;
  } else {
    // read default matrix from the sample's data files
    // NOTE(review): sdkReadFile receives the address of the just-malloc'ed
    // buffers -- presumably it fills (or reallocates) them; verify against
    // helper_functions.h.
    unsigned int input_data_size = mat_size;

    char *diag_path = sdkFindFilePath("diagonal.dat", exec_path);
    assert(NULL != diag_path);
    sdkReadFile(diag_path, &(input.a), &input_data_size, false);

    char *sdiag_path = sdkFindFilePath("superdiagonal.dat", exec_path);
    assert(NULL != sdiag_path);
    sdkReadFile(sdiag_path, &(input.b), &input_data_size, false);

    free(diag_path);
    free(sdiag_path);
  }

  // allocate device memory for input
  checkCudaErrors(cudaMalloc((void **)&(input.g_a), sizeof(float) * mat_size));
  checkCudaErrors(
      cudaMalloc((void **)&(input.g_b_raw), sizeof(float) * mat_size));

  // copy data to device
  checkCudaErrors(cudaMemcpy(input.g_a, input.a, sizeof(float) * mat_size,
                             cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(input.g_b_raw, input.b, sizeof(float) * mat_size,
                             cudaMemcpyHostToDevice));

  // g_b skips the padding element at index 0 (see comment above)
  input.g_b = input.g_b_raw + 1;
}
////////////////////////////////////////////////////////////////////////////////
//! Clean up input data, in particular allocated memory
//! @param input handles to the input data
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Clean up input data, in particular allocated memory: releases the host
//! buffers and the device allocations made by initInputData, and nulls the
//! pointers so a double cleanup is harmless.
//! @param input handles to the input data
////////////////////////////////////////////////////////////////////////////////
void cleanupInputData(InputData &input) {
  freePtr(input.a);
  freePtr(input.b);

  checkCudaErrors(cudaFree(input.g_a));
  input.g_a = NULL;
  checkCudaErrors(cudaFree(input.g_b_raw));
  input.g_b_raw = NULL;
  // g_b aliased g_b_raw + 1; it is never freed directly
  input.g_b = NULL;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if a specific matrix size has to be used
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param matrix_size size of matrix, updated if specific size specified on
//! command line
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Check if a specific matrix size has to be used.
//! Reads the optional "matrix-size" command-line flag and, when a positive
//! value was given, overrides mat_size and marks it user-defined.
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param mat_size size of matrix, updated if specific size specified on
//!                 command line
////////////////////////////////////////////////////////////////////////////////
void getMatrixSize(int argc, char **argv, unsigned int &mat_size,
                   unsigned int &user_defined) {
  int requested = -1;

  if (checkCmdLineFlag(argc, (const char **)argv, "matrix-size")) {
    requested = getCmdLineArgumentInt(argc, (const char **)argv, "matrix-size");
  }

  if (requested > 0) {
    mat_size = (unsigned int)requested;
    // the kernels index with the short data type, so the size must fit
    assert(mat_size < (1 << 16));
    // a matrix smaller than 2x2 is meaningless here
    assert(mat_size >= 2);
    user_defined = 1;
  }

  printf("Matrix size: %i x %i\n", mat_size, mat_size);
}
////////////////////////////////////////////////////////////////////////////////
//! Check if a specific precision of the eigenvalue has to be obtained
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param iters_timing numbers of iterations for timing, updated if a
//! specific number is specified on the command line
//! @param user_defined 1 if the precision has been requested by the user,
//! 0 if the default size
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Check if a specific precision of the eigenvalue has to be obtained.
//! Reads the optional "precision" command-line flag; accepted values lie in
//! the advertised inclusive range [0.000001, 0.001].
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param precision desired precision, updated if a valid value was given
//! @param user_defined 1 if the precision has been requested by the user,
//!                     0 if the default size
////////////////////////////////////////////////////////////////////////////////
void getPrecision(int argc, char **argv, float &precision,
                  unsigned int &user_defined) {
  float temp = -1.0f;

  if (checkCmdLineFlag(argc, (const char **)argv, "precision")) {
    temp = getCmdLineArgumentFloat(argc, (const char **)argv, "precision");
    printf("Precision is between [0.001, 0.000001]\n");
  }

  // BUGFIX: accept the advertised lower bound 0.000001 itself; the original
  // `temp > 1e-6` rejected it.
  if (temp >= 1e-6 && temp <= 0.001) {
    precision = temp;
    user_defined = 1;
  }

  printf("Precision: %f\n", precision);
}
////////////////////////////////////////////////////////////////////////////////
//! Check if a particular number of iterations for timings has to be used
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param iters_timing number of timing iterations, updated if user
//! specific value
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Check if a particular number of iterations for timings has to be used.
//! Reads the optional "iters-timing" flag; only positive values override the
//! current setting.
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param iters_timing number of timing iterations, updated if user
//!                     specific value
////////////////////////////////////////////////////////////////////////////////
void getItersTiming(int argc, char **argv, unsigned int &iters_timing) {
  if (checkCmdLineFlag(argc, (const char **)argv, "iters-timing")) {
    const int requested =
        getCmdLineArgumentInt(argc, (const char **)argv, "iters-timing");
    if (requested > 0) {
      iters_timing = requested;
    }
  }

  printf("Iterations to be timed: %i\n", iters_timing);
}
////////////////////////////////////////////////////////////////////////////////
//! Check if a particular filename has to be used for the file where the result
//! is stored
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param filename filename of result file, updated if user specified
//! filename
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Check if a particular filename has to be used for the file where the result
//! is stored.
//! @param argc number of command line arguments (from main(argc, argv)
//! @param argv pointers to command line arguments (from main(argc, argv)
//! @param filename filename of result file, replaced with a heap-allocated
//!                 copy if the user specified one (caller owns the copy)
////////////////////////////////////////////////////////////////////////////////
void getResultFilename(int argc, char **argv, char *&filename) {
  char *temp = NULL;
  getCmdLineArgumentString(argc, (const char **)argv, "filename-result", &temp);

  if (NULL != temp) {
    // BUGFIX: allocate strlen+1 bytes -- the original malloc(strlen(temp))
    // left no room for the terminating NUL, so strcpy wrote one byte past
    // the end of the buffer.
    filename = (char *)malloc(sizeof(char) * (strlen(temp) + 1));
    strcpy(filename, temp);

    free(temp);
  }

  printf("Result filename: '%s'\n", filename);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA: reads the configuration from the command line,
//! builds the input matrix, computes the Gerschgorin interval containing all
//! eigenvalues, and dispatches to the small- or large-matrix bisection kernel.
//! @return true when the (optional) self-check passed or was skipped
////////////////////////////////////////////////////////////////////////////////
bool runTest(int argc, char **argv) {
  bool bCompareResult = false;

  findCudaDevice(argc, (const char **)argv);

  // NOTE(review): both timers are created but never started/deleted here --
  // presumably used inside the compute* helpers; verify.
  StopWatchInterface *timer = NULL;
  StopWatchInterface *timer_total = NULL;
  sdkCreateTimer(&timer);
  sdkCreateTimer(&timer_total);

  // default
  unsigned int mat_size = 2048;
  // flag if the matrix size is due to explicit user request
  unsigned int user_defined = 0;
  // desired precision of eigenvalues
  float precision = 0.00001f;
  unsigned int iters_timing = 100;
  // NOTE(review): getResultFilename may replace this string-literal pointer
  // with a heap allocation that is never freed in this function.
  char *result_file = (char *)"eigenvalues.dat";

  // check if there is a command line request for the matrix size
  getMatrixSize(argc, argv, mat_size, user_defined);

  // check if user requested specific precision
  getPrecision(argc, argv, precision, user_defined);

  // check if user requested specific number of iterations for timing
  getItersTiming(argc, argv, iters_timing);

  // file name for result file
  getResultFilename(argc, argv, result_file);

  // set up input
  InputData input;
  initInputData(input, argv[0], mat_size, user_defined);

  // compute Gerschgorin interval bounding all eigenvalues
  float lg = FLT_MAX;
  float ug = -FLT_MAX;
  computeGerschgorin(input.a, input.b + 1, mat_size, lg, ug);
  printf("Gerschgorin interval: %f / %f\n", lg, ug);

  // two kernels, for small matrices a lot of overhead can be avoided
  if (mat_size <= MAX_SMALL_MATRIX) {
    // initialize memory for result
    ResultDataSmall result;
    initResultSmallMatrix(result, mat_size);

    // run the kernel
    computeEigenvaluesSmallMatrix(input, result, mat_size, lg, ug, precision,
                                  iters_timing);

    // get the result from the device and do some sanity checks,
    // save the result
    processResultSmallMatrix(input, result, mat_size, result_file);

    // clean up
    cleanupResultSmallMatrix(result);

    // small-matrix path has no reference data, so it always reports success
    printf("User requests non-default argument(s), skipping self-check!\n");
    bCompareResult = true;
  } else {
    // initialize memory for result
    ResultDataLarge result;
    initResultDataLargeMatrix(result, mat_size);

    // run the kernel
    computeEigenvaluesLargeMatrix(input, result, mat_size, precision, lg, ug,
                                  iters_timing);

    // get the result from the device and do some sanity checks
    // save the result if user specified matrix size
    bCompareResult = processResultDataLargeMatrix(
        input, result, mat_size, result_file, user_defined, argv[0]);

    // cleanup
    cleanupResultDataLargeMatrix(result);
  }

  cleanupInputData(input);

  return bCompareResult;
}
#include "include/common.h"
using Vec3ida = Eigen::Matrix<int, 3, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__device__ __forceinline__
// Trilinear interpolation of the TSDF volume at a non-integer point.
//
// `point` is the query position in voxel coordinates (fractional),
// `volume` the packed TSDF volume (short2: tsdf value + weight),
// `volume_size` its extent in voxels, and `voxel_scale` the metric size of
// one voxel (unused here; kept for interface symmetry with the callers).
// Returns the interpolated TSDF value, where DIVSHORTMAX rescales the
// stored short back to [-1, 1].
float interpolate_trilinearly(
    const Vec3fda& point,              // query point (fractional voxel coords)
    const PtrStepSz<short2>& volume,   // TSDF volume
    const int3& volume_size,           // volume extent in voxels
    const float voxel_scale)           // metric size of one voxel
{
    // Integer voxel containing the query point.
    Vec3ida point_in_grid = point.cast<int>();

    // Center of that voxel (stored TSDF values refer to voxel centers).
    const float vx = (static_cast<float>(point_in_grid.x()) + 0.5f);
    const float vy = (static_cast<float>(point_in_grid.y()) + 0.5f);
    const float vz = (static_cast<float>(point_in_grid.z()) + 0.5f);

    // If the query point lies on the low side of the voxel center along an
    // axis, step one voxel down so that the 2x2x2 neighborhood used below
    // brackets the query point between the surrounding voxel centers.
    point_in_grid.x() = (point.x() < vx) ? (point_in_grid.x() - 1) : point_in_grid.x();
    point_in_grid.y() = (point.y() < vy) ? (point_in_grid.y() - 1) : point_in_grid.y();
    point_in_grid.z() = (point.z() < vz) ? (point_in_grid.z() - 1) : point_in_grid.z();

    // Fractional offsets of the query point relative to the (shifted) voxel
    // center; the +0.5f again accounts for values living at voxel centers.
    // Standard trilinear weights, see
    // https://en.wikipedia.org/wiki/Trilinear_interpolation
    const float a = (point.x() - (static_cast<float>(point_in_grid.x()) + 0.5f));
    const float b = (point.y() - (static_cast<float>(point_in_grid.y()) + 0.5f));
    const float c = (point.z() - (static_cast<float>(point_in_grid.z()) + 0.5f));

    // Weighted sum over the 8 corner voxels C000..C111; each stored short is
    // rescaled to a float TSDF value via DIVSHORTMAX.
    return
        static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
        // volume[ x ][ y ][ z ], C000
        * (1 - a) * (1 - b) * (1 - c) +
        static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x()].x) * DIVSHORTMAX
        // volume[ x ][ y ][z+1], C001
        * (1 - a) * (1 - b) * c +
        static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
        // volume[ x ][y+1][ z ], C010
        * (1 - a) * b * (1 - c) +
        static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x()].x) * DIVSHORTMAX
        // volume[ x ][y+1][z+1], C011
        * (1 - a) * b * c +
        static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
        // volume[x+1][ y ][ z ], C100
        * a * (1 - b) * (1 - c) +
        static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y())[point_in_grid.x() + 1].x) * DIVSHORTMAX
        // volume[x+1][ y ][z+1], C101
        * a * (1 - b) * c +
        static_cast<float>(volume.ptr((point_in_grid.z()) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
        // volume[x+1][y+1][ z ], C110
        * a * b * (1 - c) +
        static_cast<float>(volume.ptr((point_in_grid.z() + 1) * volume_size.y + point_in_grid.y() + 1)[point_in_grid.x() + 1].x) * DIVSHORTMAX
        // volume[x+1][y+1][z+1], C111
        * a * b * c;
}
/*************************************************************
NOTE 下面的两个函数是为了得到在raycast过程中, 什么时候射线开始射入 volume, 什么时候射线射出 volume
这里有一个假设: 相机是一直都向Volume的方向观测的
如果只看x和y轴, 那么坐标系是这样定义的:
y^
|
|
|---------
| |
| volume |
| |
-------------|------------->x
|
|
首先想求的是在每个轴上进行 raycast 需要最短的时间. 在步长一致的情况下:
在每个轴上耗费的单位时间数tmin = (当前相机位置在这个轴上到 volume 的距离)/(raycast方向在这个轴上的分量)
类似地, 若要射线完整地穿过 volume 所需要的最长的时间, 对于每个轴:
在每个轴上耗费的时间单位数tmax = (当前相机位置在这个轴上到 volume 另一端的距离)/(raycast方向在这个轴上的分量)
而为了近似得到当前射线方向按照给定步长前进所需的最少时间, 程序这样计算
final_min_time = max(txmin, tymin, tzmin)
目的是保证当射线前进了 final_min_time 后, 所有的轴上(几乎)一定接触到了 Volume , 可以进行 raycast 过程了
类似地为了近似地得到当前射线方向按照给定步长前进,走出Volume所耗费的最少时间, 程序也使用了比较保守的策略:
final_max_time = min(txmax, tymax, tzmax)
这样能够确定经过了 final_max_time 之后, 射线在其中一个轴上就脱离 volmue 了, 相当于射线已经出了 volume, raycast就可以停止了
// ! 但是上述的分析在相机处于某些区域的时候可能站不住脚, 比如相机的位姿中 0<ty<volume.size.y的时候并且direct.y > 0. 得到的tmin是个负值
这个时候就会出现计算错误的情况
**************************************************************/
// __forceinline__ forces this device function to be inlined
__device__ __forceinline__
// Entry "time" of the ray into the volume: the ray parameter t after which all
// three axis components have crossed into the volume's slab on that axis.
float get_min_time(
    const float3& volume_max,   // metric extent of the volume
    const Vec3fda& origin,      // ray origin, i.e. the current camera position
    const Vec3fda& direction)   // ray direction (world frame)
{
    // Per-axis entry time = (signed distance to the near face on that axis) /
    // (direction component on that axis); the max of the three guarantees that
    // after advancing that far, every axis component is inside the volume.
    // NOTE(review): if the origin already lies inside the slab on some axis the
    // per-axis time is negative; the caller clamps the result with fmax(.., 0).
    float txmin = ((direction.x() > 0 ? 0.f : volume_max.x) - origin.x()) / direction.x();
    float tymin = ((direction.y() > 0 ? 0.f : volume_max.y) - origin.y()) / direction.y();
    float tzmin = ((direction.z() > 0 ? 0.f : volume_max.z) - origin.z()) / direction.z();
    return fmax(fmax(txmin, tymin), tzmin);
}
__device__ __forceinline__
// Exit "time" of the ray from the volume: the ray parameter t after which at
// least one axis component has left the volume, so raycasting can stop.
float get_max_time(const float3& volume_max, const Vec3fda& origin, const Vec3fda& direction)
{
    // Per-axis exit time = (signed distance to the far face on that axis) /
    // (direction component); the min of the three is the first moment the ray
    // leaves the volume on any axis.
    float txmax = ((direction.x() > 0 ? volume_max.x : 0.f) - origin.x()) / direction.x();
    float tymax = ((direction.y() > 0 ? volume_max.y : 0.f) - origin.y()) / direction.y();
    float tzmax = ((direction.z() > 0 ? volume_max.z : 0.f) - origin.z()) / direction.z();
    return fmin(fmin(txmax, tymax), tzmax);
}
__global__
// One thread per output pixel: march a ray from the camera center through the
// TSDF volume, find the zero crossing (the implicit surface), and write the
// predicted vertex, normal and color maps (KinectFusion-style raycasting).
void raycast_tsdf_kernel(
    const PtrStepSz<short2> tsdf_volume,      // global TSDF volume (fixed-point TSDF in .x)
    const PtrStepSz<uchar3> color_volume,     // global color volume
    PtrStepSz<float3> model_vertex,           // output: predicted vertex map
    PtrStepSz<float3> model_normal,           // output: predicted normal map
    PtrStepSz<uchar3> model_color,            // output: predicted color map
    const int3 volume_size,                   // volume dimensions in voxels
    const float voxel_scale,                  // metric size of one voxel
    const CameraParameters cam_parameters,    // intrinsics for this pyramid level
    const float truncation_distance,          // TSDF truncation distance
    const Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation,  // camera-to-world rotation
    const Vec3fda translation)                // camera center in world coordinates
{
    // step 1: pixel handled by this thread; bail out if outside the image.
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= model_vertex.cols || y >= model_vertex.rows)
        return;
    // step 2: build the ray and decide where marching starts and ends.
    // Metric extent of the volume.
    // NOTE(review): identical for every thread — could be computed once on the
    // host instead of in each of the width*height threads.
    const float3 volume_range = make_float3(volume_size.x * voxel_scale,
                                            volume_size.y * voxel_scale,
                                            volume_size.z * voxel_scale);
    // Back-projected pixel direction in the camera frame; Z is left at 1
    // because only the direction matters here.
    const Vec3fda pixel_position(
            (x - cam_parameters.principal_x) / cam_parameters.focal_x, // X/Z
            (y - cam_parameters.principal_y) / cam_parameters.focal_y, // Y/Z
            1.f);                                                      // Z/Z
    // Rotate the direction into the world frame (P_w = R_wc * P_c) and normalize.
    Vec3fda ray_direction = (rotation * pixel_position);
    ray_direction.normalize();
    // fmax is the CUDA float max(); `translation` is the camera center in world
    // coordinates. Clamp the entry time at zero to start marching at the camera.
    float ray_length = fmax(get_min_time(volume_range, translation, ray_direction), 0.f);
    // The entry time must come before the exit time, otherwise the ray misses.
    if (ray_length >= get_max_time(volume_range, translation, ray_direction))
        return;
    // Advance one voxel past the entry point so we are safely inside the volume.
    ray_length += voxel_scale;
    Vec3fda grid = (translation + (ray_direction * ray_length)) / voxel_scale;
    // TSDF at the starting voxel, used as the "previous" value of the march.
    // NOTE(review): if this start position lies outside the volume the fetch is
    // undefined and could even fault — no bounds check is done here.
    // __float2int_rd rounds toward negative infinity.
    float tsdf = static_cast<float>(tsdf_volume.ptr(
            __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(grid(0))].x) *
                 DIVSHORTMAX;
    // Maximum march distance (includes the distance already travelled).
    // NOTE(review): sqrt(2) rather than sqrt(3) for a cube diagonal, and only
    // volume_range.x is used — confirm this bound is intentional.
    const float max_search_length = ray_length + volume_range.x * sqrt(2.f);
    // step 3: march the ray in half-truncation-distance steps.
    for (; ray_length < max_search_length; ray_length += truncation_distance * 0.5f) {
        // step 3.1: voxel reached after this step.
        grid = ((translation + (ray_direction * (ray_length + truncation_distance * 0.5f))) / voxel_scale);
        // Skip samples outside the (1-voxel-inset) volume bounds.
        if (grid.x() < 1 || grid.x() >= volume_size.x - 1 || grid.y() < 1 ||
            grid.y() >= volume_size.y - 1 ||
            grid.z() < 1 || grid.z() >= volume_size.z - 1)
            continue;
        // Keep the previous TSDF so a sign change can be detected.
        const float previous_tsdf = tsdf;
        // TSDF at the current voxel.
        tsdf = static_cast<float>(tsdf_volume.ptr(
                __float2int_rd(grid(2)) * volume_size.y + __float2int_rd(grid(1)))[__float2int_rd(
                grid(0))].x) *
               DIVSHORTMAX;
        // step 3.2: check for a zero crossing.
        if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
            // Crossed the surface from behind — not a visible front face; stop.
            break;
        if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
            // step 3.3: front-facing crossing — locate the exact intersection.
            // Refined ray length at the surface, matching Eq. (15) of the paper.
            const float t_star =
                    ray_length - truncation_distance * 0.5f * previous_tsdf / (tsdf - previous_tsdf);
            // World-space surface point (vec3f).
            const auto vertex = translation + ray_direction * t_star;
            // Same point in (fractional) voxel coordinates.
            const Vec3fda location_in_grid = (vertex / voxel_scale);
            // Discard vertices that fall outside the usable volume interior.
            if (location_in_grid.x() < 1 || location_in_grid.x() >= volume_size.x - 1 ||
                location_in_grid.y() < 1 || location_in_grid.y() >= volume_size.y - 1 ||
                location_in_grid.z() < 1 || location_in_grid.z() >= volume_size.z - 1)
                break;
            // step 3.4: surface normal from central differences of the TSDF
            // along x, y and z.
            // normal  - resulting gradient/normal
            // shifted - scratch position offset +/-1 voxel along one axis
            Vec3fda normal, shifted;
            // step 3.4.1: x axis.
            shifted = location_in_grid;
            // Shift by one voxel; give up if that leaves the volume interior.
            shifted.x() += 1;
            if (shifted.x() >= volume_size.x - 1)
                break;
            // Trilinear interpolation is needed because `shifted` has fractional
            // coordinates while the volume stores TSDF only at voxel centers.
            const float Fx1 = interpolate_trilinearly(
                shifted,      // shifted sample position in voxel coordinates
                tsdf_volume,  // TSDF volume
                volume_size,  // volume dimensions
                voxel_scale); // voxel size
            // Same sample one voxel in the other direction.
            shifted = location_in_grid;
            shifted.x() -= 1;
            if (shifted.x() < 1)
                break;
            const float Fx2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
            // The TSDF difference approximates the x component of the gradient.
            // NOTE(review): this is only accurate while neighboring TSDF values
            // are untruncated; truncation can zero out a gradient component.
            normal.x() = (Fx1 - Fx2);
            // step 3.4.2: y axis.
            shifted = location_in_grid;
            shifted.y() += 1;
            if (shifted.y() >= volume_size.y - 1)
                break;
            const float Fy1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
            shifted = location_in_grid;
            shifted.y() -= 1;
            if (shifted.y() < 1)
                break;
            const float Fy2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
            normal.y() = (Fy1 - Fy2);
            // step 3.4.3: z axis.
            shifted = location_in_grid;
            shifted.z() += 1;
            if (shifted.z() >= volume_size.z - 1)
                break;
            const float Fz1 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
            shifted = location_in_grid;
            shifted.z() -= 1;
            if (shifted.z() < 1)
                break;
            const float Fz2 = interpolate_trilinearly(shifted, tsdf_volume, volume_size, voxel_scale);
            normal.z() = (Fz1 - Fz2);
            // step 3.4.4: reject degenerate gradients, otherwise normalize.
            if (normal.norm() == 0)
                break;
            normal.normalize();
            // step 3.5: store the surface point and normal for this pixel.
            model_vertex.ptr(y)[x] = make_float3(vertex.x(), vertex.y(), vertex.z());
            model_normal.ptr(y)[x] = make_float3(normal.x(), normal.y(), normal.z());
            // step 3.6: fetch the color at the (integer-truncated) voxel.
            auto location_in_grid_int = location_in_grid.cast<int>();
            model_color.ptr(y)[x] = color_volume.ptr(
                    location_in_grid_int.z() * volume_size.y +
                    location_in_grid_int.y())[location_in_grid_int.x()];
            break;
        }
    } // raycasting
}
// Surface prediction for one pyramid level of the current frame: launch the
// raycasting kernel over the whole image and wait for it to complete.
void surface_prediction(
    const VolumeData& volume,              // global TSDF + color volume
    GpuMat& model_vertex,                  // output: predicted vertex map
    GpuMat& model_normal,                  // output: predicted normal map
    GpuMat& model_color,                   // output: predicted color map
    const CameraParameters& cam_parameters,// intrinsics for this pyramid level
    const float truncation_distance,       // TSDF truncation distance
    const Eigen::Matrix4f& pose)           // current camera pose (world frame)
{
    // step 0: clear the vertex/normal/color maps before writing into them.
    model_vertex.setTo(0);
    model_normal.setTo(0);
    model_color.setTo(0);
    // step 1: one thread per pixel, 32x32 threads per block.
    dim3 threads(32, 32);
    dim3 blocks((model_vertex.cols + threads.x - 1) / threads.x,
                (model_vertex.rows + threads.y - 1) / threads.y);
    // step 2: launch the raycasting kernel.
    raycast_tsdf_kernel<<<blocks, threads>>>(
        volume.tsdf_volume,       // global TSDF volume
        volume.color_volume,      // global color volume
        model_vertex,             // predicted vertex map
        model_normal,             // predicted normal map
        model_color,              // predicted color map
        volume.volume_size,       // volume dimensions in voxels
        volume.voxel_scale,       // metric size of one voxel
        cam_parameters,           // intrinsics for this pyramid level
        truncation_distance,      // TSDF truncation distance
        pose.block(0, 0, 3, 3),   // rotation part of the camera pose
        pose.block(0, 3, 3, 1));  // translation part of the camera pose
    // step 3: wait for the kernel to finish.
    // FIX: cudaThreadSynchronize() has been deprecated since CUDA 10 (and
    // removed in CUDA 12); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
}
}
}
} | the_stack |
namespace matrix
{
using namespace h2o4gpu;
// For every column of A, find the (1-based) row index of the entry with the
// largest absolute value via cuBLAS, and store it in result_array as a flat
// 0-based offset into A's column-major storage.
void max_index_per_column(Matrix<float>& A, std::vector<int>& result_array, device::DeviceContext& context){
    const int rows = A.rows();
    for (int col = 0; col < A.columns(); ++col) {
        int one_based = 0;
        safe_cublas(cublasIsamax(context.cublas_handle, rows, A.data() + col * rows, 1, &one_based));
        result_array[col] = (one_based - 1) + col * rows;
    }
}
// Double-precision overload: per-column argmax of absolute value (Idamax),
// converted from cuBLAS's 1-based row index to a flat 0-based offset.
void max_index_per_column(Matrix<double>& A, std::vector<int>& result_array, device::DeviceContext& context){
    const int rows = A.rows();
    for (int col = 0; col < A.columns(); ++col) {
        int one_based = 0;
        safe_cublas(cublasIdamax(context.cublas_handle, rows, A.data() + col * rows, 1, &one_based));
        result_array[col] = (one_based - 1) + col * rows;
    }
}
// Scale every element of A in place: A *= a.
// NOTE(review): the lambda parameter is typed U (the scalar's type), so when
// U differs from T — e.g. the multiply<double>(..., const float a, ...)
// instantiation at the bottom of this file — each double element is first
// narrowed to float before the multiply; confirm this precision loss is
// intended.
template<typename T, typename U>
void multiply(Matrix<T>& A, const U a, device::DeviceContext& context)
{
    thrust::transform(A.dptr(), A.dptr() + A.size(), A.dptr(), [=]__device__ (U val)
    {
        return val * a;
    }
    );
}
// Element-wise matrix subtraction on the device: C = A - B.
// All three matrices must share the same dimensions; C may alias A or B.
template<typename T>
void subtract(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
    const T* lhs = A.data();
    const T* rhs = B.data();
    T* out = C.data();
    const int total = A.rows() * A.columns();
    auto first = thrust::make_counting_iterator(0);
    thrust::for_each(first, first + total, [=]__device__(int i)
    {
        out[i] = lhs[i] - rhs[i];
    });
}
// Element-wise matrix addition on the device: C = A + B.
// All three matrices must share the same dimensions; C may alias A or B.
template<typename T>
void add(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
    const T* lhs = A.data();
    const T* rhs = B.data();
    T* out = C.data();
    const int total = A.rows() * A.columns();
    auto first = thrust::make_counting_iterator(0);
    thrust::for_each(first, first + total, [=]__device__(int i)
    {
        out[i] = lhs[i] + rhs[i];
    });
}
// Normalize M (treated as one flat vector) to unit Euclidean length, in place.
// NOTE: thrust::inner_product allocates temporary device memory on every call.
// BUG FIX: the accumulator and the transform lambda previously used float even
// when T == double (0.0f init and a float-typed result), silently truncating
// double data to single precision; accumulate and scale in T instead.
template<typename T>
void normalize_vector_thrust(Matrix<T>& M, device::DeviceContext& context){
    T M_inner = thrust::inner_product(M.dptr(), M.dptr() + M.size(), M.dptr(), T(0));
    M.transform([=]__device__ (T val){return val / std::sqrt(M_inner);});
}
// Diagonal-matrix multiply via cublasSdgmm:
//   left_diag == true  : C = diag(B) * A
//   left_diag == false : C = A * diag(B)
// A is the dense operand; B supplies the diagonal as a vector (stride incx).
void multiply_diag(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool left_diag)
{
    cublasSideMode_t mode = left_diag ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
    int m = C.rows();
    int n = C.columns();
    int lda = m;
    int incx = 1; //Review what this should be...
    int ldc = m;
    safe_cublas(cublasSdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
// Double-precision overload of the diagonal multiply (see float version):
// C = diag(B) * A when left_diag, else C = A * diag(B), via cublasDdgmm.
void multiply_diag(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool left_diag)
{
    cublasSideMode_t mode = left_diag ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
    int m = C.rows();
    int n = C.columns();
    int lda = m;
    int incx = 1; //Review what this should be...
    int ldc = m;
    safe_cublas(cublasDdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
// General matrix-matrix product C = alpha * op(A) * op(B), where op is the
// optional transpose; beta is fixed at 0 so C is overwritten. Dimensions and
// leading dimensions assume column-major storage.
void multiply(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, float alpha)
{
    cublasOperation_t op_a = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
    cublasOperation_t op_b = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
    const float beta = 0;
    int m = C.rows();                               // rows of op(A) and C
    int n = C.columns();                            // columns of op(B) and C
    int k = transpose_a ? A.rows() : A.columns();   // shared inner dimension
    int lda = transpose_a ? k : m;
    int ldb = transpose_b ? n : k;
    int ldc = m;
    safe_cublas(cublasSgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
// Double-precision GEMM: C = alpha * op(A) * op(B) with beta = 0
// (C overwritten). See the float overload for the dimension conventions.
void multiply(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, double alpha)
{
    cublasOperation_t op_a = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
    cublasOperation_t op_b = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
    const double beta = 0;
    int m = C.rows();                               // rows of op(A) and C
    int n = C.columns();                            // columns of op(B) and C
    int k = transpose_a ? A.rows() : A.columns();   // shared inner dimension
    int lda = transpose_a ? k : m;
    int ldb = transpose_b ? n : k;
    int ldc = m;
    safe_cublas(cublasDgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
// B = A^T via cublasSgeam with alpha = 1, beta = 0.
// NOTE(review): NULL is passed for the second operand; cuBLAS documents that
// the B input of geam need not be valid when beta == 0 — confirm against the
// cuBLAS version in use.
void transpose(const Matrix<float>& A, Matrix<float>& B, device::DeviceContext& context)
{
    h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
    const float alpha = 1.0f;
    const float beta = 0.0f;
    safe_cublas(cublasSgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
// Double-precision transpose: B = A^T via cublasDgeam (alpha = 1, beta = 0;
// the unused second operand is NULL, permitted when beta == 0).
void transpose(const Matrix<double>& A, Matrix<double>& B, device::DeviceContext& context)
{
    h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
    const double alpha = 1.0f;
    const double beta = 0.0f;
    safe_cublas(cublasDgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
// Normalize each column of M to unit Euclidean length, in place.
//   M_temp        - scratch, same shape as M (holds element-wise squares)
//   column_length - scratch vector, receives 1/||column|| per column
//   ones          - vector of ones used to sum each column via GEMV
void normalize_columns(Matrix<float>& M, Matrix<float>& M_temp, Matrix<float>& column_length, const Matrix<float>& ones, device::DeviceContext& context)
{
    // Square every element into the scratch matrix.
    thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
    auto d_column_length = column_length.data();
    auto d_ones = ones.data();
    const float alpha = 1.0f;
    const float beta = 0.0f;
    // column_length = M_temp^T * ones, i.e. the sum of squares of each column.
    safe_cublas(cublasSgemv(context.cublas_handle, CUBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
    // Convert sums of squares to reciprocal lengths; all-zero columns stay zero.
    // NOTE(review): the lambda computes in double and narrows on store —
    // presumably harmless; confirm.
    thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(float val)
    {
        if (val == 0.0)
        {
            return 0.0;
        }
        return 1.0/ sqrt(val);
    });
    // Scale each column by its reciprocal length (in-place dgmm: C aliases A).
    safe_cublas(cublasSdgmm(context.cublas_handle, CUBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
// Double-precision overload: normalize each column of M to unit length using
// the caller-provided scratch buffers (see the float overload for details).
void normalize_columns(Matrix<double>& M, Matrix<double>& M_temp, Matrix<double>& column_length, const Matrix<double>& ones, device::DeviceContext& context)
{
    // Square every element into the scratch matrix.
    thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
    auto d_column_length = column_length.data();
    auto d_ones = ones.data();
    const double alpha = 1.0f;
    const double beta = 0.0f;
    // column_length = M_temp^T * ones, i.e. the sum of squares of each column.
    safe_cublas(cublasDgemv(context.cublas_handle, CUBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
    // Convert sums of squares to reciprocal lengths; all-zero columns stay zero.
    thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(double val)
    {
        if (val == 0.0)
        {
            return 0.0;
        }
        return 1.0/ sqrt(val);
    });
    // Scale each column by its reciprocal length (in-place dgmm: C aliases A).
    safe_cublas(cublasDdgmm(context.cublas_handle, CUBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
// Convenience overload: allocate the scratch buffers locally and delegate to
// the workspace version of normalize_columns.
void normalize_columns(Matrix<float>& M, device::DeviceContext& context)
{
    Matrix<float> scratch(M.rows(), M.columns());
    Matrix<float> inv_lengths(1, M.columns());
    Matrix<float> unit_vec(1, M.columns());
    unit_vec.fill(1.0f);
    normalize_columns(M, scratch, inv_lengths, unit_vec, context);
}
// Convenience overload (double): allocate the scratch buffers locally and
// delegate to the workspace version of normalize_columns.
void normalize_columns(Matrix<double>& M, device::DeviceContext& context)
{
    Matrix<double> scratch(M.rows(), M.columns());
    Matrix<double> inv_lengths(1, M.columns());
    Matrix<double> unit_vec(1, M.columns());
    unit_vec.fill(1.0f);
    normalize_columns(M, scratch, inv_lengths, unit_vec, context);
}
// Normalize M (viewed as a column vector of length M.rows()) to unit 2-norm,
// using cuBLAS for the norm and a device transform for the scaling.
void normalize_vector_cublas(Matrix<float>& M, device::DeviceContext& context){
    float norm2 = 0.0;
    // BUG FIX: the stride argument of cublasSnrm2 is an int; the double
    // literal 1.0 relied on an implicit narrowing conversion — pass 1.
    safe_cublas(cublasSnrm2(context.cublas_handle, M.rows(), M.data(), 1, &norm2));
    M.transform([=]__device__ (float val){return val * (1/norm2);});
}
// Double-precision overload: normalize M (viewed as a column vector) to unit
// 2-norm via cublasDnrm2.
void normalize_vector_cublas(Matrix<double>& M, device::DeviceContext& context){
    double norm2 = 0.0;
    // BUG FIX: the stride argument of cublasDnrm2 is an int (was the double
    // literal 1.0).
    safe_cublas(cublasDnrm2(context.cublas_handle, M.rows(), M.data(), 1, &norm2));
    // BUG FIX: the lambda previously took `float val`, truncating every double
    // element to single precision before the scale; operate in double.
    M.transform([=]__device__ (double val){return val * (1/norm2);});
}
// Reconstruction residual of the factorization X ~= D * S: R = X - D * S.
void residual(const Matrix<float>& X, const Matrix<float>& D, const Matrix<float>& S, Matrix<float>& R, device::DeviceContext& context)
{
    multiply(D, S, R, context);   // R = D * S
    subtract(X, R, R, context);   // R = X - R (in place)
}
// Double-precision overload: R = X - D * S.
void residual(const Matrix<double>& X, const Matrix<double>& D, const Matrix<double>& S, Matrix<double>& R, device::DeviceContext& context)
{
    multiply(D, S, R, context);   // R = D * S
    subtract(X, R, R, context);   // R = X - R (in place)
}
// Full symmetric eigendecomposition of X via cuSOLVER syevd: on return Q
// holds the eigenvectors (one per column) and w the eigenvalues (cuSOLVER
// returns them in ascending order). X itself is left untouched — syevd runs
// on the copy stored in Q.
// NOTE(review): dev_info is allocated but its value is never copied back or
// inspected, so a failed factorization would go unnoticed — consider checking.
void calculate_eigen_pairs_exact(const Matrix<float>& X, Matrix<float>& Q, Matrix<float>& w, device::DeviceContext& context)
{
    h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
    h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
    h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
    // Query the required device workspace size.
    int lwork;
    safe_cusolver(cusolverDnSsyevd_bufferSize(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
    float *d_work;
    safe_cuda(cudaMalloc(&d_work, sizeof(float) * lwork));
    int *dev_info = NULL;
    safe_cuda(cudaMalloc ((void**)&dev_info, sizeof(int)));
    // syevd overwrites its input with the eigenvectors, so work on a copy in Q.
    Q.copy(X);
    safe_cusolver(cusolverDnSsyevd(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
    safe_cuda(cudaDeviceSynchronize());
    safe_cuda(cudaFree(d_work));
    safe_cuda(cudaFree(dev_info));
    safe_cuda(cudaGetLastError());
}
// Double-precision overload: symmetric eigendecomposition via cusolverDnDsyevd
// (eigenvectors into Q, ascending eigenvalues into w; X untouched).
// NOTE(review): dev_info is never checked, same as the float overload.
void calculate_eigen_pairs_exact(const Matrix<double>& X, Matrix<double>& Q, Matrix<double>& w, device::DeviceContext& context)
{
    h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
    h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
    h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
    // Query the required device workspace size.
    int lwork;
    safe_cusolver(cusolverDnDsyevd_bufferSize(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
    double *d_work;
    safe_cuda(cudaMalloc(&d_work, sizeof(double) * lwork));
    int *dev_info = NULL;
    safe_cuda(cudaMalloc ((void**)&dev_info, sizeof(int)));
    // syevd overwrites its input with the eigenvectors, so work on a copy in Q.
    Q.copy(X);
    safe_cusolver(cusolverDnDsyevd(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
    safe_cuda(cudaDeviceSynchronize());
    safe_cuda(cudaFree(d_work));
    safe_cuda(cudaFree(dev_info));
    safe_cuda(cudaGetLastError());
}
// eigen_value_estimate = b_k1 . b_k (dot product of the two vectors, used as
// an eigenvalue estimate, e.g. in a power-method iteration).
void dot_product(Matrix<float>& b_k1, Matrix<float>& b_k, float* eigen_value_estimate, device::DeviceContext& context)
{
    // BUG FIX: cublasSdot takes int strides; the double literals 1.0 relied on
    // implicit narrowing conversions — pass 1.
    safe_cublas(cublasSdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1, b_k.data(), 1, eigen_value_estimate));
}
// Double-precision overload: eigen_value_estimate = b_k1 . b_k.
void dot_product(Matrix<double>& b_k1, Matrix<double>& b_k, double* eigen_value_estimate, device::DeviceContext& context)
{
    // BUG FIX: cublasDdot takes int strides (was the double literal 1.0).
    safe_cublas(cublasDdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1, b_k.data(), 1, eigen_value_estimate));
}
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Stricly floating point operations that are not used
// Least-squares solve of A * X = B (m >= n) via QR factorization:
//   1. geqrf : QR-factorize a copy of A
//   2. ormqr : apply Q^T to a zero-padded copy of B
//   3. trsm  : back-substitute with the upper-triangular factor R
// NOTE(review): B_copy is sized (A.rows(), A.columns()) rather than
// (A.rows(), B.columns()), which assumes B.columns() <= A.columns() — confirm.
void linear_solve(const Matrix<float>& A, Matrix<float>& X, const Matrix<float>& B, device::DeviceContext& context)
{
    h2o4gpu_check(A.rows()>= A.columns(),"Linear solve requires m >= n");
    h2o4gpu_check(X.rows()>= X.columns(),"Linear solve requires n >= k"); //TODO: is this restriction necessary?
    // Work on copies: geqrf/ormqr overwrite their inputs.
    Matrix<float> A_copy(A);
    Matrix<float> B_copy(A.rows(), A.columns());
    thrust::copy(B.dptr(), B.dptr() + B.size(), B_copy.dptr());
    thrust::fill(B_copy.dptr() + B.size(), B_copy.dptr() + B_copy.size(), 0.0f);
    // Workspace query + allocation for the QR factorization.
    int work_size = 0;
    safe_cusolver(cusolverDnSgeqrf_bufferSize(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), &work_size));
    thrust::device_vector<float> work(work_size);
    float* d_work = thrust::raw_pointer_cast(work.data());
    thrust::device_vector<float> tau((std::min)(A.rows(), A.columns()));
    float* d_tau = thrust::raw_pointer_cast(tau.data());
    thrust::device_vector<int> dev_info(1);
    int* d_dev_info = thrust::raw_pointer_cast(dev_info.data());
    // Step 1: A_copy <- QR factors (R in the upper triangle, Q as reflectors).
    safe_cusolver(cusolverDnSgeqrf(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), d_tau, d_work, work_size, d_dev_info));
    h2o4gpu_check(dev_info[0] == 0, "geqrf unsuccessful");
    // Step 2: B_copy <- Q^T * B_copy.
    safe_cusolver(cusolverDnSormqr(context.cusolver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, A.rows(), A.columns(), (std::min)(A.rows(), A.columns()), A_copy.data(), A.rows(), d_tau, B_copy.data(), A.rows(), d_work, work_size, d_dev_info));
    h2o4gpu_check(dev_info[0] == 0, "ormqr unsuccessful");
    // Extract the n x n upper block R and the top n rows of Q^T B into dense
    // matrices so trsm can run on them.
    Matrix<float> R(A.columns(), A.columns());
    Matrix<float> QTB(A.columns(), B.columns());
    auto counting = thrust::make_counting_iterator(0);
    int n = R.columns();
    int m = A.rows();
    auto d_R = R.data();
    auto d_A_copy = A_copy.data();
    auto d_QTB = QTB.data();
    auto d_B_copy = B_copy.data();
    int qtb_columns = QTB.columns();
    thrust::for_each(counting, counting + R.size(), [=]__device__ (int idx)
    {
        int row = idx % n;
        int column = idx / n;
        d_R[idx] = d_A_copy[column * m + row];
        if (column < qtb_columns)
        {
            d_QTB[idx] = d_B_copy[column * m + row];
        }
    });
    // Step 3: solve R * X = Q^T B by triangular back-substitution (in place).
    const float alpha = 1.0f;
    safe_cublas(cublasStrsm(context.cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, QTB.rows(), QTB.columns(), &alpha, R.data(), R.rows(), QTB.data(), QTB.rows()));
    thrust::copy(QTB.dptr(), QTB.dptr() + QTB.size(), X.data());
}
// Moore-Penrose pseudoinverse via SVD: pinvA = V * S^+ * U^T, where S^+
// inverts singular values above a small threshold and zeroes the rest.
// cusolverDnSgesvd requires m >= n, so A is first zero-padded to a square-or-
// taller matrix when it has more columns than rows.
void pseudoinverse(const Matrix<float>& A, Matrix<float>& pinvA, device::DeviceContext& context)
{
    h2o4gpu_check(A.rows() == pinvA.columns() && A.columns() == pinvA.rows(), "pseudoinverse dimensions incorrect");
    //Add zero rows if m < n such that m >= n
    Matrix<float> A_extended((std::max)(A.columns(), A.rows()), A.columns());
    auto counting = thrust::make_counting_iterator(0);
    int A_column_size = A.rows();
    int A_extended_column_size = A_extended.rows();
    auto d_A = A.data();
    auto d_A_extended = A_extended.data();
    // Copy A into the top of A_extended; fill the padding rows with zeros.
    thrust::for_each(counting, counting + A_extended.size(), [=]__device__(int idx)
    {
        int row = idx % A_extended_column_size;
        if (row < A_column_size)
        {
            int column = idx / A_extended_column_size;
            d_A_extended[idx] = d_A[A_column_size * column + row];
        }
        else
        {
            d_A_extended[idx] = 0;
        }
    });
    // SVD workspace + outputs: A_extended = U * diag(S) * VT.
    int work_size = 0;
    safe_cusolver(cusolverDnSgesvd_bufferSize(context.cusolver_handle, A_extended.rows(), A_extended.columns(), &work_size));
    Matrix<float> work(work_size, 1);
    Matrix<float> S((std::min)(A_extended.rows(), A_extended.columns()), 1);
    Matrix<float> U(A_extended.rows(), A_extended.rows());
    Matrix<float> VT(A_extended.columns(), A_extended.columns());
    Matrix<int> dev_info(1, 1);
    safe_cusolver (cusolverDnSgesvd(context.cusolver_handle, 'A', 'A', A_extended.rows(), A_extended.columns(), d_A_extended, A_extended.rows(), S.data(), U.data(), U.rows(), VT.data(), VT.rows(), work.data(), work_size, NULL, dev_info.data()));
    // Invert the singular values, treating anything below eps as zero
    // (regularizes rank-deficient inputs).
    float eps = 1e-5;
    thrust::transform(S.dptr(), S.dptr() + S.size(), S.dptr(), [=]__device__(float val)
    {
        if (abs(val) < eps)
        {
            return 0.0;
        }
        else
        {
            return 1.0 / val;
        }
    });
    Matrix<float> UT(A_extended.rows(), A_extended.rows());
    //Calculate transpose of U
    const float alpha = 1.0;
    const float beta = 0.0;
    safe_cublas(cublasSgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, UT.rows(), UT.columns(), &alpha, U.data(), UT.rows(), &beta,NULL, UT.rows(), UT.data(), UT.rows()));
    // U <- diag(S^+) * U^T (dgmm writes the scaled transpose back into U).
    safe_cublas(cublasSdgmm(context.cublas_handle, CUBLAS_SIDE_LEFT, UT.rows(), UT.columns(), UT.data(), UT.rows(), S.data(), 1, U.data(), U.rows()));
    // pinvA_extended = VT^T * (S^+ U^T) = V * S^+ * U^T.
    Matrix<float> pinvA_extended(A_extended.columns(), A_extended.rows());
    multiply(VT, U, pinvA_extended, context, true);
    // Drop the padded part: only the leading pinvA.size() entries are the
    // pseudoinverse of the original A.
    thrust::copy(pinvA_extended.dptr(), pinvA_extended.dptr() + pinvA.size(), pinvA.dptr());
}
// Frobenius-normalize M in place (scale so the sum of squared entries is 1),
// then recompute and print the post-normalization sum of squares as a
// sanity check (should print 1.0000).
void f_normalize(Matrix<float>& M, device::DeviceContext& context)
{
    Matrix<float> temp(M.rows(), M.columns());
    // sum = ||M||_F^2
    thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
    float sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
    // M <- M / ||M||_F
    multiply(M, 1.0 / std::sqrt(sum), context);
    // Verify: recompute the sum of squares after scaling.
    thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
    float final_sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
    printf("f norm sum squares: %1.4f\n", final_sum);
}
// Normalize each column of M to unit Euclidean length using a CUB segmented
// reduction (one segment per column) for the per-column sums of squares.
// NOTE(review): the return values of cub::DeviceSegmentedReduce::Sum and the
// cudaMalloc/cudaFree calls are not checked — consider wrapping them.
void normalize_columns_cub(Matrix<float>& M, device::DeviceContext& context)
{
    //Create alias so device Lamba does not dereference this pointer
    int m = M.rows();
    thrust::device_vector<float> temp(M.size());
    thrust::device_vector<float> length_squared(M.columns());
    // Square every element into temp.
    thrust::transform(M.dptr(), M.dptr() + M.size(), temp.begin(), [=]__device__(float val)
    {
        return val * val;
    });
    // Segment offsets: column i spans [i*m, (i+1)*m) in column-major storage.
    thrust::device_vector<int> column_segments(M.columns() + 1);
    auto counting = thrust::make_counting_iterator(0);
    thrust::transform(counting, counting + column_segments.size(), column_segments.begin(), [=]__device__(int idx)
    {
        return idx * m;
    });
    // Determine temporary device storage requirements
    void* d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    auto segments = thrust::raw_pointer_cast(column_segments.data());
    auto sum_in = thrust::raw_pointer_cast(temp.data());
    auto sum_out = thrust::raw_pointer_cast(length_squared.data());
    // First call with NULL storage only computes temp_storage_bytes.
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
                                    M.columns(), segments, segments + 1);
    // Allocate temporary storage
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Second call performs the actual per-column reduction.
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
                                    M.columns(), segments, segments + 1);
    //Scale each element by the reciprocal length of its column; all-zero
    //columns are left as zero to avoid dividing by zero.
    auto d_length_squared = thrust::raw_pointer_cast(length_squared.data());
    auto d_data = M.data();
    thrust::transform(counting, counting + M.size(), M.dptr(), [=]__device__(int idx)
    {
        int col = idx / m;
        float length_squared = d_length_squared[col];
        if (length_squared > 0.0)
        {
            return d_data[idx] / std::sqrt(d_length_squared[col]);
        }
        else
        {
            return 0.0f;
        }
    });
    cudaFree(d_temp_storage);
}
}
//Original Impl
// NOTE(review): this instantiation pairs T=double with a float scalar, so the
// element-wise lambda in multiply() narrows each double to float — confirm.
template void matrix::multiply<double>(Matrix<double>& A, const float a, device::DeviceContext& context);
//Impl for floats and doubles
// Explicit instantiations of the templated element-wise helpers.
template void matrix::multiply<float>(Matrix<float>& A, const float a, device::DeviceContext& context);
template void matrix::multiply<double>(Matrix<double>& A, const double a, device::DeviceContext& context);
template void matrix::subtract<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::subtract<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::add<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::add<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<float>(Matrix<float>& M, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<double>(Matrix<double>& M, device::DeviceContext& context);
#include "br2cu.h"
#include "mcx_core.h"
#include "tictoc.h"
#include "mcx_const.h"
#ifdef USE_MT_RAND
#include "mt_rand_s.cu" // use Mersenne Twister RNG (MT)
#else
#include "logistic_rand.cu" // use Logistic Lattice ring 5 RNG (LL5)
#endif
// optical properties saved in the constant memory
// {x}:mua,{y}:mus,{z}:anisotropy (g),{w}:refractive index (n)
__constant__ float4 gproperty[MAX_PROP];
__constant__ float4 gdetpos[MAX_DETECTORS];
// kernel constant parameters
__constant__ MCXParam gcfg[1];
extern __shared__ float sharedmem[]; //max 64 tissue types when block size=64
// tested with texture memory for media, only improved 1% speed
// to keep code portable, use global memory for now
// also need to change all media[idx1d] to tex1Dfetch() below
//texture<uchar, 1, cudaReadModeElementType> texmedia;
// Atomic float addition that also works on pre-Fermi GPUs, where native
// float atomicAdd is unavailable.
__device__ inline void atomicadd(float* address, float value){
#if __CUDA_ARCH__ >= 200 // for Fermi, atomicAdd supports floats
  atomicAdd(address,value);
#elif __CUDA_ARCH__ >= 110
// float-atomic-add emulated with atomicExch (swap out, add, swap back until
// no other thread interfered), from
// http://forums.nvidia.com/index.php?showtopic=158039&view=findpost&p=991561
  float old = value;
  while ((old = atomicExch(address, atomicExch(address, 0.0f)+old))!=0.0f);
#endif
}
// Zero the per-thread partial-pathlength buffer (one float slot per medium type).
__device__ inline void clearpath(float *p,int maxmediatype){
      for(uint slot=0;slot<(uint)maxmediatype;++slot){
          p[slot]=0.f;
      }
}
// Zero a shared-memory cache of `len` floats; only thread 0 of the block
// performs the writes so the other threads do not duplicate the work.
__device__ inline void clearcache(float *p,int len){
      if(threadIdx.x==0){
          for(uint k=0;k<(uint)len;++k)
              p[k]=0.f;
      }
}
#ifdef USE_CACHEBOX
// Flush the block-shared accumulation cache (covering the cp0..cp1 sub-box)
// back into the global fluence array. Only thread 0 of the block writes, and
// atomic adds are used because caches from different blocks may overlap.
__device__ inline void savecache(float *data,float *cache){
      uint x,y,z;
      if(threadIdx.x==0){
          for(z=gcfg->cp0.z;z<=gcfg->cp1.z;z++)
             for(y=gcfg->cp0.y;y<=gcfg->cp1.y;y++)
                for(x=gcfg->cp0.x;x<=gcfg->cp1.x;x++){
                    // Global index uses dimlen strides; cache index is relative to cp0.
                    atomicadd(data+z*gcfg->dimlen.y+y*gcfg->dimlen.x+x,
                       cache[(z-gcfg->cp0.z)*gcfg->cachebox.y+(y-gcfg->cp0.y)*gcfg->cachebox.x+(x-gcfg->cp0.x)]);
                }
      }
}
#endif
#ifdef SAVE_DETECTORS
// Return the 1-based index of the first detector whose capture sphere
// contains p0, or 0 when no detector matches. gdetpos[d].w is compared
// against the squared distance (presumably the squared detector radius).
__device__ inline uint finddetector(MCXpos *p0){
      for(uint d=0;d<gcfg->detnum;++d){
          const float dx=gdetpos[d].x-p0->x;
          const float dy=gdetpos[d].y-p0->y;
          const float dz=gdetpos[d].z-p0->z;
          if(dx*dx+dy*dy+dz*dz < gdetpos[d].w){
                return d+1;
          }
      }
      return 0;
}
// Record a detected photon: find which detector captured it and append a
// record of (detector id, weight, per-medium partial path lengths) to the
// global detected-photon buffer.
__device__ inline void savedetphoton(float n_det[],uint *detectedphoton,float weight,float *ppath,MCXpos *p0){
      uint j,baseaddr=0;
      j=finddetector(p0);
      if(j){
         // Reserve one record slot atomically; silently drop the photon if
         // the buffer is already full.
	 baseaddr=atomicAdd(detectedphoton,1);
	 if(baseaddr<gcfg->maxdetphoton){
            // Each record is (maxmedia + 2) floats: id, weight, then paths.
	    baseaddr*=gcfg->maxmedia+2;
	    n_det[baseaddr++]=j;
	    n_det[baseaddr++]=weight;
	    for(j=0;j<gcfg->maxmedia;j++){
		n_det[baseaddr+j]=ppath[j]; // save partial pathlength to the memory
	    }
	 }
      }
}
#endif
// Terminate the current photon (bank its remaining energy and, when detection
// is enabled, record it at a detector) and respawn a fresh photon at the
// configured source position/direction.
__device__ inline void launchnewphoton(MCXpos *p,MCXdir *v,MCXtime *f,Medium *prop,uint *idx1d,
           uchar *mediaid,uchar isdet, float ppath[],float energyloss[],float n_det[],uint *dpnum){
      *energyloss+=p->w;  // sum all the remaining energy
#ifdef SAVE_DETECTORS
      // If the photon exited the medium at a detector voxel, save it.
      // NOTE(review): the "weight" slot of the record receives v->nscat (the
      // scattering-event count), not p->w — presumably intentional; confirm.
      if(gcfg->savedet){
         if(*mediaid==0 && isdet)
	      savedetphoton(n_det,dpnum,v->nscat,ppath,p);
         clearpath(ppath,gcfg->maxmedia);
      }
#endif
      // Reset position/direction/timers to the source defaults and bump the
      // completed-photon counter (f.ndone).
      *((float4*)p)=gcfg->ps;
      *((float4*)v)=gcfg->c0;
      *((float4*)f)=float4(0.f,0.f,gcfg->minaccumtime,f->ndone+1);
      *idx1d=gcfg->idx1dorig;
      *mediaid=gcfg->mediaidorig;
      *((float4*)(prop))=gproperty[*mediaid]; //always use mediaid to read gproperty[]
}
/*
this is the core Monte Carlo simulation kernel, please see Fig. 1 in Fang2009
everything in the GPU kernels is in grid-unit. To convert back to length, use
cfg->unitinmm (scattering/absorption coeff, T, speed etc)
*/
// Main photon-migration kernel. Each thread launches photons from the source
// and propagates them through the voxelated medium until it has completed
// nphoton photons (threads with idx<ophoton run one extra photon to absorb
// the division remainder). All lengths are in grid units; see the comment
// block above and Fig. 1 in Fang2009 for the algorithm outline.
//
// Parameters:
//  nphoton/ophoton   - photons per thread, and count of threads doing one extra
//  media             - voxelated tissue-type labels (1 byte per voxel)
//  field             - fluence accumulation array, one volume slab per time gate
//  genergy           - per-thread {escaped, absorbed} energy pairs
//  n_seed            - per-thread RNG seeds
//  n_pos/n_dir/n_len - per-thread photon state: position+weight,
//                      direction+scatter count, timers+completed-photon count
//  n_det/detectedphoton - detected-photon records and the shared record counter
kernel void mcx_main_loop(int nphoton,int ophoton,uchar media[],float field[],
     float genergy[],uint n_seed[],float4 n_pos[],float4 n_dir[],float4 n_len[],
     float n_det[], uint *detectedphoton){
    int idx= blockDim.x * blockIdx.x + threadIdx.x;
    MCXpos p,p0;//{x,y,z}: coordinates in grid unit, w:packet weight
    MCXdir v; //{x,y,z}: unitary direction vector in grid unit, nscat:total scat event
    MCXtime f; //pscat: remaining scattering probability,t: photon elapse time,
              //tnext: next accumulation time, ndone: completed photons
    float energyloss=genergy[idx<<1];
    float energyabsorbed=genergy[(idx<<1)+1];
    uint idx1d, idx1dold; //idx1dold is related to reflection
#ifdef TEST_RACING
    int cc=0;
#endif
    uchar mediaid,mediaidold;
    char medid=-1;
    float atten; //can be taken out to minimize registers
    float n1; //reflection var
    //for MT RNG, these will be zero-length arrays and be optimized out
    RandType t[RAND_BUF_LEN],tnew[RAND_BUF_LEN];
    Medium prop; //can become float2 if no reflection (mua/musp is in 1/grid unit)
    float len,cphi,sphi,theta,stheta,ctheta,tmp0,tmp1;
    float *ppath=sharedmem;
#ifdef USE_CACHEBOX
#ifdef SAVE_DETECTORS
    // cachebox shares dynamic shared memory with the per-thread ppath buffers
    float *cachebox=sharedmem+(gcfg->savedet ? blockDim.x*gcfg->maxmedia: 0);
#else
    float *cachebox=sharedmem;
#endif
    if(gcfg->skipradius2>EPS) clearcache(cachebox,(gcfg->cp1.x-gcfg->cp0.x+1)*(gcfg->cp1.y-gcfg->cp0.y+1)*(gcfg->cp1.z-gcfg->cp0.z+1));
#else
    float accumweight=0.f;
#endif
#ifdef SAVE_DETECTORS
    ppath=sharedmem+threadIdx.x*gcfg->maxmedia;   // per-thread partial-path slot in shared memory
#endif
    // load this thread's photon state and RNG
    *((float4*)(&p))=n_pos[idx];
    *((float4*)(&v))=n_dir[idx];
    *((float4*)(&f))=n_len[idx];
    gpu_rng_init(t,tnew,n_seed,idx);
    if(gcfg->savedet) clearpath(ppath,gcfg->maxmedia);
    // assuming the initial position is within the domain (mcx_config is supposed to ensure)
    idx1d=gcfg->idx1dorig;
    mediaid=gcfg->mediaidorig;
    if(mediaid==0) {
        return; // the initial position is not within the medium
    }
    *((float4*)(&prop))=gproperty[mediaid]; //always use mediaid to read gproperty[]
    /*
       using a while-loop to terminate a thread by np will cause MT RNG to be 3.5x slower
       LL5 RNG will only be slightly slower than for-loop with photon-move criterion
       we have switched to while-loop since v0.4.9, as LL5 was only minimally effected
       and we do not use MT as the default RNG.
    */
    while(f.ndone<(idx<ophoton?nphoton+1:nphoton)) {
        GPUDEBUG(("*i= (%d) L=%f w=%e a=%f\n",(int)f.ndone,f.pscat,p.w,f.t));
        // dealing with scattering
        if(f.pscat<=0.f) { // if this photon has finished his current jump, get next scat length & angles
            rand_need_more(t,tnew);
            f.pscat=rand_next_scatlen(t); // random scattering probability, unit-less
            GPUDEBUG(("next scat len=%20.16e \n",f.pscat));
            if(p.w<1.f){ // if this is not my first jump
                //random arimuthal angle
                tmp0=TWO_PI*rand_next_aangle(t); //next arimuth angle
                sincosf(tmp0,&sphi,&cphi);
                GPUDEBUG(("next angle phi %20.16e\n",tmp0));
                //Henyey-Greenstein Phase Function, "Handbook of Optical
                //Biomedical Diagnostics",2002,Chap3,p234, also see Boas2002
                if(prop.g>EPS){ //if prop.g is too small, the distribution of theta is bad
                    tmp0=(1.f-prop.g*prop.g)/(1.f-prop.g+2.f*prop.g*rand_next_zangle(t));
                    tmp0*=tmp0;
                    tmp0=(1.f+prop.g*prop.g-tmp0)/(2.f*prop.g);
                    // when ran=1, CUDA gives me 1.000002 for tmp0 which produces nan later
                    // detected by Ocelot,thanks to Greg Diamos,see http://bit.ly/cR2NMP
                    tmp0=max(-1.f, min(1.f, tmp0));
                    theta=acosf(tmp0);
                    stheta=sinf(theta);
                    ctheta=tmp0;
                }else{
                    // isotropic scattering when anisotropy factor g is ~0
                    theta=ONE_PI*rand_next_zangle(t);
                    sincosf(theta,&stheta,&ctheta);
                }
                GPUDEBUG(("next scat angle theta %20.16e\n",theta));
                // rotate the direction vector by (theta,phi) around the old direction
                if( v.z>-1.f+EPS && v.z<1.f-EPS ) {
                    tmp0=1.f-v.z*v.z; //reuse tmp to minimize registers
                    tmp1=rsqrtf(tmp0);
                    tmp1=stheta*tmp1;
                    *((float4*)(&v))=float4(
                        tmp1*(v.x*v.z*cphi - v.y*sphi) + v.x*ctheta,
                        tmp1*(v.y*v.z*cphi + v.x*sphi) + v.y*ctheta,
                        -tmp1*tmp0*cphi + v.z*ctheta,
                        v.nscat
                    );
                    GPUDEBUG(("new dir: %10.5e %10.5e %10.5e\n",v.x,v.y,v.z));
                }else{
                    // old direction is (anti)parallel to z: use the simplified rotation
                    *((float4*)(&v))=float4(stheta*cphi,stheta*sphi,(v.z>0.f)?ctheta:-ctheta,v.nscat);
                    GPUDEBUG(("new dir-z: %10.5e %10.5e %10.5e\n",v.x,v.y,v.z));
                }
                v.nscat++;
            }
        }
        n1=prop.n;                                  // refractive index before the move
        *((float4*)(&prop))=gproperty[mediaid];
        len=gcfg->minstep*prop.mus; //unitless (minstep=grid, mus=1/grid)
        // dealing with absorption
        p0=p;                                       // remember the pre-move position for reflection handling
        if(len>f.pscat){ //scattering ends in this voxel: mus*gcfg->minstep > s
            tmp0=f.pscat/prop.mus; // unit=grid
            *((float4*)(&p))=float4(p.x+v.x*tmp0,p.y+v.y*tmp0,p.z+v.z*tmp0,
                p.w*expf(-prop.mua*tmp0)); //mua=1/grid, tmp0=grid
            f.pscat=SAME_VOXEL;
            f.t+=tmp0*prop.n*gcfg->oneoverc0;  //propagation time (unit=s)
            if(gcfg->savedet) ppath[mediaid-1]+=tmp0; //(unit=grid)
            GPUDEBUG((">>ends in voxel %f<%f %f [%d]\n",f.pscat,len,prop.mus,idx1d));
        }else{ //otherwise, move gcfg->minstep
            if(mediaid!=medid)
                atten=expf(-prop.mua*gcfg->minstep);   // cache the per-step attenuation for this medium
            *((float4*)(&p))=float4(p.x+v.x,p.y+v.y,p.z+v.z,p.w*atten);
            medid=mediaid;
            f.pscat-=len;     //remaining probability: sum(s_i*mus_i), unit-less
            f.t+=gcfg->minaccumtime*prop.n; //propagation time (unit=s)
            if(gcfg->savedet) ppath[mediaid-1]+=gcfg->minstep; //(unit=grid)
            GPUDEBUG((">>keep going %f<%f %f [%d] %e %e\n",f.pscat,len,prop.mus,idx1d,f.t,f.tnext));
        }
        // remember the voxel we are leaving, then compute the new voxel index
        mediaidold=media[idx1d];
        idx1dold=idx1d;
        idx1d=(int(floorf(p.z))*gcfg->dimlen.y+int(floorf(p.y))*gcfg->dimlen.x+int(floorf(p.x)));
        GPUDEBUG(("old and new voxels: %d<->%d\n",idx1dold,idx1d));
        if(p.x<0||p.y<0||p.z<0||p.x>=gcfg->maxidx.x||p.y>=gcfg->maxidx.y||p.z>=gcfg->maxidx.z){
            mediaid=0;     // left the volume entirely
        }else{
            mediaid=(media[idx1d] & MED_MASK);
        }
        // dealing with boundaries
        //if it hits the boundary, exceeds the max time window or exits the domain, rebound or launch a new one
        if(mediaid==0||f.t>gcfg->tmax||f.t>gcfg->twin1||(gcfg->dorefint && n1!=gproperty[mediaid].w) ){
            float flipdir=0.f;   // which axis-aligned face was crossed: 1=x, 2=y, 3=z, 0=none
            float3 htime;        //reflection var
            if(gcfg->doreflect) {
                //time-of-flight to hit the wall in each direction
                htime.x=(v.x>EPS||v.x<-EPS)?(floorf(p0.x)+(v.x>0.f)-p0.x)/v.x:VERY_BIG;
                htime.y=(v.y>EPS||v.y<-EPS)?(floorf(p0.y)+(v.y>0.f)-p0.y)/v.y:VERY_BIG;
                htime.z=(v.z>EPS||v.z<-EPS)?(floorf(p0.z)+(v.z>0.f)-p0.z)/v.z:VERY_BIG;
                //get the direction with the smallest time-of-flight
                tmp0=fminf(fminf(htime.x,htime.y),htime.z);
                flipdir=(tmp0==htime.x?1.f:(tmp0==htime.y?2.f:(tmp0==htime.z&&idx1d!=idx1dold)?3.f:0.f));
                //move to the 1st intersection pt
                tmp0*=JUST_ABOVE_ONE;
                htime.x=floorf(p0.x+tmp0*v.x);
                htime.y=floorf(p0.y+tmp0*v.y);
                htime.z=floorf(p0.z+tmp0*v.z);
                if(htime.x>=0&&htime.y>=0&&htime.z>=0&&htime.x<gcfg->maxidx.x&&htime.y<gcfg->maxidx.y&&htime.z<gcfg->maxidx.z){
                    if(media[int(htime.z*gcfg->dimlen.y+htime.y*gcfg->dimlen.x+htime.x)]==mediaidold){ //if the first vox is not air
                        GPUDEBUG((" first try failed: [%.1f %.1f,%.1f] %d (%.1f %.1f %.1f)\n",htime.x,htime.y,htime.z,
                            media[int(htime.z*gcfg->dimlen.y+htime.y*gcfg->dimlen.x+htime.x)], gcfg->maxidx.x, gcfg->maxidx.y,gcfg->maxidx.z));
                        // retry tracing backwards from the end point to find the true crossing
                        htime.x=(v.x>EPS||v.x<-EPS)?(floorf(p.x)+(v.x<0.f)-p.x)/(-v.x):VERY_BIG;
                        htime.y=(v.y>EPS||v.y<-EPS)?(floorf(p.y)+(v.y<0.f)-p.y)/(-v.y):VERY_BIG;
                        htime.z=(v.z>EPS||v.z<-EPS)?(floorf(p.z)+(v.z<0.f)-p.z)/(-v.z):VERY_BIG;
                        tmp0=fminf(fminf(htime.x,htime.y),htime.z);
                        tmp1=flipdir;   //save the previous ref. interface id
                        flipdir=(tmp0==htime.x?1.f:(tmp0==htime.y?2.f:(tmp0==htime.z&&idx1d!=idx1dold)?3.f:0.f));
                        //if(gcfg->doreflect3){
                        tmp0*=JUST_ABOVE_ONE;
                        htime.x=floorf(p.x-tmp0*v.x); //move to the last intersection pt
                        htime.y=floorf(p.y-tmp0*v.y);
                        htime.z=floorf(p.z-tmp0*v.z);
                        if(tmp1!=flipdir&&htime.x>=0&&htime.y>=0&&htime.z>=0&&htime.x<gcfg->maxidx.x&&htime.y<gcfg->maxidx.y&&htime.z<gcfg->maxidx.z){
                            if(media[int(htime.z*gcfg->dimlen.y+htime.y*gcfg->dimlen.x+htime.x)]!=mediaidold){ //this is an air voxel
                                GPUDEBUG((" second try failed: [%.1f %.1f,%.1f] %d (%.1f %.1f %.1f)\n",htime.x,htime.y,htime.z,
                                    media[int(htime.z*gcfg->dimlen.y+htime.y*gcfg->dimlen.x+htime.x)], gcfg->maxidx.x, gcfg->maxidx.y,gcfg->maxidx.z));
                                /*to compute the remaining interface, we used the following fact to accelerate:
                                  if there exist 3 intersections, photon must pass x/y/z interface exactly once,
                                  we solve the coeff of the following equation to find the last interface:
                                    a*1+b*2+c=3
                                    a*1+b*3+c=2  -> [a b c]=[-1 -1 6], this will give the remaining interface id
                                    a*2+b*3+c=1
                                */
                                flipdir=-tmp1-flipdir+6.f;
                            }
                        }
                        //}
                    }
                }
            }
            *((float4*)(&prop))=gproperty[mediaid]; // optical property across the interface
            GPUDEBUG(("->ID%d J%d C%d tlen %e flip %d %.1f!=%.1f dir=%f %f %f pos=%f %f %f\n",idx,(int)v.nscat,
                (int)f.ndone,f.t, (int)flipdir, n1,prop.n,v.x,v.y,v.z,p.x,p.y,p.z));
            //recycled some old register variables to save memory
            //if hit boundary within the time window and is n-mismatched, rebound
            if(gcfg->doreflect&&f.t<gcfg->tmax&&f.t<gcfg->twin1&& flipdir>0.f && n1!=prop.n &&p.w>gcfg->minenergy){
                float Rtotal=1.f;   // unpolarized Fresnel reflectance; 1 => total internal reflection
                tmp0=n1*n1;
                tmp1=prop.n*prop.n;
                if(flipdir>=3.f) { //flip in z axis
                    cphi=fabs(v.z);
                    sphi=v.x*v.x+v.y*v.y;
                }else if(flipdir>=2.f){ //flip in y axis
                    cphi=fabs(v.y);
                    sphi=v.x*v.x+v.z*v.z;
                }else if(flipdir>=1.f){ //flip in x axis
                    cphi=fabs(v.x); //cos(si)
                    sphi=v.y*v.y+v.z*v.z; //sin(si)^2
                }
                len=1.f-tmp0/tmp1*sphi; //1-[n1/n2*sin(si)]^2 = cos(ti)^2
                GPUDEBUG((" ref len=%f %f+%f=%f w=%f\n",len,cphi,sphi,cphi*cphi+sphi,p.w));
                if(len>0.f) { // if not total internal reflection
                    // average of s- and p-polarized Fresnel reflectances
                    ctheta=tmp0*cphi*cphi+tmp1*len;
                    stheta=2.f*n1*prop.n*cphi*sqrtf(len);
                    Rtotal=(ctheta-stheta)/(ctheta+stheta);
                    ctheta=tmp1*cphi*cphi+tmp0*len;
                    Rtotal=(Rtotal+(ctheta-stheta)/(ctheta+stheta))*0.5f;
                    GPUDEBUG((" dir=%f %f %f htime=%f %f %f Rs=%f\n",v.x,v.y,v.z,htime.x,htime.y,htime.z,Rtotal));
                    GPUDEBUG((" ID%d J%d C%d flip=%3f (%d %d) cphi=%f sphi=%f p=%f %f %f p0=%f %f %f\n",
                        idx,(int)v.nscat,(int)f.tnext,
                        flipdir,idx1dold,idx1d,cphi,sphi,p.x,p.y,p.z,p0.x,p0.y,p0.z));
                } // else, total internal reflection
                if(Rtotal<1.f && rand_next_reflect(t)>Rtotal){ // do transmission
                    if(mediaid==0){ // transmission to external boundary
                        launchnewphoton(&p,&v,&f,&prop,&idx1d,&mediaid,(mediaidold & DET_MASK),
                            ppath,&energyloss,n_det,detectedphoton);
                        continue;
                    }
                    // refract: scale the tangential components, then renormalize
                    tmp0=n1/prop.n;
                    if(flipdir>=3.f) { //transmit through z plane
                        v.x=tmp0*v.x;
                        v.y=tmp0*v.y;
                    }else if(flipdir>=2.f){ //transmit through y plane
                        v.x=tmp0*v.x;
                        v.z=tmp0*v.z;
                    }else if(flipdir>=1.f){ //transmit through x plane
                        v.y=tmp0*v.y;
                        v.z=tmp0*v.z;
                    }
                    tmp0=rsqrtf(v.x*v.x+v.y*v.y+v.z*v.z);
                    v.x=v.x*tmp0;
                    v.y=v.y*tmp0;
                    v.z=v.z*tmp0;
                }else{ //do reflection
                    if(flipdir>=3.f) { //flip in z axis
                        v.z=-v.z;
                    }else if(flipdir>=2.f){ //flip in y axis
                        v.y=-v.y;
                    }else if(flipdir>=1.f){ //flip in x axis
                        v.x=-v.x;
                    }
                    p=p0;   //move back
                    idx1d=idx1dold;
                    mediaid=(media[idx1d] & MED_MASK);
                    *((float4*)(&prop))=gproperty[mediaid];
                    n1=prop.n;
                }
            }else{  // launch a new photon
                launchnewphoton(&p,&v,&f,&prop,&idx1d,&mediaid,(mediaidold & DET_MASK),ppath,
                    &energyloss,n_det,detectedphoton);
                continue;
            }
        }
        // saving fluence to the memory
        if(f.t>=f.tnext){
            GPUDEBUG(("field add to %d->%f(%d)  t(%e)>t0(%e)\n",idx1d,p.w,(int)f.ndone,f.t,f.tnext));
            // if t is within the time window, which spans cfg->maxgate*cfg->tstep wide
            if(gcfg->save2pt && f.t>=gcfg->twin0 && f.t<gcfg->twin1){
                energyabsorbed+=p.w*prop.mua;
#ifdef TEST_RACING
                // enable TEST_RACING to determine how many missing accumulations due to race
                if( (p.x-gcfg->ps.x)*(p.x-gcfg->ps.x)+(p.y-gcfg->ps.y)*(p.y-gcfg->ps.y)+(p.z-gcfg->ps.z)*(p.z-gcfg->ps.z)>gcfg->skipradius2) {
                    field[idx1d+(int)(floorf((f.t-gcfg->twin0)*gcfg->Rtstep))*gcfg->dimlen.z]+=1.f;
                    cc++;
                }
#else
#ifndef USE_ATOMIC
                // set gcfg->skipradius2 to only start depositing energy when dist^2>gcfg->skipradius2
                if(gcfg->skipradius2>EPS){
#ifdef USE_CACHEBOX
                    if(p.x<gcfg->cp1.x+1.f && p.x>=gcfg->cp0.x &&
                       p.y<gcfg->cp1.y+1.f && p.y>=gcfg->cp0.y &&
                       p.z<gcfg->cp1.z+1.f && p.z>=gcfg->cp0.z){
                        atomicadd(cachebox+(int(p.z-gcfg->cp0.z)*gcfg->cachebox.y
                            +int(p.y-gcfg->cp0.y)*gcfg->cachebox.x+int(p.x-gcfg->cp0.x)),p.w);
#else
                    if((p.x-gcfg->ps.x)*(p.x-gcfg->ps.x)+(p.y-gcfg->ps.y)*(p.y-gcfg->ps.y)+(p.z-gcfg->ps.z)*(p.z-gcfg->ps.z)<=gcfg->skipradius2){
                        accumweight+=p.w*prop.mua; // weight*absorption
#endif
                    }else{
                        field[idx1d+(int)(floorf((f.t-gcfg->twin0)*gcfg->Rtstep))*gcfg->dimlen.z]+=p.w;
                    }
                }else{
                    field[idx1d+(int)(floorf((f.t-gcfg->twin0)*gcfg->Rtstep))*gcfg->dimlen.z]+=p.w;
                }
#else
                // ifndef CUDA_NO_SM_11_ATOMIC_INTRINSICS
                atomicadd(& field[idx1d+(int)(floorf((f.t-gcfg->twin0)*gcfg->Rtstep))*gcfg->dimlen.z], p.w);
#endif
#endif
            }
            f.tnext+=gcfg->minaccumtime*prop.n; // fluence is a temporal-integration, unit=s
        }
    }
    // cachebox saves the total absorbed energy of all time in the sphere r<sradius.
    // in non-atomic mode, cachebox is more accurate than saving to the grid
    // as it is not influenced by race conditions.
    // now I borrow f.tnext to pass this value back
#ifdef USE_CACHEBOX
    if(gcfg->skipradius2>EPS){
        f.tnext=0.f;
        savecache(field,cachebox);
    }
#else
    f.tnext=accumweight;
#endif
    // write the per-thread state back to global memory for the host
    genergy[idx<<1]=energyloss;
    genergy[(idx<<1)+1]=energyabsorbed;
#ifdef TEST_RACING
    n_seed[idx]=cc;
#endif
    n_pos[idx]=*((float4*)(&p));
    n_dir[idx]=*((float4*)(&v));
    n_len[idx]=*((float4*)(&f));
}
// Accumulate into energy[2] the per-voxel fluence summed over all time gates,
// weighted by the voxel's absorption coefficient (gproperty[..].x).
kernel void mcx_sum_trueabsorption(float energy[],uchar media[], float field[], int maxgate,uint3 dimlen){
    int voxel= blockIdx.x*dimlen.y+blockIdx.y*dimlen.x+ threadIdx.x;
    float fluence=0.f;
    // sum this voxel over every time gate (gates are dimlen.z apart)
    for(int gate=0;gate<maxgate;++gate)
        fluence+=field[gate*dimlen.z+voxel];
    energy[2]+=fluence*gproperty[media[voxel] & MED_MASK ].x;
}
/*
assert cuda memory allocation result
*/
// Report a fatal error (via mcx_error) when a CUDA API call has failed,
// translating the CUDA error code into a human-readable message.
void mcx_cu_assess(cudaError_t cuerr,const char *file, const int linenum){
    if(cuerr==cudaSuccess)
        return;
    mcx_error(-(int)cuerr,(char *)cudaGetErrorString(cuerr),file,linenum);
}
/*
query GPU info and set active GPU
*/
// Query the CUDA devices, optionally print their properties, apply autopilot
// thread/gate sizing, and select the active device.
// Returns 1 on success, 0 when no usable GPU is found or cfg->gpuid is out of
// range. When cfg->isgpuinfo==2 (and no export buffer is set) the process
// exits after listing the devices.
int mcx_set_gpu(Config *cfg){
#if __DEVICE_EMULATION__
    return 1;
#else
    int dev;
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0){
        fprintf(stderr,"No CUDA-capable GPU device found\n");
        return 0;
    }
    if (cfg->gpuid && cfg->gpuid > deviceCount){
        fprintf(stderr,"Specified GPU ID is out of range\n");
        return 0;
    }
    // scan from the first device
    for (dev = 0; dev<deviceCount; dev++) {
        cudaDeviceProp dp;
        cudaGetDeviceProperties(&dp, dev);
        // autopilot only runs for the device that will actually be selected:
        // the user-chosen one, or the last device when gpuid==0
        if(cfg->autopilot && ((cfg->gpuid && dev==cfg->gpuid-1)
                             ||(cfg->gpuid==0 && dev==deviceCount-1) )){
            unsigned int needmem=cfg->dim.x*cfg->dim.y*cfg->dim.z; /*for mediam*/
            if(cfg->autopilot==1){
                cfg->nblocksize=64;
                // NOTE(review): multiProcessorCount appears twice (squared) in this
                // formula - confirm this is intentional and not a duplicated factor
                cfg->nthread=256*dp.multiProcessorCount*dp.multiProcessorCount;
                needmem+=cfg->nthread*sizeof(float4)*4+sizeof(float)*cfg->maxdetphoton*(cfg->medianum+1)+10*1024*1024; /*keep 10M for other things*/
                // size maxgate to whatever fits in the remaining global memory,
                // capped by the requested time window
                cfg->maxgate=((unsigned int)dp.totalGlobalMem-needmem)/(cfg->dim.x*cfg->dim.y*cfg->dim.z);
                cfg->maxgate=MIN((int)((cfg->tend-cfg->tstart)/cfg->tstep+0.5),cfg->maxgate);
                fprintf(cfg->flog,"autopilot mode: setting thread number to %d, block size to %d and time gates to %d\n",cfg->nthread,cfg->nblocksize,cfg->maxgate);
            }else if(cfg->autopilot==2){
                cfg->nblocksize=64;
                cfg->nthread=dp.multiProcessorCount*128;
                fprintf(cfg->flog,"autopilot mode: setting thread number to %d and block size to %d\n",cfg->nthread,cfg->nblocksize);
            }
        }
        if (strncmp(dp.name, "Device Emulation", 16)) {   // skip the emulation pseudo-device
            if(cfg->isgpuinfo){
                printf("=============================   GPU Infomation  ================================\n");
                printf("Device %d of %d:\t\t%s\n",dev+1,deviceCount,dp.name);
                printf("Compute Capability:\t%u.%u\n",dp.major,dp.minor);
                printf("Global Memory:\t\t%u B\nConstant Memory:\t%u B\n\
Shared Memory:\t\t%u B\nRegisters:\t\t%u\nClock Speed:\t\t%.2f GHz\n",
                    (unsigned int)dp.totalGlobalMem,(unsigned int)dp.totalConstMem,
                    (unsigned int)dp.sharedMemPerBlock,(unsigned int)dp.regsPerBlock,dp.clockRate*1e-6f);
#if CUDART_VERSION >= 2000
                // assumes 8 cores per MP, which holds only for compute 1.x parts
                printf("Number of MPs:\t\t%u\nNumber of Cores:\t%u\n",
                    dp.multiProcessorCount,dp.multiProcessorCount<<3);
#endif
            }
        }
    }
    if(cfg->isgpuinfo==2 && cfg->exportfield==NULL){ //list GPU info only
        exit(0);
    }
    if (cfg->gpuid==0)
        mcx_cu_assess(cudaSetDevice(deviceCount-1),__FILE__,__LINE__);
    else
        mcx_cu_assess(cudaSetDevice(cfg->gpuid-1),__FILE__,__LINE__);
    return 1;
#endif
}
/*
host code for MCX kernels
*/
// Host-side driver of the MCX simulation: allocates host and device buffers,
// uploads the volume/optical properties/detector list, launches mcx_main_loop
// once per time-gate group and per repetition (cfg->respin), retrieves and
// optionally normalizes/saves the fluence and detected-photon records, prints
// statistics, and frees all resources.
void mcx_run_simulation(Config *cfg){
    int i,iter;
    float  minstep=MIN(MIN(cfg->steps.x,cfg->steps.y),cfg->steps.z);
    float4 p0=float4(cfg->srcpos.x,cfg->srcpos.y,cfg->srcpos.z,1.f);   // source position, initial weight 1
    float4 c0=float4(cfg->srcdir.x,cfg->srcdir.y,cfg->srcdir.z,0.f);   // source direction
    float3 maxidx=float3(cfg->dim.x,cfg->dim.y,cfg->dim.z);
    float t;
    float energyloss=0.f,energyabsorbed=0.f;
    float *energy;
    int threadphoton, oddphotons;
    unsigned int photoncount=0,printnum;
    unsigned int tic,tic0,tic1,toc=0,fieldlen;
    uint3 cp0=cfg->crop0,cp1=cfg->crop1;
    uint2 cachebox;
    uint3 dimlen;
    float Vvox,scale,eabsorp;
    dim3 mcgrid, mcblock;
    dim3 clgrid, clblock;
    int dimxyz=cfg->dim.x*cfg->dim.y*cfg->dim.z;
    uchar *media=(uchar *)(cfg->vol);
    float *field;
    // constant-memory parameter block copied to the device as gcfg below
    MCXParam param={cfg->steps,minstep,0,0,cfg->tend,R_C0*cfg->unitinmm,cfg->isrowmajor,
                    cfg->issave2pt,cfg->isreflect,cfg->isrefint,cfg->issavedet,1.f/cfg->tstep,
                    p0,c0,maxidx,uint3(0,0,0),cp0,cp1,uint2(0,0),cfg->minenergy,
                    cfg->sradius*cfg->sradius,minstep*R_C0*cfg->unitinmm,cfg->maxdetphoton,
                    cfg->medianum-1,cfg->detnum,0,0};
    if(cfg->respin>1){
        field=(float *)calloc(sizeof(float)*dimxyz,cfg->maxgate*2);
    }else{
        field=(float *)calloc(sizeof(float)*dimxyz,cfg->maxgate); //the second half will be used to accumulate
    }
    float4 *Ppos;
    float4 *Pdir;
    float4 *Plen,*Plen0;
    uint   *Pseed;
    float  *Pdet;
    uint    detected=0,sharedbuf=0;
    // round the thread count down to a whole number of blocks
    if(cfg->nthread%cfg->nblocksize)
        cfg->nthread=(cfg->nthread/cfg->nblocksize)*cfg->nblocksize;
    threadphoton=cfg->nphoton/cfg->nthread/cfg->respin;
    oddphotons=cfg->nphoton/cfg->respin-threadphoton*cfg->nthread;  // remainder photons, one extra per low-idx thread
    mcgrid.x=cfg->nthread/cfg->nblocksize;
    mcblock.x=cfg->nblocksize;
    clgrid.x=cfg->dim.x;
    clgrid.y=cfg->dim.y;
    clblock.x=cfg->dim.z;
    Ppos=(float4*)malloc(sizeof(float4)*cfg->nthread);
    Pdir=(float4*)malloc(sizeof(float4)*cfg->nthread);
    Plen=(float4*)malloc(sizeof(float4)*cfg->nthread);
    Plen0=(float4*)malloc(sizeof(float4)*cfg->nthread);
    Pseed=(uint*)malloc(sizeof(uint)*cfg->nthread*RAND_SEED_LEN);
    energy=(float*)calloc(cfg->nthread*2,sizeof(float));
    Pdet=(float*)calloc(cfg->maxdetphoton,sizeof(float)*(cfg->medianum+1));
    uchar *gmedia;
    mcx_cu_assess(cudaMalloc((void **) &gmedia, sizeof(uchar)*(dimxyz)),__FILE__,__LINE__);
    float *gfield;
    mcx_cu_assess(cudaMalloc((void **) &gfield, sizeof(float)*(dimxyz)*cfg->maxgate),__FILE__,__LINE__);
    //cudaBindTexture(0, texmedia, gmedia);
    float4 *gPpos;
    mcx_cu_assess(cudaMalloc((void **) &gPpos, sizeof(float4)*cfg->nthread),__FILE__,__LINE__);
    float4 *gPdir;
    mcx_cu_assess(cudaMalloc((void **) &gPdir, sizeof(float4)*cfg->nthread),__FILE__,__LINE__);
    float4 *gPlen;
    mcx_cu_assess(cudaMalloc((void **) &gPlen, sizeof(float4)*cfg->nthread),__FILE__,__LINE__);
    uint *gPseed;
    mcx_cu_assess(cudaMalloc((void **) &gPseed, sizeof(uint)*cfg->nthread*RAND_SEED_LEN),__FILE__,__LINE__);
    float *gPdet;
    mcx_cu_assess(cudaMalloc((void **) &gPdet, sizeof(float)*cfg->maxdetphoton*(cfg->medianum+1)),__FILE__,__LINE__);
    uint *gdetected;
    mcx_cu_assess(cudaMalloc((void **) &gdetected, sizeof(uint)),__FILE__,__LINE__);
    float *genergy;
    cudaMalloc((void **) &genergy, sizeof(float)*cfg->nthread*2);
    /*volume is assumed to be col-major*/
    cachebox.x=(cp1.x-cp0.x+1);
    cachebox.y=(cp1.y-cp0.y+1)*(cp1.x-cp0.x+1);
    dimlen.x=cfg->dim.x;
    dimlen.y=cfg->dim.y*cfg->dim.x;
    dimlen.z=cfg->dim.x*cfg->dim.y*cfg->dim.z;
    param.dimlen=dimlen;
    param.cachebox=cachebox;
    // linear voxel index and medium of the source position
    param.idx1dorig=(int(floorf(p0.z))*dimlen.y+int(floorf(p0.y))*dimlen.x+int(floorf(p0.x)));
    param.mediaidorig=(cfg->vol[param.idx1dorig] & MED_MASK);
    Vvox=cfg->steps.x*cfg->steps.y*cfg->steps.z;
    if(cfg->seed>0)
        srand(cfg->seed);
    else
        srand(time(0));
    for (i=0; i<cfg->nthread; i++) {
        Ppos[i]=p0;  // initial position
        Pdir[i]=c0;
        Plen[i]=float4(0.f,0.f,param.minaccumtime,0.f);
    }
    fprintf(cfg->flog,"\
###############################################################################\n\
#                  Monte Carlo eXtreme (MCX) -- CUDA                          #\n\
#      Copyright (c) 2009-2011 Qianqian Fang <fangq at nmr.mgh.harvard.edu>   #\n\
#                                                                             #\n\
#     Martinos Center for Biomedical Imaging, Massachusetts General Hospital  #\n\
###############################################################################\n\
$MCX $Rev:: 247 $ Last Commit $Date:: 2011-05-03 20:50:55#$ by $Author:: fangq$\n\
###############################################################################\n");
    tic=StartTimer();
#ifdef MCX_TARGET_NAME
    fprintf(cfg->flog,"- variant name: [%s] compiled for GPU Capability [%d] with CUDA [%d]\n",
        MCX_TARGET_NAME,MCX_CUDA_ARCH,CUDART_VERSION);
#else
    fprintf(cfg->flog,"- code name: [Vanilla MCX] compiled for GPU Capacity [%d] with CUDA [%d]\n",
        MCX_CUDA_ARCH,CUDART_VERSION);
#endif
    fprintf(cfg->flog,"- compiled with: RNG [%s] Seed Length [%d]\n",MCX_RNG_NAME,RAND_SEED_LEN);
#ifdef SAVE_DETECTORS
    fprintf(cfg->flog,"- this version CAN save photons at the detectors\n\n");
#else
    fprintf(cfg->flog,"- this version CAN NOT save photons at the detectors\n\n");
#endif
    fprintf(cfg->flog,"threadph=%d oddphotons=%d np=%d nthread=%d repetition=%d\n",threadphoton,oddphotons,
        cfg->nphoton,cfg->nthread,cfg->respin);
    fprintf(cfg->flog,"initializing streams ...\t");
    fflush(cfg->flog);
    fieldlen=dimxyz*cfg->maxgate;
    // one-time uploads: volume, energy counters, optical properties, detectors
    cudaMemcpy(gmedia, media, sizeof(uchar) *dimxyz, cudaMemcpyHostToDevice);
    cudaMemcpy(genergy,energy,sizeof(float) *cfg->nthread*2, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gproperty, cfg->prop,  cfg->medianum*sizeof(Medium), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gdetpos, cfg->detpos,  cfg->detnum*sizeof(float4), 0, cudaMemcpyHostToDevice);
    fprintf(cfg->flog,"init complete : %d ms\n",GetTimeMillis()-tic);
    /*
        if one has to simulate a lot of time gates, using the GPU global memory
        requires extra caution. If the total global memory is bigger than the total
        memory to save all the snapshots, i.e. size(field)*(tend-tstart)/tstep, one
        simply sets cfg->maxgate to the total gate number; this will run GPU kernel
        once. If the required memory is bigger than the video memory, set cfg->maxgate
        to a number which fits, and the snapshot will be saved with an increment of
        cfg->maxgate snapshots. In this case, the later simulations will restart from
        photon launching and exhibit redundancies.
        The calculation of the energy conservation will only reflect the last simulation.
    */
    // dynamic shared memory: optional cachebox + per-thread partial-path buffers
#ifdef USE_CACHEBOX
    if(cfg->sradius>EPS || cfg->sradius<0.f)
        sharedbuf+=sizeof(float)*((cp1.x-cp0.x+1)*(cp1.y-cp0.y+1)*(cp1.z-cp0.z+1));
#endif
    if(cfg->issavedet)
        sharedbuf+=cfg->nblocksize*sizeof(float)*(cfg->medianum-1);
    fprintf(cfg->flog,"requesting %d bytes of shared memory\n",sharedbuf);
    //simulate for all time-gates in maxgate groups per run
    for(t=cfg->tstart;t<cfg->tend;t+=cfg->tstep*cfg->maxgate){
        param.twin0=t;
        param.twin1=t+cfg->tstep*cfg->maxgate;
        cudaMemcpyToSymbol(gcfg,   &param,     sizeof(MCXParam), 0, cudaMemcpyHostToDevice);
        fprintf(cfg->flog,"lauching mcx_main_loop for time window [%.2ens %.2ens] ...\n"
            ,param.twin0*1e9,param.twin1*1e9);
        //total number of repetition for the simulations, results will be accumulated to field
        for(iter=0;iter<cfg->respin;iter++){
            cudaMemset(gfield,0,sizeof(float)*fieldlen); // cost about 1 ms
            // NOTE(review): sizeof(float) below happens to equal sizeof(uint);
            // sizeof(uint) would state the intent for the gdetected counter
            cudaMemset(gPdet,0,sizeof(float)*cfg->maxdetphoton*(cfg->medianum+1));
            cudaMemset(gdetected,0,sizeof(float));
            cudaMemcpy(gPpos,  Ppos,  sizeof(float4)*cfg->nthread,  cudaMemcpyHostToDevice);
            cudaMemcpy(gPdir,  Pdir,  sizeof(float4)*cfg->nthread,  cudaMemcpyHostToDevice);
            cudaMemcpy(gPlen,  Plen,  sizeof(float4)*cfg->nthread,  cudaMemcpyHostToDevice);
            // fresh host-side RNG seeds for every repetition
            for (i=0; i<cfg->nthread*RAND_SEED_LEN; i++)
                Pseed[i]=rand();
            cudaMemcpy(gPseed, Pseed, sizeof(uint)*cfg->nthread*RAND_SEED_LEN,  cudaMemcpyHostToDevice);
            tic0=GetTimeMillis();
            fprintf(cfg->flog,"simulation run#%2d ... \t",iter+1); fflush(cfg->flog);
            mcx_main_loop<<<mcgrid,mcblock,sharedbuf>>>(threadphoton,oddphotons,gmedia,gfield,genergy,
                gPseed,gPpos,gPdir,gPlen,gPdet,gdetected);
            cudaThreadSynchronize();
            cudaMemcpy(&detected, gdetected,sizeof(uint),cudaMemcpyDeviceToHost);
            tic1=GetTimeMillis();
            toc+=tic1-tic0;
            fprintf(cfg->flog,"kernel complete:  \t%d ms\nretrieving fields ... \t",tic1-tic);
            mcx_cu_assess(cudaGetLastError(),__FILE__,__LINE__);
            cudaMemcpy(Plen0,  gPlen,  sizeof(float4)*cfg->nthread, cudaMemcpyDeviceToHost);
            // Plen0[i].w carries each thread's completed-photon count
            cfg->his.totalphoton=0;
            for(i=0;i<cfg->nthread;i++)
                cfg->his.totalphoton+=int(Plen0[i].w+0.5f);
            photoncount+=cfg->his.totalphoton;
#ifdef SAVE_DETECTORS
            if(cfg->issavedet){
                cudaMemcpy(Pdet, gPdet,sizeof(float)*cfg->maxdetphoton*(cfg->medianum+1),cudaMemcpyDeviceToHost);
                mcx_cu_assess(cudaGetLastError(),__FILE__,__LINE__);
                if(detected>cfg->maxdetphoton){
                    fprintf(cfg->flog,"WARNING: the detected photon (%d) \
is more than what your have specified (%d), please use the -H option to specify a greater number\t"
                        ,detected,cfg->maxdetphoton);
                }else{
                    fprintf(cfg->flog,"detected %d photons\t",detected);
                }
                cfg->his.unitinmm=cfg->unitinmm;
                cfg->his.detected=detected;
                cfg->his.savedphoton=MIN(detected,cfg->maxdetphoton);
                if(cfg->exportdetected) //you must allocate the buffer long enough
                    memcpy(cfg->exportdetected,Pdet,cfg->his.savedphoton*(cfg->medianum+1)*sizeof(float));
                else
                    mcx_savedata(Pdet,cfg->his.savedphoton*(cfg->medianum+1),
                        photoncount>cfg->his.totalphoton,"mch",cfg);
            }
#endif
            //handling the 2pt distributions
            if(cfg->issave2pt){
                cudaMemcpy(field, gfield,sizeof(float) *dimxyz*cfg->maxgate,cudaMemcpyDeviceToHost);
                fprintf(cfg->flog,"transfer complete:\t%d ms\n",GetTimeMillis()-tic);  fflush(cfg->flog);
                if(cfg->respin>1){
                    for(i=0;i<fieldlen;i++)  //accumulate field, can be done in the GPU
                        field[fieldlen+i]+=field[i];
                }
                if(iter+1==cfg->respin){
                    if(cfg->respin>1)  //copy the accumulated fields back
                        memcpy(field,field+fieldlen,sizeof(float)*fieldlen);
                    if(cfg->isnormalized){
                        //normalize field if it is the last iteration, temporarily do it in CPU
                        //mcx_sum_trueabsorption<<<clgrid,clblock>>>(genergy,gmedia,gfield,
                        //    cfg->maxgate,threaddim);
                        fprintf(cfg->flog,"normalizing raw data ...\t");
                        cudaMemcpy(energy,genergy,sizeof(float)*cfg->nthread*2,cudaMemcpyDeviceToHost);
                        eabsorp=0.f;
                        // fold all per-thread energy counters into energy[0]/energy[1]
                        for(i=1;i<cfg->nthread;i++){
                            energy[0]+=energy[i<<1];
                            energy[1]+=energy[(i<<1)+1];
                        }
                        for(i=0;i<cfg->nthread;i++)
                            eabsorp+=Plen0[i].z;  // the accumulative absorpted energy near the source
                        eabsorp+=energy[1];
                        scale=(cfg->nphoton-energy[0])/(cfg->nphoton*Vvox*cfg->tstep*eabsorp);
                        if(cfg->unitinmm!=1.f)
                            scale/=(cfg->unitinmm*cfg->unitinmm); /* Vvox*(U*U*U) * (Tstep) * (Eabsorp/U) */
                        fprintf(cfg->flog,"normalization factor alpha=%f\n",scale);  fflush(cfg->flog);
                        mcx_normalize(field,scale,fieldlen);
                    }
                    fprintf(cfg->flog,"data normalization complete : %d ms\n",GetTimeMillis()-tic);
                    if(cfg->exportfield) //you must allocate the buffer long enough
                        memcpy(cfg->exportfield,field,fieldlen*sizeof(float));
                    else{
                        fprintf(cfg->flog,"saving data to file ...\t");
                        mcx_savedata(field,fieldlen,t>cfg->tstart,"mc2",cfg);
                        fprintf(cfg->flog,"saving data complete : %d ms\n\n",GetTimeMillis()-tic);
                        fflush(cfg->flog);
                    }
                }
            }
        }
        // reset the energy counters between time-gate groups
        if(param.twin1<cfg->tend){
            cudaMemset(genergy,0,sizeof(float)*cfg->nthread*2);
        }
    }
    // pull back the final per-thread states for the summary printout below
    cudaMemcpy(Ppos,  gPpos, sizeof(float4)*cfg->nthread, cudaMemcpyDeviceToHost);
    cudaMemcpy(Pdir,  gPdir, sizeof(float4)*cfg->nthread, cudaMemcpyDeviceToHost);
    cudaMemcpy(Plen,  gPlen, sizeof(float4)*cfg->nthread, cudaMemcpyDeviceToHost);
    cudaMemcpy(Pseed, gPseed,sizeof(uint)  *cfg->nthread*RAND_SEED_LEN,   cudaMemcpyDeviceToHost);
    cudaMemcpy(energy,genergy,sizeof(float)*cfg->nthread*2,cudaMemcpyDeviceToHost);
    for (i=0; i<cfg->nthread; i++) {
        energyloss+=energy[i<<1];
        energyabsorbed+=energy[(i<<1)+1];
    }
#ifdef TEST_RACING
    {
        // compare the naive field sum against the per-thread counters to
        // estimate how many accumulations were lost to races
        float totalcount=0.f,hitcount=0.f;
        for (i=0; i<fieldlen; i++)
            hitcount+=field[i];
        for (i=0; i<cfg->nthread; i++)
            totalcount+=Pseed[i];
        fprintf(cfg->flog,"expected total recording number: %f, got %f, missed %f\n",
            totalcount,hitcount,(totalcount-hitcount)/totalcount);
    }
#endif
    printnum=cfg->nthread<cfg->printnum?cfg->nthread:cfg->printnum;
    for (i=0; i<printnum; i++) {
        fprintf(cfg->flog,"% 4d[A% f % f % f]C%3d J%5d W% 8f(P%6.3f %6.3f %6.3f)T% 5.3e L% 5.3f %.0f\n", i,
            Pdir[i].x,Pdir[i].y,Pdir[i].z,(int)Plen[i].w,(int)Pdir[i].w,Ppos[i].w,
            Ppos[i].x,Ppos[i].y,Ppos[i].z,Plen[i].y,Plen[i].x,(float)Pseed[i]);
    }
    // total energy here equals total simulated photons+unfinished photons for all threads
    fprintf(cfg->flog,"simulated %d photons (%d) with %d threads (repeat x%d)\nMCX simulation speed: %.2f photon/ms\n",
        photoncount,cfg->nphoton,cfg->nthread,cfg->respin,(double)photoncount/toc); fflush(cfg->flog);
    fprintf(cfg->flog,"exit energy:%16.8e + absorbed energy:%16.8e = total: %16.8e\n",
        energyloss,cfg->nphoton-energyloss,(float)cfg->nphoton);fflush(cfg->flog);
    fflush(cfg->flog);
    cudaFree(gmedia);
    cudaFree(gfield);
    cudaFree(gPpos);
    cudaFree(gPdir);
    cudaFree(gPlen);
    cudaFree(gPseed);
    cudaFree(genergy);
    cudaFree(gPdet);
    cudaFree(gdetected);
    cudaThreadExit();
    free(Ppos);
    free(Pdir);
    free(Plen);
    free(Plen0);
    free(Pseed);
    free(Pdet);
    free(energy);
    free(field);
}
#include <data/Spin_System.hpp>
#include <engine/Hamiltonian_Heisenberg.hpp>
#include <engine/Vectormath.hpp>
#include <engine/Neighbours.hpp>
#include <engine/FFT.hpp>
#include <engine/Backend_par.hpp>
#include <utility/Constants.hpp>
#include <Eigen/Dense>
#include <Eigen/Core>
#include <complex>
#include <utility>
using namespace Data;
using namespace Utility;
namespace C = Utility::Constants;
using Engine::Vectormath::check_atom_type;
using Engine::Vectormath::idx_from_pair;
using Engine::Vectormath::cu_check_atom_type;
using Engine::Vectormath::cu_idx_from_pair;
using Engine::Vectormath::cu_tupel_from_idx;
namespace Engine
{
// Construct a Heisenberg Hamiltonian with pairs
// Construct a Heisenberg Hamiltonian from explicit lists of interaction pairs.
//
// Parameters:
//   external_field_magnitude/normal : homogeneous Zeeman field; the magnitude
//       is scaled by mu_B here so that stored energies use consistent units
//   anisotropy_indices/magnitudes/normals : per-site uniaxial anisotropy
//   exchange_pairs/magnitudes : explicit Heisenberg exchange bonds
//   dmi_pairs/magnitudes/normals : explicit Dzyaloshinskii-Moriya bonds
//   ddi_method, ddi_n_periodic_images, ddi_pb_zero_padding, ddi_radius :
//       dipole-dipole interaction method and its parameters
//   quadruplets/quadruplet_magnitudes : four-spin interaction terms
//   geometry : lattice geometry shared with the spin system
//   boundary_conditions : periodicity flags per lattice direction
//
// The by-value container parameters are moved into the members to avoid a
// second deep copy of potentially large interaction lists. The shell-based
// members are left empty so Update_Interactions() uses the pair lists.
Hamiltonian_Heisenberg::Hamiltonian_Heisenberg(
    scalar external_field_magnitude, Vector3 external_field_normal,
    intfield anisotropy_indices, scalarfield anisotropy_magnitudes, vectorfield anisotropy_normals,
    pairfield exchange_pairs, scalarfield exchange_magnitudes,
    pairfield dmi_pairs, scalarfield dmi_magnitudes, vectorfield dmi_normals,
    DDI_Method ddi_method, intfield ddi_n_periodic_images, bool ddi_pb_zero_padding, scalar ddi_radius,
    quadrupletfield quadruplets, scalarfield quadruplet_magnitudes,
    std::shared_ptr<Data::Geometry> geometry,
    intfield boundary_conditions
) :
    Hamiltonian(boundary_conditions),
    geometry(std::move(geometry)),
    external_field_magnitude(external_field_magnitude * C::mu_B), external_field_normal(external_field_normal),
    anisotropy_indices(std::move(anisotropy_indices)), anisotropy_magnitudes(std::move(anisotropy_magnitudes)), anisotropy_normals(std::move(anisotropy_normals)),
    exchange_pairs_in(std::move(exchange_pairs)), exchange_magnitudes_in(std::move(exchange_magnitudes)), exchange_shell_magnitudes(0),
    dmi_pairs_in(std::move(dmi_pairs)), dmi_magnitudes_in(std::move(dmi_magnitudes)), dmi_normals_in(std::move(dmi_normals)), dmi_shell_magnitudes(0), dmi_shell_chirality(0),
    quadruplets(std::move(quadruplets)), quadruplet_magnitudes(std::move(quadruplet_magnitudes)),
    ddi_method(ddi_method), ddi_n_periodic_images(std::move(ddi_n_periodic_images)), ddi_pb_zero_padding(ddi_pb_zero_padding), ddi_cutoff_radius(ddi_radius),
    fft_plan_reverse(FFT::FFT_Plan()), fft_plan_spins(FFT::FFT_Plan())
{
    // Generate interaction pairs, constants etc.
    this->Update_Interactions();
}
// Construct a Heisenberg Hamiltonian from shells
// Construct a Heisenberg Hamiltonian from per-shell interaction magnitudes.
//
// Parameters:
//   external_field_magnitude/normal : homogeneous Zeeman field; the magnitude
//       is scaled by mu_B here so that stored energies use consistent units
//   anisotropy_indices/magnitudes/normals : per-site uniaxial anisotropy
//   exchange_shell_magnitudes : exchange strength per neighbour shell
//   dmi_shell_magnitudes, dmi_shell_chirality : DMI strength per shell and
//       the chirality used to derive the DM vectors
//   ddi_method, ddi_n_periodic_images, ddi_pb_zero_padding, ddi_radius :
//       dipole-dipole interaction method and its parameters
//   quadruplets/quadruplet_magnitudes : four-spin interaction terms
//   geometry : lattice geometry shared with the spin system
//   boundary_conditions : periodicity flags per lattice direction
//
// The by-value container parameters are moved into the members to avoid a
// second deep copy. The explicit pair-list members are left empty so
// Update_Interactions() generates pairs from the shells instead.
Hamiltonian_Heisenberg::Hamiltonian_Heisenberg(
    scalar external_field_magnitude, Vector3 external_field_normal,
    intfield anisotropy_indices, scalarfield anisotropy_magnitudes, vectorfield anisotropy_normals,
    scalarfield exchange_shell_magnitudes,
    scalarfield dmi_shell_magnitudes, int dmi_shell_chirality,
    DDI_Method ddi_method, intfield ddi_n_periodic_images, bool ddi_pb_zero_padding, scalar ddi_radius,
    quadrupletfield quadruplets, scalarfield quadruplet_magnitudes,
    std::shared_ptr<Data::Geometry> geometry,
    intfield boundary_conditions
) :
    Hamiltonian(boundary_conditions),
    geometry(std::move(geometry)),
    external_field_magnitude(external_field_magnitude * C::mu_B), external_field_normal(external_field_normal),
    anisotropy_indices(std::move(anisotropy_indices)), anisotropy_magnitudes(std::move(anisotropy_magnitudes)), anisotropy_normals(std::move(anisotropy_normals)),
    exchange_pairs_in(0), exchange_magnitudes_in(0), exchange_shell_magnitudes(std::move(exchange_shell_magnitudes)),
    dmi_pairs_in(0), dmi_magnitudes_in(0), dmi_normals_in(0), dmi_shell_magnitudes(std::move(dmi_shell_magnitudes)), dmi_shell_chirality(dmi_shell_chirality),
    quadruplets(std::move(quadruplets)), quadruplet_magnitudes(std::move(quadruplet_magnitudes)),
    ddi_method(ddi_method), ddi_n_periodic_images(std::move(ddi_n_periodic_images)), ddi_pb_zero_padding(ddi_pb_zero_padding), ddi_cutoff_radius(ddi_radius),
    fft_plan_reverse(FFT::FFT_Plan()), fft_plan_spins(FFT::FFT_Plan())
{
    // Generate interaction pairs, constants etc.
    this->Update_Interactions();
}
void Hamiltonian_Heisenberg::Update_Interactions()
{
// When parallelising (cuda or openmp), we need all neighbours per spin
const bool use_redundant_neighbours = true;
// Exchange
this->exchange_pairs = pairfield(0);
this->exchange_magnitudes = scalarfield(0);
if( exchange_shell_magnitudes.size() > 0 )
{
// Generate Exchange neighbours
intfield exchange_shells(0);
Neighbours::Get_Neighbours_in_Shells(*geometry, exchange_shell_magnitudes.size(), exchange_pairs, exchange_shells, use_redundant_neighbours);
for (unsigned int ipair = 0; ipair < exchange_pairs.size(); ++ipair)
{
this->exchange_magnitudes.push_back(exchange_shell_magnitudes[exchange_shells[ipair]]);
}
}
else
{
// Use direct list of pairs
this->exchange_pairs = this->exchange_pairs_in;
this->exchange_magnitudes = this->exchange_magnitudes_in;
if( use_redundant_neighbours )
{
for (int i = 0; i < exchange_pairs_in.size(); ++i)
{
auto& p = exchange_pairs_in[i];
auto& t = p.translations;
this->exchange_pairs.push_back(Pair{p.j, p.i, {-t[0], -t[1], -t[2]}});
this->exchange_magnitudes.push_back(exchange_magnitudes_in[i]);
}
}
}
// DMI
this->dmi_pairs = pairfield(0);
this->dmi_magnitudes = scalarfield(0);
this->dmi_normals = vectorfield(0);
if( dmi_shell_magnitudes.size() > 0 )
{
// Generate DMI neighbours and normals
intfield dmi_shells(0);
Neighbours::Get_Neighbours_in_Shells(*geometry, dmi_shell_magnitudes.size(), dmi_pairs, dmi_shells, use_redundant_neighbours);
for (unsigned int ineigh = 0; ineigh < dmi_pairs.size(); ++ineigh)
{
this->dmi_normals.push_back(Neighbours::DMI_Normal_from_Pair(*geometry, dmi_pairs[ineigh], dmi_shell_chirality));
this->dmi_magnitudes.push_back(dmi_shell_magnitudes[dmi_shells[ineigh]]);
}
}
else
{
// Use direct list of pairs
this->dmi_pairs = this->dmi_pairs_in;
this->dmi_magnitudes = this->dmi_magnitudes_in;
this->dmi_normals = this->dmi_normals_in;
for (int i = 0; i < dmi_pairs_in.size(); ++i)
{
auto& p = dmi_pairs_in[i];
auto& t = p.translations;
this->dmi_pairs.push_back(Pair{p.j, p.i, {-t[0], -t[1], -t[2]}});
this->dmi_magnitudes.push_back(dmi_magnitudes_in[i]);
this->dmi_normals.push_back(-dmi_normals_in[i]);
}
}
// Dipole-dipole (cutoff)
scalar radius = this->ddi_cutoff_radius;
if( this->ddi_method != DDI_Method::Cutoff )
radius = 0;
this->ddi_pairs = Engine::Neighbours::Get_Pairs_in_Radius(*this->geometry, radius);
this->ddi_magnitudes = scalarfield(this->ddi_pairs.size());
this->ddi_normals = vectorfield(this->ddi_pairs.size());
for (unsigned int i = 0; i < this->ddi_pairs.size(); ++i)
{
Engine::Neighbours::DDI_from_Pair(
*this->geometry,
{ this->ddi_pairs[i].i, this->ddi_pairs[i].j,
{ this->ddi_pairs[i].translations[0],
this->ddi_pairs[i].translations[1],
this->ddi_pairs[i].translations[2] }},
this->ddi_magnitudes[i], this->ddi_normals[i]);
}
// Dipole-dipole (FFT)
this->Prepare_DDI();
// Update, which terms still contribute
this->Update_Energy_Contributions();
}
void Hamiltonian_Heisenberg::Update_Energy_Contributions()
{
this->energy_contributions_per_spin = std::vector<std::pair<std::string, scalarfield>>(0);
// External field
if( std::abs(this->external_field_magnitude) > 1e-60 )
{
this->energy_contributions_per_spin.push_back({"Zeeman", scalarfield(0)});
this->idx_zeeman = this->energy_contributions_per_spin.size()-1;
}
else this->idx_zeeman = -1;
// Anisotropy
if( this->anisotropy_indices.size() > 0 )
{
this->energy_contributions_per_spin.push_back({"Anisotropy", scalarfield(0) });
this->idx_anisotropy = this->energy_contributions_per_spin.size()-1;
}
else this->idx_anisotropy = -1;
// Exchange
if( this->exchange_pairs.size() > 0 )
{
this->energy_contributions_per_spin.push_back({"Exchange", scalarfield(0) });
this->idx_exchange = this->energy_contributions_per_spin.size()-1;
}
else this->idx_exchange = -1;
// DMI
if( this->dmi_pairs.size() > 0 )
{
this->energy_contributions_per_spin.push_back({"DMI", scalarfield(0) });
this->idx_dmi = this->energy_contributions_per_spin.size()-1;
}
else this->idx_dmi = -1;
// Dipole-Dipole
if( this->ddi_method != DDI_Method::None )
{
this->energy_contributions_per_spin.push_back({"DDI", scalarfield(0) });
this->idx_ddi = this->energy_contributions_per_spin.size()-1;
}
else this->idx_ddi = -1;
// Quadruplets
if( this->quadruplets.size() > 0 )
{
this->energy_contributions_per_spin.push_back({"Quadruplets", scalarfield(0) });
this->idx_quadruplet = this->energy_contributions_per_spin.size()-1;
}
else this->idx_quadruplet = -1;
}
void Hamiltonian_Heisenberg::Energy_Contributions_per_Spin(const vectorfield & spins, std::vector<std::pair<std::string, scalarfield>> & contributions)
{
if( contributions.size() != this->energy_contributions_per_spin.size() )
{
contributions = this->energy_contributions_per_spin;
}
int nos = spins.size();
for (auto& pair : contributions)
{
// Allocate if not already allocated
if (pair.second.size() != nos) pair.second = scalarfield(nos, 0);
// Otherwise set to zero
else for (auto& pair : contributions) Vectormath::fill(pair.second, 0);
}
// External field
if( this->idx_zeeman >=0 ) E_Zeeman(spins, contributions[idx_zeeman].second);
// Anisotropy
if( this->idx_anisotropy >=0 ) E_Anisotropy(spins, contributions[idx_anisotropy].second);
// Exchange
if( this->idx_exchange >=0 ) E_Exchange(spins, contributions[idx_exchange].second);
// DMI
if( this->idx_dmi >=0 ) E_DMI(spins,contributions[idx_dmi].second);
// DDI
if( this->idx_ddi >=0 ) E_DDI(spins, contributions[idx_ddi].second);
// Quadruplets
if (this->idx_quadruplet >=0 ) E_Quadruplet(spins, contributions[idx_quadruplet].second);
}
__global__ void CU_E_Zeeman(const Vector3 * spins, const int * atom_types, const int n_cell_atoms, const scalar * mu_s, const scalar external_field_magnitude, const Vector3 external_field_normal, scalar * Energy, size_t n_cells_total)
{
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < n_cells_total;
icell += blockDim.x * gridDim.x)
{
for (int ibasis=0; ibasis<n_cell_atoms; ++ibasis)
{
int ispin = icell + ibasis;
if ( cu_check_atom_type(atom_types[ispin]) )
Energy[ispin] -= mu_s[ispin] * external_field_magnitude * external_field_normal.dot(spins[ispin]);
}
}
}
    // Accumulate the Zeeman (external field) energy per spin into Energy.
    void Hamiltonian_Heisenberg::E_Zeeman(const vectorfield & spins, scalarfield & Energy)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel itself loops over the basis atoms
        CU_E_Zeeman<<<(size+1023)/1024, 1024>>>(spins.data(), this->geometry->atom_types.data(), geometry->n_cell_atoms, geometry->mu_s.data(), this->external_field_magnitude, this->external_field_normal, Energy.data(), size);
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_E_Anisotropy(const Vector3 * spins, const int * atom_types, const int n_cell_atoms, const int n_anisotropies, const int * anisotropy_indices, const scalar * anisotropy_magnitude, const Vector3 * anisotropy_normal, scalar * Energy, size_t n_cells_total)
{
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < n_cells_total;
icell += blockDim.x * gridDim.x)
{
for (int iani=0; iani<n_anisotropies; ++iani)
{
int ispin = icell*n_cell_atoms + anisotropy_indices[iani];
if ( cu_check_atom_type(atom_types[ispin]) )
Energy[ispin] -= anisotropy_magnitude[iani] * pow(anisotropy_normal[iani].dot(spins[ispin]), 2);
}
}
}
    // Accumulate the uniaxial anisotropy energy per spin into Energy.
    void Hamiltonian_Heisenberg::E_Anisotropy(const vectorfield & spins, scalarfield & Energy)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the anisotropy entries
        CU_E_Anisotropy<<<(size+1023)/1024, 1024>>>(spins.data(), this->geometry->atom_types.data(), this->geometry->n_cell_atoms, this->anisotropy_indices.size(), this->anisotropy_indices.data(), this->anisotropy_magnitudes.data(), this->anisotropy_normals.data(), Energy.data(), size);
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_E_Exchange(const Vector3 * spins, const int * atom_types, const int * boundary_conditions, const int * n_cells, int n_cell_atoms,
int n_pairs, const Pair * pairs, const scalar * magnitudes, scalar * Energy, size_t size)
{
int bc[3]={boundary_conditions[0],boundary_conditions[1],boundary_conditions[2]};
int nc[3]={n_cells[0],n_cells[1],n_cells[2]};
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < size;
icell += blockDim.x * gridDim.x)
{
for(auto ipair = 0; ipair < n_pairs; ++ipair)
{
int ispin = pairs[ipair].i + icell*n_cell_atoms;
int jspin = cu_idx_from_pair(icell, bc, nc, n_cell_atoms, atom_types, pairs[ipair]);
if (jspin >= 0)
{
Energy[ispin] -= 0.5 * magnitudes[ipair] * spins[ispin].dot(spins[jspin]);
}
}
}
}
    // Accumulate the exchange energy per spin into Energy.
    void Hamiltonian_Heisenberg::E_Exchange(const vectorfield & spins, scalarfield & Energy)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the exchange pairs
        CU_E_Exchange<<<(size+1023)/1024, 1024>>>(spins.data(), this->geometry->atom_types.data(), boundary_conditions.data(), geometry->n_cells.data(), geometry->n_cell_atoms,
                this->exchange_pairs.size(), this->exchange_pairs.data(), this->exchange_magnitudes.data(), Energy.data(), size);
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_E_DMI(const Vector3 * spins, const int * atom_types, const int * boundary_conditions, const int * n_cells, int n_cell_atoms,
int n_pairs, const Pair * pairs, const scalar * magnitudes, const Vector3 * normals, scalar * Energy, size_t size)
{
int bc[3]={boundary_conditions[0],boundary_conditions[1],boundary_conditions[2]};
int nc[3]={n_cells[0],n_cells[1],n_cells[2]};
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < size;
icell += blockDim.x * gridDim.x)
{
for(auto ipair = 0; ipair < n_pairs; ++ipair)
{
int ispin = pairs[ipair].i + icell*n_cell_atoms;
int jspin = cu_idx_from_pair(icell, bc, nc, n_cell_atoms, atom_types, pairs[ipair]);
if (jspin >= 0)
{
Energy[ispin] -= 0.5 * magnitudes[ipair] * normals[ipair].dot(spins[ispin].cross(spins[jspin]));
}
}
}
}
    // Accumulate the Dzyaloshinskii-Moriya energy per spin into Energy.
    void Hamiltonian_Heisenberg::E_DMI(const vectorfield & spins, scalarfield & Energy)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the DMI pairs
        CU_E_DMI<<<(size+1023)/1024, 1024>>>(spins.data(), this->geometry->atom_types.data(), boundary_conditions.data(), geometry->n_cells.data(), geometry->n_cell_atoms,
                this->dmi_pairs.size(), this->dmi_pairs.data(), this->dmi_magnitudes.data(), this->dmi_normals.data(), Energy.data(), size);
        CU_CHECK_AND_SYNC();
    }
void Hamiltonian_Heisenberg::E_DDI(const vectorfield & spins, scalarfield & Energy)
{
if( this->ddi_method == DDI_Method::FFT )
this->E_DDI_FFT(spins, Energy);
else if( this->ddi_method == DDI_Method::Cutoff )
{
// TODO: Merge these implementations in the future
if( ddi_cutoff_radius >= 0 )
this->E_DDI_Cutoff(spins, Energy);
else
this->E_DDI_Direct(spins, Energy);
}
}
void Hamiltonian_Heisenberg::E_DDI_Direct(const vectorfield & spins, scalarfield & Energy)
{
vectorfield gradients_temp;
gradients_temp.resize(geometry->nos);
Vectormath::fill(gradients_temp, {0,0,0});
this->Gradient_DDI_Direct(spins, gradients_temp);
#pragma omp parallel for
for (int ispin = 0; ispin < geometry->nos; ispin++)
{
Energy[ispin] += 0.5 * spins[ispin].dot(gradients_temp[ispin]);
}
}
    // Cutoff-based DDI energy. The reference implementation below is
    // commented out, so this function currently contributes nothing;
    // E_DDI() still routes here when the cutoff radius is >= 0.
    void Hamiltonian_Heisenberg::E_DDI_Cutoff(const vectorfield & spins, scalarfield & Energy)
    {
        // //scalar mult = -mu_B*mu_B*1.0 / 4.0 / Pi; // multiply with mu_B^2
        // scalar mult = 0.5*0.0536814951168; // mu_0*mu_B**2/(4pi*10**-30) -- the translations are in angstrom, so the |r|[m] becomes |r|[m]*10^-10
        // // scalar result = 0.0;

        // for (unsigned int i_pair = 0; i_pair < ddi_pairs.size(); ++i_pair)
        // {
        //     if (ddi_magnitudes[i_pair] > 0.0)
        //     {
        //         for (int da = 0; da < geometry->n_cells[0]; ++da)
        //         {
        //             for (int db = 0; db < geometry->n_cells[1]; ++db)
        //             {
        //                 for (int dc = 0; dc < geometry->n_cells[2]; ++dc)
        //                 {
        //                     std::array<int, 3 > translations = { da, db, dc };
        //                     // int idx_i = ddi_pairs[i_pair].i;
        //                     // int idx_j = ddi_pairs[i_pair].j;
        //                     int idx_i = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations);
        //                     int idx_j = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, ddi_pairs[i_pair].translations);
        //                     Energy[idx_i] -= mult / std::pow(ddi_magnitudes[i_pair], 3.0) *
        //                         (3 * spins[idx_j].dot(ddi_normals[i_pair]) * spins[idx_i].dot(ddi_normals[i_pair]) - spins[idx_i].dot(spins[idx_j]));
        //                     Energy[idx_j] -= mult / std::pow(ddi_magnitudes[i_pair], 3.0) *
        //                         (3 * spins[idx_j].dot(ddi_normals[i_pair]) * spins[idx_i].dot(ddi_normals[i_pair]) - spins[idx_i].dot(spins[idx_j]));
        //                 }
        //             }
        //         }
        //     }
        // }
    }// end DipoleDipole
    // TODO: add dot_scaled to Vectormath and use that
    // Per-spin DDI energy from a precomputed gradient: E_i += (1/2) s_i . grad_i.
    // NOTE(review): n_cell_atoms and mu_s are currently unused here.
    __global__ void CU_E_DDI_FFT(scalar * Energy, const Vector3 * spins, const Vector3 * gradients , const int nos, const int n_cell_atoms, const scalar * mu_s)
    {
        for(int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nos; idx += blockDim.x * gridDim.x)
        {
            Energy[idx] += 0.5 * spins[idx].dot(gradients[idx]);
        }
    }
void Hamiltonian_Heisenberg::E_DDI_FFT(const vectorfield & spins, scalarfield & Energy)
{
//todo maybe the gradient should be cached somehow, it is quite inefficient to calculate it
//again just for the energy
vectorfield gradients_temp(geometry->nos);
Vectormath::fill(gradients_temp, {0,0,0});
this->Gradient_DDI(spins, gradients_temp);
CU_E_DDI_FFT<<<(geometry->nos + 1023)/1024, 1024>>>(Energy.data(), spins.data(), gradients_temp.data(), geometry->nos, geometry->n_cell_atoms, geometry->mu_s.data());
// === DEBUG: begin gradient comparison ===
// vectorfield gradients_temp_dir;
// gradients_temp_dir.resize(this->geometry->nos);
// Vectormath::fill(gradients_temp_dir, {0,0,0});
// Gradient_DDI_Direct(spins, gradients_temp_dir);
// //get deviation
// std::array<scalar, 3> deviation = {0,0,0};
// std::array<scalar, 3> avg = {0,0,0};
// std::array<scalar, 3> avg_ft = {0,0,0};
// for(int i = 0; i < this->geometry->nos; i++)
// {
// for(int d = 0; d < 3; d++)
// {
// deviation[d] += std::pow(gradients_temp[i][d] - gradients_temp_dir[i][d], 2);
// avg[d] += gradients_temp_dir[i][d];
// avg_ft[d] += gradients_temp[i][d];
// }
// }
// std::cerr << "Avg. Gradient (Direct) = " << avg[0]/this->geometry->nos << " " << avg[1]/this->geometry->nos << " " << avg[2]/this->geometry->nos << std::endl;
// std::cerr << "Avg. Gradient (FFT) = " << avg_ft[0]/this->geometry->nos << " " << avg_ft[1]/this->geometry->nos << " " << avg_ft[2]/this->geometry->nos << std::endl;
// std::cerr << "Relative Error in % = " << (avg_ft[0]/avg[0]-1)*100 << " " << (avg_ft[1]/avg[1]-1)*100 << " " << (avg_ft[2]/avg[2]-1)*100 << std::endl;
// std::cerr << "Avg. Deviation = " << std::pow(deviation[0]/this->geometry->nos, 0.5) << " " << std::pow(deviation[1]/this->geometry->nos, 0.5) << " " << std::pow(deviation[2]/this->geometry->nos, 0.5) << std::endl;
// std::cerr << " ---------------- " << std::endl;
// ==== DEBUG: end gradient comparison ====
}// end DipoleDipole
void Hamiltonian_Heisenberg::Gradient_DDI_Direct(const vectorfield & spins, vectorfield & gradient)
{
scalar mult = C::mu_0 * C::mu_B * C::mu_B / ( 4*C::Pi * 1e-30 );
scalar d, d3, d5;
Vector3 diff;
Vector3 diff_img;
int img_a = boundary_conditions[0] == 0 ? 0 : ddi_n_periodic_images[0];
int img_b = boundary_conditions[1] == 0 ? 0 : ddi_n_periodic_images[1];
int img_c = boundary_conditions[2] == 0 ? 0 : ddi_n_periodic_images[2];
for(int idx1 = 0; idx1 < geometry->nos; idx1++)
{
for(int idx2 = 0; idx2 < geometry->nos; idx2++)
{
auto& m2 = spins[idx2];
diff = this->geometry->positions[idx2] - this->geometry->positions[idx1];
scalar Dxx = 0, Dxy = 0, Dxz = 0, Dyy = 0, Dyz = 0, Dzz = 0;
for(int a_pb = - img_a; a_pb <= img_a; a_pb++)
{
for(int b_pb = - img_b; b_pb <= img_b; b_pb++)
{
for(int c_pb = -img_c; c_pb <= img_c; c_pb++)
{
diff_img = diff + a_pb * geometry->n_cells[0] * geometry->bravais_vectors[0] * geometry->lattice_constant
+ b_pb * geometry->n_cells[1] * geometry->bravais_vectors[1] * geometry->lattice_constant
+ c_pb * geometry->n_cells[2] * geometry->bravais_vectors[2] * geometry->lattice_constant;
d = diff_img.norm();
if(d > 1e-10)
{
d3 = d * d * d;
d5 = d * d * d * d * d;
Dxx += mult * (3 * diff_img[0]*diff_img[0] / d5 - 1/d3);
Dxy += mult * 3 * diff_img[0]*diff_img[1] / d5; //same as Dyx
Dxz += mult * 3 * diff_img[0]*diff_img[2] / d5; //same as Dzx
Dyy += mult * (3 * diff_img[1]*diff_img[1] / d5 - 1/d3);
Dyz += mult * 3 * diff_img[1]*diff_img[2] / d5; //same as Dzy
Dzz += mult * (3 * diff_img[2]*diff_img[2] / d5 - 1/d3);
}
}
}
}
auto& mu = geometry->mu_s[idx2];
gradient[idx1][0] -= (Dxx * m2[0] + Dxy * m2[1] + Dxz * m2[2]) * geometry->mu_s[idx1] * geometry->mu_s[idx2];
gradient[idx1][1] -= (Dxy * m2[0] + Dyy * m2[1] + Dyz * m2[2]) * geometry->mu_s[idx1] * geometry->mu_s[idx2];
gradient[idx1][2] -= (Dxz * m2[0] + Dyz * m2[1] + Dzz * m2[2]) * geometry->mu_s[idx1] * geometry->mu_s[idx2];
}
}
}
    // Quadruplet energy. The reference implementation below is commented out,
    // so this function currently contributes nothing even when quadruplets
    // are configured.
    void Hamiltonian_Heisenberg::E_Quadruplet(const vectorfield & spins, scalarfield & Energy)
    {
        // for (unsigned int iquad = 0; iquad < quadruplets.size(); ++iquad)
        // {
        //     for (int da = 0; da < geometry->n_cells[0]; ++da)
        //     {
        //         for (int db = 0; db < geometry->n_cells[1]; ++db)
        //         {
        //             for (int dc = 0; dc < geometry->n_cells[2]; ++dc)
        //             {
        //                 std::array<int, 3 > translations = { da, db, dc };
        //                 // int i = quadruplets[iquad].i;
        //                 // int j = quadruplets[iquad].j;
        //                 // int k = quadruplets[iquad].k;
        //                 // int l = quadruplets[iquad].l;
        //                 int i = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations);
        //                 int j = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_j);
        //                 int k = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_k);
        //                 int l = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_l);
        //                 Energy[i] -= 0.25*quadruplet_magnitudes[iquad] * (spins[i].dot(spins[j])) * (spins[k].dot(spins[l]));
        //                 Energy[j] -= 0.25*quadruplet_magnitudes[iquad] * (spins[i].dot(spins[j])) * (spins[k].dot(spins[l]));
        //                 Energy[k] -= 0.25*quadruplet_magnitudes[iquad] * (spins[i].dot(spins[j])) * (spins[k].dot(spins[l]));
        //                 Energy[l] -= 0.25*quadruplet_magnitudes[iquad] * (spins[i].dot(spins[j])) * (spins[k].dot(spins[l]));
        //             }
        //         }
        //     }
        // }
    }
    // Energy of a single spin, summing Zeeman, anisotropy, exchange, DMI and
    // quadruplet terms. Returns 0 for vacancy/defect sites.
    // NOTE(review): the DDI contribution is not included here — confirm that
    // callers do not rely on it.
    scalar Hamiltonian_Heisenberg::Energy_Single_Spin(int ispin, const vectorfield & spins)
    {
        scalar Energy = 0;
        if( check_atom_type(this->geometry->atom_types[ispin]) )
        {
            // Decompose the spin index into cell index and basis index
            int icell  = ispin / this->geometry->n_cell_atoms;
            int ibasis = ispin - icell*this->geometry->n_cell_atoms;
            auto& mu_s = this->geometry->mu_s;
            // NOTE(review): pair_inv appears to be unused in this function
            Pair pair_inv;

            // External field
            if (this->idx_zeeman >= 0)
                Energy -= mu_s[ispin] * this->external_field_magnitude * this->external_field_normal.dot(spins[ispin]);

            // Anisotropy
            if (this->idx_anisotropy >= 0)
            {
                for (int iani = 0; iani < anisotropy_indices.size(); ++iani)
                {
                    if (anisotropy_indices[iani] == ibasis)
                    {
                        // (atom type was already checked above)
                        if (check_atom_type(this->geometry->atom_types[ispin]))
                            Energy -= this->anisotropy_magnitudes[iani] * std::pow(anisotropy_normals[iani].dot(spins[ispin]), 2.0);
                    }
                }
            }

            // Exchange: only pairs whose first site matches this basis atom
            if( this->idx_exchange >= 0 )
            {
                for( unsigned int ipair = 0; ipair < exchange_pairs.size(); ++ipair )
                {
                    const auto& pair = exchange_pairs[ipair];
                    if( pair.i == ibasis )
                    {
                        int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, pair);
                        if (jspin >= 0)
                            Energy -= this->exchange_magnitudes[ipair] * spins[ispin].dot(spins[jspin]);
                    }
                }
            }

            // DMI: only pairs whose first site matches this basis atom
            if (this->idx_dmi >= 0)
            {
                for (unsigned int ipair = 0; ipair < dmi_pairs.size(); ++ipair)
                {
                    const auto& pair = dmi_pairs[ipair];
                    if( pair.i == ibasis )
                    {
                        int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, pair);
                        if (jspin >= 0)
                            Energy -= this->dmi_magnitudes[ipair] * this->dmi_normals[ipair].dot(spins[ispin].cross(spins[jspin]));
                    }
                }
            }

            // Quadruplets
            if (this->idx_quadruplet >= 0)
            {
                for (unsigned int iquad = 0; iquad < quadruplets.size(); ++iquad)
                {
                    auto translations = Vectormath::translations_from_idx(geometry->n_cells, geometry->n_cell_atoms, icell);
                    // NOTE(review): this local `ispin` shadows the function
                    // parameter, and unlike exchange/DMI there is no
                    // `quadruplets[iquad].i == ibasis` filter — verify this is
                    // the intended behaviour.
                    int ispin = quadruplets[iquad].i + icell*geometry->n_cell_atoms;
                    int jspin = quadruplets[iquad].j + Vectormath::idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_j);
                    int kspin = quadruplets[iquad].k + Vectormath::idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_k);
                    int lspin = quadruplets[iquad].l + Vectormath::idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_l);

                    if ( check_atom_type(this->geometry->atom_types[ispin]) && check_atom_type(this->geometry->atom_types[jspin]) &&
                         check_atom_type(this->geometry->atom_types[kspin]) && check_atom_type(this->geometry->atom_types[lspin]) )
                    {
                        Energy -= 0.25*quadruplet_magnitudes[iquad] * (spins[ispin].dot(spins[jspin])) * (spins[kspin].dot(spins[lspin]));
                    }
                }
            }
        }
        return Energy;
    }
void Hamiltonian_Heisenberg::Gradient(const vectorfield & spins, vectorfield & gradient)
{
// Set to zero
Vectormath::fill(gradient, {0,0,0});
// External field
if(idx_zeeman >= 0)
this->Gradient_Zeeman(gradient);
// Anisotropy
if(idx_anisotropy >= 0)
this->Gradient_Anisotropy(spins, gradient);
// Exchange
if(idx_exchange >= 0)
this->Gradient_Exchange(spins, gradient);
// DMI
if(idx_dmi >= 0)
this->Gradient_DMI(spins, gradient);
// DDI
if(idx_ddi >= 0)
this->Gradient_DDI(spins, gradient);
// Quadruplet
if(idx_quadruplet)
this->Gradient_Quadruplet(spins, gradient);
}
void Hamiltonian_Heisenberg::Gradient_and_Energy(const vectorfield & spins, vectorfield & gradient, scalar & energy)
{
// Set to zero
Vectormath::fill(gradient, {0,0,0});
energy = 0;
auto N = spins.size();
auto s = spins.data();
auto mu_s = geometry->mu_s.data();
auto g = gradient.data();
// Anisotropy
if(idx_anisotropy >= 0)
this->Gradient_Anisotropy(spins, gradient);
// Exchange
if(idx_exchange >= 0)
this->Gradient_Exchange(spins, gradient);
// DMI
if(idx_dmi >= 0)
this->Gradient_DMI(spins, gradient);
// DDI
if(idx_ddi >= 0)
this->Gradient_DDI(spins, gradient);
energy += Backend::par::reduce( N, [s,g] SPIRIT_LAMBDA ( int idx ) { return 0.5 * g[idx].dot(s[idx]) ;} );
// External field
if(idx_zeeman >= 0)
{
Vector3 ext_field = external_field_normal * external_field_magnitude;
this->Gradient_Zeeman(gradient);
energy += Backend::par::reduce( N, [s, ext_field, mu_s] SPIRIT_LAMBDA ( int idx ) { return -mu_s[idx] * ext_field.dot(s[idx]) ;} );
}
// Quadruplets
if(idx_quadruplet > 0)
{
// Kind of a bandaid fix
this->Gradient_Quadruplet(spins, gradient);
if(energy_contributions_per_spin[idx_quadruplet].second.size() != spins.size())
{
energy_contributions_per_spin[idx_quadruplet].second.resize(spins.size());
};
Vectormath::fill(energy_contributions_per_spin[idx_quadruplet].second, 0);
E_Quadruplet(spins, energy_contributions_per_spin[idx_quadruplet].second);
energy += Vectormath::sum(energy_contributions_per_spin[idx_quadruplet].second);
}
}
__global__ void CU_Gradient_Zeeman( const int * atom_types, const int n_cell_atoms, const scalar * mu_s, const scalar external_field_magnitude, const Vector3 external_field_normal, Vector3 * gradient, size_t n_cells_total)
{
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < n_cells_total;
icell += blockDim.x * gridDim.x)
{
for (int ibasis=0; ibasis<n_cell_atoms; ++ibasis)
{
int ispin = icell + ibasis;
if ( cu_check_atom_type(atom_types[ispin]) )
gradient[ispin] -= mu_s[ispin] * external_field_magnitude*external_field_normal;
}
}
}
    // Accumulate the Zeeman (external field) gradient into `gradient`.
    void Hamiltonian_Heisenberg::Gradient_Zeeman(vectorfield & gradient)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel itself loops over the basis atoms
        CU_Gradient_Zeeman<<<(size+1023)/1024, 1024>>>( this->geometry->atom_types.data(), geometry->n_cell_atoms, geometry->mu_s.data(), this->external_field_magnitude, this->external_field_normal, gradient.data(), size );
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_Gradient_Anisotropy(const Vector3 * spins, const int * atom_types, const int n_cell_atoms, const int n_anisotropies, const int * anisotropy_indices, const scalar * anisotropy_magnitude, const Vector3 * anisotropy_normal, Vector3 * gradient, size_t n_cells_total)
{
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < n_cells_total;
icell += blockDim.x * gridDim.x)
{
for (int iani=0; iani<n_anisotropies; ++iani)
{
int ispin = icell*n_cell_atoms + anisotropy_indices[iani];
if ( cu_check_atom_type(atom_types[ispin]) )
{
scalar sc = -2 * anisotropy_magnitude[iani] * anisotropy_normal[iani].dot(spins[ispin]);
gradient[ispin] += sc*anisotropy_normal[iani];
}
}
}
}
    // Accumulate the uniaxial anisotropy gradient into `gradient`.
    void Hamiltonian_Heisenberg::Gradient_Anisotropy(const vectorfield & spins, vectorfield & gradient)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the anisotropy entries
        CU_Gradient_Anisotropy<<<(size+1023)/1024, 1024>>>( spins.data(), this->geometry->atom_types.data(), this->geometry->n_cell_atoms, this->anisotropy_indices.size(), this->anisotropy_indices.data(), this->anisotropy_magnitudes.data(), this->anisotropy_normals.data(), gradient.data(), size );
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_Gradient_Exchange(const Vector3 * spins, const int * atom_types, const int * boundary_conditions, const int * n_cells, int n_cell_atoms,
int n_pairs, const Pair * pairs, const scalar * magnitudes, Vector3 * gradient, size_t size)
{
int bc[3]={boundary_conditions[0],boundary_conditions[1],boundary_conditions[2]};
int nc[3]={n_cells[0],n_cells[1],n_cells[2]};
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < size;
icell += blockDim.x * gridDim.x)
{
for(auto ipair = 0; ipair < n_pairs; ++ipair)
{
int ispin = pairs[ipair].i + icell*n_cell_atoms;
int jspin = cu_idx_from_pair(icell, bc, nc, n_cell_atoms, atom_types, pairs[ipair]);
if (jspin >= 0)
{
gradient[ispin] -= magnitudes[ipair]*spins[jspin];
}
}
}
}
    // Accumulate the exchange gradient into `gradient`.
    void Hamiltonian_Heisenberg::Gradient_Exchange(const vectorfield & spins, vectorfield & gradient)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the exchange pairs
        CU_Gradient_Exchange<<<(size+1023)/1024, 1024>>>( spins.data(), this->geometry->atom_types.data(), boundary_conditions.data(), geometry->n_cells.data(), geometry->n_cell_atoms,
                this->exchange_pairs.size(), this->exchange_pairs.data(), this->exchange_magnitudes.data(), gradient.data(), size );
        CU_CHECK_AND_SYNC();
    }
__global__ void CU_Gradient_DMI(const Vector3 * spins, const int * atom_types, const int * boundary_conditions, const int * n_cells, int n_cell_atoms,
int n_pairs, const Pair * pairs, const scalar * magnitudes, const Vector3 * normals, Vector3 * gradient, size_t size)
{
int bc[3]={boundary_conditions[0],boundary_conditions[1],boundary_conditions[2]};
int nc[3]={n_cells[0],n_cells[1],n_cells[2]};
for(auto icell = blockIdx.x * blockDim.x + threadIdx.x;
icell < size;
icell += blockDim.x * gridDim.x)
{
for(auto ipair = 0; ipair < n_pairs; ++ipair)
{
int ispin = pairs[ipair].i + icell*n_cell_atoms;
int jspin = cu_idx_from_pair(icell, bc, nc, n_cell_atoms, atom_types, pairs[ipair]);
if (jspin >= 0)
{
gradient[ispin] -= magnitudes[ipair]*spins[jspin].cross(normals[ipair]);
}
}
}
}
    // Accumulate the Dzyaloshinskii-Moriya gradient into `gradient`.
    void Hamiltonian_Heisenberg::Gradient_DMI(const vectorfield & spins, vectorfield & gradient)
    {
        int size = geometry->n_cells_total;
        // One thread per cell; the kernel loops over the DMI pairs
        CU_Gradient_DMI<<<(size+1023)/1024, 1024>>>( spins.data(), this->geometry->atom_types.data(), boundary_conditions.data(), geometry->n_cells.data(), geometry->n_cell_atoms,
                this->dmi_pairs.size(), this->dmi_pairs.data(), this->dmi_magnitudes.data(), this->dmi_normals.data(), gradient.data(), size );
        CU_CHECK_AND_SYNC();
    }
void Hamiltonian_Heisenberg::Gradient_DDI(const vectorfield & spins, vectorfield & gradient)
{
if( this->ddi_method == DDI_Method::FFT )
this->Gradient_DDI_FFT(spins, gradient);
else if( this->ddi_method == DDI_Method::Cutoff )
{
// TODO: Merge these implementations in the future
if( this->ddi_cutoff_radius >= 0 )
this->Gradient_DDI_Cutoff(spins, gradient);
else
this->Gradient_DDI_Direct(spins, gradient);
}
}
    // Cutoff-based DDI gradient: not implemented yet — intentionally a no-op.
    void Hamiltonian_Heisenberg::Gradient_DDI_Cutoff(const vectorfield & spins, vectorfield & gradient)
    {
        // TODO
    }
__global__ void CU_FFT_Pointwise_Mult(FFT::FFT_cpx_type * ft_D_matrices, FFT::FFT_cpx_type * ft_spins, FFT::FFT_cpx_type * res_mult, int* iteration_bounds, int i_b1, int* inter_sublattice_lookup, FFT::StrideContainer dipole_stride, FFT::StrideContainer spin_stride)
{
int n = iteration_bounds[0] * iteration_bounds[1] * iteration_bounds[2] * iteration_bounds[3];
int tupel[4];
int idx_b1, idx_b2, idx_d;
for(int ispin = blockIdx.x * blockDim.x + threadIdx.x; ispin < n; ispin += blockDim.x * gridDim.x)
{
cu_tupel_from_idx(ispin, tupel, iteration_bounds, 4); // tupel now is {i_b2, a, b, c}
int& b_inter = inter_sublattice_lookup[i_b1 + tupel[0] * iteration_bounds[0]];
idx_b1 = i_b1 * spin_stride.basis + tupel[1] * spin_stride.a + tupel[2] * spin_stride.b + tupel[3] * spin_stride.c;
idx_b2 = tupel[0] * spin_stride.basis + tupel[1] * spin_stride.a + tupel[2] * spin_stride.b + tupel[3] * spin_stride.c;
idx_d = b_inter * dipole_stride.basis + tupel[1] * dipole_stride.a + tupel[2] * dipole_stride.b + tupel[3] * dipole_stride.c;
auto& fs_x = ft_spins[idx_b2 ];
auto& fs_y = ft_spins[idx_b2 + 1 * spin_stride.comp];
auto& fs_z = ft_spins[idx_b2 + 2 * spin_stride.comp];
auto& fD_xx = ft_D_matrices[idx_d ];
auto& fD_xy = ft_D_matrices[idx_d + 1 * dipole_stride.comp];
auto& fD_xz = ft_D_matrices[idx_d + 2 * dipole_stride.comp];
auto& fD_yy = ft_D_matrices[idx_d + 3 * dipole_stride.comp];
auto& fD_yz = ft_D_matrices[idx_d + 4 * dipole_stride.comp];
auto& fD_zz = ft_D_matrices[idx_d + 5 * dipole_stride.comp];
if(tupel[0] == 0)
{
res_mult[idx_b1 ].x = FFT::mult3D(fD_xx, fD_xy, fD_xz, fs_x, fs_y, fs_z).x;
res_mult[idx_b1 ].y = FFT::mult3D(fD_xx, fD_xy, fD_xz, fs_x, fs_y, fs_z).y;
res_mult[idx_b1 + 1 * spin_stride.comp].x = FFT::mult3D(fD_xy, fD_yy, fD_yz, fs_x, fs_y, fs_z).x;
res_mult[idx_b1 + 1 * spin_stride.comp].y = FFT::mult3D(fD_xy, fD_yy, fD_yz, fs_x, fs_y, fs_z).y;
res_mult[idx_b1 + 2 * spin_stride.comp].x = FFT::mult3D(fD_xz, fD_yz, fD_zz, fs_x, fs_y, fs_z).x;
res_mult[idx_b1 + 2 * spin_stride.comp].y = FFT::mult3D(fD_xz, fD_yz, fD_zz, fs_x, fs_y, fs_z).y;
} else {
atomicAdd(&res_mult[idx_b1 ].x, FFT::mult3D(fD_xx, fD_xy, fD_xz, fs_x, fs_y, fs_z).x);
atomicAdd(&res_mult[idx_b1 ].y, FFT::mult3D(fD_xx, fD_xy, fD_xz, fs_x, fs_y, fs_z).y);
atomicAdd(&res_mult[idx_b1 + 1 * spin_stride.comp].x, FFT::mult3D(fD_xy, fD_yy, fD_yz, fs_x, fs_y, fs_z).x);
atomicAdd(&res_mult[idx_b1 + 1 * spin_stride.comp].y, FFT::mult3D(fD_xy, fD_yy, fD_yz, fs_x, fs_y, fs_z).y);
atomicAdd(&res_mult[idx_b1 + 2 * spin_stride.comp].x, FFT::mult3D(fD_xz, fD_yz, fD_zz, fs_x, fs_y, fs_z).x);
atomicAdd(&res_mult[idx_b1 + 2 * spin_stride.comp].y, FFT::mult3D(fD_xz, fD_yz, fD_zz, fs_x, fs_y, fs_z).y);
}
}
}
    // Copy the inverse-FFT result (padded layout, indexed via spin_stride)
    // back into the compact gradient array, scaling by mu_s and dividing by
    // sublattice_size — presumably the normalisation of the unnormalised FFT
    // pair; confirm against the FFT plan setup.
    // NOTE(review): n_cell_atoms is unused here.
    __global__ void CU_Write_FFT_Gradients(FFT::FFT_real_type * resiFFT, Vector3 * gradient, FFT::StrideContainer spin_stride , int * iteration_bounds, int n_cell_atoms, scalar * mu_s, int sublattice_size)
    {
        int nos = iteration_bounds[0] * iteration_bounds[1] * iteration_bounds[2] * iteration_bounds[3];
        int tupel[4];
        int idx_pad;
        for(int idx_orig = blockIdx.x * blockDim.x + threadIdx.x; idx_orig < nos; idx_orig += blockDim.x * gridDim.x)
        {
            cu_tupel_from_idx(idx_orig, tupel, iteration_bounds, 4); //tupel now is {ib, a, b, c}
            // Index into the padded FFT buffer for this spin
            idx_pad = tupel[0] * spin_stride.basis + tupel[1] * spin_stride.a + tupel[2] * spin_stride.b + tupel[3] * spin_stride.c;
            gradient[idx_orig][0] -= mu_s[idx_orig] * resiFFT[idx_pad                       ] / sublattice_size;
            gradient[idx_orig][1] -= mu_s[idx_orig] * resiFFT[idx_pad + 1 * spin_stride.comp] / sublattice_size;
            gradient[idx_orig][2] -= mu_s[idx_orig] * resiFFT[idx_pad + 2 * spin_stride.comp] / sublattice_size;
        }
    }
void Hamiltonian_Heisenberg::Gradient_DDI_FFT(const vectorfield & spins, vectorfield & gradient)
{
auto& ft_D_matrices = transformed_dipole_matrices;
auto& ft_spins = fft_plan_spins.cpx_ptr;
auto& res_iFFT = fft_plan_reverse.real_ptr;
auto& res_mult = fft_plan_reverse.cpx_ptr;
int number_of_mults = it_bounds_pointwise_mult[0] * it_bounds_pointwise_mult[1] * it_bounds_pointwise_mult[2] * it_bounds_pointwise_mult[3];
FFT_Spins(spins);
// TODO: also parallelize over i_b1
// Loop over basis atoms (i.e sublattices) and add contribution of each sublattice
for(int i_b1 = 0; i_b1 < geometry->n_cell_atoms; ++i_b1)
CU_FFT_Pointwise_Mult<<<(number_of_mults + 1023) / 1024, 1024>>>(ft_D_matrices.data(), ft_spins.data(), res_mult.data(), it_bounds_pointwise_mult.data(), i_b1, inter_sublattice_lookup.data(), dipole_stride, spin_stride);
FFT::batch_iFour_3D(fft_plan_reverse);
CU_Write_FFT_Gradients<<<(geometry->nos + 1023) / 1024, 1024>>>(res_iFFT.data(), gradient.data(), spin_stride, it_bounds_write_gradients.data(), geometry->n_cell_atoms, geometry->mu_s.data(), sublattice_size);
}//end Field_DipoleDipole
// Quadruplet-interaction gradient. NOTE: not implemented in this (CUDA)
// backend — the function is intentionally a no-op. The reference CPU
// implementation is kept below, commented out, as a porting template.
void Hamiltonian_Heisenberg::Gradient_Quadruplet(const vectorfield & spins, vectorfield & gradient)
{
    // for (unsigned int iquad = 0; iquad < quadruplets.size(); ++iquad)
    // {
    //     int i = quadruplets[iquad].i;
    //     int j = quadruplets[iquad].j;
    //     int k = quadruplets[iquad].k;
    //     int l = quadruplets[iquad].l;
    //     for (int da = 0; da < geometry->n_cells[0]; ++da)
    //     {
    //         for (int db = 0; db < geometry->n_cells[1]; ++db)
    //         {
    //             for (int dc = 0; dc < geometry->n_cells[2]; ++dc)
    //             {
    //                 std::array<int, 3 > translations = { da, db, dc };
    //                 int ispin = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations);
    //                 int jspin = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_j);
    //                 int kspin = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_k);
    //                 int lspin = idx_from_translations(geometry->n_cells, geometry->n_cell_atoms, translations, quadruplets[iquad].d_l);
    //                 gradient[ispin] -= quadruplet_magnitudes[iquad] * spins[jspin] * (spins[kspin].dot(spins[lspin]));
    //                 gradient[jspin] -= quadruplet_magnitudes[iquad] * spins[ispin] * (spins[kspin].dot(spins[lspin]));
    //                 gradient[kspin] -= quadruplet_magnitudes[iquad] * (spins[ispin].dot(spins[jspin])) * spins[lspin];
    //                 gradient[lspin] -= quadruplet_magnitudes[iquad] * (spins[ispin].dot(spins[jspin])) * spins[kspin];
    //             }
    //         }
    //     }
    // }
}
// Dense Hessian d2E/(dn_i dn_j) of the Heisenberg Hamiltonian.
// Contributions: on-site uniaxial anisotropy (3x3 blocks), exchange
// (diagonal off-site blocks), DMI (antisymmetric off-site blocks) and a
// tentative all-pairs dipole-dipole term (open boundary conditions only).
void Hamiltonian_Heisenberg::Hessian(const vectorfield & spins, MatrixX & hessian)
{
    const int N = geometry->n_cell_atoms;

    // --- Set to zero
    hessian.setZero();

    // --- Single Spin elements: anisotropy block  -2 K n_alpha n_beta
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( int iani = 0; iani < anisotropy_indices.size(); ++iani )
        {
            int ispin = icell*N + anisotropy_indices[iani];
            if( check_atom_type(this->geometry->atom_types[ispin]) )
            {
                for( int alpha = 0; alpha < 3; ++alpha )
                {
                    for ( int beta = 0; beta < 3; ++beta )
                    {
                        int i = 3 * ispin + alpha;
                        // BUGFIX: the column index must use beta (was alpha),
                        // which collapsed the whole 3x3 anisotropy block onto
                        // the diagonal and ignored the beta loop entirely.
                        int j = 3 * ispin + beta;
                        hessian(i, j) += -2.0 * this->anisotropy_magnitudes[iani] *
                                               this->anisotropy_normals[iani][alpha] *
                                               this->anisotropy_normals[iani][beta];
                    }
                }
            }
        }
    }

    // --- Spin Pair elements
    // Exchange: -J on the diagonal of each (ispin, jspin) 3x3 block
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( unsigned int i_pair = 0; i_pair < exchange_pairs.size(); ++i_pair )
        {
            int ispin = exchange_pairs[i_pair].i + icell*geometry->n_cell_atoms;
            int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, exchange_pairs[i_pair]);
            if( jspin >= 0 ) // pair may be invalid at open boundaries / defects
            {
                for( int alpha = 0; alpha < 3; ++alpha )
                {
                    int i = 3 * ispin + alpha;
                    int j = 3 * jspin + alpha;
                    hessian(i, j) += -exchange_magnitudes[i_pair];
                }
            }
        }
    }
    // DMI: antisymmetric block built from the DM vector components
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( unsigned int i_pair = 0; i_pair < dmi_pairs.size(); ++i_pair )
        {
            int ispin = dmi_pairs[i_pair].i + icell*geometry->n_cell_atoms;
            int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, dmi_pairs[i_pair]);
            if( jspin >= 0 )
            {
                int i = 3*ispin;
                int j = 3*jspin;
                hessian(i+2, j+1) +=  dmi_magnitudes[i_pair] * dmi_normals[i_pair][0];
                hessian(i+1, j+2) += -dmi_magnitudes[i_pair] * dmi_normals[i_pair][0];
                hessian(i, j+2)   +=  dmi_magnitudes[i_pair] * dmi_normals[i_pair][1];
                hessian(i+2, j)   += -dmi_magnitudes[i_pair] * dmi_normals[i_pair][1];
                hessian(i+1, j)   +=  dmi_magnitudes[i_pair] * dmi_normals[i_pair][2];
                hessian(i, j+1)   += -dmi_magnitudes[i_pair] * dmi_normals[i_pair][2];
            }
        }
    }

    // Tentative Dipole-Dipole (only works for open boundary conditions)
    if( ddi_method != DDI_Method::None )
    {
        // DDI prefactor: mu_0 mu_B^2 / (4 pi), in the internal unit system.
        scalar mult = C::mu_0 * C::mu_B * C::mu_B / ( 4*C::Pi * 1e-30 );
        for( int idx1 = 0; idx1 < geometry->nos; idx1++ )
        {
            for( int idx2 = 0; idx2 < geometry->nos; idx2++ )
            {
                auto diff = this->geometry->positions[idx2] - this->geometry->positions[idx1];
                scalar d = diff.norm(), d3, d5;
                scalar Dxx = 0, Dxy = 0, Dxz = 0, Dyy = 0, Dyz = 0, Dzz = 0;
                if( d > 1e-10 ) // skip the self-interaction (d == 0)
                {
                    d3 = d * d * d;
                    d5 = d * d * d * d * d;
                    Dxx += mult * (3 * diff[0]*diff[0] / d5 - 1/d3);
                    Dxy += mult * 3 * diff[0]*diff[1] / d5;  //same as Dyx
                    Dxz += mult * 3 * diff[0]*diff[2] / d5;  //same as Dzx
                    Dyy += mult * (3 * diff[1]*diff[1] / d5 - 1/d3);
                    Dyz += mult * 3 * diff[1]*diff[2] / d5;  //same as Dzy
                    Dzz += mult * (3 * diff[2]*diff[2] / d5 - 1/d3);
                }
                int i = 3 * idx1;
                int j = 3 * idx2;
                // Symmetric 3x3 dipole block, scaled by both moments.
                hessian(i + 0, j + 0) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dxx );
                hessian(i + 1, j + 0) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dxy );
                hessian(i + 2, j + 0) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dxz );
                hessian(i + 0, j + 1) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dxy );
                hessian(i + 1, j + 1) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dyy );
                hessian(i + 2, j + 1) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dyz );
                hessian(i + 0, j + 2) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dxz );
                hessian(i + 1, j + 2) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dyz );
                hessian(i + 2, j + 2) += -geometry->mu_s[idx1] * geometry->mu_s[idx2] * ( Dzz );
            }
        }
    }

    //// Dipole-Dipole
    //for (unsigned int i_pair = 0; i_pair < this->DD_indices.size(); ++i_pair)
    //  {
    //      // indices
    //      int idx_1 = DD_indices[i_pair][0];
    //      int idx_2 = DD_indices[i_pair][1];
    //      // prefactor
    //      scalar prefactor = 0.0536814951168
    //          * mu_s[idx_1] * mu_s[idx_2]
    //          / std::pow(DD_magnitude[i_pair], 3);
    //      // components
    //      for (int alpha = 0; alpha < 3; ++alpha)
    //      {
    //          for (int beta = 0; beta < 3; ++beta)
    //          {
    //              int idx_h = idx_1 + alpha*nos + 3 * nos*(idx_2 + beta*nos);
    //              if (alpha == beta)
    //                  hessian[idx_h] += prefactor;
    //              hessian[idx_h] += -3.0*prefactor*DD_normal[i_pair][alpha] * DD_normal[i_pair][beta];
    //          }
    //      }
    //  }

    // Quadruplets
}
// Sparse Hessian of the Heisenberg Hamiltonian (anisotropy, exchange, DMI).
// Entries are collected as Eigen triplets and assembled at the end; without
// OpenMP the symmetric counterpart of every pair entry is inserted explicitly.
void Hamiltonian_Heisenberg::Sparse_Hessian(const vectorfield & spins, SpMatrixX & hessian)
{
    const int N = geometry->n_cell_atoms;

    using T = Eigen::Triplet<scalar>;
    std::vector<T> tripletList;
    // Upper bound on the number of triplets, to avoid reallocation.
    tripletList.reserve( geometry->n_cells_total * (anisotropy_indices.size() * 9 + exchange_pairs.size() * 2 + dmi_pairs.size() * 3) );

    // --- Single Spin elements: anisotropy block  -2 K n_alpha n_beta
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( int iani = 0; iani < anisotropy_indices.size(); ++iani )
        {
            int ispin = icell*N + anisotropy_indices[iani];
            if( check_atom_type(this->geometry->atom_types[ispin]) )
            {
                for( int alpha = 0; alpha < 3; ++alpha )
                {
                    for ( int beta = 0; beta < 3; ++beta )
                    {
                        int i = 3 * ispin + alpha;
                        // BUGFIX: the column index must use beta (was alpha),
                        // which collapsed the 3x3 anisotropy block onto the
                        // diagonal and ignored the beta loop entirely.
                        int j = 3 * ispin + beta;
                        scalar res = -2.0 * this->anisotropy_magnitudes[iani] *
                                           this->anisotropy_normals[iani][alpha] *
                                           this->anisotropy_normals[iani][beta];
                        tripletList.push_back( T(i, j, res ) );
                    }
                }
            }
        }
    }

    // --- Spin Pair elements
    // Exchange: -J on the diagonal of each (ispin, jspin) 3x3 block
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( unsigned int i_pair = 0; i_pair < exchange_pairs.size(); ++i_pair )
        {
            int ispin = exchange_pairs[i_pair].i + icell*geometry->n_cell_atoms;
            int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, exchange_pairs[i_pair]);
            if( jspin >= 0 ) // pair may be invalid at open boundaries / defects
            {
                for( int alpha = 0; alpha < 3; ++alpha )
                {
                    int i = 3 * ispin + alpha;
                    int j = 3 * jspin + alpha;
                    tripletList.push_back( T(i, j, -exchange_magnitudes[i_pair] ) );
                    #ifndef SPIRIT_USE_OPENMP
                    // Without OpenMP the pair list holds each pair once, so the
                    // transposed entry must be added by hand.
                    tripletList.push_back( T(j, i, -exchange_magnitudes[i_pair] ) );
                    #endif
                }
            }
        }
    }
    // DMI: antisymmetric block built from the DM vector components
    for( int icell = 0; icell < geometry->n_cells_total; ++icell )
    {
        for( unsigned int i_pair = 0; i_pair < dmi_pairs.size(); ++i_pair )
        {
            int ispin = dmi_pairs[i_pair].i + icell*geometry->n_cell_atoms;
            int jspin = idx_from_pair(ispin, boundary_conditions, geometry->n_cells, geometry->n_cell_atoms, geometry->atom_types, dmi_pairs[i_pair]);
            if( jspin >= 0 )
            {
                int i = 3*ispin;
                int j = 3*jspin;
                tripletList.push_back( T(i+2, j+1,  dmi_magnitudes[i_pair] * dmi_normals[i_pair][0] ) );
                tripletList.push_back( T(i+1, j+2, -dmi_magnitudes[i_pair] * dmi_normals[i_pair][0] ) );
                tripletList.push_back( T(i, j+2,    dmi_magnitudes[i_pair] * dmi_normals[i_pair][1] ) );
                tripletList.push_back( T(i+2, j,   -dmi_magnitudes[i_pair] * dmi_normals[i_pair][1] ) );
                tripletList.push_back( T(i+1, j,    dmi_magnitudes[i_pair] * dmi_normals[i_pair][2] ) );
                tripletList.push_back( T(i, j+1,   -dmi_magnitudes[i_pair] * dmi_normals[i_pair][2] ) );
                #ifndef SPIRIT_USE_OPENMP
                // Transposed counterparts (see exchange above).
                tripletList.push_back( T(j+1, i+2,  dmi_magnitudes[i_pair] * dmi_normals[i_pair][0]) );
                tripletList.push_back( T(j+2, i+1, -dmi_magnitudes[i_pair] * dmi_normals[i_pair][0]) );
                tripletList.push_back( T(j+2, i ,   dmi_magnitudes[i_pair] * dmi_normals[i_pair][1]) );
                tripletList.push_back( T(j, i+2 ,  -dmi_magnitudes[i_pair] * dmi_normals[i_pair][1]) );
                tripletList.push_back( T(j, i+1 ,   dmi_magnitudes[i_pair] * dmi_normals[i_pair][2]) );
                tripletList.push_back( T(j+1, i ,  -dmi_magnitudes[i_pair] * dmi_normals[i_pair][2]) );
                #endif
            }
        }
    }

    // Duplicate (i, j) triplets are summed by Eigen during assembly.
    hessian.setFromTriplets(tripletList.begin(), tripletList.end());
}
__global__ void CU_Write_FFT_Spin_Input(FFT::FFT_real_type* fft_spin_inputs, const Vector3 * spins, int * iteration_bounds, FFT::StrideContainer spin_stride, scalar * mu_s)
{
    // Total number of spins: product of the {ib, a, b, c} iteration bounds.
    const int n_total = iteration_bounds[0] * iteration_bounds[1] * iteration_bounds[2] * iteration_bounds[3];
    int idx4[4];
    for(int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n_total; idx += blockDim.x * gridDim.x)
    {
        // Decompose the flat spin index into {ib, a, b, c}.
        cu_tupel_from_idx(idx, idx4, iteration_bounds, 4);
        // Target index inside the zero-padded FFT input buffer.
        const int padded_idx = idx4[0] * spin_stride.basis + idx4[1] * spin_stride.a
                             + idx4[2] * spin_stride.b + idx4[3] * spin_stride.c;
        // Store the mu_s-weighted spin, one component per stride slot.
        for(int comp = 0; comp < 3; ++comp)
            fft_spin_inputs[padded_idx + comp * spin_stride.comp] = spins[idx][comp] * mu_s[idx];
    }
}
// Write the mu_s-weighted spins into the zero-padded FFT input buffer,
// then perform the batched forward 3D FFT of the spin field.
void Hamiltonian_Heisenberg::FFT_Spins(const vectorfield & spins)
{
    CU_Write_FFT_Spin_Input<<<(geometry->nos + 1023) / 1024, 1024>>>(fft_plan_spins.real_ptr.data(), spins.data(), it_bounds_write_spins.data(), spin_stride, geometry->mu_s.data());
    FFT::batch_Four_3D(fft_plan_spins);
}
// Fill the FFT input buffer with the 6 unique components of the dipole
// (demagnetization) tensor D for every lattice translation in the padded
// sublattice and every inter-sublattice combination, summing over the
// requested periodic images. Also populates inter_sublattice_lookup.
// NOTE(review): every thread rewrites inter_sublattice_lookup with the same
// values — redundant but presumably benign; verify there is no race concern.
__global__ void CU_Write_FFT_Dipole_Input(FFT::FFT_real_type* fft_dipole_inputs, int* iteration_bounds, const Vector3* translation_vectors, int n_cell_atoms, Vector3* cell_atom_translations, int* n_cells, int* inter_sublattice_lookup, int* img, FFT::StrideContainer dipole_stride)
{
    int tupel[3];
    int sublattice_size = iteration_bounds[0] * iteration_bounds[1] * iteration_bounds[2];
    // Prefactor of ddi interaction
    scalar mult = C::mu_0 * C::mu_B * C::mu_B / ( 4*C::Pi * 1e-30 );
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < sublattice_size; i += blockDim.x * gridDim.x)
    {
        cu_tupel_from_idx(i, tupel, iteration_bounds, 3); // tupel now is {a, b, c}
        auto& a = tupel[0];
        auto& b = tupel[1];
        auto& c = tupel[2];
        int b_inter = -1;
        for(int i_b1 = 0; i_b1 < n_cell_atoms; ++i_b1)
        {
            for(int i_b2 = 0; i_b2 < n_cell_atoms; ++i_b2)
            {
                // Only distinct sublattice pairs (plus the single intra-sublattice
                // case i_b1 == i_b2 == 0) get their own D matrix; the rest reuse slot 0.
                if(i_b1 != i_b2 || i_b1 == 0)
                {
                    b_inter++;
                    inter_sublattice_lookup[i_b1 + i_b2 * n_cell_atoms] = b_inter;
                    // Map padded indices past n_cells back to negative translations.
                    int a_idx = a < n_cells[0] ? a : a - iteration_bounds[0];
                    int b_idx = b < n_cells[1] ? b : b - iteration_bounds[1];
                    int c_idx = c < n_cells[2] ? c : c - iteration_bounds[2];
                    scalar Dxx = 0, Dxy = 0, Dxz = 0, Dyy = 0, Dyz = 0, Dzz = 0;
                    Vector3 diff;
                    // Iterate over periodic images
                    for(int a_pb = - img[0]; a_pb <= img[0]; a_pb++)
                    {
                        for(int b_pb = - img[1]; b_pb <= img[1]; b_pb++)
                        {
                            for(int c_pb = -img[2]; c_pb <= img[2]; c_pb++)
                            {
                                // Real-space vector between the two sites.
                                diff =    (a_idx + a_pb * n_cells[0]) * translation_vectors[0]
                                        + (b_idx + b_pb * n_cells[1]) * translation_vectors[1]
                                        + (c_idx + c_pb * n_cells[2]) * translation_vectors[2]
                                        + cell_atom_translations[i_b1]
                                        - cell_atom_translations[i_b2];
                                if(diff.norm() > 1e-10) // skip the self-term
                                {
                                    auto d = diff.norm();
                                    auto d3 = d * d * d;
                                    auto d5 = d * d * d * d * d;
                                    Dxx += mult * (3 * diff[0]*diff[0] / d5 - 1/d3);
                                    Dxy += mult * 3 * diff[0]*diff[1] / d5;  //same as Dyx
                                    Dxz += mult * 3 * diff[0]*diff[2] / d5;  //same as Dzx
                                    Dyy += mult * (3 * diff[1]*diff[1] / d5 - 1/d3);
                                    Dyz += mult * 3 * diff[1]*diff[2] / d5;  //same as Dzy
                                    Dzz += mult * (3 * diff[2]*diff[2] / d5 - 1/d3);
                                }
                            }
                        }
                    }
                    // Write the 6 unique tensor components, one stride slot each.
                    int idx = b_inter * dipole_stride.basis + a * dipole_stride.a + b * dipole_stride.b + c * dipole_stride.c;
                    fft_dipole_inputs[idx                        ] = Dxx;
                    fft_dipole_inputs[idx + 1 * dipole_stride.comp] = Dxy;
                    fft_dipole_inputs[idx + 2 * dipole_stride.comp] = Dxz;
                    fft_dipole_inputs[idx + 3 * dipole_stride.comp] = Dyy;
                    fft_dipole_inputs[idx + 4 * dipole_stride.comp] = Dyz;
                    fft_dipole_inputs[idx + 5 * dipole_stride.comp] = Dzz;
                } else {
                    inter_sublattice_lookup[i_b1 + i_b2 * n_cell_atoms] = 0;
                }
            }
        }
    }
}
// Build the real-space dipole matrices on the GPU (including img_{a,b,c}
// periodic images per axis) and forward-FFT them in place.
void Hamiltonian_Heisenberg::FFT_Dipole_Matrices(FFT::FFT_Plan & fft_plan_dipole, int img_a, int img_b, int img_c)
{
    auto& dipole_inputs = fft_plan_dipole.real_ptr;

    field<int> n_images = { img_a, img_b, img_c };

    // The bravais vectors and cell-atom positions are currently stored as
    // std::vectors on the host; copy them into `field`s so the kernel can
    // access them on the GPU.
    auto basis_vecs  = field<Vector3>();
    auto atom_shifts = field<Vector3>();
    for(int i = 0; i < 3; i++)
        basis_vecs.push_back(geometry->lattice_constant * geometry->bravais_vectors[i]);
    for(int atom = 0; atom < geometry->n_cell_atoms; atom++)
        atom_shifts.push_back(geometry->positions[atom]);

    // One thread per padded-sublattice site.
    const int n_blocks = (sublattice_size + 1023) / 1024;
    CU_Write_FFT_Dipole_Input<<<n_blocks, 1024>>>
    (
        dipole_inputs.data(), it_bounds_write_dipole.data(), basis_vecs.data(),
        geometry->n_cell_atoms, atom_shifts.data(), geometry->n_cells.data(),
        inter_sublattice_lookup.data(), n_images.data(), dipole_stride
    );

    FFT::batch_Four_3D(fft_plan_dipole);
}
// Set up everything the FFT-based DDI computation needs: padded lattice
// dimensions, iteration bounds, FFT plans, stride containers, and the
// pre-transformed dipole matrices. A no-op unless ddi_method == FFT.
void Hamiltonian_Heisenberg::Prepare_DDI()
{
    Clean_DDI();

    if(ddi_method != DDI_Method::FFT)
        return;

    // We perform zero-padding in a lattice direction if the dimension of the system is greater than 1 *and*
    //  - the boundary conditions are open, or
    //  - the boundary conditions are periodic and zero-padding is explicitly requested
    n_cells_padded.resize(3);
    for(int i=0; i<3; i++)
    {
        n_cells_padded[i] = geometry->n_cells[i];
        bool perform_zero_padding = geometry->n_cells[i] > 1 && (boundary_conditions[i] == 0 || ddi_pb_zero_padding);
        if(perform_zero_padding)
            n_cells_padded[i] *= 2;
    }
    sublattice_size = n_cells_padded[0] * n_cells_padded[1] * n_cells_padded[2];

    inter_sublattice_lookup.resize(geometry->n_cell_atoms * geometry->n_cell_atoms);

    // We dont need to transform over length 1 dims
    std::vector<int> fft_dims;
    for(int i = 2; i >= 0; i--) // Notice that reverse order is important!
    {
        if(n_cells_padded[i] > 1)
            fft_dims.push_back(n_cells_padded[i]);
    }

    // Count how many distinct inter-lattice contributions we need to store
    n_inter_sublattice = 0;
    for(int i = 0; i < geometry->n_cell_atoms; i++)
    {
        for(int j = 0; j < geometry->n_cell_atoms; j++)
        {
            if(i != 0 && i==j) continue;
            n_inter_sublattice++;
        }
    }

    // Set the iteration bounds for the nested for loops that are flattened in the kernels
    it_bounds_write_spins     = { geometry->n_cell_atoms,
                                  geometry->n_cells[0],
                                  geometry->n_cells[1],
                                  geometry->n_cells[2] };
    it_bounds_write_dipole    = { n_cells_padded[0],
                                  n_cells_padded[1],
                                  n_cells_padded[2]};
    it_bounds_pointwise_mult  = { geometry->n_cell_atoms,
                                  (n_cells_padded[0]/2 + 1), // due to redundancy in real fft
                                  n_cells_padded[1],
                                  n_cells_padded[2] };
    it_bounds_write_gradients = { geometry->n_cell_atoms,
                                  geometry->n_cells[0],
                                  geometry->n_cells[1],
                                  geometry->n_cells[2] };

    // 6 tensor components per inter-sublattice pair; 3 spin components per basis atom.
    FFT::FFT_Plan fft_plan_dipole = FFT::FFT_Plan(fft_dims, false, 6 * n_inter_sublattice, sublattice_size);
    fft_plan_spins   = FFT::FFT_Plan(fft_dims, false, 3 * geometry->n_cell_atoms, sublattice_size);
    fft_plan_reverse = FFT::FFT_Plan(fft_dims, true, 3 * geometry->n_cell_atoms, sublattice_size);

    // Fill the stride containers for the spin and dipole buffers.
    // (FIX: removed a stray second semicolon after the temp_d initializer.)
    field<int*> temp_s = {&spin_stride.comp, &spin_stride.basis, &spin_stride.a, &spin_stride.b, &spin_stride.c};
    field<int*> temp_d = {&dipole_stride.comp, &dipole_stride.basis, &dipole_stride.a, &dipole_stride.b, &dipole_stride.c};
    FFT::get_strides(temp_s, {3, this->geometry->n_cell_atoms, n_cells_padded[0], n_cells_padded[1], n_cells_padded[2]});
    FFT::get_strides(temp_d, {6, n_inter_sublattice, n_cells_padded[0], n_cells_padded[1], n_cells_padded[2]});

    // Perform FFT of dipole matrices
    // Periodic images are only summed along axes with periodic boundaries.
    int img_a = boundary_conditions[0] == 0 ? 0 : ddi_n_periodic_images[0];
    int img_b = boundary_conditions[1] == 0 ? 0 : ddi_n_periodic_images[1];
    int img_c = boundary_conditions[2] == 0 ? 0 : ddi_n_periodic_images[2];

    FFT_Dipole_Matrices(fft_plan_dipole, img_a, img_b, img_c);
    transformed_dipole_matrices = std::move(fft_plan_dipole.cpx_ptr);

    if (save_dipole_matrices)
    {
        dipole_matrices = std::move(fft_plan_dipole.real_ptr);
    }
}// End prepare
// Release the FFT workspaces by replacing the plans with empty ones.
// NOTE(review): transformed_dipole_matrices / dipole_matrices are not cleared
// here — presumably released on reassignment in Prepare_DDI; verify.
void Hamiltonian_Heisenberg::Clean_DDI()
{
    fft_plan_spins   = FFT::FFT_Plan();
    fft_plan_reverse = FFT::FFT_Plan();
}
// Hamiltonian name as string (shared, immutable; returned by reference).
static const std::string name = "Heisenberg";
const std::string& Hamiltonian_Heisenberg::Name() { return name; }
}
#endif
using at::Tensor;

// Grid-stride loop: thread t handles indices t, t+stride, t+2*stride, ...
// so any element count is covered regardless of the launch configuration.
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)
// Bilinearly interpolate bottom_data (a height x width plane) at the
// continuous location (y, x). Returns 0 for samples more than one pixel
// outside the feature map; otherwise clamps coordinates to the valid range.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t* bottom_data, const int height, const int width,
    scalar_t y, scalar_t x, const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    return 0;
  }

  // Clamp slightly-negative coordinates to the first row/column.
  if (y <= 0) {
    y = 0;
  }
  if (x <= 0) {
    x = 0;
  }

  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;

  // At the bottom/right edge, collapse both neighbors onto the last cell.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets (l*) and their complements (h*).
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. -ly, hx = 1. - lx;
  // do bilinear interpolation
  scalar_t v1 = bottom_data[y_low * width + x_low];
  scalar_t v2 = bottom_data[y_low * width + x_high];
  scalar_t v3 = bottom_data[y_high * width + x_low];
  scalar_t v4 = bottom_data[y_high * width + x_high];
  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);

  return val;
}
// ROIAlign forward kernel (average pooling). One thread per output element
// (n, c, ph, pw): maps the output bin back into the input feature map,
// averages bilinear samples on a roi_bin_grid_h x roi_bin_grid_w grid.
// Each row of bottom_rois is (batch_index, x1, y1, x2, y2) in input scale.
template <typename scalar_t>
__global__ void ROIAlignAvgForward(const bool aligned, const int nthreads, const scalar_t* bottom_data, const scalar_t spatial_scale, const int height, const int width,
    const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio,
    const scalar_t* bottom_rois, scalar_t* top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % aligned_width;
    int ph = (index / aligned_width) % aligned_height;
    int c = (index / aligned_width / aligned_height) % channels;
    int n = index / aligned_width / aligned_height / channels;

    const scalar_t* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not using rounding; this implementation detail is critical
    // With `aligned`, the half-pixel offset makes sampling pixel-center exact.
    scalar_t offset = aligned ? (scalar_t)0.5 : (scalar_t)0.0;
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;

    // Force malformed ROIs to be 1x1
    scalar_t roi_width = roi_end_w - roi_start_w;
    scalar_t roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = max(roi_width, (scalar_t)1.);
      roi_height = max(roi_height, (scalar_t)1.);
    }
    // scalar_t roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
    // scalar_t roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
    scalar_t bin_size_h = roi_height / aligned_height;
    scalar_t bin_size_w = roi_width / aligned_width;

    const scalar_t* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels + c) * height * width;

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / aligned_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);

    // We do average (integral) pooling inside a bin
    const scalar_t count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4

    scalar_t output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Sample at the center of each sub-cell of the bin.
      const scalar_t y = roi_start_h + ph * bin_size_h +
          (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
            (ix + .5f) * bin_size_w / roi_bin_grid_w;

        scalar_t val = bilinear_interpolate(
            offset_bottom_data, height, width, y, x, index);
        output_val += val;
      }
    }
    output_val /= count;

    top_data[index] = output_val;
  }
}
int ROIAlignAvgForwardLaucher(const bool aligned, Tensor bottom_data, const float spatial_scale, const int num_rois, const int height, const int width,
const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio,
Tensor bottom_rois, Tensor top_data) {
const int kThreadsPerBlock = 1024;
const int output_size = num_rois * aligned_height * aligned_width * channels;
cudaError_t err;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(bottom_data.scalar_type(), "roi_align_forward_cuda", ([&] {
ROIAlignAvgForward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
aligned, output_size,
bottom_data.data_ptr<scalar_t>(),
(scalar_t)spatial_scale, height, width,
channels, aligned_height, aligned_width, sampling_ratio,
bottom_rois.data_ptr<scalar_t>(),
top_data.data_ptr<scalar_t>() );
}));
err = cudaGetLastError();
if(cudaSuccess != err) {
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
// Compute the four corner weights (w1..w4) and corner indices used by
// bilinear interpolation at (y, x), for distributing a gradient back to the
// input. Out-of-bounds samples get zero weights and indices of -1.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width, scalar_t y, scalar_t x,
    scalar_t& w1, scalar_t& w2, scalar_t& w3, scalar_t& w4,
    int& x_low, int& x_high, int& y_low, int& y_high,
    const int index /* index for debug only*/) {
  // deal with cases that inverse elements are out of feature map boundary
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    // empty
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }

  // Clamp slightly-negative coordinates to the first row/column.
  if (y <= 0) {
    y = 0;
  }
  if (x <= 0) {
    x = 0;
  }

  y_low = (int)y;
  x_low = (int)x;

  // At the bottom/right edge, collapse both neighbors onto the last cell.
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional offsets and the resulting bilinear corner weights.
  scalar_t ly = y - y_low;
  scalar_t lx = x - x_low;
  scalar_t hy = 1. - ly, hx = 1. - lx;

  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;

  return;
}
// ROIAlign backward kernel (average pooling). One thread per output element
// (n, c, ph, pw): recomputes the same sampling grid as the forward pass and
// scatters the bin's gradient to the four bilinear corners of each sample
// via atomicAdd (ROIs may overlap, so writes can collide).
template <typename scalar_t>
__global__ void ROIAlignAvgBackward(const bool aligned, const int nthreads, const scalar_t* top_diff, const scalar_t spatial_scale, const int height, const int width,
    const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio,
    scalar_t* bottom_diff, const scalar_t* bottom_rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the aligned output
    int pw = index % aligned_width;
    int ph = (index / aligned_width) % aligned_height;
    int c = (index / aligned_width / aligned_height) % channels;
    int n = index / aligned_width / aligned_height / channels;

    const scalar_t* offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not using rounding; this implementation detail is critical
    scalar_t offset = aligned ? (scalar_t)0.5 : (scalar_t)0.0;
    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
    scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
    scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;

    // Force malformed ROIs to be 1x1
    scalar_t roi_width = roi_end_w - roi_start_w;
    scalar_t roi_height = roi_end_h - roi_start_h;
    if (!aligned) { // for backward-compatibility only
      roi_width = fmaxf(roi_width, (scalar_t)1.);
      roi_height = fmaxf(roi_height, (scalar_t)1.);
    }
    scalar_t bin_size_h = roi_height / aligned_height;
    scalar_t bin_size_w = roi_width / aligned_width;

    scalar_t* offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels + c) * height * width;

    int top_offset = (n * channels + c) * aligned_height * aligned_width;
    const scalar_t* offset_top_diff = top_diff + top_offset;
    const scalar_t top_diff_this_bin = offset_top_diff[ph * aligned_width + pw];

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0)
        ? sampling_ratio
        : ceil(roi_height / aligned_height); // e.g., = 2
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);

    // We do average (integral) pooling inside a bin
    const scalar_t count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4

    for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
    {
      // Same sample positions as the forward pass (sub-cell centers).
      const scalar_t y = roi_start_h + ph * bin_size_h +
          (iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const scalar_t x = roi_start_w + pw * bin_size_w +
            (ix + .5f) * bin_size_w / roi_bin_grid_w;

        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;

        bilinear_interpolate_gradient(
            height, width, y, x, w1, w2, w3, w4,
            x_low, x_high, y_low, y_high, index);

        // Each corner receives its bilinear weight times the averaged gradient.
        scalar_t g1 = top_diff_this_bin * w1 / count;
        scalar_t g2 = top_diff_this_bin * w2 / count;
        scalar_t g3 = top_diff_this_bin * w3 / count;
        scalar_t g4 = top_diff_this_bin * w4 / count;

        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
          // gpu_atomic_add<scalar_t>(g1, offset_bottom_diff + y_low * width + x_low);
          // gpu_atomic_add<scalar_t>(g2, offset_bottom_diff + y_low * width + x_high);
          // gpu_atomic_add<scalar_t>(g3, offset_bottom_diff + y_high * width + x_low);
          // gpu_atomic_add<scalar_t>(g4, offset_bottom_diff + y_high * width + x_high);
        } // if
      } // ix
    } // iy
  } // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
// Host launcher for the ROIAlign average-pooling backward kernel.
// Dispatches on the gradient dtype and aborts the process on a launch error.
// NOTE: bottom_diff must be zero-initialized by the caller (kernel accumulates).
int ROIAlignAvgBackwardLaucher(const bool aligned, Tensor top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width,
    const int channels, const int aligned_height, const int aligned_width, const int sampling_ratio,
    Tensor bottom_rois, Tensor bottom_diff) {
  const int kThreadsPerBlock = 1024;
  // One thread per top-gradient element (n, c, ph, pw).
  const int output_size = num_rois * aligned_height * aligned_width * channels;
  cudaError_t err;

  // FIX: the dispatch label previously said "roi_align_forward_cuda", which
  // produced a misleading diagnostic for errors in the backward pass.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(top_diff.scalar_type(), "roi_align_backward_cuda", ([&] {
    ROIAlignAvgBackward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
        aligned, output_size,
        top_diff.data_ptr<scalar_t>(), (scalar_t)spatial_scale, height, width, channels,
        aligned_height, aligned_width, sampling_ratio,
        bottom_diff.data_ptr<scalar_t>(),
        bottom_rois.data_ptr<scalar_t>()
        );
  }));

  // Surface asynchronous launch failures immediately.
  err = cudaGetLastError();
  if(cudaSuccess != err) {
    fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
    exit( -1 );
  }

  return 1;
}
#include "raytracer.h"
#include "cudautils.h"
namespace RayTracer {
/* Implements the M-T ray-triangle intersection algorithm. */
static __device__ bool intersects(
const float3& vertex1,
const float3& vertex2,
const float3& vertex3,
const float3& startPos, const float3& endPos
) {
const float EPSILON = 1e-6f;
float3 diff = endPos - startPos;
float dist = len(diff);
float3 dir = diff / dist;
float3 edge1 = vertex2 - vertex1;
float3 edge2 = vertex3 - vertex1;
float3 pVec = cross(dir, edge2);
float det = dot(edge1, pVec);
if (det < EPSILON) {
return false;
}
float3 tVec = startPos - vertex1;
float u = dot(tVec, pVec);
if (u < 0.0f || u > det) {
return false;
}
float3 qVec = cross(tVec, edge1);
float v = dot(dir, qVec);
if (v < 0.0f || u + v > det) {
return false;
}
float t = dot(edge2, qVec) / det;
return (0.0f < t && t < dist);
}
/**
 * Widens a triangle by a small margin, to better deal with ray-tracing
 * edge cases.
 */
__global__ void map_widen_tris(Triangle* triangles, size_t numTris) {
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= numTris) {
        return;
    }

    const float EPSILON = 1e-3f;
    Triangle& tri = triangles[tid];

    // Push each vertex away from the triangle's centroid by EPSILON.
    // The centroid is computed once, before any vertex is moved.
    float3 centroid = (tri.vertices[0] + tri.vertices[1] + tri.vertices[2]) / 3.0f;
    for (int v = 0; v < 3; v++) {
        tri.vertices[v] += normalized(tri.vertices[v] - centroid) * EPSILON;
    }
}
// Maximum KD-tree recursion depth for split_nodes below.
static const size_t MAX_DEPTH = 10;

// Recursively builds the KD-tree on the device: each call splits one node
// along its longest axis, partitions its triangle IDs into two child lists
// (triangles straddling the plane go into both), and relaunches itself for
// the two children (device-side kernel launch and cudaMalloc — requires
// CUDA dynamic parallelism). Recursion stops at depth <= 1 or <= 3 triangles.
__global__ void split_nodes(
    Triangle* triangles,
    KDNode* nodes,
    size_t depth
) {
    //printf("Split nodes (depth %u)...\n", static_cast<unsigned int>(depth));
    KDNode& node = nodes[threadIdx.x];

    // Leaf condition: depth exhausted or few enough triangles.
    if (depth <= 1 || node.numTris <= 3) {
        //printf(
        //    "Found a leaf! (%u tris)\n",
        //    static_cast<unsigned int>(node.numTris)
        //);
        node.type = KDNodeType::LEAF;
        return;
    }

    float3 nodeSize = node.tmax - node.tmin;
    float3 rightTMin;

    //printf(
    //    "(%u) Node size: (%f, %f, %f)\n",
    //    static_cast<unsigned int>(depth),
    //    nodeSize.x, nodeSize.y, nodeSize.z
    //);

    // Choose the split axis: the (possibly tied) longest extent wins,
    // preferring x, then y, then z. The chosen extent is halved so that
    // nodeSize afterwards describes the left child's size.
    if (nodeSize.x > nodeSize.y && nodeSize.x > nodeSize.z
        || nodeSize.x == nodeSize.y && nodeSize.x > nodeSize.z
        || nodeSize.x > nodeSize.y && nodeSize.x == nodeSize.z
    ) {
        /* Split along the x-axis. */
        nodeSize.x *= 0.5;
        node.axis = Axis::X;
        node.pos = node.tmin.x + nodeSize.x;
        rightTMin = node.tmin + make_float3(nodeSize.x, 0.0f, 0.0f);
        //printf(
        //    "(%u) Split at x = %f\n",
        //    static_cast<unsigned int>(depth),
        //    node.pos
        //);
    }
    else if (nodeSize.y > nodeSize.x && nodeSize.y > nodeSize.z
        || nodeSize.y == nodeSize.x && nodeSize.y > nodeSize.z
        || nodeSize.y > nodeSize.x && nodeSize.y == nodeSize.z
    ) {
        /* Split along the y-axis. */
        nodeSize.y *= 0.5;
        node.axis = Axis::Y;
        node.pos = node.tmin.y + nodeSize.y;
        rightTMin = node.tmin + make_float3(0.0f, nodeSize.y, 0.0f);
        //printf(
        //    "(%u) Split at y = %f\n",
        //    static_cast<unsigned int>(depth),
        //    node.pos
        //);
    }
    else {
        /* Split along the z-axis. */
        nodeSize.z *= 0.5;
        node.axis = Axis::Z;
        node.pos = node.tmin.z + nodeSize.z;
        rightTMin = node.tmin + make_float3(0.0f, 0.0f, nodeSize.z);
        //printf(
        //    "(%u) Split at z = %f\n",
        //    static_cast<unsigned int>(depth),
        //    node.pos
        //);
    }

    // Worst-case allocation: every triangle could land in either child.
    size_t* leftTriIDs;
    CUDA_CHECK_ERROR_DEVICE(
        cudaMalloc(&leftTriIDs, sizeof(size_t) * node.numTris)
    );
    size_t* rightTriIDs;
    CUDA_CHECK_ERROR_DEVICE(
        cudaMalloc(&rightTriIDs, sizeof(size_t) * node.numTris)
    );

    // Partition triangles: a triangle goes left/right if any vertex lies on
    // that side of (or on) the split plane; straddlers go to both lists.
    size_t numLeft = 0;
    size_t numRight = 0;
    for (size_t i=0; i<node.numTris; i++) {
        size_t triangleID = node.triangleIDs[i];
        Triangle& tri = triangles[triangleID];
        bool onLeft = false;
        bool onRight = false;
        for (int vertex=0; vertex<3; vertex++) {
            switch (node.axis) {
            case Axis::X:
                if (tri.vertices[vertex].x <= node.pos) {
                    onLeft = true;
                }
                if (tri.vertices[vertex].x >= node.pos) {
                    onRight = true;
                }
                break;
            case Axis::Y:
                if (tri.vertices[vertex].y <= node.pos) {
                    onLeft = true;
                }
                if (tri.vertices[vertex].y >= node.pos) {
                    onRight = true;
                }
                break;
            case Axis::Z:
                if (tri.vertices[vertex].z <= node.pos) {
                    onLeft = true;
                }
                if (tri.vertices[vertex].z >= node.pos) {
                    onRight = true;
                }
                break;
            }

            if (onLeft && onRight) {
                break;
            }
        }

        if (onLeft) {
            leftTriIDs[numLeft++] = triangleID;
        }
        if (onRight) {
            rightTriIDs[numRight++] = triangleID;
        }
    }

    // node.triangleIDs is kept alive here and freed later by cleanup_nodes.
    //printf("cudaFree %p\n", node.triangleIDs);
    //cudaFree(node.triangleIDs);

    // Create the two children and recurse with one thread per child.
    CUDA_CHECK_ERROR_DEVICE(
        cudaMalloc(&node.children, sizeof(KDNode) * 2)
    );

    node.children[0].type = KDNodeType::NODE;
    node.children[0].tmin = node.tmin;
    node.children[0].tmax = node.tmin + nodeSize;
    node.children[0].triangleIDs = leftTriIDs;
    node.children[0].numTris = numLeft;

    node.children[1].type = KDNodeType::NODE;
    node.children[1].tmin = rightTMin;
    node.children[1].tmax = node.tmax;
    node.children[1].triangleIDs = rightTriIDs;
    node.children[1].numTris = numRight;

    KERNEL_LAUNCH_DEVICE(
        split_nodes, 1, 2,
        triangles, node.children, depth - 1
    );
}
/*
 * Recursively releases a device-allocated KD (sub)tree.
 * Launched with 2 threads: thread i tears down nodes[i]; interior nodes
 * relaunch this kernel on their child pair (dynamic parallelism).
 *
 * BUGFIX: previously thread 0 could cudaFree(nodes) while the other thread
 * was still reading its own element (use-after-free race), and the `nodes`
 * array was only freed when thread 0's node happened to be an interior
 * node, leaking it otherwise. Copy everything out first, synchronize, then
 * free unconditionally.
 */
__global__ void cleanup_nodes(KDNode* nodes) {
    KDNode& node = nodes[threadIdx.x];
    // Snapshot the fields we still need before the backing array goes away.
    KDNodeType type = node.type;
    KDNode* children = node.children;
    size_t* triangleIDs = node.triangleIDs;
    __syncthreads();
    CUDA_CHECK_ERROR_DEVICE(cudaFree(triangleIDs));
    if (threadIdx.x == 0) {
        CUDA_CHECK_ERROR_DEVICE(cudaFree(nodes));
    }
    if (type == KDNodeType::NODE) {
        KERNEL_LAUNCH_DEVICE(
            cleanup_nodes, 1, 2,
            children
        );
    }
}
/***********************
 * CUDARayTracer Class *
 ***********************/
// NOTE(review): MAX_LEAVES is not referenced in this portion of the file —
// presumably it caps leaf creation during tree construction; confirm.
const size_t CUDARayTracer::MAX_LEAVES = 1024;
// Default-construct with no geometry and no tree.
// NOTE(review): zero-argument make_float3() is presumably a project helper
// returning (0,0,0); the stock CUDA make_float3 takes three components —
// confirm.
CUDARayTracer::CUDARayTracer() :
    m_triangles(nullptr),
    m_triangleIDs(nullptr),
    m_numTriangles(0),
    m_pTreeRoot(nullptr),
    m_tmin(make_float3()),
    m_tmax(make_float3()) {}
// Releases the device-side triangle buffer and, if a tree was built,
// tears it down. m_triangleIDs is not freed here directly: once
// build_tree() has run, the tree root owns that buffer and destroy_tree()
// releases it.
CUDARayTracer::~CUDARayTracer() {
    if (m_triangles != nullptr) {
        cudaFree(m_triangles);
    }
    if (m_pTreeRoot != nullptr) {
        destroy_tree();
    }
}
// Builds the KD tree on the device: uploads a root node spanning the whole
// scene bounding box (claiming ownership of the triangle-ID buffer), then
// recursively partitions it with the split_nodes kernel via CUDA dynamic
// parallelism, up to MAX_DEPTH levels.
__host__ void CUDARayTracer::build_tree(void) {
    KDNode root;
    root.type = KDNodeType::NODE;
    root.tmin = m_tmin;
    root.tmax = m_tmax;
    root.triangleIDs = m_triangleIDs;  // tree takes ownership of the IDs
    root.numTris = m_numTriangles;
    CUDA_CHECK_ERROR(cudaMalloc(&m_pTreeRoot, sizeof(KDNode)));
    CUDA_CHECK_ERROR(
        cudaMemcpy(
            m_pTreeRoot, &root, sizeof(KDNode),
            cudaMemcpyHostToDevice
        )
    );
    // Single-thread launch; split_nodes fans out through child launches.
    KERNEL_LAUNCH(
        split_nodes, 1, 1,
        m_triangles, m_pTreeRoot, MAX_DEPTH
    );
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());
}
/*
 * Tears down the device-side KD tree built by build_tree(): frees the
 * root's triangle-ID buffer and the root node itself, then (for interior
 * roots) recursively frees the children via the cleanup_nodes kernel.
 *
 * BUGFIX: the root node was previously freed only when it was an interior
 * node, leaking it for leaf-only trees; and m_pTreeRoot was left dangling,
 * so a second call (e.g. from the destructor) would be unsafe. Free
 * unconditionally and clear the pointers.
 */
__host__ void CUDARayTracer::destroy_tree(void) {
    KDNode root;
    CUDA_CHECK_ERROR(
        cudaMemcpy(
            &root, m_pTreeRoot, sizeof(KDNode),
            cudaMemcpyDeviceToHost
        )
    );
    // root.triangleIDs is the buffer allocated in add_triangles().
    CUDA_CHECK_ERROR(cudaFree(root.triangleIDs));
    m_triangleIDs = nullptr;
    CUDA_CHECK_ERROR(cudaFree(m_pTreeRoot));
    m_pTreeRoot = nullptr;
    if (root.type == KDNodeType::NODE) {
        // Children were allocated with device-side cudaMalloc, so they must
        // be freed from device code.
        KERNEL_LAUNCH(
            cleanup_nodes, 1, 2,
            root.children
        );
        CUDA_CHECK_ERROR(cudaDeviceSynchronize());
    }
}
/*
 * Uploads the scene geometry to the device, widens it slightly, computes
 * the scene bounding box, and builds the KD tree. Must be called exactly
 * once, with a non-empty triangle list.
 */
__host__ void CUDARayTracer::add_triangles(
    const std::vector<Triangle>& tris
) {
    // This method should only ever be called exactly once.
    assert(m_triangles == nullptr);
    assert(m_numTriangles == 0);
    // An empty scene would leave the bounding box and tree undefined.
    assert(!tris.empty());
    m_numTriangles = tris.size();
    size_t trianglesSize = sizeof(Triangle) * m_numTriangles;
    CUDA_CHECK_ERROR(cudaMalloc(&m_triangles, trianglesSize));
    CUDA_CHECK_ERROR(
        cudaMemcpy(
            m_triangles, tris.data(), trianglesSize,
            cudaMemcpyHostToDevice
        )
    );
    /*
     * Widen each triangle by a small margin, to better deal with ray-
     * tracing edge cases.
     */
    const size_t BLOCK_WIDTH = 1024;
    size_t numBlocks = div_ceil(m_numTriangles, BLOCK_WIDTH);
    KERNEL_LAUNCH(
        map_widen_tris,
        numBlocks, BLOCK_WIDTH,
        m_triangles, m_numTriangles
    );
    CUDA_CHECK_ERROR(cudaDeviceSynchronize());
    // Identity triangle-ID list; the KD tree partitions (and owns) it.
    std::vector<size_t> triangleIDs(m_numTriangles);
    for (size_t i = 0; i < m_numTriangles; i++) {
        triangleIDs[i] = i;
    }
    size_t triangleIDsSize = sizeof(size_t) * m_numTriangles;
    CUDA_CHECK_ERROR(cudaMalloc(&m_triangleIDs, triangleIDsSize));
    CUDA_CHECK_ERROR(
        cudaMemcpy(
            m_triangleIDs, triangleIDs.data(),
            triangleIDsSize,
            cudaMemcpyHostToDevice
        )
    );
    /*
     * Figure out the bounding box of the KD Tree.
     * BUGFIX: seed the box from the first vertex instead of the default-
     * constructed members; the old code silently clamped the box to
     * contain the origin, producing a wrong (oversized or mis-centered)
     * box for scenes that do not straddle (0,0,0).
     * NOTE(review): the box is computed from the un-widened host copy
     * while the device triangles have been widened — presumably the
     * widening margin is negligible; confirm.
     */
    m_tmin = tris[0].vertices[0];
    m_tmax = tris[0].vertices[0];
    for (const Triangle& tri : tris) {
        for (int vertex = 0; vertex < 3; vertex++) {
            m_tmin.x = std::min(m_tmin.x, tri.vertices[vertex].x);
            m_tmin.y = std::min(m_tmin.y, tri.vertices[vertex].y);
            m_tmin.z = std::min(m_tmin.z, tri.vertices[vertex].z);
            m_tmax.x = std::max(m_tmax.x, tri.vertices[vertex].x);
            m_tmax.y = std::max(m_tmax.y, tri.vertices[vertex].y);
            m_tmax.z = std::max(m_tmax.z, tri.vertices[vertex].z);
        }
    }
    build_tree();
}
//__device__ Triangle* CUDARayTracer::get_triangles(void) {
// return m_triangles;
//}
//__device__ size_t* CUDARayTracer::get_tri_ids(void) {
// return m_triangleIDs;
//}
//__device__ Triangle& CUDARayTracer::get_tri_indirect(size_t i) {
// return m_triangles[m_triangleIDs[i]];
//}
/*
 * Returns true if the straight line of sight from startPos to endPos is
 * blocked by any triangle in the KD tree, false otherwise.
 *
 * Iterative KD-tree traversal with a fixed-size explicit stack: each entry
 * is a (node, clipped sub-segment) pair; interior nodes clip the segment
 * against their splitting plane and push the child(ren) the segment
 * touches. NOTE(review): on stack overflow the function prints an alert
 * and returns false, i.e. it treats an unresolvable query as "not
 * blocked" — confirm that fail-open is the intended policy.
 */
__device__ bool CUDARayTracer::LOS_blocked(
    const float3& startPos, const float3& endPos
) {
    // EPSILON keeps the per-axis inverse direction finite when the ray is
    // parallel to an axis (avoids division by zero below).
    const float EPSILON = 1e-6f;
    float3 dir = normalized(endPos - startPos);
    float3 invDir = make_float3(
        1.0f / (dir.x + ((dir.x < 0) ? -EPSILON : EPSILON)),
        1.0f / (dir.y + ((dir.y < 0) ? -EPSILON : EPSILON)),
        1.0f / (dir.z + ((dir.z < 0) ? -EPSILON : EPSILON))
    );
    // Work item: a node plus the sub-segment of the ray inside it.
    struct StackEntry {
        KDNode* pNode;
        float3 start;
        float3 end;
    };
    StackEntry stack[1024]; // empty ascending stack
    size_t stackSize = 0;
    stack[stackSize++] = {
        m_pTreeRoot,
        startPos,
        endPos,
    };
    while (stackSize > 0) {
        if (stackSize >= 1024) {
            printf("ALERT: Raytracer stack size too big!!!\n");
            return false;
        }
        StackEntry& entry = stack[--stackSize];
        KDNode* pNode = entry.pNode;
        float3 start = entry.start;
        float3 end = entry.end;
        float len = dist(start, end);
        KDNode* children = pNode->children;
        float t;  // parametric distance from `start` to the split plane
        switch (pNode->type) {
        case KDNodeType::LEAF:
            // NOTE(review): the intersection test uses the FULL original
            // segment (startPos..endPos) rather than the clipped
            // sub-segment; conservative but still correct for a boolean
            // blocked/unblocked query.
            for (size_t ti=0; ti<pNode->numTris; ti++) {
                Triangle& tri = m_triangles[pNode->triangleIDs[ti]];
                // The M-T intersection algorithm uses CCW vertex
                // winding, but Source uses CW winding. So, we need to
                // pass the vertices in reverse order to get backface
                // culling to work correctly.
                bool isLOSBlocked = intersects(
                    tri.vertices[2], tri.vertices[1], tri.vertices[0],
                    startPos, endPos
                );
                if (isLOSBlocked) {
                    return true;
                }
            }
            break;
        case KDNodeType::NODE:
            bool dirPositive;
            switch (pNode->axis) {
            case Axis::X:
                t = (pNode->pos - start.x) * invDir.x;
                dirPositive = dir.x >= 0.0f;
                break;
            case Axis::Y:
                t = (pNode->pos - start.y) * invDir.y;
                dirPositive = dir.y >= 0.0f;
                break;
            case Axis::Z:
                t = (pNode->pos - start.z) * invDir.z;
                dirPositive = dir.z >= 0.0f;
                break;
            }
            if (t < 0.0) {
                // Plane is "behind" the line start.
                // Recurse on the right side if dir is positive.
                // Recurse on the left side if dir is negative.
                stack[stackSize++] = {
                    &children[dirPositive ? 1 : 0],
                    start,
                    end,
                };
            }
            else if (t >= len) {
                // Plane is "ahead" of the line end.
                // Recurse on the left side if dir is positive.
                // Recurse on the right side if dir is negative.
                stack[stackSize++] = {
                    &children[dirPositive ? 0 : 1],
                    start,
                    end
                };
            }
            else {
                // The line segment straddles the plane.
                // Clip the line and recurse on both sides.
                float3 clipPoint = start + t * dir;
                // Near side first pushed, so the far side is processed
                // first (LIFO) — order does not affect the boolean result.
                stack[stackSize++] = {
                    &children[dirPositive ? 0 : 1],
                    start,
                    clipPoint,
                };
                if (stackSize >= 1024) {
                    printf("ALERT: Stack size too big!!!\n");
                    return false;
                }
                stack[stackSize++] = {
                    &children[dirPositive ? 1 : 0],
                    clipPoint,
                    end,
                };
            }
            break;
        }
    }
    // No leaf triangle intersected the segment.
    return false;
}
}
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/solver.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
//#include "caffe/layers/custom_data_layer.hpp"
//#include "../../../../../../../usr/include/c++/4.9/iosfwd"
using namespace std;
namespace caffe {
// Writes the x/y planes of a ground-truth flow field as greyscale images
// under visualize/, scaling values by 20 for visibility.
//
// `data` must point to two host-memory planes of height*width values
// (x-flow first, then y-flow); `img_key` becomes part of the filenames.
template<typename Dtype>
void visualize(const Dtype *data, int width, int height, string img_key) {
    cv::Mat flow_x(cv::Size(width, height), CV_8UC1), flow_y(cv::Size(width, height), CV_8UC1);
    const int plane = height * width;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            // BUGFIX: the old plain (unsigned char) cast wrapped modulo 256
            // for values outside [0, 255] (and for any negative flow),
            // producing garbage images; saturate instead, matching the
            // clamping done in visualize_flownet.
            flow_x.at<uchar>(y, x) = cv::saturate_cast<uchar>(data[y * width + x] * 20);
            flow_y.at<uchar>(y, x) = cv::saturate_cast<uchar>(data[plane + y * width + x] * 20);
        }
    }
    cv::imwrite("visualize/" + img_key + "_l1_flow_x.jpg", flow_x);
    cv::imwrite("visualize/" + img_key + "_l1_flow_y.jpg", flow_y);
    LOG(INFO) << "Img:" << img_key << " wrote.";
}
// Writes the x/y planes of a predicted flow field as greyscale images,
// mapping values in [-40, 40] linearly onto [0, 255] and saturating
// outside that range. Layout and naming match visualize().
template<typename Dtype>
void visualize_flownet(const Dtype *data, int width, int height, string img_key) {
    cv::Mat flow_x(cv::Size(width, height), CV_8UC1), flow_y(cv::Size(width, height), CV_8UC1);
    // Equivalent of the old CAST(v, -40, 40) macro.
    auto to_byte = [](Dtype v) -> unsigned char {
        const Dtype lo = -40;
        const Dtype hi = 40;
        if (v > hi) return 255;
        if (v < lo) return 0;
        return (unsigned char) cvRound(255 * (v - lo) / (hi - lo));
    };
    const int plane = height * width;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            flow_x.at<uchar>(y, x) = to_byte(data[y * width + x]);
            flow_y.at<uchar>(y, x) = to_byte(data[plane + y * width + x]);
        }
    }
    cv::imwrite("visualize/" + img_key + "_l1_flow_x.jpg", flow_x);
    cv::imwrite("visualize/" + img_key + "_l1_flow_y.jpg", flow_y);
    LOG(INFO) << "Img:" << img_key << " wrote.";
}
// Elementwise sign: out[i] = +1 where in[i] > 0, else -1.
// Note: zero maps to -1, and NaN also maps to -1 (NaN > 0 is false).
template<typename Dtype>
__global__ void ComputeSign(const int n, const Dtype *in, Dtype *out) {
    CUDA_KERNEL_LOOP(index, n) {
        out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1);
    }
}
// TODO maybe change the way of detecting NaNs
// Builds a validity mask: out[i] = 1 where in[i] is a real number,
// 0 where it is NaN (relies on NaN != NaN).
template<typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype *in, Dtype *out) {
    CUDA_KERNEL_LOOP(index, n) {
        out[index] = in[index] == in[index] ? Dtype(1) : Dtype(0);
    }
}
// Copies in to out, replacing NaN entries with 0 (NaN != NaN test).
template<typename Dtype>
__global__ void KillNaNs(const int n, const Dtype *in, Dtype *out) {
    CUDA_KERNEL_LOOP(index, n) {
        out[index] = in[index] == in[index] ? in[index] : Dtype(0);
    }
}
// Zeroes out[i] wherever the mask value in[i] is <= 0.5; entries with a
// mask value > 0.5 are left untouched (in-place on `out`).
template<typename Dtype>
__global__ void KillMasked(const int n, const Dtype *in, Dtype *out) {
    CUDA_KERNEL_LOOP(index, n) {
        out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0);
        // out[index] = out[index]==out[index] ? out[index] : Dtype(0);
        // out[index] = out[index]>1e3 ? 0 : out[index];
        // out[index] = out[index]<-1e3 ? 0 : out[index];
    }
}
// Same as KillMasked, but with a single-channel mask of size width_height
// applied to every channel: the mask index wraps modulo width_height.
template<typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype *in, Dtype *out) {
    CUDA_KERNEL_LOOP(index, n) {
        const int mask_idx = index % width_height;
        out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0);
    }
}
// Updates an existing mask in place: entries whose |in[i]| falls below the
// plateau threshold are zeroed; all other mask entries are kept as-is.
template<typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype *in, Dtype *out, Dtype plateau) {
    CUDA_KERNEL_LOOP(index, n) {
        if (fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is
    }
}
// Builds a fresh 0/1 mask from scratch: out[i] = 0 where |in[i]| < plateau,
// 1 otherwise (unlike MaskPlateauValues, which only clears existing bits).
template<typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype *in, Dtype *out, Dtype plateau) {
    CUDA_KERNEL_LOOP(index, n) {
        out[index] = (fabs(in[index]) < plateau) ? Dtype(0) : Dtype(1);
    }
}
// Computes the (masked) L1 loss between bottom[0] (prediction) and
// bottom[1] (ground truth) on the GPU and writes the scalar loss to top[0].
//
// Pipeline:
//   1. diff = prediction - ground truth (diff_layer_).
//   2. NaN entries of diff are masked out (FindNotNaNs / KillMasked).
//   3. Either plain L1 (sum |diff| via a sign-vector dot product) or, with
//      l2_per_location, the per-pixel L2 norm (square -> channel-sum ->
//      sqrt) is summed; an optional "plateau" threshold zeroes small
//      residuals in both modes.
//   4. The sum is divided by normalize_coeff_ (valid-entry count or num).
template<typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                     const vector<Blob<Dtype> *> &top) {
    // NOTE(review): this debug-visualization block is currently ACTIVE —
    // the leading "//" neutralizes the "/*" — so every forward pass writes
    // flow images to visualize/ and logs shapes, which is expensive.
    // Change "///*" to "/*" to disable.
    ///*TODO:Uncomment this to visualize
    for(int n = 0; n < bottom[1]->num(); n++){
        std::stringstream ss, ss_pred;
        int id = int(rand() % 10000);
        ss << bottom[1]->shape(3)<< "_" <<bottom[1]->shape(2)<< n << "_gt_tsn_" << id;
        ss_pred << bottom[0]->shape(3)<< "_" <<bottom[0]->shape(2)<< n << "_pred_tsn_"<< id;
        visualize(bottom[1]->cpu_data(), bottom[1]->shape(3), bottom[1]->shape(2), ss.str());
        visualize_flownet(bottom[0]->cpu_data(), bottom[0]->shape(3), bottom[0]->shape(2), ss_pred.str());
        LOG(INFO) << "LABEL SPAPE: " << bottom[1]->shape(0)<< " " << bottom[1]->shape(1)
                  << " "<<bottom[1]->shape(2)<<" "<< bottom[1]->shape(3);
    }
    //*/
    // int counter = 0;
    // for (int i=0; i<bottom[0]->count(); i++){
    //
    //// LOG(INFO) << "Data: " << bottom[0]->cpu_data()[i];
    //// LOG(INFO) << "Label: " << bottom[1]->cpu_data()[i];
    //// LOG(INFO) << "Loss: " << bottom[0]->cpu_data()[i] - bottom[1]->cpu_data()[i];
    // if ( bottom[1]->cpu_data()[i] == 0) counter++;
    //
    //}
    //LOG(INFO)<< counter << " zeros in label";
    Blob<Dtype> *diffptr = diff_top_vec_[0];
    Dtype dot, loss;
    // With two bottoms, diff_layer_ writes the elementwise difference into
    // diffptr; with one bottom, diffptr is assumed to hold it already.
    if (bottom.size() > 1) {
        diff_layer_->Forward(bottom, diff_top_vec_);
    }
    // if necessary, compute the number of not-NaNs
    int count = bottom[0]->count();
    int num = bottom[0]->num();
    // mask_[i] = 1 where diff is a real number, 0 where it is NaN.
    FindNotNaNs<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
        count, diffptr->gpu_data(), mask_.mutable_gpu_data());
    cudaDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    // for (int i = 0; i < mask_.count(); i++){
    // if(i%100==0) LOG(INFO)<<"mask: "<< mask_.cpu_data()[i];
    // }
    if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
        // Valid-entry count = dot(mask, mask), divided per-channel.
        caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
        normalize_coeff_ /= mask_.channels();
    } else {
        normalize_coeff_ = num;
    }
    // LOG(INFO)<<"normalize_coeff_"<<normalize_coeff_;
    if (this->layer_param_.l1_loss_param().l2_per_location()) {
        // set masked (NaNs only) to zero
        KillMasked<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
            count, mask_.gpu_data(), diffptr->mutable_gpu_data());
        cudaDeviceSynchronize();
        CUDA_POST_KERNEL_CHECK;
        // Per-location L2 norm: square, sum over channels, then sqrt.
        square_layer_->Forward(diff_top_vec_, square_top_vec_);
        sum_layer_->Forward(square_top_vec_, sum_top_vec_);
        // Mask plateau in summed blob (only one channel):
        if (this->layer_param_.l1_loss_param().plateau() > 0) {
            // The summed blob holds squared norms, so compare to plateau^2.
            float plateau_val_squared =
                this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
            MaskPlateauValuesInitial<Dtype> << < CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS >> >
            (
                sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
            // LOG(INFO)<<"plateau_val_squared: "<<plateau_val_squared;
            cudaDeviceSynchronize();
            CUDA_POST_KERNEL_CHECK;
            KillMasked<Dtype> << < CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS >> > (
                sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
            cudaDeviceSynchronize();
            CUDA_POST_KERNEL_CHECK;
        }
        sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
        // Note sign_ is set to all ones in Reshape
        caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
    } else {
        // Mask plateau:
        if (this->layer_param_.l1_loss_param().plateau() > 0) {
            MaskPlateauValues<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
                count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
            CUDA_POST_KERNEL_CHECK;
        }
        //mask_.print("MASK2");
        // set masked (NaNs, plateau) to zero
        KillMasked<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
            count, mask_.gpu_data(), diffptr->mutable_gpu_data());
        CUDA_POST_KERNEL_CHECK;
        // sum |diff| computed as dot(diff, sign(diff)).
        ComputeSign<Dtype> << < CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
            count, diffptr->gpu_data(), sign_.mutable_gpu_data());
        CUDA_POST_KERNEL_CHECK;
        caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
    }
    // for(int i = 0; i < diffptr->count(); i++){
    // LOG(INFO) <<"L1 diff "<< i << "/" << diffptr->count() - 1 << " : " << diffptr->cpu_data()[i];
    // }
    // LOG(INFO)<<"dot: "<<dot<<" "<<normalize_coeff_;
    loss = dot / normalize_coeff_;
    top[0]->mutable_cpu_data()[0] = loss;
}
// Backpropagates the L1 loss gradient to the bottom blobs. The upstream
// scalar gradient (top[0]->cpu_diff()[0]) is divided by the same
// normalize_coeff_ used in Forward_gpu, pushed backwards through the
// sqrt/sum/square chain in l2_per_location mode (or multiplied by
// sign(diff) otherwise), re-masked, and finally propagated through the
// difference layer.
template<typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                      const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
    bool prop_down = propagate_down[0];
    if (bottom.size() > 1) prop_down |= propagate_down[1];
    Blob<Dtype> *diffptr = diff_top_vec_[0];
    if (prop_down) {
        const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_;
        if (this->layer_param_.l1_loss_param().l2_per_location()) {
            // NOTE(review): this local vector shadows the outer prop_down
            // flag and forces propagation through the internal
            // sqrt/sum/square layers regardless of the caller's flags —
            // presumably intentional; confirm.
            vector<bool> prop_down(1, true);
            // d(loss)/d(sqrt_output) = alpha (sign_ is all ones here).
            caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
                            Dtype(0), sqrt_output_.mutable_gpu_diff());
            sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
            if (this->layer_param_.l1_loss_param().plateau() > 0) {
                // Zero gradients where the plateau mask zeroed the loss.
                KillMasked<Dtype> << < CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS >> > (
                    sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
                cudaDeviceSynchronize();
                CUDA_POST_KERNEL_CHECK;
            }
            sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
            square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
        } else {
            // Plain L1: gradient is alpha * sign(diff).
            caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
                            Dtype(0), diffptr->mutable_gpu_diff());
        }
        // Zero gradients at masked (NaN / plateau) positions.
        KillMasked<Dtype> << < CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS >> > (
            diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
        CUDA_POST_KERNEL_CHECK;
        if (bottom.size() > 1) {
            diff_layer_->Backward(diff_top_vec_, propagate_down, bottom);
        }
    }
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/complex.h>
#include <cusp/blas/blas.h>
template <class MemorySpace>
void TestAmax(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 0.0f;
x[1] = -5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 7.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::amax(x), 4);
ASSERT_EQUAL(cusp::blas::amax(view_x), 4);
}
DECLARE_HOST_DEVICE_UNITTEST(TestAmax)
template <class MemorySpace>
void TestComplexAmax(void)
{
typedef cusp::complex<float> ValueType;
typedef typename cusp::array1d<ValueType, MemorySpace> Array;
typedef typename cusp::array1d<ValueType, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = ValueType( 7.0, 1.0);
x[1] = ValueType(-5.0, 0.0);
x[2] = ValueType( 4.0, -3.0);
x[3] = ValueType(-3.0, 4.0);
x[4] = ValueType( 0.0, -5.0);
x[5] = ValueType( 1.0, 7.0);
ASSERT_EQUAL(cusp::blas::amax(x), 0);
ASSERT_EQUAL(cusp::blas::amax(view_x), 0);
}
DECLARE_HOST_DEVICE_UNITTEST(TestComplexAmax)
template <class MemorySpace>
void TestAxpy(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
Array y(4);
x[0] = 7.0f;
y[0] = 0.0f;
x[1] = 5.0f;
y[1] = -2.0f;
x[2] = 4.0f;
y[2] = 0.0f;
x[3] = -3.0f;
y[3] = 5.0f;
cusp::blas::axpy(x, y, 2.0f);
ASSERT_EQUAL(y[0], 14.0);
ASSERT_EQUAL(y[1], 8.0);
ASSERT_EQUAL(y[2], 8.0);
ASSERT_EQUAL(y[3], -1.0);
View view_x(x);
View view_y(y);
cusp::blas::axpy(view_x, view_y, 2.0f);
ASSERT_EQUAL(y[0], 28.0);
ASSERT_EQUAL(y[1], 18.0);
ASSERT_EQUAL(y[2], 16.0);
ASSERT_EQUAL(y[3], -7.0);
// test size checking
Array w(3);
ASSERT_THROWS(cusp::blas::axpy(x, w, 1.0f), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestAxpy)
template <class MemorySpace>
void TestAxpby(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
Array y(4);
Array z(4,0);
x[0] = 7.0f;
y[0] = 0.0f;
x[1] = 5.0f;
y[1] = -2.0f;
x[2] = 4.0f;
y[2] = 0.0f;
x[3] = -3.0f;
y[3] = 5.0f;
cusp::blas::axpby(x, y, z, 2.0f, 1.0f);
ASSERT_EQUAL(z[0], 14.0);
ASSERT_EQUAL(z[1], 8.0);
ASSERT_EQUAL(z[2], 8.0);
ASSERT_EQUAL(z[3], -1.0);
z[0] = 0.0f;
z[1] = 0.0f;
z[2] = 0.0f;
z[3] = 0.0f;
View view_x(x);
View view_y(y);
View view_z(z);
cusp::blas::axpby(view_x, view_y, view_z, 2.0f, 1.0f);
ASSERT_EQUAL(z[0], 14.0);
ASSERT_EQUAL(z[1], 8.0);
ASSERT_EQUAL(z[2], 8.0);
ASSERT_EQUAL(z[3], -1.0);
// test size checking
Array w(3);
ASSERT_THROWS(cusp::blas::axpby(x, y, w, 2.0f, 1.0f), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestAxpby)
template <class MemorySpace>
void TestAxpbypcz(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
Array y(4);
Array z(4);
Array w(4,0);
x[0] = 7.0f;
y[0] = 0.0f;
z[0] = 1.0f;
x[1] = 5.0f;
y[1] = -2.0f;
z[1] = 0.0f;
x[2] = 4.0f;
y[2] = 0.0f;
z[2] = 3.0f;
x[3] = -3.0f;
y[3] = 5.0f;
z[3] = -2.0f;
cusp::blas::axpbypcz(x, y, z, w, 2.0f, 1.0f, 3.0f);
ASSERT_EQUAL(w[0], 17.0);
ASSERT_EQUAL(w[1], 8.0);
ASSERT_EQUAL(w[2], 17.0);
ASSERT_EQUAL(w[3], -7.0);
w[0] = 0.0f;
w[1] = 0.0f;
w[2] = 0.0f;
w[3] = 0.0f;
View view_x(x);
View view_y(y);
View view_z(z);
View view_w(w);
cusp::blas::axpbypcz(view_x, view_y, view_z, view_w, 2.0f, 1.0f, 3.0f);
ASSERT_EQUAL(w[0], 17.0);
ASSERT_EQUAL(w[1], 8.0);
ASSERT_EQUAL(w[2], 17.0);
ASSERT_EQUAL(w[3], -7.0);
// test size checking
Array output(3);
ASSERT_THROWS(cusp::blas::axpbypcz(x, y, z, output, 2.0f, 1.0f, 3.0f), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestAxpbypcz)
template <class MemorySpace>
void TestXmy(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
Array y(4);
Array z(4,0);
x[0] = 7.0f;
y[0] = 0.0f;
x[1] = 5.0f;
y[1] = -2.0f;
x[2] = 4.0f;
y[2] = 0.0f;
x[3] = -3.0f;
y[3] = 5.0f;
cusp::blas::xmy(x, y, z);
ASSERT_EQUAL(z[0], 0.0f);
ASSERT_EQUAL(z[1], -10.0f);
ASSERT_EQUAL(z[2], 0.0f);
ASSERT_EQUAL(z[3], -15.0f);
z[0] = 0.0f;
z[1] = 0.0f;
z[2] = 0.0f;
z[3] = 0.0f;
View view_x(x);
View view_y(y);
View view_z(z);
cusp::blas::xmy(view_x, view_y, view_z);
ASSERT_EQUAL(z[0], 0.0f);
ASSERT_EQUAL(z[1], -10.0f);
ASSERT_EQUAL(z[2], 0.0f);
ASSERT_EQUAL(z[3], -15.0f);
// test size checking
Array output(3);
ASSERT_THROWS(cusp::blas::xmy(x, y, output), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestXmy)
template <class MemorySpace>
void TestCopy(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
{
Array y(4, -1);
cusp::blas::copy(x, y);
ASSERT_EQUAL(x, y);
}
{
Array y(4, -1);
View view_y(y);
cusp::blas::copy(view_x, view_y);
ASSERT_EQUAL(x, y);
}
// test size checking
cusp::array1d<float, MemorySpace> w(3);
ASSERT_THROWS(cusp::blas::copy(w, x), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestCopy)
// dot: real inner product, via containers and views.
template <class MemorySpace>
void TestDot(void)
{
    typedef typename cusp::array1d<float, MemorySpace> Array;
    typedef typename cusp::array1d<float, MemorySpace>::view View;

    Array x(6);
    Array y(6);
    x[0] =  7.0f;  y[0] =  0.0f;
    x[1] =  5.0f;  y[1] = -2.0f;
    x[2] =  4.0f;  y[2] =  0.0f;
    x[3] = -3.0f;  y[3] =  5.0f;
    x[4] =  0.0f;  y[4] =  6.0f;
    x[5] =  4.0f;  y[5] =  1.0f;

    // 0 - 10 + 0 - 15 + 0 + 4 = -21
    ASSERT_EQUAL(cusp::blas::dot(x, y), -21.0f);
    ASSERT_EQUAL(cusp::blas::dot(View(x), View(y)), -21.0f);

    // test size checking
    cusp::array1d<float, MemorySpace> w(3);
    ASSERT_THROWS(cusp::blas::dot(x, w), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestDot)
template <class MemorySpace>
void TestDotc(void)
{
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace> Array;
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace>::view View;
Array x(6);
Array y(6);
x[0] = cusp::complex<float>( 7.0f, 0.0f);
y[0] = cusp::complex<float>( 0.0f, 0.0f);
x[1] = cusp::complex<float>( 5.0f, 0.0f);
y[1] = cusp::complex<float>(-2.0f, 0.0f);
x[2] = cusp::complex<float>( 4.0f, 0.0f);
y[2] = cusp::complex<float>( 0.0f, 0.0f);
x[3] = cusp::complex<float>(-3.0f, 0.0f);
y[3] = cusp::complex<float>( 5.0f, 0.0f);
x[4] = cusp::complex<float>( 0.0f, 0.0f);
y[4] = cusp::complex<float>( 6.0f, 0.0f);
x[5] = cusp::complex<float>( 4.0f, 0.0f);
y[5] = cusp::complex<float>( 1.0f, 0.0f);
ASSERT_EQUAL(cusp::blas::dotc(x, y), -21.0f);
ASSERT_EQUAL(cusp::blas::dotc(View(x), View(y)), -21.0f);
// test size checking
Array w(3);
ASSERT_THROWS(cusp::blas::dotc(x, w), cusp::invalid_input_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestDotc)
template <class MemorySpace>
void TestFill(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(4);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
cusp::blas::fill(x, 2.0f);
ASSERT_EQUAL(x[0], 2.0);
ASSERT_EQUAL(x[1], 2.0);
ASSERT_EQUAL(x[2], 2.0);
ASSERT_EQUAL(x[3], 2.0);
cusp::blas::fill(view_x, 1.0f);
ASSERT_EQUAL(x[0], 1.0);
ASSERT_EQUAL(x[1], 1.0);
ASSERT_EQUAL(x[2], 1.0);
ASSERT_EQUAL(x[3], 1.0);
}
DECLARE_HOST_DEVICE_UNITTEST(TestFill)
template <class MemorySpace>
void TestNrm1(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrm1(x), 20.0f);
ASSERT_EQUAL(cusp::blas::nrm1(view_x), 20.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestNrm1)
template <class MemorySpace>
void TestComplexNrm1(void)
{
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace> Array;
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrm1(x), 20.0f);
ASSERT_EQUAL(cusp::blas::nrm1(view_x), 20.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestComplexNrm1)
template <class MemorySpace>
void TestNrm2(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrm2(x), 10.0f);
ASSERT_EQUAL(cusp::blas::nrm2(view_x), 10.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestNrm2)
template <class MemorySpace>
void TestComplexNrm2(void)
{
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace> Array;
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrm2(x), 10.0f);
ASSERT_EQUAL(cusp::blas::nrm2(view_x), 10.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestComplexNrm2)
template <class MemorySpace>
void TestNrmmax(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array;
typedef typename cusp::array1d<float, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 0.0f;
x[1] = -5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 7.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrmmax(x), 7.0f);
ASSERT_EQUAL(cusp::blas::nrmmax(view_x), 7.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestNrmmax)
template <class MemorySpace>
void TestComplexNrmmax(void)
{
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace> Array;
typedef typename cusp::array1d<cusp::complex<float>, MemorySpace>::view View;
Array x(6);
View view_x(x);
x[0] = 7.0f;
x[1] = -5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 1.0f;
ASSERT_EQUAL(cusp::blas::nrmmax(x), 7.0f);
ASSERT_EQUAL(cusp::blas::nrmmax(view_x), 7.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestComplexNrmmax)
template <class MemorySpace>
void TestScal(void)
{
typedef typename cusp::array1d<float, MemorySpace> Array1d;
typedef typename cusp::array1d<float, MemorySpace>::view View1d;
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array2d<float, MemorySpace>::view View2d;
Array1d x(6);
View1d view_x(x);
x[0] = 7.0f;
x[1] = 5.0f;
x[2] = 4.0f;
x[3] = -3.0f;
x[4] = 0.0f;
x[5] = 4.0f;
cusp::blas::scal(x, 4.0f);
ASSERT_EQUAL(x[0], 28.0);
ASSERT_EQUAL(x[1], 20.0);
ASSERT_EQUAL(x[2], 16.0);
ASSERT_EQUAL(x[3], -12.0);
ASSERT_EQUAL(x[4], 0.0);
ASSERT_EQUAL(x[5], 16.0);
cusp::blas::scal(view_x, 2.0f);
ASSERT_EQUAL(x[0], 56.0);
ASSERT_EQUAL(x[1], 40.0);
ASSERT_EQUAL(x[2], 32.0);
ASSERT_EQUAL(x[3], -24.0);
ASSERT_EQUAL(x[4], 0.0);
ASSERT_EQUAL(x[5], 32.0);
Array2d X(6,1);
View2d view_X(X);
X(0,0) = 7.0f;
X(1,0) = 5.0f;
X(2,0) = 4.0f;
X(3,0) = -3.0f;
X(4,0) = 0.0f;
X(5,0) = 4.0f;
cusp::blas::scal(X.column(0), 4.0f);
ASSERT_EQUAL(X.column(0)[0], 28.0);
ASSERT_EQUAL(X.column(0)[1], 20.0);
ASSERT_EQUAL(X.column(0)[2], 16.0);
ASSERT_EQUAL(X.column(0)[3], -12.0);
ASSERT_EQUAL(X.column(0)[4], 0.0);
ASSERT_EQUAL(X.column(0)[5], 16.0);
cusp::blas::scal(view_X.column(0), 2.0f);
ASSERT_EQUAL(X.column(0)[0], 56.0);
ASSERT_EQUAL(X.column(0)[1], 40.0);
ASSERT_EQUAL(X.column(0)[2], 32.0);
ASSERT_EQUAL(X.column(0)[3], -24.0);
ASSERT_EQUAL(X.column(0)[4], 0.0);
ASSERT_EQUAL(X.column(0)[5], 32.0);
}
DECLARE_HOST_DEVICE_UNITTEST(TestScal)
template <class MemorySpace>
void TestGemv(void)
{
typedef cusp::array2d<float, MemorySpace> Array2d;
typedef cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::gemv(A, x, x), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestGemv)
template <class MemorySpace>
void TestGer(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::ger(x, x, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestGer)
template <class MemorySpace>
void TestSymv(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::symv(A, x, x), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestSymv)
template <class MemorySpace>
void TestSyr(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::syr(x, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestSyr)
template <class MemorySpace>
void TestTrmv(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::trmv(A, x), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestTrmv)
template <class MemorySpace>
void TestTrsv(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array2d;
typedef typename cusp::array1d<float, MemorySpace> Array1d;
Array2d A(6,6);
Array1d x(6);
ASSERT_THROWS(cusp::blas::trsv(A, x), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestTrsv)
// gemm on the host backend: an all-ones 6x6 matrix times itself yields an
// all-6 matrix.
template <class MemorySpace>
void TestGemm(void)
{
    typedef typename cusp::array2d<float, MemorySpace> Array;
    Array A(6,6,1);
    Array B(6,6,0);
    cusp::blas::gemm(A, A, B);
    Array C(6,6,6);
    ASSERT_EQUAL(B.values, C.values);
}
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// The plain CUDA backend has no gemm implementation; expect a throw there
// instead of a numerical result.
template<>
void TestGemm<cusp::system::cuda::detail::par_t>(void)
{
    typedef typename cusp::array2d<float, cusp::device_memory> Array;
    Array A(6,6);
    ASSERT_THROWS(cusp::blas::gemm(A, A, A), cusp::not_implemented_exception);
}
#endif
DECLARE_HOST_DEVICE_UNITTEST(TestGemm)
template <class MemorySpace>
void TestSymm(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array;
Array A(6,6);
ASSERT_THROWS(cusp::blas::symm(A, A, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestSymm)
template <class MemorySpace>
void TestSyrk(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array;
Array A(6,6);
ASSERT_THROWS(cusp::blas::syrk(A, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestSyrk)
template <class MemorySpace>
void TestSyr2k(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array;
Array A(6,6);
ASSERT_THROWS(cusp::blas::syr2k(A, A, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestSyr2k)
template <class MemorySpace>
void TestTrmm(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array;
Array A(6,6);
ASSERT_THROWS(cusp::blas::trmm(A, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestTrmm)
template <class MemorySpace>
void TestTrsm(void)
{
typedef typename cusp::array2d<float, MemorySpace> Array;
Array A(6,6);
ASSERT_THROWS(cusp::blas::trsm(A, A), cusp::not_implemented_exception);
}
DECLARE_HOST_DEVICE_UNITTEST(TestTrsm)
/*
 * Custom-system dispatch stubs: each overload accepts the user-defined
 * `my_system` execution policy, records that dispatch reached it
 * (validate_dispatch), and returns a dummy value. These exist so the
 * dispatch tests can verify that cusp::blas routines forward to
 * user-specialized execution policies instead of the built-in backends.
 */
template <typename Array>
int amax(my_system& system, const Array& x)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array>
typename cusp::norm_type<typename Array::value_type>::type
asum(my_system& system, const Array& x)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array1, typename Array2, typename ScalarType>
void axpy(my_system& system, const Array1& x, Array2& y, const ScalarType alpha)
{
    system.validate_dispatch();
    return;
}
template <typename Array1, typename Array2, typename Array3,
          typename ScalarType1, typename ScalarType2>
void axpby(my_system& system, const Array1& x, const Array2& y, Array3& output,
           ScalarType1 alpha, ScalarType2 beta)
{
    system.validate_dispatch();
    return;
}
template <typename Array1, typename Array2, typename Array3, typename Array4,
          typename ScalarType1, typename ScalarType2, typename ScalarType3>
void axpbypcz(my_system& system, const Array1& x, const Array2& y, const Array3& z, Array4& output,
              ScalarType1 alpha, ScalarType2 beta, ScalarType3 gamma)
{
    system.validate_dispatch();
    return;
}
template <typename Array1, typename Array2, typename Array3>
void xmy(my_system& system, const Array1& x, const Array2& y, Array3& output)
{
    system.validate_dispatch();
    return;
}
template <typename Array1, typename Array2>
void copy(my_system& system, const Array1& x, Array2& y)
{
    system.validate_dispatch();
    return;
}
template <typename Array1, typename Array2>
typename Array1::value_type
dot(my_system& system, const Array1& x, const Array2& y)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array1, typename Array2>
typename Array1::value_type
dotc(my_system& system, const Array1& x, const Array2& y)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array, typename ScalarType>
void fill(my_system& system, Array& array, const ScalarType alpha)
{
    system.validate_dispatch();
    return;
}
template <typename Array>
typename cusp::norm_type<typename Array::value_type>::type
nrm1(my_system& system, const Array& array)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array>
typename cusp::norm_type<typename Array::value_type>::type
nrm2(my_system& system, const Array& array)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array>
typename cusp::norm_type<typename Array::value_type>::type
nrmmax(my_system& system, const Array& array)
{
    system.validate_dispatch();
    return 0;
}
template <typename Array, typename ScalarType>
void scal(my_system& system, Array& x, const ScalarType alpha)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array1d1, typename Array1d2>
void gemv(my_system& system, const Array2d1& A, const Array1d1& x, Array1d2& y, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template<typename Array1d1, typename Array1d2, typename Array2d1>
void ger(my_system& system, const Array1d1& x, const Array1d2& y, Array2d1& A, float alpha = 1.0)
{
system.validate_dispatch();
return;
}
template <typename Array2d1, typename Array1d1, typename Array1d2>
void symv(my_system& system, const Array2d1& A, const Array1d1& x, Array1d2& y, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template <typename Array1d, typename Array2d>
void syr(my_system& system, const Array1d& x, Array2d& A, float alpha = 1.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d, typename Array1d>
void trmv(my_system& system, const Array2d& A, Array1d& x)
{
system.validate_dispatch();
return;
}
template<typename Array2d, typename Array1d>
void trsv(my_system& system, const Array2d& A, Array1d& x)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2, typename Array2d3>
void gemm(my_system& system, const Array2d1& A, const Array2d2& B, Array2d3& C, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2, typename Array2d3>
void symm(my_system& system, const Array2d1& A, const Array2d2& B, Array2d3& C, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2>
void syrk(my_system& system, const Array2d1& A, Array2d2& B, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2, typename Array2d3>
void syr2k(my_system& system, const Array2d1& A, const Array2d2& B, Array2d3& C, float alpha = 1.0, float beta = 0.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2>
void trmm(my_system& system, const Array2d1& A, Array2d2& B, float alpha = 1.0)
{
system.validate_dispatch();
return;
}
template<typename Array2d1, typename Array2d2>
void trsm(my_system& system, const Array2d1& A, Array2d2& B, float alpha = 1.0)
{
system.validate_dispatch();
return;
}
// Exercises execution-policy dispatch: every cusp::blas entry point is
// called with the custom `my_system` policy, which must route the call to
// the matching stub overload above. Each scope uses a fresh `sys` so one
// routine's dispatch cannot mask another's.
void TestBlasDispatch()
{
// initialize testing variables
cusp::array2d<float, cusp::device_memory> A;
cusp::array1d<float, cusp::device_memory> x;
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::amax(sys, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::asum(sys, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::axpy(sys, x, x, 1);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::axpby(sys, x, x, x, 1, 1);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::axpbypcz(sys, x, x, x, x, 1, 1, 1);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::xmy(sys, x, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::copy(sys, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::dot(sys, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::dotc(sys, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::fill(sys, x, 1);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::nrm1(sys, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::nrm2(sys, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::nrmmax(sys, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::scal(sys, x, 0);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::gemv(sys, A, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::ger(sys, x, x, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::symv(sys, A, x, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::syr(sys, x, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::trmv(sys, A, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::trsv(sys, A, x);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::gemm(sys, A, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::symm(sys, A, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::syrk(sys, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::syr2k(sys, A, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::trmm(sys, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
{
my_system sys(0);
// call with explicit dispatching
cusp::blas::trsm(sys, A, A);
// check if dispatch policy was used
ASSERT_EQUAL(true, sys.is_valid());
}
}
DECLARE_UNITTEST(TestBlasDispatch);
#include "kernels.h"
using namespace cub;
/**
@brief: transform_0213
Split the attention heads and reshape input
during backward progress of encoder self-attention
@thread
gridDim.x = batch_size
gridDim.y = seq_len
blockDim.x = min(hidden_dim, MAX_THREADS)
@param
input: [batch_size, seq_len, hidden_dim]
output: [batch_size, nhead, seq_len, head_dim]
batch_size: the size of the current batch
seq_len: the sequence length of the current batch
hidden_dim: dim of the hidden tensor
nhead: number of attention heads
NOTE: hidden_dim and head_dim are passed in float4 units -- the host
launchers divide the element counts by 4 (float) or 8 (half) so that each
thread copies one 16-byte float4 chunk per loop iteration.
*/
template <typename T>
__global__ void transform_0213(T *output, const T *input, int hidden_dim,
int head_dim);
template <>
__global__ void transform_0213<float>(float *output, const float *input,
int hidden_dim, int head_dim) {
// One block per (batch element, token); threads cover the hidden dim.
int batch_id = blockIdx.x;
int token_id = blockIdx.y;
int seq_len = gridDim.y;
int nhead = hidden_dim / head_dim;
// [b, s, h]
int src_offset = flat_3dim(batch_id, token_id, 0, seq_len, hidden_dim);
// [b, nh, s, ad]
int trg_offset =
flat_4dim(batch_id, 0, token_id, 0, nhead, seq_len, head_dim);
const float4 *input4 = reinterpret_cast<const float4 *>(input);
float4 *res4 = reinterpret_cast<float4 *>(output);
float4 vinput4;
// Stride over this token's (vectorized) hidden dimension, scattering each
// float4 chunk into the head-major output layout.
for (std::size_t i = threadIdx.x; i < hidden_dim; i += blockDim.x) {
vinput4 = input4[src_offset + i];
int head_id = i / head_dim;
int dim_id = i % head_dim;
int cur_trg_offset = flat_3dim(head_id, 0, dim_id, seq_len, head_dim);
res4[trg_offset + cur_trg_offset] = vinput4;
}
}
// The __half specialization is intentionally identical to the float one:
// both move opaque 16-byte float4 chunks, so the element type never appears
// in the body (for half, one float4 carries 8 values and the launcher
// divides the dimensions by 8 instead of 4).
template <>
__global__ void transform_0213<__half>(__half *output, const __half *input,
int hidden_dim, int head_dim) {
int batch_id = blockIdx.x;
int token_id = blockIdx.y;
int seq_len = gridDim.y;
int nhead = hidden_dim / head_dim;
// [b, s, h]
int src_offset = flat_3dim(batch_id, token_id, 0, seq_len, hidden_dim);
// [b, nh, s, ad]
int trg_offset =
flat_4dim(batch_id, 0, token_id, 0, nhead, seq_len, head_dim);
const float4 *input4 = reinterpret_cast<const float4 *>(input);
float4 *res4 = reinterpret_cast<float4 *>(output);
float4 vinput4;
for (std::size_t i = threadIdx.x; i < hidden_dim; i += blockDim.x) {
vinput4 = input4[src_offset + i];
int head_id = i / head_dim;
int dim_id = i % head_dim;
int cur_trg_offset = flat_3dim(head_id, 0, dim_id, seq_len, head_dim);
res4[trg_offset + cur_trg_offset] = vinput4;
}
}
// [b, s, h] -> [b, nh, s, ad]
template <>
void launch_transform_0213<float>(float *output, const float *input,
                                  int batch_size, int seq_len, int hidden_dim,
                                  int nhead, cudaStream_t stream) {
  // The kernel addresses memory in float4 units (4 floats per chunk), so
  // convert the hidden dimension before deriving the per-head size.
  const int vec_hidden_dim = hidden_dim / 4;
  const int vec_head_dim = vec_hidden_dim / nhead;
  const dim3 grid(batch_size, seq_len);
  const dim3 block(min(vec_hidden_dim, MAX_THREADS));
  transform_0213<float><<<grid, block, 0, stream>>>(output, input,
                                                    vec_hidden_dim,
                                                    vec_head_dim);
}
// Half-precision variant: a float4 holds 8 halves, so divide by 8.
template <>
void launch_transform_0213<__half>(__half *output, const __half *input,
                                   int batch_size, int seq_len, int hidden_dim,
                                   int nhead, cudaStream_t stream) {
  const int vec_hidden_dim = hidden_dim / 8;
  const int vec_head_dim = vec_hidden_dim / nhead;
  const dim3 grid(batch_size, seq_len);
  const dim3 block(min(vec_hidden_dim, MAX_THREADS));
  transform_0213<__half><<<grid, block, 0, stream>>>(output, input,
                                                     vec_hidden_dim,
                                                     vec_head_dim);
}
/**
@brief: bias_add_transform_20314
Add bias to input, transform from
[0, 1, 2, 3, 4] to [2, 0, 3, 1, 4]
@thread
gridDim.x = dim_0
gridDim.y = dim_1
gridDim.z = dim_2
blockDim.x = min(dim_3 * dim_4, MAX_THREADS)
@param
input: [dim_0, dim_1, dim_2, dim_3, dim_4]
bias: [dim_2, dim_3, dim_4]
output: [dim_2, dim_0, dim_3, dim_1, dim_4]
NOTE: dim_4 is given in float4 units (the launchers divide the element
count by 4 for float, 8 for half).
*/
template <typename T>
__global__ void bias_add_transform_20314(T *output, const T *input,
const T *bias, int dim_3, int dim_4);
template <>
__global__ void bias_add_transform_20314<float>(float *output,
const float *input,
const float *bias, int dim_3,
int dim_4) {
// One block per (dim_0, dim_1, dim_2) coordinate; threads cover dim_3*dim_4.
int id0 = blockIdx.x;
int id1 = blockIdx.y;
int id2 = blockIdx.z;
int dim_0 = gridDim.x;
int dim_1 = gridDim.y;
int dim_2 = gridDim.z;
int dim_34 = dim_3 * dim_4;
int src_offset = flat_4dim(id0, id1, id2, 0, dim_1, dim_2, dim_34);
int trg_offset = flat_5dim(id2, id0, 0, id1, 0, dim_0, dim_3, dim_1, dim_4);
int bias_offset = flat_2dim(id2, 0, dim_34);
const float4 *qkv4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *res4 = reinterpret_cast<float4 *>(output);
float4 vqkv4;
float4 vbias4;
float4 vres4;
// Stride over the flattened (dim_3, dim_4) plane: load a float4 of input
// and bias, add componentwise, and scatter into the permuted layout.
for (std::size_t i = threadIdx.x; i < dim_34; i += blockDim.x) {
vqkv4 = qkv4[src_offset + i];
vbias4 = bias4[bias_offset + i];
vres4.x = vqkv4.x + vbias4.x;
vres4.y = vqkv4.y + vbias4.y;
vres4.z = vqkv4.z + vbias4.z;
vres4.w = vqkv4.w + vbias4.w;
int id3 = i / dim_4;
int id4 = i % dim_4;
int cur_trg_offset = flat_3dim(id3, 0, id4, dim_1, dim_4);
res4[trg_offset + cur_trg_offset] = vres4;
}
}
// Half-precision specialization: data still moves as 16-byte float4 chunks
// (8 halves each); the addition is done by viewing each chunk as four
// __half2 pairs and using the packed __hadd2 intrinsic.
template <>
__global__ void bias_add_transform_20314<__half>(__half *output,
const __half *input,
const __half *bias, int dim_3,
int dim_4) {
int id0 = blockIdx.x;
int id1 = blockIdx.y;
int id2 = blockIdx.z;
int dim_0 = gridDim.x;
int dim_1 = gridDim.y;
int dim_2 = gridDim.z;
int dim_34 = dim_3 * dim_4;
int src_offset = flat_4dim(id0, id1, id2, 0, dim_1, dim_2, dim_34);
int trg_offset = flat_5dim(id2, id0, 0, id1, 0, dim_0, dim_3, dim_1, dim_4);
int bias_offset = flat_2dim(id2, 0, dim_34);
const float4 *qkv4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *res4 = reinterpret_cast<float4 *>(output);
float4 vqkv4;
float4 vbias4;
float4 vres4;
// Aliased views of the local float4 registers as arrays of __half2.
__half2 *h2_qkv = reinterpret_cast<__half2 *>(&vqkv4);
__half2 *h2_bias = reinterpret_cast<__half2 *>(&vbias4);
__half2 *h2_res = reinterpret_cast<__half2 *>(&vres4);
for (std::size_t i = threadIdx.x; i < dim_34; i += blockDim.x) {
vqkv4 = qkv4[src_offset + i];
vbias4 = bias4[bias_offset + i];
h2_res[0] = __hadd2(h2_qkv[0], h2_bias[0]);
h2_res[1] = __hadd2(h2_qkv[1], h2_bias[1]);
h2_res[2] = __hadd2(h2_qkv[2], h2_bias[2]);
h2_res[3] = __hadd2(h2_qkv[3], h2_bias[3]);
int id3 = i / dim_4;
int id4 = i % dim_4;
int cur_trg_offset = flat_3dim(id3, 0, id4, dim_1, dim_4);
res4[trg_offset + cur_trg_offset] = vres4;
}
}
// [b, s, 3, h] -> [3, b, nh, s, ad]
template <>
void launch_bias_add_transform_20314<float>(float *output, const float *input,
                                            const float *bias, int dim_0,
                                            int dim_1, int dim_2, int dim_3,
                                            int dim_4, cudaStream_t stream) {
  // The kernel reads and writes float4 vectors, so express the innermost
  // dimension in units of 4 floats.
  const int vec_dim_4 = dim_4 / 4;
  const dim3 grid(dim_0, dim_1, dim_2);
  const dim3 block(min(dim_3 * vec_dim_4, MAX_THREADS));
  bias_add_transform_20314<float>
      <<<grid, block, 0, stream>>>(output, input, bias, dim_3, vec_dim_4);
}
// Half-precision variant: one float4 carries 8 halves, so divide by 8.
template <>
void launch_bias_add_transform_20314<__half>(__half *output,
                                             const __half *input,
                                             const __half *bias, int dim_0,
                                             int dim_1, int dim_2, int dim_3,
                                             int dim_4, cudaStream_t stream) {
  const int vec_dim_4 = dim_4 / 8;
  const dim3 grid(dim_0, dim_1, dim_2);
  const dim3 block(min(dim_3 * vec_dim_4, MAX_THREADS));
  bias_add_transform_20314<__half>
      <<<grid, block, 0, stream>>>(output, input, bias, dim_3, vec_dim_4);
}
/**
@brief: transform4d_0213
Reshape the input matrix to merge the heads
@thread
gridDim.x = (num_all + max_block_thread - 1) / max_block_thread
blockDim.x = max_block_thread
@param
input: [trans_count, batch_size, nhead, seq_len, head_dim]
output: [batch_size, seq_len, trans_count, nhead, head_dim]
batch_size: the size of the current batch
seq_len: the sequence length of the current batch
hidden_dim: dim of the hidden tensor
nhead: number of attention heads
trans_count: 1 or 3, the count of matrices that need to be transformed
NOTE: head_dim and num_all are in float4 units (one thread copies one
16-byte chunk), as set up by the launchers below.
*/
template <typename T>
__global__ void transform4d_0213(T *output, const T *input, int batch_size,
int seq_len, int trans_count, int nhead,
int head_dim, int num_all) {
// One thread per float4 element of the whole tensor.
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= num_all) {
return;
}
int trans_id, batch_id, head_id, token_id, dim_id;
decompose_5dim(offset, batch_size, nhead, seq_len, head_dim, &trans_id,
&batch_id, &head_id, &token_id, &dim_id);
// [b, s, tc, nh, ad]
int trg_offset = flat_5dim(batch_id, token_id, trans_id, head_id, dim_id,
seq_len, trans_count, nhead, head_dim);
const float4 *input4 = reinterpret_cast<const float4 *>(input);
float4 *res4 = reinterpret_cast<float4 *>(output);
res4[trg_offset] = input4[offset];
}
// [tc, b, nh, s, ad] -> [b, s, tc, nh, ad]
template <>
void launch_transform4d_0213<float>(float *output, const float *input,
                                    int batch_size, int seq_len, int hidden_dim,
                                    int nhead, int trans_count,
                                    cudaStream_t stream) {
  // Count elements in float4 units: one thread copies one 16-byte chunk.
  const int vec_hidden_dim = hidden_dim / 4;
  const int vec_head_dim = vec_hidden_dim / nhead;
  const int total = batch_size * seq_len * trans_count * vec_hidden_dim;
  const int grid = (total + MAX_THREADS - 1) / MAX_THREADS;
  transform4d_0213<float><<<grid, MAX_THREADS, 0, stream>>>(
      output, input, batch_size, seq_len, trans_count, nhead, vec_head_dim,
      total);
}
// Half-precision variant: a float4 packs 8 halves, so divide by 8.
template <>
void launch_transform4d_0213<__half>(__half *output, const __half *input,
                                     int batch_size, int seq_len,
                                     int hidden_dim, int nhead, int trans_count,
                                     cudaStream_t stream) {
  const int vec_hidden_dim = hidden_dim / 8;
  const int vec_head_dim = vec_hidden_dim / nhead;
  const int total = batch_size * seq_len * trans_count * vec_hidden_dim;
  const int grid = (total + MAX_THREADS - 1) / MAX_THREADS;
  transform4d_0213<__half><<<grid, MAX_THREADS, 0, stream>>>(
      output, input, batch_size, seq_len, trans_count, nhead, vec_head_dim,
      total);
}
// Plain-data argument bundle passed by value to LSTM::init_task:
// the per-GPU DNN handle plus this partition's local problem sizes.
struct LSTMInitParams {
DnnHandle handle;
int batchSize, inputSize, outputSize;
};
// Appends an LSTM layer to the model. `x` is the [input, batch, seq] input,
// `hx`/`cx` the initial hidden and cell states; returns the layer's three
// output tensors (y, hy, cy) bundled as an LSTMTensors.
LSTMTensors RnnModel::add_lstm_node(Tensor x, Tensor hx, Tensor cx,
                                    ParallelConfig pc, SharedVariable params)
{
  // Validate tensor ranks and the fixed per-node sequence length.
  assert(x.numDim == 3);
  assert(hx.numDim == 2);
  assert(cx.numDim == 2);
  assert(x.adim[2] == LSTM_PER_NODE_LENGTH);
  assert(x.pdim[2] == LSTM_PER_NODE_LENGTH);
  // Hidden and cell states must agree with the input's batch dimension,
  // and with each other's output dimension.
  int batchSize = x.adim[1];
  assert(hx.adim[1] == batchSize);
  assert(cx.adim[1] == batchSize);
  int inputSize = x.adim[0];
  int outputSize = hx.adim[0];
  assert(cx.adim[0] == outputSize);
  LSTM* lstm = new LSTM(config, x, hx, cx, batchSize,
                        inputSize, outputSize, pc, params);
  layers.push_back(lstm);
  LSTMTensors result;
  result.x = lstm->outputs[0];
  result.hx = lstm->outputs[1];
  result.cx = lstm->outputs[2];
  return result;
}
/*
output[0]: y
output[1]: hy
output[2]: cy
*/
LSTM::LSTM(RnnConfig config, Tensor x, Tensor hx, Tensor cx,
int _batch_size, int _input_size, int _output_size,
ParallelConfig pc, SharedVariable _params)
: RnnOp(x, hx, cx, pc, _params), batch_size(_batch_size),
input_size(_input_size), output_size(_output_size)
{
printf("LSTM node: batch(%d) input(%d) output(%d)\n",
batch_size, input_size, output_size);
Context ctx = config.lg_ctx;
HighLevelRuntime* runtime = config.lg_hlr;
// The op is partitioned along a single (batch) dimension.
assert(pc.nDims == 1);
{
Rect<1> rect(Point<1>(0), Point<1>(pc.dim[0]-1));
part_rect = rect;
}
IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect);
FieldSpace fs = config.field_space;
// --- output[0]: y, shape [output_size, batch_size, LSTM_PER_NODE_LENGTH] ---
Rect<3, coord_t> y_rect(Point<3>(0, 0, 0),
Point<3>(output_size-1, batch_size-1, LSTM_PER_NODE_LENGTH-1));
IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect);
LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs);
LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs);
// Partition y along the batch dimension into num_par_n equal pieces.
int num_par_n = part_rect.hi[0] - part_rect.lo[0] + 1;
assert(batch_size % num_par_n == 0);
int extent_n = batch_size / num_par_n;
int extent_c = output_size;
Rect<3, coord_t> extent(Point<3>(0, 0, 0), Point<3>(extent_c-1, extent_n-1, LSTM_PER_NODE_LENGTH-1));
Transform<3, 1, coord_t> trans;
trans[0][0] = 0; trans[1][0] = extent_n; trans[2][0] = 0;
IndexPartition y_ip =
runtime->create_partition_by_restriction(ctx, y_is, part_is, trans, extent);
assert(runtime->is_index_partition_disjoint(ctx, y_ip));
assert(runtime->is_index_partition_complete(ctx, y_ip));
LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip);
LogicalPartition y_grad_lp = runtime->get_logical_partition(ctx, y_grad_lr, y_ip);
outputs[0].region = y_lr;
outputs[0].region_grad = y_grad_lr;
outputs[0].partition = y_lp;
outputs[0].partition_grad = y_grad_lp;
outputs[0].numDim = 3;
outputs[0].adim[0] = output_size;
outputs[0].adim[1] = batch_size;
outputs[0].adim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].pdim[0] = extent_c;
outputs[0].pdim[1] = extent_n;
outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH;
// --- output[1]: hy, shape [output_size, batch_size], same batch split ---
Rect<2, coord_t> hy_rect(Point<2>(0, 0),
Point<2>(output_size-1, batch_size-1));
IndexSpaceT<2> hy_is = runtime->create_index_space(ctx, hy_rect);
LogicalRegion hy_lr = runtime->create_logical_region(ctx, hy_is, fs);
LogicalRegion hy_grad_lr = runtime->create_logical_region(ctx, hy_is, fs);
Rect<2, coord_t> hy_ext(Point<2>(0, 0), Point<2>(extent_c-1, extent_n-1));
Transform<2, 1, coord_t> hy_trans;
hy_trans[0][0] = 0; hy_trans[1][0] = extent_n;
IndexPartition hy_ip =
runtime->create_partition_by_restriction(ctx, hy_is, part_is, hy_trans, hy_ext);
assert(runtime->is_index_partition_disjoint(ctx, hy_ip));
assert(runtime->is_index_partition_complete(ctx, hy_ip));
LogicalPartition hy_lp = runtime->get_logical_partition(ctx, hy_lr, hy_ip);
LogicalPartition hy_grad_lp = runtime->get_logical_partition(ctx, hy_grad_lr, hy_ip);
outputs[1].region = hy_lr;
outputs[1].region_grad = hy_grad_lr;
outputs[1].partition = hy_lp;
outputs[1].partition_grad = hy_grad_lp;
outputs[1].numDim = 2;
outputs[1].adim[0] = output_size;
outputs[1].adim[1] = batch_size;
outputs[1].pdim[0] = extent_c;
outputs[1].pdim[1] = extent_n;
// --- output[2]: cy, same index space and partitioning as hy, but with
// its own regions; start from a copy of outputs[1] and swap the handles ---
LogicalRegion cy_lr = runtime->create_logical_region(ctx, hy_is, fs);
LogicalRegion cy_grad_lr = runtime->create_logical_region(ctx, hy_is, fs);
LogicalPartition cy_lp = runtime->get_logical_partition(ctx, cy_lr, hy_ip);
LogicalPartition cy_grad_lp = runtime->get_logical_partition(ctx, cy_grad_lr, hy_ip);
outputs[2] = outputs[1];
outputs[2].region = cy_lr;
outputs[2].region_grad = cy_grad_lr;
outputs[2].partition = cy_lp;
outputs[2].partition_grad = cy_grad_lp;
}
/*
regions[0] (I): x
regions[1] (I): hx
regions[2] (I): cx
regions[3] (I): w
regions[4] (O): y
regions[5] (O): hy
regions[6] (O): cy
*/
// Per-GPU initialization task: builds the cuDNN RNN descriptors, dropout
// state, reserve space, and tensor/filter descriptors for this partition,
// and returns the resulting LSTMMeta. Region layout matches LSTM::init():
// regions[0..2] = x/hx/cx (RO), regions[3] = packed weights (RO),
// regions[4..6] = y/hy/cy (WO).
OpMeta* LSTM::init_task(const Task *task,
                        const std::vector<PhysicalRegion> &regions,
                        Context ctx, Runtime *runtime)
{
  const int numLayers = 1;
  const int seqLength = LSTM_PER_NODE_LENGTH;
  const float dropoutRate = 0.2f;
  assert(regions.size() == 7);
  assert(task->regions.size() == 7);
  const LSTMInitParams* lstm = (LSTMInitParams*) task->args;
  LSTMMeta* m = new LSTMMeta(lstm->handle);
#ifndef DISABLE_COMPUTATION
  // regions[3] holds the packed RNN weights; its volume is checked below
  // against the size cuDNN reports for this descriptor configuration.
  // (Queried inside the guard so DISABLE_COMPUTATION builds don't compute
  // an unused value.)
  Rect<1> para_rect =
    runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  checkCUDNN(cudnnCreateRNNDescriptor(&m->rnnDesc));
  checkCUDNN(cudnnCreateDropoutDescriptor(&m->dropoutDesc));
  // Dropout state lives on the device for the lifetime of the op.
  size_t dropoutSize;
  void *dropoutStates;
  checkCUDNN(cudnnDropoutGetStatesSize(m->handle.dnn, &dropoutSize));
  checkCUDA(cudaMalloc(&dropoutStates, dropoutSize));
  checkCUDNN(cudnnSetDropoutDescriptor(m->dropoutDesc, m->handle.dnn, dropoutRate,
                                       dropoutStates, dropoutSize, 10/*seed*/));
  checkCUDNN(cudnnSetRNNDescriptor_v5(m->rnnDesc, lstm->outputSize, numLayers, m->dropoutDesc,
                                      CUDNN_LINEAR_INPUT, CUDNN_UNIDIRECTIONAL, CUDNN_LSTM,
                                      CUDNN_DATA_FLOAT));
  // One input descriptor per time step: [batch, input, 1].
  for (int i = 0; i < seqLength; i++) {
    checkCUDNN(cudnnCreateTensorDescriptor(&m->xDescs[i]));
    int dims[] = {lstm->batchSize, lstm->inputSize, 1};
    int strides[] = {dims[1] * dims[2], dims[2], 1};
    checkCUDNN(cudnnSetTensorNdDescriptor(m->xDescs[i], CUDNN_DATA_FLOAT,
                                          3, dims, strides));
  }
  size_t workSpaceSize;
  checkCUDNN(cudnnGetRNNWorkspaceSize(m->handle.dnn, m->rnnDesc, seqLength,
                                      m->xDescs, &workSpaceSize));
  // Assert that we have enough work space
  assert(workSpaceSize <= m->handle.workSpaceSize);
  checkCUDNN(cudnnGetRNNTrainingReserveSize(m->handle.dnn, m->rnnDesc, seqLength,
                                            m->xDescs, &m->reserveSpaceSize));
  checkCUDA(cudaMalloc(&m->reserveSpace, m->reserveSpaceSize));
  // Verify that the weight region matches cuDNN's packed parameter size.
  size_t paramsSize;
  checkCUDNN(cudnnGetRNNParamsSize(m->handle.dnn, m->rnnDesc, m->xDescs[0],
                                   &paramsSize, CUDNN_DATA_FLOAT));
  assert(paramsSize == sizeof(float) * para_rect.volume());
  {
    int dims[] = {(int)paramsSize, 1, 1};
    checkCUDNN(cudnnCreateFilterDescriptor(&m->wDesc));
    checkCUDNN(cudnnSetFilterNdDescriptor(m->wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                                          3, dims));
  }
  {
    // Hidden/cell state descriptors: [numLayers, batch, output].
    checkCUDNN(cudnnCreateTensorDescriptor(&m->hxDesc));
    checkCUDNN(cudnnCreateTensorDescriptor(&m->cxDesc));
    checkCUDNN(cudnnCreateTensorDescriptor(&m->hyDesc));
    checkCUDNN(cudnnCreateTensorDescriptor(&m->cyDesc));
    int dims[] = {numLayers, lstm->batchSize, lstm->outputSize};
    int strides[] = {dims[1] * dims[2], dims[2], 1};
    checkCUDNN(cudnnSetTensorNdDescriptor(m->hxDesc, CUDNN_DATA_FLOAT,
                                          3, dims, strides));
    checkCUDNN(cudnnSetTensorNdDescriptor(m->cxDesc, CUDNN_DATA_FLOAT,
                                          3, dims, strides));
    checkCUDNN(cudnnSetTensorNdDescriptor(m->hyDesc, CUDNN_DATA_FLOAT,
                                          3, dims, strides));
    checkCUDNN(cudnnSetTensorNdDescriptor(m->cyDesc, CUDNN_DATA_FLOAT,
                                          3, dims, strides));
  }
  // One output descriptor per time step: [batch, output, 1].
  for (int i = 0; i < seqLength; i++) {
    checkCUDNN(cudnnCreateTensorDescriptor(&m->yDescs[i]));
    int dims[] = {lstm->batchSize, lstm->outputSize, 1};
    int strides[] = {dims[1] * dims[2], dims[2], 1};
    checkCUDNN(cudnnSetTensorNdDescriptor(m->yDescs[i], CUDNN_DATA_FLOAT,
                                          3, dims, strides));
  }
  m->profiling_runtime = true;
#endif
  // BUG FIX: the return was previously inside the #ifndef block, so builds
  // with DISABLE_COMPUTATION fell off the end of a non-void function
  // (undefined behavior). Always return the allocated metadata.
  return m;
}
// Launches one init_task per partition point, on that partition's GPU.
// The order region requirements are added here (x, hx, cx, params, y, hy,
// cy -> field indices 0..6) is the contract init_task relies on.
void LSTM::init(const RnnModel& model)
{
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
// Local (per-partition) sizes come from the partitioned dims (pdim).
LSTMInitParams initParams;
initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]];
initParams.batchSize = outputs[0].pdim[1];
initParams.inputSize = inputs[0].pdim[0];
initParams.outputSize = outputs[0].pdim[0];
// For now assume batch sizes equal
assert(inputs[0].pdim[1] == outputs[0].pdim[1]);
TaskLauncher launcher(LSTM_INIT_TASK_ID, TaskArgument(&initParams, sizeof(initParams)),
Predicate::TRUE_PRED, 0/*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
// add region requirements for x, hx, cx
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i, FID_DATA);
}
// regions[3]: the shared weight parameters (read-only).
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(3, FID_DATA);
// regions[4..6]: the y/hy/cy outputs (write-only).
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(outputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, outputs[i].region));
launcher.add_field(4 + i, FID_DATA);
}
// Block on the future: init_task returns the per-partition LSTMMeta.
Future f = runtime->execute_task(ctx, launcher);
meta[idx] = f.get_result<OpMeta*>();
}
}
/*
regions[0] (I): x
regions[1] (I): hx
regions[2] (I): cx
regions[3] (I): w
regions[4] (O): y
regions[5] (O): hy
regions[6] (O): cy
*/
void LSTM::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 7);
assert(task->regions.size() == 7);
const LSTMMeta* m = *((LSTMMeta**) task->args);
const AccessorRO<float, 3> acc_x(regions[0], FID_DATA);
const AccessorRO<float, 2> acc_hx(regions[1], FID_DATA);
const AccessorRO<float, 2> acc_cx(regions[2], FID_DATA);
const AccessorRO<float, 1> acc_w(regions[3], FID_DATA);
const AccessorWO<float, 3> acc_y(regions[4], FID_DATA);
const AccessorWO<float, 2> acc_hy(regions[5], FID_DATA);
const AccessorWO<float, 2> acc_cy(regions[6], FID_DATA);
Rect<3> rect_x, rect_y;
Rect<2> rect_hx, rect_cx, rect_hy, rect_cy;
Rect<1> rect_w;
rect_x = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_hx = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_cx = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_w = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_y = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
rect_hy = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
rect_cy = runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_hx.accessor.is_dense_arbitrary(rect_hx));
assert(acc_cx.accessor.is_dense_arbitrary(rect_cx));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
assert(acc_hy.accessor.is_dense_arbitrary(rect_hy));
assert(acc_cy.accessor.is_dense_arbitrary(rect_cy));
assert(rect_hx == rect_cx);
assert(rect_hx == rect_hy);
assert(rect_hx == rect_cy);
const float *x_ptr = acc_x.ptr(rect_x.lo);
const float *hx_ptr = acc_hx.ptr(rect_hx.lo);
const float *cx_ptr = acc_cx.ptr(rect_cx.lo);
const float *w_ptr = acc_w.ptr(rect_w.lo);
float *y_ptr = acc_y.ptr(rect_y.lo);
float *hy_ptr = acc_hy.ptr(rect_hy.lo);
float *cy_ptr = acc_cy.ptr(rect_cy.lo);
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
checkCUDNN(cudnnRNNForwardTraining(m->handle.dnn, m->rnnDesc,
LSTM_PER_NODE_LENGTH/*seqLength*/,
m->xDescs, x_ptr, m->hxDesc, hx_ptr,
m->cxDesc, cx_ptr, m->wDesc, w_ptr,
m->yDescs, y_ptr, m->hyDesc, hy_ptr,
m->cyDesc, cy_ptr,
m->handle.workSpace, m->handle.workSpaceSize,
m->reserveSpace, m->reserveSpaceSize));
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("LSTM forward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<3, float>(y_ptr, rect_y, "lstm_fwd:y");
#endif
#endif
}
// Launches one forward_task per partition point, passing that partition's
// LSTMMeta (produced by init). The region-requirement order (x, hx, cx,
// params, y, hy, cy -> indices 0..6) must match forward_task's layout.
void LSTM::forward(const RnnModel& model)
{
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta* mp = meta[idx];
TaskLauncher launcher(LSTM_FWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)),
Predicate::TRUE_PRED, 0/*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
// add region requirements for x, hx, cx
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i, FID_DATA);
}
// regions[3]: the shared weight parameters (read-only).
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(3, FID_DATA);
// regions[4..6]: the y/hy/cy outputs (write-only).
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(outputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, outputs[i].region));
launcher.add_field(4 + i, FID_DATA);
}
// Fire and forget: no future is consumed here.
runtime->execute_task(ctx, launcher);
}
}
/*
regions[0] (I): x
regions[1] (I): hx
regions[2] (I): cx
regions[3] (I): w
regions[4] (I): y
regions[5] (I): hy
regions[6] (I): cy
regions[7] (O): x_grad
regions[8] (O): hx_grad
regions[9] (O): cx_grad
regions[10] (I/O): w_grad
regions[11] (I): y_grad
regions[12] (I): hy_grad
regions[13] (I): cy_grad
*/
/*
 * Per-shard LSTM backward pass (runs on one GPU).
 * task->args carries a pointer to this op's LSTMMeta (cuDNN descriptors,
 * workspace and reserve-space buffers saved by the forward pass).
 * Region order: [0-6] forward tensors x/hx/cx/w/y/hy/cy (read-only),
 * [7-9] input gradients (write-only), [10] weight gradient (read-write,
 * accumulated), [11-13] incoming output gradients (read-only).
 */
void LSTM::backward_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 14);
assert(task->regions.size() == 14);
const LSTMMeta* m = *((LSTMMeta**) task->args);
// Read-only accessors for the tensors saved during the forward pass.
const AccessorRO<float, 3> acc_x(regions[0], FID_DATA);
const AccessorRO<float, 2> acc_hx(regions[1], FID_DATA);
const AccessorRO<float, 2> acc_cx(regions[2], FID_DATA);
const AccessorRO<float, 1> acc_w(regions[3], FID_DATA);
const AccessorRO<float, 3> acc_y(regions[4], FID_DATA);
const AccessorRO<float, 2> acc_hy(regions[5], FID_DATA);
const AccessorRO<float, 2> acc_cy(regions[6], FID_DATA);
// Gradients produced here. w_grad is read-write because
// cudnnRNNBackwardWeights accumulates into dw (per cuDNN docs).
const AccessorWO<float, 3> acc_x_grad(regions[7], FID_DATA);
const AccessorWO<float, 2> acc_hx_grad(regions[8], FID_DATA);
const AccessorWO<float, 2> acc_cx_grad(regions[9], FID_DATA);
const AccessorRW<float, 1> acc_w_grad(regions[10], FID_DATA);
// Incoming gradients from downstream ops.
const AccessorRO<float, 3> acc_y_grad(regions[11], FID_DATA);
const AccessorRO<float, 2> acc_hy_grad(regions[12], FID_DATA);
const AccessorRO<float, 2> acc_cy_grad(regions[13], FID_DATA);
Rect<3> rect_x, rect_y, rect_x_grad, rect_y_grad;
Rect<2> rect_hx, rect_cx, rect_hy, rect_cy,
rect_hx_grad, rect_cx_grad,
rect_hy_grad, rect_cy_grad;
Rect<1> rect_w, rect_w_grad;
// Fetch the index-space domain of every region so we can bound-check
// density and obtain base pointers below.
rect_x =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_hx =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_cx =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_w =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_y =
runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
rect_hy =
runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
rect_cy =
runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
rect_x_grad =
runtime->get_index_space_domain(ctx, task->regions[7].region.get_index_space());
rect_hx_grad =
runtime->get_index_space_domain(ctx, task->regions[8].region.get_index_space());
rect_cx_grad =
runtime->get_index_space_domain(ctx, task->regions[9].region.get_index_space());
rect_w_grad =
runtime->get_index_space_domain(ctx, task->regions[10].region.get_index_space());
rect_y_grad =
runtime->get_index_space_domain(ctx, task->regions[11].region.get_index_space());
rect_hy_grad =
runtime->get_index_space_domain(ctx, task->regions[12].region.get_index_space());
rect_cy_grad =
runtime->get_index_space_domain(ctx, task->regions[13].region.get_index_space());
// cuDNN consumes raw, densely packed buffers; verify the instances are
// dense before handing out base pointers.
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_hx.accessor.is_dense_arbitrary(rect_hx));
assert(acc_cx.accessor.is_dense_arbitrary(rect_cx));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
assert(acc_hy.accessor.is_dense_arbitrary(rect_hy));
assert(acc_cy.accessor.is_dense_arbitrary(rect_cy));
assert(acc_x_grad.accessor.is_dense_arbitrary(rect_x_grad));
assert(acc_hx_grad.accessor.is_dense_arbitrary(rect_hx_grad));
assert(acc_cx_grad.accessor.is_dense_arbitrary(rect_cx_grad));
assert(acc_w_grad.accessor.is_dense_arbitrary(rect_w_grad));
assert(acc_y_grad.accessor.is_dense_arbitrary(rect_y_grad));
assert(acc_hy_grad.accessor.is_dense_arbitrary(rect_hy_grad));
assert(acc_cy_grad.accessor.is_dense_arbitrary(rect_cy_grad));
const float *x_ptr = acc_x.ptr(rect_x.lo);
const float *hx_ptr = acc_hx.ptr(rect_hx.lo);
const float *cx_ptr = acc_cx.ptr(rect_cx.lo);
const float *w_ptr = acc_w.ptr(rect_w.lo);
const float *y_ptr = acc_y.ptr(rect_y.lo);
const float *hy_ptr = acc_hy.ptr(rect_hy.lo);
const float *cy_ptr = acc_cy.ptr(rect_cy.lo);
float *x_grad_ptr = acc_x_grad.ptr(rect_x_grad.lo);
float *hx_grad_ptr = acc_hx_grad.ptr(rect_hx_grad.lo);
float *cx_grad_ptr = acc_cx_grad.ptr(rect_cx_grad.lo);
float *w_grad_ptr = acc_w_grad.ptr(rect_w_grad.lo);
const float *y_grad_ptr = acc_y_grad.ptr(rect_y_grad.lo);
const float *hy_grad_ptr = acc_hy_grad.ptr(rect_hy_grad.lo);
const float *cy_grad_ptr = acc_cy_grad.ptr(rect_cy_grad.lo);
// NOTE(review): the profiling events are recorded on the default stream
// while the cuDNN work below runs on `stream`; the measured interval may
// not bracket the work exactly — confirm intended.
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
// NOTE(review): a fresh stream is created on every task invocation and
// never destroyed — looks like a resource leak; confirm and consider
// caching it in LSTMMeta.
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// dx/dhx/dcx from (y, dy, dhy, dcy); uses the reserve space written by
// the forward pass.
checkCUDNN(cudnnRNNBackwardData(m->handle.dnn, m->rnnDesc,
LSTM_PER_NODE_LENGTH/*seqLength*/,
m->yDescs, y_ptr, m->yDescs, y_grad_ptr,
m->hyDesc, hy_grad_ptr, m->cyDesc, cy_grad_ptr,
m->wDesc, w_ptr, m->hxDesc, hx_ptr,
m->cxDesc, cx_ptr, m->xDescs, x_grad_ptr,
m->hxDesc, hx_grad_ptr, m->cxDesc, cx_grad_ptr,
m->handle.workSpace, m->handle.workSpaceSize,
m->reserveSpace, m->reserveSpaceSize));
// dw from (x, hx, y); accumulates into w_grad_ptr.
checkCUDNN(cudnnRNNBackwardWeights(m->handle.dnn, m->rnnDesc,
LSTM_PER_NODE_LENGTH/*seqLength*/,
m->xDescs, x_ptr, m->hxDesc, hx_ptr,
m->yDescs, y_ptr,
m->handle.workSpace, m->handle.workSpaceSize,
m->wDesc, w_grad_ptr,
m->reserveSpace, m->reserveSpaceSize));
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("LSTM backward time = %.2fms\n", elapsed);
}
#ifdef PRINT_INTERMEDIATE_RESULT
print_tensor<1, float>(w_grad_ptr, rect_w_grad, "lstm_bwd:w_grad");
print_tensor<3, float>(x_grad_ptr, rect_x_grad, "lstm_bwd:x_grad");
print_tensor<2, float>(hx_grad_ptr, rect_hx_grad, "lstm_bwd:hx_grad");
print_tensor<2, float>(cx_grad_ptr, rect_cx_grad, "lstm_bwd:cx_grad");
#endif
#endif
}
void LSTM::backward(const RnnModel& model)
{
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta* mp = meta[idx];
DomainPoint dp(*it);
TaskLauncher launcher(LSTM_BWD_TASK_ID, TaskArgument(&mp, sizeof(OpMeta*)),
Predicate::TRUE_PRED, 0/*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
// add region requirements for x, hx, cx
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(3, FID_DATA);
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(outputs[i].partition, dp);
launcher.add_region_requirement(RegionRequirement(x, READ_ONLY, EXCLUSIVE, outputs[i].region));
launcher.add_field(4 + i, FID_DATA);
}
// add region requirements for gradients
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[i].partition_grad, dp);
launcher.add_region_requirement(RegionRequirement(x, WRITE_ONLY, EXCLUSIVE, inputs[i].region_grad));
launcher.add_field(7+i, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.gradients[paraConfig.gpu[idx]], READ_WRITE, EXCLUSIVE,
params.gradients[paraConfig.gpu[idx]]));
launcher.add_field(10, FID_DATA);
for (int i = 0; i < 3; i++) {
LogicalRegion x =
runtime->get_logical_subregion_by_color(outputs[i].partition_grad, dp);
launcher.add_region_requirement(RegionRequirement(x, READ_ONLY, EXCLUSIVE, outputs[i].region_grad));
launcher.add_field(11 + i, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
// Intentionally a no-op: this op performs no local parameter update.
// Presumably the gradients accumulated in params.gradients are applied by a
// separate model-level pass — TODO(review): confirm against RnnModel.
void LSTM::update(const RnnModel& model)
{
}
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <helper_math.h>
#include "bvh.h"
#include "gpu_vdb.h"
#define BLOCK_SIZE 32
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Aborts the process with file/line context when a CUDA runtime call
// returned an error status. Compiled out unless CUDA_ERROR_CHECK is defined
// (used through the CudaSafeCall macro above).
inline void __cudaSafeCall(cudaError err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(err));
		exit(-1);
	}
#endif
}
// Checks for a pending (possibly asynchronous) CUDA error and aborts with
// file/line context. Compiled out unless CUDA_ERROR_CHECK is defined
// (used through the CudaCheckError macro above).
inline void __cudaCheckError(const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
	cudaError status = cudaGetLastError();
	if (status != cudaSuccess) {
		fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(status));
		exit(-1);
	}
	// Heavier check: force completion of outstanding device work so that
	// asynchronous launch failures surface here. This costs performance;
	// comment away if needed.
	status = cudaDeviceSynchronize();
	if (status != cudaSuccess) {
		fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
			file, line, cudaGetErrorString(status));
		exit(-1);
	}
#endif
}
//////////////////////////////////////////////////////////////////////////
// Device functions
//////////////////////////////////////////////////////////////////////////
/**
 * delta(i, j) for LBVH construction: number of leading bits shared by the
 * Morton codes at positions i and j, with ties broken by the primitive IDs
 * (which are unique). Returns -1 when either index is out of range, which
 * the radix-tree builder uses as a sentinel at the array borders.
 */
__device__ int LongestCommonPrefix(int i, int j, int numTriangles,
	MortonCode* mortonCodes, int* triangleIDs) {
	const bool inRange = i >= 0 && i <= numTriangles - 1 &&
	                     j >= 0 && j <= numTriangles - 1;
	if (!inRange)
		return -1;
	const MortonCode ci = mortonCodes[i];
	const MortonCode cj = mortonCodes[j];
	if (ci != cj)
		return __clzll(ci ^ cj);
	// Identical codes: extend the prefix with the (distinct) primitive IDs.
	return __clzll(ci ^ cj) + __clzll(triangleIDs[i] ^ triangleIDs[j]);
}
/**
 * Expand bits, used in Morton code calculation: spreads the low 10 bits of
 * i so that two zero bits separate each original bit
 * (...dcba -> ...d00c00b00a), the standard interleaving step for 30-bit
 * 3D Morton codes. The magic multiply/mask sequence is the well-known
 * bit-expansion trick (see Karras, "Thinking Parallel, Part III").
 */
__device__ MortonCode bitExpansion(MortonCode i) {
i = (i * 0x00010001u) & 0xFF0000FFu;
i = (i * 0x00000101u) & 0x0F00F00Fu;
i = (i * 0x00000011u) & 0xC30C30C3u;
i = (i * 0x00000005u) & 0x49249249u;
return i;
}
/**
 * Builds a 30-bit 3D Morton code from a position expressed in normalized
 * [0,1] coordinates of the scene bounding box. Each axis is quantized to a
 * 10-bit grid and the bits are interleaved x,y,z (x most significant).
 */
__device__ MortonCode ComputeMortonCode(float x, float y, float z) {
	// Quantize each coordinate into [0, 1023].
	const float qx = min(max(x * 1024.0f, 0.0f), 1023.0f);
	const float qy = min(max(y * 1024.0f, 0.0f), 1023.0f);
	const float qz = min(max(z * 1024.0f, 0.0f), 1023.0f);
	// Interleave: x lands on bits 3k+2, y on 3k+1, z on 3k.
	return bitExpansion((MortonCode)qx) * 4 +
	       bitExpansion((MortonCode)qy) * 2 +
	       bitExpansion((MortonCode)qz);
}
// Returns the idx-th (0..7) octant of the axis-aligned box [pmin, pmax].
// The child ordering matches build_octree_recursive below; an out-of-range
// idx yields a degenerate box at the origin, as in the original if-chain.
__device__ AABB divide_bbox(int idx, float3 pmin, float3 pmax) {
	const float half_x = (pmin.x + pmax.x)*0.5;
	const float half_y = (pmin.y + pmax.y)*0.5;
	const float half_z = (pmin.z + pmax.z)*0.5;
	float3 lo = make_float3(.0f);
	float3 hi = make_float3(.0f);
	switch (idx) {
	case 0:
		lo = make_float3(pmin.x, half_y, pmin.z);
		hi = make_float3(half_x, pmax.y, half_z);
		break;
	case 1:
		lo = make_float3(half_x, half_y, pmin.z);
		hi = make_float3(pmax.x, pmax.y, half_z);
		break;
	case 2:
		lo = pmin;
		hi = make_float3(half_x, half_y, half_z);
		break;
	case 3:
		lo = make_float3(half_x, pmin.y, pmin.z);
		hi = make_float3(pmax.x, half_y, half_z);
		break;
	case 4:
		lo = make_float3(pmin.x, half_y, half_z);
		hi = make_float3(half_x, pmax.y, pmax.z);
		break;
	case 5:
		lo = make_float3(half_x, half_y, half_z);
		hi = pmax;
		break;
	case 6:
		lo = make_float3(pmin.x, pmin.y, half_z);
		hi = make_float3(half_x, half_y, pmax.z);
		break;
	case 7:
		lo = make_float3(half_x, pmin.y, half_z);
		hi = make_float3(pmax.x, half_y, pmax.z);
		break;
	}
	return AABB(lo, hi);
}
// Recursively subdivides `root` into 8 children for `depth` more levels.
// Each child records the indices of every volume whose bounds overlap its
// octant, together with conservative max/min extinction and the smallest
// voxel size among those volumes. Children that overlap at least one volume
// set has_children and are recursed into.
// NOTE(review): children are allocated with device-side `new` and never
// freed here — they live on the device heap; confirm teardown elsewhere and
// that the heap size limit accommodates 8^depth nodes.
// NOTE(review): vol_indices is written without a bounds check — assumes its
// capacity is at least num_volumes; confirm OCTNode's array size.
__device__ void build_octree_recursive(GPU_VDB *vdbs, int num_volumes, OCTNode *root, int depth, bool m_debug) {
if (depth > 0) {
if (root->num_volumes > 0) {
for (int i = 0; i < 8; ++i) {
root->children[i] = new OCTNode;
root->children[i]->parent = root;
root->children[i]->depth = depth;
float3 pmin = root->bbox.pmin;
float3 pmax = root->bbox.pmax;
root->children[i]->bbox = divide_bbox(i, pmin, pmax);
// Gather every volume overlapping this octant.
int idx = 0;
for (int y = 0; y < num_volumes; ++y) {
if (Overlaps(root->children[i]->bbox, vdbs[y].Bounds())) {
root->children[i]->num_volumes++;
root->children[i]->vol_indices[idx] = y;
root->children[i]->max_extinction = fmaxf(root->children[i]->max_extinction, vdbs[y].vdb_info.max_density);
root->children[i]->min_extinction = fminf(root->children[i]->min_extinction, vdbs[y].vdb_info.min_density);
root->children[i]->voxel_size = fminf(root->children[i]->voxel_size, vdbs[y].vdb_info.voxelsize);
idx++;
}
}
if (root->children[i]->num_volumes>0) root->children[i]->has_children = true;
if (m_debug) {
printf("num volumes for child %d-%d is %d ", depth, i, root->children[i]->num_volumes);
if (root->children[i]->num_volumes > 0) {
printf("volume indices: ");
for (int x = 0; x < root->children[i]->num_volumes; ++x) {
printf("%d ", root->children[i]->vol_indices[x]);
}
}
printf(" max extinction: %f\n", root->children[i]->max_extinction);
printf(" min extinction: %f\n", root->children[i]->min_extinction);
}
build_octree_recursive(vdbs, num_volumes, root->children[i], depth - 1, m_debug);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
// Kernels
//////////////////////////////////////////////////////////////////////////
// One thread per volume: caches every volume's AABB into a flat array so
// later passes (scene-bounds reduction, Morton codes) avoid re-querying.
__global__ void ComputeBoundingBoxes(GPU_VDB* volumes,
	int numVolumes,
	AABB* boundingBoxes) {
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= numVolumes)
		return;
	boundingBoxes[tid] = volumes[tid].Bounds();
}
__global__ void DebugBVH(BVHNode* BVHLeaves, BVHNode* BVHNodes, int numVolumes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// do in serial
if (i == 0) {
for (int j = 0; j < numVolumes; j++) {
BVHNode* currentNode = BVHLeaves + j;
printf("BBox for volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n",
(BVHLeaves + j)->volIndex,
currentNode->boundingBox.pmin.x,
currentNode->boundingBox.pmin.y,
currentNode->boundingBox.pmin.z,
currentNode->boundingBox.pmax.x,
currentNode->boundingBox.pmax.y,
currentNode->boundingBox.pmax.z);
}
//parents:
for (int j = 0; j < numVolumes; j++) {
BVHNode* currentNode = (BVHLeaves + j)->parent;
printf("BBox for parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n",
(BVHLeaves + j)->volIndex,
currentNode->boundingBox.pmin.x,
currentNode->boundingBox.pmin.y,
currentNode->boundingBox.pmin.z,
currentNode->boundingBox.pmax.x,
currentNode->boundingBox.pmax.y,
currentNode->boundingBox.pmax.z);
}
for (int j = 0; j < numVolumes; j++) {
BVHNode* currentNode = (BVHLeaves + j)->parent->parent;
printf("BBox for parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n",
(BVHLeaves + j)->volIndex,
currentNode->boundingBox.pmin.x,
currentNode->boundingBox.pmin.y,
currentNode->boundingBox.pmin.z,
currentNode->boundingBox.pmax.x,
currentNode->boundingBox.pmax.y,
currentNode->boundingBox.pmax.z);
}
for (int j = 0; j < numVolumes; j++) {
BVHNode* currentNode = (BVHLeaves + j)->parent->parent->parent;
printf("BBox for parents parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n",
(BVHLeaves + j)->volIndex,
currentNode->boundingBox.pmin.x,
currentNode->boundingBox.pmin.y,
currentNode->boundingBox.pmin.z,
currentNode->boundingBox.pmax.x,
currentNode->boundingBox.pmax.y,
currentNode->boundingBox.pmax.z);
}
}
}
// One thread per volume: maps each volume's centroid into the unit cube of
// the scene bounds and emits its 30-bit Morton code.
__global__ void ComputeMortonCodes(const GPU_VDB* volumes, int numTriangles, AABB sceneBounds, MortonCode* mortonCodes) {
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= numTriangles)
		return;
	const float3 c = volumes[tid].Bounds().Centroid();
	const float3 lo = sceneBounds.pmin;
	const float3 hi = sceneBounds.pmax;
	// Normalize the centroid to [0,1] per axis, then interleave.
	mortonCodes[tid] = ComputeMortonCode((c.x - lo.x) / (hi.x - lo.x),
	                                     (c.y - lo.y) / (hi.y - lo.y),
	                                     (c.z - lo.z) / (hi.z - lo.z));
}
// Bottom-up pass that fills in the leaves and fits AABBs for every internal
// node of the radix tree built by BuildRadixTree. One thread per sorted
// leaf: each thread initializes its leaf, then climbs toward the root.
// nodeCounter holds one counter per internal node (zero-initialized by the
// caller): the first thread to reach a node bails out (res == 0); the
// second thread — whose arrival guarantees both subtrees are finished —
// computes the node's box and keeps climbing.
// NOTE(review): reference implementations issue a __threadfence() between
// writing a child's box and the atomicAdd so the sibling thread is
// guaranteed to see it; none is present here — verify on target hardware.
__global__ void ConstructBVH(BVHNode* BVHNodes, BVHNode* BVHLeaves, int* nodeCounter, GPU_VDB* volumes, int* volumeIDs, int numVolumes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numVolumes) {
BVHNode* leaf = BVHLeaves + i;
int volumeIdx = volumeIDs[i];
// Handle leaf first
leaf->volIndex = volumeIdx;
leaf->boundingBox = volumes[volumeIdx].Bounds();
BVHNode* current = leaf->parent;
int currentIndex = current - BVHNodes;
int res = atomicAdd(nodeCounter + currentIndex, 1);
// Go up and handle internal nodes
while (true) {
if (res == 0) {
// First visitor: the sibling subtree is not done yet.
return;
}
AABB leftBoundingBox = current->leftChild->boundingBox;
AABB rightBoundingBox = current->rightChild->boundingBox;
// Compute current bounding box
current->boundingBox = UnionB(leftBoundingBox, rightBoundingBox);
// If current is root, return
if (current == BVHNodes) {
return;
}
current = current->parent;
currentIndex = current - BVHNodes;
res = atomicAdd(nodeCounter + currentIndex, 1);
}
}
}
// Builds the topology (parent/child links only) of a binary radix tree over
// the sorted Morton codes, one thread per internal node, following Karras,
// "Maximally Parallel Construction of BVHs, Octrees, and k-d Trees"
// (HPG 2012). With n leaves there are n-1 internal nodes; node i covers the
// key range [minId, maxId] and splits it at `gamma`. LongestCommonPrefix
// returning -1 out of range makes the array borders terminate the searches.
__global__ void BuildRadixTree(BVHNode* radixTreeNodes, BVHNode* radixTreeLeaves, MortonCode* mortonCodes, int* volumeIds, int numVolumes) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numVolumes - 1) {
// Run radix tree construction algorithm
// Determine direction of the range (+1 or -1)
int d = LongestCommonPrefix(i, i + 1, numVolumes, mortonCodes, volumeIds) -
LongestCommonPrefix(i, i - 1, numVolumes, mortonCodes, volumeIds) >= 0 ? 1 : -1;
// Compute upper bound for the length of the range
int deltaMin = LongestCommonPrefix(i, i - d, numVolumes, mortonCodes, volumeIds);
//int lmax = 128;
int lmax = 2;
// Grow lmax until the prefix drops below deltaMin (doubling here instead
// of the paper's x4 stepping, per the commented-out lines).
while (LongestCommonPrefix(i, i + lmax * d, numVolumes, mortonCodes, volumeIds) > deltaMin) {
//lmax = lmax * 4;
lmax = lmax * 2;
}
// Find the other end using binary search
int l = 0;
int divider = 2;
for (int t = lmax / divider; t >= 1; divider *= 2) {
if (LongestCommonPrefix(i, i + (l + t) * d, numVolumes, mortonCodes, volumeIds) > deltaMin) {
l = l + t;
}
if (t == 1) break;
t = lmax / divider;
}
// j is the far end of this node's key range.
int j = i + l * d;
// Find the split position using binary search
int deltaNode = LongestCommonPrefix(i, j, numVolumes, mortonCodes, volumeIds);
int s = 0;
divider = 2;
for (int t = (l + (divider - 1)) / divider; t >= 1; divider *= 2) {
if (LongestCommonPrefix(i, i + (s + t) * d, numVolumes, mortonCodes, volumeIds) > deltaNode) {
s = s + t;
}
if (t == 1) break;
t = (l + (divider - 1)) / divider;
}
// Split: left child covers [min(i,j), gamma], right [gamma+1, max(i,j)].
int gamma = i + s * d + min(d, 0);
//printf("i:%d, d:%d, deltaMin:%d, deltaNode:%d, lmax:%d, l:%d, j:%d, gamma:%d. \n", i, d, deltaMin, deltaNode, lmax, l, j, gamma);
// Output child pointers: a child spanning a single key is a leaf,
// otherwise it is the internal node with the matching index.
BVHNode* current = radixTreeNodes + i;
if (min(i, j) == gamma) {
current->leftChild = radixTreeLeaves + gamma;
(radixTreeLeaves + gamma)->parent = current;
}
else {
current->leftChild = radixTreeNodes + gamma;
(radixTreeNodes + gamma)->parent = current;
}
if (max(i, j) == gamma + 1) {
current->rightChild = radixTreeLeaves + gamma + 1;
(radixTreeLeaves + gamma + 1)->parent = current;
}
else {
current->rightChild = radixTreeNodes + gamma + 1;
(radixTreeNodes + gamma + 1)->parent = current;
}
current->minId = min(i, j);
current->maxId = max(i, j);
}
}
// Kernel entry point: single-thread bridge that runs the recursive octree
// build on the device (launched <<<1,1>>> from build_octree() below, since
// build_octree_recursive uses device-side allocation and recursion).
__global__ void pass_octree(GPU_VDB *volumes, int num_volumes, OCTNode *root, int depth, bool m_debug) {
build_octree_recursive(volumes, num_volumes, root, depth, m_debug);
}
/**
 * Builds an LBVH over `volumes` on the GPU and writes the overall scene
 * bounds to `sceneBounds`. Pipeline:
 *   1. per-volume AABBs (ComputeBoundingBoxes)
 *   2. scene AABB via thrust::reduce
 *   3. Morton codes (ComputeMortonCodes) + sort_by_key over volume ids
 *   4. radix-tree topology (BuildRadixTree)
 *   5. bottom-up AABB fit (ConstructBVH)
 * On return bvh.BVHNodes / bvh.BVHLeaves hold the device-side tree.
 * Fixes vs. previous revision: removed an unused device->host copy of the
 * bounding boxes, and thrust::system_error is now caught by const
 * reference instead of by value.
 */
extern "C" void BuildBVH(BVH& bvh, GPU_VDB* volumes, int numVolumes, AABB &sceneBounds, bool debug_bvh) {
	int blockSize = BLOCK_SIZE;
	int gridSize = (numVolumes + blockSize - 1) / blockSize;
	// Timing metrics
	float total = 0;
	float elapsed;
	cudaEvent_t start, stop;
	if (debug_bvh) printf("Number of volumes: %i\n", numVolumes);
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// Pre-process stage, scene bounding box
	// TODO: add check if this has been done already
	// if we already have scenebounds and have new/modified triangles, no need to start over
	// Should only do this if scene has changed (added tris, moved tris)
	// Compute bounding boxes
	if (debug_bvh) printf("Computing volume bounding boxes...");
	cudaEventRecord(start, 0);
	thrust::device_vector<AABB> boundingBoxes(numVolumes);
	ComputeBoundingBoxes <<<gridSize, blockSize>>> (volumes, numVolumes, boundingBoxes.data().get());
	CudaCheckError();
	checkCudaErrors(cudaGetLastError());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed);
	total += elapsed;
	// Compute scene bounding box
	if (debug_bvh) printf("Computing scene bounding box...");
	cudaEventRecord(start, 0);
	sceneBounds = thrust::reduce(boundingBoxes.begin(), boundingBoxes.end(), AABB(), AABBUnion());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed);
	total += elapsed;
	if (debug_bvh) printf("Total pre-computation time for scene was %f ms. \n", total);
	total = 0;
	if (debug_bvh) {
		printf("Scene boundingbox:\n");
		printf("pmin: %f, %f, %f\n", sceneBounds.pmin.x, sceneBounds.pmin.y, sceneBounds.pmin.z);
		printf("pmax: %f, %f, %f\n", sceneBounds.pmax.x, sceneBounds.pmax.y, sceneBounds.pmax.z);
	}
	// Pre-process done, start building BVH
	// Compute Morton codes
	thrust::device_vector<MortonCode> mortonCodes(numVolumes);
	if (debug_bvh) printf("Computing Morton codes...");
	cudaEventRecord(start, 0);
	ComputeMortonCodes <<<gridSize, blockSize>>> (volumes, numVolumes, sceneBounds, mortonCodes.data().get());
	CudaCheckError();
	checkCudaErrors(cudaGetLastError());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed);
	total += elapsed;
	// Sort volume indices with Morton code as key
	thrust::device_vector<int> triangleIDs(numVolumes);
	thrust::sequence(triangleIDs.begin(), triangleIDs.end());
	if (debug_bvh) printf("Sort volumes...");
	cudaEventRecord(start, 0);
	try {
		thrust::sort_by_key(mortonCodes.begin(), mortonCodes.end(), triangleIDs.begin());
	}
	catch (const thrust::system_error& e) {
		printf("Error inside sort: %s\n", e.what());
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Sorting took %f ms. \n", elapsed);
	total += elapsed;
	// Build radix tree of BVH nodes (n-1 internal nodes, n leaves)
	checkCudaErrors(cudaMalloc((void**)&bvh.BVHNodes, (numVolumes - 1) * sizeof(BVHNode)));
	checkCudaErrors(cudaMalloc((void**)&bvh.BVHLeaves, numVolumes * sizeof(BVHNode)));
	if (debug_bvh) printf("Building radix tree...");
	cudaEventRecord(start, 0);
	BuildRadixTree <<<gridSize, blockSize>>> (bvh.BVHNodes, bvh.BVHLeaves, mortonCodes.data().get(), triangleIDs.data().get(), numVolumes);
	CudaCheckError();
	checkCudaErrors(cudaGetLastError());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed);
	total += elapsed;
	// Build BVH (device_vector zero-initializes the per-node counters
	// required by ConstructBVH's second-arrival scheme)
	thrust::device_vector<int> nodeCounters(numVolumes);
	if (debug_bvh) printf("Building BVH...");
	cudaEventRecord(start, 0);
	ConstructBVH <<<gridSize, blockSize >>> (bvh.BVHNodes, bvh.BVHLeaves, nodeCounters.data().get(), volumes, triangleIDs.data().get(), numVolumes);
	CudaCheckError();
	checkCudaErrors(cudaDeviceSynchronize());
	//DebugBVH << <gridSize, blockSize >> >(bvh.BVHLeaves, bvh.BVHNodes, numVolumes);
	checkCudaErrors(cudaGetLastError());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed);
	total += elapsed;
	if (debug_bvh) printf("Total BVH construction time was %f ms. \n", total);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
/**
 * Host entry point for the octree build: launches the single-thread
 * pass_octree kernel over the scene's volumes and times it.
 * Fix: the caller's m_debug flag is now forwarded to the device-side build
 * (it was hard-coded to `false`, so the per-node debug prints in
 * build_octree_recursive could never be enabled even though m_debug gates
 * the host-side messages here).
 */
extern "C" void build_octree(OCTNode *root, GPU_VDB *volumes, int num_volumes, int depth, bool m_debug) {
	float elapsed;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	if (m_debug) printf("Building Octree...");
	cudaEventRecord(start, 0);
	pass_octree << <1, 1 >> > (volumes, num_volumes, root, depth, m_debug);
	CudaCheckError();
	checkCudaErrors(cudaGetLastError());
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsed, start, stop);
	checkCudaErrors(cudaDeviceSynchronize());
	if(m_debug) printf("done! Computation took %f ms. \n", elapsed);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
// Use the torch for GPU memory management. Thrust resize gives segfulat during
// debugging -g #include <torch/extension.h>
#include "convolution.cuh"
#include "../../utils.h"
#include <THC/THCAtomics.cuh>
namespace kaolin {
/**
 * Gather-GEMM-scatter (CUDA kernel): for every mapped row r,
 *     C[out_map[r], :] += A[in_map[r], :] * B
 * computed with BLOCK_SIZE x BLOCK_SIZE shared-memory tiles.
 * wA is A's width and wB is B's width; hA is the number of mapped rows.
 * Rows of C are updated with atomicAdd since out_map values may repeat.
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB, Dtype *C,
const Itype *in_map, const Itype *out_map) {
// Use in_feat as A and kernel as B
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. x indexes columns of C, y indexes (mapped) rows.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
// Out-of-range threads still help load tiles below, so give them a safe
// (row 0) map value instead of reading past the maps.
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * in_row + s + tx] : 0;
Bs[ty][tx] = ((s + ty) < hB && x < wB) ? B[wB * (s + ty) + x] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < wB)
atomicAdd(&C[wB * out_row + x], Csub);
// C[wB * out_row + x] += Csub;
}
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B^T, E = D^T * A
 * wA is A's width and wB is B's width
 *
 * Backward helper for the mapped convolution: A rows are gathered through
 * out_map (gradient w.r.t. the output), D rows through in_map (the saved
 * input features). C (gradient w.r.t. the input) is scattered to in_map
 * rows and E (gradient w.r.t. the kernel slice) is accumulated in full —
 * both with atomicAdd, since map values may repeat across threads/blocks.
 *
 * +---+
 * |B^T|
 * +-------+
 * | | |
 * | A | C |
 * | | |
 * | | |
 * +------------------+
 * | D^T | E |
 * +----------+---+
 *
 */
template <typename Dtype, typename Itype, int BLOCK_SIZE>
__global__ void matmul2(const Dtype *A, const int wA, const int hA,
const Dtype *B, const int wB, const int hB,
const Dtype *D, const int wD, const int hD, Dtype *C,
Dtype *E, const Itype *in_map, const Itype *out_map) {
// Use grad_out_feat as A, transposed kernel weight as B, and in_feat as D
// Block index
const int bx = blockIdx.x;
const int by = blockIdx.y;
// Thread index
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Coordinate. y is for rows, x is for columns.
const int x = BLOCK_SIZE * bx + tx;
const int y = BLOCK_SIZE * by + ty;
// Out-of-range threads still participate in tile loads; give them a safe
// (row 0) map value.
const Itype in_row = y < hA ? in_map[y] : 0;
const Itype out_row = y < hA ? out_map[y] : 0;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Dtype Csub = 0;
Dtype Esub = 0;
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Dtype As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Dtype BTs[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Ds used to
// store the sub-matrix of D
__shared__ Dtype DTs[BLOCK_SIZE][BLOCK_SIZE];
// For Ds = D^T[...:..., ...:...], use the transposed grid dimension for A
DTs[ty][tx] = (x < wD && y < hD) ? D[wD * in_row + x] : 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int s = 0; s < wA; s += BLOCK_SIZE) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = ((s + tx) < wA && y < hA) ? A[wA * out_row + s + tx] : 0;
// Transposed kernel
BTs[ty][tx] = ((s + ty) < wB && x < hB) ? B[wB * x + s + ty] : 0;
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * BTs[k][tx];
}
// For Esub, reset to 0
Esub = 0;
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Esub += DTs[k][ty] * As[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
// For the E matrix which requires accmulation of multiple blocks, use
// atomic addition. This can be replaced with a more sophisticaed reduction
// algorithm.
if ((bx * BLOCK_SIZE + ty) < wD && (s + tx) < wA)
atomicAdd(&E[wA * (bx * BLOCK_SIZE + ty) + (s + tx)], Esub);
}
// Write the block sub-matrix to device memory;
// each thread writes one element
if (y < hA && x < hB)
atomicAdd(&C[hB * in_row + x], Csub);
}
namespace minkowski {
// Sparse-tensor convolution forward pass: for each kernel offset k, gathers
// the in_maps[k] rows of d_in_feat, multiplies them by the k-th
// (in_nchannel x out_nchannel) slice of d_kernel, and scatter-accumulates
// into the out_maps[k] rows of d_out_feat via the matmul kernel above.
// NOTE(review): `cuhandle` is unused on this path (no cuBLAS call) — kept
// for signature compatibility; confirm other build configurations.
template <typename Dtype, typename Itype>
void ConvolutionForwardKernelGPU(const Dtype *d_in_feat, int in_nchannel,
Dtype *d_out_feat, int out_nchannel,
const Dtype *d_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream) {
CUDA_CHECK(cudaDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size (the GEMM tile edge): pick the largest
// tile the channel counts justify.
// NOTE(review): the 24 branch appears shadowed — any in/out pair of
// multiples of 24 (>= 24) already satisfies the first condition and gets
// 32; confirm whether that is intended.
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel > 24 && out_nchannel > 24))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
dim3 threads(shared_mem_size, shared_mem_size);
// Iterate through each spatial kernel and get indices for in_map and out_map
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
// Split the row range into num_div chunks so gridDim.y stays under
// MAX_GRID; each chunk gets its own launch with offset map pointers.
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((out_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
// Dispatch on the compile-time tile size; all cases are identical
// apart from the BLOCK_SIZE template argument.
switch (shared_mem_size) {
case 32:
matmul<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_in_feat, in_nchannel, curr_num_active,
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, d_out_feat, in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nchannel, float *d_out_feat,
int out_nchannel, const float *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nchannel, double *d_out_feat,
int out_nchannel, const double *d_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template <typename Dtype, typename Itype>
void ConvolutionBackwardKernelGPU(const Dtype *d_in_feat, Dtype *d_grad_in_feat,
int in_nchannel, const Dtype *d_grad_out_feat,
int out_nchannel, const Dtype *d_kernel,
Dtype *d_grad_kernel,
const pInOutMaps<Itype> &in_maps,
const pInOutMaps<Itype> &out_maps,
int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream) {
CUDA_CHECK(cudaDeviceSynchronize());
int n_active_in_volume, shared_mem_size = -1;
// Define the shared memory size
if ((in_nchannel > 16 && out_nchannel > 16 &&
in_nchannel * out_nchannel >= 512) ||
(in_nchannel % 32 == 0 && out_nchannel % 32 == 0))
shared_mem_size = 32;
else if (in_nchannel % 24 == 0 && out_nchannel % 24 == 0)
shared_mem_size = 24;
else if ((in_nchannel > 8 && out_nchannel > 8) ||
(in_nchannel % 16 == 0 && out_nchannel % 16 == 0))
shared_mem_size = 16;
else
shared_mem_size = 8;
dim3 threads(shared_mem_size, shared_mem_size);
for (int k = 0; k < in_maps.size(); k++) {
n_active_in_volume = in_maps[k].size();
if (n_active_in_volume == 0)
continue;
int num_grid = (n_active_in_volume + shared_mem_size - 1) / shared_mem_size;
int num_div = (num_grid + MAX_GRID - 1) / MAX_GRID;
int step = (n_active_in_volume + num_div - 1) / num_div;
for (int s = 0; s < num_div; s++) {
int offset = step * s;
int remainder = n_active_in_volume - step * s;
int curr_num_active = remainder < step ? remainder : step;
dim3 grid((in_nchannel + threads.x - 1) / threads.x,
(curr_num_active + threads.y - 1) / threads.y);
switch (shared_mem_size) {
case 32:
matmul2<Dtype, Itype, 32> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 24:
matmul2<Dtype, Itype, 24> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 16:
matmul2<Dtype, Itype, 16> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
case 8:
matmul2<Dtype, Itype, 8> << <grid, threads, 0, stream >> > (
d_grad_out_feat, out_nchannel, curr_num_active, // A
&d_kernel[k * in_nchannel * out_nchannel], out_nchannel,
in_nchannel, // B
d_in_feat, in_nchannel, curr_num_active, // D
d_grad_in_feat, // C
&d_grad_kernel[k * in_nchannel * out_nchannel], // E
in_maps[k].data() + offset, out_maps[k].data() + offset);
break;
}
}
CUDA_CHECK(cudaGetLastError());
}
CUDA_CHECK(cudaDeviceSynchronize());
}
template void ConvolutionBackwardKernelGPU<float, int32_t>(
const float *d_in_feat, float *d_grad_in_feat, int in_nchannel,
const float *d_grad_out_feat, int out_nchannel, const float *d_kernel,
float *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
template void ConvolutionBackwardKernelGPU<double, int32_t>(
const double *d_in_feat, double *d_grad_in_feat, int in_nchannel,
const double *d_grad_out_feat, int out_nchannel, const double *d_kernel,
double *p_grad_kernel, const pInOutMaps<int32_t> &in_map,
const pInOutMaps<int32_t> &out_map, int out_nrows, cublasHandle_t cuhandle,
cudaStream_t stream);
} // end namespace minkowski
} // namespace kaolin
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/transform.hpp>
#include <taskflow/cuda/algorithm/for_each.hpp>
#include <taskflow/cuda/algorithm/reduce.hpp>
#include <taskflow/cuda/algorithm/scan.hpp>
#include <taskflow/cuda/algorithm/find.hpp>
#include <taskflow/cuda/algorithm/sort.hpp>
// verify
// Element-wise comparison of a[0..size) against b[0..size): any mismatch
// clears *check.  The flag must be set to true by the host beforehand;
// the kernel only ever writes `false`, so concurrent writers are benign.
template <typename T>
__global__
void verify(const T* a, const T* b, bool* check, size_t size) {
  const size_t stride = gridDim.x * blockDim.x;
  for(size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
    if(a[i] != b[i]) {
      *check = false;
      return;
    }
  }
}
// add
// Grid-stride element-wise addition: c[i] = a[i] + b[i] for i in [0, size).
template <typename T>
__global__
void add(const T* a, const T* b, T* c, size_t size) {
  const size_t stride = gridDim.x * blockDim.x;
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while(i < size) {
    c[i] = a[i] + b[i];
    i += stride;
  }
}
// multiply
// Grid-stride element-wise product: c[i] = a[i] * b[i] for i in [0, size).
template <typename T>
__global__
void multiply(const T* a, const T* b, T* c, size_t size) {
  const size_t stride = gridDim.x * blockDim.x;
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  while(i < size) {
    c[i] = a[i] * b[i];
    i += stride;
  }
}
//----------------------------------------------------------------------
//offload_n
//----------------------------------------------------------------------

// Builds a single-kernel GPU graph that doubles `a` in place (a = a + a)
// and runs it (times + 1) times via offload_n; the verify stage scales a
// reference copy by 2^(times+1) on the GPU and compares element-wise.
template <typename F>
void offload_n() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(size_t N = 1; N < 65532; N = N * 2 + 1) {
    taskflow.clear();
    int* a {nullptr};
    int* ans_a {nullptr};      // reference copy of a (managed memory)
    bool* check {nullptr};     // verification flag (managed memory)
    int times = ::rand() % 7;  // 0..6 extra offloads beyond the first
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaMallocManaged(&a, N * sizeof(int)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&ans_a, N * sizeof(int)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess);
    }).name("allocate");
    //initialize
    auto initialize_t = taskflow.emplace([&]() {
      std::generate_n(a, N, [&](){ return ::rand() % N; });
      std::memcpy(ans_a, a, N * sizeof(int));
      *check = true;
    }).name("initialize");
    //offload: a = a + a, executed times+1 times
    auto offload_t = taskflow.emplace([&](F& cf) {
      cf.kernel(
        32, 512, 0,
        add<int>,
        a, a, a, N
      );
      cf.offload_n(times+1);
    }).name("offload");
    //verify: scale the reference copy by 2^(times+1), then compare
    auto verify_t = taskflow.emplace([&](F& cf) {
      auto ans_t = cf.for_each(
        ans_a, ans_a + N,
        [=] __device__(int& v) { v *= std::pow(2, (times + 1)); }
      );
      auto verify_t = cf.kernel(
        32, 512, 0,
        verify<int>,
        a, ans_a, check, N
      );
      ans_t.precede(verify_t);
      cf.offload();
      REQUIRE(*check);
    }).name("verify");
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaFree(a) == cudaSuccess);
      REQUIRE(cudaFree(ans_a) == cudaSuccess);
      REQUIRE(cudaFree(check) == cudaSuccess);
    }).name("deallocate");
    // Linear pipeline: allocate -> initialize -> offload -> verify -> free.
    allocate_t.precede(initialize_t);
    initialize_t.precede(offload_t);
    offload_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Run the offload test for both cudaFlow and cudaFlowCapturer.
TEST_CASE("cudaFlow.offload" * doctest::timeout(300)) {
  offload_n<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.offload" * doctest::timeout(300)) {
  offload_n<tf::cudaFlowCapturer>();
}
//----------------------------------------------------------------------
//join_n
//----------------------------------------------------------------------

// Same shape as offload_n(), but the graph is offloaded exactly `times`
// times (possibly zero), so the data ends up scaled by 2^times.
template <typename F>
void join() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(size_t N = 1; N < 65532; N = N * 2 + 1) {
    taskflow.clear();
    int* a {nullptr};
    int* ans_a {nullptr};      // reference copy of a (managed memory)
    bool* check {nullptr};     // verification flag (managed memory)
    int times = ::rand() % 7;  // 0..6 offloads
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaMallocManaged(&a, N * sizeof(int)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&ans_a, N * sizeof(int)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess);
    }).name("allocate");
    //initialize
    auto initialize_t = taskflow.emplace([&]() {
      std::generate_n(a, N, [&](){ return ::rand() % N; });
      std::memcpy(ans_a, a, N * sizeof(int));
      *check = true;
    }).name("initialize");
    //join: a = a + a, executed `times` times
    auto join_t = taskflow.emplace([&](F& cf) {
      cf.kernel(
        32, 512, 0,
        add<int>,
        a, a, a, N
      );
      cf.offload_n(times);
    }).name("join");
    //verify: scale the reference copy by 2^times, then compare
    auto verify_t = taskflow.emplace([&](F& cf) {
      auto ans_t = cf.for_each(
        ans_a, ans_a + N,
        [=] __device__(int& v) { v *= std::pow(2, (times)); }
      );
      auto verify_t = cf.kernel(
        32, 512, 0,
        verify<int>,
        a, ans_a, check, N
      );
      ans_t.precede(verify_t);
      cf.offload();
      REQUIRE(*check);
    }).name("verify");
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaFree(a) == cudaSuccess);
      REQUIRE(cudaFree(ans_a) == cudaSuccess);
      REQUIRE(cudaFree(check) == cudaSuccess);
    }).name("deallocate");
    allocate_t.precede(initialize_t);
    initialize_t.precede(join_t);
    join_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Run the join test for both cudaFlow and cudaFlowCapturer.
TEST_CASE("cudaFlow.join" * doctest::timeout(300)) {
  join<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.join" * doctest::timeout(300)) {
  join<tf::cudaFlowCapturer>();
}
//----------------------------------------------------------------------
//update kernel
//----------------------------------------------------------------------

// Builds a two-kernel chain (multiply then add) over three managed buffers
// selected through the random index table `ind`, then rebinds both tasks
// twice via the kernel-update overload cf.kernel(task, grid, block, shm,
// f, args...) -- each rebind changes the launch shape and the operand
// order -- offloading after every round.  The verify stage replays the
// same six operations with transform tasks on the `ans_operand` copies and
// compares all three buffers element-wise.
template <typename F, typename T>
void update_kernel() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(size_t N = 1; N < 65529; N = N * 2 + 1) {
    taskflow.clear();
    std::vector<T*> operand(3, nullptr);
    std::vector<T*> ans_operand(3, nullptr);
    // Three random (possibly repeated) buffer indices in [0, 3).
    std::vector<int> ind(3);
    std::generate_n(ind.data(), 3, [&](){ return ::rand() % 3; });
    bool* check {nullptr};
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      for(int i = 0; i < 3; ++i) {
        REQUIRE(cudaMallocManaged(&operand[i], N * sizeof(T)) == cudaSuccess);
        REQUIRE(cudaMallocManaged(&ans_operand[i], N * sizeof(T)) == cudaSuccess);
      }
      REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess);
    }).name("allocate");
    //initialize
    auto initialize_t = taskflow.emplace([&](){
      for(int i = 0; i < 3; ++i) {
        std::generate_n(operand[i], N, [&](){ return ::rand() % N - N / 2 + i; });
        std::memcpy(ans_operand[i], operand[i], N * sizeof(T));
      }
      *check = true;
    }).name("initialize");
    //update_kernel
    auto add_t = taskflow.emplace([&](F& cf) {
      // Round 1: original launch shapes and operand order.
      auto multi_t = cf.kernel(
        32, 512, 0,
        multiply<T>,
        operand[ind[0]], operand[ind[1]], operand[ind[2]], N
      );
      auto add_t = cf.kernel(
        32, 512, 0,
        add<T>,
        operand[ind[1]], operand[ind[2]], operand[ind[0]], N
      );
      multi_t.precede(add_t);
      cf.offload();
      // Round 2: rebind both tasks with new shapes and operand orders.
      cf.kernel(
        multi_t,
        64, 128, 0, multiply<T>,
        operand[ind[2]], operand[ind[0]], operand[ind[1]], N
      );
      cf.kernel(
        add_t,
        16, 256, 0, add<T>,
        operand[ind[1]], operand[ind[0]], operand[ind[2]], N
      );
      cf.offload();
      // Round 3: rebind once more.
      cf.kernel(
        multi_t,
        8, 1024, 0, multiply<T>,
        operand[ind[0]], operand[ind[2]], operand[ind[1]], N
      );
      cf.kernel(
        add_t,
        64, 64, 0, add<T>,
        operand[ind[2]], operand[ind[1]], operand[ind[0]], N
      );
      cf.offload();
    }).name("add");
    //verify: replay the same six operations on the reference copies
    auto verify_t = taskflow.emplace([&](F& cf) {
      auto multi1_t = cf.transform(
        ans_operand[ind[0]], ans_operand[ind[0]] + N, ans_operand[ind[1]],
        ans_operand[ind[2]],
        [] __device__ (T& v1, T& v2) { return v1 * v2; }
      );
      auto add1_t = cf.transform(
        ans_operand[ind[1]], ans_operand[ind[1]]+N, ans_operand[ind[2]],
        ans_operand[ind[0]],
        [] __device__ (T& v1, T& v2) { return v1 + v2; }
      );
      auto multi2_t = cf.transform(
        ans_operand[ind[2]], ans_operand[ind[2]] + N, ans_operand[ind[0]],
        ans_operand[ind[1]],
        [] __device__ (T& v1, T& v2) { return v1 * v2; }
      );
      auto add2_t = cf.transform(
        ans_operand[ind[1]], ans_operand[ind[1]] + N, ans_operand[ind[0]],
        ans_operand[ind[2]],
        [] __device__ (T& v1, T& v2) { return v1 + v2; }
      );
      auto multi3_t = cf.transform(
        ans_operand[ind[0]], ans_operand[ind[0]] + N, ans_operand[ind[2]],
        ans_operand[ind[1]],
        [] __device__ (T& v1, T& v2) { return v1 * v2; }
      );
      auto add3_t = cf.transform(
        ans_operand[ind[2]], ans_operand[ind[2]] + N, ans_operand[ind[1]],
        ans_operand[ind[0]],
        [] __device__ (T& v1, T& v2) { return v1 + v2; }
      );
      auto verify1_t = cf.kernel(
        32, 512, 0,
        verify<T>,
        operand[ind[0]], ans_operand[ind[0]], check, N
      );
      auto verify2_t = cf.kernel(
        32, 512, 0,
        verify<T>,
        operand[ind[1]], ans_operand[ind[1]], check, N
      );
      auto verify3_t = cf.kernel(
        32, 512, 0,
        verify<T>,
        operand[ind[2]], ans_operand[ind[2]], check, N
      );
      // The replay must run strictly in order; verification comes last.
      multi1_t.precede(add1_t);
      add1_t.precede(multi2_t);
      multi2_t.precede(add2_t);
      add2_t.precede(multi3_t);
      multi3_t.precede(add3_t);
      add3_t.precede(verify1_t).precede(verify2_t).precede(verify3_t);
      cf.offload();
      REQUIRE(*check);
    }).name("verify");
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      for(int i = 0; i < 3; ++i) {
        REQUIRE(cudaFree(operand[i]) == cudaSuccess);
        REQUIRE(cudaFree(ans_operand[i]) == cudaSuccess);
      }
      REQUIRE(cudaFree(check) == cudaSuccess);
    }).name("deallocate");
    allocate_t.precede(initialize_t);
    initialize_t.precede(add_t);
    add_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Instantiate for int/float/double on both graph types.
TEST_CASE("cudaFlow.update.kernel.int" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlow, int>();
}

TEST_CASE("cudaFlow.update.kernel.float" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlow, float>();
}

TEST_CASE("cudaFlow.update.kernel.double" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlow, double>();
}

TEST_CASE("cudaFlowCapturer.update.kernel.int" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlowCapturer, int>();
}

TEST_CASE("cudaFlowCapturer.update.kernel.float" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlowCapturer, float>();
}

TEST_CASE("cudaFlowCapturer.update.kernel.double" * doctest::timeout(300)) {
  update_kernel<tf::cudaFlowCapturer, double>();
}
//----------------------------------------------------------------------
// update copy
//----------------------------------------------------------------------

// Exercises the copy-task update overload cf.copy(task, tgt, src, count):
// one H2D task is created for `da` and rebound (with an offload between
// bindings) for `db` and `dc`; one D2H task is likewise reused for `hc`
// and `hz`.  The kernel stage computes dz = da + db and dc = dc - dz, so
// with ha = N+5, hb = N-31, hc = N-47 the expected host results are
// hz = 2N - 26 and hc = -21 - N.
template <typename F, typename T>
void update_copy() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(int N = 1; N < 65459; N = N * 2 + 1) {
    taskflow.clear();
    std::vector<T> ha(N, N + 5);
    std::vector<T> hb(N, N - 31);
    std::vector<T> hc(N, N - 47);
    std::vector<T> hz(N);
    T* da {nullptr};
    T* db {nullptr};
    T* dc {nullptr};
    T* dz {nullptr};
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaMalloc(&da, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&db, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&dc, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&dz, N * sizeof(T)) == cudaSuccess);
    }).name("allocate");
    //update_copy: one task, three successive bindings
    auto h2d_t = taskflow.emplace([&](F& cf) {
      auto h2d_t = cf.copy(da, ha.data(), N).name("h2d");
      cf.offload();
      cf.copy(h2d_t, db, hb.data(), N);
      cf.offload();
      cf.copy(h2d_t, dc, hc.data(), N);
      cf.offload();
    });
    auto kernel_t = taskflow.emplace([&](F& cf) {
      auto add1_t = cf.transform(
        da, da+N, db, dz,
        [] __device__ (T& v1, T& v2) { return v1 + v2; }
      );
      auto add2_t = cf.transform(
        dc, dc+N, dz, dc,
        [] __device__ (T& v1, T& v2) { return v1 - v2; }
      );
      add1_t.precede(add2_t);
    });
    //one D2H task reused for both result buffers
    auto d2h_t = taskflow.emplace([&](F& cf) {
      auto d2h_t = cf.copy(hc.data(), dc, N).name("d2h");
      cf.offload();
      cf.copy(d2h_t, hz.data(), dz, N);
      cf.offload();
    });
    //verify
    auto verify_t = taskflow.emplace([&]() {
      for(auto& c: hc) {
        REQUIRE(c == -21 - N);
      }
      for(auto& z: hz) {
        REQUIRE(z == 2 * N - 26);
      }
    });
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaFree(da) == cudaSuccess);
      REQUIRE(cudaFree(db) == cudaSuccess);
      REQUIRE(cudaFree(dc) == cudaSuccess);
      REQUIRE(cudaFree(dz) == cudaSuccess);
    }).name("deallocate");
    allocate_t.precede(h2d_t);
    h2d_t.precede(kernel_t);
    kernel_t.precede(d2h_t);
    d2h_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Instantiate for int/float/double on both graph types.
TEST_CASE("cudaFlow.update.copy.int" * doctest::timeout(300)) {
  update_copy<tf::cudaFlow, int>();
}

TEST_CASE("cudaFlow.update.copy.float" * doctest::timeout(300)) {
  update_copy<tf::cudaFlow, float>();
}

TEST_CASE("cudaFlow.update.copy.double" * doctest::timeout(300)) {
  update_copy<tf::cudaFlow, double>();
}

TEST_CASE("cudaFlowCapturer.update.copy.int" * doctest::timeout(300)) {
  update_copy<tf::cudaFlowCapturer, int>();
}

TEST_CASE("cudaFlowCapturer.update.copy.float" * doctest::timeout(300)) {
  update_copy<tf::cudaFlowCapturer, float>();
}

TEST_CASE("cudaFlowCapturer.update.copy.double" * doctest::timeout(300)) {
  update_copy<tf::cudaFlowCapturer, double>();
}
//----------------------------------------------------------------------
//update memcpy
//----------------------------------------------------------------------

// Byte-count twin of update_copy(): exercises the untyped memcpy-task
// update overload cf.memcpy(task, tgt, src, bytes).  Same dataflow and
// expected results (hz = 2N - 26, hc = -21 - N).
template <typename F, typename T>
void update_memcpy() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(int N = 1; N < 65459; N = N * 2 + 1) {
    taskflow.clear();
    std::vector<T> ha(N, N + 5);
    std::vector<T> hb(N, N - 31);
    std::vector<T> hc(N, N - 47);
    std::vector<T> hz(N);
    T* da {nullptr};
    T* db {nullptr};
    T* dc {nullptr};
    T* dz {nullptr};
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaMalloc(&da, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&db, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&dc, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMalloc(&dz, N * sizeof(T)) == cudaSuccess);
    }).name("allocate");
    //update_memcpy: one H2D task, three successive bindings
    auto h2d_t = taskflow.emplace([&](F& cf) {
      auto h2d_t = cf.memcpy(da, ha.data(), sizeof(T) * N).name("h2d");
      cf.offload();
      cf.memcpy(h2d_t, db, hb.data(), sizeof(T) * N);
      cf.offload();
      cf.memcpy(h2d_t, dc, hc.data(), sizeof(T) * N);
      cf.offload();
    });
    auto kernel_t = taskflow.emplace([&](F& cf) {
      auto add1_t = cf.transform(
        da, da+N, db, dz,
        [] __device__ (T& v1, T& v2) { return v1 + v2; }
      );
      auto add2_t = cf.transform(
        dc, dc+N, dz, dc,
        [] __device__ (T& v1, T& v2) { return v1 - v2; }
      );
      add1_t.precede(add2_t);
    });
    //one D2H task reused for both result buffers
    auto d2h_t = taskflow.emplace([&](F& cf) {
      auto d2h_t = cf.memcpy(hc.data(), dc, sizeof(T) * N).name("d2h");
      cf.offload();
      cf.memcpy(d2h_t, hz.data(), dz, sizeof(T) * N);
      cf.offload();
    });
    //verify
    auto verify_t = taskflow.emplace([&]() {
      for(auto& c: hc) {
        REQUIRE(c == -21 - N);
      }
      for(auto& z: hz) {
        REQUIRE(z == 2 * N - 26);
      }
    });
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaFree(da) == cudaSuccess);
      REQUIRE(cudaFree(db) == cudaSuccess);
      REQUIRE(cudaFree(dc) == cudaSuccess);
      REQUIRE(cudaFree(dz) == cudaSuccess);
    }).name("deallocate");
    allocate_t.precede(h2d_t);
    h2d_t.precede(kernel_t);
    kernel_t.precede(d2h_t);
    d2h_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Instantiate for int/float/double on both graph types.
TEST_CASE("cudaFlow.update.memcpy.int" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlow, int>();
}

TEST_CASE("cudaFlow.update.memcpy.float" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlow, float>();
}

TEST_CASE("cudaFlow.update.memcpy.double" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlow, double>();
}

TEST_CASE("cudaFlowCapturer.update.memcpy.int" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlowCapturer, int>();
}

TEST_CASE("cudaFlowCapturer.update.memcpy.float" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlowCapturer, float>();
}

TEST_CASE("cudaFlowCapturer.update.memcpy.double" * doctest::timeout(300)) {
  update_memcpy<tf::cudaFlowCapturer, double>();
}
//----------------------------------------------------------------------
//update memset
//----------------------------------------------------------------------

// Exercises the memset-task update overload cf.memset(task, ptr, v, bytes):
// the task is created (redundantly re-clearing ans_a) and then rebound to
// clear `a` and to fill `b` with byte value 1.  memset/cudaMemset fill
// bytes, so the expected contents of a/b match ans_a/ans_b, which receive
// the same byte patterns via cudaMemset during initialization.
template <typename F, typename T>
void update_memset() {
  tf::Executor executor;
  tf::Taskflow taskflow;
  for(size_t N = 1; N < 65199; N = N * 2 + 1) {
    taskflow.clear();
    T* a {nullptr};
    T* b {nullptr};        // deliberately larger (N + 37) buffer
    T* ans_a {nullptr};
    T* ans_b {nullptr};
    bool* check {nullptr};
    //allocate
    auto allocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaMallocManaged(&a, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&b, (N + 37) * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&ans_a, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&ans_b, (N + 37) * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess);
    }).name("allocate");
    //initialize: randomize a/b, byte-fill the expected buffers
    auto initialize_t = taskflow.emplace([&]() {
      std::generate_n(a, N, [&](){ return ::rand() % N - N / 2; });
      std::generate_n(b, N + 37, [&](){ return ::rand() % N + N / 2; });
      REQUIRE(cudaMemset(ans_a, 0, N * sizeof(T)) == cudaSuccess);
      REQUIRE(cudaMemset(ans_b, 1, (N + 37) * sizeof(T)) == cudaSuccess);
      *check = true;
    }).name("initialize");
    //update_memset: one task, three successive bindings
    auto memset_t = taskflow.emplace([&](F& cf) {
      auto memset_t = cf.memset(ans_a, 0, N * sizeof(T));
      cf.offload();
      cf.memset(memset_t, a, 0, N * sizeof(T));
      cf.offload();
      cf.memset(memset_t, b, 1, (N + 37) * sizeof(T));
      cf.offload();
    }).name("memset");
    //verify: a vs ans_a and b vs ans_b, element-wise on the GPU
    auto verify_t = taskflow.emplace([&](F& cf) {
      cf.kernel(
        32, 512, 0,
        verify<T>,
        a, ans_a, check, N
      );
      cf.kernel(
        32, 512, 0,
        verify<T>,
        b, ans_b, check, N + 37
      );
      cf.offload();
      REQUIRE(*check);
    }).name("verify");
    //free memory
    auto deallocate_t = taskflow.emplace([&]() {
      REQUIRE(cudaFree(a) == cudaSuccess);
      REQUIRE(cudaFree(b) == cudaSuccess);
      REQUIRE(cudaFree(ans_a) == cudaSuccess);
      REQUIRE(cudaFree(ans_b) == cudaSuccess);
      REQUIRE(cudaFree(check) == cudaSuccess);
    }).name("deallocate");
    allocate_t.precede(initialize_t);
    initialize_t.precede(memset_t);
    memset_t.precede(verify_t);
    verify_t.precede(deallocate_t);
    executor.run(taskflow).wait();
  }
}

// Instantiate for int/float/double on both graph types.
TEST_CASE("cudaFlow.update.memset.int" * doctest::timeout(300)) {
  update_memset<tf::cudaFlow, int>();
}

TEST_CASE("cudaFlow.update.memset.float" * doctest::timeout(300)) {
  update_memset<tf::cudaFlow, float>();
}

TEST_CASE("cudaFlow.update.memset.double" * doctest::timeout(300)) {
  update_memset<tf::cudaFlow, double>();
}

TEST_CASE("cudaFlowCapturer.update.memset.int" * doctest::timeout(300)) {
  update_memset<tf::cudaFlowCapturer, int>();
}

TEST_CASE("cudaFlowCapturer.update.memset.float" * doctest::timeout(300)) {
  update_memset<tf::cudaFlowCapturer, float>();
}

TEST_CASE("cudaFlowCapturer.update.memset.double" * doctest::timeout(300)) {
  update_memset<tf::cudaFlowCapturer, double>();
}
// ----------------------------------------------------------------------------
// update algorithms
// ----------------------------------------------------------------------------

// Plain device-callable functors used by the update tests below in place of
// extended __device__ lambdas; state is supplied by aggregate
// initialization, e.g. SetValue{100}.

// a = v
struct SetValue {
  __device__ void operator()(int& a) const { a = v; }
  int v;
};

// data[i] = v (indexed variant, for for_each_index)
struct SetValueOnIndex {
  __device__ void operator()(int i) const { data[i] = v; }
  int* data;
  int v;
};

// v == true -> a + b, otherwise a * b
struct AddOrMultiply {
  __device__ int operator()(int a, int b) const { return v ? a + b : a *b; }
  bool v;
};

// a + v
struct AddScalar {
  __device__ int operator()(int a) const { return a + v; }
  int v;
};

// a * v
struct MultiplyScalar {
  __device__ int operator()(int a) const { return a*v; }
  int v;
};

// v == true -> (a < b), otherwise (a > b); note the int (0/1) result
struct LessOrGreater {
  __device__ int operator()(int a, int b) const { return v ? a < b : a > b; }
  bool v;
};

// true iff a equals the stored value
struct IsEqual {
  int v;
  __device__ bool operator()(int a) const { return v == a; }
};
// ----------------------------------------------------------------------------
// update for_each
// ----------------------------------------------------------------------------

// Creates a for_each task that fills the buffer with 100, then rebinds the
// very same task to fill it with 1234; the graph must keep exactly one
// task across the update and both fills must be observable.
template <typename F>
void update_for_each() {
  F flow;
  for(int n = 1; n <= 100000; n += (n / 10 + 1)) {
    flow.clear();
    auto buf = tf::cuda_malloc_shared<int>(n);
    // first pass: every element becomes 100
    auto fe_task = flow.for_each(buf, buf + n, SetValue{100});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(buf[i] == 100);
    }
    // second pass: rebind the same task so every element becomes 1234
    flow.for_each(fe_task, buf, buf + n, SetValue{1234});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(buf[i] == 1234);
    }
    tf::cuda_free(buf);
  }
}
// Run the for_each update test on both cudaFlow and cudaFlowCapturer.
TEST_CASE("cudaFlow.update.for_each" * doctest::timeout(300)) {
  update_for_each<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.for_each" * doctest::timeout(300)) {
  update_for_each<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update for_each_index
// ----------------------------------------------------------------------------

// Creates an indexed for-each task over [0, N) step 1 writing 100 through
// SetValueOnIndex, then rebinds the same task to write -100; the task
// count must stay at one across the update.
template <typename F>
void update_for_each_index() {
  F flow;
  for(int n = 1; n <= 100000; n += (n / 10 + 1)) {
    flow.clear();
    auto buf = tf::cuda_malloc_shared<int>(n);
    // first pass: buf[i] = 100 for every index
    auto idx_task = flow.for_each_index(0, n, 1, SetValueOnIndex{buf, 100});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(buf[i] == 100);
    }
    // second pass: same task, new functor value
    flow.for_each_index(idx_task, 0, n, 1, SetValueOnIndex{buf, -100});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(buf[i] == -100);
    }
    tf::cuda_free(buf);
  }
}
// Run the for_each_index update test on both cudaFlow and cudaFlowCapturer.
TEST_CASE("cudaFlow.update.for_each_index" * doctest::timeout(300)) {
  update_for_each_index<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.for_each_index" * doctest::timeout(300)) {
  update_for_each_index<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update reduce
// ----------------------------------------------------------------------------

// Creates a sum-reduction over N copies of -1 seeded with *soln = 0
// (expecting -N), then rebinds the same task to a product reduction seeded
// with -1, expecting -1 * (-1)^N = (-1)^(N+1).
template <typename F>
void update_reduce() {
  F cf;
  for(int N=1; N<=100000; N += (N/10+1)) {
    cf.clear();
    auto data = tf::cuda_malloc_shared<int>(N);
    auto soln = tf::cuda_malloc_shared<int>(1);
    for(int i=0; i<N; i++) data[i] = -1;
    *soln = 0;
    // reduce: sum of N copies of -1, folded into the 0 seed
    auto task = cf.reduce(data, data + N, soln, AddOrMultiply{true});
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == -N);
    // update reduce range: rebind the same task to a product reduction
    *soln = -1;
    cf.reduce(task, data, data + N, soln, AddOrMultiply{false});
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == ((N&1) ? 1 : -1));
    tf::cuda_free(data);
    tf::cuda_free(soln);
  }
}

// Run on both graph types.
TEST_CASE("cudaFlow.update.reduce" * doctest::timeout(300)) {
  update_reduce<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.reduce" * doctest::timeout(300)) {
  update_reduce<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update uninitialized reduce
// ----------------------------------------------------------------------------

// Like update_reduce(), but via uninitialized_reduce: the prior value of
// *soln is not folded into the result (it is never set before the first
// round, and the expectations match the bare sum/product).  The sum of N
// copies of -1 is -N; after rebinding to a product the expectation is (-1)^N.
template <typename F>
void update_uninitialized_reduce() {
  F cf;
  for(int N=1; N<=100000; N += (N/10+1)) {
    cf.clear();
    auto data = tf::cuda_malloc_shared<int>(N);
    auto soln = tf::cuda_malloc_shared<int>(1);
    for(int i=0; i<N; i++) data[i] = -1;
    // uninitialized_reduce: *soln is deliberately left unset
    auto task = cf.uninitialized_reduce(data, data + N, soln, AddOrMultiply{true});
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == -N);
    // update reduce range: rebind to a product reduction
    cf.uninitialized_reduce(
      task, data, data + N, soln, AddOrMultiply{false}
    );
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == ((N&1) ? -1 : 1));
    tf::cuda_free(data);
    tf::cuda_free(soln);
  }
}

// Run on both graph types.
TEST_CASE("cudaFlow.update.uninitialized_reduce" * doctest::timeout(300)) {
  update_uninitialized_reduce<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.uninitialized_reduce" * doctest::timeout(300)) {
  update_uninitialized_reduce<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update transform reduce
// ----------------------------------------------------------------------------

// transform_reduce over data[i] = -1: round one maps a -> a+2 (value 1)
// and sums onto *soln = 0, expecting N; round two rebinds the task to a
// product of a -> a+1 (value 0), so any seed (*soln = 8) collapses to 0.
template <typename F>
void update_transform_reduce() {
  F cf;
  for(int N=1; N<=100000; N += (N/10+1)) {
    cf.clear();
    auto data = tf::cuda_malloc_shared<int>(N);
    auto soln = tf::cuda_malloc_shared<int>(1);
    for(int i=0; i<N; i++) data[i] = -1;
    *soln = 0;
    // transform_reduce: sum of (a+2) == sum of 1s == N
    auto task = cf.transform_reduce(
      data, data + N, soln, AddOrMultiply{true}, AddScalar{2}
    );
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == N);
    // update reduce range: product of (a+1) == product of 0s == 0
    *soln = 8;
    cf.transform_reduce(
      task, data, data + N, soln, AddOrMultiply{false}, AddScalar{1}
    );
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == 0);
    tf::cuda_free(data);
    tf::cuda_free(soln);
  }
}

// Run on both graph types.
TEST_CASE("cudaFlow.update.transform_reduce" * doctest::timeout(300)) {
  update_transform_reduce<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.transform_reduce" * doctest::timeout(300)) {
  update_transform_reduce<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update transform uninitialized reduce
// ----------------------------------------------------------------------------

// Like update_transform_reduce(), but the prior *soln value is not folded
// in (the seeds 100 and 8 do not appear in the expectations): round one
// sums a -> a+2 (value 1) to N; round two rebinds to a product of
// a -> a+0 (value -1), expecting (-1)^N.
template <typename F>
void update_transform_uninitialized_reduce() {
  F cf;
  for(int N=1; N<=100000; N += (N/10+1)) {
    cf.clear();
    auto data = tf::cuda_malloc_shared<int>(N);
    auto soln = tf::cuda_malloc_shared<int>(1);
    for(int i=0; i<N; i++) data[i] = -1;
    *soln = 100;
    // transform_reduce: seed is ignored, so the result is the bare sum N
    auto task = cf.transform_uninitialized_reduce(
      data, data + N, soln, AddOrMultiply{true}, AddScalar{2}
    );
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == N);
    // update reduce range: rebind to the product of (-1)s
    *soln = 8;
    cf.transform_uninitialized_reduce(
      task, data, data + N, soln, AddOrMultiply{false}, AddScalar{0}
    );
    cf.offload();
    REQUIRE(cf.num_tasks() == 1);
    REQUIRE(*soln == ((N&1) ? -1 : 1));
    tf::cuda_free(data);
    tf::cuda_free(soln);
  }
}

// Run on both graph types.
TEST_CASE("cudaFlow.update.transform_uninitialized_reduce" * doctest::timeout(300)) {
  update_transform_uninitialized_reduce<tf::cudaFlow>();
}

TEST_CASE("cudaFlowCapturer.update.transform_uninitialized_reduce" * doctest::timeout(300)) {
  update_transform_uninitialized_reduce<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// transform
// ----------------------------------------------------------------------------

// Checks that a unary transform task can be rebound to a new input range
// and functor: round one writes 2 * input1 into output, round two rebinds
// the same task to write 10 * input2.  The task count must stay at one.
template <typename F>
void update_transform() {
  F flow;
  for(int n = 1; n <= 100000; n += (n / 10 + 1)) {
    flow.clear();
    auto src1 = tf::cuda_malloc_shared<int>(n);
    auto src2 = tf::cuda_malloc_shared<int>(n);
    auto dst  = tf::cuda_malloc_shared<int>(n);
    for(int i = 0; i < n; ++i) {
      src1[i] = i;
      src2[i] = -i;
      dst[i]  = 7;   // sentinel that must be overwritten
    }
    // round one: dst = 2 * src1
    auto t = flow.transform(src1, src1 + n, dst, MultiplyScalar{2});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(dst[i] == src1[i] * 2);
    }
    // round two: rebind the same task so that dst = 10 * src2
    flow.transform(t, src2, src2 + n, dst, MultiplyScalar{10});
    flow.offload();
    REQUIRE(flow.num_tasks() == 1);
    for(int i = 0; i < n; ++i) {
      REQUIRE(dst[i] == src2[i] * 10);
    }
    tf::cuda_free(src1);
    tf::cuda_free(src2);
    tf::cuda_free(dst);
  }
}
TEST_CASE("cudaFlow.update.transform" * doctest::timeout(300)) {
update_transform<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.transform" * doctest::timeout(300)) {
update_transform<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// binary transform
// ----------------------------------------------------------------------------
// update binary_transform
template <typename F>
void update_binary_transform() {
F cf;
for(int N=1; N<=100000; N += (N/10+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
auto output = tf::cuda_malloc_shared<int>(N);
for(int i=0; i<N; i++) {
input1[i] = i;
input2[i] = -i;
output[i] = 7;
}
// transform
auto task = cf.transform(
input1, input1+N, input2, output, AddOrMultiply{false}
//[] __device__ (int a, int b) { return a*b; }
);
cf.offload();
REQUIRE(cf.num_tasks() == 1);
for(int i=0; i<N; i++) {
REQUIRE(output[i] == input1[i] * input2[i]);
}
// update transform parameters
cf.transform(task,
input1, input1+N, input2, output, AddOrMultiply{true}
//[] __device__ (int a, int b) { return a+b; }
);
cf.offload();
REQUIRE(cf.num_tasks() == 1);
for(int i=0; i<N; i++) {
REQUIRE(output[i] == input1[i]+input2[i]);
}
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(output);
}
}
TEST_CASE("cudaFlow.update.binary_transform" * doctest::timeout(300)) {
update_binary_transform<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.binary_transform" * doctest::timeout(300)) {
update_binary_transform<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update scan
// ----------------------------------------------------------------------------
template <typename F>
void update_scan() {
F cf;
for(int N=1; N<=100000; N += (N/10+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
auto output1 = tf::cuda_malloc_shared<int>(N);
auto output2 = tf::cuda_malloc_shared<int>(N);
for(int i=0; i<N; i++) {
input1[i] = i;
input2[i] = -i;
output1[i] = 0;
output2[i] = 0;
}
// scan
auto inclusive_scan = cf.inclusive_scan(
input1, input1+N, output1, AddOrMultiply{true}
);
auto exclusive_scan = cf.exclusive_scan(
input2, input2+N, output2, AddOrMultiply{true}
);
cf.offload();
REQUIRE(cf.num_tasks() == 2);
for(int i=1; i<N; i++) {
REQUIRE(output1[i] == output1[i-1] + input1[i]);
REQUIRE(output2[i] == output2[i-1] + input2[i-1]);
}
// update scan
cf.inclusive_scan(inclusive_scan,
input2, input2+N, output2, AddOrMultiply{true}
);
cf.exclusive_scan(exclusive_scan,
input1, input1+N, output1, AddOrMultiply{true}
);
cf.offload();
REQUIRE(cf.num_tasks() == 2);
for(int i=1; i<N; i++) {
REQUIRE(output1[i] == output1[i-1] + input1[i-1]);
REQUIRE(output2[i] == output2[i-1] + input2[i]);
}
// ---------- transform_scan
cf.clear();
inclusive_scan = cf.transform_inclusive_scan(
input1, input1+N, output1, AddOrMultiply{true}, MultiplyScalar{2}
//[] __device__ (int a, int b) { return a + b; },
//[] __device__ (int a) { return a * 2; }
);
exclusive_scan = cf.transform_exclusive_scan(
input2, input2+N, output2, AddOrMultiply{true}, MultiplyScalar{10}
//[] __device__ (int a, int b) { return a + b; },
//[] __device__ (int a) { return a * 10; }
);
cf.offload();
REQUIRE(cf.num_tasks() == 2);
for(int i=1; i<N; i++) {
REQUIRE(output1[i] == output1[i-1] + input1[i]*2);
REQUIRE(output2[i] == output2[i-1] + input2[i-1]*10);
}
// ---------- update transform scan
cf.transform_inclusive_scan(inclusive_scan,
input2, input2+N, output2, AddOrMultiply{true}, MultiplyScalar{2}
//[] __device__ (int a, int b) { return a + b; },
//[] __device__ (int a) { return a * 2; }
);
cf.transform_exclusive_scan(exclusive_scan,
input1, input1+N, output1, AddOrMultiply{true}, MultiplyScalar{10}
//[] __device__ (int a, int b) { return a + b; },
//[] __device__ (int a) { return a * 10; }
);
cf.offload();
REQUIRE(cf.num_tasks() == 2);
for(int i=1; i<N; i++) {
REQUIRE(output2[i] == output2[i-1] + input2[i]*2);
REQUIRE(output1[i] == output1[i-1] + input1[i-1]*10);
}
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(output1);
tf::cuda_free(output2);
}
}
TEST_CASE("cudaFlow.update.scan" * doctest::timeout(300)) {
update_scan<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.scan" * doctest::timeout(300)) {
update_scan<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update merge
// ----------------------------------------------------------------------------
template <typename F>
void update_merge() {
F cf;
for(int N=1; N<=100000; N += (N/10+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(2*N);
auto output1 = tf::cuda_malloc_shared<int>(3*N);
auto output2 = tf::cuda_malloc_shared<int>(3*N);
std::iota(input1, input1+N, 0);
std::iota(input2, input2+2*N, 0);
std::merge(input1, input1+N, input2, input2+2*N, output2);
// merge
auto merge = cf.merge(
input1, input1+N, input2, input2+2*N, output1, tf::cuda_less<int>()
);
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(output1, output1+3*N));
for(int i=0; i<3*N; i++) {
REQUIRE(output1[i] == output2[i]);
output1[i] = output2[i] = rand();
}
// update merge
cf.merge(merge,
input1, input1+N, input2, input2+N, output2, tf::cuda_less<int>()
);
cf.offload();
std::merge(input1, input1+N, input2, input2+N, output1);
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(output2, output2+2*N));
for(int i=0; i<2*N; i++) {
REQUIRE(output1[i] == output2[i]);
}
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(output1);
tf::cuda_free(output2);
}
}
TEST_CASE("cudaFlow.update.merge" * doctest::timeout(300)) {
update_merge<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.merge" * doctest::timeout(300)) {
update_merge<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update sort
// ----------------------------------------------------------------------------
template <typename F>
void update_sort() {
F cf;
for(int N=1; N<=100000; N += (N/100+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
for(int i=0; i<N; i++) {
input1[i] = rand();
input2[i] = rand();
}
// create sort
auto sort = cf.sort(input1, input1+N, LessOrGreater{true});
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(input1, input1+N));
// update sort
cf.sort(sort, input2, input2+N, LessOrGreater{true});
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(input2, input2+N, std::less<int>()));
// update sort with a different kernel
cf.sort(sort, input1, input1+N, tf::cuda_greater<int>());
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(input1, input1+N, std::greater<int>()));
// free the data
tf::cuda_free(input1);
tf::cuda_free(input2);
}
}
TEST_CASE("cudaFlow.update.sort" * doctest::timeout(300)) {
update_sort<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.sort" * doctest::timeout(300)) {
update_sort<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update sort_by_key
// ----------------------------------------------------------------------------
template <typename F>
void update_sort_by_key() {
std::random_device rd;
std::mt19937 g(rd());
F cf;
for(int N=1; N<=100000; N += (N/100+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
auto index1 = tf::cuda_malloc_shared<int>(N);
auto index2 = tf::cuda_malloc_shared<int>(N);
std::vector<int> index(N);
for(int i=0; i<N; i++) {
input1[i] = i;
input2[i] = i;
index1[i] = i;
index2[i] = i;
index [i] = i;
}
std::shuffle(input1, input1+N, g);
std::shuffle(input2, input2+N, g);
// create sort
std::sort(index.begin(), index.end(), [&](auto i, auto j){
return input1[i] < input1[j];
});
auto sort = cf.sort_by_key(input1, input1+N, index1, LessOrGreater{true});
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(input1, input1+N));
for(int i=0; i<N; i++) {
REQUIRE(index[i] == index1[i]);
}
// update sort
for(int i=0; i<N; i++) {
index[i] = i;
}
std::sort(index.begin(), index.end(), [&](auto i, auto j){
return input2[i] > input2[j];
});
cf.sort_by_key(sort, input2, input2+N, index2, LessOrGreater{false});
cf.offload();
REQUIRE(cf.num_tasks() == 1);
REQUIRE(std::is_sorted(input2, input2+N, std::greater<int>()));
for(int i=0; i<N; i++) {
REQUIRE(index[i] == index2[i]);
}
// free the data
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(index1);
tf::cuda_free(index2);
}
}
TEST_CASE("cudaFlow.update.sort_by_key" * doctest::timeout(300)) {
update_sort_by_key<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.sort_by_key" * doctest::timeout(300)) {
update_sort_by_key<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update find
// ----------------------------------------------------------------------------
template <typename F>
void update_find() {
F cf;
for(unsigned N=1; N<=100000; N += (N/100+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
auto index1 = tf::cuda_malloc_shared<unsigned>(1);
auto index2 = tf::cuda_malloc_shared<unsigned>(1);
for(unsigned i=0; i<N; i++) {
input1[i] = i;
input2[i] = i;
}
// create find
auto find_if = cf.find_if(input1, input1+N, index1, IsEqual{(int)(N/2)});
cf.offload();
REQUIRE(*index1 != N);
REQUIRE(input1[*index1] == N/2);
// update find
cf.find_if(find_if, input2, input2+N, index2, IsEqual{(int)(N/2 + 1)});
cf.offload();
REQUIRE(cf.num_tasks() == 1);
if( N/2+1 >= N) {
REQUIRE(*index2 == N);
}
else {
REQUIRE(input2[*index2] == (N/2+1));
}
// free the data
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(index1);
tf::cuda_free(index2);
}
}
TEST_CASE("cudaFlow.update.find" * doctest::timeout(300)) {
update_find<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.find" * doctest::timeout(300)) {
update_find<tf::cudaFlowCapturer>();
}
// ----------------------------------------------------------------------------
// update min-/max-element
// ----------------------------------------------------------------------------
template <typename F>
void update_minmax_element() {
F cf;
for(unsigned N=1; N<=100000; N += (N/100+1)) {
cf.clear();
auto input1 = tf::cuda_malloc_shared<int>(N);
auto input2 = tf::cuda_malloc_shared<int>(N);
auto index1 = tf::cuda_malloc_shared<unsigned>(1);
auto index2 = tf::cuda_malloc_shared<unsigned>(1);
for(unsigned i=0; i<N; i++) {
input1[i] = rand();
input2[i] = rand();
}
// create find
auto find_min = cf.min_element(input1, input1+N, index1, tf::cuda_less<int>());
auto find_max = cf.max_element(input2, input2+N, index2, tf::cuda_less<int>());
cf.offload();
REQUIRE(input1[*index1] == *std::min_element(input1, input1+N));
REQUIRE(input2[*index2] == *std::max_element(input2, input2+N));
// update find
cf.min_element(find_min, input2, input2+N, index2, tf::cuda_less<int>());
cf.max_element(find_max, input1, input1+N, index1, tf::cuda_less<int>());
cf.offload();
REQUIRE(cf.num_tasks() == 2);
REQUIRE(input2[*index2] == *std::min_element(input2, input2+N));
REQUIRE(input1[*index1] == *std::max_element(input1, input1+N));
// free the data
tf::cuda_free(input1);
tf::cuda_free(input2);
tf::cuda_free(index1);
tf::cuda_free(index2);
}
}
TEST_CASE("cudaFlow.update.minmax_element" * doctest::timeout(300)) {
update_minmax_element<tf::cudaFlow>();
}
TEST_CASE("cudaFlowCapturer.update.minmax_element" * doctest::timeout(300)) {
update_minmax_element<tf::cudaFlowCapturer>();
} | the_stack |
#define EMUSYNC __syncthreads();
#else
#define EMUSYNC
#endif
#include "support_kernels.cu"
//Reduce function to get the minimum timestep
//Reduce function to get the minimum timestep
//
// Grid-strided partial min-reduction over the per-particle end times.
// Each thread folds a strided subset of time[i].y into a private
// minimum; the block then combines the per-thread minima through a
// shared-memory tree reduction.  One partial minimum per block is
// written to tnext[blockIdx.x] — the per-block results still need a
// final reduction elsewhere to obtain the global minimum.
//
// Parameters:
//   n_bodies - number of particles to scan
//   time     - per-particle times (double2: .x = step begin, .y = step end);
//              only .y is read here
//   tnext    - per-block output, length >= gridDim.x
//   sdata    - dynamic shared-memory scratch, one double per thread;
//              volatile so the sync-free warp tail below is not broken
//              by values being cached in registers
__device__ void get_TnextD(const int n_bodies,
                           double2 *time,
                           double *tnext, volatile double *sdata) {
  //double2 time : x is time begin, y is time end
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize   = blockDim.x;
  unsigned int tid      = threadIdx.x;
  unsigned int i        = blockIdx.x*(blockSize*2) + threadIdx.x;   // each block covers 2*blockSize elements per pass
  unsigned int gridSize = blockSize*2*gridDim.x;                    // stride of one full grid pass
  sdata[tid] = 1.0e10f;     // sentinel: assumed larger than any real end time
  double tmin = 1.0e10f;

  // we reduce multiple elements per thread.  The number is determined by the
  // number of active thread blocks (via gridSize).  More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  while (i < n_bodies) {
    if (i < n_bodies)             tmin = fmin(tmin, time[i            ].y);  // always true here; kept for symmetry with the second load
    if (i + blockSize < n_bodies) tmin = fmin(tmin, time[i + blockSize].y);
    i += gridSize;
  }
  sdata[tid] = tmin;
  __syncthreads();

  // do reduction in shared mem (tree reduction, halving the active threads)
  if (blockSize >= 512) { if (tid < 256) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 256]); } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 128]); } __syncthreads(); }
  if (blockSize >= 128) { if (tid <  64) { sdata[tid] = tmin = fmin(tmin, sdata[tid +  64]); } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
  // Last 32 threads: relies on warp-synchronous execution (classic
  // pre-Volta idiom — volatile shared memory, no __syncthreads()).
  // EMUSYNC expands to __syncthreads() only in device-emulation builds.
  if (tid < 32)
#endif
    {
      if (blockSize >= 64) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 32]); EMUSYNC; }
      if (blockSize >= 32) { sdata[tid] = tmin = fmin(tmin, sdata[tid + 16]); EMUSYNC; }
      if (blockSize >= 16) { sdata[tid] = tmin = fmin(tmin, sdata[tid +  8]); EMUSYNC; }
      if (blockSize >=  8) { sdata[tid] = tmin = fmin(tmin, sdata[tid +  4]); EMUSYNC; }
      if (blockSize >=  4) { sdata[tid] = tmin = fmin(tmin, sdata[tid +  2]); EMUSYNC; }
      if (blockSize >=  2) { sdata[tid] = tmin = fmin(tmin, sdata[tid +  1]); EMUSYNC; }
    }

  // write result for this block to global mem
  if (tid == 0) tnext[blockIdx.x] = sdata[0];
}
// Kernel entry point for the minimum-end-time reduction.  Produces one
// partial minimum per block in tnext (see get_TnextD); the launch must
// supply dynamic shared memory for one double per thread.
extern "C" __global__ void get_Tnext(const int n_bodies,
                                     double2 *time,
                                     double *tnext) {
  extern __shared__ double shMinScratch[];
  get_TnextD(n_bodies, time, tnext, shMinScratch);
}
//Reduce function to get the number of active particles
//Reduce function to get the number of active particles
//
// Grid-strided partial sum-reduction over the 'valid' flags.  Each
// thread sums a strided subset, the block then combines the per-thread
// sums through a shared-memory tree reduction.  One partial sum per
// block is written to tnact[blockIdx.x]; the per-block values must be
// combined afterwards for the total count.
//
// Parameters:
//   n_bodies - number of entries in 'valid'
//   valid    - per-particle flags, summed as integers (presumably 0/1
//              indicators — confirm against the producer kernel)
//   tnact    - per-block output, length >= gridDim.x
//   sdataInt - dynamic shared-memory scratch, one int per thread;
//              volatile for the sync-free warp tail below
__device__ void get_nactiveD(const int n_bodies,
                             uint *valid,
                             uint *tnact, volatile int *sdataInt) {
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize   = blockDim.x;
  unsigned int tid      = threadIdx.x;
  unsigned int i        = blockIdx.x*(blockSize*2) + threadIdx.x;   // each block covers 2*blockSize elements per pass
  unsigned int gridSize = blockSize*2*gridDim.x;                    // stride of one full grid pass
  sdataInt[tid] = 0;
  int sum = 0;

  // we reduce multiple elements per thread.  The number is determined by the
  // number of active thread blocks (via gridSize).  More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  while (i < n_bodies) {
    if (i < n_bodies)             sum = sum + valid[i            ];  // always true here; kept for symmetry
    if (i + blockSize < n_bodies) sum = sum + valid[i + blockSize];
    i += gridSize;
  }
  sdataInt[tid] = sum;
  __syncthreads();

  // do reduction in shared mem (tree reduction, halving the active threads)
  if (blockSize >= 512) { if (tid < 256) { sdataInt[tid] = sum = sum + sdataInt[tid + 256]; } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) { sdataInt[tid] = sum = sum + sdataInt[tid + 128]; } __syncthreads(); }
  if (blockSize >= 128) { if (tid <  64) { sdataInt[tid] = sum = sum + sdataInt[tid +  64]; } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
  // Last 32 threads: warp-synchronous tail (volatile shared memory, no
  // __syncthreads()); EMUSYNC is a no-op outside device emulation.
  if (tid < 32)
#endif
    {
      if (blockSize >= 64) { sdataInt[tid] = sum = sum + sdataInt[tid + 32]; EMUSYNC; }
      if (blockSize >= 32) { sdataInt[tid] = sum = sum + sdataInt[tid + 16]; EMUSYNC; }
      if (blockSize >= 16) { sdataInt[tid] = sum = sum + sdataInt[tid +  8]; EMUSYNC; }
      if (blockSize >=  8) { sdataInt[tid] = sum = sum + sdataInt[tid +  4]; EMUSYNC; }
      if (blockSize >=  4) { sdataInt[tid] = sum = sum + sdataInt[tid +  2]; EMUSYNC; }
      if (blockSize >=  2) { sdataInt[tid] = sum = sum + sdataInt[tid +  1]; EMUSYNC; }
    }

  // write result for this block to global mem
  if (tid == 0) tnact[blockIdx.x] = sdataInt[0];
}
//Reduce function to get the number of active particles
// Kernel entry point for the active-particle count reduction.  Writes
// one partial sum per block into tnact (see get_nactiveD); the launch
// must supply dynamic shared memory for one int per thread.
extern "C" __global__ void get_nactive(const int n_bodies,
                                       uint *valid,
                                       uint *tnact) {
  extern __shared__ int shSumScratch[];
  get_nactiveD(n_bodies, valid, tnact, shSumScratch);
}
#if 0
// NOTE: compiled out (superseded by the double-precision variant in the
// '#if 1' block below).  Kept for reference: this version rewound the
// state from the previous prediction time tp before predicting to tc,
// and updated pos/vel in place instead of writing to separate
// prediction buffers.
extern "C" __global__ void predict_particles(const int n_bodies,
                                             float tc,
                                             float tp,
                                             real4 *pos,
                                             real4 *vel,
                                             real4 *acc,
                                             float2 *time,
                                             uint *body2grouplist,
                                             uint *valid_list){
  const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
  const uint tid = threadIdx.x;
  const uint idx = bid * blockDim.x + tid;

  if (idx >= n_bodies) return;

  float4 p = pos [idx];
  float4 v = vel [idx];
  float4 a = acc [idx];

  float tb = time[idx].x;   // time-step begin
  float te = time[idx].y;   // time-step end

  float dt_cb = tc - tb;    // elapsed since step begin
  float dt_pb = tp - tb;    // elapsed at the previous prediction

  // Undo the previous prediction (v and p back to the step-begin state) ...
  v.x -= a.x*dt_pb;
  v.y -= a.y*dt_pb;
  v.z -= a.z*dt_pb;

  p.x -= (v.x*dt_pb + a.x*dt_pb*dt_pb*0.5f);
  p.y -= (v.y*dt_pb + a.y*dt_pb*dt_pb*0.5f);
  p.z -= (v.z*dt_pb + a.z*dt_pb*dt_pb*0.5f);

  // ... then predict forward to the current time tc.
  p.x += (v.x*dt_cb + a.x*dt_cb*dt_cb*0.5f);
  p.y += (v.y*dt_cb + a.y*dt_cb*dt_cb*0.5f);
  p.z += (v.z*dt_cb + a.z*dt_cb*dt_cb*0.5f);

  v.x += a.x*dt_cb;
  v.y += a.y*dt_cb;
  v.z += a.z*dt_cb;

  pos[idx] = p;
  vel[idx] = v;

  //Set the group to active if the time current = time end of
  //this particle. Can be that multiple particles write to the
  //same location but the net result is the same
  int grpID = body2grouplist[idx];
  if(tc == te)
  {
    valid_list[grpID] = grpID | (1 << 31);
  }
}
#endif
#if 1
// Predict every particle forward to the current global time tc.
// The integrated state (pos/vel/acc, valid at the particle's own
// step-begin time) is left untouched; the second-order prediction
//   x(tc) = x + v*dt + 0.5*a*dt^2,   v(tc) = v + a*dt
// is written to the separate buffers pPos/pVel.  Additionally, the
// group of any particle whose step ends exactly at tc is flagged as
// active in valid_list.
extern "C" __global__ void predict_particles(const int n_bodies,
                                             double tc,
                                             double tp,
                                             real4 *pos,
                                             real4 *vel,
                                             real4 *acc,
                                             double2 *time,
                                             uint *body2grouplist,
                                             uint *valid_list,
                                             real4 *pPos,
                                             real4 *pVel){
  const uint blkId = blockIdx.y * gridDim.x + blockIdx.x;
  const uint pid   = blkId * blockDim.x + threadIdx.x;
  if (pid >= n_bodies) return;

  float4 predPos = pos[pid];
  float4 predVel = vel[pid];
  float4 accel   = acc[pid];

  double tBegin = time[pid].x;
  double tEnd   = time[pid].y;

  // Elapsed time since this particle's step began; the interval is
  // short, so single precision is used for the prediction itself.
  float dt = (float)(tc - tBegin);
  // float dt_pb = tp - tBegin;

  predPos.x += predVel.x*dt + accel.x*dt*dt*0.5f;
  predPos.y += predVel.y*dt + accel.y*dt*dt*0.5f;
  predPos.z += predVel.z*dt + accel.z*dt*dt*0.5f;

  predVel.x += accel.x*dt;
  predVel.y += accel.y*dt;
  predVel.z += accel.z*dt;

  pPos[pid] = predPos;
  pVel[pid] = predVel;

  // Activate this particle's group when its step ends right now.
  // Several particles may store to the same slot, but they all write
  // the identical value, so the race is benign.
  int grpID = body2grouplist[pid];
  if(tc == tEnd)
  {
    valid_list[grpID] = grpID | (1 << 31);
  }
}
#endif
// Flag as active every group that contains at least one particle whose
// time-step has run out (end time <= current time tc).
extern "C" __global__ void setActiveGroups(const int n_bodies,
                                           double tc,
                                           double2 *time,
                                           uint *body2grouplist,
                                           uint *valid_list){
  const uint blk = blockIdx.y * gridDim.x + blockIdx.x;
  const uint idx = blk * blockDim.x + threadIdx.x;
  if (idx >= n_bodies) return;

  const double tEnd = time[idx].y;

  // Multiple particles of the same group may store to the same slot;
  // the written value is identical, so the outcome is deterministic.
  const int grpID = body2grouplist[idx];

  // (A variant that also activated particles whose end time lies within
  //  1/16384 of tc was tried here and is currently disabled.)
  if(tEnd <= tc)
  {
    valid_list[grpID] = grpID | (1 << 31);
  }
}
#ifdef _AMUSE_STOPPING_CONDITIONS_
// Corrector step, AMUSE stopping-conditions build.  For every particle
// that was active this step:
//   1. commit the predicted position (pos <- pPos),
//   2. correct the predicted velocity by 0.5*dt*(a1 - a0),
//   3. store the new acceleration (acc0 <- a1),
//   4. check the nearest neighbour for an overlap of the particle radii
//      and, if overlapping, record the pair in pairDetection.
// Pair entries are tagged with bit 31; if i and j are mutual nearest
// neighbours the pair is recorded twice (once from each side).
extern "C" __global__ void correct_particles(const int n_bodies,
                                             double tc,
                                             double2 *time,
                                             uint *active_list,
                                             real4 *vel,
                                             real4 *acc0,
                                             real4 *acc1,
                                             real4 *pos,
                                             real4 *pPos,
                                             real4 *pVel,
                                             int *ngb,
                                             int *pairDetection) {
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int dim = blockDim.x * blockDim.y;
  int idx = bid * dim + tid;
  if (idx >= n_bodies) return;

  //Check if particle is set to active during approx grav
  if (active_list[idx] != 1) return;

  float4 v  = vel [idx];   // overwritten by pVel below; only v.w (radius) survives via pVel
  float4 a0 = acc0[idx];   // acceleration at step begin
  float4 a1 = acc1[idx];   // newly computed acceleration

  double tb = time[idx].x; // step-begin time
  // float dt = time[idx].y;
  float dt_cb = (float)(tc - tb);

  //Store the predicted position as the one to use
  pos[idx] = pPos[idx];

  //Correct the position
  // (velocity correction: v += 0.5*dt*(a1 - a0) applied to the
  //  predicted velocity)
  v = pVel[idx];

  dt_cb *= 0.5f;
  v.x += (a1.x - a0.x)*dt_cb;
  v.y += (a1.y - a0.y)*dt_cb;
  v.z += (a1.z - a0.z)*dt_cb;

  //Store the corrected velocity, accelaration and the new time step info
  vel [idx] = v;
  acc0[idx] = a1;

  //Code specific to stopping conditions
  int j = ngb[idx];   // nearest-neighbour index from the gravity pass, -1 if none

#if 1
  if(j >= 0)    //Only check if we have a valid nearby neighbour
    {
      float4 posi = pPos[idx];
      float4 posj = pPos[j];
      float  radj = vel[j].w;  //Particle radius is stored in w component of velocity
      float  radi = v.w;

      //Compute distance and compare to summed radius
      float ds2 = ((posi.x-posj.x)*(posi.x-posj.x)) +
                  ((posi.y-posj.y)*(posi.y-posj.y)) +
                  ((posi.z-posj.z)*(posi.z-posj.z));

      float rsum = radi + radj;
      if (ds2 <= rsum*rsum)
        {
          float4 veli = pVel[idx];
          float4 velj = pVel[j];

          //Compute distance and compare to summed radius
          // r = |dr|^2, v = |dv|^2, vr = dr.dv (before the sqrts below).
          // NOTE: this inner 'float v' shadows the outer 'float4 v'.
          float r = ((posi.x-posj.x)*(posi.x-posj.x)) +
                    ((posi.y-posj.y)*(posi.y-posj.y)) +
                    ((posi.z-posj.z)*(posi.z-posj.z));
          float v = ((veli.x-velj.x)*(veli.x-velj.x)) +
                    ((veli.y-velj.y)*(veli.y-velj.y)) +
                    ((veli.z-velj.z)*(veli.z-velj.z));
          float vr = ((posi.x-posj.x)*(veli.x-velj.x)) +
                     ((posi.y-posj.y)*(veli.y-velj.y)) +
                     ((posi.z-posj.z)*(veli.z-velj.z));
          //TODO remove these expensive operations instead just
          //do vr*vr and EPS*EPS
          r = sqrt(r);
          v = sqrt(v);
#define EPS 0.001  // see couple/multiples.py
          // Radial-velocity filter, currently bypassed (vr is unused
          // while the check below is disabled):
          //      if (abs(vr) < EPS*r*v)
          if(1)   //JB: 9 sept 13 . Disabled untill we figure out why tests fail
            {
              //Collision detected, store the indices of the involved particles
              //Note that this will create double items in the final list
              //if j is nearest neighbour of i and i nearest neighbour of j
              pairDetection[2*idx+0] = idx | (1 << 31);
              pairDetection[2*idx+1] = j   | (1 << 31);

              //Another option is to store it like this, but this destroys the
              //info about pairs
            }
        }//if ds2 <=
    }//if j >= 0
#endif
}
#else
// Corrector step (build without stopping conditions).  For every
// particle that was active this step: commit the predicted position
// (pos <- pPos), apply the velocity correction 0.5*dt*(a1 - a0) to the
// predicted velocity, and store the new acceleration (acc0 <- a1).
extern "C" __global__ void correct_particles(const int n_bodies,
                                             double tc,
                                             double2 *time,
                                             uint *active_list,
                                             real4 *vel,
                                             real4 *acc0,
                                             real4 *acc1,
                                             real4 *pos,
                                             real4 *pPos,
                                             real4 *pVel) {
  const int blk = blockIdx.y * gridDim.x + blockIdx.x;
  const int thr = threadIdx.y * blockDim.x + threadIdx.x;
  const int idx = blk * (blockDim.x * blockDim.y) + thr;
  if (idx >= n_bodies) return;

  // Only particles flagged during the approximate-gravity pass are
  // corrected.
  if (active_list[idx] != 1) return;

  float4 a0 = acc0[idx];      // acceleration at step begin
  float4 a1 = acc1[idx];      // newly computed acceleration

  float tb    = time[idx].x;  // step-begin time
  // float dt = time[idx].y;
  float dt_cb = (float)(tc - tb);

  // Adopt the predicted position as the corrected one.
  pos[idx] = pPos[idx];

  // Velocity correction on top of the predicted velocity.
  float4 v = pVel[idx];
  dt_cb *= 0.5f;
  v.x += (a1.x - a0.x)*dt_cb;
  v.y += (a1.y - a0.y)*dt_cb;
  v.z += (a1.z - a0.z)*dt_cb;

  // Commit the corrected velocity and the new acceleration.
  vel [idx] = v;
  acc0[idx] = a1;

  // time[idx] = (float2){tc, tc + dt};
}
#endif
#if 0
// NOTE: compiled out — an older single-precision corrector that rewound
// the prediction (v -= a0*dt) before applying the averaged correction
// 0.5*dt*(a0 + a1).  Superseded by the pPos/pVel based versions above.
extern "C" __global__ void correct_particles(const int n_bodies,
                                             float tc,
                                             float2 *time,
                                             uint *active_list,
                                             real4 *vel,
                                             real4 *acc0,
                                             real4 *acc1) {
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int dim = blockDim.x * blockDim.y;
  int idx = bid * dim + tid;
  if (idx >= n_bodies) return;

  //Check if particle is set to active during approx grav
  if (active_list[idx] != 1) return;

  float4 v  = vel [idx];
  float4 a0 = acc0[idx];
  float4 a1 = acc1[idx];

  float tb    = time[idx].x;
  float dt_cb = tc - tb;

  // Undo the prediction ...
  v.x -= a0.x * dt_cb;
  v.y -= a0.y * dt_cb;
  v.z -= a0.z * dt_cb;

  // ... then apply the trapezoidal correction.
  dt_cb *= 0.5f;
  v.x += (a0.x + a1.x)*dt_cb;
  v.y += (a0.y + a1.y)*dt_cb;
  v.z += (a0.z + a1.z)*dt_cb;

  vel [idx] = v;
  acc0[idx] = a1;
}
#endif
// Compute a new block time-step for every particle that was active in
// the current step, and store it as time[idx] = (tc, tc + dt).
//
// The candidate step is derived from two estimators evaluated against a
// single reference neighbour j:
//   - minimum crossing time:  mct = ds2^2 / vs^2
//   - free-fall time:         fft = ds2 / (|da|^2 * (mi+mj)^2)
// with dt_est = (min(mct, fft))^(1/4).  The neighbour is the
// nearest-neighbour index produced by the gravity pass (ngb[idx]); when
// none is available (< 0), the nearer of the two particles adjacent in
// body order (idx-1, idx+1) is used instead.  The step is scaled by
// eta, snapped to a power of two no larger than 2^-dt_limit, halved
// until it divides tc evenly (so steps stay block-synchronized), and —
// when ADAPTIVE_TIMESTEP is defined — bounded below by the
// interface-supplied timeStep; otherwise timeStep is used directly.
//
// BUGFIX: 'mj' (neighbour mass) was only assigned on the j < 0 fallback
// path but is read unconditionally via mij = mi + mj, i.e. it was used
// uninitialized whenever a valid neighbour existed.  It is now set from
// bodies_pos[j].w (mass is stored in the w component) on that path too.
extern "C" __global__ void compute_dt(const int n_bodies,
                                      double tc,
                                      float eta,
                                      int dt_limit,
                                      float eps2,
                                      double2 *time,
                                      real4 *vel,
                                      int *ngb,
                                      real4 *bodies_pos,
                                      real4 *bodies_acc,
                                      uint *active_list,
                                      float timeStep){
  const int bid = blockIdx.y * gridDim.x + blockIdx.x;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int dim = blockDim.x * blockDim.y;
  int idx = bid * dim + tid;
  if (idx >= n_bodies) return;

  // Only particles flagged during the approximate-gravity pass get a
  // new time-step.
  if (active_list[idx] != 1) return;

  int j = ngb[idx];

  float4 ri, rj;
  float4 vi, vj;
  float4 ai, aj;
  float ds2, mi, mj;

  ri = bodies_pos[idx];
  mi = ri.w;              // particle mass lives in the w component
  vi = vel[idx];
  ai = bodies_acc[idx];

  int j1, j2;

  if (j >= 0) {
    // Use the nearest neighbour found by the gravity kernel.
    rj = bodies_pos[j];
    mj = rj.w;            // BUGFIX: previously left uninitialized here
    float3 dr = {ri.x - rj.x,
                 ri.y - rj.y,
                 ri.z - rj.z};
    ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
  } else {
    // No neighbour available: fall back to the nearer of the particles
    // adjacent in body order.
    j1 = max(0, idx - 1);
    rj = bodies_pos[j1];
    mj = rj.w;
    float3 dr = {ri.x - rj.x,
                 ri.y - rj.y,
                 ri.z - rj.z};
    if (idx != j1) ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
    else           ds2 = 1.0e10f;   // degenerate candidate (idx == 0)

    j2 = min(n_bodies-1, idx + 1);
    rj = bodies_pos[j2];
    dr = (float3){ri.x - rj.x,
                  ri.y - rj.y,
                  ri.z - rj.z};
    if (idx != j2) {
      if (dr.x*dr.x + dr.y*dr.y + dr.z*dr.z < ds2) {
        ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
        j  = j2;
        mj = rj.w;
      } else {
        j = j1;
      };
    } else {
      j = j1;
    }
  }

  // Soften the separation so the estimators stay finite for very close
  // encounters.
  ds2 += eps2;

  vj = vel[j];
  aj = bodies_acc[j];

  const float3 vda = make_float3(ai.x - aj.x,
                                 ai.y - aj.y,
                                 ai.z - aj.z);
  const float3 vdv = make_float3(vi.x - vj.x,
                                 vi.y - vj.y,
                                 vi.z - vj.z);
  const float vs2 = vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z;

  // Compute the minimum crossing time
  const float mct = (ds2*ds2) / (vs2*vs2);

  // Free fall time
  float da2 = vda.x*vda.x + vda.y*vda.y + vda.z*vda.z;
  float mij = mi + mj;    // sum of the two masses
  da2 *= (mij*mij);
  const float fft = (ds2 / da2);

  // Time step is minimum of the free fall time and minimum crossing
  // time (fourth root combines the squared estimators).
  float dt_est = sqrt(sqrt(min(mct, fft)));

  // Make it a power of 2, capped at 2^-dt_limit.
  float dt_param = eta;   // eta
  // float dt_param = 1.0; //eta
  float dt = dt_est*dt_param;
  int power = -(int)__log2f(dt) + 1;
  power     = max(power, dt_limit);

  // Halve until dt divides tc evenly so the block scheme stays aligned;
  // bail out to the fixed timeStep after 30 halvings to avoid spinning.
  int count = 0;
  dt = 1.0f/(1 << power);
  while(fmodf(tc, dt) != 0.0f)
  {
    dt *= 0.5f;   // could be slow!
    count++;
    if(count > 30)
    {
      dt = timeStep;
      break;
    }
  }

  //if(dt < 1./16384) dt = 1./16384;
  //if(dt < 1./1048576) dt = 1./1048576;

  time[idx].x = tc;
#ifdef ADAPTIVE_TIMESTEP
  // Prevent a time-step smaller than specified through the interface.
  if(dt < timeStep)
    dt = timeStep;
  time[idx].y = tc + (double)dt;
#else
  // Adaptive scheme compiled out: use the fixed interface step.
  time[idx].y = tc + timeStep;
#endif

  // if(idx % 1000 == 0)
  //   time[idx].y = tc + 1./2048 ;
  // else
  //   time[idx].y = tc + timeStep;

#if 0
  // Old (disabled) estimator based on relative velocity/acceleration,
  // kept for reference.
  ds2 = ds2*__powf(10.0f, 0.666667f) + eps2;
  //  ds2 += eps2;
  vj = vel[j];
  aj = bodies_acc[j];

  float3 vda = {ai.x - aj.x,
                ai.y - aj.y,
                ai.z - aj.z};
  float3 vdv = {vi.x - vj.x,
                vi.y - vj.y,
                vi.z - vj.z};

  float da = sqrtf(vda.x*vda.x + vda.y*vda.y + vda.z*vda.z);
  float dv = sqrtf(vdv.x*vdv.x + vdv.y*vdv.y + vdv.z*vdv.z);
  float ds = sqrtf(ds2);

  float dt = eta * dv/da*(sqrt(2*da*ds/(dv*dv) + 1) - 1);

  int power = -(int)__log2f(dt) + 1;
  power     = max(power, dt_limit);

  dt = 1.0f/(1 << power);
  while(fmodf(tc, dt) != 0.0f) dt *= 0.5f;      // could be slow!

  // dt = 0.015625;
  dt = 1.0f/(1 << 8);
  dt = 1.0f/(1 << 6);
  dt = 1.0f/(1 << 7);
  dt = timeStep;
  time[idx].x = tc;
  //time[idx].y = tc + dt;
  time[idx].y = tc + dt;
#endif
}
//Reduce function to get the energy of the system in single precision
//
// Grid-strided dual reduction: accumulates the kinetic energy
// 0.5*m*|v|^2 and the potential term 0.5*m*acc.w (acc.w is taken to
// hold the per-particle potential — confirm against the gravity kernel)
// per thread, then tree-reduces both sums in shared memory.  The
// caller's scratch buffer is split in two halves: shDataKin occupies
// the first blockDim.x floats, shDataPot the next blockDim.x, so the
// launch must provide 2*blockDim.x floats of dynamic shared memory.
// One partial (eKin, ePot) pair per block is written to
// energy[blockIdx.x].
__device__ void compute_energyD(const int n_bodies,
                                real4 *pos,
                                real4 *vel,
                                real4 *acc,
                                float2 *energy, volatile float *shDataKin) {
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize   = blockDim.x;
  unsigned int tid      = threadIdx.x;
  unsigned int i        = blockIdx.x*(blockSize*2) + threadIdx.x;
  unsigned int gridSize = blockSize*2*gridDim.x;

  // Second half of the scratch buffer holds the potential-energy sums.
  volatile float *shDataPot = (float*)&shDataKin [blockSize];

  float eKin, ePot;
  shDataKin[tid] = eKin = 0;   //Stores Ekin
  shDataPot[tid] = ePot = 0;   //Stores Epot

  real4 temp;
  // we reduce multiple elements per thread.  The number is determined by the
  // number of active thread blocks (via gridSize).  More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  while (i < n_bodies) {
    if (i < n_bodies)
      {
        //Ekin
        temp  = vel[i];
        eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);

        //Epot
        ePot += pos[i].w*0.5*acc[i].w;
      }

    if (i + blockSize < n_bodies)
      {
        temp  = vel[i + blockSize];
        eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);

        //Epot
        ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
      }
    i += gridSize;
  }
  shDataKin[tid] = eKin;
  shDataPot[tid] = ePot;
  __syncthreads();

  // do reduction in shared mem (both sums in lock-step)
  if (blockSize >= 512) { if (tid < 256) {
        shDataPot[tid] = ePot = ePot + shDataPot[tid + 256];
        shDataKin[tid] = eKin = eKin + shDataKin[tid + 256]; } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) {
        shDataPot[tid] = ePot = ePot + shDataPot[tid + 128];
        shDataKin[tid] = eKin = eKin + shDataKin[tid + 128]; } __syncthreads(); }
  if (blockSize >= 128) { if (tid <  64) {
        shDataPot[tid] = ePot = ePot + shDataPot[tid +  64];
        shDataKin[tid] = eKin = eKin + shDataKin[tid +  64]; } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
  // Warp-synchronous tail (volatile shared memory, no __syncthreads();
  // EMUSYNC is a no-op outside device-emulation builds).
  if (tid < 32)
#endif
    {
      if (blockSize >= 64) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 32]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 32];  EMUSYNC; }
      if (blockSize >= 32) {shDataKin[tid] = eKin = eKin + shDataKin[tid + 16]; shDataPot[tid] = ePot = ePot + shDataPot[tid + 16];  EMUSYNC; }
      if (blockSize >= 16) {shDataKin[tid] = eKin = eKin + shDataKin[tid +  8]; shDataPot[tid] = ePot = ePot + shDataPot[tid +  8];  EMUSYNC; }
      if (blockSize >=  8) {shDataKin[tid] = eKin = eKin + shDataKin[tid +  4]; shDataPot[tid] = ePot = ePot + shDataPot[tid +  4];  EMUSYNC; }
      if (blockSize >=  4) {shDataKin[tid] = eKin = eKin + shDataKin[tid +  2]; shDataPot[tid] = ePot = ePot + shDataPot[tid +  2];  EMUSYNC; }
      if (blockSize >=  2) {shDataKin[tid] = eKin = eKin + shDataKin[tid +  1]; shDataPot[tid] = ePot = ePot + shDataPot[tid +  1];  EMUSYNC; }
    }

  // write result for this block to global mem
  if (tid == 0) energy[blockIdx.x] = (float2){shDataKin[0], shDataPot[0] };
}
// Kernel entry point for the single-precision energy reduction.  Emits
// one partial (Ekin, Epot) pair per block into 'energy'; the launch
// must supply the dynamic shared-memory scratch that compute_energyD
// splits into its kinetic and potential halves.
extern "C" __global__ void compute_energy(const int n_bodies,
                                          real4 *pos,
                                          real4 *vel,
                                          real4 *acc,
                                          float2 *energy) {
  extern __shared__ float shEnergyScratch[];
  compute_energyD(n_bodies, pos, vel, acc, energy, shEnergyScratch);
}
//Reduce function to get the energy of the system in double precision
//
// Double-precision twin of compute_energyD: grid-strided dual reduction
// of the kinetic energy 0.5*m*|v|^2 and the potential term 0.5*m*acc.w
// (acc.w is taken to hold the per-particle potential — confirm against
// the gravity kernel).  The scratch buffer is split in halves,
// shDDataKin first and shDDataPot at offset blockDim.x, so the launch
// must provide 2*blockDim.x doubles of dynamic shared memory.  One
// partial (eKin, ePot) pair per block goes to energy[blockIdx.x].
__device__ void compute_energy_doubleD(const int n_bodies,
                                       real4 *pos,
                                       real4 *vel,
                                       real4 *acc,
                                       double2 *energy, volatile double *shDDataKin) {
  // perform first level of reduction,
  // reading from global memory, writing to shared memory
  const int blockSize   = blockDim.x;
  unsigned int tid      = threadIdx.x;
  unsigned int i        = blockIdx.x*(blockSize*2) + threadIdx.x;
  unsigned int gridSize = blockSize*2*gridDim.x;

  // Second half of the scratch buffer holds the potential-energy sums.
  volatile double *shDDataPot = (double*)&shDDataKin [blockSize];

  double eKin, ePot;
  shDDataKin[tid] = eKin = 0;   //Stores Ekin
  shDDataPot[tid] = ePot = 0;   //Stores Epot

  real4 temp;
  // we reduce multiple elements per thread.  The number is determined by the
  // number of active thread blocks (via gridSize).  More blocks will result
  // in a larger gridSize and therefore fewer elements per thread
  while (i < n_bodies) {
    if (i < n_bodies)
      {
        //Ekin
        temp  = vel[i];
        eKin += pos[i].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);

        //Epot
        ePot += pos[i].w*0.5*acc[i].w;
      }

    if (i + blockSize < n_bodies)
      {
        temp  = vel[i + blockSize];
        eKin += pos[i + blockSize].w*0.5*(temp.x*temp.x + temp.y*temp.y + temp.z*temp.z);

        //Epot
        ePot += pos[i + blockSize].w*0.5*acc[i + blockSize].w;
      }
    i += gridSize;
  }
  shDDataKin[tid] = eKin;
  shDDataPot[tid] = ePot;
  __syncthreads();

  // do reduction in shared mem (both sums in lock-step)
  if (blockSize >= 512) { if (tid < 256) {
        shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 256];
        shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 256]; } __syncthreads(); }
  if (blockSize >= 256) { if (tid < 128) {
        shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 128];
        shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 128]; } __syncthreads(); }
  if (blockSize >= 128) { if (tid <  64) {
        shDDataPot[tid] = ePot = ePot + shDDataPot[tid +  64];
        shDDataKin[tid] = eKin = eKin + shDDataKin[tid +  64]; } __syncthreads(); }

#ifndef __DEVICE_EMULATION__
  // Warp-synchronous tail (volatile shared memory, no __syncthreads();
  // EMUSYNC is a no-op outside device-emulation builds).
  if (tid < 32)
#endif
    {
      if (blockSize >= 64) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 32]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 32];  EMUSYNC; }
      if (blockSize >= 32) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid + 16]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid + 16];  EMUSYNC; }
      if (blockSize >= 16) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid +  8]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid +  8];  EMUSYNC; }
      if (blockSize >=  8) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid +  4]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid +  4];  EMUSYNC; }
      if (blockSize >=  4) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid +  2]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid +  2];  EMUSYNC; }
      if (blockSize >=  2) {shDDataKin[tid] = eKin = eKin + shDDataKin[tid +  1]; shDDataPot[tid] = ePot = ePot + shDDataPot[tid +  1];  EMUSYNC; }
    }

  // write result for this block to global mem
  if (tid == 0) energy[blockIdx.x] = (double2){shDDataKin[0], shDDataPot[0] };
}
// Kernel entry point for the double-precision energy reduction.
// Launch with 2*blockDim.x*sizeof(double) bytes of dynamic shared memory:
// the device helper splits the buffer into a kinetic and a potential half.
extern "C" __global__ void compute_energy_double(const int n_bodies,
                                                 real4 *pos,
                                                 real4 *vel,
                                                 real4 *acc,
                                                 double2 *energy) {
  extern __shared__ double sharedEnergyBuf[];
  compute_energy_doubleD(n_bodies, pos, vel, acc, energy, sharedEnergyBuf);
}
// For every particle whose id is below 'numberOfBH', copy its position and
// velocity as a consecutive pair into 'out' (out[2*id] = pos, out[2*id+1] = vel).
extern "C" __global__ void distanceCheck(const int n_bodies,
                                         real4 *pos,
                                         int *ids,
                                         real4 *out,
                                         const int numberOfBH,
                                         real4 *vel)
{
  // Flatten the 2D grid / 2D block coordinates into one global particle index.
  const int blockId   = blockIdx.y * gridDim.x + blockIdx.x;
  const int threadId  = threadIdx.y * blockDim.x + threadIdx.x;
  const int globalIdx = blockId * (blockDim.x * blockDim.y) + threadId;

  if (globalIdx >= n_bodies) return;

  const int partID = ids[globalIdx];
  if (partID < numberOfBH)
  {
    out[partID * 2 + 0] = pos[globalIdx];
    out[partID * 2 + 1] = vel[globalIdx];
  }
}
* \test Tests routines for matrix-vector operations (BLAS level 2) using floating point arithmetic.
**/
//
// *** System
//
#include <iostream>
#include <vector>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
#include "viennacl/scalar.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/prod.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/direct_solve.hpp"
#include "viennacl/linalg/lu.hpp"
#include "viennacl/linalg/sum.hpp"
#include "viennacl/tools/random.hpp"
//
// -------------------------------------------------------------
//
// Relative difference between a host scalar and a device (ViennaCL) scalar.
// Returns 0 when the two values compare equal.
template<typename ScalarType>
ScalarType diff(ScalarType & s1, viennacl::scalar<ScalarType> & s2)
{
  viennacl::backend::finish();   // make sure pending device operations on s2 are done
  if (s1 != s2)
  {
    ScalarType scale = std::max(std::fabs(s1), std::fabs(s2));
    return (s1 - s2) / scale;
  }
  return 0;
}
/** @brief Returns the relative infinity-norm difference between a host vector
*          and a ViennaCL vector (the device vector is copied back first).
*          Entries where both values are zero contribute 0.
*/
template<typename ScalarType, typename VCLVectorType>
ScalarType diff(std::vector<ScalarType> const & v1, VCLVectorType const & v2)
{
  std::vector<ScalarType> v2_cpu(v2.size());
  viennacl::backend::finish();  //workaround for a bug in APP SDK 2.7 on Trinity APUs (with Catalyst 12.8)
  viennacl::copy(v2.begin(), v2.end(), v2_cpu.begin());

  ScalarType norm_inf = 0;
  // std::size_t instead of unsigned int: avoids a signed/unsigned size mix and
  // matches the loop index type used by the matrix overload below.
  for (std::size_t i = 0; i < v1.size(); ++i)
  {
    if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 )
    {
      ScalarType tmp = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) );
      if (tmp > norm_inf)
        norm_inf = tmp;
    }
  }
  return norm_inf;
}
/** @brief Returns the relative infinity-norm difference between a host matrix
*          and a ViennaCL matrix (the device matrix is copied back first).
*
* Fix: entries where both values are zero are skipped, mirroring the guard in
* the vector overload above. The previous code evaluated 0/0 (NaN) for such
* entries; the NaN was silently discarded by the 'act > ret' comparison, but
* computing it at all was fragile and inconsistent.
*/
template<typename ScalarType, typename VCLMatrixType>
ScalarType diff(std::vector<std::vector<ScalarType> > const & mat1, VCLMatrixType const & mat2)
{
  std::vector<std::vector<ScalarType> > mat2_cpu(mat2.size1(), std::vector<ScalarType>(mat2.size2()));
  viennacl::backend::finish();  //workaround for a bug in APP SDK 2.7 on Trinity APUs (with Catalyst 12.8)
  viennacl::copy(mat2, mat2_cpu);
  ScalarType ret = 0;
  ScalarType act = 0;

  for (std::size_t i = 0; i < mat2_cpu.size(); ++i)
  {
    for (std::size_t j = 0; j < mat2_cpu[i].size(); ++j)
    {
      // Guard against 0/0 when both entries vanish (same guard as the vector diff()).
      if ( std::max( std::fabs(mat2_cpu[i][j]), std::fabs(mat1[i][j]) ) > 0 )
      {
        act = std::fabs(mat2_cpu[i][j] - mat1[i][j]) / std::max( std::fabs(mat2_cpu[i][j]), std::fabs(mat1[i][j]) );
        if (act > ret)
          ret = act;
      }
    }
  }
  //std::cout << ret << std::endl;
  return ret;
}
//
// -------------------------------------------------------------
//
/** @brief Exercises rank-1 updates, matrix-vector products, row/column sums and
*          row/column/diagonal extraction for one combination of matrix/vector views.
*
* The std_* arguments hold the host reference data, the vcl_* arguments the
* ViennaCL counterparts under test. Each operation is computed on the host and
* on the device and the relative difference is compared against 'epsilon'.
* Returns EXIT_SUCCESS or EXIT_FAILURE.
*
* Fixes: the row-extraction and column-extraction failure messages previously
* said "diagonal extraction", and the column-extraction failure printed the
* diff of the wrong vector pair (v2 instead of v1).
*/
template<typename NumericT, typename Epsilon,
         typename STLMatrixType, typename STLVectorType,
         typename VCLMatrixType, typename VCLVectorType1, typename VCLVectorType2>
int test_prod_rank1(Epsilon const & epsilon,
                    STLMatrixType & std_m1, STLVectorType & std_v1, STLVectorType & std_v2, STLMatrixType & std_m2,
                    VCLMatrixType & vcl_m1, VCLVectorType1 & vcl_v1, VCLVectorType2 & vcl_v2, VCLMatrixType & vcl_m2)
{
  int retval = EXIT_SUCCESS;

  // sync data:
  std_v1 = std::vector<NumericT>(std_v1.size(), NumericT(0.1234));
  std_v2 = std::vector<NumericT>(std_v2.size(), NumericT(0.4321));
  viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
  viennacl::copy(std_m1, vcl_m1);

  // --------------------------------------------------------------------------
  std::cout << "Rank 1 update" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_m1[i][j] += std_v1[i] * std_v2[j];
  vcl_m1 += viennacl::linalg::outer_prod(vcl_v1, vcl_v2);
  if ( std::fabs(diff(std_m1, vcl_m1)) > epsilon )
  {
    std::cout << "# Error at operation: rank 1 update" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_m1, vcl_m1)) << std::endl;
    return EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Scaled rank 1 update - CPU Scalar" << std::endl;
  // Host adds 4.2 * outer product; the device adds 2.1 * outer product twice.
  for (std::size_t i=0; i<std_m1.size(); ++i)
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_m1[i][j] += NumericT(4.2) * std_v1[i] * std_v2[j];
  vcl_m1 += NumericT(2.1) * viennacl::linalg::outer_prod(vcl_v1, vcl_v2);
  vcl_m1 += viennacl::linalg::outer_prod(vcl_v1, vcl_v2) * NumericT(2.1); //check proper compilation
  if ( std::fabs(diff(std_m1, vcl_m1)) > epsilon )
  {
    std::cout << "# Error at operation: scaled rank 1 update - CPU Scalar" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_m1, vcl_m1)) << std::endl;
    return EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Scaled rank 1 update - GPU Scalar" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_m1[i][j] += NumericT(4.2) * std_v1[i] * std_v2[j];
  vcl_m1 += viennacl::scalar<NumericT>(NumericT(2.1)) * viennacl::linalg::outer_prod(vcl_v1, vcl_v2);
  vcl_m1 += viennacl::linalg::outer_prod(vcl_v1, vcl_v2) * viennacl::scalar<NumericT>(NumericT(2.1)); //check proper compilation
  if ( std::fabs(diff(std_m1, vcl_m1)) > epsilon )
  {
    std::cout << "# Error at operation: scaled rank 1 update - GPU Scalar" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_m1, vcl_m1)) << std::endl;
    return EXIT_FAILURE;
  }

  //reset vcl_matrix:
  viennacl::copy(std_m1, vcl_m1);

  // --------------------------------------------------------------------------
  std::cout << "Matrix-Vector product" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += std_m1[i][j] * std_v2[j];
  }
  vcl_v1 = viennacl::linalg::prod(vcl_m1, vcl_v2);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: matrix-vector product" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Matrix-Vector product with scaled add" << std::endl;
  NumericT alpha = static_cast<NumericT>(2.786);
  NumericT beta = static_cast<NumericT>(1.432);
  viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    NumericT tmp = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      tmp += std_m1[i][j] * std_v2[j];
    std_v1[i] = alpha * tmp + beta * std_v1[i];
  }
  vcl_v1 = alpha * viennacl::linalg::prod(vcl_m1, vcl_v2) + beta * vcl_v1;
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: matrix-vector product with scaled additions" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Matrix-Vector product with matrix expression" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += (std_m1[i][j] + std_m1[i][j]) * std_v2[j];
  }
  vcl_v1 = viennacl::linalg::prod(vcl_m1 + vcl_m1, vcl_v2);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: matrix-vector product" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Matrix-Vector product with vector expression" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += std_m1[i][j] * NumericT(3) * std_v2[j];
  }
  vcl_v1 = viennacl::linalg::prod(vcl_m1, NumericT(3) * vcl_v2);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: matrix-vector product" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Matrix-Vector product with matrix and vector expression" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += (std_m1[i][j] + std_m1[i][j]) * (std_v2[j] + std_v2[j]);
  }
  vcl_v1 = viennacl::linalg::prod(vcl_m1 + vcl_m1, vcl_v2 + vcl_v2);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: matrix-vector product" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());

  std::cout << "Transposed Matrix-Vector product" << std::endl;
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
  {
    std_v2[i] = 0;
    for (std::size_t j=0; j<std_m1.size(); ++j)
      std_v2[i] += alpha * std_m1[j][i] * std_v1[j];
  }
  vcl_v2 = alpha * viennacl::linalg::prod(trans(vcl_m1), vcl_v1);
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: transposed matrix-vector product" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  std::cout << "Transposed Matrix-Vector product with scaled add" << std::endl;
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
  {
    NumericT tmp = 0;
    for (std::size_t j=0; j<std_m1.size(); ++j)
      tmp += std_m1[j][i] * std_v1[j];
    std_v2[i] = alpha * tmp + beta * std_v2[i];
  }
  vcl_v2 = alpha * viennacl::linalg::prod(trans(vcl_m1), vcl_v1) + beta * vcl_v2;
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: transposed matrix-vector product with scaled additions" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Row sum with matrix" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += std_m1[i][j];
  }
  vcl_v1 = viennacl::linalg::row_sum(vcl_m1);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: row sum" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Row sum with matrix expression" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
  {
    std_v1[i] = 0;
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_v1[i] += std_m1[i][j] + std_m1[i][j];
  }
  vcl_v1 = viennacl::linalg::row_sum(vcl_m1 + vcl_m1);
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: row sum (with expression)" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Column sum with matrix" << std::endl;
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
  {
    std_v2[i] = 0;
    for (std::size_t j=0; j<std_m1.size(); ++j)
      std_v2[i] += std_m1[j][i];
  }
  vcl_v2 = viennacl::linalg::column_sum(vcl_m1);
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: column sum" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  std::cout << "Column sum with matrix expression" << std::endl;
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
  {
    std_v2[i] = 0;
    for (std::size_t j=0; j<std_m1.size(); ++j)
      std_v2[i] += std_m1[j][i] + std_m1[j][i];
  }
  vcl_v2 = viennacl::linalg::column_sum(vcl_m1 + vcl_m1);
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: column sum (with expression)" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());

  std::cout << "Row extraction from matrix" << std::endl;
  for (std::size_t j=0; j<std_m1[7].size(); ++j)
    std_v2[j] = std_m1[7][j];
  vcl_v2 = row(vcl_m1, std::size_t(7));
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: row extraction from matrix" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  std::cout << "Column extraction from matrix" << std::endl;
  for (std::size_t i=0; i<std_m1.size(); ++i)
    std_v1[i] = std_m1[i][7];
  vcl_v1 = column(vcl_m1, std::size_t(7));
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: column extraction from matrix" << std::endl;
    // report the pair that was actually compared (v1), not v2:
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // --------------------------------------------------------------------------
  viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v2.begin(), std_v2.end(), vcl_v2.begin());
  viennacl::copy(std_m2, vcl_m2);
  STLMatrixType A = std_m2;

  // The -3 sub-diagonal requires std_m1 to have at least 3 more rows than
  // columns (ensured by the caller's choice of num_rows/num_cols).
  std::cout << "Diagonal extraction from matrix" << std::endl;
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
    std_v2[i] = std_m1[i + 3][i];
  vcl_v2 = diag(vcl_m1, static_cast<int>(-3));
  if ( std::fabs(diff(std_v2, vcl_v2)) > epsilon )
  {
    std::cout << "# Error at operation: diagonal extraction from matrix" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v2, vcl_v2)) << std::endl;
    retval = EXIT_FAILURE;
  }

  std::cout << "Matrix diagonal assignment from vector" << std::endl;
  A = std::vector<std::vector<NumericT> >(A.size(), std::vector<NumericT>(A[0].size()));
  for (std::size_t i=0; i<std_m1[0].size(); ++i)
    A[i + (A.size() - std_m1[i].size())][i] = std_v2[i];
  vcl_m2 = diag(vcl_v2, static_cast<int>(std_m1[0].size()) - static_cast<int>(A.size()));
  if ( std::fabs(diff(A, vcl_m2)) > epsilon )
  {
    std::cout << "# Error at operation: Matrix assignment from diagonal" << std::endl;
    std::cout << " diff: " << std::fabs(diff(A, vcl_m2)) << std::endl;
    retval = EXIT_FAILURE;
  }
  // --------------------------------------------------------------------------

  return retval;
}
// Reference backward substitution for an upper-triangular system A*x = b.
// Overwrites 'b' with the solution; when 'unit_diagonal' is true the diagonal
// is taken to be 1 and no division is performed.
template<typename NumericT>
void inplace_solve_upper(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, bool unit_diagonal)
{
  std::size_t n = A.size();
  for (std::size_t row_plus_one = n; row_plus_one > 0; --row_plus_one)
  {
    std::size_t row = row_plus_one - 1;
    NumericT rhs = b[row];
    for (std::size_t col = row + 1; col < n; ++col)
      rhs -= A[row][col] * b[col];
    b[row] = unit_diagonal ? rhs : rhs / A[row][row];
  }
}
// Tag dispatch: plain upper-triangular solve (divide by the diagonal).
template<typename NumericT>
void inplace_solve(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, viennacl::linalg::upper_tag)
{
  inplace_solve_upper(A, b, false);
}
// Tag dispatch: upper-triangular solve with an implicit unit diagonal.
template<typename NumericT>
void inplace_solve(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, viennacl::linalg::unit_upper_tag)
{
  inplace_solve_upper(A, b, true);
}
// Reference forward substitution for a lower-triangular system A*x = b.
// Overwrites 'b' with the solution; when 'unit_diagonal' is true the diagonal
// is taken to be 1 and no division is performed.
template<typename NumericT>
void inplace_solve_lower(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, bool unit_diagonal)
{
  for (std::size_t row = 0; row < A.size(); ++row)
  {
    NumericT rhs = b[row];
    for (std::size_t col = 0; col < row; ++col)
      rhs -= A[row][col] * b[col];
    b[row] = unit_diagonal ? rhs : rhs / A[row][row];
  }
}
// Tag dispatch: plain lower-triangular solve (divide by the diagonal).
template<typename NumericT>
void inplace_solve(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, viennacl::linalg::lower_tag)
{
  inplace_solve_lower(A, b, false);
}
// Tag dispatch: lower-triangular solve with an implicit unit diagonal.
template<typename NumericT>
void inplace_solve(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> & b, viennacl::linalg::unit_lower_tag)
{
  inplace_solve_lower(A, b, true);
}
// Out-of-place reference solve: copies 'b' and runs the tag-dispatched
// in-place substitution on the copy. TagT selects which triangular part of A
// is used (upper/lower, with or without unit diagonal).
template<typename NumericT, typename TagT>
std::vector<NumericT> solve(std::vector<std::vector<NumericT> > const & A, std::vector<NumericT> const & b, TagT)
{
  std::vector<NumericT> ret(b);
  inplace_solve(A, ret, TagT());
  return ret;
}
// Exercises the ViennaCL direct triangular solvers (upper/lower, plain/unit
// diagonal, and their transposed variants) against the host reference
// implementations above, comparing relative differences against 'epsilon'.
// NOTE: the solves are chained — each solver's host result becomes the
// right-hand side of the next test — so the order of the sections matters.
// Returns EXIT_SUCCESS or EXIT_FAILURE.
template<typename NumericT, typename Epsilon,
         typename STLMatrixType, typename STLVectorType,
         typename VCLMatrixType, typename VCLVectorType1>
int test_solve(Epsilon const & epsilon,
               STLMatrixType & std_m1, STLVectorType & std_v1,
               VCLMatrixType & vcl_m1, VCLVectorType1 & vcl_v1)
{
  int retval = EXIT_SUCCESS;

  // sync data:
  //viennacl::copy(std_v1.begin(), std_v1.end(), vcl_v1.begin());
  viennacl::copy(std_v1, vcl_v1);
  viennacl::copy(std_m1, vcl_m1);

  /////////////////// test direct solvers ////////////////////////////

  //upper triangular:
  std::cout << "Upper triangular solver" << std::endl;
  std_v1 = solve(std_m1, std_v1, viennacl::linalg::upper_tag());
  vcl_v1 = viennacl::linalg::solve(vcl_m1, vcl_v1, viennacl::linalg::upper_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: upper triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //upper unit triangular:
  std::cout << "Upper unit triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);  // re-sync: use the previous host result as the new rhs
  std_v1 = solve(std_m1, std_v1, viennacl::linalg::unit_upper_tag());
  vcl_v1 = viennacl::linalg::solve(vcl_m1, vcl_v1, viennacl::linalg::unit_upper_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: unit upper triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //lower triangular:
  std::cout << "Lower triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1, std_v1, viennacl::linalg::lower_tag());
  vcl_v1 = viennacl::linalg::solve(vcl_m1, vcl_v1, viennacl::linalg::lower_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: lower triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //lower unit triangular:
  std::cout << "Lower unit triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1, std_v1, viennacl::linalg::unit_lower_tag());
  vcl_v1 = viennacl::linalg::solve(vcl_m1, vcl_v1, viennacl::linalg::unit_lower_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: unit lower triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  // Host-side explicit transpose of std_m1; the device side keeps using
  // trans(vcl_m1) on the original matrix instead.
  STLMatrixType std_m1_trans(std_m1[0].size(), std::vector<NumericT>(std_m1.size()));
  for (std::size_t i=0; i<std_m1.size(); ++i)
    for (std::size_t j=0; j<std_m1[i].size(); ++j)
      std_m1_trans[j][i] = std_m1[i][j];

  //transposed upper triangular:
  std::cout << "Transposed upper triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1_trans, std_v1, viennacl::linalg::upper_tag());
  vcl_v1 = viennacl::linalg::solve(trans(vcl_m1), vcl_v1, viennacl::linalg::upper_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: upper triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //transposed upper unit triangular:
  std::cout << "Transposed unit upper triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1_trans, std_v1, viennacl::linalg::unit_upper_tag());
  vcl_v1 = viennacl::linalg::solve(trans(vcl_m1), vcl_v1, viennacl::linalg::unit_upper_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: unit upper triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //transposed lower triangular:
  std::cout << "Transposed lower triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1_trans, std_v1, viennacl::linalg::lower_tag());
  vcl_v1 = viennacl::linalg::solve(trans(vcl_m1), vcl_v1, viennacl::linalg::lower_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: lower triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  //transposed lower unit triangular:
  std::cout << "Transposed unit lower triangular solver" << std::endl;
  viennacl::copy(std_v1, vcl_v1);
  std_v1 = solve(std_m1_trans, std_v1, viennacl::linalg::unit_lower_tag());
  vcl_v1 = viennacl::linalg::solve(trans(vcl_m1), vcl_v1, viennacl::linalg::unit_lower_tag());
  if ( std::fabs(diff(std_v1, vcl_v1)) > epsilon )
  {
    std::cout << "# Error at operation: unit lower triangular solver" << std::endl;
    std::cout << " diff: " << std::fabs(diff(std_v1, vcl_v1)) << std::endl;
    retval = EXIT_FAILURE;
  }

  return retval;
}
//
// -------------------------------------------------------------
//
template< typename NumericT, typename F, typename Epsilon >
int test(Epsilon const& epsilon)
{
int retval = EXIT_SUCCESS;
viennacl::tools::uniform_random_numbers<NumericT> randomNumber;
std::size_t num_rows = 141; //note: use num_rows > num_cols + 3 for diag() tests to work
std::size_t num_cols = 103;
// --------------------------------------------------------------------------
std::vector<NumericT> std_v1(num_rows);
for (std::size_t i = 0; i < std_v1.size(); ++i)
std_v1[i] = randomNumber();
std::vector<NumericT> std_v2 = std::vector<NumericT>(num_cols, NumericT(3.1415));
std::vector<std::vector<NumericT> > std_m1(std_v1.size(), std::vector<NumericT>(std_v2.size()));
for (std::size_t i = 0; i < std_m1.size(); ++i)
for (std::size_t j = 0; j < std_m1[i].size(); ++j)
std_m1[i][j] = static_cast<NumericT>(0.1) * randomNumber();
std::vector<std::vector<NumericT> > std_m2(std_v1.size(), std::vector<NumericT>(std_v1.size()));
for (std::size_t i = 0; i < std_m2.size(); ++i)
{
for (std::size_t j = 0; j < std_m2[i].size(); ++j)
std_m2[i][j] = static_cast<NumericT>(-0.1) * randomNumber();
std_m2[i][i] = static_cast<NumericT>(2) + randomNumber();
}
viennacl::vector<NumericT> vcl_v1_native(std_v1.size());
viennacl::vector<NumericT> vcl_v1_large(4 * std_v1.size());
viennacl::vector_range< viennacl::vector<NumericT> > vcl_v1_range(vcl_v1_large, viennacl::range(3, std_v1.size() + 3));
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_v1_slice(vcl_v1_large, viennacl::slice(2, 3, std_v1.size()));
viennacl::vector<NumericT> vcl_v2_native(std_v2.size());
viennacl::vector<NumericT> vcl_v2_large(4 * std_v2.size());
viennacl::vector_range< viennacl::vector<NumericT> > vcl_v2_range(vcl_v2_large, viennacl::range(8, std_v2.size() + 8));
viennacl::vector_slice< viennacl::vector<NumericT> > vcl_v2_slice(vcl_v2_large, viennacl::slice(6, 2, std_v2.size()));
viennacl::matrix<NumericT, F> vcl_m1_native(std_m1.size(), std_m1[0].size());
viennacl::matrix<NumericT, F> vcl_m1_large(4 * std_m1.size(), 4 * std_m1[0].size());
viennacl::matrix_range< viennacl::matrix<NumericT, F> > vcl_m1_range(vcl_m1_large,
viennacl::range(8, std_m1.size() + 8),
viennacl::range(std_m1[0].size(), 2 * std_m1[0].size()) );
viennacl::matrix_slice< viennacl::matrix<NumericT, F> > vcl_m1_slice(vcl_m1_large,
viennacl::slice(6, 2, std_m1.size()),
viennacl::slice(std_m1[0].size(), 2, std_m1[0].size()) );
viennacl::matrix<NumericT, F> vcl_m2_native(std_m2.size(), std_m2[0].size());
viennacl::matrix<NumericT, F> vcl_m2_large(4 * std_m2.size(), 4 * std_m2[0].size());
viennacl::matrix_range< viennacl::matrix<NumericT, F> > vcl_m2_range(vcl_m2_large,
viennacl::range(8, std_m2.size() + 8),
viennacl::range(std_m2[0].size(), 2 * std_m2[0].size()) );
viennacl::matrix_slice< viennacl::matrix<NumericT, F> > vcl_m2_slice(vcl_m2_large,
viennacl::slice(6, 2, std_m2.size()),
viennacl::slice(std_m2[0].size(), 2, std_m2[0].size()) );
/* std::cout << "Matrix resizing (to larger)" << std::endl;
matrix.resize(2*num_rows, 2*num_cols, true);
for (unsigned int i = 0; i < matrix.size1(); ++i)
{
for (unsigned int j = (i<result.size() ? rhs.size() : 0); j < matrix.size2(); ++j)
matrix(i,j) = 0;
}
vcl_matrix.resize(2*num_rows, 2*num_cols, true);
viennacl::copy(vcl_matrix, matrix);
if ( std::fabs(diff(matrix, vcl_matrix)) > epsilon )
{
std::cout << "# Error at operation: matrix resize (to larger)" << std::endl;
std::cout << " diff: " << std::fabs(diff(matrix, vcl_matrix)) << std::endl;
return EXIT_FAILURE;
}
matrix(12, 14) = NumericT(1.9);
matrix(19, 16) = NumericT(1.0);
matrix (13, 15) = NumericT(-9);
vcl_matrix(12, 14) = NumericT(1.9);
vcl_matrix(19, 16) = NumericT(1.0);
vcl_matrix (13, 15) = NumericT(-9);
std::cout << "Matrix resizing (to smaller)" << std::endl;
matrix.resize(result.size(), rhs.size(), true);
vcl_matrix.resize(result.size(), rhs.size(), true);
if ( std::fabs(diff(matrix, vcl_matrix)) > epsilon )
{
std::cout << "# Error at operation: matrix resize (to smaller)" << std::endl;
std::cout << " diff: " << std::fabs(diff(matrix, vcl_matrix)) << std::endl;
return EXIT_FAILURE;
}
*/
//
// Run a bunch of tests for rank-1-updates, matrix-vector products
//
std::cout << "------------ Testing rank-1-updates and matrix-vector products ------------------" << std::endl;
std::cout << "* m = full, v1 = full, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_native, vcl_v2_native, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = full, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_native, vcl_v2_range, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = full, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_native, vcl_v2_slice, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = range
std::cout << "* m = full, v1 = range, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_range, vcl_v2_native, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = range, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_range, vcl_v2_range, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = range, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_range, vcl_v2_slice, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = slice
std::cout << "* m = full, v1 = slice, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_slice, vcl_v2_native, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = slice, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_slice, vcl_v2_range, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = slice, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_native, vcl_v1_slice, vcl_v2_slice, vcl_m2_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
///////////////////////////// matrix_range
std::cout << "* m = range, v1 = full, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_native, vcl_v2_native, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = full, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_native, vcl_v2_range, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = full, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_native, vcl_v2_slice, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = range
std::cout << "* m = range, v1 = range, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_range, vcl_v2_native, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = range, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_range, vcl_v2_range, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = range, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_range, vcl_v2_slice, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = slice
std::cout << "* m = range, v1 = slice, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_slice, vcl_v2_native, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = slice, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_slice, vcl_v2_range, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = slice, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_range, vcl_v1_slice, vcl_v2_slice, vcl_m2_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
///////////////////////////// matrix_slice
std::cout << "* m = slice, v1 = full, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_native, vcl_v2_native, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = full, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_native, vcl_v2_range, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = full, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_native, vcl_v2_slice, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = range
std::cout << "* m = slice, v1 = range, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_range, vcl_v2_native, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = range, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_range, vcl_v2_range, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = range, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_range, vcl_v2_slice, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
// v1 = slice
std::cout << "* m = slice, v1 = slice, v2 = full" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_slice, vcl_v2_native, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = slice, v2 = range" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_slice, vcl_v2_range, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = slice, v2 = slice" << std::endl;
retval = test_prod_rank1<NumericT>(epsilon,
std_m1, std_v1, std_v2, std_m2,
vcl_m1_slice, vcl_v1_slice, vcl_v2_slice, vcl_m2_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
//
// Testing triangular solve() routines
//
std::cout << "------------ Testing triangular solves ------------------" << std::endl;
std::cout << "* m = full, v1 = full" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_native, vcl_v1_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = range" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_native, vcl_v1_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = full, v1 = slice" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_native, vcl_v1_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
///////// matrix_range
std::cout << "* m = range, v1 = full" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_range, vcl_v1_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = range" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_range, vcl_v1_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = range, v1 = slice" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_range, vcl_v1_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
//////// matrix_slice
std::cout << "* m = slice, v1 = full" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_slice, vcl_v1_native);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = range" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_slice, vcl_v1_range);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
std::cout << "* m = slice, v1 = slice" << std::endl;
retval = test_solve<NumericT>(epsilon,
std_m2, std_v1,
vcl_m2_slice, vcl_v1_slice);
if (retval == EXIT_FAILURE)
{
std::cout << " --- FAILED! ---" << std::endl;
return retval;
}
else
std::cout << " --- PASSED ---" << std::endl;
////////////// Final test for full LU decomposition:
//full solver:
std::cout << "Full solver" << std::endl;
unsigned int lu_dim = 100;
std::vector<std::vector<NumericT> > square_matrix(lu_dim, std::vector<NumericT>(lu_dim));
std::vector<NumericT> lu_rhs(lu_dim);
std::vector<NumericT> lu_result(lu_dim);
viennacl::matrix<NumericT, F> vcl_square_matrix(lu_dim, lu_dim);
viennacl::vector<NumericT> vcl_lu_rhs(lu_dim);
for (std::size_t i=0; i<lu_dim; ++i)
for (std::size_t j=0; j<lu_dim; ++j)
square_matrix[i][j] = -static_cast<NumericT>(0.5) * randomNumber();
//put some more weight on diagonal elements:
for (std::size_t j=0; j<lu_dim; ++j)
{
square_matrix[j][j] = static_cast<NumericT>(20.0) + randomNumber();
lu_result[j] = NumericT(0.1) + randomNumber();
}
for (std::size_t i=0; i<lu_dim; ++i)
for (std::size_t j=0; j<lu_dim; ++j)
lu_rhs[i] += square_matrix[i][j] * lu_result[j];
viennacl::copy(square_matrix, vcl_square_matrix);
viennacl::copy(lu_rhs, vcl_lu_rhs);
// ViennaCL:
viennacl::linalg::lu_factorize(vcl_square_matrix);
viennacl::linalg::lu_substitute(vcl_square_matrix, vcl_lu_rhs);
if ( std::fabs(diff(lu_result, vcl_lu_rhs)) > epsilon )
{
std::cout << "# Error at operation: dense solver" << std::endl;
std::cout << " diff: " << std::fabs(diff(lu_rhs, vcl_lu_rhs)) << std::endl;
retval = EXIT_FAILURE;
}
return retval;
}
//
// -------------------------------------------------------------
//
//
// Entry point: runs the full matrix test suite once per scalar-type /
// memory-layout combination and stops at the first failing configuration.
// Double-precision passes are skipped when the OpenCL device lacks
// double support.
//
int main()
{
  std::cout << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << "## Test :: Matrix" << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << std::endl;

  int retval = EXIT_SUCCESS;

  // The single-precision row-major pass is currently disabled (kept for
  // reference):
  //
  // std::cout << std::endl;
  // std::cout << "----------------------------------------------" << std::endl;
  // std::cout << std::endl;
  // {
  //   typedef float ScalarType;
  //   ScalarType epsilon = ScalarType(1.0E-3);
  //   std::cout << "# Testing setup:" << std::endl;
  //   std::cout << "  eps:     " << epsilon << std::endl;
  //   std::cout << "  numeric: float" << std::endl;
  //   std::cout << "  layout: row-major" << std::endl;
  //   retval = test<ScalarType, viennacl::row_major>(epsilon);
  //   if ( retval == EXIT_SUCCESS )
  //     std::cout << "# Test passed" << std::endl;
  //   else
  //     return retval;
  // }

  std::cout << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << std::endl;
  {
    typedef float ScalarType;
    ScalarType epsilon = ScalarType(1.0E-3);
    std::cout << "# Testing setup:" << std::endl;
    std::cout << "  eps:     " << epsilon << std::endl;
    std::cout << "  numeric: float" << std::endl;
    std::cout << "  layout: column-major" << std::endl;
    retval = test<ScalarType, viennacl::column_major>(epsilon);
    if ( retval != EXIT_SUCCESS )
      return retval;
    std::cout << "# Test passed" << std::endl;
  }
  std::cout << std::endl;
  std::cout << "----------------------------------------------" << std::endl;
  std::cout << std::endl;

#ifdef VIENNACL_WITH_OPENCL
  if ( viennacl::ocl::current_device().double_support() )
#endif
  {
    {
      typedef double ScalarType;
      ScalarType epsilon = 1.0E-11;
      std::cout << "# Testing setup:" << std::endl;
      std::cout << "  eps:     " << epsilon << std::endl;
      std::cout << "  numeric: double" << std::endl;
      std::cout << "  layout: row-major" << std::endl;
      retval = test<ScalarType, viennacl::row_major>(epsilon);
      if ( retval != EXIT_SUCCESS )
        return retval;
      std::cout << "# Test passed" << std::endl;
    }
    std::cout << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << std::endl;
    {
      typedef double ScalarType;
      ScalarType epsilon = 1.0E-11;
      std::cout << "# Testing setup:" << std::endl;
      std::cout << "  eps:     " << epsilon << std::endl;
      std::cout << "  numeric: double" << std::endl;
      std::cout << "  layout: column-major" << std::endl;
      retval = test<ScalarType, viennacl::column_major>(epsilon);
      if ( retval != EXIT_SUCCESS )
        return retval;
      std::cout << "# Test passed" << std::endl;
    }
    std::cout << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << std::endl;
  }

  std::cout << std::endl;
  std::cout << "------- Test completed --------" << std::endl;
  std::cout << std::endl;

  return retval;
}
// Pi with enough digits for double precision. The previous literal
// (3.141592653) was truncated after 9 decimal places, which limits the
// accuracy of the orientation-bin computation (theta * nOrientation / (2*PI))
// when scalar_t is double.
#define PI 3.14159265358979323846

// Grid-stride loop: each thread starts at its global index and advances by
// the total number of launched threads, so any problem size n is covered
// even when the grid is capped by GET_BLOCKS below.
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

#define THREADS_PER_BLOCK 1024

// Number of blocks needed to cover N elements, capped at 65000 to stay
// within the per-dimension grid-size limit; CUDA_1D_KERNEL_LOOP keeps the
// kernels correct when the cap truncates the grid.
inline int GET_BLOCKS(const int N) {
  const int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  const int max_block_num = 65000;
  // Ternary instead of unqualified min(): also compiles in plain host C++.
  return optimal_block_num < max_block_num ? optimal_block_num : max_block_num;
}
// Samples `bottom_data` (a height x width map) at the real-valued location
// (y, x) with bilinear interpolation. Samples farther than one pixel outside
// the map return 0; samples inside that margin are clamped onto the border.
template <typename scalar_t>
__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
                                         const int height, const int width,
                                         scalar_t y, scalar_t x) {
  // Completely outside the feature map (beyond the one-pixel margin).
  if (y < -1.0 || y > height || x < -1.0 || x > width) return 0;

  // Clamp negative coordinates to the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  // Integer corners surrounding (y, x); clamp the high corner at the border
  // and snap the coordinate onto it so the weights stay consistent.
  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;

  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  } else {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  } else {
    x_high = x_low + 1;
  }

  // Fractional distances to the low corner (l*) and their complements (h*).
  const scalar_t ly = y - y_low;
  const scalar_t lx = x - x_low;
  const scalar_t hy = 1. - ly;
  const scalar_t hx = 1. - lx;

  // The four corner values: left/right top, left/right bottom.
  const scalar_t lt = bottom_data[y_low * width + x_low];
  const scalar_t rt = bottom_data[y_low * width + x_high];
  const scalar_t lb = bottom_data[y_high * width + x_low];
  const scalar_t rb = bottom_data[y_high * width + x_high];

  // Blend the corners with the standard bilinear weights.
  const scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
}
// Forward kernel of rotation-invariant RoI Align.
// One thread computes one pooled output value, indexed by
// (n, c, o, ph, pw) = (RoI, feature channel, orientation channel, bin row, bin col).
// Each RoI is 6 values: (batch_index, center_x, center_y, width, height, theta);
// the spatial entries are multiplied by spatial_scale. theta feeds cos/sin
// directly (the degree-to-radian conversion is commented out), so it is
// presumably already in radians — confirm against the caller.
// Besides rotating the sampling grid by theta, the kernel circularly shifts
// the orientation dimension by theta's orientation bin and linearly blends
// the two nearest orientation channels (weights r_var / l_var).
template <typename scalar_t>
__global__ void RiROIAlignForward(const int nthreads, const scalar_t *bottom_data,
                                  const scalar_t *bottom_rois,
                                  const scalar_t spatial_scale,
                                  const int sample_num, const int channels,
                                  const int height, const int width,
                                  const int pooled_height, const int pooled_width,
                                  const int nOrientation,
                                  scalar_t *top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, o, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int o = (index / pooled_width / pooled_height) % nOrientation;
    int c = (index / pooled_width / pooled_height / nOrientation) % channels;
    int n = index / pooled_width / pooled_height / nOrientation / channels;

    const scalar_t* offset_bottom_rois = bottom_rois + n * 6;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not use rounding; this implementation detail is critical.
    scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_width = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_height = offset_bottom_rois[4] * spatial_scale;
    // scalar_t theta = offset_bottom_rois[5] * M_PI / 180.0;
    scalar_t theta = offset_bottom_rois[5];

    // Force malformed ROIs to be 1x1.
    roi_width = max(roi_width, (scalar_t)1.);
    roi_height = max(roi_height, (scalar_t)1.);
    scalar_t bin_size_h = static_cast<scalar_t>(roi_height) / static_cast<scalar_t>(pooled_height);
    scalar_t bin_size_w = static_cast<scalar_t>(roi_width) / static_cast<scalar_t>(pooled_width);

    // Fractional orientation bin for theta; l_var/r_var are the linear
    // interpolation weights between the two neighbouring bins.
    scalar_t ind_float = theta * nOrientation / (2 * PI);
    int ind = floor(ind_float);
    scalar_t l_var = ind_float - (scalar_t)ind;
    scalar_t r_var = 1.0 - l_var;
    // correct start channel (the +nOrientation wrap handles negative theta)
    ind = (ind + nOrientation) % nOrientation;
    // rotated channel and its circular successor
    int ind_rot = (o - ind + nOrientation) % nOrientation;
    int ind_rot_plus = (ind_rot + 1 + nOrientation) % nOrientation;

    // Input layout is (batch, channels, nOrientation, height, width):
    // select the two orientation slices that will be blended.
    const scalar_t* offset_bottom_data =
        bottom_data + (roi_batch_ind * channels * nOrientation + c * nOrientation + ind_rot) * height * width;
    const scalar_t* offset_bottom_data_plus =
        bottom_data + (roi_batch_ind * channels * nOrientation + c * nOrientation + ind_rot_plus) * height * width;

    // We use roi_bin_grid to sample the grid and mimic integral pooling.
    int roi_bin_grid_h = (sample_num > 0)
                             ? sample_num
                             : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);

    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
    // Appropriate translation needs to be applied after.
    scalar_t roi_start_h = -roi_height / 2.0;
    scalar_t roi_start_w = -roi_width / 2.0;
    scalar_t cosscalar_theta = cos(theta);
    scalar_t sinscalar_theta = sin(theta);

    // We do average (integral) pooling inside a bin.
    const scalar_t count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    scalar_t output_val = 0.;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {  // e.g., iy = 0, 1
      const scalar_t yy = roi_start_h + ph * bin_size_h +
                          static_cast<scalar_t>(iy + .5f) * bin_size_h /
                              static_cast<scalar_t>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const scalar_t xx = roi_start_w + pw * bin_size_w +
                            static_cast<scalar_t>(ix + .5f) * bin_size_w /
                                static_cast<scalar_t>(roi_bin_grid_w);
        // Rotate the bin-local offset (xx, yy) by theta and translate to the
        // RoI center (standard 2D rotation matrix).
        scalar_t x = xx * cosscalar_theta - yy * sinscalar_theta + roi_center_w;
        scalar_t y = xx * sinscalar_theta + yy * cosscalar_theta + roi_center_h;
        // Sample both orientation slices at the same spatial location and
        // blend them with the orientation-interpolation weights.
        scalar_t val = bilinear_interpolate<scalar_t>(
            offset_bottom_data, height, width, y, x);
        scalar_t val_plus = bilinear_interpolate<scalar_t>(
            offset_bottom_data_plus, height, width, y, x);
        output_val += r_var * val + l_var * val_plus;
      }
    }
    // Average over all sampling points inside the bin.
    output_val /= count;

    top_data[index] = output_val;
  }
}
// Host-side launcher for RiROIAlignForward.
// `output` must hold num_rois * channels * nOrientation * pooled_height *
// pooled_width elements; one CUDA thread is launched per element (the grid
// is capped by GET_BLOCKS and the kernel's grid-stride loop covers any
// remainder). Uses the legacy ATen APIs (.type(), .data<T>()) consistently
// with the rest of this file; dispatch covers half, float and double.
// The kernel is launched on the default stream. Always returns 1.
int RiROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
                             const float spatial_scale, const int sample_num,
                             const int channels, const int height,
                             const int width, const int num_rois,
                             const int pooled_height, const int pooled_width,
                             const int nOrientation,
                             at::Tensor output) {
  // One thread per output element.
  const int output_size = num_rois * pooled_height * pooled_width * channels * nOrientation;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      features.type(), "RiROIAlignLaucherForward", ([&] {
        const scalar_t *bottom_data = features.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *top_data = output.data<scalar_t>();

        RiROIAlignForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>> (
                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
                sample_num, channels, height, width, pooled_height,
                pooled_width, nOrientation, top_data);
      }));
  // Surface any asynchronous launch error through the THC error check.
  THCudaCheck(cudaGetLastError());
  return 1;
}
// Computes the four bilinear-interpolation weights (w1..w4) and the corner
// indices for location (y, x). Mirrors bilinear_interpolate() so the
// backward pass distributes gradients to exactly the pixels the forward
// pass read. Out-of-range samples get zero weights and corner indices -1.
template <typename scalar_t>
__device__ void bilinear_interpolate_gradient(const int height, const int width,
                                              scalar_t y, scalar_t x,
                                              scalar_t &w1, scalar_t &w2,
                                              scalar_t &w3, scalar_t &w4,
                                              int &x_low, int &x_high,
                                              int &y_low, int &y_high) {
  // Sample falls entirely outside the feature map: nothing to propagate.
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    w1 = w2 = w3 = w4 = 0.;
    x_low = x_high = y_low = y_high = -1;
    return;
  }

  // Clamp negative coordinates to the first row/column.
  if (y <= 0) y = 0;
  if (x <= 0) x = 0;

  y_low = (int)y;
  x_low = (int)x;

  // High corner is one past the low corner, clamped at the border; when
  // clamped, the coordinate is snapped onto the corner as well.
  y_high = y_low + 1;
  if (y_low >= height - 1) {
    y_high = y_low = height - 1;
    y = (scalar_t)y_low;
  }

  x_high = x_low + 1;
  if (x_low >= width - 1) {
    x_high = x_low = width - 1;
    x = (scalar_t)x_low;
  }

  // Fractional distances to the low corner and their complements.
  const scalar_t ly = y - y_low;
  const scalar_t lx = x - x_low;
  const scalar_t hy = 1. - ly;
  const scalar_t hx = 1. - lx;

  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
// Backward kernel of rotation-invariant RoI Align.
// One thread per output-gradient element (n, c, o, ph, pw); each thread
// scatters its incoming gradient to the four bilinear corners of every
// sampling point, into both orientation slices blended in the forward pass
// (weights r_var / l_var). atomicAdd is required because sampling windows
// of different threads overlap in bottom_diff.
// All index/weight computations must stay identical to RiROIAlignForward.
template <typename scalar_t>
__global__ void RiROIAlignBackward(
    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
    const scalar_t spatial_scale, const int sample_num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int nOrientation, scalar_t *bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat index into (n, c, o, ph, pw).
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int o = (index / pooled_width / pooled_height) % nOrientation;
    int c = (index / pooled_width / pooled_height / nOrientation) % channels;
    int n = index / pooled_width / pooled_height / nOrientation / channels;

    // RoI layout: (batch_index, center_x, center_y, width, height, theta).
    const scalar_t* offset_bottom_rois = bottom_rois + n * 6;
    int roi_batch_ind = offset_bottom_rois[0];

    // Do not round.
    scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale;
    scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale;
    scalar_t roi_width = offset_bottom_rois[3] * spatial_scale;
    scalar_t roi_height = offset_bottom_rois[4] * spatial_scale;
    // scalar_t theta = offset_bottom_rois[5] * M_PI / 180.0;
    scalar_t theta = offset_bottom_rois[5];

    // Force malformed ROIs to be 1x1.
    roi_width = max(roi_width, (scalar_t)1.);
    roi_height = max(roi_height, (scalar_t)1.);
    scalar_t bin_size_h = static_cast<scalar_t>(roi_height) / static_cast<scalar_t>(pooled_height);
    scalar_t bin_size_w = static_cast<scalar_t>(roi_width) / static_cast<scalar_t>(pooled_width);

    // Fractional orientation bin for theta; l_var/r_var are the blend
    // weights used in the forward pass.
    scalar_t ind_float = theta * nOrientation / (2 * PI);
    int ind = floor(ind_float);
    scalar_t l_var = ind_float - (scalar_t)ind;
    scalar_t r_var = 1.0 - l_var;
    // correct start channel
    ind = (ind + nOrientation) % nOrientation;
    // rotated channel and its circular successor
    int ind_rot = (o - ind + nOrientation) % nOrientation;
    int ind_rot_plus = (ind_rot + 1 + nOrientation) % nOrientation;

    // Gradient destinations: the two orientation slices read in the forward.
    scalar_t* offset_bottom_diff =
        bottom_diff + (roi_batch_ind * channels * nOrientation + c * nOrientation + ind_rot) * height * width;
    scalar_t* offset_bottom_diff_plus =
        bottom_diff + (roi_batch_ind * channels * nOrientation + c * nOrientation + ind_rot_plus) * height * width;

    // Incoming gradient for this pooled bin.
    int top_offset = (n * channels * nOrientation + c * nOrientation + o) * pooled_height * pooled_width;
    const scalar_t* offset_top_diff = top_diff + top_offset;
    const scalar_t top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];

    // We use roi_bin_grid to sample the grid and mimic integral pooling.
    int roi_bin_grid_h = (sample_num > 0)
                             ? sample_num
                             : ceil(roi_height / pooled_height);  // e.g., = 2
    int roi_bin_grid_w =
        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);

    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
    // Appropriate translation needs to be applied after.
    scalar_t roi_start_h = -roi_height / 2.0;
    scalar_t roi_start_w = -roi_width / 2.0;
    scalar_t cosTheta = cos(theta);
    scalar_t sinTheta = sin(theta);

    // We do average (integral) pooling inside a bin.
    const scalar_t count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4

    for (int iy = 0; iy < roi_bin_grid_h; iy++) {  // e.g., iy = 0, 1
      const scalar_t yy = roi_start_h + ph * bin_size_h +
                          static_cast<scalar_t>(iy + .5f) * bin_size_h /
                              static_cast<scalar_t>(roi_bin_grid_h);  // e.g., 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const scalar_t xx = roi_start_w + pw * bin_size_w +
                            static_cast<scalar_t>(ix + .5f) * bin_size_w /
                                static_cast<scalar_t>(roi_bin_grid_w);
        // Rotate the bin-local offset by theta and translate to the RoI
        // center — identical to the forward sampling locations.
        scalar_t x = xx * cosTheta - yy * sinTheta + roi_center_w;
        scalar_t y = xx * sinTheta + yy * cosTheta + roi_center_h;

        scalar_t w1, w2, w3, w4;
        int x_low, x_high, y_low, y_high;
        bilinear_interpolate_gradient<scalar_t>(
            height,
            width,
            y,
            x,
            w1,
            w2,
            w3,
            w4,
            x_low,
            x_high,
            y_low,
            y_high);

        // Per-corner gradients, averaged over the bin's sampling points.
        scalar_t g1 = top_diff_this_bin * w1 / count;
        scalar_t g2 = top_diff_this_bin * w2 / count;
        scalar_t g3 = top_diff_this_bin * w3 / count;
        scalar_t g4 = top_diff_this_bin * w4 / count;

        // Corner indices are -1 when the sample fell outside the map.
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          atomicAdd(
              offset_bottom_diff + y_low * width + x_low, g1*r_var);
          atomicAdd(
              offset_bottom_diff + y_low * width + x_high, g2*r_var);
          atomicAdd(
              offset_bottom_diff + y_high * width + x_low, g3*r_var);
          atomicAdd(
              offset_bottom_diff + y_high * width + x_high, g4*r_var);
          atomicAdd(
              offset_bottom_diff_plus + y_low * width + x_low, g1*l_var);
          atomicAdd(
              offset_bottom_diff_plus + y_low * width + x_high, g2*l_var);
          atomicAdd(
              offset_bottom_diff_plus + y_high * width + x_low, g3*l_var);
          atomicAdd(
              offset_bottom_diff_plus + y_high * width + x_high, g4*l_var);
        }  // if
      }  // ix
    }  // iy
  }  // CUDA_1D_KERNEL_LOOP
}  // RiROIAlignBackward
// Host-side launcher for RiROIAlignBackward.
// `bottom_grad` must be zero-initialized by the caller: the kernel only
// accumulates into it with atomicAdd. One thread is launched per element of
// `top_grad`. Although the dispatch macro includes double, double inputs
// are rejected at runtime below — presumably due to atomicAdd limitations
// on the targeted GPU architectures (TODO confirm against build targets).
// Always returns 1 (terminates the process for double input).
int RiROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
                              const float spatial_scale, const int sample_num,
                              const int channels, const int height,
                              const int width, const int num_rois,
                              const int pooled_height, const int pooled_width,
                              const int nOrientation,
                              at::Tensor bottom_grad) {
  // One thread per incoming gradient element.
  const int output_size = num_rois * pooled_height * pooled_width * channels * nOrientation;
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      top_grad.type(), "RiROIAlignLaucherBackward", ([&] {
        const scalar_t *top_diff = top_grad.data<scalar_t>();
        const scalar_t *rois_data = rois.data<scalar_t>();
        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
        // Runtime guard: double is dispatched but unsupported here.
        if (sizeof(scalar_t) == sizeof(double)) {
          fprintf(stderr, "double is not supported\n");
          exit(-1);
        }

        RiROIAlignBackward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
                output_size, top_diff, rois_data, spatial_scale, sample_num,
                channels, height, width, pooled_height, pooled_width, nOrientation,
                bottom_diff);
      }));
  // Surface any asynchronous launch error through the THC error check.
  THCudaCheck(cudaGetLastError());
  return 1;
}
#include <cassert>
#include <cusolverSp.h>
#include <cusolverSp_LOWLEVEL_PREVIEW.h>
// geometry processing and shape analysis framework
namespace gproshan {
// Device-side storage for a sparse linear system A x = b:
// A is m x m with nnz stored non-zeros (compressed-sparse arrays
// A_col_ptrs / A_row_indices / A_values, base-0 indices); b and x are dense
// vectors of length m. Buffers are allocated and filled on construction and
// released by the destructor (RAII). `hx` is accepted for interface symmetry
// but not read here: x is left uninitialized and written by the solver,
// then copied out by the caller.
struct cu_spAxb
{
	int * A_col_ptrs, * A_row_indices;
	real_t * A_values, * x, * b;

	cu_spAxb(const int m, const int nnz, const real_t * hA_values, const int * hA_col_ptrs, const int * hA_row_indices, const real_t * hb, real_t * hx)
	{
		cudaMalloc(&A_col_ptrs, (m + 1) * sizeof(int));
		cudaMemcpy(A_col_ptrs, hA_col_ptrs, (m + 1) * sizeof(int), cudaMemcpyHostToDevice);

		cudaMalloc(&A_row_indices, nnz * sizeof(int));
		cudaMemcpy(A_row_indices, hA_row_indices, nnz * sizeof(int), cudaMemcpyHostToDevice);

		cudaMalloc(&A_values, nnz * sizeof(real_t));
		cudaMemcpy(A_values, hA_values, nnz * sizeof(real_t), cudaMemcpyHostToDevice);

		// b is a dense vector of length m, not nnz: the previous nnz-sized
		// copy read past the end of hb whenever nnz > m (and over-allocated
		// device memory).
		cudaMalloc(&b, m * sizeof(real_t));
		cudaMemcpy(b, hb, m * sizeof(real_t), cudaMemcpyHostToDevice);

		cudaMalloc(&x, m * sizeof(real_t));
	}

	// Non-copyable: a shallow copy would double-free the device buffers.
	cu_spAxb(const cu_spAxb &) = delete;
	cu_spAxb & operator=(const cu_spAxb &) = delete;

	~cu_spAxb()
	{
		cudaFree(A_col_ptrs);
		cudaFree(A_row_indices);
		cudaFree(A_values);
		cudaFree(b);
		cudaFree(x);
	}
};
// Solves the sparse symmetric positive-definite system A x = b with
// cuSOLVER's sparse Cholesky solver (csrlsvchol).
//   m    : dimension of A (m x m)
//   nnz  : number of stored non-zeros of A
//   hA_* : host compressed-sparse arrays of A (base-0 indexing)
//   hb   : host right-hand side (length m)
//   hx   : host output solution (length m); zero-filled when the device
//          solve does not report success
//   host : true  -> run the Host variant directly on the host arrays;
//          false -> copy the system to the GPU and solve there
// Returns the elapsed time in seconds, measured with CUDA events around
// the whole call (handle setup and transfers included).
// NOTE(review): tolerance/reorder arguments are passed as 0 and the
// computed `singularity` index is ignored (see commented printf).
double solve_positive_definite_cusolver(const int m, const int nnz, const real_t * hA_values, const int * hA_col_ptrs, const int * hA_row_indices, const real_t * hb, real_t * hx, const bool host)
{
	// NOTE(review): resets the whole device, destroying any allocation or
	// handle the caller created before this point.
	cudaDeviceReset();

	float time;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start, 0);

	// solve Ax = b
	int singularity;

	cusolverSpHandle_t handle_cusolver;
	cusolverSpCreate(&handle_cusolver);

	cusparseMatDescr_t descr = 0;
	cusparseCreateMatDescr(&descr);
	cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
	cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);

	if(host)
	{
		// Host path: no device transfers. SINGLE_P selects the
		// single-precision (S) vs double-precision (D) API.
	#ifdef SINGLE_P
		cusolverSpScsrlsvcholHost(handle_cusolver, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, hb, 0, 0, hx, &singularity);
	#else
		cusolverSpDcsrlsvcholHost(handle_cusolver, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, hb, 0, 0, hx, &singularity);
	#endif
	}
	else
	{
		// allocate A, x, b into device
		cu_spAxb data(m, nnz, hA_values, hA_col_ptrs, hA_row_indices, hb, hx);

		cusolverStatus_t status;
	#ifdef SINGLE_P
		status = cusolverSpScsrlsvchol(handle_cusolver, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, data.b, 0, 0, data.x, &singularity);
	#else
		status = cusolverSpDcsrlsvchol(handle_cusolver, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, data.b, 0, 0, data.x, &singularity);
	#endif

		// Copy the solution back only on success; otherwise report zeros.
		if(status == CUSOLVER_STATUS_SUCCESS)
			cudaMemcpy(hx, data.x, m * sizeof(real_t), cudaMemcpyDeviceToHost);
		else
			memset(hx, 0, m * sizeof(real_t));
	}

	// printf("%d\n", singularity != -1);

	cusparseDestroyMatDescr(descr);
	cusolverSpDestroy(handle_cusolver);
	// end Ax = b

	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	// cudaEventElapsedTime reports milliseconds; convert to seconds.
	return (double) time / 1000;
}
// Solves the sparse SPD system A x = b on the GPU using cuSPARSE:
// incomplete Cholesky factorization (csric02, computed in place over
// data.A_values) followed by two triangular solves (csrsv2):
// L y = b, then L^T x = y.
//   m, nnz, hA_* : system description, as in solve_positive_definite_cusolver
//   hb           : host right-hand side (length m)
//   hx           : host output solution (length m)
// Returns elapsed seconds, measured with CUDA events around ONLY the two
// triangular solves — analysis, factorization and transfers are excluded.
// NOTE(review): csric02 is an *incomplete* (zero fill-in) factorization, so
// x is an approximate solution unless A's factor fits the sparsity pattern.
double solve_positive_definite_cusparse(const int m, const int nnz, const real_t * hA_values, const int * hA_col_ptrs, const int * hA_row_indices, const real_t * hb, real_t * hx)
{
	// NOTE(review): resets the whole device, destroying any allocation or
	// handle the caller created before this point.
	cudaDeviceReset();

	float time;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	// allocate A, x, b into device
	cu_spAxb data(m, nnz, hA_values, hA_col_ptrs, hA_row_indices, hb, hx);

	// intermediate vector y of the two-stage solve (L y = b, L^T x = y)
	real_t * dy;
	cudaMalloc(&dy, m * sizeof(real_t));

	cusparseHandle_t handle;
	cusparseCreate(&handle);

	// SOLVE Ax = b
	cusparseMatDescr_t descr_M = 0;
	cusparseMatDescr_t descr_L = 0;
	csric02Info_t info_M = 0;
	csrsv2Info_t info_L = 0;
	csrsv2Info_t info_Lt = 0;

	int buffer_size_M;
	int buffer_size_L;
	int buffer_size_Lt;
	int buffer_size;
	void * buffer = 0;

	int structural_zero;
	int numerical_zero;

	const real_t alpha = 1.;
	const cusparseSolvePolicy_t policy_M = CUSPARSE_SOLVE_POLICY_NO_LEVEL;
	const cusparseSolvePolicy_t policy_L = CUSPARSE_SOLVE_POLICY_NO_LEVEL;
	const cusparseSolvePolicy_t policy_Lt = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
	const cusparseOperation_t trans_L = CUSPARSE_OPERATION_NON_TRANSPOSE;
	const cusparseOperation_t trans_Lt = CUSPARSE_OPERATION_TRANSPOSE;

	// descr_M describes the full matrix A; descr_L its lower-triangular factor.
	cusparseCreateMatDescr(&descr_M);
	cusparseSetMatIndexBase(descr_M, CUSPARSE_INDEX_BASE_ZERO);
	cusparseSetMatType(descr_M, CUSPARSE_MATRIX_TYPE_GENERAL);

	cusparseCreateMatDescr(&descr_L);
	cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO);
	cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL);
	cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER);
	cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT);

	cusparseCreateCsric02Info(&info_M);
	cusparseCreateCsrsv2Info(&info_L);
	cusparseCreateCsrsv2Info(&info_Lt);

	// Query the workspace sizes for factorization and both triangular solves;
	// a single buffer of the maximum size is shared by all three phases.
#ifdef SINGLE_P
	cusparseScsric02_bufferSize(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, &buffer_size_M);
	cusparseScsrsv2_bufferSize(handle, trans_L, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, &buffer_size_L);
	cusparseScsrsv2_bufferSize(handle, trans_Lt, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, &buffer_size_Lt);
#else
	cusparseDcsric02_bufferSize(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, &buffer_size_M);
	cusparseDcsrsv2_bufferSize(handle, trans_L, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, &buffer_size_L);
	cusparseDcsrsv2_bufferSize(handle, trans_Lt, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, &buffer_size_Lt);
#endif

	buffer_size = max(buffer_size_M, max(buffer_size_L, buffer_size_Lt));
	cudaMalloc(&buffer, buffer_size);

	// Analysis of A's sparsity pattern for the factorization.
#ifdef SINGLE_P
	cusparseScsric02_analysis(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, policy_M, buffer);
#else
	cusparseDcsric02_analysis(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, policy_M, buffer);
#endif

	if(CUSPARSE_STATUS_ZERO_PIVOT == cusparseXcsric02_zeroPivot(handle, info_M, &structural_zero))
		printf("A(%d,%d) is missing\n", structural_zero, structural_zero);

	// Analysis for both triangular solves, then the in-place factorization
	// (data.A_values now holds the incomplete Cholesky factor).
#ifdef SINGLE_P
	cusparseScsrsv2_analysis(handle, trans_L, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, policy_L, buffer);
	cusparseScsrsv2_analysis(handle, trans_Lt, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, policy_Lt, buffer);

	cusparseScsric02(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, policy_M, buffer);
#else
	cusparseDcsrsv2_analysis(handle, trans_L, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, policy_L, buffer);
	cusparseDcsrsv2_analysis(handle, trans_Lt, m, nnz, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, policy_Lt, buffer);

	cusparseDcsric02(handle, m, nnz, descr_M, data.A_values, data.A_col_ptrs, data.A_row_indices, info_M, policy_M, buffer);
#endif

	if(CUSPARSE_STATUS_ZERO_PIVOT == cusparseXcsric02_zeroPivot(handle, info_M, &numerical_zero))
		printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero);

	// SOLVE — only this region is timed.
	cudaEventRecord(start, 0);

#ifdef SINGLE_P
	cusparseScsrsv2_solve(handle, trans_L, m, nnz, &alpha, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, data.b, dy, policy_L, buffer);
	cusparseScsrsv2_solve(handle, trans_Lt, m, nnz, &alpha, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, dy, data.x, policy_Lt, buffer);
#else
	cusparseDcsrsv2_solve(handle, trans_L, m, nnz, &alpha, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_L, data.b, dy, policy_L, buffer);
	cusparseDcsrsv2_solve(handle, trans_Lt, m, nnz, &alpha, descr_L, data.A_values, data.A_col_ptrs, data.A_row_indices, info_Lt, dy, data.x, policy_Lt, buffer);
#endif

	// copy sol x to host
	cudaMemcpy(hx, data.x, m * sizeof(real_t), cudaMemcpyDeviceToHost);

	// END SOLVE
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);

	// FREE
	cudaFree(dy);		// fix: dy was previously leaked
	cudaFree(buffer);
	cusparseDestroyMatDescr(descr_M);
	cusparseDestroyMatDescr(descr_L);
	cusparseDestroyCsric02Info(info_M);
	cusparseDestroyCsrsv2Info(info_L);
	cusparseDestroyCsrsv2Info(info_Lt);
	cusparseDestroy(handle);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	// cudaEventElapsedTime reports milliseconds; convert to seconds.
	return (double) time / 1000;
}
// Solves the sparse symmetric positive definite system A x = b using
// cuSOLVER's sparse Cholesky (csrchol), either entirely on the host or on the
// device, and returns the wall-clock time of the solve phase in seconds
// (CUDA-event timed).
//
//   m              - matrix dimension
//   nnz            - number of nonzeros
//   hA_values / hA_col_ptrs / hA_row_indices - host sparse matrix arrays.
//       NOTE(review): hA_col_ptrs is passed in the csrRowPtr slot and
//       hA_row_indices in the csrColInd slot of the cuSOLVER API, i.e. the
//       data is effectively treated as CSC; for symmetric A this coincides
//       with CSR — confirm the caller's storage layout.
//   hb / hx        - host right-hand side and solution vector (hx is written)
//   host           - true: analyze/factor/solve on the host;
//                    false: copy the system to the device and solve there
//
// SINGLE_P selects the float (S) API variants, otherwise double (D) is used.
double solve_positive_definite_cusolver_preview(const int m, const int nnz, const real_t * hA_values, const int * hA_col_ptrs, const int * hA_row_indices, const real_t * hb, real_t * hx, const bool host)
{
    // NOTE(review): destroys every prior allocation/context on the device.
    cudaDeviceReset();
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // SOLVE Ax = b
    cusolverSpHandle_t cusolver_handle = nullptr;
    cusparseHandle_t cusparse_handle = nullptr;
    // cudaStream_t stream = nullptr;
    cusparseMatDescr_t descr = nullptr;
    size_t size_iternal = 0;   // internal workspace size reported by BufferInfo
    size_t size_chol = 0;      // Cholesky workspace size actually allocated
    void * buffer = nullptr;
    int singularity;           // -1 means no zero pivot was encountered
    cusolverSpCreate(&cusolver_handle);
    cusparseCreate(&cusparse_handle);
    /*
    cudaStreamCreate(&stream);
    cusolverSpSetStream(cusolver_handle, stream);
    cusparseSetStream(cusparse_handle, stream);
    */
    cusparseCreateMatDescr(&descr);
    cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
    if(host)
    {
        // Host path: analysis, factorization and solve all run on the CPU.
        csrcholInfoHost_t info;
        cusolverSpCreateCsrcholInfoHost(&info);
        cusolverSpXcsrcholAnalysisHost(cusolver_handle, m, nnz, descr, hA_col_ptrs, hA_row_indices, info);
#ifdef SINGLE_P
        cusolverSpScsrcholBufferInfoHost(cusolver_handle, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, info, &size_iternal, &size_chol);
#else
        cusolverSpDcsrcholBufferInfoHost(cusolver_handle, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, info, &size_iternal, &size_chol);
#endif
        buffer = new char[size_chol];
#ifdef SINGLE_P
        cusolverSpScsrcholFactorHost(cusolver_handle, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, info, buffer);
        cusolverSpScsrcholZeroPivotHost(cusolver_handle, info, 0, &singularity);
#else
        cusolverSpDcsrcholFactorHost(cusolver_handle, m, nnz, descr, hA_values, hA_col_ptrs, hA_row_indices, info, buffer);
        cusolverSpDcsrcholZeroPivotHost(cusolver_handle, info, 0, &singularity);
#endif
        // A zero pivot (singularity != -1) means A is not positive definite.
        assert(singularity == -1);
        // SOLVE (only the triangular solves are timed)
        cudaEventRecord(start, 0);
#ifdef SINGLE_P
        cusolverSpScsrcholSolveHost(cusolver_handle, m, hb, hx, info, buffer);
#else
        cusolverSpDcsrcholSolveHost(cusolver_handle, m, hb, hx, info, buffer);
#endif
        // END SOLVE
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        // FREE
        delete [] (char*) buffer;
        cusolverSpDestroyCsrcholInfoHost(info);
    }
    else
    {
        // Device path: cu_spAxb presumably copies the system to device memory
        // and owns the device buffers (RAII) — confirm in its definition.
        cu_spAxb data(m, nnz, hA_values, hA_col_ptrs, hA_row_indices, hb, hx);
        csrcholInfo_t info;
        cusolverSpCreateCsrcholInfo(&info);
        cusolverSpXcsrcholAnalysis(cusolver_handle, m, nnz, descr, data.A_col_ptrs, data.A_row_indices, info);
#ifdef SINGLE_P
        cusolverSpScsrcholBufferInfo(cusolver_handle, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, info, &size_iternal, &size_chol);
#else
        cusolverSpDcsrcholBufferInfo(cusolver_handle, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, info, &size_iternal, &size_chol);
#endif
        cudaMalloc(&buffer, size_chol);
#ifdef SINGLE_P
        cusolverSpScsrcholFactor(cusolver_handle, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, info, buffer);
        cusolverSpScsrcholZeroPivot(cusolver_handle, info, 0, &singularity);
#else
        cusolverSpDcsrcholFactor(cusolver_handle, m, nnz, descr, data.A_values, data.A_col_ptrs, data.A_row_indices, info, buffer);
        cusolverSpDcsrcholZeroPivot(cusolver_handle, info, 0, &singularity);
#endif
        // NOTE(review): unlike the host path, the zero-pivot assertion is
        // disabled here — a non-SPD matrix will silently produce garbage.
        // assert(singularity == -1);
        // SOLVE (only the triangular solves are timed)
        cudaEventRecord(start, 0);
#ifdef SINGLE_P
        cusolverSpScsrcholSolve(cusolver_handle, m, data.b, data.x, info, buffer);
#else
        cusolverSpDcsrcholSolve(cusolver_handle, m, data.b, data.x, info, buffer);
#endif
        // END SOLVE
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        // copy solution back to the caller's host buffer
        cudaMemcpy(hx, data.x, m * sizeof(real_t), cudaMemcpyDeviceToHost);
        // FREE
        cudaFree(buffer);
        cusolverSpDestroyCsrcholInfo(info);
    }
    // cudaStreamDestroy(stream);
    cusparseDestroyMatDescr(descr);
    cusparseDestroy(cusparse_handle);
    cusolverSpDestroy(cusolver_handle);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // cudaEventElapsedTime reports milliseconds; convert to seconds.
    return (double) time / 1000;
}
} // namespace gproshan
#include <vector>
// header file to use mshadow
#include "mshadow/tensor.h"
// helper function to load mnist dataset
#include "util.h"
// this namespace contains all data structures, functions
using namespace mshadow;
// this namespace contains all operator overloads
using namespace mshadow::expr;
// define operations
// ReLU activation: negative inputs are clamped to zero.
struct relu{
  MSHADOW_XINLINE static real_t Map(real_t a) {
    // Same selection std::max(a, 0.0f) performs: (a < 0) picks 0, else a.
    return a < 0.0f ? 0.0f : a;
  }
};
// Derivative of ReLU: 1 where the activation was positive, 0 elsewhere.
struct relu_grad {
  MSHADOW_XINLINE static real_t Map(real_t a) {
    if (a > 0.0f) {
      return 1.0f;
    }
    return 0.0f;
  }
};
/*! \brief interface for nnet; the interface lets us use the GPU and CPU implementations in a unified way */
class INNet{
 public:
  // forward propagation: consume one input batch, write the per-class output scores to oubatch
  virtual void Forward(const Tensor<cpu, 4, real_t>& inbatch, Tensor<cpu, 2, real_t> &oubatch) = 0;
  // back propagation given the gradient of the loss w.r.t. the network output
  virtual void Backprop(const Tensor<cpu, 2, real_t>& gradout) = 0;
  // apply one parameter update using the gradients computed by Backprop
  virtual void Update(void) = 0;
  virtual ~INNet() {}
};
/*!
 * \brief simple two layer conv-net: conv-pool-flat-fullc
 * this implementation is device invariant (xpu is mshadow::cpu or mshadow::gpu)
 */
template<typename xpu>
class ConvNet : public INNet {
 public:
  // initialize the network
  // batch_size: examples per minibatch; insize: input image side length
  // nchannel: number of convolution filters; ksize/kstride: kernel size and stride
  // psize: max-pooling window (and stride); num_out: number of output classes
  ConvNet(int batch_size, int insize, int nchannel, int ksize, int kstride, int psize, int num_out)
      :rnd(0), ksize(ksize), kstride(kstride), psize(psize) {
    // setup stream; all tensors share it so operations stay ordered
    Stream<xpu> *stream = NewStream<xpu>();
    ninput.set_stream(stream);
    nhidden.set_stream(stream);
    nhiddenbak.set_stream(stream);
    npool.set_stream(stream);
    npoolbak.set_stream(stream);
    nflat.set_stream(stream);
    nout.set_stream(stream);
    hbias.set_stream(stream); g_hbias.set_stream(stream);
    obias.set_stream(stream); g_obias.set_stream(stream);
    Ki2h.set_stream(stream); g_Ki2h.set_stream(stream);
    Wh2o.set_stream(stream); g_Wh2o.set_stream(stream);
    tmp_col.set_stream(stream);
    tmp_dst.set_stream(stream);
    // setup nodes: shapes follow the standard conv/pool output-size formulas
    ninput.Resize(Shape4(batch_size, 1, insize, insize));
    nhidden.Resize(Shape4(batch_size, nchannel, (insize - ksize)/kstride+1, (insize -ksize)/kstride+1));
    nhiddenbak.Resize(nhidden.shape_);
    npool.Resize(Shape4(batch_size, nchannel, (nhidden.size(2)+1-psize)/psize, (nhidden.size(3)+1-psize)/psize));
    npoolbak.Resize(npool.shape_);
    // flattened pooled features feeding the fully-connected layer
    nflat.Resize(Shape2(batch_size, npool.size(1)*npool.size(2)*npool.size(3)));
    nout.Resize(Shape2(batch_size, num_out));
    // setup bias (zero-initialized)
    hbias.Resize(Shape1(nchannel)); g_hbias.Resize(hbias.shape_);
    obias.Resize(Shape1(num_out)); g_obias.Resize(obias.shape_);
    hbias = 0.0f; obias = 0.0f;
    // setup weights: small random Gaussian init
    Ki2h.Resize(Shape2(nchannel, ksize*ksize)); g_Ki2h.Resize(Ki2h.shape_);
    Wh2o.Resize(Shape2(nflat.size(1), num_out)); g_Wh2o.Resize(Wh2o.shape_);
    rnd.SampleGaussian(&Ki2h, 0, 0.01f);
    rnd.SampleGaussian(&Wh2o, 0, 0.01f);
    printf("conv=%d, pool=%d\n", nhidden.size(3), npool.size(3));
  }
  virtual ~ConvNet() {}
  // forward propagation: conv -> bias -> relu -> max-pool -> flatten -> fullc -> softmax
  virtual void Forward(const Tensor<cpu, 4, real_t>& inbatch, Tensor<cpu, 2, real_t> &oubatch) {
    index_t batch_size = inbatch.size(0);
    // copy data to input layer
    Copy(ninput, inbatch, ninput.stream_);
    // first layer: convolution with stride kstride
    ConvForward(ninput, Ki2h, nhidden, ksize, kstride, tmp_col, tmp_dst);
    // add per-channel bias
    nhidden += broadcast<1>(hbias, nhidden.shape_);
    // activation, relu; keep a copy of the activated values for backprop
    nhidden = F<relu>(nhidden);
    Copy(nhiddenbak, nhidden, nhiddenbak.stream_);
    // max pooling (npoolbak keeps the pooled values for the unpool in Backprop)
    npool = pool<red::maximum>(nhiddenbak, npool[0][0].shape_, psize, psize, psize);
    Copy(npoolbak, npool, npoolbak.stream_);
    // flatten pooled maps into a 2-D matrix
    nflat = reshape(npool, nflat.shape_);
    // second layer: fully connected
    nout = dot(nflat, Wh2o);
    nout += repmat(obias, batch_size);
    // softmax calculation
    Softmax(nout, nout);
    // copy result out
    Copy(oubatch, nout, nout.stream_);
  }
  // back propagation; expects gradout = dLoss/dOutput from the caller
  virtual void Backprop(const Tensor<cpu, 2, real_t>& gradout) {
    // copy gradient to output layer
    Copy(nout, gradout, nout.stream_);
    // calc grad of final layer (bias and weights)
    g_obias = sum_rows(nout);
    g_Wh2o = dot(nflat.T(), nout);
    // backprop gradient to the flattened layer
    nflat = dot(nout, Wh2o.T());
    npool = reshape(nflat, npool.shape_);
    // backprop pooling layer: route gradients to the max positions
    nhiddenbak = unpool<red::maximum>(nhiddenbak, npoolbak, npool, psize, psize, psize);
    // gradient of relu: pass through only where the activation was positive
    nhidden = F<relu_grad>(nhidden) * nhiddenbak;
    // calc grad of layer 1 (bias and kernel)
    g_hbias = sumall_except_dim<1>(nhidden);
    ConvBackWard(nhidden, Ki2h, g_Ki2h, ninput, ksize, kstride, tmp_col, tmp_dst);
  }
  // update weights with one SGD step (weight decay on weights, none on biases)
  virtual void Update(void) {
    // run SGD
    const float eta = 0.1;      // learning rate
    const float wd = 0.00001;   // weight decay coefficient
    // update weight
    Ki2h -= eta * (wd * Ki2h + g_Ki2h);
    Wh2o -= eta * (wd * Wh2o + g_Wh2o);
    // no regularization for bias
    hbias-= eta * g_hbias;
    obias-= eta * g_obias;
  }
 private:
  // forward convolution via im2col + matrix multiply; tmp_col and tmp_dst are scratch buffers
  inline static void ConvForward(const Tensor<xpu, 4, real_t> &in,
                                 const Tensor<xpu, 2, real_t> &kernel,
                                 Tensor<xpu, 4, real_t> &out,
                                 int ksize, int kstride,
                                 TensorContainer<xpu, 2, real_t> &tmp_col,
                                 TensorContainer<xpu, 2, real_t> &tmp_dst) {
    index_t oheight = (in.size(2) - ksize)/kstride + 1;
    index_t owidth = (in.size(3) - ksize)/kstride + 1;
    index_t nbatch = in.size(0);
    index_t nchannel = out.size(1);
    // we directly unpack all local patches and do a dot product
    // this cost lots of memory, normally for large image, only unpack several image at a time
    tmp_col.Resize(Shape2(in.size(1)*ksize*ksize, nbatch*oheight*owidth));
    tmp_dst.Resize(Shape2(nchannel, nbatch*oheight*owidth));
    // unpack local patches (the trailing 1,1 are dilation parameters)
    tmp_col = unpack_patch2col(in, ksize, ksize, kstride, kstride, 1, 1);
    tmp_dst = dot(kernel, tmp_col);
    // reshape, then swap axis, we chain the expressions together
    out = swapaxis<1,0>(reshape(tmp_dst, Shape4(nchannel, nbatch, oheight, owidth)));
  }
  // backward convolution: computes the kernel gradient and backprops into `in`
  inline static void ConvBackWard(const Tensor<xpu, 4, real_t> &out,
                                  const Tensor<xpu, 2, real_t> &kernel,
                                  Tensor<xpu, 2, real_t> &g_kernel,
                                  Tensor<xpu, 4, real_t> &in,
                                  int ksize, int kstride,
                                  TensorContainer<xpu, 2, real_t> &tmp_col,
                                  TensorContainer<xpu, 2, real_t> &tmp_dst) {
    index_t oheight = (in.size(2) - ksize)/kstride + 1;
    index_t owidth = (in.size(3) - ksize)/kstride + 1;
    index_t nbatch = in.size(0);
    index_t nchannel = out.size(1);
    // we directly unpack all local patches and do a dot product
    // this cost lots of memory, normally for large image, only unpack several image at a time
    tmp_col.Resize(Shape2(in.size(1) * ksize * ksize,
                          nbatch * oheight * owidth));
    tmp_dst.Resize(Shape2(nchannel, nbatch * oheight * owidth));
    // unpack local patches
    tmp_col = unpack_patch2col(in, ksize, ksize, kstride, kstride, 1, 1);
    tmp_dst = reshape(swapaxis<1,0>(out), tmp_dst.shape_);
    g_kernel = dot(tmp_dst, tmp_col.T());
    // backpropgation: not necessary for first layer, but included anyway
    tmp_col = dot(kernel.T(), tmp_dst);
    in = pack_col2patch(tmp_col, in.shape_, ksize, ksize, kstride, kstride, 1, 1);
  }
 private:
  // random number generator (seeded in the constructor)
  Random<xpu, real_t> rnd;
  // kernel size, kernel stride, pooling size
  int ksize, kstride, psize;
  // nodes in neural net
  TensorContainer<xpu, 4, real_t> ninput, nhidden, nhiddenbak, npool, npoolbak;
  TensorContainer<xpu, 2, real_t> nflat, nout;
  // temp helper structure for im2col convolution
  TensorContainer<xpu, 2, real_t> tmp_col, tmp_dst;
  // hidden/output bias and their gradients
  TensorContainer<xpu, 1, real_t> hbias, obias, g_hbias, g_obias;
  // weight, gradient: Ki2h is actually the convolution kernel, with shape=(num_channel,ksize*ksize)
  TensorContainer<xpu, 2, real_t> Ki2h, Wh2o, g_Ki2h, g_Wh2o;
};
// helper function: index of the largest element in a 1-D prediction vector
// (on ties, the earliest index wins, since only a strictly greater value replaces it)
inline int MaxIndex(Tensor<cpu, 1, real_t> pred) {
  int best = 0;
  const index_t n = pred.size(0);
  for (index_t i = 1; i < n; ++i) {
    best = (pred[i] > pred[best]) ? (int)i : best;
  }
  return best;
}
// Entry point: trains the two-layer conv net on MNIST for 20 epochs and
// prints the test error after each epoch.
// Usage: program (cpu|gpu)
int main(int argc, char *argv[]) {
  if(argc < 2) {
    printf("Usage: cpu or gpu\n"); return 0;
  }
  srand(0);   // fixed seed for reproducibility
  // settings
  int batch_size = 100;
  int insize = 28;     // MNIST images are 28x28
  int nchannel = 10;   // number of convolution filters
  int ksize = 5;       // convolution kernel size
  int kstride = 1;
  int psize = 2;       // max-pooling window
  int num_out = 10;    // ten digit classes
  // choose which version to use
  INNet *net;
  if (!strcmp(argv[1], "gpu")) {
    InitTensorEngine<gpu>();
    net = new ConvNet<gpu>(batch_size, insize, nchannel, ksize, kstride, psize, num_out);
  } else {
    InitTensorEngine<cpu>();
    net = new ConvNet<cpu>(batch_size, insize, nchannel, ksize, kstride, psize, num_out);
  }
  // temp output layer holding one batch of class probabilities
  TensorContainer<cpu, 2, real_t> pred;
  pred.Resize(Shape2(batch_size, num_out));
  // labels
  std::vector<int> ytrain, ytest;
  // data, loaded as flat row vectors
  TensorContainer<cpu, 2, real_t> xtrain_, xtest_;
  LoadMNIST("train-images-idx3-ubyte", "train-labels-idx1-ubyte", ytrain, xtrain_, true);
  LoadMNIST("t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte", ytest, xtest_, false);
  // reshape flat rows into 4-D (batch, channel, height, width) tensors
  TensorContainer<cpu, 4, real_t> xtrain(Shape4(xtrain_.size(0), 1, insize, insize));
  TensorContainer<cpu, 4, real_t> xtest(Shape4(xtest_.size(0), 1, insize, insize));
  xtrain = reshape(xtrain_, xtrain.shape_);
  xtest = reshape(xtest_, xtest.shape_);
  int num_iter = 20;   // training epochs
  for (int i = 0; i < num_iter; ++ i) {
    // training (any tail batch smaller than batch_size is skipped)
    for (index_t j = 0; j + batch_size <= xtrain.size(0); j += batch_size) {
      net->Forward(xtrain.Slice(j, j + batch_size), pred);
      // set gradient into pred: softmax + cross-entropy gradient is (p - onehot)
      for (int k = 0; k < batch_size; ++ k) {
        pred[k][ ytrain[k+j] ] -= 1.0f;
      }
      // scale gradient by batch size
      pred *= 1.0f / batch_size;
      // run backprop
      net->Backprop(pred);
      // update net parameters
      net->Update();
    }
    // evaluation on the test set
    long nerr = 0;
    for (index_t j = 0; j + batch_size <= xtest.size(0); j += batch_size) {
      net->Forward(xtest.Slice(j, j + batch_size), pred);
      for (int k = 0; k < batch_size; ++ k) {
        nerr += MaxIndex(pred[k]) != ytest[j+k];
      }
    }
    printf("round %d: test-err=%f\n", i, (float)nerr/xtest.size(0));
  }
  delete net;
  // shut down the tensor engine matching the one we initialized
  if (!strcmp(argv[1], "gpu")) {
    ShutdownTensorEngine<gpu>();
  } else {
    ShutdownTensorEngine<cpu>();
  }
  return 0;
}
#include <stdexcept>
#if __CUDA_ARCH__ >= 530
#define CUDA_SUPPORTS_FP16
#endif
//TODO maybe tune this number, it varies by GPU
static const int targetNumThreads = 512;

// Chooses a (threads, blocks) decomposition for a 2-D kernel launch so that
// roughly targetNumThreads threads are used per block. dim0 is the fast (x)
// dimension, dim1 the slow (y) dimension.
//   dim0Size, dim1Size  - logical extents of the two dimensions
//   threads0, blocks0   - out: x threads per block and x block count
//   threads1, blocks1   - out: y threads per block and y block count
void splitThreadsAcrossDim01(int dim0Size, int dim1Size, int& threads0, int& blocks0, int& threads1, int& blocks1) {
  // Guard: a nonpositive dim0Size would divide by zero in the final branch.
  if(dim0Size < 1)
    dim0Size = 1;
  if(dim0Size > targetNumThreads) {
    // dim0 alone oversubscribes a block: split it, one y block per dim1 slice.
    threads0 = targetNumThreads/2;
    blocks0 = (dim0Size + threads0 - 1) / threads0;
    threads1 = 1;
    blocks1 = dim1Size;
  }
  else if(dim0Size > targetNumThreads/2) {
    // dim0 fits in one block and already uses most of the thread budget.
    threads0 = dim0Size;
    blocks0 = 1;
    threads1 = 1;
    blocks1 = dim1Size;
  }
  else {
    // dim0 is small: pack several dim1 slices into each block.
    threads0 = dim0Size;
    blocks0 = 1;
    threads1 = targetNumThreads / dim0Size;
    blocks1 = (dim1Size + threads1 - 1) / threads1;
  }
}
//--------------------------------------------------------------------------------------------------------------
// Concatenates two batched, flattened tensors along the channel axis.
// inA is [n][chwA] and inB is [n][chwB] (contiguous); out is [n][chwA+chwB].
// The grid is partitioned: the first numBlocksA blocks copy A's portion and
// the remaining blocks copy B's portion (numBlocksB is implied by the launch
// and unused in the body). Each thread owns one offset inside a slab and
// strides across the n batch elements.
template <typename T>
__global__
void channelConcatKernel(
  const T* inA,
  const T* inB,
  T* out,
  int chwA,
  int chwB,
  int numBlocksA,
  int numBlocksB,
  int n
) {
  if(blockIdx.x < numBlocksA) {
    // This block copies from A.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < chwA) {
      int nchwA = n*chwA;
      int chwOut = (chwA+chwB);
      int aIdx = index;
      int outIdx = index;
      while(aIdx < nchwA) {
        out[outIdx] = inA[aIdx];
        aIdx += chwA;
        outIdx += chwOut;
      }
    }
  }
  else {
    // This block copies from B; output offset starts past A's channels.
    int index = (blockIdx.x - numBlocksA) * blockDim.x + threadIdx.x;
    if(index < chwB) {
      int nchwB = n*chwB;
      int chwOut = (chwA+chwB);
      int bIdx = index;
      int outIdx = chwA+index;
      while(bIdx < nchwB) {
        out[outIdx] = inB[bIdx];
        bIdx += chwB;
        outIdx += chwOut;
      }
    }
  }
}
// Launches channelConcatKernel: enough blocks to cover A's slab plus enough
// to cover B's slab, targetNumThreads threads per block.
template <typename T>
void customCudaChannelConcatTemplate(const T* inA, const T* inB, T* out, int chwA, int chwB, int n) {
  int threadsPerBlock = targetNumThreads;
  int blocksForA = (chwA + threadsPerBlock - 1) / threadsPerBlock;
  int blocksForB = (chwB + threadsPerBlock - 1) / threadsPerBlock;
  channelConcatKernel<<<blocksForA + blocksForB, threadsPerBlock>>>(inA, inB, out, chwA, chwB, blocksForA, blocksForB, n);
}
template void customCudaChannelConcatTemplate<float>(const float* inA, const float* inB, float* out, int chwA, int chwB, int n);
template void customCudaChannelConcatTemplate<half>(const half* inA, const half* inB, half* out, int chwA, int chwB, int n);
// Public float/half entry points.
void customCudaChannelConcat(const float* inA, const float* inB, float* out, int chwA, int chwB, int n) {
  customCudaChannelConcatTemplate<float>(inA, inB, out, chwA, chwB, n);
}
void customCudaChannelConcat(const half* inA, const half* inB, half* out, int chwA, int chwB, int n) {
  customCudaChannelConcatTemplate<half>(inA, inB, out, chwA, chwB, n);
}
//--------------------------------------------------------------------------------------------------------------
// Extracts channel 0 of an NHWC tensor into a dense [n*h*w] output.
// One thread per (n,h,w) position; the channel stride in NHWC is cSize.
template <typename T>
__global__
void extractChannel0KernelNHWC(const T *in, T* out, int nhwSize, int cSize)
{
  int nhwIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if(nhwIdx < nhwSize) {
    out[nhwIdx] = in[nhwIdx*cSize];
  }
}
// Launches extractChannel0KernelNHWC with one thread per (n,h,w) position.
template <typename T>
void customCudaChannel0ExtractNHWCTemplate(const T *in, T* out, int n, int hw, int c) {
  int total = n * hw;
  int threadsPerBlock = targetNumThreads;
  int blocks = (total + threadsPerBlock - 1) / threadsPerBlock;
  extractChannel0KernelNHWC<<<blocks, threadsPerBlock>>>(in, out, total, c);
}
// Extracts channel 0 of an NCHW tensor: out[n][hw] = in[n][0][hw].
// 2-D launch: x covers the hw positions, y covers the batch.
template <typename T>
__global__
void extractChannel0KernelNCHW(const T *in, T* out, int nSize, int cSize, int hwSize)
{
  int hwIdx = blockIdx.x * blockDim.x + threadIdx.x;
  int nIdx = blockIdx.y * blockDim.y + threadIdx.y;
  if(hwIdx < hwSize && nIdx < nSize) {
    out[nIdx * hwSize + hwIdx] = in[nIdx * cSize * hwSize + hwIdx];
  }
}
template <typename T>
void customCudaChannel0ExtractNCHWTemplate(const T *in, T* out, int nSize, int cSize, int hwSize) {
int hwThreads;
int hwBlocks;
int nThreads;
int nBlocks;
splitThreadsAcrossDim01(hwSize, nSize, hwThreads, hwBlocks, nThreads, nBlocks);
if(nBlocks > 65536)
throw std::runtime_error("customCudaChannel0ExtractNCHW: nSize too large given hwSize");
dim3 grid(hwBlocks,nBlocks,1);
dim3 threads(hwThreads,nThreads,1);
extractChannel0KernelNCHW<<<grid,threads>>>(in,out,nSize,cSize,hwSize);
}
void customCudaChannel0ExtractNCHW(const float* in, float* out, int n, int c, int hw) {
customCudaChannel0ExtractNCHWTemplate<float>(in,out,n,c,hw);
}
void customCudaChannel0ExtractNCHW(const half* in, half* out, int n, int c, int hw) {
customCudaChannel0ExtractNCHWTemplate<half>(in,out,n,c,hw);
}
void customCudaChannel0ExtractNHWC(const float* in, float* out, int n, int hw, int c) {
customCudaChannel0ExtractNHWCTemplate<float>(in,out,n,hw,c);
}
void customCudaChannel0ExtractNHWC(const half* in, half* out, int n, int hw, int c) {
customCudaChannel0ExtractNHWCTemplate<half>(in,out,n,hw,c);
}
//--------------------------------------------------------------------------------------------------------------
// Sums each (n,c) plane of an NCHW tensor over its xy extent and writes
// out[n][c] = sum * scaleSum. blockDim.x threads cooperate per channel:
// each accumulates a strided slice of xy, then a shared-memory tree
// reduction combines them. blockDim.x must be a power of two (the launcher
// guarantees this). Shared layout: [cId][xyId], xyId fastest.
__global__
void sumChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, float scaleSum)
{
  extern __shared__ float sumPoolNCHWShared[];
  int xyId = threadIdx.x;
  int xyBlockDim = blockDim.x;
  int cId = threadIdx.y;
  int cBlockDim = blockDim.y;
  int cIdx = blockIdx.y * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int xycSize = xySize*cSize;
  int sharedIdx = xyId + cId * xyBlockDim;
  float acc = 0.0f;
  if(cIdx < cSize) {
    // Strided partial sum over this channel's xy plane.
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      acc += in[xyIdx + cIdx * xySize + nIdx * xycSize];
      xyIdx += xyBlockDim;
    }
    sumPoolNCHWShared[sharedIdx] = acc;
  }
  __syncthreads();
  // Tree reduction along the xy dimension of the shared buffer.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s];
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize)
    out[cIdx + nIdx * cSize] = sumPoolNCHWShared[sharedIdx] * scaleSum;
}
// Value-head pooling for NCHW input. For each (n,c) plane it computes the sum
// over xy, then emits three features per channel into out[n][3*cSize]:
//   [c]            mean = sum / maskSum[n]   (mean over the masked area)
//   [c + cSize]    mean * (sqrt(area) - 14) * 0.1
//   [c + 2*cSize]  mean * ((sqrt(area) - 14)^2 * 0.01 - 0.1)
// The 14.0f offset centers the area scaling around a typical mask size —
// presumably chosen to match the training recipe; confirm against the model
// definition. blockDim.x must be a power of two (guaranteed by the launcher).
__global__
void valueHeadPoolChannelsNCHWKernel(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum)
{
  extern __shared__ float sumPoolNCHWShared[];
  int xyId = threadIdx.x;
  int xyBlockDim = blockDim.x;
  int cId = threadIdx.y;
  int cBlockDim = blockDim.y;
  int cIdx = blockIdx.y * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int xycSize = xySize*cSize;
  int sharedIdx = xyId + cId * xyBlockDim;
  float acc = 0.0f;
  if(cIdx < cSize) {
    // Strided partial sum over this channel's xy plane.
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      acc += in[xyIdx + cIdx * xySize + nIdx * xycSize];
      xyIdx += xyBlockDim;
    }
    sumPoolNCHWShared[sharedIdx] = acc;
  }
  __syncthreads();
  // Tree reduction along the xy dimension.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumPoolNCHWShared[sharedIdx] += sumPoolNCHWShared[sharedIdx + s];
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize) {
    float sum = sumPoolNCHWShared[sharedIdx];
    float div = maskSum[nIdx];
    float sqrtdiv = sqrt(div);
    float mean = sum/div;
    out[cIdx + nIdx * cSize*3] = mean;
    out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f;
    out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f);
  }
}
// Global pooling for NCHW input: per (n,c) plane computes sum and max in one
// pass, then writes three features per channel into out[n][3*cSize]:
//   [c]            mean = sum / maskSum[n]
//   [c + cSize]    mean * (sqrt(area) - 14) * 0.1   (area-scaled mean)
//   [c + 2*cSize]  channelwise max
// NOTE(review): the running max starts at 0.0f, so the pooled max is
// effectively max(0, values) — assumes out-of-mask entries are zero and
// in-mask activations are nonnegative; confirm upstream. Shared memory holds
// a sum array and a max array, each sharedMemElts floats; blockDim.x must be
// a power of two (guaranteed by the launcher).
__global__
void gPoolChannelsNCHWKernel(const float* in, float* out, int cSize, int xySize, const float* maskSum, int sharedMemElts)
{
  extern __shared__ float poolNCHWShared[];
  float* sumShared = (float*)poolNCHWShared;
  float* maxShared = (float*)poolNCHWShared + sharedMemElts;
  int xyId = threadIdx.x;
  int xyBlockDim = blockDim.x;
  int cId = threadIdx.y;
  int cBlockDim = blockDim.y;
  int cIdx = blockIdx.y * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int xycSize = xySize*cSize;
  int sharedIdx = xyId + cId * xyBlockDim;
  if(cIdx < cSize) {
    // Strided partial sum and max over this channel's xy plane.
    float accSum = 0.0f;
    float accMax = 0.0f;
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      float a = in[xyIdx + cIdx * xySize + nIdx * xycSize];
      accSum += a;
      accMax = fmaxf(accMax, a);
      xyIdx += xyBlockDim;
    }
    sumShared[sharedIdx] = accSum;
    maxShared[sharedIdx] = accMax;
  }
  __syncthreads();
  // Tree reduction for both sum and max along the xy dimension.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumShared[sharedIdx] += sumShared[sharedIdx + s];
      maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]);
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize) {
    float sum = sumShared[sharedIdx];
    float div = maskSum[nIdx];
    float sqrtdiv = sqrt(div);
    float mean = sum/div;
    out[cIdx + nIdx * (cSize*3)] = mean;
    out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f;
    out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx];
  }
}
// Sums each (n,c) channel plane over xy and multiplies by scaleSum.
// Output shape: [nSize][cSize]. Throws if the grid z/y limits would be hit.
void customCudaPoolRowsSumNCHW(const float* in, float* out, int nSize, int cSize, int xySize, float scaleSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsSumNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsSumNCHW: cSize too large");
  // Largest power of two worth of threads for the xy reduction.
  int threadsXY = 1;
  for(; threadsXY < targetNumThreads && threadsXY < xySize/2; threadsXY *= 2) {}
  // Remaining thread budget covers channels.
  int threadsC = (targetNumThreads < threadsXY) ? 1 : (targetNumThreads / threadsXY);
  int blocksC = (cSize + threadsC - 1) / threadsC;
  // One float of shared memory per thread.
  int smemBytes = sizeof(float) * threadsC * threadsXY;
  sumChannelsNCHWKernel<<<dim3(1, blocksC, nSize), dim3(threadsXY, threadsC, 1), smemBytes>>>(in, out, cSize, xySize, scaleSum);
}
// Launches the NCHW value-head pooling kernel (mean + two area-scaled means
// per channel). Output shape: [nSize][3*cSize].
void customCudaValueHeadPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaValueHeadPoolNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaValueHeadPoolNCHW: cSize too large");
  // Largest power of two worth of threads for the xy reduction.
  int threadsXY = 1;
  for(; threadsXY < targetNumThreads && threadsXY < xySize/2; threadsXY *= 2) {}
  // Remaining thread budget covers channels.
  int threadsC = (targetNumThreads < threadsXY) ? 1 : (targetNumThreads / threadsXY);
  int blocksC = (cSize + threadsC - 1) / threadsC;
  // One float of shared memory per thread.
  int smemBytes = sizeof(float) * threadsC * threadsXY;
  valueHeadPoolChannelsNCHWKernel<<<dim3(1, blocksC, nSize), dim3(threadsXY, threadsC, 1), smemBytes>>>(in, out, nSize, cSize, xySize, maskSum);
}
// Launches the NCHW global-pooling kernel (mean, area-scaled mean, max).
// Output shape: [nSize][3*cSize].
void customCudaPoolRowsGPoolNCHW(const float* in, float* out, int nSize, int cSize, int xySize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large");
  // Largest power of two worth of threads for the xy reduction.
  int threadsXY = 1;
  for(; threadsXY < targetNumThreads && threadsXY < xySize/2; threadsXY *= 2) {}
  // Remaining thread budget covers channels.
  int threadsC = (targetNumThreads < threadsXY) ? 1 : (targetNumThreads / threadsXY);
  int blocksC = (cSize + threadsC - 1) / threadsC;
  // One shared slot per thread, doubled (sum array + max array), rounded up
  // to a power of two to sidestep alignment concerns.
  int elts = 128;
  for(; elts < threadsC * threadsXY; elts *= 2) {}
  int smemBytes = sizeof(float) * elts * 2;
  gPoolChannelsNCHWKernel<<<dim3(1, blocksC, nSize), dim3(threadsXY, threadsC, 1), smemBytes>>>(in, out, cSize, xySize, maskSum, elts);
}
//--------------------------------------------------------------------------------------------------------------
#ifdef CUDA_SUPPORTS_FP16
// Half-precision variant of gPoolChannelsNCHWKernel: same layout and outputs
// (mean, area-scaled mean, max per channel into out[n][3*cSize]), with half
// I/O and float accumulation. See the float kernel above for details; the
// same caveats apply (max starts at 0, blockDim.x must be a power of two).
__global__
void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts)
{
  extern __shared__ float poolNCHWShared[];
  float* sumShared = (float*)poolNCHWShared;
  float* maxShared = (float*)poolNCHWShared + sharedMemElts;
  int xyId = threadIdx.x;
  int xyBlockDim = blockDim.x;
  int cId = threadIdx.y;
  int cBlockDim = blockDim.y;
  int cIdx = blockIdx.y * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int xycSize = xySize*cSize;
  int sharedIdx = xyId + cId * xyBlockDim;
  if(cIdx < cSize) {
    // Strided partial sum and max, accumulated in float for accuracy.
    float accSum = 0.0f;
    float accMax = 0.0f;
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      float a = __half2float(in[xyIdx + cIdx * xySize + nIdx * xycSize]);
      accSum += a;
      accMax = fmaxf(accMax, a);
      xyIdx += xyBlockDim;
    }
    sumShared[sharedIdx] = accSum;
    maxShared[sharedIdx] = accMax;
  }
  __syncthreads();
  // Tree reduction for both sum and max along the xy dimension.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumShared[sharedIdx] += sumShared[sharedIdx + s];
      maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx], maxShared[sharedIdx + s]);
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize) {
    float sum = sumShared[sharedIdx];
    float div = maskSum[nIdx];
    float sqrtdiv = sqrt(div);
    float mean = sum/div;
    out[cIdx + nIdx * (cSize*3)] = __float2half(mean);
    out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f);
    out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]);
  }
}
#else
// Stub for architectures below sm_53, which lack FP16 arithmetic support.
__global__
void gPoolChannelsNCHWHalfKernel(const half* in, half* out, int cSize, int xySize, const float* maskSum, int sharedMemElts)
{
  //Do nothing, FP16 not supported
}
#endif
// Half-precision overload: same launch configuration as the float version,
// dispatching to the half kernel. Output shape: [nSize][3*cSize].
void customCudaPoolRowsGPoolNCHW(const half* in, half* out, int nSize, int cSize, int xySize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNCHW: cSize too large");
  // Largest power of two worth of threads for the xy reduction.
  int threadsXY = 1;
  for(; threadsXY < targetNumThreads && threadsXY < xySize/2; threadsXY *= 2) {}
  // Remaining thread budget covers channels.
  int threadsC = (targetNumThreads < threadsXY) ? 1 : (targetNumThreads / threadsXY);
  int blocksC = (cSize + threadsC - 1) / threadsC;
  // One shared slot per thread, doubled (sum array + max array), rounded up
  // to a power of two to sidestep alignment concerns.
  int elts = 128;
  for(; elts < threadsC * threadsXY; elts *= 2) {}
  int smemBytes = sizeof(float) * elts * 2;
  gPoolChannelsNCHWHalfKernel<<<dim3(1, blocksC, nSize), dim3(threadsXY, threadsC, 1), smemBytes>>>(in, out, cSize, xySize, maskSum, elts);
}
//--------------------------------------------------------------------------------------------------------------
// Sums each (n,c) channel of an NHWC tensor over its xy extent and writes
// out[n][c] = sum * scaleSum. In NHWC the channel is the fastest axis, so
// threads are laid out with c on x for coalescing. Shared layout:
// [xyId][cId], cId fastest; every slot is zero-initialized up front so
// out-of-range threads contribute zeros to the reduction. blockDim.y must be
// a power of two (guaranteed by the launcher).
__global__
void sumChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, float scaleSum)
{
  extern __shared__ float sumPoolNHWCShared[];
  int cId = threadIdx.x;
  int cBlockDim = blockDim.x;
  int xyId = threadIdx.y;
  int xyBlockDim = blockDim.y;
  int cIdx = blockIdx.x * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int sharedIdx = cId + cBlockDim * xyId;
  int xycSize = xySize*cSize;
  sumPoolNHWCShared[sharedIdx] = 0;
  if(cIdx < cSize) {
    // Strided partial sum over this channel's xy positions.
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize];
      xyIdx += xyBlockDim;
    }
  }
  __syncthreads();
  // Tree reduction along the xy dimension of the shared buffer.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s];
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize)
    out[cIdx + nIdx * cSize] = sumPoolNHWCShared[sharedIdx] * scaleSum;
}
// NHWC variant of the value-head pooling kernel. For each (n,c) it computes
// the xy sum, then emits three features per channel into out[n][3*cSize]:
//   [c]            mean = sum / maskSum[n]
//   [c + cSize]    mean * (sqrt(area) - 14) * 0.1
//   [c + 2*cSize]  mean * ((sqrt(area) - 14)^2 * 0.01 - 0.1)
// (14.0f centers the area scaling around a typical mask size — confirm
// against the model definition.) Shared layout: [xyId][cId], zeroed first;
// blockDim.y must be a power of two (guaranteed by the launcher).
__global__
void valueHeadPoolChannelsNHWCKernel(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum)
{
  extern __shared__ float sumPoolNHWCShared[];
  int cId = threadIdx.x;
  int cBlockDim = blockDim.x;
  int xyId = threadIdx.y;
  int xyBlockDim = blockDim.y;
  int cIdx = blockIdx.x * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int sharedIdx = cId + cBlockDim * xyId;
  int xycSize = xySize*cSize;
  sumPoolNHWCShared[sharedIdx] = 0;
  if(cIdx < cSize) {
    // Strided partial sum over this channel's xy positions.
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      sumPoolNHWCShared[sharedIdx] += in[cIdx + xyIdx * cSize + nIdx * xycSize];
      xyIdx += xyBlockDim;
    }
  }
  __syncthreads();
  // Tree reduction along the xy dimension.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumPoolNHWCShared[sharedIdx] += sumPoolNHWCShared[sharedIdx + cBlockDim * s];
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize) {
    float sum = sumPoolNHWCShared[sharedIdx];
    float div = maskSum[nIdx];
    float sqrtdiv = sqrt(div);
    float mean = sum/div;
    out[cIdx + nIdx * cSize*3] = mean;
    out[cIdx + nIdx * cSize*3 + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f;
    out[cIdx + nIdx * cSize*3 + cSize*2] = mean * ((sqrtdiv - 14.0f) * (sqrtdiv - 14.0f) * 0.01f - 0.1f);
  }
}
// NHWC variant of the global-pooling kernel: per (n,c) computes sum and max
// in one pass, writing mean, area-scaled mean, and max into out[n][3*cSize].
// NOTE(review): the max accumulator starts at 0, so the pooled max is
// effectively max(0, values) — assumes masked-out entries are zero and
// in-mask activations are nonnegative; confirm upstream. Shared memory holds
// a sum array and a max array (sharedMemElts floats each), both zeroed
// first; blockDim.y must be a power of two (guaranteed by the launcher).
__global__
void gPoolChannelsNHWCKernel(const float* in, float* out, int xySize, int cSize, const float* maskSum, int sharedMemElts)
{
  extern __shared__ float poolNHWCShared[];
  float* sumShared = (float*)poolNHWCShared;
  float* maxShared = (float*)poolNHWCShared + sharedMemElts;
  int cId = threadIdx.x;
  int cBlockDim = blockDim.x;
  int xyId = threadIdx.y;
  int xyBlockDim = blockDim.y;
  int cIdx = blockIdx.x * cBlockDim + cId;
  int nIdx = blockIdx.z;
  int sharedIdx = cId + cBlockDim * xyId;
  int xycSize = xySize*cSize;
  sumShared[sharedIdx] = 0;
  maxShared[sharedIdx] = 0;
  if(cIdx < cSize) {
    // Strided partial sum and max over this channel's xy positions.
    int xyIdx = xyId;
    while(xyIdx < xySize) {
      float a = in[cIdx + xyIdx * cSize + nIdx * xycSize];
      sumShared[sharedIdx] += a;
      maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a);
      xyIdx += xyBlockDim;
    }
  }
  __syncthreads();
  // Tree reduction for both sum and max along the xy dimension.
  for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
    if(xyId < s) {
      sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s];
      maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]);
    }
    __syncthreads();
  }
  if(xyId == 0 && cIdx < cSize) {
    float sum = sumShared[sharedIdx];
    float div = maskSum[nIdx];
    float sqrtdiv = sqrt(div);
    float mean = sum/div;
    out[cIdx + nIdx * (cSize*3)] = mean;
    out[cIdx + nIdx * (cSize*3) + cSize] = mean * (sqrtdiv - 14.0f) * 0.1f;
    out[cIdx + nIdx * (cSize*3) + cSize*2] = maxShared[sharedIdx];
  }
}
// Sums each (n,c) channel of an NHWC tensor over xy and multiplies by
// scaleSum. Output shape: [nSize][cSize].
void customCudaPoolRowsSumNHWC(const float* in, float* out, int nSize, int xySize, int cSize, float scaleSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsSumNHWC: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsSumNHWC: cSize too large");
  // Up to two warps of threads along the channel axis (the compact axis in NHWC).
  int threadsC = 1;
  for(; threadsC < 64 && threadsC < cSize/2; threadsC *= 2) {}
  int blocksC = (cSize + threadsC - 1) / threadsC;
  // Remaining thread budget does the parallel xy reduction.
  int threadsXY = (targetNumThreads < threadsC) ? 1 : (targetNumThreads / threadsC);
  // One float of shared memory per thread.
  int smemBytes = sizeof(float) * threadsC * threadsXY;
  sumChannelsNHWCKernel<<<dim3(blocksC, 1, nSize), dim3(threadsC, threadsXY, 1), smemBytes>>>(in, out, xySize, cSize, scaleSum);
}
//Launches valueHeadPoolChannelsNHWCKernel: value-head pooling over the xy
//dimension of an NHWC tensor, normalized by the per-batch mask sums.
//Throws std::runtime_error if nSize or cSize exceed the grid-dimension limit.
void customCudaValueHeadPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaValueHeadPoolNHWC: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaValueHeadPoolNHWC: cSize too large");
  //Channels vary fastest in NHWC, so pack up to two warps of threads along c
  //for coalesced access.
  int chanThreads = 1;
  while(chanThreads < 64 && chanThreads < cSize/2)
    chanThreads *= 2;
  int chanBlocks = (cSize + chanThreads - 1) / chanThreads;
  //Whatever thread budget remains goes to the parallel reduction over xy.
  int reduceThreads = (targetNumThreads < chanThreads) ? 1 : (targetNumThreads / chanThreads);
  //One shared-memory float per thread.
  int sharedBytes = sizeof(float) * chanThreads * reduceThreads;
  dim3 gridDims(chanBlocks, 1, nSize);
  dim3 blockDims(chanThreads, reduceThreads, 1);
  valueHeadPoolChannelsNHWCKernel<<<gridDims, blockDims, sharedBytes>>>(in, out, nSize, xySize, cSize, maskSum);
}
//Launches gPoolChannelsNHWCKernel (float): global pooling producing mean,
//scaled mean, and max per (n,c), written as three stacked channel groups.
//Throws std::runtime_error if nSize or cSize exceed the grid-dimension limit.
void customCudaPoolRowsGPoolNHWC(const float* in, float* out, int nSize, int xySize, int cSize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large");
  //Channels vary fastest in NHWC, so pack up to two warps of threads along c
  //for coalesced access.
  int chanThreads = 1;
  while(chanThreads < 64 && chanThreads < cSize/2)
    chanThreads *= 2;
  int chanBlocks = (cSize + chanThreads - 1) / chanThreads;
  //Whatever thread budget remains goes to the parallel reduction over xy.
  int reduceThreads = (targetNumThreads < chanThreads) ? 1 : (targetNumThreads / chanThreads);
  //One slot per thread for each of the sum and max arrays; the slot count is
  //rounded up to a power of two to sidestep any alignment concerns.
  int sharedSlots = 128;
  while(sharedSlots < chanThreads * reduceThreads)
    sharedSlots *= 2;
  int sharedBytes = sizeof(float) * sharedSlots * 2;
  dim3 gridDims(chanBlocks, 1, nSize);
  dim3 blockDims(chanThreads, reduceThreads, 1);
  gPoolChannelsNHWCKernel<<<gridDims, blockDims, sharedBytes>>>(in, out, xySize, cSize, maskSum, sharedSlots);
}
//--------------------------------------------------------------------------------------------------------------
#ifdef CUDA_SUPPORTS_FP16
// FP16 global-pooling kernel (NHWC). For each (batch n, channel c) it reduces
// over the xy dimension and writes three outputs per channel: the mean, the
// mean rescaled by a sqrt(area)-dependent factor, and the max.
// Reduction is accumulated in float for accuracy; blockDim.y must be a power
// of two for the tree reduction below to cover all partial sums.
__global__
void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts)
{
// Dynamic shared memory: first sharedMemElts floats hold partial sums,
// the next sharedMemElts hold partial maxes (one slot per thread).
extern __shared__ float poolNHWCShared[];
float* sumShared = (float*)poolNHWCShared;
float* maxShared = (float*)poolNHWCShared + sharedMemElts;
int cId = threadIdx.x;
int cBlockDim = blockDim.x;
int xyId = threadIdx.y;
int xyBlockDim = blockDim.y;
int cIdx = blockIdx.x * cBlockDim + cId;
int nIdx = blockIdx.z;
int sharedIdx = cId + cBlockDim * xyId;
int xycSize = xySize*cSize;
// NOTE(review): max is initialized to 0, so the reported "max" is clamped at
// zero -- presumably inputs are post-ReLU and nonnegative; confirm.
sumShared[sharedIdx] = 0;
maxShared[sharedIdx] = 0;
if(cIdx < cSize) {
// Strided loop: thread xyId handles xy positions xyId, xyId+blockDim.y, ...
int xyIdx = xyId;
while(xyIdx < xySize) {
float a = __half2float(in[cIdx + xyIdx * cSize + nIdx * xycSize]);
sumShared[sharedIdx] += a;
maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],a);
xyIdx += xyBlockDim;
}
}
__syncthreads();
// Tree reduction across the xy thread dimension, halving the stride each step.
for(int s = xyBlockDim>>1; s > 0; s >>= 1) {
if(xyId < s) {
sumShared[sharedIdx] += sumShared[sharedIdx + cBlockDim * s];
maxShared[sharedIdx] = fmaxf(maxShared[sharedIdx],maxShared[sharedIdx + cBlockDim * s]);
}
__syncthreads();
}
// Row 0 of each channel column now holds the full sum/max; write the three
// pooled outputs, laid out as three consecutive channel groups per batch.
if(xyId == 0 && cIdx < cSize) {
float sum = sumShared[sharedIdx];
float div = maskSum[nIdx];
float sqrtdiv = sqrt(div);
float mean = sum/div;
out[cIdx + nIdx * (cSize*3)] = __float2half(mean);
// NOTE(review): (sqrt(area)-14)*0.1 looks like a board-size normalization
// baked in at training time -- confirm the constants against the model spec.
out[cIdx + nIdx * (cSize*3) + cSize] = __float2half(mean * (sqrtdiv - 14.0f) * 0.1f);
out[cIdx + nIdx * (cSize*3) + cSize*2] = __float2half(maxShared[sharedIdx]);
}
}
#else
// Stub so the host-side launcher still links when FP16 is unavailable.
__global__
void gPoolChannelsNHWCHalfKernel(const half* in, half* out, int xySize, int cSize, const float* maskSum, int sharedMemElts)
{
//Do nothing, FP16 not supported
}
#endif
//FP16 overload: launches gPoolChannelsNHWCHalfKernel to compute the mean,
//scaled mean, and max per (n,c) of an NHWC tensor.
//Throws std::runtime_error if nSize or cSize exceed the grid-dimension limit.
void customCudaPoolRowsGPoolNHWC(const half* in, half* out, int nSize, int xySize, int cSize, const float* maskSum) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNHWC: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaPoolRowsGPoolNHWC: cSize too large");
  //Channels vary fastest in NHWC, so pack up to two warps of threads along c
  //for coalesced access.
  int chanThreads = 1;
  while(chanThreads < 64 && chanThreads < cSize/2)
    chanThreads *= 2;
  int chanBlocks = (cSize + chanThreads - 1) / chanThreads;
  //Whatever thread budget remains goes to the parallel reduction over xy.
  int reduceThreads = (targetNumThreads < chanThreads) ? 1 : (targetNumThreads / chanThreads);
  //One slot per thread for each of the sum and max arrays; the slot count is
  //rounded up to a power of two to sidestep any alignment concerns.
  int sharedSlots = 128;
  while(sharedSlots < chanThreads * reduceThreads)
    sharedSlots *= 2;
  int sharedBytes = sizeof(float) * sharedSlots * 2;
  dim3 gridDims(chanBlocks, 1, nSize);
  dim3 blockDims(chanThreads, reduceThreads, 1);
  gPoolChannelsNHWCHalfKernel<<<gridDims, blockDims, sharedBytes>>>(in, out, xySize, cSize, maskSum, sharedSlots);
}
//--------------------------------------------------------------------------------------------------------------
//Elementwise float -> half conversion, one thread per element.
__global__
void copyToHalfKernel(const float *in, half* out, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i >= n)
    return;
  out[i] = __float2half(in[i]);
}
//Elementwise half -> float conversion, one thread per element.
__global__
void copyFromHalfKernel(const half *in, float* out, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i >= n)
    return;
  out[i] = __half2float(in[i]);
}
//Host launcher for copyToHalfKernel: convert n floats to halfs.
void customCudaCopyToHalf(const float* in, half* out, int n) {
  int threadsPerBlock = targetNumThreads;
  int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
  copyToHalfKernel<<<blocks, threadsPerBlock>>>(in, out, n);
}
//Host launcher for copyFromHalfKernel: convert n halfs to floats.
void customCudaCopyFromHalf(const half* in, float* out, int n) {
  int threadsPerBlock = targetNumThreads;
  int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
  copyFromHalfKernel<<<blocks, threadsPerBlock>>>(in, out, n);
}
//--------------------------------------------------------------------------------------------------------------
#ifdef CUDA_SUPPORTS_FP16
// Elementwise in-place FP16 add: buf[i] += biases[i], one thread per element.
__global__
void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < nSize) {
buf[idx] = __hadd(buf[idx],biases[idx]);
}
}
#else
// Stub so the host-side launcher still links when FP16 is unavailable.
__global__
void addTensorInplaceHalfKernel(half *buf, const half* biases, int nSize)
{
//Do nothing, FP16 not supported
}
#endif
//Host launcher for addTensorInplaceHalfKernel: buf[i] += biases[i] over
//nSize FP16 elements.
void customCudaAddTensorInplace(half* buf, const half* biases, int nSize) {
  int threadsPerBlock = targetNumThreads;
  int blocks = (nSize + threadsPerBlock - 1) / threadsPerBlock;
  addTensorInplaceHalfKernel<<<blocks, threadsPerBlock>>>(buf, biases, nSize);
}
//--------------------------------------------------------------------------------------------------------------
//Adds a per-channel bias in place to an NC tensor: buf[n,c] += biases[c].
//x threads cover channels, y threads cover the batch dimension.
__global__
void addCBiasInplaceNCKernel(float *buf, const float* biases, int nSize, int cSize)
{
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  int n = blockIdx.y * blockDim.y + threadIdx.y;
  if(c >= cSize || n >= nSize)
    return;
  buf[n * cSize + c] += biases[c];
}
#ifdef CUDA_SUPPORTS_FP16
// FP16 variant of addCBiasInplaceNCKernel: buf[n,c] += biases[c].
__global__
void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int nIdx = blockIdx.y * blockDim.y + threadIdx.y;
if(cIdx < cSize && nIdx < nSize) {
int idx = nIdx * cSize + cIdx;
buf[idx] = __hadd(buf[idx],biases[cIdx]);
}
}
#else
// Stub so the host-side launcher still links when FP16 is unavailable.
__global__
void addCBiasInplaceNCHalfKernel(half *buf, const half* biases, int nSize, int cSize)
{
//Do nothing, FP16 not supported
}
#endif
//Shared float/FP16 dispatcher: adds a per-channel bias in place to an NC
//tensor, splitting the thread budget between the channel and batch dims.
//Throws std::runtime_error if the batch grid dimension would overflow.
void sharedAddCBiasInplaceNC(void* buf, const void* biases, int nSize, int cSize, bool isHalf) {
  int threadsC;
  int blocksC;
  int threadsN;
  int blocksN;
  splitThreadsAcrossDim01(cSize, nSize, threadsC, blocksC, threadsN, blocksN);
  if(blocksN > 65536)
    throw std::runtime_error("customCudaAddCBiasInplaceNC: nSize too large given cSize");
  dim3 gridDims(blocksC, blocksN, 1);
  dim3 blockDims(threadsC, threadsN, 1);
  if(isHalf)
    addCBiasInplaceNCHalfKernel<<<gridDims, blockDims>>>((half*)buf, (const half*)biases, nSize, cSize);
  else
    addCBiasInplaceNCKernel<<<gridDims, blockDims>>>((float*)buf, (const float*)biases, nSize, cSize);
}
// Public float entry point; forwards to the shared dispatcher.
void customCudaAddCBiasInplaceNC(float* buf, const float* biases, int nSize, int cSize) {
sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,false);
}
// Public FP16 entry point; forwards to the shared dispatcher.
void customCudaAddCBiasInplaceNC(half* buf, const half* biases, int nSize, int cSize) {
sharedAddCBiasInplaceNC(buf,biases,nSize,cSize,true);
}
//--------------------------------------------------------------------------------------------------------------
//Adds a per-(n,c) bias across all spatial positions of an NCHW tensor:
//buf[n,c,s] += biases[n,c]. x threads cover the flattened spatial dim,
//y threads cover channels, and grid z walks the batch.
__global__
void addNCBiasInplaceNCHWKernel(float *buf, const float* biases, int cSize, int sSize)
{
  int s = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y * blockDim.y + threadIdx.y;
  int n = blockIdx.z;
  if(s >= sSize || c >= cSize)
    return;
  int biasIdx = n * cSize + c;
  buf[biasIdx * sSize + s] += biases[biasIdx];
}
#ifdef CUDA_SUPPORTS_FP16
// FP16 variant of addNCBiasInplaceNCHWKernel: buf[n,c,s] += biases[n,c].
__global__
void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int ncIdx = nIdx * cSize + cIdx;
int idx = ncIdx * sSize + sIdx;
buf[idx] = __hadd(buf[idx],biases[ncIdx]);
}
}
#else
// Stub so the host-side launcher still links when FP16 is unavailable.
__global__
void addNCBiasInplaceNCHWHalfKernel(half *buf, const half* biases, int cSize, int sSize) {
//Do nothing, FP16 not supported
}
#endif
//Shared float/FP16 dispatcher: adds a per-(n,c) bias across the spatial
//positions of an NCHW tensor. Throws std::runtime_error if nSize or cSize
//exceed the grid-dimension limit.
void sharedAddNCBiasInplaceNCHW(void *buf, const void* biases, int nSize, int cSize, int xySize, bool isHalf) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaAddNCBiasInplaceNCHW: cSize too large");
  int sSize = xySize;
  int threadsS;
  int blocksS;
  int threadsC;
  int blocksC;
  //Spatial dimension first (fastest varying in NCHW), channels second.
  splitThreadsAcrossDim01(sSize, cSize, threadsS, blocksS, threadsC, blocksC);
  dim3 gridDims(blocksS, blocksC, nSize);
  dim3 blockDims(threadsS, threadsC, 1);
  if(isHalf)
    addNCBiasInplaceNCHWHalfKernel<<<gridDims, blockDims>>>((half*)buf, (const half*)biases, cSize, sSize);
  else
    addNCBiasInplaceNCHWKernel<<<gridDims, blockDims>>>((float*)buf, (const float*)biases, cSize, sSize);
}
// Public float entry point; forwards to the shared dispatcher.
void customCudaAddNCBiasInplaceNCHW(float *buf, const float* biases, int nSize, int cSize, int xySize) {
sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,false);
}
// Public FP16 entry point; forwards to the shared dispatcher.
void customCudaAddNCBiasInplaceNCHW(half *buf, const half* biases, int nSize, int cSize, int xySize) {
sharedAddNCBiasInplaceNCHW(buf,biases,nSize,cSize,xySize,true);
}
//--------------------------------------------------------------------------------------------------------------
//Adds a per-(n,c) bias across all spatial positions of an NHWC tensor:
//buf[n,s,c] += biases[n,c]. x threads cover channels (fastest varying in
//NHWC), y threads cover the flattened spatial dim, grid z walks the batch.
__global__
void addNCBiasInplaceNHWCKernel(float *buf, const float* biases, int sSize, int cSize)
{
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  int s = blockIdx.y * blockDim.y + threadIdx.y;
  int n = blockIdx.z;
  if(c >= cSize || s >= sSize)
    return;
  buf[(n * sSize + s) * cSize + c] += biases[n * cSize + c];
}
#ifdef CUDA_SUPPORTS_FP16
// FP16 variant of addNCBiasInplaceNHWCKernel: buf[n,s,c] += biases[n,c].
__global__
void addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int ncIdx = nIdx * cSize + cIdx;
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
buf[idx] = __hadd(buf[idx],biases[ncIdx]);
}
}
#else
// Stub so the host-side launcher still links when FP16 is unavailable.
__global__
void addNCBiasInplaceNHWCHalfKernel(half *buf, const half* biases, int sSize, int cSize)
{
//Do nothing, FP16 not supported
}
#endif
//Shared float/FP16 dispatcher: adds a per-(n,c) bias across the spatial
//positions of an NHWC tensor. Throws std::runtime_error if nSize or xySize
//exceed the grid-dimension limit.
void sharedAddNCBiasInplaceNHWC(void *buf, const void* biases, int nSize, int xySize, int cSize, bool isHalf) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: nSize too large");
  if(xySize > 65536)
    throw std::runtime_error("customCudaAddNCBiasInplaceNHWC: xySize too large");
  int sSize = xySize;
  int threadsC;
  int blocksC;
  int threadsS;
  int blocksS;
  //Channel dimension first (fastest varying in NHWC), spatial second.
  splitThreadsAcrossDim01(cSize, sSize, threadsC, blocksC, threadsS, blocksS);
  dim3 gridDims(blocksC, blocksS, nSize);
  dim3 blockDims(threadsC, threadsS, 1);
  if(isHalf)
    addNCBiasInplaceNHWCHalfKernel<<<gridDims, blockDims>>>((half*)buf, (const half*)biases, sSize, cSize);
  else
    addNCBiasInplaceNHWCKernel<<<gridDims, blockDims>>>((float*)buf, (const float*)biases, sSize, cSize);
}
// Public float entry point; forwards to the shared dispatcher.
void customCudaAddNCBiasInplaceNHWC(float *buf, const float* biases, int nSize, int xySize, int cSize) {
sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,false);
}
// Public FP16 entry point; forwards to the shared dispatcher.
void customCudaAddNCBiasInplaceNHWC(half *buf, const half* biases, int nSize, int xySize, int cSize) {
sharedAddNCBiasInplaceNHWC(buf,biases,nSize,xySize,cSize,true);
}
//--------------------------------------------------------------------------------------------------------------
// out[n,c,s] = in[n,c,s] * scale[c] + biases[c] (NCHW; s = flattened xy).
__global__
void applyCScaleBiasNCHWKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = in[idx] * scale[cIdx] + biases[cIdx];
}
}
// Same as above but with ReLU applied to the result.
__global__
void applyCScaleBiasNCHWReluKernel(const float *in, float* out, const float* scale, const float* biases, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f);
}
}
// Same as the base kernel but multiplied by a per-(n,s) mask (e.g. to zero
// out positions outside the board).
__global__
void applyCScaleBiasNCHWMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx];
}
}
// ReLU then mask: fmax(scale*in+bias, 0) * mask[n,s].
__global__
void applyCScaleBiasNCHWReluMaskKernel(const float *in, float* out, const float* scale, const float* biases, const float* mask, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx];
}
}
#ifdef CUDA_SUPPORTS_FP16
// FP16 variant: out[n,c,s] = fma(in, scale[c], biases[c]) in half precision.
__global__
void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]);
}
}
// FP16 variant with ReLU (implemented via __hgt against half zero).
__global__
void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
half a = __hfma(in[idx],scale[cIdx],biases[cIdx]);
const half halfzero = __float2half(0.0f);
out[idx] = __hgt(a,halfzero) ? a : halfzero;
}
}
// FP16 variant multiplied by a per-(n,s) mask.
__global__
void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]);
}
}
// FP16 variant: mask is applied BEFORE the ReLU clamp here (mask assumed
// nonnegative so the result is the same as relu-then-mask).
__global__
void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize)
{
int sIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * cSize + cIdx) * sSize + sIdx;
half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]);
const half halfzero = __float2half(0.0f);
out[idx] = __hgt(a,halfzero) ? a : halfzero;
}
}
#else
// Stubs so the host-side dispatcher still links when FP16 is unavailable.
__global__
void applyCScaleBiasNCHWHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNCHWReluHalfKernel(const half *in, half* out, const half* scale, const half* biases, int cSize, int sSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNCHWMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNCHWReluMaskHalfKernel(const half *in, half* out, const half* scale, const half* biases, const half* mask, int cSize, int sSize)
{
//Do nothing, FP16 not supported
}
#endif
//Shared dispatcher: out = in * scale[c] + biases[c] over an NCHW tensor,
//optionally ReLU'd and/or multiplied by a per-(n,xy) mask (mask == NULL
//means no masking). Selects one of eight kernels by (isHalf, applyRelu,
//mask). Throws std::runtime_error if nSize or cSize exceed the grid limit.
void sharedApplyCScaleBiasNCHW(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int cSize, int xySize, bool isHalf, bool applyRelu) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaApplyCScaleBiasNCHW: nSize too large");
  if(cSize > 65536)
    throw std::runtime_error("customCudaApplyCScaleBiasNCHW: cSize too large");
  int sSize = xySize;
  int threadsS;
  int blocksS;
  int threadsC;
  int blocksC;
  splitThreadsAcrossDim01(sSize, cSize, threadsS, blocksS, threadsC, blocksC);
  dim3 gridDims(blocksS, blocksC, nSize);
  dim3 blockDims(threadsS, threadsC, 1);
  if(isHalf) {
    const half* inH = (const half*)in;
    half* outH = (half*)out;
    const half* scaleH = (const half*)scale;
    const half* biasesH = (const half*)biases;
    const half* maskH = (const half*)mask;
    if(mask == NULL) {
      if(applyRelu)
        applyCScaleBiasNCHWReluHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,cSize,sSize);
      else
        applyCScaleBiasNCHWHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,cSize,sSize);
    }
    else {
      if(applyRelu)
        applyCScaleBiasNCHWReluMaskHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,maskH,cSize,sSize);
      else
        applyCScaleBiasNCHWMaskHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,maskH,cSize,sSize);
    }
  }
  else {
    const float* inF = (const float*)in;
    float* outF = (float*)out;
    const float* scaleF = (const float*)scale;
    const float* biasesF = (const float*)biases;
    const float* maskF = (const float*)mask;
    if(mask == NULL) {
      if(applyRelu)
        applyCScaleBiasNCHWReluKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,cSize,sSize);
      else
        applyCScaleBiasNCHWKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,cSize,sSize);
    }
    else {
      if(applyRelu)
        applyCScaleBiasNCHWReluMaskKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,maskF,cSize,sSize);
      else
        applyCScaleBiasNCHWMaskKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,maskF,cSize,sSize);
    }
  }
}
// Public float entry point; forwards to the shared dispatcher.
void customCudaApplyCScaleBiasNCHW(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int cSize, int xySize, bool applyRelu) {
sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,false,applyRelu);
}
// Public FP16 entry point; forwards to the shared dispatcher.
void customCudaApplyCScaleBiasNCHW(const half* in, half* out, const half* scale, const half* biases, const half* mask, int nSize, int cSize, int xySize, bool applyRelu) {
sharedApplyCScaleBiasNCHW(in,out,scale,biases,mask,nSize,cSize,xySize,true,applyRelu);
}
//--------------------------------------------------------------------------------------------------------------
// out[n,s,c] = in[n,s,c] * scale[c] + biases[c] (NHWC; s = flattened xy).
__global__
void applyCScaleBiasNHWCKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = in[idx] * scale[cIdx] + biases[cIdx];
}
}
// Same as above but with ReLU applied to the result.
__global__
void applyCScaleBiasNHWCReluKernel(const float* in, float* out, const float* scale, const float* biases, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f);
}
}
// Same as the base kernel but multiplied by a per-(n,s) mask.
__global__
void applyCScaleBiasNHWCMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = (in[idx] * scale[cIdx] + biases[cIdx]) * mask[nIdx*sSize+sIdx];
}
}
// ReLU then mask: fmax(scale*in+bias, 0) * mask[n,s].
__global__
void applyCScaleBiasNHWCReluMaskKernel(const float* in, float* out, const float* scale, const float* biases, const float* mask, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = fmaxf(in[idx] * scale[cIdx] + biases[cIdx],0.0f) * mask[nIdx*sSize+sIdx];
}
}
#ifdef CUDA_SUPPORTS_FP16
// FP16 variant: out[n,s,c] = fma(in, scale[c], biases[c]) in half precision.
__global__
void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = __hfma(in[idx],scale[cIdx],biases[cIdx]);
}
}
// FP16 variant with ReLU (implemented via __hgt against half zero).
__global__
void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
half a = __hfma(in[idx],scale[cIdx],biases[cIdx]);
const half halfzero = __float2half(0.0f);
out[idx] = __hgt(a,halfzero) ? a : halfzero;
}
}
// FP16 variant multiplied by a per-(n,s) mask.
__global__
void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
out[idx] = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]);
}
}
// FP16 variant: mask is applied BEFORE the ReLU clamp here (mask assumed
// nonnegative so the result is the same as relu-then-mask).
__global__
void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize)
{
int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
int sIdx = blockIdx.y * blockDim.y + threadIdx.y;
int nIdx = blockIdx.z;
if(cIdx < cSize && sIdx < sSize) {
int idx = (nIdx * sSize + sIdx) * cSize + cIdx;
half a = __hmul(__hfma(in[idx],scale[cIdx],biases[cIdx]),mask[nIdx*sSize+sIdx]);
const half halfzero = __float2half(0.0f);
out[idx] = __hgt(a,halfzero) ? a : halfzero;
}
}
#else
// Stubs so the host-side dispatcher still links when FP16 is unavailable.
__global__
void applyCScaleBiasNHWCHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNHWCReluHalfKernel(const half* in, half* out, const half* scale, const half* biases, int sSize, int cSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNHWCMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize)
{
//Do nothing, FP16 not supported
}
__global__
void applyCScaleBiasNHWCReluMaskHalfKernel(const half* in, half* out, const half* scale, const half* biases, const half* mask, int sSize, int cSize)
{
//Do nothing, FP16 not supported
}
#endif
//Shared dispatcher: out = in * scale[c] + biases[c] over an NHWC tensor,
//optionally ReLU'd and/or multiplied by a per-(n,xy) mask (mask == NULL
//means no masking). Selects one of eight kernels by (isHalf, applyRelu,
//mask). Throws std::runtime_error if nSize or xySize exceed the grid limit.
void sharedApplyCScaleBiasNHWC(const void* in, void* out, const void* scale, const void* biases, const void* mask, int nSize, int xySize, int cSize, bool isHalf, bool applyRelu) {
  if(nSize > 65536)
    throw std::runtime_error("customCudaApplyCScaleBiasNHWC: nSize too large");
  if(xySize > 65536)
    throw std::runtime_error("customCudaApplyCScaleBiasNHWC: xySize too large");
  int sSize = xySize;
  int threadsC;
  int blocksC;
  int threadsS;
  int blocksS;
  splitThreadsAcrossDim01(cSize, sSize, threadsC, blocksC, threadsS, blocksS);
  dim3 gridDims(blocksC, blocksS, nSize);
  dim3 blockDims(threadsC, threadsS, 1);
  if(isHalf) {
    const half* inH = (const half*)in;
    half* outH = (half*)out;
    const half* scaleH = (const half*)scale;
    const half* biasesH = (const half*)biases;
    const half* maskH = (const half*)mask;
    if(mask == NULL) {
      if(applyRelu)
        applyCScaleBiasNHWCReluHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,sSize,cSize);
      else
        applyCScaleBiasNHWCHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,sSize,cSize);
    }
    else {
      if(applyRelu)
        applyCScaleBiasNHWCReluMaskHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,maskH,sSize,cSize);
      else
        applyCScaleBiasNHWCMaskHalfKernel<<<gridDims,blockDims>>>(inH,outH,scaleH,biasesH,maskH,sSize,cSize);
    }
  }
  else {
    const float* inF = (const float*)in;
    float* outF = (float*)out;
    const float* scaleF = (const float*)scale;
    const float* biasesF = (const float*)biases;
    const float* maskF = (const float*)mask;
    if(mask == NULL) {
      if(applyRelu)
        applyCScaleBiasNHWCReluKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,sSize,cSize);
      else
        applyCScaleBiasNHWCKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,sSize,cSize);
    }
    else {
      if(applyRelu)
        applyCScaleBiasNHWCReluMaskKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,maskF,sSize,cSize);
      else
        applyCScaleBiasNHWCMaskKernel<<<gridDims,blockDims>>>(inF,outF,scaleF,biasesF,maskF,sSize,cSize);
    }
  }
}
// Public float entry point; forwards to the shared dispatcher.
void customCudaApplyCScaleBiasNHWC(const float* in, float* out, const float* scale, const float* biases, const float* mask, int nSize, int xySize, int cSize, bool applyRelu) {
sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,false,applyRelu);
}
// Public FP16 entry point; forwards to the shared dispatcher.
void customCudaApplyCScaleBiasNHWC(const half* in, half* out, const half* scale, const half* biases, const half* mask, int nSize, int xySize, int cSize, bool applyRelu) {
sharedApplyCScaleBiasNHWC(in,out,scale,biases,mask,nSize,xySize,cSize,true,applyRelu);
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include <iostream>
#include <thread>
#include <chrono>
#include <cassert>
#include "cudarad.h"
#include "bsp.h"
#include "bsp_shared.h"
#include "cudabsp.h"
#include "cudamatrix.h"
#include "raytracer.h"
#include "cudautils.h"
namespace CUDARAD {
// Host-side owner of the ray tracer (presumably initialized during setup
// elsewhere in this file -- not visible in this chunk).
static std::unique_ptr<RayTracer::CUDARayTracer> g_pRayTracer;
// Device-global pointer to the ray tracer, used by lighting kernels
// (e.g. DirectLighting::sample_at) for line-of-sight queries.
static __device__ RayTracer::CUDARayTracer* g_pDeviceRayTracer;
// Default constructor leaves members uninitialized; exists so a FaceInfo can
// be declared in __shared__ memory and assigned later (see map_faces).
__device__ FaceInfo::FaceInfo() {};
// Gathers everything the lighting kernels need about one BSP face:
// the face record, its plane, texture info, the precomputed inverse
// texture-projection matrix, and lightmap dimensions/offsets.
__device__ FaceInfo::FaceInfo(
CUDABSP::CUDABSP& cudaBSP,
size_t faceIndex
) :
faceIndex(faceIndex),
face(cudaBSP.faces[faceIndex]),
plane(cudaBSP.planes[face.planeNum]),
texInfo(cudaBSP.texInfos[face.texInfo]),
Ainv(cudaBSP.xyzMatrices[faceIndex]),
faceNorm(
make_float3(plane.normal.x, plane.normal.y, plane.normal.z)
),
// Luxel extents are inclusive, hence the +1 per axis -- presumably matches
// the BSP lightmap format; confirm against the BSP spec.
lightmapWidth(face.lightmapTextureSizeInLuxels[0] + 1),
lightmapHeight(face.lightmapTextureSizeInLuxels[1] + 1),
lightmapSize(lightmapWidth * lightmapHeight),
// lightOffset is a byte offset into the lighting lump; convert to an index
// into the RGBExp32 sample array.
lightmapStartIndex(face.lightOffset / sizeof(BSP::RGBExp32)),
totalLight(make_float3()) {}
//Converts lightmap coordinates (s, t) on this face into a world-space
//position by solving the texture-projection system with the precomputed
//inverse matrix Ainv; the third equation pins the point to the face plane.
__device__ float3 FaceInfo::xyz_from_st(float s, float t) {
  float sOffset = this->texInfo.lightmapVecs[0][3];
  float tOffset = this->texInfo.lightmapVecs[1][3];
  float sMin = this->face.lightmapTextureMinsInLuxels[0];
  float tMin = this->face.lightmapTextureMinsInLuxels[1];
  //Right-hand side: undo the lightmap offset/mins, plus the plane distance.
  CUDAMatrix::CUDAMatrix<double, 3, 1> rhs;
  rhs[0][0] = s - sOffset + sMin;
  rhs[1][0] = t - tOffset + tMin;
  rhs[2][0] = this->plane.dist;
  CUDAMatrix::CUDAMatrix<double, 3, 1> xyz = this->Ainv * rhs;
  return make_float3(xyz[0][0], xyz[1][0], xyz[2][0]);
}
}
namespace DirectLighting {
//Standard constant/linear/quadratic distance-falloff polynomial for a light.
//NOTE(review): returns 0 when all three coefficients are zero, and callers
//divide by this value -- presumably compiled maps never emit such a light;
//confirm before hardening.
static __device__ inline float attenuate(
    BSP::DWorldLight& light,
    float dist
) {
  float constant = light.constantAtten;
  float linear = light.linearAtten;
  float quadratic = light.quadraticAtten;
  return constant + linear * dist + quadratic * dist * dist;
}
/*
 * Computes direct lighting at a world-space position by accumulating the
 * contribution of every world light that:
 *   (a) shares the sample's PVS cluster,
 *   (b) is on the front side of the sample (when sampleNormal is nonzero),
 *   (c) contains the sample inside its cone, for spotlights, and
 *   (d) has unobstructed line-of-sight to the sample.
 *
 * sampleNormal may be zero-length, in which case the backface test (b) is
 * skipped.
 *
 * Returns the summed RGB light (linear scale, 0..255-ish per light).
 */
__device__ float3 sample_at(
    CUDABSP::CUDABSP& cudaBSP,
    float3 samplePos,
    float3 sampleNormal=make_float3()
) {
  uint8_t* pvs = CUDABSP::pvs_for_pos(cudaBSP, samplePos);
  size_t numClusters = cudaBSP.numVisClusters;
  // Distance by which shadow-ray endpoints are nudged off the surface, so
  // the ray doesn't immediately hit the triangle containing the sample.
  const float EPSILON = 1e-3f;
  float3 result = make_float3();
  for (size_t lightIndex=0;
      lightIndex<cudaBSP.numWorldLights;
      lightIndex++
  ) {
    BSP::DWorldLight& light = cudaBSP.worldLights[lightIndex];
    if (!CUDABSP::cluster_in_pvs(light.cluster, pvs, numClusters)) {
      // This light isn't within the sample's PVS. Skip it.
      continue;
    }
    float3 lightPos = make_float3(light.origin);
    float3 diff = samplePos - lightPos;
    /*
     * This light is on the wrong side of the current sample.
     * There's no way it could possibly light it.
     */
    if (len(sampleNormal) > 0.0f && dot(diff, sampleNormal) >= 0.0f) {
      continue;
    }
    float dist = len(diff);
    float3 dir = diff / dist;
    float penumbraScale = 1.0f;
    if (light.type == BSP::EMIT_SPOTLIGHT) {
      float3 lightNorm = make_float3(light.normal);
      float lightDot = dot(dir, lightNorm);
      if (lightDot < light.stopdot2) {
        /* This sample is outside the spotlight cone. */
        continue;
      }
      else if (lightDot < light.stopdot) {
        /*
         * This sample is within the spotlight's penumbra; fade
         * linearly between the inner and outer cones.
         */
        penumbraScale = (
          (lightDot - light.stopdot2)
          / (light.stopdot - light.stopdot2)
        );
      }
    }
    /*
     * Nudge the shadow-ray endpoint towards the light slightly, to avoid
     * colliding with triangles that directly contain the sample position.
     *
     * BUG FIX: this previously modified samplePos itself, so every light
     * processed pushed the sample a further EPSILON off its true position,
     * accumulating drift (and slightly changing the geometry seen by all
     * subsequent lights in the loop). Use a per-light local instead.
     */
    float3 shadowRayEnd = samplePos - dir * EPSILON;
    bool lightBlocked = CUDARAD::g_pDeviceRayTracer->LOS_blocked(
      lightPos, shadowRayEnd
    );
    if (lightBlocked) {
      // This light can't be seen from the position of the sample.
      // Ignore it.
      continue;
    }
    /* Light is visible: attenuate by distance and accumulate. */
    // NOTE(review): attenuate() returns 0 for a light whose coefficients are
    // all zero, which would divide by zero here -- confirm such lights never
    // occur in compiled maps.
    float attenuation = attenuate(light, dist);
    float3 lightContribution = make_float3(light.intensity);
    lightContribution *= penumbraScale * 255.0f / attenuation;
    result += lightContribution;
  }
  return result;
}
// Convenience overload: converts lightmap coordinates (s, t) on a face into a
// world position, then samples direct lighting there, using the face normal
// for the backface test.
__device__ float3 sample_at(
CUDABSP::CUDABSP& cudaBSP,
CUDARAD::FaceInfo& faceInfo,
float s, float t
) {
float3 samplePos = faceInfo.xyz_from_st(s, t);
return sample_at(cudaBSP, samplePos, faceInfo.faceNorm);
}
// One block per BSP face: samples direct lighting at every lightmap luxel,
// writes the samples into the BSP's lighting array, and accumulates the
// face's average light. pFacesCompleted is a progress counter the host can
// poll (presumably via mapped/zero-copy memory -- hence the
// __threadfence_system below; confirm against the host-side setup).
__global__ void map_faces(
CUDABSP::CUDABSP* pCudaBSP,
size_t* pFacesCompleted
) {
bool primaryThread = (threadIdx.x == 0 && threadIdx.y == 0);
// Sanity-check the magic tag so a bad pointer fails loudly instead of
// scribbling over memory.
if (pCudaBSP->tag != CUDABSP::TAG) {
if (primaryThread) {
printf("Invalid CUDABSP Tag: %x\n", pCudaBSP->tag);
}
return;
}
// Face data is loaded once by the primary thread and shared by the block.
__shared__ CUDARAD::FaceInfo faceInfo;
if (primaryThread) {
// Map block numbers to faces.
faceInfo = CUDARAD::FaceInfo(*pCudaBSP, blockIdx.x);
//printf(
//    "Processing Face %u...\n",
//    static_cast<unsigned int>(faceInfo.faceIndex)
//);
}
__syncthreads();
/* Take a sample at each lightmap luxel; the thread block tiles the lightmap. */
for (size_t i=0; i<faceInfo.lightmapHeight; i+=blockDim.y) {
size_t t = i + threadIdx.y;
if (t >= faceInfo.lightmapHeight) {
continue;
}
for (size_t j=0; j<faceInfo.lightmapWidth; j+=blockDim.x) {
size_t s = j + threadIdx.x;
if (s >= faceInfo.lightmapWidth) {
continue;
}
float3 color = sample_at(
*pCudaBSP, faceInfo,
static_cast<float>(s),
static_cast<float>(t)
);
size_t& lightmapStart = faceInfo.lightmapStartIndex;
size_t sampleIndex = t * faceInfo.lightmapWidth + s;
pCudaBSP->lightSamples[lightmapStart + sampleIndex] = color;
// Accumulate the face total in shared memory; atomics because many
// threads add concurrently.
atomicAdd(&faceInfo.totalLight.x, color.x);
atomicAdd(&faceInfo.totalLight.y, color.y);
atomicAdd(&faceInfo.totalLight.z, color.z);
}
}
__syncthreads();
if (primaryThread) {
faceInfo.avgLight = faceInfo.totalLight;
faceInfo.avgLight /= static_cast<float>(faceInfo.lightmapSize);
// The slot immediately before the face's lightmap data holds its average
// light. NOTE(review): underflows if lightmapStartIndex is 0 or the face
// has no lightmap (lightOffset == -1) -- presumably such faces never reach
// this kernel; confirm.
pCudaBSP->lightSamples[faceInfo.lightmapStartIndex - 1]
= faceInfo.avgLight;
// Still have no idea how this works. But if we don't do this,
// EVERYTHING becomes a disaster...
faceInfo.face.styles[0] = 0x00;
faceInfo.face.styles[1] = 0xFF;
faceInfo.face.styles[2] = 0xFF;
faceInfo.face.styles[3] = 0xFF;
/* Copy our changes back to the CUDABSP. */
pCudaBSP->faces[faceInfo.faceIndex] = faceInfo.face;
// Bump the host-visible progress counter. NOTE(review): only the low
// 32 bits of the size_t are incremented -- fine while face counts stay
// below 2^32, but worth confirming the host reads it as 32-bit.
atomicAdd(reinterpret_cast<unsigned int*>(pFacesCompleted), 1);
__threadfence_system();
}
//printf(
//    "Lightmap offset for face %u: %u\n",
//    static_cast<unsigned int>(faceIndex),
//    static_cast<unsigned int>(lightmapStartIndex)
//);
//printf("%u\n", static_cast<unsigned int>(*pFacesCompleted));
}
}
namespace AA {
    // Inverse gamma used to map linear light values onto a perceptual scale.
    static __device__ const float INV_GAMMA = 1.0f / 2.2f;

    static __device__ inline float perceptual_from_linear(float linear) {
        return powf(linear, INV_GAMMA);
    }

    // Perceptual "intensity" of an RGB sample. Currently weights all three
    // channels equally; the luma-weighted variant is commented out.
    static __device__ float intensity(float3 rgb) {
        return perceptual_from_linear(
            dot(
                rgb / 255.0f,
                make_float3(1.0f)
                //make_float3(0.299, 0.587, 0.114)
            )
        );
    }

    // Minimum perceptual gradient between a luxel and any neighbor for the
    // luxel to be selected for supersampling.
    //static __device__ const float MIN_AA_GRADIENT = 1.0f / 8.0f;
    static __device__ const float MIN_AA_GRADIENT = 1.0f / 16.0f;

    const size_t MAP_FACES_AA_BLOCK_WIDTH = 16;
    const size_t MAP_FACES_AA_BLOCK_HEIGHT = 16;
    const size_t MAP_FACES_AA_NUM_THREADS =
        MAP_FACES_AA_BLOCK_WIDTH * MAP_FACES_AA_BLOCK_HEIGHT;

    /**
     * Kernel: antialiases one face's lightmap per block.
     *
     * Pipeline: (1) flag luxels whose perceptual gradient against any of the
     * 8 neighbors exceeds MIN_AA_GRADIENT, (2) compact the flagged indices
     * with a block-wide prefix sum, (3) 4x4-supersample each flagged luxel,
     * (4) scatter the averaged results back into the lightmap.
     */
    __global__ void map_faces_AA(CUDABSP::CUDABSP* pCudaBSP) {
        int threadID = threadIdx.y * blockDim.x + threadIdx.x;
        bool primaryThread = (threadID == 0);

        __shared__ size_t threadsPerBlock;
        __shared__ size_t faceNum;
        __shared__ CUDARAD::FaceInfo faceInfo;
        __shared__ size_t lightmapStart;
        __shared__ size_t width;
        __shared__ size_t height;

        // 1 for luxels selected for supersampling, 0 otherwise.
        __shared__ uint32_t aaTargets[CUDABSP::MAX_LUXELS_PER_FACE];

        if (primaryThread) {
            threadsPerBlock = blockDim.x * blockDim.y;

            // Map block numbers to faces.
            faceNum = blockIdx.x;
            faceInfo = CUDARAD::FaceInfo(*pCudaBSP, faceNum);

            lightmapStart = faceInfo.lightmapStartIndex;
            width = faceInfo.lightmapWidth;
            height = faceInfo.lightmapHeight;
        }

        __syncthreads();

        assert(width * height <= CUDABSP::MAX_LUXELS_PER_FACE);

        /* Initialize the AA targets array. */
        for (size_t i=0; i<CUDABSP::MAX_LUXELS_PER_FACE; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= CUDABSP::MAX_LUXELS_PER_FACE) {
                continue;
            }
            aaTargets[index] = 0;
        }

        __syncthreads();

        /* Select luxels on this face that are good candidates for AA. */
        for (size_t i=0; i<height; i+=blockDim.y) {
            size_t t = i + threadIdx.y;
            if (t >= height) {
                continue;
            }

            for (size_t j=0; j<width; j+=blockDim.x) {
                size_t s = j + threadIdx.x;
                if (s >= width) {
                    continue;
                }

                size_t sampleIndex = t * width + s;
                float3 sampleColor
                    = pCudaBSP->lightSamples[lightmapStart + sampleIndex];
                float sampleIntensity = intensity(sampleColor);

                /* Calculate the maximum gradient of this luxel. */
                // Scan the up-to-8 in-bounds neighbors.
                float gradient = 0.0;
                for (int tOffset=-1; tOffset<=1; tOffset++) {
                    int neighborT = t + tOffset;
                    if (!(0 <= neighborT && neighborT < height)) {
                        continue;
                    }

                    for (int sOffset=-1; sOffset<=1; sOffset++) {
                        if (sOffset == 0 && tOffset == 0) {
                            continue;
                        }

                        int neighborS = s + sOffset;
                        if (!(0 <= neighborS && neighborS < width)) {
                            continue;
                        }

                        int neighborIndex
                            = neighborT * width + neighborS;
                        float neighborIntensity = intensity(
                            pCudaBSP->lightSamples[
                                lightmapStart + neighborIndex
                            ]
                        );

                        gradient = fmaxf(
                            gradient,
                            fabsf(neighborIntensity - sampleIntensity)
                        );
                    }
                }

                assert(sampleIndex < CUDABSP::MAX_LUXELS_PER_FACE);

                aaTargets[sampleIndex] = (gradient > MIN_AA_GRADIENT);
            }
        }

        __syncthreads();

        // Block-wide prefix sum over the selection flags; each selected
        // luxel's scan value is its slot in the compacted target list.
        __shared__ uint32_t scannedAATargets[CUDABSP::MAX_LUXELS_PER_FACE];
        prefix_sum<
            uint32_t, CUDABSP::MAX_LUXELS_PER_FACE,
            MAP_FACES_AA_BLOCK_WIDTH, MAP_FACES_AA_BLOCK_HEIGHT
        >(aaTargets, scannedAATargets);

        __syncthreads();

        __shared__ size_t numAATargets;
        __shared__ uint32_t aaTargetIndices[CUDABSP::MAX_LUXELS_PER_FACE];

        if (primaryThread) {
            // Total targets = last scan entry + last flag (the scan appears
            // to be exclusive, judging by this final-count computation).
            numAATargets =
                scannedAATargets[CUDABSP::MAX_LUXELS_PER_FACE - 1]
                + aaTargets[CUDABSP::MAX_LUXELS_PER_FACE - 1];
        }

        __syncthreads();

        /* Gather all the AA targets into the final target array. */
        for (size_t i=0; i<CUDABSP::MAX_LUXELS_PER_FACE; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= CUDABSP::MAX_LUXELS_PER_FACE) {
                continue;
            }
            if (aaTargets[index]) {
                size_t finalPosition = scannedAATargets[index];
                aaTargetIndices[finalPosition] = index;
            }
        }

        __syncthreads();

        __shared__ float3 finalSamples[CUDABSP::MAX_LUXELS_PER_FACE];

        /* Zero out all the final samples. */
        for (size_t i=0; i<numAATargets; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= numAATargets) {
                continue;
            }
            finalSamples[index] = make_float3();
        }

        __syncthreads();

        // Each selected luxel is supersampled on a 4x4 sub-grid spanning
        // [-1, 1) in lightmap space around the luxel.
        const size_t SUPERSAMPLE_WIDTH = 4;
        const size_t SUPERSAMPLES_PER_TARGET =
            SUPERSAMPLE_WIDTH * SUPERSAMPLE_WIDTH;

        const size_t numSupersamples = numAATargets * SUPERSAMPLES_PER_TARGET;

        /* Supersample all the target positions. */
        for (size_t i=0; i<numSupersamples; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= numSupersamples) {
                continue;
            }

            // Decompose the flat work index into (target, sub-sample).
            size_t aaTargetNumber = index / SUPERSAMPLES_PER_TARGET;
            size_t targetSupersampleNumber = index % SUPERSAMPLES_PER_TARGET;

            size_t aaTargetIndex = aaTargetIndices[aaTargetNumber];

            float s = static_cast<float>(aaTargetIndex % width);
            float t = static_cast<float>(aaTargetIndex / width);

            size_t sOffsetIndex = targetSupersampleNumber % SUPERSAMPLE_WIDTH;
            size_t tOffsetIndex = targetSupersampleNumber / SUPERSAMPLE_WIDTH;

            float sStep = 2.0f / static_cast<float>(SUPERSAMPLE_WIDTH);
            float tStep = 2.0f / static_cast<float>(SUPERSAMPLE_WIDTH);

            float sOffset = sStep * sOffsetIndex - 1.0f;
            float tOffset = tStep * tOffsetIndex - 1.0f;

            float3 color = DirectLighting::sample_at(
                *pCudaBSP, faceInfo,
                s + sOffset, t + tOffset
            );

            // Multiple threads may accumulate into the same target.
            float3& sample = finalSamples[aaTargetNumber];
            atomicAdd(&sample.x, color.x);
            atomicAdd(&sample.y, color.y);
            atomicAdd(&sample.z, color.z);
        }

        __threadfence_block();
        __syncthreads();

        /* Average out all the supersamples. */
        for (size_t i=0; i<numAATargets; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= numAATargets) {
                continue;
            }
            finalSamples[index] /= SUPERSAMPLES_PER_TARGET;
        }

        __syncthreads();

        /* Scatter the final samples to their lightmap positions. */
        for (size_t i=0; i<numAATargets; i+=threadsPerBlock) {
            size_t index = i + threadID;
            if (index >= numAATargets) {
                continue;
            }
            size_t targetIndex = aaTargetIndices[index];
            size_t lightmapIndex = lightmapStart + targetIndex;
            pCudaBSP->lightSamples[lightmapIndex] = finalSamples[index];
        }
    }
}
namespace BouncedLighting {
    static __device__ const float PI = 3.14159265358979323846264f;
    static __device__ const float INV_PI = 0.31830988618379067153715f;

    /**
     * Computes the form factor from a differential patch to a convex
     * polygonal patch.
     *
     * Thankfully, Source's polygons are always convex.
     *
     * Formula graciously stolen from Formula 81 of this book:
     * https://people.cs.kuleuven.be/~philip.dutre/GI/TotalCompendium.pdf
     *
     * ... and Formula 4.16 of this one:
     * https://books.google.com/books?id=zALK286TFXgC&lpg=PP1&pg=PA72#v=onepage&q&f=false
     *
     * diffPos/diffNorm - position and unit normal of the differential patch.
     * vertices         - the polygon's vertices; numVertices of them.
     */
    static __device__ float ff_diff_poly(
            float3 diffPos, float3 diffNorm,
            float3* vertices, size_t numVertices
            ) {
        float result = 0.0f;

        // Walk every polygon edge. (Previously this was hard-coded to 4
        // iterations, which was only correct for quads even though the
        // wrap-around index already used numVertices.)
        for (size_t i=0; i<numVertices; i++) {
            float3 vertex1 = vertices[i] - diffPos;
            float3 vertex2 = vertices[(i + 1) % numVertices] - diffPos;

            float3 vertexCross = cross(vertex1, vertex2);
            float crossLen = len(vertexCross);

            // Normalize the edge-plane normal.
            vertexCross /= crossLen;

            float v1Len = len(vertex1);
            float v2Len = len(vertex2);

            // Angle subtended by this edge at the differential patch.
            float theta = asinf(crossLen / (v1Len * v2Len));

            result += dot(diffNorm, vertexCross) * theta;
        }

        result *= 0.5f * INV_PI;

        return result;
    }

    /** Computes the form factor between two differential patches. */
    static __device__ float ff_diff_diff(
            float3 diff1Pos, float3 diff1Norm,
            float3 diff2Pos, float3 diff2Norm
            ) {
        float3 delta = diff2Pos - diff1Pos;
        float invDist = 1.0f / len(delta);
        float3 dir = delta * invDist;

        return (
            dot(diff1Norm, dir) * -dot(diff2Norm, dir)
                * INV_PI * invDist * invDist
        );
    }
}
namespace AmbientLighting {
    // Scale factor applied to direct-lighting samples when building the
    // ambient light cubes.
    static __device__ const float AMBIENT_SCALE = 1.0f / 128.0f;

    /**
     * Kernel: fills in the ambient light cube for every ambient sample of
     * one leaf per block. Threads stride across the leaf's samples.
     */
    __global__ void map_leaves(CUDABSP::CUDABSP* pCudaBSP) {
        size_t leafIndex = blockIdx.x;

        if (leafIndex >= pCudaBSP->numLeaves) {
            return;
        }

        BSP::DLeaf& leaf = pCudaBSP->leaves[leafIndex];

        // Solid leaves receive no ambient lighting.
        if (leaf.contents & BSP::CONTENTS_SOLID) {
            return;
        }

        BSP::DLeafAmbientIndex& ambientIndex
            = pCudaBSP->ambientIndices[leafIndex];

        BSP::DLeafAmbientLighting* ambientSamples
            = &pCudaBSP->ambientLightSamples[ambientIndex.firstAmbientSample];

        for (size_t sampleNum=threadIdx.x;
                sampleNum<ambientIndex.ambientSampleCount;
                sampleNum+=blockDim.x) {

            BSP::DLeafAmbientLighting& sample = ambientSamples[sampleNum];

            // The sample's (x, y, z) bytes are fractions (0-255) of the
            // leaf's bounding box; recover the world-space position.
            float3 boundsMin = make_float3(
                leaf.mins[0], leaf.mins[1], leaf.mins[2]
            );
            float3 boundsMax = make_float3(
                leaf.maxs[0], leaf.maxs[1], leaf.maxs[2]
            );
            float3 extents = boundsMax - boundsMin;

            float3 samplePos = boundsMin + make_float3(
                extents.x * static_cast<float>(sample.x) / 255.0f,
                extents.y * static_cast<float>(sample.y) / 255.0f,
                extents.z * static_cast<float>(sample.z) / 255.0f
            );

            /*
             * Note: This isn't really the correct way to do ambient lighting.
             * Actual ambient lighting would sample lightmaps visible from this
             * point in a sphere, and use that information to accumulate
             * lighting data into a light cube.
             * TODO: Write an actual ambient lighting algorithm.
             */
            // Axis directions for the six cube faces, in the lump's
            // required order: +X, -X, +Y, -Y, +Z, -Z.
            const float3 cubeDirs[6] = {
                make_float3(1.0f, 0.0f, 0.0f),
                make_float3(-1.0f, 0.0f, 0.0f),
                make_float3(0.0f, 1.0f, 0.0f),
                make_float3(0.0f, -1.0f, 0.0f),
                make_float3(0.0f, 0.0f, 1.0f),
                make_float3(0.0f, 0.0f, -1.0f),
            };

            for (size_t faceNum=0; faceNum<6; faceNum++) {
                sample.cube.color[faceNum] = CUDABSP::rgbexp32_from_float3(
                    DirectLighting::sample_at(
                        *pCudaBSP,
                        samplePos,
                        cubeDirs[faceNum]
                    ) * AMBIENT_SCALE
                );
            }
        }
    }
}
namespace CUDARAD {
    /**
     * Builds the host-side ray tracer from the BSP's faces and copies it to
     * the device, publishing the device pointer via g_pDeviceRayTracer.
     */
    void init(BSP::BSP& bsp) {
        std::cout << "Setting up ray-trace acceleration structure... "
            << std::flush;

        using Clock = std::chrono::high_resolution_clock;
        auto start = Clock::now();

        g_pRayTracer = std::unique_ptr<RayTracer::CUDARayTracer>(
            new RayTracer::CUDARayTracer()
        );

        std::vector<RayTracer::Triangle> triangles;

        /* Put all of the BSP's face triangles into the ray-tracer. */
        for (const BSP::Face& face : bsp.get_faces()) {
            int32_t flags = face.get_texinfo().flags;
            if ((flags & BSP::SURF_TRANS) && !(flags & BSP::SURF_NODRAW)) {
                // Skip translucent faces, but keep nodraw faces.
                continue;
            }

            // Triangulate the face as a fan anchored at its first vertex.
            std::vector<BSP::Edge>::const_iterator pEdge
                = face.get_edges().begin();

            BSP::Vec3<float> vertex1 = (pEdge++)->vertex1;
            BSP::Vec3<float> vertex2;
            BSP::Vec3<float> vertex3 = (pEdge++)->vertex1;

            do {
                vertex2 = vertex3;
                vertex3 = (pEdge++)->vertex1;

                RayTracer::Triangle tri {
                    {
                        make_float3(vertex1),
                        make_float3(vertex2),
                        make_float3(vertex3),
                    },
                };

                triangles.push_back(tri);

            } while (pEdge != face.get_edges().end());
        }

        g_pRayTracer->add_triangles(triangles);

        auto end = Clock::now();
        std::chrono::milliseconds ms
            = std::chrono::duration_cast<std::chrono::milliseconds>(
                end - start
            );

        std::cout << "Done! (" << ms.count() << " ms)" << std::endl;

        std::cout << "Moving ray-tracer to device..." << std::endl;

        RayTracer::CUDARayTracer* pDeviceRayTracer;
        CUDA_CHECK_ERROR(
            cudaMalloc(&pDeviceRayTracer, sizeof(RayTracer::CUDARayTracer))
        );
        CUDA_CHECK_ERROR(
            cudaMemcpy(
                pDeviceRayTracer, g_pRayTracer.get(),
                sizeof(RayTracer::CUDARayTracer),
                cudaMemcpyHostToDevice
            )
        );
        CUDA_CHECK_ERROR(
            cudaMemcpyToSymbol(
                g_pDeviceRayTracer, &pDeviceRayTracer,
                sizeof(RayTracer::CUDARayTracer*), 0,
                cudaMemcpyHostToDevice
            )
        );
    }

    /** Frees the device-side ray tracer and drops the host-side instance. */
    void cleanup(void) {
        RayTracer::CUDARayTracer* pDeviceRayTracer;

        CUDA_CHECK_ERROR(
            cudaMemcpyFromSymbol(
                &pDeviceRayTracer, g_pDeviceRayTracer,
                sizeof(RayTracer::CUDARayTracer*), 0,
                cudaMemcpyDeviceToHost
            )
        );

        CUDA_CHECK_ERROR(cudaFree(pDeviceRayTracer));

        g_pRayTracer = nullptr;
    }

    /**
     * Launches the direct-lighting kernel (one block per face) and polls a
     * host-mapped counter to print progress while it runs.
     */
    void compute_direct_lighting(BSP::BSP& bsp, CUDABSP::CUDABSP* pCudaBSP) {
        // Host-mapped counter the kernel increments once per finished face.
        volatile size_t* pFacesCompleted;
        CUDA_CHECK_ERROR(
            cudaHostAlloc(
                &pFacesCompleted, sizeof(size_t),
                cudaHostAllocMapped
            )
        );
        *pFacesCompleted = 0;

        volatile size_t* pDeviceFacesCompleted;
        CUDA_CHECK_ERROR(
            cudaHostGetDevicePointer(
                const_cast<size_t**>(&pDeviceFacesCompleted),
                const_cast<size_t*>(pFacesCompleted),
                0
            )
        );

        const size_t BLOCK_WIDTH = 16;
        const size_t BLOCK_HEIGHT = 16;

        size_t numFaces = bsp.get_faces().size();

        dim3 blockDim(BLOCK_WIDTH, BLOCK_HEIGHT);

        std::cout << "Launching "
            << numFaces * BLOCK_WIDTH * BLOCK_HEIGHT << " threads ("
            << numFaces << " faces)..."
            << std::endl;

        cudaEvent_t startEvent;
        cudaEvent_t stopEvent;

        CUDA_CHECK_ERROR(cudaEventCreate(&startEvent));
        CUDA_CHECK_ERROR(cudaEventCreate(&stopEvent));

        CUDA_CHECK_ERROR(cudaEventRecord(startEvent));

        KERNEL_LAUNCH(
            DirectLighting::map_faces,
            numFaces, blockDim,
            pCudaBSP, const_cast<size_t*>(pDeviceFacesCompleted)
        );

        flush_wddm_queue();

        size_t lastFacesCompleted = 0;
        size_t facesCompleted;

        /* Progress notification logic */
        do {
            CUDA_CHECK_ERROR(cudaPeekAtLastError());

            facesCompleted = *pFacesCompleted;

            if (facesCompleted > lastFacesCompleted) {
                std::cout << "    " << facesCompleted << "/"
                    << numFaces
                    << " faces processed..." << std::endl;
            }

            lastFacesCompleted = facesCompleted;

            std::this_thread::sleep_for(std::chrono::milliseconds(5));

        } while (facesCompleted < numFaces);

        CUDA_CHECK_ERROR(cudaEventRecord(stopEvent));
        CUDA_CHECK_ERROR(cudaDeviceSynchronize());

        float time;
        CUDA_CHECK_ERROR(cudaEventElapsedTime(&time, startEvent, stopEvent));

        std::cout << "Done! (" << time << " ms)" << std::endl;

        // Release the timing events and the mapped host allocation.
        // (Previously the events were leaked.)
        CUDA_CHECK_ERROR(cudaEventDestroy(startEvent));
        CUDA_CHECK_ERROR(cudaEventDestroy(stopEvent));
        cudaFreeHost(const_cast<size_t*>(pFacesCompleted));
    }

    /** Launches the lightmap antialiasing kernel (one block per face). */
    void antialias_direct_lighting(BSP::BSP& bsp, CUDABSP::CUDABSP* pCudaBSP) {
        cudaEvent_t startEvent;
        cudaEvent_t stopEvent;

        CUDA_CHECK_ERROR(cudaEventCreate(&startEvent));
        CUDA_CHECK_ERROR(cudaEventCreate(&stopEvent));

        CUDA_CHECK_ERROR(cudaEventRecord(startEvent));

        size_t numFaces = bsp.get_faces().size();
        dim3 blockDim(
            AA::MAP_FACES_AA_BLOCK_WIDTH,
            AA::MAP_FACES_AA_BLOCK_HEIGHT
        );

        KERNEL_LAUNCH(
            AA::map_faces_AA,
            numFaces, blockDim,
            pCudaBSP
        );

        CUDA_CHECK_ERROR(cudaEventRecord(stopEvent));
        CUDA_CHECK_ERROR(cudaDeviceSynchronize());

        float time;
        CUDA_CHECK_ERROR(cudaEventElapsedTime(&time, startEvent, stopEvent));

        std::cout << "Done! (" << time << " ms)" << std::endl;

        // Release the timing events. (Previously leaked.)
        CUDA_CHECK_ERROR(cudaEventDestroy(startEvent));
        CUDA_CHECK_ERROR(cudaEventDestroy(stopEvent));
    }

    /** Radiosity light bouncing. TODO: not implemented yet; only times a no-op. */
    void bounce_lighting(BSP::BSP& bsp, CUDABSP::CUDABSP* pCudaBSP) {
        using Clock = std::chrono::high_resolution_clock;
        auto start = Clock::now();

        auto end = Clock::now();
        std::chrono::milliseconds ms
            = std::chrono::duration_cast<std::chrono::milliseconds>(
                end - start
            );

        std::cout << "Done! (" << ms.count() << " ms)" << std::endl;
    }

    /** Launches the ambient-lighting kernel (one block per BSP leaf). */
    void compute_ambient_lighting(CUDABSP::CUDABSP* pCudaBSP) {
        using Clock = std::chrono::high_resolution_clock;
        auto start = Clock::now();

        const size_t BLOCK_WIDTH = 32;

        // The leaf count lives on the device; fetch it for the launch config.
        size_t numLeaves;
        CUDA_CHECK_ERROR(
            cudaMemcpy(
                &numLeaves, &pCudaBSP->numLeaves, sizeof(size_t),
                cudaMemcpyDeviceToHost
            )
        );

        KERNEL_LAUNCH(
            AmbientLighting::map_leaves,
            numLeaves, BLOCK_WIDTH,
            pCudaBSP
        );

        CUDA_CHECK_ERROR(cudaDeviceSynchronize());

        auto end = Clock::now();
        std::chrono::milliseconds ms
            = std::chrono::duration_cast<std::chrono::milliseconds>(
                end - start
            );

        std::cout << "Done! (" << ms.count() << " ms)" << std::endl;
    }
}
/*
 * Evaluates a cuDNN call and aborts the process with a diagnostic message
 * if it returns anything other than CUDNN_STATUS_SUCCESS.
 */
#define checkCUDNN(expression) \
{ \
    cudnnStatus_t status = (expression); \
    if (status != CUDNN_STATUS_SUCCESS) { \
        std::cerr << "Error on line " << __LINE__ << ": " \
            << cudnnGetErrorString(status) << std::endl; \
        std::exit(EXIT_FAILURE); \
    } \
}
/**
 * Configures `activationDesc` as a 4D NCHW float tensor matching `shape`.
 *
 * 2-D shapes (batch, channels) are described as (batch, channels, 1, 1);
 * 4-D shapes are used as-is.
 *
 * Returns 0 on success, -1 for an unsupported rank (previously unsupported
 * ranks silently left the descriptor unconfigured and returned 0).
 */
int setTensorDescriptor(cudnnTensorDescriptor_t activationDesc,
                        const int numDim,
                        const long shape[]) {
    int batchSize = 0;
    int channels = 0;
    switch (numDim) {
        case 2:
            batchSize = shape[0];
            channels = shape[1];
            checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
                                                  CUDNN_TENSOR_NCHW,
                                                  CUDNN_DATA_FLOAT,
                                                  batchSize,
                                                  channels, 1, 1));
            break;
        case 4: {
            batchSize = shape[0];
            channels = shape[1];
            const int height = shape[2];
            const int width = shape[3];
            checkCUDNN(cudnnSetTensor4dDescriptor(activationDesc,
                                                  CUDNN_TENSOR_NCHW,
                                                  CUDNN_DATA_FLOAT,
                                                  batchSize,
                                                  channels,
                                                  height,
                                                  width));
            break;
        }
        default:
            // Report unsupported ranks instead of silently leaving the
            // descriptor unconfigured.
            std::cerr << "setTensorDescriptor: unsupported tensor rank: "
                << numDim << std::endl;
            return -1;
    }
    return 0;
}
// Lazily-created global cuDNN handle, shared by every op in this file.
cudnnHandle_t cudnn_handler = NULL;
/**
 * Applies an element-wise ReLU to `input`, writing the result to `output`.
 *
 * Both arrays must be float32 with matching shapes (the first two
 * dimensions are asserted). Returns 0 on success; cuDNN failures abort via
 * checkCUDNN.
 */
int cudnnReLUForward(const DLArrayHandle input, DLArrayHandle output) {
    const float *input_data = (const float *) input->data;
    float *output_data = (float *) output->data;

    assert(input->shape[0] == output->shape[0]);
    assert(input->shape[1] == output->shape[1]);

    // Create the shared handle on first use. (Now checked, consistent with
    // the rest of the file's checkCUDNN convention.)
    if (!cudnn_handler) {
        checkCUDNN(cudnnCreate(&cudnn_handler));
    }

    cudnnActivationDescriptor_t activation_descriptor;
    checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
    checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
                                            CUDNN_ACTIVATION_RELU, // type of activation
                                            CUDNN_PROPAGATE_NAN,   // reluNanOpt
                                            0));                   // relu_coef

    cudnnTensorDescriptor_t input_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
    setTensorDescriptor(input_descriptor, input->ndim, input->shape);

    cudnnTensorDescriptor_t output_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
    setTensorDescriptor(output_descriptor, output->ndim, output->shape);

    // output = 1 * relu(input) + 0 * output
    const float alpha = 1.0f, beta = 0.0f;
    checkCUDNN(cudnnActivationForward(cudnn_handler,
                                      activation_descriptor,
                                      &alpha,
                                      input_descriptor,
                                      input_data,
                                      &beta,
                                      output_descriptor,
                                      output_data));

    cudnnDestroyActivationDescriptor(activation_descriptor);
    cudnnDestroyTensorDescriptor(input_descriptor);
    cudnnDestroyTensorDescriptor(output_descriptor);
    return 0;
}
/**
 * 2D convolution forward pass: output = conv(input, filter) + bias.
 *
 * input  - NCHW float32 activations.
 * filter - KCRS float32 weights (K filters, C input channels, R x S kernel).
 * bias   - 1-D float32, one value per output filter.
 * output - NCHW float32; must already have the correct convolved shape.
 *
 * Strides and paddings apply to the spatial (H, W) dimensions. Returns 0 on
 * success; cuDNN failures abort via checkCUDNN.
 */
int cudnnConv2DForward(const DLArrayHandle input,
                       const DLArrayHandle filter,
                       const DLArrayHandle bias,
                       const int stride_height,
                       const int stride_width,
                       const int padding_height,
                       const int padding_width,
                       DLArrayHandle output) {
    const int input_dim = input->ndim;
    const int output_dim = output->ndim;
    assert(input_dim == 4);
    assert(output_dim == 4);

    const int filter_shape = filter->ndim;
    assert(filter_shape == 4);
    const int num_filters = filter->shape[0];
    const int num_outputs = filter->shape[1];
    const int filter_height = filter->shape[2];
    const int filter_width = filter->shape[3];

    const int bias_dim = bias->ndim;
    assert(bias_dim == 1);
    assert(bias->shape[0] == num_filters);

    const float *input_data = (const float *) input->data;
    const float *filter_data = (const float *) filter->data;
    const float *bias_data = (const float *) bias->data;
    float *output_data = (float *) output->data;

    if (!cudnn_handler) {
        checkCUDNN(cudnnCreate(&cudnn_handler));
    }

    // creating input and output tensors
    cudnnTensorDescriptor_t input_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
    setTensorDescriptor(input_descriptor, input->ndim, input->shape);

    cudnnTensorDescriptor_t output_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
    setTensorDescriptor(output_descriptor, output->ndim, output->shape);

    // create filter tensors
    cudnnFilterDescriptor_t filter_descriptor;
    checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
                                          /*dataType=*/CUDNN_DATA_FLOAT,
                                          /*format=*/CUDNN_TENSOR_NCHW,
                                          /*out_channels=*/num_filters,
                                          /*in_channels=*/num_outputs,
                                          /*kernel_height=*/filter_height,
                                          /*kernel_width=*/filter_width));

    // create convolution tensor
    cudnnConvolutionDescriptor_t convolution_descriptor;
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
                                               /*pad_height=*/padding_height,
                                               /*pad_width=*/padding_width,
                                               /*vertical_stride=*/stride_height,
                                               /*horizontal_stride=*/stride_width,
                                               /*dilation_height=*/1,
                                               /*dilation_width=*/1,
                                               /*mode=*/CUDNN_CROSS_CORRELATION,
                                               /*computeType=*/CUDNN_DATA_FLOAT));

    cudnnConvolutionFwdAlgo_t convolution_algorithm;
    checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn_handler,
                                                   input_descriptor,
                                                   filter_descriptor,
                                                   convolution_descriptor,
                                                   output_descriptor,
                                                   CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
                                                   /*memoryLimitInBytes=*/0,
                                                   &convolution_algorithm));

    size_t workspace_bytes{0};
    checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handler,
                                                       input_descriptor,
                                                       filter_descriptor,
                                                       convolution_descriptor,
                                                       output_descriptor,
                                                       convolution_algorithm,
                                                       &workspace_bytes));
    //std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;

    // Some algorithms (e.g. implicit GEMM) need no workspace at all, so a
    // zero size is legitimate; only allocate when one is required.
    // (Previously an assert(workspace_bytes > 0) aborted in that case.)
    void *d_workspace{nullptr};
    if (workspace_bytes > 0) {
        cudaMalloc(&d_workspace, workspace_bytes);
    }

    const float alpha = 1.0f, beta = 0.0f;
    checkCUDNN(cudnnConvolutionForward(cudnn_handler,
                                       &alpha,
                                       input_descriptor,
                                       input_data,
                                       filter_descriptor,
                                       filter_data,
                                       convolution_descriptor,
                                       convolution_algorithm,
                                       d_workspace,
                                       workspace_bytes,
                                       &beta,
                                       output_descriptor,
                                       output_data));

    // adding bias tensor: broadcast the 1 x C x 1 x 1 bias over the output.
    cudnnTensorDescriptor_t bias_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
    checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DATA_FLOAT,
                                          1,
                                          num_filters,
                                          1,
                                          1));

    checkCUDNN(cudnnAddTensor(cudnn_handler,
                              &alpha,
                              bias_descriptor,
                              bias_data,
                              &alpha,
                              output_descriptor,
                              output_data));

    cudaFree(d_workspace);
    cudnnDestroyTensorDescriptor(input_descriptor);
    cudnnDestroyTensorDescriptor(output_descriptor);
    cudnnDestroyFilterDescriptor(filter_descriptor);
    cudnnDestroyTensorDescriptor(bias_descriptor);
    cudnnDestroyConvolutionDescriptor(convolution_descriptor);
    return 0;
}
int cudnnConv2DBackwardBias(const DLArrayHandle output_grads,
DLArrayHandle bias_grads) {
const float *output_grads_data = (const float *) output_grads->data;
float *bias_grads_data = (float *) bias_grads->data;
const int bias_grads_dim = bias_grads->ndim;
assert(bias_grads_dim == 1);
const int num_filters = bias_grads->shape[0];
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// creating output_grads descriptor
cudnnTensorDescriptor_t output_grads_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);
// bias descriptor
cudnnTensorDescriptor_t bias_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_descriptor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
num_filters,
1,
1));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handler,
&alpha,
output_grads_descriptor,
output_grads_data,
&beta,
bias_descriptor,
bias_grads_data
));
cudnnDestroyTensorDescriptor(bias_descriptor);
cudnnDestroyTensorDescriptor(output_grads_descriptor);
return 0;
}
/**
 * Data gradient of a 2D convolution: propagates `output_grads` backward
 * through `filter` into `data_grad` (the gradient w.r.t. the input).
 *
 * filter       - KCRS float32 weights used in the forward pass.
 * output_grads - NCHW float32 gradient of the convolution output.
 * data_grad    - NCHW float32 destination, shaped like the forward input.
 *
 * Returns 0 on success; cuDNN failures abort via checkCUDNN.
 */
int cudnnConv2DBackwardData(const DLArrayHandle filter,
                            const DLArrayHandle output_grads,
                            const int stride_height,
                            const int stride_width,
                            const int padding_height,
                            const int padding_width,
                            DLArrayHandle data_grad) {
    const int data_grad_dim = data_grad->ndim;
    assert(data_grad_dim == 4);

    const int filter_shape = filter->ndim;
    assert(filter_shape == 4);
    const int num_filters = filter->shape[0];
    const int num_outputs = filter->shape[1];
    const int filter_height = filter->shape[2];
    const int filter_width = filter->shape[3];

    const float *filter_data = (const float *) filter->data;
    const float *output_grads_data = (const float *) output_grads->data;
    float *data_grad_data = (float *) data_grad->data;

    if (!cudnn_handler) {
        checkCUDNN(cudnnCreate(&cudnn_handler));
    }

    // creating output_grads descriptor
    cudnnTensorDescriptor_t output_grads_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
    setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);

    // create convolution tensor (must match the forward pass configuration)
    cudnnConvolutionDescriptor_t convolution_descriptor;
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
                                               /*pad_height=*/padding_height,
                                               /*pad_width=*/padding_width,
                                               /*vertical_stride=*/stride_height,
                                               /*horizontal_stride=*/stride_width,
                                               /*dilation_height=*/1,
                                               /*dilation_width=*/1,
                                               /*mode=*/CUDNN_CROSS_CORRELATION,
                                               /*computeType=*/CUDNN_DATA_FLOAT));

    // create filter tensors
    cudnnFilterDescriptor_t filter_descriptor;
    checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
                                          /*dataType=*/CUDNN_DATA_FLOAT,
                                          /*format=*/CUDNN_TENSOR_NCHW,
                                          /*out_channels=*/num_filters,
                                          /*in_channels=*/num_outputs,
                                          /*kernel_height=*/filter_height,
                                          /*kernel_width=*/filter_width));

    cudnnTensorDescriptor_t data_grads_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&data_grads_descriptor));
    setTensorDescriptor(data_grads_descriptor, data_grad->ndim, data_grad->shape);

    cudnnConvolutionBwdDataAlgo_t backward_data_algo;
    checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handler,
                                                        filter_descriptor,
                                                        output_grads_descriptor,
                                                        convolution_descriptor,
                                                        data_grads_descriptor,
                                                        CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
                                                        0,
                                                        &backward_data_algo));

    size_t workspace_bytes{0};
    checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handler,
                                                            filter_descriptor,
                                                            output_grads_descriptor,
                                                            convolution_descriptor,
                                                            data_grads_descriptor,
                                                            backward_data_algo,
                                                            &workspace_bytes));

    // A zero-size workspace is legitimate for some algorithms; only
    // allocate when one is required.
    void *d_workspace{nullptr};
    if (workspace_bytes > 0) {
        cudaMalloc(&d_workspace, workspace_bytes);
    }

    const float alpha = 1.0f, beta = 0.0f;
    checkCUDNN(cudnnConvolutionBackwardData(cudnn_handler,
                                            &alpha,
                                            filter_descriptor,
                                            filter_data,
                                            output_grads_descriptor,
                                            output_grads_data,
                                            convolution_descriptor,
                                            backward_data_algo,
                                            d_workspace,
                                            workspace_bytes,
                                            &beta,
                                            data_grads_descriptor,
                                            data_grad_data));

    // Release resources
    cudaFree(d_workspace);
    cudnnDestroyTensorDescriptor(data_grads_descriptor);
    cudnnDestroyTensorDescriptor(output_grads_descriptor);
    cudnnDestroyFilterDescriptor(filter_descriptor);
    cudnnDestroyConvolutionDescriptor(convolution_descriptor);
    return 0;
}
/**
 * Filter gradient of a 2D convolution: correlates the forward-pass `input`
 * with `output_grads` to produce `filter_grad` (gradient w.r.t. weights).
 *
 * input        - NCHW float32 activations from the forward pass.
 * output_grads - NCHW float32 gradient of the convolution output.
 * filter_grad  - KCRS float32 destination, shaped like the filter.
 *
 * Returns 0 on success; cuDNN failures abort via checkCUDNN.
 */
int cudnnConv2DBackwardFilter(const DLArrayHandle input,
                              const DLArrayHandle output_grads,
                              const int stride_height,
                              const int stride_width,
                              const int padding_height,
                              const int padding_width,
                              DLArrayHandle filter_grad) {
    const int input_dim = input->ndim;
    const int filter_dim = filter_grad->ndim;
    assert(input_dim == 4);
    assert(filter_dim == 4);

    const int num_filters = filter_grad->shape[0];
    const int num_outputs = filter_grad->shape[1];
    const int filter_height = filter_grad->shape[2];
    const int filter_width = filter_grad->shape[3];

    const float *input_data = (const float *) input->data;
    const float *output_grads_data = (const float *) output_grads->data;
    float *filter_grad_data = (float *) filter_grad->data;

    if (!cudnn_handler) {
        checkCUDNN(cudnnCreate(&cudnn_handler));
    }

    // creating input descriptor
    cudnnTensorDescriptor_t input_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
    setTensorDescriptor(input_descriptor, input->ndim, input->shape);

    // creating output_grads descriptor
    cudnnTensorDescriptor_t output_grads_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&output_grads_descriptor));
    setTensorDescriptor(output_grads_descriptor, output_grads->ndim, output_grads->shape);

    // create convolution tensor (must match the forward pass configuration)
    cudnnConvolutionDescriptor_t convolution_descriptor;
    checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
    checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
                                               /*pad_height=*/padding_height,
                                               /*pad_width=*/padding_width,
                                               /*vertical_stride=*/stride_height,
                                               /*horizontal_stride=*/stride_width,
                                               /*dilation_height=*/1,
                                               /*dilation_width=*/1,
                                               /*mode=*/CUDNN_CROSS_CORRELATION,
                                               /*computeType=*/CUDNN_DATA_FLOAT));

    // create filter tensors
    cudnnFilterDescriptor_t filter_descriptor;
    checkCUDNN(cudnnCreateFilterDescriptor(&filter_descriptor));
    checkCUDNN(cudnnSetFilter4dDescriptor(filter_descriptor,
                                          /*dataType=*/CUDNN_DATA_FLOAT,
                                          /*format=*/CUDNN_TENSOR_NCHW,
                                          /*out_channels=*/num_filters,
                                          /*in_channels=*/num_outputs,
                                          /*kernel_height=*/filter_height,
                                          /*kernel_width=*/filter_width));

    cudnnConvolutionBwdFilterAlgo_t backward_filter_algo;
    checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handler,
                                                          input_descriptor,
                                                          output_grads_descriptor,
                                                          convolution_descriptor,
                                                          filter_descriptor,
                                                          CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
                                                          0,
                                                          &backward_filter_algo));

    size_t workspace_bytes{0};
    checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handler,
                                                              input_descriptor,
                                                              output_grads_descriptor,
                                                              convolution_descriptor,
                                                              filter_descriptor,
                                                              backward_filter_algo,
                                                              &workspace_bytes));

    // A zero-size workspace is legitimate for some algorithms; only
    // allocate when one is required.
    void *d_workspace{nullptr};
    if (workspace_bytes > 0) {
        cudaMalloc(&d_workspace, workspace_bytes);
    }

    const float alpha = 1.0f, beta = 0.0f;
    checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handler,
                                              &alpha,
                                              input_descriptor,
                                              input_data,
                                              output_grads_descriptor,
                                              output_grads_data,
                                              convolution_descriptor,
                                              backward_filter_algo,
                                              d_workspace,
                                              workspace_bytes,
                                              &beta,
                                              filter_descriptor,
                                              filter_grad_data));

    cudaFree(d_workspace);
    cudnnDestroyTensorDescriptor(input_descriptor);
    cudnnDestroyTensorDescriptor(output_grads_descriptor);
    cudnnDestroyFilterDescriptor(filter_descriptor);
    cudnnDestroyConvolutionDescriptor(convolution_descriptor);
    return 0;
}
/**
 * 2D pooling forward pass over an NCHW float32 tensor.
 *
 * mode   - "average" selects average pooling (padding included in the
 *          count); any other value selects max pooling.
 * output - NCHW float32; must already have the pooled shape.
 *
 * Padding is currently fixed at 0 (see TODOs). Returns 0 on success;
 * cuDNN failures abort via checkCUDNN.
 */
int cudnnPoolForward(const DLArrayHandle input,
                     const int pooling_height,
                     const int pooling_width,
                     const int stride_height,
                     const int stride_width,
                     const char *mode,
                     DLArrayHandle output) {
    const int input_dim = input->ndim;
    const int output_dim = output->ndim;
    assert(input_dim == 4);
    assert(output_dim == 4);

    const float *input_data = (const float *) input->data;
    float *output_data = (float *) output->data;

    // Default to max pooling; switch on the requested mode.
    // (Leftover debug prints of the mode were removed.)
    cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
    std::string str_mode(mode);
    if (str_mode.compare("average") == 0) {
        pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
    }

    if (!cudnn_handler) {
        checkCUDNN(cudnnCreate(&cudnn_handler));
    }

    // creating input and output tensors
    cudnnTensorDescriptor_t input_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
    setTensorDescriptor(input_descriptor, input->ndim, input->shape);

    cudnnTensorDescriptor_t output_descriptor;
    checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
    setTensorDescriptor(output_descriptor, output->ndim, output->shape);

    cudnnPoolingDescriptor_t pooling_descriptor;
    checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
    checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
                                           pooling_mode,
                                           CUDNN_PROPAGATE_NAN,
                                           pooling_height,
                                           pooling_width,
                                           0, // TODO: parameterize vertical padding
                                           0, // TODO: parameterize horizontal padding
                                           stride_height,
                                           stride_width));

    const float alpha = 1.0f, beta = 0.0f;
    checkCUDNN(cudnnPoolingForward(cudnn_handler,
                                   pooling_descriptor,
                                   &alpha,
                                   input_descriptor,
                                   input_data,
                                   &beta,
                                   output_descriptor,
                                   output_data));

    cudnnDestroyTensorDescriptor(input_descriptor);
    cudnnDestroyTensorDescriptor(output_descriptor);
    cudnnDestroyPoolingDescriptor(pooling_descriptor);
    return 0;
}
int cudnnPoolBackward(const DLArrayHandle input,
const DLArrayHandle output_grads,
const DLArrayHandle output,
const int pooling_height,
const int pooling_width,
const int stride_height,
const int stride_width,
const char *mode,
DLArrayHandle pool_grad) {
const int input_dim = input->ndim;
const int output_dim = output->ndim;
const int output_grads_dim = output_grads->ndim;
const int pool_grad_dim = pool_grad->ndim;
assert(input_dim == 4);
assert(output_dim == 4);
assert(output_grads_dim == 4);
assert(pool_grad_dim == 4);
const float *input_data = (const float*) input->data;
const float *output_data = (const float*) output->data;
const float *output_grads_data = (const float*) output_grads->data;
float *pool_grad_data = (float*) pool_grad->data;
cudnnPoolingMode_t pooling_mode = CUDNN_POOLING_MAX;
std::string str_mode(mode);
if (str_mode.compare("average") == 0) {
std::cout << str_mode << std::endl;
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
std::cout << pooling_mode << std::endl;
}
if (!cudnn_handler) {
cudnnCreate(&cudnn_handler);
}
// input descriptor
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
setTensorDescriptor(input_descriptor, input->ndim, input->shape);
// ouput descriptor
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
setTensorDescriptor(output_descriptor, output->ndim, output->shape);
// output grad descriptor
cudnnTensorDescriptor_t output_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_grad_descriptor));
setTensorDescriptor(output_grad_descriptor, output_grads->ndim, output_grads->shape);
// pool grad descriptor
cudnnTensorDescriptor_t pool_grad_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&pool_grad_descriptor));
setTensorDescriptor(pool_grad_descriptor, pool_grad->ndim, pool_grad->shape);
// TODO: reuse already defined pooling descriptor in forward pass
cudnnPoolingDescriptor_t pooling_descriptor;
checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_descriptor));
checkCUDNN(cudnnSetPooling2dDescriptor(pooling_descriptor,
pooling_mode,
CUDNN_PROPAGATE_NAN,
pooling_height,
pooling_width,
0, // TODO: parameterize vertical padding
0, // TODO: parameterize horizontal padding
stride_height,
stride_width));
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnPoolingBackward(cudnn_handler,
pooling_descriptor,
&alpha,
output_descriptor,
output_data,
output_grad_descriptor,
output_grads_data,
input_descriptor,
input_data,
&beta,
pool_grad_descriptor,
pool_grad_data));
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyTensorDescriptor(output_grad_descriptor);
cudnnDestroyTensorDescriptor(pool_grad_descriptor);
cudnnDestroyPoolingDescriptor(pooling_descriptor);
return 0;
} | the_stack |
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/roi_align.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/variable.hpp>
namespace nbla {
namespace {
// Region-of-interest record as laid out in the boxes tensor: a batch
// index followed by the (x1, y1) top-left and (x2, y2) bottom-right
// corners, all stored in the same data type T as the tensor.
template <typename T> struct Box { T batch_index, x1, y1, x2, y2; };

// Clamp x into the closed interval [low, high].
template <typename T>
__forceinline__ __device__ T clamp(const T x, const T low, const T high) {
  return max(low, min(high, x));
}

// Number of interpolation sample points per output bin along one axis:
// the explicit sampling_ratio when positive, otherwise derived from the
// bin extent (ceil of the step size).
template <typename T>
__forceinline__ __device__ int sampling_grid(const int sampling_ratio,
                                             const T step_size) {
  return sampling_ratio > 0 ? sampling_ratio
                            : static_cast<int>(ceil(step_size));
}
}
// ROI Align forward, NCHW layout. One thread per output element: each
// output value is the average of bilinearly interpolated input samples
// taken on a (grid_size_y x grid_size_x) grid inside the corresponding
// bin of the scaled ROI box.
template <typename T, typename SIZE_T>
__global__ void roi_align_forward_kernel_nchw(
    const SIZE_T size, const T *input_data, const T *boxes_data, T *output_data,
    const SIZE_T samples, const SIZE_T channels, const SIZE_T input_rows,
    const SIZE_T input_cols, const SIZE_T input_stride_c,
    const SIZE_T input_stride_n, const SIZE_T output_rows,
    const SIZE_T output_cols, const SIZE_T output_stride_c,
    const SIZE_T output_stride_n, const int sampling_ratio,
    const float spatial_scale_y, const float spatial_scale_x) {
  NBLA_CUDA_KERNEL_LOOP(output_index, size) {
    // Decompose the flat output index into (n, c, y, x).
    SIZE_T n = output_index - n * output_stride_n; // see next lines
    SIZE_T index = output_index - n * output_stride_n;
    SIZE_T c = index / output_stride_c;
    index -= c * output_stride_c;
    SIZE_T y = index / output_cols;
    SIZE_T x = index - y * output_cols;
    // Boxes are packed as 5 values per ROI (batch_index, x1, y1, x2, y2).
    auto roi = *reinterpret_cast<Box<T> const *>(boxes_data + n * 5);
    // Scale box corners into input-pixel coordinates; the -0.5 shift
    // presumably aligns box edges with pixel centers — half-pixel
    // ("aligned") convention, TODO confirm against the reference impl.
    auto const roi_x1 = roi.x1 * spatial_scale_x - 0.5f;
    auto const roi_y1 = roi.y1 * spatial_scale_y - 0.5f;
    auto const roi_x2 = roi.x2 * spatial_scale_x - 0.5f;
    auto const roi_y2 = roi.y2 * spatial_scale_y - 0.5f;
    // Clamp the batch index into the valid range of input samples.
    auto const roi_index = clamp<SIZE_T>(roi.batch_index, 0, samples - 1);
    // Bin extent per output cell and the sampling sub-grid inside it.
    auto const step_size_x = (roi_x2 - roi_x1) / output_cols;
    auto const step_size_y = (roi_y2 - roi_y1) / output_rows;
    auto const grid_size_x = sampling_grid(sampling_ratio, step_size_x);
    auto const grid_size_y = sampling_grid(sampling_ratio, step_size_y);
    auto const step_size_xx = step_size_x / grid_size_x;
    auto const step_size_yy = step_size_y / grid_size_y;
    auto const grid_size_xy = grid_size_x * grid_size_y;
    auto const half_step_xx = 0.5f * step_size_xx;
    auto const half_step_yy = 0.5f * step_size_yy;
    // First sample point of this output cell (center of first sub-cell).
    auto const xf = roi_x1 + x * step_size_x + half_step_xx;
    auto const yf = roi_y1 + y * step_size_y + half_step_yy;
    auto input_sample_data = input_data + roi_index * input_stride_n;
    auto input_channel_data = input_sample_data + c * input_stride_c;
    auto output_value = 0.0f;
    for (auto yy = 0; yy < grid_size_y; yy++) {
      auto yyf = yf + yy * step_size_yy;
      // Skip samples more than one pixel outside the image.
      if (yyf < -1.0f || yyf > input_rows)
        continue;
      yyf = clamp<T>(yyf, 0, input_rows - 1);
      auto const y_lo = static_cast<SIZE_T>(yyf);
      auto const y_hi = min(y_lo + 1, input_rows - 1);
      auto const ly = yyf - floor(yyf);
      auto const hy = 1.0f - ly;
      for (auto xx = 0; xx < grid_size_x; xx++) {
        auto xxf = xf + xx * step_size_xx;
        if (xxf < -1.0f || xxf > input_cols)
          continue;
        xxf = clamp<T>(xxf, 0, input_cols - 1);
        auto const x_lo = static_cast<SIZE_T>(xxf);
        auto const x_hi = min(x_lo + 1, input_cols - 1);
        auto const lx = xxf - floor(xxf);
        auto const hx = 1.0f - lx;
        // Bilinear interpolation over the four neighboring pixels.
        auto const p1 = y_lo * input_cols + x_lo;
        auto const p2 = y_lo * input_cols + x_hi;
        auto const p3 = y_hi * input_cols + x_lo;
        auto const p4 = y_hi * input_cols + x_hi;
        output_value += hy * hx * input_channel_data[p1];
        output_value += hy * lx * input_channel_data[p2];
        output_value += ly * hx * input_channel_data[p3];
        output_value += ly * lx * input_channel_data[p4];
      }
    }
    // Average over all sample points of the bin.
    output_data[output_index] = static_cast<T>(output_value / grid_size_xy);
  }
}
// ROI Align forward, NHWC layout. One thread per output (n, y, x)
// position; the channel dimension is handled by an inner loop so each
// thread produces `channels` contiguous output values.
template <typename T, typename SIZE_T>
__global__ void roi_align_forward_kernel_nhwc(
    const SIZE_T size, const T *input_data, const T *boxes_data, T *output_data,
    const SIZE_T samples, const SIZE_T channels, const SIZE_T input_rows,
    const SIZE_T input_cols, const SIZE_T input_stride_n,
    const SIZE_T output_rows, const SIZE_T output_cols,
    const SIZE_T output_size_xy, const int sampling_ratio,
    const float spatial_scale_y, const float spatial_scale_x) {
  NBLA_CUDA_KERNEL_LOOP(thread_index, size) {
    // Decompose the flat thread index into (n, y, x).
    SIZE_T n = thread_index / output_size_xy;
    SIZE_T i = thread_index - n * output_size_xy;
    SIZE_T y = i / output_cols;
    SIZE_T x = i - y * output_cols;
    // Boxes are packed as 5 values per ROI (batch_index, x1, y1, x2, y2).
    auto roi = *reinterpret_cast<Box<T> const *>(boxes_data + n * 5);
    // Scale box corners into input pixels (half-pixel shift, see NCHW
    // kernel) and clamp the batch index.
    auto const roi_x1 = roi.x1 * spatial_scale_x - 0.5f;
    auto const roi_y1 = roi.y1 * spatial_scale_y - 0.5f;
    auto const roi_x2 = roi.x2 * spatial_scale_x - 0.5f;
    auto const roi_y2 = roi.y2 * spatial_scale_y - 0.5f;
    auto const roi_index = clamp<SIZE_T>(roi.batch_index, 0, samples - 1);
    // Bin extent per output cell and the sampling sub-grid inside it.
    auto const step_size_x = (roi_x2 - roi_x1) / output_cols;
    auto const step_size_y = (roi_y2 - roi_y1) / output_rows;
    auto const grid_size_x = sampling_grid(sampling_ratio, step_size_x);
    auto const grid_size_y = sampling_grid(sampling_ratio, step_size_y);
    auto const step_size_xx = step_size_x / grid_size_x;
    auto const step_size_yy = step_size_y / grid_size_y;
    auto const grid_size_xy = grid_size_x * grid_size_y;
    auto const half_step_xx = 0.5f * step_size_xx;
    auto const half_step_yy = 0.5f * step_size_yy;
    auto const xf = roi_x1 + x * step_size_x + half_step_xx;
    auto const yf = roi_y1 + y * step_size_y + half_step_yy;
    auto input_sample_data = input_data + roi_index * input_stride_n;
    auto output_channel_data = output_data + thread_index * channels;
    // Zero the per-channel accumulators before sampling.
    for (auto c = 0; c < channels; c++) {
      output_channel_data[c] = T(0);
    }
    for (auto yy = 0; yy < grid_size_y; yy++) {
      auto yyf = yf + yy * step_size_yy;
      // Skip samples more than one pixel outside the image.
      if (yyf < -1.0f || yyf > input_rows)
        continue;
      yyf = clamp<T>(yyf, 0, input_rows - 1);
      auto const y_lo = static_cast<SIZE_T>(yyf);
      auto const y_hi = min(y_lo + 1, input_rows - 1);
      auto const ly = yyf - floor(yyf);
      auto const hy = 1.0f - ly;
      for (auto xx = 0; xx < grid_size_x; xx++) {
        auto xxf = xf + xx * step_size_xx;
        if (xxf < -1.0f || xxf > input_cols)
          continue;
        xxf = clamp<T>(xxf, 0, input_cols - 1);
        auto const x_lo = static_cast<SIZE_T>(xxf);
        auto const x_hi = min(x_lo + 1, input_cols - 1);
        auto const lx = xxf - floor(xxf);
        auto const hx = 1.0f - lx;
        // NHWC: pixel offsets are scaled by the channel count.
        auto const p1 = (y_lo * input_cols + x_lo) * channels;
        auto const p2 = (y_lo * input_cols + x_hi) * channels;
        auto const p3 = (y_hi * input_cols + x_lo) * channels;
        auto const p4 = (y_hi * input_cols + x_hi) * channels;
        // Accumulate the bilinear sample for every channel.
        for (auto c = 0; c < channels; c++) {
          auto output_data_value = 0.0f;
          output_data_value += hy * hx * input_sample_data[p1 + c];
          output_data_value += hy * lx * input_sample_data[p2 + c];
          output_data_value += ly * hx * input_sample_data[p3 + c];
          output_data_value += ly * lx * input_sample_data[p4 + c];
          output_channel_data[c] += static_cast<T>(output_data_value);
        }
      }
    }
    // Average over all sample points of the bin.
    for (auto c = 0; c < channels; c++) {
      output_channel_data[c] /= grid_size_xy;
    }
  }
}
// ROI Align backward, NCHW layout. One thread per output-gradient
// element; the gradient is scattered back to the four bilinear-neighbor
// input pixels of every sample point with atomic adds (multiple ROIs /
// bins may touch the same input pixel). input_grad is accumulated into,
// so the caller must zero it first unless accumulation is intended.
template <typename T, typename SIZE_T>
__global__ void roi_align_backward_kernel_nchw(
    const SIZE_T size, T *input_grad, const T *boxes_data, const T *output_grad,
    const SIZE_T samples, const SIZE_T channels, const SIZE_T input_rows,
    const SIZE_T input_cols, const SIZE_T input_channel_size,
    const SIZE_T input_sample_size, const SIZE_T output_rows,
    const SIZE_T output_cols, const SIZE_T output_stride_c,
    const SIZE_T output_stride_n, const int sampling_ratio,
    const float spatial_scale_y, const float spatial_scale_x) {
  NBLA_CUDA_KERNEL_LOOP(output_index, size) {
    // Decompose the flat output index into (n, c, y, x).
    SIZE_T n = output_index / output_stride_n;
    SIZE_T index = output_index - n * output_stride_n;
    SIZE_T c = index / output_stride_c;
    index -= c * output_stride_c;
    SIZE_T y = index / output_cols;
    SIZE_T x = index - y * output_cols;
    // Recompute the same box / sampling geometry as the forward kernel.
    auto roi = *reinterpret_cast<Box<T> const *>(boxes_data + n * 5);
    auto const roi_x1 = roi.x1 * spatial_scale_x - 0.5f;
    auto const roi_y1 = roi.y1 * spatial_scale_y - 0.5f;
    auto const roi_x2 = roi.x2 * spatial_scale_x - 0.5f;
    auto const roi_y2 = roi.y2 * spatial_scale_y - 0.5f;
    auto const roi_index = clamp<SIZE_T>(roi.batch_index, 0, samples - 1);
    auto const step_size_x = (roi_x2 - roi_x1) / output_cols;
    auto const step_size_y = (roi_y2 - roi_y1) / output_rows;
    auto const grid_size_x = sampling_grid(sampling_ratio, step_size_x);
    auto const grid_size_y = sampling_grid(sampling_ratio, step_size_y);
    auto const step_size_xx = step_size_x / grid_size_x;
    auto const step_size_yy = step_size_y / grid_size_y;
    auto const grid_size_xy = grid_size_x * grid_size_y;
    auto const half_step_xx = 0.5f * step_size_xx;
    auto const half_step_yy = 0.5f * step_size_yy;
    auto const xf = roi_x1 + x * step_size_x + half_step_xx;
    auto const yf = roi_y1 + y * step_size_y + half_step_yy;
    auto input_sample_grad = input_grad + roi_index * input_sample_size;
    auto input_channel_grad = input_sample_grad + c * input_channel_size;
    // Each sample point receives an equal share of the output gradient.
    auto output_grad_value = output_grad[output_index] / grid_size_xy;
    for (auto yy = 0; yy < grid_size_y; yy++) {
      auto yyf = yf + yy * step_size_yy;
      // Skip samples more than one pixel outside the image (mirrors the
      // forward kernel, which contributed nothing for these).
      if (yyf < -1.0f || yyf > input_rows)
        continue;
      yyf = clamp<T>(yyf, 0, input_rows - 1);
      auto const y_lo = static_cast<SIZE_T>(yyf);
      auto const y_hi = min(y_lo + 1, input_rows - 1);
      auto const ly = yyf - floor(yyf);
      auto const hy = 1.0f - ly;
      for (auto xx = 0; xx < grid_size_x; xx++) {
        auto xxf = xf + xx * step_size_xx;
        if (xxf < -1.0f || xxf > input_cols)
          continue;
        xxf = clamp<T>(xxf, 0, input_cols - 1);
        auto const x_lo = static_cast<SIZE_T>(xxf);
        auto const x_hi = min(x_lo + 1, input_cols - 1);
        auto const lx = xxf - floor(xxf);
        auto const hx = 1.0f - lx;
        auto const p1 = y_lo * input_cols + x_lo;
        auto const p2 = y_lo * input_cols + x_hi;
        auto const p3 = y_hi * input_cols + x_lo;
        auto const p4 = y_hi * input_cols + x_hi;
        // Distribute the gradient with the same bilinear weights used
        // in the forward pass; atomics because pixels are shared.
        const T v1 = static_cast<T>(hy * hx * output_grad_value);
        const T v2 = static_cast<T>(hy * lx * output_grad_value);
        const T v3 = static_cast<T>(ly * hx * output_grad_value);
        const T v4 = static_cast<T>(ly * lx * output_grad_value);
        atomic_add(&input_channel_grad[p1], v1);
        atomic_add(&input_channel_grad[p2], v2);
        atomic_add(&input_channel_grad[p3], v3);
        atomic_add(&input_channel_grad[p4], v4);
      }
    }
  }
}
// ROI Align backward, NHWC layout. One thread per output (n, y, x)
// position; an inner loop scatters the gradient of every channel to the
// four bilinear-neighbor input pixels with atomic adds. input_grad is
// accumulated into, so the caller must zero it first unless
// accumulation is intended.
template <typename T, typename SIZE_T>
__global__ void roi_align_backward_kernel_nhwc(
    const SIZE_T size, T *input_grad, const T *boxes_data, const T *output_grad,
    const SIZE_T samples, const SIZE_T channels, const SIZE_T input_rows,
    const SIZE_T input_cols, const SIZE_T input_stride_n,
    const SIZE_T output_rows, const SIZE_T output_cols,
    const SIZE_T output_size_xy, const int sampling_ratio,
    const float spatial_scale_y, const float spatial_scale_x) {
  NBLA_CUDA_KERNEL_LOOP(thread_index, size) {
    // Decompose the flat thread index into (n, y, x).
    SIZE_T n = thread_index / output_size_xy;
    SIZE_T i = thread_index - n * output_size_xy;
    SIZE_T y = i / output_cols;
    SIZE_T x = i - y * output_cols;
    // Recompute the same box / sampling geometry as the forward kernel.
    auto roi = *reinterpret_cast<Box<T> const *>(boxes_data + n * 5);
    auto const roi_x1 = roi.x1 * spatial_scale_x - 0.5f;
    auto const roi_y1 = roi.y1 * spatial_scale_y - 0.5f;
    auto const roi_x2 = roi.x2 * spatial_scale_x - 0.5f;
    auto const roi_y2 = roi.y2 * spatial_scale_y - 0.5f;
    auto const roi_index = clamp<SIZE_T>(roi.batch_index, 0, samples - 1);
    auto const step_size_x = (roi_x2 - roi_x1) / output_cols;
    auto const step_size_y = (roi_y2 - roi_y1) / output_rows;
    auto const grid_size_x = sampling_grid(sampling_ratio, step_size_x);
    auto const grid_size_y = sampling_grid(sampling_ratio, step_size_y);
    auto const step_size_xx = step_size_x / grid_size_x;
    auto const step_size_yy = step_size_y / grid_size_y;
    auto const grid_size_xy = grid_size_x * grid_size_y;
    auto const half_step_xx = 0.5f * step_size_xx;
    auto const half_step_yy = 0.5f * step_size_yy;
    auto const xf = roi_x1 + x * step_size_x + half_step_xx;
    auto const yf = roi_y1 + y * step_size_y + half_step_yy;
    auto input_sample_grad = input_grad + roi_index * input_stride_n;
    auto output_channel_grad = output_grad + thread_index * channels;
    for (auto yy = 0; yy < grid_size_y; yy++) {
      auto yyf = yf + static_cast<T>(yy) * step_size_yy;
      // Skip samples more than one pixel outside the image.
      if (yyf < -1.0f || yyf > static_cast<T>(input_rows))
        continue;
      yyf = clamp<T>(yyf, 0, input_rows - 1);
      auto const y_lo = static_cast<SIZE_T>(yyf);
      auto const y_hi = min(y_lo + 1, input_rows - 1);
      auto const ly = yyf - floor(yyf);
      auto const hy = 1.0f - ly;
      for (auto xx = 0; xx < grid_size_x; xx++) {
        auto xxf = xf + static_cast<T>(xx) * step_size_xx;
        if (xxf < -1.0f || xxf > static_cast<T>(input_cols))
          continue;
        xxf = clamp<T>(xxf, 0, input_cols - 1);
        auto const x_lo = static_cast<SIZE_T>(xxf);
        auto const x_hi = min(x_lo + 1, input_cols - 1);
        auto const lx = xxf - floor(xxf);
        auto const hx = 1.0f - lx;
        // NHWC: pixel offsets are scaled by the channel count.
        auto const p1 = (y_lo * input_cols + x_lo) * channels;
        auto const p2 = (y_lo * input_cols + x_hi) * channels;
        auto const p3 = (y_hi * input_cols + x_lo) * channels;
        auto const p4 = (y_hi * input_cols + x_hi) * channels;
        for (auto c = 0; c < channels; c++) {
          // Each sample point carries 1/grid_size_xy of the gradient,
          // split by the forward bilinear weights.
          auto const grad_value = static_cast<float>(output_channel_grad[c]);
          const T v1 = static_cast<T>(hy * hx * grad_value / grid_size_xy);
          const T v2 = static_cast<T>(hy * lx * grad_value / grid_size_xy);
          const T v3 = static_cast<T>(ly * hx * grad_value / grid_size_xy);
          const T v4 = static_cast<T>(ly * lx * grad_value / grid_size_xy);
          atomic_add(&input_sample_grad[p1 + c], v1);
          atomic_add(&input_sample_grad[p2 + c], v2);
          atomic_add(&input_sample_grad[p3 + c], v3);
          atomic_add(&input_sample_grad[p4 + c], v4);
        }
      }
    }
  }
}
// Delegate shape/type checks and output allocation to the reference
// implementation, then bind the CUDA device for this function instance.
template <typename T>
void RoiAlignCuda<T>::setup_impl(const Variables &inputs,
                                 const Variables &outputs) {
  RoiAlign<T>::setup_impl(inputs, outputs);
  cuda_set_device(this->device_);
}
// Forward pass: pick the NCHW or NHWC kernel according to the layout
// flag, and 32-bit or 64-bit indexing according to the output size,
// then launch it over the output elements.
// inputs[0] = feature map, inputs[1] = boxes; outputs[0] = pooled ROIs.
template <typename T>
void RoiAlignCuda<T>::forward_impl(const Variables &inputs,
                                   const Variables &outputs) {
  cuda_set_device(this->device_);
  auto input = inputs.at(0);
  auto boxes = inputs.at(1);
  auto output = outputs.at(0);
  auto input_data = input->get_data_pointer<Tcu>(this->ctx_);
  auto boxes_data = boxes->get_data_pointer<Tcu>(this->ctx_);
  auto output_data = output->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
  auto const samples = input->shape().at(0);
  auto const spatial_scale_y = this->spatial_scale_.at(0);
  auto const spatial_scale_x = this->spatial_scale_.at(1);
  if (!this->channel_last_) {
    // NCHW layout: one thread per output element.
    auto const channels = input->shape().at(1);
    auto const input_rows = input->shape().at(2);
    auto const input_cols = input->shape().at(3);
    auto const output_rows = output->shape().at(2);
    auto const output_cols = output->shape().at(3);
    auto const input_stride_n = input->strides().at(0);
    auto const input_stride_c = input->strides().at(1);
    auto const output_stride_n = output->strides().at(0);
    auto const output_stride_c = output->strides().at(1);
    auto const nthreads = output->size();
    // Prefer 32-bit indexing when the output fits; it is cheaper on GPU.
    if (output->size() <= INT32_MAX) {
      auto kernel = roi_align_forward_kernel_nchw<Tcu, int32_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_data, boxes_data, output_data, samples,
          channels, input_rows, input_cols, input_stride_c, input_stride_n,
          output_rows, output_cols, output_stride_c, output_stride_n,
          this->sampling_ratio_, spatial_scale_y, spatial_scale_x);
    } else {
      auto kernel = roi_align_forward_kernel_nchw<Tcu, int64_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_data, boxes_data, output_data, samples,
          channels, input_rows, input_cols, input_stride_c, input_stride_n,
          output_rows, output_cols, output_stride_c, output_stride_n,
          this->sampling_ratio_, spatial_scale_y, spatial_scale_x);
    }
  } else {
    // NHWC layout: one thread per (n, y, x); channels handled in-kernel.
    auto const channels = input->shape().at(3);
    auto const input_rows = input->shape().at(1);
    auto const input_cols = input->shape().at(2);
    auto const output_rows = output->shape().at(1);
    auto const output_cols = output->shape().at(2);
    auto const input_stride_n = input->strides().at(0);
    auto const output_size_xy = output_rows * output_cols;
    auto const nthreads = output->size() / channels;
    if (output->size() <= INT32_MAX) {
      auto kernel = roi_align_forward_kernel_nhwc<Tcu, int32_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_data, boxes_data, output_data, samples,
          channels, input_rows, input_cols, input_stride_n, output_rows,
          output_cols, output_size_xy, this->sampling_ratio_, spatial_scale_y,
          spatial_scale_x);
    } else {
      auto kernel = roi_align_forward_kernel_nhwc<Tcu, int64_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_data, boxes_data, output_data, samples,
          channels, input_rows, input_cols, input_stride_n, output_rows,
          output_cols, output_size_xy, this->sampling_ratio_, spatial_scale_y,
          spatial_scale_x);
    }
  }
}
// Backward pass: scatter output gradients back into the feature-map
// gradient. Only inputs[0] (the feature map) is differentiable; the
// boxes input receives no gradient. Layout and index-width dispatch
// mirrors forward_impl.
template <typename T>
void RoiAlignCuda<T>::backward_impl(const Variables &inputs,
                                    const Variables &outputs,
                                    const vector<bool> &propagate_down,
                                    const vector<bool> &accum) {
  if (!propagate_down[0]) {
    return;
  }
  cuda_set_device(this->device_);
  auto input = inputs.at(0);
  auto boxes = inputs.at(1);
  auto output = outputs.at(0);
  // The backward kernels accumulate with atomic adds into the existing
  // gradient buffer, so when the caller does not request accumulation
  // the buffer must start from zero or stale gradients would leak in.
  if (!accum[0]) {
    input->grad()->zero();
  }
  auto input_grad = input->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
  auto boxes_data = boxes->get_data_pointer<Tcu>(this->ctx_);
  auto output_grad = output->get_grad_pointer<Tcu>(this->ctx_);
  auto const samples = input->shape().at(0);
  auto const spatial_scale_y = this->spatial_scale_.at(0);
  auto const spatial_scale_x = this->spatial_scale_.at(1);
  if (!this->channel_last_) {
    // NCHW layout: one thread per output-gradient element.
    auto const channels = input->shape().at(1);
    auto const input_rows = input->shape().at(2);
    auto const input_cols = input->shape().at(3);
    auto const output_rows = output->shape().at(2);
    auto const output_cols = output->shape().at(3);
    auto const input_stride_n = input->strides().at(0);
    auto const input_stride_c = input->strides().at(1);
    auto const output_stride_n = output->strides().at(0);
    auto const output_stride_c = output->strides().at(1);
    auto const nthreads = output->size();
    // Prefer 32-bit indexing when the output fits; it is cheaper on GPU.
    if (output->size() <= INT32_MAX) {
      auto kernel = roi_align_backward_kernel_nchw<Tcu, int32_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_grad, boxes_data, output_grad, samples,
          channels, input_rows, input_cols, input_stride_c, input_stride_n,
          output_rows, output_cols, output_stride_c, output_stride_n,
          this->sampling_ratio_, spatial_scale_y, spatial_scale_x);
    } else {
      auto kernel = roi_align_backward_kernel_nchw<Tcu, int64_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_grad, boxes_data, output_grad, samples,
          channels, input_rows, input_cols, input_stride_c, input_stride_n,
          output_rows, output_cols, output_stride_c, output_stride_n,
          this->sampling_ratio_, spatial_scale_y, spatial_scale_x);
    }
  } else {
    // NHWC layout: one thread per (n, y, x); channels handled in-kernel.
    auto const channels = input->shape().at(3);
    auto const input_rows = input->shape().at(1);
    auto const input_cols = input->shape().at(2);
    auto const output_rows = output->shape().at(1);
    auto const output_cols = output->shape().at(2);
    auto const input_stride_n = input->strides().at(0);
    auto const output_size_xy = output_rows * output_cols;
    auto const nthreads = output->size() / channels;
    if (output->size() <= INT32_MAX) {
      auto kernel = roi_align_backward_kernel_nhwc<Tcu, int32_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_grad, boxes_data, output_grad, samples,
          channels, input_rows, input_cols, input_stride_n, output_rows,
          output_cols, output_size_xy, this->sampling_ratio_, spatial_scale_y,
          spatial_scale_x);
    } else {
      auto kernel = roi_align_backward_kernel_nhwc<Tcu, int64_t>;
      NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
          kernel, nthreads, input_grad, boxes_data, output_grad, samples,
          channels, input_rows, input_cols, input_stride_n, output_rows,
          output_cols, output_size_xy, this->sampling_ratio_, spatial_scale_y,
          spatial_scale_x);
    }
  }
}
} | the_stack |
#include <iostream>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
//////////////////////////////
// forward
//////////////////////////////
#if 0
// NOTE: disabled (#if 0) — superseded by the variant in the #else
// branch, where each thread handles a single (depth, frame) output
// instead of looping over all depth modulation steps.
template<typename T=float>
__global__ void kernal_RealToBinary_Forward(
            T const         *x_buf,
            T               *y_buf,
            unsigned int    depth_modulation_size,
            T               depth_modulation_step,
            unsigned int    frame_modulation_size,
            T               frame_modulation_step,
            T               x_offset,
            T               x_scale,
            unsigned int    point_size,
            unsigned int    x_depth_size,
            unsigned int    x_frame_size,
            unsigned int    x_frame_stride,
            unsigned int    y_frame_stride,
            bool            binarize
        )
{
    unsigned int    y_frame = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int    x_depth = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int    point   = blockDim.z * blockIdx.z + threadIdx.z;

    // Each input depth expands to depth_modulation_size output depths,
    // each input frame to frame_modulation_size output frames.
    unsigned int    y_depth = x_depth * depth_modulation_size;
    unsigned int    x_frame = y_frame / frame_modulation_size;
    unsigned int    frame   = y_frame % frame_modulation_size;

    T const *x_ptr = &x_buf[(x_depth * point_size + point) * x_frame_stride + x_frame];
    T       *y_ptr = &y_buf[(y_depth * point_size + point) * y_frame_stride + y_frame];

    if ( x_frame < x_frame_size && point < point_size && x_depth < x_depth_size ) {
        // Normalize the input into [0, 1] using the configured range.
        T   x = (*x_ptr - x_offset) * x_scale;

        T   depth_step_recip = (T)depth_modulation_size;    // reciprocal of depth_modulation_step
        T   frame_step_recip = (T)frame_modulation_size;    // reciprocal of frame_modulation_step
        for ( int depth = 0; depth < depth_modulation_size; ++depth ) {
            T   y = x;
            // modulation for depth
            y = (y - (T)(depth * depth_modulation_step)) * depth_step_recip;
            // modulation for frame
            y = (y - (T)(frame * frame_modulation_step)) * frame_step_recip;
            // clamp
            y = max((T)0.0, min((T)1.0, y));
            if ( binarize ) {
                y = (y > (T)0.5) ? (T)1.0 : (T)0.0;
            }
            y_ptr[depth * point_size * y_frame_stride] = y;
        }
    }
}
// NOTE: disabled (#if 0) — launcher for the disabled kernel above.
// Kept for reference only; the active version lives in the #else branch.
template<typename T>
BBCU_DLL_EXPORT int bbcu_RealToBinary_Forward
        (
            T const         *dev_x_buf,
            T               *dev_y_buf,
            unsigned int    depth_modulation_size,
            unsigned int    frame_modulation_size,
            T               input_range_lo,
            T               input_range_hi,
            unsigned int    point_size,
            unsigned int    x_depth_size,
            unsigned int    x_frame_size,
            unsigned int    x_frame_stride,
            unsigned int    y_frame_stride,
            bool            binarize,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    unsigned int const MIN_FRAME_UNIT = 1;
    unsigned int const MIN_DEPTH_UNIT = 1;

    unsigned int    y_frame_size = x_frame_size * frame_modulation_size;

    T   depth_modulation_step = (T)1.0 / (T)depth_modulation_size;
    T   frame_modulation_step = (T)1.0 / (T)frame_modulation_size;

    // Shrink block.x / block.y toward the problem size, trading the
    // freed threads to the next dimension (the product stays 1024).
    dim3    block(32, 32, 1);
    while ( block.x / 2 >= y_frame_size && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; }
    while ( block.y / 2 >= x_depth_size && block.y > MIN_DEPTH_UNIT ){ block.y /= 2; block.z *= 2; }
    block.x = std::min(block.x, y_frame_size);
    block.y = std::min(block.y, x_depth_size);
    block.z = std::min(block.z, point_size);

    dim3    grid;
    grid.x = (y_frame_size + (block.x - 1)) / block.x;
    grid.y = (x_depth_size + (block.y - 1)) / block.y;
    grid.z = (point_size + (block.z - 1)) / block.z;

    kernal_RealToBinary_Forward<T><<<grid, block, 0, streamId>>>(
            dev_x_buf,
            dev_y_buf,
            depth_modulation_size,
            depth_modulation_step,
            frame_modulation_size,
            frame_modulation_step,
            input_range_lo,
            (T)1.0 / (input_range_hi - input_range_lo),
            point_size,
            x_depth_size,
            x_frame_size,
            x_frame_stride,
            y_frame_stride,
            binarize
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}
#else
// Real -> binary modulation forward kernel. Each input value is
// expanded to depth_modulation_size x frame_modulation_size output
// values; one thread computes one output (y_depth, point, y_frame)
// element. The value is normalized to [0, 1], shifted by the depth and
// frame modulation offsets, clamped, and optionally binarized.
template<typename T=float>
__global__ void kernal_RealToBinary_Forward(
            T const         *x_buf,
            T               *y_buf,
            unsigned int    depth_modulation_size,
            T               depth_modulation_step,
            unsigned int    frame_modulation_size,
            T               frame_modulation_step,
            T               x_offset,
            T               x_scale,
            int             point_size,
            int             x_depth_size,
            int             x_frame_size,
            int             x_frame_stride,
            int             y_frame_stride,
            bool            binarize
        )
{
    unsigned int    y_frame = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int    y_depth = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int    point   = blockDim.z * blockIdx.z + threadIdx.z;

    // Map the output coordinate back to its source element and to the
    // modulation step it represents.
    unsigned int    x_depth = y_depth / depth_modulation_size;
    unsigned int    depth   = y_depth % depth_modulation_size;
    unsigned int    x_frame = y_frame / frame_modulation_size;
    unsigned int    frame   = y_frame % frame_modulation_size;

    T const *x_ptr = &x_buf[(x_depth * point_size + point) * x_frame_stride + x_frame];
    T       *y_ptr = &y_buf[(y_depth * point_size + point) * y_frame_stride + y_frame];

    if ( x_frame < x_frame_size && point < point_size && x_depth < x_depth_size ) {
        // Normalize the input into [0, 1] using the configured range.
        T   x = (*x_ptr - x_offset) * x_scale;

        T   depth_step_recip = (T)depth_modulation_size;    // reciprocal of depth_modulation_step
        T   frame_step_recip = (T)frame_modulation_size;    // reciprocal of frame_modulation_step
        T   y = x;
        // modulation for depth
        y = (y - (T)(depth * depth_modulation_step)) * depth_step_recip;
        // modulation for frame
        y = (y - (T)(frame * frame_modulation_step)) * frame_step_recip;
        // clamp
        y = max((T)0.0, min((T)1.0, y));
        // optional binarization against the 0.5 threshold
        if ( binarize ) {
            y = (y > (T)0.5) ? (T)BB_BINARY_HI : (T)BB_BINARY_LO;
        }
        *y_ptr = y;
    }
}
// Host launcher for the real->binary modulation forward pass.
//
// Expands the input tensor (x_depth_size x point_size x x_frame_size)
// to (x_depth_size * depth_modulation_size) depths and (x_frame_size *
// frame_modulation_size) frames, normalizing values from
// [input_range_lo, input_range_hi] to [0, 1] before modulation.
// Returns 0 on success; kernel launch errors are reported through
// BB_CUDA_CHECK_LAST_ERROR.
template<typename T>
BBCU_DLL_EXPORT int bbcu_RealToBinary_Forward
        (
            T const         *dev_x_buf,
            T               *dev_y_buf,
            unsigned int    depth_modulation_size,
            unsigned int    frame_modulation_size,
            T               input_range_lo,
            T               input_range_hi,
            unsigned int    point_size,
            unsigned int    x_depth_size,
            unsigned int    x_frame_size,
            unsigned int    x_frame_stride,
            unsigned int    y_frame_stride,
            bool            binarize,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    unsigned int const MIN_FRAME_UNIT = 1;
    unsigned int const MIN_DEPTH_UNIT = 1;

    unsigned int    y_frame_size = x_frame_size * frame_modulation_size;
    unsigned int    y_depth_size = x_depth_size * depth_modulation_size;

    T   depth_modulation_step = (T)1.0 / (T)depth_modulation_size;
    T   frame_modulation_step = (T)1.0 / (T)frame_modulation_size;

    // Shrink block.x / block.y toward the problem size, trading the
    // freed threads to the next dimension (the product stays 1024).
    dim3    block(32, 32, 1);
    while ( block.x / 2 >= y_frame_size && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; }
    while ( block.y / 2 >= y_depth_size && block.y > MIN_DEPTH_UNIT ){ block.y /= 2; block.z *= 2; }
    block.x = std::min(block.x, y_frame_size);
    block.y = std::min(block.y, y_depth_size);
    block.z = std::min(block.z, point_size);
    // Clamp to CUDA hardware limits: the loops above can push block.z
    // far beyond the 64-thread z-dimension maximum when y_frame_size
    // and y_depth_size are small (mirrors bbcu_bit_RealToBinary_Forward).
    block.x = std::min(block.x, 1024U);
    block.y = std::min(block.y, 1024U);
    block.z = std::min(block.z, 64U);

    dim3    grid;
    grid.x = (y_frame_size + (block.x - 1)) / block.x;
    grid.y = (y_depth_size + (block.y - 1)) / block.y;
    grid.z = (point_size   + (block.z - 1)) / block.z;

    kernal_RealToBinary_Forward<T><<<grid, block, 0, streamId>>>(
            dev_x_buf,
            dev_y_buf,
            depth_modulation_size,
            depth_modulation_step,
            frame_modulation_size,
            frame_modulation_step,
            input_range_lo,
            (T)1.0 / (input_range_hi - input_range_lo),
            point_size,
            x_depth_size,
            x_frame_size,
            x_frame_stride,
            y_frame_stride,
            binarize
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}
#endif
template BBCU_DLL_EXPORT int bbcu_RealToBinary_Forward<float>(float const *, float *, unsigned int, unsigned int, float, float, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, cudaStream_t);
// Bit-packed real -> binary modulation forward kernel. Each thread
// produces one 32-bit word of the output: bit b of word y_unit encodes
// output frame (y_unit * 32 + b). The input value is normalized to
// [0, 1], shifted by the depth and frame modulation offsets, clamped,
// and thresholded at 0.5.
template<typename T=float>
__global__ void kernal_bit_RealToBinary_Forward(
            T const         *x_buf,
            int             *y_buf,
            unsigned int    depth_modulation_size,
            T               depth_modulation_step,
            unsigned int    frame_modulation_size,
            T               frame_modulation_step,
            T               x_offset,
            T               x_scale,
            int             point_size,
            int             x_depth_size,
            int             x_frame_size,
            int             x_frame_stride,
            int             y_frame_stride
        )
{
    unsigned int    y_unit  = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int    y_depth = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int    point   = blockDim.z * blockIdx.z + threadIdx.z;

    unsigned int    x_depth = y_depth / depth_modulation_size;
    unsigned int    depth   = y_depth % depth_modulation_size;

    T const *x_ptr = &x_buf[(x_depth * point_size + point) * x_frame_stride];

    int y_bit    = 0;
    int bit_mask = 1;
    for ( int bit = 0; bit < 32; ++bit ) {
        unsigned int y_frame = y_unit * 32 + bit;
        unsigned int x_frame = y_frame / frame_modulation_size;
        unsigned int frame   = y_frame % frame_modulation_size;
        if ( x_frame < x_frame_size && point < point_size && x_depth < x_depth_size ) {
            // Normalize the input into [0, 1] using the configured range.
            T   x = (x_ptr[x_frame] - x_offset) * x_scale;

            T   depth_step_recip = (T)depth_modulation_size;    // reciprocal of depth_modulation_step
            T   frame_step_recip = (T)frame_modulation_size;    // reciprocal of frame_modulation_step
            T   y = x;
            // modulation for depth
            y = (y - (T)(depth * depth_modulation_step)) * depth_step_recip;
            // modulation for frame
            y = (y - (T)(frame * frame_modulation_step)) * frame_step_recip;
            // clamp
            y = max((T)0.0, min((T)1.0, y));
            // threshold into the packed word
            if (y > (T)0.5) {
                y_bit |= bit_mask;
            }
        }
        // Advance the mask every iteration so that the bit position
        // always equals the frame offset within the word.
        bit_mask <<= 1;
    }

    // Guard the store: grid dimensions are rounded up, so excess
    // threads (y_unit, y_depth or point beyond the valid range) must
    // not write, or they would scribble outside the output buffer.
    unsigned int y_frame_size = x_frame_size * frame_modulation_size;
    if ( y_unit * 32 < y_frame_size && x_depth < x_depth_size && point < point_size ) {
        int *y_ptr = &y_buf[(y_depth * point_size + point) * y_frame_stride];
        y_ptr[y_unit] = y_bit;
    }
}
// Host launcher for the bit-packed real->binary modulation forward
// pass. Same expansion as bbcu_RealToBinary_Forward, but the binarized
// output is packed 32 frames per int word; y_frame_stride is therefore
// measured in words.
template<typename T>
BBCU_DLL_EXPORT int bbcu_bit_RealToBinary_Forward
        (
            T const         *dev_x_buf,
            int             *dev_y_buf,
            unsigned int    depth_modulation_size,
            unsigned int    frame_modulation_size,
            T               input_range_lo,
            T               input_range_hi,
            unsigned int    point_size,
            unsigned int    x_depth_size,
            unsigned int    x_frame_size,
            unsigned int    x_frame_stride,
            unsigned int    y_frame_stride,
            cudaStream_t    streamId
        )
{
    BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());

    unsigned int const MIN_FRAME_UNIT = 1;
    unsigned int const MIN_DEPTH_UNIT = 1;

    unsigned int    y_frame_size = x_frame_size * frame_modulation_size;
    unsigned int    y_depth_size = x_depth_size * depth_modulation_size;

    T   depth_modulation_step = (T)1.0 / (T)depth_modulation_size;
    T   frame_modulation_step = (T)1.0 / (T)frame_modulation_size;

    // One thread per 32-bit output word along the frame axis.
    unsigned int    y_unit_size  = (y_frame_size + 31) / 32;

//  dim3    block(32, 32, 1);
    dim3    block(32, 16, 1);   // apparently there are not enough registers to launch 1024 threads
    // Shrink block.x / block.y toward the problem size, trading the
    // freed threads to the next dimension.
    while ( block.x / 2 >= y_unit_size  && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; }
    while ( block.y / 2 >= y_depth_size && block.y > MIN_DEPTH_UNIT ){ block.y /= 2; block.z *= 2; }
    block.x = std::min(block.x, y_unit_size);
    block.y = std::min(block.y, y_depth_size);
    block.z = std::min(block.z, point_size);
    // Clamp to CUDA hardware limits (blockDim.z max is 64).
    block.x = std::min(block.x, 1024U);
    block.y = std::min(block.y, 1024U);
    block.z = std::min(block.z, 64U);

    dim3    grid;
    grid.x = (y_unit_size  + (block.x - 1)) / block.x;
    grid.y = (y_depth_size + (block.y - 1)) / block.y;
    grid.z = (point_size   + (block.z - 1)) / block.z;

    kernal_bit_RealToBinary_Forward<T><<<grid, block, 0, streamId>>>(
            dev_x_buf,
            dev_y_buf,
            depth_modulation_size,
            depth_modulation_step,
            frame_modulation_size,
            frame_modulation_step,
            input_range_lo,
            (T)1.0 / (input_range_hi - input_range_lo),
            point_size,
            x_depth_size,
            x_frame_size,
            x_frame_stride,
            y_frame_stride
        );
    BB_CUDA_CHECK_LAST_ERROR();

    return 0;
}
template BBCU_DLL_EXPORT int bbcu_bit_RealToBinary_Forward<float>(float const *, int *, unsigned int, unsigned int, float, float, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, cudaStream_t);
// NOTE(review): everything below up to the matching #endif is compiled out
// (#if 0). It is the legacy fp32 RealToBinary implementation kept for
// reference only; nothing in this region is reachable.
#if 0
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_RealToBinary_Forward(
const float* x_buf,
float* y_buf,
float th_offset,
float th_step,
int modulation_size,
int node_size,
int x_frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int x_frame = blockDim.x * blockIdx.x + threadIdx.x;
int node = blockDim.y * blockIdx.y + threadIdx.y;
float const *x_ptr = &x_buf[node * x_frame_stride];
float *y_ptr = &y_buf[node * y_frame_stride];
if ( x_frame < x_frame_size && node < node_size) {
float x = x_ptr[x_frame];
int y_frame = x_frame * modulation_size;
float th = th_offset;
for ( int i = 0; i < modulation_size; ++i ) {
y_ptr[y_frame + i] = (x > th) ? 1.0 : 0.0;
th += th_step;
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_RealToBinary_Forward
(
float const *dev_x_buf,
float *dev_y_buf,
float th_offset,
float th_step,
int modulation_size,
int node_size,
int x_frame_size,
int x_frame_stride,
int y_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= x_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= x_frame_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= node_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((x_frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y);
kernal_fp32_RealToBinary_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
th_offset,
th_step,
modulation_size,
node_size,
x_frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////
template <int MAX_NODE_UNIT>
__global__ void kernal_fp32_bit_no_modulation_RealToBinary_Forward
(
float const *x_buf,
int *y_buf,
float th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int frame = blockDim.x * blockIdx.x + threadIdx.x;
int node = blockDim.y * blockIdx.y + threadIdx.y;
int unit_id = ((threadIdx.y * blockDim.x + threadIdx.x) >> 5);
__shared__ int sbuf[MAX_NODE_UNIT][32];
float const *x_ptr = &x_buf[node * x_frame_stride];
int *y_ptr = &y_buf[node * y_frame_stride];
int bit = (frame & 0x1f);
int unit = (frame >> 5);
int y = 0;
if ( frame < frame_size && node < node_size) {
float x = x_ptr[frame];
y = (x > th) ? (1 << bit) : 0;
}
y = device_int_LocalOr(y, bit, sbuf[unit_id]);
if ( frame < frame_size && node < node_size && bit == 0 ) {
y_ptr[unit] = y;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_bit_no_modulation_RealToBinary_Forward
(
float const *dev_x_buf,
int *dev_y_buf,
float th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MIN_FRAME_UNIT = 32;
unsigned int const MAX_NODE_UNIT = 32;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y);
kernal_fp32_bit_no_modulation_RealToBinary_Forward<MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
th,
node_size,
frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
#endif
// end of file
// Grid-stride loop: `i` visits every index in [0, n), striding by the total
// number of launched threads, so a capped grid still covers all work items.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// One-dimensional launch geometry: a fixed 1024-thread block and a grid just
// large enough to cover N items, capped at the legacy 65535 grid-dimension
// limit. Kernels written with CUDA_KERNEL_LOOP tolerate the cap by striding.
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;

/// Number of thread blocks needed to cover N items, clamped to kMaxGridNum.
inline int GET_BLOCKS(const int N) {
  const int needed = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  return needed < kMaxGridNum ? needed : kMaxGridNum;
}
/*!
 * \brief Bilinear sample of bottom_data at the fractional position (h, w).
 * data_width is the row stride; taps falling outside [0, height) x
 * [0, width) contribute 0 (zero padding). The four-corner weighting here
 * must stay in sync with get_gradient_weight / get_coordinate_weight below.
 */
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType *bottom_data, const int data_width,
const int height, const int width, DType h, DType w) {
// integer corners surrounding (h, w)
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
// fractional distances and their complements (interpolation weights)
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
// fetch the four corner values, substituting 0 for out-of-bounds taps
DType v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
DType v2 = 0;
if (h_low >=0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
DType v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
DType v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
/*!
 * \brief Bilinear weight of integer pixel (h, w) for a sample taken at the
 * fractional position (argmax_h, argmax_w); used by the col2im kernel to
 * scatter gradients back onto the input image. Returns 0 when the sample
 * point is entirely outside the image or (h, w) is not one of its four
 * surrounding corners.
 */
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
// the four integer corners surrounding the sample point
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
// (h, w) can match at most one corner; the weight is that corner's
// bilinear coefficient
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
/*!
 * \brief Partial derivative of the bilinear sample at (argmax_h, argmax_w)
 * with respect to the sampling coordinate itself: bp_dir == 0 differentiates
 * w.r.t. the h coordinate, bp_dir == 1 w.r.t. the w coordinate. Used by the
 * col2im_coord kernel to form offset gradients. Returns 0 when the sample
 * point lies entirely outside the image.
 */
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width, const DType *im_data,
const int data_width, const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
// integer corners surrounding the sample point
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
// accumulate d(sample)/d(coord): each in-bounds corner contributes its
// image value times the derivative of its bilinear coefficient
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
 * \brief deformable_im2col gpu kernel.
 * DO NOT call this directly. Use wrapper function im2col() instead;
 *
 * One thread per column-buffer element (c_im, b_col, h_col, w_col): each
 * thread reads kernel_h * kernel_w learned offsets, bilinearly samples the
 * input at the shifted positions, and writes the samples down the column
 * buffer's channel axis.
 */
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const int n, const DType *data_im, const DType *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
DType *data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
// decompose the linear index into (c_im, b_col, h_col, w_col)
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
// first output channel this input channel expands into
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// top-left corner of the (undeformed) receptive field
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const DType* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const DType* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
// offsets are stored interleaved: channel 2k holds h, 2k+1 holds w
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
// samples entirely outside the image stay 0 (zero padding)
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
//const DType map_h = i * dilation_h + offset_h;
//const DType map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
// advance one output-channel row in the column buffer
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
/*!\brief
 * gpu wrapper of the deformable_im2col algorithm (note: despite the original
 * "cpu function" comment, this launches a CUDA kernel on `stream`)
 * \param stream cuda stream the kernel runs on
 * \param data_im pointer of images (N, C, H, W, ...) in the image batch
 * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
 * \param channels, height, width input image dimensions
 * \param ksize_h, ksize_w kernel filter shape
 * \param pad_h, pad_w pad shape
 * \param stride_h, stride_w stride shape
 * \param dilation_h, dilation_w dilation shape
 * \param parallel_imgs number of images processed per call
 * \param deformable_group #offset group that deformable convolution use
 * \param data_col column buffer pointer (output)
 */
template <typename DType>
inline void deformable_im2col(cudaStream_t stream,
const DType *data_im, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, DType *data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
// output spatial extent of the convolution
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
// one thread per (input channel, image, output pixel)
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
//index_t num_spatial_axes = kernel_shape.ndim();
//CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
//index_t channel_per_deformable_group = im_shape[1] / deformable_group;
//index_t num_kernels = im_shape[1] * col_shape.ProdShape(1, col_shape.ndim());
//using namespace mxnet_op;
//switch (num_spatial_axes) {
//case 2:
// deformable_im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
// <<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
// pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], im_shape[1], deformable_group, col_shape[2], col_shape[3], data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
// break;
//default:
// LOG(FATAL) << "im2col_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, channels, deformable_group, height_col, width_col, data_col);
// surface launch-configuration errors immediately
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
// explicit instantiation for float
template void deformable_im2col<float>(
cudaStream_t stream, const float *data_im, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *data_col);
/*!
 * \brief deformable_col2im gpu kernel.
 * \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
 *
 * One thread per column-buffer element: each thread scatters its upstream
 * gradient onto the input pixels its bilinear sample touched. The 5x5
 * (dy, dx) search window combined with the |coord - pixel| < 1 test selects
 * exactly the surrounding corners; atomicAdd is required because many
 * column elements can hit the same input pixel.
 */
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const int n, const DType *data_col, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_KERNEL_LOOP(index, n) {
// decompose the linear index into (c, i, j, b, h_out, w_out)
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
// interleaved offsets: channel 2k holds h, 2k+1 holds w
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
// fractional position this column element sampled in the forward pass
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!\brief
 * gpu wrapper of the deformable_col2im algorithm
 * \param stream cuda stream the kernel runs on
 * \param data_col column buffer holding the upstream gradient
 * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
 * \param channels, height, width input image dimensions
 * \param ksize_h, ksize_w kernel filter shape
 * \param pad_h, pad_w pad shape
 * \param stride_h, stride_w stride shape
 * \param dilation_h, dilation_w dilation shape
 * \param parallel_imgs number of images processed per call
 * \param deformable_group #offset group that deformable convolution use
 * \param grad_im output: gradient w.r.t. the images (N, C, H, W, ...)
 */
template <typename DType>
inline void deformable_col2im(cudaStream_t stream,
const DType *data_col, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
DType* grad_im) {
// todo: make sure parallel_imgs is passed in correctly
// output spatial extent of the convolution
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
// one thread per column-buffer element
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
// index_t num_spatial_axes = kernel_shape.ndim();
// index_t im_size = im_shape.ProdShape(1, im_shape.ndim());
// index_t channel_per_deformable_group = im_shape[1] / deformable_group;
// index_t num_kernels = col_shape.ProdShape(0, col_shape.ndim());
// num_axes should be smaller than block size
// CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
// using namespace mxnet_op;
// switch (num_spatial_axes) {
// case 2:
// // To avoid involving atomic operations, we will launch one kernel per
// // bottom dimension, and then in the kernel add up the top dimensions.
// // NOLINT_NEXT_LINE(whitespace/operators)
// deformable_col2im_gpu_kernel<DType><<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3],
// kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
// dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im, req);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
// break;
// default:
// LOG(FATAL) << "col2im_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_col, data_offset, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im);
// surface launch-configuration errors immediately
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
// explicit instantiation for float
template void deformable_col2im<float>(
cudaStream_t stream, const float *data_col, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *grad_im);
/*!
 * \brief deformable_col2im_coord gpu kernel.
 * \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
 *
 * One thread per offset element (b, offset channel, h, w): each thread
 * accumulates, over all column channels of its deformable group that share
 * this kernel tap, the product of the upstream column gradient and the
 * coordinate derivative of the bilinear sample, yielding the gradient for
 * one offset component.
 */
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType *data_col,
const DType *data_im, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, DType *grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
DType val = 0;
// decompose the linear index into (b, c, h, w) over the offset tensor
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const DType *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const DType *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
// offset channel within the group; encodes (i, j) tap and h/w direction
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
// even offset channels differentiate w.r.t. h, odd w.r.t. w
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
// out-of-image samples get a sentinel so get_coordinate_weight returns 0
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
 * gpu wrapper of the deformable_col2im_coord algorithm
 * \param stream cuda stream the kernel runs on
 * \param data_col column buffer holding the upstream gradient
 * \param data_im pointer of images (N, C, H, W, ...) in the image batch
 * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
 * \param channels, height, width input image dimensions
 * \param ksize_h, ksize_w kernel filter shape
 * \param pad_h, pad_w pad shape
 * \param stride_h, stride_w stride shape
 * \param dilation_h, dilation_w dilation shape
 * \param parallel_imgs number of images processed per call
 * \param deformable_group #offset group that deformable convolution use
 * \param grad_offset output: gradient w.r.t. the offsets
 *   (N, deformable_group*kernel_h*kernel_w*2, H, W, ...)
 */
template <typename DType>
inline void deformable_col2im_coord(cudaStream_t stream,
    const DType *data_col, const DType *data_im, const DType *data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, DType *grad_offset) {
  // output spatial extent of the convolution
  int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // one thread per offset element: 2 (h,w) * k_h * k_w * groups * batch * spatial
  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
  int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
  deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
      num_kernels, data_col, data_im, data_offset, channels, height, width,
      ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
      dilation_h, dilation_w, channel_per_deformable_group,
      parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
      height_col, width_col, grad_offset);
  // Fix: unlike deformable_im2col / deformable_col2im, this wrapper never
  // checked the launch result, silently dropping CUDA errors. Report them
  // the same way the sibling wrappers do.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
  }
}
// explicit instantiation for float (DType deduced from the arguments)
template void
deformable_col2im_coord(cudaStream_t stream, const float *data_col,
const float *data_im, const float *data_offset,
const int channels, const int height, const int width,
const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, float *grad_offset);
#include "allocators.cuh"
#include "broadcast_kernel.cuh"
#include "math_functions.cuh"
namespace minkowski {
namespace detail {
// Minimal compile-time type predicate: `value` is true exactly when T is
// plain `int`, false for every other type.
template <class T> struct IsIntType {
  static const bool value = false;
};
template <> struct IsIntType<int> {
  static const bool value = true;
};
// Atomically adds the num_elements-long vector `src` into `dst`,
// element-wise; safe when several threads target the same destination row.
template <typename Dtype>
__device__ void atomic_addition_n(Dtype *__restrict__ dst,
const Dtype *__restrict__ src,
const int num_elements) {
for (int i = 0; i < num_elements; ++i)
atomicAdd(dst + i, src[i]);
}
/* Must be applied to collision free destinations */
// Element-wise in-place multiply of a num_elements-long vector. Unlike
// atomic_addition_n there is no atomicity, hence the collision-free
// requirement above.
template <typename Dtype>
__device__ void multiplication_n(Dtype *__restrict__ dst,
const Dtype *__restrict__ src,
const int num_elements) {
for (int i = 0; i < num_elements; ++i)
dst[i] *= src[i];
}
// For each of the n map entries, atomically adds the nchannel-long global
// feature row d_glob_feat[d_out_map[i]] into the output row
// d_out_feat[d_in_map[i]].
template <typename Dtype, typename Itype>
__global__ void channelwise_addition(const int n, const int nchannel,
const Dtype *__restrict__ d_glob_feat,
const Itype *__restrict__ d_in_map,
const Itype *__restrict__ d_out_map,
Dtype *__restrict__ d_out_feat) {
CUDA_KERNEL_LOOP(index, n) {
atomic_addition_n(&d_out_feat[d_in_map[index] * nchannel],
&d_glob_feat[d_out_map[index] * nchannel], nchannel);
}
}
// For each of the n map entries, multiplies the output row
// d_out_feat[d_in_map[i]] in place by the global feature row
// d_glob_feat[d_out_map[i]]. Non-atomic: per multiplication_n's contract,
// each destination row must be written by at most one map entry.
template <typename Dtype, typename Itype>
__global__ void channelwise_multiplication(
const int n, const int nchannel, const Dtype *__restrict__ d_glob_feat,
const Itype *__restrict__ d_in_map, const Itype *__restrict__ d_out_map,
Dtype *__restrict__ d_out_feat) {
CUDA_KERNEL_LOOP(index, n) {
multiplication_n(&d_out_feat[d_in_map[index] * nchannel],
&d_glob_feat[d_out_map[index] * nchannel], nchannel);
}
}
// Fills in_feat[0..n) with the constant `val`.
template <typename Dtype>
__global__ void fill(const int n, Dtype *__restrict__ in_feat,
const Dtype val) {
CUDA_KERNEL_LOOP(index, n) { in_feat[index] = val; }
}
} // namespace detail
// Forward broadcast: d_out_feat is first initialized as a copy of d_in_feat,
// then for every (in, out) pair in kernel_map the global feature row
// d_in_feat_global[out] is added to / multiplied into d_out_feat[in],
// channel by channel, according to `op`.
// Throws std::invalid_argument if the map does not cover exactly in_nrows
// entries or if `op` is unsupported. Synchronizes the device before
// returning. (in_nrows_global and cushandle are not referenced in this
// function; presumably kept for interface symmetry with the backward pass.)
template <typename Dtype, typename Itype, typename ByteAllocator>
void BroadcastForwardKernelGPU(
const Dtype *d_in_feat, const int in_nrows, const Dtype *d_in_feat_global,
const int in_nrows_global, Dtype *d_out_feat, const int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map,
cusparseHandle_t cushandle, cudaStream_t stream) {
// Sum all sizes
size_t const num_map = kernel_map.in_maps.end() - kernel_map.in_maps.begin();
if (num_map != in_nrows)
throw std::invalid_argument(
"BroadcastForwardKernelGPU: kernel_map size != in_nrows");
// Copy all in_feat to out_feat
// NOTE(review): this is the synchronous cudaMemcpy variant while the
// kernels below run on `stream` — confirm the blocking copy is intended.
CUDA_CHECK(cudaMemcpy(d_out_feat, d_in_feat,
sizeof(Dtype) * nchannel * in_nrows,
cudaMemcpyDeviceToDevice));
// To speed up, put switch outside for loops
switch (op) {
case BroadcastMode::ELEMENTWISE_ADDITON: // +
detail::channelwise_addition<Dtype, Itype>
<<<GET_BLOCKS(in_nrows, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0,
stream>>>(in_nrows, nchannel, d_in_feat_global,
kernel_map.in_maps.begin(), kernel_map.out_maps.begin(),
d_out_feat);
break;
case BroadcastMode::ELEMENTWISE_MULTIPLICATION: // *
detail::channelwise_multiplication<Dtype, Itype>
<<<GET_BLOCKS(in_nrows, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0,
stream>>>(in_nrows, nchannel, d_in_feat_global,
kernel_map.in_maps.begin(), kernel_map.out_maps.begin(),
d_out_feat);
break;
default:
throw std::invalid_argument(Formatter() << "Operation not supported: "
<< std::to_string(op));
}
// surface kernel errors and block until results are ready for callers
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
}
// Explicit instantiations: {float, double} x {default_allocator, c10_allocator},
// all with uint32_t map indices.
template void
BroadcastForwardKernelGPU<float, uint32_t, detail::default_allocator<char>>(
const float *d_in_feat, int in_nrows, const float *d_in_feat_global,
int in_nrows_global, float *d_out_feat, int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
cusparseHandle_t cuhandle, cudaStream_t stream);
template void
BroadcastForwardKernelGPU<double, uint32_t, detail::default_allocator<char>>(
const double *d_in_feat, int in_nrows, const double *d_in_feat_global,
int in_nrows_global, double *d_out_feat, int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
cusparseHandle_t cuhandle, cudaStream_t stream);
template void
BroadcastForwardKernelGPU<float, uint32_t, detail::c10_allocator<char>>(
const float *d_in_feat, int in_nrows, const float *d_in_feat_global,
int in_nrows_global, float *d_out_feat, int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
cusparseHandle_t cuhandle, cudaStream_t stream);
template void
BroadcastForwardKernelGPU<double, uint32_t, detail::c10_allocator<char>>(
const double *d_in_feat, int in_nrows, const double *d_in_feat_global,
int in_nrows_global, double *d_out_feat, int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
cusparseHandle_t cuhandle, cudaStream_t stream);
template <typename Dtype, typename Itype, typename ByteAllocator>
void BroadcastBackwardKernelGPU(
const Dtype *d_in_feat, Dtype *d_grad_in_feat, int in_nrows,
const Dtype *d_in_feat_global, Dtype *d_grad_in_feat_global,
int in_nrows_global, const Dtype *d_grad_out_feat, int nchannel,
BroadcastMode::Type const op,
gpu_kernel_map<Itype, ByteAllocator> const &kernel_map,
cusparseHandle_t cushandle, cudaStream_t stream) {
Itype *d_scr, *d_in_map, *d_out_map; //, *d_csr_row;
Dtype *d_dtype, *d_coo_val, *d_tmp_grad_in_feat_global, *d_tmp_grad_in_feat;
// cusparseMatDescr_t descr = 0;
const Dtype alpha = 1;
const Dtype beta = 0;
int nnz = in_nrows;
// if (in_maps.size() != 1) {
// All in_maps[k] are contiguous.
// TODO. Assert contiguous.
// }
// Sum all sizes
size_t const num_map = kernel_map.in_maps.end() - kernel_map.in_maps.begin();
if (num_map != in_nrows)
throw std::invalid_argument(
"BroadcastBackwardKernelGPU: kernel_map size != in_nrows");
/* In Out Map prep */
// Malloc d_in_map, d_out_map, d_csr_row
// CSR returns n_row + 1
CUDA_CHECK(cudaMalloc((void **)&d_scr,
2 * nnz * sizeof(Itype) + // in out maps
(in_nrows_global + 1) * sizeof(Itype) // d_csr_row
));
// COO cols
d_in_map = d_scr; // nnz
// COO rows
d_out_map = d_scr + nnz; // nnz
// CSR row indices
// d_csr_row = d_scr + 2 * nnz; // in_nrows_global + 1
CUDA_CHECK(cudaMemcpy(
d_in_map,
(int *)kernel_map.in_maps.begin(), // in_maps are contiguous of size nnz
nnz * sizeof(int), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(
d_out_map,
(int *)kernel_map.out_maps.begin(), // out_maps are contiguous of size nnz
nnz * sizeof(int), cudaMemcpyDeviceToDevice));
/* tmp in out feat */
// sparse gemm output
CUDA_CHECK(cudaMalloc(
(void **)&d_dtype,
nnz * sizeof(Dtype) + // d_csr_val
in_nrows * nchannel * sizeof(Dtype) + // tmp_grad_infeat
in_nrows_global * nchannel * sizeof(Dtype) // tmp_grad_infeat_global
));
// Divide the memory space into multiple chunks
d_tmp_grad_in_feat_global = d_dtype; // in_nrows_global * nchannel
d_tmp_grad_in_feat = d_tmp_grad_in_feat_global +
in_nrows_global * nchannel; // in_nrows * nchannel
d_coo_val = d_tmp_grad_in_feat + in_nrows * nchannel;
// thrust::fill(d_csr_val.begin(), d_csr_val.end(), 1);
detail::fill<Dtype>
<<<GET_BLOCKS(nnz, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>(
nnz, d_coo_val, (Dtype)1.);
// CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
// cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
// cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
// Sort COO first
THRUST_CHECK(thrust::sort_by_key(thrust::device, //
d_out_map, // key begin
d_out_map + nnz, // key end
d_in_map // value begin
));
cusparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
mm_alg = CUSPARSE_MM_ALG_DEFAULT;
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
#endif
// +---------+ +---+
// | spm | | i |
// +---------+ | n |
// in_nrows | |
// | F |
// | |
// +---+
// nchannel
size_t dim_i = in_nrows_global, dim_j = in_nrows, dim_k = nchannel;
constexpr bool is_float32 = std::is_same<Dtype, float>::value;
cudaDataType cuda_data_type = is_float32 ? CUDA_R_32F : CUDA_R_64F;
cusparseSpMatDescr_t sparse_descr;
cusparseDnMatDescr_t dense_descr;
cusparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(cusparseCreateCoo(&sparse_descr, //
dim_i, dim_j, nnz, //
d_out_map, // rows
d_in_map, // cols
d_coo_val, // coo vals
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type));
// buffer size 0 for CUSPARSE_SPMM_COO_ALG1, CUSPARSE_SPMM_COO_ALG3,
// CUSPARSE_SPMM_COO_ALG4, and CUSPARSE_SPMM_CSR_ALG1
// To speed up, put switch outside for loops
switch (op) {
case BroadcastMode::ELEMENTWISE_ADDITON: // +
// For grad_in_feat, copy all grad_out_feat to grad_in_feat
CUDA_CHECK(cudaMemcpy(d_grad_in_feat, d_grad_out_feat,
sizeof(Dtype) * nchannel * in_nrows,
cudaMemcpyDeviceToDevice));
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
(void *)d_grad_out_feat, //
cuda_data_type, CUSPARSE_ORDER_COL));
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
(void *)d_tmp_grad_in_feat_global, //
cuda_data_type, CUSPARSE_ORDER_COL));
// Transpose the output
// WARNING: coo sorting must have been handled in the kernel map
// decomposition.
CUSPARSE_CHECK(cusparseSpMM(cushandle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha, //
sparse_descr, dense_descr, //
(void *)&beta, result_descr, //
cuda_data_type, mm_alg, 0));
// For grad_in_feat_glob, add all grad_out_feat
/*
CUSPARSE_CHECK(
cusparse_csrmm<Dtype>(cushandle,
CUSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
CUSPARSE_OPERATION_TRANSPOSE, // op(B)
in_nrows_global, // M
nchannel, // N
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_grad_out_feat, // B
nchannel, // ldb
&beta,
d_tmp_grad_in_feat_global, // C
in_nrows_global // ldc
));
*/
col2row_major<Dtype>(in_nrows_global, nchannel, d_tmp_grad_in_feat_global,
d_grad_in_feat_global, stream);
break;
case BroadcastMode::ELEMENTWISE_MULTIPLICATION: // *
// Second, for grad_in_feat_global, copy in_feat to tmp,
// Forward : (A^T(sparse) x global(dense)) (*) B(feat) = C(result)
// grad global : A(sparse) (grad C (*) B) =
CUDA_CHECK(cudaMemcpy(d_tmp_grad_in_feat, d_grad_out_feat,
sizeof(Dtype) * nchannel * in_nrows,
cudaMemcpyDeviceToDevice));
gpu_multiplication<Dtype>(nchannel * in_nrows, d_in_feat,
d_tmp_grad_in_feat, d_tmp_grad_in_feat, stream);
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
(void *)d_tmp_grad_in_feat, //
cuda_data_type, CUSPARSE_ORDER_COL));
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
(void *)d_tmp_grad_in_feat_global, //
cuda_data_type, CUSPARSE_ORDER_COL));
// Transpose the output
CUSPARSE_CHECK(cusparseSpMM(cushandle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha, //
sparse_descr, dense_descr, //
(void *)&beta, result_descr, //
cuda_data_type, mm_alg, 0));
/*CUSPARSE_CHECK(
cusparse_csrmm<Dtype>(cushandle,
CUSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
CUSPARSE_OPERATION_TRANSPOSE, // op(B)
in_nrows_global, // M
nchannel, // N
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_tmp_grad_in_feat, // B
nchannel, // ldb
&beta,
d_tmp_grad_in_feat_global, // C
in_nrows_global // ldc
));
*/
col2row_major<Dtype>(in_nrows_global, nchannel, d_tmp_grad_in_feat_global,
d_grad_in_feat_global, stream);
// First, for grad_in_feat
// Copy in_feat_global to tmp, then multiply the tmp with grad_out_feat
// Forward : (A^T(sparse) x global(dense)) (*) B(feat) = C(result)
// grad feat : A^T(sparse) x global(dense) (*) grad C
// Sort COO first
// sort_coo_gpu(cushandle, in_nrows_global, in_nrows, nnz, d_out_map,
// d_in_map);
// cusparseSpMatDescr_t sparse_descr2;
CUDA_CHECK(cudaMemcpy(d_grad_in_feat, d_grad_out_feat,
sizeof(Dtype) * nchannel * in_nrows,
cudaMemcpyDeviceToDevice));
detail::channelwise_multiplication<Dtype, Itype>
<<<GET_BLOCKS(in_nrows, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0,
stream>>>(in_nrows, nchannel, d_in_feat_global,
kernel_map.in_maps.begin(), kernel_map.out_maps.begin(),
d_grad_in_feat);
/*
row2col_major<Dtype>(in_nrows_global, nchannel, d_in_feat_global,
d_tmp_grad_in_feat_global, stream);
CUSPARSE_CHECK(
cusparse_csrmm<Dtype>(cushandle,
CUSPARSE_OPERATION_TRANSPOSE, // op(A)
CUSPARSE_OPERATION_NON_TRANSPOSE, // op(B)
in_nrows_global, // M
nchannel, // N
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_tmp_grad_in_feat_global, // B
in_nrows_global, // ldb
&beta,
d_tmp_grad_in_feat, // C
in_nrows // ldc
));
col2row_major<Dtype>(in_nrows, nchannel, d_tmp_grad_in_feat, d_grad_in_feat,
stream);
gpu_multiplication<Dtype>(nchannel * in_nrows, d_grad_out_feat,
d_grad_in_feat, d_grad_in_feat, stream);
*/
break;
default:
throw std::invalid_argument(Formatter() << "Operation not supported: "
<< std::to_string(op));
}
cudaFree(d_scr);
cudaFree(d_dtype);
CUSPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(result_descr));
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
}
// Explicit instantiations of the broadcast backward kernel for all supported
// (feature type, index type, allocator) combinations. These must mirror the
// forward-kernel instantiations above.
template void
BroadcastBackwardKernelGPU<float, uint32_t, detail::default_allocator<char>>(
    const float *d_in_feat, float *d_grad_in_feat, int in_nrows,
    const float *d_in_feat_global, float *d_grad_in_feat_global,
    int in_nrows_global, const float *d_grad_out_feat, int nchannel,
    BroadcastMode::Type const op,
    gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
    cusparseHandle_t cushandle, cudaStream_t stream);

template void
BroadcastBackwardKernelGPU<double, uint32_t, detail::default_allocator<char>>(
    const double *d_in_feat, double *d_grad_in_feat, int in_nrows,
    const double *d_in_feat_global, double *d_grad_in_feat_global,
    int in_nrows_global, const double *d_grad_out_feat, int nchannel,
    BroadcastMode::Type const op,
    gpu_kernel_map<uint32_t, detail::default_allocator<char>> const &kernel_map,
    cusparseHandle_t cushandle, cudaStream_t stream);

template void
BroadcastBackwardKernelGPU<float, uint32_t, detail::c10_allocator<char>>(
    const float *d_in_feat, float *d_grad_in_feat, int in_nrows,
    const float *d_in_feat_global, float *d_grad_in_feat_global,
    int in_nrows_global, const float *d_grad_out_feat, int nchannel,
    BroadcastMode::Type const op,
    gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
    cusparseHandle_t cushandle, cudaStream_t stream);

template void
BroadcastBackwardKernelGPU<double, uint32_t, detail::c10_allocator<char>>(
    const double *d_in_feat, double *d_grad_in_feat, int in_nrows,
    const double *d_in_feat_global, double *d_grad_in_feat_global,
    int in_nrows_global, const double *d_grad_out_feat, int nchannel,
    BroadcastMode::Type const op,
    gpu_kernel_map<uint32_t, detail::c10_allocator<char>> const &kernel_map,
    cusparseHandle_t cushandle, cudaStream_t stream);
} // namespace minkowski
#endif // GPU_BROADCAST | the_stack |
#include "cudpp_radixsort.h"
#include <cudpp_globals.h>
#include "sharedmem.h"
#include "cta/radixsort_cta.cuh"
/**
* @file
* radixsort_kernel.cu
*
* @brief CUDPP kernel-level radix sorting routines
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name RadixSort Functions
* @{
*/
typedef unsigned int uint;
/** @brief An empty kernel used to reset CTA issue hardware
**/
__global__ void emptyKernel() {}
/** @brief Does special binary arithmetic before sorting floats
*
* Uses floatFlip function to flip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be flipped
**/
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
flipFloats(uint *values, uint numValues)
{
    // Each CTA covers 4 * blockDim.x elements; each thread flips four
    // entries spaced blockDim.x apart, skipping any past the end.
    uint idx = __umul24(blockDim.x * 4, blockIdx.x) + threadIdx.x;

#pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (idx < numValues)
            values[idx] = floatFlip<true>(values[idx]);
        idx += blockDim.x;
    }
}
/** @brief Undoes the flips from flipFloats
*
* Uses floatUnflip function to unflip bits.
* @param[in,out] values Values to be manipulated
* @param[in] numValues Number of values to be unflipped
**/
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
unflipFloats(uint *values, uint numValues)
{
    // Mirror of flipFloats: each thread undoes the bit flip on four entries
    // spaced blockDim.x apart, skipping any past the end.
    uint idx = __umul24(blockDim.x * 4, blockIdx.x) + threadIdx.x;

#pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (idx < numValues)
            values[idx] = floatUnflip<true>(values[idx]);
        idx += blockDim.x;
    }
}
/** @brief Optimization for sorts of WARP_SIZE or fewer elements
*
* @param[in,out] keys Keys to be sorted.
* @param[in,out] values Associated values to be sorted (through keys).
* @param[in] numElements Number of elements in the sort.
*/
template <bool flip>
__global__
LAUNCH_BOUNDS(WARP_SIZE)
void radixSortSingleWarp(uint *keys,
                         uint *values,
                         uint numElements)
{
    // Parallel insertion sort in shared memory. "volatile" forces every
    // store to be visible to the other lanes without explicit barriers.
    // NOTE(review): there is no __syncthreads()/__syncwarp() here — this
    // relies on implicit warp-synchronous execution (pre-Volta assumption);
    // verify on architectures with independent thread scheduling.
    volatile __shared__ uint sKeys[WARP_SIZE];
    volatile __shared__ uint sValues[WARP_SIZE];
    volatile __shared__ uint sFlags[WARP_SIZE];

    sKeys[threadIdx.x]   = floatFlip<flip>(keys[threadIdx.x]);
    sValues[threadIdx.x] = values[threadIdx.x];

    // Insert element i into the sorted prefix [0, i): each lane in the
    // prefix holding a larger key shifts itself one slot right, and the
    // leftmost shifted lane then drops key_i into the hole it left behind.
    for (uint i = 1; i < numElements; i++)
    {
        uint key_i = sKeys[i];
        uint val_i = sValues[i];

        sFlags[threadIdx.x] = 0;

        uint temp, tempval;
        if ((threadIdx.x < i) && (sKeys[threadIdx.x] > key_i))
        {
            temp    = sKeys[threadIdx.x];
            tempval = sValues[threadIdx.x];
            sFlags[threadIdx.x] = 1;
            // Shift right; the neighbor clears its flag so only the leftmost
            // shifted lane keeps flag == 1.
            sKeys[threadIdx.x + 1]   = temp;
            sValues[threadIdx.x + 1] = tempval;
            sFlags[threadIdx.x + 1]  = 0;
        }

        if (sFlags[threadIdx.x] == 1)
        {
            sKeys[threadIdx.x]   = key_i;
            sValues[threadIdx.x] = val_i;
        }
    }

    // Write back, undoing the float bit-flip if one was applied on load.
    keys[threadIdx.x]   = floatUnflip<flip>(sKeys[threadIdx.x]);
    values[threadIdx.x] = sValues[threadIdx.x];
}
/** @brief Optimization for sorts of WARP_SIZE or fewer elements. Keys-Only version.
*
* @param[in,out] keys Keys to be sorted
* @param[in] numElements Total number of elements to be sorted
**/
template <bool flip>
__global__
LAUNCH_BOUNDS(WARP_SIZE)
void radixSortSingleWarpKeysOnly(uint *keys,
                                 uint numElements)
{
    // Keys-only variant of radixSortSingleWarp: same parallel insertion
    // sort, without the value array.
    // NOTE(review): like its sibling, this relies on implicit
    // warp-synchronous execution (no barriers) — verify on post-Volta GPUs.
    volatile __shared__ uint sKeys[WARP_SIZE];
    volatile __shared__ uint sFlags[WARP_SIZE];

    sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);

    // Insert element i into the sorted prefix [0, i); see radixSortSingleWarp.
    for (uint i = 1; i < numElements; i++)
    {
        uint key_i = sKeys[i];

        sFlags[threadIdx.x] = 0;

        uint temp;
        if ((threadIdx.x < i) && (sKeys[threadIdx.x] > key_i))
        {
            temp = sKeys[threadIdx.x];
            sFlags[threadIdx.x] = 1;
            sKeys[threadIdx.x + 1]  = temp;
            sFlags[threadIdx.x + 1] = 0;
        }

        // Only the leftmost shifted lane keeps its flag; it receives key_i.
        if (sFlags[threadIdx.x] == 1)
        {
            sKeys[threadIdx.x] = key_i;
        }
    }

    keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
/** @brief sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
 * GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys
* @param[out] valuesOut Output of associated values
* @param[in] keysIn Input of unsorted keys in GPU
* @param[in] valuesIn Input of associated input values
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks The number of blocks of data to sort
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
radixSortBlocks(uint4* keysOut, uint4* valuesOut,
                uint4* keysIn, uint4* valuesIn,
                uint numElements, uint totalBlocks)
{
    // Shared scratch used by radixSortBlock (size set at launch time).
    extern __shared__ uint4 sMem[];

    uint4 key, value;

    // Persistent-CTA loop: when "loop" is true each CTA strides over many
    // data blocks; otherwise it handles exactly one and breaks at the bottom.
    uint blockId = blockIdx.x;

    while (!loop || blockId < totalBlocks)
    {
        // Each thread owns 4 consecutive scalars: i indexes uint4 vectors,
        // idx the corresponding scalar position.
        uint i = blockId * blockDim.x + threadIdx.x;
        uint idx = i << 2;

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && idx+3 >= numElements)
        {
            if (idx >= numElements)
            {
                // Entirely past the end: pad with UINT_MAX so padding sorts
                // to the back of the block.
                key   = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
                value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
            }
            else
            {
                // for non-full block, we handle uint1 values instead of uint4
                uint *keys1   = (uint*)keysIn;
                uint *values1 = (uint*)valuesIn;

                key.x = (idx   < numElements) ? floatFlip<flip>(keys1[idx])   : UINT_MAX;
                key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
                key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
                key.w = UINT_MAX;

                value.x = (idx   < numElements) ? values1[idx]   : UINT_MAX;
                value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
                value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
                value.w = UINT_MAX;
            }
        }
        else
        {
            // Full block: one vectorized uint4 load per thread.
            key   = keysIn[i];
            value = valuesIn[i];

            if (flip)
            {
                // Bit-flip float keys so they sort correctly as unsigned ints.
                key.x = floatFlip<flip>(key.x);
                key.y = floatFlip<flip>(key.y);
                key.z = floatFlip<flip>(key.z);
                key.w = floatFlip<flip>(key.w);
            }
        }
        __syncthreads();

        // Sort this block's 4*CTA_SIZE elements on bits [startbit, startbit+nbits).
        radixSortBlock<nbits, startbit>(key, value);

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && idx+3 >= numElements)
        {
            if (idx < numElements)
            {
                // for non-full block, we handle uint1 values instead of uint4
                uint *keys1   = (uint*)keysOut;
                uint *values1 = (uint*)valuesOut;

                keys1[idx]   = key.x;
                values1[idx] = value.x;

                if (idx + 1 < numElements)
                {
                    keys1[idx + 1]   = key.y;
                    values1[idx + 1] = value.y;

                    if (idx + 2 < numElements)
                    {
                        keys1[idx + 2]   = key.z;
                        values1[idx + 2] = value.z;
                    }
                }
            }
        }
        else
        {
            keysOut[i]   = key;
            valuesOut[i] = value;
        }

        if (loop)
            blockId += gridDim.x;
        else
            break;
    }
}
/** @brief Computes the number of keys of each radix in each block stores offset.
*
* Given an array with blocks sorted according to a 4-bit radix group, each
* block counts the number of keys that fall into each radix in the group, and
* finds the starting offset of each radix in the block. It then writes the radix
* counts to the counters array, and the starting offsets to the blockOffsets array.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
 * GPUs than it is on compute version 1.2 GPUs).
*
* @param[in] keys Input keys
* @param[out] counters Radix count for each block
* @param[out] blockOffsets The offset address for each block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
**/
template<uint startbit, bool fullBlocks, bool loop>
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
findRadixOffsets(uint2 *keys,
                 uint  *counters,
                 uint  *blockOffsets,
                 uint   numElements,
                 uint   totalBlocks)
{
    // Dynamically sized at launch: holds the 4-bit digit of all
    // 2 * SORT_CTA_SIZE keys handled by this CTA.
    extern __shared__ uint sRadix1[];
    // Per-digit start position within the block; reused below to hold the
    // run length (count) of each digit.
    __shared__ uint sStartPointers[16];

    uint blockId = blockIdx.x;

    while (!loop || blockId < totalBlocks)
    {
        uint2 radix2;

        uint i = blockId * blockDim.x + threadIdx.x;

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && ((i + 1) << 1) > numElements)
        {
            // handle uint1 rather than uint2 for non-full blocks
            uint *keys1 = (uint*)keys;
            uint j = i << 1;

            radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
            j++;
            radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
        }
        else
        {
            radix2 = keys[i];
        }

        // Extract the 4-bit digit currently being sorted on.
        sRadix1[2 * threadIdx.x]     = (radix2.x >> startbit) & 0xF;
        sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;

        // Finds the position where the sRadix1 entries differ and stores start
        // index for each radix. The block is already sorted on this digit, so
        // each digit occupies one contiguous run.
        if (threadIdx.x < 16)
        {
            sStartPointers[threadIdx.x] = 0;
        }
        __syncthreads();

        if ((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]))
        {
            sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
        }
        if (sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1])
        {
            sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE]] = threadIdx.x + SORT_CTA_SIZE;
        }
        __syncthreads();

        if (threadIdx.x < 16)
        {
            blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
        }
        __syncthreads();

        // Compute the sizes of each block: run length = next start - own start.
        if ((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]))
        {
            sStartPointers[sRadix1[threadIdx.x - 1]] =
                threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
        }
        if (sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1])
        {
            sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]] =
                threadIdx.x + SORT_CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]];
        }

        // The last thread closes off the final run.
        if (threadIdx.x == SORT_CTA_SIZE - 1)
        {
            sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]] =
                2 * SORT_CTA_SIZE - sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]];
        }
        __syncthreads();

        // Counters are written radix-major (counters[radix * totalBlocks + block])
        // so a later scan yields the global base offset of each radix.
        if (threadIdx.x < 16)
        {
            counters[threadIdx.x * totalBlocks + blockId] =
                sStartPointers[threadIdx.x];
        }

        if (loop)
            blockId += gridDim.x;
        else
            break;
    }
}
/**@brief Reorders data in the global array.
*
* reorderData shuffles data in the array globally after the radix
* offsets have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
 * GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output of sorted keys
* @param[out] outValues Output of associated values
* @param[in] keys Input of unsorted keys in GPU
* @param[in] values Input of associated input values
* @param[in] blockOffsets The offset address for each block
* @param[in] offsets Address of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of data blocks to process
*
* @todo Args that are const below should be prototyped as const
**/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
reorderData(uint  *outKeys,
            uint  *outValues,
            uint2 *keys,
            uint2 *values,
            uint  *blockOffsets,
            uint  *offsets,
            uint  *sizes,
            uint   numElements,
            uint   totalBlocks)
{
    // Shared staging: each CTA loads its 2 * SORT_CTA_SIZE keys/values (as
    // uint2 pairs) before scattering them to their global destinations.
    __shared__ uint2 sKeys2[SORT_CTA_SIZE];
    __shared__ uint2 sValues2[SORT_CTA_SIZE];
    __shared__ uint sOffsets[16];      // global start of each radix digit for this block
    __shared__ uint sBlockOffsets[16]; // local start of each radix digit within the block

    // Scalar views of the uint2 staging arrays.
    uint *sKeys1   = (uint*)sKeys2;
    uint *sValues1 = (uint*)sValues2;

    uint blockId = blockIdx.x;

    while (!loop || blockId < totalBlocks)
    {
        uint i = blockId * blockDim.x + threadIdx.x;

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && (((i + 1) << 1) > numElements))
        {
            uint *keys1   = (uint*)keys;
            uint *values1 = (uint*)values;
            uint j = i << 1;

            sKeys1[threadIdx.x << 1]   = (j < numElements) ? keys1[j]   : UINT_MAX;
            sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
            j++;
            sKeys1[(threadIdx.x << 1) + 1]   = (j < numElements) ? keys1[j]   : UINT_MAX;
            sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
        }
        else
        {
            sKeys2[threadIdx.x]   = keys[i];
            sValues2[threadIdx.x] = values[i];
        }

        if (!manualCoalesce)
        {
            // Direct path (compute >= 1.2): each thread computes the global
            // destination of its two elements and stores them directly.
            if (threadIdx.x < 16)
            {
                sOffsets[threadIdx.x]      = offsets[threadIdx.x * totalBlocks + blockId];
                sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
            }
            __syncthreads();

            // destination = digit's global base + position within the digit's run.
            uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
            uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];

            if (fullBlocks || globalOffset < numElements)
            {
                outKeys[globalOffset]   = floatUnflip<unflip>(sKeys1[threadIdx.x]);
                outValues[globalOffset] = sValues1[threadIdx.x];
            }

            radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
            globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];

            if (fullBlocks || globalOffset < numElements)
            {
                outKeys[globalOffset]   = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
                outValues[globalOffset] = sValues1[threadIdx.x + SORT_CTA_SIZE];
            }
        }
        else
        {
            __shared__ uint sSizes[16];

            if (threadIdx.x < 16)
            {
                sOffsets[threadIdx.x]      = offsets[threadIdx.x * totalBlocks + blockId];
                sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
                sSizes[threadIdx.x]        = sizes[threadIdx.x * totalBlocks + blockId];
            }
            __syncthreads();

            // 1 half-warp is responsible for writing out all values for 1 radix.
            // Loops if there are more than 16 values to be written out.
            // All start indices are rounded down to the nearest multiple of 16, and
            // all end indices are rounded up to the nearest multiple of 16.
            // Thus it can do extra work if the start and end indices are not multiples of 16
            // This is bounded by a factor of 2 (it can do 2X more work at most).
            const uint halfWarpID     = threadIdx.x >> 4;
            const uint halfWarpOffset = threadIdx.x & 0xF;
            const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;

            uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
            uint endPos   = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
                            ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
            uint numIterations = endPos - startPos;

            uint outOffset = startPos + halfWarpOffset;
            uint inOffset  = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;

            for (uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
            {
                // Guard against the rounded-down start and rounded-up end.
                if ((outOffset >= sOffsets[halfWarpID]) &&
                    (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
                {
                    if (blockId < totalBlocks - 1 || outOffset < numElements)
                    {
                        outKeys[outOffset]   = floatUnflip<unflip>(sKeys1[inOffset]);
                        outValues[outOffset] = sValues1[inOffset];
                    }
                }
            }
        }

        if (loop)
        {
            blockId += gridDim.x;
            // Barrier before the shared staging buffers are overwritten by
            // the next persistent-CTA iteration.
            __syncthreads();
        }
        else
            break;
    }
}
/** @brief Sorts all blocks of data independently in shared memory.
* Each thread block (CTA) sorts one block of 4*CTA_SIZE elements
*
* The radix sort is done in two stages. This stage calls radixSortBlock on each
* block independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size (fullBlocks)
* differently than arrays that are not. "flip" is used to only compile in the
* float flip code when float keys are used. "loop" is used when persistent CTAs
* are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
 * GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] keysOut Output of sorted keys GPU main memory
* @param[in] keysIn Input of unsorted keys in GPU main memory
* @param[in] numElements Total number of elements to sort
* @param[in] totalBlocks Total number of blocks to sort
*
*/
template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop>
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks)
{
    // Keys-only variant of radixSortBlocks; shared scratch for the CTA sort.
    extern __shared__ uint4 sMem[];

    uint4 key;

    // Persistent-CTA loop: when "loop" is true each CTA strides over many
    // data blocks; otherwise it handles exactly one and breaks at the bottom.
    uint blockId = blockIdx.x;

    while (!loop || blockId < totalBlocks)
    {
        // Each thread owns 4 consecutive scalars: i indexes uint4 vectors,
        // idx the corresponding scalar position.
        uint i = blockId * blockDim.x + threadIdx.x;
        uint idx = i << 2;

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && idx+3 >= numElements)
        {
            if (idx >= numElements)
            {
                // Entirely past the end: pad so padding sorts to the back.
                key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
            }
            else
            {
                // for non-full block, we handle uint1 values instead of uint4
                uint *keys1 = (uint*)keysIn;

                key.x = (idx   < numElements) ? floatFlip<flip>(keys1[idx])   : UINT_MAX;
                key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
                key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
                key.w = UINT_MAX;
            }
        }
        else
        {
            // Full block: one vectorized uint4 load per thread.
            key = keysIn[i];

            if (flip)
            {
                // Bit-flip float keys so they sort correctly as unsigned ints.
                key.x = floatFlip<flip>(key.x);
                key.y = floatFlip<flip>(key.y);
                key.z = floatFlip<flip>(key.z);
                key.w = floatFlip<flip>(key.w);
            }
        }
        __syncthreads();

        // Sort this block's 4*CTA_SIZE keys on bits [startbit, startbit+nbits).
        radixSortBlockKeysOnly<nbits, startbit>(key);

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && idx+3 >= numElements)
        {
            if (idx < numElements)
            {
                // for non-full block, we handle uint1 values instead of uint4
                uint *keys1 = (uint*)keysOut;

                keys1[idx] = key.x;

                if (idx + 1 < numElements)
                {
                    keys1[idx + 1] = key.y;

                    if (idx + 2 < numElements)
                    {
                        keys1[idx + 2] = key.z;
                    }
                }
            }
        }
        else
        {
            keysOut[i] = key;
        }

        if (loop)
            blockId += gridDim.x;
        else
            break;
    }
}
/** @brief Reorders data in the global array.
*
* reorderDataKeysOnly shuffles data in the array globally after the radix offsets
* have been found. On compute version 1.1 and earlier GPUs, this code depends
* on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
*
* On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
* that all writes are coalesced using extra work in the kernel. On later
* GPUs coalescing rules have been relaxed, so this extra overhead hurts
* performance. On these GPUs we set manualCoalesce=false and directly store
* the results.
*
* Template parameters are used to generate efficient code for various special cases
* For example, we have to handle arrays that are a multiple of the block size
* (fullBlocks) differently than arrays that are not. "loop" is used when persistent
* CTAs are used.
*
* By persistent CTAs we mean that we launch only as many thread blocks as can
* be resident in the GPU and no more, rather than launching as many threads as
* we have elements. Persistent CTAs loop over blocks of elements until all work
* is complete. This can be faster in some cases. In our tests it is faster
* for large sorts (and the threshold is higher on compute version 1.1 and earlier
 * GPUs than it is on compute version 1.2 GPUs).
*
* @param[out] outKeys Output result of reorderDataKeysOnly()
* @param[in] keys Keys to be reordered
* @param[in] blockOffsets Start offset for each block
* @param[in] offsets Offset of each radix within each block
* @param[in] sizes Number of elements in a block
* @param[in] numElements Total number of elements
* @param[in] totalBlocks Total number of blocks
*/
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop>
__global__ void
LAUNCH_BOUNDS(SORT_CTA_SIZE)
reorderDataKeysOnly(uint  *outKeys,
                    uint2 *keys,
                    uint  *blockOffsets,
                    uint  *offsets,
                    uint  *sizes,
                    uint   numElements,
                    uint   totalBlocks)
{
    // Keys-only variant of reorderData; shared staging for this CTA's keys.
    __shared__ uint2 sKeys2[SORT_CTA_SIZE];
    __shared__ uint sOffsets[16];      // global start of each radix digit for this block
    __shared__ uint sBlockOffsets[16]; // local start of each radix digit within the block

    // Scalar view of the uint2 staging array.
    uint *sKeys1 = (uint*)sKeys2;

    uint blockId = blockIdx.x;

    while (!loop || blockId < totalBlocks)
    {
        uint i = blockId * blockDim.x + threadIdx.x;

        // handle non-full last block if array is not multiple of 1024 numElements
        if (!fullBlocks && (((i + 1) << 1) > numElements))
        {
            uint *keys1 = (uint*)keys;
            uint j = i << 1;

            sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
            j++;
            sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
        }
        else
        {
            sKeys2[threadIdx.x] = keys[i];
        }

        if (!manualCoalesce)
        {
            // Direct path (compute >= 1.2): each thread computes the global
            // destination of its two keys and stores them directly.
            if (threadIdx.x < 16)
            {
                sOffsets[threadIdx.x]      = offsets[threadIdx.x * totalBlocks + blockId];
                sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
            }
            __syncthreads();

            // destination = digit's global base + position within the digit's run.
            uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
            uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];

            if (fullBlocks || globalOffset < numElements)
            {
                outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
            }

            radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF;
            globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix];

            if (fullBlocks || globalOffset < numElements)
            {
                outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]);
            }
        }
        else
        {
            __shared__ uint sSizes[16];

            if (threadIdx.x < 16)
            {
                sOffsets[threadIdx.x]      = offsets[threadIdx.x * totalBlocks + blockId];
                sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
                sSizes[threadIdx.x]        = sizes[threadIdx.x * totalBlocks + blockId];
            }
            __syncthreads();

            // 1 half-warp is responsible for writing out all values for 1 radix.
            // Loops if there are more than 16 values to be written out.
            // All start indices are rounded down to the nearest multiple of 16, and
            // all end indices are rounded up to the nearest multiple of 16.
            // Thus it can do extra work if the start and end indices are not multiples of 16
            // This is bounded by a factor of 2 (it can do 2X more work at most).
            const uint halfWarpID     = threadIdx.x >> 4;
            const uint halfWarpOffset = threadIdx.x & 0xF;
            const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;

            uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
            uint endPos   = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
                            ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
            uint numIterations = endPos - startPos;

            uint outOffset = startPos + halfWarpOffset;
            uint inOffset  = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;

            for (uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
            {
                // Guard against the rounded-down start and rounded-up end.
                if ((outOffset >= sOffsets[halfWarpID]) &&
                    (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
                {
                    if (blockId < totalBlocks - 1 || outOffset < numElements)
                    {
                        outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
                    }
                }
            }
        }

        if (loop)
        {
            blockId += gridDim.x;
            // Barrier before the shared staging buffer is overwritten by the
            // next persistent-CTA iteration.
            __syncthreads();
        }
        else
            break;
    }
}
/** @} */ // end radixsort functions
/** @} */ // end cudpp_kernel | the_stack |
* \test Tests sparse-matrix-dense-matrix products.
**/
//
// include necessary system headers
//
#include <iostream>
#include <cmath>
#include <vector>
#include <map>
//
// ViennaCL includes
//
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/direct_solve.hpp"
#include "viennacl/compressed_matrix.hpp"
#include "viennacl/coordinate_matrix.hpp"
#include "viennacl/ell_matrix.hpp"
#include "viennacl/hyb_matrix.hpp"
#include "viennacl/linalg/prod.hpp" //generic matrix-vector product
#include "viennacl/linalg/norm_2.hpp" //generic l2-norm for vectors
#include "viennacl/io/matrix_market.hpp"
#include "viennacl/tools/random.hpp"
// Compares two dense matrices entry-wise using relative error and reports the
// first mismatch to stdout.
//
// @param ref_mat  Reference (expected) matrix.
// @param mat      Matrix under test.
// @param eps      Maximum tolerated relative error per entry.
// @return EXIT_SUCCESS if dimensions match and all entries agree within eps,
//         EXIT_FAILURE otherwise.
//
// Fixes over the previous version: empty matrices no longer index [0]
// (undefined behavior), and entries that are exactly zero in both matrices
// compare equal instead of producing a 0/0 = NaN relative error.
template<typename NumericT>
int check_matrices(std::vector<std::vector<NumericT> > const & ref_mat,
                   std::vector<std::vector<NumericT> > const & mat,
                   NumericT eps)
{
  if (ref_mat.size() != mat.size())
    return EXIT_FAILURE;
  // Two empty matrices are trivially equal; checking here also avoids the
  // out-of-bounds [0] access below.
  if (ref_mat.empty())
  {
    std::cout << "Everything went well!" << std::endl;
    return EXIT_SUCCESS;
  }
  if (ref_mat[0].size() != mat[0].size())
    return EXIT_FAILURE;
  for (std::size_t i = 0; i < ref_mat.size(); i++)
    for (std::size_t j = 0; j < ref_mat[0].size(); j++)
    {
      NumericT denominator = std::max(std::abs(ref_mat[i][j]), std::abs(mat[i][j]));
      // Guard against 0/0 = NaN when both entries are exactly zero.
      NumericT rel_error = (denominator > NumericT(0))
                         ? std::abs(ref_mat[i][j] - mat[i][j]) / denominator
                         : NumericT(0);
      if (rel_error > eps)
      {
        std::cout << "ERROR: Verification failed at (" << i <<", "<< j << "): "
                  << " Expected: " << ref_mat[i][j] << ", got: " << mat[i][j] << " (relative error: " << rel_error << ")" << std::endl;
        return EXIT_FAILURE;
      }
    }
  std::cout << "Everything went well!" << std::endl;
  return EXIT_SUCCESS;
}
// Computes the reference product C = A * B on the host, where A is sparse
// (one std::map of column->value per row) and B, C are dense.
// C must be pre-initialized with zeros; contributions are accumulated into it.
template<typename IndexT, typename NumericT>
void compute_reference_result(std::vector<std::map<IndexT, NumericT> > const & A,
                              std::vector<std::vector<NumericT> > const & B,
                              std::vector<std::vector<NumericT> > & C)
{
  typedef typename std::map<IndexT, NumericT>::const_iterator RowIterator;
  for (std::size_t row = 0; row < C.size(); ++row)
  {
    std::vector<NumericT> & C_row = C[row];
    for (RowIterator entry = A[row].begin(); entry != A[row].end(); ++entry)
    {
      // Each nonzero A(row, col) scales the corresponding row of B.
      std::vector<NumericT> const & B_row = B[entry->first];
      NumericT a_val = entry->second;
      for (std::size_t col = 0; col < C_row.size(); ++col)
        C_row[col] += a_val * B_row[col];
    }
  }
}
// Clears 'temp', downloads the device result C into it and compares it
// against the reference result std_C. Prints "Test failed!" to stderr on
// mismatch and returns EXIT_SUCCESS / EXIT_FAILURE.
template<typename NumericT, typename DeviceMatrixT>
int download_and_check(DeviceMatrixT const & C,
                       std::vector<std::vector<NumericT> > & temp,
                       std::vector<std::vector<NumericT> > const & std_C,
                       NumericT epsilon)
{
  for (std::size_t i=0; i<temp.size(); ++i)
    for (std::size_t j=0; j<temp[i].size(); ++j)
      temp[i][j] = 0;
  viennacl::copy(C, temp);
  int result = check_matrices(std_C, temp, epsilon);
  if (result != EXIT_SUCCESS)
    std::cerr << "Test failed!" << std::endl;
  return result;
}

// Runs sparse * dense products (CSR, ELL, COO, HYB formats, each with a
// plain and a transposed dense factor) against a host-computed reference.
//
// @param epsilon  Per-entry relative error tolerance.
// @return EXIT_SUCCESS if all eight products verify, otherwise the status of
//         the first failing product.
template<typename NumericT, typename ResultLayoutT, typename FactorLayoutT>
int test(NumericT epsilon)
{
  int retVal = EXIT_SUCCESS;
  viennacl::tools::uniform_random_numbers<NumericT> randomNumber;
  // Load the sparse test matrix A from disk.
  std::vector<std::map<unsigned int, NumericT> > std_A;
  if (viennacl::io::read_matrix_market_file(std_A, "../examples/testdata/mat65k.mtx") == EXIT_FAILURE)
  {
    std::cout << "Error reading Matrix file" << std::endl;
    return EXIT_FAILURE;
  }
  // add some extra weight to diagonal in order to avoid issues with round-off errors
  for (std::size_t i=0; i<std_A.size(); ++i)
    std_A[i][static_cast<unsigned int>(i)] *= NumericT(1.5);
  std::size_t cols_rhs = 5;
  // Device copies of A in all supported sparse formats.
  viennacl::compressed_matrix<NumericT> compressed_A;
  viennacl::ell_matrix<NumericT> ell_A;
  viennacl::coordinate_matrix<NumericT> coo_A;
  viennacl::hyb_matrix<NumericT> hyb_A;
  std::vector<std::vector<NumericT> > std_C(std_A.size(), std::vector<NumericT>(cols_rhs));
  viennacl::matrix<NumericT, ResultLayoutT> C;
  viennacl::copy(std_A, compressed_A);
  viennacl::copy(std_A, ell_A);
  viennacl::copy(std_A, coo_A);
  viennacl::copy(std_A, hyb_A);
  // Random dense right-hand side B, uploaded as B1.
  std::vector<std::vector<NumericT> > std_B(std_A.size(), std::vector<NumericT>(cols_rhs));
  viennacl::matrix<NumericT, FactorLayoutT> B1(std_A.size(), cols_rhs);
  viennacl::matrix<NumericT, FactorLayoutT> B2;
  std::vector<std::vector<NumericT> > temp(std_A.size(), std::vector<NumericT>(cols_rhs));
  for (unsigned int i = 0; i < std_B.size(); i++)
    for (unsigned int j = 0; j < std_B[i].size(); j++)
      std_B[i][j] = NumericT(0.5) + NumericT(0.1) * randomNumber();
  viennacl::copy(std_B, B1);
  /* gold result */
  compute_reference_result(std_A, std_B, std_C);
  /******************************************************************/
  std::cout << "Testing compressed(CSR) lhs * dense rhs" << std::endl;
  C = viennacl::linalg::prod(compressed_A, B1);
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(ELL) lhs * dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(ell_A, B1);
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(COO) lhs * dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(coo_A, B1);
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(HYB) lhs * dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(hyb_A, B1);
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  ///////////// transposed right hand side
  // B2 holds B1 transposed; the products below multiply by trans(B2) == B1,
  // exercising the transposed-factor code paths.
  B2 = viennacl::trans(B1);
  /******************************************************************/
  std::cout << std::endl << "Testing compressed(CSR) lhs * transposed dense rhs:" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(compressed_A, viennacl::trans(B2));
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(ELL) lhs * transposed dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(ell_A, viennacl::trans(B2));
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(COO) lhs * transposed dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(coo_A, viennacl::trans(B2));
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  std::cout << "Testing compressed(HYB) lhs * transposed dense rhs" << std::endl;
  C.clear();
  C = viennacl::linalg::prod(hyb_A, viennacl::trans(B2));
  retVal = download_and_check(C, temp, std_C, epsilon);
  if (retVal != EXIT_SUCCESS)
    return retVal;
  /******************************************************************/
  if (retVal == EXIT_SUCCESS) {
    std::cout << "Tests passed successfully" << std::endl;
  }
  return retVal;
}
//
// -------------------------------------------------------------
//
int main()
{
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "## Test :: Sparse-Dense Matrix Multiplication" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
int retval = EXIT_SUCCESS;
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
{
typedef float NumericT;
NumericT epsilon = static_cast<NumericT>(1E-4);
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: float" << std::endl;
std::cout << " layout: row-major, row-major" << std::endl;
retval = test<NumericT, viennacl::row_major, viennacl::row_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: float" << std::endl;
std::cout << " layout: row-major, column-major" << std::endl;
retval = test<NumericT, viennacl::row_major, viennacl::column_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: float" << std::endl;
std::cout << " layout: column-major, row-major" << std::endl;
retval = test<NumericT, viennacl::column_major, viennacl::row_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: float" << std::endl;
std::cout << " layout: column-major, column-major" << std::endl;
retval = test<NumericT, viennacl::column_major, viennacl::column_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
}
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
{
typedef double NumericT;
NumericT epsilon = 1.0E-12;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: double" << std::endl;
std::cout << " layout: row-major, row-major" << std::endl;
retval = test<NumericT, viennacl::row_major, viennacl::row_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: double" << std::endl;
std::cout << " layout: row-major, column-major" << std::endl;
retval = test<NumericT, viennacl::row_major, viennacl::column_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: double" << std::endl;
std::cout << " layout: column-major, row-major" << std::endl;
retval = test<NumericT, viennacl::column_major, viennacl::row_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
std::cout << "# Testing setup:" << std::endl;
std::cout << " eps: " << epsilon << std::endl;
std::cout << " numeric: double" << std::endl;
std::cout << " layout: column-major, column-major" << std::endl;
retval = test<NumericT, viennacl::column_major, viennacl::column_major>(epsilon);
if ( retval == EXIT_SUCCESS )
std::cout << "# Test passed" << std::endl;
else
return retval;
}
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
}
#ifdef VIENNACL_WITH_OPENCL
else
std::cout << "No double precision support, skipping test..." << std::endl;
#endif
std::cout << std::endl;
std::cout << "------- Test completed --------" << std::endl;
std::cout << std::endl;
return retval;
} | the_stack |
#include "nvblox/core/hash.h"
#include "nvblox/core/indexing.h"
#include "nvblox/core/types.h"
#include "nvblox/core/unified_vector.h"
#include "nvblox/integrators/ray_caster.h"
#include "nvblox/utils/timing.h"
namespace nvblox {
// Creates the CUDA stream on which all of this object's kernel launches and
// async copies are enqueued.
FrustumCalculator::FrustumCalculator() { cudaStreamCreate(&cuda_stream_); }
// Destroys the owned CUDA stream.
FrustumCalculator::~FrustumCalculator() { cudaStreamDestroy(cuda_stream_); }
// Returns the pixel subsampling factor used when raycasting through the
// depth image (1 = one ray per pixel).
unsigned int FrustumCalculator::raycast_subsampling_factor() const {
  return raycast_subsampling_factor_;
}
// Sets the pixel subsampling factor used when raycasting through the depth
// image. Must be > 0 (enforced by the CHECK); larger values cast fewer rays.
void FrustumCalculator::raycast_subsampling_factor(
    unsigned int raycast_subsampling_factor) {
  CHECK_GT(raycast_subsampling_factor, 0);
  raycast_subsampling_factor_ = raycast_subsampling_factor;
}
// AABB linear indexing
// - Linearization is x-major: x varies fastest, then y, then z.
// - The linear index is relative to the AABB (aabb_min maps to 0), whereas
//   the 3D index is absolute w.r.t. the layer origin.
__host__ __device__ inline size_t layerIndexToAabbLinearIndex(
    const Index3D& index, const Index3D& aabb_min, const Index3D& aabb_size) {
  const Index3D rel = index - aabb_min;
  // Horner-style flattening: ((z * size_y) + y) * size_x + x.
  return rel.x() + aabb_size.x() * (rel.y() + aabb_size.y() * rel.z());
}
// Inverse of layerIndexToAabbLinearIndex: converts an AABB-relative linear
// index back to an absolute layer-space 3D block index.
__host__ __device__ inline Index3D aabbLinearIndexToLayerIndex(
    const size_t lin_idx, const Index3D& aabb_min, const Index3D& aabb_size) {
  const int x = lin_idx % aabb_size.x();
  const int y = (lin_idx / aabb_size.x()) % aabb_size.y();
  const int z = lin_idx / (aabb_size.x() * aabb_size.y());
  return Index3D(x, y, z) + aabb_min;
}
// Marks the AABB-grid cell for a layer-space block index, if that index lies
// inside the AABB. Indices below aabb_min wrap to a huge unsigned value via
// the size_t conversion and thus also fail the bounds check.
__device__ void setIndexUpdated(const Index3D& index_to_update,
                                const Index3D& aabb_min,
                                const Index3D& aabb_size, bool* aabb_updated) {
  const size_t flat_idx =
      layerIndexToAabbLinearIndex(index_to_update, aabb_min, aabb_size);
  const size_t num_cells = aabb_size.x() * aabb_size.y() * aabb_size.z();
  if (flat_idx < num_cells) {
    aabb_updated[flat_idx] = true;
  }
}
// Scans the flat occupancy grid and appends the layer-space 3D index of every
// marked cell to 'indices'. T must provide reserve() and push_back(Index3D).
template <typename T>
void convertAabbUpdatedToVector(const Index3D& aabb_min,
                                const Index3D& aabb_size,
                                size_t aabb_linear_size, bool* aabb_updated,
                                T* indices) {
  // Reserve for the worst case (all cells marked) to avoid reallocations.
  indices->reserve(aabb_linear_size);
  for (size_t lin_idx = 0; lin_idx < aabb_linear_size; ++lin_idx) {
    if (!aabb_updated[lin_idx]) {
      continue;
    }
    indices->push_back(
        aabbLinearIndexToLayerIndex(lin_idx, aabb_min, aabb_size));
  }
}
// One thread per depth pixel. Each thread back-projects its pixel to the 3D
// point at (depth + truncation_distance_m) along the pixel ray, transforms it
// into the layer frame, and marks the block containing that point in the AABB
// occupancy grid. Pixels with non-positive depth are skipped; depths beyond
// max_integration_distance_m (when that limit is positive) are clamped to it.
__global__ void getBlockIndicesInImageKernel(
    const Transform T_L_C, const Camera camera, const float* image, int rows,
    int cols, const float block_size, const float max_integration_distance_m,
    const float truncation_distance_m, const Index3D aabb_min,
    const Index3D aabb_size, bool* aabb_updated) {
  // First, figure out which pixel we're in.
  int pixel_row = blockIdx.x * blockDim.x + threadIdx.x;
  int pixel_col = blockIdx.y * blockDim.y + threadIdx.y;
  // Threads outside the image (grid rounding) exit early.
  if (pixel_row >= rows || pixel_col >= cols) {
    return;
  }
  // Look up the pixel we care about.
  float depth = image::access<float>(pixel_row, pixel_col, cols, image);
  if (depth <= 0.0f) {
    return;
  }
  if (max_integration_distance_m > 0.0f && depth > max_integration_distance_m) {
    depth = max_integration_distance_m;
  }
  // Back-project to the far end of the truncation band, in camera frame.
  Vector3f p_C = (depth + truncation_distance_m) *
                 camera.rayFromPixelIndices(Index2D(pixel_col, pixel_row));
  // Transform into the layer frame.
  Vector3f p_L = T_L_C * p_C;
  // Mark the block containing this point.
  Index3D block_index = getBlockIndexFromPositionInLayer(block_size, p_L);
  setIndexUpdated(block_index, aabb_min, aabb_size, aabb_updated);
}
// For each input block, raycasts from the camera origin to 9 points of the
// block (its 8 corners plus its center, selected by threadIdx.y) and marks
// every block the ray traverses. Both endpoints are expressed in units of
// block_size so the raycaster steps through block indices directly.
// threadIdx.x selects which of the blockDim.x blocks this thread handles.
__global__ void raycastToBlocksKernel(int num_blocks, Index3D* block_indices,
                                      const Transform T_L_C, float block_size,
                                      const Index3D aabb_min,
                                      const Index3D aabb_size,
                                      bool* aabb_updated) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int corner_index = threadIdx.y;
  if (index >= num_blocks) {
    return;
  }
  // Offsets (in block units) of the 8 corners and the center of a block.
  constexpr float corner_increment_table[9][3] = {
      {0.0f, 0.0f, 0.0f},  // NOLINT
      {1.0f, 0.0f, 0.0f},  // NOLINT
      {0.0f, 1.0f, 0.0f},  // NOLINT
      {0.0f, 0.0f, 1.0f},  // NOLINT
      {1.0f, 1.0f, 0.0f},  // NOLINT
      {1.0f, 0.0f, 1.0f},  // NOLINT
      {0.0f, 1.0f, 1.0f},  // NOLINT
      {1.0f, 1.0f, 1.0f},  // NOLINT
      {0.5f, 0.5f, 0.5f},  // NOLINT
  };
  const Vector3f increment(corner_increment_table[corner_index][0],
                           corner_increment_table[corner_index][1],
                           corner_increment_table[corner_index][2]);
  const Index3D& block_index = block_indices[index];
  // Ray from the camera origin to this corner/center, in block-size units.
  RayCaster raycaster(T_L_C.translation() / block_size,
                      block_index.cast<float>() + increment);
  Index3D ray_index;
  while (raycaster.nextRayIndex(&ray_index)) {
    setIndexUpdated(ray_index, aabb_min, aabb_size, aabb_updated);
  }
}
// One thread per subsampled pixel (stride raycast_subsampling_factor in both
// image dimensions). Each thread back-projects its pixel to the point at
// (depth + truncation_distance_m), marks the block containing that point,
// then raycasts from the camera origin to the point (in block-size units),
// marking every block along the ray. Rays that overhang the image edge by
// less than one subsampling stride are clamped to the last row/column so the
// image border is still covered.
__global__ void combinedBlockIndicesInImageKernel(
    const Transform T_L_C, const Camera camera, const float* image, int rows,
    int cols, const float block_size, const float max_integration_distance_m,
    const float truncation_distance_m, int raycast_subsampling_factor,
    const Index3D aabb_min, const Index3D aabb_size, bool* aabb_updated) {
  // First, figure out which pixel we're in.
  const int ray_idx_row = blockIdx.x * blockDim.x + threadIdx.x;
  const int ray_idx_col = blockIdx.y * blockDim.y + threadIdx.y;
  int pixel_row = ray_idx_row * raycast_subsampling_factor;
  int pixel_col = ray_idx_col * raycast_subsampling_factor;
  // Threads more than one stride past the image edge exit early.
  if (pixel_row >= (rows + raycast_subsampling_factor - 1) ||
      pixel_col >= (cols + raycast_subsampling_factor - 1)) {
    return;
  } else {
    // Move remaining overhanging pixels back to the borders.
    if (pixel_row >= rows) {
      pixel_row = rows - 1;
    }
    if (pixel_col >= cols) {
      pixel_col = cols - 1;
    }
  }
  // Look up the pixel we care about.
  float depth = image::access<float>(pixel_row, pixel_col, cols, image);
  if (depth <= 0.0f) {
    return;
  }
  if (max_integration_distance_m > 0.0f && depth > max_integration_distance_m) {
    depth = max_integration_distance_m;
  }
  // Back-project to the far end of the truncation band, in camera frame.
  Vector3f p_C = (depth + truncation_distance_m) *
                 camera.rayFromPixelIndices(Index2D(pixel_col, pixel_row));
  // Transform into the layer frame.
  Vector3f p_L = T_L_C * p_C;
  // Mark the block containing the ray endpoint.
  Index3D block_index = getBlockIndexFromPositionInLayer(block_size, p_L);
  setIndexUpdated(block_index, aabb_min, aabb_size, aabb_updated);
  // Raycast from the camera origin to the endpoint, in block-size units, and
  // mark every traversed block.
  RayCaster raycaster(T_L_C.translation() / block_size, p_L / block_size);
  Index3D ray_index = Index3D::Zero();
  while (raycaster.nextRayIndex(&ray_index)) {
    setIndexUpdated(ray_index, aabb_min, aabb_size, aabb_updated);
  }
}
// Returns the list of block indices (in layer coordinates) potentially
// touched when integrating 'depth_frame' taken from pose T_L_C.
//
// Strategy: compute the view-frustum AABB in block coordinates, maintain a
// flat boolean occupancy grid over that AABB on the device, mark in-view
// blocks with a raycasting kernel, then copy the grid to the host and
// convert it to a vector of 3D block indices.
std::vector<Index3D> FrustumCalculator::getBlocksInImageViewCuda(
    const DepthImage& depth_frame, const Transform& T_L_C, const Camera& camera,
    const float block_size, const float truncation_distance_m,
    const float max_integration_distance_m) {
  timing::Timer setup_timer("in_view/setup");
  // AABB of the camera frustum, in layer coordinates.
  const Frustum frustum =
      camera.getViewFrustum(T_L_C, 0.0f, max_integration_distance_m);
  const AxisAlignedBoundingBox aabb_L = frustum.getAABB();
  // Block-index extent of the AABB (inclusive at both ends).
  const Index3D min_index =
      getBlockIndexFromPositionInLayer(block_size, aabb_L.min());
  const Index3D max_index =
      getBlockIndexFromPositionInLayer(block_size, aabb_L.max());
  const Index3D aabb_size = max_index - min_index + Index3D::Ones();
  const size_t aabb_linear_size = aabb_size.x() * aabb_size.y() * aabb_size.z();
  // Grow the persistent device/host occupancy buffers if this view needs
  // more space; over-allocate to amortize reallocation across frames.
  if (aabb_linear_size > aabb_device_buffer_.size()) {
    constexpr float kBufferExpansionFactor = 1.5f;
    const int new_size =
        static_cast<int>(kBufferExpansionFactor * aabb_linear_size);
    aabb_device_buffer_.reserve(new_size);
    aabb_host_buffer_.reserve(new_size);
  }
  // Zero the occupancy grid on this object's stream so the clear is ordered
  // before the marking kernels (previously issued on the default stream).
  checkCudaErrors(cudaMemsetAsync(aabb_device_buffer_.data(), 0,
                                  sizeof(bool) * aabb_linear_size,
                                  cuda_stream_));
  aabb_device_buffer_.resize(aabb_linear_size);
  aabb_host_buffer_.resize(aabb_linear_size);
  setup_timer.Stop();
  // Mark in-view blocks with the selected raycasting strategy.
  if (raycast_to_pixels_) {
    getBlocksByRaycastingPixels(T_L_C, camera, depth_frame, block_size,
                                truncation_distance_m,
                                max_integration_distance_m, min_index,
                                aabb_size, aabb_device_buffer_.data());
  } else {
    getBlocksByRaycastingCorners(T_L_C, camera, depth_frame, block_size,
                                 truncation_distance_m,
                                 max_integration_distance_m, min_index,
                                 aabb_size, aabb_device_buffer_.data());
  }
  // Copy the grid back and convert it to a list of block indices.
  timing::Timer output_timer("in_view/output");
  checkCudaErrors(cudaMemcpyAsync(aabb_host_buffer_.data(),
                                  aabb_device_buffer_.data(),
                                  sizeof(bool) * aabb_linear_size,
                                  cudaMemcpyDeviceToHost, cuda_stream_));
  checkCudaErrors(cudaStreamSynchronize(cuda_stream_));
  checkCudaErrors(cudaPeekAtLastError());
  std::vector<Index3D> output_vector;
  convertAabbUpdatedToVector<std::vector<Index3D>>(
      min_index, aabb_size, aabb_linear_size, aabb_host_buffer_.data(),
      &output_vector);
  output_timer.Stop();
  // Empty timer kept so the timing breakdown retains the "in_view/destroy"
  // entry.
  timing::Timer destroy_timer("in_view/destroy");
  destroy_timer.Stop();
  return output_vector;
}
// Corner-raycasting strategy: (a) mark the blocks containing the
// back-projected image points, (b) gather the marked block indices into a
// unified-memory vector, then (c) raycast from the camera origin to 9 points
// of each marked block (8 corners + center), marking every traversed block.
void FrustumCalculator::getBlocksByRaycastingCorners(
    const Transform& T_L_C, const Camera& camera, const DepthImage& depth_frame,
    float block_size, const float truncation_distance_m,
    const float max_integration_distance_m, const Index3D& min_index,
    const Index3D& aabb_size, bool* aabb_updated_cuda) {
  // Get the blocks touched by the ray endpoints.
  // Process the image in 16x16 = 256-thread blocks.
  constexpr int kThreadDim = 16;
  int rounded_rows = static_cast<int>(
      std::ceil(depth_frame.rows() / static_cast<float>(kThreadDim)));
  int rounded_cols = static_cast<int>(
      std::ceil(depth_frame.cols() / static_cast<float>(kThreadDim)));
  dim3 block_dim(rounded_rows, rounded_cols);
  dim3 thread_dim(kThreadDim, kThreadDim);
  timing::Timer image_blocks_timer("in_view/get_image_blocks");
  getBlockIndicesInImageKernel<<<block_dim, thread_dim, 0, cuda_stream_>>>(
      T_L_C, camera, depth_frame.dataConstPtr(), depth_frame.rows(),
      depth_frame.cols(), block_size, max_integration_distance_m,
      truncation_distance_m, min_index, aabb_size, aabb_updated_cuda);
  checkCudaErrors(cudaStreamSynchronize(cuda_stream_));
  checkCudaErrors(cudaPeekAtLastError());
  image_blocks_timer.Stop();
  // Collect the endpoint blocks into a vector the raycast kernel can read.
  timing::Timer image_blocks_copy_timer("in_view/image_blocks_copy");
  unified_vector<Index3D> initial_vector;
  const size_t aabb_linear_size = aabb_size.x() * aabb_size.y() * aabb_size.z();
  initial_vector.reserve(aabb_linear_size / 3);
  convertAabbUpdatedToVector<unified_vector<Index3D>>(
      min_index, aabb_size, aabb_linear_size, aabb_updated_cuda,
      &initial_vector);
  image_blocks_copy_timer.Stop();
  // Call the kernel to do raycasting: 40 blocks x 9 corner rays per thread
  // block (360 threads).
  // NOTE(review): if no endpoint block was marked, raycast_block_dim is 0,
  // which makes the launch configuration invalid — confirm callers always
  // produce at least one marked block (e.g. a non-empty depth image).
  timing::Timer raycast_blocks_timer("in_view/raycast_blocks");
  int num_initial_blocks = initial_vector.size();
  constexpr int kNumCorners = 9;
  constexpr int kNumBlocksPerThreadBlock = 40;
  int raycast_block_dim = static_cast<int>(std::ceil(
      static_cast<float>(num_initial_blocks) / kNumBlocksPerThreadBlock));
  dim3 raycast_thread_dim(kNumBlocksPerThreadBlock, kNumCorners);
  raycastToBlocksKernel<<<raycast_block_dim, raycast_thread_dim, 0,
                          cuda_stream_>>>(
      num_initial_blocks, initial_vector.data(), T_L_C, block_size, min_index,
      aabb_size, aabb_updated_cuda);
  checkCudaErrors(cudaStreamSynchronize(cuda_stream_));
  checkCudaErrors(cudaPeekAtLastError());
  raycast_blocks_timer.Stop();
}
// Pixel-raycasting strategy: a single kernel back-projects (subsampled)
// pixels and raycasts from the camera origin through each back-projected
// point, marking every traversed block.
void FrustumCalculator::getBlocksByRaycastingPixels(
    const Transform& T_L_C, const Camera& camera, const DepthImage& depth_frame,
    float block_size, const float truncation_distance_m,
    const float max_integration_distance_m, const Index3D& min_index,
    const Index3D& aabb_size, bool* aabb_updated_cuda) {
  // Number of rays per dimension: image size / subsampling rate. The +1
  // rounds the ray grid up so the image border still gets a (clamped) ray
  // — TODO confirm this matches the clamp logic in the kernel.
  const int num_subsampled_rows =
      std::ceil(static_cast<float>(depth_frame.rows() + 1) /
                static_cast<float>(raycast_subsampling_factor_));
  const int num_subsampled_cols =
      std::ceil(static_cast<float>(depth_frame.cols() + 1) /
                static_cast<float>(raycast_subsampling_factor_));
  // Process the ray grid in 16x16 = 256-thread blocks.
  constexpr int kThreadDim = 16;
  const int rounded_rows = static_cast<int>(
      std::ceil(num_subsampled_rows / static_cast<float>(kThreadDim)));
  const int rounded_cols = static_cast<int>(
      std::ceil(num_subsampled_cols / static_cast<float>(kThreadDim)));
  dim3 block_dim(rounded_rows, rounded_cols);
  dim3 thread_dim(kThreadDim, kThreadDim);
  timing::Timer combined_kernel_timer("in_view/combined_kernel");
  combinedBlockIndicesInImageKernel<<<block_dim, thread_dim, 0, cuda_stream_>>>(
      T_L_C, camera, depth_frame.dataConstPtr(), depth_frame.rows(),
      depth_frame.cols(), block_size, max_integration_distance_m,
      truncation_distance_m, raycast_subsampling_factor_, min_index, aabb_size,
      aabb_updated_cuda);
  checkCudaErrors(cudaStreamSynchronize(cuda_stream_));
  checkCudaErrors(cudaPeekAtLastError());
  combined_kernel_timer.Stop();
}
} // namespace nvblox | the_stack |
int unir(int *res, int rows, int tipo, int **ret, int final)
{
thrust::device_ptr<int> pt, re;
thrust::device_ptr<s2> pt2, re2;
thrust::device_ptr<s3> pt3, re3;
thrust::device_ptr<s4> pt4, re4;
thrust::device_ptr<s5> pt5, re5;
thrust::device_ptr<s6> pt6, re6;
thrust::device_ptr<s7> pt7, re7;
thrust::device_ptr<s8> pt8, re8;
thrust::device_ptr<s9> pt9, re9;
thrust::device_ptr<s10> pt10, re10;
thrust::device_ptr<s11> pt11, re11;
thrust::device_ptr<s12> pt12, re12;
thrust::device_ptr<s13> pt13, re13;
thrust::device_ptr<s14> pt14, re14;
thrust::device_ptr<s15> pt15, re15;
thrust::device_ptr<s16> pt16, re16;
thrust::device_ptr<s17> pt17, re17;
thrust::device_ptr<s18> pt18, re18;
thrust::device_ptr<s19> pt19, re19;
thrust::device_ptr<s20> pt20, re20;
s2 *t2;
s3 *t3;
s4 *t4;
s5 *t5;
s6 *t6;
s7 *t7;
s8 *t8;
s9 *t9;
s10 *t10;
s11 *t11;
s12 *t12;
s13 *t13;
s14 *t14;
s15 *t15;
s16 *t16;
s17 *t17;
s18 *t18;
s19 *t19;
s20 *t20;
int flag, nrows, *nres, size;
#if TIMER
cuda_stats.unions++;
#endif
switch(tipo)
{
case 1:
{
pt = thrust::device_pointer_cast(res);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt, pt + rows);
if(final)
{
re = thrust::unique(pt, pt + rows, q1());
re = thrust::unique(pt, re);
}
else
re = thrust::unique(pt, pt + rows);
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt, re);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 2:
{
t2 = (s2*)res;
pt2 = thrust::device_pointer_cast(t2);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt2, pt2 + rows, o2());
if(final)
{
re2 = thrust::unique(pt2, pt2 + rows, q2());
re2 = thrust::unique(pt2, re2, p2());
}
else
re2 = thrust::unique(pt2, pt2 + rows, p2());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt2, re2);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 3:
{
t3 = (s3*)res;
pt3 = thrust::device_pointer_cast(t3);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt3, pt3 + rows, o3());
if(final)
{
re3 = thrust::unique(pt3, pt3 + rows, q3());
re3 = thrust::unique(pt3, re3, p3());
}
else
re3 = thrust::unique(pt3, pt3 + rows, p3());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt3, re3);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 4:
{
t4 = (s4*)res;
pt4 = thrust::device_pointer_cast(t4);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt4, pt4 + rows, o4());
if(final)
{
re4 = thrust::unique(pt4, pt4 + rows, q4());
re4 = thrust::unique(pt4, re4, p4());
}
else
re4 = thrust::unique(pt4, pt4 + rows, p4());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt4, re4);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 5:
{
t5 = (s5*)res;
pt5 = thrust::device_pointer_cast(t5);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt5, pt5 + rows, o5());
if(final)
{
re5 = thrust::unique(pt5, pt5 + rows, q5());
re5 = thrust::unique(pt5, re5, p5());
}
else
re5 = thrust::unique(pt5, pt5 + rows, p5());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt5, re5);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 6:
{
t6 = (s6*)res;
pt6 = thrust::device_pointer_cast(t6);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt6, pt6 + rows, o6());
if(final)
{
re6 = thrust::unique(pt6, pt6 + rows, q6());
re6 = thrust::unique(pt6, re6, p6());
}
else
re6 = thrust::unique(pt6, pt6 + rows, p6());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt6, re6);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 7:
{
t7 = (s7*)res;
pt7 = thrust::device_pointer_cast(t7);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt7, pt7 + rows, o7());
if(final)
{
re7 = thrust::unique(pt7, pt7 + rows, q7());
re7 = thrust::unique(pt7, re7, p7());
}
else
re7 = thrust::unique(pt7, pt7 + rows, p7());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt7, re7);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 8:
{
t8 = (s8*)res;
pt8 = thrust::device_pointer_cast(t8);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt8, pt8 + rows, o8());
if(final)
{
re8 = thrust::unique(pt8, pt8 + rows, q8());
re8 = thrust::unique(pt8, re8, p8());
}
else
re8 = thrust::unique(pt8, pt8 + rows, p8());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt8, re8);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 9:
{
t9 = (s9*)res;
pt9 = thrust::device_pointer_cast(t9);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt9, pt9 + rows, o9());
if(final)
{
re9 = thrust::unique(pt9, pt9 + rows, q9());
re9 = thrust::unique(pt9, re9, p9());
}
else
re9 = thrust::unique(pt9, pt9 + rows, p9());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt9, re9);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 10:
{
t10 = (s10*)res;
pt10 = thrust::device_pointer_cast(t10);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt10, pt10 + rows, o10());
if(final)
{
re10 = thrust::unique(pt10, pt10 + rows, q10());
re10 = thrust::unique(pt10, re10, p10());
}
else
re10 = thrust::unique(pt10, pt10 + rows, p10());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt10, re10);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 11:
{
t11 = (s11*)res;
pt11 = thrust::device_pointer_cast(t11);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt11, pt11 + rows, o11());
if(final)
{
re11 = thrust::unique(pt11, pt11 + rows, q11());
re11 = thrust::unique(pt11, re11, p11());
}
else
re11 = thrust::unique(pt11, pt11 + rows, p11());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt11, re11);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 12:
{
t12 = (s12*)res;
pt12 = thrust::device_pointer_cast(t12);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt12, pt12 + rows, o12());
if(final)
{
re12 = thrust::unique(pt12, pt12 + rows, q12());
re12 = thrust::unique(pt12, re12, p12());
}
else
re12 = thrust::unique(pt12, pt12 + rows, p12());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt12, re12);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 13:
{
t13 = (s13*)res;
pt13 = thrust::device_pointer_cast(t13);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt13, pt13 + rows, o13());
if(final)
{
re13 = thrust::unique(pt13, pt13 + rows, q13());
re13 = thrust::unique(pt13, re13, p13());
}
else
re13 = thrust::unique(pt13, pt13 + rows, p13());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt13, re13);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 14:
{
t14 = (s14*)res;
pt14 = thrust::device_pointer_cast(t14);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt14, pt14 + rows, o14());
if(final)
{
re14 = thrust::unique(pt14, pt14 + rows, q14());
re14 = thrust::unique(pt14, re14, p14());
}
else
re14 = thrust::unique(pt14, pt14 + rows, p14());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt14, re14);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 15:
{
t15 = (s15*)res;
pt15 = thrust::device_pointer_cast(t15);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt15, pt15 + rows, o15());
if(final)
{
re15 = thrust::unique(pt15, pt15 + rows, q15());
re15 = thrust::unique(pt15, re15, p15());
}
else
re15 = thrust::unique(pt15, pt15 + rows, p15());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt15, re15);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 16:
{
t16 = (s16*)res;
pt16 = thrust::device_pointer_cast(t16);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt16, pt16 + rows, o16());
if(final)
{
re16 = thrust::unique(pt16, pt16 + rows, q16());
re16 = thrust::unique(pt16, re16, p16());
}
else
re16 = thrust::unique(pt16, pt16 + rows, p16());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt16, re16);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 17:
{
t17 = (s17*)res;
pt17 = thrust::device_pointer_cast(t17);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt17, pt17 + rows, o17());
if(final)
{
re17 = thrust::unique(pt17, pt17 + rows, q17());
re17 = thrust::unique(pt17, re17, p17());
}
else
re17 = thrust::unique(pt17, pt17 + rows, p17());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt17, re17);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 18:
{
t18 = (s18*)res;
pt18 = thrust::device_pointer_cast(t18);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt18, pt18 + rows, o18());
if(final)
{
re18 = thrust::unique(pt18, pt18 + rows, q18());
re18 = thrust::unique(pt18, re18, p18());
}
else
re18 = thrust::unique(pt18, pt18 + rows, p18());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt18, re18);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 19:
{
t19 = (s19*)res;
pt19 = thrust::device_pointer_cast(t19);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt19, pt19 + rows, o19());
if(final)
{
re19 = thrust::unique(pt19, pt19 + rows, q19());
re19 = thrust::unique(pt19, re19, p19());
}
else
re19 = thrust::unique(pt19, pt19 + rows, p19());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt19, re19);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
case 20:
{
t20 = (s20*)res;
pt20 = thrust::device_pointer_cast(t20);
flag = 0;
while(flag != 1)
{
try
{
thrust::sort(pt20, pt20 + rows, o20());
if(final)
{
re20 = thrust::unique(pt20, pt20 + rows, q20());
re20 = thrust::unique(pt20, re20, p20());
}
else
re20 = thrust::unique(pt20, pt20 + rows, p20());
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("sort/unique in unir", 0);
}
}
nrows = thrust::distance(pt20, re20);
if(nrows < rows / 2)
{
size = nrows * tipo * sizeof(int);
reservar(&nres, size);
cudaMemcpyAsync(nres, res, size, cudaMemcpyDeviceToDevice);
cudaFree(*ret);
*ret = nres;
}
return nrows;
}
}
return 0;
} | the_stack |
// COMMON
// Minimum light level applied everywhere, so unlit texels are not pure black.
const float AMBIENT = .1;
// Threads per CUDA block; used in the launch-configuration arithmetic below.
const uint BLOCK = 128;
// Horizontal resolution of the rendered 1-D screen; set by initialize().
int RES;
// Simulation rate in frames per second (host-side copy); set by initialize().
float FPS;
// Device-constant copies of the simulation parameters, filled by initialize().
__constant__ float FPS_;
__constant__ float AGENT_RADIUS;
// Half-width of the screen plane at unit distance, i.e. tan(fov/2).
__constant__ float HALF_SCREEN_WIDTH;
// Copies the simulation parameters into the host globals and their
// device-constant counterparts.
//
// agent_radius: collision radius of an agent, in world units.
// res:          horizontal resolution of the rendered screen.
// fov:          horizontal field of view, in degrees.
// fps:          simulation rate, in frames per second.
__host__ void initialize(float agent_radius, int res, float fov, float fps) {
    RES = res;
    cudaMemcpyToSymbol(AGENT_RADIUS, &agent_radius, sizeof(float));
    // Screen half-width at unit distance: tan(fov/2). Use `.5f` so the whole
    // expression stays in single precision (the previous `/2.` promoted the
    // intermediate to double only to narrow it again at the tanf call).
    const auto half_screen = tanf(CUDART_PI_F/180.f*fov*.5f);
    cudaMemcpyToSymbol(HALF_SCREEN_WIDTH, &half_screen, sizeof(float));
    FPS = fps;
    cudaMemcpyToSymbol(FPS_, &fps, sizeof(float));
}
// Returns the CUDA stream PyTorch is currently using, so our kernel launches
// are ordered with respect to the surrounding tensor operations.
at::cuda::CUDAStream stream() {
return at::cuda::getCurrentCUDAStream();
}
// SIMULATOR - COLLISIONS
// 2-D point/vector with the handful of device-side operations the collision
// and rendering code needs.
struct Point {
float x;
float y;
__device__ Point(float x, float y) : x(x), y(y) { }
// Builds a point from a length-2 tensor row (t[0] = x, t[1] = y).
__device__ Point(at::TensorAccessor<float, 1, RestrictPtrTraits, int> t) : x(t[0]), y(t[1]) { }
__device__ Point operator/(float v) const { return Point(x/v, y/v); }
__device__ Point operator*(float v) const { return Point(x*v, y*v); }
__device__ Point operator+(const Point q) const { return Point(x + q.x, y + q.y); }
__device__ Point operator-(const Point q) const { return Point(x - q.x, y - q.y); }
// Squared length; avoids the sqrt when only comparisons are needed.
__device__ float len2() const { return x*x + y*y; }
__device__ float len() const { return sqrtf(len2()); }
};
// Line segment from a to b, read from a 2x2 tensor slice
// (row 0 = endpoint a, row 1 = endpoint b).
struct Line {
Point a;
Point b;
__device__ Line(at::TensorAccessor<float, 2, RestrictPtrTraits, int> t) : a(t[0]), b(t[1]) { }
};
// 2-D cross product (z-component of the 3-D cross product of V and W).
__device__ inline float cross(Point V, Point W) {
return V.x*W.y - V.y*W.x;
}
// 2-D dot product.
__device__ inline float dot(Point V, Point W) {
return V.x*W.x + V.y*W.y;
}
// Parameters of a line-line intersection: `s` along the first line, `t` along
// the second.
struct Intersection { float s; float t; };
__device__ Intersection intersect(Point P, Point U, Point Q, Point V) {
/* Finds the intersection of two infinite lines described as P+sU, Q+tV
Returns:
s: fraction from P along U to intersection
t: fraction from Q along V to intersection
Near-parallel lines (|U x V| < 1e-3) report infinity for both parameters
rather than risking a huge, noisy quotient.
*/
const auto UxV = cross(U, V);
const bool distant = fabsf(UxV) < 1.e-3f;
float s, t;
if (distant) {
s = CUDART_INF_F;
t = CUDART_INF_F;
} else {
const auto PQ = Q - P;
// Standard 2-D solve: s = (Q-P)xV / UxV, t = (Q-P)xU / UxV.
s = cross(PQ, V)/UxV;
t = cross(PQ, U)/UxV;
}
return {s, t};
}
// Convenience overload: intersect with the infinite extension of segment L.
__device__ Intersection intersect(Point P, Point U, Line L) { return intersect(P, U, L.a, L.b - L.a); }
// Result of projecting a point onto a line: `s` is the parameter along the
// line, `d` the perpendicular distance from the point to the line.
struct Projection { float s; float d; };
__device__ Projection project(Point P, Point U, Point Q) {
/* Projects Q onto the infinite line P+sU.
Returns:
s: fraction from P along U to the projection of Q onto (P, P+U)
d: distance from Q to the projection of Q onto (P, P+U)
*/
// The 1e-6 guards the divisions below against a zero-length U.
const auto u = U.len() + 1e-6f;
const auto PQ = Q - P;
const auto s = dot(PQ, U)/(u*u);
const auto d = fabsf(cross(PQ, U))/u;
return {s, d};
}
// Convenience overload: project Q onto the line through segment L.
__device__ Projection project(Line L, Point Q) { return project(L.a, L.b - L.a, Q); }
// Clamps a collision fraction to [0, 1], shrinking it slightly so agents stop
// just short of contact. NaN inputs are treated as "stop immediately".
__device__ float sensibilize(float frac) {
    // Trigger collisions a bit earlier than is exactly right, so nothing
    // ends up wedged in contact on the next iteration.
    const auto early_margin = .99f;
    // The `side` projection can produce NaN through numerical issues; clamp
    // that to something sensible.
    if (isnan(frac)) return 0.f;
    return fmaxf(fminf(frac*early_margin, 1.f), 0.f);
}
// Fraction in [0, 1] of this step's motion that agent 0 (at p0, displacing by
// v0) can take before coming within two agent-radii of agent 1 (at p1,
// displacing by v1). Returns 1 when no collision happens this step.
__device__ float collision(Point p0, Point v0, Point p1, Point v1) {
//Follows http://ericleong.me/research/circle-circle/#dynamic-circle-circle-collision
// Make the agent a bit bigger so that the near vision plane doesn't go through walls
const auto r = 1.001f*2.f*AGENT_RADIUS;
auto x = 1.f;
// Work in agent 1's rest frame: project p1 onto the relative motion v0 - v1.
const auto a = project(p0, v0 - v1, p1);
if ((0 < a.s) & (a.d < r)) {
// Back off from the point of closest approach to where the circles first touch.
const auto backoff = sqrtf(r*r - a.d*a.d)/(v0 - v1).len();
x = fminf(x, sensibilize(a.s - backoff));
}
return x;
}
// Fraction in [0, 1] of this step's motion (circle at p, displacing by v,
// radius ~AGENT_RADIUS) that can be taken before hitting segment `l`.
// Returns 1 when the path is clear. Four cases are tested: crossing the
// segment, grazing either endpoint, and stopping within r of its interior.
__device__ float collision(Point p, Point v, Line l) {
// Follows http://ericleong.me/research/circle-line/#moving-circle-and-static-line-segment
// Make the agent a bit bigger so that the near vision plane doesn't go through walls
const auto r = 1.001f*AGENT_RADIUS;
auto x = 1.f;
// Test for passing through `l`
const auto mid = intersect(p, v, l);
if ((0 < mid.s) & (mid.s < 1) & (0 < mid.t) & (mid.t < 1)) {
x = fminf(x, sensibilize((1 - r/project(l, p).d)*mid.s));
}
// Test for passing within r of `l.a`
const auto a = project(p, v, l.a);
if ((0 < a.s) & (a.d < r)) {
const auto backoff = sqrtf(r*r - a.d*a.d)/v.len();
x = fminf(x, sensibilize(a.s - backoff));
}
// Test for passing within r of `l.b`
const auto b = project(p, v, l.b);
if ((0 < b.s) & (b.d < r)) {
const auto backoff = sqrtf(r*r - b.d*b.d)/v.len();
x = fminf(x, sensibilize(b.s - backoff));
}
// Test for passing within r of the middle of `l`
const auto side = project(l, p + v);
if ((0 < side.s) & (side.s < 1) & (side.d < r)) {
const auto dp = project(l, p).d;
const auto dq = side.d;
x = fminf(x, sensibilize((dp - r)/(dp - dq)));
}
return x;
}
// Wraps angle tensors into [-180, 180). NOTE(review): assumes TT is a torch
// tensor type whose `%` follows torch's remainder semantics (result takes the
// sign of the divisor), so negative angles wrap correctly -- TODO confirm.
__host__ TT normalize_degrees(TT a) {
return (((a % 360.f) + 180.f) % 360.f) - 180.f;
}
using Progress = TensorProxy<float, 2>;
// One thread per environment `n`. For each agent d0, computes the largest
// fraction x in [0, 1] of this frame's motion (velocity/FPS_) that is free of
// collisions with every other agent and with the static walls, and writes it
// to progress[n][d0]. `DF` is the number of dynamic (agent-model) lines at
// the front of each environment's line array.
__global__ void collision_kernel(
int DF, Positions::PTA positions, Velocity::PTA velocity,
Lines::PTA lines, Progress::PTA progress) {
const auto N = positions.size(0);
const auto D = positions.size(1);
const int n = blockIdx.x*blockDim.x + threadIdx.x;
if (n < N) {
const auto L = lines.widths[n];
for (int d0=0; d0 < D; d0++) {
const Point p0(positions[n][d0]);
const Point m0(velocity[n][d0]);
float x = 1.f;
// Agent-agent collisions
for (int d1=0; d1 < D; d1++) {
if (d0 != d1) {
const Point p1(positions[n][d1]);
const Point m1(velocity[n][d1]);
x = fminf(x, collision(p0, m0/FPS_, p1, m1/FPS_));
}
}
// Check whether it's collided with any walls
// (skip the first DF lines: those are the agents' own model lines)
for (int l=DF; l < L; l++) {
x = fminf(x, collision(p0, m0/FPS_, lines[n][l]));
}
progress[n][d0] = x;
}
}
}
// Advances agent positions and angles by one frame, clipped to the
// collision-free fraction computed by collision_kernel. Agents that collided
// this frame (progress < 1) have their velocity/angular velocity zeroed.
__host__ Physics physics(const Scenery& scenery, const Agents& agents) {
const uint N = agents.angles.size(0);
const uint A = scenery.n_agents;
const uint F = scenery.model.size(0);
auto progress(Progress::empty({N, A}));
const uint collision_blocks = (N + BLOCK - 1)/BLOCK;
// A*F = number of dynamic agent-model lines at the front of each line array,
// which collision_kernel skips when testing against walls.
collision_kernel<<<collision_blocks, {BLOCK,}, 0, stream()>>>(
A*F, agents.positions.pta(), agents.velocity.pta(), scenery.lines.pta(), progress.pta());
//TODO: Collisions should only kill the normal component of momentum
at::AutoNonVariableTypeMode nonvar{true};
agents.positions.t.set_(agents.positions.t + progress.t.unsqueeze(-1)*agents.velocity.t/FPS);
agents.velocity.t.masked_fill_(progress.t.unsqueeze(-1) < 1, 0.f);
agents.angles.t.set_(normalize_degrees(agents.angles.t + progress.t*agents.angvelocity.t/FPS));
agents.angvelocity.t.masked_fill_(progress.t < 1, 0.f);
return {progress.t};
}
// RENDERING - BAKING
// Screen-plane offset of ray `r` out of `R` rays: evenly spaced, symmetric
// about zero, running from just under +HALF_SCREEN_WIDTH at r = 0 down to
// just above -HALF_SCREEN_WIDTH at r = R-1.
__device__ float ray_y(float r, float R) {
    const auto centred = R - 2*r - 1;
    return centred*HALF_SCREEN_WIDTH/R;
}
// Light intensity at point C in environment n: AMBIENT plus an inverse-square
// contribution from each light with an unobstructed straight line to C.
// Lines [0, af) are the dynamic agent lines and are ignored for shadowing.
// The result is clamped to [0, 1].
__device__ float light_intensity(Lines::PTA lines, Lights::PTA lights, Point C, int n, int af) {
const float LUMINANCE = 2.f;
float intensity = AMBIENT;
const auto num_i = lights.widths[n];
const auto num_l = lines.widths[n];
for (int i=0; i < num_i; i++) {
const Point I(lights[n][i]);
// The third channel of a light row is its intensity.
const auto Ii = lights[n][i][2];
bool unobstructed = true;
// Ignore the dynamic lines at the front of the array
for (int l1=af; l1 < num_l; l1++) {
const Line L(lines[n][l1]);
const auto p = intersect(I, C - I, L);
// Test the length to .999 rather than 1 so that floating point errors don't end up
// randomly darkening some texels.
bool obstructed = (p.t > 0.f) & (p.t < 1.f) & (p.s > 0.f) & (p.s < .999f);
unobstructed = unobstructed & !obstructed;
}
const auto d2 = (I - C).len2();
if (unobstructed) {
// Inverse-square falloff, with the denominator clamped so lights closer
// than unit distance don't blow up.
intensity += LUMINANCE*Ii/fmaxf(d2, 1.f);
}
}
return fminf(intensity, 1.f);
}
// One thread per texel `t`. Looks up the texel's parent line and environment,
// reconstructs the texel's world position by lerping along the line, and
// bakes the light intensity there into `baked`. `af` is the count of dynamic
// agent lines at the front of each line array, excluded from shadowing.
__global__ void baking_kernel(
Lines::PTA lines, Lights::PTA lights, Textures::PTA textures, Baked::PTA baked, int af) {
const auto t = blockDim.x*blockIdx.x + threadIdx.x;
if (t < textures.vals.size(0)) {
const auto l0 = textures.inverse[t];
const auto n = lines.inverse[l0];
// Texel position as a fraction along its line; +.5 centres it in the texel.
const auto loc = (t - textures.starts[l0] + .5f)/textures.widths[l0];
const auto C = Point(lines.vals[l0][0])*(1.f-loc) + Point(lines.vals[l0][1])*loc;
const auto i = light_intensity(lines, lights, C, n, af);
baked.vals[t] = i;
}
}
// Bakes static lighting into every texel of the scenery. The dynamic agent
// lines (n_agents * model-row-count of them) are skipped during shadow tests.
__host__ void bake(Scenery& scenery) {
const uint T = scenery.textures.vals.size(0);
const uint F = scenery.model.size(0);
const auto blocks = (T + BLOCK - 1)/BLOCK;
baking_kernel<<<blocks, BLOCK, 0, stream()>>>(
scenery.lines.pta(), scenery.lights.pta(), scenery.textures.pta(), scenery.baked.pta(), scenery.n_agents*F);
}
// RENDERING - KERNELS
// Rebuilds the dynamic agent lines: one thread per (endpoint e, model-line m,
// agent a) in environment blockIdx.x. Rotates the agent's model by its angle,
// translates it to its position, and writes the transformed endpoints into
// the front of the environment's line array.
__global__ void draw_kernel(Angles::PTA angles, Positions::PTA positions, Model::PTA model, Lines::PTA lines) {
const auto n = blockIdx.x;
const auto e = threadIdx.x;
const auto m = threadIdx.y;
const auto a = threadIdx.z;
// Angles are in degrees; cospif/sinpif take multiples of pi.
const auto ang = angles[n][a]/180.f;
const auto c = cospif(ang);
const auto s = sinpif(ang);
const auto px = positions[n][a][0];
const auto py = positions[n][a][1];
// TODO: Stick these in constant memory
const auto M = model.size(0);
const auto mx = model[m][e][0];
const auto my = model[m][e][1];
// 2-D rotation by `ang` followed by translation to the agent's position.
lines[n][a*M + m][e][0] = c*mx - s*my + px;
lines[n][a*M + m][e][1] = s*mx + c*my + py;
}
using Indices = TensorProxy<int, 3>;
using Locations = TensorProxy<float, 3>;
using Dots = TensorProxy<float, 3>;
using Distances = TensorProxy<float, 3>;
// One thread per (environment n, agent a, ray r). Casts the ray against
// every line in the environment and records the nearest intersection:
//   indices[n][a][r]:   index of the nearest line hit, or -1 for no hit
//   locations[n][a][r]: fraction along that line (0 at endpoint a, 1 at b)
//   dots[n][a][r]:      normalised dot of the ray with the line direction
//                       (not the normal -- see the comment below)
//   distances[n][a][r]: distance from the agent to the hit
__global__ void raycast_kernel(
        Angles::PTA angles, Positions::PTA positions, Lines::PTA lines,
        Indices::PTA indices, Locations::PTA locations, Dots::PTA dots, Distances::PTA distances) {
    const auto n = blockIdx.x;
    const auto r = threadIdx.x;
    const auto a = blockIdx.y;

    // Generate the ray: (1, ray_y) in agent space, rotated by the agent's angle
    const float ang = angles[n][a]/180.f;
    const auto c = cospif(ang);
    const auto s = sinpif(ang);
    const Point p(positions[n][a]);
    const float R = indices.size(2);
    const Point u(1.f, ray_y(r, R));
    const Point ru(c*u.x - s*u.y, s*u.x + c*u.y);
    const auto rlen = ru.len();

    // Cast the ray. The winning index is tracked as an int (it is written to
    // an int tensor) rather than taking a round trip through float.
    const auto num_l = lines.widths[n];
    int nearest_idx = -1;
    float nearest_s = CUDART_INF_F;
    float nearest_loc = CUDART_NAN_F;
    float nearest_dot = CUDART_NAN_F;
    for (int l=0; l < num_l; l++) {
        const Line L(lines[n][l]);
        const Point v(L.b.x - L.a.x, L.b.y - L.a.y);
        const auto q = intersect(p, ru, L);
        // dot of the ray and the line
        // this is _not_ the dot of the ray and the normal. we can get that easily enough from this,
        // but not vice versa. the thing that breaks the symmetry is that we only need the absolute
        // value of the dot with the normal.
        // (named `ray_line_dot` so it no longer shadows the `dot` helper)
        const auto dtop = dot(ru, v);
        const auto dbot = rlen*v.len();
        const auto ray_line_dot = dtop/(dbot + 1.e-6f);
        // Use the agent radius as the near plane
        const bool hit = (0 <= q.t) & (q.t <= 1);
        // 1e-4 offset here is to suppress z fighting
        const bool better = (AGENT_RADIUS/rlen < q.s) & (q.s < nearest_s - 1.e-4f);
        if (hit & better) {
            nearest_s = q.s;
            nearest_idx = l;
            nearest_loc = q.t;
            nearest_dot = ray_line_dot;
        }
    }
    indices[n][a][r] = nearest_idx;
    locations[n][a][r] = nearest_loc;
    dots[n][a][r] = nearest_dot;
    distances[n][a][r] = nearest_s*rlen;
}
using Screen = TensorProxy<float, 4>;
// Linear-filter taps for sampling a 1-D texture: texel indices l/r and their
// interpolation weights lw/rw.
struct Filter {
int l;
int r;
float lw;
float rw;
};
// Computes the two texel taps and their weights for linearly sampling a
// w-texel 1-D texture at normalised position x in [0, 1].
__device__ Filter filter(float x, int w) {
    // Continuous position over the texel grid, clamped to the last texel.
    const auto pos = fminf(x*(w+1), w-1);
    // Neighbouring texel indices, clamped to [0, w-1].
    const int left = fmaxf(pos-1, 0);
    const int right = fminf(pos, w-1);
    // Distance to each tap; the 1e-3 keeps the normalisation away from 0/0.
    const auto dist_l = fabsf(pos - (left+1)) + 1.e-3f;
    const auto dist_r = fabsf(pos - (right+1)) + 1.e-3f;
    // Each weight is the *opposite* tap's distance, normalised.
    const auto weight_l = dist_r/(dist_l+dist_r);
    const auto weight_r = dist_l/(dist_l+dist_r);
    return {left, right, weight_l, weight_r};
}
// One thread per (environment n, agent a, ray r). Shades the raycast result
// into an RGB triple: linearly filters the hit line's texture, multiplies in
// the light intensity (computed live for dynamic agent lines, read from the
// baked values for static ones) and an angle-dependent factor. Rays that hit
// nothing shade to black.
__global__ void shader_kernel(
Indices::PTA indices, Locations::PTA locations, Dots::PTA dots,
Lines::PTA lines, Lights::PTA lights, Textures::PTA textures, Baked::PTA baked, int F,
Screen::PTA screen) {
const auto n = blockIdx.x;
const auto r = threadIdx.x;
const auto a = blockIdx.y;
// Number of dynamic agent-model lines at the front of each line array.
const auto AF = screen.size(1)*F;
auto s0 = 0.f, s1 = 0.f, s2 = 0.f;
const auto l0 = indices[n][a][r];
if (l0 >= 0) {
const auto loc = locations[n][a][r];
//TODO: Stick this in texture memory
// Use the tex object API: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#texture-object-api
// Use CUDA arrays because they're the only ones that support normalized indexing and filtering
// Use a single texture per design, as CUDA arrays are size-limited
// Use linear interpolation. Do I have to start thinking of texels as fenceposts rather than centers?
const auto start = lines.starts[n] + l0;
const auto f = filter(loc, textures.widths[start]);
const auto tex_l = textures[start][f.l];
const auto tex_r = textures[start][f.r];
// If it's a dynamic line, calculate the intensity on the fly. Else - if it's static - use the baked version.
float intensity;
if (l0 < AF) {
const auto C = Point(lines[n][l0][0])*(1-loc) + Point(lines[n][l0][1])*loc;
intensity = light_intensity(lines, lights, C, n, AF);
} else {
intensity = f.lw*baked[start][f.l] + f.rw*baked[start][f.r];
}
// `dots` is the dot with the line; we want the dot with the normal
// NOTE(review): 1 - d^2 is the *squared* dot with the normal; the sqrt is
// skipped -- presumably a deliberate cheap-shading choice. TODO confirm.
const auto dot = 1 - dots[n][a][r]*dots[n][a][r];
s0 = dot*intensity*(f.lw*tex_l[0] + f.rw*tex_r[0]);
s1 = dot*intensity*(f.lw*tex_l[1] + f.rw*tex_r[1]);
s2 = dot*intensity*(f.lw*tex_l[2] + f.rw*tex_r[2]);
}
screen[n][a][r][0] = s0;
screen[n][a][r][1] = s1;
screen[n][a][r][2] = s2;
}
// Renders one frame for every (environment, agent): redraws the dynamic agent
// lines, casts RES rays per agent, then shades each ray into an RGB screen.
__host__ Render render(const Scenery& scenery, const Agents& agents) {
const uint N = agents.angles.size(0);
const uint A = scenery.n_agents;
const uint F = scenery.model.size(0);
//TODO: This gives underfull warps. But it's also not the bottleneck, so who cares
draw_kernel<<<N, {2, F, A}, 0, stream()>>>(
agents.angles.pta(), agents.positions.pta(), scenery.model.pta(), scenery.lines.pta());
auto indices(Indices::empty({N, A, RES}));
auto locations(Locations::empty({N, A, RES}));
auto dots(Dots::empty({N, A, RES}));
auto distances(Distances::empty({N, A, RES}));
raycast_kernel<<<{N, A}, {(uint) RES}, 0, stream()>>>(
agents.angles.pta(), agents.positions.pta(), scenery.lines.pta(),
indices.pta(), locations.pta(), dots.pta(), distances.pta());
auto screen(Screen::empty({N, A, RES, 3}));
shader_kernel<<<{N, A}, {(uint) RES}, 0, stream()>>>(
indices.pta(), locations.pta(), dots.pta(),
scenery.lines.pta(), scenery.lights.pta(), scenery.textures.pta(), scenery.baked.pta(), F, screen.pta());
return {indices.t, locations.t, dots.t, distances.t, screen.t};
}
#include "../config.cuh"
#include "../util_namespace.cuh"
#include "dispatch/dispatch_adjacent_difference.cuh"
#include <thrust/detail/integer_traits.h>
#include <thrust/detail/cstdint.h>
CUB_NAMESPACE_BEGIN
/**
* @brief DeviceAdjacentDifference provides device-wide, parallel operations for
* computing the differences of adjacent elements residing within
* device-accessible memory.
*
* @ingroup SingleModule
*
* @par Overview
* - DeviceAdjacentDifference calculates the differences of adjacent elements in
* d_input. Because the binary operation could be noncommutative, there
* are two sets of methods. Methods named SubtractLeft subtract left element
* `*(i - 1)` of input sequence from current element `*i`.
* Methods named `SubtractRight` subtract current element `*i` from the
* right one `*(i + 1)`:
* @par
* @code
* int *d_values; // [1, 2, 3, 4]
* //...
* int *d_subtract_left_result <-- [ 1, 1, 1, 1 ]
* int *d_subtract_right_result <-- [ -1, -1, -1, 4 ]
* @endcode
* - For SubtractLeft, if the left element is out of bounds, the iterator is
* assigned to <tt>\*(result + (i - first))</tt> without modification.
* - For SubtractRight, if the right element is out of bounds, the iterator is
* assigned to <tt>\*(result + (i - first))</tt> without modification.
*
* @par Snippet
* The code snippet below illustrates how to use @p DeviceAdjacentDifference to
* compute the left difference between adjacent elements.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_adjacent_difference.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* int num_items; // e.g., 8
* int *d_values; // e.g., [1, 2, 1, 2, 1, 2, 1, 2]
* //...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
*
* cub::DeviceAdjacentDifference::SubtractLeft(
* d_temp_storage, temp_storage_bytes, d_values, num_items);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run operation
* cub::DeviceAdjacentDifference::SubtractLeft(
* d_temp_storage, temp_storage_bytes, d_values, num_items);
*
* // d_values <-- [1, 1, -1, 1, -1, 1, -1, 1]
* @endcode
*/
struct DeviceAdjacentDifference
{
private:
// Common implementation behind the public Subtract* entry points: picks the
// narrowest offset type that can index `num_items` and forwards everything
// to DispatchAdjacentDifference. `d_temp_storage`/`temp_storage_bytes`
// follow the usual CUB two-phase protocol (null storage => size query).
template <bool in_place,
          bool read_left,
          typename InputIteratorT,
          typename OutputIteratorT,
          typename DifferenceOpT>
static CUB_RUNTIME_FUNCTION cudaError_t
AdjacentDifference(void *d_temp_storage,
                   std::size_t &temp_storage_bytes,
                   InputIteratorT d_input,
                   OutputIteratorT d_output,
                   std::size_t num_items,
                   DifferenceOpT difference_op,
                   cudaStream_t stream,
                   bool debug_synchronous)
{
  // Cutoff below which 32-bit offsets suffice: INT32_MAX, queried through
  // thrust's integer_traits for NVCC compatibility. (Previously this local
  // was misleadingly named `uint64_threshold`.)
  const auto int32_max_threshold = static_cast<std::size_t>(
    THRUST_NS_QUALIFIER::detail::integer_traits<
      THRUST_NS_QUALIFIER::detail::int32_t>::const_max);

  if (num_items <= int32_max_threshold)
  {
    // Small problem: 32-bit offsets keep the device-side index arithmetic cheap.
    using OffsetT = std::uint32_t;
    using DispatchT = DispatchAdjacentDifference<InputIteratorT,
                                                 OutputIteratorT,
                                                 DifferenceOpT,
                                                 OffsetT,
                                                 in_place,
                                                 read_left>;
    return DispatchT::Dispatch(d_temp_storage,
                               temp_storage_bytes,
                               d_input,
                               d_output,
                               static_cast<OffsetT>(num_items),
                               difference_op,
                               stream,
                               debug_synchronous);
  }

  // Large problem: fall back to 64-bit offsets.
  using OffsetT = std::uint64_t;
  using DispatchT = DispatchAdjacentDifference<InputIteratorT,
                                               OutputIteratorT,
                                               DifferenceOpT,
                                               OffsetT,
                                               in_place,
                                               read_left>;
  return DispatchT::Dispatch(d_temp_storage,
                             temp_storage_bytes,
                             d_input,
                             d_output,
                             static_cast<OffsetT>(num_items),
                             difference_op,
                             stream,
                             debug_synchronous);
}
public:
/**
* @brief Subtracts the left element of each adjacent pair of elements residing within device-accessible memory.
* @ingroup SingleModule
*
* @par Overview
* - Calculates the differences of adjacent elements in `d_input`. That is,
* `*d_input` is assigned to `*d_output`, and, for each iterator `i` in the
* range `[d_input + 1, d_input + num_items)`, the result of
* `difference_op(*i, *(i - 1))` is assigned to
* `*(d_output + (i - d_input))`.
* - Note that the behavior is undefined if the input and output ranges
* overlap in any way.
*
* @par Snippet
* The code snippet below illustrates how to use @p DeviceAdjacentDifference
* to compute the difference between adjacent elements.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_adjacent_difference.cuh>
*
* struct CustomDifference
* {
* template <typename DataType>
* __device__ DataType operator()(DataType &lhs, DataType &rhs)
* {
* return lhs - rhs;
* }
* };
*
* // Declare, allocate, and initialize device-accessible pointers
* int num_items; // e.g., 8
* int *d_input; // e.g., [1, 2, 1, 2, 1, 2, 1, 2]
* int *d_output;
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
*
* cub::DeviceAdjacentDifference::SubtractLeftCopy(
* d_temp_storage, temp_storage_bytes,
* d_input, d_output,
* num_items, CustomDifference());
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run operation
* cub::DeviceAdjacentDifference::SubtractLeftCopy(
* d_temp_storage, temp_storage_bytes,
* d_input, d_output,
* num_items, CustomDifference());
*
* // d_input <-- [1, 2, 1, 2, 1, 2, 1, 2]
* // d_output <-- [1, 1, -1, 1, -1, 1, -1, 1]
* @endcode
*
* @tparam InputIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
* and `x` and `y` are objects of `InputIteratorT`'s `value_type`, then
* `x - y` is defined, and `InputIteratorT`'s `value_type` is convertible to
* a type in `OutputIteratorT`'s set of `value_types`, and the return type
* of `x - y` is convertible to a type in `OutputIteratorT`'s set of
* `value_types`.
*
* @tparam OutputIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
*
* @tparam DifferenceOpT
* Its `result_type` is convertible to a type in `OutputIteratorT`'s set of
* `value_types`.
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to `temp_storage_bytes` and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of `d_temp_storage` allocation
*
* @param[in] d_input
* Pointer to the input sequence
*
* @param[out] d_output
* Pointer to the output sequence
*
* @param[in] num_items
* Number of items in the input sequence
*
* @param[in] difference_op
* The binary function used to compute differences
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is `false`
*/
// Out-of-place, left-looking difference: d_output[0] = d_input[0]; for i > 0,
// d_output[i] = difference_op(d_input[i], d_input[i-1]).
template <typename InputIteratorT,
          typename OutputIteratorT,
          typename DifferenceOpT = cub::Difference>
static CUB_RUNTIME_FUNCTION cudaError_t
SubtractLeftCopy(void *d_temp_storage,
                 std::size_t &temp_storage_bytes,
                 InputIteratorT d_input,
                 OutputIteratorT d_output,
                 std::size_t num_items,
                 DifferenceOpT difference_op = {},
                 cudaStream_t stream = 0,
                 bool debug_synchronous = false)
{
  return AdjacentDifference</*in_place=*/false, /*read_left=*/true>(
    d_temp_storage,
    temp_storage_bytes,
    d_input,
    d_output,
    num_items,
    difference_op,
    stream,
    debug_synchronous);
}
/**
* @brief Subtracts the left element of each adjacent pair of elements
* residing within device-accessible memory.
*
* @ingroup SingleModule
*
* @par Overview
* Calculates the differences of adjacent elements in `d_input`. That is, for
* each iterator `i` in the range `[d_input + 1, d_input + num_items)`, the
* result of `difference_op(*i, *(i - 1))` is assigned to
* `*(d_input + (i - d_input))`.
*
* @par Snippet
* The code snippet below illustrates how to use @p DeviceAdjacentDifference
* to compute the difference between adjacent elements.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_adjacent_difference.cuh>
*
* struct CustomDifference
* {
* template <typename DataType>
* __device__ DataType operator()(DataType &lhs, DataType &rhs)
* {
* return lhs - rhs;
* }
* };
*
* // Declare, allocate, and initialize device-accessible pointers
* int num_items; // e.g., 8
* int *d_data; // e.g., [1, 2, 1, 2, 1, 2, 1, 2]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceAdjacentDifference::SubtractLeft(
* d_temp_storage, temp_storage_bytes,
* d_data, num_items, CustomDifference());
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run operation
* cub::DeviceAdjacentDifference::SubtractLeft(
* d_temp_storage, temp_storage_bytes,
* d_data, num_items, CustomDifference());
*
* // d_data <-- [1, 1, -1, 1, -1, 1, -1, 1]
* @endcode
*
* @tparam RandomAccessIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/random_access_iterator">Random Access Iterator</a>,
* `RandomAccessIteratorT` is mutable. If `x` and `y` are objects of
* `RandomAccessIteratorT`'s `value_type`, and `x - y` is defined, then the
* return type of `x - y` should be convertible to a type in
* `RandomAccessIteratorT`'s set of `value_types`.
*
* @tparam DifferenceOpT
* Its `result_type` is convertible to a type in `RandomAccessIteratorT`'s
* set of `value_types`.
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to `temp_storage_bytes` and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_input
* Pointer to the input sequence and the result
*
* @param[in] num_items
* Number of items in the input sequence
*
* @param[in] difference_op
* The binary function used to compute differences
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is `false`.
*/
// In-place, left-looking difference: same operation as SubtractLeftCopy, but
// reading from and writing to `d_input`.
template <typename RandomAccessIteratorT,
          typename DifferenceOpT = cub::Difference>
static CUB_RUNTIME_FUNCTION cudaError_t
SubtractLeft(void *d_temp_storage,
             std::size_t &temp_storage_bytes,
             RandomAccessIteratorT d_input,
             std::size_t num_items,
             DifferenceOpT difference_op = {},
             cudaStream_t stream = 0,
             bool debug_synchronous = false)
{
  // For the in-place variant, d_input doubles as the output iterator.
  return AdjacentDifference</*in_place=*/true, /*read_left=*/true>(
    d_temp_storage,
    temp_storage_bytes,
    d_input,
    d_input,
    num_items,
    difference_op,
    stream,
    debug_synchronous);
}
/**
* @brief Subtracts the right element of each adjacent pair of elements
* residing within device-accessible memory.
*
* @ingroup SingleModule
*
* @par Overview
* - Calculates the right differences of adjacent elements in `d_input`. That
* is, `*(d_input + num_items - 1)` is assigned to
* `*(d_output + num_items - 1)`, and, for each iterator `i` in the range
* `[d_input, d_input + num_items - 1)`, the result of
* `difference_op(*i, *(i + 1))` is assigned to
* `*(d_output + (i - d_input))`.
* - Note that the behavior is undefined if the input and output ranges
* overlap in any way.
*
* @par Snippet
* The code snippet below illustrates how to use @p DeviceAdjacentDifference
* to compute the difference between adjacent elements.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_adjacent_difference.cuh>
*
* struct CustomDifference
* {
* template <typename DataType>
* __device__ DataType operator()(DataType &lhs, DataType &rhs)
* {
* return lhs - rhs;
* }
* };
*
* // Declare, allocate, and initialize device-accessible pointers
* int num_items; // e.g., 8
* int *d_input; // e.g., [1, 2, 1, 2, 1, 2, 1, 2]
* int *d_output;
* ..
*
* // Determine temporary device storage requirements
* void *d_temp_storage = nullptr;
* size_t temp_storage_bytes = 0;
* cub::DeviceAdjacentDifference::SubtractRightCopy(
* d_temp_storage, temp_storage_bytes,
* d_input, d_output, num_items, CustomDifference());
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run operation
* cub::DeviceAdjacentDifference::SubtractRightCopy(
* d_temp_storage, temp_storage_bytes,
* d_input, d_output, num_items, CustomDifference());
*
* // d_input <-- [1, 2, 1, 2, 1, 2, 1, 2]
 * // d_output <-- [-1, 1, -1, 1, -1, 1, -1, 2]
* @endcode
*
* @tparam InputIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
* and `x` and `y` are objects of `InputIteratorT`'s `value_type`, then
* `x - y` is defined, and `InputIteratorT`'s `value_type` is convertible to
* a type in `OutputIteratorT`'s set of `value_types`, and the return type
* of `x - y` is convertible to a type in `OutputIteratorT`'s set of
* `value_types`.
*
* @tparam OutputIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
*
* @tparam DifferenceOpT
 *   Its `result_type` is convertible to a type in `OutputIteratorT`'s
 *   set of `value_types`.
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to `temp_storage_bytes` and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of `d_temp_storage` allocation
*
* @param[in] d_input
* Pointer to the input sequence
*
* @param[out] d_output
* Pointer to the output sequence
*
* @param[in] num_items
* Number of items in the input sequence
*
* @param[in] difference_op
* The binary function used to compute differences.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is `false`.
*/
template <typename InputIteratorT,
typename OutputIteratorT,
typename DifferenceOpT = cub::Difference>
static CUB_RUNTIME_FUNCTION cudaError_t
SubtractRightCopy(void *d_temp_storage,
std::size_t &temp_storage_bytes,
InputIteratorT d_input,
OutputIteratorT d_output,
std::size_t num_items,
DifferenceOpT difference_op = {},
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool in_place = false;
constexpr bool read_left = false;
return AdjacentDifference<in_place, read_left>(d_temp_storage,
temp_storage_bytes,
d_input,
d_output,
num_items,
difference_op,
stream,
debug_synchronous);
}
/**
* @brief Subtracts the right element of each adjacent pair of elements
* residing within device-accessible memory.
*
* @ingroup SingleModule
*
* @par Overview
* Calculates the right differences of adjacent elements in `d_input`. That
* is, for each iterator `i` in the range
* `[d_input, d_input + num_items - 1)`, the result of
* `difference_op(*i, *(i + 1))` is assigned to
* `*(d_input + (i - d_input))`.
*
* @par Snippet
* The code snippet below illustrates how to use @p DeviceAdjacentDifference
* to compute the difference between adjacent elements.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_adjacent_difference.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* int num_items; // e.g., 8
* int *d_data; // e.g., [1, 2, 1, 2, 1, 2, 1, 2]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceAdjacentDifference::SubtractRight(
* d_temp_storage, temp_storage_bytes, d_data, num_items);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run operation
* cub::DeviceAdjacentDifference::SubtractRight(
* d_temp_storage, temp_storage_bytes, d_data, num_items);
*
* // d_data <-- [-1, 1, -1, 1, -1, 1, -1, 2]
* @endcode
*
* @tparam RandomAccessIteratorT
* is a model of <a href="https://en.cppreference.com/w/cpp/iterator/random_access_iterator">Random Access Iterator</a>,
* `RandomAccessIteratorT` is mutable. If `x` and `y` are objects of
* `RandomAccessIteratorT`'s `value_type`, and `x - y` is defined, then the
* return type of `x - y` should be convertible to a type in
* `RandomAccessIteratorT`'s set of `value_types`.
*
* @tparam DifferenceOpT
* Its `result_type` is convertible to a type in `RandomAccessIteratorT`'s
* set of `value_types`.
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to `temp_storage_bytes` and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of `d_temp_storage` allocation
*
* @param[in,out] d_input
* Pointer to the input sequence
*
* @param[in] num_items
* Number of items in the input sequence
*
* @param[in] difference_op
* The binary function used to compute differences
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to be
* printed to the console. Default is `false`.
*/
template <typename RandomAccessIteratorT,
typename DifferenceOpT = cub::Difference>
static CUB_RUNTIME_FUNCTION cudaError_t
SubtractRight(void *d_temp_storage,
std::size_t &temp_storage_bytes,
RandomAccessIteratorT d_input,
std::size_t num_items,
DifferenceOpT difference_op = {},
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool in_place = true;
constexpr bool read_left = false;
return AdjacentDifference<in_place, read_left>(d_temp_storage,
temp_storage_bytes,
d_input,
d_input,
num_items,
difference_op,
stream,
debug_synchronous);
}
};
CUB_NAMESPACE_END
#include "test_asserts.cuh"
#include <fmt/format.h>
void test_empty()
{
nvbench::int64_axis axis("Empty");
ASSERT(axis.get_name() == "Empty");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(axis.get_size() == 0);
axis.set_inputs({});
ASSERT(axis.get_size() == 0);
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "Empty");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->get_size() == 0);
}
void test_basic()
{
nvbench::int64_axis axis{"BasicAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4});
const std::vector<nvbench::int64_t> ref{0, 1, 2, 3, 7, 6, 5, 4};
ASSERT(axis.get_name() == "BasicAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(!axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref);
ASSERT(axis.get_values() == ref);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref[i]));
ASSERT(axis.get_description(i).empty());
}
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "BasicAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(!clone->is_power_of_two());
ASSERT(clone->get_size() == 8);
ASSERT(clone->get_inputs() == ref);
ASSERT(clone->get_values() == ref);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(ref[i]));
ASSERT(clone->get_description(i).empty());
}
}
void test_power_of_two()
{
nvbench::int64_axis axis{"POTAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
ASSERT(axis.get_name() == "POTAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref_inputs);
ASSERT(axis.get_values() == ref_values);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
const auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
const auto *clone =
dynamic_cast<const nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
ASSERT(clone->get_name() == "POTAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->is_power_of_two());
ASSERT(clone->get_size() == 8);
ASSERT(clone->get_inputs() == ref_inputs);
ASSERT(clone->get_values() == ref_values);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(clone->get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}
void test_update_none_to_none()
{
nvbench::int64_axis axis{"TestAxis"};
const std::vector<nvbench::int64_t> ref{0, 1, 2, 3, 7, 6, 5, 4};
axis.set_inputs(ref);
{ // Update a clone with empty values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
clone->set_inputs({});
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(!clone->is_power_of_two());
ASSERT(clone->get_size() == 0);
ASSERT(clone->get_inputs().empty());
ASSERT(clone->get_values().empty());
}
{ // Update a clone with new values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
const std::vector<nvbench::int64_t> update_ref{2, 5, 7, 9};
clone->set_inputs(update_ref);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(!clone->is_power_of_two());
ASSERT(clone->get_size() == 4);
ASSERT(clone->get_inputs() == update_ref);
ASSERT(clone->get_values() == update_ref);
for (size_t i = 0; i < update_ref.size(); ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(update_ref[i]));
ASSERT(clone->get_description(i).empty());
}
}
// Check that the original axis is unchanged:
ASSERT(axis.get_name() == "TestAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(!axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref);
ASSERT(axis.get_values() == ref);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref[i]));
ASSERT(axis.get_description(i).empty());
}
}
void test_update_none_to_pow2()
{
nvbench::int64_axis axis{"TestAxis"};
const std::vector<nvbench::int64_t> ref{0, 1, 2, 3, 7, 6, 5, 4};
axis.set_inputs(ref);
{ // Update a clone with empty values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
clone->set_inputs({}, nvbench::int64_axis_flags::power_of_two);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->is_power_of_two());
ASSERT(clone->get_size() == 0);
ASSERT(clone->get_inputs().empty());
ASSERT(clone->get_values().empty());
}
{ // Update a clone with new values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
const std::vector<nvbench::int64_t> update_inputs{2, 5, 7, 9};
const std::vector<nvbench::int64_t> update_values{4, 32, 128, 512};
clone->set_inputs(update_inputs, nvbench::int64_axis_flags::power_of_two);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->is_power_of_two());
ASSERT(clone->get_size() == 4);
ASSERT(clone->get_inputs() == update_inputs);
ASSERT(clone->get_values() == update_values);
for (size_t i = 0; i < update_inputs.size(); ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(update_inputs[i]));
ASSERT(clone->get_description(i) ==
fmt::format("2^{} = {}", update_inputs[i], update_values[i]));
}
}
// Check that the original axis is unchanged:
ASSERT(axis.get_name() == "TestAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(!axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref);
ASSERT(axis.get_values() == ref);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref[i]));
ASSERT(axis.get_description(i).empty());
}
}
void test_update_pow2_to_none()
{
nvbench::int64_axis axis{"TestAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
{ // Update a clone with empty values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
clone->set_inputs({});
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(!clone->is_power_of_two());
ASSERT(clone->get_size() == 0);
ASSERT(clone->get_inputs().empty());
ASSERT(clone->get_values().empty());
}
{ // Update a clone with new values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
const std::vector<nvbench::int64_t> update_ref{2, 5, 7, 9};
clone->set_inputs(update_ref);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(!clone->is_power_of_two());
ASSERT(clone->get_size() == 4);
ASSERT(clone->get_inputs() == update_ref);
ASSERT(clone->get_values() == update_ref);
for (size_t i = 0; i < update_ref.size(); ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(update_ref[i]));
ASSERT(clone->get_description(i).empty());
}
}
// Check that the original axis is unchanged:
ASSERT(axis.get_name() == "TestAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref_inputs);
ASSERT(axis.get_values() == ref_values);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}
void test_update_pow2_to_pow2()
{
nvbench::int64_axis axis{"TestAxis"};
axis.set_inputs({0, 1, 2, 3, 7, 6, 5, 4},
nvbench::int64_axis_flags::power_of_two);
const std::vector<nvbench::int64_t> ref_inputs{0, 1, 2, 3, 7, 6, 5, 4};
const std::vector<nvbench::int64_t> ref_values{1, 2, 4, 8, 128, 64, 32, 16};
{ // Update a clone with empty values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
clone->set_inputs({}, nvbench::int64_axis_flags::power_of_two);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->is_power_of_two());
ASSERT(clone->get_size() == 0);
ASSERT(clone->get_inputs().empty());
ASSERT(clone->get_values().empty());
}
{ // Update a clone with new values
auto clone_base = axis.clone();
ASSERT(clone_base.get() != nullptr);
auto *clone = dynamic_cast<nvbench::int64_axis *>(clone_base.get());
ASSERT(clone != nullptr);
const std::vector<nvbench::int64_t> update_inputs{2, 5, 7, 9};
const std::vector<nvbench::int64_t> update_values{4, 32, 128, 512};
clone->set_inputs(update_inputs, nvbench::int64_axis_flags::power_of_two);
ASSERT(clone->get_name() == "TestAxis");
ASSERT(clone->get_type() == nvbench::axis_type::int64);
ASSERT(clone->is_power_of_two());
ASSERT(clone->get_size() == 4);
ASSERT(clone->get_inputs() == update_inputs);
ASSERT(clone->get_values() == update_values);
for (size_t i = 0; i < update_inputs.size(); ++i)
{
ASSERT(clone->get_input_string(i) == fmt::to_string(update_inputs[i]));
ASSERT(clone->get_description(i) ==
fmt::format("2^{} = {}", update_inputs[i], update_values[i]));
}
}
// Check that the original axis is unchanged:
ASSERT(axis.get_name() == "TestAxis");
ASSERT(axis.get_type() == nvbench::axis_type::int64);
ASSERT(axis.is_power_of_two());
ASSERT(axis.get_size() == 8);
ASSERT(axis.get_inputs() == ref_inputs);
ASSERT(axis.get_values() == ref_values);
for (size_t i = 0; i < 8; ++i)
{
ASSERT(axis.get_input_string(i) == fmt::to_string(ref_inputs[i]));
ASSERT(axis.get_description(i) ==
fmt::format("2^{} = {}", ref_inputs[i], ref_values[i]));
}
}
int main()
{
  // Run every int64_axis test case; ASSERT aborts on the first failure.
  // Fix: removed a stray "| the_stack |" extraction artifact that trailed
  // the closing brace and broke compilation.
  test_empty();
  test_basic();
  test_power_of_two();
  test_update_none_to_none();
  test_update_none_to_pow2();
  test_update_pow2_to_none();
  test_update_pow2_to_pow2();
  return EXIT_SUCCESS;
}
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#define BLOCKSIZE 16
#define BLOCKSIZE2 BLOCKSIZE*BLOCKSIZE
typedef unsigned int uint;
extern "C"
{
__global__ void IsFinite(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] = isfinite(a[index]) ? 0 : 1;
}
}
__global__ void PointwiseMultiply(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] *= a[index];
}
}
__global__ void PointwiseDivide(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] = a[index] / b[index];
}
}
__global__ void Sqrt(const float* __restrict a, float* __restrict b, uint size, float valueAdjustment)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] = sqrt(a[index] + valueAdjustment);
}
}
__global__ void AddInPlace(float* __restrict a, const float* __restrict b, uint size, float coefficient1, float coefficient2)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
a[index] = (a[index] * coefficient1) + (b[index] * coefficient2);
}
}
__global__ void SubtractInPlace(float* __restrict a, const float* __restrict b, uint size, float coefficient1, float coefficient2)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
a[index] = (a[index] * coefficient1) - (b[index] * coefficient2);
}
}
__global__ void AddToEachRow(float* __restrict a, const float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
a[j * rows + i] += b[j];
}
}
}
__global__ void AddToEachColumn(float* __restrict a, const float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
a[j * rows + i] += b[i];
}
}
}
__global__ void TanH(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] = tanh(a[index]);
}
}
__global__ void TanHDerivative(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float ta = tanh(a[index]);
b[index] = 1.0f - ta * ta;
}
}
__global__ void Sigmoid(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[index] = 1.0f / (1.0f + exp(-1.0f * a[index]));
}
}
__global__ void SigmoidDerivative(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float sigmoid = 1.0f / (1.0f + exp(-1.0f * a[index]));
b[index] = sigmoid * (1.0f - sigmoid);
}
}
__global__ void RELU(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float val = a[index];
b[index] = (val <= 0) ? 0 : val;
}
}
__global__ void RELUDerivative(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float val = a[index];
b[index] = (val <= 0) ? 0 : 1;
}
}
__global__ void LeakyRELU(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float val = a[index];
b[index] = (val <= 0) ? 0.01f*val : val;
}
}
__global__ void LeakyRELUDerivative(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
float val = a[index];
b[index] = (val <= 0) ? 0.01f : 1;
}
}
__global__ void Reverse(const float* __restrict a, float* __restrict b, uint size)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
b[size - index - 1] = a[index];
}
}
__global__ void SumRows(const float* __restrict a, float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
atomicAdd(b + i, a[j * rows + i]);
}
}
}
__global__ void SumColumns(const float* __restrict a, float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
atomicAdd(b + j, a[j * rows + i]);
}
}
}
__global__ void MemClear(float* data, uint count, uint srcOffset, uint srcIncrement)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
uint arrayIndex = srcOffset + (index * srcIncrement);
data[arrayIndex] = 0.0f;
}
}
__global__ void FindMinAndMax(const float* __restrict data, uint count, float* __restrict minBlock, float* __restrict maxBlock)
{
uint tidX = threadIdx.x;
uint blockX = blockIdx.x;
uint index = blockDim.x * blockX + tidX;
// read block into shared memory
__shared__ float block[BLOCKSIZE2];
block[tidX] = (index < count) ? data[index] : 0;
__syncthreads();
// aggregate per block
if (tidX == 0) {
float min = FLT_MAX, max = FLT_MIN;
uint maxIndex = BLOCKSIZE2;
if (count - index < BLOCKSIZE2)
maxIndex = count - index;
for (uint i = 0; i < maxIndex; i++) {
float val = block[i];
if (i == 0 || val > max)
max = val;
if (i == 0 || val < min)
min = val;
}
minBlock[blockX] = min;
maxBlock[blockX] = max;
}
}
__global__ void FindSum(const float* __restrict data, uint count, float* __restrict sum)
{
uint tidX = threadIdx.x;
uint blockX = blockIdx.x;
uint index = blockDim.x * blockX + tidX;
// read block into shared memory
__shared__ float block[BLOCKSIZE2];
if (index < count)
block[tidX] = data[index];
__syncthreads();
// aggregate per block
if (tidX == 0) {
float total = 0;
uint maxIndex = BLOCKSIZE2;
if (count - blockX * BLOCKSIZE2 < BLOCKSIZE2)
maxIndex = count - blockX * BLOCKSIZE2;
for (uint i = 0; i < maxIndex; i++) {
total += block[i];
}
sum[blockX] = total;
}
}
__global__ void FindStdDev(const float* __restrict data, uint count, float mean, float* __restrict stdDev)
{
uint tidX = threadIdx.x;
uint blockX = blockIdx.x;
uint index = blockDim.x * blockX + tidX;
// read block into shared memory
__shared__ float block[BLOCKSIZE2];
if (index < count)
block[tidX] = data[index];
__syncthreads();
// aggregate per block
if (tidX == 0) {
float total = 0;
uint maxIndex = BLOCKSIZE2;
if (count - blockX * BLOCKSIZE2 < BLOCKSIZE2)
maxIndex = count - blockX * BLOCKSIZE2;
for (uint i = 0; i < maxIndex; i++) {
float val = block[i] - mean;
total += val * val;
}
stdDev[blockX] = total;
}
}
__global__ void Constrain(float* __restrict data, uint count, float min, float max)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
float val = data[index];
if (val < min)
data[index] = min;
if (val > max)
data[index] = max;
}
}
__global__ void RoundInPlace(float* __restrict data, uint count, float lower, float upper, float mid)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
float val = data[index];
if (val >= mid)
data[index] = upper;
else
data[index] = lower;
}
}
__global__ void Pow(const float* __restrict a, float* __restrict b, uint count, float power)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
b[index] = pow(a[index], power);
}
}
__global__ void Diagonal(const float* __restrict a, float* __restrict b, uint rows, uint columns)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < rows && index < columns; index += blockDim.x * gridDim.x) {
b[index] = a[index * rows + index];
}
}
__global__ void L1Regularisation(float* __restrict a, uint count, float coefficient)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
float val = a[index];
a[index] = val - ((val > 0 ? 1 : val < 0 ? -1 : 0) * coefficient);
}
}
__global__ void PointwiseDivideRows(float* __restrict a, const float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
uint index = j * rows + i;
float val = a[index];
a[index] = val / b[i];
}
}
}
__global__ void PointwiseDivideColumns(float* __restrict a, const float* __restrict b, uint rows, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
uint index = j * rows + i;
float val = a[index];
a[index] = val / b[j];
}
}
}
__global__ void SplitRows(const float* __restrict a, float* __restrict b, float* __restrict c, uint rows, uint columns, uint position)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
uint index = j * rows + i;
float val = a[index];
if(j >= position) {
uint diff = j - position;
c[diff * rows + i] = val;
}else
b[index] = val;
}
}
}
__global__ void SplitColumns(const float* __restrict a, float* __restrict b, float* __restrict c, uint rows, uint columns, uint position)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
float val = a[j * rows + i];
if(i >= position) {
uint diff = i - position;
c[j * (rows-position) + diff] = val;
}else
b[j * position + i] = val;
}
}
}
__global__ void ConcatColumns(const float* __restrict a, const float* __restrict b, float* __restrict c, uint rows, uint columns, uint topRowCount, uint bottomRowCount)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
float val;
if(i >= topRowCount)
val = b[j * bottomRowCount + i - topRowCount];
else
val = a[j * topRowCount + i];
c[j * rows + i] = val;
}
}
}
__global__ void ConcatRows(const float* __restrict a, const float* __restrict b, float* __restrict c, uint rows, uint columns, uint leftColumnCount)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
float val;
if(j >= leftColumnCount)
val = b[(j-leftColumnCount) * rows + i];
else
val = a[j * rows + i];
c[j * rows + i] = val;
}
}
}
__global__ void EuclideanDistance(const float* __restrict a, const float* __restrict b, float* __restrict c, uint count)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
float val = a[index] - b[index];
c[index] = val * val;
}
}
/*__global__ void MultiEuclideanDistance(const float* __restrict a, const float* __restrict* b, float* __restrict c, uint size, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
float val1 = a[i];
float val2 = b[j][i];
float val3 = val1 - val2;
c[j * size + i] = val3 * val3;
}
}
}*/
__global__ void ManhattanDistance(const float* __restrict a, const float* __restrict b, float* __restrict c, uint count)
{
for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
c[index] = abs(a[index] - b[index]);
}
}
/*__global__ void MultiManhattanDistance(const float* __restrict a, const float* __restrict* b, float* __restrict c, uint size, uint columns)
{
for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
float val1 = a[i];
float val2 = b[j][i];
c[j * size + i] = abs(val1 - val2);
}
}
}*/
// Accumulates the three dot products needed for a cosine distance between
// vectors a and b: aa += a.a, ab += a.b, bb += b.b. Each of aa/ab/bb is a
// single-float accumulator updated with atomicAdd; the caller presumably
// zeroes them beforehand and combines them as ab / sqrt(aa*bb) -- confirm
// against the host-side code.
__global__ void CosineDistance(const float* __restrict a, const float* __restrict b, float* __restrict aa, float* __restrict ab, float* __restrict bb, uint count)
{
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) {
        float left = a[index];
        float right = b[index];
        atomicAdd(aa, left * left);
        atomicAdd(ab, left * right);
        atomicAdd(bb, right * right);
    }
}
// Element-wise absolute value: b[i] = |a[i]|.
__global__ void Abs(const float* __restrict a, float* __restrict b, uint count)
{
    const uint stride = blockDim.x * gridDim.x;
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < count; i += stride)
        b[i] = abs(a[i]);
}
// Element-wise natural logarithm: b[i] = log(a[i]).
__global__ void Log(const float* __restrict a, float* __restrict b, uint count)
{
    const uint stride = blockDim.x * gridDim.x;
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < count; i += stride)
        b[i] = log(a[i]);
}
// In-place min-max normalisation: a[i] = (a[i] - min) / range.
// The caller supplies range (typically max - min); no zero-range guard here.
__global__ void Normalise(float* __restrict a, uint count, float min, float range)
{
    const uint stride = blockDim.x * gridDim.x;
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < count; i += stride)
        a[i] = (a[i] - min) / range;
}
// Softmax numerator: b[i] = exp(a[i] - max). Subtracting the supplied
// maximum keeps exp() in range; the normalising division happens elsewhere.
__global__ void SoftmaxVector(const float* __restrict a, float* __restrict b, uint count, float max)
{
    const uint stride = blockDim.x * gridDim.x;
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < count; i += stride)
        b[i] = exp(a[i] - max);
}
// Adds a scalar to every element of `a`, in place.
__global__ void VectorAdd(float* __restrict a, uint size, float scalar)
{
    const uint stride = blockDim.x * gridDim.x;
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
        a[i] += scalar;
}
// Gathers a[c[index]] and ACCUMULATES it into b[index] (note `+=`, not `=`).
// Despite the "Copy" name this adds onto the destination; presumably the
// caller zeroes b first -- TODO confirm against the host code, or switch to
// plain assignment if accumulation is unintended.
__global__ void VectorCopyRandom(const float* __restrict a, float* __restrict b, uint* __restrict c, uint size)
{
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        b[index] += a[c[index]];
    }
}
// Packs an array of row pointers into a single column-major matrix:
// b[j * rows + i] = a[i][j], i.e. vector a[i] becomes row i of b.
__global__ void CopyToMatrixRows(const float* __restrict* a, float* __restrict b, uint rows, uint columns)
{
    const uint rowStep = blockDim.x * gridDim.x;
    const uint colStep = blockDim.y * gridDim.y;
    for (uint r = blockDim.x * blockIdx.x + threadIdx.x; r < rows; r += rowStep)
        for (uint c = blockDim.y * blockIdx.y + threadIdx.y; c < columns; c += colStep)
            b[c * rows + r] = a[r][c];
}
// Packs an array of column pointers into a single column-major matrix:
// b[j * rows + i] = a[j][i], i.e. vector a[j] becomes column j of b.
__global__ void CopyToMatrixColumns(const float* __restrict* a, float* __restrict b, uint rows, uint columns)
{
    const uint rowStep = blockDim.x * gridDim.x;
    const uint colStep = blockDim.y * gridDim.y;
    for (uint r = blockDim.x * blockIdx.x + threadIdx.x; r < rows; r += rowStep)
        for (uint c = blockDim.y * blockIdx.y + threadIdx.y; c < columns; c += colStep)
            b[c * rows + r] = a[c][r];
}
// Zero-pads every 2-D slice of a 4-D tensor batch.
// Layout is column-major per slice (row index i varies fastest, as shown by
// the flat-index decomposition below): input slices are rows x columns,
// output slices are outputRows x outputColumns with `padding` zeros on each
// border. `size` is the total number of OUTPUT elements; `count` (batch
// size) is unused here beyond being implied by size.
__global__ void TensorAddPadding(
    uint size,
    const float* __restrict a,
    float* __restrict b,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint outputRows,
    uint outputColumns,
    uint padding
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        // Decompose the flat output index into (row i, col j, channel k, batch z).
        uint i = index % outputRows;
        uint index2 = index / outputRows;
        uint j = index2 % outputColumns;
        uint index3 = index2 / outputColumns;
        uint k = index3 % depth;
        uint z = index3 / depth;
        float val = 0;
        // Interior cells copy from the input; border cells stay zero.
        if(i >= padding && i < (outputRows - padding) && j >= padding && j < (outputColumns - padding)) {
            const float* inputPtr = a + (rows * columns * depth * z) + (rows * columns * k);
            uint aIndex = (j-padding) * rows + (i-padding);
            val = inputPtr[aIndex];
            /*printf("index:%i i:%i(%i) j:%i(%i) k:%i(%i) z:%i(%i) ai:%i val:%f\n", index,
                i, outputRows,
                j, outputColumns,
                k, depth,
                z, count,
                aIndex, val
            );*/
        }
        float* outputPtr = b + (outputRows * outputColumns * depth * z) + (outputRows * outputColumns * k);
        outputPtr[j * outputRows + i] = val;
    }
}
// Inverse of TensorAddPadding: strips `padding` cells from each border of
// every 2-D slice. `size` is the total number of INPUT elements; threads
// whose (i, j) falls in the border simply skip the copy. Output slices are
// outputRows x outputColumns, column-major.
__global__ void TensorRemovePadding(
    uint size,
    const float* __restrict a,
    float* __restrict b,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint outputRows,
    uint outputColumns,
    uint padding
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        // Decompose the flat input index into (row i, col j, channel k, batch z).
        uint i = index % rows;
        uint index2 = index / rows;
        uint j = index2 % columns;
        if(i >= padding && i < (rows-padding) && j >= padding && j < (columns-padding)) {
            uint index3 = index2 / columns;
            uint k = index3 % depth;
            uint z = index3 / depth;
            const float* inputPtr = a + (rows * columns * depth * z) + (rows * columns * k);
            uint aIndex = j * rows + i;
            float val = inputPtr[aIndex];
            float* outputPtr = b + (outputRows * outputColumns * depth * z) + (outputRows * outputColumns * k);
            uint bIndex = (j-padding) * outputRows + (i-padding);
            outputPtr[bIndex] = val;
            /*printf("index:%i i:%i(%i) j:%i(%i) k:%i(%i) z:%i(%i) ai:%i bi:%i val:%f\n", index,
                i, outputRows,
                j, outputColumns,
                k, depth,
                z, count,
                aIndex, bIndex,
                val
            );*/
        }
    }
}
// im2col: for every convolution window position ci (top-left corner given
// by the offset tables cx[ci]/cy[ci]) copies the filterWidth x filterHeight
// x depth patch of input `a` into matrix `b`, so convolution becomes a
// matrix multiply. Per-batch-item output matrix is outputRows x
// outputColumns with one window per row index ci.
// Index decomposition order (fastest first): x (filter col), y (filter
// row), k (channel), ci (window), i (batch item).
// NOTE(review): xStride/yStride are accepted but unused here -- strides
// appear to be baked into the cx/cy tables; confirm against the caller.
__global__ void TensorIm2Col(
    uint size,
    const float* __restrict a,
    float* __restrict b,
    const float* __restrict cx,
    const float* __restrict cy,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint outputRows,
    uint outputColumns,
    uint convolutionCount,
    uint filterWidth,
    uint filterHeight,
    uint xStride,
    uint yStride
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        uint x = index % filterWidth;
        uint index2 = index / filterWidth;
        uint y = index2 % filterHeight;
        uint index3 = index2 / filterHeight;
        uint k = index3 % depth;
        uint index4 = index3 / depth;
        uint ci = index4 % convolutionCount;
        uint i = index4 / convolutionCount;
        // Window origin in the source slice (float table truncated to uint).
        uint offsetX = cx[ci];
        uint offsetY = cy[ci];
        /*printf("index:%i, i:%i(%i), ci:%i(%i), k:%i(%i), x:%i(%i), y:%i(%i), cx:%i=%i, cy:%i=%i\n", index,
            i, count,
            ci, convolutionCount,
            k, depth,
            x, filterWidth,
            y, filterHeight,
            offsetX, (uint)cx[ci],
            offsetY, (uint)cy[ci]
        );*/
        uint filterOffset = k * filterWidth * filterHeight;
        uint filterIndex = filterOffset + (x * filterHeight + y);
        float* outputPtr = b + (outputRows * outputColumns * i);
        const float* inputPtr = a + (rows * columns * depth * i) + (rows * columns * k);
        outputPtr[filterIndex * outputRows + ci] = inputPtr[(offsetX + x) * rows + (offsetY + y)];
    }
}
// Backward pass of the im2col-based convolution: scatters filter-weighted
// error terms from `a` (the incoming gradient, laid out per stride cell)
// back onto the input-gradient tensor `b`. The filter indices are mirrored
// ((filterWidth-x-1), (filterHeight-y-1)), i.e. the filter is applied
// rotated 180 degrees. Overlapping windows are combined with atomicAdd,
// so `b` is presumably zeroed by the caller -- TODO confirm.
__global__ void TensorReverseIm2Col(
    uint size,
    const float* __restrict a,
    const float* __restrict filters,
    float* __restrict b,
    const float* __restrict cx,
    const float* __restrict cy,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint convolutionCount,
    uint filterWidth,
    uint filterHeight,
    uint xStride,
    uint yStride,
    uint outputRows,
    uint outputColumns,
    uint outputDepth
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        // Decompose (fastest first): z (output channel), x/y (filter cell),
        // ci (window), k (input channel), i (batch item).
        uint z = index % outputDepth;
        uint index2 = index / outputDepth;
        uint x = index2 % filterWidth;
        uint index3 = index2 / filterWidth;
        uint y = index3 % filterHeight;
        uint index4 = index3 / filterHeight;
        uint ci = index4 % convolutionCount;
        uint index5 = index4 / convolutionCount;
        uint k = index5 % depth;
        uint i = index5 / depth;
        uint offsetX = cx[ci];
        uint offsetY = cy[ci];
        /*printf("index:%i di:%i(%i) ci:%i(%i) k:%i(%i) x:%i(%i) y:%i(%i) z:%i(%i) cx:%i cy:%i\n", index,
            i, count,
            ci, convolutionCount,
            k, depth,
            x, filterWidth,
            y, filterHeight,
            z, outputDepth,
            offsetX, offsetY
        );*/
        const float* slice = a + (i * rows * columns * depth) + (k * rows * columns);
        const float* filter = filters + (k * outputDepth * filterWidth * filterHeight) + (z * filterWidth * filterHeight);
        float* output = b + (i * outputRows * outputColumns * outputDepth) + (z * outputRows * outputColumns);
        // The incoming gradient is indexed per stride cell; skip windows
        // whose strided position falls outside the gradient slice.
        uint errorX = offsetX / xStride;
        uint errorY = offsetY / yStride;
        if(errorX < columns && errorY < rows) {
            float error = slice[errorX * rows + errorY];
            uint filterIndex = (filterWidth-x-1) * filterHeight + (filterHeight-y-1);
            uint outputIndex = (offsetX+x) * outputRows + (offsetY+y);
            float val = filter[filterIndex] * error;
            atomicAdd(output + outputIndex, val);
        }
    }
}
// Jacobian of the softmax vector a (length `size`), written column-major
// into b: diagonal entries are a[i]*(1-a[i]), off-diagonal are -a[i]*a[j].
__global__ void SoftmaxDerivative(const float* __restrict a, float* __restrict b, uint size)
{
    const uint rowStep = blockDim.x * gridDim.x;
    const uint colStep = blockDim.y * gridDim.y;
    for (uint row = blockDim.x * blockIdx.x + threadIdx.x; row < size; row += rowStep) {
        for (uint col = blockDim.y * blockIdx.y + threadIdx.y; col < size; col += colStep) {
            const uint idx = col * size + row;
            b[idx] = (row == col) ? a[row] * (1 - a[row]) : -a[row] * a[col];
        }
    }
}
// Mirrors each blockSize-long segment of `a` in place: the element at
// blockOffset is swapped with its mirror at (blockSize - blockOffset - 1).
// NOTE(review): `blockCount` is unused. Also, if `size` covers whole
// segments, each pair is visited from both ends and swapped twice (a net
// no-op), and two threads can race on the same pair; presumably the caller
// sizes the launch so each pair is touched exactly once -- TODO confirm.
__global__ void RotateInPlace(float* __restrict a, uint size, uint blockCount, uint blockSize)
{
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        uint blockIndex = index / blockSize;
        uint blockOffset = index % blockSize;
        uint index1 = blockIndex * blockSize + blockSize - blockOffset - 1;
        uint index2 = blockIndex * blockSize + blockOffset;
        float temp = a[index1];
        a[index1] = a[index2];
        a[index2] = temp;
    }
}
// Max pooling over windows whose top-left corners are listed in cx/cy.
// For each (window ci, channel k, batch item z) the kernel scans the
// filterWidth x filterHeight window and writes the maximum to the output
// cell (aX/xStride, aY/yStride). When saveIndices is nonzero, the flat
// window-local offset of the argmax is stored (as float) for the backward
// pass in TensorReverseMaxPool.
// Note: maxVal starts at 0, but bestOffset < 0 forces the first element to
// be taken, so all-negative windows are still handled correctly.
__global__ void TensorMaxPool(
    uint size,
    const float* __restrict a,
    float* __restrict b,
    float* __restrict indexOffset,
    const float* __restrict cx,
    const float* __restrict cy,
    uint convolutionCount,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint outputRows,
    uint outputColumns,
    uint filterWidth,
    uint filterHeight,
    uint xStride,
    uint yStride,
    uint saveIndices
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        // Decompose (fastest first): ci (window), k (channel), z (batch item).
        uint ci = index % convolutionCount;
        uint index2 = index / convolutionCount;
        uint k = index2 % depth;
        uint z = index2 / depth;
        // Window origin in the source slice and its strided output cell.
        uint aX = cx[ci];
        uint aY = cy[ci];
        uint bX = aX / xStride;
        uint bY = aY / yStride;
        /*printf("index:%i k:%i(%i) z:%i(%i) ax:%i ay:%i bx:%i by:%i\n", index,
            k, depth,
            z, count,
            aX, aY,
            bX, bY
        );*/
        uint targetOffset = (z * outputRows * outputColumns * depth) + (k * outputRows * outputColumns);
        const float* source = a + (z * rows * columns * depth) + (k * rows * columns);
        float* target = b + targetOffset;
        float maxVal = 0;
        int bestOffset = -1;
        uint offset = 0;
        for (uint x = 0; x < filterWidth; x++) {
            for (uint y = 0; y < filterHeight; y++) {
                float val = source[(aX + x) * rows + (aY + y)];
                // bestOffset < 0 means "nothing seen yet": always take the
                // first candidate so negative values are not lost.
                bool isGreater = (bestOffset < 0 || val > maxVal);
                if (isGreater) {
                    bestOffset = offset;
                    maxVal = val;
                }
                //printf("index:%i, x:%i, y:%i val:%f max:%f offset:%i is-greater:%i\n", index, x, y, val, maxVal, bestOffset, isGreater ? 1 : 0);
                ++offset;
            }
        }
        //printf("\tindex:%i i:%i j:%i val:%f\n", index, i, j, maxVal);
        if(saveIndices) {
            float* indices = indexOffset + targetOffset;
            indices[bX * outputRows + bY] = bestOffset;
        }
        target[bX * outputRows + bY] = maxVal;
    }
}
// Backward pass of TensorMaxPool: routes each pooled value/gradient back to
// the input position that produced the maximum. The window-local argmax
// offset saved by the forward pass is decoded as column = offset /
// filterHeight, row = offset % filterHeight; a negative (unset) offset
// falls back to 0.
__global__ void TensorReverseMaxPool(
    uint size,
    const float* __restrict a,
    const float* __restrict indices,
    float* __restrict b,
    uint rows,
    uint columns,
    uint depth,
    uint count,
    uint outputRows,
    uint outputColumns,
    uint filterWidth,
    uint filterHeight,
    uint xStride,
    uint yStride
) {
    for (uint index = blockDim.x * blockIdx.x + threadIdx.x; index < size; index += blockDim.x * gridDim.x) {
        // Decompose (fastest first): i (row), j (col), k (channel), z (batch).
        uint i = index % rows;
        uint index2 = index / rows;
        uint j = index2 % columns;
        uint index3 = index2 / columns;
        uint k = index3 % depth;
        uint z = index3 / depth;
        uint sourceOffset = (z * rows * columns * depth) + (k * rows * columns);
        const float* source = a + sourceOffset;
        const float* indexPtr = indices + sourceOffset;
        float* target = b + (z * outputRows * outputColumns * depth) + (k * outputRows * outputColumns);
        uint sourceIndex = j * rows + i;
        float val = source[sourceIndex];
        int offset = indexPtr[sourceIndex];
        if(offset < 0)
            offset = 0;
        // Undo the stride and add the decoded in-window argmax position.
        uint targetX = j * xStride + (offset / filterHeight);
        uint targetY = i * yStride + (offset % filterHeight);
        /*printf("index:%i s:%i i:%i(%i) j:%i(%i) k:%i(%i) z:%i(%i) val:%f offset:%i tx:%i ty:%i\n",
            index, xStride, yStride
            i, outputRows,
            j, outputColumns,
            k, depth,
            z, count,
            val, offset,
            targetX, targetY
        );*/
        target[targetX * outputRows + targetY] = val;
    }
}
// Pairwise distance accumulation between two sets of vectors:
// a holds `columns` vectors and b holds `rows` vectors, each of length
// `size`. Per-component terms are atomically summed into the rows x columns
// (column-major) matrix c, so c is presumably zeroed by the caller -- TODO
// confirm. distanceMetric selects the term: 0 = squared euclidean,
// 1 = dot product (cosine numerator), 2 = manhattan.
__global__ void CalculateDistances(
    const float** __restrict a,
    const float** __restrict b,
    float* __restrict c,
    uint rows,
    uint columns,
    uint size,
    uint distanceMetric
) {
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
        for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
            for (uint k = blockDim.z * blockIdx.z + threadIdx.z; k < rows; k += blockDim.z * gridDim.z) {
                float aVal = a[j][i];
                float bVal = b[k][i];
                float output = 0;
                if(distanceMetric == 0) { // euclidean
                    float diff = aVal - bVal;
                    output = diff * diff;
                }else if(distanceMetric == 1) { // cosine
                    output = aVal * bVal;
                }else if(distanceMetric == 2) { // manhattan
                    output = abs(aVal - bVal);
                }
                float* outputPtr = c + (j * rows + k);
                atomicAdd(outputPtr, output);
            }
        }
    }
}
// Pairwise cosine-distance building blocks for two sets of vectors
// (a: `columns` vectors, b: `rows` vectors, length `size` each). For every
// (j, k) pair the three partial sums a.a, a.b and b.b are atomically
// accumulated into the rows x columns matrices aa/ab/bb; the caller
// presumably zeroes them and computes ab / sqrt(aa*bb) afterwards -- TODO
// confirm.
__global__ void MultiCosineDistance(
    const float** __restrict a,
    const float** __restrict b,
    float* __restrict aa,
    float* __restrict ab,
    float* __restrict bb,
    uint rows,
    uint columns,
    uint size
) {
    for (uint i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
        for (uint j = blockDim.y * blockIdx.y + threadIdx.y; j < columns; j += blockDim.y * gridDim.y) {
            for (uint k = blockDim.z * blockIdx.z + threadIdx.z; k < rows; k += blockDim.z * gridDim.z) {
                float aVal = a[j][i];
                float bVal = b[k][i];
                uint offset = j * rows + k;
                atomicAdd(aa + offset, aVal * aVal);
                atomicAdd(ab + offset, aVal * bVal);
                atomicAdd(bb + offset, bVal * bVal);
            }
        }
    }
}
} | the_stack |
namespace AggMIS {
namespace MIS {
namespace Kernels {
// Fills `randoms` with pseudo-random values using a Tausworthe generator
// step (S1=13, S2=19, S3=12; see Initialize for the same recurrence).
// Each thread starts from its own seed and writes every 32768-th element;
// the hard-coded step of 32768 assumes a launch of exactly 32768 threads
// total, matching the <<<128, 256>>> launch in RandomizedMIS below.
__global__ void GenerateRandoms(int size,
        int iterations,
        unsigned int *randoms,
        unsigned int *seeds) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int z = seeds[idx];
    int offset = idx;
    int step = 32768;
    for (int i = 0; i < iterations; i++)
    {
        if (offset < size)
        {
            unsigned int b = (((z << 13) ^ z) >> 19);
            z = (((z & UINT_MAX) << 12) ^ b);
            randoms[offset] = z;
            offset += step;
        }
    }
}
// Seeds the very first round of the randomized MIS: every node becomes its
// own origin, is marked undecided (-1), and draws its bestSeen value from
// the pre-generated randoms, reduced into [0, 1000000).
__global__ void PreInitialize(int size,
        unsigned int *randoms,
        int *bestSeen,
        int *origin,
        int *mis) {
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= size)
        return;
    origin[node] = node;                     // each node champions itself
    mis[node] = -1;                          // undecided
    bestSeen[node] = randoms[node] % 1000000;
}
// Re-seeds per-node random values and resets state for another round of the
// randomized MIS. bestSeen becomes the sentinel 1000001 for nodes already
// in the MIS, 0 for excluded nodes, and a fresh random in [0, 1000000) for
// undecided ones. Thread 0 clears the completion flag.
__global__ void Initialize(int size,
        unsigned int *randoms,
        int *bestSeen,
        int *origin,
        int *mis,
        int *incomplete) {
    const int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node < size)
    {
        // Tausworthe step with S1=13, S2=19, S3=12, M=UINT_MAX hard-coded.
        unsigned int state = randoms[node];
        unsigned int feedback = (((state << 13) ^ state) >> 19);
        state = (((state & UINT_MAX) << 12) ^ feedback);
        // Every node starts the round as its own origin.
        origin[node] = node;
        // Undecided -> fresh random; in MIS -> 1000001; excluded -> 0.
        const int status = mis[node];
        if (status == -1)
            bestSeen[node] = state % 1000000;
        else
            bestSeen[node] = (status == 1) ? 1000001 : 0;
        // Persist the advanced generator state for the next round.
        randoms[node] = state;
    }
    // Reset the incomplete flag exactly once per launch.
    if (node == 0)
        incomplete[0] = 0;
}
// One propagation step of the randomized MIS: each node takes the maximum
// (bestSeen, origin) pair over itself and its neighbors, double-buffered
// via the In/Out arrays. Nodes already carrying the in-MIS sentinel
// (1000001) skip the neighbor scan. Ties on equal positive values are
// broken toward the larger origin index so all nodes converge on the same
// winner.
__global__ void Iterate(int size,
        int *originIn,
        int *originOut,
        int *bestSeenIn,
        int *bestSeenOut,
        int *adjIndexes,
        int *adjacency) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        int bestSeen = bestSeenIn[idx];
        int origin = originIn[idx];
        if (bestSeen < 1000001)
        {
            // CSR-style neighbor range for this node.
            int start = adjIndexes[idx];
            int end = adjIndexes[idx + 1];
            // Look at all the neighbors and take best values:
            for (int i = start; i < end; i++)
            {
                int neighbor = adjacency[i];
                int challenger = bestSeenIn[neighbor];
                int challengerOrigin = originIn[neighbor];
                // Tie-break equal values by the larger origin index.
                if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin)
                {
                    origin = challengerOrigin;
                }
                if (challenger > bestSeen)
                {
                    bestSeen = challenger;
                    origin = challengerOrigin;
                }
            }
        }
        // Write out the best values found
        bestSeenOut[idx] = bestSeen;
        originOut[idx] = origin;
    }
}
// Final propagation step of the randomized MIS. Performs the same neighbor
// scan as Iterate, then classifies each node:
//   mis =  1  the node's own value won (origin == idx)  -> in the MIS
//   mis =  0  it saw the in-MIS sentinel 1000001        -> dominated
//   mis = -1  otherwise                                 -> still undecided
// Any undecided node raises the `incomplete` flag so the host loops again.
__global__ void Finalize(int size,
        int *originIn,
        int *originOut,
        int *bestSeenIn,
        int *bestSeenOut,
        int *adjIndexes,
        int *adjacency,
        int *mis,
        int *incomplete) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        int bestSeen = bestSeenIn[idx];
        int origin = originIn[idx];
        // Nodes already carrying the sentinel are settled; skip the scan.
        if (bestSeen < 1000001)
        {
            int start = adjIndexes[idx];
            int end = adjIndexes[idx + 1];
            // Look at all the neighbors and take best values. `challenger`
            // is int (not unsigned) for consistency with Iterate; values
            // are bounded by 1000001 so int is safe, and this avoids a
            // mixed signed/unsigned comparison against bestSeen.
            for (int i = start; i < end; i++)
            {
                int neighbor = adjacency[i];
                int challenger = bestSeenIn[neighbor];
                int challengerOrigin = originIn[neighbor];
                // Tie-break equal values by the larger origin index.
                if (challenger > 0 && challenger == bestSeen && challengerOrigin > origin)
                {
                    origin = challengerOrigin;
                }
                if (challenger > bestSeen)
                {
                    bestSeen = challenger;
                    origin = challengerOrigin;
                }
            }
        }
        // Write new MIS status
        int misStatus = -1;
        if (origin == idx)
            misStatus = 1;
        else if (bestSeen == 1000001)
            misStatus = 0;
        mis[idx] = misStatus;
        // If this node is still unassigned mark the run as incomplete.
        if (misStatus == -1)
        {
            incomplete[0] = 1;
        }
    }
}
}
// Computes a randomized (k-)MIS of the graph on the GPU and returns a
// heap-allocated assignment vector (1 = in MIS, 0 = not, caller owns it).
// Runs k Iterate/Finalize rounds, then repeats with fresh randoms until no
// node is left undecided.
// Fixes vs. previous revision: seeds_h was leaked (new[] without delete[]),
// two cudaEvent_t objects were created but never used or destroyed, and the
// deprecated cudaThreadSynchronize() was used.
AggMIS::Types::IntVector_d* RandomizedMIS(int k, AggMIS::Types::Graph_d &graph) {
    // Setting to prefer the cache:
    cudaFuncSetCacheConfig(Kernels::Initialize, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(Kernels::Iterate, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(Kernels::Finalize, cudaFuncCachePreferL1);
    AggMIS::Types::IntVector_d incomplete(1);                                       // Single flag set to 1 by Finalize if any node is still unassigned
    AggMIS::Types::IntVector_d *misIn = new AggMIS::Types::IntVector_d(graph.Size()); // The current MIS assignments 1 = in MIS, 0 = not in MIS, -1 = undetermined
    AggMIS::Types::UIntVector_d randoms(graph.Size());                              // Per-node random generator state
    AggMIS::Types::IntVector_d bestSeenIn(graph.Size());                            // Highest value seen so far, propagated through neighbors (input buffer)
    AggMIS::Types::IntVector_d bestSeenOut(graph.Size());                           // Highest value seen so far (output buffer)
    AggMIS::Types::IntVector_d originIn(graph.Size());                              // The index where the best seen value originated (input buffer)
    AggMIS::Types::IntVector_d originOut(graph.Size());                             // The index where the best seen value originated (output buffer)
    AggMIS::Types::UIntVector_d seeds(32768);                                       // Seeds for the random generation process
    // Getting raw pointers:
    int *incomplete_d = thrust::raw_pointer_cast(&incomplete[0]);
    int *mis_d = thrust::raw_pointer_cast(misIn->data());
    unsigned *randoms_d = thrust::raw_pointer_cast(&randoms[0]);
    unsigned *seeds_d = thrust::raw_pointer_cast(&seeds[0]);
    int *bestSeenIn_d = thrust::raw_pointer_cast(&bestSeenIn[0]);
    int *bestSeenOut_d = thrust::raw_pointer_cast(&bestSeenOut[0]);
    int *originIn_d = thrust::raw_pointer_cast(&originIn[0]);
    int *originOut_d = thrust::raw_pointer_cast(&originOut[0]);
    int *adjIndexes_d = thrust::raw_pointer_cast(graph.indices->data());
    int *adjacency_d = thrust::raw_pointer_cast(graph.adjacency->data());
    // Setting up for kernel launches
    int blockSize = 512;
    int nBlocks = graph.Size() / blockSize + (graph.Size() % blockSize == 0 ? 0 : 1);
    // Seeding the randoms array (std::vector avoids the old new[] leak):
    srand(time(NULL));
    std::vector<unsigned> seeds_h(32768);
    for (int i = 0; i < 32768; i++)
        seeds_h[i] = (unsigned)rand();
    thrust::copy(seeds_h.begin(), seeds_h.end(), seeds.begin());
    // GenerateRandoms launches exactly 128*256 = 32768 threads, matching its
    // hard-coded stride; each thread writes `iterations` elements.
    int iterations = (graph.Size() + 32767) / 32768;
    Kernels::GenerateRandoms << <128, 256 >> > (graph.Size(), iterations, randoms_d, seeds_d);
    // Running the initialize kernel:
    Kernels::PreInitialize << < nBlocks, blockSize >> > (graph.Size(), randoms_d, bestSeenIn_d, originIn_d, mis_d);
    // Running the iteration kernel k times swapping in and out for each iteration
    for (int i = 0; i < k; i++)
    {
        if (i < k - 1)
            Kernels::Iterate << < nBlocks, blockSize >> > (graph.Size(), originIn_d, originOut_d, bestSeenIn_d, bestSeenOut_d, adjIndexes_d, adjacency_d);
        else
            Kernels::Finalize << < nBlocks, blockSize >> > (graph.Size(), originIn_d, originOut_d, bestSeenIn_d, bestSeenOut_d, adjIndexes_d, adjacency_d, mis_d, incomplete_d);
        // Swap the pointers for the next iteration:
        int *temp = originIn_d;
        originIn_d = originOut_d;
        originOut_d = temp;
        int *temp2 = bestSeenIn_d;
        bestSeenIn_d = bestSeenOut_d;
        bestSeenOut_d = temp2;
    }
    // If not complete get new randoms and repeat
    // (cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize)
    cudaDeviceSynchronize();
    int unallocated = incomplete[0];
    while (unallocated == 1)
    {
        // Initialize kernel
        Kernels::Initialize << < nBlocks, blockSize >> > (graph.Size(), randoms_d, bestSeenIn_d, originIn_d, mis_d, incomplete_d);
        // Running the iteration kernel k times swapping in and out for each iteration
        for (int i = 0; i < k; i++)
        {
            if (i < k - 1)
                Kernels::Iterate << < nBlocks, blockSize >> > (graph.Size(), originIn_d, originOut_d, bestSeenIn_d, bestSeenOut_d, adjIndexes_d, adjacency_d);
            else
                Kernels::Finalize << < nBlocks, blockSize >> > (graph.Size(), originIn_d, originOut_d, bestSeenIn_d, bestSeenOut_d, adjIndexes_d, adjacency_d, mis_d, incomplete_d);
            // Swap the pointers for the next iteration:
            int *temp = originIn_d;
            originIn_d = originOut_d;
            originOut_d = temp;
            int *temp2 = bestSeenIn_d;
            bestSeenIn_d = bestSeenOut_d;
            bestSeenOut_d = temp2;
        }
        // Checking if done:
        cudaDeviceSynchronize();
        unallocated = incomplete[0];
    }
    // Deallocating temporary arrays:
    incomplete.resize(0);
    randoms.resize(0);
    bestSeenIn.resize(0);
    bestSeenOut.resize(0);
    originIn.resize(0);
    originOut.resize(0);
    // Returning the mis
    return misIn;
}
// Host-side validator for a k-MIS: checks the assignment vector's size,
// that at least one node is in the set, and -- via a per-node breadth-first
// search bounded at depth k -- that no two MIS nodes are within k hops while
// every non-MIS node is within k hops of some MIS node.
// Fixes vs. previous revision: error-message typo ("and MIS" -> "an MIS")
// and a misleading comment before the in-MIS distance check.
bool IsValidKMIS(AggMIS::Types::IntVector_d& misIn, AggMIS::Types::Graph_d& graphIn, int k, bool verbose) {
    // Copy to host data
    AggMIS::Types::Graph_h graph(graphIn);
    AggMIS::Types::IntVector_h mis(misIn);
    if (verbose)
        printf("Attempting to verify %d-mis properties\n", k);
    // Checking if the mis vector is the right size
    int misSize = mis.size();
    int gSize = graph.indices->size() - 1;
    if (misSize != gSize || misSize != graph.Size())
    {
        if (verbose)
        {
            printf("The given vector is not the correct size to describe an MIS of the input graph! \n");
            printf("\tMIS size: %d Graph Size: %d\n", misSize, gSize);
        }
        return false;
    }
    // Checking that at least one node is marked as in the MIS
    int count = 0;
    for (int i = 0; i < mis.size(); i++)
        if (mis[i] == 1)
            count++;
    if (count == 0)
    {
        if (verbose)
        {
            printf("No nodes are designated as in the MIS!\n");
            // debugHelpers::printVector(mis, std::string("The MIS"));
        }
        return false;
    }
    // Checking that every node not in the MIS has a path of length at most k
    // to a root node, and that every node in the MIS does not.
    for (int i = 0; i < mis.size(); i++)
    {
        // rings[d] holds the nodes at BFS depth d from node i.
        std::vector< std::vector<int> > rings(k + 1);
        int distance = INT_MAX;   // shortest distance from i to any MIS node
        rings[0].push_back(i);
        // Filling in the rings with breadth first search
        for (int j = 1; j < rings.size(); j++)
        {
            for (int root = 0; root < rings[j - 1].size(); root++)
            {
                int rootPoint = rings[j - 1][root];
                int start = graph.indices->data()[rootPoint];
                int end = graph.indices->data()[rootPoint + 1];
                for (int nIt = start; nIt < end; nIt++)
                {
                    int neighbor = graph.adjacency->data()[nIt];
                    // Skip nodes already discovered at a shallower depth.
                    bool visited = false;
                    for (int vLevel = 0; vLevel < j + 1; vLevel++)
                    {
                        for (int vIt = 0; vIt < rings[vLevel].size(); vIt++)
                        {
                            if (rings[vLevel][vIt] == neighbor)
                                visited = true;
                        }
                    }
                    if (!visited)
                    {
                        rings[j].push_back(neighbor);
                        if (mis[neighbor] == 1 && distance > j)
                            distance = j;
                        if (mis[i] == 1 && mis[neighbor] == 1)
                            printf("Found a %d-path from root node %d to root node %d\n", distance, neighbor, i);
                    }
                }
            }
        }
        // A node in the MIS must NOT have a path of length <= k to another root
        if (mis[i] == 1 && distance <= k)
        {
            if (verbose)
            {
                printf("Node %d is in the MIS but has a %d-path to another root node!\n", i, distance);
                // debugHelpers::printVector(mis, std::string("The MIS:"));
                // debugHelpers::printGraph(graph);
            }
            return false;
        }
        // A node outside the MIS must be within k hops of some root
        if (mis[i] == 0 && distance > k)
        {
            if (verbose)
                printf("Node %d is not in the MIS but has no conflicts with MIS nodes!\n", i);
            // debugHelpers::printVector(mis, std::string("The MIS:"));
            // debugHelpers::printGraph(graph);
            return false;
        }
    }
    // Clean up temp memory
    mis.resize(0);
    // If we got this far the MIS must be valid (both maximal and independent)
    return true;
}
}
} | the_stack |
//------------------------------------------------------------------------
// Common op attribute parser.
// Parses the differentiable-attribute op attributes into p. Only consulted
// when enableDA is set (the *Da op variants): reads diff_attrs_all, and
// when that is false, the explicit diff_attrs index list, whose length is
// bounded by the fixed-size p.diffAttrs array (IP_MAX_DIFF_ATTRS).
static __host__ void interpolateParseOpAttributes(OpKernelConstruction* ctx, InterpolateKernelParams& p, bool enableDA)
{
    if (enableDA)
    {
        OP_REQUIRES_OK(ctx, ctx->GetAttr("diff_attrs_all", &p.diff_attrs_all));
        if (!p.diff_attrs_all)
        {
            std::vector<int> diff_attrs_vec;
            OP_REQUIRES_OK(ctx, ctx->GetAttr("diff_attrs", &diff_attrs_vec));
            OP_REQUIRES(ctx, diff_attrs_vec.size() > 0, errors::InvalidArgument("differentiation enabled with empty diff_attrs list"));
            OP_REQUIRES(ctx, diff_attrs_vec.size() <= IP_MAX_DIFF_ATTRS, errors::InvalidArgument("too many entries in diff_attrs list (increase IP_MAX_DIFF_ATTRS)"));
            p.numDiffAttr = diff_attrs_vec.size();
            memcpy(p.diffAttrs, &diff_attrs_vec[0], diff_attrs_vec.size()*sizeof(int));
        }
    }
}
//------------------------------------------------------------------------
// Forward TensorFlow op.
// Forward interpolation op: validates input shapes, fills the kernel
// parameter struct, allocates the outputs and launches the CUDA kernel.
// ENABLE_DA selects the variant that also produces per-pixel attribute
// derivatives (out_da) from the rast_db input.
template <bool ENABLE_DA>
struct InterpolateFwdOp : public OpKernel
{
    InterpolateKernelParams m_attribs;
    InterpolateFwdOp(OpKernelConstruction* ctx): OpKernel(ctx)
    {
        memset(&m_attribs, 0, sizeof(m_attribs));
        interpolateParseOpAttributes(ctx, m_attribs, ENABLE_DA);
    }
    void Compute(OpKernelContext* ctx)
    {
        InterpolateKernelParams& p = m_attribs;
        cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
        // Get input. In the non-DA variant there is no rast_db input, so
        // the reference harmlessly aliases input(2); p.rastDB stays null.
        const Tensor& attr = ctx->input(0);
        const Tensor& rast = ctx->input(1);
        const Tensor& tri = ctx->input(2);
        const Tensor& rast_db = ctx->input(ENABLE_DA ? 3 : 2);
        // Instance rendering mode? (attr is [minibatch, vertices, attrs]
        // rather than [vertices, attrs])
        p.instance_mode = attr.dims() > 2;
        // Extract input dimensions.
        if (p.instance_mode)
        {
            p.numVertices = (attr.dims() > 1) ? attr.dim_size(1) : 0;
            p.numAttr = (attr.dims() > 2) ? attr.dim_size(2) : 0;
        }
        else
        {
            p.numVertices = (attr.dims() > 0) ? attr.dim_size(0) : 0;
            p.numAttr = (attr.dims() > 1) ? attr.dim_size(1) : 0;
        }
        p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
        p.height = (rast.dims() > 1) ? rast.dim_size(1) : 0;
        p.width = (rast.dims() > 2) ? rast.dim_size(2) : 0;
        p.depth = (rast.dims() > 0) ? rast.dim_size(0) : 0;
        // Sanity checks.
        OP_REQUIRES(ctx, rast.dims() == 4 && rast.dim_size(0) > 0 && rast.dim_size(1) > 0 && rast.dim_size(2) > 0 && rast.dim_size(3) == 4, errors::InvalidArgument("rast must have shape[>0, >0, >0, 4]"));
        OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
        OP_REQUIRES(ctx, (attr.dims() == 2 || attr.dims() == 3) && attr.dim_size(0) > 0 && attr.dim_size(1) > 0 && (attr.dims() == 2 || attr.dim_size(2) > 0), errors::InvalidArgument("attr must have shape [>0, >0, >0] or [>0, >0]"));
        if (p.instance_mode)
            OP_REQUIRES(ctx, attr.dim_size(0) == p.depth || attr.dim_size(0) == 1, errors::InvalidArgument("minibatch size mismatch between inputs rast, attr"));
        if (ENABLE_DA)
        {
            OP_REQUIRES(ctx, rast_db.dims() == 4 && rast_db.dim_size(0) > 0 && rast_db.dim_size(1) > 0 && rast_db.dim_size(2) > 0 && rast_db.dim_size(3) == 4, errors::InvalidArgument("rast_db must have shape[>0, >0, >0, 4]"));
            OP_REQUIRES(ctx, rast_db.dim_size(1) == rast.dim_size(1) && rast_db.dim_size(2) == rast.dim_size(2), errors::InvalidArgument("spatial size mismatch between inputs rast and rast_db"));
            OP_REQUIRES(ctx, rast_db.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between inputs rast, rast_db"));
        }
        // All diff attrs mode.
        if (p.diff_attrs_all)
            p.numDiffAttr = p.numAttr;
        // Get input pointers. attrBC flags a single attribute tensor
        // broadcast over the whole minibatch.
        p.attr = attr.flat<float>().data();
        p.rast = rast.flat<float>().data();
        p.tri = tri.flat<int>().data();
        p.attrBC = (p.instance_mode && attr.dim_size(0) == 1) ? 1 : 0;
        p.rastDB = ENABLE_DA ? rast_db.flat<float>().data() : 0;
        // Allocate main output tensor.
        Tensor* out_tensor = NULL;
        TensorShape out_shape;
        out_shape.AddDim(p.depth);
        out_shape.AddDim(p.height);
        out_shape.AddDim(p.width);
        out_shape.AddDim(p.numAttr);
        OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out_tensor));
        p.out = out_tensor->flat<float>().data();
        // Allocate pixel differential output tensor (du/dv pairs; last dim
        // is 0 in the non-DA variant since numDiffAttr stays 0).
        Tensor* out_da_tensor = NULL;
        out_shape.set_dim(3, p.numDiffAttr * 2);
        OP_REQUIRES_OK(ctx, ctx->allocate_output(1, out_shape, &out_da_tensor));
        p.outDA = ENABLE_DA ? out_da_tensor->flat<float>().data() : 0;
        // Verify that buffers are aligned to allow float2/float4 operations.
        OP_REQUIRES(ctx, !((uintptr_t)p.rast & 15), errors::Internal("rast input tensor not aligned to float4"));
        OP_REQUIRES(ctx, !((uintptr_t)p.rastDB & 15), errors::Internal("rast_db input tensor not aligned to float4"));
        if (ENABLE_DA)
            OP_REQUIRES(ctx, !((uintptr_t)p.outDA & 7), errors::Internal("out_da output tensor not aligned to float2"));
        // Choose launch parameters.
        dim3 blockSize = getLaunchBlockSize(IP_FWD_MAX_KERNEL_BLOCK_WIDTH, IP_FWD_MAX_KERNEL_BLOCK_HEIGHT, p.width, p.height);
        dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.depth);
        // Launch CUDA kernel.
        void* args[] = {&p};
        void* func = ENABLE_DA ? (void*)InterpolateFwdKernelDa : (void*)InterpolateFwdKernel;
        OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func, gridSize, blockSize, args, 0, stream));
    }
};
// TensorFlow op registrations. The plain variant ("InterpolateFwd") has no
// rast_db input and no diff-attr attributes; the DA variant
// ("InterpolateFwdDa") adds the rast_db input plus the diff_attrs_all /
// diff_attrs attributes consumed by interpolateParseOpAttributes().
REGISTER_OP("InterpolateFwd")
    .Input      ("attr: float")
    .Input      ("rast: float")
    .Input      ("tri: int32")
    .Output     ("out: float")
    .Output     ("out_da: float");
REGISTER_OP("InterpolateFwdDa")
    .Input      ("attr: float")
    .Input      ("rast: float")
    .Input      ("tri: int32")
    .Input      ("rast_db: float")
    .Output     ("out: float")
    .Output     ("out_da: float")
    .Attr       ("diff_attrs_all: int")
    .Attr       ("diff_attrs: list(int)");
REGISTER_KERNEL_BUILDER(Name("InterpolateFwd") .Device(DEVICE_GPU), InterpolateFwdOp<false>);
REGISTER_KERNEL_BUILDER(Name("InterpolateFwdDa").Device(DEVICE_GPU), InterpolateFwdOp<true>);
//------------------------------------------------------------------------
// Gradient TensorFlow op.
template <bool ENABLE_DA>
struct InterpolateGradOp : public OpKernel
{
InterpolateKernelParams m_attribs;
InterpolateGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
interpolateParseOpAttributes(ctx, m_attribs, ENABLE_DA);
}
void Compute(OpKernelContext* ctx)
{
InterpolateKernelParams& p = m_attribs;
cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
// Get input.
const Tensor& attr = ctx->input(0);
const Tensor& rast = ctx->input(1);
const Tensor& tri = ctx->input(2);
const Tensor& dy = ctx->input(3);
const Tensor& rast_db = ctx->input(ENABLE_DA ? 4 : 3);
const Tensor& dda = ctx->input(ENABLE_DA ? 5 : 3);
// Instance rendering mode?
p.instance_mode = attr.dims() > 2;
// Extract input dimensions.
if (p.instance_mode)
{
p.numVertices = (attr.dims() > 1) ? attr.dim_size(1) : 0;
p.numAttr = (attr.dims() > 2) ? attr.dim_size(2) : 0;
}
else
{
p.numVertices = (attr.dims() > 0) ? attr.dim_size(0) : 0;
p.numAttr = (attr.dims() > 1) ? attr.dim_size(1) : 0;
}
p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
p.depth = (rast.dims() > 0) ? rast.dim_size(0) : 0;
p.height = (rast.dims() > 1) ? rast.dim_size(1) : 0;
p.width = (rast.dims() > 2) ? rast.dim_size(2) : 0;
int attr_depth = p.instance_mode ? (attr.dims() > 1 ? attr.dim_size(0) : 0) : 1;
// Sanity checks.
OP_REQUIRES(ctx, rast.dims() == 4 && rast.dim_size(0) > 0 && rast.dim_size(1) > 0 && rast.dim_size(2) > 0 && rast.dim_size(3) == 4, errors::InvalidArgument("rast must have shape[>0, >0, >0, 4]"));
OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
OP_REQUIRES(ctx, (attr.dims() == 2 || attr.dims() == 3) && attr.dim_size(0) > 0 && attr.dim_size(1) > 0 && (attr.dims() == 2 || attr.dim_size(2) > 0), errors::InvalidArgument("attr must have shape [>0, >0, >0] or [>0, >0]"));
OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) > 0 && dy.dim_size(1) == p.height && dy.dim_size(2) == p.width && dy.dim_size(3) > 0, errors::InvalidArgument("dy must have shape [>0, height, width, >0]"));
OP_REQUIRES(ctx, dy.dim_size(3) == p.numAttr, errors::InvalidArgument("argument count mismatch between inputs dy, attr"));
OP_REQUIRES(ctx, (attr_depth == p.depth || attr_depth == 1) && dy.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between inputs rast, dy, attr"));
if (ENABLE_DA)
{
OP_REQUIRES(ctx, dda.dims() == 4 && dda.dim_size(0) > 0 && dda.dim_size(1) == p.height && dda.dim_size(2) == p.width, errors::InvalidArgument("dda must have shape [>0, height, width, ?]"));
OP_REQUIRES(ctx, dda.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between rast, dda"));
}
// All diff attrs mode.
if (p.diff_attrs_all)
p.numDiffAttr = p.numAttr;
// Get input pointers.
p.attr = attr.flat<float>().data();
p.rast = rast.flat<float>().data();
p.tri = tri.flat<int>().data();
p.dy = dy.flat<float>().data();
p.rastDB = ENABLE_DA ? rast_db.flat<float>().data() : 0;
p.dda = ENABLE_DA ? dda.flat<float>().data() : 0;
p.attrBC = (p.instance_mode && attr_depth < p.depth) ? 1 : 0;
// Allocate attribute gradient output tensor.
Tensor* grad_attr_tensor = NULL;
TensorShape grad_attr_shape;
if (p.instance_mode)
grad_attr_shape.AddDim(attr_depth);
grad_attr_shape.AddDim(p.numVertices);
grad_attr_shape.AddDim(p.numAttr);
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, grad_attr_shape, &grad_attr_tensor));
p.gradAttr = grad_attr_tensor->flat<float>().data();
// Allocate bary gradient output tensor.
Tensor* grad_rast_tensor = NULL;
TensorShape grad_rast_shape;
grad_rast_shape.AddDim(p.depth);
grad_rast_shape.AddDim(p.height);
grad_rast_shape.AddDim(p.width);
grad_rast_shape.AddDim(4);
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, grad_rast_shape, &grad_rast_tensor));
p.gradRaster = grad_rast_tensor->flat<float>().data();
// Allocate bary pixel diff gradient output tensor.
if (ENABLE_DA)
{
Tensor* grad_rast_db_tensor = NULL;
OP_REQUIRES_OK(ctx, ctx->allocate_output(2, grad_rast_shape, &grad_rast_db_tensor));
p.gradRasterDB = grad_rast_db_tensor->flat<float>().data();
}
// Clear attribute gradients.
cudaMemsetAsync(p.gradAttr, 0, attr_depth * p.numVertices * p.numAttr * sizeof(float), stream);
// Verify that buffers are aligned to allow float2/float4 operations.
OP_REQUIRES(ctx, !((uintptr_t)p.rast & 15), errors::Internal("rast input tensor not aligned to float4"));
OP_REQUIRES(ctx, !((uintptr_t)p.gradRaster & 15), errors::Internal("grad_rast output tensor not aligned to float4"));
if (ENABLE_DA)
{
OP_REQUIRES(ctx, !((uintptr_t)p.dda & 7), errors::Internal("dda input tensor not aligned to float2"));
OP_REQUIRES(ctx, !((uintptr_t)p.rastDB & 15), errors::Internal("rast_db input tensor not aligned to float4"));
OP_REQUIRES(ctx, !((uintptr_t)p.gradRasterDB & 15), errors::Internal("grad_rast_db output tensor not aligned to float4"));
}
// Choose launch parameters.
dim3 blockSize = getLaunchBlockSize(IP_GRAD_MAX_KERNEL_BLOCK_WIDTH, IP_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.width, p.height);
dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.depth);
// Launch CUDA kernel.
void* args[] = {&p};
void* func = ENABLE_DA ? (void*)InterpolateGradKernelDa : (void*)InterpolateGradKernel;
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func, gridSize, blockSize, args, 0, stream));
}
};
// TensorFlow op registrations for the attribute-interpolation gradient kernels.
// "InterpolateGrad" is the plain variant; "InterpolateGradDa" additionally
// consumes rast_db/dda and produces grad_rast_db (the ENABLE_DA=true template
// instantiation).
// FIX: removed the stray empty declaration `;` that followed the
// "InterpolateGradDa" registration.
REGISTER_OP("InterpolateGrad")
    .Input      ("attr: float")
    .Input      ("rast: float")
    .Input      ("tri: int32")
    .Input      ("dy: float")
    .Output     ("grad_attr: float")
    .Output     ("grad_rast: float")
    ;

REGISTER_OP("InterpolateGradDa")
    .Input      ("attr: float")
    .Input      ("rast: float")
    .Input      ("tri: int32")
    .Input      ("dy: float")
    .Input      ("rast_db: float")
    .Input      ("dda: float")
    .Output     ("grad_attr: float")
    .Output     ("grad_rast: float")
    .Output     ("grad_rast_db: float")
    .Attr       ("diff_attrs_all: int")
    .Attr       ("diff_attrs: list(int)");

REGISTER_KERNEL_BUILDER(Name("InterpolateGrad") .Device(DEVICE_GPU), InterpolateGradOp<false>);
REGISTER_KERNEL_BUILDER(Name("InterpolateGradDa").Device(DEVICE_GPU), InterpolateGradOp<true>);
//------------------------------------------------------------------------
#include <cutil_inline.h>
#include <cutil_math.h>
#include "GlobalDefines.h"
#include "cuda_SimpleMatrixUtil.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Float4 Color to UCHAR4
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Converts a float4 color image (expected range [0,1]) to uchar4 (range [0,255]).
// One thread per pixel; out-of-image threads return immediately.
__global__ void convertColorFloat4ToUCHAR4Device(uchar4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= width || py >= height) return;

	const unsigned int idx = py*width + px;
	const float4 c = d_input[idx];
	d_output[idx] = make_uchar4(c.x*255.0f, c.y*255.0f, c.z*255.0f, c.w*255.0f);
}
// Host-side launcher for convertColorFloat4ToUCHAR4Device.
// FIX: the original declared the grid dimensions in a variable named
// `blockSize` and the block dimensions in `gridSize` — the reverse of every
// other launcher in this file. The launch itself passed them in the correct
// <<<grid, block>>> order, so behavior is unchanged; only the misleading
// names are corrected.
extern "C" void convertColorFloat4ToUCHAR4(uchar4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);

	convertColorFloat4ToUCHAR4Device << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Color to Intensity
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Converts an RGBA float color to a single luminance value using the
// Rec. 601 weights (0.299 R + 0.587 G + 0.114 B); alpha is ignored.
__global__ void convertColorToIntensityFloatDevice(float* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= width || py >= height) return;

	const unsigned int idx = py*width + px;
	const float4 c = d_input[idx];
	d_output[idx] = 0.299f*c.x + 0.587f*c.y + 0.114f*c.z;
}
// Launches the color-to-luminance kernel over the full image with
// T_PER_BLOCK x T_PER_BLOCK threads per block.
extern "C" void convertColorToIntensityFloat(float* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	convertColorToIntensityFloatDevice << <blocks, threads >> >(d_output, d_input, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Depth to Camera Space Positions
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Back-projects a depth map into camera-space 3D positions using the inverse
// intrinsics matrix. Pixels with invalid depth (MINF) produce MINF positions.
__global__ void convertDepthFloatToCameraSpaceFloat4Device(float4* d_output, float* d_input, float4x4 intrinsicsInv,
	unsigned int width, unsigned int height)
{
	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

	if (x < width && y < height) {
		// Initialize to invalid; overwritten below when depth is valid.
		d_output[y*width+x] = make_float4(MINF, MINF, MINF, MINF);

		float depth = d_input[y*width+x];

		if(depth != MINF)
		{
			// Unproject pixel (x, y) at this depth; the homogeneous input is
			// (x*d, y*d, d, d).
			float4 cameraSpace(intrinsicsInv*make_float4((float)x*depth, (float)y*depth, depth, depth));
			// NOTE(review): the z component written here is cameraSpace.w, not
			// cameraSpace.z. For a standard inverse-intrinsics matrix both map
			// to the depth value, but confirm this is intentional.
			d_output[y*width+x] = make_float4(cameraSpace.x, cameraSpace.y, cameraSpace.w, 1.0f);
		}
	}
}
// Launches the depth back-projection kernel over the full image.
extern "C" void convertDepthFloatToCameraSpaceFloat4(float4* d_output, float* d_input, float4x4 intrinsicsInv, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1)/T_PER_BLOCK, (height + T_PER_BLOCK - 1)/T_PER_BLOCK);

	convertDepthFloatToCameraSpaceFloat4Device<<<blocks, threads>>>(d_output, d_input, intrinsicsInv, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute Normal Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Computes a per-pixel normal map from camera-space positions via central
// differences. Border pixels, or pixels with any invalid 4-neighbor, stay MINF.
__global__ void computeNormalsDevice(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	if(x >= width || y >= height) return;

	// Default: invalid normal.
	d_output[y*width+x] = make_float4(MINF, MINF, MINF, MINF);

	// Central differences need all four neighbors inside the image.
	if(x == 0 || x == width-1 || y == 0 || y == height-1) return;

	const float4 center = d_input[y*width + x];
	const float4 below  = d_input[(y+1)*width + x];
	const float4 right  = d_input[y*width + (x+1)];
	const float4 above  = d_input[(y-1)*width + x];
	const float4 left   = d_input[y*width + (x-1)];

	if(center.x == MINF || below.x == MINF || right.x == MINF || above.x == MINF || left.x == MINF) return;

	// Cross product of the vertical and horizontal central differences,
	// negated on normalization (same orientation as the original code).
	const float3 n = cross(make_float3(below)-make_float3(above), make_float3(right)-make_float3(left));
	const float l = length(n);
	if(l > 0.0f)
	{
		d_output[y*width+x] = make_float4(n/-l, 1.0f);
	}
}
// Launches the normal-map kernel over the full image.
extern "C" void computeNormals(float4* d_output, float4* d_input, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1)/T_PER_BLOCK, (height + T_PER_BLOCK - 1)/T_PER_BLOCK);

	computeNormalsDevice<<<blocks, threads>>>(d_output, d_input, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////
// Transform
/////////////////////////////////////////////
// Rotates every valid normal in-place by the rotation part (upper-left 3x3)
// of `transform`; invalid normals (x == MINF) are left untouched.
__global__ void transformNormalMapDevice(float4* d_normals, unsigned int imageWidth, unsigned int imageHeight, float4x4 transform)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if(px < 0 || px >= imageWidth || py < 0 || py >= imageHeight) return;

	const int idx = py*imageWidth + px;
	const float4 cur = d_normals[idx];
	if(cur.x == MINF) return;

	const float3 rotated = transform.getFloat3x3() * make_float3(cur.x, cur.y, cur.z);
	d_normals[idx] = make_float4(rotated, 0.0f);
}
// Launches the in-place normal-rotation kernel over the full image.
extern "C" void transformNormalMap(float4* d_normals, unsigned int imageWidth, unsigned int imageHeight, float4x4 transform)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((imageWidth + T_PER_BLOCK - 1)/T_PER_BLOCK, (imageHeight + T_PER_BLOCK - 1)/T_PER_BLOCK);

	transformNormalMapDevice<<<blocks, threads>>>(d_normals, imageWidth, imageHeight, transform);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilateral Filter Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Gaussian range weight: exp(-dist^2 / (2*sigma^2)).
// FIX: the original wrote the denominator as 2.0*sigma*sigma, silently
// promoting the whole expression (and the exp call) to double precision on
// the device; using a float literal keeps the computation single-precision.
inline __device__ float gaussR(float sigma, float dist)
{
	return exp(-(dist*dist) / (2.0f*sigma*sigma));
}
// Linearly decaying range weight, clamped to [0, 1].
// BUG FIX: the original clamp was inverted — max(1.0f, min(0.0f, v)) —
// which always evaluates to 1.0f regardless of dist. The intended clamp is
// min(1.0f, max(0.0f, v)). (Also keeps the arithmetic single-precision.)
inline __device__ float linearR(float sigma, float dist)
{
	return min(1.0f, max(0.0f, 1.0f - (dist*dist) / (2.0f*sigma*sigma)));
}
// Gaussian spatial weight for a 2-D integer offset (x, y):
// exp(-(x^2 + y^2) / (2*sigma^2)).
inline __device__ float gaussD(float sigma, int x, int y)
{
	const float distSq = (float)(x*x + y*y);
	return exp(-(distSq / (2.0f*sigma*sigma)));
}
// Gaussian spatial weight for a 1-D integer offset x: exp(-x^2 / (2*sigma^2)).
inline __device__ float gaussD(float sigma, int x)
{
	const float distSq = (float)(x*x);
	return exp(-(distSq / (2.0f*sigma*sigma)));
}
// Bilateral filter on a float depth map. sigmaD controls the spatial falloff
// (kernel radius = ceil(2*sigmaD)), sigmaR the range (depth-difference)
// falloff. Invalid (MINF) samples are skipped; the output is MINF when the
// center is invalid or no valid neighbor contributes.
__global__ void bilateralFilterFloatMapDevice(float* d_output, float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	d_output[y*width + x] = MINF;

	const float depthCenter = d_input[y*width + x];
	if (depthCenter == MINF) return;

	const int radius = (int)ceil(2.0*sigmaD);
	float weightedSum = 0.0f;
	float weightTotal = 0.0f;

	for (int u = x - radius; u <= x + radius; u++)
	{
		for (int v = y - radius; v <= y + radius; v++)
		{
			if (u < 0 || v < 0 || u >= width || v >= height) continue;

			const float d = d_input[v*width + u];
			if (d == MINF) continue;

			// Combined spatial and range weight.
			const float w = gaussD(sigmaD, u - x, v - y)*gaussR(sigmaR, d - depthCenter);
			weightTotal += w;
			weightedSum += w*d;
		}
	}

	if (weightTotal > 0.0f) d_output[y*width + x] = weightedSum / weightTotal;
}
// Launches the float bilateral filter over the full image.
extern "C" void bilateralFilterFloatMap(float* d_output, float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	bilateralFilterFloatMapDevice << <blocks, threads >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilateral Filter Float4 Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilateral filter on a float4 map. The range term uses the Euclidean
// distance between the center and neighbor values; validity is signaled by
// the x component (MINF = invalid). Output is MINF when nothing contributes.
__global__ void bilateralFilterFloat4MapDevice(float4* d_output, float4* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);

	const float4 center = d_input[y*width + x];
	if (center.x == MINF) return;

	const int radius = (int)ceil(2.0*sigmaD);
	float4 acc = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
	float weightTotal = 0.0f;

	for (int u = x - radius; u <= x + radius; u++)
	{
		for (int v = y - radius; v <= y + radius; v++)
		{
			if (u < 0 || v < 0 || u >= width || v >= height) continue;

			const float4 sample = d_input[v*width + u];
			if (sample.x == MINF) continue;

			const float w = gaussD(sigmaD, u - x, v - y)*gaussR(sigmaR, length(sample - center));
			acc += w*sample;
			weightTotal += w;
		}
	}

	if (weightTotal > 0.0f) d_output[y*width + x] = acc / weightTotal;
}
// Launches the float4 bilateral filter over the full image.
extern "C" void bilateralFilterFloat4Map(float4* d_output, float4* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	bilateralFilterFloat4MapDevice << <blocks, threads >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Gauss Filter Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Edge-preserving Gaussian filter on a float depth map: a neighbor only
// contributes when its depth differs from the center by less than sigmaR
// (hard cutoff instead of the bilateral range weight). Output is MINF when
// the center is invalid or nothing contributes.
__global__ void gaussFilterFloatMapDevice(float* d_output, float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	d_output[y*width + x] = MINF;

	const float depthCenter = d_input[y*width + x];
	if (depthCenter == MINF) return;

	const int radius = (int)ceil(2.0*sigmaD);
	float weightedSum = 0.0f;
	float weightTotal = 0.0f;

	for (int u = x - radius; u <= x + radius; u++)
	{
		for (int v = y - radius; v <= y + radius; v++)
		{
			if (u < 0 || v < 0 || u >= width || v >= height) continue;

			const float d = d_input[v*width + u];
			if (d == MINF || fabs(depthCenter - d) >= sigmaR) continue;

			const float w = gaussD(sigmaD, u - x, v - y);
			weightTotal += w;
			weightedSum += w*d;
		}
	}

	if (weightTotal > 0.0f) d_output[y*width + x] = weightedSum / weightTotal;
}
// Launches the float Gauss filter over the full image.
extern "C" void gaussFilterFloatMap(float* d_output, float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	gaussFilterFloatMapDevice << <blocks, threads >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Gauss Filter Float4 Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Edge-preserving Gaussian filter on a float4 map: a neighbor contributes
// only when it is valid and its Euclidean distance to the center value is
// below sigmaR. Output is MINF when nothing contributes.
__global__ void gaussFilterFloat4MapDevice(float4* d_output, float4* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);

	const float4 center = d_input[y*width + x];
	const int radius = (int)ceil(2.0*sigmaD);
	float4 acc = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
	float weightTotal = 0.0f;

	if (center.x != MINF)
	{
		for (int u = x - radius; u <= x + radius; u++)
		{
			for (int v = y - radius; v <= y + radius; v++)
			{
				if (u < 0 || v < 0 || u >= width || v >= height) continue;

				const float4 sample = d_input[v*width + u];
				if (sample.x == MINF) continue;
				if (length(center - sample) >= sigmaR) continue;

				const float w = gaussD(sigmaD, u - x, v - y);
				weightTotal += w;
				acc += w*sample;
			}
		}
	}

	if (weightTotal > 0.0f) d_output[y*width + x] = acc / weightTotal;
}
// Launches the float4 Gauss filter over the full image.
extern "C" void gaussFilterFloat4Map(float4* d_output, float4* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	gaussFilterFloat4MapDevice << <blocks, threads >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute Edge Mask
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Builds a two-plane edge mask from camera-space positions (d_input) and the
// depth map (d_indepth). Plane 0 (offset 0) flags an edge between a pixel
// and its right neighbor; plane 1 (offset width*height) between a pixel and
// its lower neighbor. Mask value: 0 = edge/invalid, 1 = ok.
__global__ void computeMaskEdgeMapFloat4Device(unsigned char* d_output, float4* d_input, float* d_indepth, float threshold, unsigned int width, unsigned int height)
{
	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	// Default: no edge in either plane.
	d_output[y*width + x] = 1;
	d_output[width*height + y*width + x] = 1;

	// Threshold scaled by 3 for the three position coordinates.
	const float thre = threshold *threshold *3.0f;

	// Only interior pixels are tested; border pixels keep the default mask.
	if (x > 0 && y > 0 && x < width - 1 && y < height - 1)
	{
		// Invalid depth: clear this pixel and the neighbors whose edge
		// tests would have involved it (left in plane 0, upper in plane 1).
		if (d_indepth[y*width + x] == MINF)
		{
			d_output[y*width + x] = 0;
			d_output[y*width + x - 1] = 0;
			d_output[width*height + y*width + x] = 0;
			d_output[width*height + (y - 1)*width + x] = 0;
			return;
		}

		const float4& p0 = d_input[(y + 0)*width + (x + 0)];
		const float4& p1 = d_input[(y + 0)*width + (x + 1)];
		const float4& p2 = d_input[(y + 1)*width + (x + 0)];

		// RMS position difference to the right (dU) and lower (dV) neighbor.
		// NOTE(review): dU/dV are distances while `thre` is built from a
		// squared threshold (threshold^2 * 3) — confirm the units match.
		float dU = sqrt(((p1.x - p0.x)*(p1.x - p0.x) + (p1.y - p0.y) * (p1.y - p0.y) + (p1.z - p0.z)*(p1.z - p0.z)) / 3.0f);
		float dV = sqrt(((p2.x - p0.x)*(p2.x - p0.x) + (p2.y - p0.y) * (p2.y - p0.y) + (p2.z - p0.z)*(p2.z - p0.z)) / 3.0f);

		//float dgradx = abs(d_indepth[y*width+x-1] + d_indepth[y*width+x+1] - 2.0f * d_indepth[y*width+x]);
		//float dgrady = abs(d_indepth[y*width+x-width] + d_indepth[y*width+x+width] - 2.0f * d_indepth[y*width+x]);

		if (dU > thre) d_output[y*width + x] = 0;
		if (dV > thre) d_output[width*height + y*width + x] = 0;

		//remove depth discontinuities
		const int r = 1;
		const float thres = 0.01f;	// depth-jump threshold for the 3x3 scan
		const float pCC = d_indepth[y*width + x];
		for (int i = -r; i <= r; i++)
		{
			for (int j = -r; j <= r; j++)
			{
				int currentX = x + j;
				int currentY = y + i;
				if (currentX >= 0 && currentX < width && currentY >= 0 && currentY < height)
				{
					float d = d_indepth[currentY*width + currentX];
					// Any valid neighbor with a large depth jump invalidates
					// this pixel in both planes.
					if (d != MINF && abs(pCC - d) > thres)
					{
						d_output[y*width + x] = 0;
						d_output[width*height + y*width + x] = 0;
						return;
					}
				}
			}
		}
	}
}
// Launches the edge-mask kernel over the full image.
extern "C" void computeMaskEdgeMapFloat4(unsigned char* d_output, float4* d_input, float* d_indepth, float threshold, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	computeMaskEdgeMapFloat4Device << <blocks, threads >> >(d_output, d_input, d_indepth, threshold, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resample Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilinearly interpolates the float map at (x, y), ignoring invalid (MINF)
// samples: each of the four taps contributes only when in-bounds and valid,
// rows are averaged horizontally first, then the valid rows are blended
// vertically. Returns MINF when no valid sample contributes.
// FIX: the corner offsets passed float literals to make_int2 (which takes
// ints); replaced with integer literals — identical values, no narrowing.
// Negative coordinates are rejected by the int-vs-unsigned comparison (a
// negative int converts to a huge unsigned value and fails the `<` test).
inline __device__ float bilinearInterpolationFloat(float x, float y, float* d_input, unsigned int imageWidth, unsigned int imageHeight)
{
	const int2 p00 = make_int2(floor(x), floor(y));
	const int2 p01 = p00 + make_int2(0, 1);
	const int2 p10 = p00 + make_int2(1, 0);
	const int2 p11 = p00 + make_int2(1, 1);

	const float alpha = x - p00.x;	// horizontal blend factor
	const float beta = y - p00.y;	// vertical blend factor

	// Horizontal pass: weighted sums of the top (s0) and bottom (s1) rows.
	float s0 = 0.0f; float w0 = 0.0f;
	if (p00.x < imageWidth && p00.y < imageHeight) { float v00 = d_input[p00.y*imageWidth + p00.x]; if (v00 != MINF) { s0 += (1.0f - alpha)*v00; w0 += (1.0f - alpha); } }
	if (p10.x < imageWidth && p10.y < imageHeight) { float v10 = d_input[p10.y*imageWidth + p10.x]; if (v10 != MINF) { s0 += alpha *v10; w0 += alpha; } }

	float s1 = 0.0f; float w1 = 0.0f;
	if (p01.x < imageWidth && p01.y < imageHeight) { float v01 = d_input[p01.y*imageWidth + p01.x]; if (v01 != MINF) { s1 += (1.0f - alpha)*v01; w1 += (1.0f - alpha); } }
	if (p11.x < imageWidth && p11.y < imageHeight) { float v11 = d_input[p11.y*imageWidth + p11.x]; if (v11 != MINF) { s1 += alpha *v11; w1 += alpha; } }

	// Row averages; only consumed below when the corresponding weight is > 0.
	const float p0 = s0 / w0;
	const float p1 = s1 / w1;

	// Vertical pass over the rows that received at least one valid sample.
	float ss = 0.0f; float ww = 0.0f;
	if (w0 > 0.0f) { ss += (1.0f - beta)*p0; ww += (1.0f - beta); }
	if (w1 > 0.0f) { ss += beta *p1; ww += beta; }

	if (ww > 0.0f) return ss / ww;
	else return MINF;
}
// Resamples a float map to (outputWidth x outputHeight) using bilinear
// interpolation of the input map (endpoint-aligned scaling).
__global__ void resampleFloatMapDevice(float* d_colorMapResampledFloat, float* d_colorMapFloat, unsigned int inputWidth, unsigned int inputHeight, unsigned int outputWidth, unsigned int outputHeight)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= outputWidth || py >= outputHeight) return;

	const float scaleX = (float)(inputWidth - 1) / (float)(outputWidth - 1);
	const float scaleY = (float)(inputHeight - 1) / (float)(outputHeight - 1);

	// Nearest source pixel, used only as a bounds guard.
	const unsigned int srcX = (unsigned int)(px*scaleX + 0.5f);
	const unsigned int srcY = (unsigned int)(py*scaleY + 0.5f);
	if (srcX >= inputWidth || srcY >= inputHeight) return;

	d_colorMapResampledFloat[py*outputWidth + px] = bilinearInterpolationFloat((float)px*scaleX, (float)py*scaleY, d_colorMapFloat, inputWidth, inputHeight);
}
// Launches the float resampling kernel over the OUTPUT image dimensions.
extern "C" void resampleFloatMap(float* d_colorMapResampledFloat, unsigned int outputWidth, unsigned int outputHeight, float* d_colorMapFloat, unsigned int inputWidth, unsigned int inputHeight)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);

	resampleFloatMapDevice << <blocks, threads >> >(d_colorMapResampledFloat, d_colorMapFloat, inputWidth, inputHeight, outputWidth, outputHeight);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resample Float4 Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilinearly interpolates the float4 map at (x, y); a tap is valid only when
// all of its x/y/z components differ from INVALID (MINF). Structure mirrors
// bilinearInterpolationFloat: horizontal pass per row, then vertical blend
// of the valid rows. Returns all-MINF when nothing contributes.
// FIX: the corner offsets passed float literals to make_int2 (which takes
// ints); replaced with integer literals — identical values, no narrowing.
inline __device__ float4 bilinearInterpolationFloat4(float x, float y, float4* d_input, unsigned int imageWidth, unsigned int imageHeight)
{
	const int2 p00 = make_int2(floor(x), floor(y));
	const int2 p01 = p00 + make_int2(0, 1);
	const int2 p10 = p00 + make_int2(1, 0);
	const int2 p11 = p00 + make_int2(1, 1);

	const float alpha = x - p00.x;	// horizontal blend factor
	const float beta = y - p00.y;	// vertical blend factor

	//const float INVALID = 0.0f;
	const float INVALID = MINF;

	// Horizontal pass: weighted sums of the top (s0) and bottom (s1) rows.
	float4 s0 = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float w0 = 0.0f;
	if (p00.x < imageWidth && p00.y < imageHeight) { float4 v00 = d_input[p00.y*imageWidth + p00.x]; if (v00.x != INVALID && v00.y != INVALID && v00.z != INVALID) { s0 += (1.0f - alpha)*v00; w0 += (1.0f - alpha); } }
	if (p10.x < imageWidth && p10.y < imageHeight) { float4 v10 = d_input[p10.y*imageWidth + p10.x]; if (v10.x != INVALID && v10.y != INVALID && v10.z != INVALID) { s0 += alpha *v10; w0 += alpha; } }

	float4 s1 = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float w1 = 0.0f;
	if (p01.x < imageWidth && p01.y < imageHeight) { float4 v01 = d_input[p01.y*imageWidth + p01.x]; if (v01.x != INVALID && v01.y != INVALID && v01.z != INVALID) { s1 += (1.0f - alpha)*v01; w1 += (1.0f - alpha); } }
	if (p11.x < imageWidth && p11.y < imageHeight) { float4 v11 = d_input[p11.y*imageWidth + p11.x]; if (v11.x != INVALID && v11.y != INVALID && v11.z != INVALID) { s1 += alpha *v11; w1 += alpha; } }

	// Row averages; only consumed below when the corresponding weight is > 0.
	const float4 p0 = s0 / w0;
	const float4 p1 = s1 / w1;

	// Vertical pass over the rows that received at least one valid sample.
	float4 ss = make_float4(0.0f, 0.0f, 0.0f, 0.0f); float ww = 0.0f;
	if (w0 > 0.0f) { ss += (1.0f - beta)*p0; ww += (1.0f - beta); }
	if (w1 > 0.0f) { ss += beta *p1; ww += beta; }

	if (ww > 0.0f) return ss / ww;
	else return make_float4(MINF, MINF, MINF, MINF);
}
// Resamples a float4 map to (outputWidth x outputHeight) using bilinear
// interpolation of the input map (endpoint-aligned scaling).
__global__ void resampleFloat4MapDevice(float4* d_colorMapResampledFloat4, float4* d_colorMapFloat4, unsigned int inputWidth, unsigned int inputHeight, unsigned int outputWidth, unsigned int outputHeight)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= outputWidth || py >= outputHeight) return;

	const float scaleX = (float)(inputWidth - 1) / (float)(outputWidth - 1);
	const float scaleY = (float)(inputHeight - 1) / (float)(outputHeight - 1);

	// Nearest source pixel, used only as a bounds guard.
	const unsigned int srcX = (unsigned int)(px*scaleX + 0.5f);
	const unsigned int srcY = (unsigned int)(py*scaleY + 0.5f);
	if (srcX >= inputWidth || srcY >= inputHeight) return;

	d_colorMapResampledFloat4[py*outputWidth + px] = bilinearInterpolationFloat4(px*scaleX, py*scaleY, d_colorMapFloat4, inputWidth, inputHeight);
}
// Launches the float4 resampling kernel over the OUTPUT image dimensions.
extern "C" void resampleFloat4Map(float4* d_colorMapResampledFloat4, unsigned int outputWidth, unsigned int outputHeight, float4* d_colorMapFloat4, unsigned int inputWidth, unsigned int inputHeight)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);

	resampleFloat4MapDevice << <blocks, threads >> >(d_colorMapResampledFloat4, d_colorMapFloat4, inputWidth, inputHeight, outputWidth, outputHeight);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resample Unsigned Char Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resamples an unsigned-char map to (outputWidth x outputHeight) using
// nearest-neighbour sampling (endpoint-aligned scaling).
__global__ void resampleUCharMapDevice(unsigned char* d_MapResampled, unsigned int outputWidth, unsigned int outputHeight,
	unsigned char* d_Map, unsigned int inputWidth, unsigned int inputHeight)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= outputWidth || py >= outputHeight) return;

	const float scaleX = (float)(inputWidth - 1) / (float)(outputWidth - 1);
	const float scaleY = (float)(inputHeight - 1) / (float)(outputHeight - 1);

	// Round to the nearest source pixel.
	const unsigned int srcX = (unsigned int)(px*scaleX + 0.5f);
	const unsigned int srcY = (unsigned int)(py*scaleY + 0.5f);
	if (srcX >= inputWidth || srcY >= inputHeight) return;

	d_MapResampled[py*outputWidth + px] = d_Map[srcY*inputWidth + srcX];
}
// Launches the uchar resampling kernel over the OUTPUT image dimensions.
extern "C" void resampleUCharMap(unsigned char* d_MapResampled, unsigned int outputWidth, unsigned int outputHeight,
	unsigned char* d_Map, unsigned int inputWidth, unsigned int inputHeight)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);

	resampleUCharMapDevice << <blocks, threads >> >(d_MapResampled, outputWidth, outputHeight, d_Map, inputWidth, inputHeight);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Edge Mask to Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Collapses the two edge-mask planes (offset 0 and offset width*height) into
// one float map: a pixel is 0 if either plane marks an edge, 1 otherwise.
__global__ void convertEdgeMaskToFloatDevice(float* d_output, unsigned char* d_input, unsigned int width, unsigned int height)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;
	if (px >= width || py >= height) return;

	const unsigned int idx = py*width + px;
	d_output[idx] = min(d_input[idx], d_input[width*height + idx]);
}
// Launches the edge-mask conversion kernel over the full image.
extern "C" void convertEdgeMaskToFloat(float* d_output, unsigned char* d_input, unsigned int width, unsigned int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	convertEdgeMaskToFloatDevice << <blocks, threads >> >(d_output, d_input, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Dilate Depth Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Despite the name, this kernel produces a MASK, not a dilated depth map:
// d_output is set to 1.0f where enough neighbors in d_input agree (within
// 5 cm) with the original depth at this pixel, and to MINF otherwise.
__global__ void dilateDepthMapDevice(float* d_output, float* d_input, float* d_inputOrig, int structureSize, int width, int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;

	if (x >= 0 && x < width && y >= 0 && y < height)
	{
		float sum = 0.0f;	// NOTE(review): accumulated but never read below
		float count = 0.0f;

		float oldDepth = d_inputOrig[y*width + x];
		if (oldDepth != MINF && oldDepth != 0)
		{
			// Count valid samples in the (2*structureSize+1)^2 window whose
			// depth lies within 0.05 of the original depth.
			for (int i = -structureSize; i <= structureSize; i++)
			{
				for (int j = -structureSize; j <= structureSize; j++)
				{
					if (x + j >= 0 && x + j < width && y + i >= 0 && y + i < height)
					{
						const float d = d_input[(y + i)*width + (x + j)];

						if (d != MINF && d != 0.0f && fabs(d - oldDepth) < 0.05f)
						{
							sum += d;
							count += 1.0f;
						}
					}
				}
			}
		}

		// Accept the pixel when more than window_area/36 samples agree.
		// NOTE(review): this is integer division — for structureSize <= 2 the
		// threshold evaluates to 0, so a single agreeing sample suffices.
		if (count > ((2 * structureSize + 1)*(2 * structureSize + 1)) / 36) d_output[y*width + x] = 1.0f;
		else d_output[y*width + x] = MINF;
	}
}
// Launches the depth-map mask kernel over the full image.
extern "C" void dilateDepthMapMask(float* d_output, float* d_input, float* d_inputOrig, int structureSize, int width, int height)
{
	const dim3 threads(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 blocks((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	dilateDepthMapDevice << <blocks, threads >> >(d_output, d_input, d_inputOrig, structureSize, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Remove Depth-Map Outliers by Windowed Mean / Standard Deviation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Outlier-rejection kernel: passes a pixel through unless its depth deviates
// from the windowed mean by more than half a standard deviation, in which case
// it is invalidated (MINF). MINF samples inside the window are treated as a
// far-plane depth of 8.0f so they still pull the statistics.
__global__ void removeDevMeanMapMaskDevice(float* d_output, float* d_input, int structureSize, int width, int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;

	if (x >= 0 && x < width && y >= 0 && y < height)
	{
		// BUGFIX: the pass-through store used to execute BEFORE this bounds
		// check, producing out-of-bounds writes for threads past the image
		// edge (the launch grid is rounded up to block-size multiples).
		d_output[y*width + x] = d_input[y*width + x];

		float oldDepth = d_input[y*width + x];

		float mean = 0.0f;
		float meanSquared = 0.0f;
		float count = 0.0f;
		for (int i = -structureSize; i <= structureSize; i++)
		{
			for (int j = -structureSize; j <= structureSize; j++)
			{
				if (x + j >= 0 && x + j < width && y + i >= 0 && y + i < height)
				{
					float depth = d_input[(y + i)*width + (x + j)];
					if (depth == MINF)
					{
						depth = 8.0f;
					}

					if (depth > 0.0f)
					{
						mean += depth;
						meanSquared += depth*depth;
						count += 1.0f;
					}
				}
			}
		}

		// BUGFIX: guard the division — an all-invalid window (count == 0)
		// previously produced NaNs. The NaN comparison happened to leave the
		// pass-through value in place, which the guard preserves explicitly.
		if (count > 0.0f)
		{
			mean /= count;
			meanSquared /= count;

			float stdDev = sqrt(meanSquared - mean*mean);

			if (fabs(oldDepth - mean) > 0.5f*stdDev)// || stdDev> 0.005f)
			{
				d_output[y*width + x] = MINF;
			}
		}
	}
}
// Host launcher for removeDevMeanMapMaskDevice: one thread per pixel.
extern "C" void removeDevMeanMapMask(float* d_output, float* d_input, int structureSize, unsigned int width, unsigned int height)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	removeDevMeanMapMaskDevice << <grid, block >> >(d_output, d_input, structureSize, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
// Nearest neighbour
// Rounds p to the nearest pixel and fetches it without validating the content.
// Returns false when the rounded coordinate lies outside the image.
inline __device__ bool getValueNearestNeighbourNoCheck(const float2& p, const float4* inputMap, unsigned int imageWidth, unsigned int imageHeight, float4* outValue)
{
	const int u = (int)(p.x + 0.5f);
	const int v = (int)(p.y + 0.5f);

	// BUGFIX: the check used '>' which admitted u == imageWidth and
	// v == imageHeight, reading one element past the valid range.
	if (u < 0 || u >= (int)imageWidth || v < 0 || v >= (int)imageHeight) return false;

	*outValue = inputMap[v*imageWidth + u];
	return true;
}
// Nearest-neighbour fetch that additionally rejects invalid (MINF) samples.
inline __device__ bool getValueNearestNeighbour(const float2& p, const float4* inputMap, unsigned int imageWidth, unsigned int imageHeight, float4* outValue)
{
	if (!getValueNearestNeighbourNoCheck(p, inputMap, imageWidth, imageHeight, outValue))
		return false;
	return outValue->x != MINF && outValue->y != MINF && outValue->z != MINF;
}
// Nearest neighbour
// Scalar variant: rounds p to the nearest pixel and fetches it without
// validating the content. Returns false when outside the image.
inline __device__ bool getValueNearestNeighbourFloatNoCheck(const float2& p, const float* inputMap, unsigned int imageWidth, unsigned int imageHeight, float* outValue)
{
	const int u = (int)(p.x + 0.5f);
	const int v = (int)(p.y + 0.5f);

	// BUGFIX: the check used '>' which admitted u == imageWidth and
	// v == imageHeight, reading one element past the valid range.
	if (u < 0 || u >= (int)imageWidth || v < 0 || v >= (int)imageHeight) return false;

	*outValue = inputMap[v*imageWidth + u];
	return true;
}
// Scalar nearest-neighbour fetch that additionally rejects MINF samples.
inline __device__ bool getValueNearestNeighbourFloat(const float2& p, const float* inputMap, unsigned int imageWidth, unsigned int imageHeight, float* outValue)
{
	if (!getValueNearestNeighbourFloatNoCheck(p, inputMap, imageWidth, imageHeight, outValue))
		return false;
	return *outValue != MINF;
}
/////////////////////////////////////////////
// Compute Intensity and Derivatives
/////////////////////////////////////////////
// Per-pixel Sobel filter: writes (intensity, dI/du, dI/dv, 1) for interior
// pixels whose full 3x3 neighbourhood is valid, MINF otherwise.
__global__ void computeIntensityAndDerivativesDevice(float* d_intensity, unsigned int imageWidth, unsigned int imageHeight, float4* d_intensityAndDerivatives)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	const int index = y*imageWidth + x;
	if (x >= 0 && x < imageWidth && y >= 0 && y < imageHeight)
	{
		// Default to invalid; overwritten below only when all taps are valid.
		d_intensityAndDerivatives[index] = make_float4(MINF, MINF, MINF, MINF);
		// Border pixels stay MINF (no full 3x3 neighbourhood).
		if (x > 0 && x < imageWidth - 1 && y > 0 && y < imageHeight - 1)
		{
			// pos<col><row>: col/row offsets in {-1,0,+1}. Any invalid tap
			// aborts the pixel, leaving the MINF default in place.
			float pos00; bool valid00 = getValueNearestNeighbourFloat(make_float2(x - 1, y - 1), d_intensity, imageWidth, imageHeight, &pos00); if (!valid00) return;
			float pos01; bool valid01 = getValueNearestNeighbourFloat(make_float2(x - 1, y - 0), d_intensity, imageWidth, imageHeight, &pos01); if (!valid01) return;
			float pos02; bool valid02 = getValueNearestNeighbourFloat(make_float2(x - 1, y + 1), d_intensity, imageWidth, imageHeight, &pos02); if (!valid02) return;
			float pos10; bool valid10 = getValueNearestNeighbourFloat(make_float2(x - 0, y - 1), d_intensity, imageWidth, imageHeight, &pos10); if (!valid10) return;
			float pos11; bool valid11 = getValueNearestNeighbourFloat(make_float2(x - 0, y - 0), d_intensity, imageWidth, imageHeight, &pos11); if (!valid11) return;
			float pos12; bool valid12 = getValueNearestNeighbourFloat(make_float2(x - 0, y + 1), d_intensity, imageWidth, imageHeight, &pos12); if (!valid12) return;
			float pos20; bool valid20 = getValueNearestNeighbourFloat(make_float2(x + 1, y - 1), d_intensity, imageWidth, imageHeight, &pos20); if (!valid20) return;
			float pos21; bool valid21 = getValueNearestNeighbourFloat(make_float2(x + 1, y - 0), d_intensity, imageWidth, imageHeight, &pos21); if (!valid21) return;
			float pos22; bool valid22 = getValueNearestNeighbourFloat(make_float2(x + 1, y + 1), d_intensity, imageWidth, imageHeight, &pos22); if (!valid22) return;
			// Horizontal Sobel, normalized by 8 (sum of absolute weights).
			float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
				(-2.0f)*pos01 + (2.0f)*pos21 +
				(-1.0f)*pos02 + (1.0f)*pos22;
			resU /= 8.0f;
			// Vertical Sobel, same normalization.
			float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
				(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
			resV /= 8.0f;
			d_intensityAndDerivatives[index] = make_float4(pos11, resU, resV, 1.0f);
		}
	}
}
// Host launcher for computeIntensityAndDerivativesDevice: one thread per pixel.
extern "C" void computeIntensityAndDerivatives(float* d_intensity, unsigned int imageWidth, unsigned int imageHeight, float4* d_intensityAndDerivatives)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((imageWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (imageHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);

	computeIntensityAndDerivativesDevice << <grid, block >> >(d_intensity, imageWidth, imageHeight, d_intensityAndDerivatives);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
/////////////////////////////////////////////
// Compute gradient intensity magnitude
/////////////////////////////////////////////

// Combines horizontal/vertical derivative images into a gradient-magnitude
// image; entries that are invalid or below a small threshold become MINF.
__global__ void computeGradientIntensityMagnitudeDevice(float4* d_inputDU, float4* d_inputDV, unsigned int imageWidth, unsigned int imageHeight, float4* d_output)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;

	// BUGFIX: the kernel previously wrote without any bounds check; since the
	// launch grid is rounded up to block-size multiples, edge threads wrote
	// out of bounds.
	if (x >= (int)imageWidth || y >= (int)imageHeight) return;

	const int index = y*imageWidth + x;
	d_output[index] = make_float4(MINF, MINF, MINF, MINF);

	float4 DU = d_inputDU[index];
	float4 DV = d_inputDV[index];

	if (DU.x != MINF && DV.x != MINF)
	{
		float m = sqrtf(DU.x*DU.x + DV.x*DV.x);
		if (m > 0.005f)
		{
			d_output[index] = make_float4(m, m, m, 1.0f);
		}
	}
}
// Host launcher for computeGradientIntensityMagnitudeDevice.
extern "C" void computeGradientIntensityMagnitude(float4* d_inputDU, float4* d_inputDV, unsigned int imageWidth, unsigned int imageHeight, float4* d_ouput)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((imageWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (imageHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);

	computeGradientIntensityMagnitudeDevice << <grid, block >> >(d_inputDU, d_inputDV, imageWidth, imageHeight, d_ouput);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Erode Depth Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Erosion kernel: invalidates a pixel (MINF) when at least fracReq of its
// (2*structureSize+1)^2 neighbourhood is missing, zero, or farther than
// dThresh from the centre depth; otherwise the depth passes through.
__global__ void erodeDepthMapDevice(float* d_output, float* d_input, int structureSize, int width, int height, float dThresh, float fracReq)
{
	const int px = blockIdx.x*blockDim.x + threadIdx.x;
	const int py = blockIdx.y*blockDim.y + threadIdx.y;

	if (px < 0 || px >= width || py < 0 || py >= height) return;

	const float centerDepth = d_input[py*width + px];

	// Count neighbours that disagree with the centre pixel.
	unsigned int badCount = 0;
	for (int dy = -structureSize; dy <= structureSize; dy++) {
		for (int dx = -structureSize; dx <= structureSize; dx++) {
			const int nx = px + dx;
			const int ny = py + dy;
			if (nx < 0 || nx >= width || ny < 0 || ny >= height) continue;

			const float d = d_input[ny*width + nx];
			if (d == MINF || d == 0.0f || fabs(d - centerDepth) > dThresh)
				badCount++;
		}
	}

	const unsigned int windowSize = (2 * structureSize + 1)*(2 * structureSize + 1);
	if ((float)badCount / (float)windowSize >= fracReq)
		d_output[py*width + px] = MINF;
	else
		d_output[py*width + px] = centerDepth;
}
// Host launcher for erodeDepthMapDevice: one thread per pixel.
extern "C" void erodeDepthMap(float* d_output, float* d_input, int structureSize, unsigned int width, unsigned int height, float dThresh, float fracReq)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	erodeDepthMapDevice << <grid, block >> >(d_output, d_input, structureSize, width, height, dThresh, fracReq);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// filter annotations
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilateral-style relabeling: each pixel votes over its window, weighting each
// neighbour's instance label by spatial (gaussD), depth (gaussR), and
// intensity (gaussR) similarity, then takes the highest-voted label.
// d_vote is a per-pixel scratch histogram of MAX_NUM_LABELS_PER_SCENE floats,
// expected to be zeroed by the caller before launch.
__global__ void filterAnnotations_Kernel(unsigned char* d_outputInstance, const unsigned char* d_inputInstance,
	const float* d_depth, const float* d_intensity, const unsigned char* d_instanceToIdx,
	const unsigned char* d_idxToInstance, float* d_vote,
	int structureSize, int width, int height, float sigmaD, float sigmaR, float intensityScale)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	// Start of this pixel's private slice of the vote histogram.
	const int voteoffset = (y*width + x) * MAX_NUM_LABELS_PER_SCENE;
	if (x >= 0 && x < width && y >= 0 && y < height) {
		d_outputInstance[y*width + x] = d_inputInstance[y*width + x];
		float depthCenter = d_depth[y*width + x];
		float intensityCenter = d_intensity[y*width + x];
		// Accumulate weighted votes from the (2*structureSize+1)^2 window.
		for (int i = -structureSize; i <= structureSize; i++) {
			for (int j = -structureSize; j <= structureSize; j++) {
				if (x + j >= 0 && x + j < width && y + i >= 0 && y + i < height)
				{
					float depth = d_depth[(y + i)*width + (x + j)];
					float intensity = d_intensity[(y + i)*width + (x + j)];
					float intensityOffset = std::abs(intensityCenter - intensity) * intensityScale; //bring intensity to approx scale of depth
					// Depth term falls back to 0 (full weight) when either
					// sample is invalid.
					float depthOffset = 0.0f;
					if (depthCenter != MINF && depth != MINF)
						depthOffset = std::abs(depthCenter - depth);
					const float weight = gaussD(sigmaD, j, i)*gaussR(sigmaR, depthOffset)*gaussR(sigmaR, intensityOffset);
					unsigned char val = d_inputInstance[(y + i)*width + (x + j)];
					unsigned char idx = d_instanceToIdx[val];
					d_vote[voteoffset + idx] += weight;
				}
			} //j
		} //i
		// Argmax over the vote histogram; ties keep the earliest index.
		float maxWeight = 0.0f; unsigned char bestVal = 0; //TODO fix this part...
		for (int i = 0; i < MAX_NUM_LABELS_PER_SCENE; i++) {
			if (d_vote[voteoffset + i] > maxWeight) {
				maxWeight = d_vote[voteoffset + i];
				bestVal = d_idxToInstance[i];
			}
		}
		d_outputInstance[y*width + x] = bestVal;
	} //in bounds of image
}
// Host launcher for filterAnnotations_Kernel. Zeroes the per-pixel vote
// histograms (the kernel assumes a cleared d_vote) and runs one thread per pixel.
extern "C" void filterAnnotations(unsigned char* d_outputInstance, const unsigned char* d_inputInstance,
	const float* d_depth, const float* d_intensity, const unsigned char* d_instanceToIdx,
	const unsigned char* d_idxToInstance, float* d_vote,
	int structureSize, int width, int height, float sigmaD, float sigmaR, float intensityScale)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	cutilSafeCall(cudaMemset(d_vote, 0, sizeof(float)*width*height*MAX_NUM_LABELS_PER_SCENE));
	filterAnnotations_Kernel << <grid, block >> >(d_outputInstance, d_inputInstance,
		d_depth, d_intensity, d_instanceToIdx, d_idxToInstance, d_vote,
		structureSize, width, height, sigmaD, sigmaR, intensityScale);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
// Per-pixel lookup: maps each instance id to its semantic label via
// the d_instanceToLabel table.
__global__ void convertInstanceToLabel_Kernel(unsigned short* d_outputLabel, const unsigned char* d_inputInstance,
	const unsigned short* d_instanceToLabel, unsigned int width, unsigned int height)
{
	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	if (x >= width || y >= height) return;

	const unsigned int idx = y*width + x;
	d_outputLabel[idx] = d_instanceToLabel[d_inputInstance[idx]];
}
// Host launcher for convertInstanceToLabel_Kernel: one thread per pixel.
extern "C" void convertInstanceToLabel(unsigned short* d_outputLabel, const unsigned char* d_inputInstance,
	const unsigned short* d_instanceToLabel, unsigned int width, unsigned int height)
{
	const dim3 block(T_PER_BLOCK, T_PER_BLOCK);
	const dim3 grid((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);

	convertInstanceToLabel_Kernel << <grid, block >> >(d_outputLabel, d_inputInstance,
		d_instanceToLabel, width, height);
#ifdef _DEBUG
	cutilSafeCall(cudaDeviceSynchronize());
	cutilCheckMsg(__FUNCTION__);
#endif
}
#endif // _FILTER_ | the_stack |
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Unsqueeze.h"
#include "Unsqueeze.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
insert a dimension by copying the block n times (where n is the size of the inserted dimension)
>> s - pointer to the source data array
>> blockSize - size of a block
>> totalSize - total size of the blocks (i.e., blockSize * n)
>> t - pointer to the target data array
>> n - number of blocks to copy data
*/
template<class T>
__global__
void KernelUnsqueezeFlat(void * s, int blockSize, int totalSize, void * t, int n)
{
    /* index of data items */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i >= blockSize)
        return;

    T value = ((T*)s)[i];
    T * tData = (T*)t;

    /* BUGFIX: the original __syncthreads() here was removed - this kernel uses
       no shared memory, and a barrier placed after a divergent early return is
       undefined behavior in CUDA. */
    for (int k = i; k < totalSize; k += blockSize)
        tData[k] = value;
}
/*
insert a dimension by copying the block n times (where n is the size of the inserted dimension)
Unrolled-by-two variant: each thread copies a pair of adjacent items,
so blockSize must be even (guaranteed by the caller).
>> s - pointer to the source data array
>> blockSize - size of a block
>> totalSize - total size of the blocks (i.e., blockSize * n)
>> t - pointer to the target data array
>> n - number of blocks to copy data
*/
template<class T>
__global__
void KernelUnsqueezeFlatBigram(void * s, int blockSize, int totalSize, void * t, int n)
{
    /* index of data items (this thread handles items i and i + 1) */
    int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2;

    if (i >= blockSize)
        return;

    T value = ((T*)s)[i];
    T value2 = ((T*)s)[i + 1];
    T * tData = (T*)t;

    /* BUGFIX: the original __syncthreads() here was removed - this kernel uses
       no shared memory, and a barrier placed after a divergent early return is
       undefined behavior in CUDA. */
    for (int k = i; k < totalSize; k += blockSize){
        tData[k] = value;
        tData[k + 1] = value2;
    }
}
/*
insert a dimension by copying the block n times (where n is the size of the inserted dimension)
2D variant: thread x indexes the data item, thread y indexes the copy; the
source values and target offsets are staged in shared memory.
>> s - pointer to the source data array
>> blockSize - size of a block
>> totalSize - total size of the blocks (i.e., blockSize * n)
>> t - pointer to the target data array
>> n - number of blocks to copy data
*/
template<class T>
__global__
void KernelUnsqueezeFlat2D(void * s, int blockSize, int totalSize, void * t, int n)
{
    __shared__ T data[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ int offsets[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    /* index of data items */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* index of the copy */
    int j = blockDim.y * blockIdx.y + threadIdx.y;

    /* BUGFIX: out-of-range threads previously returned BEFORE the
       __syncthreads() below; a barrier in divergent control flow is undefined
       behavior. Every thread now reaches the barrier, and the shared-memory
       fills and the final store are predicated instead. */
    if (threadIdx.y == 0 && i < blockSize)
        data[threadIdx.x] = ((T*)s)[i];
    if (threadIdx.x == 0 && j < n)
        offsets[threadIdx.y] = blockSize * j;

    __syncthreads();

    if (i < blockSize && j < n)
        ((T*)t)[offsets[threadIdx.y] + i] = data[threadIdx.x];
}
/*
insert a dimension by copying the blocks n times (where n is the size of the inserted dimension)
>> s - pointer to the source data array
>> blockSize - size of a block
>> blockNum - number of the blocks
>> totalSize - total size of the copies of a block (i.e., blockSize * n)
>> t - pointer to the target data array
>> n - number of times each block is copied
*/
template<class T>
__global__
void KernelUnsqueeze(void * s, int blockSize, int blockNum, int totalSize, void * t, int n)
{
    /* index of data items */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* block index */
    int j = blockDim.y * blockIdx.y + threadIdx.y;

    if (i >= blockSize || j >= blockNum)
        return;

    MTYPE offset = blockSize * j;
    T value = ((T*)s)[offset + i];
    T * tData = (T*)t + offset * n;

    /* BUGFIX: the original __syncthreads() here was removed - this kernel uses
       no shared memory, and a barrier placed after a divergent early return is
       undefined behavior in CUDA. */
    for (int k = i; k < totalSize; k += blockSize)
        tData[k] = value;
}
/*
insert a dimension by copying the blocks n times (where n is the size of the inserted dimension)
This is the special case where we actually copy a v-dimensional column vector
n times to form a v * n matrix.
>> s - pointer to the source data array
>> rowNum - number of rows (i.e., dimension size of s)
>> colNum - number of columns (i.e., number of copies)
>> t - pointer to the target data array
*/
template<class T>
__global__
void KernelUnsqueezeByCol(void * s, int rowNum, int colNum, void * t)
{
    __shared__ T values[MAX_CUDA_THREAD_NUM_PER_BLOCK];
    __shared__ T * ts[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    /* column index */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* row index */
    int j = blockDim.y * blockIdx.y + threadIdx.y;

    /* BUGFIX: out-of-range threads previously returned BEFORE the
       __syncthreads() below; a barrier in divergent control flow is undefined
       behavior. Every thread now reaches the barrier, and the shared-memory
       fill and the final store are predicated instead. */
    if (threadIdx.x == 0 && j < rowNum) {
        values[threadIdx.y] = ((T*)s)[j];
        ts[threadIdx.y] = (T*)t + colNum * j;
    }

    __syncthreads();

    if (i < colNum && j < rowNum)
        ts[threadIdx.y][i] = values[threadIdx.y];
}
/*
insert a dimension by copying the blocks n times (where n is the size of the inserted dimension)
This is the special case where we actually copy a v-dimensional column vector
n times to form a v * n matrix, and a row is so big that it occupies all the
cuda threads in a block.
>> s - pointer to the source data array
>> rowNum - number of rows (i.e., dimension size of s)
>> colNum - number of columns (i.e., number of copies)
>> t - pointer to the target data array
*/
template<class T>
__global__
void KernelUnsqueezeByColBigRow(void * s, int rowNum, int colNum, void * t)
{
    __shared__ T value;
    __shared__ T * tData;

    /* column index */
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    /* row index */
    int j = blockDim.y * blockIdx.y + threadIdx.y;

    /* BUGFIX: out-of-range threads previously returned BEFORE the
       __syncthreads() below; a barrier in divergent control flow is undefined
       behavior. Every thread now reaches the barrier, and the shared-memory
       fill and the final store are predicated instead. */
    if (threadIdx.x == 0 && j < rowNum) {
        value = ((T*)s)[j];
        tData = (T*)t + colNum * j;
    }

    __syncthreads();

    if (i < colNum && j < rowNum)
        tData[i] = value;
}
/*
insert a dimension by copying the blocks x times (where x is the size of the inserted dimension)
Host-side dispatcher: picks the most suitable kernel depending on where the
dimension is inserted (last dimension vs. interior) and on the block layout,
then dispatches on the data type (float / int / half).
>> a - input tensor
>> b - output tensor
>> dim - where to insert the dimension
>> dSize - size of the newly-inserted dimension
*/
void _CudaUnsqueeze(const XTensor * a, XTensor * b, int dim, int dSize)
{
    int blockSize = 1;
    int blockNumA = 1;
    int blockNumB = 1;

    /* a block is the contiguous chunk of elements behind the insertion point */
    for (int i = dim; i < a->order; i++)
        blockSize *= a->dimSize[i];

    blockNumA = a->unitNum / blockSize;
    blockNumB = b->unitNum / blockSize;

    CheckNTErrors((blockNumA * dSize == blockNumB), "Unmatched tensors!");

    int cudaGrids[3];
    int cudaBlocks[3];

    int devIDBackup = 0;
    ProtectCudaDev(a->devID, devIDBackup);

    /* case 1: insertion at the last dimension - copy a column vector by rows */
    if (dim == b->order - 1) {
        GDevs.GetCudaThread2D(a->devID, dSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks);

        if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) {
            if (cudaBlocks[1] == 1)
                KernelUnsqueezeByColBigRow<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                                   (a->data, blockNumA, dSize, b->data);
            else
                KernelUnsqueezeByCol<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                              (a->data, blockNumA, dSize, b->data);
        }
        else if (a->dataType == X_INT && b->dataType == X_INT) {
            if (cudaBlocks[1] == 1)
                KernelUnsqueezeByColBigRow<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                                 (a->data, blockNumA, dSize, b->data);
            else
                KernelUnsqueezeByCol<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                            (a->data, blockNumA, dSize, b->data);
        }
        else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) {
            if (cudaBlocks[1] == 1)
                KernelUnsqueezeByColBigRow<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                                    (a->data, blockNumA, dSize, b->data);
            else
                KernelUnsqueezeByCol<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                               (a->data, blockNumA, dSize, b->data);
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    /* case 2: several blocks - generic per-block copy */
    else if(blockNumA > 1){
        GDevs.GetCudaThread2D(a->devID, blockSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks);

        if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) {
            KernelUnsqueeze<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                     (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_INT && b->dataType == X_INT) {
            KernelUnsqueeze<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                   (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) {
            KernelUnsqueeze<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                    (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize);
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    /* case 3: a single small block - 2D kernel with shared-memory staging */
    else if(blockNumA == 1 && blockSize < MAX_CUDA_THREAD_NUM_PER_BLOCK){
        GDevs.GetCudaThread2D(a->devID, blockSize, dSize, MAX_CUDA_THREAD_NUM_PER_BLOCK/4, cudaGrids, cudaBlocks);

        if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) {
            KernelUnsqueezeFlat2D<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                           (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_INT && b->dataType == X_INT) {
            KernelUnsqueezeFlat2D<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                         (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) {
            KernelUnsqueezeFlat2D<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> >
                                          (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    /* case 4: a single big even-sized block - two items per thread */
    else if(blockNumA == 1 && blockSize % 2 == 0){
        GDevs.GetCudaThread(a->devID, blockSize/2, cudaGrids, cudaBlocks);

        if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) {
            KernelUnsqueezeFlatBigram<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                               (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_INT && b->dataType == X_INT) {
            KernelUnsqueezeFlatBigram<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                             (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) {
            KernelUnsqueezeFlatBigram<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                              (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    /* case 5: a single big odd-sized block - one item per thread */
    else if(blockNumA == 1){
        GDevs.GetCudaThread(a->devID, blockSize, cudaGrids, cudaBlocks);

        if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) {
            KernelUnsqueezeFlat<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                         (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_INT && b->dataType == X_INT) {
            KernelUnsqueezeFlat<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                       (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) {
            KernelUnsqueezeFlat<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> >
                                        (a->data, blockSize, blockSize * dSize, b->data, dSize);
        }
        else {
            ShowNTErrors("TODO!");
        }
    }
    else{
        ShowNTErrors("Something is wrong!");
    }

    BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) | the_stack |
#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/complex128.h"
#include "paddle/fluid/platform/complex64.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace framework {
// Asynchronously copies `src` into `dst` at `dst_place`, using the stream of
// `ctx` for any GPU transfer. Handles CPU, XPU, CUDA-pinned and CUDA places
// (subject to the build flags). The copy is async on GPU paths: callers must
// synchronize the context before reading the result.
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                const platform::DeviceContext& ctx, Tensor* dst) {
  // Self-copy: materialize a temporary first so src's buffer isn't resized
  // from under the read.
  if (&src == dst) {
    auto src_copy = src;
    TensorCopy(src_copy, dst_place, ctx, dst);
    return;
  }

  VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
          << dst_place;
  src.check_memory_size();

  dst->Resize(src.dims());
  dst->set_layout(src.layout());
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
  // oneDNN tensors due to padding may be of bigger size
  // than numel()*size(type())
  auto dst_ptr =
      src.layout() == DataLayout::kMKLDNN
          ? dst->mutable_data(dst_place, src.type(), src.memory_size())
          : dst->mutable_data(dst_place, src.type());
#else
  auto dst_ptr = dst->mutable_data(dst_place, src.type());
#endif
  // Same buffer and same place: nothing to do.
  if (src_ptr == dst_ptr && src_place == dst_place) {
    VLOG(3) << "Skip copy the same data async from " << src_place << " to "
            << dst_place;
    return;
  }

#ifdef PADDLE_WITH_MKLDNN
  auto size = src.layout() == DataLayout::kMKLDNN
                  ? src.memory_size()
                  : src.numel() * SizeOfType(src.type());
#else
  auto size = src.numel() * SizeOfType(src.type());
#endif

  // Dispatch on (src_place, dst_place). CPU<->CPU first; device-specific
  // combinations follow under their respective build flags.
  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_CUDA
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  // GPU -> CPU: the context must live on the source GPU.
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Source place and context place do not match, source "
                          "place is %s, context place is %s.",
                          src_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
  }
  // CPU -> GPU: the context must live on the destination GPU.
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::Unavailable(
                          "Destination place and context place do not match, "
                          "destination place is %s, context place is %s.",
                          dst_gpu_place, ctx_gpu_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
  }
  // GPU -> CUDA pinned.
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from GPU memory to CUDA Pinned memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The source GPU device and current device context do "
                          "not match. The source GPU device number is %d, but "
                          "device context GPU number is %d.",
                          src_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_cuda_pinned_place, dst_ptr, src_gpu_place, src_ptr, size,
                 stream);
  }
  // CUDA pinned -> GPU.
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cuda_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx_place), true,
                      platform::errors::PreconditionNotMet(
                          "Device context place mismatch. When copying Tensor "
                          "data from CUDA Pinned memory to GPU memory, current "
                          "device context place should be GPU."));
    auto ctx_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx_place);
    PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place,
                      platform::errors::PreconditionNotMet(
                          "The target GPU device and current device context do "
                          "not match. The target GPU device number is %d, but "
                          "device context GPU number is %d.",
                          dst_gpu_place.device, ctx_gpu_place.device));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    memory::Copy(dst_gpu_place, dst_ptr, src_cuda_pinned_place, src_ptr, size,
                 stream);
  }
  // GPU -> GPU: the context must match one of the two devices; the other
  // device is synchronized so the cross-device copy is safe on ctx's stream.
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    auto ctx_place = ctx.GetPlace();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx_place), true,
        platform::errors::PreconditionNotMet(
            "Context place error, excepted GPUPlace, but actually %s.",
            ctx_place));
    auto stream =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
    if (platform::is_same_place(src_place, dst_place)) {
      memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                   stream);
    } else {
      if (platform::is_same_place(ctx_place, src_place)) {
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
      } else if (platform::is_same_place(ctx_place, dst_place)) {
        platform::DeviceContextPool::Instance().Get(src.place())->Wait();
        memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
                     stream);
      } else {
        PADDLE_THROW(platform::errors::Unavailable(
            "Context place dose not match the source and destination place."));
      }
    }
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copying from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}
// Copies `src` into `dst` at `dst_place`, selecting a device context
// automatically: the destination's context when copying onto a GPU,
// otherwise the source's context. Delegates to the context-taking overload.
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
                Tensor* dst) {
  auto& pool = platform::DeviceContextPool::Instance();
  const platform::DeviceContext* dev_ctx =
      platform::is_gpu_place(dst_place) ? pool.Get(dst_place)
                                        : pool.Get(src.place());
  TensorCopy(src, dst_place, *dev_ctx, dst);
}
// Synchronously copies `src` to `dst` on `dst_place`: all device transfers
// below pass a nullptr stream to memory::Copy, i.e. they complete before the
// function returns. Self-copy (`&src == dst`) is handled via a temporary.
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
                    Tensor* dst) {
  if (&src == dst) {
    // Copying a tensor onto itself: stage through a shallow copy so the
    // source buffer survives dst->mutable_data() below.
    auto src_copy = src;
    TensorCopySync(src_copy, dst_place, dst);
    return;
  }
  VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
          << " to " << dst_place;
  src.check_memory_size();
  dst->Resize(src.dims());
  dst->set_layout(src.layout());
#ifdef PADDLE_WITH_MKLDNN
  dst->set_format(src.format());
#endif
  auto src_place = src.place();
  auto src_ptr = src.data<void>();
  auto dst_ptr = dst->mutable_data(dst_place, src.type());
  if (src_ptr == dst_ptr && src_place == dst_place) {
    // Same buffer on the same device: nothing to do.
    VLOG(3) << "Skip copy the same data from " << src_place << " to "
            << dst_place;
    return;
  }
  auto size = src.numel() * SizeOfType(src.type());
  if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
#ifdef PADDLE_WITH_XPU
  // NOTE(review): this XPU chain ends in a catch-all `else`, so it cannot be
  // followed by the CUDA `else if` chain below in the same build; the two
  // flags are presumably mutually exclusive — confirm against the build.
  else if (platform::is_xpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else if (platform::is_cpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  } else if (platform::is_xpu_place(src_place) &&  // NOLINT
             platform::is_xpu_place(dst_place)) {
    if (src_ptr == dst_ptr) {
      VLOG(3) << "Skip copy the same data async from " << src_place << " to "
              << dst_place;
      return;
    }
    memory::Copy(BOOST_GET_CONST(platform::XPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::XPUPlace, src_place), src_ptr, size);
  } else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
#ifdef PADDLE_WITH_CUDA
  // Every GPU-related branch passes nullptr as the stream argument, which
  // makes the copy synchronous.
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CPUPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place), src_ptr,
                 size);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CPUPlace, src_place), src_ptr, size);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cuda_pinned_place(dst_place)) {
    memory::Copy(BOOST_GET_CONST(platform::CUDAPinnedPlace, dst_place), dst_ptr,
                 BOOST_GET_CONST(platform::CUDAPlace, src_place), src_ptr, size,
                 nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_cpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_cpu_place = BOOST_GET_CONST(platform::CPUPlace, dst_place);
    memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_cpu_place = BOOST_GET_CONST(platform::CPUPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_gpu_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
  }
  else if (platform::is_cuda_pinned_place(src_place) &&  // NOLINT
           platform::is_gpu_place(dst_place)) {
    auto src_pinned_place =
        BOOST_GET_CONST(platform::CUDAPinnedPlace, src_place);
    auto dst_gpu_place = BOOST_GET_CONST(platform::CUDAPlace, dst_place);
    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
                 nullptr);
  }
  else {  // NOLINT
    PADDLE_THROW(platform::errors::Unimplemented(
        "Copy from %s to %s is not supported.", src_place, dst_place));
  }
#endif
}
// Dtype visitor used by AnyImpl: for element type T, evaluates
// predicate_(tensor_) and reduces it with .any() into the single-element
// bool tensor out_, on the Eigen device of ctx_.
template <typename Predicate, typename DevCtx>
struct AnyDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenScalar<bool>::From(*out_);
    // return any of predicate_(t) is true.
    o.device(*ctx_.eigen_device()) = predicate_(t).any();
  }
};
// Dispatches AnyDTypeVisitor on the runtime dtype of `tensor`; the visitor
// writes "any element satisfies `predicate`" into the 1-element bool `out`.
template <typename Predicate, typename DevCtx>
inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  AnyDTypeVisitor<Predicate, DevCtx> dtype_visitor(predicate, tensor, ctx, out);
  VisitDataType(tensor.type(), dtype_visitor);
}
// Place visitor that reduces tensor_ with predicate_ to a single host bool
// ("does any element satisfy the predicate?"). Dispatched through
// platform::VisitPlace on the tensor's place.
template <typename Predicate>
class AnyVisitor : public boost::static_visitor<bool> {
 private:
  const framework::Tensor& tensor_;
  Predicate predicate_;

  // Stages the 1-element device result through a CPU tensor and reads it.
  // The Wait() before and after the copy makes the read synchronous.
  bool GetResultHelper(const framework::Tensor& out,
                       const platform::Place& place) const {
    platform::CPUPlace cpu;
    framework::Tensor tmp;
    tmp.Resize({1});
    tmp.mutable_data<bool>(cpu);
    auto ctx = platform::DeviceContextPool::Instance().Get(place);
    ctx->Wait();
    TensorCopy(out, cpu, *ctx, &tmp);
    ctx->Wait();
    return GetResult(tmp, cpu);
  }

 public:
  AnyVisitor(const framework::Tensor& tensor, Predicate predicate)
      : tensor_(tensor), predicate_(std::move(predicate)) {}

  template <typename Place>
  bool operator()(const Place& place) const {
    framework::Tensor out;
    out.Resize({1});
    out.mutable_data<bool>(place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    AnyImpl(predicate_, tensor_, *ctx, &out);
    return this->GetResult(out, place);
  }

  // Device memory: must be copied back to host before reading.
  bool GetResult(const framework::Tensor& out,
                 const platform::XPUPlace& xpu) const {
    return GetResultHelper(out, xpu);
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPlace& gpu) const {
    return GetResultHelper(out, gpu);
  }

  // Host-addressable memory (CPU and CUDA-pinned): read directly.
  bool GetResult(const framework::Tensor& out,
                 const platform::CPUPlace& cpu) const {
    return *out.data<bool>();
  }

  bool GetResult(const framework::Tensor& out,
                 const platform::CUDAPinnedPlace& cpu) const {
    return *out.data<bool>();
  }
};
// Place visitor used by Any(tensor, predicate, out): allocates a 1-element
// bool tensor on `place` and stores "any element satisfies predicate" there
// (result stays on the device; not copied back to host).
template <typename Predicate>
class AnyOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AnyOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      : tensor_(tensor), out_(out), predicate_(std::move(predicate)) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    // The reduced result is a single bool.
    out_->Resize({1});
    out_->mutable_data<bool>(place);
    AnyImpl(predicate_, tensor_, *ctx, out_);
  }
};
// Returns true iff `predicate` holds for at least one element of `tensor`.
template <typename Predicate>
inline bool Any(const framework::Tensor& tensor, Predicate predicate) {
  const AnyVisitor<Predicate> visitor(tensor, predicate);
  auto tensor_place = tensor.place();
  return platform::VisitPlace(tensor_place, visitor);
}
// Stores "any element of `tensor` satisfies `predicate`" into the 1-element
// bool tensor `out`, allocated on the tensor's place.
template <typename Predicate>
inline void Any(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  const AnyOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto tensor_place = tensor.place();
  platform::VisitPlace(tensor_place, visitor);
}
// Dtype visitor used by AllImpl: for element type T, evaluates
// predicate_(tensor_) elementwise (no reduction) into the bool tensor out_,
// which must already have the same number of elements as tensor_.
template <typename Predicate, typename DevCtx>
struct AllDTypeVisitor {
  Predicate predicate_;
  const Tensor& tensor_;
  const DevCtx& ctx_;
  Tensor* out_;

  AllDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx,
                  Tensor* out)
      : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}

  template <typename T>
  void apply() const {
    auto t = EigenVector<T>::Flatten(tensor_);
    auto o = EigenVector<bool>::Flatten(*out_);
    o.device(*ctx_.eigen_device()) = predicate_(t);
  }
};
// Dispatches AllDTypeVisitor on the runtime dtype of `tensor`; the visitor
// writes the elementwise predicate results into the bool tensor `out`.
template <typename Predicate, typename DevCtx>
inline void AllImpl(Predicate predicate, const framework::Tensor& tensor,
                    const DevCtx& ctx, framework::Tensor* out) {
  AllDTypeVisitor<Predicate, DevCtx> dtype_visitor(predicate, tensor, ctx, out);
  VisitDataType(tensor.type(), dtype_visitor);
}
// Place visitor used by All(tensor, predicate, out): resizes `out` to the
// input's shape and fills it with the elementwise predicate results
// (one bool per element, left on the device).
template <typename Predicate>
class AllOutVisitor : public boost::static_visitor<> {
 private:
  const framework::Tensor& tensor_;
  mutable framework::Tensor* out_;
  Predicate predicate_;

 public:
  AllOutVisitor(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out)
      // std::move the predicate for consistency with AnyOutVisitor and to
      // avoid one extra copy of the (possibly stateful) functor.
      : tensor_(tensor), out_(out), predicate_(std::move(predicate)) {}

  template <typename Place>
  void operator()(const Place& place) const {
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place);
    // Output mirrors the input shape: one bool per input element.
    out_->Resize(tensor_.dims());
    out_->mutable_data<bool>(place);
    AllImpl(predicate_, tensor_, *ctx, out_);
  }
};
// Fills `out` (resized to tensor's shape) with the elementwise results of
// `predicate` applied to `tensor`.
template <typename Predicate>
inline void All(const framework::Tensor& tensor, Predicate predicate,
                framework::Tensor* out) {
  const AllOutVisitor<Predicate> visitor(tensor, predicate, out);
  auto tensor_place = tensor.place();
  platform::VisitPlace(tensor_place, visitor);
}
// Predicate for Any/All: maps an Eigen expression to a boolean expression
// that is true where the element is NaN.
struct ContainsNANPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isnan()) {
    // Cast eigen_vector to vector of bool. true if is NaN.
    return eigen_vec.isnan();
  }
};
// True iff any element of `tensor` is NaN.
bool TensorContainsNAN(const framework::Tensor& tensor) {
  return Any(tensor, ContainsNANPredicate{});
}
// Writes a 1-element bool tensor into `out`: whether any element is NaN.
void TensorContainsNAN(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  Any(tensor, ContainsNANPredicate{}, out);
}
// Elementwise variant: `out` gets one bool per element (true where NaN).
void TensorContainsNANV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  All(tensor, ContainsNANPredicate{}, out);
}
// Predicate for Any/All: maps an Eigen expression to a boolean expression
// that is true where the element is +/-Inf.
struct ContainsInfPredicate {
  template <typename T>
  auto operator()(const T& eigen_vec) const
      -> decltype(std::declval<T>().isinf()) {
    // Cast eigen_vector to vector of bool. true if is inf.
    return eigen_vec.isinf();
  }
};
// True iff any element of `tensor` is infinite.
bool TensorContainsInf(const framework::Tensor& tensor) {
  return Any(tensor, ContainsInfPredicate{});
}
// Writes a 1-element bool tensor into `out`: whether any element is Inf.
void TensorContainsInf(const framework::Tensor& tensor,
                       framework::Tensor* out) {
  Any(tensor, ContainsInfPredicate{}, out);
}
// Elementwise variant: `out` gets one bool per element (true where Inf).
void TensorContainsInfV2(const framework::Tensor& tensor,
                         framework::Tensor* out) {
  All(tensor, ContainsInfPredicate{}, out);
}
// NOTE(dzhwinter):
// Isfinite need a AllVisitor to loop through all the elements.
// We choose two cuda call instead of one allvisitor. The AllVisitor
// should be implemented if the performance hurts.
// Returns true iff no element is Inf and no element is NaN (two passes,
// short-circuiting after the Inf check).
bool TensorIsfinite(const framework::Tensor& tensor) {
  if (Any(tensor, ContainsInfPredicate{})) {
    return false;
  }
  return !Any(tensor, ContainsNANPredicate{});
}
#ifdef PADDLE_WITH_CUDA
// Elementwise out[i] = !cmp[i] && !out[i], via CUDA_KERNEL_LOOP.
// Used to combine "has Inf" and "has NaN" flags into "is finite".
template <typename T>
static inline void __global__ BothFalse(const T* cmp, T* out, int element_num) {
  CUDA_KERNEL_LOOP(i, element_num) { out[i] = (!cmp[i]) && (!out[i]); }
}
#endif
// Place visitor computing out_[i] = !in_[i] && !out_[i] elementwise over two
// bool tensors; used by TensorIsfinite(V2) to fuse the Inf and NaN flags.
struct BothFalseVisitor : public boost::static_visitor<> {
  const framework::Tensor& in_;
  mutable framework::Tensor* out_;
  BothFalseVisitor(const framework::Tensor& in, framework::Tensor* out)
      : in_(in), out_(out) {}

  template <typename Place>
  void operator()(const Place& place) const {
    VisitorImpl(place);
  }

  void VisitorImpl(const platform::XPUPlace& xpu) const {
    PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
  }

  void VisitorImpl(const platform::CUDAPlace& gpu) const {
#ifdef PADDLE_WITH_CUDA
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(gpu);
    constexpr int MAX_BLOCK_DIM = 512;
    const int MAX_GRID_DIM = ctx->GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
    int element_num = in_.numel();
    // Block size: 512, or the largest power of two <= element_num.
    // NOTE(review): element_num == 0 would make log2 undefined and launch a
    // zero-sized grid — presumably tensors here are never empty; confirm.
    int block_size = (element_num >= MAX_BLOCK_DIM)
                         ? MAX_BLOCK_DIM
                         : (1 << static_cast<int>(std::log2(element_num)));
    int grid_size = element_num / block_size;
    grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
    BothFalse<bool><<<grid_size, block_size, 0, ctx->stream()>>>(
        in_.data<bool>(), out_->mutable_data<bool>(gpu), element_num);
#endif
  }

  void VisitorImpl(const platform::CPUPlace& cpu) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }

  // Pinned memory is host-addressable, so this mirrors the CPU path.
  void VisitorImpl(
      const platform::CUDAPinnedPlace& cpu /* equals to cpu*/) const {
    int num = in_.numel();
    const bool* in_ptr = in_.data<bool>();
    bool* out_ptr = out_->data<bool>();
    for (int i = 0; i < num; ++i) {
      bool lhs = !in_ptr[i];
      bool rhs = !out_ptr[i];
      out_ptr[i] = lhs && rhs;
    }
  }
};
// out[0] = !contains_inf && !contains_nan (scalar "is finite" flag).
// `out` first receives the NaN flag, then BothFalseVisitor combines it with
// the Inf flag stored in `tmp` via out = !tmp && !out.
void TensorIsfinite(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInf(tensor, &tmp);
  TensorContainsNAN(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
// Elementwise variant of TensorIsfinite: out[i] = !isinf(x[i]) && !isnan(x[i]).
// Same flag-combining scheme as the scalar version above, but per element.
void TensorIsfiniteV2(const framework::Tensor& tensor, framework::Tensor* out) {
  framework::Tensor tmp;
  TensorContainsInfV2(tensor, &tmp);
  TensorContainsNANV2(tensor, out);
  BothFalseVisitor visitor(tmp, out);
  auto place = tensor.place();
  platform::VisitPlace(place, visitor);
}
// Serializes a tensor to `os` in three fields:
//   1) uint32_t format version (currently 0),
//   2) int32_t size + serialized proto::VarType::TensorDesc (dtype + dims),
//   3) raw element data.
// Device-resident data is staged to the host through a 64MB bounce buffer.
void TensorToStream(std::ostream& os, const Tensor& tensor,
                    const platform::DeviceContext& dev_ctx) {
  {  // the 1st field, uint32_t version
    constexpr uint32_t version = 0;
    os.write(reinterpret_cast<const char*>(&version), sizeof(version));
  }
  {  // the 2nd field, tensor description
     // int32_t  size
     // void*    protobuf message
    proto::VarType::TensorDesc desc;
    desc.set_data_type(tensor.type());
    auto dims = framework::vectorize(tensor.dims());
    auto* pb_dims = desc.mutable_dims();
    pb_dims->Resize(static_cast<int>(dims.size()), 0);
    std::copy(dims.begin(), dims.end(), pb_dims->begin());
    int32_t size = desc.ByteSize();
    os.write(reinterpret_cast<const char*>(&size), sizeof(size));
    auto out = desc.SerializeAsString();
    os.write(out.data(), size);
  }
  {  // the 3rd field, tensor data
    uint64_t size = tensor.numel() * framework::SizeOfType(tensor.type());

    auto* data_ptr = tensor.data<void>();
    PADDLE_ENFORCE_LT(size, (std::numeric_limits<std::streamsize>::max)(),
                      platform::errors::ResourceExhausted(
                          "tensor size %d overflow when writing tensor", size));
    if (platform::is_gpu_place(tensor.place())) {
#ifdef PADDLE_WITH_CUDA
      // Copy GPU data to the host in kBufSize chunks; each chunk's copy is
      // followed by a Wait() before the buffer is written out.
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& gpu_dev_ctx =
          static_cast<const platform::CUDADeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::CUDAPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write,
                     gpu_dev_ctx.stream());
        gpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "CUDAPlace is not supported when not compiled with CUDA"));
#endif
    } else if (platform::is_xpu_place(tensor.place())) {
#ifdef PADDLE_WITH_XPU
      // Same chunked staging as the CUDA path, without a stream argument.
      constexpr size_t kBufSize = 1024 * 1024 * 64;  // 64MB
      std::unique_ptr<char[]> buf(new char[kBufSize]);
      auto& xpu_dev_ctx =
          static_cast<const platform::XPUDeviceContext&>(dev_ctx);
      platform::CPUPlace cpu;
      uintptr_t data = reinterpret_cast<uintptr_t>(data_ptr);
      while (size != 0) {
        size_t size_to_write = std::min(kBufSize, static_cast<size_t>(size));
        memory::Copy(cpu, buf.get(),
                     BOOST_GET_CONST(platform::XPUPlace, tensor.place()),
                     reinterpret_cast<const void*>(data), size_to_write);
        xpu_dev_ctx.Wait();
        os.write(buf.get(), size_to_write);
        data += size_to_write;
        size -= size_to_write;
      }
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "XPUPlace is not supported when not compiled with XPU"));
#endif
    } else {
      // Host-resident data: write directly from the tensor buffer.
      os.write(static_cast<const char*>(data_ptr),
               static_cast<std::streamsize>(size));
    }
  }
}
// Dtype functor for VisitDataType: allocates tensor_'s buffer with element
// type T on place_ and returns the raw pointer through *buf_, so the caller
// can read serialized bytes directly into the tensor.
struct DeserializedDataFunctor {
  DeserializedDataFunctor(void** buf, Tensor* tensor,
                          const platform::Place& place)
      : buf_(buf), tensor_(tensor), place_(place) {}

  template <typename T>
  void apply() {
    *buf_ = tensor_->mutable_data<T>(place_);
  }

  void** buf_;      // out: raw pointer to the freshly allocated buffer
  Tensor* tensor_;  // tensor whose storage is (re)allocated
  platform::Place place_;
};
// Deserializes a tensor slice from `is` (format written by TensorToStream):
// verifies the version, parses the TensorDesc proto, skips `seek` elements,
// then reads `shape`'s worth of data. Device targets are filled through a
// CPU staging tensor and TensorCopy.
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx,
                      const size_t& seek, const std::vector<int64_t>& shape) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));

  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));

  proto::VarType::TensorDesc desc;
  {  // int32_t size
     // proto buffer
    int32_t size;
    // NOTE(review): `size` from the stream is used unchecked for allocation;
    // presumably the stream is trusted (written by TensorToStream) — confirm.
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    tensor->Resize(framework::make_ddim(shape));
    // Skip the first `seek` elements of the serialized payload.
    size_t seekg = seek * framework::SizeOfType(desc.data_type());
    is.seekg(seekg, is.cur);

    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace())) {
#if defined PADDLE_WITH_CUDA || defined PADDLE_WITH_XPU
      // Read into a CPU tensor first, then copy to the target device.
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(shape));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      }
#endif
    } else {
      // CPU target: read straight into the tensor's own buffer.
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}
// Deserializes a full tensor from `is` (format written by TensorToStream):
// version check, TensorDesc proto (providing dtype and dims), then raw data.
// Device targets are filled through a CPU staging tensor and TensorCopy.
void TensorFromStream(std::istream& is, Tensor* tensor,
                      const platform::DeviceContext& dev_ctx) {
  uint32_t version;
  is.read(reinterpret_cast<char*>(&version), sizeof(version));
  PADDLE_ENFORCE_EQ(
      version, 0U,
      platform::errors::InvalidArgument(
          "tensor version %u is not supported, Only version 0 is supported",
          version));
  proto::VarType::TensorDesc desc;
  {  // int32_t size
     // proto buffer
    int32_t size;
    // NOTE(review): `size` from the stream is used unchecked for allocation;
    // presumably the stream is trusted (written by TensorToStream) — confirm.
    is.read(reinterpret_cast<char*>(&size), sizeof(size));
    std::unique_ptr<char[]> buf(new char[size]);
    is.read(reinterpret_cast<char*>(buf.get()), size);
    PADDLE_ENFORCE_EQ(
        desc.ParseFromArray(buf.get(), size), true,
        platform::errors::InvalidArgument("Cannot parse tensor desc"));
  }
  {  // read tensor
    // Shape comes from the serialized descriptor (unlike the seek overload,
    // which takes the shape from the caller).
    std::vector<int64_t> dims;
    dims.reserve(static_cast<size_t>(desc.dims().size()));
    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
    tensor->Resize(framework::make_ddim(dims));
    void* buf;
    auto ctx = platform::CPUDeviceContext();
    size_t size = tensor->numel() * framework::SizeOfType(desc.data_type());
    if (platform::is_gpu_place(dev_ctx.GetPlace()) ||
        platform::is_xpu_place(dev_ctx.GetPlace())) {
#if defined PADDLE_WITH_CUDA || defined PADDLE_WITH_XPU
      // Read into a CPU tensor first, then copy to the target device.
      Tensor cpu_tensor;
      cpu_tensor.Resize(framework::make_ddim(dims));
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
      auto dst_place = dev_ctx.GetPlace();
      framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor);
#else
      if (platform::is_gpu_place(dev_ctx.GetPlace())) {
        PADDLE_THROW(platform::errors::Unimplemented(
            "CUDAPlace is not supported when not compiled with CUDA"));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "XPUPlace is not supported when not compiled with XPU"));
      }
#endif
    } else {
      // CPU target: read straight into the tensor's own buffer.
      framework::VisitDataType(
          desc.data_type(),
          DeserializedDataFunctor(&buf, tensor, ctx.GetPlace()));
      is.read(static_cast<char*>(buf), size);
    }
  }
}
// get tensor data point by DLDataType
void* GetDstPtrByDLDataType(DLDataType type, framework::Tensor* dst,
const platform::Place& dst_place) {
// vector types not currently supported
PADDLE_ENFORCE_LE(type.lanes, 1,
platform::errors::Unimplemented(
"Vector type is not supported currently."));
switch (type.bits) {
case 8:
if (type.code == kDLInt)
return static_cast<void*>(dst->mutable_data<int8_t>(dst_place));
if (type.code == kDLUInt)
return static_cast<void*>(dst->mutable_data<uint8_t>(dst_place));
PADDLE_THROW(platform::errors::Unimplemented(
"DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
type.code, type.bits));
case 16:
if (type.code == kDLInt)
return static_cast<void*>(dst->mutable_data<int16_t>(dst_place));
if (type.code == kDLFloat)
return static_cast<void*>(
dst->mutable_data<paddle::platform::float16>(dst_place));
PADDLE_THROW(platform::errors::Unimplemented(
"DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
type.code, type.bits));
case 32:
if (type.code == kDLInt)
return static_cast<void*>(dst->mutable_data<int32_t>(dst_place));
if (type.code == kDLFloat)
return static_cast<void*>(dst->mutable_data<float>(dst_place));
PADDLE_THROW(platform::errors::Unimplemented(
"DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
type.code, type.bits));
case 64:
if (type.code == kDLInt)
return static_cast<void*>(dst->mutable_data<int64_t>(dst_place));
if (type.code == kDLFloat)
return static_cast<void*>(dst->mutable_data<double>(dst_place));
PADDLE_THROW(platform::errors::Unimplemented(
"DLDataType code <%d> is illegal when DLDataType.bits is <%d>.",
type.code, type.bits));
default:
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported DLDataType.bits %d.", type.bits));
}
}
// Imports a DLPack DLTensor into `dst` by copying its data. Supports CPU
// (kDLCPU) and, when built with CUDA, same-device GPU (kDLGPU) sources.
void TensorFromDLPack(const ::DLTensor& dl_tensor, framework::Tensor* dst) {
  platform::CPUPlace dst_place = platform::CPUPlace();
  platform::CPUPlace src_place = platform::CPUPlace();

  std::vector<int64_t> vec;
  std::copy(dl_tensor.shape, dl_tensor.shape + dl_tensor.ndim,
            std::back_inserter(vec));

  framework::DDim vddim = framework::make_ddim(vec);

  dst->Resize(vddim);
  ::DLDataType type = dl_tensor.dtype;
  // Allocates dst storage of the matching element type and returns its buffer.
  void* dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);

  auto src_ptr = static_cast<const void*>(dl_tensor.data);
  // Byte size from element count and bit width (lanes==1 enforced above).
  auto size = paddle::framework::product(vddim) * type.bits / 8;

  if (dl_tensor.ctx.device_type == kDLCPU) {
    memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size);
  }
#ifdef PADDLE_WITH_CUDA
  if (dl_tensor.ctx.device_type == kDLGPU) {
    // GPU source: reallocate dst on the same CUDA device and copy on that
    // device's stream (device-to-device copy).
    platform::CUDAPlace dst_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    platform::CUDAPlace src_place =
        platform::CUDAPlace(dl_tensor.ctx.device_id);
    dst_ptr = GetDstPtrByDLDataType(type, dst, dst_place);
    auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(dst_place);
    memory::Copy(
        dst_place, dst_ptr, src_place, src_ptr, size,
        reinterpret_cast<const platform::CUDADeviceContext&>(*ctx).stream());
  }
#endif
#ifdef PADDLE_WITH_XPU
  // NOTE(review): under PADDLE_WITH_XPU this throws unconditionally, even
  // after a successful CPU copy above — looks suspicious; confirm intent.
  PADDLE_THROW(platform::errors::Unimplemented("XPUPlace is not supported"));
#endif
}
// Placeholder for print-option-aware tensor formatting; currently returns a
// fixed marker string for every element type.
template <typename T>
std::string format_tensor(const framework::Tensor& tensor) {
  // TODO(zhiqiu): use the print option to format tensor.
  return "NOT IMPLEMENTED";
}
// Streams the tensor's elements as " - data: [v0 v1 ...]". The int8/uint8
// branch widens through signed() so the values print as numbers, not chars.
template <typename T>
std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<T>();
  auto element_num = tensor.numel();

  os << " - data: [";
  // Note: int8_t && uint8_t is typedef of char, ostream unable to print properly
  if (typeid(int8_t) == typeid(T) || typeid(uint8_t) == typeid(T)) {
    if (element_num > 0) {
      os << signed(inspect[0]);
      for (int j = 1; j < element_num; ++j) {
        os << " " << signed(inspect[j]);
      }
    }
  } else {
    if (element_num > 0) {
      os << inspect[0];
      for (int j = 1; j < element_num; ++j) {
        os << " " << inspect[j];
      }
    }
  }
  os << "]";
  return os;
}
// Specialization for complex64: prints each element as "real+imagj".
// BUGFIX: the previous code wrapped the float real/imag parts in signed(),
// truncating them to integers. That cast exists in the generic template only
// for the int8/uint8 (char) case and is wrong for floating-point components.
template <>
std::ostream& print_tensor<paddle::platform::complex64>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex64>();
  auto element_num = tensor.numel();

  os << " - data: [";
  if (element_num > 0) {
    os << inspect[0].real << "+" << inspect[0].imag << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << inspect[j].real << "+" << inspect[j].imag << "j";
    }
  }
  os << "]";
  return os;
}
// Specialization for complex128: prints each element as "real+imagj".
// BUGFIX: removed the signed() casts that truncated the double real/imag
// parts to integers (see the complex64 specialization for details).
template <>
std::ostream& print_tensor<paddle::platform::complex128>(
    std::ostream& os, const framework::Tensor& tensor) {
  auto inspect = tensor.data<paddle::platform::complex128>();
  auto element_num = tensor.numel();

  os << " - data: [";
  if (element_num > 0) {
    os << inspect[0].real << "+" << inspect[0].imag << "j";
    for (int j = 1; j < element_num; ++j) {
      os << " " << inspect[j].real << "+" << inspect[j].imag << "j";
    }
  }
  os << "]";
  return os;
}
// Streams a human-readable dump of a tensor: place, shape, layout, dtype and
// data. Non-CPU tensors are copied to the host first (with a Wait) so their
// elements can be read.
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
  os << " - place: " << t.place() << "\n";
  os << " - shape: [" << t.dims() << "]\n";
  os << " - layout: " << DataLayoutToString(t.layout()) << "\n";

  Tensor tensor;
  tensor.Resize(t.dims());
  if (platform::is_cpu_place(t.place())) {
    // Already host-readable: share the buffer, no copy.
    tensor.ShareDataWith(t);
  } else {
    // Stage device data on the CPU and wait for the copy to finish.
    platform::CPUPlace place;
    framework::TensorCopy(t, place, &tensor);
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    auto& dev_ctx = *pool.Get(t.place());
    dev_ctx.Wait();
  }

  // Expands to one runtime type check per registered dtype; the matching
  // branch prints the dtype and the data, then returns.
#define PrintTensorCallback(cpp_type, proto_type) \
  do {                                            \
    if (tensor.type() == proto_type) {            \
      os << "  - dtype: " << proto_type << "\n";  \
      print_tensor<cpp_type>(os, tensor);         \
      return os;                                  \
    }                                             \
  } while (0)

  _ForEachDataType_(PrintTensorCallback);
  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
  return os;
}
} // namespace framework
} // namespace paddle | the_stack |
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "rpsroi_pooling_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Position-sensitive ROI pooling forward over quadrilateral (rotated) ROIs.
// bottom_rois: num_rois x 9 floats = [batch_ind, x1,y1, x2,y2, x3,y3, x4,y4].
// For output element (n, ctop, ph, pw), the ROI quadrilateral is subdivided
// bilinearly into a pooled_height x pooled_width grid of cells; the output is
// the average of all feature-map pixels inside the (possibly rotated) cell,
// read from position-sensitive channel c = (ctop*group_size + ph)*group_size + pw.
// Also writes mapping_channel (c) and areas (pixel count) for the backward pass.
__global__ void RPSROIPoolForward(const int nthreads, const float* bottom_data,
    const float spatial_scale, const int height, const int width,
    const int channels, const int pooled_height, const int pooled_width,
    const int group_size, const int output_dim,
    const float* bottom_rois, float* top_data, int* mapping_channel, float* areas)
{
    CUDA_1D_KERNEL_LOOP(index, nthreads)
    {
        // (n, ctop, ph, pw) is an element in the pooled output
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int ctop = (index / pooled_width / pooled_height) % output_dim;
        int n = index / pooled_width / pooled_height / output_dim;

        // BUGFIX: use local offset pointers instead of mutating the kernel
        // parameters (`bottom_rois += n * 9`, `bottom_data += ...`). Inside
        // the grid-stride CUDA_1D_KERNEL_LOOP those increments accumulated
        // across iterations, so any thread that processed more than one
        // output element read from a wrong address after its first iteration.
        const float* roi = bottom_rois + n * 9;
        int roi_batch_ind = roi[0];
        // ROI corners, rounded then scaled into feature-map coordinates.
        float roi_x1 = static_cast<float>(round(roi[1])) * spatial_scale;
        float roi_y1 = static_cast<float>(round(roi[2])) * spatial_scale;
        float roi_x2 = static_cast<float>(round(roi[3])) * spatial_scale;
        float roi_y2 = static_cast<float>(round(roi[4])) * spatial_scale;
        float roi_x3 = static_cast<float>(round(roi[5])) * spatial_scale;
        float roi_y3 = static_cast<float>(round(roi[6])) * spatial_scale;
        float roi_x4 = static_cast<float>(round(roi[7])) * spatial_scale;
        float roi_y4 = static_cast<float>(round(roi[8])) * spatial_scale;

        // Interpolate along the 1->2 and 4->3 edges to get the pw-th
        // vertical strip of the quadrilateral.
        float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
        float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
        float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
        float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
        float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
        float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
        float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
        float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;

        // Interpolate along the strip's side edges to get the ph-th cell, a
        // quadrilateral with corners (grid_x1,grid_y1)..(grid_x4,grid_y4).
        float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
        float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
        float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
        float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
        float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
        float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
        float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
        float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;

        // Axis-aligned bounding box of the cell, clipped to the feature map.
        int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
        int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
        int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
        int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));

        hstart = min(max(hstart, 0), height);
        hend = min(max(hend, 0), height);
        wstart = min(max(wstart, 0), width);
        wend = min(max(wend, 0), width);
        bool is_empty = (hend <= hstart) || (wend <= wstart);

        // Position-sensitive channel for this (ctop, ph, pw) cell.
        int gw = pw;
        int gh = ph;
        int c = (ctop*group_size + gh)*group_size + gw;

        const float* offset_bottom_data =
            bottom_data + (roi_batch_ind * channels + c) * height * width;

        // Average every bounding-box pixel whose center lies inside the
        // quadrilateral (all four edge cross products non-negative).
        float out_sum = 0;
        float bin_area = 0;
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                int bottom_index = h*width + w;
                float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
                float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
                float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
                float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
                if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
                    out_sum += offset_bottom_data[bottom_index];
                    bin_area += 1;
                }
            }
        }

        top_data[index] = (is_empty || (bin_area ==0)) ? 0. : out_sum/bin_area;
        mapping_channel[index] = c;
        areas[index] = bin_area;
    }
}
int RPSROIPoolForwardLauncher(
const float* bottom_data, const float spatial_scale, const int num_rois, const int height,
const int width, const int channels, const int pooled_height,
const int pooled_width, const float* bottom_rois,
const int group_size, const int output_dim,
float* top_data, int* mapping_channel, float* areas, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
cudaError_t err;
RPSROIPoolForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size, bottom_data, spatial_scale, height, width, channels, pooled_height,
pooled_width, group_size, output_dim, bottom_rois, top_data, mapping_channel, areas);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
// Backward pass of rotated position-sensitive ROI pooling.
// Each thread handles one output bin: it re-derives the bin's quadrilateral
// from the 4 ROI corner points (9 values per ROI: batch index + 4 x/y pairs),
// then scatters top_diff[index] / bin_area onto every input pixel inside
// that quadrilateral via atomicAdd (pixels may be shared between bins).
__global__ void RPSROIPoolBackward(const int nthreads, const float* top_diff,
    const int* mapping_channel, const float* areas, const int num_rois, const float spatial_scale,
    const int height, const int width, const int channels,
    const int pooled_height, const int pooled_width, const int output_dim, float* bottom_diff,
    const float* bottom_rois) {
  CUDA_1D_KERNEL_LOOP(index, nthreads)
  {
    // Decompose the flat output index into (n, ctop, ph, pw); only n, ph, pw
    // are needed here — the bottom channel comes from mapping_channel.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int n = index / pooled_width / pooled_height / output_dim;
    // [start, end) interval for spatial sampling
  bottom_rois += n * 9;
  int roi_batch_ind = bottom_rois[0];
    // Scale the 4 ROI corner points from image to feature-map coordinates.
    float roi_x1 = static_cast<float>(round(bottom_rois[1])) * spatial_scale;
    float roi_y1 = static_cast<float>(round(bottom_rois[2])) * spatial_scale;
    float roi_x2 = static_cast<float>(round(bottom_rois[3])) * spatial_scale;
    float roi_y2 = static_cast<float>(round(bottom_rois[4])) * spatial_scale;
    float roi_x3 = static_cast<float>(round(bottom_rois[5])) * spatial_scale;
    float roi_y3 = static_cast<float>(round(bottom_rois[6])) * spatial_scale;
    float roi_x4 = static_cast<float>(round(bottom_rois[7])) * spatial_scale;
    float roi_y4 = static_cast<float>(round(bottom_rois[8])) * spatial_scale;
    ////////////////////////////////DEBUG////////////////////////////////////
    //cout << "rois: " << roi_x1 << " " << roi_y1 << " " << roi_x2 << " " << roi_y2 << " " << roi_x3 << " " << roi_y3 << " " << roi_x4 << " " << roi_y4 << endl;
    //printf("rois: %f, %f, %f, %f, %f, %f, %f, %f\n", roi_x1, roi_y1, roi_x2, roi_y2, roi_x3, roi_y3, roi_x4, roi_y4);
    // Interpolate along edges 1->2 and 4->3 to get the pw-th column strip
    // of the rotated ROI (anchors are the strip's 4 corners).
    float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
    float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
    float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
    float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
    float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
    float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
    float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
    float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
    ////////////////////////////////DEBUG////////////////////////////////////
    //cout << "anchor: " << anchor_x1 << " " << anchor_y1 << " " << anchor_x2 << " " << anchor_y2 << " " << anchor_x3 << " " << anchor_y3 << " " << anchor_x4 << " " << anchor_y4 <<endl;
    //printf("anchor: %f, %f, %f, %f, %f, %f, %f, %f\n", anchor_x1, anchor_y1, anchor_x2, anchor_y2, anchor_x3, anchor_y3, anchor_x4, anchor_y4);
    // Interpolate the strip vertically to get the (ph, pw) bin quadrilateral
    // (grid_* are its 4 corners, numbered clockwise starting top-left).
    float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
    float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
    float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
    float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
    float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
    float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
    float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
    float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
    ////////////////////////////////DEBUG////////////////////////////////////
    //cout << "grid: " << grid_x1 << " " << grid_y1 << " " << grid_x2 << " " << grid_y2 << " " << grid_x3 << " " << grid_y3 << " " << grid_x4 << " " << grid_y4 << endl;
    //printf("grid: %f, %f, %f, %f, %f, %f, %f, %f\n", grid_x1, grid_y1, grid_x2, grid_y2, grid_x3, grid_y3, grid_x4, grid_y4);
    //printf("min:%f, %f, %f\n", grid_y1, grid_y2, min(grid_y1, grid_y2));
    //printf("min_grid:%f, %f, %f\n", grid_y1, grid_y2, floor(min(grid_y1, grid_y2)));
    // Axis-aligned bounding box of the bin quadrilateral, in pixels.
    int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
    int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
    int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
    int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));
    ///////////////////////////////DEBUG/////////////////////////////////////
    //cout << "start&&end: " << hstart << " " << hend << " " << wstart << " " << wend << endl;
    //printf("start&&end: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
    // Add roi offsets and clip to input boundaries
    hstart = min(max(hstart, 0), height);
    hend = min(max(hend, 0), height);
    wstart = min(max(wstart, 0), width);
    wend = min(max(wend, 0), width);
    bool is_empty = (hend <= hstart) || (wend <= wstart);
    // Compute c at bottom
    // Reuse the channel and bin area recorded by the forward pass.
    int c = mapping_channel[index];
    float* offset_bottom_diff = bottom_diff +
      (roi_batch_ind * channels + c) * height * width;
    float bin_area = areas[index];
    // Gradient is spread uniformly over the pixels counted in the forward
    // average; empty/degenerate bins receive (and propagate) zero.
    float diff_val = (is_empty || (bin_area == 0)) ? 0. : top_diff[index] / bin_area;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        int bottom_index = h*width + w;
        // Cross-product (half-plane) tests: the pixel is inside the
        // quadrilateral iff it lies on the same side of all four edges.
        float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
        float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
        float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
        float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
        if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
          // atomicAdd: multiple bins/ROIs may touch the same input pixel.
          atomicAdd(offset_bottom_diff + bottom_index, diff_val);
        }
      }
    }
  }
}
int RPSROIPoolBackwardLauncher(const float* top_diff, const int* mapping_channel, const float* areas, const int batch_size, const int num_rois, const float spatial_scale, const int channels,
const int height, const int width, const int pooled_width,
const int pooled_height, const int output_dim,
float* bottom_diff, const float* bottom_rois, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
//const int output_size = output_dim * height * width * channels;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
cudaError_t err;
RPSROIPoolBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size, top_diff, mapping_channel, areas, num_rois, spatial_scale, height, width, channels, pooled_height,
pooled_width, output_dim, bottom_diff, bottom_rois);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
#ifdef __cplusplus
}
#endif
// Numeric constants shared by the hydrodynamics kernels below.
#define EPSILON 1e-7              // generic floating-point tolerance
#define THREADS 256               // default CUDA threads-per-block
#define ZERO Real_t(0)
#define HALF Real_t(0.5)
#define ONE Real_t(1.0)
#define THREE Real_t(3.0)
#define FOUR Real_t(4.0)
#define C1 Real_t(.1111111e-36)   // tiny cutoff constant — semantics at use sites; confirm
#define C2 Real_t(.3333333e-18)   // tiny cutoff constant — semantics at use sites; confirm
#define SEVEN Real_t(7.0)
#define EIGHT Real_t(8.0)
#define C1S Real_t(2.0/3.0)
#define SIXTH Real_t(1.0/6.0)
#define PTINY Real_t(1e-36)       // presumably an underflow/divide-by-zero guard — confirm
/*********************************/
/* CUDA kernels */
/*********************************/
__device__
static inline
// Accumulates one quad face's area-weighted normal into the running normals
// of its four corner nodes (all four corners receive the same contribution).
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
                       Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
                       Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
                       Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
                       const Real_t x0, const Real_t y0, const Real_t z0,
                       const Real_t x1, const Real_t y1, const Real_t z1,
                       const Real_t x2, const Real_t y2, const Real_t z2,
                       const Real_t x3, const Real_t y3, const Real_t z3)
{
   // Bisector vectors of the quad face (midpoint diagonals).
   const Real_t bx0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
   const Real_t by0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
   const Real_t bz0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
   const Real_t bx1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
   const Real_t by1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
   const Real_t bz1 = Real_t(0.5) * (z2 + z1 - z3 - z0);

   // Quarter of the face's area vector (cross product of the bisectors).
   const Real_t ax = Real_t(0.25) * (by0 * bz1 - bz0 * by1);
   const Real_t ay = Real_t(0.25) * (bz0 * bx1 - bx0 * bz1);
   const Real_t az = Real_t(0.25) * (bx0 * by1 - by0 * bx1);

   // Add the same contribution to each of the four corner normals.
   Real_t *nx[4] = { normalX0, normalX1, normalX2, normalX3 };
   Real_t *ny[4] = { normalY0, normalY1, normalY2, normalY3 };
   Real_t *nz[4] = { normalZ0, normalZ1, normalZ2, normalZ3 };
   for (Index_t i = 0; i < 4; ++i) {
      *nx[i] += ax;
      *ny[i] += ay;
      *nz[i] += az;
   }
}
/******************************************/
__device__
static inline
// Computes the shape-function derivatives b[3][8] (x/y/z partials for all
// 8 nodes) and the jacobian determinant (element volume) of a hexahedral
// element from its nodal coordinates, evaluated at the element center.
void CalcElemShapeFunctionDerivatives( Real_t const x[],
                                       Real_t const y[],
                                       Real_t const z[],
                                       Real_t b[][8],
                                       Real_t* const volume )
{
  const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
  const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
  const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
  const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
  const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
  const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
  const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
  const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
  const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
  const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
  const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
  const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
  // fj* = jacobian matrix entries (coordinate derivatives w.r.t. the
  // natural coordinates xi/et/ze); cj* = their cofactors.
  Real_t fjxxi, fjxet, fjxze;
  Real_t fjyxi, fjyet, fjyze;
  Real_t fjzxi, fjzet, fjzze;
  Real_t cjxxi, cjxet, cjxze;
  Real_t cjyxi, cjyet, cjyze;
  Real_t cjzxi, cjzet, cjzze;
  fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
  fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
  fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
  fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
  fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
  fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
  fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
  fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
  fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
  /* compute cofactors */
  cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
  cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
  cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
  cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
  cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
  cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
  cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
  cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
  cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
  /* calculate partials :
     this need only be done for l = 0,1,2,3 since , by symmetry ,
     (6,7,4,5) = - (0,1,2,3) .
  */
  b[0][0] = - cjxxi - cjxet - cjxze;
  b[0][1] = cjxxi - cjxet - cjxze;
  b[0][2] = cjxxi + cjxet - cjxze;
  b[0][3] = - cjxxi + cjxet - cjxze;
  b[0][4] = -b[0][2];
  b[0][5] = -b[0][3];
  b[0][6] = -b[0][0];
  b[0][7] = -b[0][1];
  b[1][0] = - cjyxi - cjyet - cjyze;
  b[1][1] = cjyxi - cjyet - cjyze;
  b[1][2] = cjyxi + cjyet - cjyze;
  b[1][3] = - cjyxi + cjyet - cjyze;
  b[1][4] = -b[1][2];
  b[1][5] = -b[1][3];
  b[1][6] = -b[1][0];
  b[1][7] = -b[1][1];
  b[2][0] = - cjzxi - cjzet - cjzze;
  b[2][1] = cjzxi - cjzet - cjzze;
  b[2][2] = cjzxi + cjzet - cjzze;
  b[2][3] = - cjzxi + cjzet - cjzze;
  b[2][4] = -b[2][2];
  b[2][5] = -b[2][3];
  b[2][6] = -b[2][0];
  b[2][7] = -b[2][1];
  /* calculate jacobian determinant (volume) */
  *volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
/******************************************/
__device__
static inline
// Computes per-node surface normals (pfx/pfy/pfz) for a hexahedral element
// by summing the area-weighted normals of the six faces into their corners.
void CalcElemNodeNormals(Real_t pfx[8],
                         Real_t pfy[8],
                         Real_t pfz[8],
                         const Real_t x[8],
                         const Real_t y[8],
                         const Real_t z[8])
{
   // Corner-node numbering of the six hexahedron faces, in the same order
   // as the original unrolled sequence of calls.
   const Index_t face[6][4] = {
      {0, 1, 2, 3},
      {0, 4, 5, 1},
      {1, 5, 6, 2},
      {2, 6, 7, 3},
      {3, 7, 4, 0},
      {4, 7, 6, 5}
   };

   for (Index_t i = 0 ; i < 8 ; ++i) {
      pfx[i] = Real_t(0.0);
      pfy[i] = Real_t(0.0);
      pfz[i] = Real_t(0.0);
   }

   // Accumulate each face's contribution into its four corner nodes.
   for (Index_t f = 0; f < 6; ++f) {
      const Index_t n0 = face[f][0];
      const Index_t n1 = face[f][1];
      const Index_t n2 = face[f][2];
      const Index_t n3 = face[f][3];
      SumElemFaceNormal(&pfx[n0], &pfy[n0], &pfz[n0],
                        &pfx[n1], &pfy[n1], &pfz[n1],
                        &pfx[n2], &pfy[n2], &pfz[n2],
                        &pfx[n3], &pfy[n3], &pfz[n3],
                        x[n0], y[n0], z[n0], x[n1], y[n1], z[n1],
                        x[n2], y[n2], z[n2], x[n3], y[n3], z[n3]);
   }
}
//#pragma omp end declare target
/******************************************/
__device__
static inline
// Converts a diagonal stress state into nodal forces:
// f = -(stress component) * (shape-function derivative), per direction.
void SumElemStressesToNodeForces( const Real_t B[][8],
                                  const Real_t stress_xx,
                                  const Real_t stress_yy,
                                  const Real_t stress_zz,
                                  Real_t fx[], Real_t fy[], Real_t fz[] )
{
   const Real_t* const bx = B[0];
   const Real_t* const by = B[1];
   const Real_t* const bz = B[2];
   for (Index_t n = 0; n < 8; ++n) {
      fx[n] = -( stress_xx * bx[n] );
      fy[n] = -( stress_yy * by[n] );
      fz[n] = -( stress_zz * bz[n] );
   }
}
//#pragma omp end declare target
/******************************************/
/******************************************/
__device__
static inline
// Partial derivative of the hexahedron volume with respect to one corner
// node's coordinates, expressed through the coordinates of its six
// geometrically relevant neighbour nodes (see CalcElemVolumeDerivative for
// which nodes are passed for each corner).
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
             const Real_t x3, const Real_t x4, const Real_t x5,
             const Real_t y0, const Real_t y1, const Real_t y2,
             const Real_t y3, const Real_t y4, const Real_t y5,
             const Real_t z0, const Real_t z1, const Real_t z2,
             const Real_t z3, const Real_t z4, const Real_t z5,
             Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
   const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
   *dvdx =
      (y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
      (y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
      (y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
   *dvdy =
      - (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
      (x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
      (x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
   *dvdz =
      - (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
      (y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
      (y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
   // Common 1/12 scale factor applied once at the end.
   *dvdx *= twelfth;
   *dvdy *= twelfth;
   *dvdz *= twelfth;
}
//#pragma omp end declare target
/******************************************/
__device__
static inline
// Volume derivative of the element with respect to each of its 8 corner
// nodes.  For every corner, VoluDer is fed the six neighbouring nodes
// (in the order VoluDer expects) that determine that corner's derivative.
void CalcElemVolumeDerivative(Real_t dvdx[8],
                              Real_t dvdy[8],
                              Real_t dvdz[8],
                              const Real_t x[8],
                              const Real_t y[8],
                              const Real_t z[8])
{
   // nbr[n] = the six node indices passed to VoluDer for corner n
   // (identical tuples to the original unrolled call sequence).
   const Index_t nbr[8][6] = {
      {1, 2, 3, 4, 5, 7},
      {2, 3, 0, 5, 6, 4},
      {3, 0, 1, 6, 7, 5},
      {0, 1, 2, 7, 4, 6},
      {7, 6, 5, 0, 3, 1},
      {4, 7, 6, 1, 0, 2},
      {5, 4, 7, 2, 1, 3},
      {6, 5, 4, 3, 2, 0}
   };

   for (Index_t n = 0; n < 8; ++n) {
      const Index_t a = nbr[n][0], b = nbr[n][1], c = nbr[n][2];
      const Index_t d = nbr[n][3], e = nbr[n][4], f = nbr[n][5];
      VoluDer(x[a], x[b], x[c], x[d], x[e], x[f],
              y[a], y[b], y[c], y[d], y[e], y[f],
              z[a], z[b], z[c], z[d], z[e], z[f],
              &dvdx[n], &dvdy[n], &dvdz[n]);
   }
}
/******************************************/
__host__ __device__
static inline
// Exact volume of a hexahedron from its 24 nodal coordinates, computed as
// a sum of three scalar triple products of edge-difference vectors, scaled
// by 1/12.  Scalar-argument workhorse behind CalcElemVolume(x[], y[], z[]).
Real_t calcElemVolume( const Real_t x0, const Real_t x1,
               const Real_t x2, const Real_t x3,
               const Real_t x4, const Real_t x5,
               const Real_t x6, const Real_t x7,
               const Real_t y0, const Real_t y1,
               const Real_t y2, const Real_t y3,
               const Real_t y4, const Real_t y5,
               const Real_t y6, const Real_t y7,
               const Real_t z0, const Real_t z1,
               const Real_t z2, const Real_t z3,
               const Real_t z4, const Real_t z5,
               const Real_t z6, const Real_t z7 )
{
  Real_t twelveth = Real_t(1.0)/Real_t(12.0);
  // d<ab> = node a minus node b, per coordinate.
  Real_t dx61 = x6 - x1;
  Real_t dy61 = y6 - y1;
  Real_t dz61 = z6 - z1;
  Real_t dx70 = x7 - x0;
  Real_t dy70 = y7 - y0;
  Real_t dz70 = z7 - z0;
  Real_t dx63 = x6 - x3;
  Real_t dy63 = y6 - y3;
  Real_t dz63 = z6 - z3;
  Real_t dx20 = x2 - x0;
  Real_t dy20 = y2 - y0;
  Real_t dz20 = z2 - z0;
  Real_t dx50 = x5 - x0;
  Real_t dy50 = y5 - y0;
  Real_t dz50 = z5 - z0;
  Real_t dx64 = x6 - x4;
  Real_t dy64 = y6 - y4;
  Real_t dz64 = z6 - z4;
  Real_t dx31 = x3 - x1;
  Real_t dy31 = y3 - y1;
  Real_t dz31 = z3 - z1;
  Real_t dx72 = x7 - x2;
  Real_t dy72 = y7 - y2;
  Real_t dz72 = z7 - z2;
  Real_t dx43 = x4 - x3;
  Real_t dy43 = y4 - y3;
  Real_t dz43 = z4 - z3;
  Real_t dx57 = x5 - x7;
  Real_t dy57 = y5 - y7;
  Real_t dz57 = z5 - z7;
  Real_t dx14 = x1 - x4;
  Real_t dy14 = y1 - y4;
  Real_t dz14 = z1 - z4;
  Real_t dx25 = x2 - x5;
  Real_t dy25 = y2 - y5;
  Real_t dz25 = z2 - z5;
// Scalar triple product a . (b x c); local helper macro, undefined below.
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
   ((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
  Real_t volume =
    TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
       dy31 + dy72, dy63, dy20,
       dz31 + dz72, dz63, dz20) +
    TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
       dy43 + dy57, dy64, dy70,
       dz43 + dz57, dz64, dz70) +
    TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
       dy14 + dy25, dy61, dy50,
       dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
  volume *= twelveth;
  return volume ;
}
__host__ __device__
// Array-based convenience wrapper: unpacks the eight nodal coordinates and
// forwards them to the scalar-argument implementation.
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
  return calcElemVolume(x[0], x[1], x[2], x[3],
                        x[4], x[5], x[6], x[7],
                        y[0], y[1], y[2], y[3],
                        y[4], y[5], y[6], y[7],
                        z[0], z[1], z[2], z[3],
                        z[4], z[5], z[6], z[7]);
}
__device__
static inline
// Squared-area metric for a quad face: |f|^2 |g|^2 - (f.g)^2, where f and g
// are the face's diagonal difference/sum vectors (Lagrange identity for
// |f x g|^2).  Used by CalcElemCharacteristicLength.
Real_t AreaFace( const Real_t x0, const Real_t x1,
                 const Real_t x2, const Real_t x3,
                 const Real_t y0, const Real_t y1,
                 const Real_t y2, const Real_t y3,
                 const Real_t z0, const Real_t z1,
                 const Real_t z2, const Real_t z3)
{
   const Real_t fx = (x2 - x0) - (x3 - x1);
   const Real_t fy = (y2 - y0) - (y3 - y1);
   const Real_t fz = (z2 - z0) - (z3 - z1);
   const Real_t gx = (x2 - x0) + (x3 - x1);
   const Real_t gy = (y2 - y0) + (y3 - y1);
   const Real_t gz = (z2 - z0) + (z3 - z1);

   const Real_t ff = fx * fx + fy * fy + fz * fz;
   const Real_t gg = gx * gx + gy * gy + gz * gz;
   const Real_t fg = fx * gx + fy * gy + fz * gz;
   return ff * gg - fg * fg;
}
//#pragma omp end declare target
/******************************************/
// NOTE(review): this function-like macro shadows std::max for the rest of
// the translation unit.  It uses a GNU statement expression so each
// argument is evaluated exactly once (no double-evaluation of side effects).
#define max(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a > _b ? _a : _b; })
__device__
static inline
// Characteristic length of a hexahedral element:
// 4 * volume / sqrt(largest face squared-area metric over all six faces).
Real_t CalcElemCharacteristicLength( const Real_t x[8],
                                     const Real_t y[8],
                                     const Real_t z[8],
                                     const Real_t volume)
{
   // The six faces of the hexahedron by corner-node index, visited in the
   // same order as the original unrolled sequence of AreaFace calls.
   const Index_t face[6][4] = {
      {0, 1, 2, 3},
      {4, 5, 6, 7},
      {0, 1, 5, 4},
      {1, 2, 6, 5},
      {2, 3, 7, 6},
      {3, 0, 4, 7}
   };

   Real_t charLength = Real_t(0.0);
   for (Index_t f = 0; f < 6; ++f) {
      const Index_t n0 = face[f][0];
      const Index_t n1 = face[f][1];
      const Index_t n2 = face[f][2];
      const Index_t n3 = face[f][3];
      const Real_t a = AreaFace(x[n0], x[n1], x[n2], x[n3],
                                y[n0], y[n1], y[n2], y[n3],
                                z[n0], z[n1], z[n2], z[n3]);
      charLength = max(a, charLength);
   }

   charLength = Real_t(4.0) * volume / sqrt(charLength);
   return charLength;
}
//#pragma omp end declare target
/******************************************/
__device__
static inline
// Velocity gradient (strain-rate) components of an element from nodal
// velocities and shape-function derivatives b.  Output layout:
//   d[0..2] = diagonal components (dxx, dyy, dzz),
//   d[3..5] = symmetrized shear components (yz, xz, xy).
// Only derivative entries 0..3 are used: by symmetry the node pairs
// (0,6), (1,7), (2,4), (3,5) have opposite-signed derivatives, hence the
// velocity differences below.
void CalcElemVelocityGradient( const Real_t* const xvel,
                               const Real_t* const yvel,
                               const Real_t* const zvel,
                               const Real_t b[][8],
                               const Real_t detJ,
                               Real_t* const d )
{
  const Real_t inv_detJ = Real_t(1.0) / detJ ;
  Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
  const Real_t* const pfx = b[0];
  const Real_t* const pfy = b[1];
  const Real_t* const pfz = b[2];
  d[0] = inv_detJ * ( pfx[0] * (xvel[0]-xvel[6])
                    + pfx[1] * (xvel[1]-xvel[7])
                    + pfx[2] * (xvel[2]-xvel[4])
                    + pfx[3] * (xvel[3]-xvel[5]) );
  d[1] = inv_detJ * ( pfy[0] * (yvel[0]-yvel[6])
                    + pfy[1] * (yvel[1]-yvel[7])
                    + pfy[2] * (yvel[2]-yvel[4])
                    + pfy[3] * (yvel[3]-yvel[5]) );
  d[2] = inv_detJ * ( pfz[0] * (zvel[0]-zvel[6])
                    + pfz[1] * (zvel[1]-zvel[7])
                    + pfz[2] * (zvel[2]-zvel[4])
                    + pfz[3] * (zvel[3]-zvel[5]) );
  // Off-diagonal velocity derivatives, combined pairwise below.
  dyddx = inv_detJ * ( pfx[0] * (yvel[0]-yvel[6])
                     + pfx[1] * (yvel[1]-yvel[7])
                     + pfx[2] * (yvel[2]-yvel[4])
                     + pfx[3] * (yvel[3]-yvel[5]) );
  dxddy = inv_detJ * ( pfy[0] * (xvel[0]-xvel[6])
                     + pfy[1] * (xvel[1]-xvel[7])
                     + pfy[2] * (xvel[2]-xvel[4])
                     + pfy[3] * (xvel[3]-xvel[5]) );
  dzddx = inv_detJ * ( pfx[0] * (zvel[0]-zvel[6])
                     + pfx[1] * (zvel[1]-zvel[7])
                     + pfx[2] * (zvel[2]-zvel[4])
                     + pfx[3] * (zvel[3]-zvel[5]) );
  dxddz = inv_detJ * ( pfz[0] * (xvel[0]-xvel[6])
                     + pfz[1] * (xvel[1]-xvel[7])
                     + pfz[2] * (xvel[2]-xvel[4])
                     + pfz[3] * (xvel[3]-xvel[5]) );
  dzddy = inv_detJ * ( pfy[0] * (zvel[0]-zvel[6])
                     + pfy[1] * (zvel[1]-zvel[7])
                     + pfy[2] * (zvel[2]-zvel[4])
                     + pfy[3] * (zvel[3]-zvel[5]) );
  dyddz = inv_detJ * ( pfz[0] * (yvel[0]-yvel[6])
                     + pfz[1] * (yvel[1]-yvel[7])
                     + pfz[2] * (yvel[2]-yvel[4])
                     + pfz[3] * (yvel[3]-yvel[5]) );
  // Symmetric shear strain rates.
  d[5] = Real_t( .5) * ( dxddy + dyddx );
  d[4] = Real_t( .5) * ( dxddz + dzddx );
  d[3] = Real_t( .5) * ( dzddy + dyddz );
}
// One thread per element: initialise all three diagonal stress components
// to -(pressure + artificial viscosity).
__global__ void fill_sig(
    Real_t *sigxx,
    Real_t *sigyy,
    Real_t *sigzz,
    const Real_t *p,
    const Real_t *q,
    const Index_t numElem )
{
  const Index_t elem = blockDim.x * blockIdx.x + threadIdx.x;
  if (elem < numElem) {
    const Real_t s = - p[elem] - q[elem];
    sigxx[elem] = s;
    sigyy[elem] = s;
    sigzz[elem] = s;
  }
}
// One thread per element: gathers nodal coordinates, computes shape-function
// derivatives and the element volume (determ), then writes per-element
// corner forces from the diagonal stresses.  Forces are written into
// element-private slots and summed per node by a later gather kernel.
__global__ void integrateStress (
    Real_t *fx_elem,
    Real_t *fy_elem,
    Real_t *fz_elem,
    const Real_t *x,
    const Real_t *y,
    const Real_t *z,
    const Index_t *nodelist,
    const Real_t *sigxx,
    const Real_t *sigyy,
    const Real_t *sigzz,
    Real_t *determ,
    const Index_t numElem)
{
  const Index_t k = blockDim.x * blockIdx.x + threadIdx.x;
  if (k >= numElem) return;

  const Index_t* const elemToNode = nodelist + Index_t(8) * k;

  Real_t B[3][8];      // shape function derivatives
  Real_t x_local[8];
  Real_t y_local[8];
  Real_t z_local[8];

  // Placeholder; overwritten below with the real jacobian determinant.
  determ[k] = Real_t(10.0);

  // Gather this element's eight nodal coordinates into registers.
  for (Index_t lnode = 0; lnode < 8; ++lnode) {
    const Index_t gnode = elemToNode[lnode];
    x_local[lnode] = x[gnode];
    y_local[lnode] = y[gnode];
    z_local[lnode] = z[gnode];
  }

  // Volume calculation involves extra work for numerical consistency
  CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &determ[k]);
  CalcElemNodeNormals( B[0], B[1], B[2], x_local, y_local, z_local );

  // Element-private output avoids write conflicts between threads.
  SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
                               &fx_elem[k*8],
                               &fy_elem[k*8],
                               &fz_elem[k*8] );
}
// One thread per mesh node: sums the per-element corner forces touching
// this node into the final nodal force arrays.  nodeElemStart /
// nodeElemCornerList form a CSR-style node-to-corner index.
__global__ void acc_final_force (
    const Real_t *fx_elem,
    const Real_t *fy_elem,
    const Real_t *fz_elem,
    Real_t *fx,
    Real_t *fy,
    Real_t *fz,
    const Index_t *nodeElemStart,
    const Index_t *nodeElemCornerList,
    const Index_t numNode)
{
  const Index_t gnode = blockDim.x * blockIdx.x + threadIdx.x;
  if (gnode >= numNode) return;

  // This node's slice of the corner list.
  const Index_t begin = nodeElemStart[gnode];
  const Index_t end = nodeElemStart[gnode + 1];

  Real_t sumx = Real_t(0.0);
  Real_t sumy = Real_t(0.0);
  Real_t sumz = Real_t(0.0);

  for (Index_t j = begin; j < end; ++j) {
    const Index_t corner = nodeElemCornerList[j];
    sumx += fx_elem[corner];
    sumy += fy_elem[corner];
    sumz += fz_elem[corner];
  }

  fx[gnode] = sumx;
  fy[gnode] = sumy;
  fz[gnode] = sumz;
}
// One thread per element: gathers nodal coordinates, computes volume
// derivatives, and stages coordinates/derivatives into per-element arrays
// (x8n/y8n/z8n, dvdx/dvdy/dvdz) for the FB hourglass-control kernel.
// Also records the element's current volume and flags negative volumes.
__global__ void hgc (
    Real_t *dvdx,
    Real_t *dvdy,
    Real_t *dvdz,
    Real_t *x8n,
    Real_t *y8n,
    Real_t *z8n,
    Real_t *determ,
    const Real_t *x,
    const Real_t *y,
    const Real_t *z,
    const Index_t *nodelist,
    const Real_t *volo,
    const Real_t *v,
    int *vol_error,
    const Index_t numElem )
{
  const Index_t i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= numElem) return;

  Real_t x1[8], y1[8], z1[8];
  Real_t pfx[8], pfy[8], pfz[8];

  const Index_t* elemToNode = nodelist + Index_t(8) * i;

  // Gather nodal coordinates (manual CollectDomainNodesToElemNodes).
  for (Index_t lnode = 0; lnode < 8; ++lnode) {
    const Index_t gnode = elemToNode[lnode];
    x1[lnode] = x[gnode];
    y1[lnode] = y[gnode];
    z1[lnode] = z[gnode];
  }

  CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);

  /* load into temporary storage for FB Hour Glass control */
  for (Index_t ii = 0; ii < 8; ++ii) {
    const Index_t jj = 8 * i + ii;
    dvdx[jj] = pfx[ii];
    dvdy[jj] = pfy[ii];
    dvdz[jj] = pfz[ii];
    x8n[jj] = x1[ii];
    y8n[jj] = y1[ii];
    z8n[jj] = z1[ii];
  }

  // Current element volume = reference volume * relative volume.
  determ[i] = volo[i] * v[i];

  /* Do a check for negative volumes */
  if ( v[i] <= Real_t(0.0) ) {
    vol_error[0] = i;
  }
}
// Flanagan-Belytschko hourglass-control forces, one thread per element.
// Builds the 8x4 hourglass shape array (gamma with its linear part
// projected out via the volume derivatives), forms the modal velocities
// h = hourgam^T * v for each direction, and writes the resisting forces
// coefficient * hourgam * h into per-element slots (summed per node by a
// later gather kernel).  Floating-point evaluation order matches the
// original unrolled version term-for-term.
__global__ void fb (
    const Real_t *dvdx,
    const Real_t *dvdy,
    const Real_t *dvdz,
    const Real_t *x8n,
    const Real_t *y8n,
    const Real_t *z8n,
    const Real_t *determ,
    const Real_t *xd,
    const Real_t *yd,
    const Real_t *zd,
    const Real_t *ss,
    const Real_t *elemMass,
    const Index_t *nodelist,
    const Real_t *gamma,
    Real_t *fx_elem,
    Real_t *fy_elem,
    Real_t *fz_elem,
    Real_t hgcoef,
    const Index_t numElem )
{
  const Index_t i2 = blockDim.x * blockIdx.x + threadIdx.x;
  if (i2 >= numElem) return;

  const Index_t i3 = 8 * i2;
  const Index_t* elemToNode = nodelist + i3;

  Real_t hourgam[8][4];
  const Real_t volinv = ONE / determ[i2];

  // Hourglass shape array: gamma with the element's linear velocity field
  // projected out using the volume derivatives.
  for (Index_t i1 = 0; i1 < 4; ++i1) {
    Real_t hourmodx = x8n[i3] * gamma[i1*8];
    Real_t hourmody = y8n[i3] * gamma[i1*8];
    Real_t hourmodz = z8n[i3] * gamma[i1*8];
    for (Index_t j = 1; j < 8; ++j) {
      hourmodx += x8n[i3+j] * gamma[i1*8+j];
      hourmody += y8n[i3+j] * gamma[i1*8+j];
      hourmodz += z8n[i3+j] * gamma[i1*8+j];
    }
    for (Index_t j = 0; j < 8; ++j) {
      hourgam[j][i1] = gamma[i1*8+j] - volinv*(dvdx[i3+j] * hourmodx +
                                               dvdy[i3+j] * hourmody +
                                               dvdz[i3+j] * hourmodz );
    }
  }

  /* compute forces */
  /* store forces into h arrays (force arrays) */
  const Real_t ss1 = ss[i2];
  const Real_t mass1 = elemMass[i2];
  const Real_t volume13 = cbrt(determ[i2]);

  // Gather nodal velocities.
  Real_t xd1[8], yd1[8], zd1[8];
  for (Index_t j = 0; j < 8; ++j) {
    const Index_t gnode = elemToNode[j];
    xd1[j] = xd[gnode];
    yd1[j] = yd[gnode];
    zd1[j] = zd[gnode];
  }

  const Real_t coefficient = hgcoef * Real_t(-0.01) * ss1 * mass1 / volume13;

  // Modal velocities: h = hourgam^T * velocity, per direction.
  Real_t hxx[4], hyy[4], hzz[4];
  for (Index_t i = 0; i < 4; ++i) {
    Real_t sx = hourgam[0][i] * xd1[0];
    Real_t sy = hourgam[0][i] * yd1[0];
    Real_t sz = hourgam[0][i] * zd1[0];
    for (Index_t j = 1; j < 8; ++j) {
      sx += hourgam[j][i] * xd1[j];
      sy += hourgam[j][i] * yd1[j];
      sz += hourgam[j][i] * zd1[j];
    }
    hxx[i] = sx;
    hyy[i] = sy;
    hzz[i] = sz;
  }

  // Hourglass forces: f = coefficient * hourgam * h.
  Real_t hgfx[8], hgfy[8], hgfz[8];
  for (Index_t j = 0; j < 8; ++j) {
    hgfx[j] = coefficient *
        (hourgam[j][0] * hxx[0] + hourgam[j][1] * hxx[1] +
         hourgam[j][2] * hxx[2] + hourgam[j][3] * hxx[3]);
    hgfy[j] = coefficient *
        (hourgam[j][0] * hyy[0] + hourgam[j][1] * hyy[1] +
         hourgam[j][2] * hyy[2] + hourgam[j][3] * hyy[3]);
    hgfz[j] = coefficient *
        (hourgam[j][0] * hzz[0] + hourgam[j][1] * hzz[1] +
         hourgam[j][2] * hzz[2] + hourgam[j][3] * hzz[3]);
  }

  // With the threaded version, we write into local arrays per elem
  // so we don't have to worry about race conditions
  Real_t *fx_local = fx_elem + i3;
  Real_t *fy_local = fy_elem + i3;
  Real_t *fz_local = fz_elem + i3;
  for (Index_t j = 0; j < 8; ++j) {
    fx_local[j] = hgfx[j];
    fy_local[j] = hgfy[j];
    fz_local[j] = hgfz[j];
  }
}
__global__ void collect_final_force (
    const Real_t *fx_elem,
    const Real_t *fy_elem,
    const Real_t *fz_elem,
    Real_t *fx,
    Real_t *fy,
    Real_t *fz,
    const Index_t *nodeElemStart,
    const Index_t *nodeElemCornerList,
    const Index_t numNode )
{
    // One thread per mesh node: accumulate the per-element corner force
    // contributions touching this node into the nodal force arrays.
    const Index_t gnode = blockDim.x * blockIdx.x + threadIdx.x;
    if (gnode >= numNode) return;

    // [begin, end) range of this node's corners in the CSR-style list.
    const Index_t begin = nodeElemStart[gnode];
    const Index_t end   = nodeElemStart[gnode + 1];

    Real_t sum_x = Real_t(0.0);
    Real_t sum_y = Real_t(0.0);
    Real_t sum_z = Real_t(0.0);
    for (Index_t c = begin; c < end; ++c) {
        const Index_t corner = nodeElemCornerList[c];
        sum_x += fx_elem[corner];
        sum_y += fy_elem[corner];
        sum_z += fz_elem[corner];
    }

    fx[gnode] = sum_x;
    fy[gnode] = sum_y;
    fz[gnode] = sum_z;
}
__global__ void accelerationForNode (
    const Real_t *fx,
    const Real_t *fy,
    const Real_t *fz,
    const Real_t *nodalMass,
    Real_t *xdd,
    Real_t *ydd,
    Real_t *zdd,
    const Index_t numNode)
{
    // Newton's second law per node: acceleration = force / nodal mass.
    const Index_t node = blockDim.x * blockIdx.x + threadIdx.x;
    if (node >= numNode) return;

    const Real_t inv_mass = Real_t(1.) / nodalMass[node];
    xdd[node] = fx[node] * inv_mass;
    ydd[node] = fy[node] * inv_mass;
    zdd[node] = fz[node] * inv_mass;
}
__global__ void applyAccelerationBoundaryConditionsForNodes (
    const Index_t *symmX,
    const Index_t *symmY,
    const Index_t *symmZ,
    Real_t *xdd,
    Real_t *ydd,
    Real_t *zdd,
    const Index_t s1,
    const Index_t s2,
    const Index_t s3,
    const Index_t numNodeBC )
{
    // Zero the acceleration component normal to each symmetry plane.
    // s1/s2/s3 are uniform across all threads; a value of 0 enables
    // the corresponding plane's node list.
    const Index_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numNodeBC) return;

    if (s1 == 0) { xdd[symmX[idx]] = Real_t(0.0); }
    if (s2 == 0) { ydd[symmY[idx]] = Real_t(0.0); }
    if (s3 == 0) { zdd[symmZ[idx]] = Real_t(0.0); }
}
__global__ void calcVelocityForNodes (
    Real_t *xd,
    Real_t *yd,
    Real_t *zd,
    const Real_t *xdd,
    const Real_t *ydd,
    const Real_t *zdd,
    const Real_t deltaTime,
    const Real_t u_cut,
    const Index_t numNode )
{
    // Explicit-Euler velocity update per node, snapping components with
    // magnitude below the u_cut threshold to exactly zero.
    const Index_t node = blockDim.x * blockIdx.x + threadIdx.x;
    if (node >= numNode) return;

    Real_t vx = xd[node] + xdd[node] * deltaTime;
    if (fabs(vx) < u_cut) vx = Real_t(0.0);
    xd[node] = vx;

    Real_t vy = yd[node] + ydd[node] * deltaTime;
    if (fabs(vy) < u_cut) vy = Real_t(0.0);
    yd[node] = vy;

    Real_t vz = zd[node] + zdd[node] * deltaTime;
    if (fabs(vz) < u_cut) vz = Real_t(0.0);
    zd[node] = vz;
}
__global__ void calcPositionForNodes (
    Real_t *x,
    Real_t *y,
    Real_t *z,
    const Real_t *xd,
    const Real_t *yd,
    const Real_t *zd,
    const Real_t deltaTime,
    const Index_t numNode)
{
    // Advance nodal positions by one explicit step: p += v * dt.
    const Index_t node = blockDim.x * blockIdx.x + threadIdx.x;
    if (node >= numNode) return;

    x[node] = x[node] + xd[node] * deltaTime;
    y[node] = y[node] + yd[node] * deltaTime;
    z[node] = z[node] + zd[node] * deltaTime;
}
__global__ void calcKinematicsForElems (
    const Real_t *xd,
    const Real_t *yd,
    const Real_t *zd,
    const Real_t *x,
    const Real_t *y,
    const Real_t *z,
    const Index_t *nodeList,
    const Real_t *volo,
    const Real_t *v,
    Real_t *delv,
    Real_t *arealg,
    Real_t *dxx,
    Real_t *dyy,
    Real_t *dzz,
    Real_t *vnew,
    const Real_t deltaTime,
    const Index_t numElem )
{
    // Per-element kinematics: new relative volume, volume change,
    // characteristic length, and the diagonal of the velocity gradient
    // (evaluated at the half step).
    const Index_t k = blockDim.x * blockIdx.x + threadIdx.x;
    if (k >= numElem) return;

    Real_t B[3][8];        // shape function derivatives
    Real_t D[6];           // velocity gradient components
    Real_t x_local[8], y_local[8], z_local[8];
    Real_t xd_local[8], yd_local[8], zd_local[8];
    Real_t detJ = Real_t(0.0);

    const Index_t *elemToNode = nodeList + Index_t(8) * k;

    // Gather the element's nodal coordinates from the global arrays.
    for (Index_t ln = 0; ln < 8; ++ln) {
        const Index_t gn = elemToNode[ln];
        x_local[ln] = x[gn];
        y_local[ln] = y[gn];
        z_local[ln] = z[gn];
    }

    // Volume bookkeeping.
    const Real_t volume = CalcElemVolume(x_local, y_local, z_local);
    const Real_t relativeVolume = volume / volo[k];
    vnew[k] = relativeVolume;
    delv[k] = relativeVolume - v[k];

    // Characteristic length for the timestep constraint.
    arealg[k] = CalcElemCharacteristicLength(x_local, y_local, z_local,
                                             volume);

    // Gather the element's nodal velocities.
    for (Index_t ln = 0; ln < 8; ++ln) {
        const Index_t gn = elemToNode[ln];
        xd_local[ln] = xd[gn];
        yd_local[ln] = yd[gn];
        zd_local[ln] = zd[gn];
    }

    // Step the positions back to the half step before evaluating the
    // shape-function derivatives.
    const Real_t dt2 = Real_t(0.5) * deltaTime;
    for (Index_t j = 0; j < 8; ++j) {
        x_local[j] -= dt2 * xd_local[j];
        y_local[j] -= dt2 * yd_local[j];
        z_local[j] -= dt2 * zd_local[j];
    }

    CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
                                     B, &detJ);
    CalcElemVelocityGradient(xd_local, yd_local, zd_local,
                             B, detJ, D);

    // Store the diagonal velocity-gradient terms.
    dxx[k] = D[0];
    dyy[k] = D[1];
    dzz[k] = D[2];
}
__global__ void calcStrainRates(
    Real_t *dxx,
    Real_t *dyy,
    Real_t *dzz,
    const Real_t *vnew,
    Real_t *vdov,
    int *vol_error,
    const Index_t numElem )
{
    // Per-element strain rate: store the trace of the velocity gradient
    // (volumetric strain rate) and make the tensor deviatoric.
    const Index_t elem = blockDim.x * blockIdx.x + threadIdx.x;
    if (elem >= numElem) return;

    const Real_t trace = dxx[elem] + dyy[elem] + dzz[elem];
    const Real_t third = trace / Real_t(3.0);

    vdov[elem] = trace;

    // Subtract the volumetric part (deviatoric tensor).
    dxx[elem] -= third;
    dyy[elem] -= third;
    dzz[elem] -= third;

    // Record a negative-volume element index. Multiple threads may write
    // concurrently; last writer wins — presumably vol_error is only
    // inspected as a nonzero/any-failure flag (verify against caller).
    if (vnew[elem] <= Real_t(0.0))
    {
        vol_error[0] = elem;
    }
}
// Per-element kernel for the monotonic-Q artificial viscosity: for each
// hex element, compute the velocity difference (delv_*) and characteristic
// length (delx_*) along the three reference directions xi, eta, zeta.
// PTINY keeps the divisions well-defined for degenerate elements.
__global__ void calcMonotonicQGradientsForElems (
const Real_t *xd,
const Real_t *yd,
const Real_t *zd,
const Real_t *x,
const Real_t *y,
const Real_t *z,
const Index_t *nodelist,
const Real_t *volo,
Real_t *delv_eta,
Real_t *delx_eta,
Real_t *delv_zeta,
Real_t *delx_zeta,
Real_t *delv_xi,
Real_t *delx_xi,
const Real_t *vnew,
const Index_t numElem )
{
Index_t i = blockDim.x*blockIdx.x+threadIdx.x;
if (i >= numElem) return;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
// Gather the 8 node indices of element i.
const Index_t *elemToNode = nodelist + Index_t(8) * i;
Index_t n0 = elemToNode[0] ;
Index_t n1 = elemToNode[1] ;
Index_t n2 = elemToNode[2] ;
Index_t n3 = elemToNode[3] ;
Index_t n4 = elemToNode[4] ;
Index_t n5 = elemToNode[5] ;
Index_t n6 = elemToNode[6] ;
Index_t n7 = elemToNode[7] ;
// Nodal coordinates.
Real_t x0 = x[n0] ;
Real_t x1 = x[n1] ;
Real_t x2 = x[n2] ;
Real_t x3 = x[n3] ;
Real_t x4 = x[n4] ;
Real_t x5 = x[n5] ;
Real_t x6 = x[n6] ;
Real_t x7 = x[n7] ;
Real_t y0 = y[n0] ;
Real_t y1 = y[n1] ;
Real_t y2 = y[n2] ;
Real_t y3 = y[n3] ;
Real_t y4 = y[n4] ;
Real_t y5 = y[n5] ;
Real_t y6 = y[n6] ;
Real_t y7 = y[n7] ;
Real_t z0 = z[n0] ;
Real_t z1 = z[n1] ;
Real_t z2 = z[n2] ;
Real_t z3 = z[n3] ;
Real_t z4 = z[n4] ;
Real_t z5 = z[n5] ;
Real_t z6 = z[n6] ;
Real_t z7 = z[n7] ;
// Nodal velocities.
Real_t xv0 = xd[n0] ;
Real_t xv1 = xd[n1] ;
Real_t xv2 = xd[n2] ;
Real_t xv3 = xd[n3] ;
Real_t xv4 = xd[n4] ;
Real_t xv5 = xd[n5] ;
Real_t xv6 = xd[n6] ;
Real_t xv7 = xd[n7] ;
Real_t yv0 = yd[n0] ;
Real_t yv1 = yd[n1] ;
Real_t yv2 = yd[n2] ;
Real_t yv3 = yd[n3] ;
Real_t yv4 = yd[n4] ;
Real_t yv5 = yd[n5] ;
Real_t yv6 = yd[n6] ;
Real_t yv7 = yd[n7] ;
Real_t zv0 = zd[n0] ;
Real_t zv1 = zd[n1] ;
Real_t zv2 = zd[n2] ;
Real_t zv3 = zd[n3] ;
Real_t zv4 = zd[n4] ;
Real_t zv5 = zd[n5] ;
Real_t zv6 = zd[n6] ;
Real_t zv7 = zd[n7] ;
// Current element volume (reference volume * relative volume).
Real_t vol = volo[i] * vnew[i] ;
Real_t norm = Real_t(1.0) / ( vol + PTINY ) ;
// Face-center difference vectors along the three reference directions.
Real_t dxj = Real_t(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;
Real_t dyj = Real_t(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;
Real_t dzj = Real_t(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;
Real_t dxi = Real_t( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;
Real_t dyi = Real_t( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;
Real_t dzi = Real_t( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;
Real_t dxk = Real_t( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;
Real_t dyk = Real_t( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;
Real_t dzk = Real_t( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
// Characteristic length = volume / face area in this direction.
delx_zeta[i] = vol / sqrt(ax*ax + ay*ay + az*az + PTINY) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
// Velocity difference across opposite faces, projected on the normal.
dxv = Real_t(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;
dyv = Real_t(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;
dzv = Real_t(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;
delv_zeta[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
delx_xi[i] = vol / sqrt(ax*ax + ay*ay + az*az + PTINY) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;
dyv = Real_t(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ;
dzv = Real_t(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;
delv_xi[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
delx_eta[i] = vol / sqrt(ax*ax + ay*ay + az*az + PTINY) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;
dyv = Real_t(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;
dzv = Real_t(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;
delv_eta[i] = ax*dxv + ay*dyv + az*dzv ;
}
// Per-element kernel: evaluate the monotonic artificial viscosity q.
// For each reference direction a limiter phi in [0, monoq_max_slope] is
// built from the element's velocity gradient and those of its upwind
// neighbors (lxim/lxip, letam/letap, lzetam/lzetap), honoring the
// boundary-condition mask in elemBC. The linear (ql) and quadratic (qq)
// viscosity terms are then written per element; elements in expansion
// (vdov > 0) get zero viscosity.
__global__ void calcMonotonicQForElems (
const Index_t *elemBC,
const Real_t *elemMass,
Real_t *ql,
Real_t *qq,
const Real_t *vdov,
const Real_t *volo,
const Real_t *delv_eta,
const Real_t *delx_eta,
const Real_t *delv_zeta,
const Real_t *delx_zeta,
const Real_t *delv_xi,
const Real_t *delx_xi,
const Index_t *lxim,
const Index_t *lxip,
const Index_t *lzetam,
const Index_t *lzetap,
const Index_t *letap,
const Index_t *letam,
const Real_t *vnew,
const Real_t monoq_limiter_mult,
const Real_t monoq_max_slope,
const Real_t qlc_monoq,
const Real_t qqc_monoq,
const Index_t numElem )
{
Index_t i = blockDim.x*blockIdx.x+threadIdx.x;
if (i >= numElem) return;
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Int_t bcMask = elemBC[i] ;
Real_t delvm = 0.0, delvp =0.0;
/* phixi */
// delvm/delvp: gradient in the minus/plus neighbor, chosen by BC mask:
// interior -> neighbor's value, SYMM -> mirror (own value), FREE -> 0.
Real_t norm = Real_t(1.) / (delv_xi[i]+ PTINY ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]]; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: //fprintf(stderr, "Error in switch at %s line %d\n", __FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: //fprintf(stderr, "Error in switch at %s line %d\n", __FILE__, __LINE__);
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
// Normalize by this element's gradient, average, then limit.
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
// Same construction for the eta direction.
norm = Real_t(1.) / ( delv_eta[i] + PTINY ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: //fprintf(stderr, "Error in switch at %s line %d\n", __FILE__, __LINE__);
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default:
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
// Same construction for the zeta direction.
norm = Real_t(1.) / ( delv_zeta[i] + PTINY ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default:
delvm = 0; /* ERROR - but quiets the compiler */
break;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default:
delvp = 0; /* ERROR - but quiets the compiler */
break;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
// Expanding elements (vdov > 0) get no artificial viscosity.
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
// delv * delx terms are clamped to compression only (<= 0).
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
// Current density = mass / current volume.
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
}
// Per-element equation-of-state update (pressure p, energy e, artificial
// viscosity q, sound speed ss, relative volume v). The body of the rep
// loop is one EOS evaluation; elemRep[elem] repetitions of the identical
// computation emulate per-region material cost (host setup in main assigns
// rep = 1, 1+cost, or 10*(1+cost), i.e. rep >= 1).
// NOTE(review): if rep were ever 0, p_new/e_new/q_new/bvc/pbvc below would
// be read uninitialized after the loop — relies on the rep >= 1 invariant.
__global__ void applyMaterialPropertiesForElems(
const Real_t *ql,
const Real_t *qq,
const Real_t *delv,
const Index_t *elemRep,
const Index_t *elemElem,
Real_t *q,
Real_t *p,
Real_t *e,
Real_t *ss,
Real_t *v,
Real_t *vnewc,
const Real_t e_cut,
const Real_t p_cut,
const Real_t ss4o3,
const Real_t q_cut,
const Real_t v_cut,
const Real_t eosvmax,
const Real_t eosvmin,
const Real_t pmin,
const Real_t emin,
const Real_t rho0,
const Index_t numElem )
{
Index_t elem = blockDim.x*blockIdx.x+threadIdx.x;
if (elem >= numElem) return;
Index_t rep = elemRep[elem];
Real_t e_old, delvc, p_old, q_old, qq_old, ql_old;
Real_t p_new, q_new, e_new;
Real_t work, compression, compHalfStep, bvc, pbvc, pHalfStep;
Real_t vchalf ;
Real_t vhalf ;
Real_t ssc ;
Real_t q_tilde ;
Real_t ssTmp ;
// Clamp the new relative volume into the allowed EOS range.
if (eosvmin != ZERO) {
if (vnewc[elem] < eosvmin)
vnewc[elem] = eosvmin ;
}
if (eosvmax != ZERO) {
if (vnewc[elem] > eosvmax)
vnewc[elem] = eosvmax ;
}
// This check may not make perfect sense in LULESH, but
// it's representative of something in the full code -
// just leave it in, please
Real_t vc = v[elem] ;
if (eosvmin != ZERO) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != ZERO) {
if (vc > eosvmax)
vc = eosvmax ;
}
// Snapshot the element state once; each rep iteration restarts from it.
Real_t vnewc_t = vnewc[elem];
Real_t e_temp = e[elem];
Real_t delv_temp = delv[elem];
Real_t p_temp = p[elem];
Real_t q_temp = q[elem];
Real_t qq_temp = qq[elem];
Real_t ql_temp = ql[elem];
for(Index_t j = 0; j < rep; j++) {
e_old = e_temp ;
delvc = delv_temp ;
p_old = p_temp ;
q_old = q_temp ;
qq_old = qq_temp ;
ql_old = ql_temp ;
// Compressions at the full and half step.
compression = ONE / vnewc_t - ONE;
vchalf = vnewc_t - delvc * HALF;
compHalfStep = ONE / vchalf - ONE;
if (vnewc_t <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
if (vnewc_t >= eosvmax) { /* impossible due to calling func? */
p_old = ZERO ;
compression = ZERO ;
compHalfStep = ZERO ;
}
work = ZERO ;
// Half-step energy from pdV work of the old state.
e_new = e_old - HALF * delvc * (p_old + q_old)
+ HALF * work;
if (e_new < emin ) {
e_new = emin ;
}
// Half-step pressure (linear EOS: p = C1S * (1 + compression) * e).
bvc = C1S * (compHalfStep + ONE);
pbvc = C1S;
pHalfStep = bvc * e_new ;
if (fabs(pHalfStep) < p_cut )
pHalfStep = ZERO ;
if ( vnewc_t >= eosvmax ) /* impossible condition here? */
pHalfStep = ZERO ;
if (pHalfStep < pmin)
pHalfStep = pmin ;
vhalf = ONE / (ONE + compHalfStep) ;
// Half-step artificial viscosity (zero in expansion).
if ( delvc > ZERO ) {
q_new /* = qq_old[elem] = ql_old[elem] */ = ZERO ;
} else {
ssc = ( pbvc * e_new + vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= C1 ) {
ssc = C2 ;
} else {
ssc = sqrt(ssc) ;
}
q_new = (ssc*ql_old + qq_old) ;
}
// Full-step energy correction using the half-step pressure/viscosity.
e_new = e_new + HALF * delvc
* (THREE*(p_old + q_old)
- FOUR*(pHalfStep + q_new)) ;
e_new += HALF * work;
if (fabs(e_new) < e_cut) {
e_new = ZERO ;
}
if ( e_new < emin ) {
e_new = emin ;
}
// First full-step pressure evaluation.
bvc = C1S * (compression + ONE);
pbvc = C1S;
p_new = bvc * e_new ;
if (fabs(p_new) < p_cut )
p_new = ZERO ;
if ( vnewc_t >= eosvmax ) /* impossible condition here? */
p_new = ZERO ;
if (p_new < pmin)
p_new = pmin ;
// Provisional viscosity q_tilde for the final energy correction.
if (delvc > ZERO) {
q_tilde = ZERO ;
}
else {
Real_t ssc = ( pbvc * e_new + vnewc_t * vnewc_t * bvc * p_new ) / rho0 ;
if ( ssc <= C1 ) {
ssc = C2 ;
} else {
ssc = sqrt(ssc) ;
}
q_tilde = (ssc * ql_old + qq_old) ;
}
// Final energy correction and second pressure evaluation.
e_new = e_new - ( SEVEN*(p_old + q_old)
- EIGHT*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*SIXTH ;
if (fabs(e_new) < e_cut) {
e_new = ZERO ;
}
if (e_new < emin) {
e_new = emin ;
}
bvc = C1S * (compression + ONE);
pbvc = C1S;
p_new = bvc * e_new ;
if ( fabs(p_new) < p_cut )
p_new = ZERO ;
if ( vnewc_t >= eosvmax ) /* impossible condition here? */
p_new = ZERO ;
if (p_new < pmin)
p_new = pmin ;
// Final viscosity for compressing elements.
if ( delvc <= ZERO ) {
ssc = ( pbvc * e_new + vnewc_t * vnewc_t * bvc * p_new ) / rho0 ;
if ( ssc <= C1 ) {
ssc = C2 ;
} else {
ssc = sqrt(ssc) ;
}
q_new = (ssc*ql_old + qq_old) ;
if (fabs(q_new) < q_cut) q_new = ZERO ;
}
} //this is the end of the rep loop
// Commit the final state and sound speed.
p[elem] = p_new ;
e[elem] = e_new ;
q[elem] = q_new ;
ssTmp = (pbvc * e_new + vnewc_t * vnewc_t * bvc * p_new) / rho0;
if (ssTmp <= C1) {
ssTmp = C2;
} else {
ssTmp = sqrt(ssTmp);
}
ss[elem] = ssTmp ;
// Snap relative volume to exactly one when within v_cut.
if ( fabs(vnewc_t - ONE) < v_cut )
vnewc_t = ONE ;
v[elem] = vnewc_t ;
}
/*********************************/
/* Data structure implementation */
/*********************************/
/* might want to add access methods so that memory can be */
/* better managed, as in luleshFT */
// Raw, uninitialized allocation of `size` objects of type T.
// NOTE(review): uses malloc, so no constructors run — suitable only for
// trivially-constructible types; returns a null pointer on failure.
template <typename T>
T *Allocate(size_t size)
{
    void *raw = malloc(sizeof(T) * size);
    return static_cast<T *>(raw);
}
// Free a buffer (as obtained from Allocate) and null the caller's
// pointer so it cannot be reused after the free.
template <typename T>
void Release(T **ptr)
{
    T *&target = *ptr;
    if (target != NULL) {
        free(target);
        target = NULL;
    }
}
/******************************************/
/* Work Routines */
static inline
void TimeIncrement(Domain& domain)
{
    // Adaptive timestep control: pick a new dt from the Courant and
    // hydro constraints (when dt is not fixed), limit its growth/decay
    // relative to the previous step, then advance time and cycle count.
    Real_t targetdt = domain.stoptime() - domain.time() ;

    const bool variable_dt =
        (domain.dtfixed() <= Real_t(0.0)) && (domain.cycle() != Int_t(0));
    if (variable_dt) {
        const Real_t olddt = domain.deltatime() ;

        /* This will require a reduction in parallel */
        // Candidate dt from the tighter of the two stability constraints.
        Real_t gnewdt = Real_t(1.0e+20) ;
        if (domain.dtcourant() < gnewdt) {
            gnewdt = domain.dtcourant() / Real_t(2.0) ;
        }
        if (domain.dthydro() < gnewdt) {
            gnewdt = domain.dthydro() * Real_t(2.0) / Real_t(3.0) ;
        }
        Real_t newdt = gnewdt;

        // Constrain how fast dt may change between cycles.
        const Real_t ratio = newdt / olddt ;
        if (ratio >= Real_t(1.0)) {
            if (ratio < domain.deltatimemultlb()) {
                newdt = olddt ;
            } else if (ratio > domain.deltatimemultub()) {
                newdt = olddt * domain.deltatimemultub() ;
            }
        }
        if (newdt > domain.dtmax()) {
            newdt = domain.dtmax() ;
        }
        domain.deltatime() = newdt ;
    }

    /* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
    if ((targetdt > domain.deltatime()) &&
        (targetdt < (Real_t(4.0) * domain.deltatime() / Real_t(3.0)))) {
        targetdt = Real_t(2.0) * domain.deltatime() / Real_t(3.0) ;
    }
    // Never step past the requested stop time.
    if (targetdt < domain.deltatime()) {
        domain.deltatime() = targetdt ;
    }

    domain.time() += domain.deltatime() ;
    ++domain.cycle() ;
}
/******************************************/
/******************************************/
static inline
void CalcCourantConstraintForElems(Domain &domain, Index_t length,
                                   Index_t *regElemlist,
                                   Real_t qqc, Real_t& dtcourant)
{
    // Courant (CFL) timestep constraint over one region's elements:
    // dt <= arealg / soundspeed (augmented by the qqc viscosity term for
    // compressing elements). dtcourant is tightened in place; it is left
    // unchanged when no element constrains it further.
    //
    // FIX: the previous version was a serialized remnant of an OpenMP
    // reduction — it re-ran the identical element scan NT times (the
    // strict `<` comparison guarantees every pass after the first finds
    // the same minimum) and leaked two `new[]`-allocated static arrays.
    // A single serial scan produces the same result with 1/NT the work
    // and no heap allocation.
    const Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
    Real_t dtcourant_tmp = dtcourant ;
    Index_t courant_elem = -1 ;

    for (Index_t i = 0 ; i < length ; ++i) {
        const Index_t indx = regElemlist[i] ;

        Real_t dtf = domain.ss(indx) * domain.ss(indx) ;
        if (domain.vdov(indx) < Real_t(0.)) {
            // Compressing element: add the artificial-viscosity term.
            dtf = dtf
                + qqc2 * domain.arealg(indx) * domain.arealg(indx)
                       * domain.vdov(indx) * domain.vdov(indx) ;
        }
        dtf = SQRT(dtf) ;
        dtf = domain.arealg(indx) / dtf ;

        // Only elements with nonzero volume change constrain dt.
        if (domain.vdov(indx) != Real_t(0.)) {
            if (dtf < dtcourant_tmp) {
                dtcourant_tmp = dtf ;
                courant_elem = indx ;
            }
        }
    }

    if (courant_elem != -1) {
        dtcourant = dtcourant_tmp ;
    }
}
/******************************************/
static inline
void CalcHydroConstraintForElems(Domain &domain, Index_t length,
                                 Index_t *regElemlist, Real_t dvovmax, Real_t& dthydro)
{
    // Hydro timestep constraint over one region's elements:
    // dt <= dvovmax / |vdov| for every element with nonzero volume change.
    // dthydro is tightened in place; it is left unchanged when no element
    // constrains it further.
    //
    // FIX: the previous version was a serialized remnant of an OpenMP
    // reduction — it re-ran the identical element scan NT times (the
    // strict `>` comparison guarantees every pass after the first finds
    // the same minimum) and leaked two `new[]`-allocated static arrays.
    // A single serial scan produces the same result with 1/NT the work
    // and no heap allocation.
    Real_t dthydro_tmp = dthydro ;
    Index_t hydro_elem = -1 ;

    for (Index_t i = 0 ; i < length ; ++i) {
        const Index_t indx = regElemlist[i] ;

        if (domain.vdov(indx) != Real_t(0.)) {
            // Guard against division by zero with a tiny offset.
            const Real_t dtdvov =
                dvovmax / (FABS(domain.vdov(indx)) + Real_t(1.e-20)) ;
            if (dthydro_tmp > dtdvov) {
                dthydro_tmp = dtdvov ;
                hydro_elem = indx ;
            }
        }
    }

    if (hydro_elem != -1) {
        dthydro = dthydro_tmp ;
    }
}
/******************************************/
static inline
void CalcTimeConstraintsForElems(Domain& domain) {
    // Reset both constraints to "effectively unbounded", then let every
    // material region tighten them in turn.
    domain.dtcourant() = 1.0e+20;
    domain.dthydro() = 1.0e+20;

    for (Index_t reg = 0; reg < domain.numReg(); ++reg) {
        /* evaluate time constraint */
        CalcCourantConstraintForElems(domain, domain.regElemSize(reg),
                                      domain.regElemlist(reg),
                                      domain.qqc(),
                                      domain.dtcourant());
        /* check hydro constraint */
        CalcHydroConstraintForElems(domain, domain.regElemSize(reg),
                                    domain.regElemlist(reg),
                                    domain.dvovmax(),
                                    domain.dthydro());
    }
}
/******************************************/
/******************************************/
int main(int argc, char *argv[])
{
Domain *locDom ;
Int_t numRanks ;
Int_t myRank ;
struct cmdLineOpts opts;
numRanks = 1;
myRank = 0;
/* Set defaults that can be overridden by command line opts */
opts.its = 9999999;
opts.nx = 30;
opts.numReg = 11;
opts.numFiles = (int)(numRanks+10)/9;
opts.showProg = 0;
opts.quiet = 0;
opts.viz = 0;
opts.balance = 1;
opts.cost = 1;
opts.iteration_cap = 0;
ParseCommandLineOptions(argc, argv, myRank, &opts);
if ((myRank == 0) && (opts.quiet == 0)) {
printf("Running problem size %d^3 per domain until completion\n", opts.nx);
printf("Num processors: %d\n", numRanks);
printf("Num threads (hardcoded): %d\n", NT);
printf("Total number of elements: %lld\n\n", (long long int)(numRanks*opts.nx*opts.nx*opts.nx));
printf("To run other sizes, use -s <integer>.\n");
printf("To run a fixed number of iterations, use -i <integer>.\n");
printf("To run a more or less balanced region set, use -b <integer>.\n");
printf("To change the relative costs of regions, use -c <integer>.\n");
printf("To print out progress, use -p\n");
printf("To write an output file for VisIt, use -v\n");
printf("To only execute the first iteration, use -z (used when profiling: nvprof --metrics all)\n");
printf("See help (-h) for more options\n\n");
}
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// Build the main data structure and initialize it
locDom = new Domain(numRanks, col, row, plane, opts.nx,
side, opts.numReg, opts.balance, opts.cost) ;
// BEGIN timestep to solution */
timeval start;
gettimeofday(&start, NULL) ;
// Compute elem to reglist correspondence
Index_t k = 0;
for (Int_t r=0 ; r<locDom->numReg() ; r++) {
Index_t numElemReg = locDom->regElemSize(r);
Index_t *regElemList = locDom->regElemlist(r);
Index_t rep;
//Determine load imbalance for this region
//round down the number with lowest cost
if(r < locDom->numReg()/2)
rep = 1;
//you don't get an expensive region unless you at least have 5 regions
else if(r < (locDom->numReg() - (locDom->numReg()+15)/20))
rep = 1 + locDom->cost();
//very expensive regions
else
rep = 10 * (1+ locDom->cost());
// std::cout << "Elems: " << numElemReg << " Reps: " << rep << "\n";
for (Index_t e=0 ; e<numElemReg ; e++){
locDom->m_elemRep[regElemList[e]] = rep;
locDom->m_elemElem[k] = regElemList[e];
k++;
}
}
//export persistent data to GPU
Index_t numNode = locDom->numNode();
Index_t numElem = locDom->numElem() ;
Index_t numElem8 = numElem * 8;
printf("numNode=%d numElem=%d\n", numNode, numElem);
#ifdef VERIFY
assert(numElem > 0);
#endif
Real_t *x = &locDom->m_x[0];
Real_t *y = &locDom->m_y[0];
Real_t *z = &locDom->m_z[0];
Real_t *fx = &locDom->m_fx[0];
Real_t *fy = &locDom->m_fy[0];
Real_t *fz = &locDom->m_fz[0];
Real_t *xd = &locDom->m_xd[0];
Real_t *yd = &locDom->m_yd[0];
Real_t *zd = &locDom->m_zd[0];
Real_t *xdd = &locDom->m_xdd[0];
Real_t *ydd = &locDom->m_ydd[0];
Real_t *zdd = &locDom->m_zdd[0];
Index_t *nodelist = &locDom->m_nodelist[0];
#ifdef VERIFY
for (int i = 0; i < numNode; i++) {
xd[i] = ((float)rand()/(float)(RAND_MAX));
yd[i] = ((float)rand()/(float)(RAND_MAX));
zd[i] = ((float)rand()/(float)(RAND_MAX));
}
#endif
Real_t *d_x;
hipMalloc((void**)&d_x, sizeof(Real_t)*numNode);
hipMemcpy(d_x, x, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_y;
hipMalloc((void**)&d_y, sizeof(Real_t)*numNode);
hipMemcpy(d_y, y, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_z;
hipMalloc((void**)&d_z, sizeof(Real_t)*numNode);
hipMemcpy(d_z, z, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_fx;
hipMalloc((void**)&d_fx, sizeof(Real_t)*numNode);
hipMemcpy(d_fx, fx, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_fy;
hipMalloc((void**)&d_fy, sizeof(Real_t)*numNode);
hipMemcpy(d_fy, fy, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_fz;
hipMalloc((void**)&d_fz, sizeof(Real_t)*numNode);
hipMemcpy(d_fz, fz, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_xd;
hipMalloc((void**)&d_xd, sizeof(Real_t)*numNode);
hipMemcpy(d_xd, xd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_yd;
hipMalloc((void**)&d_yd, sizeof(Real_t)*numNode);
hipMemcpy(d_yd, yd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_zd;
hipMalloc((void**)&d_zd, sizeof(Real_t)*numNode);
hipMemcpy(d_zd, zd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_xdd;
hipMalloc((void**)&d_xdd, sizeof(Real_t)*numNode);
hipMemcpy(d_xdd, xdd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_ydd;
hipMalloc((void**)&d_ydd, sizeof(Real_t)*numNode);
hipMemcpy(d_ydd, ydd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Real_t *d_zdd;
hipMalloc((void**)&d_zdd, sizeof(Real_t)*numNode);
hipMemcpy(d_zdd, zdd, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
Index_t *d_nodelist;
hipMalloc((void**)&d_nodelist, sizeof(Index_t)*numElem8);
hipMemcpy(d_nodelist, nodelist, sizeof(Index_t)*numElem8, hipMemcpyHostToDevice);
// factor out the buffers from the loop
Real_t *d_determ;
hipMalloc((void**)&d_determ, sizeof(Real_t)*numElem);
Real_t *d_fx_elem;
hipMalloc((void**)&d_fx_elem, sizeof(Real_t)*numElem8);
Real_t *d_fy_elem;
hipMalloc((void**)&d_fy_elem, sizeof(Real_t)*numElem8);
Real_t *d_fz_elem;
hipMalloc((void**)&d_fz_elem, sizeof(Real_t)*numElem8);
Real_t *d_dvdx;
hipMalloc((void**)&d_dvdx, sizeof(Real_t)*numElem8);
Real_t *d_dvdy;
hipMalloc((void**)&d_dvdy, sizeof(Real_t)*numElem8);
Real_t *d_dvdz;
hipMalloc((void**)&d_dvdz, sizeof(Real_t)*numElem8);
Real_t *d_x8n;
hipMalloc((void**)&d_x8n, sizeof(Real_t)*numElem8);
Real_t *d_y8n;
hipMalloc((void**)&d_y8n, sizeof(Real_t)*numElem8);
Real_t *d_z8n;
hipMalloc((void**)&d_z8n, sizeof(Real_t)*numElem8);
Real_t *d_sigxx;
hipMalloc((void**)&d_sigxx, sizeof(Real_t)*numElem);
Real_t *d_sigyy;
hipMalloc((void**)&d_sigyy, sizeof(Real_t)*numElem);
Real_t *d_sigzz;
hipMalloc((void**)&d_sigzz, sizeof(Real_t)*numElem);
Real_t *d_delv_xi;
hipMalloc((void**)&d_delv_xi, sizeof(Real_t)*numElem);
Real_t *d_delx_xi;
hipMalloc((void**)&d_delx_xi, sizeof(Real_t)*numElem);
Real_t *d_delv_eta;
hipMalloc((void**)&d_delv_eta, sizeof(Real_t)*numElem);
Real_t *d_delx_eta;
hipMalloc((void**)&d_delx_eta, sizeof(Real_t)*numElem);
Real_t *d_delv_zeta;
hipMalloc((void**)&d_delv_zeta, sizeof(Real_t)*numElem);
Real_t *d_delx_zeta;
hipMalloc((void**)&d_delx_zeta, sizeof(Real_t)*numElem);
Real_t *d_p;
hipMalloc((void**)&d_p, sizeof(Real_t)*numElem);
Real_t *d_q;
hipMalloc((void**)&d_q, sizeof(Real_t)*numElem);
Real_t *d_volo;
hipMalloc((void**)&d_volo, sizeof(Real_t)*numElem);
Real_t *d_v;
hipMalloc((void**)&d_v, sizeof(Real_t)*numElem);
int *d_vol_error;
hipMalloc((void**)&d_vol_error, sizeof(int));
Index_t *nodeElemStart = &locDom->m_nodeElemStart[0];
Index_t len1 = numNode + 1;
Index_t *nodeElemCornerList = &locDom->m_nodeElemCornerList[0];
Index_t len2 = nodeElemStart[numNode];
Index_t* d_nodeElemStart;
hipMalloc((void**)&d_nodeElemStart, sizeof(Index_t)*len1);
hipMemcpy(d_nodeElemStart, nodeElemStart, sizeof(Index_t)*len1, hipMemcpyHostToDevice);
Index_t* d_nodeElemCornerList;
hipMalloc((void**)&d_nodeElemCornerList, sizeof(Index_t)*len2);
hipMemcpy(d_nodeElemCornerList, nodeElemCornerList, sizeof(Index_t)*len2, hipMemcpyHostToDevice);
Real_t gamma[32] __attribute__((__aligned__(64)));
gamma[0] = Real_t( 1.);
gamma[1] = Real_t( 1.);
gamma[2] = Real_t(-1.);
gamma[3] = Real_t(-1.);
gamma[4] = Real_t(-1.);
gamma[5] = Real_t(-1.);
gamma[6] = Real_t( 1.);
gamma[7] = Real_t( 1.);
gamma[8] = Real_t( 1.);
gamma[9] = Real_t(-1.);
gamma[10] = Real_t(-1.);
gamma[11] = Real_t( 1.);
gamma[12] = Real_t(-1.);
gamma[13] = Real_t( 1.);
gamma[14] = Real_t( 1.);
gamma[15] = Real_t(-1.);
gamma[16] = Real_t( 1.);
gamma[17] = Real_t(-1.);
gamma[18] = Real_t( 1.);
gamma[19] = Real_t(-1.);
gamma[20] = Real_t( 1.);
gamma[21] = Real_t(-1.);
gamma[22] = Real_t( 1.);
gamma[23] = Real_t(-1.);
gamma[24] = Real_t(-1.);
gamma[25] = Real_t( 1.);
gamma[26] = Real_t(-1.);
gamma[27] = Real_t( 1.);
gamma[28] = Real_t( 1.);
gamma[29] = Real_t(-1.);
gamma[30] = Real_t( 1.);
gamma[31] = Real_t(-1.);
Real_t *d_gamma;
hipMalloc((void**)&d_gamma, sizeof(Real_t)*32);
hipMemcpy(d_gamma, gamma, 32*sizeof(Real_t), hipMemcpyHostToDevice);
Real_t* d_ss;
hipMalloc((void**)&d_ss, sizeof(Real_t)*numElem);
Real_t* d_elemMass;
hipMalloc((void**)&d_elemMass, sizeof(Real_t)*numElem);
Real_t* d_nodalMass;
hipMalloc((void**)&d_nodalMass, sizeof(Real_t)*numNode);
Index_t size = locDom->sizeX();
Index_t numNodeBC = (size+1)*(size+1) ;
Index_t* d_symmX;
hipMalloc((void**)&d_symmX, sizeof(Index_t)*numNodeBC);
Index_t* d_symmY;
hipMalloc((void**)&d_symmY, sizeof(Index_t)*numNodeBC);
Index_t* d_symmZ;
hipMalloc((void**)&d_symmZ, sizeof(Index_t)*numNodeBC);
Real_t* d_vdov;
hipMalloc((void**)&d_vdov, sizeof(Real_t)*numElem);
Real_t* d_delv;
hipMalloc((void**)&d_delv, sizeof(Real_t)*numElem);
Real_t* d_arealg;
hipMalloc((void**)&d_arealg, sizeof(Real_t)*numElem);
Real_t* d_dxx;
hipMalloc((void**)&d_dxx, sizeof(Real_t)*numElem);
Real_t* d_dyy;
hipMalloc((void**)&d_dyy, sizeof(Real_t)*numElem);
Real_t* d_dzz;
hipMalloc((void**)&d_dzz, sizeof(Real_t)*numElem);
Real_t* d_vnew;
hipMalloc((void**)&d_vnew, sizeof(Real_t)*numElem);
Index_t* d_lzetam;
hipMalloc((void**)&d_lzetam, sizeof(Index_t)*numElem);
Index_t* d_lzetap;
hipMalloc((void**)&d_lzetap, sizeof(Index_t)*numElem);
Index_t* d_letap;
hipMalloc((void**)&d_letap, sizeof(Index_t)*numElem);
Index_t* d_letam;
hipMalloc((void**)&d_letam, sizeof(Index_t)*numElem);
Index_t* d_lxip;
hipMalloc((void**)&d_lxip, sizeof(Index_t)*numElem);
Index_t* d_lxim;
hipMalloc((void**)&d_lxim, sizeof(Index_t)*numElem);
Index_t* d_elemBC;
hipMalloc((void**)&d_elemBC, sizeof(Index_t)*numElem);
Real_t* d_ql;
hipMalloc((void**)&d_ql, sizeof(Real_t)*numElem);
Real_t* d_qq;
hipMalloc((void**)&d_qq, sizeof(Real_t)*numElem);
Real_t* d_e;
hipMalloc((void**)&d_e, sizeof(Real_t)*numElem);
Index_t* d_elemRep;
hipMalloc((void**)&d_elemRep, sizeof(Index_t)*numElem);
Index_t* d_elemElem;
hipMalloc((void**)&d_elemElem, sizeof(Index_t)*numElem);
// error checking on the host
Real_t *determ = Allocate<Real_t>(numElem) ;
// resize m_dxx, m_dyy, and m_dzz
locDom->AllocateStrains(numElem);
// resize position and velocity gradients
Int_t allElem = numElem + /* local elem */
2*locDom->sizeX()*locDom->sizeY() + /* plane ghosts */
2*locDom->sizeX()*locDom->sizeZ() + /* row ghosts */
2*locDom->sizeY()*locDom->sizeZ() ; /* col ghosts */
locDom->AllocateGradients(numElem, allElem);
while((locDom->time() < locDom->stoptime()) && (locDom->cycle() < opts.its)) {
TimeIncrement(*locDom) ;
//==============================================================================
// LagrangeLeapFrog(*locDom) ;
//==============================================================================
//=============================================================
// calculate nodal forces, accelerations, velocities, positions, with
// applied boundary conditions and slide surface considerations
//LagrangeNodal(domain);
//=============================================================
Domain &domain = *locDom;
const Real_t deltaTime = domain.deltatime() ;
Real_t u_cut = domain.u_cut() ;
//=============================================================================
// time of boundary condition evaluation is beginning of step for force and
// acceleration boundary conditions.
//CalcForceForNodes(domain);
//=============================================================================
//=====================================================================
// CalcVolumeForceForElems(domain)
//=====================================================================
Real_t hgcoef = domain.hgcoef() ;
// Sum contributions to total stress tensor
Real_t *p = &domain.m_p[0];
Real_t *q = &domain.m_q[0];
hipMemcpy(d_p, p, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_q, q, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
dim3 gws_elem ((numElem+THREADS-1)/THREADS);
dim3 gws_node ((numNode+THREADS-1)/THREADS);
dim3 lws (THREADS);
hipLaunchKernelGGL(fill_sig, dim3(gws_elem), dim3(lws), 0, 0, d_sigxx, d_sigyy, d_sigzz, d_p, d_q, numElem);
//==============================================================================================
// IntegrateStressForElems( domain, sigxx, sigyy, sigzz, determ, numElem, domain.numNode())
//==============================================================================================
hipLaunchKernelGGL(integrateStress, dim3(gws_elem), dim3(lws), 0, 0,
d_fx_elem,
d_fy_elem,
d_fz_elem,
d_x,
d_y,
d_z,
d_nodelist,
d_sigxx,
d_sigyy,
d_sigzz,
d_determ,
numElem);
hipLaunchKernelGGL(acc_final_force, dim3(gws_node), dim3(lws), 0, 0,
d_fx_elem,
d_fy_elem,
d_fz_elem,
d_fx,
d_fy,
d_fz,
d_nodeElemStart,
d_nodeElemCornerList,
numNode);
// check for negative element volume on the host
hipMemcpy(determ, d_determ, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
#ifdef _OPENMP
#pragma omp parallel for firstprivate(numElem)
#endif
for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
exit(VolumeError);
}
}
//=================================================================================
// CalcHourglassControlForElems(device_queue, domain, determ, hgcoef) ;
//=================================================================================
Real_t *volo = &domain.m_volo[0];
Real_t *v = &domain.m_v[0];
int vol_error = -1;
hipMemcpy(d_volo, volo, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_vol_error, &vol_error, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(hgc, dim3(gws_elem), dim3(lws), 0, 0,
d_dvdx,
d_dvdy,
d_dvdz,
d_x8n,
d_y8n,
d_z8n,
d_determ,
d_x,
d_y,
d_z,
d_nodelist,
d_volo,
d_v,
d_vol_error,
numElem );
#ifdef VERIFY
Real_t *dvdx_tmp = (Real_t*) malloc (sizeof(Real_t)*numElem8);
Real_t *dvdy_tmp = (Real_t*) malloc (sizeof(Real_t)*numElem8);
Real_t *dvdz_tmp = (Real_t*) malloc (sizeof(Real_t)*numElem8);
Real_t *x8n_tmp = (Real_t*) malloc(sizeof(Real_t)*numElem8);
Real_t *y8n_tmp = (Real_t*) malloc(sizeof(Real_t)*numElem8);
Real_t *z8n_tmp = (Real_t*) malloc(sizeof(Real_t)*numElem8);
hipMemcpy(dvdx_tmp, d_dvdx, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(dvdy_tmp, d_dvdy, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(dvdz_tmp, d_dvdz, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(x8n_tmp, d_x8n, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(y8n_tmp, d_y8n, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(z8n_tmp, d_z8n, sizeof(Real_t)*numElem8, hipMemcpyDeviceToHost);
hipMemcpy(determ, d_determ, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
// volumn derivative
for (int i = 0; i < numElem8; i++) {
printf("vd %d %f %f %f %f %f %f %f\n",
i, dvdx_tmp[i], dvdy_tmp[i], dvdz_tmp[i],
x8n_tmp[i], y8n_tmp[i], z8n_tmp[i], determ[i/8]);
}
free(dvdx_tmp);
free(dvdy_tmp);
free(dvdz_tmp);
free(x8n_tmp);
free(y8n_tmp);
free(z8n_tmp);
#endif
hipMemcpy(&vol_error, d_vol_error, sizeof(int), hipMemcpyDeviceToHost);
if (vol_error >= 0){
printf("VolumeError: negative volumn\n");
exit(VolumeError);
}
if ( hgcoef > Real_t(0.) ) {
Real_t *ss = &domain.m_ss[0];
Real_t *elemMass = &domain.m_elemMass[0];
//Index_t *nodeElemStart = &domain.m_nodeElemStart[0];
//Index_t len1 = numNode + 1;
//Index_t *nodeElemCornerList = &domain.m_nodeElemCornerList[0];
//Index_t len2 = nodeElemStart[numNode];
#ifdef VERIFY
// initialize data for testing
for (int i = 0; i < numElem; i++) {
ss[i] = ((float)rand()/(float)(RAND_MAX));
elemMass[i] = ((float)rand()/(float)(RAND_MAX));
}
#endif
hipMemcpy(d_ss, ss, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_elemMass, elemMass, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipLaunchKernelGGL(fb, dim3(gws_elem), dim3(lws), 0, 0,
d_dvdx,
d_dvdy,
d_dvdz,
d_x8n,
d_y8n,
d_z8n,
d_determ,
d_xd,
d_yd,
d_zd,
d_ss,
d_elemMass,
d_nodelist,
d_gamma,
d_fx_elem,
d_fy_elem,
d_fz_elem,
hgcoef,
numElem );
hipLaunchKernelGGL(collect_final_force, dim3(gws_node), dim3(lws), 0, 0,
d_fx_elem,
d_fy_elem,
d_fz_elem,
d_fx,
d_fy,
d_fz,
d_nodeElemStart,
d_nodeElemCornerList,
numNode );
#ifdef VERIFY
Real_t *fx_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *fy_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *fz_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
hipMemcpy(fx_tmp, d_fx, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(fy_tmp, d_fy, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(fz_tmp, d_fz, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
for (int i = 0; i < numNode; i++)
printf("fb: %d %f %f %f\n", i, fx_tmp[i], fy_tmp[i], fz_tmp[i]);
free(fx_tmp);
free(fy_tmp);
free(fz_tmp);
#endif
} // if ( hgcoef > Real_t(0.) )
//===========================================================================
//CalcAccelerationForNodes(domain, domain.numNode()); // IN: fx OUT: m_xdd
//===========================================================================
Real_t *nodalMass = &domain.m_nodalMass[0];
hipMemcpy(d_nodalMass, nodalMass, sizeof(Real_t)*numNode, hipMemcpyHostToDevice);
hipLaunchKernelGGL(accelerationForNode, dim3(gws_node), dim3(lws), 0, 0,
d_fx,
d_fy,
d_fz,
d_nodalMass,
d_xdd,
d_ydd,
d_zdd,
numNode);
//======================================================================================
//ApplyAccelerationBoundaryConditionsForNodes(domain); // uses m_xdd
//======================================================================================
//Index_t size = domain.sizeX();
//Index_t numNodeBC = (size+1)*(size+1) ;
Index_t *symmX = &domain.m_symmX[0];
Index_t *symmY = &domain.m_symmY[0];
Index_t *symmZ = &domain.m_symmZ[0];
hipMemcpy(d_symmX, symmX, sizeof(Index_t)*numNodeBC, hipMemcpyHostToDevice);
hipMemcpy(d_symmY, symmY, sizeof(Index_t)*numNodeBC, hipMemcpyHostToDevice);
hipMemcpy(d_symmZ, symmZ, sizeof(Index_t)*numNodeBC, hipMemcpyHostToDevice);
Index_t s1 = domain.symmXempty();
Index_t s2 = domain.symmYempty();
Index_t s3 = domain.symmZempty();
hipLaunchKernelGGL(applyAccelerationBoundaryConditionsForNodes, dim3((numNodeBC+255)/256), dim3(256), 0, 0,
d_symmX,
d_symmY,
d_symmZ,
d_xdd,
d_ydd,
d_zdd,
s1,
s2,
s3,
numNodeBC ) ;
//=================================================================
// CalcVelocityForNodes( domain, delt, u_cut, domain.numNode()) ; //uses m_xd and m_xdd
//=================================================================
hipLaunchKernelGGL(calcVelocityForNodes, dim3(gws_node), dim3(lws), 0, 0,
d_xd,
d_yd,
d_zd,
d_xdd,
d_ydd,
d_zdd,
deltaTime,
u_cut,
numNode );
//=================================================================================
// CalcPositionForNodes( domain, delt, domain.numNode() ); //uses m_xd and m_x
//=================================================================================
hipLaunchKernelGGL(calcPositionForNodes, dim3(gws_node), dim3(lws ), 0, 0,
d_x,
d_y,
d_z,
d_xd,
d_yd,
d_zd,
deltaTime,
numNode) ;
#ifdef VERIFY
Real_t *xd_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *yd_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *zd_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *x_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *y_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
Real_t *z_tmp = (Real_t*) malloc (sizeof(Real_t)*numNode);
hipMemcpy(xd_tmp, d_xd, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(yd_tmp, d_yd, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(zd_tmp, d_zd, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(x_tmp, d_x, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(y_tmp, d_y, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
hipMemcpy(z_tmp, d_z, sizeof(Real_t)*numNode, hipMemcpyDeviceToHost);
for (int i = 0; i < numNode; i++)
printf("CalcPositionForNodes: %d %f %f %f %f %f %f\n",
i, x_tmp[i], y_tmp[i], z_tmp[i], xd_tmp[i], yd_tmp[i], zd_tmp[i]);
free(x_tmp);
free(y_tmp);
free(z_tmp);
free(xd_tmp);
free(yd_tmp);
free(zd_tmp);
#endif
//=========================================================
// calculate element quantities (i.e. velocity gradient & q), and update material states
// LagrangeElements(domain);
//=========================================================
Real_t *dxx = &domain.m_dxx[0];
Real_t *dyy = &domain.m_dyy[0];
Real_t *dzz = &domain.m_dzz[0];
Real_t *delv = &domain.m_delv[0];
Real_t *arealg = &domain.m_arealg[0];
Real_t *vdov = &domain.m_vdov[0];
hipMemcpy(d_vdov, vdov, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_delv, delv, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_arealg, arealg, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_dxx, dxx, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_dyy, dyy, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_dzz, dzz, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
//========================================================================
// void CalcKinematicsForElems( Domain &domain, Real_t *vnew,
//========================================================================
hipLaunchKernelGGL(calcKinematicsForElems, dim3(gws_elem), dim3(lws), 0, 0,
d_xd,
d_yd,
d_zd,
d_x,
d_y,
d_z,
d_nodelist,
d_volo,
d_v,
d_delv,
d_arealg,
d_dxx,
d_dyy,
d_dzz,
d_vnew,
deltaTime,
numElem );
vol_error = -1; // reset volumn error
hipMemcpy(d_vol_error, &vol_error, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(calcStrainRates, dim3(gws_elem), dim3(lws), 0, 0,
d_dxx,
d_dyy,
d_dzz,
d_vnew,
d_vdov,
d_vol_error,
numElem );
hipMemcpy(vdov, d_vdov, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(&vol_error, d_vol_error, sizeof(int), hipMemcpyDeviceToHost);
#ifdef VERIFY
for ( Index_t k=0 ; k<numElem ; ++k )
{
printf("kintec: %d %f\n", k, vdov[k]);
}
#endif
if (vol_error >= 0){
printf("VolumeError: negative volumn\n");
exit(VolumeError);
}
//=======================================================
//CalcQForElems(domain, vnew) ;
//=======================================================
//================================================================
// Calculate velocity gradients
//CalcMonotonicQGradientsForElems(domain, vnew);
//================================================================
hipLaunchKernelGGL(calcMonotonicQGradientsForElems, dim3(gws_elem), dim3(lws), 0, 0,
d_xd,
d_yd,
d_zd,
d_x,
d_y,
d_z,
d_nodelist,
d_volo,
d_delv_eta,
d_delx_eta,
d_delv_zeta,
d_delx_zeta,
d_delv_xi,
d_delx_xi,
d_vnew,
numElem );
//=========================================================
// CalcMonotonicQForElems(domain, vnew) ;
//=========================================================
Real_t monoq_limiter_mult = domain.monoq_limiter_mult();
Real_t monoq_max_slope = domain.monoq_max_slope();
Real_t qlc_monoq = domain.qlc_monoq();
Real_t qqc_monoq = domain.qqc_monoq();
Index_t *elemBC = &domain.m_elemBC[0];
Index_t *lxim = &domain.m_lxim[0];
Index_t *lxip = &domain.m_lxip[0];
Index_t *letam = &domain.m_letam[0];
Index_t *letap = &domain.m_letap[0];
Index_t *lzetam = &domain.m_lzetam[0];
Index_t *lzetap = &domain.m_lzetap[0];
Real_t *elemMass = &domain.m_elemMass[0];
Real_t *ql = &domain.m_ql[0];
Real_t *qq = &domain.m_qq[0];
hipMemcpy(d_lzetam, lzetam, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_lzetap, lzetap, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_letam, letam, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_letap, letap, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_lxip, lxip, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_lxim, lxim, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_elemBC, elemBC, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_elemMass, elemMass, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipLaunchKernelGGL(calcMonotonicQForElems, dim3(gws_elem), dim3(lws), 0, 0,
d_elemBC,
d_elemMass,
d_ql,
d_qq,
d_vdov,
d_volo,
d_delv_eta,
d_delx_eta,
d_delv_zeta,
d_delx_zeta,
d_delv_xi,
d_delx_xi,
d_lxim,
d_lxip,
d_lzetam,
d_lzetap,
d_letap,
d_letam,
d_vnew,
monoq_limiter_mult,
monoq_max_slope,
qlc_monoq,
qqc_monoq,
numElem );
#ifdef VERIFY
Real_t* qq_tmp = (Real_t*) malloc (sizeof(Real_t)*numElem);
Real_t* ql_tmp = (Real_t*) malloc (sizeof(Real_t)*numElem);
hipMemcpy(qq_tmp, d_qq, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(ql_tmp, d_ql, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
for (int i = 0; i < numElem; i++) {
printf("mqr: %d %f %f\n", i, qq_tmp[i], ql_tmp[i]);
}
free(qq_tmp);
free(ql_tmp);
#endif
/* Don't allow excessive artificial viscosity */
Index_t idx = -1;
for (Index_t i=0; i<numElem; ++i) {
if ( domain.q(i) > domain.qstop() ) {
idx = i ;
break ;
}
}
if(idx >= 0) {
printf("QStopError\n");
exit(QStopError);
}
//=================================================
//ApplyMaterialPropertiesForElems(domain, vnew) ;
//=================================================
Real_t e_cut = domain.e_cut() ;
Real_t p_cut = domain.p_cut() ;
Real_t ss4o3 = domain.ss4o3() ;
Real_t q_cut = domain.q_cut() ;
Real_t v_cut = domain.v_cut() ;
Real_t eosvmax = domain.eosvmax() ;
Real_t eosvmin = domain.eosvmin() ;
Real_t pmin = domain.pmin() ;
Real_t emin = domain.emin() ;
Real_t rho0 = domain.refdens() ;
Real_t *e = &domain.m_e[0];
Real_t *ss = &domain.m_ss[0];
Index_t *elemRep = &domain.m_elemRep[0];
Index_t *elemElem = &domain.m_elemElem[0];
hipMemcpy(d_e, e, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_ss, ss, sizeof(Real_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_elemRep, elemRep, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipMemcpy(d_elemElem, elemElem, sizeof(Index_t)*numElem, hipMemcpyHostToDevice);
hipLaunchKernelGGL(applyMaterialPropertiesForElems, dim3(gws_elem), dim3(lws), 0, 0,
d_ql,
d_qq,
d_delv,
d_elemRep,
d_elemElem,
d_q,
d_p,
d_e,
d_ss,
d_v,
d_vnew,
e_cut,
p_cut,
ss4o3,
q_cut,
v_cut,
eosvmax,
eosvmin,
pmin,
emin,
rho0,
numElem );
hipMemcpy(p, d_p, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(q, d_q, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(e, d_e, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(ss, d_ss, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
hipMemcpy(v, d_v, sizeof(Real_t)*numElem, hipMemcpyDeviceToHost);
#ifdef VERIFY
for (int i = 0; i < numElem; i++) {
printf("eos: %f %f %f %f %f\n", q[i], p[i], e[i], ss[i], v[i]);
}
#endif
CalcTimeConstraintsForElems(domain);
if ((opts.showProg != 0) && (opts.quiet == 0) && (myRank == 0)) {
printf("cycle = %d, time = %e, dt=%e\n",
locDom->cycle(), double(locDom->time()), double(locDom->deltatime()) ) ;
}
if (opts.iteration_cap == 1){
break;
}
opts.iteration_cap -= 1;
}
// Use reduced max elapsed time
double elapsed_time;
timeval end;
gettimeofday(&end, NULL) ;
elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
double elapsed_timeG;
elapsed_timeG = elapsed_time;
// Write out final viz file */
if (opts.viz) {
DumpToVisit(*locDom, opts.numFiles, myRank, numRanks) ;
}
if ((myRank == 0) && (opts.quiet == 0)) {
VerifyAndWriteFinalOutput(elapsed_timeG, *locDom, opts.nx, numRanks);
}
// Release resources
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
hipFree(d_fx);
hipFree(d_fy);
hipFree(d_fz);
hipFree(d_xd);
hipFree(d_yd);
hipFree(d_zd);
hipFree(d_xdd);
hipFree(d_ydd);
hipFree(d_zdd);
hipFree(d_nodelist);
hipFree(d_determ);
hipFree(d_fx_elem);
hipFree(d_fy_elem);
hipFree(d_fz_elem);
hipFree(d_dvdx);
hipFree(d_dvdy);
hipFree(d_dvdz);
hipFree(d_x8n);
hipFree(d_y8n);
hipFree(d_z8n);
hipFree(d_sigxx);
hipFree(d_sigyy);
hipFree(d_sigzz);
hipFree(d_delv_xi);
hipFree(d_delx_xi);
hipFree(d_delv_eta);
hipFree(d_delx_eta);
hipFree(d_delv_zeta);
hipFree(d_delx_zeta);
hipFree(d_p);
hipFree(d_q);
hipFree(d_volo);
hipFree(d_v);
hipFree(d_vol_error);
hipFree(d_nodeElemStart);
hipFree(d_nodeElemCornerList);
hipFree(d_gamma);
hipFree(d_ss);
hipFree(d_elemMass);
hipFree(d_nodalMass);
hipFree(d_symmX);
hipFree(d_symmY);
hipFree(d_symmZ);
hipFree(d_vdov);
hipFree(d_delv);
hipFree(d_arealg);
hipFree(d_dxx);
hipFree(d_dyy);
hipFree(d_dzz);
hipFree(d_vnew);
hipFree(d_lzetam);
hipFree(d_lzetap);
hipFree(d_letap);
hipFree(d_letam);
hipFree(d_lxip);
hipFree(d_lxim);
hipFree(d_elemBC);
hipFree(d_ql);
hipFree(d_qq);
hipFree(d_e);
hipFree(d_elemRep);
hipFree(d_elemElem);
locDom->DeallocateGradients();
locDom->DeallocateStrains();
Release(&determ);
delete(locDom);
return 0 ;
} | the_stack |
#include "caffe/layers/accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype, typename MItype, typename MOtype>
// Assembles and compiles the device program holding the two accuracy
// kernels used by Forward_gpu:
//  - "AccuracyForward": per-sample top-k hit flag plus a validity count.
//  - "AccuracyForwardWithPerClass": same, but accumulated into per-class
//    slots (acc/counts are laid out as [num_labels x nthreads]).
// The kernel source is built as a string via the device-program
// abstraction so one implementation serves every supported backend.
void AccuracyLayer<Dtype, MItype, MOtype>::GenerateProgram() {
this->device_program_ = this->device_->CreateProgram();
stringstream ss;
ss << this->device_program_->setup();
ss << this->device_program_->template define_type<Dtype>("Dtype");
ss << this->device_program_->template define_type<MItype>("MItype");
ss << this->device_program_->template define_type<MOtype>("MOtype");
{
// Kernel 1: global accuracy. One work item per (outer, inner) sample.
KernelArgs fw_args;
fw_args.push_back(this->device_program_->template
create_kernel_arg<uint_tp>("nthreads", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"label", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"acc", KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"dim", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"spatial_dim", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num_labels", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"top_k", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<bool>(
"has_ignore_label", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"ignore_label", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"counts", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("AccuracyForward", fw_args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
ss << "const int_tp n = index / spatial_dim;" << std::endl;
ss << "const int_tp s = index % spatial_dim;" << std::endl;
ss << "const int_tp label_value = (int_tp)(label[n * spatial_dim + s]);"
<< std::endl;
ss << "if (has_ignore_label && label_value == ignore_label) {" << std::endl;
ss << "acc[index] = 0;" << std::endl;
ss << "counts[index] = 0;" << std::endl;
ss << "} else {" << std::endl;
// Read the true class' score only for non-ignored samples so that an
// out-of-range ignore label (e.g. -1) never indexes bottom_data.
ss << "const Dtype prob_of_true_class = bottom_data[n * dim"
<< " + label_value * spatial_dim + s];" << std::endl;
ss << "int_tp num_better_predictions = -1;"
<< " // true_class also counts as \"better\"" << std::endl;
// Use short-circuit && (not bitwise &) so the count test is evaluated
// as a proper boolean condition.
ss << "for (int_tp k = 0; k < num_labels && num_better_predictions < top_k;"
<< " k++) {" << std::endl;
ss << "num_better_predictions += "
<< "(bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class);"
<< std::endl;
ss << "}" << std::endl;
ss << "acc[index] = (num_better_predictions < top_k) ? "
<< "(MItype)1 : (MItype)0;" << std::endl;
ss << "counts[index] = (MItype)1;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
}
{
// Kernel 2: per-class accuracy. Same argument list; acc/counts are
// accumulated at label_value * nthreads + index.
KernelArgs fw_args;
fw_args.push_back(this->device_program_->template
create_kernel_arg<uint_tp>("nthreads", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"bottom_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"label", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"acc", KERNEL_ARG_GLOBAL_MEM));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"dim", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"spatial_dim", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"num_labels", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"top_k", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<bool>(
"has_ignore_label", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>(
"ignore_label", KERNEL_ARG_CONST));
fw_args.push_back(this->device_program_->template create_kernel_arg<MItype>(
"counts", KERNEL_ARG_GLOBAL_MEM));
ss << this->device_program_->function("AccuracyForwardWithPerClass",
fw_args);
ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads");
ss << "const int_tp n = index / spatial_dim;" << std::endl;
ss << "const int_tp s = index % spatial_dim;" << std::endl;
ss << "const int_tp label_value = (int_tp)(label[n * spatial_dim + s]);"
<< std::endl;
ss << "if (has_ignore_label && label_value == ignore_label) {"
<< std::endl;
ss << "// nothing to be done." << std::endl;
ss << "} else {" << std::endl;
// As above: defer the bottom_data read until the sample is known to be
// valid, so an ignore label never produces an out-of-bounds access.
ss << "const Dtype prob_of_true_class = bottom_data[n * dim"
<< " + label_value * spatial_dim + s];" << std::endl;
ss << "int_tp num_better_predictions = -1;"
<< " // true_class also counts as \"better\"" << std::endl;
ss << "for (int_tp k = 0; k < num_labels && num_better_predictions < top_k; "
<< "k++) {" << std::endl;
ss << "num_better_predictions += "
<< "(bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class);"
<< std::endl;
ss << "}" << std::endl;
ss << "acc[label_value*nthreads + index]"
<< " += (num_better_predictions < top_k) ? (MItype) 1 : (MItype) 0;"
<< std::endl;
// counts is declared MItype above, so write it as MItype (the first
// kernel already does); all guarded instantiations have Dtype == MItype.
ss << "counts[label_value*nthreads + index] = (MItype)1;" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
ss << "}" << std::endl;
}
this->device_program_->set_source(ss.str());
this->device_program_->Compile(true, true);
}
template<typename Dtype, typename MItype, typename MOtype>
// Computes top-k accuracy on the device. top[0] receives the global
// accuracy; when a second top blob is present, top[1] additionally
// receives per-class accuracies. bottom[0] holds predictions, bottom[1]
// the labels. Both bottom diff buffers are reused as device scratch
// space and cleared again at the end.
void AccuracyLayer<Dtype, MItype, MOtype>::Forward_gpu(
const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) {
vptr<const MItype> bottom_data = bottom[0]->gpu_data();
vptr<const MItype> bottom_label = bottom[1]->gpu_data();
// dim = per-outer-instance element count; num_labels = size of the class axis.
const int_tp dim = bottom[0]->count() / outer_num_;
const int_tp num_labels = bottom[0]->shape(label_axis_);
// One kernel work item per (outer, inner) sample position.
const uint_tp nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
vptr<MItype> acc_data = bottom[0]->mutable_gpu_diff();
if (top.size() == 1) {
// simple case - report only global accuracy.
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
vptr<MItype> counts = bottom[1]->mutable_gpu_diff();
shared_ptr<DeviceKernel> kernel =
this->device_program_->GetKernel("AccuracyForward");
// Kernel argument order must match GenerateProgram: "num" is bound to
// outer_num_ and "spatial_dim" to inner_num_.
kernel->add_arg(&nthreads);
kernel->add_arg(&bottom_data);
kernel->add_arg(&bottom_label);
kernel->add_arg(&acc_data);
kernel->add_arg(&outer_num_);
kernel->add_arg(&dim);
kernel->add_arg(&inner_num_);
kernel->add_arg(&num_labels);
kernel->add_arg(&top_k_);
kernel->add_arg(&has_ignore_label_);
kernel->add_arg(&ignore_label_);
kernel->add_arg(&counts);
vector<size_t> work_size(1, nthreads);
vector<size_t> group;
vector<size_t> local;
this->device_->get_threads(&work_size, &group, &local, kernel.get(),
true);
kernel->Execute(group, local);
// Reduce per-sample hit flags and validity counts on the device.
// NOTE(review): asum<MOtype> is applied to MItype buffers; this relies
// on MItype == MOtype, which holds for all guarded instantiations below.
MOtype acc;
this->device_->template asum<MOtype>(nthreads, acc_data, &acc);
MOtype valid_count;
this->device_->template asum<MOtype>(nthreads, counts, &valid_count);
if (valid_count > 0) {
top[0]->mutable_cpu_data()[0] = acc / valid_count;
} else {
// All samples carried the ignore label: report zero accuracy.
top[0]->mutable_cpu_data()[0] = 0;
}
} else {
// need to report per-class accuracy as well
// allocate space for more detailed "counts"
nums_buffer_.ReshapeLike(bottom[0]);
vptr<Dtype> counts = nums_buffer_.mutable_gpu_data();
// The per-class kernel accumulates (+=), so zero the scratch buffers first.
this->device_->template set<MItype>(bottom[0]->count(), MItype(0),
acc_data);
this->device_->template set<Dtype>(nums_buffer_.count(), Dtype(0), counts);
shared_ptr<DeviceKernel> kernel =
this->device_program_->GetKernel("AccuracyForwardWithPerClass");
kernel->add_arg(&nthreads);
kernel->add_arg(&bottom_data);
kernel->add_arg(&bottom_label);
kernel->add_arg(&acc_data);
kernel->add_arg(&outer_num_);
kernel->add_arg(&dim);
kernel->add_arg(&inner_num_);
kernel->add_arg(&num_labels);
kernel->add_arg(&top_k_);
kernel->add_arg(&has_ignore_label_);
kernel->add_arg(&ignore_label_);
kernel->add_arg(&counts);
vector<size_t> work_size(1, nthreads);
vector<size_t> group;
vector<size_t> local;
this->device_->get_threads(&work_size, &group, &local, kernel.get(), true);
kernel->Execute(group, local);
// get the overall accuracy
MItype acc;
this->device_->template asum<MItype>(bottom[0]->count(), acc_data, &acc);
Dtype valid_count;
this->device_->template asum<Dtype>(nums_buffer_.count(), counts,
&valid_count);
if (valid_count > 0) {
top[0]->mutable_cpu_data()[0] = static_cast<MOtype>(acc /
static_cast<MItype>(valid_count));
} else {
top[0]->mutable_cpu_data()[0] = 0;
}
// get per-class accuracy
// Per-class slots live at offset l * nthreads in acc_data/counts.
MOtype* per_class_acc = top[1]->mutable_cpu_data();
for (int_tp l = 0; l < num_labels; l++) {
this->device_->template asum<MOtype>(nthreads,
acc_data + l * nthreads, per_class_acc + l);
this->device_->template asum<Dtype>(nthreads,
counts + l * nthreads, &valid_count);
if (valid_count > 0) {
per_class_acc[l] /= static_cast<MOtype>(valid_count);
} else {
// No valid sample of this class in the batch.
per_class_acc[l] = MOtype(0);
}
}
}
// Clear scratch memory to prevent interfering with backward (see #6202).
this->device_->template set<MItype>(bottom[0]->count(),
MItype(0), bottom[0]->mutable_gpu_diff());
}
template<typename Dtype, typename MItype, typename MOtype>
// Accuracy is a metric, not a differentiable loss: no gradient is defined
// with respect to the label input, so requesting backpropagation to
// bottom[1] is unsupported.
void AccuracyLayer<Dtype, MItype, MOtype>::Backward_gpu(
const vector<Blob<MOtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<MItype>*>& bottom) {
if (!propagate_down[1]) {
return;
}
NOT_IMPLEMENTED;
}
// Explicit template instantiations of the AccuracyLayer GPU entry points for
// each supported (Dtype, MItype, MOtype) precision triple.
// NOTE(review): the _GUARDED macro presumably emits an instantiation only when
// the corresponding precision is enabled at build time - confirm against the
// macro definition, which is not visible in this chunk.
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, GenerateProgram,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, GenerateProgram,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, GenerateProgram,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Forward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Forward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Forward_gpu,
(double), (double), (double));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Backward_gpu,
(half_fp), (half_fp), (half_fp));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Backward_gpu,
(float), (float), (float));
INSTANTIATE_CLASST_FUNC_3T_GUARDED(AccuracyLayer, Backward_gpu,
(double), (double), (double));
} // namespace caffe
#include <cub/block/block_load.cuh>
#include <cub/block/block_run_length_decode.cuh>
#include <cub/block/block_store.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
/******************************************************************************
* HELPER CLASS FOR RUN-LENGTH DECODING TESTS
******************************************************************************/
/**
* \brief Class template to facilitate testing the BlockRunLengthDecode algorithm for all its template parameter
* specialisations.
*
* \tparam ItemItT The item type being run-length decoded
* \tparam RunLengthsItT Iterator type providing the runs' lengths
* \tparam RUNS_PER_THREAD The number of runs that each thread is getting assigned to
* \tparam DECODED_ITEMS_PER_THREAD The number of run-length decoded items that each thread is decoding
* \tparam TEST_RELATIVE_OFFSETS_ Whether to also retrieve each decoded item's relative offset within its run
* \tparam TEST_RUN_OFFSETS_ Whether to pass in each run's offset instead of each run's length
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam BLOCK_DIM_Y The thread block length in threads along the Y dimension
* \tparam BLOCK_DIM_Z The thread block length in threads along the Z dimension
*/
template <typename ItemItT,
typename RunLengthsItT,
int RUNS_PER_THREAD,
int DECODED_ITEMS_PER_THREAD,
bool TEST_RELATIVE_OFFSETS_,
bool TEST_RUN_OFFSETS_,
int BLOCK_DIM_X,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1>
class AgentTestBlockRunLengthDecode
{
public:
// Total threads per block and the number of runs one block's tile covers.
constexpr static uint32_t BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr static uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * BLOCK_THREADS;
constexpr static bool TEST_RELATIVE_OFFSETS = TEST_RELATIVE_OFFSETS_;
private:
// Value types read through the run-item / run-length input iterators.
using RunItemT = cub::detail::value_t<ItemItT>;
using RunLengthT = cub::detail::value_t<RunLengthsItT>;
// Block-wide exclusive scan used to turn run lengths into run offsets when
// TEST_RUN_OFFSETS_ is enabled.
using BlockRunOffsetScanT = cub::BlockScan<RunLengthT,
BLOCK_DIM_X,
BLOCK_SCAN_RAKING,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
// The algorithm under test.
using BlockRunLengthDecodeT =
cub::BlockRunLengthDecode<RunItemT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD>;
// Cooperative loaders for the tile of unique run items and run lengths.
using BlockLoadRunItemT = cub::BlockLoad<RunItemT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockLoadRunLengthsT = cub::BlockLoad<RunLengthT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
// Cooperative stores for the decoded items and their relative offsets.
using BlockStoreDecodedItemT = cub::BlockStore<RunItemT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockStoreRelativeOffsetT = cub::BlockStore<RunLengthT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
// Overload for TEST_RUN_OFFSETS_ == true: first scan the lengths into
// exclusive per-run offsets (also yielding decoded_size as the aggregate),
// then construct the decoder from those offsets.
__device__ __forceinline__ BlockRunLengthDecodeT InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<true> /*test_run_offsets*/)
{
RunLengthT run_offsets[RUNS_PER_THREAD];
BlockRunOffsetScanT(temp_storage.run_offsets_scan_storage).ExclusiveSum(run_lengths, run_offsets, decoded_size);
// Ensure temporary shared memory can be repurposed
CTA_SYNC();
// Construct BlockRunLengthDecode and initialize with the run offsets
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage, unique_items, run_offsets);
}
// Overload for TEST_RUN_OFFSETS_ == false: hand the raw run lengths to the
// decoder, which computes decoded_size itself.
__device__ __forceinline__ BlockRunLengthDecodeT InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<false> /*test_run_offsets*/)
{
// Construct BlockRunLengthDecode and initialize with the run lengths
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage, unique_items, run_lengths, decoded_size);
}
// Cooperatively loads this block's tile of unique items and run lengths.
// For a partial (last) tile, out-of-bounds run lengths are filled with 0 so
// they contribute nothing to the decoded size.
__device__ __forceinline__ void LoadRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
size_t num_valid_items)
{
if (num_valid_items < RUNS_PER_BLOCK)
{
BlockLoadRunItemT(temp_storage.load_uniques_storage).Load(d_block_unique_items, unique_items, num_valid_items);
}
else
{
BlockLoadRunItemT(temp_storage.load_uniques_storage).Load(d_block_unique_items, unique_items);
}
// Ensure BlockLoad's temporary shared memory can be repurposed
CTA_SYNC();
// Load this block's tile of run lengths
if (num_valid_items < RUNS_PER_BLOCK)
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage)
.Load(d_block_run_lengths, run_lengths, num_valid_items, static_cast<RunLengthT>(0));
else
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage).Load(d_block_run_lengths, run_lengths);
// Ensure temporary shared memory can be repurposed
CTA_SYNC();
}
public:
// Shared-memory union: the load/scan stages and the decode/store stages are
// separated by CTA_SYNC() calls above, so their storage may alias.
union TempStorage
{
typename BlockLoadRunItemT::TempStorage load_uniques_storage;
typename BlockLoadRunLengthsT::TempStorage load_run_lengths_storage;
cub::detail::conditional_t<TEST_RUN_OFFSETS_,
typename BlockRunOffsetScanT::TempStorage,
cub::NullType>
run_offsets_scan_storage;
struct
{
typename BlockRunLengthDecodeT::TempStorage run_length_decode_storage;
typename BlockStoreDecodedItemT::TempStorage store_decoded_runs_storage;
typename BlockStoreRelativeOffsetT::TempStorage store_relative_offsets;
} decode;
};
TempStorage &temp_storage;
__device__ __forceinline__ AgentTestBlockRunLengthDecode(TempStorage &temp_storage)
: temp_storage(temp_storage)
{}
/**
 * \brief Loads the given block (or tile) of runs, and computes their "decompressed" (run-length decoded) size.
 */
__device__ __forceinline__ uint32_t GetDecodedSize(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items, run_lengths, decoded_size, cub::Int2Type<TEST_RUN_OFFSETS_>());
return decoded_size;
}
/**
 * \brief Loads the given block (or tile) of runs, run-length decodes them, and writes the results to \p
 * d_block_decoded_out.
 */
template <typename UniqueItemOutItT, typename RelativeOffsetOutItT>
__device__ __forceinline__ uint32_t WriteDecodedRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
UniqueItemOutItT d_block_decoded_out,
RelativeOffsetOutItT d_block_rel_out,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items, run_lengths, decoded_size, cub::Int2Type<TEST_RUN_OFFSETS_>());
// Run-length decode ("decompress") the runs into a window buffer of limited size. This is repeated until all runs
// have been decoded.
uint32_t decoded_window_offset = 0U;
while (decoded_window_offset < decoded_size)
{
RunLengthT relative_offsets[DECODED_ITEMS_PER_THREAD];
RunItemT decoded_items[DECODED_ITEMS_PER_THREAD];
// The number of decoded items that are valid within this window (aka pass) of run-length decoding
// NOTE(review): num_valid_items may exceed the per-pass window capacity on
// all but the last pass; BlockStore's guarded Store presumably clamps to
// the tile size - confirm against the BlockStore documentation.
uint32_t num_valid_items = decoded_size - decoded_window_offset;
run_length_decode.RunLengthDecode(decoded_items, relative_offsets, decoded_window_offset);
BlockStoreDecodedItemT(temp_storage.decode.store_decoded_runs_storage)
.Store(d_block_decoded_out + decoded_window_offset, decoded_items, num_valid_items);
if (TEST_RELATIVE_OFFSETS)
{
BlockStoreRelativeOffsetT(temp_storage.decode.store_relative_offsets)
.Store(d_block_rel_out + decoded_window_offset, relative_offsets, num_valid_items);
}
decoded_window_offset += DECODED_ITEMS_PER_THREAD * BLOCK_THREADS;
}
return decoded_size;
}
};
/******************************************************************************
* [STAGE 1] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
/**
 * Stage-1 kernel: each thread block computes the total run-length decoded size
 * of its tile of runs and writes that size to d_decoded_sizes[blockIdx.x].
 */
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename OffsetT,
typename DecodedSizesOutT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeGetSizeKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const OffsetT num_runs,
DecodedSizesOutT d_decoded_sizes)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
// The last block may own a partial tile; clamp its number of valid runs.
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset) : RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
uint64_t num_decoded_items =
run_length_decode_agent.GetDecodedSize(d_unique_items + block_offset, d_run_lengths + block_offset, num_valid_runs);
d_decoded_sizes[blockIdx.x] = num_decoded_items;
}
/******************************************************************************
* [STAGE 2] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
/**
 * Stage-2 kernel: each thread block run-length decodes its tile of runs and
 * writes the decoded items (and, if enabled, their relative offsets) starting
 * at d_decoded_offsets[blockIdx.x], the block's offset computed in stage 1.
 */
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename DecodedSizesOutT,
typename OffsetT,
typename DecodedItemsOutItT,
typename RelativeOffsetOutItT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeTestKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const DecodedSizesOutT d_decoded_offsets,
const OffsetT num_runs,
DecodedItemsOutItT d_decoded_items,
RelativeOffsetOutItT d_relative_offsets)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
// The last block may own a partial tile; clamp its number of valid runs.
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset) : RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
run_length_decode_agent.WriteDecodedRuns(d_unique_items + block_offset,
d_run_lengths + block_offset,
d_decoded_items + d_decoded_offsets[blockIdx.x],
d_relative_offsets + d_decoded_offsets[blockIdx.x],
num_valid_runs);
}
/// Functor mapping an arbitrary uint32_t into the range [1, 100]; used to
/// synthesize bounded, non-zero run lengths from a counting iterator.
struct ModOp
{
  using T = uint32_t;
  __host__ __device__ __forceinline__ T operator()(const T &x) const
  {
    const T bounded = x % T{100};
    return bounded + T{1};
  }
};
/**
 * End-to-end test driver for one BlockRunLengthDecode specialisation:
 * (1) a size kernel computes each block's decoded size, (2) a device scan
 * turns those sizes into per-block output offsets, (3) a decode kernel writes
 * the decoded items, and (4) the result is verified against a host-side
 * reference decode. Also prints timing / bandwidth figures.
 */
template <uint32_t RUNS_PER_THREAD,
uint32_t DECODED_ITEMS_PER_THREAD,
uint32_t BLOCK_DIM_X,
uint32_t BLOCK_DIM_Y,
uint32_t BLOCK_DIM_Z,
bool TEST_RUN_OFFSETS,
bool TEST_RELATIVE_OFFSETS>
void TestAlgorithmSpecialisation()
{
constexpr uint32_t THREADS_PER_BLOCK = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * THREADS_PER_BLOCK;
using RunItemT = float;
using RunLengthT = uint32_t;
// Inputs are synthesized on the fly: items count up from 1000, run lengths
// are a counting sequence pushed through ModOp (range [1, 100]).
using ItemItT = cub::CountingInputIterator<RunItemT>;
using RunLengthsItT = cub::TransformInputIterator<RunLengthT, ModOp, cub::CountingInputIterator<RunLengthT>>;
ItemItT d_unique_items(1000U);
RunLengthsItT d_run_lengths(cub::CountingInputIterator<RunLengthT>(0), ModOp{});
constexpr uint32_t num_runs = 10000;
constexpr uint32_t num_blocks = (num_runs + (RUNS_PER_BLOCK - 1U)) / RUNS_PER_BLOCK;
size_t temp_storage_bytes = 0ULL;
void *temp_storage = nullptr;
uint32_t *h_num_decoded_total = nullptr;
uint32_t *d_decoded_sizes = nullptr;
uint32_t *d_decoded_offsets = nullptr;
RunItemT *d_decoded_out = nullptr;
// Stays nullptr unless TEST_RELATIVE_OFFSETS; the decode kernel presumably
// does not dereference it in that configuration (the agent's store is
// guarded by the same flag) - confirm if the flag combinations change.
RunLengthT *d_relative_offsets = nullptr;
RunItemT *h_decoded_out = nullptr;
RunLengthT *h_relative_offsets = nullptr;
using AgentTestBlockRunLengthDecodeT = AgentTestBlockRunLengthDecode<ItemItT,
RunLengthsItT,
RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
TEST_RELATIVE_OFFSETS,
TEST_RUN_OFFSETS,
THREADS_PER_BLOCK,
1,
1>;
enum : uint32_t
{
TIMER_SIZE_BEGIN = 0,
TIMER_SIZE_END,
TIMER_DECODE_BEGIN,
TIMER_DECODE_END,
NUM_TIMERS,
};
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t cuda_evt_timers[NUM_TIMERS];
for (uint32_t i = 0; i < NUM_TIMERS; i++)
{
cudaEventCreate(&cuda_evt_timers[i]);
}
// Get temporary storage requirements for the scan (for computing offsets for the per-block run-length decoded items)
// NOTE(review): unlike the later call, this size query's return status is
// not wrapped in CubDebugExit - consider checking it too.
cub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, d_decoded_sizes, d_decoded_offsets, num_blocks, stream);
// Allocate device memory
CubDebugExit(cudaMalloc(&temp_storage, temp_storage_bytes));
CubDebugExit(cudaMalloc(&d_decoded_sizes, num_blocks * sizeof(*d_decoded_sizes)));
// Allocate for the exclusive sum PLUS the overall aggregate
CubDebugExit(cudaMalloc(&d_decoded_offsets, (num_blocks + 1) * sizeof(*d_decoded_offsets)));
CubDebugExit(cudaMallocHost(&h_num_decoded_total, sizeof(*h_num_decoded_total)));
// Get the per-block number of items being decoded (i-th thread block writing size to d_decoded_sizes[i])
CubDebugExit(cudaEventRecord(cuda_evt_timers[TIMER_SIZE_BEGIN], stream));
BlockRunLengthDecodeGetSizeKernel<AgentTestBlockRunLengthDecodeT>
<<<num_blocks, THREADS_PER_BLOCK, 0U, stream>>>(d_unique_items, d_run_lengths, num_runs, d_decoded_sizes);
CubDebugExit(cudaEventRecord(cuda_evt_timers[TIMER_SIZE_END], stream));
// Compute offsets for the runs decoded by each block (exclusive sum + aggregate)
// An inclusive sum shifted by one slot (with offsets[0] zeroed) yields an
// exclusive sum whose final element is the overall total.
CubDebugExit(cudaMemsetAsync(d_decoded_offsets, 0, sizeof(d_decoded_offsets[0]), stream));
CubDebugExit(cub::DeviceScan::InclusiveSum(temp_storage,
temp_storage_bytes,
d_decoded_sizes,
&d_decoded_offsets[1],
num_blocks,
stream));
// Copy the total decoded size to CPU in order to allocate just the right amount of device memory
CubDebugExit(cudaMemcpyAsync(h_num_decoded_total,
&d_decoded_offsets[num_blocks],
sizeof(*h_num_decoded_total),
cudaMemcpyDeviceToHost,
stream));
// Ensure the total decoded size has been copied from GPU to CPU
CubDebugExit(cudaStreamSynchronize(stream));
// Allocate device memory for the run-length decoded output
CubDebugExit(cudaMallocHost(&h_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
CubDebugExit(cudaMalloc(&d_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(cudaMalloc(&d_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
CubDebugExit(cudaMallocHost(&h_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
}
// Perform the block-wise run-length decoding (each block taking its offset from d_decoded_offsets)
CubDebugExit(cudaEventRecord(cuda_evt_timers[TIMER_DECODE_BEGIN], stream));
BlockRunLengthDecodeTestKernel<AgentTestBlockRunLengthDecodeT>
<<<num_blocks, THREADS_PER_BLOCK, 0U, stream>>>(d_unique_items,
d_run_lengths,
d_decoded_offsets,
num_runs,
d_decoded_out,
d_relative_offsets);
CubDebugExit(cudaEventRecord(cuda_evt_timers[TIMER_DECODE_END], stream));
// Copy back results for verification
CubDebugExit(cudaMemcpyAsync(h_decoded_out,
d_decoded_out,
(*h_num_decoded_total) * sizeof(*h_decoded_out),
cudaMemcpyDeviceToHost,
stream));
if (TEST_RELATIVE_OFFSETS)
{
// Copy back the relative offsets
CubDebugExit(cudaMemcpyAsync(h_relative_offsets,
d_relative_offsets,
(*h_num_decoded_total) * sizeof(*h_relative_offsets),
cudaMemcpyDeviceToHost,
stream));
}
// Generate host-side run-length decoded data for verification
// (overlaps with the asynchronous device work queued above)
std::vector<std::pair<RunItemT, RunLengthT>> host_golden;
host_golden.reserve(*h_num_decoded_total);
for (uint32_t run = 0; run < num_runs; run++)
{
for (RunLengthT i = 0; i < d_run_lengths[run]; i++)
{
host_golden.push_back({d_unique_items[run], i});
}
}
// Ensure the run-length decoded result has been copied to the host
CubDebugExit(cudaStreamSynchronize(stream));
// Verify the total run-length decoded size is correct
AssertEquals(host_golden.size(), h_num_decoded_total[0]);
// Both timer events completed at the stream sync above, so the elapsed
// times are valid here.
float duration_size = 0.0f;
float duration_decode = 0.0f;
cudaEventElapsedTime(&duration_size, cuda_evt_timers[TIMER_SIZE_BEGIN], cuda_evt_timers[TIMER_SIZE_END]);
cudaEventElapsedTime(&duration_decode, cuda_evt_timers[TIMER_DECODE_BEGIN], cuda_evt_timers[TIMER_DECODE_END]);
size_t decoded_bytes = host_golden.size() * sizeof(RunItemT);
size_t relative_offsets_bytes = TEST_RELATIVE_OFFSETS ? host_golden.size() * sizeof(RunLengthT) : 0ULL;
size_t total_bytes_written = decoded_bytes + relative_offsets_bytes;
std::cout << "MODE: " << (TEST_RELATIVE_OFFSETS ? "offsets, " : "normal, ") //
<< "INIT: " << (TEST_RUN_OFFSETS ? "run offsets, " : "run lengths, ") //
<< "RUNS_PER_THREAD: " << RUNS_PER_THREAD //
<< ", DECODED_ITEMS_PER_THREAD: " << DECODED_ITEMS_PER_THREAD //
<< ", THREADS_PER_BLOCK: " << THREADS_PER_BLOCK //
<< ", decoded size (bytes): " << decoded_bytes //
<< ", relative offsets (bytes): " << relative_offsets_bytes //
<< ", time_size (ms): " << duration_size //
<< ", time_decode (ms): " << duration_decode //
<< ", achieved decode BW (GB/s): "
<< ((static_cast<double>(total_bytes_written) / 1.0e9) * (1000.0 / duration_decode)) << "\n";
// Verify the run-length decoded data is correct
bool cmp_eq = true;
for (uint32_t i = 0; i < host_golden.size(); i++)
{
if (host_golden[i].first != h_decoded_out[i])
{
std::cout << "Mismatch at #" << i << ": CPU item: " << host_golden[i].first << ", GPU: " << h_decoded_out[i]
<< "\n";
cmp_eq = false;
}
if (TEST_RELATIVE_OFFSETS)
{
if (host_golden[i].second != h_relative_offsets[i])
{
std::cout << "Mismatch of relative offset at #" << i << ": CPU item: " << host_golden[i].first
<< ", GPU: " << h_decoded_out[i] << "; relative offsets: CPU: " << host_golden[i].second
<< ", GPU: " << h_relative_offsets[i] << "\n";
cmp_eq = false;
break;
}
}
}
AssertEquals(cmp_eq, true);
// Clean up memory allocations
CubDebugExit(cudaFree(temp_storage));
CubDebugExit(cudaFree(d_decoded_sizes));
CubDebugExit(cudaFree(d_decoded_offsets));
CubDebugExit(cudaFree(d_decoded_out));
CubDebugExit(cudaFreeHost(h_num_decoded_total));
CubDebugExit(cudaFreeHost(h_decoded_out));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(cudaFree(d_relative_offsets));
CubDebugExit(cudaFreeHost(h_relative_offsets));
}
// Clean up events
for (uint32_t i = 0; i < NUM_TIMERS; i++)
{
CubDebugExit(cudaEventDestroy(cuda_evt_timers[i]));
}
// Clean up streams
CubDebugExit(cudaStreamDestroy(stream));
}
/**
 * Runs all four test modes (run lengths vs. run offsets at init time, with and
 * without relative-offset output) for one tuning-parameter combination.
 */
template <uint32_t RUNS_PER_THREAD,
uint32_t DECODED_ITEMS_PER_THREAD,
uint32_t BLOCK_DIM_X,
uint32_t BLOCK_DIM_Y = 1U,
uint32_t BLOCK_DIM_Z = 1U>
void TestForTuningParameters()
{
constexpr bool DO_TEST_RELATIVE_OFFSETS = true;
constexpr bool DO_NOT_TEST_RELATIVE_OFFSETS = false;
constexpr bool TEST_WITH_RUN_OFFSETS = true;
constexpr bool TEST_WITH_RUN_LENGTHS = false;
// Run BlockRunLengthDecode that uses run lengths and generates offsets relative to each run
TestAlgorithmSpecialisation<RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
BLOCK_DIM_X,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
TEST_WITH_RUN_LENGTHS,
DO_TEST_RELATIVE_OFFSETS>();
// Run BlockRunLengthDecode that uses run lengths and performs normal run-length decoding
TestAlgorithmSpecialisation<RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
BLOCK_DIM_X,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
TEST_WITH_RUN_LENGTHS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
// Run BlockRunLengthDecode that uses run offsets and generates offsets relative to each run
TestAlgorithmSpecialisation<RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
BLOCK_DIM_X,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
TEST_WITH_RUN_OFFSETS,
DO_TEST_RELATIVE_OFFSETS>();
// Run BlockRunLengthDecode that uses run offsets and performs normal run-length decoding
TestAlgorithmSpecialisation<RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
BLOCK_DIM_X,
BLOCK_DIM_Y,
BLOCK_DIM_Z,
TEST_WITH_RUN_OFFSETS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
}
/**
 * Test entry point: initializes the CUDA device from the command line, then
 * instantiates the test for a spread of tuning-parameter combinations.
 */
int main(int argc, char **argv)
{
CommandLineArgs args(argc, argv);
// Initialize device
CubDebugExit(args.DeviceInit());
// Instantiate test template instances for various configurations (tuning parameter dimensions)
// <RUNS_PER_THREAD, DECODED_ITEMS_PER_THREAD, BLOCK_DIM_X[, BLOCK_DIM_Y, BLOCK_DIM_Z]>
TestForTuningParameters<1U, 1U, 64U>();
TestForTuningParameters<1U, 3U, 32U, 2U, 3U>();
TestForTuningParameters<1U, 1U, 128U>();
TestForTuningParameters<1U, 8U, 128U>();
TestForTuningParameters<2U, 8U, 128U>();
TestForTuningParameters<3U, 1U, 256U>();
TestForTuningParameters<1U, 8U, 256U>();
TestForTuningParameters<8U, 1U, 256U>();
TestForTuningParameters<1U, 1U, 256U>();
TestForTuningParameters<2U, 2U, 384U>();
return 0;
}
#include <cstdint>
#include <cstdio>
#include <sstream>
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/io/class_io/pointcloud_io.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/filesystem.h"
#include "cupoch/utility/helper.h"
// References for PCD file IO
// http://pointclouds.org/documentation/tutorials/pcd_file_format.php
// https://github.com/PointCloudLibrary/pcl/blob/master/io/src/pcd_io.cpp
// https://www.mathworks.com/matlabcentral/fileexchange/40382-matlab-to-point-cloud-library
namespace cupoch {
namespace {
using namespace io;
// Storage format of a PCD file's DATA section, as parsed from the header.
// NOTE(review): the binary_compressed decoder is not visible in this chunk;
// PCL presumably uses LZF compression - confirm against the reader below.
enum PCDDataType {
PCD_DATA_ASCII = 0,
PCD_DATA_BINARY = 1,
PCD_DATA_BINARY_COMPRESSED = 2
};
// One field (channel) declared in a PCD header, e.g. "x" or "rgb".
struct PCLPointField {
public:
// Field name from the FIELDS/COLUMNS line.
std::string name;
// Element size in bytes (SIZE line).
int size;
// Element type code from the TYPE line: 'I' signed, 'U' unsigned, 'F' float.
char type;
// Number of elements per point (COUNT line; defaults to 1).
int count;
// helper variable
// count_offset: index of this field's first token in an ASCII record;
// offset: byte offset of this field within a binary point record.
int count_offset;
int offset;
};
// Parsed representation of a full PCD file header.
struct PCDHeader {
public:
std::string version;
std::vector<PCLPointField> fields;
int width;
int height;
// Total number of points (WIDTH * HEIGHT, or the POINTS line if present).
int points;
PCDDataType datatype;
std::string viewpoint;
// helper variables
// elementnum: total tokens per ASCII record; pointsize: bytes per binary
// point record; has_*: convenience flags filled in by CheckHeader().
int elementnum;
int pointsize;
bool has_points;
bool has_normals;
bool has_colors;
};
/// Validates a parsed PCD header and computes its has_points / has_normals /
/// has_colors convenience flags.
///
/// \param header Header to validate; the has_* members are (re)written here.
/// \return true when the header describes at least one point and declares the
///         mandatory x/y/z fields; false otherwise (a warning is logged).
bool CheckHeader(PCDHeader &header) {
    if (header.points <= 0 || header.pointsize <= 0) {
        utility::LogWarning("[CheckHeader] PCD has no data.\n");
        return false;
    }
    // pointsize was already validated above; only the field list remains.
    if (header.fields.size() == 0) {
        utility::LogWarning("[CheckHeader] PCD has no fields.\n");
        return false;
    }
    header.has_points = false;
    header.has_normals = false;
    header.has_colors = false;
    bool has_x = false;
    bool has_y = false;
    bool has_z = false;
    bool has_normal_x = false;
    bool has_normal_y = false;
    bool has_normal_z = false;
    bool has_rgb = false;
    bool has_rgba = false;
    for (const auto &field : header.fields) {
        if (field.name == "x") {
            has_x = true;
        } else if (field.name == "y") {
            has_y = true;
        } else if (field.name == "z") {
            has_z = true;
        } else if (field.name == "normal_x") {
            has_normal_x = true;
        } else if (field.name == "normal_y") {
            has_normal_y = true;
        } else if (field.name == "normal_z") {
            has_normal_z = true;
        } else if (field.name == "rgb") {
            has_rgb = true;
        } else if (field.name == "rgba") {
            has_rgba = true;
        }
    }
    header.has_points = (has_x && has_y && has_z);
    header.has_normals = (has_normal_x && has_normal_y && has_normal_z);
    // Either a packed "rgb" float or an "rgba" field provides color data.
    header.has_colors = (has_rgb || has_rgba);
    if (header.has_points == false) {
        utility::LogWarning(
                "[CheckHeader] Fields for point data are not complete.\n");
        return false;
    }
    return true;
}
// Parses a PCD header line-by-line from `file` into `header`, stopping after
// the DATA line (the file position is then at the start of the payload).
// Returns false on malformed headers (via CheckHeader or mismatched counts).
// NOTE(review): the keyword handlers below depend on the canonical PCD line
// order (FIELDS before SIZE/TYPE/COUNT, WIDTH before HEIGHT) - e.g. `points`
// is computed when HEIGHT is seen, using the WIDTH parsed earlier.
bool ReadPCDHeader(FILE *file, PCDHeader &header) {
char line_buffer[DEFAULT_IO_BUFFER_SIZE];
size_t specified_channel_count = 0;
while (fgets(line_buffer, DEFAULT_IO_BUFFER_SIZE, file)) {
std::string line(line_buffer);
if (line == "") {
continue;
}
// st holds the whitespace-split tokens; sstream re-reads the same line for
// numeric extraction with the classic ("C") locale.
std::vector<std::string> st;
utility::SplitString(st, line, "\t\r\n ");
std::stringstream sstream(line);
sstream.imbue(std::locale::classic());
std::string line_type;
sstream >> line_type;
if (line_type.substr(0, 1) == "#") {
// Comment line - ignored.
} else if (line_type.substr(0, 7) == "VERSION") {
if (st.size() >= 2) {
header.version = st[1];
}
} else if (line_type.substr(0, 6) == "FIELDS" ||
line_type.substr(0, 7) == "COLUMNS") {
specified_channel_count = st.size() - 1;
if (specified_channel_count == 0) {
utility::LogWarning("[ReadPCDHeader] Bad PCD file format.\n");
return false;
}
// Initialize every field with the defaults (4-byte float, count 1);
// later SIZE/TYPE/COUNT lines overwrite these.
header.fields.resize(specified_channel_count);
int count_offset = 0, offset = 0;
for (size_t i = 0; i < specified_channel_count;
i++, count_offset += 1, offset += 4) {
header.fields[i].name = st[i + 1];
header.fields[i].size = 4;
header.fields[i].type = 'F';
header.fields[i].count = 1;
header.fields[i].count_offset = count_offset;
header.fields[i].offset = offset;
}
header.elementnum = count_offset;
header.pointsize = offset;
} else if (line_type.substr(0, 4) == "SIZE") {
if (specified_channel_count != st.size() - 1) {
utility::LogWarning("[ReadPCDHeader] Bad PCD file format.\n");
return false;
}
// Recompute byte offsets from the actual per-field sizes; the offset
// increment happens in the loop header, after the field is assigned.
int offset = 0, col_type = 0;
for (size_t i = 0; i < specified_channel_count;
i++, offset += col_type) {
sstream >> col_type;
header.fields[i].size = col_type;
header.fields[i].offset = offset;
}
header.pointsize = offset;
} else if (line_type.substr(0, 4) == "TYPE") {
if (specified_channel_count != st.size() - 1) {
utility::LogWarning("[ReadPCDHeader] Bad PCD file format.\n");
return false;
}
for (size_t i = 0; i < specified_channel_count; i++) {
header.fields[i].type = st[i + 1].c_str()[0];
}
} else if (line_type.substr(0, 5) == "COUNT") {
if (specified_channel_count != st.size() - 1) {
utility::LogWarning("[ReadPCDHeader] Bad PCD file format.\n");
return false;
}
// Multi-count fields widen both the token offsets (ASCII) and the
// byte offsets (binary).
int count_offset = 0, offset = 0, col_count = 0;
for (size_t i = 0; i < specified_channel_count; i++) {
sstream >> col_count;
header.fields[i].count = col_count;
header.fields[i].count_offset = count_offset;
header.fields[i].offset = offset;
count_offset += col_count;
offset += col_count * header.fields[i].size;
}
header.elementnum = count_offset;
header.pointsize = offset;
} else if (line_type.substr(0, 5) == "WIDTH") {
sstream >> header.width;
} else if (line_type.substr(0, 6) == "HEIGHT") {
sstream >> header.height;
// Assumes WIDTH was parsed already (canonical PCD ordering).
header.points = header.width * header.height;
} else if (line_type.substr(0, 9) == "VIEWPOINT") {
if (st.size() >= 2) {
header.viewpoint = st[1];
}
} else if (line_type.substr(0, 6) == "POINTS") {
// POINTS, when present, overrides the WIDTH * HEIGHT value.
sstream >> header.points;
} else if (line_type.substr(0, 4) == "DATA") {
// DATA is the last header line; determine the payload encoding and
// stop so the caller can read the payload from here.
header.datatype = PCD_DATA_ASCII;
if (st.size() >= 2) {
if (st[1].substr(0, 17) == "binary_compressed") {
header.datatype = PCD_DATA_BINARY_COMPRESSED;
} else if (st[1].substr(0, 6) == "binary") {
header.datatype = PCD_DATA_BINARY;
}
}
break;
}
}
if (CheckHeader(header) == false) {
return false;
}
return true;
}
/// Decodes one binary-encoded PCD element into a float.
///
/// \param data_ptr Pointer to the element's first byte (may be unaligned;
///        memcpy is used to avoid unaligned access).
/// \param type Element type code: 'I' signed int, 'U' unsigned int, 'F' float.
/// \param size Element width in bytes.
/// \return The element's value as float; 0 for unsupported type/size combos.
float UnpackBinaryPCDElement(const char *data_ptr,
                             const char type,
                             const int size) {
    switch (type) {
        case 'I':
            switch (size) {
                case 1: {
                    std::int8_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                case 2: {
                    std::int16_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                case 4: {
                    std::int32_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                default:
                    return 0.0;
            }
        case 'U':
            switch (size) {
                case 1: {
                    std::uint8_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                case 2: {
                    std::uint16_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                case 4: {
                    std::uint32_t value;
                    memcpy(&value, data_ptr, sizeof(value));
                    return static_cast<float>(value);
                }
                default:
                    return 0.0;
            }
        case 'F':
            if (size == 4) {
                std::float_t value;
                memcpy(&value, data_ptr, sizeof(value));
                return static_cast<float>(value);
            }
            return 0.0;
        default:
            return 0.0;
    }
}
/// Decodes a 4-byte packed binary PCD color into an RGB vector in [0, 1].
/// Returns black (zero vector) for any other size.
Eigen::Vector3f UnpackBinaryPCDColor(const char *data_ptr,
                                     const char type,
                                     const int size) {
    if (size != 4) {
        return Eigen::Vector3f::Zero();
    }
    std::uint8_t bgr[4];
    memcpy(bgr, data_ptr, 4);
    // color data is packed in BGR order.
    return Eigen::Vector3f((float)bgr[2] / 255.0,
                           (float)bgr[1] / 255.0,
                           (float)bgr[0] / 255.0);
}
/// Parses one ASCII PCD token into a float.
///
/// \param data_ptr NUL-terminated token text.
/// \param type Element type code: 'I' signed int, 'U' unsigned int, 'F' float.
/// \param size Element width in bytes (unused for ASCII parsing).
/// \return The parsed value as float; 0 for unknown type codes.
float UnpackASCIIPCDElement(const char *data_ptr,
                            const char type,
                            const int size) {
    char *end;
    if (type == 'I') {
        return (float)std::strtol(data_ptr, &end, 0);
    } else if (type == 'U') {
        return (float)std::strtoul(data_ptr, &end, 0);
    } else if (type == 'F') {
        // Parse directly to float with strtof instead of strtod: this avoids
        // the implicit double->float narrowing of the old code and its extra
        // (double) rounding step.
        return std::strtof(data_ptr, &end);
    }
    return 0.0;
}
/// Parses an ASCII PCD color token (a 4-byte value printed as int, unsigned,
/// or float) and unpacks its BGR bytes into an RGB vector in [0, 1].
/// Returns black (zero vector) when size != 4 or the type code is unknown.
Eigen::Vector3f UnpackASCIIPCDColor(const char *data_ptr,
                                    const char type,
                                    const int size) {
    if (size != 4) {
        return Eigen::Vector3f::Zero();
    }
    std::uint8_t bytes[4] = {0, 0, 0, 0};
    char *end;
    switch (type) {
        case 'I': {
            std::int32_t parsed = std::strtol(data_ptr, &end, 0);
            memcpy(bytes, &parsed, 4);
            break;
        }
        case 'U': {
            std::uint32_t parsed = std::strtoul(data_ptr, &end, 0);
            memcpy(bytes, &parsed, 4);
            break;
        }
        case 'F': {
            std::float_t parsed = std::strtof(data_ptr, &end);
            memcpy(bytes, &parsed, 4);
            break;
        }
        default:
            // Unknown type code: bytes stay zero, yielding black.
            break;
    }
    // color data is packed in BGR order.
    return Eigen::Vector3f((float)bytes[2] / 255.0, (float)bytes[1] / 255.0,
                           (float)bytes[0] / 255.0);
}
bool ReadPCDData(FILE *file,
                 const PCDHeader &header,
                 geometry::PointCloud &pointcloud) {
    // Reads the data section of a PCD file (ASCII, binary, or
    // binary_compressed) into `pointcloud`, staging through a host-side
    // buffer before uploading to the device. Returns false (and clears
    // `pointcloud` on partially-read data) when the payload is truncated
    // or fails to decompress.
    HostPointCloud host_pc;
    // The header should have been checked
    if (header.has_points) {
        host_pc.points_.resize(header.points);
    } else {
        utility::LogWarning(
                "[ReadPCDData] Fields for point data are not complete.\n");
        return false;
    }
    if (header.has_normals) {
        host_pc.normals_.resize(header.points);
    }
    if (header.has_colors) {
        host_pc.colors_.resize(header.points);
    }
    if (header.datatype == PCD_DATA_ASCII) {
        // One whitespace-separated record per line.
        char line_buffer[DEFAULT_IO_BUFFER_SIZE];
        int idx = 0;
        while (fgets(line_buffer, DEFAULT_IO_BUFFER_SIZE, file) &&
               idx < header.points) {
            std::string line(line_buffer);
            std::vector<std::string> strs;
            utility::SplitString(strs, line, "\t\r\n ");
            if ((int)strs.size() < header.elementnum) {
                // Skip short (e.g. blank) lines without consuming an index.
                continue;
            }
            for (size_t i = 0; i < header.fields.size(); i++) {
                const auto &field = header.fields[i];
                if (field.name == "x") {
                    host_pc.points_[idx](0) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "y") {
                    host_pc.points_[idx](1) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "z") {
                    host_pc.points_[idx](2) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "normal_x") {
                    host_pc.normals_[idx](0) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "normal_y") {
                    host_pc.normals_[idx](1) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "normal_z") {
                    host_pc.normals_[idx](2) = UnpackASCIIPCDElement(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                } else if (field.name == "rgb" || field.name == "rgba") {
                    host_pc.colors_[idx] = UnpackASCIIPCDColor(
                            strs[field.count_offset].c_str(), field.type,
                            field.size);
                }
            }
            idx++;
        }
    } else if (header.datatype == PCD_DATA_BINARY) {
        // One fixed-size record per point; fields addressed by byte offset.
        std::unique_ptr<char[]> buffer(new char[header.pointsize]);
        for (int i = 0; i < header.points; i++) {
            if (fread(buffer.get(), header.pointsize, 1, file) != 1) {
                utility::LogWarning(
                        "[ReadPCDData] Failed to read data record.\n");
                pointcloud.Clear();
                return false;
            }
            for (const auto &field : header.fields) {
                if (field.name == "x") {
                    host_pc.points_[i](0) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "y") {
                    host_pc.points_[i](1) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "z") {
                    host_pc.points_[i](2) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "normal_x") {
                    host_pc.normals_[i](0) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "normal_y") {
                    host_pc.normals_[i](1) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "normal_z") {
                    host_pc.normals_[i](2) =
                            UnpackBinaryPCDElement(buffer.get() + field.offset,
                                                   field.type, field.size);
                } else if (field.name == "rgb" || field.name == "rgba") {
                    host_pc.colors_[i] =
                            UnpackBinaryPCDColor(buffer.get() + field.offset,
                                                 field.type, field.size);
                }
            }
        }
    } else if (header.datatype == PCD_DATA_BINARY_COMPRESSED) {
        // Layout: [u32 compressed size][u32 uncompressed size][lzf payload].
        std::uint32_t compressed_size;
        std::uint32_t uncompressed_size;
        if (fread(&compressed_size, sizeof(compressed_size), 1, file) != 1) {
            utility::LogWarning("[ReadPCDData] Failed to read data record.\n");
            pointcloud.Clear();
            return false;
        }
        if (fread(&uncompressed_size, sizeof(uncompressed_size), 1, file) !=
            1) {
            utility::LogWarning("[ReadPCDData] Failed to read data record.\n");
            pointcloud.Clear();
            return false;
        }
        // Purely informational; use debug level like the other size logs
        // in this file (was LogWarning).
        utility::LogDebug(
                "PCD data with {:d} compressed size, and {:d} uncompressed "
                "size.\n",
                compressed_size, uncompressed_size);
        std::unique_ptr<char[]> buffer_compressed(new char[compressed_size]);
        if (fread(buffer_compressed.get(), 1, compressed_size, file) !=
            compressed_size) {
            utility::LogWarning("[ReadPCDData] Failed to read data record.\n");
            pointcloud.Clear();
            return false;
        }
        std::unique_ptr<char[]> buffer(new char[uncompressed_size]);
        if (lzf_decompress(buffer_compressed.get(),
                           (unsigned int)compressed_size, buffer.get(),
                           (unsigned int)uncompressed_size) !=
            uncompressed_size) {
            utility::LogWarning("[ReadPCDData] Uncompression failed.\n");
            pointcloud.Clear();
            return false;
        }
        // Compressed payload is field-major (all values of one field for
        // every point, then the next field), hence the per-field strides.
        for (const auto &field : header.fields) {
            const char *base_ptr = buffer.get() + field.offset * header.points;
            if (field.name == "x") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.points_[i](0) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "y") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.points_[i](1) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "z") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.points_[i](2) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "normal_x") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.normals_[i](0) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "normal_y") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.normals_[i](1) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "normal_z") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.normals_[i](2) = UnpackBinaryPCDElement(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            } else if (field.name == "rgb" || field.name == "rgba") {
                for (int i = 0; i < header.points; i++) {
                    host_pc.colors_[i] = UnpackBinaryPCDColor(
                            base_ptr + i * field.size * field.count, field.type,
                            field.size);
                }
            }
        }
    }
    host_pc.ToDevice(pointcloud);
    return true;
}
bool GenerateHeader(const geometry::PointCloud &pointcloud,
                    const bool write_ascii,
                    const bool compressed,
                    PCDHeader &header) {
    // Builds a PCD header describing the fields present in `pointcloud`:
    // always x/y/z, plus normals and a packed rgb when available.
    // Fails when the cloud has no points.
    if (!pointcloud.HasPoints()) {
        return false;
    }
    header.version = "0.7";
    header.width = (int)pointcloud.points_.size();
    header.height = 1;
    header.points = header.width;
    header.fields.clear();
    header.elementnum = 0;
    header.pointsize = 0;
    // Every field emitted by this writer is a single 4-byte float.
    PCLPointField field;
    field.type = 'F';
    field.size = 4;
    field.count = 1;
    auto append_field = [&header, &field](const char *name) {
        field.name = name;
        header.fields.push_back(field);
        header.elementnum += 1;
        header.pointsize += 4;
    };
    append_field("x");
    append_field("y");
    append_field("z");
    if (pointcloud.HasNormals()) {
        append_field("normal_x");
        append_field("normal_y");
        append_field("normal_z");
    }
    if (pointcloud.HasColors()) {
        append_field("rgb");
    }
    if (write_ascii) {
        header.datatype = PCD_DATA_ASCII;
    } else {
        header.datatype =
                compressed ? PCD_DATA_BINARY_COMPRESSED : PCD_DATA_BINARY;
    }
    return true;
}
bool WritePCDHeader(FILE *file, const PCDHeader &header) {
    // Emits the textual PCD header for `header`. Always reports success;
    // fprintf errors are not checked, matching the rest of this writer.
    const char *ver = header.version.c_str();
    fprintf(file, "# .PCD v%s - Point Cloud Data file format\n", ver);
    fprintf(file, "VERSION %s\n", ver);
    // Four parallel per-field lines: names, sizes, types, counts.
    fprintf(file, "FIELDS");
    for (const auto &f : header.fields) fprintf(file, " %s", f.name.c_str());
    fprintf(file, "\nSIZE");
    for (const auto &f : header.fields) fprintf(file, " %d", f.size);
    fprintf(file, "\nTYPE");
    for (const auto &f : header.fields) fprintf(file, " %c", f.type);
    fprintf(file, "\nCOUNT");
    for (const auto &f : header.fields) fprintf(file, " %d", f.count);
    fprintf(file, "\n");
    fprintf(file, "WIDTH %d\n", header.width);
    fprintf(file, "HEIGHT %d\n", header.height);
    fprintf(file, "VIEWPOINT 0 0 0 1 0 0 0\n");
    fprintf(file, "POINTS %d\n", header.points);
    if (header.datatype == PCD_DATA_BINARY) {
        fprintf(file, "DATA binary\n");
    } else if (header.datatype == PCD_DATA_BINARY_COMPRESSED) {
        fprintf(file, "DATA binary_compressed\n");
    } else {
        // PCD_DATA_ASCII and anything unrecognized fall back to ascii.
        fprintf(file, "DATA ascii\n");
    }
    return true;
}
float ConvertRGBToFloat(const Eigen::Vector3f &color) {
    // Packs an RGB color in [0, 1] into the 4 bytes of a float:
    // byte 0 = blue, byte 1 = green, byte 2 = red, byte 3 = zero.
    // Each channel is scaled to [0, 255] and clamped.
    std::uint8_t packed[4] = {0, 0, 0, 0};
    for (int c = 0; c < 3; c++) {
        int scaled = (int)(color(c) * 255.0);
        if (scaled < 0) scaled = 0;
        if (scaled > 255) scaled = 255;
        packed[2 - c] = (std::uint8_t)scaled;
    }
    float result;
    memcpy(&result, packed, 4);
    return result;
}
bool WritePCDData(FILE *file,
                  const PCDHeader &header,
                  const geometry::PointCloud &pointcloud) {
    // Writes the data section of a PCD file in the format selected by
    // `header.datatype` (ASCII, binary, or lzf-compressed binary). The
    // point cloud is first copied from device to host.
    bool has_normal = pointcloud.HasNormals();
    bool has_color = pointcloud.HasColors();
    HostPointCloud host_pc;
    host_pc.FromDevice(pointcloud);
    if (header.datatype == PCD_DATA_ASCII) {
        // One line per point: x y z [nx ny nz] [packed-rgb-as-float].
        for (size_t i = 0; i < host_pc.points_.size(); i++) {
            const auto &point = host_pc.points_[i];
            fprintf(file, "%.10g %.10g %.10g", point(0), point(1), point(2));
            if (has_normal) {
                const auto &normal = host_pc.normals_[i];
                fprintf(file, " %.10g %.10g %.10g", normal(0), normal(1),
                        normal(2));
            }
            if (has_color) {
                const auto &color = host_pc.colors_[i];
                fprintf(file, " %.10g", ConvertRGBToFloat(color));
            }
            fprintf(file, "\n");
        }
    } else if (header.datatype == PCD_DATA_BINARY) {
        // One fixed-size record per point, fields in header order, all
        // stored as 4-byte floats (elementnum of them).
        std::unique_ptr<float[]> data(new float[header.elementnum]);
        for (size_t i = 0; i < host_pc.points_.size(); i++) {
            const auto &point = host_pc.points_[i];
            data[0] = (float)point(0);
            data[1] = (float)point(1);
            data[2] = (float)point(2);
            int idx = 3;
            if (has_normal) {
                const auto &normal = host_pc.normals_[i];
                data[idx + 0] = (float)normal(0);
                data[idx + 1] = (float)normal(1);
                data[idx + 2] = (float)normal(2);
                idx += 3;
            }
            if (has_color) {
                const auto &color = host_pc.colors_[i];
                data[idx] = ConvertRGBToFloat(color);
            }
            fwrite(data.get(), sizeof(float), header.elementnum, file);
        }
    } else if (header.datatype == PCD_DATA_BINARY_COMPRESSED) {
        // Compressed payload is field-major: all values of one field for
        // every point ("strips" of length header.points), then the next
        // field — hence buffer[field_index * strip_size + i].
        int strip_size = header.points;
        std::uint32_t buffer_size =
                (std::uint32_t)(header.elementnum * header.points);
        std::unique_ptr<float[]> buffer(new float[buffer_size]);
        // 2x the input size leaves ample headroom for incompressible data.
        std::unique_ptr<float[]> buffer_compressed(new float[buffer_size * 2]);
        for (size_t i = 0; i < host_pc.points_.size(); i++) {
            const auto &point = host_pc.points_[i];
            buffer[0 * strip_size + i] = (float)point(0);
            buffer[1 * strip_size + i] = (float)point(1);
            buffer[2 * strip_size + i] = (float)point(2);
            int idx = 3;
            if (has_normal) {
                const auto &normal = host_pc.normals_[i];
                buffer[(idx + 0) * strip_size + i] = (float)normal(0);
                buffer[(idx + 1) * strip_size + i] = (float)normal(1);
                buffer[(idx + 2) * strip_size + i] = (float)normal(2);
                idx += 3;
            }
            if (has_color) {
                const auto &color = host_pc.colors_[i];
                buffer[idx * strip_size + i] = ConvertRGBToFloat(color);
            }
        }
        std::uint32_t buffer_size_in_bytes = buffer_size * sizeof(float);
        std::uint32_t size_compressed =
                lzf_compress(buffer.get(), buffer_size_in_bytes,
                             buffer_compressed.get(), buffer_size_in_bytes * 2);
        if (size_compressed == 0) {
            utility::LogWarning("[WritePCDData] Failed to compress data.\n");
            return false;
        }
        utility::LogDebug(
                "[WritePCDData] {:d} bytes data compressed into {:d} bytes.\n",
                buffer_size_in_bytes, size_compressed);
        // Payload layout: [u32 compressed size][u32 uncompressed size][data].
        fwrite(&size_compressed, sizeof(size_compressed), 1, file);
        fwrite(&buffer_size_in_bytes, sizeof(buffer_size_in_bytes), 1, file);
        fwrite(buffer_compressed.get(), 1, size_compressed, file);
    }
    return true;
}
} // unnamed namespace
namespace io {
bool ReadPointCloudFromPCD(const std::string &filename,
                           geometry::PointCloud &pointcloud,
                           bool print_progress) {
    // Opens `filename`, parses the PCD header, then streams the point
    // data into `pointcloud`. Returns false (with a warning) on any
    // failure; the FILE handle is always closed before returning.
    FILE *fid = fopen(filename.c_str(), "rb");
    if (fid == NULL) {
        utility::LogWarning("Read PCD failed: unable to open file: {}\n",
                            filename);
        return false;
    }
    PCDHeader header;
    if (!ReadPCDHeader(fid, header)) {
        utility::LogWarning("Read PCD failed: unable to parse header.\n");
        fclose(fid);
        return false;
    }
    // Dump the parsed header at debug level for troubleshooting.
    utility::LogDebug(
            "PCD header indicates {:d} fields, {:d} bytes per point, and {:d} "
            "points "
            "in total.\n",
            (int)header.fields.size(), header.pointsize, header.points);
    for (const auto &field : header.fields) {
        utility::LogDebug("{}, {}, {:d}, {:d}, {:d}\n", field.name.c_str(),
                          field.type, field.size, field.count, field.offset);
    }
    utility::LogDebug("Compression method is {:d}.\n", (int)header.datatype);
    utility::LogDebug("Points: {}; normals: {}; colors: {}\n",
                      header.has_points ? "yes" : "no",
                      header.has_normals ? "yes" : "no",
                      header.has_colors ? "yes" : "no");
    const bool ok = ReadPCDData(fid, header, pointcloud);
    if (!ok) {
        utility::LogWarning("Read PCD failed: unable to read data.\n");
    }
    fclose(fid);
    return ok;
}
bool WritePointCloudToPCD(const std::string &filename,
                          const geometry::PointCloud &pointcloud,
                          bool write_ascii /* = false*/,
                          bool compressed /* = false*/,
                          bool print_progress) {
    // Serializes `pointcloud` to `filename` as ASCII, binary, or
    // compressed-binary PCD, per the flags. Returns false (with a
    // warning) on failure; the FILE handle is closed before returning.
    PCDHeader header;
    if (!GenerateHeader(pointcloud, write_ascii, compressed, header)) {
        utility::LogWarning("Write PCD failed: unable to generate header.\n");
        return false;
    }
    FILE *fid = fopen(filename.c_str(), "wb");
    if (fid == NULL) {
        utility::LogWarning("Write PCD failed: unable to open file.\n");
        return false;
    }
    bool ok = WritePCDHeader(fid, header);
    if (!ok) {
        utility::LogWarning("Write PCD failed: unable to write header.\n");
    } else {
        ok = WritePCDData(fid, header, pointcloud);
        if (!ok) {
            utility::LogWarning("Write PCD failed: unable to write data.\n");
        }
    }
    fclose(fid);
    return ok;
}
} // namespace io
} // namespace cupoch
#include "Float16.cuh"
//
// Templated wrappers to express math for different scalar and vector
// types, so kernels can have the same written form but can operate
// over half and float, and on vector types transparently
//
namespace faiss { namespace gpu {
template <typename T>
struct Math {
  typedef T ScalarType;

  // Generic scalar case: every operation maps directly onto the
  // corresponding built-in operator for T.
  static inline __device__ T add(T a, T b) { return a + b; }
  static inline __device__ T sub(T a, T b) { return a - b; }
  static inline __device__ T mul(T a, T b) { return a * b; }
  static inline __device__ T neg(T v) { return -v; }

  /// For a vector type, this is a horizontal add, returning sum(v_i)
  static inline __device__ T reduceAdd(T v) { return v; }

  static inline __device__ bool lt(T a, T b) { return a < b; }
  static inline __device__ bool gt(T a, T b) { return a > b; }
  static inline __device__ bool eq(T a, T b) { return a == b; }

  static inline __device__ T zero() { return (T) 0; }
};
template <>
struct Math<float2> {
  typedef float ScalarType;

  // Componentwise arithmetic for float2; the scalar overloads broadcast
  // the scalar across both lanes.
  static inline __device__ float2 add(float2 a, float2 b) {
    float2 r{a.x + b.x, a.y + b.y};
    return r;
  }

  static inline __device__ float2 sub(float2 a, float2 b) {
    float2 r{a.x - b.x, a.y - b.y};
    return r;
  }

  static inline __device__ float2 add(float2 a, float b) {
    float2 r{a.x + b, a.y + b};
    return r;
  }

  static inline __device__ float2 sub(float2 a, float b) {
    float2 r{a.x - b, a.y - b};
    return r;
  }

  static inline __device__ float2 mul(float2 a, float2 b) {
    float2 r{a.x * b.x, a.y * b.y};
    return r;
  }

  static inline __device__ float2 mul(float2 a, float b) {
    float2 r{a.x * b, a.y * b};
    return r;
  }

  static inline __device__ float2 neg(float2 v) {
    float2 r{-v.x, -v.y};
    return r;
  }

  /// For a vector type, this is a horizontal add, returning sum(v_i)
  static inline __device__ float reduceAdd(float2 v) {
    return v.x + v.y;
  }

  // not implemented for vector types
  // static inline __device__ bool lt(float2 a, float2 b);
  // static inline __device__ bool gt(float2 a, float2 b);
  // static inline __device__ bool eq(float2 a, float2 b);

  static inline __device__ float2 zero() {
    float2 r{0.0f, 0.0f};
    return r;
  }
};
template <>
struct Math<float4> {
  typedef float ScalarType;

  // Componentwise arithmetic for float4; the scalar overloads broadcast
  // the scalar across all four lanes.
  static inline __device__ float4 add(float4 a, float4 b) {
    float4 r{a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w};
    return r;
  }

  static inline __device__ float4 sub(float4 a, float4 b) {
    float4 r{a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w};
    return r;
  }

  static inline __device__ float4 add(float4 a, float b) {
    float4 r{a.x + b, a.y + b, a.z + b, a.w + b};
    return r;
  }

  static inline __device__ float4 sub(float4 a, float b) {
    float4 r{a.x - b, a.y - b, a.z - b, a.w - b};
    return r;
  }

  static inline __device__ float4 mul(float4 a, float4 b) {
    float4 r{a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w};
    return r;
  }

  static inline __device__ float4 mul(float4 a, float b) {
    float4 r{a.x * b, a.y * b, a.z * b, a.w * b};
    return r;
  }

  static inline __device__ float4 neg(float4 v) {
    float4 r{-v.x, -v.y, -v.z, -v.w};
    return r;
  }

  /// For a vector type, this is a horizontal add, returning sum(v_i)
  static inline __device__ float reduceAdd(float4 v) {
    return v.x + v.y + v.z + v.w;
  }

  // not implemented for vector types
  // static inline __device__ bool lt(float4 a, float4 b);
  // static inline __device__ bool gt(float4 a, float4 b);
  // static inline __device__ bool eq(float4 a, float4 b);

  static inline __device__ float4 zero() {
    float4 r{0.0f, 0.0f, 0.0f, 0.0f};
    return r;
  }
};
#ifdef FAISS_USE_FLOAT16
template <>
struct Math<half> {
  typedef half ScalarType;

  // Scalar half arithmetic. When FAISS_USE_FULL_FLOAT16 is defined, the
  // native half intrinsics are used; otherwise each op round-trips
  // through float (convert, compute, convert back).
  static inline __device__ half add(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hadd(a, b);
#else
    return __float2half(__half2float(a) + __half2float(b));
#endif
  }

  static inline __device__ half sub(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hsub(a, b);
#else
    return __float2half(__half2float(a) - __half2float(b));
#endif
  }

  static inline __device__ half mul(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hmul(a, b);
#else
    return __float2half(__half2float(a) * __half2float(b));
#endif
  }

  static inline __device__ half neg(half v) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hneg(v);
#else
    return __float2half(-__half2float(v));
#endif
  }

  // Scalar type: horizontal add is the identity.
  static inline __device__ half reduceAdd(half v) {
    return v;
  }

  static inline __device__ bool lt(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hlt(a, b);
#else
    return __half2float(a) < __half2float(b);
#endif
  }

  static inline __device__ bool gt(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hgt(a, b);
#else
    return __half2float(a) > __half2float(b);
#endif
  }

  static inline __device__ bool eq(half a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __heq(a, b);
#else
    return __half2float(a) == __half2float(b);
#endif
  }

  static inline __device__ half zero() {
#if CUDA_VERSION >= 9000
    // CUDA 9+ half supports construction from an integer literal.
    return 0;
#else
    // Older CUDA exposes the raw bit field; all-zero bits are +0.0h.
    half h;
    h.x = 0;
    return h;
#endif
  }
};
template <>
struct Math<half2> {
  typedef half ScalarType;

  // Two-lane half arithmetic. With FAISS_USE_FULL_FLOAT16 the paired
  // half2 intrinsics are used; otherwise both lanes are widened to
  // float2, computed, and narrowed back (round-to-nearest).
  static inline __device__ half2 add(half2 a, half2 b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hadd2(a, b);
#else
    float2 af = __half22float2(a);
    float2 bf = __half22float2(b);

    af.x += bf.x;
    af.y += bf.y;

    return __float22half2_rn(af);
#endif
  }

  static inline __device__ half2 sub(half2 a, half2 b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hsub2(a, b);
#else
    float2 af = __half22float2(a);
    float2 bf = __half22float2(b);

    af.x -= bf.x;
    af.y -= bf.y;

    return __float22half2_rn(af);
#endif
  }

  // Scalar overload: broadcast b to both lanes, then add.
  static inline __device__ half2 add(half2 a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    half2 b2 = __half2half2(b);
    return __hadd2(a, b2);
#else
    float2 af = __half22float2(a);
    float bf = __half2float(b);

    af.x += bf;
    af.y += bf;

    return __float22half2_rn(af);
#endif
  }

  static inline __device__ half2 sub(half2 a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    half2 b2 = __half2half2(b);
    return __hsub2(a, b2);
#else
    float2 af = __half22float2(a);
    float bf = __half2float(b);

    af.x -= bf;
    af.y -= bf;

    return __float22half2_rn(af);
#endif
  }

  static inline __device__ half2 mul(half2 a, half2 b) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hmul2(a, b);
#else
    float2 af = __half22float2(a);
    float2 bf = __half22float2(b);

    af.x *= bf.x;
    af.y *= bf.y;

    return __float22half2_rn(af);
#endif
  }

  static inline __device__ half2 mul(half2 a, half b) {
#ifdef FAISS_USE_FULL_FLOAT16
    half2 b2 = __half2half2(b);
    return __hmul2(a, b2);
#else
    float2 af = __half22float2(a);
    float bf = __half2float(b);

    af.x *= bf;
    af.y *= bf;

    return __float22half2_rn(af);
#endif
  }

  static inline __device__ half2 neg(half2 v) {
#ifdef FAISS_USE_FULL_FLOAT16
    return __hneg2(v);
#else
    float2 vf = __half22float2(v);
    vf.x = -vf.x;
    vf.y = -vf.y;

    return __float22half2_rn(vf);
#endif
  }

  // Horizontal add: sum of the high and low lanes.
  static inline __device__ half reduceAdd(half2 v) {
#ifdef FAISS_USE_FULL_FLOAT16
    half hv = __high2half(v);
    half lv = __low2half(v);

    return __hadd(hv, lv);
#else
    float2 vf = __half22float2(v);
    vf.x += vf.y;

    return __float2half(vf.x);
#endif
  }

  // not implemented for vector types
  // static inline __device__ bool lt(half2 a, half2 b);
  // static inline __device__ bool gt(half2 a, half2 b);
  // static inline __device__ bool eq(half2 a, half2 b);

  static inline __device__ half2 zero() {
    return __half2half2(Math<half>::zero());
  }
};
template <>
struct Math<Half4> {
  typedef half ScalarType;

  // Four-lane half arithmetic; every operation delegates lanewise to the
  // two half2 halves (v.a, v.b) via Math<half2>.
  static inline __device__ Half4 add(Half4 a, Half4 b) {
    Half4 h;
    h.a = Math<half2>::add(a.a, b.a);
    h.b = Math<half2>::add(a.b, b.b);
    return h;
  }

  static inline __device__ Half4 sub(Half4 a, Half4 b) {
    Half4 h;
    h.a = Math<half2>::sub(a.a, b.a);
    h.b = Math<half2>::sub(a.b, b.b);
    return h;
  }

  // Scalar overloads broadcast b to every lane.
  static inline __device__ Half4 add(Half4 a, half b) {
    Half4 h;
    h.a = Math<half2>::add(a.a, b);
    h.b = Math<half2>::add(a.b, b);
    return h;
  }

  static inline __device__ Half4 sub(Half4 a, half b) {
    Half4 h;
    h.a = Math<half2>::sub(a.a, b);
    h.b = Math<half2>::sub(a.b, b);
    return h;
  }

  static inline __device__ Half4 mul(Half4 a, Half4 b) {
    Half4 h;
    h.a = Math<half2>::mul(a.a, b.a);
    h.b = Math<half2>::mul(a.b, b.b);
    return h;
  }

  static inline __device__ Half4 mul(Half4 a, half b) {
    Half4 h;
    h.a = Math<half2>::mul(a.a, b);
    h.b = Math<half2>::mul(a.b, b);
    return h;
  }

  static inline __device__ Half4 neg(Half4 v) {
    Half4 h;
    h.a = Math<half2>::neg(v.a);
    h.b = Math<half2>::neg(v.b);
    return h;
  }

  // Horizontal add across all four lanes.
  static inline __device__ half reduceAdd(Half4 v) {
    half hx = Math<half2>::reduceAdd(v.a);
    half hy = Math<half2>::reduceAdd(v.b);
    return Math<half>::add(hx, hy);
  }

  // not implemented for vector types
  // static inline __device__ bool lt(Half4 a, Half4 b);
  // static inline __device__ bool gt(Half4 a, Half4 b);
  // static inline __device__ bool eq(Half4 a, Half4 b);

  static inline __device__ Half4 zero() {
    Half4 h;
    h.a = Math<half2>::zero();
    h.b = Math<half2>::zero();
    return h;
  }
};
template <>
struct Math<Half8> {
  typedef half ScalarType;

  // Eight-lane half arithmetic; every operation delegates lanewise to
  // the two Half4 halves (v.a, v.b) via Math<Half4>.
  static inline __device__ Half8 add(Half8 a, Half8 b) {
    Half8 h;
    h.a = Math<Half4>::add(a.a, b.a);
    h.b = Math<Half4>::add(a.b, b.b);
    return h;
  }

  static inline __device__ Half8 sub(Half8 a, Half8 b) {
    Half8 h;
    h.a = Math<Half4>::sub(a.a, b.a);
    h.b = Math<Half4>::sub(a.b, b.b);
    return h;
  }

  // Scalar overloads broadcast b to every lane.
  static inline __device__ Half8 add(Half8 a, half b) {
    Half8 h;
    h.a = Math<Half4>::add(a.a, b);
    h.b = Math<Half4>::add(a.b, b);
    return h;
  }

  static inline __device__ Half8 sub(Half8 a, half b) {
    Half8 h;
    h.a = Math<Half4>::sub(a.a, b);
    h.b = Math<Half4>::sub(a.b, b);
    return h;
  }

  static inline __device__ Half8 mul(Half8 a, Half8 b) {
    Half8 h;
    h.a = Math<Half4>::mul(a.a, b.a);
    h.b = Math<Half4>::mul(a.b, b.b);
    return h;
  }

  static inline __device__ Half8 mul(Half8 a, half b) {
    Half8 h;
    h.a = Math<Half4>::mul(a.a, b);
    h.b = Math<Half4>::mul(a.b, b);
    return h;
  }

  static inline __device__ Half8 neg(Half8 v) {
    Half8 h;
    h.a = Math<Half4>::neg(v.a);
    h.b = Math<Half4>::neg(v.b);
    return h;
  }

  // Horizontal add across all eight lanes.
  static inline __device__ half reduceAdd(Half8 v) {
    half hx = Math<Half4>::reduceAdd(v.a);
    half hy = Math<Half4>::reduceAdd(v.b);
    return Math<half>::add(hx, hy);
  }

  // not implemented for vector types
  // static inline __device__ bool lt(Half8 a, Half8 b);
  // static inline __device__ bool gt(Half8 a, Half8 b);
  // static inline __device__ bool eq(Half8 a, Half8 b);

  static inline __device__ Half8 zero() {
    Half8 h;
    h.a = Math<Half4>::zero();
    h.b = Math<Half4>::zero();
    return h;
  }
};
#endif // FAISS_USE_FLOAT16
} } // namespace
#include <random>
#include <vector>
#include "paddle/fluid/operators/fused/fused_dropout_test.h"
#include "paddle/fluid/operators/fused/fused_layernorm_residual_dropout_bias.h"
/**
* @brief The unit test of fused_layernorm_residual_dropout_bias
*/
template <typename T>
struct TestFusedLayernormResidualDropoutBias {
  // Problem shape and kernel configuration.
  uint32_t rows;
  uint32_t cols;
  uint64_t seed;
  float dropout_prob, epsilon;
  bool is_upscale_in_train;
  bool is_test;  // default false, Set to true for inference only
  bool has_bias = true;
  bool has_scale = true;
  bool has_layernorm_bias = true;
  // Device tensors for inputs, outputs, and layernorm statistics.
  framework::Tensor src, residual, bias, out, mask, scale, layernorm_bias,
      layernorm_out, means, vars;
  framework::Tensor dsrc, dbias;
  // Host-side copies of the inputs.
  std::vector<T> src_vec, residual_vec, bias_vec;
  std::vector<LayerNormParamType<T>> means_vec, vars_vec, scale_vec,
      layernorm_bias_vec;
  // Reference (CPU) results the fused kernel is checked against.
  std::vector<T> correct_out, correct_dsrc, correct_dbias,
      correct_layernorm_out;
  std::vector<LayerNormParamType<T>> correct_means, correct_vars;
  std::vector<uint8_t> correct_mask;
  platform::CUDAPlace place;
  platform::CUDADeviceContext *ctx;

  TestFusedLayernormResidualDropoutBias() {
    rows = 32;
    cols = 32;
    seed = 0;
    dropout_prob = 0.0;
    is_upscale_in_train = false;
    is_test = false;
    has_bias = true;
    has_scale = true;
    has_layernorm_bias = true;
    epsilon = 0.00001f;
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto devicectx = pool.Get(place);
    ctx = reinterpret_cast<platform::CUDADeviceContext *>(devicectx);
  }

  TestFusedLayernormResidualDropoutBias(int _rows, int _cols,
                                        uint64_t _seed = 0,
                                        float _dropout_prob = 0.0,
                                        float _epsilon = 0.00001f,
                                        bool _is_upscale_in_train = false,
                                        bool _is_test = false) {
    rows = _rows;
    cols = _cols;
    seed = _seed;
    dropout_prob = _dropout_prob;
    epsilon = _epsilon;
    is_upscale_in_train = _is_upscale_in_train;
    is_test = _is_test;
    has_bias = true;
    has_scale = true;
    has_layernorm_bias = true;
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto devicectx = pool.Get(place);
    ctx = reinterpret_cast<platform::CUDADeviceContext *>(devicectx);
  }

  ~TestFusedLayernormResidualDropoutBias() {}

  // Allocates host/device buffers and fills the inputs with uniform
  // random values in [0, 1).
  void SetUp() {
    using U = LayerNormParamType<T>;
    const int n = rows * cols;
    correct_out.resize(n);
    correct_mask.resize(n);
    correct_dsrc.resize(n);
    correct_dbias.resize(cols);
    correct_means.resize(rows);
    correct_vars.resize(rows);
    correct_layernorm_out.resize(n);
    src_vec.resize(n);
    residual_vec.resize(n);
    if (has_bias) {
      bias_vec.resize(cols);
    }
    if (has_scale) {
      scale_vec.resize(cols);
    }
    if (has_layernorm_bias) {
      layernorm_bias_vec.resize(cols);
    }
    std::default_random_engine random(time(NULL));
    std::uniform_real_distribution<float> dis(0.0, 1.0);
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        src_vec[i * cols + j] = static_cast<T>(dis(random));
        residual_vec[i * cols + j] = static_cast<T>(dis(random));
        if (i == 0) {
          // Per-column parameters are only filled on the first row pass.
          if (has_bias) {
            bias_vec[j] = static_cast<T>(dis(random));
          }
          if (has_scale) {
            scale_vec[j] = static_cast<U>(dis(random));
          }
          if (has_layernorm_bias) {
            layernorm_bias_vec[j] = static_cast<U>(dis(random));
          }
        }
      }
    }
    framework::TensorFromVector<T>(src_vec, *ctx, &src);
    src.Resize({rows, cols});
    framework::TensorFromVector<T>(residual_vec, *ctx, &residual);
    residual.Resize({rows, cols});
    if (has_bias) {
      framework::TensorFromVector<T>(bias_vec, *ctx, &bias);
      bias.Resize({cols});
    }
    if (has_scale) {
      framework::TensorFromVector<U>(scale_vec, *ctx, &scale);
      scale.Resize({cols});
    }
    if (has_layernorm_bias) {
      framework::TensorFromVector<U>(layernorm_bias_vec, *ctx, &layernorm_bias);
      layernorm_bias.Resize({cols});
    }
    {
      out.Resize({rows, cols});
      out.mutable_data<T>(place);
      mask.Resize({rows, cols});
      mask.mutable_data<uint8_t>(place);
      means.Resize({rows});
      means.mutable_data<U>(place);
      vars.Resize({rows});
      vars.mutable_data<U>(place);
      layernorm_out.Resize({rows, cols});
      layernorm_out.mutable_data<T>(place);
      dsrc.Resize({rows, cols});
      dsrc.mutable_data<T>(place);
      if (has_bias) {
        dbias.Resize({cols});
        dbias.mutable_data<T>(place);
      }
    }
  }

  // Reference path: bias-add, dropout, residual-add, then layernorm,
  // all computed on the host.
  void BaseForward() {
    using U = LayerNormParamType<T>;
    std::vector<T> out1(rows * cols), out2(rows * cols);
    if (has_bias) {
      // add bias
      for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
          out1[i * cols + j] = src_vec[i * cols + j] + bias_vec[j];
        }
      }
      // call dropout
      Dropout<T>(out1, src.dims(), &out2, &correct_mask, *ctx, seed,
                 dropout_prob, is_upscale_in_train, is_test);
    } else {
      Dropout<T>(src_vec, src.dims(), &out2, &correct_mask, *ctx, seed,
                 dropout_prob, is_upscale_in_train, is_test);
    }
    // add residual
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        correct_out[i * cols + j] =
            residual_vec[i * cols + j] + out2[i * cols + j];
      }
    }
    LayerNorm<T>(scale_vec, layernorm_bias_vec, correct_out, &correct_means,
                 &correct_vars, &correct_layernorm_out, epsilon, rows, cols,
                 *ctx);
    ctx->Wait();
  }

  // Launches the fused GPU kernel under test.
  void FusedForward() {
    using U = LayerNormParamType<T>;
    int VecSize = MAX_CACHE_BYTES / sizeof(T);
    if (cols % 4 != 0) {
      VecSize = 1;
    }
    int threads = paddle::operators::GetDesiredBlockDim(cols / VecSize);
    const int increment = ((cols - 1) / (threads * VecSize) + 1) * VecSize;
    T *bias_ptr = nullptr;
    U *scale_ptr = nullptr;
    U *layernorm_bias_ptr = nullptr;
    if (has_bias) {
      bias_ptr = bias.data<T>();
    }
    if (has_scale) {
      scale_ptr = scale.data<U>();
    }
    if (has_layernorm_bias) {
      layernorm_bias_ptr = layernorm_bias.data<U>();
    }
    paddle::operators::LaunchLayernormResidualDropoutBias<T, uint8_t>(
        rows, cols, increment, seed, dropout_prob, epsilon, is_upscale_in_train,
        is_test, src.data<T>(), residual.data<T>(), bias_ptr, scale_ptr,
        layernorm_bias_ptr, mask.data<uint8_t>(), out.data<T>(),
        layernorm_out.data<T>(), means.data<U>(), vars.data<U>(), *ctx);
    ctx->Wait();
  }

  void Run() {
    SetUp();
    BaseForward();
    FusedForward();
  }

  // Compares the fused results against the reference within `diff`.
  void CheckOut(const T diff) {
    using U = LayerNormParamType<T>;
    const int n = rows * cols;
    std::vector<T> _out(n), _layernorm_out(n);
    // Both means and vars hold one statistic per row.
    // (Fixed: _vars was previously sized `cols`.)
    std::vector<U> _means(rows), _vars(rows);
    std::vector<uint8_t> _mask(n);
    framework::TensorToVector(out, *ctx, &_out);
    framework::TensorToVector(layernorm_out, *ctx, &_layernorm_out);
    framework::TensorToVector(means, *ctx, &_means);
    framework::TensorToVector(vars, *ctx, &_vars);
    if (!is_test) {
      // The dropout mask is only produced in training mode.
      framework::TensorToVector(mask, *ctx, &_mask);
    }
    ctx->Wait();
    for (int i = 0; i < n; i++) {
      EXPECT_LT(std::abs(_out[i] - correct_out[i]), diff);
      EXPECT_LT(std::abs(_layernorm_out[i] - correct_layernorm_out[i]), diff);
      if (!is_test) EXPECT_EQ(_mask[i], correct_mask[i]);
    }
    for (int i = 0; i < rows; i++) {
      EXPECT_LT(std::abs(_means[i] - correct_means[i]), static_cast<U>(diff));
      EXPECT_LT(std::abs(_vars[i] - correct_vars[i]), static_cast<U>(diff));
    }
  }
};
template <typename T>
static void BaseTest(const bool is_fp16 = false) {
const int rows = 16;
T default_diff = !is_fp16 ? static_cast<T>(1e-4) : static_cast<T>(1e-2);
for (auto cols : {16, 17}) {
for (auto has_bias : {true, false}) {
for (auto has_scale : {true, false}) {
for (auto has_layernorm_bias : {true, false}) {
TestFusedLayernormResidualDropoutBias<T> test(rows, cols);
test.has_bias = has_bias;
test.has_scale = has_scale;
test.has_layernorm_bias = has_layernorm_bias;
test.Run();
test.CheckOut(default_diff);
}
}
}
}
}
// Default configuration sweep for each supported dtype.
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBias) { BaseTest<float>(); }
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBiasDouble) {
  BaseTest<double>();
}
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBiasFp16) {
  // `true` selects the looser fp16 tolerance inside BaseTest.
  BaseTest<platform::float16>(true);
}
// dropout_prob = 1.0 drops everything; exercises both scaling modes.
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBiasIsUpscaleInTrain) {
  const int rows = 16;
  const int cols = 16;
  for (auto is_upscale_in_train : {true, false}) {
    TestFusedLayernormResidualDropoutBias<float> test(
        rows, cols, 0, 1.0, 0.00001f, is_upscale_in_train, false);
    test.Run();
    test.CheckOut(static_cast<float>(1e-4));
  }
}
// Inference mode (is_test = true): the dropout mask is not checked.
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBiasIsTest) {
  const int rows = 16;
  const int cols = 16;
  TestFusedLayernormResidualDropoutBias<float> test(rows, cols, 0, 0.35,
                                                    0.00001f, true, true);
  test.Run();
  test.CheckOut(static_cast<float>(1e-4));
}
// Non-zero seed with dropout disabled.
TEST(FusedDropout, GPUFusedLayernormResidualDropoutBiasSeed) {
  const int rows = 16;
  const int cols = 16;
  TestFusedLayernormResidualDropoutBias<float> test(rows, cols, 125, 0.0,
                                                    0.00001f, false, false);
  test.Run();
  test.CheckOut(static_cast<float>(1e-4));
}
// Larger shape to cover multi-block launches.
TEST(FusedDropout, GPUFusedLayernormResidualDropoutLargeShape) {
  const int rows = 512;
  const int cols = 512;
  TestFusedLayernormResidualDropoutBias<float> test(rows, cols);
  test.Run();
  test.CheckOut(static_cast<float>(1e-4));
}
// GaussianSmoothxy.cu
// Gaussian smoothing of a curve's (x, y) coordinates.

#include "GaussianSmoothxy.h"

// Macro: DEF_BLOCK_X (DEF_BLOCK_Y currently unused)
// Default thread-block dimensions.
#define DEF_BLOCK_X 32
//#define DEF_BLOCK_Y 8

// Macros defining the normalization coefficients for the five Gaussian
// smoothing scales: window sizes 3, 5, 7, 9, 11 use 4, 16, 64, 256, 1024
// respectively (the sums of the corresponding binomial weights).
#define GAUSS_THREE 4
#define GAUSS_FIVE 16
#define GAUSS_SEVEN 64
#define GAUSS_NINE 256
#define GAUSS_ELEVEN 1024

// Kernel for smoothing window size 7.
static __global__ void
gauss7SmoothXY(
        int n,             // number of points on the curve
        int* ringCordiXY,  // auxiliary input (interleaved x,y coordinates)
        float* gSmCordiXY  // smoothed output
);

// Kernel for smoothing window size 5.
static __global__ void
gauss5SmoothXY(
        int n,             // number of points on the curve
        int* ringCordiXY,  // auxiliary input (interleaved x,y coordinates)
        float* gSmCordiXY  // smoothed output
);

// Kernel for smoothing window size 9.
static __global__ void
gauss9SmoothXY(
        int n,             // number of points on the curve
        int* ringCordiXY,  // auxiliary input (interleaved x,y coordinates)
        float* gSmCordiXY  // smoothed output
);

// Kernel for smoothing window size 11.
static __global__ void
gauss11SmoothXY(
        int n,             // number of points on the curve
        int* ringCordiXY,  // auxiliary input (interleaved x,y coordinates)
        float* gSmCordiXY  // smoothed output
);

// Kernel for smoothing window size 3.
static __global__ void
gauss3SmoothXY(
        int n,             // number of points on the curve
        int* ringCordiXY,  // auxiliary input (interleaved x,y coordinates)
        float* gSmCordiXY  // smoothed output
);
// Kernel implementation for the window-size-7 Gaussian smoothing.
// Applies the binomial filter 1 6 15 20 15 6 1 (sum = 64 = GAUSS_SEVEN)
// to the interleaved (x, y) ring buffer.
static __global__ void gauss7SmoothXY(int n, int* ringCordiXY,
                                      float* gSmCordiXY )
{
    // One thread smooths one curve point.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Out-of-range threads do nothing (saves work and prevents
    // out-of-bounds accesses).
    if (tid >= n)
        return;

    // Offset of this point inside the middle copy of the ring buffer.
    // The buffer stores interleaved (x, y) pairs and three curve
    // copies back to back, so the full neighbourhood always exists;
    // x values sit at even offsets, y at the following odd offsets.
    int center = 2 * (n + tid);

    int sumX = ringCordiXY[center - 6] + 6 * ringCordiXY[center - 4] +
               15 * ringCordiXY[center - 2] + 20 * ringCordiXY[center] +
               15 * ringCordiXY[center + 2] + 6 * ringCordiXY[center + 4] +
               ringCordiXY[center + 6];
    int sumY = ringCordiXY[center - 5] + 6 * ringCordiXY[center - 3] +
               15 * ringCordiXY[center - 1] + 20 * ringCordiXY[center + 1] +
               15 * ringCordiXY[center + 3] + 6 * ringCordiXY[center + 5] +
               ringCordiXY[center + 7];

    // Normalise and store the smoothed coordinate pair.
    gSmCordiXY[2 * tid] = 1.0 * sumX / GAUSS_SEVEN;
    gSmCordiXY[2 * tid + 1] = 1.0 * sumY / GAUSS_SEVEN;
}
// Kernel implementation for the window-size-5 Gaussian smoothing.
// Applies the binomial filter 1 4 6 4 1 (sum = 16 = GAUSS_FIVE).
static __global__ void gauss5SmoothXY(int n, int* ringCordiXY,
                                      float* gSmCordiXY)
{
    // One thread smooths one curve point; skip out-of-range threads.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;

    // Offset inside the middle copy of the interleaved ring buffer
    // (x at even offsets, y at the following odd offsets).
    int center = 2 * (n + tid);

    int sumX = ringCordiXY[center - 4] + 4 * ringCordiXY[center - 2] +
               6 * ringCordiXY[center] + 4 * ringCordiXY[center + 2] +
               ringCordiXY[center + 4];
    int sumY = ringCordiXY[center - 3] + 4 * ringCordiXY[center - 1] +
               6 * ringCordiXY[center + 1] + 4 * ringCordiXY[center + 3] +
               ringCordiXY[center + 5];

    // Normalise and store the smoothed coordinate pair.
    gSmCordiXY[2 * tid] = 1.0 * sumX / GAUSS_FIVE;
    gSmCordiXY[2 * tid + 1] = 1.0 * sumY / GAUSS_FIVE;
}
// Kernel implementation for the window-size-9 Gaussian smoothing.
// Applies the binomial filter 1 8 28 56 70 56 28 8 1 (sum = 256 =
// GAUSS_NINE) to the interleaved (x, y) ring buffer.
static __global__ void gauss9SmoothXY(int n, int* ringCordiXY,
                                      float* gSmCordiXY)
{
    // One thread smooths one curve point.
    int t = blockIdx.x * blockDim.x + threadIdx.x;

    // Out-of-range threads do nothing (saves work and prevents
    // out-of-bounds accesses).
    if (t >= n)
        return;

    // Offset of this point inside the middle copy of the ring buffer
    // (interleaved pairs: x at even offsets, y at the following odd
    // offsets).
    int i = 2 * (n + t);

    int x = ringCordiXY[i - 8] + 8 * ringCordiXY[i - 6] +
            28 * ringCordiXY[i - 4] + 56 * ringCordiXY[i - 2] +
            70 * ringCordiXY[i] + 56 * ringCordiXY[i + 2] +
            28 * ringCordiXY[i + 4] + 8 * ringCordiXY[i + 6] +
            ringCordiXY[i + 8];
    // Fixed: in the original the y taps after the centre were shifted
    // by one pair (the 56 tap reused i + 1 and the series ended at
    // i + 7 instead of i + 9). The offsets now mirror the x taps at
    // +1, consistent with the 3/5/7/11 kernels.
    int y = ringCordiXY[i - 7] + 8 * ringCordiXY[i - 5] +
            28 * ringCordiXY[i - 3] + 56 * ringCordiXY[i - 1] +
            70 * ringCordiXY[i + 1] + 56 * ringCordiXY[i + 3] +
            28 * ringCordiXY[i + 5] + 8 * ringCordiXY[i + 7] +
            ringCordiXY[i + 9];

    // Normalise and store the smoothed coordinate pair.
    gSmCordiXY[2 * t] = 1.0 * x / GAUSS_NINE;
    gSmCordiXY[2 * t + 1] = 1.0 * y / GAUSS_NINE;
}
// Kernel implementation for the window-size-11 Gaussian smoothing.
// Applies the binomial filter 1 10 45 120 210 252 210 120 45 10 1
// (sum = 1024 = GAUSS_ELEVEN).
static __global__ void gauss11SmoothXY(int n, int* ringCordiXY,
                                       float* gSmCordiXY)
{
    // One thread smooths one curve point; skip out-of-range threads.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;

    // Offset inside the middle copy of the interleaved ring buffer
    // (x at even offsets, y at the following odd offsets).
    int center = 2 * (n + tid);

    int sumX = ringCordiXY[center - 10] + 10 * ringCordiXY[center - 8] +
               45 * ringCordiXY[center - 6] + 120 * ringCordiXY[center - 4] +
               210 * ringCordiXY[center - 2] + 252 * ringCordiXY[center] +
               210 * ringCordiXY[center + 2] + 120 * ringCordiXY[center + 4] +
               45 * ringCordiXY[center + 6] + 10 * ringCordiXY[center + 8] +
               ringCordiXY[center + 10];
    int sumY = ringCordiXY[center - 9] + 10 * ringCordiXY[center - 7] +
               45 * ringCordiXY[center - 5] + 120 * ringCordiXY[center - 3] +
               210 * ringCordiXY[center - 1] + 252 * ringCordiXY[center + 1] +
               210 * ringCordiXY[center + 3] + 120 * ringCordiXY[center + 5] +
               45 * ringCordiXY[center + 7] + 10 * ringCordiXY[center + 9] +
               ringCordiXY[center + 11];

    // Normalise and store the smoothed coordinate pair.
    gSmCordiXY[2 * tid] = 1.0 * sumX / GAUSS_ELEVEN;
    gSmCordiXY[2 * tid + 1] = 1.0 * sumY / GAUSS_ELEVEN;
}
// Kernel implementation for the window-size-3 Gaussian smoothing.
// Applies the binomial filter 1 2 1 (sum = 4 = GAUSS_THREE).
static __global__ void gauss3SmoothXY(int n, int* ringCordiXY,
                                      float* gSmCordiXY)
{
    // One thread smooths one curve point; skip out-of-range threads.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;

    // Offset inside the middle copy of the interleaved ring buffer
    // (x at even offsets, y at the following odd offsets).
    int center = 2 * (n + tid);

    int sumX = ringCordiXY[center - 2] + 2 * ringCordiXY[center] +
               ringCordiXY[center + 2];
    int sumY = ringCordiXY[center - 1] + 2 * ringCordiXY[center + 1] +
               ringCordiXY[center + 3];

    // Normalise and store the smoothed coordinate pair.
    gSmCordiXY[2 * tid] = 1.0 * sumX / GAUSS_THREE;
    gSmCordiXY[2 * tid + 1] = 1.0 * sumY / GAUSS_THREE;
}
// Smooths the (x, y) coordinates of a curve with a Gaussian filter.
//
// curve        - curve to smooth; on success curve->smWindowSize and
//                curve->smCurveCordiXY are filled in
// smWindowSize - requested window size, must be 3, 5, 7, 9 or 11
//
// Returns NO_ERROR on success, CUDA_ERROR on device failures, or one
// of the custom negative codes below on invalid input (custom values
// are used to avoid clashing with existing error codes, as requested
// by the client).
__host__ int GaussSmoothXY::gaussSmoothCurveXY(Curve *curve, int smWindowSize)
{
    // Error code from the core smoothing routine.
    int state;

    int cLength = curve->curveLength;

    // A curve with fewer than 3 points cannot be smoothed.
    if (cLength < 3)
        return -11;

    // Only the five supported window sizes are accepted; anything
    // else is rejected instead of silently falling back to a default,
    // so the caller never gets an unexpected smoothing result.
    if (smWindowSize != 3 && smWindowSize != 5 && smWindowSize != 7 &&
        smWindowSize != 9 && smWindowSize != 11)
        return -12;

    // If the curve is not longer than the window, shrink the window
    // by 2 until it fits. This flattens the original four-level
    // nested if/else while keeping its exact behaviour: each failed
    // shrink step returns its own error code (-13 .. -16), and after
    // the fourth shrink (only reachable from window 11) the window is
    // clamped to the minimum of 3.
    if (cLength <= smWindowSize) {
        for (int step = 0; ; ++step) {
            if ((smWindowSize -= 2) < 3)
                return -13 - step;
            if (cLength > smWindowSize) {
                curve->smWindowSize = smWindowSize;
                break;
            }
            if (step == 3) {
                // Fourth shrink: fall back to the minimum window.
                curve->smWindowSize = smWindowSize;  // == 3 here
                break;
            }
        }
    } else {
        // The requested window already fits.
        curve->smWindowSize = smWindowSize;
    }

    // Allocate the host-side output array on first use.
    if (curve->smCurveCordiXY == NULL) {
        curve->smCurveCordiXY = new float[2 * cLength];
    }

    // Run the core smoothing routine.
    state = gaussSmoothXY(cLength, curve->crvData, curve->closed,
                          curve->smCurveCordiXY, smWindowSize);

    // On failure, drop the (now meaningless) smoothing data.
    // Fixed: the array was allocated with new[], so it must be
    // released with delete[] (the original used plain delete, which
    // is undefined behaviour).
    if (state != NO_ERROR) {
        curve->smWindowSize = 0;
        delete[] curve->smCurveCordiXY;
        curve->smCurveCordiXY = NULL;
    }
    return state;
}
// Core Gaussian smoothing routine: smooths the interleaved (x, y)
// coordinates of a curve on the GPU.
//
// n            - number of points on the curve
// origiCordiXY - host array of 2 * n ints, interleaved (x, y) pairs
// closed       - true for a closed curve
// gSmCordiXY   - host output array of 2 * n floats (smoothed pairs)
// smWindowSize - smoothing window size (3, 5, 7, 9 or 11)
//
// A ring buffer holding three back-to-back copies of the curve is
// built in device memory so every point has a full neighbourhood:
// a closed curve is simply replicated three times, an open curve is
// surrounded by reversed copies (mirroring). Returns NO_ERROR on
// success or CUDA_ERROR on any device failure.
__host__ int GaussSmoothXY::gaussSmoothXY(int n, int* origiCordiXY, bool closed,
                                          float* gSmCordiXY, int smWindowSize)
{
    // CUDA error code of the last runtime call.
    cudaError_t cudaerrcode;

    // Buffers cached across calls and grown on demand.
    static int ringCordiLength = 0;     // capacity (pairs) of ringCordiXY
    static int reverseCordiLength = 0;  // capacity marker of reverseCordiXY
    static int* reverseCordiXY = NULL;  // host buffer, reversed curve

    // Releases the cached buffers and resets the cached sizes so the
    // next call re-allocates them. Fixed: the original freed the
    // buffers on error but left the cached lengths/pointers stale,
    // so a subsequent call with a similar n would reuse freed memory
    // (use-after-free / double delete); it also released the new[]
    // array with plain delete.
    auto releaseBuffers = [&]() {
        cudaFree(ringCordiXY);
        ringCordiXY = NULL;
        ringCordiLength = 0;
        delete[] reverseCordiXY;
        reverseCordiXY = NULL;
        reverseCordiLength = 0;
    };

    int n3 = n * 3;

    // Grow the device ring buffer when the cached one is too small.
    if (ringCordiLength <= n3) {
        if (ringCordiXY != NULL)
            cudaFree(ringCordiXY);
        n3 += 3;
        // Room for n3 coordinate pairs (2 ints per pair).
        cudaerrcode = cudaMalloc((void **)&ringCordiXY, sizeof(int) * n3 * 2);
        if (cudaerrcode != cudaSuccess) {
            // Allocation failed: keep the cache consistent.
            ringCordiXY = NULL;
            ringCordiLength = 0;
            return CUDA_ERROR;
        }
        ringCordiLength = n3;
    }

    if (closed) {
        // Closed curve: the ring buffer is three identical copies of
        // the original coordinates.
        for (int copy = 0; copy < 3; ++copy) {
            cudaerrcode = cudaMemcpy(ringCordiXY + copy * 2 * n, origiCordiXY,
                                     2 * n * sizeof(int),
                                     cudaMemcpyHostToDevice);
            if (cudaerrcode != cudaSuccess) {
                releaseBuffers();
                return CUDA_ERROR;
            }
        }
    } else {
        // Open curve: surround the original coordinates with reversed
        // copies so the borders are mirrored.
        if (reverseCordiLength <= n) {
            delete[] reverseCordiXY;
            reverseCordiXY = new int[2 * n + 6];
            reverseCordiLength = n + 3;
        }
        // Reverse the point order while keeping each (x, y) pair
        // intact: point j of the reversed curve is point n - 1 - j of
        // the original.
        for (int i = 0; i < 2 * n; i += 2)
            reverseCordiXY[i + 1] = origiCordiXY[2 * n - i - 1];
        for (int i = 1; i < 2 * n; i += 2)
            reverseCordiXY[i - 1] = origiCordiXY[2 * n - i - 1];

        // Layout: reversed | original | reversed. The middle chunk
        // must be the original coordinates.
        const int* chunks[3] = { reverseCordiXY, origiCordiXY,
                                 reverseCordiXY };
        for (int copy = 0; copy < 3; ++copy) {
            cudaerrcode = cudaMemcpy(ringCordiXY + copy * 2 * n, chunks[copy],
                                     2 * n * sizeof(int),
                                     cudaMemcpyHostToDevice);
            if (cudaerrcode != cudaSuccess) {
                releaseBuffers();
                return CUDA_ERROR;
            }
        }
    }

    // Device buffer for the smoothed result.
    // NOTE(review): this->gSmCordiXY is allocated on every call and
    // is not freed on success, which leaks unless the caller owns and
    // releases it - confirm ownership before changing this.
    cudaerrcode = cudaMalloc((void **)&this->gSmCordiXY, sizeof(float) * 2 * n);
    if (cudaerrcode != cudaSuccess) {
        this->gSmCordiXY = NULL;
        releaseBuffers();
        return CUDA_ERROR;
    }

    // Launch configuration: one thread per curve point.
    dim3 gridSize, blockSize;
    blockSize.x = DEF_BLOCK_X;
    blockSize.y = 1;
    gridSize.x = (n + blockSize.x - 1) / blockSize.x;
    gridSize.y = 1;

    // Dispatch to the kernel matching the window size (kernels listed
    // in the client-requested order 7, 5, 9, 11, 3).
    switch (smWindowSize) {
    case 7:
        gauss7SmoothXY<<<gridSize, blockSize>>>(n, ringCordiXY,
                                                this->gSmCordiXY);
        break;
    case 5:
        gauss5SmoothXY<<<gridSize, blockSize>>>(n, ringCordiXY,
                                                this->gSmCordiXY);
        break;
    case 9:
        gauss9SmoothXY<<<gridSize, blockSize>>>(n, ringCordiXY,
                                                this->gSmCordiXY);
        break;
    case 11:
        gauss11SmoothXY<<<gridSize, blockSize>>>(n, ringCordiXY,
                                                 this->gSmCordiXY);
        break;
    default:
        // Window size 3. Fixed: the original launched this kernel
        // with <<<gridSize, gridSize>>>, passing the grid dimensions
        // as the block dimensions - a wrong launch configuration.
        gauss3SmoothXY<<<gridSize, blockSize>>>(n, ringCordiXY,
                                                this->gSmCordiXY);
        break;
    }
    // One error check covers whichever kernel was launched.
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(this->gSmCordiXY);
        this->gSmCordiXY = NULL;
        releaseBuffers();
        return CUDA_ERROR;
    }

    // Copy the smoothed coordinates back to the caller's host buffer.
    cudaerrcode = cudaMemcpy(gSmCordiXY, this->gSmCordiXY,
                             sizeof(float) * 2 * n, cudaMemcpyDeviceToHost);
    if (cudaerrcode != cudaSuccess) {
        cudaFree(this->gSmCordiXY);
        this->gSmCordiXY = NULL;
        releaseBuffers();
        return CUDA_ERROR;
    }
    return NO_ERROR;
}
// Packs the two 32-bit lanes of an int2 into one 64-bit integer with
// a single PTX mov.b64: a.x becomes the low 32 bits and a.y the high
// 32 bits (the {lo, hi} operand order of the vector form).
__forceinline__ __device__
long long int int2_as_longlong (int2 a)
{
long long int res;
asm ("mov.b64 %0, {%1,%2};" : "=l"(res) : "r"(a.x), "r"(a.y));
return res;
}
// __forceinline__ __device__
// void xcaz(int init, int *c, uint64_t a, uint64_t b)
// {
// switch (init) {
// case 0: *c += __popcll(a ^ b); break;
// case 1: *c += __popcll(a & b) - __popcll((a ^ b) & b); break;
// case 2: *c += __popcll(a & b) - __popcll((a ^ b) & a); break;
// }
// }
// Bit-GEMM over 64-bit packed binary vectors.
//
// Each thread block computes a BLK_M x BLK_N tile of C from the
// Hamming distance between rows of A and rows of B. BLK_K-wide
// slices of A and B are staged in shared memory (double-buffered via
// the ra/rb register arrays), and each thread accumulates popcounts
// of XORed words for a THR_M x THR_N sub-tile in registers.
//
// The final store converts the XOR popcount into a +/-1 dot product:
// (LDA << 6) - 2 * count, i.e. 64 * LDA minus twice the number of
// differing bits.
// NOTE(review): that formula assumes LDA equals the row length K in
// 64-bit words - confirm at the call site.
// `fetch` is defined elsewhere (texture path under TEX1D, pointer +
// bound path otherwise).
template <const int DIM_X, const int DIM_Y,
const int BLK_M, const int BLK_N, const int BLK_K,
const int DIM_XA, const int DIM_YA,
const int DIM_XB, const int DIM_YB,
const int THR_M, const int THR_N>
static __global__
void pgemm_kernel(const int M, const int N, const int K,
const uint64_t * __restrict__ A, const int LDA,
const uint64_t * __restrict__ B, const int LDB,
float * __restrict__ C, const int LDC,
int offsA, int offsB)
{
// Block/thread coordinates; note the x/y swap relative to CUDA's
// launch order.
int blx=blockIdx.y, bly=blockIdx.x;
int idx=threadIdx.y, idy=threadIdx.x, idt=idx*DIM_Y+idy;
// Thread remapped to the DIM_XA x DIM_YA (resp. DIM_XB x DIM_YB)
// layouts used for loading A (resp. B).
int idxA=idt/DIM_YA, idyA=idt % DIM_YA;
int idxB=idt/DIM_YB, idyB=idt % DIM_YB;
// Per-thread accumulators and staging registers.
int rC[THR_M][THR_N];
uint64_t rA[THR_M], rB[THR_N];
uint64_t ra[BLK_M/DIM_XA][BLK_K/DIM_YA];
uint64_t rb[BLK_N/DIM_XB][BLK_K/DIM_YB];
// Shared tiles; +1 column of padding to avoid bank conflicts.
__shared__ uint64_t sA[BLK_M][BLK_K+1];
__shared__ uint64_t sB[BLK_N][BLK_K+1];
// Linear offset of this thread's first element in A / B.
#define cazA (blx*BLK_M*LDA + idxA*LDA + idyA)
#define cazB (bly*BLK_N*LDB + idxB*LDB + idyB)
#ifdef TEX1D
int coord_A = offsA + cazA;
int coord_B = offsB + cazB;
#else
const uint64_t *offs_A = A + cazA;
const uint64_t *offs_B = B + cazB;
// Remaining readable elements, used by fetch for bound clamping.
ptrdiff_t boundA = (LDA*(M-1)+K) - cazA - 1;
ptrdiff_t boundB = (LDB*(N-1)+K) - cazB - 1;
#endif
#undef cazA
#undef cazB
int m, n, k, kk;
// Zero the accumulators.
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] = 0;
// Preload the first BLK_K slice of A and B into shared memory.
#pragma unroll
for (m=0; m<BLK_M; m+=DIM_XA)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YA)
sA[m+idxA][n+idyA] = fetch(A, m, n, boundA);
#pragma unroll
for (m=0; m<BLK_N; m+=DIM_XB)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YB)
sB[m+idxB][n+idyB] = fetch(B, m, n, boundB);
__syncthreads();
// Main loop over K in BLK_K steps; each iteration prefetches the
// next slice into registers while computing on the current one.
for (kk=0; kk<K-BLK_K; kk+=BLK_K) {
#ifdef TEX1D
coord_A += BLK_K;
coord_B += BLK_K;
#else
offs_A += BLK_K; boundA -= BLK_K;
offs_B += BLK_K; boundB -= BLK_K;
#endif
// Prefetch the next slice into registers.
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
ra[m][n] = fetch(A, m*DIM_XA, n*DIM_YA, boundA);
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
rb[m][n] = fetch(B, m*DIM_XB, n*DIM_YB, boundB);
// Accumulate XOR popcounts over the current shared slice.
#pragma unroll
for (k=0; k<BLK_K; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++) {
rC[m][n] += __popcll(rA[m] ^ rB[n]);
//xcaz(INIT, &rC[m][n], rA[m], rB[n]);
}
}
__syncthreads();
// Commit the prefetched slice to shared memory.
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[m][n];
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[m][n];
__syncthreads();
}
// Tail: process the final (possibly partial) slice.
kk=K-kk;
#pragma unroll
for (k=0; k<kk; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N;n++) {
rC[m][n] += __popcll(rA[m] ^ rB[n]);
//xcaz(INIT, &rC[m][n], rA[m], rB[n]);
}
}
// Write the tile, converting popcounts to +/-1 dot products.
#pragma unroll
for (m=0; m<THR_M; m++) {
int i = blx*BLK_M + m*DIM_X + idx;
#pragma unroll
for (n=0; n<THR_N; n++) {
int j = bly*BLK_N + n*DIM_Y + idy;
if (i<M && j<N)
//if (INIT)
///C[i*LDC+j] = rC[m][m];
//else
C[i*LDC+j] = (LDA<<6)-(rC[m][n]<<1);
}
}
}
//////////////////////////////////////////////////////////
// Same tiling scheme as pgemm_kernel, but the per-word accumulation
// is __popcll(a & b) - __popcll((a ^ b) & b) (cf. the commented-out
// xcaz "case 1" above), and the raw counts are stored to C without
// rescaling.
//
// Fixed: the original kept an #ifdef TEX1D branch in the main loop
// that incremented coord_A/coord_B, but this kernel never declares
// those variables (only the pointer/bound pair is set up), so a
// TEX1D build could not compile. The branch was removed, matching
// pgemm_kernel_init_rev.
template <const int DIM_X, const int DIM_Y,
const int BLK_M, const int BLK_N, const int BLK_K,
const int DIM_XA, const int DIM_YA,
const int DIM_XB, const int DIM_YB,
const int THR_M, const int THR_N>
static __global__
void pgemm_kernel_init(const int M, const int N, const int K,
const uint64_t * __restrict__ A, const int LDA,
const uint64_t * __restrict__ B, const int LDB,
float * __restrict__ C, const int LDC,
int offsA, int offsB)
{
// Block/thread coordinates (x/y swapped relative to launch order)
// and thread remappings for the A/B load layouts.
int blx=blockIdx.y, bly=blockIdx.x;
int idx=threadIdx.y, idy=threadIdx.x, idt=idx*DIM_Y+idy;
int idxA=idt/DIM_YA, idyA=idt % DIM_YA;
int idxB=idt/DIM_YB, idyB=idt % DIM_YB;
// Per-thread accumulators and staging registers.
int rC[THR_M][THR_N];
uint64_t rA[THR_M], rB[THR_N];
uint64_t ra[BLK_M/DIM_XA][BLK_K/DIM_YA];
uint64_t rb[BLK_N/DIM_XB][BLK_K/DIM_YB];
// Shared tiles; +1 column of padding to avoid bank conflicts.
__shared__ uint64_t sA[BLK_M][BLK_K+1];
__shared__ uint64_t sB[BLK_N][BLK_K+1];
#define cazA (blx*BLK_M*LDA + idxA*LDA + idyA)
#define cazB (bly*BLK_N*LDB + idxB*LDB + idyB)
const uint64_t *offs_A = A + cazA;
const uint64_t *offs_B = B + cazB;
// Remaining readable elements, used by fetch for bound clamping.
ptrdiff_t boundA = (LDA*(M-1)+K) - cazA - 1;
ptrdiff_t boundB = (LDB*(N-1)+K) - cazB - 1;
#undef cazA
#undef cazB
int m, n, k, kk;
// Zero the accumulators.
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] = 0;
// Preload the first BLK_K slice of A and B into shared memory.
#pragma unroll
for (m=0; m<BLK_M; m+=DIM_XA)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YA)
sA[m+idxA][n+idyA] = fetch(A, m, n, boundA);
#pragma unroll
for (m=0; m<BLK_N; m+=DIM_XB)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YB)
sB[m+idxB][n+idyB] = fetch(B, m, n, boundB);
__syncthreads();
// Main loop over K; prefetch the next slice while computing.
for (kk=0; kk<K-BLK_K; kk+=BLK_K) {
offs_A += BLK_K; boundA -= BLK_K;
offs_B += BLK_K; boundB -= BLK_K;
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
ra[m][n] = fetch(A, m*DIM_XA, n*DIM_YA, boundA);
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
rb[m][n] = fetch(B, m*DIM_XB, n*DIM_YB, boundB);
#pragma unroll
for (k=0; k<BLK_K; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++) {
rC[m][n] += (__popcll(rA[m] & rB[n]) -
__popcll((rA[m] ^ rB[n]) & rB[n]));
}
}
__syncthreads();
// Commit the prefetched slice to shared memory.
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[m][n];
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[m][n];
__syncthreads();
}
// Tail: process the final (possibly partial) slice.
kk=K-kk;
#pragma unroll
for (k=0; k<kk; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N;n++) {
rC[m][n] += (__popcll(rA[m] & rB[n]) -
__popcll((rA[m] ^ rB[n]) & rB[n]));
}
}
// Write the raw counts for the in-bounds part of the tile.
#pragma unroll
for (m=0; m<THR_M; m++) {
int i = blx*BLK_M + m*DIM_X + idx;
#pragma unroll
for (n=0; n<THR_N; n++) {
int j = bly*BLK_N + n*DIM_Y + idy;
if (i<M && j<N)
C[i*LDC+j] = rC[m][n];
}
}
}
///////////////////////////////////////////////////////////////
// Mirror of pgemm_kernel_init with the roles of A and B swapped in
// the accumulation: __popcll(a & b) - __popcll((a ^ b) & a) per
// 64-bit word pair (cf. the commented-out xcaz "case 2" above).
//
// Fixed: the second popcount was masked with rA[n] in the original.
// The accumulation operates on the pair (rA[m], rB[n]); rA[n] is the
// wrong operand and, when THR_N > THR_M, also reads past the end of
// rA[THR_M]. The mask is now rA[m], matching the xcaz reference and
// mirroring pgemm_kernel_init (which masks with rB[n]).
template <const int DIM_X, const int DIM_Y,
const int BLK_M, const int BLK_N, const int BLK_K,
const int DIM_XA, const int DIM_YA,
const int DIM_XB, const int DIM_YB,
const int THR_M, const int THR_N>
static __global__
void pgemm_kernel_init_rev(const int M, const int N, const int K,
const uint64_t * __restrict__ A, const int LDA,
const uint64_t * __restrict__ B, const int LDB,
float * __restrict__ C, const int LDC,
int offsA, int offsB)
{
// Block/thread coordinates (x/y swapped relative to launch order)
// and thread remappings for the A/B load layouts.
int blx=blockIdx.y, bly=blockIdx.x;
int idx=threadIdx.y, idy=threadIdx.x, idt=idx*DIM_Y+idy;
int idxA=idt/DIM_YA, idyA=idt % DIM_YA;
int idxB=idt/DIM_YB, idyB=idt % DIM_YB;
// Per-thread accumulators and staging registers.
int rC[THR_M][THR_N];
uint64_t rA[THR_M], rB[THR_N];
uint64_t ra[BLK_M/DIM_XA][BLK_K/DIM_YA];
uint64_t rb[BLK_N/DIM_XB][BLK_K/DIM_YB];
// Shared tiles; +1 column of padding to avoid bank conflicts.
__shared__ uint64_t sA[BLK_M][BLK_K+1];
__shared__ uint64_t sB[BLK_N][BLK_K+1];
#define cazA (blx*BLK_M*LDA + idxA*LDA + idyA)
#define cazB (bly*BLK_N*LDB + idxB*LDB + idyB)
const uint64_t *offs_A = A + cazA;
const uint64_t *offs_B = B + cazB;
// Remaining readable elements, used by fetch for bound clamping.
ptrdiff_t boundA = (LDA*(M-1)+K) - cazA - 1;
ptrdiff_t boundB = (LDB*(N-1)+K) - cazB - 1;
#undef cazA
#undef cazB
int m, n, k, kk;
// Zero the accumulators.
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] = 0;
// Preload the first BLK_K slice of A and B into shared memory.
#pragma unroll
for (m=0; m<BLK_M; m+=DIM_XA)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YA)
sA[m+idxA][n+idyA] = fetch(A, m, n, boundA);
#pragma unroll
for (m=0; m<BLK_N; m+=DIM_XB)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YB)
sB[m+idxB][n+idyB] = fetch(B, m, n, boundB);
__syncthreads();
// Main loop over K; prefetch the next slice while computing.
for (kk=0; kk<K-BLK_K; kk+=BLK_K) {
offs_A += BLK_K; boundA -= BLK_K;
offs_B += BLK_K; boundB -= BLK_K;
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
ra[m][n] = fetch(A, m*DIM_XA, n*DIM_YA, boundA);
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
rb[m][n] = fetch(B, m*DIM_XB, n*DIM_YB, boundB);
#pragma unroll
for (k=0; k<BLK_K; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] += __popcll(rA[m] & rB[n]) -
__popcll((rA[m] ^ rB[n]) & rA[m]);
}
__syncthreads();
// Commit the prefetched slice to shared memory.
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[m][n];
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[m][n];
__syncthreads();
}
// Tail: process the final (possibly partial) slice.
kk=K-kk;
#pragma unroll
for (k=0; k<kk; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N;n++)
rC[m][n] += __popcll(rA[m] & rB[n]) -
__popcll((rA[m] ^ rB[n]) & rA[m]);
}
// Write the raw counts for the in-bounds part of the tile.
#pragma unroll
for (m=0; m<THR_M; m++) {
int i = blx*BLK_M + m*DIM_X + idx;
#pragma unroll
for (n=0; n<THR_N; n++) {
int j = bly*BLK_N + n*DIM_Y + idy;
if (i<M && j<N)
C[i*LDC+j] = rC[m][n];
}
}
}
///////////////////////////////////////////////////////////////
// 32-bit variant of pgemm_kernel: same shared-memory tiling and
// prefetch scheme over uint32_t words with __popc. The INIT template
// parameter is unused in this body (the accumulation is fixed to the
// XOR-popcount form).
//
// The final store converts the XOR popcount into a +/-1 dot product:
// (LDA << 5) - 2 * count, i.e. 32 * LDA minus twice the number of
// differing bits.
// NOTE(review): that formula assumes LDA equals the row length K in
// 32-bit words - confirm at the call site.
template <const int INIT,
const int DIM_X, const int DIM_Y,
const int BLK_M, const int BLK_N, const int BLK_K,
const int DIM_XA, const int DIM_YA,
const int DIM_XB, const int DIM_YB,
const int THR_M, const int THR_N>
static __global__
void pgemm32_kernel(const int M, const int N, const int K,
const uint32_t * __restrict__ A, const int LDA,
const uint32_t * __restrict__ B, const int LDB,
float * __restrict__ C, const int LDC,
int offsA, int offsB)
{
// Block/thread coordinates (x/y swapped relative to launch order)
// and thread remappings for the A/B load layouts.
int blx=blockIdx.y, bly=blockIdx.x;
int idx=threadIdx.y, idy=threadIdx.x, idt=idx*DIM_Y+idy;
int idxA=idt/DIM_YA, idyA=idt % DIM_YA;
int idxB=idt/DIM_YB, idyB=idt % DIM_YB;
// Per-thread accumulators and staging registers.
int rC[THR_M][THR_N];
uint32_t rA[THR_M], rB[THR_N];
uint32_t ra[BLK_M/DIM_XA][BLK_K/DIM_YA];
uint32_t rb[BLK_N/DIM_XB][BLK_K/DIM_YB];
// Shared tiles; +1 column of padding to avoid bank conflicts.
__shared__ uint32_t sA[BLK_M][BLK_K+1];
__shared__ uint32_t sB[BLK_N][BLK_K+1];
#define cazA (blx*BLK_M*LDA + idxA*LDA + idyA)
#define cazB (bly*BLK_N*LDB + idxB*LDB + idyB)
const uint32_t *offs_A = A + cazA;
const uint32_t *offs_B = B + cazB;
// Remaining readable elements, used by fetch for bound clamping.
ptrdiff_t boundA = (LDA*(M-1)+K) - cazA - 1;
ptrdiff_t boundB = (LDB*(N-1)+K) - cazB - 1;
#undef cazA
#undef cazB
int m, n, k, kk;
// Zero the accumulators.
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] = 0;
// Preload the first BLK_K slice of A and B into shared memory.
#pragma unroll
for (m=0; m<BLK_M; m+=DIM_XA)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YA)
sA[m+idxA][n+idyA] = fetch(A, m, n, boundA);
#pragma unroll
for (m=0; m<BLK_N; m+=DIM_XB)
#pragma unroll
for (n=0; n<BLK_K; n+=DIM_YB)
sB[m+idxB][n+idyB] = fetch(B, m, n, boundB);
__syncthreads();
// Main loop over K; prefetch the next slice while computing.
for (kk=0; kk<K-BLK_K; kk+=BLK_K) {
offs_A += BLK_K; boundA -= BLK_K;
offs_B += BLK_K; boundB -= BLK_K;
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
ra[m][n] = fetch(A, m*DIM_XA, n*DIM_YA, boundA);
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
rb[m][n] = fetch(B, m*DIM_XB, n*DIM_YB, boundB);
#pragma unroll
for (k=0; k<BLK_K; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N; n++)
rC[m][n] += __popc(rA[m] ^ rB[n]);
}
__syncthreads();
// Commit the prefetched slice to shared memory.
#pragma unroll
for (m=0; m<BLK_M/DIM_XA; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YA; n++)
sA[m*DIM_XA+idxA][n*DIM_YA+idyA] = ra[m][n];
#pragma unroll
for (m=0; m<BLK_N/DIM_XB; m++)
#pragma unroll
for (n=0; n<BLK_K/DIM_YB; n++)
sB[m*DIM_XB+idxB][n*DIM_YB+idyB] = rb[m][n];
__syncthreads();
}
// Tail: process the final (possibly partial) slice.
kk=K-kk;
#pragma unroll
for (k=0; k<kk; k++) {
#pragma unroll
for (m=0; m<THR_M; m++)
rA[m] = sA[m*DIM_X+idx][k];
#pragma unroll
for (n=0; n<THR_N; n++)
rB[n] = sB[n*DIM_Y+idy][k];
#pragma unroll
for (m=0; m<THR_M; m++)
#pragma unroll
for (n=0; n<THR_N;n++)
rC[m][n] += __popc(rA[m] ^ rB[n]);
}
// Write the tile, converting popcounts to +/-1 dot products.
#pragma unroll
for (m=0; m<THR_M; m++) {
int i = blx*BLK_M + m*DIM_X + idx;
#pragma unroll
for (n=0; n<THR_N; n++) {
int j = bly*BLK_N + n*DIM_Y + idy;
if (i<M && j<N)
C[i*LDC+j] = (LDA<<5)-(rC[m][n]<<1);
}
}
}
namespace arboretum_test {
using arboretum::core::BestSplit;
using arboretum::core::GainFunctionParameters;
using arboretum::core::Histogram;
using arboretum::core::HistTreeGrower;
using arboretum::core::InternalConfiguration;
using arboretum::core::my_atomics;
// Rows alternate between node 0 and node 1; after partitioning the
// node-0 rows must be packed to the front in order and the node-1
// rows to the back in reverse order (cub partition semantics).
TEST(HistTreeGrower, CreatePartitioningIndexes) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const InternalConfiguration config(false, 1, 0, false, true);
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
    size, depth, hist_size, &best, &histogram, &config);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<unsigned int> parent_node_count(3, 0);
  // Assign even rows to node 0 and odd rows to node 1.
  // (size_t counter fixes the signed/unsigned comparison warning.)
  for (size_t i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
  }
  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  // Closed-form of the original 32 hand-written ASSERT_EQs:
  // index 2*k maps to k and 2*k + 1 maps to size - 1 - k.
  for (unsigned k = 0; k < size / 2; ++k) {
    ASSERT_EQ(partitioning_indexes[2 * k], k);
    ASSERT_EQ(partitioning_indexes[2 * k + 1], size - 1 - k);
  }
}
// Best-split search at the root over a single continuous feature
// bucketed into 4 histogram bins of 8 rows each.
TEST(HistTreeGrower, RootSearchContinuousFeature) {
  const InternalConfiguration config(false, 1, 0, true, true);
  const size_t size = 32;
  const unsigned hist_size = 4;
  const unsigned depth = 1;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
    size, depth, hist_size, &best, &histogram, &config);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  // Gradient i with unit hessian per row; accumulate the total while
  // filling the buffers.
  float2 total = make_float2(0.0, 0.0);
  for (size_t i = 0; i < size; ++i) {
    const float2 g = make_float2(float(i), 1.0);
    grad[i] = g;
    fvalue_h[i] = fvalue_d[i] = i / 8;
    total += g;
  }
  thrust::device_vector<float2> parent_node_sum(2);
  parent_node_sum[0] = make_float2(0, 0);
  parent_node_sum[1] = total;
  thrust::device_vector<unsigned int> parent_node_count(2, 0);
  parent_node_count[0] = 0;
  parent_node_count[1] = size;
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, 0, 1, p,
                             false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // Reference values: best split at histogram bin 1 with gain 2048.
  ASSERT_EQ(result_h[0].ints[1], 1);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 2048.0);
}
// Level-1 split search over a continuous feature with 8 histogram
// bins; rows alternate between the two level-1 nodes.
TEST(HistTreeGrower, Level1SearchContinuousFeatureNoTrickDynamic) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const InternalConfiguration config(false, 1, 0, false, true);
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
    size, depth, hist_size, &best, &histogram, &config);
  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(32);
  thrust::host_vector<unsigned short> fvalue_h(32);
  thrust::device_vector<unsigned short> fvalue_d(32);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);
  // Gradients are i^2 with unit hessians; rows alternate between the
  // two nodes; feature values form 8 buckets of 4 rows each.
  // (size_t counter fixes the signed/unsigned comparison warning;
  // the unused local `sum` from the original was dropped.)
  for (size_t i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }
  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);
  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];
  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  thrust::host_vector<my_atomics> result_h = grower.result_d;
  // Every one of the 2 * hist_size bins holds exactly 2 rows.
  for (unsigned i = 0; i < hist_size * 2; ++i) {
    ASSERT_EQ(2, grower.hist_bin_count[i]);
  }
  // Prefix sums of the per-bin gradient sums for both nodes (table
  // replaces the original 16 copy-pasted assert pairs).
  const float expected_sum_x[16] = {
      1.0,    13.0,   41.0,   85.0,   145.0,  221.0,  313.0,  421.0,
      1861.0, 1625.0, 1405.0, 1201.0, 1013.0, 841.0,  685.0,  545.0};
  for (int k = 0; k < 16; ++k) {
    float2 tmp = grower.sum[k];
    ASSERT_FLOAT_EQ(expected_sum_x[k], tmp.x);
  }
  ASSERT_EQ(result_h[0].ints[1], 4);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 11);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
  // After partitioning, the first half is packed in order (i / 2) and
  // the second half mirrors it in reverse ((31 - i) / 2), matching
  // cub's partition semantics (closed-form of the original 32
  // ASSERT_EQs).
  for (int i = 0; i < 16; ++i) {
    ASSERT_EQ(fvalue_d[i], i / 2);
  }
  for (int i = 16; i < 32; ++i) {
    ASSERT_EQ(fvalue_d[i], (31 - i) / 2);
  }
}
TEST(HistTreeGrower, Level1SearchContinuousFeatureNoTrickDynamic_Upload) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const InternalConfiguration config(false, 1, 0, false, true);
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  // empty device buffer: ProcessDenseFeature must upload values from the host
  thrust::device_vector<unsigned short> fvalue_d(0);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  TEST_OK(cudaStreamSynchronize(grower.copy_d2h_stream));

  thrust::host_vector<my_atomics> result_h = grower.result_d;
  for (unsigned i = 0; i < hist_size * 2; ++i) {
    ASSERT_EQ(2, grower.hist_bin_count[i]);
  }

  // expected grower.sum values after the feature pass
  const float expected_sum[16] = {1.0,    13.0,   41.0,   85.0,
                                  145.0,  221.0,  313.0,  421.0,
                                  1861.0, 1625.0, 1405.0, 1201.0,
                                  1013.0, 841.0,  685.0,  545.0};
  for (int i = 0; i < 16; ++i) {
    const float2 node_sum = grower.sum[i];
    ASSERT_FLOAT_EQ(expected_sum[i], node_sum.x);
  }

  ASSERT_EQ(result_h[0].ints[1], 4);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 11);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);

  // host values come back partitioned: pairs ascending in the first half,
  // then the mirror image in the second half
  for (int i = 0; i < 32; ++i) {
    const unsigned short expected = (i < 16) ? i / 2 : (31 - i) / 2;
    ASSERT_EQ(fvalue_h[i], expected);
  }
}
TEST(HistTreeGrower,
     Level1SearchContinuousFeatureNoTrickDynamic_PartitionOnly) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const InternalConfiguration config(false, 1, 0, false, true);
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  thrust::device_vector<unsigned short> fvalue_d(32);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  // partition_only = true: no histogram/best-split work, only reordering
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, true, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  TEST_OK(cudaStreamSynchronize(grower.copy_d2h_stream));

  // device values come back partitioned: pairs ascending in the first half,
  // then the mirror image in the second half
  for (int i = 0; i < 32; ++i) {
    const unsigned short expected = (i < 16) ? i / 2 : (31 - i) / 2;
    const unsigned short actual = fvalue_d[i];
    ASSERT_EQ(actual, expected);
  }
}
TEST(HistTreeGrower,
     Level1SearchContinuousFeatureNoTrickDynamic_PartitionOnly_Upload) {
  const unsigned level = 1;
  const unsigned depth = 2;
  const InternalConfiguration config(false, 1, 0, false, true);
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  // empty device buffer: ProcessDenseFeature must upload values from the host
  thrust::device_vector<unsigned short> fvalue_d(0);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  // partition_only = true: no histogram/best-split work, only reordering
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, true, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));
  TEST_OK(cudaStreamSynchronize(grower.copy_d2h_stream));

  // host values come back partitioned: pairs ascending in the first half,
  // then the mirror image in the second half
  for (int i = 0; i < 32; ++i) {
    const unsigned short expected = (i < 16) ? i / 2 : (31 - i) / 2;
    ASSERT_EQ(fvalue_h[i], expected);
  }
}
TEST(HistTreeGrower, Level1SearchContinuousFeatureNoTrickStatic) {
  const InternalConfiguration config(false, 1, 0, false, false);
  const size_t size = 32;
  const unsigned depth = 2;
  const unsigned level = 1;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  thrust::device_vector<unsigned short> fvalue_d(32);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];
  // static configuration reads node counts from the BestSplit host copy
  best.parent_node_count_h = parent_node_count;

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));

  thrust::host_vector<my_atomics> result_h = grower.result_d;
  for (unsigned i = 0; i < hist_size * 2; ++i) {
    ASSERT_EQ(2, grower.hist_bin_count[i]);
  }

  // expected grower.sum values after the feature pass
  const float expected_sum[16] = {1.0,    13.0,   41.0,   85.0,
                                  145.0,  221.0,  313.0,  421.0,
                                  1861.0, 1625.0, 1405.0, 1201.0,
                                  1013.0, 841.0,  685.0,  545.0};
  for (int i = 0; i < 16; ++i) {
    const float2 node_sum = grower.sum[i];
    ASSERT_FLOAT_EQ(expected_sum[i], node_sum.x);
  }

  ASSERT_EQ(result_h[0].ints[1], 4);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 11);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
}
TEST(HistTreeGrower, Level1SearchContinuousFeatureWithTrickDynamic) {
  const InternalConfiguration config(false, 1, 0, true, true);
  const unsigned level = 1;
  const unsigned depth = 2;
  const size_t size = 32;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  thrust::device_vector<unsigned short> fvalue_d(32);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);
  // seed the parent histogram so the subtraction trick has data to work with
  histogram.Update(hist_sum, hist_count, 0, 1 << (depth - 1), grower.stream);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));

  thrust::host_vector<my_atomics> result_h = grower.result_d;
  for (unsigned i = 0; i < hist_size * 2; ++i) {
    const unsigned count = grower.hist_bin_count[i];
    ASSERT_EQ(2, count);
  }

  // expected grower.sum values after the feature pass
  const float expected_sum[16] = {1.0,    13.0,   41.0,   85.0,
                                  145.0,  221.0,  313.0,  421.0,
                                  1861.0, 1625.0, 1405.0, 1201.0,
                                  1013.0, 841.0,  685.0,  545.0};
  for (int i = 0; i < 16; ++i) {
    const float2 node_sum = grower.sum[i];
    ASSERT_FLOAT_EQ(expected_sum[i], node_sum.x);
  }

  ASSERT_EQ(result_h[0].ints[1], 4);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 11);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
}
TEST(HistTreeGrower, Level1SearchContinuousFeatureWithTrickStatic) {
  const InternalConfiguration config(false, 1, 0, true, false);
  const size_t size = 32;
  const unsigned depth = 2;
  const unsigned level = 1;
  const unsigned hist_size = 8;
  BestSplit<float2> best(1 << depth, hist_size);
  Histogram<float2> histogram(1 << depth, hist_size, 1);
  auto grower = HistTreeGrower<unsigned int, unsigned short, float2, float2>(
      size, depth, hist_size, &best, &histogram, &config);

  thrust::device_vector<unsigned int> row2Node(size, 0);
  thrust::device_vector<unsigned int> partitioning_indexes(size, 0);
  thrust::device_vector<float2> grad(size);
  thrust::host_vector<unsigned short> fvalue_h(size);
  thrust::device_vector<unsigned short> fvalue_d(32);
  thrust::device_vector<unsigned> hist_count((1 << depth) * hist_size, 0);
  thrust::device_vector<float2> hist_sum((1 << depth) * hist_size);
  thrust::device_vector<float2> parent_node_sum(3, make_float2(0, 0));
  thrust::device_vector<unsigned int> parent_node_count(3, 0);

  // rows alternate between nodes 0 and 1; gradient of row i is (i*i, 1)
  for (int i = 0; i < size; ++i) {
    row2Node[i] = i % 2;
    grad[i] = make_float2(i * i, 1.0);
    fvalue_h[i] = fvalue_d[i] = i / 4;
    parent_node_sum[(i / 16) + 1] += grad[i];
    hist_count[fvalue_h[i]] += 1;
  }

  // sort feature values by row according to cub's partition logic
  hist_sum[0] = make_float2(0.0 + 1.0 + 900.0 + 961.0, 4.0);
  hist_sum[1] = make_float2(4.0 + 9.0 + 784.0 + 841.0, 4.0);
  hist_sum[2] = make_float2(16.0 + 25.0 + 676.0 + 729.0, 4.0);
  hist_sum[3] = make_float2(36.0 + 49.0 + 576.0 + 625.0, 4.0);
  hist_sum[4] = make_float2(64.0 + 81.0 + 484.0 + 529.0, 4.0);
  hist_sum[5] = make_float2(100.0 + 121.0 + 400.0 + 441.0, 4.0);
  hist_sum[6] = make_float2(144.0 + 169.0 + 324.0 + 361.0, 4.0);
  hist_sum[7] = make_float2(196.0 + 225.0 + 256.0 + 289.0, 4.0);
  // seed the parent histogram so the subtraction trick has data to work with
  histogram.Update(hist_sum, hist_count, 0, 1 << (depth - 1), grower.stream);

  parent_node_count[1] += 16;
  parent_node_count[2] += 32;
  parent_node_sum[1] += parent_node_sum[0];
  parent_node_sum[2] += parent_node_sum[1];
  // static configuration reads node counts from the BestSplit host copy
  best.parent_node_count_h = parent_node_count;

  auto p = GainFunctionParameters(0, 0, 0, 0, 0, 0);
  grower.CreatePartitioningIndexes(partitioning_indexes, row2Node,
                                   parent_node_count, level, depth);
  grower.ProcessDenseFeature(partitioning_indexes, row2Node, grad, fvalue_d,
                             thrust::raw_pointer_cast(fvalue_h.data()),
                             parent_node_sum, parent_node_count, 3, level,
                             depth, p, false, 0);
  TEST_OK(cudaStreamSynchronize(grower.stream));

  thrust::host_vector<my_atomics> result_h = grower.result_d;
  for (unsigned i = 0; i < hist_size * 2; ++i) {
    const unsigned count = grower.hist_bin_count[i];
    ASSERT_EQ(2, count);
  }

  // expected grower.sum values after the feature pass
  const float expected_sum[16] = {1.0,    13.0,   41.0,   85.0,
                                  145.0,  221.0,  313.0,  421.0,
                                  1861.0, 1625.0, 1405.0, 1201.0,
                                  1013.0, 841.0,  685.0,  545.0};
  for (int i = 0; i < 16; ++i) {
    const float2 node_sum = grower.sum[i];
    ASSERT_FLOAT_EQ(expected_sum[i], node_sum.x);
  }

  ASSERT_EQ(result_h[0].ints[1], 4);
  ASSERT_FLOAT_EQ(result_h[0].floats[0], 64026.672);
  ASSERT_EQ(result_h[1].ints[1], 11);
  ASSERT_FLOAT_EQ(result_h[1].floats[0], 565504);
}
} // namespace arboretum_test | the_stack |
#include <iostream>
#include <algorithm>
#include "cuhnsw.hpp"
namespace cuhnsw {
CuHNSW::CuHNSW() {
  logger_ = CuHNSWLogger().get_logger();
  GetDeviceInfo();
  // Derive the total CUDA-core count from the compute capability
  // (major_/minor_) and the multiprocessor count reported by the device.
  // reference: https://stackoverflow.com/a/32531982
  if (major_ == 2) {              // Fermi
    cores_ = mp_cnt_ * (minor_ == 1 ? 48 : 32);
  } else if (major_ == 3) {       // Kepler
    cores_ = mp_cnt_ * 192;
  } else if (major_ == 5) {       // Maxwell
    cores_ = mp_cnt_ * 128;
  } else if (major_ == 6) {       // Pascal
    if (minor_ == 1 or minor_ == 2)
      cores_ = mp_cnt_ * 128;
    else if (minor_ == 0)
      cores_ = mp_cnt_ * 64;
    else
      DEBUG0("Unknown device type");
  } else if (major_ == 7) {       // Volta and Turing
    if (minor_ == 0 or minor_ == 5)
      cores_ = mp_cnt_ * 64;
    else
      DEBUG0("Unknown device type");
  } else if (major_ == 8) {       // Ampere
    if (minor_ == 0)
      cores_ = mp_cnt_ * 64;
    else if (minor_ == 6)
      cores_ = mp_cnt_ * 128;
    else
      DEBUG0("Unknown device type");
  } else {
    DEBUG0("Unknown device type");
  }
  // unrecognized architecture: assume 128 cores per multiprocessor
  if (cores_ == -1) cores_ = mp_cnt_ * 128;
  INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}",
       major_, minor_, mp_cnt_, cores_);
}
CuHNSW::~CuHNSW() {}
// Load the json option file at opt_path and populate the index
// hyper-parameters. Returns false when the file cannot be opened or parsed;
// throws std::runtime_error on an unknown dist_type.
bool CuHNSW::Init(std::string opt_path) {
  // slurp the whole option file into a string
  std::ifstream opt_file(opt_path.c_str());
  if (!opt_file.is_open()) return false;
  std::string content((std::istreambuf_iterator<char>(opt_file)),
                      std::istreambuf_iterator<char>());

  // parse as json; reject on any syntax error
  std::string parse_err;
  auto parsed = json11::Json::parse(content, parse_err);
  if (!parse_err.empty()) return false;
  opt_ = parsed;

  max_m_ = opt_["max_m"].int_value();
  max_m0_ = opt_["max_m0"].int_value();
  save_remains_ = opt_["save_remains"].bool_value();
  ef_construction_ = opt_["ef_construction"].int_value();
  level_mult_ = opt_["level_mult"].number_value();
  batch_size_ = opt_["batch_size"].int_value();
  block_dim_ = opt_["block_dim"].int_value();
  visited_table_size_ = opt_["visited_table_size"].int_value();
  visited_list_size_ = opt_["visited_list_size"].int_value();
  // default the hash-table size to twice the visited-list size
  if (!visited_table_size_)
    visited_table_size_ = visited_list_size_ * 2;
  heuristic_coef_ = opt_["heuristic_coef"].number_value();
  reverse_cand_ = opt_["reverse_cand"].bool_value();

  // distance function: only "dot" and "l2" are supported
  std::string dist_type = opt_["dist_type"].string_value();
  if (dist_type == "dot") {
    dist_type_ = DOT;
  } else if (dist_type == "l2") {
    dist_type_ = L2;
  } else {
    char buf[4096];
    snprintf(buf, sizeof(buf), "invalid dist type %s",
             dist_type.c_str());
    throw std::runtime_error(std::string(buf));
  }

  CuHNSWLogger().set_log_level(opt_["c_log_level"].int_value());
  DEBUG("max_m: {}, max_m0: {}, save_remains: {}, ef_construction: {}, level_mult: {}, dist_type: {}",
        max_m_, max_m0_, save_remains_, ef_construction_, level_mult_, dist_type);
  return true;
}
// Copy a row-major (num_data x num_dims) float array onto the device and
// keep a host pointer to the original buffer for later serialization.
void CuHNSW::SetData(const float* data, int num_data, int num_dims) {
  num_data_ = num_data;
  num_dims_ = num_dims;
  block_cnt_ = opt_["hyper_threads"].number_value() * (cores_ / block_dim_);
  DEBUG("copy data ({} x {}), block_cnt: {}, block_dim: {}",
        num_data, num_dims, block_cnt_, block_dim_);
  const int total = num_data * num_dims;
  device_data_.resize(total);
#ifdef HALF_PRECISION
  // convert every element to half precision on the host, then copy once
  std::vector<cuda_scalar> half_buf(total);
  for (int i = 0; i < total; ++i)
    half_buf[i] = conversion(data[i]);
  thrust::copy(half_buf.begin(), half_buf.end(), device_data_.begin());
#else
  // full precision: copy straight to the device buffer
  thrust::copy(data, data + total, device_data_.begin());
#endif
  data_ = data;
}
void CuHNSW::SetRandomLevels(const int* levels) {
levels_.resize(num_data_);
DEBUG("set levels of data (length: {})", num_data_)
max_level_ = 0;
std::vector<std::vector<int>> level_nodes(1);
for (int i = 0; i < num_data_; ++i) {
levels_[i] = levels[i];
if (levels[i] > max_level_) {
max_level_ = levels[i];
level_nodes.resize(max_level_ + 1);
enter_point_ = i;
}
for (int l = 0; l <= levels[i]; ++l)
level_nodes[l].push_back(i);
}
DEBUG("max level: {}", max_level_)
for (int i = 0; i <= max_level_; ++i)
DEBUG("number of data in level {}: {}",
i, level_nodes[i].size());
level_graphs_.clear();
for (int i = 0; i <= max_level_; ++i) {
LevelGraph graph = LevelGraph();
graph.SetNodes(level_nodes[i],
num_data_, ef_construction_);
level_graphs_.push_back(graph);
}
}
// save graph compatible with hnswlib (https://github.com/nmslib/hnswlib)
// Serialize the index to fpath in hnswlib's binary format
// (https://github.com/nmslib/hnswlib): fixed meta header, then one
// fixed-size record per element (level0 links + vector data + label),
// then the variable-length upper-layer link lists.
void CuHNSW::SaveIndex(std::string fpath) {
  std::ofstream output(fpath);
  DEBUG("save index to {}", fpath);
  // write meta values
  DEBUG0("write meta values");
  size_t data_size = num_dims_ * sizeof(scalar);
  size_t max_elements = num_data_;
  size_t cur_element_count = num_data_;
  size_t M = max_m_;
  size_t maxM = max_m_;
  size_t maxM0 = max_m0_;
  int maxlevel = max_level_;
  size_t size_links_level0 = maxM0 * sizeof(tableint) + sizeof(sizeint);
  size_t size_links_per_element = maxM * sizeof(tableint) + sizeof(sizeint);
  size_t size_data_per_element = size_links_level0 + data_size + sizeof(labeltype);
  size_t ef_construction = ef_construction_;
  double mult = level_mult_;
  size_t offsetData = size_links_level0;
  size_t label_offset = size_links_level0 + data_size;
  size_t offsetLevel0 = 0;
  tableint enterpoint_node = enter_point_;
  writeBinaryPOD(output, offsetLevel0);
  writeBinaryPOD(output, max_elements);
  writeBinaryPOD(output, cur_element_count);
  writeBinaryPOD(output, size_data_per_element);
  writeBinaryPOD(output, label_offset);
  writeBinaryPOD(output, offsetData);
  writeBinaryPOD(output, maxlevel);
  writeBinaryPOD(output, enterpoint_node);
  writeBinaryPOD(output, maxM);
  writeBinaryPOD(output, maxM0);
  writeBinaryPOD(output, M);
  writeBinaryPOD(output, mult);
  writeBinaryPOD(output, ef_construction);

  // write level0 links and data
  DEBUG0("write level0 links and data");
  // std::vector instead of malloc: the previous buffer was never freed, and
  // zero-initialization keeps the padding after short link lists deterministic
  std::vector<char> data_level0_memory(cur_element_count * size_data_per_element, 0);
  LevelGraph& graph = level_graphs_[0];
  std::vector<tableint> links;
  links.reserve(max_m0_);
  size_t offset = 0;
  for (int i = 0; i < cur_element_count; ++i) {
    links.clear();
    for (const auto& pr: graph.GetNeighbors(i))
      links.push_back(static_cast<tableint>(pr.second));
    sizeint size = links.size();
    // record layout: [link count][links, padded to maxM0][vector][label]
    memcpy(data_level0_memory.data() + offset, &size, sizeof(sizeint));
    offset += sizeof(sizeint);
    if (size > 0)
      memcpy(data_level0_memory.data() + offset, &links[0], sizeof(tableint) * size);
    offset += maxM0 * sizeof(tableint);
    memcpy(data_level0_memory.data() + offset, &data_[i * num_dims_], data_size);
    offset += data_size;
    labeltype label = i;
    memcpy(data_level0_memory.data() + offset, &label, sizeof(labeltype));
    offset += sizeof(labeltype);
  }
  output.write(data_level0_memory.data(), cur_element_count * size_data_per_element);

  // write upper layer links
  DEBUG0("write upper layer links");
  std::vector<char> mem;  // reused per element; freed automatically on return
  for (int i = 0; i < num_data_; ++i) {
    // total byte size of this element's upper-layer link lists (0 = level-0 only)
    unsigned int size = size_links_per_element * levels_[i];
    writeBinaryPOD(output, size);
    if (size) {
      mem.assign(size, 0);
      offset = 0;
      for (int j = 1; j <= levels_[i]; ++j) {
        links.clear();
        LevelGraph& upper_graph = level_graphs_[j];
        for (const auto& pr: upper_graph.GetNeighbors(i))
          links.push_back(static_cast<tableint>(pr.second));
        sizeint link_size = links.size();
        memcpy(mem.data() + offset, &link_size, sizeof(sizeint));
        offset += sizeof(sizeint);
        if (link_size > 0)
          memcpy(mem.data() + offset, &links[0], sizeof(tableint) * link_size);
        offset += sizeof(tableint) * maxM;
      }
      output.write(mem.data(), size);
    }
  }
  output.close();
}
// load graph compatible with hnswlib (https://github.com/nmslib/hnswlib)
// Deserialize an index written in hnswlib's binary format
// (https://github.com/nmslib/hnswlib): read the meta header, the per-element
// level0 records (links + vector data + label), then the upper-layer links,
// and rebuild the level graphs and the device-side data copy.
//
// Fixes relative to the previous revision:
//  * `memcpy(°, ...)` was a mangled `memcpy(&deg, ...)` (mis-encoded
//    HTML entity) and did not compile.
//  * the malloc'd level0 buffer and the per-element link buffers leaked;
//    they are now std::vector<char> and released automatically.
void CuHNSW::LoadIndex(std::string fpath) {
  std::ifstream input(fpath, std::ios::binary);
  DEBUG("load index from {}", fpath);
  // read meta values
  DEBUG0("read meta values");
  size_t offsetLevel0, max_elements, cur_element_count;
  size_t size_data_per_element, label_offset, offsetData;
  int maxlevel;
  tableint enterpoint_node = enter_point_;
  size_t maxM, maxM0, M;
  double mult;
  size_t ef_construction;
  readBinaryPOD(input, offsetLevel0);
  readBinaryPOD(input, max_elements);
  readBinaryPOD(input, cur_element_count);
  readBinaryPOD(input, size_data_per_element);
  readBinaryPOD(input, label_offset);
  readBinaryPOD(input, offsetData);
  readBinaryPOD(input, maxlevel);
  readBinaryPOD(input, enterpoint_node);
  readBinaryPOD(input, maxM);
  readBinaryPOD(input, maxM0);
  readBinaryPOD(input, M);
  readBinaryPOD(input, mult);
  readBinaryPOD(input, ef_construction);
  size_t size_per_link = maxM * sizeof(tableint) + sizeof(sizeint);
  num_data_ = cur_element_count;
  max_m_ = maxM;
  max_m0_ = maxM0;
  enter_point_ = enterpoint_node;
  ef_construction_ = ef_construction;
  max_level_ = maxlevel;
  level_mult_ = mult;
  // dimensionality is implied by the gap between data and label offsets
  num_dims_ = (label_offset - offsetData) / sizeof(scalar);
  DEBUG("meta values loaded, num_data: {}, num_dims: {}, max_m: {}, max_m0: {}, enter_point: {}, max_level: {}",
    num_data_, num_dims_, max_m_, max_m0_, enter_point_, max_level_);

  // read the whole level0 section in one shot (RAII buffer, no leak)
  std::vector<char> data_level0_memory(max_elements * size_data_per_element);
  input.read(data_level0_memory.data(), cur_element_count * size_data_per_element);

  // reset level graphs
  level_graphs_.clear();
  level_graphs_.shrink_to_fit();
  level_graphs_.resize(max_level_ + 1);

  // load data and level0 links
  DEBUG0("load level0 links and data");
  DEBUG("level0 count: {}", cur_element_count);
  std::vector<float> data(num_data_ * num_dims_);
  size_t offset = 0;
  std::vector<tableint> links(max_m0_);
  std::vector<scalar> vec_data(num_dims_);
  LevelGraph& graph0 = level_graphs_[0];
  std::vector<std::vector<int>> nodes(max_level_ + 1);
  nodes[0].resize(cur_element_count);
  std::iota(nodes[0].begin(), nodes[0].end(), 0);
  graph0.SetNodes(nodes[0], num_data_, ef_construction_);
  labels_.clear(); labelled_ = true;
  for (int i = 0; i < cur_element_count; ++i) {
    // record layout: [link count][links, padded to max_m0][vector][label]
    sizeint deg;
    memcpy(&deg, data_level0_memory.data() + offset, sizeof(sizeint));
    offset += sizeof(sizeint);
    memcpy(&links[0], data_level0_memory.data() + offset, sizeof(tableint) * max_m0_);
    for (int j = 0; j < deg; ++j)
      graph0.AddEdge(i, links[j], 0);
    offset += sizeof(tableint) * max_m0_;
    memcpy(&vec_data[0], data_level0_memory.data() + offset, sizeof(scalar) * num_dims_);
    for (int j = 0; j < num_dims_; ++j)
      data[num_dims_ * i + j] = vec_data[j];
    offset += sizeof(scalar) * num_dims_;
    labeltype label;
    memcpy(&label, data_level0_memory.data() + offset, sizeof(labeltype));
    labels_.push_back(static_cast<int>(label));
    offset += sizeof(labeltype);
  }
  SetData(&data[0], num_data_, num_dims_);

  // load upper layer links
  DEBUG0("load upper layer links");
  std::vector<std::vector<std::pair<int, int>>> links_data(max_level_ + 1);
  links.resize(max_m_);
  levels_.resize(cur_element_count);
  std::vector<char> buffer;  // reused per element; freed automatically
  for (int i = 0; i < cur_element_count; ++i) {
    unsigned int linksize;
    readBinaryPOD(input, linksize);
    if (not linksize) continue;
    buffer.resize(linksize);
    input.read(buffer.data(), linksize);
    size_t levels = linksize / size_per_link;
    size_t link_offset = 0;
    // NOTE(review): SaveIndex writes linksize = size_links_per_element *
    // levels_[i], so `levels + 1` here looks off by one relative to the
    // writer -- confirm against SaveIndex before relying on levels_ after
    // a load.
    levels_[i] = levels + 1;
    for (int j = 1; j <= levels; ++j) {
      nodes[j].push_back(i);
      sizeint deg;
      memcpy(&deg, buffer.data() + link_offset, sizeof(sizeint));
      link_offset += sizeof(sizeint);
      memcpy(&links[0], buffer.data() + link_offset, sizeof(tableint) * deg);
      link_offset += sizeof(tableint) * max_m_;
      for (int k = 0; k < deg; ++k)
        links_data[j].emplace_back(i, links[k]);
    }
  }
  for (int i = 1; i <= max_level_; ++i) {
    LevelGraph& graph = level_graphs_[i];
    DEBUG("level {} count: {}", i, nodes[i].size());
    graph.SetNodes(nodes[i], num_data_, ef_construction_);
    for (const auto& pr: links_data[i]) {
      graph.AddEdge(pr.first, pr.second, 0);
    }
  }
  input.close();
}
} // namespace cuhnsw
* \file
* Vector type inference utilities
*/
#pragma once
#include <iostream>
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilModule
* @{
*/
/******************************************************************************
* Vector type inference utilities. For example:
*
* typename CubVector<unsigned int, 2>::Type // Aliases uint2
*
******************************************************************************/
/**
* \brief Exposes a member typedef \p Type that names the corresponding CUDA vector type if one exists. Otherwise \p Type refers to the CubVector structure itself, which will wrap the corresponding \p x, \p y, etc. vector fields.
*/
// Primary template: declared only; the specializations below supply the
// actual definitions for 1..4 elements.
template <typename T, int vec_elements> struct CubVector;

#ifndef DOXYGEN_SHOULD_SKIP_THIS    // Do not document

enum
{
    /// The maximum number of elements in CUDA vector types
    MAX_VEC_ELEMENTS = 4,
};
/**
 * Generic vector-1 type
 *
 * Fallback used when no built-in CUDA vector type is mapped for \p T:
 * a plain struct exposing the \p x field plus the BaseType/Type
 * typedefs (Type aliases the struct itself, per the CubVector contract).
 */
template <typename T>
struct CubVector<T, 1>
{
    T x;                            // lane 0

    typedef T BaseType;             // element type
    typedef CubVector<T, 1> Type;   // self: no built-in alias exists
};

/**
 * Generic vector-2 type
 *
 * Fallback two-lane wrapper (fields x, y) for types without a built-in
 * CUDA vector equivalent.
 */
template <typename T>
struct CubVector<T, 2>
{
    T x;                            // lane 0
    T y;                            // lane 1

    typedef T BaseType;             // element type
    typedef CubVector<T, 2> Type;   // self: no built-in alias exists
};

/**
 * Generic vector-3 type
 *
 * Fallback three-lane wrapper (fields x, y, z) for types without a
 * built-in CUDA vector equivalent.
 */
template <typename T>
struct CubVector<T, 3>
{
    T x;                            // lane 0
    T y;                            // lane 1
    T z;                            // lane 2

    typedef T BaseType;             // element type
    typedef CubVector<T, 3> Type;   // self: no built-in alias exists
};

/**
 * Generic vector-4 type
 *
 * Fallback four-lane wrapper (fields x, y, z, w) for types without a
 * built-in CUDA vector equivalent.
 */
template <typename T>
struct CubVector<T, 4>
{
    T x;                            // lane 0
    T y;                            // lane 1
    T z;                            // lane 2
    T w;                            // lane 3

    typedef T BaseType;             // element type
    typedef CubVector<T, 4> Type;   // self: no built-in alias exists
};
/**
 * Macro for expanding partially-specialized built-in vector types.
 *
 * For a given (base_type, short_type) pair this emits the four
 * CubVector specializations (1..4 elements) that derive from the
 * corresponding built-in CUDA vector types (e.g. short_type##2 is
 * int2 for (int, int)), and equips each with componentwise
 * operator+ / operator- usable from both host and device code.
 * (No comments can be placed inside the macro body itself: the
 * backslash continuations would make a // comment swallow the rest
 * of the definition.)
 */
#define CUB_DEFINE_VECTOR_TYPE(base_type,short_type)                 \
                                                                     \
    template<> struct CubVector<base_type, 1> : short_type##1        \
    {                                                                \
        typedef base_type BaseType;                                  \
        typedef short_type##1 Type;                                  \
        __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x + other.x;                                  \
            return retval;                                           \
        }                                                            \
        __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x - other.x;                                  \
            return retval;                                           \
        }                                                            \
    };                                                               \
                                                                     \
    template<> struct CubVector<base_type, 2> : short_type##2        \
    {                                                                \
        typedef base_type BaseType;                                  \
        typedef short_type##2 Type;                                  \
        __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x + other.x;                                  \
            retval.y = y + other.y;                                  \
            return retval;                                           \
        }                                                            \
        __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x - other.x;                                  \
            retval.y = y - other.y;                                  \
            return retval;                                           \
        }                                                            \
    };                                                               \
                                                                     \
    template<> struct CubVector<base_type, 3> : short_type##3        \
    {                                                                \
        typedef base_type BaseType;                                  \
        typedef short_type##3 Type;                                  \
        __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x + other.x;                                  \
            retval.y = y + other.y;                                  \
            retval.z = z + other.z;                                  \
            return retval;                                           \
        }                                                            \
        __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x - other.x;                                  \
            retval.y = y - other.y;                                  \
            retval.z = z - other.z;                                  \
            return retval;                                           \
        }                                                            \
    };                                                               \
                                                                     \
    template<> struct CubVector<base_type, 4> : short_type##4        \
    {                                                                \
        typedef base_type BaseType;                                  \
        typedef short_type##4 Type;                                  \
        __host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x + other.x;                                  \
            retval.y = y + other.y;                                  \
            retval.z = z + other.z;                                  \
            retval.w = w + other.w;                                  \
            return retval;                                           \
        }                                                            \
        __host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
            CubVector retval;                                        \
            retval.x = x - other.x;                                  \
            retval.y = y - other.y;                                  \
            retval.z = z - other.z;                                  \
            retval.w = w - other.w;                                  \
            return retval;                                           \
        }                                                            \
    };

// Expand CUDA vector types for built-in primitives.
// Note: bool has no built-in CUDA vector type, so it is mapped onto the
// uchar vector family below.
CUB_DEFINE_VECTOR_TYPE(char,               char)
CUB_DEFINE_VECTOR_TYPE(signed char,        char)
CUB_DEFINE_VECTOR_TYPE(short,              short)
CUB_DEFINE_VECTOR_TYPE(int,                int)
CUB_DEFINE_VECTOR_TYPE(long,               long)
CUB_DEFINE_VECTOR_TYPE(long long,          longlong)
CUB_DEFINE_VECTOR_TYPE(unsigned char,      uchar)
CUB_DEFINE_VECTOR_TYPE(unsigned short,     ushort)
CUB_DEFINE_VECTOR_TYPE(unsigned int,       uint)
CUB_DEFINE_VECTOR_TYPE(unsigned long,      ulong)
CUB_DEFINE_VECTOR_TYPE(unsigned long long, ulonglong)
CUB_DEFINE_VECTOR_TYPE(float,              float)
CUB_DEFINE_VECTOR_TYPE(double,             double)
CUB_DEFINE_VECTOR_TYPE(bool,               uchar)

// Undefine macros
#undef CUB_DEFINE_VECTOR_TYPE

#endif // DOXYGEN_SHOULD_SKIP_THIS
/** @} */ // end group UtilModule
} // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
#include "HoughRec.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <vector>
#include "ErrorCode.h"
#include "CoordiSet.h"
using namespace std;
// 宏:BORDER_COLOR
// 定义边界颜色
#define BORDER_COLOR 255
// 宏:BK_COLOR
// 定义背景颜色
#define BK_COLOR 0
// 宏:DEBUG
// 定义是否输出调试信息
//#define DEBUG
// 宏:M_PI
// π 值。对于某些操作系统,M_PI 可能没有定义,这里补充定义 M_PI。
#ifndef M_PI
#define M_PI 3.14159265359
#endif
//--------------------------全局方法声明------------------------------------
// 函数:_findMinMaxCoordinates(根据输入点集的坐标,找到最上、最下、最左、最右
// 的点,从而确定图像的宽和高)
static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset,
int *xmin, int *ymin,
int *xmax, int *ymax);
//--------------------------全局方法实现------------------------------------
// 函数:_findMinMaxCoordinates(根据输入点集的坐标,找到最上、最下、最左、最右
// 的点,从而确定图像的宽和高)
// Function: _findMinMaxCoordinates
// Scans the input coordinate set and reports the leftmost, rightmost,
// topmost and bottommost coordinates, i.e. the bounding box that
// determines a suitable image width/height for the point set.
// Parameters:
//   guidingset - input coordinate set (tplData stores x,y pairs)
//   xmin, ymin, xmax, ymax - outputs receiving the extreme coordinates
// Returns NO_ERROR on success or the error code of the failing basic op.
static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset,
                                           int *xmin, int *ymin,
                                           int *xmax, int *ymax)
{
    // Local error-code variable.
    int errcode;

    // Allocate a scratch CoordiSet on the host side.
    CoordiSet *tmpcoordiset;
    errcode = CoordiSetBasicOp::newCoordiSet(&tmpcoordiset);
    if (errcode != NO_ERROR)
        return errcode;

    errcode = CoordiSetBasicOp::makeAtHost(tmpcoordiset, guidingset->count);
    if (errcode != NO_ERROR) {
        // BUGFIX: release the scratch set on this error path; the
        // original returned without deleting it and leaked it.
        CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
        return errcode;
    }

    // Copy the coordinate set to the host.
    errcode = CoordiSetBasicOp::copyToHost(guidingset, tmpcoordiset);
    if (errcode != NO_ERROR) {
        // BUGFIX: release the scratch set on this error path as well.
        CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
        return errcode;
    }

    // Seed the extrema with the first point.
    xmin[0] = xmax[0] = tmpcoordiset->tplData[0];
    ymin[0] = ymax[0] = tmpcoordiset->tplData[1];

    // Scan the remaining points for the extreme coordinates.
    for (int i = 1; i < tmpcoordiset->count; i++) {
        // Minimum x.
        if (xmin[0] > tmpcoordiset->tplData[2 * i])
            xmin[0] = tmpcoordiset->tplData[2 * i];
        // Maximum x.
        if (xmax[0] < tmpcoordiset->tplData[2 * i])
            xmax[0] = tmpcoordiset->tplData[2 * i];
        // Minimum y.
        if (ymin[0] > tmpcoordiset->tplData[2 * i + 1])
            ymin[0] = tmpcoordiset->tplData[2 * i + 1];
        // Maximum y.
        if (ymax[0] < tmpcoordiset->tplData[2 * i + 1])
            ymax[0] = tmpcoordiset->tplData[2 * i + 1];
    }

    // Release the scratch coordinate set.
    CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
    return errcode;
}
//--------------------------类声明实现------------------------------------
// Class: Pair (one pair taken from a group of two or more parallel lines)
// Inherits from: nothing
// Used as the result element when all line pairs are extracted from a
// parallel-line group.
class Pair{
public:
    int rho1;   // distance parameter of the first line
    int rho2;   // distance parameter of the second line
    int vote1;  // accumulator votes of the first line
    int vote2;  // accumulator votes of the second line

    // Build a pair from two (rho, vote) records.
    Pair(int a, int b, int v1, int v2) :
            rho1(a), rho2(b), vote1(v1), vote2(v2) {}
};
// 类:ThetaCluster(存放一组角度相同直线角度、距离rho、票数,即记录一组平行线)
// 继承自:无
// 检测平行四边形时,用此类存放可能的对边。
class ThetaCluster{
public:
float theta; // 该簇平行线的角度值,弧度制
vector<int> rhoList; // 存放多条直线的距离值
vector<int> voteList; // 存放多条直线的投票值
// 构造方法
ThetaCluster(float ang,int rho,int vote){
theta=ang;
rhoList.push_back(rho);
voteList.push_back(vote);
}
// 加入一条直线的距离值
void addRho(int rho){
rhoList.push_back(rho);
}
// 加入一条直线的投票值
void addVote(int vote){
voteList.push_back(vote);
}
// 提取该组中所有的平行线对,结果用pairList向量返回
void getPair(vector<Pair> &pairList)
{
if(rhoList.size()>=2)
for(int i=0;i<rhoList.size();i++)
for(int j=i+1;j<rhoList.size();j++){
Pair p(rhoList[i],rhoList[j],voteList[i],voteList[j]);
pairList.push_back(p);
}
}
};
// Class: PossiRectSet (records the positions, within the cluster
// vector, of two parallel-line groups that may form parallelograms)
// Inherits from: nothing
// Whenever two groups in the parallel-line-group vector satisfy the
// requested angle difference, one of these objects stores their two
// indices; such a pair of groups may yield several parallelograms.
class PossiRectSet
{
public:
    int indexA;  // index of the first parallel-line group
    int indexB;  // index of the second parallel-line group

    // Record the two group indices.
    PossiRectSet(int a, int b) : indexA(a), indexB(b) {}
};
//--------------------------成员方法实现------------------------------------
// Host 成员方法:detectParallelogram(Hough 变换检测平行四边形)
// 把参数给出的多条直线按角度(可以指定误差范围)聚类,每个角度形成一个平行线组
// (每组中要两条以上直线,否则无法构成四边形),然后检测哪些两个组的角度满足参
// 数要求,则每组中任两条直线可以和另一组中任两条直线构成平行四边形,放入参数数
// 组中返回。
__host__ int HoughRec::detectParallelogram(
LineParam *lineparam,
int linemax,
int *recsmax,
RecPolarParam *recpolarparm,
float anglelist[],
int anglenum
){
// 检查输入直线坐标集是否为 NULL,如果为 NULL 直接报错返回。
if (lineparam == NULL)
return NULL_POINTER;
// 用角度来聚类,放入平行线簇向量
vector<ThetaCluster> thetaIndexParam;
// 遍历参数给出的每一条直线
for(int i=0;i<linemax;i++)
{
bool inserted=false;
float temp_angle=lineparam[i].angle;
// 如果当前直线倾角和平行线簇向量中任一倾角相似,则追加到相似倾角中,
// 簇向量长度不用增加。
for(int j=0;j<thetaIndexParam.size();j++)
// 角度相似情况是共线
if(fabs(temp_angle-thetaIndexParam[j].theta)<toloranceAngle)
{
thetaIndexParam[j].addRho(lineparam[i].distance);
thetaIndexParam[j].addVote(lineparam[i].votes);
inserted=true;
break;
}
// 角度相差180情况左右也是共线(1度和179度),不过此段用途不大,直
// 线处理时已经做过类似的处理了。
else if(fabs( M_PI-temp_angle-thetaIndexParam[j].theta)
<toloranceAngle )
{// 如果两个角度相差约180,则他们的cos值大小相等,符号相反
// 故他们的distance值符号会相反,需要调整过来,求反
thetaIndexParam[j].addRho(-lineparam[i].distance);
thetaIndexParam[j].addVote(lineparam[i].votes);
inserted=true;
break;
}
// 如果和列表中任一倾角都不相似,则单独作为一项加入向量,向量长度加一
if(inserted == false){
ThetaCluster temp(lineparam[i].angle,lineparam[i].distance,
lineparam[i].votes);
thetaIndexParam.push_back(temp);
}
}// end of for
#ifdef DEBUG
cout<<"parallel lines groups num="<<thetaIndexParam.size()<<endl;
#endif
// 遍历直线簇向量,选出相差指定角度的簇,他们可能形成四边形,放入列表
int size=thetaIndexParam.size();
vector<PossiRectSet> PossiRectSetList;
for(int i=0;i<size;i++)
// 平行线条数起码两条以上,才有可能构成平行四边形
if(thetaIndexParam[i].rhoList.size()>=2 )
for(int j=i+1;j<size;j++)
{ // 平行线条数起码两条以上,才有可能构成平行四边形
if(thetaIndexParam[j].rhoList.size()>=2)
{
float diff_angle=fabs(thetaIndexParam[i].theta
-thetaIndexParam[j].theta);
// 满足指定角度列表中的任一角度(或其补角),可能是需要的平行四边
//形,放入列表
bool satify=false;
for(int a=0;a<anglenum;a++)
if( fabs(diff_angle-anglelist[a])<toloranceAngle
|| fabs((M_PI-diff_angle)-anglelist[a])<toloranceAngle)
{satify=true; break;}
if(satify)
{ PossiRectSet prs(i,j);
PossiRectSetList.push_back(prs);
}
}
}
#ifdef DEBUG
cout<<"rect groups num="<<PossiRectSetList.size()<<endl;
#endif
// 产生可能的平行四边形的参数列表
vector<RecPolarParam> RectParamList;
for(int i=0;i<PossiRectSetList.size();i++)
{ // 对满足夹角条件的每两个平行线簇,检测所有可能的平行四边形
int sideA=PossiRectSetList[i].indexA;
int sideB=PossiRectSetList[i].indexB;
ThetaCluster clusterA=thetaIndexParam[sideA];
ThetaCluster clusterB=thetaIndexParam[sideB];
// 从向量中取出满足指定夹角的平行线对
vector<Pair> pairAList;
vector<Pair> pairBList;
clusterA.getPair(pairAList);
clusterB.getPair(pairBList);
// 任两个线对都能构成一个平行四边形
for(int a=0;a<pairAList.size();a++)
for(int b=0;b<pairBList.size();b++)
{
RecPolarParam rect;
rect.theta1=clusterA.theta;
// 把对边的两条直线票数合并,作为票数
rect.votes1=pairAList[a].vote1+pairAList[a].vote2;
rect.rho1a=pairAList[a].rho1;
rect.rho1b=pairAList[a].rho2;
rect.theta2=clusterB.theta;
// 把对边的两条直线票数合并,作为票数
rect.votes2=pairBList[b].vote1+pairBList[b].vote2;
rect.rho2a=pairBList[b].rho1;
rect.rho2b=pairBList[b].rho2;
// 得到的四边形放入列表中
RectParamList.push_back(rect);
}
}// end of outer for
#ifdef DEBUG
cout<<" rects num="<<RectParamList.size()<<endl;
#endif
// 输出可能平行四边形列表
#ifdef DEBUG
if(RectParamList.size()>0)
{ cout<<"all possible rectangle para(no only returned) \n";
cout<<"theat1 rho1a rho1b vote1 theat2 rho2a rho2b vote2 \n ";
for(int i=0;i<RectParamList.size();i++)
cout<<"["<<RectParamList[i].theta1<<"],"
<<RectParamList[i].rho1a<<","
<<RectParamList[i].rho1b<<", { "
<<RectParamList[i].votes1<<"}, "
<<"["<<RectParamList[i].theta2<<"],"
<<RectParamList[i].rho2a<<","
<<RectParamList[i].rho2b<<" {"
<<RectParamList[i].votes2<<"}, \n";
}
#endif
// 根据参数给出的数量,从列表中复制到返回数组中
// 如果列表中的四边形数量少于最大值,则全部返回
if(RectParamList.size()<*recsmax)
*recsmax=RectParamList.size();
// 如果如果列表中的四边形数量大于等于最大值,则返回列表中的前面若干个
for(int i=0;i<*recsmax;i++)
recpolarparm[i]=RectParamList[i];
return NO_ERROR;
}
// Host member method: detectRectangle (rectangle detection via Hough)
// Thin wrapper around detectParallelogram that fixes the required angle
// between the two side groups to a single value: a right angle.
__host__ int HoughRec::detectRectangle(
    LineParam *lineparam,
    int linemax,
    int *recsmax,
    RecPolarParam *recpolarparm
){
    // A rectangle is a parallelogram whose sides meet at 90 degrees,
    // i.e. pi/2 in radians.
    float angleList[1] = { (float)(M_PI / 2.0) };
    return HoughRec::detectParallelogram(lineparam, linemax, recsmax,
                                         recpolarparm, angleList, 1);
}
// Host member method: polar2XYparam (convert polar rectangle parameters
// to corner coordinates)
// For every rectangle described by two pairs of parallel lines in polar
// form (theta, rho), intersects adjacent sides to obtain the four
// corner points, stores them (made non-negative) together with the
// center point, and combines the side votes into the rectangle votes.
// Parameters:
//   recpolarparam - input array of rectangles in polar form
//   recxyparam    - output array of rectangles in corner (x-y) form
//   recnum        - number of rectangles to convert
//   derho         - distance step used by the Hough accumulator
__host__ int HoughRec::polar2XYparam (
        RecPolarParam *recpolarparam,
        RecXYParam *recxyparam,
        int recnum,
        float derho
){
    // Reject a NULL input array.
    if (recpolarparam == NULL)
        return NULL_POINTER;
    // Reject a NULL output array.
    if (recxyparam == NULL)
        return NULL_POINTER;
    // Corner coordinates of the current rectangle.
    int x[4];
    int y[4];
    // Angles of the two adjacent side directions and their sin/cos.
    float theta1, theta2;
    float sintheta1, sintheta2, costheta1, costheta2;
    // Distances of the two side pairs from the origin, arranged so that
    // index i selects the two lines whose intersection is corner i.
    int rho1[4];
    int rho2[4];
    // Process each rectangle in turn.
    int idx=0;
    for (idx=0; idx<recnum; idx++) {
        // Fetch the two side angles (already in radians).
        theta1=recpolarparam[idx].theta1;
        theta2=recpolarparam[idx].theta2;
        // Precompute the sines and cosines; reused for all corners.
        sintheta1=sin(theta1);
        costheta1=cos(theta1);
        sintheta2=sin(theta2);
        costheta2=cos(theta2);
        // Arrange the line distances clockwise around the rectangle.
        rho1[0]=recpolarparam[idx].rho1a;
        rho1[1]=recpolarparam[idx].rho1a;
        rho1[2]=recpolarparam[idx].rho1b;
        rho1[3]=recpolarparam[idx].rho1b;
        rho2[0]=recpolarparam[idx].rho2a;
        rho2[1]=recpolarparam[idx].rho2b;
        rho2[2]=recpolarparam[idx].rho2b;
        rho2[3]=recpolarparam[idx].rho2a;
        // Choose the algebraic form by the side angle so the divisor
        // never involves cos(90), sin(0) or sin(180) (no divide by 0).
        for (int i=0; i<4; i++) {
            if (fabs(theta1 * 180.0f / M_PI)<45.0f ||
                fabs(fabs(theta1 * 180.0f / M_PI)-180.0f)<45.0f)
            {
                y[i]=(costheta1 * rho2[i] * derho -
                      costheta2 * rho1[i] * derho) /
                     (costheta1 * sintheta2-costheta2 * sintheta1);
                x[i]=(rho1[i] * derho-y[i] * sintheta1) / costheta1;
            } else {
                x[i]=(sintheta1 * rho2[i] * derho -
                      sintheta2 * rho1[i] * derho) /
                     (costheta2 * sintheta1-costheta1 * sintheta2);
                y[i]=(rho1[i] * derho-x[i] * costheta1) / sintheta1;
            }
        }
        // Write the corner parameters into the rectangle structure,
        // forcing the coordinates non-negative first.
        x[0]=(x[0]>0 ? x[0] :-x[0]);
        x[1]=(x[1]>0 ? x[1] :-x[1]);
        x[2]=(x[2]>0 ? x[2] :-x[2]);
        x[3]=(x[3]>0 ? x[3] :-x[3]);
        recxyparam[idx].x1=x[0];
        recxyparam[idx].x2=x[1];
        recxyparam[idx].x3=x[2];
        recxyparam[idx].x4=x[3];
        recxyparam[idx].xc=(x[0]+x[1]+x[2]+x[3]) / 4;
        // BUGFIX: the original repeated the absolute-value pass on x[]
        // here (a copy-paste slip and a no-op the second time); by
        // symmetry with the x block it is the y coordinates that must
        // be forced non-negative before being stored.
        y[0]=(y[0]>0 ? y[0] :-y[0]);
        y[1]=(y[1]>0 ? y[1] :-y[1]);
        y[2]=(y[2]>0 ? y[2] :-y[2]);
        y[3]=(y[3]>0 ? y[3] :-y[3]);
        recxyparam[idx].y1=y[0];
        recxyparam[idx].y2=y[1];
        recxyparam[idx].y3=y[2];
        recxyparam[idx].y4=y[3];
        recxyparam[idx].yc=(y[0]+y[1]+y[2]+y[3]) / 4;
        // Rectangle votes: twice the sum of the two side-pair votes.
        recxyparam[idx].votes=
                2 * (recpolarparam[idx].votes1+recpolarparam[idx].votes2);
    }
    // Done.
    return NO_ERROR;
}
// Host member method: detectRectangle (detect rectangles in the image
// inimg; results returned through an array)
// Runs Hough line detection on the image ROI, combines the detected
// lines into rectangles, converts the polar results to corner
// coordinates and offsets them by the ROI origin so the returned
// rectangles are in global image coordinates.
__host__ int
HoughRec:: detectRectangle(
        Image *inimg,          // input image
        int linenum,           // maximum number of lines
        int linethres,         // line vote threshold
        float lineangthres,    // angle tolerance for merging similar lines
        int linedisthres,      // distance tolerance for merging similar lines
        int *rectnum,          // in: capacity; out: number of rectangles
        RecXYParam *rectxypara // output rectangle corner parameters
){
    HoughLine houghline;
    // Angle and distance step sizes for line detection.
    houghline.setDeTheta(M_PI / 180.0);
    houghline.setDeRho(1);
    // Vote threshold; tune to the tile size and the line thickness.
    houghline.setThreshold(linethres);
    // Similar-line merge parameters: e.g. angles within 6 degrees and
    // distances within 15 may be treated as the same line.
    houghline.setThresAng(lineangthres);
    houghline.setThresDis(linedisthres);
    int linesMax =linenum;
    LineParam *lineparam= new LineParam[linesMax];
    // Line detection.
#ifdef DEBUG
    Image *outimg;
    ImageBasicOp::newImage(&outimg);
    ImageBasicOp::makeAtHost(outimg, inimg->width, inimg->height);
    houghline.houghLineImg(inimg, NULL,outimg, &linesMax, lineparam);
    ImageBasicOp::copyToHost(outimg);
    ImageBasicOp::writeToFile("line_out.bmp", outimg);
    ImageBasicOp::deleteImage(outimg);
    cout << "linesMax = " << linesMax << endl;
    printf("序号 angle distance vote \n");
    for (int i = 0; i < linesMax; i++)
        printf("%4d %12f(%12f) %5d %5d\n",i,lineparam[i].angle,lineparam[i].angle/M_PI*180,
               lineparam[i].distance,lineparam[i].votes);
#else
    /*
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float runTime;
    cudaEventRecord(start, 0);*/
    houghline.houghLine(inimg, NULL, &linesMax, lineparam);
    /*
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    printf(" %f ",runTime);*/
#endif
    // Build rectangles from the detected lines. Without this
    // synchronization the code below produces wrong results.
    // NOTE(review): cudaThreadSynchronize() is deprecated in modern
    // CUDA toolkits; cudaDeviceSynchronize() is the replacement —
    // confirm toolkit version before changing.
    cudaThreadSynchronize();
    // Allocate the angle-distance (polar) parameter array.
    RecPolarParam *rectpp=new RecPolarParam[*rectnum];
    // Detect rectangles into the polar array; the count goes to *rectnum.
    detectRectangle(lineparam,linesMax,rectnum,rectpp);
    // Convert the polar parameter array to corner (x-y) parameters.
    polar2XYparam (rectpp, rectxypara, *rectnum, 1);
    // Convert tile-local corner coordinates to global coordinates by
    // adding the ROI origin.
    for(int i=0;i<*rectnum;i++){
        rectxypara[i].x1 += inimg->roiX1;
        rectxypara[i].x2 += inimg->roiX1;
        rectxypara[i].x3 += inimg->roiX1;
        rectxypara[i].x4 += inimg->roiX1;
        rectxypara[i].y1 += inimg->roiY1;
        rectxypara[i].y2 += inimg->roiY1;
        rectxypara[i].y3 += inimg->roiY1;
        rectxypara[i].y4 += inimg->roiY1;
    }
    // Reclaim the local dynamic allocations.
    if(lineparam!=NULL)
    {delete[] lineparam;lineparam=NULL;}
    if(rectpp!=NULL)
    {delete[] rectpp;rectpp=NULL;}
    return NO_ERROR;
}
// Host member method: detectRectangle (detect rectangles in a
// coordinate set; results returned through an array)
// Renders the coordinate set into a temporary image (foreground 255,
// background 0) and delegates to the image-based overload.
__host__ int
HoughRec:: detectRectangle(
        CoordiSet *coor,       // input coordinate set
        int linenum,           // maximum number of lines
        int linethres,         // line vote threshold
        float lineangthres,    // angle tolerance for merging similar lines
        int linedisthres,      // distance tolerance for merging similar lines
        int *rectnum,          // in: capacity; out: number of rectangles
        RecXYParam *rectxypara // output rectangle corner parameters
){
    if (coor == NULL)
        return INVALID_DATA;

    // Bounding box of the point set.
    int minx, maxx, miny, maxy;
    Image *inimg;
    ImageBasicOp::newImage(&inimg);
    ImgConvert imgcvt(BORDER_COLOR, BK_COLOR);

    // ---------------- convert the coor argument into an image ----------------
    // Pre-pass: find the extent of the outer contour.
    int errorcode = _findMinMaxCoordinates(coor, &minx, &miny, &maxx, &maxy);
    if (errorcode != NO_ERROR) {
        // BUGFIX: the original returned a bare 0 here (masking the
        // failure) and leaked the work image; release the image and
        // propagate the real error code instead.
        ImageBasicOp::deleteImage(inimg);
        return errorcode;
    }

    // Allocate the work image. Width/height are max coordinate + 2: +1
    // because coordinates start at 0, and +1 more to keep every point
    // strictly inside the image.
    ImageBasicOp::makeAtHost(inimg, maxx + 2, maxy + 2);
    // Draw the coordinate set into the image: foreground 255, background 0.
    imgcvt.cstConvertToImg(coor, inimg);

    // Delegate the remaining work to the image-based interface.
    detectRectangle(
        inimg,          // input image
        linenum,        // maximum number of lines
        linethres,      // line vote threshold
        lineangthres,   // angle tolerance for similar lines
        linedisthres,   // distance tolerance for similar lines
        rectnum,        // out: number of rectangles
        rectxypara      // out: rectangle corner parameters
    );

    ImageBasicOp::deleteImage(inimg);
    return NO_ERROR;
}
// Host member method: detectRealRectangle (filter the candidate
// rectangles down to the real ones, using a coordinate set as the
// reference data)
// Renders the coordinate set into a temporary image and delegates to
// the image-based detectRealRectangle overload.
__host__ int
HoughRec:: detectRealRectangle(
        CoordiSet *coor,            // input coordinate set
        int rectnum,                // number of candidate rectangles
        RecXYParam *rectxypara,     // candidate rectangle parameters
        int distance,               // distance used by the real-line test
        float percent,              // coverage threshold of the real-line test
        int *realrectnum,           // in: capacity; out: real rectangle count
        RecXYParam *realrectxypara  // real rectangle corner parameters
){
    if (coor == NULL)
        return INVALID_DATA;

    // Bounding box of the point set.
    int minx, maxx, miny, maxy;
    Image *inimg;
    ImageBasicOp::newImage(&inimg);
    ImgConvert imgcvt(BORDER_COLOR, BK_COLOR);

    // ---------------- convert the coor argument into an image ----------------
    // Pre-pass: find the extent of the outer contour.
    int errorcode = _findMinMaxCoordinates(coor, &minx, &miny, &maxx, &maxy);
    if (errorcode != NO_ERROR) {
        // BUGFIX: the original returned a bare 0 here (masking the
        // failure) and leaked the work image; release the image and
        // propagate the real error code instead.
        ImageBasicOp::deleteImage(inimg);
        return errorcode;
    }

    // Allocate the work image; max coordinate + 2 keeps all points
    // strictly inside the image.
    ImageBasicOp::makeAtHost(inimg, maxx + 2, maxy + 2);
    // Draw the coordinate set: foreground 255, background 0.
    imgcvt.cstConvertToImg(coor, inimg);

    // Delegate the remaining work to the image-based interface.
    detectRealRectangle(
        inimg,           // input image
        rectnum,         // number of candidates
        rectxypara,      // candidate parameters
        distance,        // real-line test distance
        percent,         // real-line test threshold
        realrectnum,     // out: real rectangle count
        realrectxypara   // out: real rectangle parameters
    );

    ImageBasicOp::deleteImage(inimg);
    return NO_ERROR;
}
// Host member method: detectRealRectangle (filter the candidate
// rectangles down to the real ones; results returned through an array)
// For each candidate rectangle, tests all four sides against the image
// with HoughLine::realLine; only rectangles whose four sides all pass
// are copied to the output array (up to its capacity).
__host__ int
HoughRec:: detectRealRectangle(
        Image *inimg,               // input image
        int rectnum,                // number of candidate rectangles
        RecXYParam *rectxyparam,    // candidate rectangle parameters
        int distance,               // distance used by the real-line test
        float percent,              // coverage threshold of the real-line test
        int *realrectnum,           // in: capacity; out: real rectangle count
        RecXYParam *realrectxypara  // real rectangle corner parameters
){
    // Number of real rectangles collected so far.
    int pointer=0;
    HoughLine houghline;
    for(int i=0; i< rectnum; i++) {
        // Test each of the rectangle's four sides for being real.
        bool b1,b2,b3,b4;
        // Reset the ROI to the whole image before testing.
        inimg->roiX1=0;
        inimg->roiX2=inimg->width;
        inimg->roiY1=0;
        inimg->roiY2=inimg->height;
        b1=houghline.realLine(inimg,rectxyparam[i].x1,rectxyparam[i].y1,
                              rectxyparam[i].x2,rectxyparam[i].y2, distance,percent);
        b2=houghline.realLine(inimg,rectxyparam[i].x2,rectxyparam[i].y2,
                              rectxyparam[i].x3,rectxyparam[i].y3, distance,percent);
        b3=houghline.realLine(inimg,rectxyparam[i].x3,rectxyparam[i].y3,
                              rectxyparam[i].x4,rectxyparam[i].y4, distance,percent);
        b4=houghline.realLine(inimg,rectxyparam[i].x4,rectxyparam[i].y4,
                              rectxyparam[i].x1,rectxyparam[i].y1, distance,percent);
        // Keep the rectangle only when all four sides are real lines
        // and the output array still has room.
#ifdef DEBUG
        cout<<"b1="<<b1<<" b2="<<b2<<" b3="<<b3<<" b4="<<b4<< endl;
#endif
        if( b1 && b2 && b3 && b4 && pointer<*realrectnum){
            realrectxypara[pointer]=rectxyparam[i];
            pointer++;
        }
    }
    // Report the number of real rectangles found.
    *realrectnum=pointer;
    return NO_ERROR;
}
// Host member method: drawRect (draw x-y quadrilaterals into an image file)
// Renders each quadrilateral in recxyparam as a closed polyline into a
// fresh w-by-h image (background color 0) and writes the image to the
// given file.
// Parameters:
//   filename   - output image file name
//   w, h       - output image dimensions
//   recxyparam - quadrilateral corner parameters to draw
//   rectmax    - number of quadrilaterals in recxyparam
__host__ int
HoughRec:: drawRect(
        string filename,
        size_t w,
        size_t h,
        RecXYParam recxyparam[],
        int rectmax
){  // Create the coordinate set used as the polyline container.
    CoordiSet *cst;
    CoordiSetBasicOp::newCoordiSet(&cst);
    // A single coordinate set holding just the 4 corner points.
    CoordiSetBasicOp::makeAtHost(cst, 4);
    // Create the output image.
    Image *recimg;
    ImageBasicOp::newImage(&recimg);
    ImageBasicOp::makeAtHost(recimg,w,h);
    ImageDrawer id;
    id.setBrushColor(0);
    // Fill the background.
    id.brushAllImage(recimg);
    for (int i=0; i< rectmax; i++) {
        CoordiSetBasicOp::copyToHost(cst);
        cst->tplData[0]=recxyparam[i].x1;
        cst->tplData[1]=recxyparam[i].y1;
        cst->tplData[2]=recxyparam[i].x2;
        cst->tplData[3]=recxyparam[i].y2;
        cst->tplData[4]=recxyparam[i].x3;
        cst->tplData[5]=recxyparam[i].y3;
        cst->tplData[6]=recxyparam[i].x4;
        cst->tplData[7]=recxyparam[i].y4;
        id.drawTrace(recimg, cst);// connect the points into a closed shape
    }
    // Write the image to a disk file.
    ImageBasicOp::copyToHost(recimg);
    ImageBasicOp::writeToFile(filename.c_str(), recimg);
    ImageBasicOp::deleteImage(recimg);
    // BUGFIX: release the coordinate set as well; the original never
    // called deleteCoordiSet and leaked it on every invocation.
    CoordiSetBasicOp::deleteCoordiSet(cst);
    return NO_ERROR;
}
// Host member method: pieceRealRectImg (tile-based rectangle detection
// on the image; results written to image files)
// Splits the image into piecenum x piecenum tiles and runs Hough line
// plus rectangle detection per tile; a second pass repeats this on
// tiles shifted by half a tile in both directions so rectangles that
// straddle tile borders are also caught. Only rectangles whose four
// sides pass the real-line test are kept. The detected lines of the two
// passes and the surviving rectangles are written to the three given
// image files.
__host__ int
HoughRec:: pieceRealRectImg(
        Image *inimg,
        string lineoutfile1,
        string lineoutfile2,
        string rectoutfile,
        int piecenum,
        int linenum,
        int linethres,
        float lineangthres,
        int linedisthres,
        int rectnum,
        int distance,
        float percent
){
    // The line-detection results of the two tilings go to two images.
    Image *outimg;
    ImageBasicOp::newImage(&outimg);
    ImageBasicOp::makeAtHost(outimg, inimg->width, inimg->height);
    Image *outimg2;
    ImageBasicOp::newImage(&outimg2);
    ImageBasicOp::makeAtHost(outimg2, inimg->width, inimg->height);
    // Create the rectangle output image.
    Image *recoutimg;
    ImageBasicOp::newImage(&recoutimg);
    ImageBasicOp::makeAtHost(recoutimg, inimg->width, inimg->height);
    // Fill its background via ImageDrawer.
    ImageDrawer id;
    id.setBrushColor(0);
    id.brushAllImage(recoutimg);
    // Compute the tile size.
    int cell_x=inimg->width/piecenum;
    int cell_y=inimg->height/piecenum;
#ifdef DEBUG
    printf("cell_x=%d cell_y=%d\n",cell_x,cell_y);
#endif
    HoughLine hough;
    // Line-detection steps: 1 degree in angle, 1 pixel in distance.
    hough.setDeTheta(M_PI / 180.0);
    hough.setDeRho(1);
    // Vote threshold; tune to the tile size and the line thickness.
    hough.setThreshold(linethres);
    // Similar-line merge parameters: e.g. angles within 6 degrees and
    // distances within 15 may be treated as the same line.
    hough.setThresAng(lineangthres);
    hough.setThresDis(linedisthres);
    // Process the tiles.
    for(int y=0;y<piecenum;y++)
    for(int x=0;x<piecenum;x++)
    {// ..................... tiling stage one .........................
#ifdef DEBUG
        printf(" \n.................y=[%d] x=[%d]\n",y,x);
#endif
        int linesMax =linenum;
        LineParam *lineparam= new LineParam[linesMax];
        // Pre-fill with sentinel values.
        for(int i=0;i<linesMax;i++){
            lineparam[i].angle=-1;
            lineparam[i].distance=-1;
            lineparam[i].votes=-1;
        }
        // ROI of the current tile, on input and line-output images.
        inimg->roiX1=x*cell_x;
        inimg->roiX2=x*cell_x+cell_x-1;
        inimg->roiY1=y*cell_y;
        inimg->roiY2=y*cell_y+cell_y-1;
        outimg->roiX1= inimg->roiX1;
        outimg->roiX2= inimg->roiX2;
        outimg->roiY1= inimg->roiY1;
        outimg->roiY2= inimg->roiY2;
#ifdef DEBUG
        printf("x1=%d x2=%d y1=%d y2=%d \n"
               ,inimg->roiX1,inimg->roiX2
               ,inimg->roiY1,inimg->roiY2);
#endif
        // Note: the resulting line parameters are in tile-local
        // coordinates, not global ones; they must be converted.
        hough.houghLineImg(inimg, NULL, outimg, &linesMax, lineparam);
        // Build rectangles from the detected lines. Without this
        // synchronization the code below produces wrong results.
        // NOTE(review): cudaThreadSynchronize() is deprecated; the
        // modern equivalent is cudaDeviceSynchronize() — confirm
        // toolkit version before changing.
        cudaThreadSynchronize();
        RecPolarParam *rectpp=new RecPolarParam[rectnum];
        // Pre-fill with sentinel values.
        for(int i=0;i<rectnum;i++)
            rectpp[i].theta1=-10000;
        // Receives the actual number of rectangles in this tile.
        int rectMax=rectnum;
        detectRectangle(lineparam,linesMax,&rectMax,rectpp);
        RecXYParam *recxyparam;
        recxyparam=new RecXYParam[rectMax];
        // Pre-fill with sentinel values.
        for(int i=0;i<rectMax;i++)
            recxyparam[i].x1=-10000;
        // Convert the polar rectangle parameters to corner (x-y) form.
        polar2XYparam (rectpp, recxyparam, rectMax, 1);
        // Convert tile-local corner coordinates to global coordinates
        // by adding the tile origin.
        for(int i=0;i<rectMax;i++){
            recxyparam[i].x1 += inimg->roiX1;
            recxyparam[i].x2 += inimg->roiX1;
            recxyparam[i].x3 += inimg->roiX1;
            recxyparam[i].x4 += inimg->roiX1;
            recxyparam[i].y1 += inimg->roiY1;
            recxyparam[i].y2 += inimg->roiY1;
            recxyparam[i].y3 += inimg->roiY1;
            recxyparam[i].y4 += inimg->roiY1;
        }
        // Draw the rectangles (recxyparam, rectMax) into recoutimg.
        // Create a coordinate set holding just the 4 corner points.
        // NOTE(review): cst is never released via deleteCoordiSet, so
        // one coordinate set leaks per tile in each stage.
        CoordiSet *cst;
        CoordiSetBasicOp::newCoordiSet(&cst);
        CoordiSetBasicOp::makeAtHost(cst, 4);
        // Put each rectangle's four corner points into the set.
        for (int i=0; i< rectMax; i++) {
            CoordiSetBasicOp::copyToHost(cst);
            cst->tplData[0]=recxyparam[i].x1;
            cst->tplData[1]=recxyparam[i].y1;
            cst->tplData[2]=recxyparam[i].x2;
            cst->tplData[3]=recxyparam[i].y2;
            cst->tplData[4]=recxyparam[i].x3;
            cst->tplData[5]=recxyparam[i].y3;
            cst->tplData[6]=recxyparam[i].x4;
            cst->tplData[7]=recxyparam[i].y4;
            // Test the rectangle's four sides for being real lines.
            bool b1,b2,b3,b4;
            // Reset the ROI to the whole image before testing.
            inimg->roiX1=0;
            inimg->roiX2=inimg->width;
            inimg->roiY1=0;
            inimg->roiY2=inimg->height;
            b1=hough.realLine(inimg,recxyparam[i].x1,recxyparam[i].y1,
                              recxyparam[i].x2,recxyparam[i].y2, distance,percent);
            b2=hough.realLine(inimg,recxyparam[i].x2,recxyparam[i].y2,
                              recxyparam[i].x3,recxyparam[i].y3, distance,percent);
            b3=hough.realLine(inimg,recxyparam[i].x3,recxyparam[i].y3,
                              recxyparam[i].x4,recxyparam[i].y4, distance,percent);
            b4=hough.realLine(inimg,recxyparam[i].x4,recxyparam[i].y4,
                              recxyparam[i].x1,recxyparam[i].y1, distance,percent);
            // If all four sides are real lines, connect the points in
            // cst into a closed shape and draw it into recoutimg.
#ifdef DEBUG
            cout<<"b1="<<b1<<" b2="<<b2<<" b3="<<b3<<" b4="<<b4<< endl;
#endif
            if( b1 && b2 && b3 && b4)
                id.drawTrace(recoutimg,cst);
        }
        // Loop-local dynamic memory is reclaimed inside the loop.
        if(lineparam != NULL)
        {delete[] lineparam;lineparam=NULL;}
        if(recxyparam != NULL)
        {delete[] recxyparam;recxyparam=NULL;}
        if(rectpp!=NULL)
        {delete[] rectpp;rectpp=NULL;}
        // ................... tiling stage two ........................
        if(x<piecenum-1 && y<piecenum-1){
#ifdef DEBUG
            printf(" \n---------- step2 of[%d][%d]--------\n",y,x);
#endif
            int linesMax =linenum;
            LineParam *lineparam=new LineParam[linesMax];
            // Pre-fill with sentinel values.
            for(int i=0;i<linesMax;i++){
                lineparam[i].angle=-1;
                lineparam[i].distance=-1;
                lineparam[i].votes=-1;
            }
            // Each tile is shifted right and down by half a tile.
            inimg->roiX1=x*cell_x+cell_x/2;
            inimg->roiX2=x*cell_x+cell_x/2+cell_x-1;
            inimg->roiY1=y*cell_y+cell_y/2;
            inimg->roiY2=y*cell_y+cell_y/2+cell_y-1;
            outimg2->roiX1=inimg->roiX1;
            outimg2->roiX2=inimg->roiX2;
            outimg2->roiY1=inimg->roiY1;
            outimg2->roiY2=inimg->roiY2;
#ifdef DEBUG
            printf("x1=%d x2=%d y1=%d y2=%d \n",
                   inimg->roiX1,inimg->roiX2,
                   inimg->roiY1,inimg->roiY2);
#endif
            // Note: the resulting line parameters are tile-local and
            // must be converted to global coordinates.
            hough.houghLineImg(inimg, NULL, outimg2, &linesMax, lineparam);
            // Build rectangles from the detected lines. Without this
            // synchronization the code below produces wrong results.
            cudaThreadSynchronize();
            RecPolarParam *rectpp;
            rectpp=new RecPolarParam[rectnum];
            // Pre-fill with sentinel values.
            for(int i=0;i<rectnum;i++)
                rectpp[i].theta1=-10000;
            // Receives the actual number of rectangles in this tile.
            int rectMax=rectnum;
            detectRectangle(lineparam,linesMax,&rectMax,rectpp);
            RecXYParam *recxyparam;
            recxyparam=new RecXYParam[rectMax];
            // Pre-fill with sentinel values.
            for(int i=0;i<rectMax;i++)
                recxyparam[i].x1=-10000;
            // Convert the polar rectangle parameters to corner form.
            polar2XYparam (rectpp, recxyparam, rectMax, 1);
            // Convert tile-local corner coordinates to global ones.
            for(int i=0;i<rectMax;i++){
                recxyparam[i].x1 += inimg->roiX1;
                recxyparam[i].x2 += inimg->roiX1;
                recxyparam[i].x3 += inimg->roiX1;
                recxyparam[i].x4 += inimg->roiX1;
                recxyparam[i].y1 += inimg->roiY1;
                recxyparam[i].y2 += inimg->roiY1;
                recxyparam[i].y3 += inimg->roiY1;
                recxyparam[i].y4 += inimg->roiY1;
            }
            // Draw the rectangles (recxyparam, rectMax) into recoutimg.
            // Create the coordinate set.
            CoordiSet *cst;
            CoordiSetBasicOp::newCoordiSet(&cst);
            // A coordinate set holding just the 4 corner points.
            CoordiSetBasicOp::makeAtHost(cst,4);
            for (int i=0; i< rectMax; i++){
                CoordiSetBasicOp::copyToHost(cst);
                cst->tplData[0]=recxyparam[i].x1;
                cst->tplData[1]=recxyparam[i].y1;
                cst->tplData[2]=recxyparam[i].x2;
                cst->tplData[3]=recxyparam[i].y2;
                cst->tplData[4]=recxyparam[i].x3;
                cst->tplData[5]=recxyparam[i].y3;
                cst->tplData[6]=recxyparam[i].x4;
                cst->tplData[7]=recxyparam[i].y4;
                bool b1,b2,b3,b4;
                // Reset the ROI to the whole image before testing.
                inimg->roiX1=0;
                inimg->roiX2=inimg->width;
                inimg->roiY1=0;
                inimg->roiY2=inimg->height;
                b1=hough.realLine(inimg,recxyparam[i].x1,recxyparam[i].y1,
                                  recxyparam[i].x2,recxyparam[i].y2, distance,percent);
                b2=hough.realLine(inimg,recxyparam[i].x2,recxyparam[i].y2,
                                  recxyparam[i].x3,recxyparam[i].y3, distance,percent);
                b3=hough.realLine(inimg,recxyparam[i].x3,recxyparam[i].y3,
                                  recxyparam[i].x4,recxyparam[i].y4, distance,percent);
                b4=hough.realLine(inimg,recxyparam[i].x4,recxyparam[i].y4,
                                  recxyparam[i].x1,recxyparam[i].y1, distance,percent);
                // If all four sides are real lines, connect the points
                // in cst into a closed shape and draw it to recoutimg.
#ifdef DEBUG
                cout<<"b1="<<b1<<" b2="<<b2<<" b3="<<b3<<" b4="<<b4<< endl;
#endif
                if( b1 && b2 && b3 && b4)
                    id.drawTrace(recoutimg, cst);
            }
            // Loop-local dynamic memory is reclaimed inside the loop.
            if(lineparam != NULL)
            {delete[] lineparam;lineparam=NULL;}
            if(recxyparam != NULL)
            {delete[] recxyparam;recxyparam=NULL;}
            if(rectpp!=NULL)
            {delete[] rectpp;rectpp=NULL;}
        }// end of step2 if
    }// end of for x, y
    // Write the images to files.
    ImageBasicOp::copyToHost(outimg);
    ImageBasicOp::writeToFile( lineoutfile1.c_str(), outimg);
    ImageBasicOp::copyToHost(outimg2);
    ImageBasicOp::writeToFile( lineoutfile2.c_str(), outimg2);
    ImageBasicOp::copyToHost(recoutimg);
    ImageBasicOp::writeToFile(rectoutfile.c_str(), recoutimg);
    // Release resources.
    ImageBasicOp::deleteImage(outimg);
    ImageBasicOp::deleteImage(outimg2);
    ImageBasicOp::deleteImage(recoutimg);
    return NO_ERROR;
}
// Host member method: overloaded pieceRealRectImg (tile-based rectangle
// detection on a coordinate set; results written to image files)
// Renders the coordinate set into a temporary image (foreground 255,
// background 0) and delegates to the image-based pieceRealRectImg.
__host__ int
HoughRec:: pieceRealRectImg(
        CoordiSet* coor,
        string lineoutfile1,
        string lineoutfile2,
        string rectoutfile,
        int piecenum,
        int linenum,
        int linethres,
        float lineangthres,
        int linedisthres,
        int rectnum,
        int distance,
        float percent
){
    if (coor == NULL)
        return INVALID_DATA;

    // Bounding box of the point set.
    int minx, maxx, miny, maxy;
    Image *inimg;
    ImageBasicOp::newImage(&inimg);
    ImgConvert imgcvt(BORDER_COLOR, BK_COLOR);

    // ---------------- convert the coor argument into an image ----------------
    // Pre-pass: find the extent of the outer contour.
    int errorcode = _findMinMaxCoordinates(coor, &minx, &miny, &maxx, &maxy);
    if (errorcode != NO_ERROR) {
        // BUGFIX: the original returned a bare 0 here (masking the
        // failure) and leaked the work image; release the image and
        // propagate the real error code instead.
        ImageBasicOp::deleteImage(inimg);
        return errorcode;
    }

    // Allocate the work image. Width/height are max coordinate + 2: +1
    // because coordinates start at 0, and +1 more to keep every point
    // strictly inside the image.
    ImageBasicOp::makeAtHost(inimg, maxx + 2, maxy + 2);
    // Draw the coordinate set into the image: foreground 255, background 0.
    imgcvt.cstConvertToImg(coor, inimg);
#ifdef DEBUG_IMG
    // Save the rendered image before any further processing.
    ImageBasicOp::copyToHost(inimg);
    ImageBasicOp::writeToFile("coorimg.bmp", inimg);
#endif

    // ------------- delegate to the image-based pieceRealRectImg -------------
    pieceRealRectImg(
        inimg,
        lineoutfile1,
        lineoutfile2,
        rectoutfile,
        piecenum,
        linenum,
        linethres,
        lineangthres,
        linedisthres,
        rectnum,
        distance,
        percent
    );

    // Release resources.
    ImageBasicOp::deleteImage(inimg);
    return NO_ERROR;
}
// Host member method: pieceRealRect. Splits inimg into piecenum x piecenum
// tiles, detects up to linenum lines per tile with a Hough transform,
// assembles rectangle candidates from those lines, verifies each candidate's
// four edges against the image pixels (realLine), and appends verified
// rectangles to realxypara. *realrectnum holds the capacity of realxypara on
// entry and the number of verified rectangles on return. A second pass over
// tiles shifted by half a tile catches rectangles straddling tile borders.
__host__ int
HoughRec:: pieceRealRect(
    Image *inimg, // input image
    int piecenum,
    int linenum,
    int linethres,
    float lineangthres,
    int linedisthres,
    int rectnum,
    int distance,
    float percent,
    int *realrectnum,
    RecXYParam *realxypara
){
    // Number of verified rectangles stored so far.
    int pointer=0;
    // Tile dimensions.
    int cell_x=inimg->width/piecenum;
    int cell_y=inimg->height/piecenum;
#ifdef DEBUG
    printf("cell_x=%d cell_y=%d\n",cell_x,cell_y);
#endif
    HoughLine hough;
    // Angle and distance steps of the line detector.
    hough.setDeTheta(M_PI / 180.0);
    hough.setDeRho(1);
    // Vote threshold, chosen from the tile size and stroke thickness.
    hough.setThreshold(linethres);
    // Lines within lineangthres (angle) and linedisthres (distance) of each
    // other are merged and treated as the same line.
    hough.setThresAng(lineangthres);
    hough.setThresDis(linedisthres);
    // Process the image tile by tile.
    for(int y=0;y<piecenum;y++)
    for(int x=0;x<piecenum;x++)
    {//....................... tile stage 1 ..........................
#ifdef DEBUG
        printf(" \n----------------- y=[%d] x=[%d]\n",y,x);
#endif
        int linesMax =linenum;
        LineParam *lineparam= new LineParam[linesMax];
        for(int i=0;i<linesMax;i++){
            lineparam[i].angle=-1;
            lineparam[i].distance=-1;
            lineparam[i].votes=-1;
        }
        // Restrict the ROI to the current tile.
        inimg->roiX1=x*cell_x;
        inimg->roiX2=x*cell_x+cell_x-1;
        inimg->roiY1=y*cell_y;
        inimg->roiY2=y*cell_y+cell_y-1;
#ifdef DEBUG
        printf("x1=%d x2=%d y1=%d y2=%d \n",
               inimg->roiX1,inimg->roiX2,
               inimg->roiY1,inimg->roiY2);
#endif
        hough.houghLine(inimg, NULL, &linesMax, lineparam);
        // Build rectangles from the detected lines.
        // cudaThreadSynchronize() is deprecated; the device must still be
        // synchronized here or the results consumed below are incorrect.
        cudaDeviceSynchronize();
        RecPolarParam *rectpp=new RecPolarParam[rectnum];
        // Initialize candidates with a sentinel value.
        for(int i=0;i<rectnum;i++)
            rectpp[i].theta1=-10000;
        // Receives the actual number of rectangles found in this tile.
        int rectMax=rectnum;
        detectRectangle(lineparam,linesMax,&rectMax,rectpp);
        RecXYParam *recxyparam;
        recxyparam=new RecXYParam[rectMax];
        // Initialize with a sentinel value.
        for(int i=0;i<rectMax;i++)
            recxyparam[i].x1=-10000;
        // Convert polar rectangle parameters to XY corner coordinates.
        polar2XYparam (rectpp, recxyparam, rectMax, 1);
        // Shift tile-local corner coordinates by the tile origin to obtain
        // global image coordinates.
        for(int i=0;i<rectMax;i++){
            recxyparam[i].x1 += inimg->roiX1;
            recxyparam[i].x2 += inimg->roiX1;
            recxyparam[i].x3 += inimg->roiX1;
            recxyparam[i].x4 += inimg->roiX1;
            recxyparam[i].y1 += inimg->roiY1;
            recxyparam[i].y2 += inimg->roiY1;
            recxyparam[i].y3 += inimg->roiY1;
            recxyparam[i].y4 += inimg->roiY1;
        }
        for(int i=0; i< rectMax; i++) {
            // Verify each of the four edges against the full image.
            bool b1,b2,b3,b4;
            // NOTE(review): the tile ROI above uses last-pixel bounds (-1)
            // while this full-image ROI uses width/height; confirm which
            // convention realLine expects.
            inimg->roiX1=0;
            inimg->roiX2=inimg->width;
            inimg->roiY1=0;
            inimg->roiY2=inimg->height;
            b1=hough.realLine(inimg,recxyparam[i].x1,recxyparam[i].y1,
                              recxyparam[i].x2,recxyparam[i].y2, distance,percent);
            b2=hough.realLine(inimg,recxyparam[i].x2,recxyparam[i].y2,
                              recxyparam[i].x3,recxyparam[i].y3, distance,percent);
            b3=hough.realLine(inimg,recxyparam[i].x3,recxyparam[i].y3,
                              recxyparam[i].x4,recxyparam[i].y4, distance,percent);
            b4=hough.realLine(inimg,recxyparam[i].x4,recxyparam[i].y4,
                              recxyparam[i].x1,recxyparam[i].y1, distance,percent);
            // Keep the rectangle only if all four edges are real lines and
            // the output array still has room.
#ifdef DEBUG
            cout<<"b1="<<b1<<" b2="<<b2<<" b3="<<b3<<" b4="<<b4<< endl;
#endif
            if( b1 && b2 && b3 && b4 && pointer<*realrectnum){
                realxypara[pointer]=recxyparam[i];
                pointer++;
            }
        }
        // Per-iteration dynamic memory is released inside the loop.
        if(lineparam!=NULL)
            {delete[] lineparam;lineparam=NULL;}
        if(recxyparam!=NULL)
            {delete[] recxyparam;recxyparam=NULL;}
        if(rectpp!=NULL)
            {delete[] rectpp;rectpp=NULL;}
        //....................... tile stage 2 ........................
        // Repeat detection on a half-tile-shifted window to catch
        // rectangles that straddle tile borders.
        if(x<piecenum-1 && y<piecenum-1){
#ifdef DEBUG
            printf(" \n-----------step2 of[%d][%d]-----------\n",y,x);
#endif
            int linesMax =linenum;
            LineParam *lineparam=new LineParam[linesMax];
            for(int i=0;i<linesMax;i++){
                lineparam[i].angle=-1;
                lineparam[i].distance=-1;
                lineparam[i].votes=-1;
            }
            // Shift the tile window right and down by half a tile.
            inimg->roiX1=x*cell_x+cell_x/2;
            inimg->roiX2=x*cell_x+cell_x/2+cell_x-1;
            inimg->roiY1=y*cell_y+cell_y/2;
            inimg->roiY2=y*cell_y+cell_y/2+cell_y-1;
#ifdef DEBUG
            printf("x1=%d x2=%d y1=%d y2=%d \n",
                   inimg->roiX1,inimg->roiX2,
                   inimg->roiY1,inimg->roiY2);
#endif
            // Note: line parameters here are tile-local, not global; they
            // are converted below.
            hough.houghLine(inimg, NULL, &linesMax, lineparam);
            // Build rectangles from the detected lines.
            // cudaThreadSynchronize() is deprecated; synchronization is
            // still required before consuming the results below.
            cudaDeviceSynchronize();
            RecPolarParam *rectpp;
            rectpp=new RecPolarParam[rectnum];
            // Initialize candidates with a sentinel value.
            for(int i=0;i<rectnum;i++)
                rectpp[i].theta1=-10000;
            // Receives the actual number of rectangles found in this tile.
            int rectMax=rectnum;
            detectRectangle(lineparam,linesMax,&rectMax,rectpp);
            RecXYParam *recxyparam;
            recxyparam=new RecXYParam[rectMax];
            // Initialize with a sentinel value.
            for(int i=0;i<rectMax;i++)
                recxyparam[i].x1=-10000;
            // Convert polar rectangle parameters to XY corner coordinates.
            polar2XYparam (rectpp, recxyparam, rectMax, 1);
            // Convert tile-local coordinates to global coordinates.
            for(int i=0;i<rectMax;i++){
                recxyparam[i].x1 += inimg->roiX1;
                recxyparam[i].x2 += inimg->roiX1;
                recxyparam[i].x3 += inimg->roiX1;
                recxyparam[i].x4 += inimg->roiX1;
                recxyparam[i].y1 += inimg->roiY1;
                recxyparam[i].y2 += inimg->roiY1;
                recxyparam[i].y3 += inimg->roiY1;
                recxyparam[i].y4 += inimg->roiY1;
            }
            for (int i=0; i< rectMax; i++){
                bool b1,b2,b3,b4;
                inimg->roiX1=0;
                inimg->roiX2=inimg->width;
                inimg->roiY1=0;
                inimg->roiY2=inimg->height;
                b1=hough.realLine(inimg,recxyparam[i].x1,recxyparam[i].y1,
                                  recxyparam[i].x2,recxyparam[i].y2, distance,percent);
                b2=hough.realLine(inimg,recxyparam[i].x2,recxyparam[i].y2,
                                  recxyparam[i].x3,recxyparam[i].y3, distance,percent);
                b3=hough.realLine(inimg,recxyparam[i].x3,recxyparam[i].y3,
                                  recxyparam[i].x4,recxyparam[i].y4, distance,percent);
                b4=hough.realLine(inimg,recxyparam[i].x4,recxyparam[i].y4,
                                  recxyparam[i].x1,recxyparam[i].y1, distance,percent);
                // Keep the rectangle only if all four edges are real lines
                // and the output array still has room.
#ifdef DEBUG
                cout<<"b1="<<b1<<" b2="<<b2<<" b3="<<b3<<" b4="<<b4<< endl;
#endif
                if( b1 && b2 && b3 && b4 && pointer<*realrectnum)
                {
                    realxypara[pointer]=recxyparam[i];
                    pointer++;
                }
            }
            // Per-iteration dynamic memory is released inside the loop.
            if(lineparam!=NULL)
                {delete[] lineparam;lineparam=NULL;}
            if(recxyparam!=NULL)
                {delete[] recxyparam;recxyparam=NULL;}
            if(rectpp!=NULL)
                {delete[] rectpp;rectpp=NULL;}
        }// end of step2 if
    }// end of for x,for y
    // Report the number of verified rectangles.
    *realrectnum=pointer;
    return NO_ERROR;
}
// Host member method: overloaded pieceRealRect — detects rectangles in a
// coordinate set by rasterizing it to an image and delegating to the
// image-based pieceRealRect overload. Returns INVALID_DATA for a NULL input,
// the error code from _findMinMaxCoordinates on failure, NO_ERROR otherwise.
__host__ int
HoughRec:: pieceRealRect(
    CoordiSet* coor, // input coordinate set
    int piecenum,
    int linenum,
    int linethres,
    float lineangthres,
    int linedisthres,
    int rectnum,
    int distance,
    float percent,
    int *realrectnum,
    RecXYParam *realxypara
){
    if(coor==NULL)
        return INVALID_DATA;
    // Bounding box of the coordinate set.
    int minx,maxx,miny,maxy;
    Image *inimg;
    ImageBasicOp::newImage(&inimg);
    ImgConvert imgcvt(BORDER_COLOR,BK_COLOR);
    // ---------------- convert the coor input into an image ----------------
    // Preprocess: compute the extent of the point set.
    int errorcode=_findMinMaxCoordinates(coor,&minx,&miny,&maxx,&maxy);
    if(errorcode!=NO_ERROR){
        // Release the image allocated above before bailing out (the previous
        // code leaked it), and propagate the error code instead of returning
        // 0, which could be mistaken for success.
        ImageBasicOp::deleteImage(inimg);
        return errorcode;
    }
    // Allocate the working image. Width/height are max coordinate + 2: +1
    // because coordinates are 0-based and +1 so every point lies strictly
    // inside the image.
    ImageBasicOp::makeAtHost(inimg,maxx+2 ,maxy+2);
    // Draw the coordinate set onto the image: foreground 255, background 0.
    imgcvt.cstConvertToImg(coor,inimg);
#ifdef DEBUG_IMG
    // Dump the rasterized image to a file for inspection.
    ImageBasicOp::copyToHost(inimg);
    ImageBasicOp::writeToFile("coorimg.bmp",inimg);
#endif
    // ------------- delegate to the image-based pieceRealRect --------------
    pieceRealRect(
        inimg,
        piecenum,
        linenum,
        linethres,
        lineangthres,
        linedisthres,
        rectnum,
        distance,
        percent,
        realrectnum,
        realxypara
    );
    // Release the working image.
    ImageBasicOp::deleteImage(inimg);
    return NO_ERROR;
}
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#pragma once
#include "../mgpudevice.cuh"
#include "deviceutil.cuh"
#include "intrinsics.cuh"
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// Cooperative load functions.
// Cooperatively load NT * VT elements from shared memory into block-strided
// registers: thread tid receives elements NT * i + tid. When sync is true,
// __syncthreads() runs afterwards so the shared buffer may be reused.
// The default (sync = true) is required by call sites in this file that
// pass only three arguments (see DeviceTransferMergeValuesShared).
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceSharedToReg(InputIt data, int tid, T* reg,
	bool sync = true) {
	#pragma unroll
	for(int i = 0; i < VT; ++i)
		reg[i] = data[NT * i + tid];
	if(sync) __syncthreads();
}
// Predicated global -> register load: each thread fetches up to VT values
// in block-strided order, skipping any position at or beyond `count`.
// Slots whose position is out of range are left untouched.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToRegPred(int count, InputIt data, int tid,
	T* reg, bool sync) {
	// TODO: Attempt to issue 4 loads at a time.
	#pragma unroll
	for(int slot = 0; slot < VT; ++slot) {
		const int gid = NT * slot + tid;
		if(gid < count)
			reg[slot] = data[gid];
	}
	if(sync) __syncthreads();
}
// Global -> register load of a tile in block-strided order. The full-tile
// fast path (count >= NT * VT) loads unconditionally with no per-element
// bounds checks; partial tiles fall back to the predicated loader.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToReg(int count, InputIt data, int tid,
	T* reg, bool sync) {
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = data[NT * i + tid];
	} else
		DeviceGlobalToRegPred<NT, VT>(count, data, tid, reg, false);
	if(sync) __syncthreads();
}
// Two-phase load of VT1 values per thread: the first VT0 slots use the
// fast/predicated tile loader (checked against NT * VT0), while the extra
// slots VT0..VT1-1 are always individually bounds-checked.
template<int NT, int VT0, int VT1, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToReg2(int count, InputIt data, int tid,
	T* reg, bool sync) {
	DeviceGlobalToReg<NT, VT0>(count, data, tid, reg, false);
	#pragma unroll
	for(int i = VT0; i < VT1; ++i) {
		int index = NT * i + tid;
		if(index < count) reg[i] = data[index];
	}
	if(sync) __syncthreads();
}
// Like DeviceGlobalToReg, but out-of-range slots are filled with `init`, so
// all VT registers are defined after the call.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToRegDefault(int count, InputIt data, int tid,
	T* reg, T init, bool sync) {
	if(count >= NT * VT) {
		// Full tile: no bounds checks needed.
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = data[NT * i + tid];
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			reg[i] = init;
			if(index < count) reg[i] = data[index];
		}
	}
	if(sync) __syncthreads();
}
// Two-phase variant of DeviceGlobalToRegDefault: the first VT0 slots go
// through the tile loader, the extra slots VT0..VT1-1 are bounds-checked and
// default to `init` when out of range.
template<int NT, int VT0, int VT1, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToRegDefault2(int count, InputIt data, int tid,
	T* reg, T init, bool sync) {
	DeviceGlobalToRegDefault<NT, VT0>(count, data, tid, reg, init, false);
	#pragma unroll
	for(int i = VT0; i < VT1; ++i) {
		int index = NT * i + tid;
		reg[i] = init;
		if(index < count) reg[i] = data[index];
	}
	if(sync) __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// Load VT consecutive elements per thread (thread order, not block-strided)
// through ldg. For partial tiles, `count` is rebased to this thread's window
// so each slot is checked locally; out-of-range slots are left untouched.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToThread(int count, InputIt data, int tid,
	T* reg) {
	// Advance to this thread's VT-element window.
	data += VT * tid;
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = ldg(data + i);
	} else {
		// Remaining valid elements within this thread's window.
		count -= VT * tid;
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			if(i < count) reg[i] = ldg(data + i);
	}
}
// Thread-order load like DeviceGlobalToThread, but out-of-range slots are
// filled with `init` so all VT registers are defined.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToThreadDefault(int count, InputIt data, int tid,
	T* reg, T init) {
	// Advance to this thread's VT-element window.
	data += VT * tid;
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = ldg(data + i);
	} else {
		// Remaining valid elements within this thread's window.
		count -= VT * tid;
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = (i < count) ? ldg(data + i) : init;
	}
}
////////////////////////////////////////////////////////////////////////////////
// Cooperative store functions.
// Store block-strided registers to shared memory, casting each value to the
// destination iterator's value type. When sync is true, __syncthreads()
// makes the stores visible to the whole CTA before the buffer is read.
template<int NT, int VT, typename OutputIt, typename T>
MGPU_DEVICE void DeviceRegToShared(const T* reg, int tid,
	OutputIt dest, bool sync) {
	typedef typename std::iterator_traits<OutputIt>::value_type T2;
	#pragma unroll
	for(int i = 0; i < VT; ++i)
		dest[NT * i + tid] = (T2)reg[i];
	if(sync) __syncthreads();
}
// Cooperatively store up to `count` block-strided register values to global
// memory; positions at or beyond `count` are skipped.
template<int NT, int VT, typename OutputIt, typename T>
MGPU_DEVICE void DeviceRegToGlobal(int count, const T* reg, int tid,
	OutputIt dest, bool sync) {
	#pragma unroll
	for(int slot = 0; slot < VT; ++slot) {
		const int gid = NT * slot + tid;
		if(gid < count)
			dest[gid] = reg[slot];
	}
	if(sync) __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// DeviceMemToMemLoop
// Transfer from shared memory to global, or global to shared, for transfers
// that are smaller than NT * VT in the average case. The goal is to reduce
// unnecessary comparison logic.
// Copy up to min(VT, 4) * NT block-strided elements from source to dest,
// staging through registers so all loads issue before any store. Intended
// for transfers that are usually smaller than a full NT * VT tile; the
// fast path still keys off count >= NT * VT.
template<int NT, int VT, typename InputIt, typename OutputIt>
MGPU_DEVICE void DeviceMemToMem4(int count, InputIt source, int tid,
	OutputIt dest, bool sync) {
	typedef typename std::iterator_traits<InputIt>::value_type T;
	T x[VT];
	// Transfer at most 4 values per thread per call.
	const int Count = (VT < 4) ? VT : 4;
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < Count; ++i)
			x[i] = source[NT * i + tid];
		#pragma unroll
		for(int i = 0; i < Count; ++i)
			dest[NT * i + tid] = x[i];
	} else {
		// Partial tile: bounds-check each position on load and store.
		#pragma unroll
		for(int i = 0; i < Count; ++i) {
			int index = NT * i + tid;
			if(index < count)
				x[i] = source[NT * i + tid];
		}
		#pragma unroll
		for(int i = 0; i < Count; ++i) {
			int index = NT * i + tid;
			if(index < count)
				dest[index] = x[i];
		}
	}
	if(sync) __syncthreads();
}
// Loop version of DeviceMemToMem4: copies `count` elements in chunks of
// 4 * NT, syncing once at the end if requested.
template<int NT, typename InputIt, typename OutputIt>
MGPU_DEVICE void DeviceMemToMemLoop(int count, InputIt source, int tid,
	OutputIt dest, bool sync) {
	for(int i = 0; i < count; i += 4 * NT)
		DeviceMemToMem4<NT, 4>(count - i, source + i, tid, dest + i,
			false);
	if(sync) __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// Functions to copy between shared and global memory where the average case is
// to transfer NT * VT elements.
// Bounds-checked shared -> global store: each thread copies up to VT
// block-strided values, casting to the output iterator's value type.
template<int NT, int VT, typename T, typename OutputIt>
MGPU_DEVICE void DeviceSharedToGlobal(int count, const T* source, int tid,
	OutputIt dest, bool sync) {
	typedef typename std::iterator_traits<OutputIt>::value_type T2;
	#pragma unroll
	for(int i = 0; i < VT; ++i) {
		int index = NT * i + tid;
		if(index < count) dest[index] = (T2)source[index];
	}
	if(sync) __syncthreads();
}
// Global -> shared copy staged through registers, so all global loads can
// issue before any shared-memory traffic; the optional sync happens after
// the shared stores.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToShared(int count, InputIt source, int tid,
	T* dest, bool sync) {
	T staging[VT];
	DeviceGlobalToReg<NT, VT>(count, source, tid, staging, false);
	DeviceRegToShared<NT, VT>(staging, tid, dest, sync);
}
// Two-phase global -> shared copy staged through VT1 registers (see
// DeviceGlobalToReg2 for the VT0/VT1 split).
template<int NT, int VT0, int VT1, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToShared2(int count, InputIt source, int tid,
	T* dest, bool sync) {
	T reg[VT1];
	DeviceGlobalToReg2<NT, VT0, VT1>(count, source, tid, reg, false);
	DeviceRegToShared<NT, VT1>(reg, tid, dest, sync);
}
// Global -> shared copy staged through registers; out-of-range positions
// are written as `init` so the whole shared tile is defined.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToSharedDefault(int count, InputIt source, int tid,
	T* dest, T init, bool sync) {
	T reg[VT];
	DeviceGlobalToRegDefault<NT, VT>(count, source, tid, reg, init, false);
	DeviceRegToShared<NT, VT>(reg, tid, dest, sync);
}
// Two-phase default-filled global -> shared copy staged through VT1
// registers (see DeviceGlobalToRegDefault2 for the VT0/VT1 split).
template<int NT, int VT0, int VT1, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToSharedDefault2(int count, InputIt data, int tid,
	T* dest, T init, bool sync) {
	T reg[VT1];
	DeviceGlobalToRegDefault2<NT, VT0, VT1>(count, data, tid, reg, init, false);
	DeviceRegToShared<NT, VT1>(reg, tid, dest, sync);
}
////////////////////////////////////////////////////////////////////////////////
// Global -> shared copy split into a first chunk of Granularity =
// min(VT, 3) values per thread plus an optional remainder chunk of
// VT - Granularity values. The commented-out loop below is an alternative
// implementation kept for reference.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGlobalToSharedLoop(int count, InputIt source, int tid,
	T* dest, bool sync) {
	const int Granularity = MGPU_MIN(VT, 3);
	DeviceGlobalToShared<NT, Granularity>(count, source, tid, dest, false);
	int offset = Granularity * NT;
	if(count > offset)
		DeviceGlobalToShared<NT, VT - Granularity>(count - offset,
			source + offset, tid, dest + offset, false);
	if(sync) __syncthreads();
	/*
	source += tid;
	while(count > 0) {
		T reg[Granularity];
		#pragma unroll
		for(int i = 0; i < Granularity; ++i) {
			int index = NT * i + tid;
			if(index < count)
				reg[i] = source[NT * i];
		}
		DeviceRegToShared<NT, Granularity>(reg, tid, dest, false);

		source += Granularity * NT;
		dest += Granularity * NT;
		count -= Granularity * NT;
	}
	if(sync) __syncthreads();*/
}
// Global -> global copy staged through registers; both the load and the
// store are bounds-checked against `count`.
template<int NT, int VT, typename InputIt, typename OutputIt>
MGPU_DEVICE void DeviceGlobalToGlobal(int count, InputIt source, int tid,
	OutputIt dest, bool sync) {
	typedef typename std::iterator_traits<OutputIt>::value_type T;
	T staging[VT];
	DeviceGlobalToReg<NT, VT>(count, source, tid, staging, false);
	DeviceRegToGlobal<NT, VT>(count, staging, tid, dest, sync);
}
////////////////////////////////////////////////////////////////////////////////
// Transponse VT elements in NT threads (x) into thread-order registers (y)
// using only NT * VT / 2 elements of shared memory.
//This function definitely has a bug, don't use!!! fix TODO(erich)
// Transpose VT block-strided values (x) into thread-order registers (y)
// using only NT * VT / 2 shared slots, by staging the tile in two halves
// (first half for tid < NT / 2, second half for tid >= NT / 2).
// Known broken — the runtime warning below is emitted on every call; do not
// use until the bug is found and fixed.
template<int NT, int VT, typename T>
MGPU_DEVICE void HalfSmemTranspose(const T* x, int tid, T* shared, T* y) {
	printf("HalfSmemTranspose has a bug, use WAR SmemTranpose or find bug before using in production");
	// Transpose the first half values (tid < NT / 2)
	#pragma unroll
	for(int i = 0; i <= VT / 2; ++i)
		if(i < VT / 2 || tid < NT / 2)
			shared[NT * i + tid] = x[i];
	__syncthreads();

	if(tid < NT / 2) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			y[i] = shared[VT * tid + i];
	}
	__syncthreads();

	// Transpose the second half values (tid >= NT / 2)
	#pragma unroll
	for(int i = VT / 2; i < VT; ++i)
		if(i > VT / 2 || tid >= NT / 2)
			shared[NT * i - NT * VT / 2 + tid] = x[i];
	__syncthreads();

	if(tid >= NT / 2) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			y[i] = shared[VT * tid + i - NT * VT / 2];
	}
	__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// Gather/scatter functions
// Gather reg[i] = data[indices[i]] for VT per-thread indices. Full tiles
// skip bounds checks; for partial tiles the predicate tests the slot
// position NT * i + tid against count, not the gather index itself.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGather(int count, InputIt data, int indices[VT],
	int tid, T* reg, bool sync) {
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = data[indices[i]];
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			if(index < count)
				reg[i] = data[indices[i]];
		}
	}
	if(sync) __syncthreads();
}
// Gather like DeviceGather, but slots whose position is out of range are
// filled with `identity`, so all VT registers are defined.
template<int NT, int VT, typename InputIt, typename T>
MGPU_DEVICE void DeviceGatherDefault(int count, InputIt data, int indices[VT],
	int tid, T* reg, T identity, bool sync) {
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			reg[i] = data[indices[i]];
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			reg[i] = (index < count) ? data[indices[i]] : identity;
		}
	}
	if(sync) __syncthreads();
}
// Scatter data[indices[i]] = reg[i] for VT per-thread values. As with
// DeviceGather, the partial-tile predicate tests the slot position
// NT * i + tid, not the scatter index.
template<int NT, int VT, typename T, typename OutputIt>
MGPU_DEVICE void DeviceScatter(int count, const T* reg, int tid,
	int indices[VT], OutputIt data, bool sync) {
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			data[indices[i]] = reg[i];
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			if(index < count)
				data[indices[i]] = reg[i];
		}
	}
	if(sync) __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// Cooperative transpose functions (strided to thread order)
// Store VT thread-order values to shared memory starting at VT * tid.
// Even grain sizes are stored as DevicePair<T> to get wider shared-memory
// accesses (see the inline comment below).
template<int VT, typename T>
MGPU_DEVICE void DeviceThreadToShared(const T* threadReg, int tid, T* shared,
	bool sync) {
	if(1 & VT) {
		// Odd grain size. Store as type T.
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			shared[VT * tid + i] = threadReg[i];
	} else {
		// Even grain size. Store as DevicePair<T>. This lets us exploit the
		// 8-byte shared memory mode on Kepler.
		DevicePair<T>* dest = (DevicePair<T>*)(shared + VT * tid);
		#pragma unroll
		for(int i = 0; i < VT / 2; ++i)
			dest[i] = MakeDevicePair(threadReg[2 * i], threadReg[2 * i + 1]);
	}
	if(sync) __syncthreads();
}
// Inverse of DeviceThreadToShared: load VT thread-order values from shared
// memory at VT * tid, using DevicePair<T> loads for even grain sizes.
template<int VT, typename T>
MGPU_DEVICE void DeviceSharedToThread(const T* shared, int tid, T* threadReg,
	bool sync) {
	if(1 & VT) {
		// Odd grain size: plain element-wise loads.
		#pragma unroll
		for(int i = 0; i < VT; ++i)
			threadReg[i] = shared[VT * tid + i];
	} else {
		// Even grain size: load pairs and unpack them.
		const DevicePair<T>* source = (const DevicePair<T>*)(shared + VT * tid);
		#pragma unroll
		for(int i = 0; i < VT / 2; ++i) {
			DevicePair<T> p = source[i];
			threadReg[2 * i] = p.x;
			threadReg[2 * i + 1] = p.y;
		}
	}
	if(sync) __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////
// DeviceLoad2 - load from pointers of the same type. Optimize for a single LD
// statement.
// Load aCount elements from a_global followed by bCount from b_global into
// block-strided registers, using a single base pointer: b0 rebases indices
// >= aCount onto b_global, so each slot takes a single LD instruction.
// The first VT0 slots get the full/partial tile treatment; slots
// VT0..VT1-1 are always bounds-checked.
// NOTE(review): the `sync` parameter is accepted but never used here; it
// appears to be kept only for signature parity with the shared variants.
template<int NT, int VT0, int VT1, typename T>
MGPU_DEVICE void DeviceLoad2ToReg(const T* a_global, int aCount,
	const T* b_global, int bCount, int tid, T* reg, bool sync) {
	// Offset that maps an index >= aCount onto the b_global array.
	int b0 = b_global - a_global - aCount;
	int total = aCount + bCount;
	if(total >= NT * VT0) {
		#pragma unroll
		for(int i = 0; i < VT0; ++i) {
			int index = NT * i + tid;
			reg[i] = a_global[index + ((index >= aCount) ? b0 : 0)];
		}
	} else {
		#pragma unroll
		for(int i = 0; i < VT0; ++i) {
			int index = NT * i + tid;
			if(index < total)
				reg[i] = a_global[index + ((index >= aCount) ? b0 : 0)];
		}
	}
	#pragma unroll
	for(int i = VT0; i < VT1; ++i) {
		int index = NT * i + tid;
		if(index < total)
			reg[i] = a_global[index + ((index >= aCount) ? b0 : 0)];
	}
}
// Load the concatenation of a_global/b_global into shared memory, staged
// through VT1 registers (same-type single-LD variant).
template<int NT, int VT0, int VT1, typename T>
MGPU_DEVICE void DeviceLoad2ToShared(const T* a_global, int aCount,
	const T* b_global, int bCount, int tid, T* shared, bool sync) {
	T reg[VT1];
	DeviceLoad2ToReg<NT, VT0, VT1>(a_global, aCount, b_global, bCount, tid,
		reg, false);
	DeviceRegToShared<NT, VT1>(reg, tid, shared, sync);
}
////////////////////////////////////////////////////////////////////////////////
// DeviceLoad2 - load from pointers of different types. Uses two LD statements.
// Load from two differently-typed sources: a_global supplies indices
// [0, aCount) and b_global the rest. b_global is rebased by -aCount so
// b_global[index] is correct for index >= aCount. Uses two LD statements
// per slot (one per source), unlike the same-type overload above.
// NOTE(review): `sync` is accepted but unused, as in the same-type overload.
template<int NT, int VT0, int VT1, typename InputIt1, typename InputIt2,
	typename T>
MGPU_DEVICE void DeviceLoad2ToReg(InputIt1 a_global, int aCount,
	InputIt2 b_global, int bCount, int tid, T* reg, bool sync) {
	// Rebase so that b_global[index] works for index >= aCount.
	b_global -= aCount;
	int total = aCount + bCount;
	if(total >= NT * VT0) {
		#pragma unroll
		for(int i = 0; i < VT0; ++i) {
			int index = NT * i + tid;
			if(index < aCount) reg[i] = a_global[index];
			else reg[i] = b_global[index];
		}
	} else {
		#pragma unroll
		for(int i = 0; i < VT0; ++i) {
			int index = NT * i + tid;
			if(index < aCount) reg[i] = a_global[index];
			else if(index < total) reg[i] = b_global[index];
		}
	}
	#pragma unroll
	for(int i = VT0; i < VT1; ++i) {
		int index = NT * i + tid;
		if(index < aCount) reg[i] = a_global[index];
		else if(index < total) reg[i] = b_global[index];
	}
}
// Load the concatenation of two differently-typed sources into shared
// memory, staged through VT1 registers.
template<int NT, int VT0, int VT1, typename InputIt1, typename InputIt2,
	typename T>
MGPU_DEVICE void DeviceLoad2ToShared(InputIt1 a_global, int aCount,
	InputIt2 b_global, int bCount, int tid, T* shared, bool sync) {
	T reg[VT1];
	DeviceLoad2ToReg<NT, VT0, VT1>(a_global, aCount, b_global, bCount, tid,
		reg, false);
	DeviceRegToShared<NT, VT1>(reg, tid, shared, sync);
}
////////////////////////////////////////////////////////////////////////////////
// DeviceGatherGlobalToGlobal
// Gather `count` elements from data_global at offsets read from shared
// memory and store them to dest_global, staging through registers. The
// optional sync protects the shared index buffer before it is reused.
template<int NT, int VT, typename InputIt, typename OutputIt>
MGPU_DEVICE void DeviceGatherGlobalToGlobal(int count, InputIt data_global,
	const int* indices_shared, int tid, OutputIt dest_global, bool sync) {
	typedef typename std::iterator_traits<InputIt>::value_type ValType;
	ValType values[VT];
	#pragma unroll
	for(int i = 0; i < VT; ++i) {
		int index = NT * i + tid;
		if(index < count) {
			int gather = indices_shared[index];
			values[i] = data_global[gather];
		}
	}
	if(sync) __syncthreads();

	DeviceRegToGlobal<NT, VT>(count, values, tid, dest_global, false);
}
////////////////////////////////////////////////////////////////////////////////
// DeviceTransferMergeValues
// Gather in a merge-like value from two input arrays and store to a single
// output. Like DeviceGatherGlobalToGlobal, but for two arrays at once.
// Merge-style gather into registers: indices[i] < bStart selects a_global,
// otherwise b_global (rebased by -bStart so the same index can address it).
// Full tiles skip bounds checks; partial tiles predicate on the slot
// position NT * i + tid.
template<int NT, int VT, typename InputIt1, typename InputIt2,
	typename T>
MGPU_DEVICE void DeviceTransferMergeValuesReg(int count, InputIt1 a_global,
	InputIt2 b_global, int bStart, const int* indices, int tid,
	T* reg, bool sync) {

	b_global -= bStart;
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			reg[i] = (indices[i] < bStart) ? a_global[indices[i]] :
				b_global[indices[i]];
		}
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			if(index < count)
				reg[i] = (indices[i] < bStart) ? a_global[indices[i]] :
					b_global[indices[i]];
		}
	}
	if(sync) __syncthreads();
}
// Shared-index version of the merge-value transfer: reads per-thread gather
// indices from shared memory, gathers from a_global/b_global, and stores
// the merged values to dest_global.
// NOTE(review): DeviceSharedToReg is called without a sync argument, so this
// relies on that function declaring a default; confirm one exists.
// NOTE(review): `sync` is forwarded to both the gather and the final store,
// which issues __syncthreads() twice when true — verify this is intentional.
template<int NT, int VT, typename InputIt1, typename InputIt2,
	typename OutputIt>
MGPU_DEVICE void DeviceTransferMergeValuesShared(int count, InputIt1 a_global,
	InputIt2 b_global, int bStart, const int* indices_shared, int tid,
	OutputIt dest_global, bool sync) {

	int indices[VT];
	DeviceSharedToReg<NT, VT>(indices_shared, tid, indices);

	typedef typename std::iterator_traits<InputIt1>::value_type ValType;
	ValType reg[VT];
	DeviceTransferMergeValuesReg<NT, VT>(count, a_global, b_global, bStart,
		indices, tid, reg, sync);
	DeviceRegToGlobal<NT, VT>(count, reg, tid, dest_global, sync);
}
// Same-type specialization of the merge-value gather: a single base pointer
// plus a bOffset rebase lets each value be fetched with one LD instruction.
template<int NT, int VT, typename T>
MGPU_DEVICE void DeviceTransferMergeValuesReg(int count, const T* a_global,
	const T* b_global, int bStart, const int* indices, int tid, T* reg,
	bool sync) {

	// Offset that maps an index >= bStart onto the b_global array.
	int bOffset = (int)(b_global - a_global - bStart);
	if(count >= NT * VT) {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int gather = indices[i];
			if(gather >= bStart) gather += bOffset;
			reg[i] = a_global[gather];
		}
	} else {
		#pragma unroll
		for(int i = 0; i < VT; ++i) {
			int index = NT * i + tid;
			int gather = indices[i];
			if(gather >= bStart) gather += bOffset;
			if(index < count)
				reg[i] = a_global[gather];
		}
	}
	if(sync) __syncthreads();
}
// Same-type shared-index merge-value transfer (single-LD gather variant).
// NOTE(review): DeviceSharedToReg is called without a sync argument, so this
// relies on that function declaring a default; confirm one exists.
// NOTE(review): `sync` is forwarded to both the gather and the final store,
// which issues __syncthreads() twice when true — verify this is intentional.
template<int NT, int VT, typename T, typename OutputIt>
MGPU_DEVICE void DeviceTransferMergeValuesShared(int count, const T* a_global,
	const T* b_global, int bStart, const int* indices_shared, int tid,
	OutputIt dest_global, bool sync) {

	int indices[VT];
	DeviceSharedToReg<NT, VT>(indices_shared, tid, indices);

	T reg[VT];
	DeviceTransferMergeValuesReg<NT, VT>(count, a_global, b_global, bStart,
		indices, tid, reg, sync);
	DeviceRegToGlobal<NT, VT>(count, reg, tid, dest_global, sync);
}
} // namespace mgpu | the_stack |
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
namespace at {
namespace native {
namespace {
// Forward bilinear 2D upsampling for NCHW tensors. Each thread handles one
// output spatial position (h2, w2) — n is the number of such positions —
// and loops over all batches and channels, blending the four neighboring
// input pixels with (h0/h1 x w0/w1) lambda weights in accscalar_t precision.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_out_frame(
    const int n,
    const accscalar_t rheight,
    const accscalar_t rwidth,
    const bool align_corners,
    const PackedTensorAccessor<scalar_t, 4> idata,
    PackedTensorAccessor<scalar_t, 4> odata) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;

  const int batchsize = idata.size(0);
  const int channels = idata.size(1);
  const int height1 = idata.size(2);
  const int width1 = idata.size(3);
  const int height2 = odata.size(2);
  const int width2 = odata.size(3);

  if (index < n) {
    const int w2 = index % width2; // 0:width2-1
    const int h2 = index / width2; // 0:height2-1
    // Fractional source row; split into integer base + interpolation weight.
    const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
        rheight, h2, align_corners, /*cubic=*/false);
    const int h1 = h1r;
    // +1 row unless already at the bottom edge (clamped neighbor).
    const int h1p = (h1 < height1 - 1) ? 1 : 0;
    const accscalar_t h1lambda = h1r - h1;
    const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
    //
    // Fractional source column, same decomposition.
    const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
        rwidth, w2, align_corners, /*cubic=*/false);
    const int w1 = w1r;
    // +1 column unless already at the right edge (clamped neighbor).
    const int w1p = (w1 < width1 - 1) ? 1 : 0;
    const accscalar_t w1lambda = w1r - w1;
    const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
    //
    // Blend the 2x2 neighborhood for every (n, c) at this spatial position.
    for (int n = 0; n < batchsize; n++) {
      for (int c = 0; c < channels; ++c) {
        const accscalar_t val = h0lambda *
                (w0lambda * idata[n][c][h1][w1] +
                 w1lambda * idata[n][c][h1][w1 + w1p]) +
            h1lambda *
                (w0lambda * idata[n][c][h1 + h1p][w1] +
                 w1lambda * idata[n][c][h1 + h1p][w1 + w1p]);
        odata[n][c][h2][w2] = static_cast<scalar_t>(val);
      }
    }
  }
}
// Forward bilinear 2D upsampling for channels-last (NHWC) tensors. One
// thread per output element (n, h2, w2, c); idx_cl computes the NHWC
// linear offsets. Same 2x2 lambda-weighted blend as the NCHW kernel.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_nhwc_out_frame(
    const accscalar_t rheight,
    const accscalar_t rwidth,
    const bool align_corners,
    const int batchsize,
    const int channels,
    const int height1,
    const int width1,
    const int height2,
    const int width2,
    const scalar_t* idata,
    scalar_t* odata,
    const int out_numel) {

  const int index = blockIdx.x * blockDim.x + threadIdx.x;

  if (index < out_numel) {
    // Decompose the flat NHWC index: c is fastest, then w2, h2, n.
    const int c = index % channels;
    const int w2 = (index / channels) % width2;
    const int h2 = (index / channels / width2) % height2;
    const int n = index / channels / width2 / height2;

    // Fractional source row; integer base + interpolation weight.
    const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
        rheight, h2, align_corners, /*cubic=*/false);
    const int h1 = h1r;
    // +1 row unless already at the bottom edge (clamped neighbor).
    const int h1p = (h1 < height1 - 1) ? 1 : 0;
    const accscalar_t h1lambda = h1r - h1;
    const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;

    // Fractional source column, same decomposition.
    const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
        rwidth, w2, align_corners, /*cubic=*/false);
    const int w1 = w1r;
    // +1 column unless already at the right edge (clamped neighbor).
    const int w1p = (w1 < width1 - 1) ? 1 : 0;
    const accscalar_t w1lambda = w1r - w1;
    const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;

    const accscalar_t val = h0lambda * (
        w0lambda * idata[idx_cl(n, h1, w1, c, height1, width1, channels)] +
        w1lambda * idata[idx_cl(n, h1, w1 + w1p, c, height1, width1, channels)]
      ) + h1lambda * (
        w0lambda * idata[idx_cl(n, h1 + h1p, w1, c, height1, width1, channels)] +
        w1lambda * idata[idx_cl(n, h1 + h1p, w1 + w1p, c, height1, width1, channels)]
      );
    odata[idx_cl(n, h2, w2, c, height2, width2, channels)] = static_cast<scalar_t>(val);
  }
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
// Backward bilinear 2D upsampling for NCHW: grid-stride loop over output
// gradient elements; each thread scatter-adds its gradient, weighted by the
// same four lambdas as the forward pass, into the contributing input
// positions via fastAtomicAdd (multiple outputs map to the same input).
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_out_frame(
    const size_t nc,
    const int height1,
    const int width1,
    const int height2,
    const int width2,
    const accscalar_t rheight,
    const accscalar_t rwidth,
    const bool align_corners,
    scalar_t* __restrict__ idata,
    const scalar_t* __restrict__ odata) {
  const size_t o_numel = nc * width2 * height2;
  const size_t i_numel = nc * width1 * height1;
  // Grid-stride loop: index advances by the total number of threads.
  for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel;
       index += blockDim.x * gridDim.x) {
    size_t index_temp = index;
    const int w2 = index_temp % width2; // 0:width2-1
    index_temp /= width2;
    const int h2 = index_temp % height2; // 0:height2-1
    // NOTE: this local `nc` (combined batch*channel slice index) shadows
    // the function parameter of the same name.
    const size_t nc = index_temp / height2;
    //
    // Fractional source row; integer base + interpolation weight.
    const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
        rheight, h2, align_corners, /*cubic=*/false);
    const int h1 = h1r;
    // +1 row unless already at the bottom edge (clamped neighbor).
    const int h1p = (h1 < height1 - 1) ? 1 : 0;
    const accscalar_t h1lambda = h1r - h1;
    const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
    //
    // Fractional source column, same decomposition.
    const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
        rwidth, w2, align_corners, /*cubic=*/false);
    const int w1 = w1r;
    // +1 column unless already at the right edge (clamped neighbor).
    const int w1p = (w1 < width1 - 1) ? 1 : 0;
    const accscalar_t w1lambda = w1r - w1;
    const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
    //
    // Distribute this output gradient over the four source pixels.
    const scalar_t d2val = odata[index];
    fastAtomicAdd(
        idata,
        idx(nc, height1, width1, h1, w1),
        i_numel,
        static_cast<scalar_t>(h0lambda * w0lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx(nc, height1, width1, h1, w1 + w1p),
        i_numel,
        static_cast<scalar_t>(h0lambda * w1lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx(nc, height1, width1, h1 + h1p, w1),
        i_numel,
        static_cast<scalar_t>(h1lambda * w0lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx(nc, height1, width1, h1 + h1p, w1 + w1p),
        i_numel,
        static_cast<scalar_t>(h1lambda * w1lambda * d2val),
        true);
  }
}
// Backward bilinear 2D upsampling for channels-last (NHWC): one thread per
// output gradient element; each thread scatter-adds its lambda-weighted
// gradient to the four contributing input positions via fastAtomicAdd,
// using idx_cl for NHWC offsets.
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_nhwc_out_frame(
    const size_t nc,
    const int height1,
    const int width1,
    const int height2,
    const int width2,
    const accscalar_t rheight,
    const accscalar_t rwidth,
    const bool align_corners,
    scalar_t* __restrict__ idata,
    const scalar_t* __restrict__ odata,
    const int channels,
    const size_t o_numel,
    const size_t i_numel) {

  const int index = blockIdx.x * blockDim.x + threadIdx.x;

  if (index < o_numel) {
    // Decompose the flat NHWC index: c is fastest, then w2, h2, n.
    const int c = index % channels;
    const int w2 = (index / channels) % width2;
    const int h2 = (index / channels / width2) % height2;
    const int n = index / channels / width2 / height2;

    // Fractional source row; integer base + interpolation weight.
    const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
        rheight, h2, align_corners, /*cubic=*/false);
    const int h1 = h1r;
    // +1 row unless already at the bottom edge (clamped neighbor).
    const int h1p = (h1 < height1 - 1) ? 1 : 0;
    const accscalar_t h1lambda = h1r - h1;
    const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;

    // Fractional source column, same decomposition.
    const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
        rwidth, w2, align_corners, /*cubic=*/false);
    const int w1 = w1r;
    // +1 column unless already at the right edge (clamped neighbor).
    const int w1p = (w1 < width1 - 1) ? 1 : 0;
    const accscalar_t w1lambda = w1r - w1;
    const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;

    // Distribute this output gradient over the four source pixels.
    const scalar_t d2val = odata[index];
    fastAtomicAdd(
        idata,
        idx_cl(n, h1, w1, c, height1, width1, channels),
        i_numel,
        static_cast<scalar_t>(h0lambda * w0lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx_cl(n, h1, w1 + w1p, c, height1, width1, channels),
        i_numel,
        static_cast<scalar_t>(h0lambda * w1lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx_cl(n, h1 + h1p, w1, c, height1, width1, channels),
        i_numel,
        static_cast<scalar_t>(h1lambda * w0lambda * d2val),
        true);
    fastAtomicAdd(
        idata,
        idx_cl(n, h1 + h1p, w1 + w1p, c, height1, width1, channels),
        i_numel,
        static_cast<scalar_t>(h1lambda * w1lambda * d2val),
        true);
  }
}
// Forward 2D bilinear upsampling on CUDA.
//
// Dispatches between two kernels:
//   * a channels-last (NHWC) kernel, used when the input suggests
//     ChannelsLast, the channel count is large enough to pay off
//     (heuristic: channels >= 16) and the output is contiguous in that
//     layout; it requires element counts below INT_MAX;
//   * a generic packed-accessor kernel for everything else.
//
// @param output       pre-allocated output tensor (written in place)
// @param input        4-D input tensor (N, C, H, W)
// @param output_size  {output_height, output_width}
// @param align_corners  corner-alignment convention for coordinate mapping
// @param scales_h/scales_w  optional explicit scale factors
static void upsample_bilinear2d_out_cuda_template(
    const Tensor& output,
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners,
    c10::optional<double> scales_h,
    c10::optional<double> scales_w) {
  TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
  checkAllSameGPU(__func__, {input_arg, output_arg});

  int output_height = output_size[0];
  int output_width = output_size[1];
  int channels = input.size(1);
  int input_height = input.size(2);
  int input_width = input.size(3);
  const auto memory_format = input.suggest_memory_format();

  // Identity resize: nothing to interpolate, just copy through.
  if (input.sizes() == output.sizes()) {
    output.copy_(input);
    return;
  }

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
    // heuristic: only use channels_last path when it's faster than the contiguous path
    if (memory_format == at::MemoryFormat::ChannelsLast && channels >= 16 && \
        output.is_contiguous(memory_format)) {
      using accscalar_t = at::acc_type<scalar_t, true>;

      // The NHWC kernel indexes with 32-bit arithmetic.
      TORCH_CHECK(input.numel() < std::numeric_limits<int>::max(),
        "upsample_bilinear2d_nhwc only supports input tensors with less than INT_MAX elements");
      TORCH_CHECK(output.numel() < std::numeric_limits<int>::max(),
        "upsample_bilinear2d_nhwc only supports output tensors with less than INT_MAX elements");

      const int batchsize = input.size(0);
      const int channels = input.size(1);
      const int height1 = input.size(2);
      const int width1 = input.size(3);
      const int height2 = output.size(2);
      const int width2 = output.size(3);

      // One thread per output element (across all batches and channels).
      const int num_kernels = output.numel();
      const int num_threads = std::min(
          at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);

      at::Tensor input_cl = input.contiguous(at::MemoryFormat::ChannelsLast);

      const scalar_t* idata = input_cl.data_ptr<scalar_t>();
      scalar_t* odata = output.data_ptr<scalar_t>();

      const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
          input_height, output_height, align_corners, scales_h);
      const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
          input_width, output_width, align_corners, scales_w);

      upsample_bilinear2d_nhwc_out_frame<scalar_t, accscalar_t>
        <<<ceil_div(num_kernels, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
          rheight, rwidth, align_corners,
          batchsize,
          channels,
          height1,
          width1,
          height2,
          width2,
          idata, odata,
          output.numel());
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    } else {
      // non-channels_last case, not necessarily contiguous
      // One thread per output spatial location; the kernel loops over
      // batch and channel internally.
      const int num_kernels = output_height * output_width;
      const int num_threads = std::min(
          at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
      cudaStream_t stream = at::cuda::getCurrentCUDAStream();
      using accscalar_t = at::acc_type<scalar_t, true>;

      auto idata = input.packed_accessor64<scalar_t, 4>();
      auto odata = output.packed_accessor64<scalar_t, 4>();

      const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
          input_height, output_height, align_corners, scales_h);
      const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
          input_width, output_width, align_corners, scales_w);

      upsample_bilinear2d_out_frame<scalar_t, accscalar_t>
          <<<ceil_div(num_kernels, num_threads),
             num_threads,
             0,
             stream>>>(
              num_kernels, rheight, rwidth, align_corners, idata, odata);
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  });
}
static void upsample_bilinear2d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg});
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
if (grad_input.numel() == 0) {
return;
}
const auto memory_format = grad_output_.suggest_memory_format();
// initialization to zero is required here. As we launch one thread per output
// element, and atomicAdd to input gradient. Given a sparse sampling case, our
// threads are not covering the whole input tensor.
grad_input.zero_();
const size_t num_kernels = nbatch * channels * output_height * output_width;
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (grad_output_.sizes() == grad_input.sizes()) {
grad_input.copy_(grad_output_);
return;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_output_.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] {
if (memory_format == at::MemoryFormat::ChannelsLast && channels >= 4 && \
grad_input.is_contiguous(memory_format)) {
using accscalar_t = at::acc_type<scalar_t, true>;
Tensor grad_output = grad_output_.contiguous(at::MemoryFormat::ChannelsLast);
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bilinear2d_backward_nhwc_out_frame<scalar_t, accscalar_t>
<<<ceil_div(num_kernels, static_cast<size_t>(num_threads)), num_threads, 0, stream>>>(
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata,
channels,
grad_output.numel(),
grad_input.numel());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
using accscalar_t = at::acc_type<scalar_t, true>;
// This is needed for non-contiguous tensors.
Tensor grad_input_c = grad_input.is_contiguous() ? grad_input : at::zeros(grad_input.sizes(), grad_input.options());
Tensor grad_output = grad_output_.contiguous();
auto idata = grad_input_c.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners, scales_h);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_w);
upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t>
<<<ceil_div(num_kernels, static_cast<size_t>(num_threads)),
num_threads,
0,
stream>>>(
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (!grad_input.is_contiguous()) {
grad_input.copy_(grad_input_c);
}
}
});
}
} // namespace
// Structured-kernel entry point for upsample_bilinear2d on CUDA; the
// signature is generated by TORCH_IMPL_FUNC and simply forwards to the
// template above (note the output-first argument order of the template).
TORCH_IMPL_FUNC(upsample_bilinear2d_out_cuda) (
    const Tensor& input,
    IntArrayRef output_size,
    bool align_corners,
    c10::optional<double> scales_h,
    c10::optional<double> scales_w,
    const Tensor& output) {
  upsample_bilinear2d_out_cuda_template(output, input, output_size, align_corners, scales_h, scales_w);
}
// Structured-kernel entry point for the backward pass; forwards to the
// backward template above.
TORCH_IMPL_FUNC(upsample_bilinear2d_backward_out_cuda) (
    const Tensor& grad_output,
    IntArrayRef output_size,
    IntArrayRef input_size,
    bool align_corners,
    c10::optional<double> scales_h,
    c10::optional<double> scales_w,
    const Tensor& grad_input) {
  // See Note [Writing Nondeterministic Operations]
  // Nondeterministic because of atomicAdd usage
  globalContext().alertNotDeterministic("upsample_bilinear2d_backward_out_cuda");
  upsample_bilinear2d_backward_out_cuda_template(
      grad_input, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
}
} // namespace native
} // namespace at | the_stack |
// Grid-stride loop: each thread starts at its global index and advances by
// the total number of threads in the grid, so any N is covered by any grid.
#define CUDA_1D_KERNEL_LOOP(i, n)                                 \
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// Threads per block used by every launch in this file.
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
// Cap on blocks per launch; the grid-stride loop absorbs the remainder.
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;

// Returns the 1-D grid size for N elements, capped at
// CAFFE_MAXIMUM_NUM_BLOCKS and clamped to at least 1 block: a grid
// dimension of 0 (e.g. for N == 0) is an invalid CUDA launch configuration.
inline int CAFFE_GET_BLOCKS(const int N) {
  return std::max(
      std::min((N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS,
               CAFFE_MAXIMUM_NUM_BLOCKS),
      1);
}
namespace mshadow {
namespace cuda {
// Backward pass of a group-wise softmax over 2-D (batch, class) input.
// For each sample, the gradient entries whose group id matches the group of
// the labelled class are rescaled; all other entries are left untouched.
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
                                       T* dstd,
                                       const T* labeld,
                                       const T* groupd,
                                       const int batch_size,
                                       const int label_size,
                                       const int group_step) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Row of group ids shared by this sample (one group row per group_step samples).
    const T* group_row = groupd + index / group_step * label_size;
    // Group id of this sample's ground-truth class.
    const T target_group = group_row[static_cast<int>(labeld[index])];
    T* grad_row = dstd + index * label_size;
    // Sum the entries belonging to the target group.
    T group_sum = T(0.0f);
    for (int k = 0; k < label_size; ++k) {
      if (group_row[k] == target_group)
        group_sum += grad_row[k];
    }
    // Rescale those same entries; the epsilon keeps the division finite.
    const T scale = (group_sum - T(1.0f)) / (group_sum + T(0.00001f));
    for (int k = 0; k < label_size; ++k) {
      if (group_row[k] == target_group)
        grad_row[k] *= scale;
    }
  }
}
// Host launcher: group softmax gradient for 2-D (batch, class) input,
// no ignore-label. dst is first initialized to src, then rescaled in place
// by the kernel.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
                             const Tensor<gpu, 2, DType> &src,
                             const Tensor<gpu, 1, DType> &label,
                             const Tensor<gpu, 2, DType> &group) {
  Copy(dst, src, src.stream_);
  DType *dstd = dst.dptr_;
  const DType *labeld = label.dptr_;
  const DType *groupd = group.dptr_;
  const int batch_size = src.size(0);
  const int label_size = src.size(1);
  const int group_step = batch_size / group.size(0);
  const int count = batch_size;  // one thread per sample
  // Validate the launch configuration that is actually used below.
  // (Previously an unrelated kMaxThreadsPerBlock/kMaxGridDim configuration
  // was validated while the launch used CAFFE_GET_BLOCKS/CAFFE_CUDA_NUM_THREADS.)
  dim3 dimGrid(CAFFE_GET_BLOCKS(count));
  dim3 dimBlock(CAFFE_CUDA_NUM_THREADS);
  CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
  cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  GroupSoftmaxGradKernel<DType><<<dimGrid, dimBlock, 0, stream>>>(
      count, dstd, labeld, groupd, batch_size, label_size, group_step);
}
// Group softmax gradient kernel (2-D) with an ignore label: samples whose
// label equals ignore_label receive an all-zero gradient row; all other
// samples are handled exactly like the no-ignore kernel above.
template <typename T>
__global__ void GroupSoftmaxGradKernel(const int nthreads,
                                       T* dstd,
                                       const T* labeld,
                                       const T* groupd,
                                       const int ignore_label,
                                       const int batch_size,
                                       const int label_size,
                                       const int group_step) {
  CUDA_1D_KERNEL_LOOP(idx, nthreads) {
    T* mdstd = dstd + idx * label_size;  // this sample's gradient row
    const int l = static_cast<int>(labeld[idx]);
    if (l == ignore_label) {
      // Ignored sample: contribute no gradient at all.
      for (int j = 0; j < label_size; ++j) {
        mdstd[j] = T(0.0f);
      }
    } else {
      // Group ids for this sample; one group row per group_step samples.
      const T* gd = groupd + idx / group_step * label_size;
      const T g = gd[l];  // group of the labelled class
      // Sum of gradient entries in the labelled class's group.
      T psum = T(0.0f);
      for (int j = 0; j < label_size; ++j) {
        if(g == gd[j])
          psum += mdstd[j];
      }
      // Rescale factor; epsilon keeps the division finite.
      psum = (psum - T(1.0f)) / (psum + T(0.00001f));
      for (int j = 0; j < label_size; ++j) {
        if(g == gd[j])
          mdstd[j] *= psum;
      }
    }
  }
}
// Host launcher: group softmax gradient for 2-D input with ignore-label
// support. dst is first initialized to src, then rescaled/zeroed in place.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
                             const Tensor<gpu, 2, DType> &src,
                             const Tensor<gpu, 1, DType> &label,
                             const Tensor<gpu, 2, DType> &group,
                             const DType &ignore_label) {
  Copy(dst, src, src.stream_);
  DType *dstd = dst.dptr_;
  const DType *labeld = label.dptr_;
  const DType *groupd = group.dptr_;
  const int batch_size = src.size(0);
  const int label_size = src.size(1);
  const int group_step = batch_size / group.size(0);
  const int count = batch_size;  // one thread per sample
  // Validate the launch configuration that is actually used below
  // (previously an unrelated kMaxThreadsPerBlock/kMaxGridDim configuration
  // was checked instead).
  dim3 dimGrid(CAFFE_GET_BLOCKS(count));
  dim3 dimBlock(CAFFE_CUDA_NUM_THREADS);
  CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
  cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  GroupSoftmaxGradKernel<DType><<<dimGrid, dimBlock, 0, stream>>>(
      count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, label_size, group_step);
}
// Group softmax gradient kernel for 3-D (n, c, d) input, no ignore label.
// One thread handles one (n, d) column; the class axis (c) is strided by
// depth_size in memory, hence the j * depth_size indexing below.
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
                                         T* dstd,
                                         const T* labeld,
                                         const T* groupd,
                                         const int batch_size,
                                         const int depth_size,
                                         const int label_size,
                                         const int group_step) {
  CUDA_1D_KERNEL_LOOP(idx, nthreads) {
    //3D shape: (n, c, d)
    const int bsi = idx / depth_size; // n
    const int dsi = idx % depth_size; // d
    // Group ids for this sample; one group row per group_step samples.
    const T* gd = groupd + bsi / group_step * label_size;
    const int l = static_cast<int>(labeld[idx]);
    const T g = gd[l];  // group of the labelled class
    T psum = T(0.0f);
    // Start of this (n, d) column; class j lives at offset j * depth_size.
    T* mdstd = dstd + bsi * label_size * depth_size + dsi;
    for (int j = 0; j < label_size; ++j) {
      if(g == gd[j])
        psum += mdstd[j * depth_size];
    }
    // Rescale factor; epsilon keeps the division finite.
    psum = (psum - T(1.0f)) / (psum + T(0.00001f));
    for (int j = 0; j < label_size; ++j) {
      if(g == gd[j])
        mdstd[j * depth_size] *= psum;
    }
  }
}
// Host launcher: group softmax gradient for 3-D (n, c, d) input, no
// ignore-label. One thread per (n, d) pair.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
                             const Tensor<gpu, 3, DType> &src,
                             const Tensor<gpu, 2, DType> &label,
                             const Tensor<gpu, 2, DType> &group) {
  Copy(dst, src, src.stream_);
  DType *dstd = dst.dptr_;
  const DType *labeld = label.dptr_;
  const DType *groupd = group.dptr_;
  const int batch_size = src.size(0);
  const int label_size = src.size(1);
  const int depth_size = src.size(2);
  const int group_step = batch_size / group.size(0);
  const int count = batch_size * depth_size;  // one thread per (n, d)
  // Validate the launch configuration that is actually used below
  // (previously an unrelated kMaxThreadsPerBlock/kMaxGridDim configuration
  // was checked instead).
  dim3 dimGrid(CAFFE_GET_BLOCKS(count));
  dim3 dimBlock(CAFFE_CUDA_NUM_THREADS);
  CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
  cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  GroupSoftmaxGrad3DKernel<DType><<<dimGrid, dimBlock, 0, stream>>>(
      count, dstd, labeld, groupd, batch_size, depth_size, label_size, group_step);
}
// Group softmax gradient kernel for 3-D (n, c, d) input with an ignore
// label: positions whose label equals ignore_label get a zero gradient
// column; otherwise identical to the 3-D kernel above.
template <typename T>
__global__ void GroupSoftmaxGrad3DKernel(const int nthreads,
                                         T* dstd,
                                         const T* labeld,
                                         const T* groupd,
                                         const int ignore_label,
                                         const int batch_size,
                                         const int depth_size,
                                         const int label_size,
                                         const int group_step) {
  CUDA_1D_KERNEL_LOOP(idx, nthreads) {
    //3D shape: (n, c, d)
    const int bsi = idx / depth_size; // n
    const int dsi = idx % depth_size; // d
    const int l = static_cast<int>(labeld[idx]);
    // Start of this (n, d) column; class j lives at offset j * depth_size.
    T* mdstd = dstd + bsi * label_size * depth_size + dsi;
    if (l == ignore_label) {
      // Ignored position: contribute no gradient at all.
      for (int j = 0; j < label_size; ++j) {
        mdstd[j * depth_size] = T(0.0f);
      }
    } else {
      // Group ids for this sample; one group row per group_step samples.
      const T* gd = groupd + bsi / group_step * label_size;
      const T g = gd[l];  // group of the labelled class
      T psum = T(0.0f);
      for (int j = 0; j < label_size; ++j) {
        if(g == gd[j])
          psum += mdstd[j * depth_size];
      }
      // Rescale factor; epsilon keeps the division finite.
      psum = (psum - T(1.0f)) / (psum + T(0.00001f));
      for (int j = 0; j < label_size; ++j) {
        if(g == gd[j])
          mdstd[j * depth_size] *= psum;
      }
    }
  }
}
// Host launcher: group softmax gradient for 3-D (n, c, d) input with
// ignore-label support. One thread per (n, d) pair.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
                             const Tensor<gpu, 3, DType> &src,
                             const Tensor<gpu, 2, DType> &label,
                             const Tensor<gpu, 2, DType> &group,
                             const DType &ignore_label) {
  Copy(dst, src, src.stream_);
  DType *dstd = dst.dptr_;
  const DType *labeld = label.dptr_;
  const DType *groupd = group.dptr_;
  const int batch_size = src.size(0);
  const int label_size = src.size(1);
  const int depth_size = src.size(2);
  const int group_step = batch_size / group.size(0);
  const int count = batch_size * depth_size;  // one thread per (n, d)
  // Validate the launch configuration that is actually used below
  // (previously an unrelated kMaxThreadsPerBlock/kMaxGridDim configuration
  // was checked instead).
  dim3 dimGrid(CAFFE_GET_BLOCKS(count));
  dim3 dimBlock(CAFFE_CUDA_NUM_THREADS);
  CheckLaunchParam(dimGrid, dimBlock, "GroupSoftmaxGrad");
  cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  GroupSoftmaxGrad3DKernel<DType><<<dimGrid, dimBlock, 0, stream>>>(
      count, dstd, labeld, groupd, static_cast<int>(ignore_label), batch_size, depth_size, label_size, group_step);
}
} // namespace cuda
// mshadow-level forwarder: 2-D gradient, no ignore label.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
                             const Tensor<gpu, 2, DType> &src,
                             const Tensor<gpu, 1, DType> &label,
                             const Tensor<gpu, 2, DType> &group) {
  cuda::GroupSoftmaxGrad(dst, src, label, group);
}
// mshadow-level forwarder: 2-D gradient with ignore label.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 2, DType> dst,
                             const Tensor<gpu, 2, DType> &src,
                             const Tensor<gpu, 1, DType> &label,
                             const Tensor<gpu, 2, DType> &group,
                             const DType &ignore_label) {
  cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
// mshadow-level forwarder: 3-D gradient, no ignore label.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
                             const Tensor<gpu, 3, DType> &src,
                             const Tensor<gpu, 2, DType> &label,
                             const Tensor<gpu, 2, DType> &group) {
  cuda::GroupSoftmaxGrad(dst, src, label, group);
}
// mshadow-level forwarder: 3-D gradient with ignore label.
template<typename DType>
inline void GroupSoftmaxGrad(Tensor<gpu, 3, DType> dst,
                             const Tensor<gpu, 3, DType> &src,
                             const Tensor<gpu, 2, DType> &label,
                             const Tensor<gpu, 2, DType> &group,
                             const DType &ignore_label) {
  cuda::GroupSoftmaxGrad(dst, src, label, group, ignore_label);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU factory specialization: instantiate GroupSoftmaxOutputOp for the
// runtime dtype via MSHADOW_REAL_TYPE_SWITCH. Caller owns the returned op.
template<>
Operator *CreateOp<gpu>(GroupSoftmaxOutputParam param, int dtype) {
  Operator *op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new GroupSoftmaxOutputOp<gpu, DType>(param);
  })
  return op;
}
} // namespace op
} // namespace mxnet | the_stack |
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <boost/numeric/odeint.hpp>
#include <boost/numeric/odeint/external/thrust/thrust.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
using namespace std;
using namespace boost::numeric::odeint;
//change this to float if your device does not support double computation
typedef double value_type;
//change this to host_vector< ... > of you want to run on CPU
typedef thrust::device_vector< value_type > state_type;
typedef thrust::device_vector< size_t > index_vector_type;
// typedef thrust::host_vector< value_type > state_type;
// typedef thrust::host_vector< size_t > index_vector_type;
const value_type sigma = 10.0;
const value_type b = 8.0 / 3.0;
//[ thrust_lorenz_parameters_define_simple_system
// ODE system for N Lorenz attractors integrated in parallel, each with its
// own parameter R (taken from m_beta). State layout is struct-of-arrays:
// x = [x_0..x_{N-1}, y_0..y_{N-1}, z_0..z_{N-1}], hence the offsets of
// m_N and 2 * m_N in the zip iterators below.
struct lorenz_system
{
    struct lorenz_functor
    {
        template< class T >
        __host__ __device__
        void operator()( T t ) const
        {
            // unpack the parameter we want to vary and the Lorenz variables
            value_type R = thrust::get< 3 >( t );
            value_type x = thrust::get< 0 >( t );
            value_type y = thrust::get< 1 >( t );
            value_type z = thrust::get< 2 >( t );
            // Classic Lorenz equations; slots 4-6 receive dx/dt, dy/dt, dz/dt.
            thrust::get< 4 >( t ) = sigma * ( y - x );
            thrust::get< 5 >( t ) = R * x - y - x * z;
            thrust::get< 6 >( t ) = -b * z + x * y ;
        }
    };

    lorenz_system( size_t N , const state_type &beta )
    : m_N( N ) , m_beta( beta ) { }

    // odeint system interface: fills dxdt for the whole ensemble in one
    // thrust::for_each over zipped (x, y, z, R, dx, dy, dz) tuples.
    template< class State , class Deriv >
    void operator()(  const State &x , Deriv &dxdt , value_type t ) const
    {
        thrust::for_each(
                thrust::make_zip_iterator( thrust::make_tuple(
                        boost::begin( x ) ,
                        boost::begin( x ) + m_N ,
                        boost::begin( x ) + 2 * m_N ,
                        m_beta.begin() ,
                        boost::begin( dxdt ) ,
                        boost::begin( dxdt ) + m_N ,
                        boost::begin( dxdt ) + 2 * m_N  ) ) ,
                thrust::make_zip_iterator( thrust::make_tuple(
                        boost::begin( x ) + m_N ,
                        boost::begin( x ) + 2 * m_N ,
                        boost::begin( x ) + 3 * m_N ,
                        m_beta.begin() ,
                        boost::begin( dxdt ) + m_N ,
                        boost::begin( dxdt ) + 2 * m_N ,
                        boost::begin( dxdt ) + 3 * m_N  ) ) ,
                lorenz_functor() );
    }

    size_t m_N;              // number of parallel Lorenz systems
    const state_type &m_beta; // per-system parameter R (device vector)
};
//]
// Lorenz ensemble plus linearized perturbation dynamics, used for Lyapunov
// exponent estimation. State layout per ensemble (struct-of-arrays):
// [x, y, z, dx, dy, dz], each segment of length m_N, hence the offsets
// m_N .. 6 * m_N below. The perturbation (dx, dy, dz) evolves under the
// Jacobian of the Lorenz system evaluated at (x, y, z).
struct lorenz_perturbation_system
{
    struct lorenz_perturbation_functor
    {
        template< class T >
        __host__ __device__
        void operator()( T t ) const
        {
            // Tuple layout: get<0> = (x,y,z,dx,dy,dz), get<1> = R,
            // get<2> = corresponding derivative slots.
            value_type R = thrust::get< 1 >( t );
            value_type x = thrust::get< 0 >( thrust::get< 0 >( t ) );
            value_type y = thrust::get< 1 >( thrust::get< 0 >( t ) );
            value_type z = thrust::get< 2 >( thrust::get< 0 >( t ) );
            value_type dx = thrust::get< 3 >( thrust::get< 0 >( t ) );
            value_type dy = thrust::get< 4 >( thrust::get< 0 >( t ) );
            value_type dz = thrust::get< 5 >( thrust::get< 0 >( t ) );
            // Lorenz equations for the base trajectory.
            thrust::get< 0 >( thrust::get< 2 >( t ) ) = sigma * ( y - x );
            thrust::get< 1 >( thrust::get< 2 >( t ) ) = R * x - y - x * z;
            thrust::get< 2 >( thrust::get< 2 >( t ) ) = -b * z + x * y ;
            // Tangent-space (variational) equations for the perturbation.
            thrust::get< 3 >( thrust::get< 2 >( t ) ) = sigma * ( dy - dx );
            thrust::get< 4 >( thrust::get< 2 >( t ) ) = ( R - z ) * dx - dy - x * dz;
            thrust::get< 5 >( thrust::get< 2 >( t ) ) = y * dx + x * dy - b * dz;
        }
    };

    lorenz_perturbation_system( size_t N , const state_type &beta )
    : m_N( N ) , m_beta( beta ) { }

    // odeint system interface over zipped ((state), R, (deriv)) tuples.
    template< class State , class Deriv >
    void operator()(  const State &x , Deriv &dxdt , value_type t ) const
    {
        thrust::for_each(
                thrust::make_zip_iterator( thrust::make_tuple(
                        thrust::make_zip_iterator( thrust::make_tuple(
                                boost::begin( x ) ,
                                boost::begin( x ) + m_N ,
                                boost::begin( x ) + 2 * m_N ,
                                boost::begin( x ) + 3 * m_N ,
                                boost::begin( x ) + 4 * m_N ,
                                boost::begin( x ) + 5 * m_N ) ) ,
                        m_beta.begin() ,
                        thrust::make_zip_iterator( thrust::make_tuple(
                                boost::begin( dxdt ) ,
                                boost::begin( dxdt ) + m_N ,
                                boost::begin( dxdt ) + 2 * m_N ,
                                boost::begin( dxdt ) + 3 * m_N ,
                                boost::begin( dxdt ) + 4 * m_N ,
                                boost::begin( dxdt ) + 5 * m_N ) )
                        ) ) ,
                thrust::make_zip_iterator( thrust::make_tuple(
                        thrust::make_zip_iterator( thrust::make_tuple(
                                boost::begin( x ) + m_N ,
                                boost::begin( x ) + 2 * m_N ,
                                boost::begin( x ) + 3 * m_N ,
                                boost::begin( x ) + 4 * m_N ,
                                boost::begin( x ) + 5 * m_N ,
                                boost::begin( x ) + 6 * m_N ) ) ,
                        m_beta.begin() ,
                        thrust::make_zip_iterator( thrust::make_tuple(
                                boost::begin( dxdt ) + m_N ,
                                boost::begin( dxdt ) + 2 * m_N ,
                                boost::begin( dxdt ) + 3 * m_N ,
                                boost::begin( dxdt ) + 4 * m_N ,
                                boost::begin( dxdt ) + 5 * m_N ,
                                boost::begin( dxdt ) + 6 * m_N ) )
                        ) ) ,
                lorenz_perturbation_functor() );
    }

    size_t m_N;              // number of parallel systems
    const state_type &m_beta; // per-system parameter R (device vector)
};
struct lyap_observer
{
//[thrust_lorenz_parameters_observer_functor
struct lyap_functor
{
template< class T >
__host__ __device__
void operator()( T t ) const
{
value_type &dx = thrust::get< 0 >( t );
value_type &dy = thrust::get< 1 >( t );
value_type &dz = thrust::get< 2 >( t );
value_type norm = sqrt( dx * dx + dy * dy + dz * dz );
dx /= norm;
dy /= norm;
dz /= norm;
thrust::get< 3 >( t ) += log( norm );
}
};
//]
lyap_observer( size_t N , size_t every = 100 )
: m_N( N ) , m_lyap( N ) , m_every( every ) , m_count( 0 )
{
thrust::fill( m_lyap.begin() , m_lyap.end() , 0.0 );
}
template< class Lyap >
void fill_lyap( Lyap &lyap )
{
thrust::copy( m_lyap.begin() , m_lyap.end() , lyap.begin() );
for( size_t i=0 ; i<lyap.size() ; ++i )
lyap[i] /= m_t_overall;
}
template< class State >
void operator()( State &x , value_type t )
{
if( ( m_count != 0 ) && ( ( m_count % m_every ) == 0 ) )
{
thrust::for_each(
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 3 * m_N ,
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
m_lyap.begin() ) ) ,
thrust::make_zip_iterator( thrust::make_tuple(
boost::begin( x ) + 4 * m_N ,
boost::begin( x ) + 5 * m_N ,
boost::begin( x ) + 6 * m_N ,
m_lyap.end() ) ) ,
lyap_functor() );
clog << t << "\n";
}
++m_count;
m_t_overall = t;
}
size_t m_N;
state_type m_lyap;
size_t m_every;
size_t m_count;
value_type m_t_overall;
};
const size_t N = 1024*2;
const value_type dt = 0.01;
// Driver: sweeps the Lorenz parameter R over N values in parallel on the
// GPU, discards a transient, then integrates the coupled base + tangent
// system for a long time while the observer accumulates the largest
// Lyapunov exponent per parameter value. Prints "beta <tab> lambda" pairs.
// NOTE(review): "arc" is presumably a typo for argc; both parameters are
// unused.
int main( int arc , char* argv[] )
{
    // Report CUDA driver/runtime versions (sanity check for the device setup).
    int driver_version , runtime_version;
    cudaDriverGetVersion( &driver_version );
    cudaRuntimeGetVersion ( &runtime_version );
    cout << driver_version << "\t" << runtime_version << endl;

    //[ thrust_lorenz_parameters_define_beta
    // Linearly spaced parameter values in [beta_min, beta_max].
    vector< value_type > beta_host( N );
    const value_type beta_min = 0.0 , beta_max = 56.0;
    for( size_t i=0 ; i<N ; ++i )
        beta_host[i] = beta_min + value_type( i ) * ( beta_max - beta_min ) / value_type( N - 1 );
    state_type beta = beta_host;
    //]

    //[ thrust_lorenz_parameters_integration
    // State layout: [x, y, z, dx, dy, dz], each segment of length N.
    state_type x( 6 * N );

    // initialize x,y,z
    thrust::fill( x.begin() , x.begin() + 3 * N , 10.0 );
    // initial dx
    thrust::fill( x.begin() + 3 * N , x.begin() + 4 * N , 1.0 );
    // initialize dy,dz
    thrust::fill( x.begin() + 4 * N , x.end() , 0.0 );

    // create error stepper, can be used with make_controlled or make_dense_output
    typedef runge_kutta_dopri5< state_type , value_type , state_type , value_type > stepper_type;

    lorenz_system lorenz( N , beta );
    lorenz_perturbation_system lorenz_perturbation( N , beta );
    lyap_observer obs( N , 1 );

    // calculate transients: integrate only the base (x,y,z) part so the
    // trajectories settle onto the attractor before measuring.
    integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz , std::make_pair( x.begin() , x.begin() + 3 * N ) , 0.0 , 10.0 , dt );

    // calculate the Lyapunov exponents -- the main loop
    double t = 0.0;
    while( t < 10000.0 )
    {
        // Integrate base + perturbation for one time unit, then let the
        // observer renormalize and accumulate log-growth.
        integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() ) , lorenz_perturbation , x , t , t + 1.0 , 0.1 );
        t += 1.0;
        obs( x , t );
    }

    vector< value_type > lyap( N );
    obs.fill_lyap( lyap );

    for( size_t i=0 ; i<N ; ++i )
        cout << beta_host[i] << "\t" << lyap[i] << "\n";
    //]

    return 0;
}
#pragma once
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/oprtr/oprtr.cuh>
#include <gunrock/app/rw/rw_problem.cuh>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
namespace gunrock {
namespace app {
namespace rw {
/**
* @brief Speciflying parameters for RW Enactor
* @param parameters The util::Parameter<...> structure holding all parameter
* info \return cudaError_t error message(s), if any
*/
// Registers RW-enactor command-line parameters by delegating to the base
// enactor's registration.
// Fix: the parameter declaration had been corrupted by HTML-entity
// mojibake ("&para" rendered as the pilcrow character), breaking the
// reference declaration `util::Parameters &parameters`.
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(app::UseParameters_enactor(parameters));
  return retval;
}
/**
* @brief defination of RW iteration loop
* @tparam EnactorT Type of enactor
*/
template <typename EnactorT>
struct RWIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> {
  typedef typename EnactorT::VertexT VertexT;
  typedef typename EnactorT::SizeT SizeT;
  typedef typename EnactorT::ValueT ValueT;
  typedef typename EnactorT::Problem::GraphT::CsrT CsrT;
  typedef typename EnactorT::Problem::GraphT::GpT GpT;

  typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop;

  RWIterationLoop() : BaseIterationLoop() {}

  /**
   * @brief Core computation of RW, one iteration. Each entry of the
   * frontier holds one walker's current vertex; depending on walk_mode the
   * walker advances to a uniformly random neighbor (0), the neighbor with
   * the largest node value (1), or a neighbor sampled proportionally to
   * node value (2).
   * @param[in] peer_ Which GPU peers to work on, 0 means local
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Core(int peer_ = 0) {
    // --
    // Alias variables

    auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];

    auto &enactor_slice =
        this->enactor
            ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];

    auto &enactor_stats = enactor_slice.enactor_stats;
    auto &graph = data_slice.sub_graph[0];
    auto &frontier = enactor_slice.frontier;
    auto &oprtr_parameters = enactor_slice.oprtr_parameters;
    auto &retval = enactor_stats.retval;
    auto &iteration = enactor_stats.iteration;

    // problem specific data alias:
    auto &walks = data_slice.walks;
    auto &rand = data_slice.rand;
    auto &walk_length = data_slice.walk_length;
    auto &walks_per_node = data_slice.walks_per_node;
    auto &walk_mode = data_slice.walk_mode;
    auto &store_walks = data_slice.store_walks;
    auto &gen = data_slice.gen;
    auto &neighbors_seen = data_slice.neighbors_seen;
    auto &steps_taken = data_slice.steps_taken;

    if (walk_mode == 0) {  // uniform random walk
      auto uniform_rw_op =
          [graph, walks, rand, iteration, walk_length, store_walks,
           neighbors_seen, steps_taken] __host__
          __device__(VertexT * v, const SizeT &i) {
            SizeT write_idx =
                (i * walk_length) + iteration;  // Write location in RW array
            if (store_walks) {
              walks[write_idx] = v[i];  // record current position in walk
            }

            // Walker already terminated (e.g. reached a sink earlier).
            if (!util::isValid(v[i])) {
              return;
            }

            if (iteration < walk_length - 1) {
              SizeT num_neighbors = graph.GetNeighborListLength(v[i]);
              if (num_neighbors == 0) {
                // Sink vertex: mark the walker as terminated.
                v[i] = util::PreDefinedValues<VertexT>::InvalidValue;
                return;
              }

              // Randomly sample neighbor
              SizeT neighbor_list_offset = graph.GetNeighborListOffset(v[i]);
              SizeT rand_offset =
                  (SizeT)round(0.5 + num_neighbors * rand[i]) - 1;
              VertexT neighbor =
                  graph.GetEdgeDest(neighbor_list_offset + rand_offset);

              v[i] = neighbor;  // Replace vertex w/ neighbor in queue
              steps_taken[i]++;
              neighbors_seen[i] += (uint64_t)
                  num_neighbors;  // Record number of neighbors we've seen
            }
          };

      // Generate per-walker uniforms on the same stream as the ForAll
      // below so the kernel never reads stale randoms.
      curandSetStream(gen, oprtr_parameters.stream);
      curandGenerateUniform(gen, rand.GetPointer(util::DEVICE),
                            graph.nodes * walks_per_node);
      GUARD_CU(frontier.V_Q()->ForAll(uniform_rw_op, frontier.queue_length,
                                      util::DEVICE, oprtr_parameters.stream));

    } else if (walk_mode ==
               1) {  // greedy: walk to neighbor w/ maximum node value
      auto greedy_rw_op =
          [graph, walks, iteration, walk_length, store_walks, neighbors_seen,
           steps_taken] __host__
          __device__(VertexT * v, const SizeT &i) {
            SizeT write_idx =
                (i * walk_length) + iteration;  // Write location in RW array
            if (store_walks) {
              walks[write_idx] = v[i];  // record current position in walk
            }

            if (!util::isValid(v[i])) {
              return;
            }

            if (iteration < walk_length - 1) {
              SizeT num_neighbors = graph.GetNeighborListLength(v[i]);
              if (num_neighbors == 0) {
                v[i] = util::PreDefinedValues<VertexT>::InvalidValue;
                return;
              }

              SizeT neighbor_list_offset = graph.GetNeighborListOffset(v[i]);

              // Find neighbor with max value
              VertexT max_neighbor_id =
                  graph.GetEdgeDest(neighbor_list_offset + 0);
              ValueT max_neighbor_val = graph.node_values[max_neighbor_id];
              for (SizeT offset = 1; offset < num_neighbors; offset++) {
                VertexT neighbor =
                    graph.GetEdgeDest(neighbor_list_offset + offset);
                ValueT neighbor_val = graph.node_values[neighbor];
                if (neighbor_val > max_neighbor_val) {
                  max_neighbor_id = neighbor;
                  max_neighbor_val = neighbor_val;
                }
              }

              v[i] = max_neighbor_id;  // Replace vertex w/ neighbor in queue
              steps_taken[i]++;
              neighbors_seen[i] += (uint64_t)
                  num_neighbors;  // Record number of neighbors we've seen
            }
          };

      GUARD_CU(frontier.V_Q()->ForAll(greedy_rw_op, frontier.queue_length,
                                      util::DEVICE, oprtr_parameters.stream));

    } else if (walk_mode == 2) {
      // Stochastic greedy: sample a neighbor with probability proportional
      // to its node value.
      // Fix: set the generator's stream before generating, matching the
      // uniform-walk branch; previously the randoms were produced on the
      // generator's default stream and could race with the ForAll below,
      // which consumes `rand` on oprtr_parameters.stream.
      curandSetStream(gen, oprtr_parameters.stream);
      curandGenerateUniform(gen, rand.GetPointer(util::DEVICE),
                            graph.nodes * walks_per_node);
      auto stochastic_greedy_rw_op =
          [graph, walks, rand, iteration, walk_length, store_walks,
           neighbors_seen, steps_taken] __host__
          __device__(VertexT * v, const SizeT &i) {
            SizeT write_idx =
                (i * walk_length) + iteration;  // Write location in RW array
            if (store_walks) {
              walks[write_idx] = v[i];  // record current position in walk
            }

            if (!util::isValid(v[i])) {
              return;
            }

            if (iteration < walk_length - 1) {
              SizeT num_neighbors = graph.GetNeighborListLength(v[i]);
              if (num_neighbors == 0) {
                v[i] = util::PreDefinedValues<VertexT>::InvalidValue;
                return;
              }

              SizeT neighbor_list_offset = graph.GetNeighborListOffset(v[i]);

              // First pass: total score mass over all neighbors.
              VertexT neighbor, next_neighbor;
              ValueT sum_neighbor_scores = 0;
              for (SizeT offset = 0; offset < num_neighbors; offset++) {
                neighbor = graph.GetEdgeDest(neighbor_list_offset + offset);
                sum_neighbor_scores += graph.node_values[neighbor];
              }

              // Second pass: inverse-CDF sampling with threshold r.
              ValueT r = rand[i] * sum_neighbor_scores;
              ValueT acc = 0;
              for (SizeT offset = 0; offset < num_neighbors; offset++) {
                neighbor = graph.GetEdgeDest(neighbor_list_offset + offset);
                ValueT neighbor_score = graph.node_values[neighbor];
                acc += neighbor_score;
                if (r < acc) {
                  next_neighbor = neighbor;
                  break;
                }
              }

              v[i] = next_neighbor;  // Replace vertex w/ neighbor in queue
              steps_taken[i]++;
              neighbors_seen[i] += (uint64_t)
                  num_neighbors;  // Record number of neighbors we've seen
            }
          };

      GUARD_CU(frontier.V_Q()->ForAll(stochastic_greedy_rw_op,
                                      frontier.queue_length, util::DEVICE,
                                      oprtr_parameters.stream));
    } else {
      printf("ERROR: unknown walk_mode=%d\n", walk_mode);
    }

    return retval;
  }

  cudaError_t Compute_OutputLength(int peer_) {
    return cudaSuccess;  // No need to load balance or get output size
  }

  cudaError_t Check_Queue_Size(int peer_) {
    return cudaSuccess;  // no need to check queue size for RW
  }

  // The walk terminates after exactly walk_length iterations.
  bool Stop_Condition(int gpu_num = 0) {
    auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
    auto &enactor_slices = this->enactor->enactor_slices;
    auto iter = enactor_slices[0].enactor_stats.iteration;
    return iter == data_slice.walk_length;
  }

  /**
   * @brief Routine to combine received data and local data
   * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each
   * transmition item, typed VertexT
   * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each
   * transmition item, typed ValueT
   * @param received_length The numver of transmition items received
   * @param[in] peer_ which peer GPU the data came from
   * \return cudaError_t error message(s), if any
   */
  template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
  cudaError_t ExpandIncoming(SizeT &received_length, int peer_) {
    // ================ INCOMPLETE TEMPLATE - MULTIGPU ====================

    auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
    auto &enactor_slice =
        this->enactor
            ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
    // auto iteration = enactor_slice.enactor_stats.iteration;

    // TODO: add problem specific data alias here, e.g.:
    // auto         &distances          =   data_slice.distances;

    auto expand_op = [
                         // TODO: pass data used by the lambda, e.g.:
                         // distances
    ] __host__ __device__(VertexT & key, const SizeT &in_pos,
                          VertexT *vertex_associate_ins,
                          ValueT *value__associate_ins) -> bool {
      // TODO: fill in the lambda to combine received and local data, e.g.:
      // ValueT in_val  = value__associate_ins[in_pos];
      // ValueT old_val = atomicMin(distances + key, in_val);
      // if (old_val <= in_val)
      //     return false;
      return true;
    };

    cudaError_t retval =
        BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES,
                                                       NUM_VALUE__ASSOCIATES>(
            received_length, peer_, expand_op);
    return retval;
  }
};  // end of RWIteration
/**
* @brief Template enactor class.
* @tparam _Problem Problem type we process on
* @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor
* @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor
*/
template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
          unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
class Enactor
    : public EnactorBase<
          typename _Problem::GraphT, typename _Problem::GraphT::VertexT,
          typename _Problem::GraphT::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> {
 public:
  typedef _Problem Problem;
  typedef typename Problem::SizeT SizeT;
  typedef typename Problem::VertexT VertexT;
  typedef typename Problem::GraphT GraphT;
  typedef typename GraphT::VertexT LabelT;
  typedef typename GraphT::ValueT ValueT;
  typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
      BaseEnactor;
  typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT;
  typedef RWIterationLoop<EnactorT> IterationT;

  // Non-owning pointer to the problem; set in Init(), cleared in Release().
  Problem *problem;
  // Per-GPU iteration loops; owned by this enactor, freed in Release().
  IterationT *iterations;

  /**
   * @brief RW constructor
   */
  Enactor() : BaseEnactor("RW"), problem(NULL), iterations(NULL) {
    // BUGFIX: `iterations` was previously left uninitialized, so calling
    // Release() before Init() ran delete[] on an indeterminate pointer (UB).
    // RW exchanges no vertex associates and one value associate per item.
    this->max_num_vertex_associates = 0;
    this->max_num_value__associates = 1;
  }

  /**
   * @brief RW destructor
   */
  virtual ~Enactor() { /*Release();*/
  }

  /*
   * @brief Releasing allocated memory space
   * @param target The location to release memory from
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Release(util::Location target = util::LOCATION_ALL) {
    cudaError_t retval = cudaSuccess;
    GUARD_CU(BaseEnactor::Release(target));
    // delete[] on NULL is a no-op, so this is safe even before Init().
    delete[] iterations;
    iterations = NULL;
    problem = NULL;
    return retval;
  }

  /**
   * @brief Initialize the problem.
   * @param[in] problem The problem object.
   * @param[in] target Target location of data
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) {
    cudaError_t retval = cudaSuccess;
    this->problem = &problem;

    // Lazy initialization
    // !! POSSIBLE BUG: @sgpyc suggested changing the 2 to 1, but that causes
    // strange behavior, where V_Q does not get initialized properly.
    GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false));
    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
      auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0];
      auto &graph = problem.sub_graphs[gpu];
      GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges,
                                               this->queue_factors));
    }

    iterations = new IterationT[this->num_gpus];
    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      GUARD_CU(iterations[gpu].Init(this, gpu));
    }

    GUARD_CU(this->Init_Threads(
        this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>)));
    return retval;
  }

  /**
   * @brief one run of RW, to be called within GunrockThread
   * @param thread_data Data for the CPU thread
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Run(ThreadSlice &thread_data) {
    gunrock::app::Iteration_Loop<0, 0, IterationT>(
        thread_data, iterations[thread_data.thread_num]);
    return cudaSuccess;
  }

  /**
   * @brief Reset enactor
   * @param[in] walks_per_node Number of random walks launched from each node
   * @param[in] target Target location of data
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Reset(int walks_per_node, util::Location target = util::DEVICE) {
    typedef typename GraphT::GpT GpT;
    cudaError_t retval = cudaSuccess;
    GUARD_CU(BaseEnactor::Reset(target));

    SizeT num_nodes = this->problem->data_slices[0][0].sub_graph[0].nodes;
    for (int gpu = 0; gpu < this->num_gpus; gpu++) {
      if (this->num_gpus == 1) {
        this->thread_slices[gpu].init_size = num_nodes * walks_per_node;
        for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
          auto &frontier =
              this->enactor_slices[gpu * this->num_gpus + peer_].frontier;
          frontier.queue_length = (peer_ == 0) ? num_nodes * walks_per_node : 0;
          if (peer_ == 0) {
            // Seed the frontier with every vertex id, repeated
            // walks_per_node times (walk i starts at node i % num_nodes).
            util::Array1D<SizeT, VertexT> tmp;
            tmp.Allocate(num_nodes * walks_per_node, target | util::HOST);
            for (SizeT i = 0; i < num_nodes * walks_per_node; ++i) {
              tmp[i] = (VertexT)i % num_nodes;
            }
            GUARD_CU(tmp.Move(util::HOST, target));

            GUARD_CU(frontier.V_Q()->ForEach(
                tmp,
                [] __host__ __device__(VertexT & v, VertexT & i) { v = i; },
                num_nodes * walks_per_node, target, 0));
            tmp.Release();
          }
        }
      } else {
        // MULTIGPU INCOMPLETE
      }
    }
    GUARD_CU(BaseEnactor::Sync());
    return retval;
  }

  /**
   * @brief Enacts a RW computing on the specified graph.
   * \return cudaError_t error message(s), if any
   */
  cudaError_t Enact() {
    cudaError_t retval = cudaSuccess;
    GUARD_CU(this->Run_Threads(this));
    util::PrintMsg("GPU RW Done.", this->flag & Debug);
    return retval;
  }
};
} // namespace rw
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
namespace AggMIS {
namespace Aggregation {
namespace Kernels {
// One thread per node: an unaggregated node tallies the aggregate labels of
// its already-assigned neighbors (partIn) and joins the aggregate that the
// most neighbors belong to (written to partOut). Nodes with no assigned
// neighbors remain -1; the caller iterates until all nodes are aggregated.
// NOTE(review): at most 10 distinct neighboring aggregates are tracked per
// node; any further distinct candidate is silently ignored.
__global__ void allocateNodesKernel(int size,
                                    int *adjIndexes,
                                    int *adjacency,
                                    int *partIn,
                                    int *partOut,
                                    int *aggregated) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size)
  {
    if (aggregated[idx] == 0)
    {
      // Adjacency-list range for this node (CSR-style offsets).
      int start = adjIndexes[idx];
      int end = adjIndexes[idx + 1];
      // Storage for possible aggregations.
      int candidates[10];
      int candidateCounts[10];
      for (int i = 0; i < 10; i++)
      {
        candidates[i] = -1;
        candidateCounts[i] = 0;
      }
      // Going through neighbors to aggregate:
      for (int i = start; i < end; i++)
      {
        int candidate = partIn[adjacency[i]];
        if (candidate != -1)
        {
          // Insert into the first free slot, or bump the matching count.
          for (int j = 0; j < 10 && candidate != -1; j++)
          {
            if (candidates[j] == -1)
            {
              candidates[j] = candidate;
              candidateCounts[j] = 1;
              // BUGFIX: mark the candidate as handled once stored. Without
              // this, the same value cascaded into every remaining free slot,
              // which then blocked all later distinct aggregates from being
              // recorded — the node always joined the first-seen aggregate
              // instead of the most adjacent one.
              candidate = -1;
            } else
            {
              if (candidates[j] == candidate)
              {
                candidateCounts[j] += 1;
                candidate = -1;
              }
            }
          }
        }
      }
      // Finding the most adjacent aggregate and adding node to it:
      int addTo = candidates[0];
      int count = candidateCounts[0];
      for (int i = 1; i < 10; i++)
      {
        if (candidateCounts[i] > count)
        {
          count = candidateCounts[i];
          addTo = candidates[i];
        }
      }
      partOut[idx] = addTo;
      if (addTo != -1)
      {
        aggregated[idx] = 1;
      }
    }
  }
}
// Jacobi-style flood fill restricted to aggregate interiors: each node takes
// the maximum value held by any neighbor within the same aggregate. Whenever
// a value increases, incomplete[0] is set so the caller can iterate to a
// fixed point; a connected aggregate converges to a single value.
__global__ void checkAggregationFillAggregates(int size,
                                               int *adjIndices,
                                               int *adjacency,
                                               int* aggregation,
                                               int* valuesIn,
                                               int* valuesOut,
                                               int* incomplete) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < size)
  {
    // Find the currently marked distance
    int currentVal = valuesIn[idx];
    int currentAgg = aggregation[idx];
    // Checking if any neighbors have a better value
    int start = adjIndices[idx];
    int end = adjIndices[idx + 1];
    for (int i = start; i < end; i++)
    {
      int neighborAgg = aggregation[adjacency[i]];
      int neighborVal = valuesIn[adjacency[i]];
      // Only propagate within the same aggregate; take the larger value.
      if (neighborAgg == currentAgg && neighborVal > currentVal)
      {
        currentVal = neighborVal;
        // Benign race: many threads may write 1 concurrently, which is fine
        // since the flag is only ever raised here and reset by the host.
        incomplete[0] = 1;
      }
    }
    // Write out the distance to the output vector:
    valuesOut[idx] = currentVal;
  }
}
}
// Functions
// Grows aggregates outward from the given root nodes until every node of the
// graph belongs to one. `roots` is a 0/1 indicator vector; root i receives
// aggregate id (#roots before it), non-roots start at -1 and are attached by
// repeated rounds of allocateNodesKernel. Returns a newly allocated vector of
// per-node aggregate ids; the CALLER OWNS and must delete it.
// NOTE(review): the loop terminates only if every node can eventually reach
// an aggregated neighbor — assumes each connected component contains at least
// one root; confirm at call sites.
AggMIS::Types::IntVector_d* AggregateToNearest(AggMIS::Types::Graph_d &graph,
                                               AggMIS::Types::IntVector_d &roots) {
  // Create temp vectors to work with
  int size = graph.Size();
  AggMIS::Types::IntVector_d *aggregated = new AggMIS::Types::IntVector_d(roots);
  AggMIS::Types::IntVector_d *partIn = new AggMIS::Types::IntVector_d(roots);
  // Prefix sum to number aggregate roots:
  thrust::inclusive_scan(partIn->begin(), partIn->end(), partIn->begin());
  // Transform non root nodes to -1
  thrust::transform(partIn->begin(), partIn->end(), aggregated->begin(), partIn->begin(), Functors::NumberParts());
  AggMIS::Types::IntVector_d *partOut = new AggMIS::Types::IntVector_d(*partIn);
  // Preparing to call aggregate kernel:
  int *partIn_d = thrust::raw_pointer_cast(partIn->data());          // Pointer to partIn vector
  int *partOut_d = thrust::raw_pointer_cast(partOut->data());        // Pointer to partOut vector
  int *adjIndexes_d = thrust::raw_pointer_cast(graph.indices->data()); // Pointer to adjacency indexes
  int *adjacency_d = thrust::raw_pointer_cast(graph.adjacency->data()); // Pointer to adjacency
  int *aggregated_d = thrust::raw_pointer_cast(aggregated->data());  // Pointer to aggregated
  bool complete = false;  // Indicates whether all nodes are aggregated
  // Figuring out block sizes for kernel call:
  int blockSize = 256;
  int nBlocks = size / blockSize + (size%blockSize == 0 ? 0 : 1);
  while (!complete)
  {
    // Allocating nodes
    Kernels::allocateNodesKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d);
    // Copying partOut to partIn
    thrust::copy(partOut->begin(), partOut->end(), partIn->begin());
    // Checking if done
    int unallocatedNodes = thrust::count(aggregated->begin(), aggregated->end(), 0);
    complete = unallocatedNodes == 0;
  }
  // Cleaning up
  aggregated->clear();
  partOut->clear();
  delete aggregated;
  delete partOut;
  return partIn;
}
// Validates an aggregation labeling of the graph. Two properties are checked:
// (1) aggregate ids form the contiguous range 0..k-1, and (2) every aggregate
// induces a connected subgraph (verified by flood-filling node indices within
// each aggregate and counting distinct (fill value, aggregate) pairs).
// Returns true iff no errors were found; with `verbose`, prints diagnostics.
bool IsValidAggregation(AggMIS::Types::Graph_d &graph,
                        AggMIS::Types::IntVector_d &aggregation,
                        bool verbose) {
  // Counter for number of errors found
  int errors = 0;
  // Check to make sure that the aggregate id's are sequential
  AggMIS::Types::IntVector_d scratch(aggregation);
  thrust::sort(scratch.begin(), scratch.end());
  int newLength = thrust::unique(scratch.begin(), scratch.end()) - scratch.begin();
  scratch.resize(newLength);
  if (scratch[0] != 0 || scratch[scratch.size() - 1] != scratch.size() - 1)
  {
    if (verbose) {
      printf("Error found in aggregation: improper aggregate indices:\n");
      int firstId = scratch[0];
      int lastId = scratch[scratch.size() - 1];
      int count = scratch.size();
      printf("\tFirst index is %d, last index is %d, there are %d unique id's\n", firstId, lastId, count);
    }
    errors++;
    return false;
  }
  // Check to make sure each aggregate is a connected component
  AggMIS::Types::IntVector_d *valuesIn = GraphHelpers::GetIndicesVector(aggregation.size());
  AggMIS::Types::IntVector_d valuesOut(aggregation.size());
  AggMIS::Types::IntVector_d incomplete(1, 1);
  // Figuring out block sizes for kernel call:
  int size = graph.Size();
  int blockSize = 256;
  int nBlocks = size / blockSize + (size%blockSize == 0 ? 0 : 1);
  // Getting raw pointers
  int *valuesIn_d = thrust::raw_pointer_cast(valuesIn->data());
  int *valuesOut_d = thrust::raw_pointer_cast(&valuesOut[0]);
  int *incomplete_d = thrust::raw_pointer_cast(&incomplete[0]);
  int *adjacency_d = thrust::raw_pointer_cast(graph.adjacency->data());
  int *adjIndices_d = thrust::raw_pointer_cast(graph.indices->data());
  int *aggregation_d = thrust::raw_pointer_cast(&aggregation[0]);
  // Flood filling within each aggregate. The two raw pointers are ping-ponged
  // after each pass, so the freshest values always sit behind valuesIn_d.
  // originalOut records valuesIn's buffer so we can tell, after the loop,
  // whether the final values already landed in valuesOut's storage.
  int *originalOut = valuesIn_d;
  while (incomplete[0] == 1)
  {
    incomplete[0] = 0;
    Kernels::checkAggregationFillAggregates << < nBlocks, blockSize >> >
      (size, adjIndices_d, adjacency_d, aggregation_d, valuesIn_d, valuesOut_d, incomplete_d);
    int *temp = valuesIn_d;
    valuesIn_d = valuesOut_d;
    valuesOut_d = temp;
  }
  // After an odd number of passes the results physically live in valuesOut's
  // memory already; after an even number they live in valuesIn's buffer and
  // must be copied over.
  if (originalOut != valuesOut_d)
    valuesOut.assign(valuesIn->begin(), valuesIn->end());
  // Reuse valuesIn to hold the aggregate labels for pair-wise comparison.
  valuesIn->assign(aggregation.begin(), aggregation.end());
  //
  // Sort (aggregate, fillValue) pairs, then count distinct pairs: one
  // connected component per aggregate yields exactly `newLength` pairs.
  int correctLength = newLength;
  thrust::sort(thrust::make_zip_iterator(thrust::make_tuple(valuesIn->begin(), valuesOut.begin())),
               thrust::make_zip_iterator(thrust::make_tuple(valuesIn->end(), valuesOut.end())));
  newLength = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(valuesOut.begin(), valuesIn->begin())),
                             thrust::make_zip_iterator(thrust::make_tuple(valuesOut.end(), valuesIn->end())))
              - thrust::make_zip_iterator(thrust::make_tuple(valuesOut.begin(), valuesIn->begin()));
  valuesIn->resize(newLength);
  valuesOut.resize(newLength);
  if (newLength != correctLength)
  {
    if (verbose)
      printf("Error: there were %d connected components found and %d aggregates\n", newLength, correctLength);
    errors++;
    // Report which aggregates were filled from more than one seed node.
    AggMIS::Types::IntVector_h aggIds(*valuesIn);
    AggMIS::Types::IntVector_h nodeIds(valuesOut);
    for (int i = 0; i < valuesOut.size() - 1; i++)
    {
      int currentAgg = aggIds[i];
      int nextAgg = aggIds[i + 1];
      if (currentAgg == nextAgg && verbose)
        printf("Aggregate %d was filled from %d and %d\n", currentAgg, nodeIds[i], nodeIds[i + 1]);
    }
  }
  // Clean up
  scratch.resize(0);
  valuesIn->resize(0);
  delete valuesIn;
  incomplete.resize(0);
  return errors == 0;
}
// Builds the inverse mapping of an aggregation: a CSR-like structure whose
// "adjacency" lists the node ids of each aggregate, grouped and delimited by
// "indices". Returns a newly allocated Graph_d; the CALLER OWNS and must
// delete it.
AggMIS::Types::Graph_d* GetAggregateMap(AggMIS::Types::IntVector_d& aggregation) {
  AggMIS::Types::Graph_d* output = new AggMIS::Types::Graph_d();
  // Setting adjacency of output to be indices
  GraphHelpers::SetToIndicesVector(aggregation.size(), *(output->adjacency));
  AggMIS::Types::IntVector_d aggLabels(aggregation.begin(), aggregation.end());
  // Sorting by key to get node id's grouped by aggregates
  thrust::sort_by_key(aggLabels.begin(), aggLabels.end(), output->adjacency->begin());
  // Resizing the indices to aggregate count
  // (labels are sorted, so the last one is the largest aggregate id).
  int maxAggregate = aggLabels[aggLabels.size() - 1];
  output->indices->resize(maxAggregate + 2, 0);
  // Figuring out block sizes for kernel call:
  int size = aggregation.size();
  int blockSize = 256;
  int nBlocks = size / blockSize + (size%blockSize == 0 ? 0 : 1);
  // Calling kernel to find indices for each part:
  GraphHelpers::Kernels::findPartIndicesKernel << < nBlocks, blockSize >> >
    (size,
     AggMIS::Types::StartOf(aggLabels),
     output->indStart());
  // Cleaning up
  aggLabels.clear();
  return output;
}
}
}  // namespace AggMIS
#include <array>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "accessor/reduced_row_major.hpp"
#include "core/base/mixed_precision_types.hpp"
#include "core/components/fill_array_kernels.hpp"
#include "core/components/prefix_sum_kernels.hpp"
#include "core/matrix/dense_kernels.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/format_conversion.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/thread_ids.cuh"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The ELL matrix format namespace.
*
* @ingroup ell
*/
namespace ell {
// Threads per CUDA block used by the ELL kernels below.
constexpr int default_block_size = 512;


// TODO: num_threads_per_core and ratio are parameters should be tuned
/**
 * num_threads_per_core is the oversubscribing parameter. There are
 * `num_threads_per_core` threads assigned to each physical core.
 */
constexpr int num_threads_per_core = 4;


/**
 * ratio is the parameter to decide when to use threads to do reduction on each
 * row. (#cols/#rows > ratio)
 */
constexpr double ratio = 1e-2;


/**
 * max_thread_per_worker is the max number of thread per worker. The
 * `compiled_kernels` must be a list <0, 1, 2, ..., max_thread_per_worker>
 */
constexpr int max_thread_per_worker = 32;


/**
 * A compile-time list of sub-warp sizes for which the spmv kernels should be
 * compiled.
 * 0 is a special case where it uses a sub-warp size of warp_size in
 * combination with atomic_adds.
 */
using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32>;
#include "common/cuda_hip/matrix/ell_kernels.hpp.inc"
namespace {
/**
 * Reinterprets a reduced_row_major accessor range so that its arithmetic and
 * storage types become the matching CUDA device types, leaving size, data
 * pointer and strides untouched.
 */
template <int dim, typename Type1, typename Type2>
GKO_INLINE auto as_cuda_accessor(
    const acc::range<acc::reduced_row_major<dim, Type1, Type2>>& acc)
{
    using device_accessor =
        acc::reduced_row_major<dim, cuda_type<Type1>, cuda_type<Type2>>;
    const auto& host_acc = acc.get_accessor();
    return acc::range<device_accessor>(
        host_acc.get_size(), as_cuda_type(host_acc.get_stored_data()),
        host_acc.get_stride());
}
/**
 * Launches the ELL spmv kernel instantiated for a fixed number of threads per
 * worker (`info`; 0 selects the atomic max_thread_per_worker variant).
 * Computes c = a * b when alpha/beta are absent, or c = alpha * a * b +
 * beta * c when BOTH are given; any other combination is an error.
 */
template <int info, typename InputValueType, typename MatrixValueType,
          typename OutputValueType, typename IndexType>
void abstract_spmv(syn::value_list<int, info>, int num_worker_per_row,
                   const matrix::Ell<MatrixValueType, IndexType>* a,
                   const matrix::Dense<InputValueType>* b,
                   matrix::Dense<OutputValueType>* c,
                   const matrix::Dense<MatrixValueType>* alpha = nullptr,
                   const matrix::Dense<OutputValueType>* beta = nullptr)
{
    // Accessors that load the (possibly lower-precision) stored values and
    // present them as OutputValueType inside the kernel.
    using a_accessor =
        gko::acc::reduced_row_major<1, OutputValueType, const MatrixValueType>;
    using b_accessor =
        gko::acc::reduced_row_major<2, OutputValueType, const InputValueType>;

    const auto nrows = a->get_size()[0];
    const auto stride = a->get_stride();
    const auto num_stored_elements_per_row =
        a->get_num_stored_elements_per_row();

    constexpr int num_thread_per_worker =
        (info == 0) ? max_thread_per_worker : info;
    constexpr bool atomic = (info == 0);
    // block: x = workers per block, y = threads per worker;
    // grid: x covers all (row, worker) pairs, y covers the columns of b.
    const dim3 block_size(default_block_size / num_thread_per_worker,
                          num_thread_per_worker, 1);
    const dim3 grid_size(ceildiv(nrows * num_worker_per_row, block_size.x),
                         b->get_size()[1], 1);

    const auto a_vals = gko::acc::range<a_accessor>(
        std::array<size_type, 1>{{num_stored_elements_per_row * stride}},
        a->get_const_values());
    const auto b_vals = gko::acc::range<b_accessor>(
        std::array<size_type, 2>{{b->get_size()[0], b->get_size()[1]}},
        b->get_const_values(), std::array<size_type, 1>{{b->get_stride()}});

    if (alpha == nullptr && beta == nullptr) {
        // Simple apply: c = a * b
        kernel::spmv<num_thread_per_worker, atomic>
            <<<grid_size, block_size, 0, 0>>>(
                nrows, num_worker_per_row, as_cuda_accessor(a_vals),
                a->get_const_col_idxs(), stride, num_stored_elements_per_row,
                as_cuda_accessor(b_vals), as_cuda_type(c->get_values()),
                c->get_stride());
    } else if (alpha != nullptr && beta != nullptr) {
        // Advanced apply: c = alpha * a * b + beta * c
        const auto alpha_val = gko::acc::range<a_accessor>(
            std::array<size_type, 1>{1}, alpha->get_const_values());
        kernel::spmv<num_thread_per_worker, atomic>
            <<<grid_size, block_size, 0, 0>>>(
                nrows, num_worker_per_row, as_cuda_accessor(alpha_val),
                as_cuda_accessor(a_vals), a->get_const_col_idxs(), stride,
                num_stored_elements_per_row, as_cuda_accessor(b_vals),
                as_cuda_type(beta->get_const_values()),
                as_cuda_type(c->get_values()), c->get_stride());
    } else {
        // Supplying only one of alpha/beta is not a valid configuration.
        GKO_KERNEL_NOT_FOUND;
    }
}

GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv);
/**
 * Chooses the kernel launch configuration for the ELL spmv based on matrix
 * shape: how many threads cooperate on a row, whether multiple workers per
 * row (and therefore atomic adds) are needed.
 *
 * @return {num_thread_per_worker, atomic (0/1), num_worker_per_row}
 */
template <typename ValueType, typename IndexType>
std::array<int, 3> compute_thread_worker_and_atomicity(
    std::shared_ptr<const CudaExecutor> exec,
    const matrix::Ell<ValueType, IndexType>* a)
{
    int num_thread_per_worker = 1;
    int atomic = 0;
    int num_worker_per_row = 1;

    const auto nrows = a->get_size()[0];
    const auto ell_ncols = a->get_num_stored_elements_per_row();
    // TODO: num_threads_per_core should be tuned for AMD gpu
    // Total number of oversubscribed warps available on the device.
    const auto nwarps = exec->get_num_warps_per_sm() *
                        exec->get_num_multiprocessor() * num_threads_per_core;

    // Use multithreads to perform the reduction on each row when the matrix is
    // wide.
    // To make every thread have computation, so pick the value which is the
    // power of 2 less than max_thread_per_worker and is less than or equal to
    // ell_ncols. If the num_thread_per_worker is max_thread_per_worker and
    // allow more than one worker to work on the same row, use atomic add to
    // handle the worker write the value into the same position. The #worker is
    // decided according to the number of worker allowed on GPU.
    if (static_cast<double>(ell_ncols) / nrows > ratio) {
        while (num_thread_per_worker < max_thread_per_worker &&
               (num_thread_per_worker << 1) <= ell_ncols) {
            num_thread_per_worker <<= 1;
        }
        if (num_thread_per_worker == max_thread_per_worker) {
            num_worker_per_row =
                std::min(ell_ncols / max_thread_per_worker, nwarps / nrows);
            num_worker_per_row = std::max(num_worker_per_row, 1);
        }
        if (num_worker_per_row > 1) {
            atomic = 1;
        }
    }
    return {num_thread_per_worker, atomic, num_worker_per_row};
}
} // namespace
/**
 * ELL spmv entry point: computes c = a * b, dispatching to the kernel variant
 * matching the chosen threads-per-worker configuration.
 */
template <typename InputValueType, typename MatrixValueType,
          typename OutputValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
          const matrix::Ell<MatrixValueType, IndexType>* a,
          const matrix::Dense<InputValueType>* b,
          matrix::Dense<OutputValueType>* c)
{
    const auto data = compute_thread_worker_and_atomicity(exec, a);
    const int num_thread_per_worker = std::get<0>(data);
    const int atomic = std::get<1>(data);
    const int num_worker_per_row = std::get<2>(data);

    /**
     * info is the parameter for selecting the cuda kernel.
     * for info == 0, it uses the kernel by warp_size threads with atomic
     * operation for other value, it uses the kernel without atomic_add
     */
    const int info = (!atomic) * num_thread_per_worker;
    if (atomic) {
        // Atomic variant accumulates into c, so c must start zeroed.
        components::fill_array(exec, c->get_values(),
                               c->get_num_stored_elements(),
                               zero<OutputValueType>());
    }
    select_abstract_spmv(
        compiled_kernels(),
        [&info](int compiled_info) { return info == compiled_info; },
        syn::value_list<int>(), syn::type_list<>(), num_worker_per_row, a, b,
        c);
}

GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_SPMV_KERNEL);
/**
 * ELL advanced spmv entry point: computes c = alpha * a * b + beta * c,
 * dispatching to the kernel variant matching the chosen threads-per-worker
 * configuration.
 */
template <typename InputValueType, typename MatrixValueType,
          typename OutputValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
                   const matrix::Dense<MatrixValueType>* alpha,
                   const matrix::Ell<MatrixValueType, IndexType>* a,
                   const matrix::Dense<InputValueType>* b,
                   const matrix::Dense<OutputValueType>* beta,
                   matrix::Dense<OutputValueType>* c)
{
    const auto data = compute_thread_worker_and_atomicity(exec, a);
    const int num_thread_per_worker = std::get<0>(data);
    const int atomic = std::get<1>(data);
    const int num_worker_per_row = std::get<2>(data);

    /**
     * info is the parameter for selecting the cuda kernel.
     * for info == 0, it uses the kernel by warp_size threads with atomic
     * operation for other value, it uses the kernel without atomic_add
     */
    const int info = (!atomic) * num_thread_per_worker;
    if (atomic) {
        // Atomic variant accumulates into c, so the beta * c term is applied
        // up front by scaling c.
        dense::scale(exec, beta, c);
    }
    select_abstract_spmv(
        compiled_kernels(),
        [&info](int compiled_info) { return info == compiled_info; },
        syn::value_list<int>(), syn::type_list<>(), num_worker_per_row, a, b, c,
        alpha, beta);
}

GKO_INSTANTIATE_FOR_EACH_MIXED_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL);
/**
 * Converts an ELL matrix to dense: zero-fills the result, then scatters the
 * stored ELL entries into their (row, col) positions.
 */
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
                      const matrix::Ell<ValueType, IndexType>* source,
                      matrix::Dense<ValueType>* result)
{
    const auto num_rows = result->get_size()[0];
    const auto num_cols = result->get_size()[1];
    const auto result_stride = result->get_stride();
    const auto col_idxs = source->get_const_col_idxs();
    const auto vals = source->get_const_values();
    const auto source_stride = source->get_stride();

    // 2D launch covering the full dense output for the zero fill.
    const dim3 block_size(config::warp_size,
                          config::max_block_size / config::warp_size, 1);
    const dim3 init_grid_dim(ceildiv(num_cols, block_size.x),
                             ceildiv(num_rows, block_size.y), 1);
    kernel::initialize_zero_dense<<<init_grid_dim, block_size>>>(
        num_rows, num_cols, result_stride, as_cuda_type(result->get_values()));

    // 1D launch, one thread per row, to copy the stored entries over.
    const auto grid_dim = ceildiv(num_rows, default_block_size);
    kernel::fill_in_dense<<<grid_dim, default_block_size>>>(
        num_rows, source->get_num_stored_elements_per_row(), source_stride,
        as_cuda_type(col_idxs), as_cuda_type(vals), result_stride,
        as_cuda_type(result->get_values()));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL);
/**
 * Converts an ELL matrix to CSR: counts the nonzeros of each row, turns the
 * counts into row pointers via a prefix sum, then fills values/column
 * indices compacted per row.
 */
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
                    const matrix::Ell<ValueType, IndexType>* source,
                    matrix::Csr<ValueType, IndexType>* result)
{
    auto num_rows = result->get_size()[0];

    auto row_ptrs = result->get_row_ptrs();
    auto col_idxs = result->get_col_idxs();
    auto values = result->get_values();

    const auto stride = source->get_stride();
    const auto max_nnz_per_row = source->get_num_stored_elements_per_row();

    // One warp per row for the nonzero count.
    constexpr auto rows_per_block =
        ceildiv(default_block_size, config::warp_size);
    const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block);

    kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>(
        num_rows, max_nnz_per_row, stride,
        as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs));

    // Exclusive prefix sum turns per-row counts into CSR row pointers.
    components::prefix_sum(exec, row_ptrs, num_rows + 1);

    size_type grid_dim = ceildiv(num_rows, default_block_size);

    kernel::fill_in_csr<<<grid_dim, default_block_size>>>(
        num_rows, max_nnz_per_row, stride,
        as_cuda_type(source->get_const_values()),
        as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs),
        as_cuda_type(col_idxs), as_cuda_type(values));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL);
/**
 * Counts the total number of nonzero entries of an ELL matrix by computing
 * the per-row nonzero counts on the device and summing them.
 */
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
                    const matrix::Ell<ValueType, IndexType>* source,
                    size_type* result)
{
    const auto nrows = source->get_size()[0];
    Array<size_type> row_counts(exec, nrows);
    calculate_nonzeros_per_row(exec, source, &row_counts);
    // Reduce the per-row counts into the single scalar result.
    *result = reduce_add_array(exec, nrows, row_counts.get_const_data());
}

GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL);
/**
 * Computes, for every row of an ELL matrix, the number of nonzero stored
 * entries, writing the counts into `result` (one warp per row).
 */
template <typename ValueType, typename IndexType>
void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec,
                                const matrix::Ell<ValueType, IndexType>* source,
                                Array<size_type>* result)
{
    const auto num_rows = source->get_size()[0];
    const auto max_nnz_per_row = source->get_num_stored_elements_per_row();
    const auto stride = source->get_stride();
    const auto values = source->get_const_values();

    const auto warp_size = config::warp_size;
    // One warp per row.
    const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size);

    kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>(
        num_rows, max_nnz_per_row, stride, as_cuda_type(values),
        as_cuda_type(result->get_data()));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL);
/**
 * Extracts the main diagonal of an ELL matrix into the given Diagonal matrix,
 * scanning all stored entries of the diagonal-length rows.
 */
template <typename ValueType, typename IndexType>
void extract_diagonal(std::shared_ptr<const CudaExecutor> exec,
                      const matrix::Ell<ValueType, IndexType>* orig,
                      matrix::Diagonal<ValueType>* diag)
{
    const auto max_nnz_per_row = orig->get_num_stored_elements_per_row();
    const auto orig_stride = orig->get_stride();
    const auto diag_size = diag->get_size()[0];
    // One thread per stored entry of the first diag_size rows.
    const auto num_blocks =
        ceildiv(diag_size * max_nnz_per_row, default_block_size);

    const auto orig_values = orig->get_const_values();
    const auto orig_col_idxs = orig->get_const_col_idxs();
    auto diag_values = diag->get_values();

    kernel::extract_diagonal<<<num_blocks, default_block_size>>>(
        diag_size, max_nnz_per_row, orig_stride, as_cuda_type(orig_values),
        as_cuda_type(orig_col_idxs), as_cuda_type(diag_values));
}

GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_ELL_EXTRACT_DIAGONAL_KERNEL);
} // namespace ell
} // namespace cuda
} // namespace kernels
} // namespace gko | the_stack |
* \file
* The cub::BlockHistogram class provides [<em>collective</em>](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block.
*/
#pragma once
#include "specializations/block_histogram_sort.cuh"
#include "specializations/block_histogram_atomic.cuh"
#include "../util_ptx.cuh"
#include "../util_arch.cuh"
#include "../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/**
* \brief BlockHistogramAlgorithm enumerates alternative algorithms for the parallel construction of block-wide histograms.
*/
/**
 * \brief BlockHistogramAlgorithm enumerates alternative algorithms for the
 * parallel construction of block-wide histograms.
 */
enum BlockHistogramAlgorithm
{
    /**
     * \par Overview
     * A two-phase strategy: the samples are first ordered with an efficient
     * block-wide radix sort, after which discontinuities between adjacent
     * keys mark the boundaries of "runs"; each run length becomes one
     * histogram bin count.
     *
     * \par Performance Considerations
     * Throughput stays uniform regardless of how the samples are distributed
     * across bins.
     */
    BLOCK_HISTO_SORT,

    /**
     * \par Overview
     * Each sample increments its bin counter directly via atomic addition.
     *
     * \par Performance Considerations
     * Speed is strongly tied to the hardware's atomic-add implementation and
     * can degrade sharply for skewed (non uniformly-random) inputs, where
     * many threads contend on the same bin counter.
     */
    BLOCK_HISTO_ATOMIC,
};
/******************************************************************************
* Block histogram
******************************************************************************/
/**
* \brief The BlockHistogram class provides [<em>collective</em>](index.html#sec0) methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. 
* \ingroup BlockModule
*
* \tparam T The sample type being histogrammed (must be castable to an integer bin identifier)
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of items per thread
* \tparam BINS The number bins within the histogram
* \tparam ALGORITHM <b>[optional]</b> cub::BlockHistogramAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_HISTO_SORT)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a>
* counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>).
* - BlockHistogram can be optionally specialized to use different algorithms:
* -# <b>cub::BLOCK_HISTO_SORT</b>. Sorting followed by differentiation. [More...](\ref cub::BlockHistogramAlgorithm)
* -# <b>cub::BLOCK_HISTO_ATOMIC</b>. Use atomic addition to update byte counts directly. [More...](\ref cub::BlockHistogramAlgorithm)
*
* \par Performance Considerations
* - \granularity
*
* \par A Simple Example
* \blockcollective{BlockHistogram}
* \par
* The code snippet below illustrates a 256-bin histogram of 512 integer samples that
* are partitioned across 128 threads where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char data[4];
* ...
*
* // Compute the block-wide histogram
* BlockHistogram(temp_storage).Histogram(data, smem_histogram);
*
* \endcode
*
* \par Performance and Usage Considerations
* - The histogram output can be constructed in shared or device-accessible memory
* - See cub::BlockHistogramAlgorithm for performance details regarding algorithmic alternatives
*
*/
template <
typename T,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
int BINS,
BlockHistogramAlgorithm ALGORITHM = BLOCK_HISTO_SORT,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockHistogram
{
private:
/******************************************************************************
* Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
/**
* Ensure the template parameterization meets the requirements of the
* targeted device architecture. BLOCK_HISTO_ATOMIC can only be used
* on version SM120 or later. Otherwise BLOCK_HISTO_SORT is used
* regardless.
*/
static const BlockHistogramAlgorithm SAFE_ALGORITHM =
((ALGORITHM == BLOCK_HISTO_ATOMIC) && (PTX_ARCH < 120)) ?
BLOCK_HISTO_SORT :
ALGORITHM;
/// Internal specialization: select the sort-based or atomics-based
/// implementation according to SAFE_ALGORITHM.
typedef typename If<(SAFE_ALGORITHM == BLOCK_HISTO_SORT),
BlockHistogramSort<T, BLOCK_DIM_X, ITEMS_PER_THREAD, BINS, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH>,
BlockHistogramAtomic<BINS> >::Type InternalBlockHistogram;
/// Shared memory storage layout type for BlockHistogram
typedef typename InternalBlockHistogram::TempStorage _TempStorage;
/******************************************************************************
* Thread fields
******************************************************************************/
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator (used when the caller does not provide its own TempStorage)
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
public:
/// \smemstorage{BlockHistogram}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockHistogram()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockHistogram(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Histogram operations
*********************************************************************/
//@{
/**
* \brief Initialize the shared histogram counters to zero.
*
* \par Snippet
* The code snippet below illustrates the initialization and update of a
* histogram of 512 integer samples that are partitioned across 128 threads
* where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Initialize the block-wide histogram
* BlockHistogram(temp_storage).InitHistogram(smem_histogram);
*
* // Update the block-wide histogram
* BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram);
* }
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <typename CounterT >
__device__ __forceinline__ void InitHistogram(CounterT histogram[BINS])
{
// Initialize histogram bin counts to zeros
// Each thread zeroes the bins at stride BLOCK_THREADS, so all full
// BLOCK_THREADS-sized chunks of the histogram are covered without a bounds check.
int histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS)
{
histogram[histo_offset + linear_tid] = 0;
}
// Finish up with guarded initialization if necessary
// (only needed for the final partial chunk when BINS is not a multiple of BLOCK_THREADS)
if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS))
{
histogram[histo_offset + linear_tid] = 0;
}
}
/**
* \brief Constructs a block-wide histogram in shared/device-accessible memory. Each thread contributes an array of input elements.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates a 256-bin histogram of 512 integer samples that
* are partitioned across 128 threads where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Compute the block-wide histogram
* BlockHistogram(temp_storage).Histogram(thread_samples, smem_histogram);
* }
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <
typename CounterT >
__device__ __forceinline__ void Histogram(
T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram
CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram
{
// Initialize histogram bin counts to zeros
InitHistogram(histogram);
// Barrier so every bin is zeroed before any thread starts compositing
CTA_SYNC();
// Composite the histogram
InternalBlockHistogram(temp_storage).Composite(items, histogram);
}
/**
* \brief Updates an existing block-wide histogram in shared/device-accessible memory. Each thread composites an array of input elements.
*
* \par
* - \granularity
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the initialization and update of a
* histogram of 512 integer samples that are partitioned across 128 threads
* where each thread owns 4 samples.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_histogram.cuh>
*
* __global__ void ExampleKernel(...)
* {
* // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each
* typedef cub::BlockHistogram<unsigned char, 128, 4, 256> BlockHistogram;
*
* // Allocate shared memory for BlockHistogram
* __shared__ typename BlockHistogram::TempStorage temp_storage;
*
* // Allocate shared memory for block-wide histogram bin counts
* __shared__ unsigned int smem_histogram[256];
*
* // Obtain input samples per thread
* unsigned char thread_samples[4];
* ...
*
* // Initialize the block-wide histogram
* BlockHistogram(temp_storage).InitHistogram(smem_histogram);
*
* // Update the block-wide histogram
* BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram);
* }
*
* \endcode
*
* \tparam CounterT <b>[inferred]</b> Histogram counter type
*/
template <
typename CounterT >
__device__ __forceinline__ void Composite(
T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram
CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram
{
// NOTE: unlike Histogram(), this does NOT zero the bins first --
// callers must have invoked InitHistogram() (plus a sync) beforehand.
InternalBlockHistogram(temp_storage).Composite(items, histogram);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s) | the_stack |
#include "CUFLU.h"
#if ( MODEL == HYDRO && defined MHD )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#endif
// internal functions
GPU_DEVICE
static real dE_Upwind( const real FC_Ele_L, const real FC_Ele_R, const real FC_Mom, const real D_L, const real D_R,
const real V_L1, const real V_L2, const real V_R1, const real V_R2,
const real B_L1, const real B_L2, const real B_R1, const real B_R2,
const real dt_dh );
#ifdef UNSPLIT_GRAVITY
GPU_DEVICE
void UpdateVelocityByGravity( real &v1, real &v2, const int TDir1, const int TDir2,
const int i_usg, const int j_usg, const int k_usg,
const real dt_half, const double dh_f8, const real GraConst,
const real g_Pot_USG[], const double Corner_USG[], const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const double ExtAcc_AuxArray[] );
#endif
//-------------------------------------------------------------------------------------------------------
// Function : MHD_ComputeElectric
// Description : Compute the edge-centered line-averaged electric field E=B x V (electromotive force; EMF)
// for the constrained-transport algorithm
//
// Note : 1. Ref : (a) Gardiner & Stone, J. Comput. Phys., 227, 4123 (2008)
// (b) Stone et al., ApJS, 178, 137 (2008)
// 2. This function is shared by MHM_RP and CTU schemes
// 3. g_EC_Ele [] has the size of N_EC_ELE^3 but is accessed with a stride "NEle"
// --> But there are only NEle-1 useful elements along x/y/z for Ex/Ey/Ez, respectively
// g_FC_Flux[] has the size of N_FC_FLUX^3 but is accessed with a stride "NFlux"
// g_PriVar [] has the size of FLU_NXT^3 but is accessed with a stride "NPri"
// 4. EMF-x/y/z( i, j, k ) are defined at the lower-left edge center of
// g_PriVar( i+OffsetPri+1, j+OffsetPri+1, k+OffsetPri+1 )
// 5. Store the electric field at the patch boundaries for correcting the coarse-grid B field
// --> Option "DumpIntEle"
// --> Structure of g_IntEle[] = [face index][E field index][cell index]
// Face index: [0/1/2] = left/middle/right x faces
// [3/4/5] = left/middle/right y faces
// [6/7/8] = left/middle/right z faces
// E field index on x faces: [0/1] = Ey/Ez
// y faces: [0/1] = Ez/Ex
// z faces: [0/1] = Ex/Ey
// Cell index on x faces: [Nz][Ny] (= [PS2+1][PS2] for Ey and [PS2][PS2+1] for Ez)
// on y faces: [Nx][Nz] (= [PS2+1][PS2] for Ez and [PS2][PS2+1] for Ex)
// on z faces: [Ny][Nx] (= [PS2+1][PS2] for Ex and [PS2][PS2+1] for Ey)
// 6. For the unsplitting scheme in gravity (i.e., UNSPLIT_GRAVITY), this function also corrects the half-step
// velocity by gravity when CorrHalfVel==true
//
// Parameter : g_EC_Ele : Array to store the output electric field
// g_FC_Flux : Array storing the input face-centered fluxes
// g_PriVar : Array storing the input cell-centered primitive variables
// NEle : Stride for accessing g_EC_Ele[]
// NFlux : Stride for accessing g_FC_Flux[]
// NPri : Stride for accessing g_PriVar[]
// OffsetPri : Offset for accessing g_PriVar[]
// dt : Time interval to advance solution
// dh : Cell size
// DumpIntEle : Store the inter-patch electric field (i.e., E field at the patch boundaries)
// in g_IntEle[]
// g_IntEle : Array for DumpIntEle
// CorrHalfVel : true --> correct the half-step velocity by gravity (for UNSPLIT_GRAVITY only)
// g_Pot_USG : Array storing the input potential for CorrHalfVel (for UNSPLIT_GRAVITY only)
// g_Corner : Array storing the corner coordinates of each patch group (for UNSPLIT_GRAVITY only)
// Time : Current physical time (for UNSPLIT_GRAVITY only)
// UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only)
// ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only)
// ExtAcc_Func : Function pointer to the external acceleration routine (for UNSPLIT_GRAVITY only)
// ExtAcc_AuxArray : Auxiliary array for external acceleration (for UNSPLIT_GRAVITY only)
//
// Return : g_EC_Ele[], g_IntEle[]
//------------------------------------------------------------------------------------------------------
GPU_DEVICE
void MHD_ComputeElectric( real g_EC_Ele[][ CUBE(N_EC_ELE) ],
const real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const real g_PriVar[][ CUBE(FLU_NXT) ],
const int NEle, const int NFlux, const int NPri, const int OffsetPri,
const real dt, const real dh,
const bool DumpIntEle, real g_IntEle[][NCOMP_ELE][ PS2P1*PS2 ],
const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[], const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const double ExtAcc_AuxArray[] )
{
// check
# ifdef GAMER_DEBUG
# ifdef UNSPLIT_GRAVITY
if ( CorrHalfVel )
{
if ( UsePot && g_Pot_USG == NULL )
printf( "ERROR : g_Pot_USG == NULL !!\n" );
if ( ExtAcc && g_Corner == NULL )
printf( "ERROR : g_Corner == NULL !!\n" );
}
# else
if ( CorrHalfVel )
printf( "ERROR : CorrHalfVel is NOT supported when UNSPLIT_GRAVITY is off !!\n" );
# endif
# endif // #ifdef GAMER_DEBUG
// strides for accessing g_FC_Flux[] and g_PriVar[] along x/y/z
const int NEleM1 = NEle - 1;
const int didx_flux[3] = { 1, NFlux, SQR(NFlux) };
const int didx_pri [3] = { 1, NPri, SQR(NPri) };
const real dt_dh = dt / dh;
# ifdef UNSPLIT_GRAVITY
const double dh_f8 = (double)dh;
const real GraConst = -(real)0.25*dt_dh;
const real dt_half = (real)0.5*dt;
const int pri_ghost = ( NPri - PS2 )/2; // number of ghost zones on each side for g_PriVar[]
const int idx_pri2usg = USG_GHOST_SIZE_F - pri_ghost; // index difference between g_PriVar[] and g_Pot_USG[]
int ijk_usg[3];
double Corner_USG[3]; // central coordinates of the 0th cell in g_Pot_USG[]
if ( CorrHalfVel && ExtAcc )
for (int d=0; d<3; d++) Corner_USG[d] = g_Corner[d] - dh_f8*USG_GHOST_SIZE_F;
// check
# ifdef GAMER_DEBUG
if ( CorrHalfVel && idx_pri2usg + OffsetPri < 1 )
printf( "ERROR : idx_pri2usg (%d) + OffsetPri (%d) < 1 (USG_GHOST_SIZE_F %d, NPri %d) !!\n",
idx_pri2usg, OffsetPri, USG_GHOST_SIZE_F, NPri );
# endif
# endif // #ifdef UNSPLIT_GRAVITY
// loop over the three edge-centered E field components Ex/Ey/Ez (d = 0/1/2)
for (int d=0; d<3; d++)
{
const int TDir1 = (d+1)%3; // transverse direction 1
const int TDir2 = (d+2)%3; // transverse direction 2
const int TV1 = TDir1 + 1; // velocity component along the transverse direction 1
const int TV2 = TDir2 + 1; // velocity component along the transverse direction 2
const int TB1 = TDir1 + MAG_OFFSET; // B flux component along the transverse direction 1
const int TB2 = TDir2 + MAG_OFFSET; // B flux component along the transverse direction 2
// per-direction loop ranges: only NEle-1 useful elements along the longitudinal
// direction d, and the flux index is shifted by one along d
int idx_ele_e[2], idx_flux_s[3];
switch ( d )
{
case 0 : idx_ele_e [0] = NEleM1; idx_ele_e [1] = NEle;
idx_flux_s[0] = 1; idx_flux_s[1] = 0; idx_flux_s[2] = 0;
break;
case 1 : idx_ele_e [0] = NEle; idx_ele_e [1] = NEleM1;
idx_flux_s[0] = 0; idx_flux_s[1] = 1; idx_flux_s[2] = 0;
break;
case 2 : idx_ele_e [0] = NEle; idx_ele_e [1] = NEle;
idx_flux_s[0] = 0; idx_flux_s[1] = 0; idx_flux_s[2] = 1;
break;
}
const int size_ij = idx_ele_e[0]*idx_ele_e[1];
CGPU_LOOP( idx0, NEleM1*SQR(NEle) )
{
// decompose the 1D loop index into 3D edge indices
const int i_ele = idx0 % idx_ele_e[0];
const int j_ele = idx0 % size_ij / idx_ele_e[0];
const int k_ele = idx0 / size_ij;
const int idx_ele = IDX321( i_ele, j_ele, k_ele, NEle, NEle );
const int i_flux = i_ele + idx_flux_s[0];
const int j_flux = j_ele + idx_flux_s[1];
const int k_flux = k_ele + idx_flux_s[2];
const int idx_flux = IDX321( i_flux, j_flux, k_flux, NFlux, NFlux );
const int i_pri = i_flux + OffsetPri;
const int j_pri = j_flux + OffsetPri;
const int k_pri = k_flux + OffsetPri;
const int idx_pri = IDX321( i_pri, j_pri, k_pri, NPri, NPri );
real D_L, D_R, V_L1, V_L2, V_R1, V_R2, B_L1, B_L2, B_R1, B_R2;
int idx_L, idx_R;
real Ele_Out;
// compute the edge-centered electric field
// --> sum of the four adjacent face-centered estimates (from the B fluxes);
//     divided by 4 at the end after adding the upwind corrections
Ele_Out = ( - g_FC_Flux[TDir1][TB2][ idx_flux + didx_flux[TDir2] ]
- g_FC_Flux[TDir1][TB2][ idx_flux ]
+ g_FC_Flux[TDir2][TB1][ idx_flux + didx_flux[TDir1] ]
+ g_FC_Flux[TDir2][TB1][ idx_flux ] );
// (1) upwind derivative along TDir2 on the lower TDir1 side of the edge
idx_L = idx_pri;
idx_R = idx_L + didx_pri[TDir2];
D_L = g_PriVar[ 0][ idx_L ];
V_L1 = g_PriVar[TV1][ idx_L ];
V_L2 = g_PriVar[TV2][ idx_L ];
B_L1 = g_PriVar[TB1][ idx_L ];
B_L2 = g_PriVar[TB2][ idx_L ];
D_R = g_PriVar[ 0][ idx_R ];
V_R1 = g_PriVar[TV1][ idx_R ];
V_R2 = g_PriVar[TV2][ idx_R ];
B_R1 = g_PriVar[TB1][ idx_R ];
B_R2 = g_PriVar[TB2][ idx_R ];
// correct the half-step velocity by gravity for the unsplitting scheme
# ifdef UNSPLIT_GRAVITY
if ( CorrHalfVel )
{
ijk_usg[0] = i_pri + idx_pri2usg;
ijk_usg[1] = j_pri + idx_pri2usg;
ijk_usg[2] = k_pri + idx_pri2usg;
UpdateVelocityByGravity( V_L1, V_L2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
ijk_usg[TDir2] ++;
UpdateVelocityByGravity( V_R1, V_R2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
} // if ( CorrHalfVel )
# endif // #ifdef UNSPLIT_GRAVITY
Ele_Out += dE_Upwind( -g_FC_Flux[TDir1][TB2][ idx_flux ],
-g_FC_Flux[TDir1][TB2][ idx_flux + didx_flux[TDir2] ],
g_FC_Flux[TDir2][ 0][ idx_flux ],
D_L, D_R, V_L1, V_L2, V_R1, V_R2, B_L1, B_L2, B_R1, B_R2, dt_dh );
// (2) upwind derivative along TDir2 on the upper TDir1 side of the edge
idx_L = idx_pri + didx_pri[TDir1];
idx_R = idx_L + didx_pri[TDir2];
D_L = g_PriVar[ 0][ idx_L ];
V_L1 = g_PriVar[TV1][ idx_L ];
V_L2 = g_PriVar[TV2][ idx_L ];
B_L1 = g_PriVar[TB1][ idx_L ];
B_L2 = g_PriVar[TB2][ idx_L ];
D_R = g_PriVar[ 0][ idx_R ];
V_R1 = g_PriVar[TV1][ idx_R ];
V_R2 = g_PriVar[TV2][ idx_R ];
B_R1 = g_PriVar[TB1][ idx_R ];
B_R2 = g_PriVar[TB2][ idx_R ];
// correct the half-step velocity by gravity for the unsplitting scheme
# ifdef UNSPLIT_GRAVITY
if ( CorrHalfVel )
{
ijk_usg[0] = i_pri + idx_pri2usg;
ijk_usg[1] = j_pri + idx_pri2usg;
ijk_usg[2] = k_pri + idx_pri2usg;
ijk_usg[TDir1] ++;
UpdateVelocityByGravity( V_L1, V_L2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
ijk_usg[TDir2] ++;
UpdateVelocityByGravity( V_R1, V_R2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
} // if ( CorrHalfVel )
# endif // #ifdef UNSPLIT_GRAVITY
Ele_Out += dE_Upwind( -g_FC_Flux[TDir1][TB2][ idx_flux ],
-g_FC_Flux[TDir1][TB2][ idx_flux + didx_flux[TDir2] ],
g_FC_Flux[TDir2][ 0][ idx_flux + didx_flux[TDir1] ],
D_L, D_R, V_L1, V_L2, V_R1, V_R2, B_L1, B_L2, B_R1, B_R2, dt_dh );
// (3) upwind derivative along TDir1 on the lower TDir2 side of the edge
idx_L = idx_pri;
idx_R = idx_L + didx_pri[TDir1];
D_L = g_PriVar[ 0][ idx_L ];
V_L1 = g_PriVar[TV1][ idx_L ];
V_L2 = g_PriVar[TV2][ idx_L ];
B_L1 = g_PriVar[TB1][ idx_L ];
B_L2 = g_PriVar[TB2][ idx_L ];
D_R = g_PriVar[ 0][ idx_R ];
V_R1 = g_PriVar[TV1][ idx_R ];
V_R2 = g_PriVar[TV2][ idx_R ];
B_R1 = g_PriVar[TB1][ idx_R ];
B_R2 = g_PriVar[TB2][ idx_R ];
// correct the half-step velocity by gravity for the unsplitting scheme
# ifdef UNSPLIT_GRAVITY
if ( CorrHalfVel )
{
ijk_usg[0] = i_pri + idx_pri2usg;
ijk_usg[1] = j_pri + idx_pri2usg;
ijk_usg[2] = k_pri + idx_pri2usg;
UpdateVelocityByGravity( V_L1, V_L2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
ijk_usg[TDir1] ++;
UpdateVelocityByGravity( V_R1, V_R2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
} // if ( CorrHalfVel )
# endif // #ifdef UNSPLIT_GRAVITY
Ele_Out += dE_Upwind( +g_FC_Flux[TDir2][TB1][ idx_flux ],
+g_FC_Flux[TDir2][TB1][ idx_flux + didx_flux[TDir1] ],
g_FC_Flux[TDir1][ 0][ idx_flux ],
D_L, D_R, V_L1, V_L2, V_R1, V_R2, B_L1, B_L2, B_R1, B_R2, dt_dh );
// (4) upwind derivative along TDir1 on the upper TDir2 side of the edge
idx_L = idx_pri + didx_pri[TDir2];
idx_R = idx_L + didx_pri[TDir1];
D_L = g_PriVar[ 0][ idx_L ];
V_L1 = g_PriVar[TV1][ idx_L ];
V_L2 = g_PriVar[TV2][ idx_L ];
B_L1 = g_PriVar[TB1][ idx_L ];
B_L2 = g_PriVar[TB2][ idx_L ];
D_R = g_PriVar[ 0][ idx_R ];
V_R1 = g_PriVar[TV1][ idx_R ];
V_R2 = g_PriVar[TV2][ idx_R ];
B_R1 = g_PriVar[TB1][ idx_R ];
B_R2 = g_PriVar[TB2][ idx_R ];
// correct the half-step velocity by gravity for the unsplitting scheme
# ifdef UNSPLIT_GRAVITY
if ( CorrHalfVel )
{
ijk_usg[0] = i_pri + idx_pri2usg;
ijk_usg[1] = j_pri + idx_pri2usg;
ijk_usg[2] = k_pri + idx_pri2usg;
ijk_usg[TDir2] ++;
UpdateVelocityByGravity( V_L1, V_L2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
ijk_usg[TDir1] ++;
UpdateVelocityByGravity( V_R1, V_R2, TDir1, TDir2, ijk_usg[0], ijk_usg[1], ijk_usg[2], dt_half, dh_f8,
GraConst, g_Pot_USG, Corner_USG, Time, UsePot, ExtAcc, ExtAcc_Func, ExtAcc_AuxArray );
} // if ( CorrHalfVel )
# endif // #ifdef UNSPLIT_GRAVITY
Ele_Out += dE_Upwind( +g_FC_Flux[TDir2][TB1][ idx_flux ],
+g_FC_Flux[TDir2][TB1][ idx_flux + didx_flux[TDir2] ],
g_FC_Flux[TDir1][ 0][ idx_flux + didx_flux[TDir2] ],
D_L, D_R, V_L1, V_L2, V_R1, V_R2, B_L1, B_L2, B_R1, B_R2, dt_dh );
// average the four face-centered estimates plus upwind corrections
Ele_Out *= (real)0.25;
// store the electric field of all cells in g_EC_Ele[]
g_EC_Ele[d][idx_ele] = Ele_Out;
// store the inter-patch electric field in g_IntEle[]
if ( DumpIntEle )
{
// sanity check: this function assumes N_FL_ELE == PS2+1
# if ( N_FL_ELE != PS2+1 )
# error : ERROR : N_FL_ELE != PS2+1 !!
# endif
if ( d == 0 ) {
if ( j_ele == 0 || j_ele == PS1 || j_ele == PS2 ) g_IntEle[ 3 + j_ele/PS1 ][1][ i_ele*PS2P1 + k_ele ] = Ele_Out;
if ( k_ele == 0 || k_ele == PS1 || k_ele == PS2 ) g_IntEle[ 6 + k_ele/PS1 ][0][ j_ele*PS2 + i_ele ] = Ele_Out;
} // d == 0
else if ( d == 1 ) {
if ( k_ele == 0 || k_ele == PS1 || k_ele == PS2 ) g_IntEle[ 6 + k_ele/PS1 ][1][ j_ele*PS2P1 + i_ele ] = Ele_Out;
if ( i_ele == 0 || i_ele == PS1 || i_ele == PS2 ) g_IntEle[ 0 + i_ele/PS1 ][0][ k_ele*PS2 + j_ele ] = Ele_Out;
} // d == 1
else {
if ( i_ele == 0 || i_ele == PS1 || i_ele == PS2 ) g_IntEle[ 0 + i_ele/PS1 ][1][ k_ele*PS2P1 + j_ele ] = Ele_Out;
if ( j_ele == 0 || j_ele == PS1 || j_ele == PS2 ) g_IntEle[ 3 + j_ele/PS1 ][0][ i_ele*PS2 + k_ele ] = Ele_Out;
} // d == 2
} // if ( DumpIntEle )
} // CGPU_LOOP( idx0, NEleM1*SQR(NEle) )
} // for ( int d=0; d<3; d++)
# ifdef __CUDACC__
__syncthreads();
# endif
} // FUNCTION : MHD_ComputeElectric
//-------------------------------------------------------------------------------------------------------
// Function : dE_Upwind
// Description : Calculate the first partial derivative of electric field with the upwind scheme
//
// Note : 1. Ref : Gardiner & Stone, J. Comput. Phys., 227, 4123 (2008)
// 2. Invoked by MHD_ComputeElectric()
//
// Parameter : FC_Ele_L/R : Left/right face-centered electric field
// FC_Mom : Face-centered momentum for determining the upwind direction
// D_L/R : Left/right cell-centered density
// V_L/R_1/2 : Left/right cell-centered velocity along the transverse direction 1/2
// B_L/R_1/2 : Left/right cell-centered B field along the transverse direction 1/2
// dt_dh : dt/dh --> for normalizing velocity only
//
// Return : dE
//------------------------------------------------------------------------------------------------------
GPU_DEVICE
real dE_Upwind( const real FC_Ele_L, const real FC_Ele_R, const real FC_Mom, const real D_L, const real D_R,
                const real V_L1, const real V_L2, const real V_R1, const real V_R2,
                const real B_L1, const real B_L2, const real B_R1, const real B_R2,
                const real dt_dh )
{
// dimensionless face-centered velocity used to pick the upwind side
// --> converting momentum to velocity reduces the effect of round-off errors
   const real FC_Vel = (real)2.0*dt_dh*FC_Mom/( D_L + D_R );

// cell-centered electric field on the left/right side: E = B1*V2 - B2*V1
   const real CC_Ele_L = B_L1*V_L2 - B_L2*V_L1;
   const real CC_Ele_R = B_R1*V_R2 - B_R2*V_R1;

// no clear upwind direction --> average the two one-sided derivatives
// (MAX_ERROR is defined in Macro.h)
   if ( FABS(FC_Vel) <= MAX_ERROR )
      return (real)0.5*( FC_Ele_R - CC_Ele_R + FC_Ele_L - CC_Ele_L );

// otherwise take the derivative from the upwind side
   return ( FC_Vel > (real)0.0 ) ? FC_Ele_L - CC_Ele_L
                                 : FC_Ele_R - CC_Ele_R;
} // FUNCTION : dE_Upwind
#ifdef UNSPLIT_GRAVITY
//-------------------------------------------------------------------------------------------------------
// Function : UpdateVelocityByGravity
// Description : Update the half-step velocity by gravity for the unsplitting scheme
//
// Note : 1. Invoked by MHD_ComputeElectric()
// 2. Only used when enabling UNSPLIT_GRAVITY
//
// Parameter : v1/2 : Velocity along the 1st/2nd transverse directions to be updated
// --> call-by-reference
// TDir1/2 : 1st/2nd transverse directions
// i/j/k_usg : Array indices of g_Pot_USG[]
// dt_half : 0.5*dt
// dh_f8 : Cell size in double precision
// GraConst : -0.25*dt/dh
// g_Pot_USG : Input potential array
// Corner_USG : Cell-centered coordinates of the 0th cell in g_Pot_USG[]
// Time : Current physical time
// UsePot : Add self-gravity and/or external potential
// ExtAcc : Add external acceleration
// ExtAcc_Func : Function pointer to the external acceleration routine
// ExtAcc_AuxArray : Auxiliary array for external acceleration
//
// Return : v1, v2
//------------------------------------------------------------------------------------------------------
GPU_DEVICE
void UpdateVelocityByGravity( real &v1, real &v2, const int TDir1, const int TDir2,
                              const int i_usg, const int j_usg, const int k_usg,
                              const real dt_half, const double dh_f8, const real GraConst,
                              const real g_Pot_USG[], const double Corner_USG[], const double Time,
                              const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
                              const double ExtAcc_AuxArray[] )
{
   const int pot_stride[3] = { 1, USG_NXT_F, SQR(USG_NXT_F) };   // strides of g_Pot_USG[] along x/y/z

// accumulated velocity increment along each direction
   real acc[3] = { (real)0.0, (real)0.0, (real)0.0 };

// 1. external acceleration
   if ( ExtAcc )
   {
//    cell-centered coordinates; always computed in double precision
      const double x = Corner_USG[0] + i_usg*dh_f8;
      const double y = Corner_USG[1] + j_usg*dh_f8;
      const double z = Corner_USG[2] + k_usg*dh_f8;

      ExtAcc_Func( acc, x, y, z, Time, ExtAcc_AuxArray );

//    convert acceleration to a half-step velocity increment
      acc[TDir1] *= dt_half;
      acc[TDir2] *= dt_half;
   }

// 2. self-gravity and external potential (central potential gradient; GraConst = -0.25*dt/dh)
   if ( UsePot )
   {
      const int pot_idx = IDX321( i_usg, j_usg, k_usg, USG_NXT_F, USG_NXT_F );

      acc[TDir1] += GraConst*( g_Pot_USG[ pot_idx + pot_stride[TDir1] ] - g_Pot_USG[ pot_idx - pot_stride[TDir1] ] );
      acc[TDir2] += GraConst*( g_Pot_USG[ pot_idx + pot_stride[TDir2] ] - g_Pot_USG[ pot_idx - pot_stride[TDir2] ] );
   }

// 3. advance the two transverse velocity components
   v1 += acc[TDir1];
   v2 += acc[TDir2];
} // FUNCTION : UpdateVelocityByGravity
#endif // #ifdef UNSPLIT_GRAVITY
//-------------------------------------------------------------------------------------------------------
// Function    : MHD_UpdateMagnetic
// Description : Update magnetic field with the constrained transport algorithm
//
// Note : 1. This function is shared by MHM_RP and CTU schemes
// 2. g_FC_Bx/y/z_Out[] are accessed with a stride "NOut"
// g_FC_B_In[] has the size of FLU_NXT_P1^3 and is also accessed with the same stride
// g_EC_Ele[] has the size of N_EC_ELE^3 but is accessed with a stride "NEle"
//
// Parameter : g_FC_B_Out : Array to store the output face-centered B field
// --> Separate into three arrays since the array dimension is different
// during the half- and full-step updates
// g_FC_B_In : Array storing the input face-centered B field
// g_EC_Ele : Array storing the input edge-centered electric field
// dt : Time interval to advance solution
// dh : Cell size
// NOut : Stride for accessing g_FC_Bx/y/z_Out[]
// NEle : Stride for accessing g_EC_Ele[]
// Offset_B_In : Offset for accessing g_FC_B_In[]
//
// Return : g_FC_B_Out[]
//------------------------------------------------------------------------------------------------------
GPU_DEVICE
void MHD_UpdateMagnetic( real *g_FC_Bx_Out, real *g_FC_By_Out, real *g_FC_Bz_Out,
                         const real g_FC_B_In[][ FLU_NXT_P1*SQR(FLU_NXT) ],
                         const real g_EC_Ele[][ CUBE(N_EC_ELE) ],
                         const real dt, const real dh, const int NOut, const int NEle, const int Offset_B_In )
{
   const int  ele_stride[3] = { 1, NEle, SQR(NEle) };   // strides of g_EC_Ele[] along x/y/z
   const real dt_dh         = dt / dh;

   real *out_ptr[3] = { g_FC_Bx_Out, g_FC_By_Out, g_FC_Bz_Out };

// update the three face-centered B components Bx/By/Bz (d = 0/1/2)
   for (int d=0; d<3; d++)
   {
      const int TDir1 = (d+1)%3;   // transverse direction 1
      const int TDir2 = (d+2)%3;   // transverse direction 2

//    both the output and input arrays have one extra element along the longitudinal direction d
      const int nx_out = ( d == 0 ) ? NOut+1 : NOut;
      const int ny_out = ( d == 1 ) ? NOut+1 : NOut;
      const int nz_out = ( d == 2 ) ? NOut+1 : NOut;
      const int nx_in  = ( d == 0 ) ? FLU_NXT_P1 : FLU_NXT;
      const int ny_in  = ( d == 1 ) ? FLU_NXT_P1 : FLU_NXT;

      const int slice = nx_out*ny_out;

      CGPU_LOOP( idx_out, nx_out*ny_out*nz_out )
      {
//       decompose the 1D output index
         const int i_out = idx_out % nx_out;
         const int j_out = idx_out % slice / nx_out;
         const int k_out = idx_out / slice;

         const int idx_ele = IDX321( i_out, j_out, k_out, NEle, NEle );
         const int idx_in  = IDX321( i_out+Offset_B_In, j_out+Offset_B_In, k_out+Offset_B_In, nx_in, ny_in );

//       constrained-transport update: circulation of E around the face
         const real dE1 = g_EC_Ele[TDir1][ idx_ele + ele_stride[TDir2] ] - g_EC_Ele[TDir1][idx_ele];
         const real dE2 = g_EC_Ele[TDir2][ idx_ele + ele_stride[TDir1] ] - g_EC_Ele[TDir2][idx_ele];

         out_ptr[d][idx_out] = g_FC_B_In[d][idx_in] + dt_dh*( dE1 - dE2 );
      } // CGPU_LOOP( idx_out, nx_out*ny_out*nz_out )
   } // for (int d=0; d<3; d++)

# ifdef __CUDACC__
   __syncthreads();
# endif
} // FUNCTION : MHD_UpdateMagnetic
//-------------------------------------------------------------------------------------------------------
// Function : MHD_HalfStepPrimitive
// Description : Evaluate the half-step cell-centered primitive variables
//
// Note : 1. Used by the CTU scheme
// 2. Use face-centered fluxes for the conservative update and then convert momentum to velocity
// 3. Cell-centered B field is simply obtained by averaging the half-step face-centered B field
// 4. Cell-centered primitive variables are only used for computing the edge-centered
// electric field, which is then used for the full-step CT update
// --> Only need to calculate velocity and B field
// --> Skip energy and passive scalars
// --> No need to apply the dual-energy formalism
// 5. g_Flu_In[] has the size of FLU_NXT^3 and is accessed with the same stride
// g_FC_B_Half[] has the size of FLU_NXT_P1*SQR(FLU_NXT) but is accessed with the dimension
// (N_HF_VAR+1)*SQR(N_HF_VAR)
// g_PriVar_Out[] has the size of FLU_NXT^3 but is accessed with a stride N_HF_VAR
// --> g_FC_B_Half[] contains all the face-centered B field for g_PriVar_Out[],
// nothing more, nothing less
// --> Just like the relation between g_Flu_Array_In[] and g_Mag_Array_In[]
// --> One can invoke MHD_GetCellCenteredBField() to compute the cell-centered
// B field directly
// g_Flux[] has the size of N_FC_FLUX^3 but is accessed with a stride N_HF_FLUX
// --> Although currently we have N_FC_FLUX == N_HF_FLUX
//
// Parameter : g_Flu_In : Array storing the input initial cell-centered fluid data
// g_FC_B_Half : Array storing the input half-step face-centered B field
// g_PriVar_Out : Array to store the output half-step primitive variables
// g_Flux : Array storing the input face-centered fluxes
// dt : Full-step time interval
// dh : Cell size
// MinDens : Minimum allowed density
//
// Return : g_PriVar_Out[]
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
void MHD_HalfStepPrimitive( const real g_Flu_In[][ CUBE(FLU_NXT) ],
                            const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ],
                            real g_PriVar_Out[][ CUBE(FLU_NXT) ],
                            const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
                            const real dt, const real dh, const real MinDens )
{
   const int  flux_stride[3] = { 1, N_HF_FLUX, SQR(N_HF_FLUX) };   // strides of g_Flux[] along x/y/z
   const real dt_dh2         = (real)0.5*dt/dh;                    // half-step dt/dh
   const int  NFluVar        = NCOMP_FLUID - 1;                    // density + momentum*3 (energy is skipped)
   const int  slice          = SQR(N_HF_VAR);

   CGPU_LOOP( idx_out, CUBE(N_HF_VAR) )
   {
//    decompose the 1D output index
      const int i_out = idx_out % N_HF_VAR;
      const int j_out = idx_out % slice / N_HF_VAR;
      const int k_out = idx_out / slice;

//    g_Flux[] is offset by one cell on each side
      const int idx_flux = IDX321( i_out+1, j_out+1, k_out+1, N_HF_FLUX, N_HF_FLUX );

//    map to the input fluid array (assuming N_HF_VAR = PS2+2)
      const int idx_flu_in = IDX321( i_out+FLU_GHOST_SIZE-1, j_out+FLU_GHOST_SIZE-1, k_out+FLU_GHOST_SIZE-1,
                                     FLU_NXT, FLU_NXT );

//    [0, NFluVar): density + velocity; [NFluVar, NFluVar+NCOMP_MAG): cell-centered B field
      real Cell[ NFluVar + NCOMP_MAG ];

//    1. conservative update of density and momentum by 0.5*dt using the flux differences
      for (int v=0; v<NFluVar; v++)
      {
         const real dF_x = g_Flux[0][v][idx_flux] - g_Flux[0][v][ idx_flux - flux_stride[0] ];
         const real dF_y = g_Flux[1][v][idx_flux] - g_Flux[1][v][ idx_flux - flux_stride[1] ];
         const real dF_z = g_Flux[2][v][idx_flux] - g_Flux[2][v][ idx_flux - flux_stride[2] ];

         Cell[v] = g_Flu_In[v][idx_flu_in] - dt_dh2*( dF_x + dF_y + dF_z );
      }

//    apply density floor
      Cell[DENS] = FMAX( Cell[DENS], MinDens );

//    check negative density
#     ifdef CHECK_NEGATIVE_IN_FLUID
      if ( Hydro_CheckNegative(Cell[DENS]) )
         printf( "WARNING : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
                 Cell[DENS], __FILE__, __LINE__, __FUNCTION__ );
#     endif

//    2. momentum --> velocity
      const real _Dens = (real)1.0 / Cell[DENS];
      for (int v=1; v<NFluVar; v++)    Cell[v] *= _Dens;

//    3. average the half-step face-centered B field to the cell center
      MHD_GetCellCenteredBField( Cell+NFluVar, g_FC_B_Half[0], g_FC_B_Half[1], g_FC_B_Half[2],
                                 N_HF_VAR, N_HF_VAR, N_HF_VAR, i_out, j_out, k_out );

//    4. store the results; variable indices in g_PriVar_Out[] remain consistent with other
//       arrays even though energy and passive scalars have not been computed here
      for (int v=0; v<NFluVar;   v++)   g_PriVar_Out[ v              ][idx_out] = Cell[ v ];
      for (int v=0; v<NCOMP_MAG; v++)   g_PriVar_Out[ v + MAG_OFFSET ][idx_out] = Cell[ v + NFluVar ];
   } // CGPU_LOOP( idx_out, CUBE(N_HF_VAR) )

# ifdef __CUDACC__
   __syncthreads();
# endif
} // FUNCTION : MHD_HalfStepPrimitive
#endif // #if ( MODEL == HYDRO && defined MHD )
#endif // #ifndef __CUFLU_CONSTRAINEDTRANSPORT__ | the_stack |
#include "camera_calibration/feature_detection/cuda_refinement_by_symmetry.cuh"
#include <cub/cub.cuh>
#include <libvis/cuda/cholesky_solver.h>
#include <libvis/cuda/cuda_auto_tuner.h>
#include <libvis/cuda/cuda_util.h>
#include <libvis/logging.h>
#include <math_constants.h>
#include "camera_calibration/feature_detection/cuda_util.cuh"
namespace vis {
// Maps the unit-scale sample offsets of one feature (selected via blockIdx.z)
// into pattern space: each offset is scaled by window_half_size and then sent
// through the feature's local_pattern_tr_pixel homography.
template <int block_width>
__global__ void TransformSamplesToPatternSpaceKernel(
    int num_samples,
    CUDABuffer_<float2> sample_positions,
    float window_half_size,
    CUDABuffer_<float2> pattern_sample_positions,
    CUDABuffer_<float> local_pattern_tr_pixel_buffer) {
  // Stage this feature's 3x3 homography in shared memory
  // (column-major storage, as is Eigen's default).
  __shared__ float homography[9];
  if (threadIdx.x < 9) {
    homography[threadIdx.x] = local_pattern_tr_pixel_buffer(0, 9 * blockIdx.z + threadIdx.x);
  }
  __syncthreads();

  // Grid is (1, 1, feature_count), so each thread strides over the samples
  // by the block width only.
  for (int i = threadIdx.x; i < num_samples; i += block_width) {
    const float2 offset = sample_positions(0, i);
    const float sx = window_half_size * offset.x;
    const float sy = window_half_size * offset.y;

    // Projective transform: divide by the third homogeneous coordinate.
    const float inv_w = 1.f / (homography[2] * sx + homography[5] * sy + homography[8]);
    const float px = (homography[0] * sx + homography[3] * sy + homography[6]) * inv_w;
    const float py = (homography[1] * sx + homography[4] * sy + homography[7]) * inv_w;

    pattern_sample_positions(blockIdx.z, i) = make_float2(px, py);
  }
}
// Host-side launcher for TransformSamplesToPatternSpaceKernel.
// Chooses a block width adapted to num_samples and launches one block per
// feature along the grid's z dimension.
void CallTransformSamplesToPatternSpaceKernel(
    cudaStream_t stream,
    int feature_count,
    int num_samples,
    const CUDABuffer_<float2>& sample_positions,
    float window_half_size,
    CUDABuffer_<float2>* pattern_sample_positions,
    CUDABuffer_<float> local_pattern_tr_pixel_buffer) {
  const dim3 grid_dim(1, 1, feature_count);
  if (num_samples > 512) {
    TransformSamplesToPatternSpaceKernel<1024>
        <<<grid_dim, dim3(1024, 1, 1), 0, stream>>>(
            num_samples, sample_positions, window_half_size, *pattern_sample_positions, local_pattern_tr_pixel_buffer);
  } else if (num_samples > 256) {
    TransformSamplesToPatternSpaceKernel<512>
        <<<grid_dim, dim3(512, 1, 1), 0, stream>>>(
            num_samples, sample_positions, window_half_size, *pattern_sample_positions, local_pattern_tr_pixel_buffer);
  } else if (num_samples > 128) {
    TransformSamplesToPatternSpaceKernel<256>
        <<<grid_dim, dim3(256, 1, 1), 0, stream>>>(
            num_samples, sample_positions, window_half_size, *pattern_sample_positions, local_pattern_tr_pixel_buffer);
  } else {
    TransformSamplesToPatternSpaceKernel<128>
        <<<grid_dim, dim3(128, 1, 1), 0, stream>>>(
            num_samples, sample_positions, window_half_size, *pattern_sample_positions, local_pattern_tr_pixel_buffer);
  }
  CHECK_CUDA_NO_ERROR();
}
// Symmetry-based corner refinement cost for a two-channel (float2) image —
// judging by the residual below, presumably an image of x/y gradients
// (TODO confirm against the caller that builds image_texture).
//
// The optimized state is an 8-parameter homography ("pixel_tr_pattern_samples",
// column-major storage with the bottom-right element H22 fixed to 1) that maps
// pattern-space sample offsets to pixel coordinates. For every sample offset s,
// the image is probed at the projections of s and -s (mirrored through the
// corner) and the residual is the component-wise SUM of the two sampled
// values — at a point-symmetric corner the two gradients cancel, so the sum
// should vanish.
struct GradientsXYCostFunction {
  // Returns this sample pair's (unweighted) squared-residual contribution,
  // or NaN if either mirrored sample position projects outside the image
  // (callers detect failure through NaN propagation in their sums).
  __forceinline__ __device__ static float ComputeCornerRefinementCost(
      float* pixel_tr_pattern_samples,
      int sample_index,
      const CUDABuffer_<float2>& pattern_sample_positions,
      int image_width,
      int image_height,
      cudaTextureObject_t image) {
    // Column-major 3x3 homography: element (row, col) lives at [3 * col + row].
    const float& H00 = pixel_tr_pattern_samples[0];
    const float& H01 = pixel_tr_pattern_samples[3];
    const float& H02 = pixel_tr_pattern_samples[6];
    const float& H10 = pixel_tr_pattern_samples[1];
    const float& H11 = pixel_tr_pattern_samples[4];
    const float& H12 = pixel_tr_pattern_samples[7];
    const float& H20 = pixel_tr_pattern_samples[2];
    const float& H21 = pixel_tr_pattern_samples[5];

    // Pattern-space offset of this sample for the feature handled by this
    // block (blockIdx.z); precomputed by TransformSamplesToPatternSpaceKernel.
    float2 sample = pattern_sample_positions(blockIdx.z, sample_index);

    // Get sample in one direction; the +0.5f shifts into the texture's
    // pixel-center coordinate convention.
    float sample_factor = 1.f / (H20 * sample.x + H21 * sample.y + /*H22*/ 1);
    float sample_pos_x = (H00 * sample.x + H01 * sample.y + H02) * sample_factor + 0.5f;
    float sample_pos_y = (H10 * sample.x + H11 * sample.y + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      return CUDART_NAN_F;
    }
    float2 intensity_a = tex2D<float2>(image, sample_pos_x, sample_pos_y);

    // Get sample in opposite direction (offset mirrored through the corner).
    sample_factor = 1.f / (H20 * (-sample.x) + H21 * (-sample.y) + /*H22*/ 1);
    sample_pos_x = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * sample_factor + 0.5f;
    sample_pos_y = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      return CUDART_NAN_F;
    }
    float2 intensity_b = tex2D<float2>(image, sample_pos_x, sample_pos_y);

    // Residual is the component-wise sum; its squared norm is the cost.
    float2 intensity_sum = make_float2(
        intensity_a.x + intensity_b.x,
        intensity_a.y + intensity_b.y);
    return intensity_sum.x * intensity_sum.x + intensity_sum.y * intensity_sum.y;
  }

  // Bilinearly samples the image at the given position and reconstructs the
  // analytic x/y derivatives of the bilinear interpolation from the four
  // neighboring texel fetches.
  __forceinline__ __device__ static void GetImageSampleAndJacobian(
      float sample_pos_x,
      float sample_pos_y,
      cudaTextureObject_t image,
      float2* intensity,
      float2* dx,
      float2* dy) {
    *intensity = tex2D<float2>(image, sample_pos_x, sample_pos_y);

    // Integer texel coordinates of the top-left neighbor plus the fractional
    // interpolation weights, clamped at the image border.
    int ix = static_cast<int>(::max(0.f, sample_pos_x - 0.5f));
    int iy = static_cast<int>(::max(0.f, sample_pos_y - 0.5f));
    float tx = ::max(0.f, ::min(1.f, sample_pos_x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z)
    float ty = ::max(0.f, ::min(1.f, sample_pos_y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z)

    // Fetch the four surrounding texels at their centers.
    float2 top_left = tex2D<float2>(image, ix + 0.5f, iy + 0.5f);
    float2 top_right = tex2D<float2>(image, ix + 1.5f, iy + 0.5f);
    float2 bottom_left = tex2D<float2>(image, ix + 0.5f, iy + 1.5f);
    float2 bottom_right = tex2D<float2>(image, ix + 1.5f, iy + 1.5f);

    // Derivative of the bilinear interpolation w.r.t. x and y, respectively.
    *dx = make_float2(
        (bottom_right.x - bottom_left.x) * ty + (top_right.x - top_left.x) * (1 - ty),
        (bottom_right.y - bottom_left.y) * ty + (top_right.y - top_left.y) * (1 - ty));
    *dy = make_float2(
        (bottom_right.x - top_right.x) * tx + (bottom_left.x - top_left.x) * (1 - tx),
        (bottom_right.y - top_right.y) * tx + (bottom_left.y - top_left.y) * (1 - tx));
  }

  // Accumulates this sample pair's contribution to the Gauss-Newton normal
  // equations: H is the upper triangle of J^T J packed row-major into
  // 8*(8+1)/2 = 36 floats, b is J^T r (8 floats), and *cost collects the
  // squared residual. On an out-of-bounds sample, *cost is set to NaN and
  // H/b may be left partially updated — the calling kernel then abandons the
  // whole feature, so the partial update is never used.
  __forceinline__ __device__ static void AddCornerRefinementCostAndJacobian(
      float* pixel_tr_pattern_samples,
      int sample_index,
      const CUDABuffer_<float2>& pattern_sample_positions,
      int image_width,
      int image_height,
      cudaTextureObject_t image,
      float* H,
      float* b,
      float* cost) {
    // Column-major 3x3 homography (H22 fixed to 1, not optimized).
    const float& H00 = pixel_tr_pattern_samples[0];
    const float& H01 = pixel_tr_pattern_samples[3];
    const float& H02 = pixel_tr_pattern_samples[6];
    const float& H10 = pixel_tr_pattern_samples[1];
    const float& H11 = pixel_tr_pattern_samples[4];
    const float& H12 = pixel_tr_pattern_samples[7];
    const float& H20 = pixel_tr_pattern_samples[2];
    const float& H21 = pixel_tr_pattern_samples[5];

    float2 sample = pattern_sample_positions(blockIdx.z, sample_index);

    // Get sample in one direction
    float sample_factor = 1.f / (H20 * sample.x + H21 * sample.y + /*H22*/ 1);
    float sample_pos_x = (H00 * sample.x + H01 * sample.y + H02) * sample_factor + 0.5f;
    float sample_pos_y = (H10 * sample.x + H11 * sample.y + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      *cost = CUDART_NAN_F;
      return;
    }
    float2 intensity_a, dx_a, dy_a;
    GetImageSampleAndJacobian(sample_pos_x, sample_pos_y, image, &intensity_a, &dx_a, &dy_a);

    // Get sample in opposite direction
    sample_factor = 1.f / (H20 * (-sample.x) + H21 * (-sample.y) + /*H22*/ 1);
    sample_pos_x = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * sample_factor + 0.5f;
    sample_pos_y = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      *cost = CUDART_NAN_F;
      return;
    }
    float2 intensity_b, dx_b, dy_b;
    GetImageSampleAndJacobian(sample_pos_x, sample_pos_y, image, &intensity_b, &dx_b, &dy_b);

    // Symmetric residual: component-wise sum of the two gradient samples.
    float2 residuals = make_float2(
        intensity_a.x + intensity_b.x,
        intensity_a.y + intensity_b.y);

    // Sample in first direction: derivative terms of the projective division,
    // needed for the 2x8 Jacobian of the projected position w.r.t. the
    // homography parameters.
    float term0 = 1 / (H20 * sample.x + H21 * sample.y + 1);
    float term1 = -1 * term0 * term0;
    float term2 = (H00 * sample.x + H01 * sample.y + H02) * term1;
    float term3 = (H10 * sample.x + H11 * sample.y + H12) * term1;
    // position_wrt_homography_a[2 x 8] = ...
    const float jac_row0_0 = sample.x * term0;
    const float jac_row0_1 = sample.y * term0;
    const float jac_row0_2 = term0;
    constexpr float jac_row0_3 = 0;
    constexpr float jac_row0_4 = 0;
    constexpr float jac_row0_5 = 0;
    const float jac_row0_6 = sample.x * term2;
    const float jac_row0_7 = sample.y * term2;

    constexpr float jac_row1_0 = 0;
    constexpr float jac_row1_1 = 0;
    constexpr float jac_row1_2 = 0;
    const float jac_row1_3 = jac_row0_0;
    const float jac_row1_4 = jac_row0_1;
    const float jac_row1_5 = jac_row0_2;
    const float jac_row1_6 = sample.x * term3;
    const float jac_row1_7 = sample.y * term3;

    // Sample in opposite direction: same terms evaluated at -s.
    term0 = 1 / (H20 * (-sample.x) + H21 * (-sample.y) + 1);
    term1 = -1 * term0 * term0;
    term2 = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * term1;
    term3 = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * term1;
    // position_wrt_homography_a[2 x 8] = ...
    const float jac2_row0_0 = (-sample.x) * term0;
    const float jac2_row0_1 = (-sample.y) * term0;
    const float jac2_row0_2 = term0;
    constexpr float jac2_row0_3 = 0;
    constexpr float jac2_row0_4 = 0;
    constexpr float jac2_row0_5 = 0;
    const float jac2_row0_6 = (-sample.x) * term2;
    const float jac2_row0_7 = (-sample.y) * term2;

    constexpr float jac2_row1_0 = 0;
    constexpr float jac2_row1_1 = 0;
    constexpr float jac2_row1_2 = 0;
    const float jac2_row1_3 = jac2_row0_0;
    const float jac2_row1_4 = jac2_row0_1;
    const float jac2_row1_5 = jac2_row0_2;
    const float jac2_row1_6 = (-sample.x) * term3;
    const float jac2_row1_7 = (-sample.y) * term3;

    // Computing gradient_a * position_wrt_homography_a + gradient_b * position_wrt_homography_b.
    // Calling the gradient_a matrix G (and the gradient_b matrix F):
    //   [dx_a.x  dy_a.x]
    //   [dx_a.y  dy_a.y]
    const float& G00 = dx_a.x;
    const float& G01 = dy_a.x;
    const float& G10 = dx_a.y;
    const float& G11 = dy_a.y;

    const float& F00 = dx_b.x;
    const float& F01 = dy_b.x;
    const float& F10 = dx_b.y;
    const float& F11 = dy_b.y;

    constexpr int kDim = 8;
    float jac_row[kDim];
    constexpr float weight = 1;

    // Row 0 (re-ordering the terms from row-wise to column-wise):
    // the index permutation matches the column-major state layout.
    jac_row[0] = G00 * jac_row0_0 + G01 * jac_row1_0 + F00 * jac2_row0_0 + F01 * jac2_row1_0;
    jac_row[3] = G00 * jac_row0_1 + G01 * jac_row1_1 + F00 * jac2_row0_1 + F01 * jac2_row1_1;
    jac_row[6] = G00 * jac_row0_2 + G01 * jac_row1_2 + F00 * jac2_row0_2 + F01 * jac2_row1_2;
    jac_row[1] = G00 * jac_row0_3 + G01 * jac_row1_3 + F00 * jac2_row0_3 + F01 * jac2_row1_3;
    jac_row[4] = G00 * jac_row0_4 + G01 * jac_row1_4 + F00 * jac2_row0_4 + F01 * jac2_row1_4;
    jac_row[7] = G00 * jac_row0_5 + G01 * jac_row1_5 + F00 * jac2_row0_5 + F01 * jac2_row1_5;
    jac_row[2] = G00 * jac_row0_6 + G01 * jac_row1_6 + F00 * jac2_row0_6 + F01 * jac2_row1_6;
    jac_row[5] = G00 * jac_row0_7 + G01 * jac_row1_7 + F00 * jac2_row0_7 + F01 * jac2_row1_7;

    // Rank-1 update of the packed upper-triangular H and of b for residual x.
    float* cur_H = H;
    #pragma unroll
    for (int i = 0; i < kDim; ++ i) {
      #pragma unroll
      for (int k = i; k < kDim; ++ k) {
        *cur_H += jac_row[i] * weight * jac_row[k];
        ++ cur_H;
      }
      b[i] += residuals.x * weight * jac_row[i];
    }

    // Row 1 (re-ordering the terms from row-wise to column-wise):
    jac_row[0] = G10 * jac_row0_0 + G11 * jac_row1_0 + F10 * jac2_row0_0 + F11 * jac2_row1_0;
    jac_row[3] = G10 * jac_row0_1 + G11 * jac_row1_1 + F10 * jac2_row0_1 + F11 * jac2_row1_1;
    jac_row[6] = G10 * jac_row0_2 + G11 * jac_row1_2 + F10 * jac2_row0_2 + F11 * jac2_row1_2;
    jac_row[1] = G10 * jac_row0_3 + G11 * jac_row1_3 + F10 * jac2_row0_3 + F11 * jac2_row1_3;
    jac_row[4] = G10 * jac_row0_4 + G11 * jac_row1_4 + F10 * jac2_row0_4 + F11 * jac2_row1_4;
    jac_row[7] = G10 * jac_row0_5 + G11 * jac_row1_5 + F10 * jac2_row0_5 + F11 * jac2_row1_5;
    jac_row[2] = G10 * jac_row0_6 + G11 * jac_row1_6 + F10 * jac2_row0_6 + F11 * jac2_row1_6;
    jac_row[5] = G10 * jac_row0_7 + G11 * jac_row1_7 + F10 * jac2_row0_7 + F11 * jac2_row1_7;

    // Rank-1 update for residual y.
    cur_H = H;
    #pragma unroll
    for (int i = 0; i < kDim; ++ i) {
      #pragma unroll
      for (int k = i; k < kDim; ++ k) {
        *cur_H += jac_row[i] * weight * jac_row[k];
        ++ cur_H;
      }
      b[i] += residuals.y * weight * jac_row[i];
    }

    *cost += residuals.x * residuals.x + residuals.y * residuals.y;  // Actually: 0.5 times this. However, we don't care about (positive) scaling here.
  }
};
// Symmetry-based corner refinement cost for a single-channel (float) image.
// Same state and sampling scheme as GradientsXYCostFunction, but the residual
// is the DIFFERENCE of the two samples at s and -s: at a point-symmetric
// checkerboard corner the intensities of mirrored samples should match.
struct IntensitiesCostFunction {
  // Returns this sample pair's (unweighted) squared-residual contribution,
  // or NaN if either mirrored sample position projects outside the image.
  __forceinline__ __device__ static float ComputeCornerRefinementCost(
      float* pixel_tr_pattern_samples,
      int sample_index,
      const CUDABuffer_<float2>& pattern_sample_positions,
      int image_width,
      int image_height,
      cudaTextureObject_t image) {
    // Column-major 3x3 homography: element (row, col) lives at [3 * col + row].
    const float& H00 = pixel_tr_pattern_samples[0];
    const float& H01 = pixel_tr_pattern_samples[3];
    const float& H02 = pixel_tr_pattern_samples[6];
    const float& H10 = pixel_tr_pattern_samples[1];
    const float& H11 = pixel_tr_pattern_samples[4];
    const float& H12 = pixel_tr_pattern_samples[7];
    const float& H20 = pixel_tr_pattern_samples[2];
    const float& H21 = pixel_tr_pattern_samples[5];

    float2 sample = pattern_sample_positions(blockIdx.z, sample_index);

    // Get sample in one direction; +0.5f converts to the texture's
    // pixel-center coordinate convention.
    float sample_factor = 1.f / (H20 * sample.x + H21 * sample.y + /*H22*/ 1);
    float sample_pos_x = (H00 * sample.x + H01 * sample.y + H02) * sample_factor + 0.5f;
    float sample_pos_y = (H10 * sample.x + H11 * sample.y + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      return CUDART_NAN_F;
    }
    float intensity_a = tex2D<float>(image, sample_pos_x, sample_pos_y);

    // Get sample in opposite direction (offset mirrored through the corner).
    sample_factor = 1.f / (H20 * (-sample.x) + H21 * (-sample.y) + /*H22*/ 1);
    sample_pos_x = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * sample_factor + 0.5f;
    sample_pos_y = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      return CUDART_NAN_F;
    }
    float intensity_b = tex2D<float>(image, sample_pos_x, sample_pos_y);

    // Residual is the intensity difference; its square is the cost.
    float intensity_diff = intensity_a - intensity_b;
    return intensity_diff * intensity_diff;
  }

  // Bilinearly samples the image at the given position and reconstructs the
  // analytic x/y derivatives of the bilinear interpolation from the four
  // neighboring texel fetches.
  __forceinline__ __device__ static void GetImageSampleAndJacobian(
      float sample_pos_x,
      float sample_pos_y,
      cudaTextureObject_t image,
      float* intensity,
      float* dx,
      float* dy) {
    *intensity = tex2D<float>(image, sample_pos_x, sample_pos_y);

    // Integer texel coordinates of the top-left neighbor plus the fractional
    // interpolation weights, clamped at the image border.
    int ix = static_cast<int>(::max(0.f, sample_pos_x - 0.5f));
    int iy = static_cast<int>(::max(0.f, sample_pos_y - 0.5f));
    float tx = ::max(0.f, ::min(1.f, sample_pos_x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z)
    float ty = ::max(0.f, ::min(1.f, sample_pos_y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z)

    // Fetch the four surrounding texels at their centers.
    float top_left = tex2D<float>(image, ix + 0.5f, iy + 0.5f);
    float top_right = tex2D<float>(image, ix + 1.5f, iy + 0.5f);
    float bottom_left = tex2D<float>(image, ix + 0.5f, iy + 1.5f);
    float bottom_right = tex2D<float>(image, ix + 1.5f, iy + 1.5f);

    // Derivative of the bilinear interpolation w.r.t. x and y, respectively.
    *dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty);
    *dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx);
  }

  // Accumulates this sample pair's contribution to the Gauss-Newton normal
  // equations: H is the upper triangle of J^T J packed row-major into
  // 8*(8+1)/2 = 36 floats, b is J^T r (8 floats), and *cost collects the
  // squared residual. On an out-of-bounds sample, *cost is set to NaN and
  // H/b may be left partially updated — the calling kernel then abandons the
  // whole feature, so the partial update is never used.
  __forceinline__ __device__ static void AddCornerRefinementCostAndJacobian(
      float* pixel_tr_pattern_samples,
      int sample_index,
      const CUDABuffer_<float2>& pattern_sample_positions,
      int image_width,
      int image_height,
      cudaTextureObject_t image,
      float* H,
      float* b,
      float* cost) {
    // Column-major 3x3 homography (H22 fixed to 1, not optimized).
    const float& H00 = pixel_tr_pattern_samples[0];
    const float& H01 = pixel_tr_pattern_samples[3];
    const float& H02 = pixel_tr_pattern_samples[6];
    const float& H10 = pixel_tr_pattern_samples[1];
    const float& H11 = pixel_tr_pattern_samples[4];
    const float& H12 = pixel_tr_pattern_samples[7];
    const float& H20 = pixel_tr_pattern_samples[2];
    const float& H21 = pixel_tr_pattern_samples[5];

    float2 sample = pattern_sample_positions(blockIdx.z, sample_index);

    // Get sample in one direction
    float sample_factor = 1.f / (H20 * sample.x + H21 * sample.y + /*H22*/ 1);
    float sample_pos_x = (H00 * sample.x + H01 * sample.y + H02) * sample_factor + 0.5f;
    float sample_pos_y = (H10 * sample.x + H11 * sample.y + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      *cost = CUDART_NAN_F;
      return;
    }
    float intensity_a, dx_a, dy_a;
    GetImageSampleAndJacobian(sample_pos_x, sample_pos_y, image, &intensity_a, &dx_a, &dy_a);

    // Get sample in opposite direction
    sample_factor = 1.f / (H20 * (-sample.x) + H21 * (-sample.y) + /*H22*/ 1);
    sample_pos_x = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * sample_factor + 0.5f;
    sample_pos_y = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * sample_factor + 0.5f;
    if (!ImageContainsPixelCornerConv(sample_pos_x, sample_pos_y, image_width, image_height)) {
      *cost = CUDART_NAN_F;
      return;
    }
    float intensity_b, dx_b, dy_b;
    GetImageSampleAndJacobian(sample_pos_x, sample_pos_y, image, &intensity_b, &dx_b, &dy_b);

    // Anti-symmetric residual: intensity difference of the sample pair.
    float residual = intensity_a - intensity_b;

    // Sample in first direction: derivative terms of the projective division,
    // needed for the 2x8 Jacobian of the projected position w.r.t. the
    // homography parameters.
    float term0 = 1 / (H20 * sample.x + H21 * sample.y + 1);
    float term1 = -1 * term0 * term0;
    float term2 = (H00 * sample.x + H01 * sample.y + H02) * term1;
    float term3 = (H10 * sample.x + H11 * sample.y + H12) * term1;
    // position_wrt_homography_a[2 x 8] = ...
    const float jac_row0_0 = sample.x * term0;
    const float jac_row0_1 = sample.y * term0;
    const float jac_row0_2 = term0;
    constexpr float jac_row0_3 = 0;
    constexpr float jac_row0_4 = 0;
    constexpr float jac_row0_5 = 0;
    const float jac_row0_6 = sample.x * term2;
    const float jac_row0_7 = sample.y * term2;

    constexpr float jac_row1_0 = 0;
    constexpr float jac_row1_1 = 0;
    constexpr float jac_row1_2 = 0;
    const float jac_row1_3 = jac_row0_0;
    const float jac_row1_4 = jac_row0_1;
    const float jac_row1_5 = jac_row0_2;
    const float jac_row1_6 = sample.x * term3;
    const float jac_row1_7 = sample.y * term3;

    // Sample in opposite direction: same terms evaluated at -s.
    term0 = 1 / (H20 * (-sample.x) + H21 * (-sample.y) + 1);
    term1 = -1 * term0 * term0;
    term2 = (H00 * (-sample.x) + H01 * (-sample.y) + H02) * term1;
    term3 = (H10 * (-sample.x) + H11 * (-sample.y) + H12) * term1;
    // position_wrt_homography_a[2 x 8] = ...
    const float jac2_row0_0 = (-sample.x) * term0;
    const float jac2_row0_1 = (-sample.y) * term0;
    const float jac2_row0_2 = term0;
    constexpr float jac2_row0_3 = 0;
    constexpr float jac2_row0_4 = 0;
    constexpr float jac2_row0_5 = 0;
    const float jac2_row0_6 = (-sample.x) * term2;
    const float jac2_row0_7 = (-sample.y) * term2;

    constexpr float jac2_row1_0 = 0;
    constexpr float jac2_row1_1 = 0;
    constexpr float jac2_row1_2 = 0;
    const float jac2_row1_3 = jac2_row0_0;
    const float jac2_row1_4 = jac2_row0_1;
    const float jac2_row1_5 = jac2_row0_2;
    const float jac2_row1_6 = (-sample.x) * term3;
    const float jac2_row1_7 = (-sample.y) * term3;

    // Computing gradient_a * position_wrt_homography_a + gradient_b * position_wrt_homography_b.
    // Calling the gradient_a matrix G (and the gradient_b matrix F):
    //   [dx_a.x  dy_a.x]
    //   [dx_a.y  dy_a.y]
    const float& G00 = dx_a;
    const float& G01 = dy_a;

    const float& F00 = dx_b;
    const float& F01 = dy_b;

    constexpr int kDim = 8;
    float jac_row[kDim];
    constexpr float weight = 1;

    // (Re-ordering the terms from row-wise to column-wise): the F terms are
    // SUBTRACTED because the residual is a difference (a - b), unlike the
    // gradient-sum residual of GradientsXYCostFunction.
    jac_row[0] = G00 * jac_row0_0 + G01 * jac_row1_0 - F00 * jac2_row0_0 - F01 * jac2_row1_0;
    jac_row[3] = G00 * jac_row0_1 + G01 * jac_row1_1 - F00 * jac2_row0_1 - F01 * jac2_row1_1;
    jac_row[6] = G00 * jac_row0_2 + G01 * jac_row1_2 - F00 * jac2_row0_2 - F01 * jac2_row1_2;
    jac_row[1] = G00 * jac_row0_3 + G01 * jac_row1_3 - F00 * jac2_row0_3 - F01 * jac2_row1_3;
    jac_row[4] = G00 * jac_row0_4 + G01 * jac_row1_4 - F00 * jac2_row0_4 - F01 * jac2_row1_4;
    jac_row[7] = G00 * jac_row0_5 + G01 * jac_row1_5 - F00 * jac2_row0_5 - F01 * jac2_row1_5;
    jac_row[2] = G00 * jac_row0_6 + G01 * jac_row1_6 - F00 * jac2_row0_6 - F01 * jac2_row1_6;
    jac_row[5] = G00 * jac_row0_7 + G01 * jac_row1_7 - F00 * jac2_row0_7 - F01 * jac2_row1_7;

    // Rank-1 update of the packed upper-triangular H and of b.
    float* cur_H = H;
    #pragma unroll
    for (int i = 0; i < kDim; ++ i) {
      #pragma unroll
      for (int k = i; k < kDim; ++ k) {
        *cur_H += jac_row[i] * weight * jac_row[k];
        ++ cur_H;
      }
      b[i] += residual * weight * jac_row[i];
    }

    *cost += residual * residual;  // Actually: 0.5 times this. However, we don't care about (positive) scaling here.
  }
};
// Per-feature Levenberg-Marquardt refinement of the 8-DoF homography that
// localizes one checkerboard corner. One CUDA block handles one feature
// (selected by blockIdx.z); the block's threads cooperatively accumulate the
// normal equations over all samples using cub::BlockReduce.
//
// State layout in pixel_tr_pattern_samples: 9 consecutive floats per feature
// (column-major 3x3 homography). Entries 6/7 hold the corner's pixel
// position; entry 8 (H22) is kept fixed and never updated. Writing NaN into
// entry 6 marks the feature as failed/diverged for downstream code.
template <int block_width, typename CostFunction>
__global__ void
__launch_bounds__(/*maxThreadsPerBlock*/ 512, /*minBlocksPerMultiprocessor*/ 1)
RefineCheckerboardCornerPositionCUDAKernel_Refine(
    int num_samples,
    CUDABuffer_<float2> pattern_sample_positions,
    cudaTextureObject_t image_texture,
    CUDABuffer_<float> pixel_tr_pattern_samples,
    float* final_cost,
    int window_half_size,
    int image_width,
    int image_height) {
  // #define LOG_CONDITION (threadIdx.x == 0 && blockIdx.z == 0 /*pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) > 110 && pixel_tr_pattern_samples(0, 9 * blockIdx.z + 7) > 140 && pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) < 120 && pixel_tr_pattern_samples(0, 9 * blockIdx.z + 7) < 150*/)
  constexpr int block_height = 1;

  // Number of optimized variables
  constexpr int kDim = 8;
  // Half a kDim x kDim matrix, including the diagonal
  constexpr int kHSize = kDim * (kDim + 1) / 2;

  // Feature already marked invalid (NaN x-position): nothing to do.
  if (::isnan(/*original_position_x*/ pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6))) {
    return;
  }

  typedef cub::BlockReduce<float, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceFloat;

  // The reduction scratch space and the staged H / b system share the same
  // shared memory; __syncthreads() calls separate the two uses.
  __shared__ union {
    typename BlockReduceFloat::TempStorage temp_storage;
    struct {
      float H[kHSize];
      float b[kDim];
    } h_and_b;
  } shared;

  __shared__ float test_cost_shared;

  // Double-buffered homography state: the current estimate and the LM test
  // estimate; the two pointers are swapped when a test step is accepted.
  __shared__ float buffer1[kDim];
  __shared__ float buffer2[kDim];
  float* cur_pixel_tr_pattern_samples = buffer1;
  float* test_pixel_tr_pattern_samples = buffer2;
  if (threadIdx.x < kDim) {
    cur_pixel_tr_pattern_samples[threadIdx.x] = pixel_tr_pattern_samples(0, 9 * blockIdx.z + threadIdx.x);
  }

  float lambda = -1;                  // LM damping; < 0 means "not initialized yet"
  float last_step_squared_norm = -1;  // squared (dx, dy) motion of the last accepted step
  __shared__ bool applied_update;

  // Per-thread partial accumulators for the normal equations: H is the upper
  // triangle of J^T J packed row-major, b is J^T r.
  float H_local[kHSize];
  float b_local[kDim];
  float cost_local;

  // Broadcast slots for reduction results; two slots used alternately
  // (index i & 1), presumably so that the next reduction's broadcast cannot
  // clobber a value other threads have not yet read — TODO confirm.
  __shared__ float temp[2];

  constexpr int kMaxIterationCount = 30;
  for (int iteration = 0; iteration < kMaxIterationCount; ++ iteration) {
    // Clear accumulation buffers
    #pragma unroll
    for (int i = 0; i < kDim; ++ i) {
      b_local[i] = 0;
    }
    #pragma unroll
    for (int i = 0; i < kHSize; ++ i) {
      H_local[i] = 0;
    }
    cost_local = 0;

    // Compute cost and Jacobian
    __syncthreads();  // for cur_pixel_tr_pattern_samples and for BlockReduceCoeffs
    // if (LOG_CONDITION) {
    //   printf("INPUT CUDA:\n%f, %f, %f,\n%f, %f, %f,\n%f, %f, %f;\n",
    //          cur_pixel_tr_pattern_samples[0], cur_pixel_tr_pattern_samples[3], cur_pixel_tr_pattern_samples[6],
    //          cur_pixel_tr_pattern_samples[1], cur_pixel_tr_pattern_samples[4], cur_pixel_tr_pattern_samples[7],
    //          cur_pixel_tr_pattern_samples[2], cur_pixel_tr_pattern_samples[5], pixel_tr_pattern_samples(0, 9 * blockIdx.z + 8));
    // }
    // Grid-stride-free loop: each thread handles every block_width-th sample.
    for (int sample_index = threadIdx.x; sample_index < num_samples; sample_index += block_width) {
      CostFunction::AddCornerRefinementCostAndJacobian(
          cur_pixel_tr_pattern_samples, sample_index,
          pattern_sample_positions, image_width, image_height, image_texture, H_local, b_local, &cost_local);
    }

    // Block-wide reduction of b; the result is broadcast back to every
    // thread through temp[] so all threads hold the full reduced vector.
    #pragma unroll
    for (int i = 0; i < kDim; ++ i) {
      float result = BlockReduceFloat(shared.temp_storage).Sum(b_local[i]);
      if (threadIdx.x == 0) {
        temp[i & 1] = result;
      }
      __syncthreads();
      b_local[i] = temp[i & 1];
    }
    __syncthreads();
    // Same block-wide reduction and broadcast for the packed H matrix.
    #pragma unroll
    for (int i = 0; i < kHSize; ++ i) {
      float result = BlockReduceFloat(shared.temp_storage).Sum(H_local[i]);
      if (threadIdx.x == 0) {
        temp[i & 1] = result;
      }
      __syncthreads();
      H_local[i] = temp[i & 1];
    }
    // NOTE: cost_local will only be correct for threadIdx.x == 0.
    cost_local = BlockReduceFloat(shared.temp_storage).Sum(cost_local);
    __syncthreads();  // for re-use of shared below
    // if (LOG_CONDITION) {
    //   printf("OUTPUT CUDA:\ncost = %f\n", cost_local);
    //   printf("b:\n");
    //   for (int i = 0; i < kDim; ++ i) {
    //     printf("%f\n", b_local[i]);
    //   }
    //   printf("H:\n");
    //   for (int i = 0; i < kHSize; ++ i) {
    //     printf("%f\n", H_local[i]);
    //   }
    // }

    // if (LOG_CONDITION) {
    //   printf("[%i] Iteration %i | cost: %f\n", blockIdx.z, iteration, cost_local);
    // }

    // Initialize lambda from the values of H on the diagonal?
    // The packed row-major upper triangle places the diagonal at offsets
    // 0, 8, 8+7, 8+7+6, ... — lambda starts at 0.001 * mean(diag(H)).
    if (lambda < 0) {
      lambda = 0.001f * (1.f / kDim) * (
          H_local[0] +
          H_local[8] +
          H_local[8 + 7] +
          H_local[8 + 7 + 6] +
          H_local[8 + 7 + 6 + 5] +
          H_local[8 + 7 + 6 + 5 + 4] +
          H_local[8 + 7 + 6 + 5 + 4 + 3] +
          H_local[8 + 7 + 6 + 5 + 4 + 3 + 2]);
    }

    // All threads write the same value, so the unsynchronized shared write
    // is benign here.
    applied_update = false;
    float old_lambda = 0;
    // Inner LM loop: adapt lambda until a cost-decreasing step is found
    // (or give up after 10 tries).
    for (int lm_iteration = 0; lm_iteration < 10; ++ lm_iteration) {
      // TODO: Split this up among threads?
      // Apply the new damping to the diagonal; (lambda - old_lambda) undoes
      // the damping added in the previous inner iteration.
      H_local[0] += (lambda - old_lambda);
      H_local[8] += (lambda - old_lambda);
      H_local[8 + 7] += (lambda - old_lambda);
      H_local[8 + 7 + 6] += (lambda - old_lambda);
      H_local[8 + 7 + 6 + 5] += (lambda - old_lambda);
      H_local[8 + 7 + 6 + 5 + 4] += (lambda - old_lambda);
      H_local[8 + 7 + 6 + 5 + 4 + 3] += (lambda - old_lambda);
      H_local[8 + 7 + 6 + 5 + 4 + 3 + 2] += (lambda - old_lambda);
      old_lambda = lambda;

      // Stage the (replicated) H and b into shared memory; the first
      // kDim + kHSize = 44 threads suffice (block_width >= 128).
      if (threadIdx.x < kDim) {
        shared.h_and_b.b[threadIdx.x] = b_local[threadIdx.x];
      } else if (threadIdx.x < kDim + kHSize) {
        shared.h_and_b.H[threadIdx.x - kDim] = H_local[threadIdx.x - kDim];
      }
      __syncthreads();  // make all threads see the updated shared variables in SolveWithParallelCholesky()

      // Solve for the update (in-place; the result is in shared.h_and_b.b afterwards).
      SolveWithParallelCholesky<kDim>(shared.h_and_b.H, shared.h_and_b.b);

      // Test whether the update improves the cost.
      if (threadIdx.x < kDim) {
        test_pixel_tr_pattern_samples[threadIdx.x] = cur_pixel_tr_pattern_samples[threadIdx.x] - shared.h_and_b.b[threadIdx.x];
      }
      // Translation part of the update (entries 6/7), used for the
      // convergence criterion. NOTE(review): this reads shared.h_and_b.b
      // before the __syncthreads() below — presumably safe because
      // SolveWithParallelCholesky() synchronizes before returning; confirm.
      float dx = -shared.h_and_b.b[6];
      float dy = -shared.h_and_b.b[7];
      __syncthreads();  // for test_pixel_tr_pattern_samples and BlockReduceFloat and applied_update
      float test_cost = 0;
      for (int sample_index = threadIdx.x; sample_index < num_samples; sample_index += block_width) {
        test_cost += CostFunction::ComputeCornerRefinementCost(
            test_pixel_tr_pattern_samples, sample_index,
            pattern_sample_positions, image_width, image_height, image_texture);
      }
      // NOTE: test_cost will only be correct for threadIdx.x == 0.
      test_cost = BlockReduceFloat(shared.temp_storage).Sum(test_cost);
      // if (LOG_CONDITION) {
      //   printf("  [%i] LM iteration %i | lambda: %f, dx: %f, dy: %f, test cost: %f\n", blockIdx.z, lm_iteration, lambda, dx, dy, test_cost);
      // }
      if (threadIdx.x == 0) {
        test_cost_shared = test_cost;
        if (test_cost < cost_local) {
          applied_update = true;
        }
      }
      __syncthreads();  // for applied_update, test_cost_shared

      // NaN propagates through the sum whenever any sample went out of
      // bounds, so a NaN test cost means the position left the image.
      if (::isnan(test_cost_shared)) {
        // Position went out of bounds
        // if (LOG_CONDITION) {
        //   printf("  [%i] Position out of bounds\n", blockIdx.z);
        // }
        pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) = CUDART_NAN_F;
        return;
      } else if (applied_update) {
        // Accepted: reduce the damping and remember the step size.
        lambda *= 0.5f;
        last_step_squared_norm = dx * dx + dy * dy;
        // Swap cur_pixel_tr_pattern_samples and test_pixel_tr_pattern_samples
        // to accept the test values
        float* temp = cur_pixel_tr_pattern_samples;
        cur_pixel_tr_pattern_samples = test_pixel_tr_pattern_samples;
        test_pixel_tr_pattern_samples = temp;
        break;
      } else {
        // Rejected: increase the damping and retry.
        lambda *= 2.f;
      }
    }

    if (!applied_update) {
      // Cannot find an update that improves the cost. Treat this as converged.
      // if (LOG_CONDITION) {
      //   printf("  [%i] Cannot find update, assuming convergence. Position: (%f, %f)\n", blockIdx.z, cur_pixel_tr_pattern_samples[6], cur_pixel_tr_pattern_samples[7]);
      // }
      if (threadIdx.x < kDim) {
        pixel_tr_pattern_samples(0, 9 * blockIdx.z + threadIdx.x) = cur_pixel_tr_pattern_samples[threadIdx.x];
      }
      // Thread 32 writes final_cost — presumably chosen to run concurrently
      // with the threads < kDim writing the homography above; confirm.
      if (final_cost && threadIdx.x == 32) {
        final_cost[blockIdx.z] = test_cost_shared;
      }
      return;
    }

    // NOTE: This does not necessarily need to be checked, as the CPU code will
    //       later use a stricter threshold anyway.
    // // Check for divergence.
    // if (fabs(pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) - cur_pixel_tr_pattern_samples[6]) >= window_half_size ||
    //     fabs(pixel_tr_pattern_samples(0, 9 * blockIdx.z + 7) - cur_pixel_tr_pattern_samples[7]) >= window_half_size) {
    //   // The result is probably not the originally intended corner,
    //   // since it is not within the original search window.
    //   if (LOG_CONDITION) {
    //     printf("  [%i] Position too far away from start. original_position_x: %f, position_x: %f, original_position_y: %f, position_y: %f, window_half_size: %i\n",
    //            blockIdx.z, pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6), cur_pixel_tr_pattern_samples[6], pixel_tr_pattern_samples(0, 9 * blockIdx.z + 7), cur_pixel_tr_pattern_samples[7], window_half_size);
    //   }
    //   pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) = CUDART_NAN_F;
    //   return;
    // }
  }

  // Iteration budget exhausted: accept the result only if the last accepted
  // step moved the corner by less than 0.01 pixels (squared norm 1e-4).
  if (last_step_squared_norm >= 1e-4f) {
    // Not converged
    // if (LOG_CONDITION) {
    //   printf("  [%i] Not converged\n", blockIdx.z);
    // }
    if (threadIdx.x == 0) {
      pixel_tr_pattern_samples(0, 9 * blockIdx.z + 6) = CUDART_NAN_F;
    }
  } else {
    // Converged
    // if (LOG_CONDITION) {
    //   printf("  [%i] Converged. Position: (%f, %f)\n", blockIdx.z, cur_pixel_tr_pattern_samples[6], cur_pixel_tr_pattern_samples[7]);
    // }
    if (threadIdx.x < kDim) {
      pixel_tr_pattern_samples(0, 9 * blockIdx.z + threadIdx.x) = cur_pixel_tr_pattern_samples[threadIdx.x];
    }
    if (final_cost && threadIdx.x == 32) {
      final_cost[blockIdx.z] = test_cost_shared;
    }
  }
}
void CallRefineCheckerboardCornerPositionCUDAKernel_Refine(
cudaStream_t stream,
int feature_count,
int num_samples,
const CUDABuffer_<float2>& pattern_sample_positions,
cudaTextureObject_t image_texture,
FeatureRefinement refinement_type,
CUDABuffer_<float>* pixel_tr_pattern_samples,
float* final_cost,
int window_half_size,
int image_width,
int image_height) {
if (refinement_type == FeatureRefinement::GradientsXY) {
#define CALL_KERNEL(block_width_value) \
constexpr int block_width = block_width_value; \
dim3 grid_dim(1, 1, feature_count); \
dim3 block_dim(block_width, 1, 1); \
RefineCheckerboardCornerPositionCUDAKernel_Refine<block_width, GradientsXYCostFunction> \
<<<grid_dim, block_dim, 0, stream>>>( \
num_samples, pattern_sample_positions, image_texture, *pixel_tr_pattern_samples, \
final_cost, window_half_size, image_width, image_height);
// TODO: This did not work because too many resources were requested (most
// likely shared memory).
/*if (num_samples > 512) {
CALL_KERNEL(1024);
} else*/ if (num_samples > 256) {
CALL_KERNEL(512);
} else if (num_samples > 128) {
CALL_KERNEL(256);
} else {
CALL_KERNEL(128);
}
#undef CALL_KERNEL
} else if (refinement_type == FeatureRefinement::Intensities) {
#define CALL_KERNEL(block_width_value) \
constexpr int block_width = block_width_value; \
dim3 grid_dim(1, 1, feature_count); \
dim3 block_dim(block_width, 1, 1); \
RefineCheckerboardCornerPositionCUDAKernel_Refine<block_width, IntensitiesCostFunction> \
<<<grid_dim, block_dim, 0, stream>>>( \
num_samples, pattern_sample_positions, image_texture, *pixel_tr_pattern_samples, \
final_cost, window_half_size, image_width, image_height);
// TODO: This did not work because too many resources were requested (most
// likely shared memory).
/*if (num_samples > 512) {
CALL_KERNEL(1024);
} else*/ if (num_samples > 256) {
CALL_KERNEL(512);
} else if (num_samples > 128) {
CALL_KERNEL(256);
} else {
CALL_KERNEL(128);
}
#undef CALL_KERNEL
} else {
LOG(FATAL) << "This refinement type is not supported here yet.";
}
CHECK_CUDA_NO_ERROR();
}
} | the_stack |
#pragma once
#include <gunrock/util/cta_work_distribution.cuh>
#include <gunrock/util/cta_work_progress.cuh>
#include <gunrock/util/kernel_runtime_stats.cuh>
#include <gunrock/priority_queue/near_far_pile.cuh>
#include <gunrock/priority_queue/kernel_policy.cuh>
#include <gunrock/util/test_utils.cuh>
#include <moderngpu.cuh>
namespace gunrock {
namespace priority_queue {
/**
* Arch dispatch
*/
/**
* Not valid for this arch (default)
*/
/**
 * @brief Default dispatch used when the compiled device architecture does not
 * satisfy the kernel policy (VALID == false). All operations are no-op stubs
 * so that device code still compiles for unsupported architectures; the
 * VALID == true specialization below provides the real implementations.
 */
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor,
          bool VALID = (__GR_CUDA_ARCH__ >= KernelPolicy::CUDA_ARCH)>
struct Dispatch {
  typedef typename KernelPolicy::VertexId VertexId;
  typedef typename KernelPolicy::SizeT SizeT;
  typedef typename ProblemData::DataSlice DataSlice;
  typedef typename PriorityQueue::NearFarPile NearFarPile;

  // No-op: record each queued vertex's position in the visit lookup table.
  static __device__ __forceinline__ void MarkVisit(VertexId *&vertex_in,
                                                   DataSlice *&problem,
                                                   SizeT &input_queue_length,
                                                   SizeT &node_num) {}

  // No-op: classify queued vertices into the near/far priority buckets.
  static __device__ __forceinline__ void MarkNF(
      VertexId *&vertex_in, NearFarPile *&pq, DataSlice *&problem,
      SizeT &input_queue_length, unsigned int &lower_priority_score_limit,
      unsigned int &upper_priority_score_limit, SizeT &node_num) {}

  // No-op: mark which queue entries are still valid.
  static __device__ __forceinline__ void MarkValid(VertexId *&vertex_in,
                                                   NearFarPile *&pq,
                                                   DataSlice *&problem,
                                                   SizeT &input_queue_length,
                                                   SizeT &node_num) {}

  // No-op: compact valid entries into the output queue and far pile.
  static __device__ __forceinline__ void Compact(
      VertexId *&vertex_in, NearFarPile *&pq, int &selector,
      SizeT &input_queue_length, VertexId *&vertex_out, SizeT &v_out_offset,
      SizeT &far_pile_offset, SizeT &node_num) {}

  // No-op: compact valid entries into the output queue only.
  static __device__ __forceinline__ void Compact2(
      VertexId *&vertex_in, NearFarPile *&pq, int &selector,
      SizeT &input_queue_length, VertexId *&vertex_out, SizeT &v_out_offset,
      SizeT &node_num) {}
};
/**
 * Valid-architecture specialization: the device-side implementations used
 * by the priority queue kernels below. One thread handles one queue entry.
 */
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
struct Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor, true> {
  typedef typename KernelPolicy::VertexId VertexId;
  typedef typename KernelPolicy::SizeT SizeT;
  typedef typename ProblemData::DataSlice DataSlice;
  typedef typename PriorityQueue::NearFarPile NearFarPile;
  // Record this thread's queue index in problem->visit_lookup[vertex].
  // When several queue entries reference the same vertex, the last write
  // wins; exactly one entry per vertex is later treated as valid.
  static __device__ __forceinline__ void MarkVisit(VertexId *&vertex_in,
                                                   DataSlice *&problem,
                                                   SizeT &input_queue_length,
                                                   SizeT &node_num) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int my_id = tid + bid * blockDim.x;
    if (my_id >= input_queue_length) return;
    unsigned int my_vert = vertex_in[my_id];
    // Out-of-range vertex IDs are ignored (my_vert is unsigned, so the
    // single comparison also rejects negative VertexId values).
    // if (my_vert < 0 || my_vert >= node_num) return;
    if (my_vert >= node_num) return;
    problem->visit_lookup[my_vert] = my_id;
  }
  // Classify each entry into the near pile (priority within
  // [lower, upper)) or the far pile (priority within [upper, bucket_max)),
  // writing 0/1 flags into pq->d_valid_near / pq->d_valid_far. Entries
  // that lost the MarkVisit last-writer-wins race are invalid in both.
  static __device__ __forceinline__ void MarkNF(
      VertexId *&vertex_in, NearFarPile *&pq, DataSlice *&problem,
      SizeT &input_queue_length, unsigned int &lower_priority_score_limit,
      unsigned int &upper_priority_score_limit, SizeT &node_num) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int my_id = tid + bid * blockDim.x;
    if (my_id >= input_queue_length) return;
    // Largest representable bucket given the priority step delta.
    unsigned int bucket_max = UINT_MAX / problem->delta[0];
    unsigned int my_vert = vertex_in[my_id];
    // Out-of-range vertices are invalid for both piles.
    // if (my_vert < 0 || my_vert >= node_num) { pq->d_valid_near[my_id] = 0;
    // pq->d_valid_far[my_id] = 0; return; }
    if (my_vert >= node_num) {
      pq->d_valid_near[my_id] = 0;
      pq->d_valid_far[my_id] = 0;
      return;
    }
    // User-defined priority bucket for this vertex.
    unsigned int bucket_id = Functor::ComputePriorityScore(my_vert, problem);
    // This entry is the representative for its vertex only if it was the
    // last writer in MarkVisit.
    bool valid = (my_id == problem->visit_lookup[my_vert]);
    // printf(" valid:%d, my_id: %d, my_vert: %d\n", valid, my_id, my_vert,
    // bucket_id);
    pq->d_valid_near[my_id] = (bucket_id < upper_priority_score_limit &&
                               bucket_id >= lower_priority_score_limit && valid)
                                  ? 1
                                  : 0;
    pq->d_valid_far[my_id] = (bucket_id >= upper_priority_score_limit &&
                              bucket_id < bucket_max && valid)
                                 ? 1
                                 : 0;
    // printf("valid near, far: %d, %d\n", pq->d_valid_near[my_id],
    // pq->d_valid_far[my_id]);
  }
  // Flag only the single surviving entry per vertex (deduplication pass,
  // no priority classification); the far-pile flag is always cleared.
  // NOTE(review): this reads problem->d_visit_lookup while MarkVisit above
  // writes problem->visit_lookup — confirm both names refer to the same
  // DataSlice member, otherwise this pass compares against stale data.
  static __device__ __forceinline__ void MarkValid(VertexId *&vertex_in,
                                                   NearFarPile *&pq,
                                                   DataSlice *&problem,
                                                   SizeT &input_queue_length,
                                                   SizeT &node_num) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int my_id = tid + bid * blockDim.x;
    if (my_id >= input_queue_length) return;
    unsigned int my_vert = vertex_in[my_id];
    // if (my_vert < 0 || my_vert >= node_num) { pq->d_valid_near[my_id] = 0;
    // pq->d_valid_far[my_id] = 0; return; }
    if (my_vert >= node_num) {
      pq->d_valid_near[my_id] = 0;
      pq->d_valid_far[my_id] = 0;
      return;
    }
    pq->d_valid_near[my_id] = (my_id == problem->d_visit_lookup[my_vert]);
    pq->d_valid_far[my_id] = 0;
  }
  // Scatter step after the host has run an exclusive scan over the flag
  // arrays in place: d_valid_near/d_valid_far now hold prefix sums, so an
  // entry was flagged iff scan[my_id + 1] == scan[my_id] + 1, and
  // scan[my_id] is its compacted output position. Near-pile entries go to
  // vertex_out (at v_out_offset), far-pile entries are appended to the
  // priority queue's current buffer (at far_pile_offset).
  static __device__ __forceinline__ void Compact(
      VertexId *&vertex_in, NearFarPile *&pq, int &selector,
      SizeT &input_queue_length, VertexId *&vertex_out, SizeT &v_out_offset,
      SizeT &far_pile_offset, SizeT &node_num) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int my_id = bid * blockDim.x + tid;
    if (my_id >= input_queue_length) return;
    unsigned int my_vert = vertex_in[my_id];
    // if (my_vert<0 || my_vert >= node_num) return;
    if (my_vert >= node_num) return;
    unsigned int my_valid = pq->d_valid_near[my_id];
    if (my_valid == pq->d_valid_near[my_id + 1] - 1)
      vertex_out[my_valid + v_out_offset] = my_vert;
    my_valid = pq->d_valid_far[my_id];
    if (my_valid == pq->d_valid_far[my_id + 1] - 1)
      pq->d_queue[selector][my_valid + far_pile_offset] = my_vert;
  }
  // Near-pile-only variant of Compact: scatter flagged entries to
  // vertex_out using the scanned d_valid_near offsets; the far pile is
  // untouched.
  static __device__ __forceinline__ void Compact2(
      VertexId *&vertex_in, NearFarPile *&pq, int &selector,
      SizeT &input_queue_length, VertexId *&vertex_out, SizeT &v_out_offset,
      SizeT &node_num) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int my_id = bid * blockDim.x + tid;
    if (my_id >= input_queue_length) return;
    unsigned int my_vert = vertex_in[my_id];
    // if (my_vert<0 || my_vert >= node_num) return;
    if (my_vert >= node_num) return;
    unsigned int my_valid = pq->d_valid_near[my_id];
    if (my_valid == pq->d_valid_near[my_id + 1] - 1)
      vertex_out[my_valid + v_out_offset] = my_vert;
  }
};
/**
* @brief Mark the queue index in a lookup table, if multiple queue indices
* map to same vertex ID, only one (the last one written) will be kept
*
* @tparam KernelPolicy Kernel policy type.
* @tparam ProblemData Problem data type.
* @tparam PriorityQueue PriorityQueue data type.
* @tparam Functor Functor type.
*
* @param[in] vertex_in Device pointer of the input vertex IDs
* @param[in] problem Problem object which stores user-defined priority
* value
* @param[in] input_queue_length Input queue length
*/
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::BLOCKS) __global__
    void MarkVisit(typename KernelPolicy::VertexId *vertex_in,
                   typename ProblemData::DataSlice *problem,
                   typename KernelPolicy::SizeT input_queue_length,
                   typename KernelPolicy::SizeT node_num) {
  // Thin kernel entry point: forward to the architecture-dispatched
  // implementation (a no-op stub on unsupported architectures).
  using DispatchT = Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor>;
  DispatchT::MarkVisit(vertex_in, problem, input_queue_length, node_num);
}
/**
* @brief Mark whether the vertex ID is valid in near pile/far pile
*
* @tparam KernelPolicy Kernel policy type.
* @tparam ProblemData Problem data type.
* @tparam PriorityQueue PriorityQueue data type.
* @tparam Functor Functor type.
*
 * @param[in] vertex_in         Device pointer of the input vertex IDs
 * @param[out] pq               PriorityQueue pointer which will be used to store
 * the near/far piles after the input vertices are split.
* @param[in] problem Problem object which stores user-defined priority
* value
* @param[in] input_queue_length Input queue length
* @param[in] lower_priority_score_limit Near pile priority value threshold
* @param[in] upper_priority_score_limit Far pile priority value threshold
*/
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::BLOCKS) __global__
    void MarkNF(typename KernelPolicy::VertexId *vertex_in,
                typename PriorityQueue::NearFarPile *pq,
                typename ProblemData::DataSlice *problem,
                typename KernelPolicy::SizeT input_queue_length,
                unsigned int lower_priority_score_limit,
                unsigned int upper_priority_score_limit,
                typename KernelPolicy::SizeT node_num) {
  // Thin kernel entry point: forward to the architecture-dispatched
  // implementation (a no-op stub on unsupported architectures).
  using DispatchT = Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor>;
  DispatchT::MarkNF(vertex_in, pq, problem, input_queue_length,
                    lower_priority_score_limit, upper_priority_score_limit,
                    node_num);
}
/**
 * @brief Kernel entry point for the deduplication pass: flag the single
 * surviving queue entry per vertex in the near-pile valid array.
 */
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::BLOCKS) __global__
    void MarkValid(typename KernelPolicy::VertexId *vertex_in,
                   typename PriorityQueue::NearFarPile *pq,
                   typename ProblemData::DataSlice *problem,
                   typename KernelPolicy::SizeT input_queue_length,
                   typename KernelPolicy::SizeT node_num) {
  // Forward to the architecture-dispatched implementation.
  using DispatchT = Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor>;
  DispatchT::MarkValid(vertex_in, pq, problem, input_queue_length, node_num);
}
/**
* @brief Compact the input queue into near far pile, remove the duplicate IDs,
* append the newly generated far pile at the end of current priority queue
* and output the vertices in the output queue
*
* @tparam KernelPolicy Kernel policy type.
* @tparam ProblemData Problem data type.
* @tparam PriorityQueue PriorityQueue data type.
* @tparam Functor Functor type.
*
 * @param[in] vertex_in         Device pointer of the input vertex IDs
 * @param[out] pq               PriorityQueue pointer which will be used to store
 * the near/far piles after the input vertices are split.
* @param[in] selector Binary switch for choosing from ping-pong buffers
* @param[in] input_queue_length Input queue length
* @param[out] vertex_out Device pointer of the output vertex IDs, will be
* used for any other following operators
* @param[in] v_out_offset The near pile queue offset
* @param[in] far_pile_offset Where to append the newly generated elements in
* the far pile
*/
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::BLOCKS) __global__
    void Compact(typename KernelPolicy::VertexId *vertex_in,
                 typename PriorityQueue::NearFarPile *pq, int selector,
                 typename KernelPolicy::SizeT input_queue_length,
                 typename KernelPolicy::VertexId *vertex_out,
                 typename KernelPolicy::SizeT v_out_offset,
                 typename KernelPolicy::SizeT far_pile_offset,
                 typename KernelPolicy::SizeT node_num) {
  // Thin kernel entry point: forward to the architecture-dispatched
  // implementation (a no-op stub on unsupported architectures).
  using DispatchT = Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor>;
  DispatchT::Compact(vertex_in, pq, selector, input_queue_length, vertex_out,
                     v_out_offset, far_pile_offset, node_num);
}
/**
 * @brief Kernel entry point for the near-pile-only compaction: scatter the
 * flagged entries to vertex_out using the scanned valid-flag offsets.
 */
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
__launch_bounds__(KernelPolicy::THREADS, KernelPolicy::BLOCKS) __global__
    void Compact2(typename KernelPolicy::VertexId *vertex_in,
                  typename PriorityQueue::NearFarPile *pq, int selector,
                  typename KernelPolicy::SizeT input_queue_length,
                  typename KernelPolicy::VertexId *vertex_out,
                  typename KernelPolicy::SizeT v_out_offset,
                  typename KernelPolicy::SizeT node_num) {
  // Forward to the architecture-dispatched implementation.
  using DispatchT = Dispatch<KernelPolicy, ProblemData, PriorityQueue, Functor>;
  DispatchT::Compact2(vertex_in, pq, selector, input_queue_length, vertex_out,
                      v_out_offset, node_num);
}
/**
* @brief Split a queue into two parts (near/far piles) according to
* its user-defined priority value.
*
* @tparam KernelPolicy Kernel policy type.
* @tparam ProblemData Problem data type.
* @tparam PriorityQueue PriorityQueue data type.
* @tparam Functor Functor type.
*
 * @param[in] vertex_in         Device pointer of the input vertex IDs
 * @param[out] pq               PriorityQueue pointer which will be used to store
 * the near/far piles after the input vertices are split.
* @param[in] input_queue_length Input queue length
* @param[in] problem Problem object which stores user-defined priority
* value
* @param[out] vertex_out Device pointer of the output vertex IDs, will be
* used for any other following operators
* @param[in] far_pile_offset Where to append the newly generated elements in
* the far pile
* @param[in] lower_limit Near pile priority value threshold
* @param[in] upper_limit Far pile priority value threshold
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
unsigned int Bisect(typename KernelPolicy::VertexId *vertex_in,
                    PriorityQueue *pq,
                    typename KernelPolicy::SizeT input_queue_length,
                    typename ProblemData::DataSlice *problem,
                    typename KernelPolicy::VertexId *vertex_out,
                    typename KernelPolicy::SizeT far_pile_offset,
                    unsigned int lower_limit, unsigned int upper_limit,
                    CudaContext &context,
                    typename KernelPolicy::SizeT node_num) {
  typedef typename KernelPolicy::VertexId VertexId;
  typedef typename KernelPolicy::SizeT SizeT;
  typename PriorityQueue::NearFarPile *nf_pile = pq->d_nf_pile[0];
  int block_num =
      (input_queue_length + KernelPolicy::THREADS - 1) / KernelPolicy::THREADS;
  // Host-side copies of the scan totals. Declared as VertexId rather than
  // unsigned int: the cudaMemcpy calls below copy sizeof(VertexId) bytes
  // from device arrays of VertexId, and the original unsigned-int buffers
  // would be overflowed whenever VertexId is a 64-bit type.
  VertexId close_size = 0;
  VertexId far_size = 0;
  if (input_queue_length > 0) {
    // Record each vertex's queue index; last writer wins for duplicates.
    MarkVisit<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, problem,
                                               input_queue_length, node_num);
    // Flag each surviving entry as near-pile or far-pile (MarkNF).
    MarkNF<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, nf_pile, problem,
                                               input_queue_length, lower_limit,
                                               upper_limit, node_num);
    // Exclusive scans (in place) turn the 0/1 flags into scatter offsets;
    // element [input_queue_length] holds the total count.
    Scan<mgpu::MgpuScanTypeExc>(
        pq->nf_pile[0]->d_valid_near, input_queue_length + 1, 0,
        mgpu::plus<VertexId>(), (VertexId *)0, (VertexId *)0,
        pq->nf_pile[0]->d_valid_near, context);
    Scan<mgpu::MgpuScanTypeExc>(
        pq->nf_pile[0]->d_valid_far, input_queue_length + 1, 0,
        mgpu::plus<VertexId>(), (VertexId *)0, (VertexId *)0,
        pq->nf_pile[0]->d_valid_far, context);
    // Scatter near-pile entries to vertex_out and append far-pile entries
    // to the priority queue at far_pile_offset.
    Compact<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, nf_pile, pq->selector,
                                               input_queue_length, vertex_out,
                                               0, far_pile_offset, node_num);
    // Read back the scan totals (near and far output lengths).
    cudaMemcpy(&close_size,
               pq->nf_pile[0]->d_valid_near + input_queue_length,
               sizeof(VertexId), cudaMemcpyDeviceToHost);
    cudaMemcpy(&far_size, pq->nf_pile[0]->d_valid_far + input_queue_length,
               sizeof(VertexId), cudaMemcpyDeviceToHost);
  }
  // Update the priority queue length and return the near-pile length.
  pq->queue_length = far_pile_offset + far_size;
  return (unsigned int)close_size;
}
/**
 * @brief Remove redundant elements in a queue
 *
 * @tparam KernelPolicy Kernel policy type.
 * @tparam ProblemData Problem data type.
 * @tparam PriorityQueue PriorityQueue data type.
 * @tparam Functor Functor type.
 *
 * @param[in] vertex_in         Device pointer of the input vertex IDs
 * @param[out] pq               PriorityQueue pointer whose valid-flag arrays
 * are used as scratch space for the compaction
 * @param[in] input_queue_length Input queue length
 * @param[in] problem           Problem object which stores user-defined priority
 * value
 * @param[out] vertex_out       Device pointer of the output vertex IDs, will be
 * used for any other following operators
 * @param[in] context           CudaContext reference for moderngpu APIs
 * @param[in] node_num          Total number of nodes in the graph
 *
 * @return Number of unique (valid) elements written to vertex_out
 */
template <typename KernelPolicy, typename ProblemData, typename PriorityQueue,
          typename Functor>
unsigned int RemoveInvalid(typename KernelPolicy::VertexId *vertex_in,
                           PriorityQueue *pq,
                           typename KernelPolicy::SizeT input_queue_length,
                           typename ProblemData::DataSlice *problem,
                           typename KernelPolicy::VertexId *vertex_out,
                           CudaContext &context,
                           typename KernelPolicy::SizeT node_num) {
  typedef typename KernelPolicy::VertexId VertexId;
  typedef typename KernelPolicy::SizeT SizeT;
  typename PriorityQueue::NearFarPile *nf_pile = pq->d_nf_pile[0];
  int block_num =
      (input_queue_length + KernelPolicy::THREADS - 1) / KernelPolicy::THREADS;
  // Host-side copy of the scan total. Declared as VertexId rather than
  // unsigned int: the cudaMemcpy below copies sizeof(VertexId) bytes from
  // a device array of VertexId, and the original unsigned-int buffer would
  // be overflowed whenever VertexId is a 64-bit type.
  VertexId close_size = 0;
  if (input_queue_length > 0) {
    // Record each vertex's queue index; last writer wins for duplicates.
    MarkVisit<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, problem,
                                               input_queue_length, node_num);
    // Flag the single surviving entry per vertex (MarkValid).
    MarkValid<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, nf_pile, problem,
                                               input_queue_length, node_num);
    // Exclusive scan (in place) turns the 0/1 flags into scatter offsets;
    // element [input_queue_length] holds the total count.
    Scan<mgpu::MgpuScanTypeExc>(
        pq->nf_pile[0]->d_valid_near, input_queue_length + 1, 0,
        mgpu::plus<VertexId>(), (VertexId *)0, (VertexId *)0,
        pq->nf_pile[0]->d_valid_near, context);
    // Scatter the surviving vertices to vertex_out (Compact2).
    Compact2<KernelPolicy, ProblemData, PriorityQueue, Functor>
        <<<block_num, KernelPolicy::THREADS>>>(vertex_in, nf_pile, pq->selector,
                                               input_queue_length, vertex_out,
                                               0, node_num);
    // Read back the deduplicated queue length.
    cudaMemcpy(&close_size,
               pq->nf_pile[0]->d_valid_near + input_queue_length,
               sizeof(VertexId), cudaMemcpyDeviceToHost);
  }
  // Return the output queue length.
  return (unsigned int)close_size;
}
} // namespace priority_queue
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End: | the_stack |
// RegionGrow.cu
// 实现图像的区域生长操作,串行算法 regionGrow_serial,并行 regionGrow_parallel
#include "RegionGrow.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
#include "ErrorCode.h"
// 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y
// 定义了默认的线程块的尺寸。
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// 宏:REGIONGROW_INI_IFI
// 定义了一个无穷大
#define REGIONGROW_INI_IFI 0x7fffffff
// Device helper: _findRootDev
// Root-label lookup: given the label array and a label value, returns the
// root label that the value resolves to. Declared here so the kernels
// below can call it.
static __device__ int       // Return value: the root label
_findRootDev(
        int *label,         // label array
        int idx             // label value of the query point
);
// Device helper: _unionDev
// Merges two pixels so that they belong to the same connected region.
static __device__ void      // no return value
_unionDev(
        int *label,              // label array
        unsigned char elenum1,   // gray value of the first pixel
        unsigned char elenum2,   // gray value of the second pixel
        int elelabel1,           // label of the first pixel
        int elelabel2,           // label of the second pixel
        unsigned char threshold, // growth-rule threshold
        int *flag                // change flag: set to 1 when the two
                                 // pixels are merged into one region
);
// Kernel: _initLabelPerBlockKer (initialize the labels inside each block)
// Initializes the labels of the pixels inside one thread block. Two steps:
// first, a pixel whose gray value equals the seed value gets label -1,
// otherwise its label is its linear index in the source image (for a pixel
// at (c, r) the initial label is r * width + c, where width is the image
// width); then each pixel's label is iteratively replaced by the minimum
// label among its 8-neighbors that satisfy the threshold rule. The whole
// process runs within a single thread block.
static __global__ void      // kernels have no return value
_initLabelPerBlockKer(
        ImageCuda inimg,         // input image
        int *label,              // label array
        unsigned char seed,      // seed gray value
        unsigned char threshold  // growth-rule threshold
);
// Kernel: _mergeBordersKer (merge labels across thread-block borders)
// Merges the border pixels of adjacent thread blocks; each launch merges
// tiles of 4 x 4 blocks at a time.
// NOTE(review): the fifth parameter is named threadz_z here but threadz_x
// in the definition below — they are the same parameter.
static __global__ void      // kernels have no return value
_mergeBordersKer(
        ImageCuda inimg,         // input image
        int *label,              // label array
        int blockw,              // width of the block region to merge
        int blockh,              // height of the block region to merge
        int threadz_z,           // z-thread limit when merging horizontal borders
        int threadz_y,           // z-thread limit when merging vertical borders
        unsigned char threshold  // growth-rule threshold
);
// Device helper: _findRootDev (look up the root label)
// Follows the label chain starting at idx while the stored parent keeps
// decreasing, and returns the first stored value that does not decrease
// (for a root, that is the root itself). The seed marker -1 is returned
// unchanged, whether passed in directly or found along the chain.
static __device__ int _findRootDev(int *label, int idx)
{
    // Chase parent links until the chain stops decreasing.
    while (idx != -1) {
        int parent = label[idx];
        if (parent >= idx)
            return parent;
        idx = parent;
    }
    // Reached the seed marker.
    return -1;
}
// Kernel: _initLabelPerBlockKer (initialize the labels inside each block)
// Per-block label initialization: seed-valued pixels get label -1, every
// other pixel starts with its own index, and the block then iterates a
// shared-memory min-label propagation over the 8-neighborhood until no
// label changes, finally converting the local label to a global one.
static __global__ void _initLabelPerBlockKer(ImageCuda inimg, int *label,
                                             unsigned char seed,
                                             unsigned char threshold)
{
    // Pixel coordinates handled by this thread (c = column, r = row).
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    // Out-of-range threads do nothing; this both saves work and prevents
    // out-of-bounds accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;
    int i, j, k;
    // Index of this pixel in the label array.
    int idx = r * inimg.imgMeta.width + c;
    // Index of this pixel in the (pitched) image buffer.
    int inidx = r * inimg.pitchBytes + c, newidx;
    // Row pitch of the shared-memory tile (block width plus 1-pixel halo).
    int spitch = blockDim.x + 2;
    // Index of this pixel inside the shared-memory tile.
    int localidx = (threadIdx.y + 1) * spitch + threadIdx.x + 1;
    // oldlabel holds the label before a propagation round, newlabel the
    // label after it; the iteration is finished when they stop changing.
    // Initially each pixel's label is its shared-memory index.
    int oldlabel, newlabel = localidx;
    // curvalue is this pixel's gray value, newvalue a neighbor's value.
    unsigned char curvalue, newvalue;
    curvalue = inimg.imgMeta.imgData[inidx];
    // Seed-valued pixels are marked with the special label -1.
    if (curvalue == seed)
        newlabel = -1;
    // Dynamically sized shared-memory label tile (the kernel makes no
    // assumption about the block dimensions).
    extern __shared__ int slabel[];
    // sflag[0] == 0 after a round means the in-block iteration converged.
    __shared__ int sflag[1];
    // The halo cells of the shared tile (outside the thread block) are
    // initialized to "infinity" so they never win a min().
    if (threadIdx.x == 0)
        slabel[localidx - 1] = REGIONGROW_INI_IFI;
    if (threadIdx.x == blockDim.x - 1)
        slabel[localidx + 1] = REGIONGROW_INI_IFI;
    if (threadIdx.y == 0) {
        slabel[localidx - spitch] = REGIONGROW_INI_IFI;
        if (threadIdx.x == 0)
            slabel[localidx - spitch - 1] = REGIONGROW_INI_IFI;
        if (threadIdx.x == blockDim.x - 1)
            slabel[localidx - spitch + 1] = REGIONGROW_INI_IFI;
    }
    if (threadIdx.y == blockDim.y - 1) {
        slabel[localidx + spitch] = REGIONGROW_INI_IFI;
        if (threadIdx.x == 0)
            slabel[localidx + spitch - 1] = REGIONGROW_INI_IFI;
        if (threadIdx.x == blockDim.x - 1)
            slabel[localidx + spitch + 1] = REGIONGROW_INI_IFI;
    }
    while (1) {
        // Publish this pixel's current label in the shared tile.
        slabel[localidx] = newlabel;
        // Reset the convergence flag.
        if ((threadIdx.x | threadIdx.y) == 0)
            sflag[0] = 0;
        oldlabel = newlabel;
        __syncthreads();
        // If this pixel passes the growth rule, take the minimum label of
        // its 8-neighbors that also pass the rule.
        if (curvalue > threshold && curvalue != seed) {
            for (i = r - 1;i <= r + 1;i++) {
                for (j = c - 1;j <= c + 1;j++) {
                    if (j == c && i == r)
                        continue;
                    // FIX: test the image bounds BEFORE reading the
                    // neighbor. The original read imgData[i * pitch + j]
                    // first and checked the bounds afterwards, which is an
                    // out-of-bounds access for border pixels (e.g. i == -1).
                    if (i < 0 || i >= inimg.imgMeta.height ||
                        j < 0 || j >= inimg.imgMeta.width)
                        continue;
                    newidx = i * inimg.pitchBytes + j;
                    newvalue = inimg.imgMeta.imgData[newidx];
                    if (newvalue > threshold) {
                        k = localidx + (i - r) * spitch + j - c;
                        newlabel = min(newlabel, slabel[k]);
                    }
                }
            }
        }
        __syncthreads();
        // oldlabel > newlabel means the label is not final yet: request
        // another round and atomically fold newlabel into the old slot.
        if (oldlabel > newlabel) {
            atomicMin(&slabel[oldlabel], newlabel);
            sflag[0] = 1;
        }
        __syncthreads();
        // Converged: no label in this block changed during the round.
        if (sflag[0] == 0) break;
        // Compress the path: jump directly to the root label.
        newlabel = _findRootDev(slabel, newlabel);
        __syncthreads();
    }
    // Convert the local (shared-tile) label to a global image index;
    // the seed marker -1 is stored unchanged.
    if (newlabel != -1) {
        j = newlabel / spitch;
        i = newlabel % spitch;
        i += blockIdx.x * blockDim.x - 1;
        j += blockIdx.y * blockDim.y - 1;
        newlabel = j * inimg.imgMeta.width + i;
    }
    label[idx] = newlabel;
}
// Device helper: _unionDev (merge two pixels into one connected region)
// If both gray values exceed the threshold, links the larger of the two
// root labels to the smaller one and raises flag[0] so the caller knows a
// change was made.
static __device__ void _unionDev(
        int *label, unsigned char elenum1, unsigned char elenum2,
        int label1, int label2, unsigned char threshold, int *flag)
{
    // Only pixels whose gray values both exceed the threshold may merge.
    if (elenum1 <= threshold || elenum2 <= threshold)
        return;
    // Resolve each pixel to its root label.
    int root1 = _findRootDev(label, label1);
    int root2 = _findRootDev(label, label2);
    // Already in the same region: nothing to do.
    if (root1 == root2)
        return;
    // Attach the larger root to the smaller one. atomicMin keeps the
    // update correct and unique under concurrent merges.
    if (root1 < root2)
        atomicMin(&label[root2], root1);
    else
        atomicMin(&label[root1], root2);
    // Record that a merge happened so the caller keeps iterating.
    flag[0] = 1;
}
// Kernel: _mergeBordersKer (merge labels across thread-block borders)
// Merges the pixels on the borders between adjacent thread blocks; each
// launch merges tiles of 4 x 4 blocks at a time (see the host code). The
// z thread dimension sweeps along each border row/column.
static __global__ void _mergeBordersKer(
        ImageCuda inimg, int *label,
        int blockw, int blockh,
        int threadz_x, int threadz_y, unsigned char threshold)
{
    int idx, iterateTimes, i;
    int x, y;
    int curidx, newidx;
    unsigned char curvalue, newvalue;
    // Tile coordinates (in the original grid) of the block pair handled
    // by this thread.
    int threadidx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int threadidx_y = blockDim.y * blockIdx.y + threadIdx.y;
    // Shared flag, set to 1 whenever a pair of pixels is merged; a raised
    // flag forces another pass of the outer loop.
    __shared__ int sflag[1];
    while (1) {
        // Reset the merge flag.
        if ((threadIdx.x | threadIdx.y | threadIdx.z) == 0)
            sflag[0] = 0;
        __syncthreads();
        // Merge the horizontal borders between vertically adjacent blocks.
        // The last row of blocks has no block below it and is skipped.
        if ((threadIdx.y < blockDim.y - 1)) {
            // Number of passes needed to cover one block width of pixels.
            iterateTimes = blockw / threadz_x;
            // Border pixel handled by this thread: bottom row of the
            // upper block.
            x = threadidx_x * blockw + threadIdx.z;
            y = threadidx_y * blockh + blockh - 1;
            for (i = 0; i < iterateTimes; i++) {
                if (threadIdx.z < threadz_x && x < inimg.imgMeta.width &&
                    y < inimg.imgMeta.height) {
                    idx = y * inimg.imgMeta.width + x;
                    // Gray value of the current pixel.
                    curidx = y * inimg.pitchBytes + x;
                    curvalue = inimg.imgMeta.imgData[curidx];
                    // Pixel directly below, (x, y + 1).
                    newidx = curidx + inimg.pitchBytes;
                    newvalue = inimg.imgMeta.imgData[newidx];
                    // Merge the two pixels.
                    _unionDev(label, curvalue, newvalue,
                              idx, idx + inimg.imgMeta.width, threshold, sflag);
                    // Lower-left neighbor (x - 1, y + 1), if it exists.
                    if (x - 1 >= 0) {
                        newidx -= 1;
                        newvalue = inimg.imgMeta.imgData[newidx];
                        _unionDev(label, curvalue, newvalue,
                                  idx, idx + inimg.imgMeta.width - 1,
                                  threshold, sflag);
                    }
                    // Lower-right neighbor (x + 1, y + 1), if it exists.
                    if (x + 1 < inimg.imgMeta.width) {
                        // FIX: compute the index from curidx instead of the
                        // original "newidx += 2", which assumed the
                        // lower-left branch above had executed. For x == 0
                        // that increment read the gray value of pixel
                        // (x + 2, y + 1) while merging the label of
                        // (x + 1, y + 1).
                        newidx = curidx + inimg.pitchBytes + 1;
                        newvalue = inimg.imgMeta.imgData[newidx];
                        _unionDev(label, curvalue, newvalue,
                                  idx, idx + inimg.imgMeta.width + 1,
                                  threshold, sflag);
                    }
                }
                // Advance to the pixels handled in the next pass.
                x += threadz_x;
            }
        }
        // Merge the vertical borders between horizontally adjacent blocks.
        // The last column of blocks has no block to its right; skip it.
        if ((threadIdx.x < blockDim.x - 1)) {
            // Number of passes needed to cover one block height of pixels.
            iterateTimes = blockh / threadz_y;
            // Border pixel handled by this thread: rightmost column of the
            // left block, hence the "+ blockw - 1" on the x coordinate.
            x = threadidx_x * blockw + blockw - 1;
            y = threadidx_y * blockh + threadIdx.z;
            for (i = 0;i < iterateTimes;i++) {
                if (threadIdx.z < threadz_y && x < inimg.imgMeta.width &&
                    y < inimg.imgMeta.height) {
                    idx = y * inimg.imgMeta.width + x;
                    // Gray value of the current pixel.
                    curidx = y * inimg.pitchBytes + x;
                    curvalue = inimg.imgMeta.imgData[curidx];
                    // Pixel directly to the right, (x + 1, y).
                    newidx = curidx + 1;
                    newvalue = inimg.imgMeta.imgData[newidx];
                    // Merge the two pixels.
                    _unionDev(label, curvalue, newvalue,
                              idx, idx + 1, threshold, sflag);
                    // Upper-right neighbor (x + 1, y - 1), if it exists.
                    if (y - 1 >= 0) {
                        newidx -= inimg.pitchBytes;
                        newvalue = inimg.imgMeta.imgData[newidx];
                        _unionDev(label, curvalue, newvalue, idx,
                                  idx - inimg.imgMeta.width + 1,
                                  threshold, sflag);
                    }
                    // Lower-right neighbor (x + 1, y + 1), if it exists.
                    if (y + 1 < inimg.imgMeta.height) {
                        newidx = curidx + inimg.pitchBytes + 1;
                        newvalue = inimg.imgMeta.imgData[newidx];
                        _unionDev(label, curvalue, newvalue,
                                  idx, idx + inimg.imgMeta.width + 1,
                                  threshold, sflag);
                    }
                }
                // Advance to the pixels handled in the next pass.
                y += threadz_y;
            }
        }
        __syncthreads();
        // Stop once a full pass produced no merges.
        if (sflag[0] == 0) break;
    }
}
// Kernel: _markFinalLabelKer (flatten labels and paint the output image)
// Resolves each pixel's label to its root and writes the result image:
// pixels whose root is the seed marker (-1) become 255, all others 0.
static __global__ void _markFinalLabelKer(
        ImageCuda outimg, int *label)
{
    // Pixel coordinates handled by this thread (c = column, r = row).
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    // Out-of-range threads do nothing; this both saves work and prevents
    // out-of-bounds accesses.
    if (c >= outimg.imgMeta.width || r >= outimg.imgMeta.height)
        return;
    // Index of this pixel in the label array.
    int inidx = r * outimg.imgMeta.width + c;
    // Index of this pixel in the (pitched) image buffer.
    int outidx = r * outimg.pitchBytes + c;
    // Flatten the label chain to its root and store it back.
    int root = _findRootDev(label, label[inidx]);
    label[inidx] = root;
    // Region pixels (root == -1) become white, everything else black.
    outimg.imgMeta.imgData[outidx] = (root == -1) ? 255 : 0;
}
// Host member: regionGrow_parallel
// GPU region growing: per-block label initialization, iterative merging of
// thread-block borders (4 x 4 tiles per pass), then a final relabel pass
// that paints region pixels white (255) and the rest black (0).
__host__ int RegionGrow::regionGrow_parallel(Image *inimg, Image *outimg)
{
    // Reject NULL images up front.
    if (inimg == NULL || outimg == NULL)
        return NULL_POINTER;
    // Preprocessing: make sure both images have device-side storage.
    int errcode;  // local error code
    // Copy the input image to the current device.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;
    // Copy the output image to the device.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // The output image has no data yet (so the copy above failed);
        // create one matching the input ROI size instead.
        errcode = ImageBasicOp::makeAtCurrentDevice(
                outimg, inimg->roiX2 - inimg->roiX1,
                inimg->roiY2 - inimg->roiY1);
        // If even the creation fails, give up and report the error.
        if (errcode != NO_ERROR)
            return errcode;
    }
    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Clamp both ROIs to their common width and height.
    if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
        insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
    else
        outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
    if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
        insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
    else
        outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
    // Kernel launch configuration.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
    // Shared-memory size for the per-block initialization kernel: the
    // block tile plus a one-pixel halo on every side.
    int smsize = sizeof (int) * (blocksize.x + 2) * (blocksize.y + 2);
    // Size of the label array: one int per ROI pixel.
    int data_size = insubimgCud.imgMeta.width * insubimgCud.imgMeta.height *
                    sizeof (int);
    // Device label array, one entry per pixel. FIX: initialized to NULL so
    // the error path below cannot act on an indeterminate pointer (the
    // original code called cudaFree on the uninitialized pointer when
    // cudaMalloc failed).
    int *devLabel = NULL;
    cudaError_t cudaerrcode;
    // Allocate the label array.
    cudaerrcode = cudaMalloc((void **)&devLabel, data_size);
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;
    // Initialize the labels inside each thread block.
    _initLabelPerBlockKer<<<gridsize, blocksize, smsize>>>(
            insubimgCud, devLabel, seed, threshold);
    // Dimensions of the block region merged in each pass.
    int blockw, blockh, blockz;
    // For the first pass, the merge region is one initial thread block.
    blockw = blocksize.x;
    blockh = blocksize.y;
    // The merge kernel uses 3-D thread blocks; the z extent is the larger
    // of the initial block width and height.
    blockz = blockw;
    if (blockw < blockh)
        blockz = blockh;
    // Merge 4 x 4 blocks per pass; this tile shape needs the fewest
    // iterations.
    int xtiles = 4, ytiles = 4;
    // Grid extent (in blocks) still left to merge, per dimension.
    int tilesizex = gridsize.x;
    int tilesizey = gridsize.y;
    // Block and grid used for the merge passes.
    dim3 blockformerge, gridformerge;
    // A thread block may hold at most 1024 threads. As soon as
    // tilesizex * tilesizey * blockz fits in one 3-D block, all remaining
    // blocks can be merged in a single launch, so that is the loop's
    // termination condition.
    while (tilesizex * tilesizey * blockz > 1024) {
        // Shrink the merge grid, rounding up.
        tilesizex = (tilesizex - 1) / xtiles + 1;
        tilesizey = (tilesizey - 1) / ytiles + 1;
        // 3-D merge block: 4 x 4 tiles, z sweeps along the borders.
        blockformerge.x = xtiles; blockformerge.y = ytiles;
        blockformerge.z = blockz;
        // 2-D merge grid.
        gridformerge.x = tilesizex; gridformerge.y = tilesizey;
        gridformerge.z = 1;
        // Merge 4 x 4 block regions per launch.
        _mergeBordersKer<<<gridformerge, blockformerge>>>(
                insubimgCud, devLabel, blockw, blockh,
                blocksize.x, blocksize.y, threshold);
        // Each pass merges 4 x 4 regions, so the region dimensions grow
        // by the tile factors for the next pass.
        blockw *= xtiles;
        blockh *= ytiles;
    }
    // Final merge pass: all remaining regions fit in one thread block.
    blockformerge.x = tilesizex; blockformerge.y = tilesizey;
    blockformerge.z = blockz;
    // Single-block grid for the final pass.
    gridformerge.x = 1; gridformerge.y = 1;gridformerge.z = 1;
    // Launch the final merge.
    _mergeBordersKer<<<gridformerge, blockformerge>>>(
            insubimgCud, devLabel, blockw, blockh,
            blocksize.x, blocksize.y, threshold);
    // Resolve the final labels and paint the output image.
    _markFinalLabelKer<<<gridsize, blocksize>>>(
            outsubimgCud, devLabel);
    // Release the label array to avoid a memory leak.
    cudaFree(devLabel);
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done.
    return NO_ERROR;
}
/*__host__ int RegionGrow::regionGrow_serial(Image *inimg, Image *outimg)
{
// 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
static int nDx[] = {-1, 0, 1, -1, 1, -1, 0, 1};
static int nDy[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// 定义堆栈,存储坐标
int * pnGrowQueX ;
int * pnGrowQueY ;
int * pUnRegion;
pUnRegion = new int [inimg->width * inimg->height];
// 分配空间
pnGrowQueX = new int [inimg->width * inimg->height];
pnGrowQueY = new int [inimg->width * inimg->height];
// 定义堆栈的起点和终点
// 当nStart=nEnd, 表示堆栈中只有一个点
int nStart = 0;
int nEnd = 0;
int index;
int nCurrX, nCurrY, xx, yy;
int originFlag = 1;
memset(pUnRegion,0,sizeof(int)*inimg->width*inimg->height);
int i, j;
for (i = 0; i < inimg->width; i++)
for (j = 0; j < inimg->height; j++) {
index = j * inimg->width + i;
outimg->imgData[index] = 0;
}
for (int i = 0; i < inimg->width; i++)
for (int j = 0; j < inimg->height; j++)
{
index = j * inimg->width + i;
if (inimg->imgData[index] == seed) {
// 把种子点的坐标压入栈
outimg->imgData[index] = seed;
nStart = nEnd = 0;
pnGrowQueX[nEnd] = i;
pnGrowQueY[nEnd] = j;
originFlag++;
while (nStart <= nEnd) {
nCurrX = pnGrowQueX[nStart];
nCurrY = pnGrowQueY[nStart];
for (int k = 0;k < 8;k++) {
// 4邻域象素的坐标
xx = nCurrX + nDx[k];
yy = nCurrY + nDy[k];
// 判断象素(xx,yy) 是否在图像内部
// 判断象素(xx,yy) 是否已经处理过
if (xx >= 0 && xx < inimg->width
&& yy >= 0 && yy < inimg->height
&& pUnRegion[yy * inimg->width + xx] != originFlag
&& inimg->imgData[yy * inimg->width + xx] > threshold) {
// 堆栈的尾部指针后移一位
nEnd++;
// 象素(xx,yy) 压入栈
pnGrowQueX[nEnd] = xx;
pnGrowQueY[nEnd] = yy;
// 把象素(xx,yy)设置成逻辑()
// 同时也表明该象素处理过
pUnRegion[yy * inimg->width + xx] = originFlag;
outimg->imgData[yy * outimg->width + xx] = seed;
}
}
nStart++;
}
}
}
// 释放内存
delete []pnGrowQueX;
delete []pnGrowQueY;
delete []pUnRegion;
pnGrowQueX = NULL ;
pnGrowQueY = NULL ;
return NO_ERROR;
}*/
// Host member: regionGrow_serial
// CPU reference implementation of region growing: for every pixel whose
// value equals the seed value (and that has not been reached yet), flood
// fill its 8-connected neighbors whose values exceed the threshold and
// paint them with the seed value in the output image.
__host__ int RegionGrow::regionGrow_serial(Image *inimg, Image *outimg)
{
    // Reject NULL images up front.
    if (inimg == NULL || outimg == NULL)
        return NULL_POINTER;
    // 8-neighborhood offsets.
    static int nDx[] = {-1, 0, 1, -1, 1, -1, 0, 1};
    static int nDy[] = {-1, -1, -1, 0, 0, 1, 1, 1};
    // Work queue of pixel coordinates plus a visited map.
    int * pnGrowQueX ;
    int * pnGrowQueY ;
    int * pUnRegion;
    pUnRegion = new int [inimg->width * inimg->height];
    // Allocate the queue (at most one entry per pixel).
    pnGrowQueX = new int [inimg->width * inimg->height];
    pnGrowQueY = new int [inimg->width * inimg->height];
    // Queue head and tail; nStart == nEnd means exactly one element.
    int nStart = 0;
    int nEnd = 0;
    int index;
    int nCurrX, nCurrY, xx, yy;
    memset(pUnRegion,0,sizeof(int)*inimg->width*inimg->height);
    int i, j;
    // Clear the output image.
    for (i = 0; i < inimg->width; i++)
        for (j = 0; j < inimg->height; j++) {
            index = j * inimg->width + i;
            outimg->imgData[index] = 0;
        }
    for (int i = 0; i < inimg->width; i++)
        for (int j = 0; j < inimg->height; j++)
        {
            index = j * inimg->width + i;
            if (inimg->imgData[index] == seed && pUnRegion[index] == 0) {
                // Start a new region from this seed pixel.
                outimg->imgData[index] = seed;
                nStart = nEnd = 0;
                pnGrowQueX[nEnd] = i;
                pnGrowQueY[nEnd] = j;
                // FIX: mark the seed itself as visited when it is pushed.
                // Without this, the seed could be re-pushed by each of its
                // neighbors (its own value may exceed the threshold),
                // doing redundant work and potentially overflowing the
                // fixed-capacity queue.
                pUnRegion[index] = 1;
                while (nStart <= nEnd) {
                    nCurrX = pnGrowQueX[nStart];
                    nCurrY = pnGrowQueY[nStart];
                    // Visit the 8 neighbors of the current pixel.
                    for (int k = 0;k < 8;k++) {
                        xx = nCurrX + nDx[k];
                        yy = nCurrY + nDy[k];
                        // Grow into (xx, yy) only if it lies inside the
                        // image, has not been visited, and its value
                        // exceeds the threshold.
                        if (xx >= 0 && xx < inimg->width
                            && yy >= 0 && yy < inimg->height
                            && pUnRegion[yy * inimg->width + xx] == 0
                            && inimg->imgData[yy * inimg->width + xx] > threshold) {
                            // Push (xx, yy) onto the queue.
                            nEnd++;
                            pnGrowQueX[nEnd] = xx;
                            pnGrowQueY[nEnd] = yy;
                            // Mark it as visited and paint it.
                            pUnRegion[yy * inimg->width + xx] = 1;
                            outimg->imgData[yy * outimg->width + xx] = seed;
                        }
                    }
                    nStart++;
                }
            }
        }
    // Release the work buffers.
    delete []pnGrowQueX;
    delete []pnGrowQueY;
    delete []pUnRegion;
    pnGrowQueX = NULL ;
    pnGrowQueY = NULL ;
    return NO_ERROR;
}
#include <numeric>
#include "cupoch/geometry/intersection_test.h"
#include "cupoch/geometry/occupancygrid.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {

// Enumerates every cell of a dense W x H x D grid. The flat thrust index is
// decomposed as idx = w * (H*D) + h * D + d, which is bijective, so every
// produced key is unique.
struct create_dense_functor {
    create_dense_functor(int num_h, int num_d) : num_h_(num_h), num_d_(num_d){};
    const int num_h_;
    const int num_d_;
    // Maps a flat cell index to (grid_index, empty Voxel at that index).
    __device__ thrust::tuple<Eigen::Vector3i, Voxel> operator()(
            size_t idx) const {
        int widx = idx / (num_h_ * num_d_);
        int hdidx = idx % (num_h_ * num_d_);
        int hidx = hdidx / num_d_;
        int didx = hdidx % num_d_;
        Eigen::Vector3i grid_index(widx, hidx, didx);
        return thrust::make_tuple(grid_index, geometry::Voxel(grid_index));
    }
};

// Buckets one point (and optionally its color) into the cubic voxel of side
// voxel_size_ containing it, relative to min_bound_.
struct create_from_pointcloud_functor {
    create_from_pointcloud_functor(const Eigen::Vector3f &min_bound,
                                   float voxel_size,
                                   bool has_colors)
        : min_bound_(min_bound),
          voxel_size_(voxel_size),
          has_colors_(has_colors){};
    const Eigen::Vector3f min_bound_;
    const float voxel_size_;
    const bool has_colors_;
    __device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()(
            const Eigen::Vector3f &point, const Eigen::Vector3f &color) const {
        // floor() (not int truncation) so coordinates below min_bound_ on
        // any axis still map to the correct (negative) cell.
        Eigen::Vector3f ref_coord = (point - min_bound_) / voxel_size_;
        Eigen::Vector3i voxel_index =
                Eigen::device_vectorize<float, 3, ::floor>(ref_coord)
                        .cast<int>();
        return thrust::make_tuple(
                voxel_index, (has_colors_) ? geometry::Voxel(voxel_index, color)
                                           : geometry::Voxel(voxel_index));
    }
};

// Tests one cell of a dense W x H x D grid against every triangle of a mesh;
// the cell becomes a voxel when any triangle intersects its axis-aligned
// box. Cost is O(num_cells * num_triangles) over the whole transform.
struct create_from_trianglemesh_functor {
    create_from_trianglemesh_functor(const Eigen::Vector3f *vertices,
                                     const Eigen::Vector3i *triangles,
                                     int n_triangles,
                                     const Eigen::Vector3f &min_bound,
                                     float voxel_size,
                                     int num_h,
                                     int num_d)
        : vertices_(vertices),
          triangles_(triangles),
          n_triangles_(n_triangles),
          min_bound_(min_bound),
          voxel_size_(voxel_size),
          box_half_size_(Eigen::Vector3f(
                  voxel_size / 2, voxel_size / 2, voxel_size / 2)),
          num_h_(num_h),
          num_d_(num_d){};
    const Eigen::Vector3f *vertices_;
    const Eigen::Vector3i *triangles_;
    const int n_triangles_;
    const Eigen::Vector3f min_bound_;
    const float voxel_size_;
    const Eigen::Vector3f box_half_size_;
    const int num_h_;
    const int num_d_;
    __device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()(
            size_t idx) const {
        // Same flat-index decomposition as create_dense_functor.
        int widx = idx / (num_h_ * num_d_);
        int hdidx = idx % (num_h_ * num_d_);
        int hidx = hdidx / num_d_;
        int didx = hdidx % num_d_;
        // NOTE(review): this is the cell's minimum corner, but TriangleAABB
        // treats it as the box *center* — the tested box is shifted by half
        // a voxel relative to the cell; confirm this is intended.
        const Eigen::Vector3f box_center =
                min_bound_ + Eigen::Vector3f(widx, hidx, didx) * voxel_size_;
        for (int i = 0; i < n_triangles_; ++i) {
            Eigen::Vector3i tri = triangles_[i];
            const Eigen::Vector3f &v0 = vertices_[tri(0)];
            const Eigen::Vector3f &v1 = vertices_[tri(1)];
            const Eigen::Vector3f &v2 = vertices_[tri(2)];
            if (intersection_test::TriangleAABB(box_center, box_half_size_, v0,
                                                v1, v2)) {
                Eigen::Vector3i grid_index(widx, hidx, didx);
                return thrust::make_tuple(grid_index,
                                          geometry::Voxel(grid_index));
            }
        }
        // No triangle touches this cell: emit a sentinel key that the
        // caller removes with remove_if_vectors afterwards.
        return thrust::make_tuple(
                Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX,
                                INVALID_VOXEL_INDEX),
                geometry::Voxel());
    }
};

}  // namespace
// Builds a fully-populated voxel grid covering width x height x depth world
// units starting at `origin`, one voxel per cell, keys sorted.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateDense(const Eigen::Vector3f &origin,
                                                  float voxel_size,
                                                  float width,
                                                  float height,
                                                  float depth) {
    auto output = std::make_shared<VoxelGrid>();
    // Number of cells along each axis, rounded to the nearest integer.
    int num_w = int(std::round(width / voxel_size));
    int num_h = int(std::round(height / voxel_size));
    int num_d = int(std::round(depth / voxel_size));
    output->origin_ = origin;
    output->voxel_size_ = voxel_size;
    int n_total = num_w * num_h * num_d;
    resize_all(n_total, output->voxels_keys_, output->voxels_values_);
    // Enumerate every cell index on the device in one transform.
    create_dense_functor func(num_h, num_d);
    thrust::transform(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator<size_t>(n_total),
            make_tuple_begin(output->voxels_keys_, output->voxels_values_),
            func);
    // Sort by grid index and deduplicate. Keys are already unique here (the
    // flat-index decomposition is bijective); this leaves the grid in the
    // canonical sorted, unique-key form the rest of the class expects.
    thrust::sort_by_key(
            utility::exec_policy(0)->on(0), output->voxels_keys_.begin(),
            output->voxels_keys_.end(), output->voxels_values_.begin());
    auto end = thrust::unique_by_key(
            utility::exec_policy(0)->on(0), output->voxels_keys_.begin(),
            output->voxels_keys_.end(), output->voxels_values_.begin());
    resize_all(thrust::distance(output->voxels_keys_.begin(), end.first),
               output->voxels_keys_, output->voxels_values_);
    return output;
}
// Voxelizes a point cloud: each point is bucketed into the voxel containing
// it; voxels hit by several points average their colors. The given bounds
// define the grid origin and the overflow check only — points are not
// clipped to them here.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateFromPointCloudWithinBounds(
        const PointCloud &input,
        float voxel_size,
        const Eigen::Vector3f &min_bound,
        const Eigen::Vector3f &max_bound) {
    auto output = std::make_shared<VoxelGrid>();
    if (voxel_size <= 0.0) {
        utility::LogError("[VoxelGridFromPointCloud] voxel_size <= 0.");
    }
    // Reject sizes whose indices would overflow the int grid coordinates.
    if (voxel_size * std::numeric_limits<int>::max() <
        (max_bound - min_bound).maxCoeff()) {
        utility::LogError("[VoxelGridFromPointCloud] voxel_size is too small.");
    }
    output->voxel_size_ = voxel_size;
    output->origin_ = min_bound;
    utility::device_vector<Eigen::Vector3i> voxels_keys(input.points_.size());
    utility::device_vector<geometry::Voxel> voxels_values(input.points_.size());
    bool has_colors = input.HasColors();
    create_from_pointcloud_functor func(min_bound, voxel_size, has_colors);
    if (!has_colors) {
        // No colors: feed a dummy constant color; the functor ignores it.
        thrust::transform(
                input.points_.begin(), input.points_.end(),
                thrust::make_constant_iterator(Eigen::Vector3f(0.0, 0.0, 0.0)),
                make_tuple_begin(voxels_keys, voxels_values), func);
    } else {
        thrust::transform(input.points_.begin(), input.points_.end(),
                          input.colors_.begin(),
                          make_tuple_begin(voxels_keys, voxels_values), func);
    }
    // Group points landing in the same voxel...
    thrust::sort_by_key(utility::exec_policy(0)->on(0), voxels_keys.begin(),
                        voxels_keys.end(), voxels_values.begin());
    utility::device_vector<int> counts(voxels_keys.size());
    resize_all(voxels_keys.size(), output->voxels_keys_,
               output->voxels_values_);
    // ...sum their colors and count members per voxel in one pass...
    auto end = thrust::reduce_by_key(
            utility::exec_policy(0)->on(0), voxels_keys.begin(),
            voxels_keys.end(),
            make_tuple_iterator(voxels_values.begin(),
                                thrust::make_constant_iterator(1)),
            output->voxels_keys_.begin(),
            make_tuple_begin(output->voxels_values_, counts),
            thrust::equal_to<Eigen::Vector3i>(), add_voxel_color_functor());
    resize_all(thrust::distance(output->voxels_keys_.begin(), end.first),
               output->voxels_keys_, output->voxels_values_);
    // ...then divide each summed color by its count to get the mean.
    thrust::transform(output->voxels_values_.begin(),
                      output->voxels_values_.end(), counts.begin(),
                      output->voxels_values_.begin(),
                      devide_voxel_color_functor());
    utility::LogDebug(
            "Pointcloud is voxelized from {:d} points to {:d} voxels.",
            (int)input.points_.size(), (int)output->voxels_keys_.size());
    return output;
}
// Convenience overload: voxelizes over the cloud's own axis-aligned bounds,
// padded by half a voxel on every side.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateFromPointCloud(
        const PointCloud &input, float voxel_size) {
    const Eigen::Vector3f half_voxel =
            Eigen::Vector3f(voxel_size, voxel_size, voxel_size) * 0.5;
    return CreateFromPointCloudWithinBounds(input, voxel_size,
                                            input.GetMinBound() - half_voxel,
                                            input.GetMaxBound() + half_voxel);
}
// Voxelizes a triangle mesh by brute force: every cell of the dense grid
// spanned by [min_bound, max_bound] is tested against every triangle, and
// the cells any triangle intersects are kept as voxels.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateFromTriangleMeshWithinBounds(
        const TriangleMesh &input,
        float voxel_size,
        const Eigen::Vector3f &min_bound,
        const Eigen::Vector3f &max_bound) {
    auto output = std::make_shared<VoxelGrid>();
    if (voxel_size <= 0.0) {
        utility::LogError("[CreateFromTriangleMesh] voxel_size <= 0.");
    }
    // Reject sizes whose indices would overflow the int grid coordinates.
    if (voxel_size * std::numeric_limits<int>::max() <
        (max_bound - min_bound).maxCoeff()) {
        utility::LogError("[CreateFromTriangleMesh] voxel_size is too small.");
    }
    output->voxel_size_ = voxel_size;
    output->origin_ = min_bound;
    Eigen::Vector3f grid_size = max_bound - min_bound;
    int num_w = int(std::round(grid_size(0) / voxel_size));
    int num_h = int(std::round(grid_size(1) / voxel_size));
    int num_d = int(std::round(grid_size(2) / voxel_size));
    size_t n_total = num_w * num_h * num_d;
    create_from_trianglemesh_functor func(
            thrust::raw_pointer_cast(input.vertices_.data()),
            thrust::raw_pointer_cast(input.triangles_.data()),
            input.triangles_.size(), min_bound, voxel_size, num_h, num_d);
    // One output slot per cell; non-intersecting cells produce the
    // INVALID_VOXEL_INDEX sentinel and are compacted away below.
    resize_all(n_total, output->voxels_keys_, output->voxels_values_);
    thrust::transform(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator(n_total),
            make_tuple_begin(output->voxels_keys_, output->voxels_values_),
            func);
    auto check_fn =
            [] __device__(
                    const thrust::tuple<Eigen::Vector3i, geometry::Voxel> &x)
            -> bool {
        Eigen::Vector3i idxs = thrust::get<0>(x);
        return idxs == Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX,
                                       INVALID_VOXEL_INDEX);
    };
    remove_if_vectors(utility::exec_policy(0)->on(0), check_fn,
                      output->voxels_keys_, output->voxels_values_);
    return output;
}
// Convenience overload: voxelizes over the mesh's own axis-aligned bounds,
// padded by half a voxel on every side.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateFromTriangleMesh(
        const TriangleMesh &input, float voxel_size) {
    const Eigen::Vector3f half_voxel =
            Eigen::Vector3f(voxel_size, voxel_size, voxel_size) * 0.5;
    return CreateFromTriangleMeshWithinBounds(input, voxel_size,
                                              input.GetMinBound() - half_voxel,
                                              input.GetMaxBound() + half_voxel);
}
// Converts an occupancy grid into a voxel grid: one voxel (with the source
// color) per occupied cell; origin and voxel size are copied over.
std::shared_ptr<VoxelGrid> VoxelGrid::CreateFromOccupancyGrid(
        const OccupancyGrid &input) {
    auto output = std::make_shared<VoxelGrid>();
    if (input.voxel_size_ <= 0.0) {
        utility::LogError(
                "[CreateFromOccupancyGrid] occupancy grid voxel_size <= 0.");
    }
    output->voxel_size_ = input.voxel_size_;
    output->origin_ = input.origin_;
    // Only cells classified as occupied are materialized.
    std::shared_ptr<utility::device_vector<OccupancyVoxel>> occvoxels =
            input.ExtractOccupiedVoxels();
    output->voxels_keys_.resize(occvoxels->size());
    output->voxels_values_.resize(occvoxels->size());
    thrust::transform(
            occvoxels->begin(), occvoxels->end(),
            make_tuple_begin(output->voxels_keys_, output->voxels_values_),
            [] __device__(const OccupancyVoxel &voxel) {
                return thrust::make_tuple(
                        voxel.grid_index_.cast<int>(),
                        Voxel(voxel.grid_index_.cast<int>(), voxel.color_));
            });
    return output;
}
namespace anakin{
namespace saber{
// Group normalization forward kernel.
//
// One block handles one (batch, group) pair: blockIdx.x == batch * group +
// group_index. The block accumulates sum and sum-of-squares over the group's
// channels (the last group may hold fewer than group_size channels), reduces
// them in shared memory, then writes y = gamma * (x - mean) / sqrt(var+eps)
// + beta. thread_number must be a power of two.
//
// Fixes vs. the previous revision:
//  * bias (beta) is now ADDED, matching the group-norm affine transform —
//    it was previously multiplied in.
//  * the tree reduction keeps __syncthreads() through every step and before
//    the final broadcast read of block_sums[0]/block_squares[0]; the old
//    warp-synchronous tail read non-volatile shared memory without any
//    synchronization, which races on Volta+ and also left threads >= 32
//    reading block_sums[0] before the reduction finished.
template <typename dtype, int thread_number>
__global__ void group_normalize_kernel(const dtype* in_data, const dtype* scale,
                   const dtype* bias, int n, int c, int h, int w, int group,
                   int group_size, float eps, dtype* out_data, dtype* out_mean,
                   dtype* out_var){
    __shared__ dtype block_sums[thread_number];
    __shared__ dtype block_squares[thread_number];
    int group_index = blockIdx.x;
    int thread_index = threadIdx.x;
    block_sums[thread_index] = 0;
    block_squares[thread_index] = 0;
    int batch_index = group_index / group;
    int inner_group_index = group_index % group;
    // The last group of a batch may cover fewer than group_size channels.
    int real_channel = (c - inner_group_index * group_size) >= group_size ?
        group_size : c - inner_group_index * group_size;
    int compute_size = real_channel * w * h;
    int group_start_ind = inner_group_index * group_size + batch_index * c;
    int group_start_num = group_start_ind * h * w;
    // Per-thread partial sums over the group's elements.
    for (int i = thread_index; i < compute_size; i += thread_number){
        block_sums[thread_index] += in_data[group_start_num + i];
        block_squares[thread_index] += in_data[group_start_num + i] * in_data[group_start_num + i];
    }
    __syncthreads();
    // Fully synchronized tree reduction (thread_number is a power of two).
    for (int activate = thread_number / 2; activate > 0; activate >>= 1){
        if (thread_index < activate){
            block_sums[thread_index] += block_sums[thread_index + activate];
            block_squares[thread_index] += block_squares[thread_index + activate];
        }
        __syncthreads();
    }
    // After the final __syncthreads() every thread can safely read slot 0.
    dtype group_mean = block_sums[0] / compute_size;
    dtype group_var = block_squares[0] / compute_size - group_mean * group_mean;
    dtype group_var_inv = 1 / sqrt(group_var + eps);
    for (int i = thread_index; i < compute_size; i += thread_number){
        int c_index = i / (h * w);
        dtype dest_val = (in_data[group_start_num + i] - group_mean) * group_var_inv;
        // NOTE(review): scale/bias are indexed with the batch offset
        // included (group_start_ind contains batch_index * c) — confirm the
        // parameter tensors are laid out per (batch, channel) and not just
        // per channel.
        if (scale){
            dest_val *= scale[group_start_ind + c_index];
        }
        if (bias){
            // Fix: beta is additive (y = gamma * x_hat + beta); was `*=`.
            dest_val += bias[group_start_ind + c_index];
        }
        out_data[group_start_num + i] = dest_val;
    }
    // Optionally export the per-group statistics.
    if (out_mean){
        out_mean[group_index] = group_mean;
    }
    if (out_var){
        out_var[group_index] = group_var;
    }
}
// Per-position normalization (reduces across channels only, not across
// space): each thread owns one (batch, spatial) position of an NCHW tensor,
// accumulates the Lp norm over the channel axis, then rescales all channels
// at that position. p == 1 selects the L1 norm, any other p the L2 norm.
// `shared` means a single learned scale for all channels; otherwise one
// scale value per channel.
template <typename Dtype, bool has_scale, bool shared>
__global__ void normalize_kernel_no_across_spatial(const int size_in_channel, const int n,\
    const int channels,const Dtype* scale, const Dtype* bottom_data, Dtype* top_data, const float eps, const int p){
    CUDA_KERNEL_LOOP(index, size_in_channel*n){
        float sqr_sum = 0.f;
        int num_index=index/size_in_channel;
        int index_in_channel=index%size_in_channel;
        // Offset of channel 0 at this (batch, spatial) position.
        int data_index=num_index*channels*size_in_channel+index_in_channel;
        for (int i = 0; i < channels; ++i) {
            if (p == 1) {
                sqr_sum += fabsf(bottom_data[data_index + i * size_in_channel]);
            } else {
                sqr_sum += bottom_data[data_index + i * size_in_channel] * \
                    bottom_data[data_index + i * size_in_channel];
            }
        }
        // eps keeps the reciprocal finite for all-zero channel vectors.
        float norm;
        if (p == 1) {
            norm = 1.f / (sqr_sum + eps);
        } else {
            norm = 1.f / sqrtf(sqr_sum+ eps);
        }
        for (int i = 0; i < channels; ++i) {
            if (has_scale) {
                if (shared) {
                    // One learned scale shared by every channel.
                    top_data[data_index + i * size_in_channel] = \
                        bottom_data[data_index + i * size_in_channel] * scale[0]*norm;
                } else {
                    // One learned scale per channel.
                    top_data[data_index + i * size_in_channel] = \
                        bottom_data[data_index + i * size_in_channel] * scale[i]*norm;
                }
            } else {
                top_data[data_index + i * size_in_channel] = \
                    bottom_data[data_index + i * size_in_channel] * norm;
            }
        }
    }
}
// Element-wise reciprocal power: dst[i] = 1 / (src[i]^alpha + eps).
// eps keeps the division finite when src[i]^alpha is zero.
template <typename dtype>
__global__ void gpu_pow_reverse(int n, \
    const dtype* src, dtype* dst, dtype alpha, dtype eps) {
    CUDA_KERNEL_LOOP(idx, n) {
        const dtype powered = pow(src[idx], alpha);
        dst[idx] = 1 / (eps + powered);
    }
}
// Partial reduction kernel: computes, for each of gridDim.y slices of
// `inner_size` consecutive elements, the sum of |x| (p == 1) or x*x
// (otherwise), and atomically accumulates each block's partial sum into
// dst[blockIdx.y]. The caller must zero `dst` beforehand and launch with
// dynamic shared memory of blockSize * sizeof(dtype).
template <unsigned int blockSize, typename dtype >
__global__ void reduce_add_atomic(int total_size, int p, int inner_size, \
    const dtype* src, dtype* dst) {
    extern __shared__ dtype sdata[];
    int tid = threadIdx.x;
    // Element handled by this thread inside slice blockIdx.y.
    int i = blockIdx.y * inner_size + blockIdx.x * blockSize + tid;
    // First element past the current slice: threads beyond it contribute 0.
    int idx_limit = (blockIdx.y + 1) * inner_size;
    //int gridSize = blockSize * 2 * gridDim.y;
#if 0
    dtype sum = 0;
    while (i < total_size) {
        sum += src[i] * src[i];
        // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
        if (i + blockSize < total_size) {
            sum += src[i + blockSize] * src[i + blockSize];
        }
        i += gridSize;
    }
#endif
    //! L1 norm
    if (p == 1) {
        sdata[tid] = i < idx_limit ? fabsf(src[i]) : 0;
    } else {
        //! L2 norm
        sdata[tid] = i < idx_limit ? src[i] * src[i] : 0;
    }
    __syncthreads();
    // Tree reduction in shared memory down to 64 elements, with a barrier
    // after every step.
    if (blockSize >= 1024) {
        if (tid < 512) {
            sdata[tid] += sdata[tid + 512];
        }
        __syncthreads();
    }
    if (blockSize >= 512) {
        if (tid < 256) {
            sdata[tid] += sdata[tid + 256];
        }
        __syncthreads();
    }
    if (blockSize >= 256) {
        if (tid < 128) {
            sdata[tid] += sdata[tid + 128];
        }
        __syncthreads();
    }
    if (blockSize >= 128) {
        if (tid < 64) {
            sdata[tid] += sdata[tid + 64];
        }
        __syncthreads();
    }
#if 0
    dtype sum = sdata[tid];
    __syncthreads();
    if ( tid < 32 ) {
        //! Fetch final intermediate sum from 2nd warp
        if (blockSize >= 64) {
            sum += sdata[tid + 32];
        }
        //! Reduce final warp using shuffle
        for (int offset = warpSize / 2; offset > 0; offset /= 2) {
            sum += __shfl_down(sum, offset);
        }
        //! write result for this block to global mem
        if (tid == 0) {
            atomicAdd(dst + blockIdx.y, sum);
        }
    }
#endif
#if 1
    // Warp-synchronous tail: `volatile` prevents the shared reads from
    // being cached in registers. NOTE(review): this idiom relies on
    // implicit lock-step execution of the first warp; on Volta+ it may need
    // __syncwarp() between steps — confirm the target architectures.
    if ( tid < 32 ) {
        volatile dtype *vsum = sdata;
        if (blockSize >= 64) {
            vsum[tid] += vsum[tid + 32];
        }
        if (blockSize >= 32) {
            vsum[tid] += vsum[tid + 16];
        }
        if (blockSize >= 16) {
            vsum[tid] += vsum[tid + 8];
        }
        if (blockSize >= 8) {
            vsum[tid] += vsum[tid + 4];
        }
        if (blockSize >= 4) {
            vsum[tid] += vsum[tid + 2];
        }
        if (blockSize >= 2) {
            vsum[tid] += vsum[tid + 1];
        }
        //! write result for this block to global mem
        if (tid == 0) {
            atomicAdd(dst + blockIdx.y, *vsum);
        }
    }
#endif
};
//! normalize with scale
// Applies a precomputed reciprocal norm plus a learned scale to every
// element: dst = src * (1/norm of its outer slice) * scale. With
// channel_shared a single scale value is used; otherwise the scale is
// selected per channel from the element's position.
template <typename dtype, bool channel_shared>
__global__ void normalize_with_scale_kernel(
    int n, int inner_size, int channel_stride, int channel_size, \
    const dtype* norm, const dtype* scale, \
    const dtype* src, dtype* dst) {
    CUDA_KERNEL_LOOP(idx, n) {
        const dtype inv_norm = norm[idx / inner_size];
        if (channel_shared) {
            dst[idx] = src[idx] * inv_norm * scale[0];
        } else {
            const int channel = (idx / channel_stride) % channel_size;
            dst[idx] = src[idx] * inv_norm * scale[channel];
        }
    }
}
//! normalize without scale
// Multiplies every element by the precomputed reciprocal norm of the outer
// slice (of length inner_size) it belongs to.
template <typename dtype>
__global__ void normalize_kernel(int n, int inner_size, \
    const dtype* norm, const dtype* src, dtype* dst) {
    CUDA_KERNEL_LOOP(idx, n) {
        const int slice = idx / inner_size;
        dst[idx] = norm[slice] * src[idx];
    }
}
//! normalize with scale
// Fused variant: derives the reciprocal norm from the raw per-slice sum in
// `norm` (thread 0 computes 1/(sqrt(mean-square)+1e-6) into shared memory,
// all threads of the block reuse it), then applies it together with the
// learned scale. One block row (blockIdx.y) per slice.
// NOTE(review): parameter `n` is unused, and the 1e-6 floor is hard-coded
// rather than taking the layer's eps — confirm both are intentional.
template <typename dtype, bool channel_shared>
__global__ void normalize_with_scale_compute_norm_kernel(
    int n, int inner_size, int channel_stride, int channel_size, \
    const dtype* norm, const dtype* scale, \
    const dtype* src, dtype* dst) {
    __shared__ dtype sdata[1];
    int tid = threadIdx.x;
    int i = blockIdx.y * inner_size + blockIdx.x * blockDim.x + tid;
    // First element past this block's slice.
    int idx_limit = (blockIdx.y + 1) * inner_size;
    if(tid == 0) {
        sdata[0] = 1 / (sqrtf(norm[blockIdx.y] / inner_size) + 1e-6f);
    }
    __syncthreads();
    if (channel_shared) {
        // One learned scale shared by every channel.
        if (i < idx_limit) {
            dst[i] = src[i] * sdata[0] * scale[0];
        }
    } else {
        // One learned scale per channel.
        if (i < idx_limit) {
            int channel = (i / channel_stride) % channel_size;
            dst[i] = src[i] * sdata[0] * scale[channel];
        }
    }
}
//! normalize without scale
// Fused variant without scale: derives 1/(sqrt(mean-square)+1e-6) from the
// raw per-slice sum in `norm` (thread 0 writes it to shared memory, the
// whole block reuses it) and rescales the slice. One block row per slice.
// NOTE(review): `n` is unused and the 1e-6 floor is hard-coded — confirm.
template <typename dtype>
__global__ void normalize_compute_norm_kernel(int n, int inner_size, \
    const dtype* norm, const dtype* src, dtype* dst) {
    __shared__ dtype sdata[1];
    int tid = threadIdx.x;
    int i = blockIdx.y * inner_size + blockIdx.x * blockDim.x + tid;
    // First element past this block's slice.
    int idx_limit = (blockIdx.y + 1) * inner_size;
    if(tid == 0) {
        sdata[0] = 1 / (sqrtf(norm[blockIdx.y] / inner_size) + 1e-6f);
    }
    __syncthreads();
    if (i < idx_limit) {
        dst[i] = src[i] * sdata[0];
    }
}
// Dispatches the normalize op for float tensors on NV devices.
// Three paths, selected from the parameters:
//  1. param.group > 0           -> group normalization (one kernel).
//  2. !param.across_spatial     -> Lp normalization across channels only.
//  3. otherwise                 -> Lp normalization across C*H*W (or H*W),
//     done as reduce -> reciprocal-power -> rescale, three kernels.
template <>
SaberStatus SaberNormalize<NV, AK_FLOAT>::dispatch(\
    const std::vector<DataTensor_in*>& inputs, \
    std::vector<DataTensor_out*>& outputs, \
    NormalizeParam<NV> &param) {
    cudaStream_t stream = this->_ctx->get_compute_stream();
    const float* src = static_cast<float*>(inputs[0]->data());
    float* dst = static_cast<float*>(outputs[0]->mutable_data());
    const float eps = param.eps;
    int n = inputs[0] -> num();
    int c = inputs[0] -> channel();
    int h = inputs[0] -> height();
    int w = inputs[0] -> width();
    if (param.group > 0){
        // ---- Path 1: group normalization ----
        float* scale = nullptr;
        float* bias = nullptr;
        float* out_mean = nullptr;
        float* out_var = nullptr;
        // Channels per group, rounded up so every channel is covered.
        int group_size = (c - 1) / param.group + 1;
        if (param.has_scale){
            scale = static_cast<float*>(param.scale->data());
        }
        if (param.has_bias){
            bias = static_cast<float*>(param.bias->data());
        }
        // Optional extra outputs receive the per-group mean and variance.
        // NOTE(review): these write through data() instead of
        // mutable_data(), unlike outputs[0] above — confirm intended.
        if (outputs.size() > 1){
            out_mean = static_cast<float*>(outputs[1]->data());
        }
        if (outputs.size() > 2){
            out_var = static_cast<float*>(outputs[2]->data());
        }
        // One block per (batch, group) pair.
        // NOTE(review): the kernel declares its shared arrays statically,
        // so the dynamic shared-memory size passed here looks unused.
        int blocks = n * param.group;
        group_normalize_kernel<float, CUDA_NUM_THREADS>
            <<<blocks, CUDA_NUM_THREADS, 2 * CUDA_NUM_THREADS * sizeof(float), stream>>>
            (src, scale, bias, n, c, h, w, param.group, group_size, eps,
            dst, out_mean, out_var);
        return SaberSuccess;
    }
    if (!param.across_spatial) {
        // ---- Path 2: normalize across channels at each spatial position ----
        int num=inputs[0]->num();
        int size_in_channel = inputs[0]->width() * inputs[0]->height();
        int thread_num=size_in_channel*num;
        int channel = inputs[0]->channel();
        if (param.has_scale) {
            if (param.channel_shared) {
                normalize_kernel_no_across_spatial<float, true, true> \
                    <<<CUDA_GET_BLOCKS(thread_num), CUDA_NUM_THREADS, 0, stream>>>\
                    (size_in_channel,num, channel, static_cast<float*>(param.scale->data()), src, dst, param.eps, param.p);
            } else {
                normalize_kernel_no_across_spatial<float, true, false> \
                    <<<CUDA_GET_BLOCKS(thread_num), CUDA_NUM_THREADS, 0, stream>>>\
                    (size_in_channel,num, channel, static_cast<float*>(param.scale->data()), src, dst, param.eps, param.p);
            }
        } else {
            normalize_kernel_no_across_spatial<float, false, false> \
                <<<CUDA_GET_BLOCKS(thread_num), CUDA_NUM_THREADS, 0, stream>>>\
                (size_in_channel, num,channel, nullptr, src, dst, param.eps, param.p);
        }
    } else {
        // ---- Path 3: normalize across the whole slice ----
        float* norm_reduce_ptr = static_cast<float*>(_norm_reduce.mutable_data());
        const size_t share_mem_size = CUDA_NUM_THREADS * sizeof(float);
        //! compute sum across C * H * W or H * W
        int blockx = CUDA_NUM_THREADS;
        int gridy = _norm_size;
        //! each thread compute one value
        int gridx = (_compute_size + blockx - 1) / blockx;
        dim3 grid(gridx, gridy);
        // The reduce kernel accumulates with atomicAdd, so the accumulator
        // must start at zero. NOTE(review): this synchronous memset ignores
        // `stream` (the async variant is commented out) — confirm intended.
        //cudaMemsetAsync(norm_reduce_ptr, 0, sizeof(float) * _norm_size, stream);
        cudaMemset(norm_reduce_ptr, 0, sizeof(float) * _norm_size);
        reduce_add_atomic<CUDA_NUM_THREADS, float>\
            <<<grid, CUDA_NUM_THREADS, share_mem_size, stream>>>\
            (_size, param.p, _compute_size, src, norm_reduce_ptr);
#if 0 //compute norm in one kernel
        if (param.has_scale) {
            //! scale is shared across channel
            if (param.channel_shared) {
                normalize_with_scale_compute_norm_kernel<float, true>\
                    <<<grid, CUDA_NUM_THREADS, 0, stream>>>\
                    (_size, _compute_size, _channel_stride, _channels, _norm_reduce.data(), \
                    param.scale->data(), inputs[0]->data(), outputs[0]->mutable_data());
            } else {//! scale is diffs across channel
                normalize_with_scale_compute_norm_kernel<float, false>\
                    <<<grid, CUDA_NUM_THREADS, 0, stream>>>\
                    (_size, _compute_size, _channel_stride, _channels, _norm_reduce.data(), \
                    param.scale->data(), inputs[0]->data(), outputs[0]->mutable_data());
            }
        } else { //! without scale
            normalize_compute_norm_kernel<float>\
                <<<grid, CUDA_NUM_THREADS, 0, stream>>>\
                (_size, _compute_size, _norm_reduce.data(), \
                inputs[0]->data(), outputs[0]->mutable_data());
        }
        cudaDeviceSynchronize();
#else
        //compute norm and result individually
        //! compute square root (pw = 0.5) for L2, identity (pw = 1) for L1
        float pw = 0.5f;
        if (param.p == 1) {
            pw = 1.f;
        }
        // In-place: _norm_reduce now holds 1 / (norm^pw + eps) per slice.
        gpu_pow_reverse<float><<<CUDA_GET_BLOCKS(_norm_size), CUDA_NUM_THREADS, 0, stream>>>\
            (_norm_size, static_cast<float*>(_norm_reduce.data()), static_cast<float*>(_norm_reduce.mutable_data()), pw, eps);
        //! compute output with scale
        if (param.has_scale) {
            //! scale is shared across channel
            if (param.channel_shared) {
                normalize_with_scale_kernel<float, true>\
                    <<<CUDA_GET_BLOCKS(_size), CUDA_NUM_THREADS, 0, stream>>>\
                    (_size, _compute_size, _channel_stride, _channels, static_cast<float*>(_norm_reduce.data()), \
                    static_cast<float*>(param.scale->data()), static_cast<float*>(inputs[0]->data()), static_cast<float*>(outputs[0]->mutable_data()));
            } else {//! scale is diffs across channel
                normalize_with_scale_kernel<float, false>\
                    <<<CUDA_GET_BLOCKS(_size), CUDA_NUM_THREADS, 0, stream>>>\
                    (_size, _compute_size, _channel_stride, _channels, static_cast<float*>(_norm_reduce.data()), \
                    static_cast<float*>(param.scale->data()), static_cast<float*>(inputs[0]->data()), static_cast<float*>(outputs[0]->mutable_data()));
            }
        } else { //! without scale
            normalize_kernel<float><<<CUDA_GET_BLOCKS(_size), CUDA_NUM_THREADS, 0, stream>>>\
                (_size, _compute_size, static_cast<float*>(_norm_reduce.data()), \
                static_cast<float*>(inputs[0]->data()), static_cast<float*>(outputs[0]->mutable_data()));
        }
#endif
    }
    return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberNormalize, NormalizeParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberNormalize, NormalizeParam, NV, AK_INT8);
} //namespace saber
} //namespace anakin | the_stack |
#include <algorithm>
#include <cub/cub.cuh>
#include <iostream>
#include <utility>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/update.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.hpp"
#include "HugeCTR/include/shuffle/shuffle.cuh"
#include "HugeCTR/include/tensor2.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace hybrid_embedding {
namespace infrequent_embedding_kernels {
// SGD update of the infrequent embedding table from gradients received in a
// hierarchical (per-network, fixed-capacity) communication buffer.
// Blocks stride over the model indices; threadIdx.x addresses one element
// of the embedding vector (assumes blockDim.x == embedding_vec_size —
// launch site not visible here). Updates use atomicAdd of -lr * grad since
// the same category can appear in several samples.
template <typename dtype, typename emtype>
__global__ void hier_update_model(InfrequentEmbeddingSelectionView<dtype>* indices,
                                  const dtype* __restrict__ category_location,
                                  const emtype* __restrict__ gradients,
                                  float* __restrict__ embedding_vectors,
                                  uint32_t embedding_vec_size, uint32_t num_instances,
                                  uint32_t local_samples_size, uint32_t local_comm_buff_size,
                                  const float* __restrict__ lr_ptr, const float scale) {
  // Learning rate is read once and pre-divided by the loss scale.
  float lr = __ldg(lr_ptr) / scale;
  const uint32_t num_indices = indices->model_indices_offsets[num_instances];

  // Load offset only when the network_id changes (indices are grouped by
  // network, so this caches the segment base between iterations).
  uint32_t previous_network_id = 0;
  uint32_t offset = 0;

  for (uint32_t i = blockIdx.x; i < num_indices; i += gridDim.x) {
    uint32_t index = indices->model_indices[i];
    dtype category = indices->samples[index];
    // category_location stores (model_id, row) pairs; slot 2k+1 is the row
    // of category k in the local embedding table.
    dtype location = category_location[2 * category + 1];
    uint32_t network_id = index / local_samples_size;
    if (network_id != previous_network_id) {
      offset = indices->model_indices_offsets[network_id];
      previous_network_id = network_id;
    }
    // Gradient lives at position (i - offset) inside network_id's segment
    // of the communication buffer (each segment is local_comm_buff_size
    // vectors long).
    atomicAdd(
        embedding_vectors + location * embedding_vec_size + threadIdx.x,
        -lr * TypeConvertFunc<float, emtype>::convert(
                  gradients[embedding_vec_size * (network_id * local_comm_buff_size + i - offset) +
                            threadIdx.x]));
  }
}
// SGD update of the infrequent embedding table reading each network's
// gradient buffer directly (one device pointer per peer instance).
// Blocks stride over the model indices, starting from this model's own
// offset (shift pattern) so concurrent instances touch different peers
// first. threadIdx.x addresses one element of the embedding vector
// (assumes blockDim.x == embedding_vec_size — launch site not visible).
template <typename dtype, typename emtype>
__global__ void infrequent_update_model_direct(
    const emtype* const* __restrict__ gradients_pointers, float* embedding_vectors,
    InfrequentEmbeddingSelectionView<dtype>* indices, const dtype* __restrict__ category_location,
    uint32_t num_instances, uint32_t model_id, uint32_t embedding_vec_size,
    uint32_t local_samples_size, const float* __restrict__ lr_ptr, const float scale) {
  // Learning rate is read once and pre-divided by the loss scale.
  float lr = __ldg(lr_ptr) / scale;
  // Shift pattern: iterate the circular index space starting after this
  // model's own segment.
  const uint32_t offset = indices->model_indices_offsets[model_id + 1];
  const uint32_t num_model_indices = indices->model_indices_offsets[num_instances];

  for (uint32_t i = blockIdx.x; i < num_model_indices; i += gridDim.x) {
    uint32_t vid = (i + offset) % num_model_indices;

    uint32_t index = indices->model_indices[vid];
    uint32_t network_id = index / local_samples_size;
    uint32_t local_index = index % local_samples_size;
    dtype category = indices->samples[index];
    // Row of this category in the local embedding table.
    uint32_t location = category_location[2 * category + 1];

    // Gradient fetched straight from the owning network's buffer; atomic
    // because the same category may occur in several samples.
    const emtype* gradients = gradients_pointers[network_id];
    atomicAdd(embedding_vectors + location * embedding_vec_size + threadIdx.x,
              -lr * TypeConvertFunc<float, emtype>::convert(
                        gradients[local_index * embedding_vec_size + threadIdx.x]));
  }
}
// template <typename dtype>
// __global__ void calculate_network_indices_mask(const dtype* __restrict__ local_samples,
// const dtype* __restrict__ category_location,
// bool* mask, uint32_t local_samples_size,
// uint32_t num_instances) {
// for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < local_samples_size;
// i += gridDim.x * blockDim.x) {
// dtype category = local_samples[i];
// uint32_t model_id = static_cast<uint32_t>(category_location[2 * category]);
// for (uint32_t section_id = 0; section_id < num_instances; section_id++) {
// mask[local_samples_size * section_id + i] = (model_id == section_id);
// }
// }
// }
// Converts an offsets array (num_instances + 1 entries) into per-instance
// byte sizes: sizes[t] = (offsets[t+1] - offsets[t]) * element_size.
// The offsets pointer is obtained lazily on the device via the lambda.
template <typename LambdaPtr>
static __global__ void offsets_to_sizes(size_t* sizes, LambdaPtr get_offsets_ptr,
                                        size_t element_size, uint32_t num_instances) {
  uint32_t* offsets = get_offsets_ptr();
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_instances; idx += stride) {
    sizes[idx] = element_size * (offsets[idx + 1] - offsets[idx]);
  }
}
} // namespace infrequent_embedding_kernels
template <typename dtype>
InfrequentEmbeddingBase<dtype>::InfrequentEmbeddingBase() {
  // Device-side slot for the index-selection view; its contents are filled
  // in by set_current_indices() and the memory is freed in the destructor.
  HCTR_LIB_THROW(cudaMalloc(&indices_view_, sizeof(*indices_view_)));
}
template <typename dtype>
InfrequentEmbeddingBase<dtype>::~InfrequentEmbeddingBase() {
  // Return value deliberately ignored: throwing from a destructor would
  // terminate. NOTE(review): the class owns this raw device pointer, so a
  // copied instance would double-free it — confirm copy/move are disabled
  // in the class declaration.
  cudaFree(indices_view_);
}
// Points this embedding at a new selection of indices: keeps host-side
// references to the selection and its data, and asynchronously copies the
// selection's device view into this object's own device slot.
template <typename dtype>
void InfrequentEmbeddingBase<dtype>::set_current_indices(
    InfrequentEmbeddingSelection<dtype>* indices, cudaStream_t stream) {
  indices_ = indices;
  data_ = indices->get_data();
  // Device-to-device copy; completes on `stream`, so kernels launched on
  // the same stream afterwards see the updated view.
  HCTR_LIB_THROW(cudaMemcpyAsync(indices_view_, indices->get_device_view(), sizeof(*indices_view_),
                                 cudaMemcpyDeviceToDevice, stream));
}
// Allocates the per-GPU storage for the infrequent embedding: the local
// shard of embedding vectors (categories are distributed round-robin, hence
// ceildiv by num_instances), per-instance size/pointer scratch tensors, and
// managed-memory offset arrays readable from both host and device.
template <typename dtype, typename emtype>
InfrequentEmbedding<dtype, emtype>::InfrequentEmbedding(const Model<dtype>& model,
                                                        const GPUResource& gpu_resource,
                                                        uint32_t embedding_vec_size)
    : model_(model), gpu_resource(gpu_resource), embedding_vec_size_(embedding_vec_size) {
  auto buf = GeneralBuffer2<CudaAllocator>::create();
  buf->reserve({ceildiv<size_t>(model.num_categories, model.num_instances), embedding_vec_size_},
               &infrequent_embedding_vectors_);
  buf->reserve({model_.num_instances}, &model_indices_sizes_);
  buf->reserve({model_.num_instances}, &model_indices_sizes_ptrs_);
  buf->reserve({model_.num_instances}, &network_indices_sizes_);
  buf->reserve({model_.num_instances}, &network_indices_sizes_ptrs_);
  // One pointer per peer instance for direct (peer-to-peer) access paths.
  buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_train_);
  buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_eval_);
  buf->reserve({model.num_instances, 1}, &gradients_pointers_);
  buf->allocate();

  // Offsets live in managed memory so the host can read them without an
  // explicit copy; advise read-mostly since the device mostly reads them.
  auto managed_buf = GeneralBuffer2<CudaManagedAllocator>::create();
  managed_buf->reserve({model.num_instances + 1, 1}, &model_indices_offsets_);
  managed_buf->reserve({model.num_instances + 1, 1}, &network_indices_offsets_);
  managed_buf->allocate();
  int current_device;
  HCTR_LIB_THROW(cudaGetDevice(&current_device));
  HCTR_LIB_THROW(cudaMemAdvise(managed_buf->get_ptr(), managed_buf->get_size_in_bytes(),
                               cudaMemAdviseSetReadMostly, current_device));
}
// Initializes the local embedding-vector shard table by table with uniform
// noise in [-1/sqrt(table_size), 1/sqrt(table_size)]; table boundaries come
// from the model's infrequent table offsets.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::initialize_embedding_vectors(
    const std::vector<size_t>& table_sizes) {
  CudaDeviceContext context(gpu_resource.get_device_id());

  const size_t num_tables = table_sizes.size();
  for (size_t i = 0; i < num_tables; i++) {
    float up_bound = sqrt(1.f / table_sizes[i]);

    // Element offset and length of table i within the local shard.
    const size_t offset = embedding_vec_size_ * model_.h_infrequent_model_table_offsets[i];
    const size_t number_of_vectors =
        model_.h_infrequent_model_table_offsets[i + 1] - model_.h_infrequent_model_table_offsets[i];
    UniformGenerator::fill(
        infrequent_embedding_vectors_.get_ptr() + offset, embedding_vec_size_ * number_of_vectors,
        -up_bound, up_bound, gpu_resource.get_sm_count(),
        gpu_resource.get_replica_variant_curand_generator(), gpu_resource.get_stream());
  }
}
// Gathers the embedding vectors selected by this instance's model indices
// into a flat message buffer (one vector per selected sample), ready for the
// all-to-all exchange toward the requesting network instances.
//
// \param message_buffer Output staging buffer for the exchange.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_model(emtype* message_buffer,
cudaStream_t stream) {
auto indices = this->indices_view_;
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto num_instances = model_.num_instances;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
embedding_vec_size,
// Total copies: the last entry of the per-instance offsets array.
[=] __device__() { return indices->model_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t index = indices->model_indices[i];
dtype category = indices->samples[index];
// category_location holds a pair per category; the odd slot is used as
// the row offset into the local table (assumed layout — confirm against
// Model<dtype>::category_location).
dtype location = category_location[2 * category + 1];
return {infrequent_embedding_vectors + location * embedding_vec_size,
{message_buffer + i * embedding_vec_size},
{true}};
});
// Heuristic work granularity: average samples per instance / 8.
shuffle(copy_desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
// Variant of forward_model for the fused intra-node all-to-all: instead of a
// single contiguous message buffer, each destination (local) instance has its
// own buffer, and vectors are written at per-network slots inside it.
//
// \param message_buffer One staging buffer pointer per local instance.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_forward_model(emtype** message_buffer,
cudaStream_t stream) {
auto indices = this->indices_view_;
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_samples_size =
ceildiv<uint32_t>(data_->batch_size, num_instances) * data_->table_sizes.size();
// Fixed-size per-destination slot inside each message buffer.
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
embedding_vec_size,
[=] __device__() { return indices->model_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t num_selected = indices->model_indices_offsets[num_instances];
// Rotate the work distribution by the next local instance's offset,
// presumably so concurrent instances start on different destinations
// (load-balancing the fused exchange) — TODO confirm.
uint32_t vid =
(i + indices->model_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = indices->model_indices[vid];
// The requesting network instance is derived from the sample position.
uint32_t network_id = (index / local_samples_size);
dtype category = indices->samples[index];
dtype location = category_location[2 * category + 1];
uint32_t local_network_id = (network_id % per_node_instances);
// Each local buffer is partitioned into node-sized strides; pick the
// slot belonging to (destination node, this source instance).
emtype* output_ptr =
&message_buffer[local_network_id][(network_id - local_network_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {
infrequent_embedding_vectors + location * embedding_vec_size,
{output_ptr + (vid - indices->model_indices_offsets[network_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
// Scatters the vectors received from all model instances (flat message
// buffer, in network-index order) into this instance's interaction-layer
// input at the sample positions recorded in network_indices.
//
// \param message_buffer Received vectors, one per network index.
// \param interaction_layer_input Destination activation buffer.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
cudaStream_t stream) {
auto indices = this->indices_view_;
auto embedding_vec_size = embedding_vec_size_;
auto num_instances = model_.num_instances;
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
embedding_vec_size,
[=] __device__() { return indices->network_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = indices->network_indices[i];
return {message_buffer + i * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
// Hierarchical variant of forward_network: the message buffer is partitioned
// into fixed-size per-source-model slots (local_comm_buff_size vectors each),
// so the source address must be reconstructed from (model_id, local rank)
// instead of the flat position i.
//
// \param message_buffer Received vectors, grouped by source model instance.
// \param interaction_layer_input Destination activation buffer.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
cudaStream_t stream) {
auto indices = this->indices_view_;
auto embedding_vec_size = embedding_vec_size_;
auto num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_->batch_size, model_.num_instances) * data_->table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
embedding_vec_size,
[=] __device__() { return indices->network_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = indices->network_indices[i];
// Find model id and offset
// Linear scan over the (num_instances+1)-entry offsets array to find
// which source model instance position i falls into.
uint32_t model_id = 0;
uint32_t offset = 0;
uint32_t next_offset = indices->network_indices_offsets[1];
while (next_offset <= i) {
offset = next_offset;
model_id++;
next_offset = indices->network_indices_offsets[model_id + 1];
}
return {
message_buffer + (model_id * local_comm_buff_size + i - offset) * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
/** Forward network for single GPU (no communications): this model instance
 *  writes its selected embedding vectors directly into every network
 *  instance's interaction-layer input through peer pointers, skipping the
 *  message-buffer exchange entirely.
 *
 *  \param is_train Selects the train or eval set of destination pointers.
 *  \param stream CUDA stream the shuffle kernel runs on.
 */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network_direct(bool is_train,
cudaStream_t stream) {
const uint32_t num_instances = model_.num_instances;
const uint32_t model_id = model_.global_instance_id;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_->batch_size, num_instances) * data_->table_sizes.size();
auto interaction_layer_input_pointers = is_train
? interaction_layer_input_pointers_train_.get_ptr()
: interaction_layer_input_pointers_eval_.get_ptr();
auto indices = this->indices_view_;
auto category_location = model_.category_location.get_ptr();
auto model_table = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
embedding_vec_size,
[=] __device__() { return indices->model_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
// Rotate the iteration so work starts past this instance's own block
// (presumably to spread peer writes — TODO confirm).
const uint32_t offset = indices->model_indices_offsets[model_id + 1];
const uint32_t num_model_indices = indices->model_indices_offsets[num_instances];
const uint32_t vid = (i + offset) % num_model_indices;
const uint32_t index = indices->model_indices[vid];
const dtype category = indices->samples[index];
const dtype location = category_location[2 * category + 1];
// Destination instance and position are both derived from the sample
// index within the concatenated batch.
const uint32_t network_id = index / local_samples_size;
const uint32_t local_index = index % local_samples_size;
emtype* interaction_layer_input = interaction_layer_input_pointers[network_id];
return {model_table + location * embedding_vec_size,
{interaction_layer_input + local_index * embedding_vec_size},
{true}};
});
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.start", stream, false);
shuffle(copy_desc, stream, local_samples_size / 10);
HCTR_LIB_THROW(cudaPeekAtLastError());
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.stop", stream, false);
}
// Backward counterpart of forward_network: gathers the gradient rows selected
// by this instance's network indices into the message buffer that is sent
// back to the owning model instances.
//
// \param gradients Top gradients from the interaction layer.
// \param message_buffer Output staging buffer for the exchange.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_network(const emtype* gradients,
                                                        emtype* message_buffer,
                                                        cudaStream_t stream) {
  const auto view = this->indices_view_;
  const auto vec_size = embedding_vec_size_;
  const auto instance_count = model_.num_instances;

  auto desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
      vec_size,
      [=] __device__() { return view->network_indices_offsets[instance_count]; },
      [=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
        const uint32_t src_row = view->network_indices[i];
        return {gradients + src_row * vec_size,
                {message_buffer + i * vec_size},
                {true}};
      });

  shuffle(desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
  HCTR_LIB_THROW(cudaPeekAtLastError());
}
// Backward counterpart of fused_intra_forward_model: gathers gradient rows
// into per-local-instance message buffers, at per-model slots, for the fused
// intra-node exchange back to the owning model instances.
//
// \param gradients Top gradients from the interaction layer.
// \param message_buffer One staging buffer pointer per local instance.
// \param stream CUDA stream the shuffle kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_update_network(const emtype* gradients,
emtype** message_buffer,
cudaStream_t stream) {
auto indices = this->indices_view_;
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
embedding_vec_size,
[=] __device__() { return indices->network_indices_offsets[num_instances]; },
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t num_selected = indices->network_indices_offsets[num_instances];
// Same rotation trick as the forward pass: start at the next local
// instance's offset to stagger destinations.
uint32_t vid =
(i + indices->network_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = indices->network_indices[vid];
// Linear scan of the offsets array to find which destination model
// instance position vid belongs to.
uint32_t model_id;
for (model_id = 0;
model_id < num_instances && indices->network_indices_offsets[model_id + 1] <= vid;
model_id++)
;
uint32_t local_model_id = (model_id % per_node_instances);
// Pick the slot for (destination node, this source instance) inside the
// destination-local buffer.
emtype* output_ptr =
&message_buffer[local_model_id][(model_id - local_model_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {
gradients + index * embedding_vec_size,
{output_ptr + (vid - indices->network_indices_offsets[model_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_->samples.get_num_elements() / model_.num_instances / 8);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
// Applies the received gradients to the local embedding table with an atomic
// SGD update; lr lives on the device so the optimizer state never has to be
// synchronized to the host.
//
// \param message_buffer Gradient rows received from the network instances.
// \param dev_lr Learning rate (device pointer).
// \param scale Gradient scale factor (for mixed-precision loss scaling —
//              assumed; confirm with caller).
// \param stream CUDA stream the update kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model(const emtype* message_buffer, float* dev_lr,
float scale, cudaStream_t stream) {
auto indices = this->indices_view_;
const dtype* __restrict__ category_location = model_.category_location.get_ptr();
auto num_instances = model_.num_instances;
uint32_t n_blocks = gpu_resource.get_sm_count();
sgd_atomic_update(
message_buffer, infrequent_embedding_vectors_.get_ptr(),
// Number of rows to update.
[indices, num_instances] __device__() {
return indices->model_indices_offsets[num_instances];
},
// Map row i of the message buffer to its row in the local table.
[indices, category_location] __device__(uint32_t i) {
uint32_t index = indices->model_indices[i];
dtype category = indices->samples[index];
return category_location[2 * category + 1];
},
n_blocks, embedding_vec_size_, dev_lr, scale, stream);
}
// Hierarchical variant of update_model: the message buffer is grouped into
// fixed-size per-source slots (see hier_forward_network), so the dedicated
// kernel reconstructs each gradient row's position before applying SGD.
//
// \param message_buffer Gradient rows grouped by source network instance.
// \param dev_lr Learning rate (device pointer).
// \param scale Gradient scale factor.
// \param stream CUDA stream the kernel runs on.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_update_model(const emtype* message_buffer,
float* dev_lr, float scale,
cudaStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_->batch_size, num_instances) * data_->table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm;  // TODO: better heuristics
// One thread per embedding-vector element (blockDim == embedding_vec_size_).
infrequent_embedding_kernels::hier_update_model<<<n_blocks, embedding_vec_size_, 0, stream>>>(
this->indices_view_, model_.category_location.get_ptr(), message_buffer,
infrequent_embedding_vectors_.get_ptr(), embedding_vec_size_, model_.num_instances,
local_samples_size, local_comm_buff_size, dev_lr, scale);
HCTR_LIB_THROW(cudaPeekAtLastError());
}
/** Update model for single GPU (no communications), lr is a device variable.
 *  Each model instance reads every network instance's gradients directly
 *  through the registered peer pointers (gradients_pointers_) instead of
 *  receiving a message buffer.
 *
 *  \param dev_lr Learning rate (device pointer).
 *  \param scale Gradient scale factor.
 *  \param stream CUDA stream the kernel runs on.
 */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model_direct(float* dev_lr, float scale,
cudaStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_->batch_size, num_instances) * data_->table_sizes.size();
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm;  // TODO: better heuristics
/* Each model reads from the gradients of each network */
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.start", stream, false);
infrequent_embedding_kernels::
infrequent_update_model_direct<<<n_blocks, embedding_vec_size_, 0, stream>>>(
gradients_pointers_.get_ptr(), infrequent_embedding_vectors_.get_ptr(),
this->indices_view_, model_.category_location.get_ptr(), model_.num_instances,
model_.global_instance_id, embedding_vec_size_, local_samples_size, dev_lr, scale);
HCTR_LIB_THROW(cudaPeekAtLastError());
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.stop", stream, false);
}
// Converts the per-instance model-index offsets (prefix sums) into per-
// instance message sizes in bytes (count * embedding_vec_size_ *
// sizeof(emtype)), as needed by the communication layer.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices_sizes_from_offsets(
cudaStream_t stream) {
auto indices = this->indices_view_;
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
infrequent_embedding_kernels::offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>(
model_indices_sizes_.get_ptr(), [=] __device__() { return indices->model_indices_offsets; },
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
// Same as calculate_model_indices_sizes_from_offsets, but for the network-
// side offsets: produces per-instance receive sizes in bytes.
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices_sizes_from_offsets(
cudaStream_t stream) {
auto indices = this->indices_view_;
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
infrequent_embedding_kernels::offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>(
network_indices_sizes_.get_ptr(),
[=] __device__() { return indices->network_indices_offsets; },
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
// Explicit template instantiations: the (category index type, embedding
// storage type) combinations shipped with the library.
template class InfrequentEmbeddingBase<uint32_t>;
template class InfrequentEmbeddingBase<long long>;
template class InfrequentEmbedding<uint32_t, __half>;
template class InfrequentEmbedding<uint32_t, float>;
template class InfrequentEmbedding<long long, __half>;
template class InfrequentEmbedding<long long, float>;
} // namespace hybrid_embedding
} // namespace HugeCTR
#define USE_HEIGHTS
#include "deviceCode.h"
#include "owl/common/math/random.h"
namespace cdf {
extern "C" __constant__ LaunchParams optixLaunchParams;
typedef owl::common::LCG<4> Random;
inline __device__
vec3f backGroundColor()
{
  // Vertical white-to-sky-blue gradient keyed on the pixel's row: the classic
  // "Ray Tracing in One Weekend" background.
  const vec2i pixel = owl::getLaunchIndex();
  const float blend = pixel.y / (float)optixGetLaunchDimensions().y;
  const vec3f white(1.0f, 1.0f, 1.0f);
  const vec3f skyBlue(0.5f, 0.7f, 1.0f);
  return (1.0f - blend) * white + blend * skyBlue;
}
__device__ float linear_to_srgb(float x) {
  // Standard sRGB transfer function: linear segment below the 0.0031308 knee,
  // gamma-2.4 power curve above it.
  const bool below_knee = (x <= 0.0031308f);
  return below_knee ? 12.92f * x
                    : 1.055f * pow(x, 1.f/2.4f) - 0.055f;
}
inline __device__ vec4f over(const vec4f &A, const vec4f &B)
{
  // Porter-Duff "over" compositing with premultiplied alpha: A in front of B.
  const float transmittance = 1.f - A.w;
  return A + transmittance * B;
}
// ==================================================================
// conventional cdf sampling
// ==================================================================
__device__
const float* upper_bound (const float* first, const float* last, const float& val)
{
  // Device-side re-implementation of std::upper_bound on a float array:
  // returns the first element in [first, last) strictly greater than val,
  // or last if no such element exists. Classic halving search.
  int remaining = int(last - first);
  while (remaining > 0) {
    const int half = remaining / 2;
    const float* mid = first + half;
    if (*mid <= val) {
      // val is not left of mid: discard mid and everything before it.
      first = mid + 1;
      remaining -= half + 1;
    } else {
      // Answer is at mid or left of it.
      remaining = half;
    }
  }
  return first;
}
// One importance-sampled texel of the environment map, as produced by the
// row/column CDF inversion (sampleCDF / sampleCDF_BVH).
struct Sample {
int x; // column
int y; // row
float pdfx; // probability mass of the chosen column within row y
float pdfy; // probability mass of the chosen row
};
// Inverts a discrete CDF: finds the first bin whose cumulative value exceeds
// x, reports that bin's index (*idx) and probability mass (*pdf), and
// rescales x into the bin-local coordinate so it can be reused as a fresh
// uniform sample for the next dimension.
//
// \param data Monotonically non-decreasing CDF values, data[n-1] == 1.
// \param n    Number of CDF entries.
// \param x    Uniform random sample in [0,1).
// \param idx  Out: selected bin index in [0, n-1].
// \param pdf  Out: probability mass of the selected bin.
// \return Bin-local remapped sample, clamped into [0,1).
__device__ float sample_cdf(const float* data, unsigned int n, float x, int *idx, float* pdf)
{
  *idx = upper_bound(data, data + n, x) - data;
  // Robustness fix: if x rounds to >= data[n-1], upper_bound returns n and the
  // original code left *pdf and the result uninitialized (UB). Clamp to the
  // last bin instead.
  if (*idx >= (int)n) *idx = (int)n - 1;
  float scaled_sample;
  if (*idx == 0) {
    *pdf = data[0];
    scaled_sample = x / data[0];
  } else {
    *pdf = data[*idx] - data[*idx - 1];
    scaled_sample = (x - data[*idx - 1]) / (data[*idx] - data[*idx - 1]);
  }
  // keep result in [0,1)
  return min(scaled_sample, 0.99999994f);
}
// Uv range: [0, 1]
__device__
vec3f toPolar(vec2f uv)
{
  // Map lat-long texture coordinates onto a unit direction: u drives the
  // azimuth (with a -90 degree phase), v the polar angle. Uses the fast CUDA
  // trig intrinsics, as the original did.
  const float theta = 2.0 * M_PI * uv.x - M_PI / 2.0;
  const float phi   = M_PI * uv.y;
  const float sinPhi = __sinf(phi);
  vec3f dir;
  dir.x = -__cosf(theta) * sinPhi;  // x is mirrored, matching toUV()'s atan2 convention
  dir.z =  __sinf(theta) * sinPhi;
  dir.y =  __cosf(phi);
  return dir;
}
// Two-stage CDF inversion over the environment map: first pick a row from the
// marginal row CDF, then a column from that row's conditional CDF. The
// remapped ry/rx returned by sample_cdf keep the samples stratified.
__device__ Sample sampleCDF(float rx, float ry)
{
auto &lp = optixLaunchParams;
float* rows = lp.environmentMapRows;
float* cols = lp.environmentMapCols;
int width = lp.environmentMapWidth;
int height = lp.environmentMapHeight;
float row_pdf, col_pdf;
int x, y;
ry = sample_cdf(rows, height, ry, &y, &row_pdf);
y = max(min(y, height - 1), 0);
// cols stores one conditional CDF per row, packed row-major.
rx = sample_cdf(cols + y * width, width, rx, &x, &col_pdf);
return {x,y,col_pdf,row_pdf}; // TODO: actual *light* sampling
}
// Result of sampling the environment light.
struct LightSample {
vec3f L;         // sampled direction (toward the light)
vec3f intensity; // radiance from that direction
float pdf;       // sampling density for MIS / estimator weighting
};
// Maps a unit direction to lat-long texture coordinates; inverse of
// toPolar() up to a fixed longitude offset.
__device__
vec2f toUV(vec3f n)
{
vec2f uv;
uv.x = atan2f(float(n.x), float(n.z));
// The extra M_PI * (28.670/360) term rotates the seam by ~28.67 degrees --
// presumably to align a particular HDRI; confirm before reusing elsewhere.
uv.x = (uv.x + M_PI / 2.0f) / (M_PI * 2.0f) + M_PI * (28.670f / 360.0f);
// Clamp v away from the poles to avoid filtering artifacts at the edges.
uv.y = clamp(float(acosf(n.y) / M_PI), .001f, .999f);
return uv;
}
// Evaluates the environment map in a given direction: converts the direction
// to lat-long UVs and fetches the HDR texel.
// NOTE(review): the returned pdf is a constant 1/pi regardless of direction —
// looks like a placeholder; callers overwrite it when importance sampling.
__device__ LightSample sampleEnvironmentMap(vec3f dir)
{
vec2f uv = toUV(dir);
float u = uv.x;
float v = uv.y;
auto &lp = optixLaunchParams;
vec4f texel = tex2D<float4>(lp.hdrTexture,u*(lp.environmentMapWidth-1),v*(lp.environmentMapHeight-1));
return {dir,{texel.x,texel.y,texel.z}, 1.f/float(M_PI)};
}
__device__ vec3f cosineSampleHemisphere(float u1, float u2)
{
  // Malley's method: pick a point uniformly on the unit disk (radius sqrt(u1),
  // angle 2*pi*u2) and project it up onto the hemisphere around +z, yielding a
  // cosine-weighted direction.
  const float radius = sqrtf(u1);
  const float angle  = u2 * 2.f * float(M_PI);
  return {radius * cosf(angle),
          radius * sinf(angle),
          sqrtf(1.f - u1)};
}
// ==================================================================
// Triangle model
// ==================================================================
// Per-ray data for hits against the triangle model.
struct ModelPRD {
float t_hit;  // hit distance; stays negative on miss
vec3f gn;     // geometric normal of the hit triangle
int primID;   // hit primitive index; stays negative on miss
};
// Closest-hit program for the triangle model: records hit distance, primitive
// id, and the (unoriented) geometric normal computed from the triangle edges.
OPTIX_CLOSEST_HIT_PROGRAM(ModelCH)()
{
ModelPRD& prd = owl::getPRD<ModelPRD>();
const TriangleGeom& self = owl::getProgramData<TriangleGeom>();
prd.t_hit = optixGetRayTmax();
prd.primID = optixGetPrimitiveIndex();
const vec3i index = self.indexBuffer[prd.primID];
const vec3f& v1 = self.vertexBuffer[index.x];
const vec3f& v2 = self.vertexBuffer[index.y];
const vec3f& v3 = self.vertexBuffer[index.z];
// Winding-order normal; callers flip it toward the viewer as needed.
prd.gn = normalize(cross(v2 - v1, v3 - v1));
}
// ==================================================================
// cdf sampling w/ triangle BVH
// ==================================================================
// Per-ray data for the CDF-as-BVH sampling rays.
// NOTE(review): the current CdfCH program communicates via raw payload
// registers rather than this struct — it appears unused here; confirm.
struct PRD {
float x;
float y;
float rowPdf;
float colPdf;
};
static __forceinline__ __device__
void *unpackPointer( uint32_t i0, uint32_t i1 )
{
  // Reassemble a 64-bit pointer that was split across two 32-bit OptiX
  // payload registers (i0 = high word, i1 = low word); inverse of
  // packPointer().
  const uint64_t high = static_cast<uint64_t>( i0 ) << 32;
  const uint64_t low  = static_cast<uint64_t>( i1 );
  return reinterpret_cast<void*>( high | low );
}
static __forceinline__ __device__
void packPointer( void* ptr, uint32_t& i0, uint32_t& i1 )
{
  // Split a 64-bit pointer into two 32-bit halves so it can travel through
  // OptiX payload registers; inverse of unpackPointer().
  const uint64_t bits = reinterpret_cast<uint64_t>( ptr );
  i0 = static_cast<uint32_t>( bits >> 32 );
  i1 = static_cast<uint32_t>( bits & 0xffffffffull );
}
// Recovers the typed per-ray-data pointer from OptiX payload registers 0/1
// (set by the caller via packPointer before optixTrace).
template<typename T>
static __forceinline__ __device__ T *getPRD()
{
const uint32_t u0 = optixGetPayload_0();
const uint32_t u1 = optixGetPayload_1();
return reinterpret_cast<T*>( unpackPointer( u0, u1 ) );
}
// Closest-hit program for the "CDF geometry": each triangle pair encodes a
// span of CDF rows, and the barycentric hit position interpolates within
// that span. Results travel back through raw payload registers:
//   p0 = hit t, p1 = interpolated row index, p2 = triangle pdf, p3 = geom pdf.
OPTIX_CLOSEST_HIT_PROGRAM(CdfCH)()
{
const auto &self = owl::getProgramData<CdfGeom>();
float2 b = optixGetTriangleBarycentrics();
int id = optixGetPrimitiveIndex();
int start = self.rowStart;
int end = self.rowStart + (self.rowCount - 1);
// Even/odd triangles of a quad have mirrored parameterizations, so the
// interpolation weight flips with triangle parity.
float alpha = ((id % 2) == 0) ? (b.x + b.y) : 1.f - (b.x + b.y);
optixSetPayload_0(__float_as_int(optixGetRayTmax()));
optixSetPayload_1(__float_as_int(start * (1.f - alpha) + end * alpha));
optixSetPayload_2(__float_as_int(self.triPdfs[id]));
optixSetPayload_3(__float_as_int(self.geomPdf));
}
// CDF inversion implemented as ray traversal: shoots a +z ray at (rx, ry)
// into the BVH built over the CDF geometry; CdfCH reports the hit back via
// payload registers. p0 (hit t) maps to the column, p1 to the row.
__device__ Sample sampleCDF_BVH(float rx, float ry)
{
auto &lp = optixLaunchParams;
unsigned int p0, p1, p2, p3;
optixTrace(lp.cdf,
make_float3(rx, ry, 0.f),
make_float3(0.f, 0.f, 1.f),
0.f,    // tmin
1.1f,   // tmax: slightly past 1 so boundary samples still hit
0.0f,   // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_DISABLE_ANYHIT | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
0,// SBT offset
1,// SBT stride
0,// missSBTIndex
p0, p1, p2, p3);
return {int(__int_as_float(p0) * lp.environmentMapWidth), int(__int_as_float(p1)), __int_as_float(p2), __int_as_float(p3)};
}
// ==================================================================
// Importance sampling interface
// ==================================================================
// Importance-samples the environment light via binary-search CDF inversion,
// returning direction, radiance, and the solid-angle pdf (texel pdf times
// the texel-count-to-sphere Jacobian).
__device__ LightSample importanceSampleEnvironmentMap(float rx, float ry)
{
auto &lp = optixLaunchParams;
int width = lp.environmentMapWidth;
int height = lp.environmentMapHeight;
Sample sample = sampleCDF(rx,ry);
// Jacobian of the texel-grid -> sphere mapping (sphere area 4*pi).
float invjacobian = width * height / float(4 * M_PI);
vec3f L(toPolar(vec2f((sample.x) / float(width), (sample.y)/float(height))));
LightSample ls = sampleEnvironmentMap(L);
ls.pdf = sample.pdfx * sample.pdfy * invjacobian;
return ls;
}
// Same estimator as importanceSampleEnvironmentMap, but the CDF inversion is
// done by tracing into the CDF BVH; kept structurally identical so the two
// strategies can be compared texel-for-texel in the benchmark raygen.
__device__ LightSample importanceSampleEnvironmentMapBVH(float rx, float ry)
{
auto &lp = optixLaunchParams;
int width = lp.environmentMapWidth;
int height = lp.environmentMapHeight;
Sample sample = sampleCDF_BVH(rx,ry);
float invjacobian = width * height / float(4 * M_PI);
vec3f L(toPolar(vec2f((sample.x) / float(width), (sample.y)/float(height))));
LightSample ls = sampleEnvironmentMap(L);
ls.pdf = sample.pdfx * sample.pdfy * invjacobian;
return ls;
}
// Baseline strategy: picks a direction from uniform lat-long UVs with a
// constant pdf of 1/(4*pi).
// NOTE(review): uniform UV is not uniform on the sphere (missing sin(phi)
// weighting), so this pdf is only approximate — acceptable for a comparison
// baseline, but confirm before using it in a converging estimator.
__device__ LightSample importanceSampleEnvironmentMapRandom(float rx, float ry)
{
auto &lp = optixLaunchParams;
vec3f L(toPolar(vec2f(rx, ry)));
LightSample ls = sampleEnvironmentMap(L);
ls.pdf = 1.f / float(4 * M_PI);
return ls;
}
// ==================================================================
//
// ==================================================================
// Builds a primary camera ray for a (fractional) screen position using the
// pinhole basis stored in the launch params.
inline __device__ Ray generateRay(const vec2f screen)
{
auto &lp = optixLaunchParams;
vec3f org = lp.camera.org;
vec3f dir
= lp.camera.dir_00
+ screen.u * lp.camera.dir_du
+ screen.v * lp.camera.dir_dv;
dir = normalize(dir);
// Nudge near-zero components away from 0 so axis-aligned rays cannot
// produce divisions by zero in traversal code.
if (fabs(dir.x) < 1e-5f) dir.x = 1e-5f;
if (fabs(dir.y) < 1e-5f) dir.y = 1e-5f;
if (fabs(dir.z) < 1e-5f) dir.z = 1e-5f;
return Ray(org,dir,0.f,1e10f);
}
inline __device__ vec3f hue_to_rgb(float hue)
{
  // Piecewise-linear hue ramp (full saturation/value): hue in [0,1] sweeps
  // red -> yellow -> green -> cyan -> blue -> magenta.
  const float h6 = saturate( hue ) * 6.0f;
  return vec3f(saturate( fabsf(h6 - 3.f) - 1.0f ),
               saturate( 2.0f - fabsf(h6 - 2.0f) ),
               saturate( 2.0f - fabsf(h6 - 4.0f) ));
}
inline __device__ vec3f temperature_to_rgb(float t)
{
  // Heat-map color: slide the hue from 2/3 (blue, t=0) down to 0 (red, t=1)
  // while ramping brightness from 0.5 up to 1.
  const float K = 4.0f / 6.0f;
  const float hue = K * (1.0f - t);
  const float brightness = .5f + 0.5f * t;
  return brightness * hue_to_rgb(hue);
}
// Maps a normalized cost value t to a heat-map color; the disabled branch is
// an alternative green->cyan->blue->white->red piecewise ramp.
inline __device__
vec3f heatMap(float t)
{
#if 1
return temperature_to_rgb(t);
#else
if (t < .25f) return lerp(vec3f(0.f,1.f,0.f),vec3f(0.f,1.f,1.f),(t-0.f)/.25f);
if (t < .5f) return lerp(vec3f(0.f,1.f,1.f),vec3f(0.f,0.f,1.f),(t-.25f)/.25f);
if (t < .75f) return lerp(vec3f(0.f,0.f,1.f),vec3f(1.f,1.f,1.f),(t-.5f)/.25f);
if (t < 1.f) return lerp(vec3f(1.f,1.f,1.f),vec3f(1.f,0.f,0.f),(t-.75f)/.25f);
return vec3f(1.f,0.f,0.f);
#endif
}
// Benchmark/diagnostic raygen: draws spp random CDF samples per pixel and
// either visualizes the chosen texel coordinates (binary-search or BVH
// sampling) or, in error mode, histograms the disagreement between the two
// strategies into lp.offSamples (positional) and lp.offLuminance (radiance).
OPTIX_RAYGEN_PROGRAM(benchmark)()
{
  auto &lp = optixLaunchParams;
  const int spp = lp.render.spp;
  const vec2i threadIdx = owl::getLaunchIndex();
  Random random(threadIdx.x,threadIdx.y);
  uint64_t clock_begin = clock();
  vec4f accumColor = 0.f;
  for (int s=0; s<spp; ++s) {
    float rx = random();
    float ry = random();
    // Third draw kept (though unused) so the RNG sequence matches earlier
    // runs of this benchmark.
    float rz = random();
    (void)rz;
    vec4f color(0.f);
    if (lp.benchmarkMode == BenchmarkModeBinarySearch) {
      Sample sample = sampleCDF(rx,ry);
      color = vec4f((float)sample.x/lp.environmentMapWidth,
                    (float)sample.y/lp.environmentMapHeight,
                    0.f,1.f/spp);
    } else if (lp.benchmarkMode == BenchmarkModeBVH) {
      Sample sample = sampleCDF_BVH(rx,ry);
      color = vec4f((float)sample.x/lp.environmentMapWidth,
                    (float)sample.y/lp.environmentMapHeight,
                    0.f,1.f/spp);
    } else if (lp.benchmarkMode == BenchmarkModeErrors) {
      // Run both samplers on the same random numbers and record how far the
      // returned texels are apart, positionally and in luminance.
      Sample sample = sampleCDF(rx,ry);
      Sample sampleBVH = sampleCDF_BVH(rx,ry);
      if (sample.x != sampleBVH.x || sample.y != sampleBVH.y) {
        int off = abs(sample.x-sampleBVH.x)+abs(sample.y-sampleBVH.y);
        off-=1; // as offSamples array is zero-based
        if (off < lp.offSamplesMax-1)
          atomicAdd(&lp.offSamples[off], 1);
        else
          atomicAdd(&lp.offSamples[lp.offSamplesMax-1], 1);
        accumColor=vec4f(1.f); // flag mismatching pixels in the output image
        vec4f texel = tex2D<float4>(lp.hdrTexture,sample.x,sample.y);
        vec4f texelBVH = tex2D<float4>(lp.hdrTexture,sampleBVH.x,sampleBVH.y);
        // Normalize to min/max luminance range
        texel = (texel-lp.luminanceMin)/(lp.luminanceMax-lp.luminanceMin);
        texelBVH = (texelBVH-lp.luminanceMin)/(lp.luminanceMax-lp.luminanceMin);
        // Bug fix: the blue-channel error previously compared texel.y
        // against texelBVH.z; compare z against z.
        vec3f err(fabsf(texel.x-texelBVH.x),
                  fabsf(texel.y-texelBVH.y),
                  fabsf(texel.z-texelBVH.z));
        float errMax = max(err.x,max(err.y,err.z));
        // Log-scale histogram of the worst channel error.
        if (errMax < 1e-30f)
          atomicAdd(&lp.offLuminance[ 0], 1);
        else if (errMax < 1e-20f)
          atomicAdd(&lp.offLuminance[ 1], 1);
        else if (errMax < 1e-15f)
          atomicAdd(&lp.offLuminance[ 2], 1);
        else if (errMax < 1e-10)
          atomicAdd(&lp.offLuminance[ 3], 1);
        else if (errMax < 1e-8)
          atomicAdd(&lp.offLuminance[ 4], 1);
        else if (errMax < 1e-7)
          atomicAdd(&lp.offLuminance[ 5], 1);
        else if (errMax < 1e-6)
          atomicAdd(&lp.offLuminance[ 6], 1);
        else if (errMax < 1e-5)
          atomicAdd(&lp.offLuminance[ 7], 1);
        else if (errMax < 1e-4)
          atomicAdd(&lp.offLuminance[ 8], 1);
        else if (errMax < 1e-3)
          atomicAdd(&lp.offLuminance[ 9], 1);
        else if (errMax < 1e-2)
          atomicAdd(&lp.offLuminance[10], 1);
        else
          atomicAdd(&lp.offLuminance[11], 1);
      }
    }
    accumColor = over(color,accumColor);
  }
  uint64_t clock_end = clock();
  if (lp.render.heatMapEnabled > 0.f) {
    float t = (clock_end-clock_begin)*(lp.render.heatMapScale/spp);
    accumColor = over(vec4f(heatMap(t),.5f),accumColor);
  }
  int pixelID = threadIdx.x + owl::getLaunchDims().x*threadIdx.y;
  // Progressive accumulation across frames, then average for display.
  if (lp.accumID > 0)
    accumColor += vec4f(lp.accumBuffer[pixelID]);
  lp.accumBuffer[pixelID] = accumColor;
  accumColor *= (1.f/(lp.accumID+1));
  lp.fbPointer[pixelID] = make_rgba(vec3f(accumColor*(1.f/spp)));
}
// Main render raygen: one primary ray per sample, direct lighting from the
// environment map via the selected importance-sampling strategy, with a
// shadow ray for visibility; misses show the environment directly. Results
// are accumulated progressively and written out in sRGB.
OPTIX_RAYGEN_PROGRAM(renderFrame)()
{
auto &lp = optixLaunchParams;
const int spp = lp.render.spp;
const vec2i threadIdx = owl::getLaunchIndex();
int pixelID = threadIdx.x + owl::getLaunchDims().x*threadIdx.y;
// Seed with (pixel, frame) so each accumulation frame gets fresh samples.
Random random(pixelID,lp.accumID);
uint64_t clock_begin = clock();
vec4f accumColor = 0.f;
for (int s=0; s<spp; ++s) {
float rx = random();
float ry = random();
// Jitter the primary ray within the pixel for anti-aliasing.
Ray ray = generateRay(vec2f(threadIdx)+vec2f(rx,ry));
ModelPRD prd{-1.f,vec3f(-1),-1};
owl::traceRay(lp.model.group, ray, prd,
OPTIX_RAY_FLAG_DISABLE_ANYHIT);
// face normal forward
if (dot(ray.direction,prd.gn) > 0.f) {
prd.gn = -prd.gn;
}
vec4f color(0.f);
if (prd.t_hit >= 0.f) {
float r1 = random();
float r2 = random();
LightSample ls;
if (lp.renderMode == RenderModeBinarySearch)
ls = importanceSampleEnvironmentMap(r1,r2);
else if (lp.renderMode == RenderModeBVH)
ls = importanceSampleEnvironmentMapBVH(r1,r2);
else if (lp.renderMode == RenderModeRandom)
ls = importanceSampleEnvironmentMapRandom(r1,r2);
else
assert(0 && "unsupported render mode");
vec3f isectPos = ray.origin + ray.direction * prd.t_hit;
// Shadow ray toward the sampled light direction; tmin offsets the
// origin to avoid self-intersection.
ModelPRD shadowPrd = {-1.f,vec3f(-1),-1};
Ray shadowRay;
shadowRay.origin = isectPos;
shadowRay.direction = ls.L;;
shadowRay.tmin = 1e-2f;
shadowRay.tmax = 1e20f;
owl::traceRay(lp.model.group, shadowRay, shadowPrd,
OPTIX_RAY_FLAG_DISABLE_ANYHIT | OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT);
ls.intensity = clamp(ls.intensity * lp.hdriIntensity,vec3f(0.f),vec3f(1e30f));
// Shade only if unoccluded: ambient 0.3 plus cosine term, weighted by
// radiance / pdf (single-sample direct-light estimator).
if (shadowPrd.primID < 0)
color = vec4f(vec3f(.3f + max(0.f,dot(ls.L,prd.gn)))*ls.intensity/ls.pdf,1.f);
color = min(color, vec4f(100.f)); // clamp out fire flies
} else {
// Primary miss: show the environment directly.
LightSample s = sampleEnvironmentMap(ray.direction);
color = vec4f(s.intensity,1.f);
}
accumColor += color;
}
accumColor = accumColor / float(spp);
uint64_t clock_end = clock();
if (lp.render.heatMapEnabled > 0.f) {
float t = (clock_end-clock_begin)*(lp.render.heatMapScale/spp);
accumColor = over(vec4f(heatMap(t),.5f),accumColor);
}
// Progressive accumulation across frames, then average and convert to sRGB.
if (lp.accumID > 0)
accumColor += vec4f(lp.accumBuffer[pixelID]);
lp.accumBuffer[pixelID] = accumColor;
accumColor *= (1.f/(lp.accumID+1));
accumColor.x = linear_to_srgb(accumColor.x);
accumColor.y = linear_to_srgb(accumColor.y);
accumColor.z = linear_to_srgb(accumColor.z);
lp.fbPointer[pixelID] = make_rgba(vec3f(accumColor));
}
} | the_stack |
#pragma once
#include <thrust/sequence.h>
#include <cub/cub.cuh>
#include "open3d/ml/impl/misc/MemoryAllocation.h"
#include "open3d/utility/Helper.h"
#include "open3d/utility/MiniVec.h"
namespace open3d {
namespace ml {
namespace impl {
namespace {
using namespace open3d::utility;
// Writes first[i] = value + i for i in [0, len). The LARGE_ARRAY variant
// linearizes a 3D grid so that arrays exceeding the 1D grid-size limit can
// still be covered; the small variant uses a plain 1D launch.
template <class T, bool LARGE_ARRAY>
__global__ void IotaCUDAKernel(T* first, int64_t len, T value) {
int64_t linear_idx;
const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
if (LARGE_ARRAY) {
const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
y * gridDim.x * blockDim.x + x;
} else {
linear_idx = x;
}
if (linear_idx < len) {
T* ptr = first + linear_idx;
value += linear_idx;
*ptr = value;
}
}
/// Iota function for CUDA: fills [first, last) with consecutive values
/// starting at \p value (device-side analogue of std::iota).
///
/// \param stream CUDA stream to launch on.
/// \param first,last Device range to fill.
/// \param value First value of the sequence.
template <class T>
void IotaCUDA(const cudaStream_t& stream, T* first, T* last, T value) {
    ptrdiff_t len = last - first;
    if (len) {
        const int BLOCKSIZE = 128;
        dim3 block(BLOCKSIZE, 1, 1);
        dim3 grid;
        // Fix: `block.x * INT32_MAX` was evaluated in 32-bit unsigned
        // arithmetic and wrapped around, so the 1D-grid capacity threshold
        // was ~2^32 instead of block.x * 2^31. Widen to 64 bits.
        if (len > int64_t(block.x) * INT32_MAX) {
            // 3D grid: spread blocks over y/z as well to cover huge arrays.
            grid.y = std::ceil(std::cbrt(len));
            grid.z = grid.y;
            grid.x = DivUp(len, int64_t(grid.z) * grid.y * block.x);
            IotaCUDAKernel<T, true>
                    <<<grid, block, 0, stream>>>(first, len, value);
        } else {
            grid = dim3(DivUp(len, block.x), 1, 1);
            IotaCUDAKernel<T, false>
                    <<<grid, block, 0, stream>>>(first, len, value);
        }
    }
}
// In-place converts each voxel hash to its batch id by integer division with
// batch_hash (the multiplier used to fold the batch index into the hash).
// Uses a linearized 3D grid to cover large voxel counts.
__global__ void ComputeBatchIdKernel(int64_t* hashes,
const int64_t num_voxels,
const int64_t batch_hash) {
int64_t linear_idx;
const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
y * gridDim.x * blockDim.x + x;
if (linear_idx >= num_voxels) return;
hashes[linear_idx] /= batch_hash;
}
/// This function computes batch_id from hash value.
///
/// \param hashes Input and output array.
/// \param num_voxels Number of valid voxels.
/// \param batch_hash The value used to hash batch dimension.
///
void ComputeBatchId(const cudaStream_t& stream,
int64_t* hashes,
const int64_t num_voxels,
const int64_t batch_hash) {
if (num_voxels) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid;
// Cube-root grid sizing: spread blocks across y/z so the x dimension
// stays within its limit even for very large voxel counts.
grid.y = std::ceil(std::cbrt(num_voxels));
grid.z = grid.y;
grid.x = DivUp(num_voxels, int64_t(grid.z) * grid.y * block.x);
ComputeBatchIdKernel<<<grid, block, 0, stream>>>(hashes, num_voxels,
batch_hash);
}
}
// Scatters the per-unique-batch voxel counts into a dense array indexed by
// batch id (empty batches keep their prior value in num_voxels_per_batch).
__global__ void ComputeVoxelPerBatchKernel(int64_t* num_voxels_per_batch,
int64_t* unique_batches_count,
int64_t* unique_batches,
const int64_t num_batches) {
int64_t linear_idx;
const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
y * gridDim.x * blockDim.x + x;
if (linear_idx >= num_batches) return;
int64_t out_idx = unique_batches[linear_idx];
num_voxels_per_batch[out_idx] = unique_batches_count[linear_idx];
}
/// This function computes number of voxels per batch element.
///
/// \param num_voxels_per_batch The output array.
/// \param unique_batches_count Counts for unique batch_id.
/// \param unique_batches Unique batch_id.
/// \param num_batches Number of non empty batches (<= batch_size).
///
void ComputeVoxelPerBatch(const cudaStream_t& stream,
int64_t* num_voxels_per_batch,
int64_t* unique_batches_count,
int64_t* unique_batches,
const int64_t num_batches) {
if (num_batches) {
const int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid;
// Same cube-root 3D-grid sizing as the other launchers in this file.
grid.y = std::ceil(std::cbrt(num_batches));
grid.z = grid.y;
grid.x = DivUp(num_batches, int64_t(grid.z) * grid.y * block.x);
ComputeVoxelPerBatchKernel<<<grid, block, 0, stream>>>(
num_voxels_per_batch, unique_batches_count, unique_batches,
num_batches);
}
}
/// Kernel that writes, for every point, the index of the batch element it
/// belongs to. One thread handles one batch element.
__global__ void ComputeIndicesBatchesKernel(int64_t* indices_batches,
                                            const int64_t* row_splits,
                                            const int64_t batch_size) {
    const int64_t tx = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t ty = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t tz = blockDim.z * blockIdx.z + threadIdx.z;
    const int64_t row_pitch = gridDim.x * blockDim.x;
    const int64_t linear_idx = (tz * gridDim.y + ty) * row_pitch + tx;
    if (linear_idx >= batch_size) return;
    // Points of batch element linear_idx live in
    // [row_splits[linear_idx], row_splits[linear_idx + 1]).
    const int64_t begin = row_splits[linear_idx];
    const int64_t end = row_splits[linear_idx + 1];
    for (int64_t point = begin; point < end; ++point) {
        indices_batches[point] = linear_idx;
    }
}
/// Computes the mapping of point index to batch_id.
///
/// \param indices_batches The output array.
/// \param row_splits The row_splits defining the batches.
/// \param batch_size The batch_size of the given points.
///
void ComputeIndicesBatches(const cudaStream_t& stream,
                           int64_t* indices_batches,
                           const int64_t* row_splits,
                           const int64_t batch_size) {
    if (!batch_size) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(batch_size));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(batch_size, int64_t(grid.z) * grid.y * block.x);
    ComputeIndicesBatchesKernel<<<grid, block, 0, stream>>>(
            indices_batches, row_splits, batch_size);
}
/// Kernel that computes a linear voxel hash for every point.
/// Points outside [points_range_min_vec, points_range_max_vec] get
/// invalid_hash. The hash combines the linearized voxel coordinate (via
/// strides) with the point's batch id (via batch_hash).
/// NOTE(review): batch_size and row_splits are unused here; the batch id
/// is read from the precomputed indices_batches array instead.
template <class T, int NDIM>
__global__ void ComputeHashKernel(
        int64_t* __restrict__ hashes,
        int64_t num_points,
        const T* const __restrict__ points,
        const int64_t batch_size,
        const int64_t* row_splits,
        const int64_t* indices_batches,
        const open3d::utility::MiniVec<T, NDIM> points_range_min_vec,
        const open3d::utility::MiniVec<T, NDIM> points_range_max_vec,
        const open3d::utility::MiniVec<T, NDIM> inv_voxel_size,
        const open3d::utility::MiniVec<int64_t, NDIM> strides,
        const int64_t batch_hash,
        const int64_t invalid_hash) {
    typedef MiniVec<T, NDIM> Vec_t;
    // Flat thread index over the 3D launch grid; one thread per point.
    int64_t linear_idx;
    const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
    linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
                 y * gridDim.x * blockDim.x + x;
    if (linear_idx >= num_points) return;
    Vec_t point(points + linear_idx * NDIM);
    if ((point >= points_range_min_vec && point <= points_range_max_vec)
                .all()) {
        // Integer voxel coordinate of the point, linearized with strides.
        auto coords = ((point - points_range_min_vec) * inv_voxel_size)
                              .template cast<int64_t>();
        int64_t h = coords.dot(strides);
        h += indices_batches[linear_idx] * batch_hash;  // add hash for batch_id
        hashes[linear_idx] = h;
    } else {
        hashes[linear_idx] = invalid_hash;  // max hash value used as invalid
    }
}
/// Computes the hash (linear index) for each point.
/// Points outside the range will get a specific hash value.
///
/// \tparam T The floating point type for the points
/// \tparam NDIM The number of dimensions, e.g., 3.
///
/// \param hashes The output vector with the hashes/linear indexes.
/// \param num_points The number of points.
/// \param points The array with the point coordinates. The shape is
///        [num_points,NDIM] and the storage order is row-major.
/// \param batch_size The batch size of points.
/// \param row_splits row_splits for defining batches.
/// \param indices_batches Mapping of index to batch_id.
/// \param points_range_min_vec The minimum range for a point to be valid.
/// \param points_range_max_vec The maximum range for a point to be valid.
/// \param inv_voxel_size The reciprocal of the voxel edge lengths in each
///        dimension
/// \param strides The strides for computing the linear index.
/// \param batch_hash The value for hashing the batch dimension.
/// \param invalid_hash The value to use for points outside the range.
template <class T, int NDIM>
void ComputeHash(const cudaStream_t& stream,
                 int64_t* hashes,
                 int64_t num_points,
                 const T* const points,
                 const int64_t batch_size,
                 const int64_t* row_splits,
                 const int64_t* indices_batches,
                 const MiniVec<T, NDIM> points_range_min_vec,
                 const MiniVec<T, NDIM> points_range_max_vec,
                 const MiniVec<T, NDIM> inv_voxel_size,
                 const MiniVec<int64_t, NDIM> strides,
                 const int64_t batch_hash,
                 const int64_t invalid_hash) {
    if (!num_points) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(num_points));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(num_points, int64_t(grid.z) * grid.y * block.x);
    ComputeHashKernel<T, NDIM><<<grid, block, 0, stream>>>(
            hashes, num_points, points, batch_size, row_splits,
            indices_batches, points_range_min_vec, points_range_max_vec,
            inv_voxel_size, strides, batch_hash, invalid_hash);
}
/// Kernel that clamps each element of counts to at most limit.
template <class T>
__global__ void LimitCountsKernel(T* counts, int64_t num, T limit) {
    const int64_t tx = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t ty = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t tz = blockDim.z * blockIdx.z + threadIdx.z;
    const int64_t row_pitch = gridDim.x * blockDim.x;
    const int64_t linear_idx = (tz * gridDim.y + ty) * row_pitch + tx;
    // Only write when the value actually exceeds the limit.
    if (linear_idx < num && counts[linear_idx] > limit) {
        counts[linear_idx] = limit;
    }
}
/// Performs an element-wise minimum: counts[i] = min(counts[i], limit).
///
/// \param counts The input and output array.
/// \param num Number of input elements.
/// \param limit The second operand for the minimum operation.
template <class T>
void LimitCounts(const cudaStream_t& stream, T* counts, int64_t num, T limit) {
    if (!num) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(num));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(num, int64_t(grid.z) * grid.y * block.x);
    LimitCountsKernel<<<grid, block, 0, stream>>>(counts, num, limit);
}
/// Kernel that computes, for every output voxel, its start index in the
/// sorted point array and its (clamped) point count. One thread handles
/// all output voxels of one batch element.
/// NOTE(review): max_voxels is unused here — the truncation to max_voxels
/// is already encoded in out_batch_splits by the caller.
__global__ void ComputeStartIdxKernel(
        int64_t* start_idx,
        int64_t* points_count,
        const int64_t* num_voxels_prefix_sum,
        const int64_t* unique_hashes_count_prefix_sum,
        const int64_t* out_batch_splits,
        const int64_t batch_size,
        const int64_t max_voxels,
        const int64_t max_points_per_voxel) {
    // Flat thread index over the 3D launch grid.
    int64_t linear_idx;
    const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
    linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
                 y * gridDim.x * blockDim.x + x;
    if (linear_idx >= batch_size) return;
    // Index of the first (untruncated) voxel of this batch element.
    int64_t voxel_idx;
    if (0 == linear_idx) {
        voxel_idx = 0;
    } else {
        voxel_idx = num_voxels_prefix_sum[linear_idx - 1];
    }
    // Output voxel range [begin_out, end_out) for this batch element.
    int64_t begin_out = out_batch_splits[linear_idx];
    int64_t end_out = out_batch_splits[linear_idx + 1];
    for (int64_t out_idx = begin_out; out_idx < end_out;
         out_idx++, voxel_idx++) {
        if (voxel_idx == 0) {
            // The very first voxel starts at 0; the inclusive prefix sum
            // has no entry before it.
            start_idx[out_idx] = 0;
            points_count[out_idx] = min(max_points_per_voxel,
                                        unique_hashes_count_prefix_sum[0]);
        } else {
            start_idx[out_idx] = unique_hashes_count_prefix_sum[voxel_idx - 1];
            points_count[out_idx] =
                    min(max_points_per_voxel,
                        unique_hashes_count_prefix_sum[voxel_idx] -
                                unique_hashes_count_prefix_sum[voxel_idx - 1]);
        }
    }
}
/// Computes the starting index and clamped point count of each voxel.
///
/// \param start_idx The output array for storing starting indices.
/// \param points_count The output array for storing point counts.
/// \param num_voxels_prefix_sum Inclusive prefix sum which gives the
///        index of the starting voxel for each batch.
/// \param unique_hashes_count_prefix_sum Inclusive prefix sum defining
///        where the point indices for each voxel end.
/// \param out_batch_splits Defines starting and ending voxels for each
///        batch element.
/// \param batch_size The batch size.
/// \param max_voxels Maximum voxels per batch.
/// \param max_points_per_voxel Maximum points per voxel.
///
void ComputeStartIdx(const cudaStream_t& stream,
                     int64_t* start_idx,
                     int64_t* points_count,
                     const int64_t* num_voxels_prefix_sum,
                     const int64_t* unique_hashes_count_prefix_sum,
                     const int64_t* out_batch_splits,
                     const int64_t batch_size,
                     const int64_t max_voxels,
                     const int64_t max_points_per_voxel) {
    if (!batch_size) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(batch_size));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(batch_size, int64_t(grid.z) * grid.y * block.x);
    ComputeStartIdxKernel<<<grid, block, 0, stream>>>(
            start_idx, points_count, num_voxels_prefix_sum,
            unique_hashes_count_prefix_sum, out_batch_splits, batch_size,
            max_voxels, max_points_per_voxel);
}
/// Kernel that computes the integer coordinates for each voxel from the
/// position of the voxel's first point.
template <class T, int NDIM>
__global__ void ComputeVoxelCoordsKernel(
        int32_t* __restrict__ voxel_coords,
        const T* const __restrict__ points,
        const int64_t* const __restrict__ point_indices,
        const int64_t* const __restrict__ prefix_sum,
        const MiniVec<T, NDIM> points_range_min_vec,
        const MiniVec<T, NDIM> inv_voxel_size,
        int64_t num_voxels) {
    typedef MiniVec<T, NDIM> Vec_t;
    const int64_t tx = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t ty = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t tz = blockDim.z * blockIdx.z + threadIdx.z;
    const int64_t row_pitch = gridDim.x * blockDim.x;
    const int64_t linear_idx = (tz * gridDim.y + ty) * row_pitch + tx;
    if (linear_idx >= num_voxels) return;
    // The first point of the voxel determines the voxel coordinate.
    const int64_t point_idx = point_indices[prefix_sum[linear_idx]];
    Vec_t point(points + point_idx * NDIM);
    const auto coords = ((point - points_range_min_vec) * inv_voxel_size)
                                .template cast<int32_t>();
    for (int d = 0; d < NDIM; ++d) {
        voxel_coords[linear_idx * NDIM + d] = coords[d];
    }
}
/// Computes the integer coordinates for each voxel.
///
/// \param voxel_coords The output array with shape [num_voxels, NDIM].
/// \param points The array with the point coordinates.
/// \param point_indices The array with the point indices for all voxels.
/// \param prefix_sum Array giving the index of the first point of each
///        voxel within point_indices.
/// \param points_range_min_vec The lower bound of the voxelized domain.
/// \param inv_voxel_size The reciprocal of the voxel edge lengths for
///        each dimension.
/// \param num_voxels The number of voxels.
template <class T, int NDIM>
void ComputeVoxelCoords(const cudaStream_t& stream,
                        int32_t* voxel_coords,
                        const T* const points,
                        const int64_t* const point_indices,
                        const int64_t* const prefix_sum,
                        const MiniVec<T, NDIM> points_range_min_vec,
                        const MiniVec<T, NDIM> inv_voxel_size,
                        int64_t num_voxels) {
    if (!num_voxels) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(num_voxels));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(num_voxels, int64_t(grid.z) * grid.y * block.x);
    ComputeVoxelCoordsKernel<<<grid, block, 0, stream>>>(
            voxel_coords, points, point_indices, prefix_sum,
            points_range_min_vec, inv_voxel_size, num_voxels);
}
/// Kernel that copies, for each voxel, its (possibly truncated) point
/// indices from the sorted input buffer to the compact output buffer.
///
/// \param out Output array receiving the point indices of all voxels.
/// \param point_indices Sorted point indices for all voxels.
/// \param prefix_sum_in Start index of each voxel inside point_indices.
/// \param prefix_sum_out Inclusive prefix sum defining where the point
///        indices of each voxel end in the output.
/// \param num_voxels The number of voxels.
__global__ void CopyPointIndicesKernel(
        int64_t* __restrict__ out,
        const int64_t* const __restrict__ point_indices,
        const int64_t* const __restrict__ prefix_sum_in,
        const int64_t* const __restrict__ prefix_sum_out,
        const int64_t num_voxels) {
    // TODO data coalescing can be optimized
    const int64_t x = blockDim.x * blockIdx.x + threadIdx.x;
    const int64_t y = blockDim.y * blockIdx.y + threadIdx.y;
    const int64_t z = blockDim.z * blockIdx.z + threadIdx.z;
    const int64_t linear_idx = z * gridDim.x * blockDim.x * gridDim.y +
                               y * gridDim.x * blockDim.x + x;
    if (linear_idx >= num_voxels) return;
    // Output range [begin_out, end_out) of this voxel; its length is the
    // clamped point count. (Removed an unused 'num_points' local.)
    const int64_t begin_out = linear_idx == 0 ? 0 : prefix_sum_out[linear_idx - 1];
    const int64_t end_out = prefix_sum_out[linear_idx];
    int64_t in_idx = prefix_sum_in[linear_idx];
    for (int64_t out_idx = begin_out; out_idx < end_out; ++out_idx, ++in_idx) {
        out[out_idx] = point_indices[in_idx];
    }
}
/// Copies the point indices for each voxel to the output.
///
/// \param out The output array with the point indices for all voxels.
/// \param point_indices The array with the point indices for all voxels.
/// \param prefix_sum_in Array giving the start index of each voxel in
///        point_indices.
/// \param prefix_sum_out Inclusive prefix sum defining where the point
///        indices for each voxel end in the output.
/// \param num_voxels The number of voxels.
///
void CopyPointIndices(const cudaStream_t& stream,
                      int64_t* out,
                      const int64_t* const point_indices,
                      const int64_t* const prefix_sum_in,
                      const int64_t* const prefix_sum_out,
                      const int64_t num_voxels) {
    if (!num_voxels) return;
    const int BLOCKSIZE = 128;
    dim3 block(BLOCKSIZE, 1, 1);
    const unsigned grid_yz = std::ceil(std::cbrt(num_voxels));
    dim3 grid(1, grid_yz, grid_yz);
    grid.x = DivUp(num_voxels, int64_t(grid.z) * grid.y * block.x);
    CopyPointIndicesKernel<<<grid, block, 0, stream>>>(
            out, point_indices, prefix_sum_in, prefix_sum_out, num_voxels);
}
} // namespace
/// This function voxelizes a point cloud.
/// The function returns the integer coordinates of the voxels that
/// contain points and a compact list of the indices that associate the
/// voxels to the points.
///
/// All pointer arguments point to device memory unless stated
/// otherwise.
///
/// \tparam T Floating-point data type for the point positions.
///
/// \tparam NDIM The number of dimensions of the points.
///
/// \tparam OUTPUT_ALLOCATOR Type of the output_allocator. See
/// \p output_allocator for more information.
///
/// \param stream The cuda stream for all kernel launches.
///
/// \param temp Pointer to temporary memory. If nullptr then the required
/// size of temporary memory will be written to \p temp_size and no
/// work is done.
///
/// \param temp_size The size of the temporary memory in bytes. This is
/// used as an output if temp is nullptr
///
/// \param texture_alignment The texture alignment in bytes. This is used
/// for allocating segments within the temporary memory.
///
/// \param num_points The number of points.
///
/// \param points Array with the point positions. The shape is
/// [num_points,NDIM].
///
/// \param batch_size The batch size of points.
///
/// \param row_splits row_splits for defining batches.
///
/// \param voxel_size The edge lengths of the voxel. The shape is
/// [NDIM]. This pointer points to host memory!
///
/// \param points_range_min The lower bound of the domain to be
/// voxelized.
/// The shape is [NDIM].
/// This pointer points to host memory!
///
/// \param points_range_max The upper bound of the domain to be
/// voxelized.
/// The shape is [NDIM].
/// This pointer points to host memory!
///
/// \param max_points_per_voxel This parameter limits the number of
/// points that are recorded for each voxel.
///
/// \param max_voxels This parameter limits the number of voxels that
/// will be generated.
///
/// \param output_allocator An object that implements functions for
/// allocating the output arrays. The object must implement
/// functions AllocVoxelCoords(int32_t** ptr, int64_t rows,
/// int64_t cols), AllocVoxelPointIndices(int64_t** ptr, int64_t
/// size), AllocVoxelPointRowSplits(int64_t** ptr, int64_t
/// size) and AllocVoxelBatchSplits(int64_t** ptr, int64_t size).
/// All functions should allocate memory and return a pointer
/// to that memory in ptr. The arguments size, rows, and cols
/// define the size of the array as the number of elements.
/// All functions must accept zero size arguments. In this case
/// ptr does not need to be set.
///
template <class T, int NDIM, class OUTPUT_ALLOCATOR>
void VoxelizeCUDA(const cudaStream_t& stream,
                  void* temp,
                  size_t& temp_size,
                  int texture_alignment,
                  size_t num_points,
                  const T* const points,
                  const size_t batch_size,
                  const int64_t* const row_splits,
                  const T* const voxel_size,
                  const T* const points_range_min,
                  const T* const points_range_max,
                  const int64_t max_points_per_voxel,
                  const int64_t max_voxels,
                  OUTPUT_ALLOCATOR& output_allocator) {
    using namespace open3d::utility;
    typedef MiniVec<T, NDIM> Vec_t;
    // If temp is null this is the "size query" pass: no kernels are
    // launched and only the peak temporary memory usage is recorded.
    const bool get_temp_size = !temp;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    const Vec_t inv_voxel_size = T(1) / Vec_t(voxel_size);
    const Vec_t points_range_min_vec(points_range_min);
    const Vec_t points_range_max_vec(points_range_max);
    // Number of voxels per dimension and the strides used to linearize
    // integer voxel coordinates into a single hash value.
    MiniVec<int32_t, NDIM> extents =
            ceil((points_range_max_vec - points_range_min_vec) * inv_voxel_size)
                    .template cast<int32_t>();
    MiniVec<int64_t, NDIM> strides;
    for (int i = 0; i < NDIM; ++i) {
        strides[i] = 1;
        for (int j = 0; j < i; ++j) {
            strides[i] *= extents[j];
        }
    }
    // Hashes of batch b live in [b*batch_hash, (b+1)*batch_hash);
    // invalid_hash marks out-of-range points and sorts after all valid hashes.
    const int64_t batch_hash = strides[NDIM - 1] * extents[NDIM - 1];
    const int64_t invalid_hash = batch_hash * batch_size;
    /// store batch_id for each point
    std::pair<int64_t*, size_t> indices_batches =
            mem_temp.Alloc<int64_t>(num_points);
    if (!get_temp_size) {
        ComputeIndicesBatches(stream, indices_batches.first, row_splits,
                              batch_size);
    }
    // use double buffers for the sorting
    std::pair<int64_t*, size_t> point_indices =
            mem_temp.Alloc<int64_t>(num_points);
    std::pair<int64_t*, size_t> point_indices_alt =
            mem_temp.Alloc<int64_t>(num_points);
    std::pair<int64_t*, size_t> hashes = mem_temp.Alloc<int64_t>(num_points);
    std::pair<int64_t*, size_t> hashes_alt =
            mem_temp.Alloc<int64_t>(num_points);
    cub::DoubleBuffer<int64_t> point_indices_dbuf(point_indices.first,
                                                  point_indices_alt.first);
    cub::DoubleBuffer<int64_t> hashes_dbuf(hashes.first, hashes_alt.first);
    if (!get_temp_size) {
        IotaCUDA(stream, point_indices.first,
                 point_indices.first + point_indices.second, int64_t(0));
        ComputeHash(stream, hashes.first, num_points, points, batch_size,
                    row_splits, indices_batches.first, points_range_min_vec,
                    points_range_max_vec, inv_voxel_size, strides, batch_hash,
                    invalid_hash);
    }
    // Sort point indices by hash; the first SortPairs call (null temp)
    // only queries the required temporary storage size.
    {
        // TODO compute end_bit for radix sort
        std::pair<void*, size_t> sort_pairs_temp(nullptr, 0);
        cub::DeviceRadixSort::SortPairs(
                sort_pairs_temp.first, sort_pairs_temp.second, hashes_dbuf,
                point_indices_dbuf, num_points, 0, sizeof(int64_t) * 8, stream);
        sort_pairs_temp = mem_temp.Alloc(sort_pairs_temp.second);
        if (!get_temp_size) {
            cub::DeviceRadixSort::SortPairs(sort_pairs_temp.first,
                                            sort_pairs_temp.second, hashes_dbuf,
                                            point_indices_dbuf, num_points, 0,
                                            sizeof(int64_t) * 8, stream);
        }
        mem_temp.Free(sort_pairs_temp);
    }
    // reuse the alternate buffers
    std::pair<int64_t*, size_t> unique_hashes(hashes_dbuf.Alternate(),
                                              hashes.second);
    std::pair<int64_t*, size_t> unique_hashes_count(
            point_indices_dbuf.Alternate(), point_indices.second);
    // encode unique hashes(voxels) and their counts(points per voxel)
    int64_t num_voxels = 0;
    int64_t last_hash = 0;  // 0 is a valid hash value
    {
        std::pair<void*, size_t> encode_temp(nullptr, 0);
        std::pair<int64_t*, size_t> num_voxels_mem = mem_temp.Alloc<int64_t>(1);
        cub::DeviceRunLengthEncode::Encode(
                encode_temp.first, encode_temp.second, hashes_dbuf.Current(),
                unique_hashes.first, unique_hashes_count.first,
                num_voxels_mem.first, num_points, stream);
        encode_temp = mem_temp.Alloc(encode_temp.second);
        if (!get_temp_size) {
            cub::DeviceRunLengthEncode::Encode(
                    encode_temp.first, encode_temp.second,
                    hashes_dbuf.Current(), unique_hashes.first,
                    unique_hashes_count.first, num_voxels_mem.first, num_points,
                    stream);
            // get the number of voxels
            cudaMemcpyAsync(&num_voxels, num_voxels_mem.first, sizeof(int64_t),
                            cudaMemcpyDeviceToHost, stream);
            // get the last hash value
            cudaMemcpyAsync(&last_hash,
                            hashes_dbuf.Current() + hashes.second - 1,
                            sizeof(int64_t), cudaMemcpyDeviceToHost, stream);
            // wait for the async copies
            while (cudaErrorNotReady == cudaStreamQuery(stream)) { /*empty*/
            }
        }
        mem_temp.Free(encode_temp);
    }
    if (invalid_hash == last_hash) {
        // the last hash is invalid we have one voxel less
        --num_voxels;
    }
    // reuse the hashes buffer
    std::pair<int64_t*, size_t> unique_hashes_count_prefix_sum(
            hashes_dbuf.Current(), hashes.second);
    // compute the prefix sum for unique_hashes_count
    // gives starting index of each voxel
    {
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                unique_hashes_count.first, unique_hashes_count_prefix_sum.first,
                unique_hashes_count.second, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            // We only need the prefix sum for the first num_voxels.
            cub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    unique_hashes_count.first,
                    unique_hashes_count_prefix_sum.first, num_voxels, stream);
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    // Limit the number of output points to max_points_per_voxel by
    // limiting the unique_hashes_count.
    if (!get_temp_size) {
        if (max_points_per_voxel < num_points) {
            LimitCounts(stream, unique_hashes_count.first, num_voxels,
                        max_points_per_voxel);
        }
    }
    // Convert unique_hashes to batch_id (divide with batch_hash)
    int64_t* unique_hashes_batch_id = unique_hashes.first;
    if (!get_temp_size) {
        ComputeBatchId(stream, unique_hashes_batch_id, num_voxels, batch_hash);
    }
    std::pair<int64_t*, size_t> unique_batches =
            mem_temp.Alloc<int64_t>(batch_size);
    std::pair<int64_t*, size_t> unique_batches_count =
            mem_temp.Alloc<int64_t>(batch_size);
    int64_t num_batches = 0;  // Store non empty batches
    // Convert batch_id to counts (array of num_voxels per batch)
    {
        std::pair<void*, size_t> encode_temp(nullptr, 0);
        std::pair<int64_t*, size_t> num_batches_mem =
                mem_temp.Alloc<int64_t>(1);
        cub::DeviceRunLengthEncode::Encode(
                encode_temp.first, encode_temp.second, unique_hashes_batch_id,
                unique_batches.first, unique_batches_count.first,
                num_batches_mem.first, num_voxels, stream);
        encode_temp = mem_temp.Alloc(encode_temp.second);
        if (!get_temp_size) {
            cub::DeviceRunLengthEncode::Encode(
                    encode_temp.first, encode_temp.second,
                    unique_hashes_batch_id, unique_batches.first,
                    unique_batches_count.first, num_batches_mem.first,
                    num_voxels, stream);
            // get the number of non empty batches.
            cudaMemcpyAsync(&num_batches, num_batches_mem.first,
                            sizeof(int64_t), cudaMemcpyDeviceToHost, stream);
            // wait for the async copies
            while (cudaErrorNotReady == cudaStreamQuery(stream)) { /*empty*/
            }
        }
        mem_temp.Free(encode_temp);
    }
    // Insert count(0) for empty batches
    std::pair<int64_t*, size_t> num_voxels_per_batch =
            mem_temp.Alloc<int64_t>(batch_size);
    if (!get_temp_size) {
        cudaMemset(num_voxels_per_batch.first, 0, batch_size * sizeof(int64_t));
        ComputeVoxelPerBatch(stream, num_voxels_per_batch.first,
                             unique_batches_count.first, unique_batches.first,
                             num_batches);
    }
    std::pair<int64_t*, size_t> num_voxels_prefix_sum(unique_batches.first,
                                                      batch_size);
    // compute the prefix sum for number of voxels per batch
    // gives starting voxel index for each batch
    // used only when voxel count exceeds max_voxels
    {
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                num_voxels_per_batch.first, num_voxels_prefix_sum.first,
                num_voxels_per_batch.second, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            if (num_voxels > max_voxels) {
                cub::DeviceScan::InclusiveSum(
                        inclusive_scan_temp.first, inclusive_scan_temp.second,
                        num_voxels_per_batch.first, num_voxels_prefix_sum.first,
                        num_voxels_per_batch.second, stream);
            }
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    // Limit the number of voxels per batch to max_voxels
    if (!get_temp_size) {
        if (num_voxels >= max_voxels)
            LimitCounts(stream, num_voxels_per_batch.first, batch_size,
                        max_voxels);
    }
    // Prefix sum of limited counts to get batch splits.
    int64_t* out_batch_splits = nullptr;
    if (!get_temp_size) {
        output_allocator.AllocVoxelBatchSplits(&out_batch_splits,
                                               batch_size + 1);
        cudaMemsetAsync(out_batch_splits, 0, sizeof(int64_t), stream);
    }
    // Prefix sum of counts to get batch splits
    {
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                num_voxels_per_batch.first, out_batch_splits + 1,
                num_voxels_per_batch.second, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            cub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    num_voxels_per_batch.first, out_batch_splits + 1,
                    batch_size, stream);
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    // num_valid_voxels excludes voxels exceeding max_voxels
    int64_t num_valid_voxels = num_points;
    if (!get_temp_size) {
        // get the number of valid voxels.
        cudaMemcpyAsync(&num_valid_voxels, out_batch_splits + batch_size,
                        sizeof(int64_t), cudaMemcpyDeviceToHost, stream);
        // wait for the async copies
        while (cudaErrorNotReady == cudaStreamQuery(stream)) { /*empty*/
        }
    }
    // start_idx stores starting index of each valid voxel.
    // points_count stores number of valid points in respective voxel.
    std::pair<int64_t*, size_t> start_idx(indices_batches.first,
                                          num_valid_voxels);
    std::pair<int64_t*, size_t> points_count =
            mem_temp.Alloc<int64_t>(num_valid_voxels);
    if (!get_temp_size) {
        if (num_voxels <= max_voxels) {
            // starting index and points_count will be same as
            // unique_hashes_count_prefix_sum and unique_hashes_count when voxel
            // count doesn't exceed max_voxels
            cudaMemsetAsync(start_idx.first, 0, sizeof(int64_t), stream);
            cudaMemcpyAsync(start_idx.first + 1,
                            unique_hashes_count_prefix_sum.first,
                            (num_voxels - 1) * sizeof(int64_t),
                            cudaMemcpyDeviceToDevice, stream);
            mem_temp.Free(points_count);
            points_count.first = unique_hashes_count.first;
        } else {
            ComputeStartIdx(stream, start_idx.first, points_count.first,
                            num_voxels_prefix_sum.first,
                            unique_hashes_count_prefix_sum.first,
                            out_batch_splits, batch_size, max_voxels,
                            max_points_per_voxel);
        }
    }
    int64_t* out_voxel_row_splits = nullptr;
    if (!get_temp_size) {
        output_allocator.AllocVoxelPointRowSplits(&out_voxel_row_splits,
                                                  num_valid_voxels + 1);
    }
    if (!get_temp_size) {
        // set first element to 0
        cudaMemsetAsync(out_voxel_row_splits, 0, sizeof(int64_t), stream);
    }
    // Prefix sum of the clamped point counts gives the output row splits.
    {
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                points_count.first, out_voxel_row_splits + 1, num_valid_voxels,
                stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            cub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    points_count.first, out_voxel_row_splits + 1,
                    num_valid_voxels, stream);
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
    int32_t* out_voxel_coords = nullptr;
    output_allocator.AllocVoxelCoords(&out_voxel_coords, num_valid_voxels,
                                      NDIM);
    ComputeVoxelCoords(stream, out_voxel_coords, points,
                       point_indices_dbuf.Current(), start_idx.first,
                       points_range_min_vec, inv_voxel_size, num_valid_voxels);
    int64_t num_valid_points = 0;
    {
        cudaMemcpyAsync(&num_valid_points,
                        out_voxel_row_splits + num_valid_voxels,
                        sizeof(int64_t), cudaMemcpyDeviceToHost, stream);
        // wait for the async copies
        while (cudaErrorNotReady == cudaStreamQuery(stream)) { /*empty*/
        }
    }
    int64_t* out_point_indices = nullptr;
    output_allocator.AllocVoxelPointIndices(&out_point_indices,
                                            num_valid_points);
    CopyPointIndices(stream, out_point_indices, point_indices_dbuf.Current(),
                     start_idx.first, out_voxel_row_splits + 1,
                     num_valid_voxels);
}
} // namespace impl
} // namespace ml
} // namespace open3d | the_stack |
#include "Device/DataMovement/Indexing.cuh"
#include "Device/Util/DeviceProperties.cuh"
#include "Device/Util/Basic.cuh"
#include "Host/Algorithm.hpp"
namespace xlib {
namespace detail {
/// Computes, for each partition of PARTITION_SIZE work items, the
/// prefix-sum position where it starts. The last entry is set to
/// prefixsum_size - 2 as a sentinel.
template<unsigned PARTITION_SIZE, typename T>
__global__ void blockPartition(const T* __restrict__ d_prefixsum,
                               int prefixsum_size,
                               int* __restrict__ d_partition,
                               int num_partitions) {
    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    const int grid_threads = gridDim.x * blockDim.x;
    // Grid-stride loop over all partitions.
    for (int part = thread_id; part < num_partitions; part += grid_threads) {
        const T target = static_cast<T>(part) * PARTITION_SIZE;
        d_partition[part] = xlib::upper_bound_left(d_prefixsum,
                                                   prefixsum_size, target);
    }
    if (thread_id == 0)
        d_partition[num_partitions] = prefixsum_size - 2;
}
// Core loop of threadPartition: streams the block's prefix-sum range
// through shared memory in windows and resolves, for each of this
// thread's ITEMS_PER_THREAD work items, its (position, offset) pair.
template<unsigned BLOCK_SIZE, typename T, unsigned ITEMS_PER_THREAD>
__device__ __forceinline__
void threadPartitionAuxLoop(const T* __restrict__ ptr,
                            int block_start_pos,
                            int chunk_size,
                            T searched,
                            T* __restrict__ smem_prefix,
                            int (&reg_pos)[ITEMS_PER_THREAD],
                            T (&reg_offset)[ITEMS_PER_THREAD]) {
    const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD;
    T low_limit = 0;
    while (chunk_size > 0) {
        // Cooperatively load the next window of the prefix sum.
        int smem_size = ::min(chunk_size, ITEMS_PER_BLOCK);
#pragma unroll
        for (int i = 0; i < ITEMS_PER_THREAD; i++) {
            int index = i * BLOCK_SIZE + threadIdx.x;
            if (index < smem_size)
                smem_prefix[index] = ptr[index];
        }
        xlib::sync<BLOCK_SIZE>();
        // Position of this thread's first item inside the window.
        int ubound = xlib::upper_bound_left(smem_prefix, smem_size, searched);
        int smem_pos = ::min(::max(0, ubound), ITEMS_PER_BLOCK - 2);
        assert(smem_pos >= 0 && smem_pos + 1 < ITEMS_PER_BLOCK);
        T offset = ::max(searched - smem_prefix[smem_pos], 0);
        T next = smem_prefix[smem_pos + 1];
        T high_limit = smem_prefix[smem_size - 1];
#pragma unroll
        for (int i = 0; i < ITEMS_PER_THREAD; i++) {
            T loc_search = searched + i;
            // Only items that fall inside this window are resolved now;
            // the rest wait for a later iteration.
            if (loc_search < low_limit || loc_search >= high_limit)
                continue;
            if (loc_search == next) {
                // Advance past segment boundaries, skipping empty
                // segments (duplicate prefix-sum entries).
                do {
                    smem_pos++;
                    assert(smem_pos >= 0 && smem_pos + 1 < smem_size);
                    next = smem_prefix[smem_pos + 1];
                } while (loc_search == next);
                offset = 0;
            }
            reg_pos[i] = block_start_pos + smem_pos;
            reg_offset[i] = offset;
            offset++;
        }
        xlib::sync<BLOCK_SIZE>();
        // Windows overlap by one entry so no segment boundary is cut.
        low_limit = high_limit;
        chunk_size -= ITEMS_PER_BLOCK - 1;
        ptr += ITEMS_PER_BLOCK - 1;
        block_start_pos += ITEMS_PER_BLOCK - 1;
    }
}
/**
 * @brief Computes, for each thread, the prefix-sum positions and offsets
 *        of its ITEMS_PER_THREAD work items
 * @details
 * @verbatim
 * d_prefixsum input: 0, 3, 7, 10, 13
 * ITEMS_PER_THREAD: 5
 * reg_pos output: t1(0, 0, 0, 1, 1) t2(1, 1, 2, 2, 2) t3(3, 3, 3, *, *)
 * reg_offset output: t1(0, 1, 2, 0, 1) t2(2, 3, 0, 1, 2) t3(0, 1, 2, *, *)
 * *: undefined
 * @endverbatim
 *
 * @tparam BLOCK_SIZE
 * @tparam T
 * @tparam ITEMS_PER_THREAD
 * @param[in] d_prefixsum prefix-sum array to partition
 * @param[in] prefixsum_size number of entries in d_prefixsum
 * @param[in] smem scratch shared memory
 * @param[out] reg_pos per-thread prefix-sum positions
 * @param[out] reg_offset per-thread offsets within each position
 * @param[in] block_start_pos first prefix-sum position of this block
 * @param[in] block_end_pos last prefix-sum position of this block
 *
 * @warning |smem| must be equal to BLOCK_SIZE * ITEMS_PER_THREAD
 * @remark The best way to detect unused registers in the last thread block
 *         is to fill the `reg_offset` array with a special value
 * @warning requires `__syncthreads()` at the end if the shared memory is used
 * @remark the function uses static indexing for `reg_pos` and `reg_offset`
 */
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, typename T>
__device__ __forceinline__
void threadPartition(const T* __restrict__ d_prefixsum, int prefixsum_size,
                     void* __restrict__ smem,
                     int (&reg_pos)[ITEMS_PER_THREAD],
                     T (&reg_offset)[ITEMS_PER_THREAD],
                     int block_start_pos,
                     int block_end_pos) {
    const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD;
    // First work item handled by this thread.
    const T thread_search = static_cast<T>(blockIdx.x) * ITEMS_PER_BLOCK +
                            static_cast<T>(threadIdx.x) * ITEMS_PER_THREAD;
    // Pre-mark possibly-unused registers of the last block with -1.
    if (blockIdx.x == gridDim.x - 1)
        xlib::reg_fill(reg_pos, -1);
    const T* block_prefix = d_prefixsum + block_start_pos;
    const int chunk_size = block_end_pos - block_start_pos + 2;
    detail::threadPartitionAuxLoop<BLOCK_SIZE>
        (block_prefix, block_start_pos, chunk_size, thread_search,
         static_cast<T*>(smem), reg_pos, reg_offset);
}
// Variant of threadPartition that loads the whole block range into shared
// memory once (no chunked loop).
// NOTE(review): unlike threadPartitionAuxLoop this advances smem_pos by at
// most one per item — presumably the prefix sum contains no duplicate
// entries (no empty segments); confirm against callers.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD, typename T>
__device__ __forceinline__
void threadPartitionNoDup(const T* __restrict__ d_prefixsum,
                          int prefixsum_size,
                          void* __restrict__ smem,
                          int (&reg_pos)[ITEMS_PER_THREAD],
                          T (&reg_offset)[ITEMS_PER_THREAD],
                          int block_start_pos,
                          int block_end_pos) {
    const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * ITEMS_PER_THREAD;
    // First work item of this block / this thread.
    T block_search_low = static_cast<T>(blockIdx.x) * ITEMS_PER_BLOCK;
    T searched = block_search_low +
                 static_cast<T>(threadIdx.x) * ITEMS_PER_THREAD;
    const T* ptr = d_prefixsum + block_start_pos;
    int smem_size = block_end_pos - block_start_pos + 2;
    auto smem_prefix = static_cast<T*>(smem);
    // Stage the relevant prefix-sum slice in shared memory.
#pragma unroll
    for (int i = 0; i < ITEMS_PER_THREAD; i++) {
        int index = i * BLOCK_SIZE + threadIdx.x;
        if (index < smem_size)
            smem_prefix[index] = ptr[index];
    }
    xlib::sync<BLOCK_SIZE>();
    // Position and offset of this thread's first item.
    int smem_pos = xlib::upper_bound_left(smem_prefix, smem_size, searched);
    T next = smem_prefix[smem_pos + 1];
    T offset = searched - smem_prefix[smem_pos];
    T limit = smem_prefix[smem_size - 1];
#pragma unroll
    for (int i = 0; i < ITEMS_PER_THREAD; i++) {
        // -1 marks items past the end of the valid range.
        reg_pos[i] = searched < limit ? block_start_pos + smem_pos : -1;
        reg_offset[i] = offset;
        searched++;
        if (searched == next) {
            smem_pos++;
            next = smem_prefix[smem_pos + 1];
            offset = 0;
        } else
            offset++;
    }
    xlib::sync<BLOCK_SIZE>();
}
//==============================================================================
//==============================================================================
//==============================================================================
//==============================================================================
/// Core load-balanced binary-search traversal. Each CUDA block claims a slice
/// of the prefix-sum, each thread resolves the (position, offset) pairs of
/// its work items, and `lambda(pos, offset)` is invoked once per valid item
/// after a thread-to-warp reindexing pass.
/// @tparam BLOCK_PARTITION_FROM_GLOBAL read per-block bounds from
///         d_partitions — deprecated and statically rejected below
/// @tparam NO_DUPLICATE  use the duplicate-free thread partitioning variant
/// @tparam LAST_BLOCK_CHECK  retained flag; its conditional use is commented
///         out below, so the -1 check is always performed
/// @tparam ITEMS_PER_THREAD  0 selects a shared-memory-derived default
template<bool BLOCK_PARTITION_FROM_GLOBAL, bool NO_DUPLICATE,
         bool LAST_BLOCK_CHECK, unsigned BLOCK_SIZE,
         unsigned ITEMS_PER_THREAD = 0, typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLBGen(const T* __restrict__ d_prefixsum,
                       int prefixsum_size,
                       int* __restrict__ d_partitions,
                       void* __restrict__ smem,
                       const Lambda& lambda) {
    // ITEMS_PER_THREAD == 0: derive items per thread from smem capacity.
    const unsigned _ITEMS_PER_THREAD = ITEMS_PER_THREAD == 0 ?
                                       smem_per_thread<T, BLOCK_SIZE>() :
                                       ITEMS_PER_THREAD;
    int reg_pos[_ITEMS_PER_THREAD];
    T reg_offset[_ITEMS_PER_THREAD];
    // Any instantiation with BLOCK_PARTITION_FROM_GLOBAL == true fails to
    // compile; the `if (BLOCK_PARTITION_FROM_GLOBAL)` branch below is dead.
    static_assert(!BLOCK_PARTITION_FROM_GLOBAL, "Deprecated");
    int block_start_pos, block_end_pos;
    if (BLOCK_PARTITION_FROM_GLOBAL) {
        block_start_pos = d_partitions[ blockIdx.x ];
        block_end_pos = d_partitions[ blockIdx.x + 1 ];
    }
    else {
        // Two threads search the block's lower/upper bounds in parallel;
        // IDX1 puts the second search in a different warp when possible.
        const unsigned ITEMS_PER_BLOCK = BLOCK_SIZE * _ITEMS_PER_THREAD;
        const unsigned IDX1 = BLOCK_SIZE >= 64 ? xlib::WARP_SIZE : 1;
        auto smem_prefix = static_cast<T*>(smem);
        if (threadIdx.x == 0) {
            T block_search = static_cast<T>(blockIdx.x) * ITEMS_PER_BLOCK;
            smem_prefix[0] = xlib::upper_bound_left(d_prefixsum, prefixsum_size,
                                                    block_search);
        }
        else if (threadIdx.x == IDX1) {
            T block_search = static_cast<T>(blockIdx.x + 1) * ITEMS_PER_BLOCK;
            // last block ends at the final prefix-sum bin
            smem_prefix[1] = blockIdx.x == gridDim.x - 1 ? prefixsum_size - 2 :
                             xlib::upper_bound_left(d_prefixsum, prefixsum_size,
                                                    block_search);
        }
        xlib::sync<BLOCK_SIZE>();
        block_start_pos = smem_prefix[0];
        block_end_pos = smem_prefix[1];
        xlib::sync<BLOCK_SIZE>();
    }
    if (NO_DUPLICATE) {
        detail::threadPartitionNoDup<BLOCK_SIZE>
            (d_prefixsum, prefixsum_size, smem, reg_pos, reg_offset,
             block_start_pos, block_end_pos);
    }
    else {
        detail::threadPartition<BLOCK_SIZE>
            (d_prefixsum, prefixsum_size, smem, reg_pos, reg_offset,
             block_start_pos, block_end_pos);
    }
    // regroup items so consecutive lanes touch consecutive positions
    threadToWarpIndexing<_ITEMS_PER_THREAD>(reg_pos, reg_offset, smem);
    //if (LAST_BLOCK_CHECK && blockIdx.x == gridDim.x - 1) {
    // -1 marks padding slots of the last block (see threadPartition*)
    #pragma unroll
    for (int i = 0; i < _ITEMS_PER_THREAD; i++) {
        if (reg_pos[i] != -1) {
            assert(reg_pos[i] < prefixsum_size);
            lambda(reg_pos[i], reg_offset[i]);
        }
    }
    //}
    /*else {
        #pragma unroll
        for (int i = 0; i < _ITEMS_PER_THREAD; i++) {
            assert(reg_pos[i] < prefixsum_size);
            lambda(reg_pos[i], reg_offset[i]);
        }
    }*/
}
/// One-work-item-per-thread load balancing: every thread binary-searches its
/// own global item index in the prefix-sum and, if the item exists, invokes
/// `lambda(bin, offset)` with the owning bin and the offset inside it.
/// `smem` is accepted for interface symmetry with the batched variants but
/// is not used here.
template<unsigned BLOCK_SIZE, typename T, typename Lambda>
__device__ __forceinline__
void simpleBinarySearchLBGen(const T* __restrict__ d_prefixsum,
                             int prefixsum_size,
                             void* __restrict__ smem,
                             const Lambda& lambda) {
    const unsigned items_per_block = blockDim.x;
    T global_item = blockIdx.x * items_per_block + threadIdx.x;
    int bin = xlib::upper_bound_left(d_prefixsum, prefixsum_size, global_item);
    // Guard: bin must be a valid interior position and the item must fall
    // below the total item count stored in the last prefix-sum entry.
    if (bin < 0 || bin >= prefixsum_size - 1 ||
        global_item >= d_prefixsum[prefixsum_size - 1])
        return;
    T in_bin_offset = global_item - d_prefixsum[bin];
    lambda(bin, in_bin_offset);
}
} // namespace detail
//==============================================================================
//==============================================================================
/// Load-balanced traversal of a prefix-sum: invokes `lambda(pos, offset)` for
/// every work item. Block bounds are computed on the fly (no partition
/// array); duplicate (pos, offset) production is permitted.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLB(const T* __restrict__ d_prefixsum,
                    int prefixsum_size,
                    void* __restrict__ smem,
                    const Lambda& lambda) {
    detail::binarySearchLBGen<false, false, true, BLOCK_SIZE, ITEMS_PER_THREAD>
        (d_prefixsum, prefixsum_size, nullptr, smem, lambda);
}
/// Simple one-item-per-thread variant: one global binary search per thread.
/// NOTE(review): ITEMS_PER_THREAD is accepted for signature symmetry with
/// binarySearchLB but is not forwarded — confirm this is intentional.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void simpleBinarySearchLB(const T* __restrict__ d_prefixsum,
                          int prefixsum_size,
                          void* __restrict__ smem,
                          const Lambda& lambda) {
    detail::simpleBinarySearchLBGen<BLOCK_SIZE>
        (d_prefixsum, prefixsum_size, smem, lambda);
}
/// Same as binarySearchLB but with LAST_BLOCK_CHECK disabled (third template
/// argument false); block bounds are still computed on the fly.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLBAllPos(const T* __restrict__ d_prefixsum,
                          int prefixsum_size,
                          void* __restrict__ smem,
                          const Lambda& lambda) {
    detail::binarySearchLBGen<false, false, false, BLOCK_SIZE, ITEMS_PER_THREAD>
        (d_prefixsum, prefixsum_size, nullptr, smem, lambda);
}
//==============================================================================
/// Overload taking precomputed per-block bounds in `d_partitions`.
/// NOTE(review): this forwards BLOCK_PARTITION_FROM_GLOBAL = true, which
/// binarySearchLBGen rejects via static_assert("Deprecated") — any
/// instantiation of this overload fails to compile; consider removing it.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLB(const T* __restrict__ d_prefixsum,
                    int prefixsum_size,
                    int* __restrict__ d_partitions,
                    void* __restrict__ smem,
                    const Lambda& lambda) {
    detail::binarySearchLBGen<true, false, true, BLOCK_SIZE, ITEMS_PER_THREAD>
        (d_prefixsum, prefixsum_size, d_partitions, smem, lambda);
}
/// Partition-array overload without the last-block check.
/// NOTE(review): forwards BLOCK_PARTITION_FROM_GLOBAL = true, which is
/// statically rejected ("Deprecated") inside binarySearchLBGen — any
/// instantiation fails to compile; consider removing this overload.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLBAllPos(const T* __restrict__ d_prefixsum,
                          int prefixsum_size,
                          int* __restrict__ d_partitions,
                          void* __restrict__ smem,
                          const Lambda& lambda) {
    detail::binarySearchLBGen<true, false, false, BLOCK_SIZE, ITEMS_PER_THREAD>
        (d_prefixsum, prefixsum_size, d_partitions, smem, lambda);
}
//------------------------------------------------------------------------------
/// Duplicate-free variant: each (pos, offset) pair is produced exactly once
/// (NO_DUPLICATE = true); block bounds computed on the fly, no last-block
/// check.
template<unsigned BLOCK_SIZE, unsigned ITEMS_PER_THREAD,
         typename T, typename Lambda>
__device__ __forceinline__
void binarySearchLBAllPosNoDup(const T* __restrict__ d_prefixsum,
                               int prefixsum_size,
                               void* __restrict__ smem,
                               const Lambda& lambda) {
    detail::binarySearchLBGen<false, true, false, BLOCK_SIZE, ITEMS_PER_THREAD>
        (d_prefixsum, prefixsum_size, nullptr, smem, lambda);
}
} // namespace xlib
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
// Single step of Welford's streaming mean/variance update: folds the new
// sample `curr` into the running accumulators `mu` (mean), `sigma2`
// (sum of squared deviations, or raw sum of squares when `simplified`) and
// `count` (number of samples seen), all updated in place.
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
    const U curr,
    U& mu,
    U& sigma2,
    U& count) {
  count = count + U(1);
  const U diff_old = curr - mu;  // deviation from the previous mean
  mu = mu + diff_old / count;
  if (simplified) {
    // simplified layer norm accumulates plain sums of squares
    sigma2 = sigma2 + curr * curr;
  } else {
    // M2 update uses deviations from both the old and the new mean
    sigma2 = sigma2 + diff_old * (curr - mu);
  }
}
// Merges a partial Welford state B (muB, sigma2B, countB) into the running
// state (mu, sigma2, count) using Chan et al.'s pairwise combination
// formula; used for warp- and block-level reductions.
template <typename U, bool simplified>
__device__ void cuChanOnlineSum(
    const U muB,
    const U sigma2B,
    const U countB,
    U& mu,
    U& sigma2,
    U& count) {
  const U mean_gap = muB - mu;
  const U n_a = count;
  const U n_b = countB;
  count = count + countB;
  const U n_total = count;
  if (!(n_total > U(0))) {
    // both partitions empty: reset the accumulators
    mu = U(0);
    sigma2 = U(0);
    return;
  }
  const U w_a = n_a / n_total;  // weight of the running partition
  const U w_b = n_b / n_total;  // weight of the incoming partition
  mu = w_a * mu + w_b * muB;
  if (simplified) {
    sigma2 = sigma2 + sigma2B;
  } else {
    // cross term accounts for the distance between the two partial means
    sigma2 = sigma2 + sigma2B + mean_gap * mean_gap * w_a * w_b * n_total;
  }
}
// Computes mean `mu` and variance `sigma2` of row i1 (length n2) of `vals`
// with Welford accumulation per thread, then warp-level shuffle reduction
// and (when blockDim.y > 1) an inter-warp tree reduction in shared memory.
// On exit all threads hold the final mu/sigma2 for the row.
template <typename T, typename U, bool simplified>
__device__ void cuWelfordMuSigma2(
    const T* __restrict__ vals,
    const int n1,
    const int n2,
    const int i1,
    U& mu,
    U& sigma2,
    U* buf) {
  // Assumptions:
  // 1) blockDim.x == GPU_WARP_SIZE
  // 2) Tensor is contiguous
  // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
  //
  // compute variance and mean over n2
  U count = U(0);
  mu = U(0);
  sigma2 = U(0);
  if (i1 < n1) {
    // one warp normalizes one n1 index,
    // synchronization is implicit
    // initialize with standard Welford algorithm
    const int numx = blockDim.x * blockDim.y;
    const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
    const T* lvals = vals + i1 * n2;
    // unrolled-by-4 strided scan of the row, then a scalar tail loop
    int l = 4 * thrx;
    for (; l + 3 < n2; l += 4 * numx) {
      for (int k = 0; k < 4; ++k) {
        U curr = static_cast<U>(lvals[l + k]);
        cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
      }
    }
    for (; l < n2; ++l) {
      U curr = static_cast<U>(lvals[l]);
      cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
    }
    // intra-warp reductions
#pragma unroll
    for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
      U muB = WARP_SHFL_DOWN(mu, stride);
      U countB = WARP_SHFL_DOWN(count, stride);
      U sigma2B = WARP_SHFL_DOWN(sigma2, stride);
      cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
    }
    // threadIdx.x == 0 has correct values for each warp
    // inter-warp reductions
    if (blockDim.y > 1) {
      // ubuf holds (mu, sigma2) pairs, ibuf holds counts (stored as U)
      U* ubuf = (U*)buf;
      U* ibuf = (U*)(ubuf + blockDim.y);
      for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
        // upper half of warps write to shared
        if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
          const int wrt_y = threadIdx.y - offset;
          ubuf[2 * wrt_y] = mu;
          ubuf[2 * wrt_y + 1] = sigma2;
          ibuf[wrt_y] = count;
        }
        __syncthreads();
        // lower half merges
        if (threadIdx.x == 0 && threadIdx.y < offset) {
          U muB = ubuf[2 * threadIdx.y];
          U sigma2B = ubuf[2 * threadIdx.y + 1];
          U countB = ibuf[threadIdx.y];
          cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
        }
        __syncthreads();
      }
      // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
      if (threadIdx.x == 0 && threadIdx.y == 0) {
        ubuf[0] = mu;
        ubuf[1] = sigma2;
      }
      __syncthreads();
      // broadcast the block result to every thread via shared memory
      mu = ubuf[0];
      sigma2 = ubuf[1] / U(n2);
      // don't care about final value of count, we know count == n2
    } else {
      // single warp: broadcast lane 0's result via shuffle
      mu = WARP_SHFL(mu, 0);
      sigma2 = WARP_SHFL(sigma2 / U(n2), 0);
    }
  }
}
// Half-precision specialization of cuWelfordMuSigma2: accumulates in float
// and vectorizes the row scan with __half2 loads (two halfs per 32-bit read),
// after an alignment fixup for rows not starting on a 4-byte boundary.
template <bool simplified>
__device__ void cuWelfordMuSigma2(
    const half* __restrict__ vals,
    const int n1,
    const int n2,
    const int i1,
    float& mu,
    float& sigma2,
    float* buf) {
  // Assumptions:
  // 1) blockDim.x == GPU_WARP_SIZE
  // 2) Tensor is contiguous
  // 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
  //
  // compute variance and mean over n2
  float count = 0.0f;
  mu = float(0);
  sigma2 = float(0);
  if (i1 < n1) {
    // one warp normalizes one n1 index,
    // synchronization is implicit
    // initialize with standard Welford algorithm
    const int numx = blockDim.x * blockDim.y;
    const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
    const half* lvals = vals + i1 * n2;
    int l = 8 * thrx;
    // NOTE(review): the mask tests the low two address bits (4-byte
    // alignment); a half-aligned row start is fixed by consuming one scalar
    // element so the __half2 loads below become 32-bit aligned — confirm.
    if ((((size_t)lvals) & 3) != 0) {
      // 16 bit alignment
      // first thread consumes first point
      if (thrx == 0) {
        float curr = static_cast<float>(lvals[0]);
        cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
      }
      ++l;
    }
    // at this point, lvals[l] are 32 bit aligned for all threads.
    for (; l + 7 < n2; l += 8 * numx) {
      for (int k = 0; k < 8; k += 2) {
        float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
        cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.x), mu, sigma2, count);
        cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.y), mu, sigma2, count);
      }
    }
    // scalar tail for the remaining (< 8) elements
    for (; l < n2; ++l) {
      float curr = static_cast<float>(lvals[l]);
      cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
    }
    // intra-warp reductions
#pragma unroll
    for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
      float muB = WARP_SHFL_DOWN(mu, stride);
      float countB = WARP_SHFL_DOWN(count, stride);
      float sigma2B = WARP_SHFL_DOWN(sigma2, stride);
      cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
    }
    // threadIdx.x == 0 has correct values for each warp
    // inter-warp reductions
    if (blockDim.y > 1) {
      // ubuf holds (mu, sigma2) pairs, ibuf holds counts
      float* ubuf = (float*)buf;
      float* ibuf = (float*)(ubuf + blockDim.y);
      for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
        // upper half of warps write to shared
        if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
          const int wrt_y = threadIdx.y - offset;
          ubuf[2 * wrt_y] = mu;
          ubuf[2 * wrt_y + 1] = sigma2;
          ibuf[wrt_y] = count;
        }
        __syncthreads();
        // lower half merges
        if (threadIdx.x == 0 && threadIdx.y < offset) {
          float muB = ubuf[2 * threadIdx.y];
          float sigma2B = ubuf[2 * threadIdx.y + 1];
          float countB = ibuf[threadIdx.y];
          cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
        }
        __syncthreads();
      }
      // threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
      if (threadIdx.x == 0 && threadIdx.y == 0) {
        ubuf[0] = mu;
        ubuf[1] = sigma2;
      }
      __syncthreads();
      mu = ubuf[0];
      sigma2 = ubuf[1] / float(n2);
      // don't care about final value of count, we know count == n2
    } else {
      // single warp: broadcast lane 0's result via shuffle
      mu = WARP_SHFL(mu, 0);
      sigma2 = WARP_SHFL(sigma2 / float(n2), 0);
    }
  }
}
// Reciprocal square root: generic fallback plus intrinsic-backed
// specializations.
template <typename U>
__device__ U rsqrt(U v) {
  return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
  return rsqrtf(v);  // CUDA single-precision intrinsic
}
template <>
__device__ double rsqrt(double v) {
  // NOTE(review): intended to resolve to CUDA's builtin double rsqrt(double)
  // (a plain function is preferred over a template specialization in
  // overload resolution), not to self-recurse — verify on all toolchains.
  return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
//   // Ensure that we won't compile any un-specialized types
//   __device__ T *getPointer()
//   {
//     extern __device__ void error(void);
//     error();
//     return NULL;
//   }
// };
// https://github.com/NVIDIA/apex/issues/246
// Typed accessor for the kernel's dynamic shared memory. Only declared for
// the general case so that unsupported element types fail at compile time;
// float and double are the only supported accumulation types.
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
  __device__ float* getPointer() {
    extern __shared__ float s_float[];
    return s_float;
  }
};
template <>
struct SharedMemory<double> {
  __device__ double* getPointer() {
    extern __shared__ double s_double[];
    return s_double;
  }
};
}  // namespace
// LayerNorm kernel: normalizes each of the n1 rows (length n2) of `vals`
// into `output_vals`. One (warp x blockDim.y) block processes rows
// i1 = blockIdx.y, blockIdx.y + gridDim.y, ... When `simplified` is set,
// mean subtraction and the beta shift are skipped. Per-row mean and inverse
// standard deviation are optionally recorded.
// Fix: NULL -> nullptr and C-style casts -> static_cast (idiom cleanup,
// no behavioral change).
// @param mean / inv_std_dev  optional per-row statistics outputs (may be null)
// @param epsilon  variance floor added before the reciprocal square root
// @param gamma / beta  optional scale/shift vectors of length n2 (may be null)
template <typename T, typename U, bool simplified>
__global__ void cuApplyLayerNorm(
    T* __restrict__ output_vals,
    U* __restrict__ mean,
    U* __restrict__ inv_std_dev,
    const T* __restrict__ vals,
    const int n1,
    const int n2,
    const U epsilon,
    const T* __restrict__ gamma,
    const T* __restrict__ beta) {
  // Assumptions:
  // 1) blockDim.x == GPU_WARP_SIZE
  // 2) Tensors are contiguous
  //
  for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
    SharedMemory<U> shared;
    U* buf = shared.getPointer();
    U mu, sigma2;
    cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf);
    const T* lvals = vals + i1 * n2;
    T* ovals = output_vals + i1 * n2;
    U c_inv_std_dev = rsqrt(sigma2 + epsilon);
    const int numx = blockDim.x * blockDim.y;
    const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
    for (int i = thrx; i < n2; i += numx) {
      U curr = static_cast<U>(lvals[i]);
      // gamma/beta are optional: default to scale 1 and shift 0
      T gamma_i = (gamma != nullptr) ? gamma[i] : static_cast<T>(1);
      T beta_i = (beta != nullptr) ? beta[i] : static_cast<T>(0);
      if (simplified) {
        ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * curr);
      } else {
        ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * (curr - mu)) + beta_i;
      }
    }
    // exactly one thread per block writes the per-row statistics
    if (threadIdx.x == 0 && threadIdx.y == 0) {
      if (mean != nullptr) mean[i1] = mu;
      if (inv_std_dev != nullptr) inv_std_dev[i1] = c_inv_std_dev;
    }
  }
}
// Host-side launcher for cuApplyLayerNorm: one (warp x 4) thread block per
// row, capped at the device's maximum grid-Y dimension (remaining rows are
// handled by the kernel's grid-stride loop over blockIdx.y).
// @param mean / inv_std_dev  optional per-row outputs (may be null)
// @param n1  number of rows; n2  row length
template <typename T, typename U, bool simplified>
void HostApplyLayerNorm(
    const cudaDeviceProp& prop,
    cudaStream_t stream,
    T* output,
    U* mean,
    U* inv_std_dev,
    const T* input,
    int n1,
    int n2,
    double epsilon,
    const T* gamma,
    const T* beta) {
  const int maxGridY = prop.maxGridSize[1];
  const int warp_size = prop.warpSize;
  ORT_ENFORCE(warp_size == GPU_WARP_SIZE);
  dim3 threads(warp_size, 4, 1);
#ifdef __HIP_PLATFORM_HCC__
  // Optimization for ROCm MI100
  threads.y = 1;
#endif
  const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1);
  // Shared memory for the inter-warp reduction: threads.y U's for the
  // (mu, sigma2) pairs plus threads.y/2 U's for the counts; none is needed
  // when only a single warp row is launched.
  int nshared =
      threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
  cuApplyLayerNorm<T, U, simplified><<<blocks, threads, nshared, stream>>>(
      output,
      mean,
      inv_std_dev,
      input,
      n1, n2,
      U(epsilon),
      gamma, beta);
}
// Explicit template instantiations exported for the LayerNorm /
// SimplifiedLayerNorm contrib ops: (input type T, accumulation type U,
// simplified flag).
#define LAYERNORM_LINEAR_IMPL(T, U, simplified) \
  template void HostApplyLayerNorm<T, U, simplified>(const cudaDeviceProp& prop, cudaStream_t stream, T* output, U* mean, U* inv_std_dev, const T* input, int n1, int n2, \
                                                     double epsilon, const T* gamma, const T* beta);
LAYERNORM_LINEAR_IMPL(float, float, true)
LAYERNORM_LINEAR_IMPL(half, float, true)
LAYERNORM_LINEAR_IMPL(double, double, true)
LAYERNORM_LINEAR_IMPL(float, float, false)
LAYERNORM_LINEAR_IMPL(half, float, false)
LAYERNORM_LINEAR_IMPL(double, double, false)
//LAYERNORM_LINEAR_IMPL(half, half)
// bfloat16 needs CUDA 11+ and either an SM80+ device pass or the host pass
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, true)
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, false)
#endif
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
#include "constitutive_models.cuh"
#include "particle_buffer.cuh"
#include "settings.h"
#include "utility_funcs.hpp"
#include <MnBase/Algorithm/MappingKernels.cuh>
#include <MnBase/Math/Matrix/MatrixUtils.h>
#include <MnSystem/Cuda/DeviceUtils.cuh>
namespace mn {
using namespace config;
using namespace placeholder;
/// Marks as active the grid block containing each particle: one thread per
/// particle converts the particle position to a cell index (with the 2-cell
/// halo shift), reduces it to a block index, and inserts it into the
/// partition's active-block table.
template <typename ParticleArray, typename Partition>
__global__ void activate_blocks(uint32_t particleCount, ParticleArray parray,
                                Partition partition) {
  const uint32_t pi = blockIdx.x * blockDim.x + threadIdx.x;
  if (pi >= particleCount)
    return;
  const int bx = int(std::lround(parray.val(_0, pi) / g_dx) - 2) / g_blocksize;
  const int by = int(std::lround(parray.val(_1, pi) / g_dx) - 2) / g_blocksize;
  const int bz = int(std::lround(parray.val(_2, pi) / g_dx) - 2) / g_blocksize;
  partition.insert(ivec3{bx, by, bz});
}
// Bins each particle into its grid cell: one thread per particle computes
// the cell-local index inside the owning block, atomically bumps that cell's
// particle counter, and records the particle id in the cell's bucket.
template <typename ParticleArray, typename ParticleBuffer, typename Partition>
__global__ void
build_particle_cell_buckets(uint32_t particleCount, ParticleArray parray,
                            ParticleBuffer pbuffer, Partition partition) {
  uint32_t parid = blockIdx.x * blockDim.x + threadIdx.x;
  if (parid >= particleCount)
    return;
  // cell coordinate (position / dx, shifted by the 2-cell halo)
  ivec3 coord{int(std::lround(parray.val(_0, parid) / g_dx) - 2),
              int(std::lround(parray.val(_1, parid) / g_dx) - 2),
              int(std::lround(parray.val(_2, parid) / g_dx) - 2)};
  // linearized cell index within the block (low bits of each axis)
  int cellno = (coord[0] & g_blockmask) * g_blocksize * g_blocksize +
               (coord[1] & g_blockmask) * g_blocksize +
               (coord[2] & g_blockmask);
  coord = coord / g_blocksize;
  auto blockno = partition.query(coord);
  // reserve the next slot in this cell's bucket
  auto pidic = atomicAdd(pbuffer._ppcs + blockno * g_blockvolume + cellno, 1);
  pbuffer._cellbuckets[blockno * g_particle_num_per_block + cellno * g_max_ppc +
                       pidic] = parid;
}
// Compacts the per-cell particle buckets of one grid block (one CUDA block
// each) into a single dense per-block bucket. Each thread owns one cell;
// atomicAggInc hands out consecutive per-block slots.
// Note: every thread executes all g_max_ppc iterations, so the
// __syncthreads() inside the loop is reached uniformly.
__global__ void cell_bucket_to_block(int *_ppcs, int *_cellbuckets, int *_ppbs,
                                     int *_buckets) {
  int cellno = threadIdx.x & (g_blockvolume - 1);
  int pcnt = _ppcs[blockIdx.x * g_blockvolume + cellno];
  for (int pidic = 0; pidic < g_max_ppc; pidic++) {
    if (pidic < pcnt) {
      auto pidib = atomicAggInc<int>(_ppbs + blockIdx.x);
      _buckets[blockIdx.x * g_particle_num_per_block + pidib] =
          _cellbuckets[blockIdx.x * g_particle_num_per_block +
                       cellno * g_max_ppc + pidic];
    }
    __syncthreads();
  }
}
/// One thread per block: the number of particle bins a block needs is the
/// ceiling of (particles in block / bin capacity).
__global__ void compute_bin_capacity(uint32_t blockCount, int const *_ppbs,
                                     int *_bincaps) {
  const uint32_t bno = blockIdx.x * blockDim.x + threadIdx.x;
  if (bno < blockCount) {
    const int particle_count = _ppbs[bno];
    _bincaps[bno] = (particle_count + g_bin_capacity - 1) / g_bin_capacity;
  }
}
/// Seeds the advection buckets: every particle starts with the zero block
/// offset (dir_offset of {0,0,0}) packed above its in-block index.
__global__ void init_adv_bucket(const int *_ppbs, int *_buckets) {
  const int particle_count = _ppbs[blockIdx.x];
  int *block_bucket = _buckets + blockIdx.x * g_particle_num_per_block;
  for (int pi = threadIdx.x; pi < particle_count; pi += blockDim.x)
    block_bucket[pi] =
        (dir_offset(ivec3{0, 0, 0}) * g_particle_num_per_block) | pi;
}
/// Zeroes all four channels of one grid block per CUDA block (channel 0 plus
/// three vector channels — mass and momentum in the P2G pipeline).
template <typename Grid> __global__ void clear_grid(Grid grid) {
  auto block = grid.ch(_0, blockIdx.x);
  for (int ci = threadIdx.x; ci < g_blockvolume; ci += blockDim.x) {
    block.val_1d(_0, ci) = 0.f;
    block.val_1d(_1, ci) = 0.f;
    block.val_1d(_2, ci) = 0.f;
    block.val_1d(_3, ci) = 0.f;
  }
}
/// Activates the 2x2x2 positive-direction neighborhood of every active block
/// (one thread per active block) so downstream scatters have a destination.
template <typename Partition>
__global__ void register_neighbor_blocks(uint32_t blockCount,
                                         Partition partition) {
  const uint32_t bno = blockIdx.x * blockDim.x + threadIdx.x;
  if (bno >= blockCount)
    return;
  const auto bid = partition._activeKeys[bno];
  for (char dx = 0; dx < 2; ++dx)
    for (char dy = 0; dy < 2; ++dy)
      for (char dz = 0; dz < 2; ++dz)
        partition.insert(ivec3{bid[0] + dx, bid[1] + dy, bid[2] + dz});
}
/// Activates the full 3x3x3 neighborhood (exterior halo) around every active
/// block, one thread per active block.
template <typename Partition>
__global__ void register_exterior_blocks(uint32_t blockCount,
                                         Partition partition) {
  const uint32_t bno = blockIdx.x * blockDim.x + threadIdx.x;
  if (bno >= blockCount)
    return;
  const auto bid = partition._activeKeys[bno];
  for (char dx = -1; dx < 2; ++dx)
    for (char dy = -1; dy < 2; ++dy)
      for (char dz = -1; dz < 2; ++dz)
        partition.insert(ivec3{bid[0] + dx, bid[1] + dy, bid[2] + dz});
}
// Initial particle-to-grid transfer: one thread per particle scatters its
// mass and momentum to the 3x3x3 neighborhood of grid nodes using
// bspline_weight interpolation weights, accumulating with atomics.
// @param dt    time step (enters the initial stress/affine term)
// @param mass  uniform per-particle mass
// @param v0    initial particle velocity
template <typename Grid, typename Partition>
__global__ void rasterize(uint32_t particleCount, const ParticleArray parray,
                          Grid grid, const Partition partition, float dt,
                          float mass, vec3 v0) {
  uint32_t parid = blockIdx.x * blockDim.x + threadIdx.x;
  if (parid >= particleCount)
    return;
  vec3 local_pos{parray.val(_0, parid), parray.val(_1, parid),
                 parray.val(_2, parid)};
  vec3 vel = v0;
  vec9 contrib, C;
  contrib.set(0.f), C.set(0.f);
  // With contrib and C both zero this is identically zero; kept to mirror
  // the general P2G update (affine momentum minus stress contribution).
  contrib = (C * mass - contrib * dt) * g_D_inv;
  ivec3 global_base_index{int(std::lround(local_pos[0] * g_dx_inv) - 1),
                          int(std::lround(local_pos[1] * g_dx_inv) - 1),
                          int(std::lround(local_pos[2] * g_dx_inv) - 1)};
  // position relative to the base node of the 3x3x3 stencil
  local_pos = local_pos - global_base_index * g_dx;
  vec<vec3, 3> dws;
  for (int d = 0; d < 3; ++d)
    dws[d] = bspline_weight(local_pos[d]);
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      for (int k = 0; k < 3; ++k) {
        ivec3 offset{i, j, k};
        vec3 xixp = offset * g_dx - local_pos;   // node-to-particle vector
        float W = dws[0][i] * dws[1][j] * dws[2][k];
        ivec3 local_index = global_base_index + offset;
        float wm = mass * W;                     // weighted mass share
        int blockno = partition.query(ivec3{local_index[0] >> g_blockbits,
                                            local_index[1] >> g_blockbits,
                                            local_index[2] >> g_blockbits});
        auto grid_block = grid.ch(_0, blockno);
        for (int d = 0; d < 3; ++d)
          local_index[d] &= g_blockmask;         // in-block cell coordinate
        // channel 0: mass; channels 1-3: momentum components
        atomicAdd(
            &grid_block.val(_0, local_index[0], local_index[1], local_index[2]),
            wm);
        atomicAdd(
            &grid_block.val(_1, local_index[0], local_index[1], local_index[2]),
            wm * vel[0] + (contrib[0] * xixp[0] + contrib[3] * xixp[1] +
                           contrib[6] * xixp[2]) *
                              W);
        atomicAdd(
            &grid_block.val(_2, local_index[0], local_index[1], local_index[2]),
            wm * vel[1] + (contrib[1] * xixp[0] + contrib[4] * xixp[1] +
                           contrib[7] * xixp[2]) *
                              W);
        atomicAdd(
            &grid_block.val(_3, local_index[0], local_index[1], local_index[2]),
            wm * vel[2] + (contrib[2] * xixp[0] + contrib[5] * xixp[1] +
                           contrib[8] * xixp[2]) *
                              W);
      }
}
/// Copies particle positions from the flat input array into the binned
/// JFluid particle buffer and initializes J (channel 3) to 1.
template <typename ParticleArray>
__global__ void array_to_buffer(ParticleArray parray,
                                ParticleBuffer<material_e::JFluid> pbuffer) {
  const uint32_t bno = blockIdx.x;
  const int pcnt = pbuffer._ppbs[bno];
  const auto bucket = pbuffer._blockbuckets + bno * g_particle_num_per_block;
  for (int pi = threadIdx.x; pi < pcnt; pi += blockDim.x) {
    const auto parid = bucket[pi];
    auto bin = pbuffer.ch(_0, pbuffer._binsts[bno] + pi / g_bin_capacity);
    const int slot = pi % g_bin_capacity;
    /// pos
    bin.val(_0, slot) = parray.val(_0, parid);
    bin.val(_1, slot) = parray.val(_1, parid);
    bin.val(_2, slot) = parray.val(_2, parid);
    /// J
    bin.val(_3, slot) = 1.f;
  }
}
/// Copies particle positions into the binned FixedCorotated particle buffer
/// and seeds the deformation gradient F (channels 3-11) with the identity.
template <typename ParticleArray>
__global__ void
array_to_buffer(ParticleArray parray,
                ParticleBuffer<material_e::FixedCorotated> pbuffer) {
  const uint32_t bno = blockIdx.x;
  const int pcnt = pbuffer._ppbs[bno];
  const auto bucket = pbuffer._blockbuckets + bno * g_particle_num_per_block;
  for (int pi = threadIdx.x; pi < pcnt; pi += blockDim.x) {
    const auto parid = bucket[pi];
    auto bin = pbuffer.ch(_0, pbuffer._binsts[bno] + pi / g_bin_capacity);
    const int slot = pi % g_bin_capacity;
    /// pos
    bin.val(_0, slot) = parray.val(_0, parid);
    bin.val(_1, slot) = parray.val(_1, parid);
    bin.val(_2, slot) = parray.val(_2, parid);
    /// F = identity
    bin.val(_3, slot) = 1.f;
    bin.val(_4, slot) = 0.f;
    bin.val(_5, slot) = 0.f;
    bin.val(_6, slot) = 0.f;
    bin.val(_7, slot) = 1.f;
    bin.val(_8, slot) = 0.f;
    bin.val(_9, slot) = 0.f;
    bin.val(_10, slot) = 0.f;
    bin.val(_11, slot) = 1.f;
  }
}
/// Copies particle positions into the binned Sand particle buffer, seeds the
/// deformation gradient F with the identity, and initializes logJp to the
/// material's logJp0 constant.
template <typename ParticleArray>
__global__ void array_to_buffer(ParticleArray parray,
                                ParticleBuffer<material_e::Sand> pbuffer) {
  const uint32_t bno = blockIdx.x;
  const int pcnt = pbuffer._ppbs[bno];
  const auto bucket = pbuffer._blockbuckets + bno * g_particle_num_per_block;
  for (int pi = threadIdx.x; pi < pcnt; pi += blockDim.x) {
    const auto parid = bucket[pi];
    auto bin = pbuffer.ch(_0, pbuffer._binsts[bno] + pi / g_bin_capacity);
    const int slot = pi % g_bin_capacity;
    /// pos
    bin.val(_0, slot) = parray.val(_0, parid);
    bin.val(_1, slot) = parray.val(_1, parid);
    bin.val(_2, slot) = parray.val(_2, parid);
    /// F = identity
    bin.val(_3, slot) = 1.f;
    bin.val(_4, slot) = 0.f;
    bin.val(_5, slot) = 0.f;
    bin.val(_6, slot) = 0.f;
    bin.val(_7, slot) = 1.f;
    bin.val(_8, slot) = 0.f;
    bin.val(_9, slot) = 0.f;
    bin.val(_10, slot) = 0.f;
    bin.val(_11, slot) = 1.f;
    /// logJp
    bin.val(_12, slot) = ParticleBuffer<material_e::Sand>::logJp0;
  }
}
/// Copies particle positions into the binned NACC particle buffer, seeds the
/// deformation gradient F with the identity, and initializes logJp to the
/// material's logJp0 constant.
template <typename ParticleArray>
__global__ void array_to_buffer(ParticleArray parray,
                                ParticleBuffer<material_e::NACC> pbuffer) {
  const uint32_t bno = blockIdx.x;
  const int pcnt = pbuffer._ppbs[bno];
  const auto bucket = pbuffer._blockbuckets + bno * g_particle_num_per_block;
  for (int pi = threadIdx.x; pi < pcnt; pi += blockDim.x) {
    const auto parid = bucket[pi];
    auto bin = pbuffer.ch(_0, pbuffer._binsts[bno] + pi / g_bin_capacity);
    const int slot = pi % g_bin_capacity;
    /// pos
    bin.val(_0, slot) = parray.val(_0, parid);
    bin.val(_1, slot) = parray.val(_1, parid);
    bin.val(_2, slot) = parray.val(_2, parid);
    /// F = identity
    bin.val(_3, slot) = 1.f;
    bin.val(_4, slot) = 0.f;
    bin.val(_5, slot) = 0.f;
    bin.val(_6, slot) = 0.f;
    bin.val(_7, slot) = 1.f;
    bin.val(_8, slot) = 0.f;
    bin.val(_9, slot) = 0.f;
    bin.val(_10, slot) = 0.f;
    bin.val(_11, slot) = 1.f;
    /// logJp
    bin.val(_12, slot) = ParticleBuffer<material_e::NACC>::logJp0;
  }
}
// Converts grid momentum to velocity (dividing by nodal mass), applies
// gravity and per-axis boundary clamping near the domain border, and reduces
// the maximum squared nodal velocity into *maxVel (used for CFL-bounded dt).
template <typename Grid, typename Partition>
__global__ void update_grid_velocity_query_max(uint32_t blockCount, Grid grid,
                                               Partition partition, float dt,
                                               float *maxVel) {
  constexpr int bc = g_bc;
  constexpr int numWarps =
      g_num_grid_blocks_per_cuda_block * g_num_warps_per_grid_block;
  constexpr unsigned activeMask = 0xffffffff;
  //__shared__ float sh_maxvels[g_blockvolume * g_num_grid_blocks_per_cuda_block
  /// 32];
  extern __shared__ float sh_maxvels[];
  // each warp group of this CUDA block handles one grid block
  std::size_t blockno = blockIdx.x * g_num_grid_blocks_per_cuda_block +
                        threadIdx.x / 32 / g_num_warps_per_grid_block;
  auto blockid = partition._activeKeys[blockno];
  // bitmask of axes on which this block touches the boundary shell
  // (bit 2: x, bit 1: y, bit 0: z)
  int isInBound = ((blockid[0] < bc || blockid[0] >= g_grid_size - bc) << 2) |
                  ((blockid[1] < bc || blockid[1] >= g_grid_size - bc) << 1) |
                  (blockid[2] < bc || blockid[2] >= g_grid_size - bc);
  if (threadIdx.x < numWarps)
    sh_maxvels[threadIdx.x] = 0.0f;
  __syncthreads();
  /// within-warp computations
  if (blockno < blockCount) {
    auto grid_block = grid.ch(_0, blockno);
    for (int cidib = threadIdx.x % 32; cidib < g_blockvolume; cidib += 32) {
      float mass = grid_block.val_1d(_0, cidib), velSqr = 0.f;
      vec3 vel;
      if (mass > 0.f) {
        mass = 1.f / mass;  // reciprocal: momentum -> velocity below
#if 0
        int i = (cidib >> (g_blockbits << 1)) & g_blockmask;
        int j = (cidib >> g_blockbits) & g_blockmask;
        int k = cidib & g_blockmask;
#endif
        vel[0] = grid_block.val_1d(_1, cidib);
        vel[1] = grid_block.val_1d(_2, cidib);
        vel[2] = grid_block.val_1d(_3, cidib);
        // zero the component along any axis touching the boundary shell;
        // note gravity is applied after the y-axis clamp
        vel[0] = isInBound & 4 ? 0.f : vel[0] * mass;
        vel[1] = isInBound & 2 ? 0.f : vel[1] * mass;
        vel[1] += g_gravity * dt;
        vel[2] = isInBound & 1 ? 0.f : vel[2] * mass;
        // if (isInBound) ///< sticky
        //   vel.set(0.f);
        grid_block.val_1d(_1, cidib) = vel[0];
        velSqr += vel[0] * vel[0];
        grid_block.val_1d(_2, cidib) = vel[1];
        velSqr += vel[1] * vel[1];
        grid_block.val_1d(_3, cidib) = vel[2];
        velSqr += vel[2] * vel[2];
      }
      // unsigned activeMask = __ballot_sync(0xffffffff, mv[0] != 0.0f);
      // warp-level max of velSqr via shuffle-down (iter = 1,2,4,8,16;
      // the loop condition iter % 32 terminates at 32)
      for (int iter = 1; iter % 32; iter <<= 1) {
        float tmp = __shfl_down_sync(activeMask, velSqr, iter, 32);
        if ((threadIdx.x % 32) + iter < 32)
          velSqr = tmp > velSqr ? tmp : velSqr;
      }
      // lane 0 of each warp records its running maximum
      if (velSqr > sh_maxvels[threadIdx.x / 32] && (threadIdx.x % 32) == 0)
        sh_maxvels[threadIdx.x / 32] = velSqr;
    }
  }
  __syncthreads();
  /// various assumptions
  // tree reduction of the per-warp maxima held in shared memory
  for (int interval = numWarps >> 1; interval > 0; interval >>= 1) {
    if (threadIdx.x < interval) {
      if (sh_maxvels[threadIdx.x + interval] > sh_maxvels[threadIdx.x])
        sh_maxvels[threadIdx.x] = sh_maxvels[threadIdx.x + interval];
    }
    __syncthreads();
  }
  // float atomicMax presumably provided by MnSystem/Cuda/DeviceUtils.cuh
  if (threadIdx.x == 0)
    atomicMax(maxVel, sh_maxvels[0]);
}
template <typename Partition, typename Grid>
__global__ void g2p2g(float dt, float newDt,
const ParticleBuffer<material_e::JFluid> pbuffer,
ParticleBuffer<material_e::JFluid> next_pbuffer,
const Partition prev_partition, Partition partition,
const Grid grid, Grid next_grid) {
static constexpr uint64_t numViPerBlock = g_blockvolume * 3;
static constexpr uint64_t numViInArena = numViPerBlock << 3;
static constexpr uint64_t numMViPerBlock = g_blockvolume * 4;
static constexpr uint64_t numMViInArena = numMViPerBlock << 3;
static constexpr unsigned arenamask = (g_blocksize << 1) - 1;
static constexpr unsigned arenabits = g_blockbits + 1;
extern __shared__ char shmem[];
using ViArena =
float(*)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
using ViArenaRef =
float(&)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
ViArenaRef __restrict__ g2pbuffer = *reinterpret_cast<ViArena>(shmem);
using MViArena =
float(*)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
using MViArenaRef =
float(&)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
MViArenaRef __restrict__ p2gbuffer =
*reinterpret_cast<MViArena>(shmem + numViInArena * sizeof(float));
int src_blockno = blockIdx.x;
int ppb = next_pbuffer._ppbs[src_blockno];
if (ppb == 0)
return;
auto blockid = partition._activeKeys[blockIdx.x];
for (int base = threadIdx.x; base < numViInArena; base += blockDim.x) {
char local_block_id = base / numViPerBlock;
auto blockno = partition.query(
ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
auto grid_block = grid.ch(_0, blockno);
int channelid = base % numViPerBlock;
char c = channelid & 0x3f;
char cz = channelid & g_blockmask;
char cy = (channelid >>= g_blockbits) & g_blockmask;
char cx = (channelid >>= g_blockbits) & g_blockmask;
channelid >>= g_blockbits;
float val;
if (channelid == 0)
val = grid_block.val_1d(_1, c);
else if (channelid == 1)
val = grid_block.val_1d(_2, c);
else
val = grid_block.val_1d(_3, c);
g2pbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
[cy + (local_block_id & 2 ? g_blocksize : 0)]
[cz + (local_block_id & 1 ? g_blocksize : 0)] = val;
}
__syncthreads();
for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
int loc = base;
char z = loc & arenamask;
char y = (loc >>= arenabits) & arenamask;
char x = (loc >>= arenabits) & arenamask;
p2gbuffer[loc >> arenabits][x][y][z] = 0.f;
}
__syncthreads();
for (int pidib = threadIdx.x; pidib < ppb; pidib += blockDim.x) {
int source_blockno, source_pidib;
ivec3 base_index;
{
int advect =
next_pbuffer
._blockbuckets[src_blockno * g_particle_num_per_block + pidib];
dir_components(advect / g_particle_num_per_block, base_index);
base_index += blockid;
source_blockno = prev_partition.query(base_index);
source_pidib = advect & (g_particle_num_per_block - 1);
source_blockno =
pbuffer._binsts[source_blockno] + source_pidib / g_bin_capacity;
}
vec3 pos;
float J;
{
auto source_particle_bin = pbuffer.ch(_0, source_blockno);
pos[0] = source_particle_bin.val(_0, source_pidib % g_bin_capacity);
pos[1] = source_particle_bin.val(_1, source_pidib % g_bin_capacity);
pos[2] = source_particle_bin.val(_2, source_pidib % g_bin_capacity);
J = source_particle_bin.val(_3, source_pidib % g_bin_capacity);
}
ivec3 local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
vec3 local_pos = pos - local_base_index * g_dx;
base_index = local_base_index;
vec3x3 dws;
#pragma unroll 3
for (int dd = 0; dd < 3; ++dd) {
float d =
(local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
g_dx_inv;
dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
d -= 1.0f;
dws(dd, 1) = 0.75 - d * d;
d = 0.5f + d;
dws(dd, 2) = 0.5 * d * d;
local_base_index[dd] = ((local_base_index[dd] - 1) & g_blockmask) + 1;
}
vec3 vel;
vel.set(0.f);
vec9 C;
C.set(0.f);
#pragma unroll 3
for (char i = 0; i < 3; i++)
#pragma unroll 3
for (char j = 0; j < 3; j++)
#pragma unroll 3
for (char k = 0; k < 3; k++) {
vec3 xixp = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
float W = dws(0, i) * dws(1, j) * dws(2, k);
vec3 vi{g2pbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
g2pbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
g2pbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k]};
vel += vi * W;
C[0] += W * vi[0] * xixp[0];
C[1] += W * vi[1] * xixp[0];
C[2] += W * vi[2] * xixp[0];
C[3] += W * vi[0] * xixp[1];
C[4] += W * vi[1] * xixp[1];
C[5] += W * vi[2] * xixp[1];
C[6] += W * vi[0] * xixp[2];
C[7] += W * vi[1] * xixp[2];
C[8] += W * vi[2] * xixp[2];
}
pos += vel * dt;
J = (1 + (C[0] + C[4] + C[8]) * dt * g_D_inv) * J;
if (J < 0.1)
J = 0.1;
vec9 contrib;
{
float voln = J * pbuffer.volume;
float pressure = pbuffer.bulk * (powf(J, -pbuffer.gamma) - 1.f);
{
contrib[0] =
((C[0] + C[0]) * g_D_inv * pbuffer.visco - pressure) * voln;
contrib[1] = (C[1] + C[3]) * g_D_inv * pbuffer.visco * voln;
contrib[2] = (C[2] + C[6]) * g_D_inv * pbuffer.visco * voln;
contrib[3] = (C[3] + C[1]) * g_D_inv * pbuffer.visco * voln;
contrib[4] =
((C[4] + C[4]) * g_D_inv * pbuffer.visco - pressure) * voln;
contrib[5] = (C[5] + C[7]) * g_D_inv * pbuffer.visco * voln;
contrib[6] = (C[6] + C[2]) * g_D_inv * pbuffer.visco * voln;
contrib[7] = (C[7] + C[5]) * g_D_inv * pbuffer.visco * voln;
contrib[8] =
((C[8] + C[8]) * g_D_inv * pbuffer.visco - pressure) * voln;
}
contrib = (C * pbuffer.mass - contrib * newDt) * g_D_inv;
{
auto particle_bin = next_pbuffer.ch(
_0, next_pbuffer._binsts[src_blockno] + pidib / g_bin_capacity);
particle_bin.val(_0, pidib % g_bin_capacity) = pos[0];
particle_bin.val(_1, pidib % g_bin_capacity) = pos[1];
particle_bin.val(_2, pidib % g_bin_capacity) = pos[2];
particle_bin.val(_3, pidib % g_bin_capacity) = J;
}
}
local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
{
int dirtag = dir_offset((base_index - 1) / g_blocksize -
(local_base_index - 1) / g_blocksize);
next_pbuffer.add_advection(partition, local_base_index - 1, dirtag,
pidib);
// partition.add_advection(local_base_index - 1, dirtag, pidib);
}
// dws[d] = bspline_weight(local_pos[d]);
#pragma unroll 3
for (char dd = 0; dd < 3; ++dd) {
local_pos[dd] = pos[dd] - local_base_index[dd] * g_dx;
float d =
(local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
g_dx_inv;
dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
d -= 1.0f;
dws(dd, 1) = 0.75 - d * d;
d = 0.5f + d;
dws(dd, 2) = 0.5 * d * d;
local_base_index[dd] = (((base_index[dd] - 1) & g_blockmask) + 1) +
local_base_index[dd] - base_index[dd];
}
#pragma unroll 3
for (char i = 0; i < 3; i++)
#pragma unroll 3
for (char j = 0; j < 3; j++)
#pragma unroll 3
for (char k = 0; k < 3; k++) {
pos = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
float W = dws(0, i) * dws(1, j) * dws(2, k);
auto wm = pbuffer.mass * W;
atomicAdd(
&p2gbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm);
atomicAdd(
&p2gbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[0] + (contrib[0] * pos[0] + contrib[3] * pos[1] +
contrib[6] * pos[2]) *
W);
atomicAdd(
&p2gbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[1] + (contrib[1] * pos[0] + contrib[4] * pos[1] +
contrib[7] * pos[2]) *
W);
atomicAdd(
&p2gbuffer[3][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[2] + (contrib[2] * pos[0] + contrib[5] * pos[1] +
contrib[8] * pos[2]) *
W);
}
}
__syncthreads();
/// arena no, channel no, cell no
for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
char local_block_id = base / numMViPerBlock;
auto blockno = partition.query(
ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
// auto grid_block = next_grid.template ch<0>(blockno);
int channelid = base & (numMViPerBlock - 1);
char c = channelid % g_blockvolume;
char cz = channelid & g_blockmask;
char cy = (channelid >>= g_blockbits) & g_blockmask;
char cx = (channelid >>= g_blockbits) & g_blockmask;
channelid >>= g_blockbits;
float val =
p2gbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
[cy + (local_block_id & 2 ? g_blocksize : 0)]
[cz + (local_block_id & 1 ? g_blocksize : 0)];
if (channelid == 0)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_0, c), val);
else if (channelid == 1)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_1, c), val);
else if (channelid == 2)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_2, c), val);
else
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_3, c), val);
}
}
/// Fused grid-to-particle-to-grid (G2P2G) transfer kernel for the
/// FixedCorotated hyperelastic material (MLS-MPM style transfer).
///
/// One CUDA block processes one active partition block:
///   1. Gather: stage the 3 grid velocity channels of the 2x2x2 block
///      neighborhood into shared memory (g2pbuffer).
///   2. Per particle: interpolate velocity and the affine velocity
///      gradient C with quadratic B-spline weights, advect the position,
///      update the deformation gradient F = (I + dt*C*D_inv) * F_old,
///      evaluate the fixed-corotated stress, and write the advected
///      particle state into next_pbuffer (re-bucketed via add_advection).
///   3. Scatter: accumulate mass and momentum (4 channels) into the
///      shared p2gbuffer, then atomically flush it into next_grid.
///
/// @param dt     time step used for advection and the F update
/// @param newDt  time step scaling the stress contribution (next step)
/// NOTE(review): index masking assumes g_blockvolume is a power of two
/// (holds for the 4^3 = 64 configuration) — confirm if block size changes.
template <typename Partition, typename Grid>
__global__ void g2p2g(float dt, float newDt,
                      const ParticleBuffer<material_e::FixedCorotated> pbuffer,
                      ParticleBuffer<material_e::FixedCorotated> next_pbuffer,
                      const Partition prev_partition, Partition partition,
                      const Grid grid, Grid next_grid) {
  // 3 gather channels (velocity); the arena spans a 2x2x2 neighborhood.
  static constexpr uint64_t numViPerBlock = g_blockvolume * 3;
  static constexpr uint64_t numViInArena = numViPerBlock << 3;
  // 4 scatter channels: mass + 3 momentum components.
  static constexpr uint64_t numMViPerBlock = g_blockvolume * 4;
  static constexpr uint64_t numMViInArena = numMViPerBlock << 3;
  static constexpr unsigned arenamask = (g_blocksize << 1) - 1;
  static constexpr unsigned arenabits = g_blockbits + 1;
  // Dynamic shared memory: the gather arena, followed by the scatter arena.
  extern __shared__ char shmem[];
  using ViArena =
      float(*)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  using ViArenaRef =
      float(&)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  ViArenaRef __restrict__ g2pbuffer = *reinterpret_cast<ViArena>(shmem);
  using MViArena =
      float(*)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  using MViArenaRef =
      float(&)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  MViArenaRef __restrict__ p2gbuffer =
      *reinterpret_cast<MViArena>(shmem + numViInArena * sizeof(float));
  int src_blockno = blockIdx.x;
  int ppb = next_pbuffer._ppbs[src_blockno];
  // Uniform across the CUDA block, so returning before __syncthreads is safe.
  if (ppb == 0)
    return;
  auto blockid = partition._activeKeys[blockIdx.x];
  // Phase 1: gather grid velocities of the 2x2x2 neighborhood.
  // Bits (4, 2, 1) of local_block_id select the (x, y, z) neighbor offset.
  for (int base = threadIdx.x; base < numViInArena; base += blockDim.x) {
    char local_block_id = base / numViPerBlock;
    auto blockno = partition.query(
        ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
              blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
              blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
    auto grid_block = grid.ch(_0, blockno);
    int channelid = base % numViPerBlock;
    // Cell index within the block; was the magic mask `& 0x3f`, which
    // silently assumed g_blockvolume == 64 (the scatter loop below already
    // uses the block-size-robust form).
    char c = channelid % g_blockvolume;
    char cz = channelid & g_blockmask;
    char cy = (channelid >>= g_blockbits) & g_blockmask;
    char cx = (channelid >>= g_blockbits) & g_blockmask;
    channelid >>= g_blockbits;
    float val;
    if (channelid == 0)
      val = grid_block.val_1d(_1, c);
    else if (channelid == 1)
      val = grid_block.val_1d(_2, c);
    else
      val = grid_block.val_1d(_3, c);
    g2pbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
             [cy + (local_block_id & 2 ? g_blocksize : 0)]
             [cz + (local_block_id & 1 ? g_blocksize : 0)] = val;
  }
  __syncthreads();
  // Phase 2: zero the scatter buffer.
  for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
    int loc = base;
    char z = loc & arenamask;
    char y = (loc >>= arenabits) & arenamask;
    char x = (loc >>= arenabits) & arenamask;
    p2gbuffer[loc >> arenabits][x][y][z] = 0.f;
  }
  __syncthreads();
  // Phase 3: per-particle G2P, constitutive update, and P2G scatter.
  for (int pidib = threadIdx.x; pidib < ppb; pidib += blockDim.x) {
    int source_blockno, source_pidib;
    ivec3 base_index;
    {
      // Decode the advection record: direction tag -> source block,
      // low bits -> particle index within that block's bucket.
      int advect =
          next_pbuffer
              ._blockbuckets[src_blockno * g_particle_num_per_block + pidib];
      dir_components(advect / g_particle_num_per_block, base_index);
      base_index += blockid;
      source_blockno = prev_partition.query(base_index);
      source_pidib = advect & (g_particle_num_per_block - 1);
      source_blockno =
          pbuffer._binsts[source_blockno] + source_pidib / g_bin_capacity;
    }
    vec3 pos;
    {
      auto source_particle_bin = pbuffer.ch(_0, source_blockno);
      pos[0] = source_particle_bin.val(_0, source_pidib % g_bin_capacity);
      pos[1] = source_particle_bin.val(_1, source_pidib % g_bin_capacity);
      pos[2] = source_particle_bin.val(_2, source_pidib % g_bin_capacity);
    }
    ivec3 local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
    vec3 local_pos = pos - local_base_index * g_dx;
    base_index = local_base_index;
    // Quadratic B-spline weights per axis.
    vec3x3 dws;
#pragma unroll 3
    for (int dd = 0; dd < 3; ++dd) {
      float d =
          (local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
          g_dx_inv;
      dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
      d -= 1.0f;
      dws(dd, 1) = 0.75 - d * d;
      d = 0.5f + d;
      dws(dd, 2) = 0.5 * d * d;
      local_base_index[dd] = ((local_base_index[dd] - 1) & g_blockmask) + 1;
    }
    vec3 vel;
    vel.set(0.f);
    vec9 C;
    C.set(0.f);
    // Interpolate velocity and accumulate the APIC affine matrix C.
#pragma unroll 3
    for (char i = 0; i < 3; i++)
#pragma unroll 3
      for (char j = 0; j < 3; j++)
#pragma unroll 3
        for (char k = 0; k < 3; k++) {
          vec3 xixp = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
          float W = dws(0, i) * dws(1, j) * dws(2, k);
          vec3 vi{g2pbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k],
                  g2pbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k],
                  g2pbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k]};
          vel += vi * W;
          C[0] += W * vi[0] * xixp[0];
          C[1] += W * vi[1] * xixp[0];
          C[2] += W * vi[2] * xixp[0];
          C[3] += W * vi[0] * xixp[1];
          C[4] += W * vi[1] * xixp[1];
          C[5] += W * vi[2] * xixp[1];
          C[6] += W * vi[0] * xixp[2];
          C[7] += W * vi[1] * xixp[2];
          C[8] += W * vi[2] * xixp[2];
        }
    pos += vel * dt;
    // Reuse dws as the incremental deformation I + dt * C * D_inv
    // (diagonal entries are d = 0, 4, 8, i.e. (d & 0x3) == 0).
#pragma unroll 9
    for (int d = 0; d < 9; ++d)
      dws.val(d) = C[d] * dt * g_D_inv + ((d & 0x3) ? 0.f : 1.f);
    vec9 contrib;
    {
      vec9 F;
      auto source_particle_bin = pbuffer.ch(_0, source_blockno);
      contrib[0] = source_particle_bin.val(_3, source_pidib % g_bin_capacity);
      contrib[1] = source_particle_bin.val(_4, source_pidib % g_bin_capacity);
      contrib[2] = source_particle_bin.val(_5, source_pidib % g_bin_capacity);
      contrib[3] = source_particle_bin.val(_6, source_pidib % g_bin_capacity);
      contrib[4] = source_particle_bin.val(_7, source_pidib % g_bin_capacity);
      contrib[5] = source_particle_bin.val(_8, source_pidib % g_bin_capacity);
      contrib[6] = source_particle_bin.val(_9, source_pidib % g_bin_capacity);
      contrib[7] = source_particle_bin.val(_10, source_pidib % g_bin_capacity);
      contrib[8] = source_particle_bin.val(_11, source_pidib % g_bin_capacity);
      // F_new = (I + dt * C * D_inv) * F_old
      matrixMatrixMultiplication3d(dws.data(), contrib.data(), F.data());
      {
        // Persist advected position and updated F into the next buffer.
        auto particle_bin = next_pbuffer.ch(
            _0, next_pbuffer._binsts[src_blockno] + pidib / g_bin_capacity);
        particle_bin.val(_0, pidib % g_bin_capacity) = pos[0];
        particle_bin.val(_1, pidib % g_bin_capacity) = pos[1];
        particle_bin.val(_2, pidib % g_bin_capacity) = pos[2];
        particle_bin.val(_3, pidib % g_bin_capacity) = F[0];
        particle_bin.val(_4, pidib % g_bin_capacity) = F[1];
        particle_bin.val(_5, pidib % g_bin_capacity) = F[2];
        particle_bin.val(_6, pidib % g_bin_capacity) = F[3];
        particle_bin.val(_7, pidib % g_bin_capacity) = F[4];
        particle_bin.val(_8, pidib % g_bin_capacity) = F[5];
        particle_bin.val(_9, pidib % g_bin_capacity) = F[6];
        particle_bin.val(_10, pidib % g_bin_capacity) = F[7];
        particle_bin.val(_11, pidib % g_bin_capacity) = F[8];
      }
      compute_stress_fixedcorotated(pbuffer.volume, pbuffer.mu, pbuffer.lambda,
                                    F, contrib);
      // Fused APIC momentum + stress contribution for the scatter phase.
      contrib = (C * pbuffer.mass - contrib * newDt) * g_D_inv;
    }
    local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
    {
      // Record which (possibly neighboring) block the particle advected to.
      int dirtag = dir_offset((base_index - 1) / g_blocksize -
                              (local_base_index - 1) / g_blocksize);
      next_pbuffer.add_advection(partition, local_base_index - 1, dirtag,
                                 pidib);
      // partition.add_advection(local_base_index - 1, dirtag, pidib);
    }
    // Recompute B-spline weights at the advected position.
    // dws[d] = bspline_weight(local_pos[d]);
#pragma unroll 3
    for (char dd = 0; dd < 3; ++dd) {
      local_pos[dd] = pos[dd] - local_base_index[dd] * g_dx;
      float d =
          (local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
          g_dx_inv;
      dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
      d -= 1.0f;
      dws(dd, 1) = 0.75 - d * d;
      d = 0.5f + d;
      dws(dd, 2) = 0.5 * d * d;
      local_base_index[dd] = (((base_index[dd] - 1) & g_blockmask) + 1) +
                             local_base_index[dd] - base_index[dd];
    }
    // Scatter mass and momentum into shared memory (pos is reused here as
    // the cell-to-particle offset xi - xp).
#pragma unroll 3
    for (char i = 0; i < 3; i++)
#pragma unroll 3
      for (char j = 0; j < 3; j++)
#pragma unroll 3
        for (char k = 0; k < 3; k++) {
          pos = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
          float W = dws(0, i) * dws(1, j) * dws(2, k);
          auto wm = pbuffer.mass * W;
          atomicAdd(
              &p2gbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm);
          atomicAdd(
              &p2gbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[0] + (contrib[0] * pos[0] + contrib[3] * pos[1] +
                             contrib[6] * pos[2]) *
                                W);
          atomicAdd(
              &p2gbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[1] + (contrib[1] * pos[0] + contrib[4] * pos[1] +
                             contrib[7] * pos[2]) *
                                W);
          atomicAdd(
              &p2gbuffer[3][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[2] + (contrib[2] * pos[0] + contrib[5] * pos[1] +
                             contrib[8] * pos[2]) *
                                W);
        }
  }
  __syncthreads();
  // Phase 4: flush the shared scatter buffer into next_grid.
  /// arena no, channel no, cell no
  for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
    char local_block_id = base / numMViPerBlock;
    auto blockno = partition.query(
        ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
              blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
              blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
    // auto grid_block = next_grid.template ch<0>(blockno);
    int channelid = base & (numMViPerBlock - 1);
    char c = channelid % g_blockvolume;
    char cz = channelid & g_blockmask;
    char cy = (channelid >>= g_blockbits) & g_blockmask;
    char cx = (channelid >>= g_blockbits) & g_blockmask;
    channelid >>= g_blockbits;
    float val =
        p2gbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
                 [cy + (local_block_id & 2 ? g_blocksize : 0)]
                 [cz + (local_block_id & 1 ? g_blocksize : 0)];
    if (channelid == 0)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_0, c), val);
    else if (channelid == 1)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_1, c), val);
    else if (channelid == 2)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_2, c), val);
    else
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_3, c), val);
  }
}
/// Fused grid-to-particle-to-grid (G2P2G) transfer kernel for the Sand
/// (Drucker-Prager plasticity) material (MLS-MPM style transfer).
///
/// One CUDA block processes one active partition block:
///   1. Gather: stage the 3 grid velocity channels of the 2x2x2 block
///      neighborhood into shared memory (g2pbuffer).
///   2. Per particle: interpolate velocity and the affine velocity
///      gradient C with quadratic B-spline weights, advect the position,
///      update the deformation gradient F and the hardening state logJp
///      via compute_stress_sand, and write the advected particle state
///      into next_pbuffer (re-bucketed via add_advection).
///   3. Scatter: accumulate mass and momentum (4 channels) into the
///      shared p2gbuffer, then atomically flush it into next_grid.
///
/// @param dt     time step used for advection and the F update
/// @param newDt  time step scaling the stress contribution (next step)
/// NOTE(review): index masking assumes g_blockvolume is a power of two
/// (holds for the 4^3 = 64 configuration) — confirm if block size changes.
template <typename Partition, typename Grid>
__global__ void g2p2g(float dt, float newDt,
                      const ParticleBuffer<material_e::Sand> pbuffer,
                      ParticleBuffer<material_e::Sand> next_pbuffer,
                      const Partition prev_partition, Partition partition,
                      const Grid grid, Grid next_grid) {
  // 3 gather channels (velocity); the arena spans a 2x2x2 neighborhood.
  static constexpr uint64_t numViPerBlock = g_blockvolume * 3;
  static constexpr uint64_t numViInArena = numViPerBlock << 3;
  // 4 scatter channels: mass + 3 momentum components.
  static constexpr uint64_t numMViPerBlock = g_blockvolume * 4;
  static constexpr uint64_t numMViInArena = numMViPerBlock << 3;
  static constexpr unsigned arenamask = (g_blocksize << 1) - 1;
  static constexpr unsigned arenabits = g_blockbits + 1;
  // Dynamic shared memory: the gather arena, followed by the scatter arena.
  extern __shared__ char shmem[];
  using ViArena =
      float(*)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  using ViArenaRef =
      float(&)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  ViArenaRef __restrict__ g2pbuffer = *reinterpret_cast<ViArena>(shmem);
  using MViArena =
      float(*)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  using MViArenaRef =
      float(&)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
  MViArenaRef __restrict__ p2gbuffer =
      *reinterpret_cast<MViArena>(shmem + numViInArena * sizeof(float));
  int src_blockno = blockIdx.x;
  // Hoist the per-block particle count out of the loop condition and skip
  // empty blocks entirely, matching the FixedCorotated variant of this
  // kernel. The branch is uniform across the CUDA block, so returning
  // before any __syncthreads is safe.
  int ppb = next_pbuffer._ppbs[src_blockno];
  if (ppb == 0)
    return;
  auto blockid = partition._activeKeys[blockIdx.x];
  // Phase 1: gather grid velocities of the 2x2x2 neighborhood.
  // Bits (4, 2, 1) of local_block_id select the (x, y, z) neighbor offset.
  for (int base = threadIdx.x; base < numViInArena; base += blockDim.x) {
    char local_block_id = base / numViPerBlock;
    auto blockno = partition.query(
        ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
              blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
              blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
    auto grid_block = grid.ch(_0, blockno);
    int channelid = base % numViPerBlock;
    // Cell index within the block; was the magic mask `& 0x3f`, which
    // silently assumed g_blockvolume == 64 (the scatter loop below already
    // uses the block-size-robust form).
    char c = channelid % g_blockvolume;
    char cz = channelid & g_blockmask;
    char cy = (channelid >>= g_blockbits) & g_blockmask;
    char cx = (channelid >>= g_blockbits) & g_blockmask;
    channelid >>= g_blockbits;
    float val;
    if (channelid == 0)
      val = grid_block.val_1d(_1, c);
    else if (channelid == 1)
      val = grid_block.val_1d(_2, c);
    else
      val = grid_block.val_1d(_3, c);
    g2pbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
             [cy + (local_block_id & 2 ? g_blocksize : 0)]
             [cz + (local_block_id & 1 ? g_blocksize : 0)] = val;
  }
  __syncthreads();
  // Phase 2: zero the scatter buffer.
  for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
    int loc = base;
    char z = loc & arenamask;
    char y = (loc >>= arenabits) & arenamask;
    char x = (loc >>= arenabits) & arenamask;
    p2gbuffer[loc >> arenabits][x][y][z] = 0.f;
  }
  __syncthreads();
  // Phase 3: per-particle G2P, constitutive update, and P2G scatter.
  for (int pidib = threadIdx.x; pidib < ppb; pidib += blockDim.x) {
    int source_blockno, source_pidib;
    ivec3 base_index;
    {
      // Decode the advection record: direction tag -> source block,
      // low bits -> particle index within that block's bucket.
      int advect =
          next_pbuffer
              ._blockbuckets[src_blockno * g_particle_num_per_block + pidib];
      dir_components(advect / g_particle_num_per_block, base_index);
      base_index += blockid;
      source_blockno = prev_partition.query(base_index);
      source_pidib = advect & (g_particle_num_per_block - 1);
      source_blockno =
          pbuffer._binsts[source_blockno] + source_pidib / g_bin_capacity;
    }
    vec3 pos;
    {
      auto source_particle_bin = pbuffer.ch(_0, source_blockno);
      pos[0] = source_particle_bin.val(_0, source_pidib % g_bin_capacity);
      pos[1] = source_particle_bin.val(_1, source_pidib % g_bin_capacity);
      pos[2] = source_particle_bin.val(_2, source_pidib % g_bin_capacity);
    }
    ivec3 local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
    vec3 local_pos = pos - local_base_index * g_dx;
    base_index = local_base_index;
    // Quadratic B-spline weights per axis.
    vec3x3 dws;
#pragma unroll 3
    for (int dd = 0; dd < 3; ++dd) {
      float d =
          (local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
          g_dx_inv;
      dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
      d -= 1.0f;
      dws(dd, 1) = 0.75 - d * d;
      d = 0.5f + d;
      dws(dd, 2) = 0.5 * d * d;
      local_base_index[dd] = ((local_base_index[dd] - 1) & g_blockmask) + 1;
    }
    vec3 vel;
    vel.set(0.f);
    vec9 C;
    C.set(0.f);
    // Interpolate velocity and accumulate the APIC affine matrix C.
#pragma unroll 3
    for (char i = 0; i < 3; i++)
#pragma unroll 3
      for (char j = 0; j < 3; j++)
#pragma unroll 3
        for (char k = 0; k < 3; k++) {
          vec3 xixp = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
          float W = dws(0, i) * dws(1, j) * dws(2, k);
          vec3 vi{g2pbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k],
                  g2pbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k],
                  g2pbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
                           [local_base_index[2] + k]};
          vel += vi * W;
          C[0] += W * vi[0] * xixp[0];
          C[1] += W * vi[1] * xixp[0];
          C[2] += W * vi[2] * xixp[0];
          C[3] += W * vi[0] * xixp[1];
          C[4] += W * vi[1] * xixp[1];
          C[5] += W * vi[2] * xixp[1];
          C[6] += W * vi[0] * xixp[2];
          C[7] += W * vi[1] * xixp[2];
          C[8] += W * vi[2] * xixp[2];
        }
    pos += vel * dt;
    // Reuse dws as the incremental deformation I + dt * C * D_inv
    // (diagonal entries are d = 0, 4, 8, i.e. (d & 0x3) == 0).
#pragma unroll 9
    for (int d = 0; d < 9; ++d)
      dws.val(d) = C[d] * dt * g_D_inv + ((d & 0x3) ? 0.f : 1.f);
    vec9 contrib;
    {
      vec9 F;
      float logJp;
      auto source_particle_bin = pbuffer.ch(_0, source_blockno);
      contrib[0] = source_particle_bin.val(_3, source_pidib % g_bin_capacity);
      contrib[1] = source_particle_bin.val(_4, source_pidib % g_bin_capacity);
      contrib[2] = source_particle_bin.val(_5, source_pidib % g_bin_capacity);
      contrib[3] = source_particle_bin.val(_6, source_pidib % g_bin_capacity);
      contrib[4] = source_particle_bin.val(_7, source_pidib % g_bin_capacity);
      contrib[5] = source_particle_bin.val(_8, source_pidib % g_bin_capacity);
      contrib[6] = source_particle_bin.val(_9, source_pidib % g_bin_capacity);
      contrib[7] = source_particle_bin.val(_10, source_pidib % g_bin_capacity);
      contrib[8] = source_particle_bin.val(_11, source_pidib % g_bin_capacity);
      logJp = source_particle_bin.val(_12, source_pidib % g_bin_capacity);
      // F_new = (I + dt * C * D_inv) * F_old, then project through the
      // Drucker-Prager yield surface (updates F and logJp in place).
      matrixMatrixMultiplication3d(dws.data(), contrib.data(), F.data());
      compute_stress_sand(pbuffer.volume, pbuffer.mu, pbuffer.lambda,
                          pbuffer.cohesion, pbuffer.beta, pbuffer.yieldSurface,
                          pbuffer.volumeCorrection, logJp, F, contrib);
      {
        // Persist advected position, projected F, and logJp.
        auto particle_bin = next_pbuffer.ch(
            _0, next_pbuffer._binsts[src_blockno] + pidib / g_bin_capacity);
        particle_bin.val(_0, pidib % g_bin_capacity) = pos[0];
        particle_bin.val(_1, pidib % g_bin_capacity) = pos[1];
        particle_bin.val(_2, pidib % g_bin_capacity) = pos[2];
        particle_bin.val(_3, pidib % g_bin_capacity) = F[0];
        particle_bin.val(_4, pidib % g_bin_capacity) = F[1];
        particle_bin.val(_5, pidib % g_bin_capacity) = F[2];
        particle_bin.val(_6, pidib % g_bin_capacity) = F[3];
        particle_bin.val(_7, pidib % g_bin_capacity) = F[4];
        particle_bin.val(_8, pidib % g_bin_capacity) = F[5];
        particle_bin.val(_9, pidib % g_bin_capacity) = F[6];
        particle_bin.val(_10, pidib % g_bin_capacity) = F[7];
        particle_bin.val(_11, pidib % g_bin_capacity) = F[8];
        particle_bin.val(_12, pidib % g_bin_capacity) = logJp;
      }
      // Fused APIC momentum + stress contribution for the scatter phase.
      contrib = (C * pbuffer.mass - contrib * newDt) * g_D_inv;
    }
    local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
    {
      // Record which (possibly neighboring) block the particle advected to.
      int dirtag = dir_offset((base_index - 1) / g_blocksize -
                              (local_base_index - 1) / g_blocksize);
      next_pbuffer.add_advection(partition, local_base_index - 1, dirtag,
                                 pidib);
      // partition.add_advection(local_base_index - 1, dirtag, pidib);
    }
    // Recompute B-spline weights at the advected position.
    // dws[d] = bspline_weight(local_pos[d]);
#pragma unroll 3
    for (char dd = 0; dd < 3; ++dd) {
      local_pos[dd] = pos[dd] - local_base_index[dd] * g_dx;
      float d =
          (local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
          g_dx_inv;
      dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
      d -= 1.0f;
      dws(dd, 1) = 0.75 - d * d;
      d = 0.5f + d;
      dws(dd, 2) = 0.5 * d * d;
      local_base_index[dd] = (((base_index[dd] - 1) & g_blockmask) + 1) +
                             local_base_index[dd] - base_index[dd];
    }
    // Scatter mass and momentum into shared memory (pos is reused here as
    // the cell-to-particle offset xi - xp).
#pragma unroll 3
    for (char i = 0; i < 3; i++)
#pragma unroll 3
      for (char j = 0; j < 3; j++)
#pragma unroll 3
        for (char k = 0; k < 3; k++) {
          pos = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
          float W = dws(0, i) * dws(1, j) * dws(2, k);
          auto wm = pbuffer.mass * W;
          atomicAdd(
              &p2gbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm);
          atomicAdd(
              &p2gbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[0] + (contrib[0] * pos[0] + contrib[3] * pos[1] +
                             contrib[6] * pos[2]) *
                                W);
          atomicAdd(
              &p2gbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[1] + (contrib[1] * pos[0] + contrib[4] * pos[1] +
                             contrib[7] * pos[2]) *
                                W);
          atomicAdd(
              &p2gbuffer[3][local_base_index[0] + i][local_base_index[1] + j]
                        [local_base_index[2] + k],
              wm * vel[2] + (contrib[2] * pos[0] + contrib[5] * pos[1] +
                             contrib[8] * pos[2]) *
                                W);
        }
  }
  __syncthreads();
  // Phase 4: flush the shared scatter buffer into next_grid.
  /// arena no, channel no, cell no
  for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
    char local_block_id = base / numMViPerBlock;
    auto blockno = partition.query(
        ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
              blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
              blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
    // auto grid_block = next_grid.template ch<0>(blockno);
    int channelid = base & (numMViPerBlock - 1);
    char c = channelid % g_blockvolume;
    char cz = channelid & g_blockmask;
    char cy = (channelid >>= g_blockbits) & g_blockmask;
    char cx = (channelid >>= g_blockbits) & g_blockmask;
    channelid >>= g_blockbits;
    float val =
        p2gbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
                 [cy + (local_block_id & 2 ? g_blocksize : 0)]
                 [cz + (local_block_id & 1 ? g_blocksize : 0)];
    if (channelid == 0)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_0, c), val);
    else if (channelid == 1)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_1, c), val);
    else if (channelid == 2)
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_2, c), val);
    else
      atomicAdd(&next_grid.ch(_0, blockno).val_1d(_3, c), val);
  }
}
template <typename Partition, typename Grid>
__global__ void g2p2g(float dt, float newDt,
const ParticleBuffer<material_e::NACC> pbuffer,
ParticleBuffer<material_e::NACC> next_pbuffer,
const Partition prev_partition, Partition partition,
const Grid grid, Grid next_grid) {
static constexpr uint64_t numViPerBlock = g_blockvolume * 3;
static constexpr uint64_t numViInArena = numViPerBlock << 3;
static constexpr uint64_t numMViPerBlock = g_blockvolume * 4;
static constexpr uint64_t numMViInArena = numMViPerBlock << 3;
static constexpr unsigned arenamask = (g_blocksize << 1) - 1;
static constexpr unsigned arenabits = g_blockbits + 1;
extern __shared__ char shmem[];
using ViArena =
float(*)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
using ViArenaRef =
float(&)[3][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
ViArenaRef __restrict__ g2pbuffer = *reinterpret_cast<ViArena>(shmem);
using MViArena =
float(*)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
using MViArenaRef =
float(&)[4][g_blocksize << 1][g_blocksize << 1][g_blocksize << 1];
MViArenaRef __restrict__ p2gbuffer =
*reinterpret_cast<MViArena>(shmem + numViInArena * sizeof(float));
int src_blockno = blockIdx.x;
auto blockid = partition._activeKeys[blockIdx.x];
for (int base = threadIdx.x; base < numViInArena; base += blockDim.x) {
char local_block_id = base / numViPerBlock;
auto blockno = partition.query(
ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
auto grid_block = grid.ch(_0, blockno);
int channelid = base % numViPerBlock;
char c = channelid & 0x3f;
char cz = channelid & g_blockmask;
char cy = (channelid >>= g_blockbits) & g_blockmask;
char cx = (channelid >>= g_blockbits) & g_blockmask;
channelid >>= g_blockbits;
float val;
if (channelid == 0)
val = grid_block.val_1d(_1, c);
else if (channelid == 1)
val = grid_block.val_1d(_2, c);
else
val = grid_block.val_1d(_3, c);
g2pbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
[cy + (local_block_id & 2 ? g_blocksize : 0)]
[cz + (local_block_id & 1 ? g_blocksize : 0)] = val;
}
__syncthreads();
for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
int loc = base;
char z = loc & arenamask;
char y = (loc >>= arenabits) & arenamask;
char x = (loc >>= arenabits) & arenamask;
p2gbuffer[loc >> arenabits][x][y][z] = 0.f;
}
__syncthreads();
for (int pidib = threadIdx.x; pidib < next_pbuffer._ppbs[src_blockno];
pidib += blockDim.x) {
int source_blockno, source_pidib;
ivec3 base_index;
{
int advect =
next_pbuffer
._blockbuckets[src_blockno * g_particle_num_per_block + pidib];
dir_components(advect / g_particle_num_per_block, base_index);
base_index += blockid;
source_blockno = prev_partition.query(base_index);
source_pidib = advect & (g_particle_num_per_block - 1);
source_blockno =
pbuffer._binsts[source_blockno] + source_pidib / g_bin_capacity;
}
vec3 pos;
{
auto source_particle_bin = pbuffer.ch(_0, source_blockno);
pos[0] = source_particle_bin.val(_0, source_pidib % g_bin_capacity);
pos[1] = source_particle_bin.val(_1, source_pidib % g_bin_capacity);
pos[2] = source_particle_bin.val(_2, source_pidib % g_bin_capacity);
}
ivec3 local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
vec3 local_pos = pos - local_base_index * g_dx;
base_index = local_base_index;
vec3x3 dws;
#pragma unroll 3
for (int dd = 0; dd < 3; ++dd) {
float d =
(local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
g_dx_inv;
dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
d -= 1.0f;
dws(dd, 1) = 0.75 - d * d;
d = 0.5f + d;
dws(dd, 2) = 0.5 * d * d;
local_base_index[dd] = ((local_base_index[dd] - 1) & g_blockmask) + 1;
}
vec3 vel;
vel.set(0.f);
vec9 C;
C.set(0.f);
#pragma unroll 3
for (char i = 0; i < 3; i++)
#pragma unroll 3
for (char j = 0; j < 3; j++)
#pragma unroll 3
for (char k = 0; k < 3; k++) {
vec3 xixp = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
float W = dws(0, i) * dws(1, j) * dws(2, k);
vec3 vi{g2pbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
g2pbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
g2pbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k]};
vel += vi * W;
C[0] += W * vi[0] * xixp[0];
C[1] += W * vi[1] * xixp[0];
C[2] += W * vi[2] * xixp[0];
C[3] += W * vi[0] * xixp[1];
C[4] += W * vi[1] * xixp[1];
C[5] += W * vi[2] * xixp[1];
C[6] += W * vi[0] * xixp[2];
C[7] += W * vi[1] * xixp[2];
C[8] += W * vi[2] * xixp[2];
}
pos += vel * dt;
#pragma unroll 9
for (int d = 0; d < 9; ++d)
dws.val(d) = C[d] * dt * g_D_inv + ((d & 0x3) ? 0.f : 1.f);
vec9 contrib;
{
vec9 F;
float logJp;
auto source_particle_bin = pbuffer.ch(_0, source_blockno);
contrib[0] = source_particle_bin.val(_3, source_pidib % g_bin_capacity);
contrib[1] = source_particle_bin.val(_4, source_pidib % g_bin_capacity);
contrib[2] = source_particle_bin.val(_5, source_pidib % g_bin_capacity);
contrib[3] = source_particle_bin.val(_6, source_pidib % g_bin_capacity);
contrib[4] = source_particle_bin.val(_7, source_pidib % g_bin_capacity);
contrib[5] = source_particle_bin.val(_8, source_pidib % g_bin_capacity);
contrib[6] = source_particle_bin.val(_9, source_pidib % g_bin_capacity);
contrib[7] = source_particle_bin.val(_10, source_pidib % g_bin_capacity);
contrib[8] = source_particle_bin.val(_11, source_pidib % g_bin_capacity);
logJp = source_particle_bin.val(_12, source_pidib % g_bin_capacity);
matrixMatrixMultiplication3d(dws.data(), contrib.data(), F.data());
compute_stress_nacc(pbuffer.volume, pbuffer.mu, pbuffer.lambda,
pbuffer.bm, pbuffer.xi, pbuffer.beta, pbuffer.Msqr,
pbuffer.hardeningOn, logJp, F, contrib);
{
auto particle_bin = next_pbuffer.ch(
_0, next_pbuffer._binsts[src_blockno] + pidib / g_bin_capacity);
particle_bin.val(_0, pidib % g_bin_capacity) = pos[0];
particle_bin.val(_1, pidib % g_bin_capacity) = pos[1];
particle_bin.val(_2, pidib % g_bin_capacity) = pos[2];
particle_bin.val(_3, pidib % g_bin_capacity) = F[0];
particle_bin.val(_4, pidib % g_bin_capacity) = F[1];
particle_bin.val(_5, pidib % g_bin_capacity) = F[2];
particle_bin.val(_6, pidib % g_bin_capacity) = F[3];
particle_bin.val(_7, pidib % g_bin_capacity) = F[4];
particle_bin.val(_8, pidib % g_bin_capacity) = F[5];
particle_bin.val(_9, pidib % g_bin_capacity) = F[6];
particle_bin.val(_10, pidib % g_bin_capacity) = F[7];
particle_bin.val(_11, pidib % g_bin_capacity) = F[8];
particle_bin.val(_12, pidib % g_bin_capacity) = logJp;
}
contrib = (C * pbuffer.mass - contrib * newDt) * g_D_inv;
}
local_base_index = (pos * g_dx_inv + 0.5f).cast<int>() - 1;
{
int dirtag = dir_offset((base_index - 1) / g_blocksize -
(local_base_index - 1) / g_blocksize);
next_pbuffer.add_advection(partition, local_base_index - 1, dirtag,
pidib);
// partition.add_advection(local_base_index - 1, dirtag, pidib);
}
// dws[d] = bspline_weight(local_pos[d]);
#pragma unroll 3
for (char dd = 0; dd < 3; ++dd) {
local_pos[dd] = pos[dd] - local_base_index[dd] * g_dx;
float d =
(local_pos[dd] - ((int)(local_pos[dd] * g_dx_inv + 0.5) - 1) * g_dx) *
g_dx_inv;
dws(dd, 0) = 0.5f * (1.5 - d) * (1.5 - d);
d -= 1.0f;
dws(dd, 1) = 0.75 - d * d;
d = 0.5f + d;
dws(dd, 2) = 0.5 * d * d;
local_base_index[dd] = (((base_index[dd] - 1) & g_blockmask) + 1) +
local_base_index[dd] - base_index[dd];
}
#pragma unroll 3
for (char i = 0; i < 3; i++)
#pragma unroll 3
for (char j = 0; j < 3; j++)
#pragma unroll 3
for (char k = 0; k < 3; k++) {
pos = vec3{(float)i, (float)j, (float)k} * g_dx - local_pos;
float W = dws(0, i) * dws(1, j) * dws(2, k);
auto wm = pbuffer.mass * W;
atomicAdd(
&p2gbuffer[0][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm);
atomicAdd(
&p2gbuffer[1][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[0] + (contrib[0] * pos[0] + contrib[3] * pos[1] +
contrib[6] * pos[2]) *
W);
atomicAdd(
&p2gbuffer[2][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[1] + (contrib[1] * pos[0] + contrib[4] * pos[1] +
contrib[7] * pos[2]) *
W);
atomicAdd(
&p2gbuffer[3][local_base_index[0] + i][local_base_index[1] + j]
[local_base_index[2] + k],
wm * vel[2] + (contrib[2] * pos[0] + contrib[5] * pos[1] +
contrib[8] * pos[2]) *
W);
}
}
__syncthreads();
/// arena no, channel no, cell no
for (int base = threadIdx.x; base < numMViInArena; base += blockDim.x) {
char local_block_id = base / numMViPerBlock;
auto blockno = partition.query(
ivec3{blockid[0] + ((local_block_id & 4) != 0 ? 1 : 0),
blockid[1] + ((local_block_id & 2) != 0 ? 1 : 0),
blockid[2] + ((local_block_id & 1) != 0 ? 1 : 0)});
// auto grid_block = next_grid.template ch<0>(blockno);
int channelid = base & (numMViPerBlock - 1);
char c = channelid % g_blockvolume;
char cz = channelid & g_blockmask;
char cy = (channelid >>= g_blockbits) & g_blockmask;
char cx = (channelid >>= g_blockbits) & g_blockmask;
channelid >>= g_blockbits;
float val =
p2gbuffer[channelid][cx + (local_block_id & 4 ? g_blocksize : 0)]
[cy + (local_block_id & 2 ? g_blocksize : 0)]
[cz + (local_block_id & 1 ? g_blocksize : 0)];
if (channelid == 0)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_0, c), val);
else if (channelid == 1)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_1, c), val);
else if (channelid == 2)
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_2, c), val);
else
atomicAdd(&next_grid.ch(_0, blockno).val_1d(_3, c), val);
}
}
/// Flag grid blocks whose first channel holds any nonzero cell value.
/// Launched with one thread per cell across all candidate blocks.
template <typename Grid>
__global__ void mark_active_grid_blocks(uint32_t blockCount, const Grid grid,
                                        int *_marks) {
  auto gid = blockIdx.x * blockDim.x + threadIdx.x;
  int bno = gid / g_blockvolume;
  int cno = gid % g_blockvolume;
  if (bno >= blockCount)
    return;
  // several cells of one block may set the same mark; the write is idempotent
  if (grid.ch(_0, bno).val_1d(_0, cno) != 0.f)
    _marks[bno] = 1;
}
/// Flag particle blocks that hold at least one particle.
/// _ppbs[b] is the particle count of block b.
__global__ void mark_active_particle_blocks(uint32_t blockCount,
                                            const int *__restrict__ _ppbs,
                                            int *_marks) {
  std::size_t bno = blockIdx.x * blockDim.x + threadIdx.x;
  if (bno < blockCount && _ppbs[bno] > 0)
    _marks[bno] = 1;
}
/// Compact the partition: slot b of next_partition receives the active key of
/// old slot _sourceNos[b] and is re-registered in the new hash table.
template <typename Partition>
__global__ void
update_partition(uint32_t blockCount, const int *__restrict__ _sourceNos,
                 const Partition partition, Partition next_partition) {
  uint32_t dst = blockIdx.x * blockDim.x + threadIdx.x;
  if (dst >= blockCount)
    return;
  uint32_t src = _sourceNos[dst];
  next_partition._activeKeys[dst] = partition._activeKeys[src];
  next_partition.reinsert(dst);
}
/// Remap per-block particle buckets after partition compaction: block b of the
/// new buffer receives the bucket of old block _sourceNos[b]. One CUDA block
/// handles one particle block; threads stride over that block's particles.
template <typename ParticleBuffer>
__global__ void
update_buckets(uint32_t blockCount, const int *__restrict__ _sourceNos,
               const ParticleBuffer pbuffer, ParticleBuffer next_pbuffer) {
  // shared slot broadcasts the source block index to the whole CUDA block
  __shared__ std::size_t sourceNo[1];
  std::size_t blockno = blockIdx.x;
  if (blockno >= blockCount)
    return;
  if (threadIdx.x == 0) {
    sourceNo[0] = _sourceNos[blockno];
    // carry over the particle count of the source block
    next_pbuffer._ppbs[blockno] = pbuffer._ppbs[sourceNo[0]];
  }
  __syncthreads(); // sourceNo[0] and the copied count are visible after this
  auto pcnt = next_pbuffer._ppbs[blockno];
  for (int pidib = threadIdx.x; pidib < pcnt; pidib += blockDim.x)
    next_pbuffer._blockbuckets[blockno * g_particle_num_per_block + pidib] =
        pbuffer._blockbuckets[sourceNo[0] * g_particle_num_per_block + pidib];
}
/// Copy all four channels of every marked grid block from the previous grid
/// into its (possibly relocated) slot in the current grid.
/// One CUDA block per grid block; threadIdx.x indexes the cell.
template <typename Partition, typename Grid>
__global__ void copy_selected_grid_blocks(
    const ivec3 *__restrict__ prev_blockids, const Partition partition,
    const int *__restrict__ _marks, Grid prev_grid, Grid grid) {
  auto blockid = prev_blockids[blockIdx.x];
  if (!_marks[blockIdx.x])
    return;
  auto blockno = partition.query(blockid);
  if (blockno == -1) // block no longer present in the partition
    return;
  auto src = prev_grid.ch(_0, blockIdx.x);
  auto dst = grid.ch(_0, blockno);
  dst.val_1d(_0, threadIdx.x) = src.val_1d(_0, threadIdx.x);
  dst.val_1d(_1, threadIdx.x) = src.val_1d(_1, threadIdx.x);
  dst.val_1d(_2, threadIdx.x) = src.val_1d(_2, threadIdx.x);
  dst.val_1d(_3, threadIdx.x) = src.val_1d(_3, threadIdx.x);
}
/// Sanity check: every active key must hash back to its own block index.
/// A mismatch means the key list and the hash table are out of sync.
/// @param blockCount number of active blocks to verify
/// @param partition  hash-table partition under test
template <typename Partition>
__global__ void check_table(uint32_t blockCount, Partition partition) {
  uint32_t blockno = blockIdx.x * blockDim.x + threadIdx.x;
  if (blockno >= blockCount)
    return;
  auto blockid = partition._activeKeys[blockno];
  // professional, actionable diagnostic (replaces a profane placeholder)
  if (partition.query(blockid) != blockno)
    printf("ERROR: partition table inconsistent at block %u\n", blockno);
}
/// Accumulate channel 0 of every cell (presumably mass — confirm against the
/// grid layout) into *sum. One CUDA block per grid block, one thread per cell.
template <typename Grid> __global__ void sum_grid_mass(Grid grid, float *sum) {
  atomicAdd(sum, grid.ch(_0, blockIdx.x).val_1d(_0, threadIdx.x));
}
/// Reduce the per-block particle counts _cnts[0..count) into *sum.
__global__ void sum_particle_count(uint32_t count, int *__restrict__ _cnts,
                                   int *sum) {
  auto i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < count)
    atomicAdd(sum, _cnts[i]);
}
/// Diagnostic: warn about suspicious active keys (any zero component) and
/// verify that the hash lookup of each key returns the key's own index,
/// printing both sides of any mismatch.
template <typename Partition>
__global__ void check_partition(uint32_t blockCount, Partition partition) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= blockCount)
    return;
  ivec3 blockid = partition._activeKeys[idx];
  // NOTE(review): this fires on any single zero component, not only the
  // all-zero key — preserved as-is; message made professional
  if (blockid[0] == 0 || blockid[1] == 0 || blockid[2] == 0)
    printf("\tERROR: encountered zero block record\n");
  if (partition.query(blockid) != idx) {
    int id = partition.query(blockid);
    ivec3 bid = partition._activeKeys[id];
    printf("\t\tcheck partition %d, (%d, %d, %d), feedback index %d, (%d, %d, "
           "%d)\n",
           idx, (int)blockid[0], (int)blockid[1], (int)blockid[2], id, bid[0],
           bid[1], bid[2]);
  }
}
/// Diagnostic: report every active block whose id lies inside the given
/// domain; `did` only labels the output.
template <typename Partition, typename Domain>
__global__ void check_partition_domain(uint32_t blockCount, int did,
                                       Domain const domain,
                                       Partition partition) {
  int bno = blockIdx.x * blockDim.x + threadIdx.x;
  if (bno >= blockCount)
    return;
  ivec3 key = partition._activeKeys[bno];
  if (!domain.inside(key))
    return;
  printf(
      "%d-th block (%d, %d, %d) is in domain[%d] (%d, %d, %d)-(%d, %d, %d)\n",
      bno, key[0], key[1], key[2], did, domain._min[0],
      domain._min[1], domain._min[2], domain._max[0], domain._max[1],
      domain._max[2]);
}
/// Gather particle positions from the binned, advected particle buffer into a
/// flat output array. One CUDA block per active block; threads stride over
/// that block's particles. *_parcnt is atomically incremented per particle and
/// doubles as the output write cursor.
template <typename Partition, typename ParticleBuffer, typename ParticleArray>
__global__ void
retrieve_particle_buffer(Partition partition, Partition prev_partition,
                         ParticleBuffer pbuffer, ParticleBuffer next_pbuffer,
                         ParticleArray parray, int *_parcnt) {
  int pcnt = next_pbuffer._ppbs[blockIdx.x];
  ivec3 blockid = partition._activeKeys[blockIdx.x];
  auto advection_bucket =
      next_pbuffer._blockbuckets + blockIdx.x * g_particle_num_per_block;
  // auto particle_offset = pbuffer._binsts[blockIdx.x];
  for (int pidib = threadIdx.x; pidib < pcnt; pidib += blockDim.x) {
    // each bucket entry packs a direction tag (high part) with the particle's
    // slot in its source block (low part, modulo g_particle_num_per_block)
    auto advect = advection_bucket[pidib];
    ivec3 source_blockid;
    dir_components(advect / g_particle_num_per_block, source_blockid);
    source_blockid += blockid; // neighbor block the particle advected from
    auto source_blockno = prev_partition.query(source_blockid);
    auto source_pidib = advect % g_particle_num_per_block;
    // locate the storage bin holding this particle within the source block
    auto source_bin = pbuffer.ch(_0, pbuffer._binsts[source_blockno] +
                                         source_pidib / g_bin_capacity);
    auto _source_pidib = source_pidib % g_bin_capacity;
    auto parid = atomicAdd(_parcnt, 1); // claim an output slot
    /// pos
    parray.val(_0, parid) = source_bin.val(_0, _source_pidib);
    parray.val(_1, parid) = source_bin.val(_1, _source_pidib);
    parray.val(_2, parid) = source_bin.val(_2, _source_pidib);
  }
}
} // namespace mn
#endif | the_stack |
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
/*
* struct types for concisely passing unitaries to kernels
*/
// hide these from doxygen
/// \cond HIDDEN_SYMBOLS
/// 2x2 complex matrix flattened into named element fields, so that a
/// ComplexMatrix2 (array-of-arrays) can be passed to a kernel by value.
typedef struct ArgMatrix2 {
    Complex r0c0, r0c1;
    Complex r1c0, r1c1;
} ArgMatrix2;
/// 4x4 complex matrix flattened into named element fields, so that a
/// ComplexMatrix4 can be passed to a kernel by value.
typedef struct ArgMatrix4
{
    Complex r0c0, r0c1, r0c2, r0c3;
    Complex r1c0, r1c1, r1c2, r1c3;
    Complex r2c0, r2c1, r2c2, r2c3;
    Complex r3c0, r3c1, r3c2, r3c3;
} ArgMatrix4;
/** Repack a ComplexMatrix2 into the kernel-friendly ArgMatrix2 layout. */
ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) {
    ArgMatrix2 a;
    // address table lets a plain double loop fill the named fields
    Complex* elems[2][2] = {{&a.r0c0, &a.r0c1}, {&a.r1c0, &a.r1c1}};
    for (int r=0; r<2; r++)
        for (int c=0; c<2; c++) {
            elems[r][c]->real = m.real[r][c];
            elems[r][c]->imag = m.imag[r][c];
        }
    return a;
}
/** Repack a ComplexMatrix4 into the kernel-friendly ArgMatrix4 layout. */
ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) {
    ArgMatrix4 a;
    // address table lets a plain double loop fill the sixteen named fields
    Complex* elems[4][4] = {
        {&a.r0c0, &a.r0c1, &a.r0c2, &a.r0c3},
        {&a.r1c0, &a.r1c1, &a.r1c2, &a.r1c3},
        {&a.r2c0, &a.r2c1, &a.r2c2, &a.r2c3},
        {&a.r3c0, &a.r3c1, &a.r3c2, &a.r3c3}};
    for (int r=0; r<4; r++)
        for (int c=0; c<4; c++) {
            elems[r][c]->real = m.real[r][c];
            elems[r][c]->imag = m.imag[r][c];
        }
    return a;
}
/// \endcond
/*
* in-kernel bit twiddling functions
*/
/* returns the value (0 or 1) of the bit at the given position from the right */
__forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) {
    return (theEncodedNumber >> locationOfBitFromRight) & 1;
}
/* returns 1 if the number of set bits in mask is odd, else 0.
 * Kernighan's trick: each iteration clears the lowest set bit. */
__forceinline__ __device__ int getBitMaskParity(long long int mask) {
    int parity = 0;
    for (; mask; mask &= mask - 1)
        parity ^= 1;
    return parity;
}
/* returns number with the bit at index bitInd toggled */
__forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) {
    return (number ^ (1LL << bitInd));
}
/* shifts the bits at and above position index up by one place, leaving a 0
 * bit at position index; bits below index are unchanged */
__forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) {
    long long int upper = (number >> index) << index; // bits >= index
    long long int lower = number ^ upper;             // bits <  index
    return (upper << 1) | lower;
}
/* inserts 0 bits at final positions bit1 and bit2; the lower position is
 * inserted first so that both final indices come out correct */
__forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) {
    int lo = (bit1 < bit2)? bit1 : bit2;
    int hi = (bit1 < bit2)? bit2 : bit1;
    return insertZeroBit(insertZeroBit(number, lo), hi);
}
/* inserts a 0 bit at each of the numInds (unique, non-negative) final
 * positions in inds, processing them in ascending order via repeated
 * minimum-selection */
__forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) {
    /* inserted bit inds must strictly increase, so that their final indices are correct.
     * in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already
     * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each
     * at each insert. recall every element of inds (a positive or zero number) is unique.
     * This function won't appear in the CPU code, which can use C99 variable-size arrays and
     * ought to make a sorted array before threading
     */
    int curMin = inds[0];
    int prevMin = -1;
    for (int n=0; n < numInds; n++) {
        // find next min: smallest index strictly greater than the last inserted
        for (int t=0; t < numInds; t++)
            if (inds[t]>prevMin && inds[t]<curMin)
                curMin = inds[t];
        number = insertZeroBit(number, curMin);
        // set curMin to an arbitrary non-visited elem (upper bound for next scan)
        prevMin = curMin;
        for (int t=0; t < numInds; t++)
            if (inds[t] > curMin) {
                curMin = inds[t];
                break;
            }
    }
    return number;
}
/*
* state vector and density matrix operations
*/
#ifdef __cplusplus
extern "C" {
#endif
/** Overwrite numAmps amplitudes of the GPU statevector, beginning at
 * startInd, with the given host arrays of real and imaginary parts. */
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
    cudaDeviceSynchronize();
    size_t numBytes = numAmps * sizeof(*(qureg.deviceStateVec.real));
    cudaMemcpy(
        qureg.deviceStateVec.real + startInd, reals,
        numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(
        qureg.deviceStateVec.imag + startInd, imags,
        numBytes, cudaMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
    // overwrite target's GPU amplitudes with copyQureg's (device-to-device)
    cudaDeviceSynchronize();
    size_t numBytes = targetQureg.numAmpsPerChunk * sizeof(*(targetQureg.deviceStateVec.real));
    cudaMemcpy(
        targetQureg.deviceStateVec.real, copyQureg.deviceStateVec.real,
        numBytes, cudaMemcpyDeviceToDevice);
    cudaMemcpy(
        targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.imag,
        numBytes, cudaMemcpyDeviceToDevice);
}
/** Kernel: fill the density matrix with the outer product |copy><copy| of a
 * pure state. Each thread owns one row index and writes every element
 * target[col*dim + index] = copy[index] * conj(copy[col]).
 */
__global__ void densmatr_initPureStateKernel(
    long long int numPureAmps,
    qreal *targetVecReal, qreal *targetVecImag,
    qreal *copyVecReal, qreal *copyVecImag)
{
    // this is a particular index of the pure copyQureg
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=numPureAmps) return;
    qreal realRow = copyVecReal[index];
    qreal imagRow = copyVecImag[index];
    for (long long int col=0; col < numPureAmps; col++) {
        qreal realCol = copyVecReal[col];
        qreal imagCol = - copyVecImag[col]; // minus for conjugation
        targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
        targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
    }
}
/** Populate density matrix targetQureg with |copy><copy| of the pure state
 * copyQureg; one thread per pure-state amplitude (i.e. per matrix row). */
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/numThreads);
    densmatr_initPureStateKernel<<<numBlocks, numThreads>>>(
        copyQureg.numAmpsPerChunk,
        targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
        copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
    // every density-matrix element becomes the uniform probability factor
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    stateVecReal[ind] = probFactor;
    stateVecImag[ind] = 0.0;
}
/** Set a density matrix to the plus state: every element is 1/2^N for the N
 * represented qubits. */
void densmatr_initPlusState(Qureg qureg)
{
    qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    densmatr_initPlusStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        probFactor,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
    long long int densityNumElems,
    qreal *densityReal, qreal *densityImag,
    long long int densityInd)
{
    // zeros everywhere, with a single probability-1 entry at the classical index
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= densityNumElems) return;
    densityReal[ind] = (ind == densityInd) ? 1.0 : 0.0;
    densityImag[ind] = 0.0;
}
/** Set a density matrix to the classical pure state |stateInd><stateInd|. */
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    // flat index of element (stateInd, stateInd) in the densityDim^2 matrix
    long long int densityDim = 1LL << qureg.numQubitsRepresented;
    long long int densityInd = (densityDim + 1)*stateInd;
    // identical to pure version
    densmatr_initClassicalStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag, densityInd);
}
/** Allocate host and device storage for a numQubits-qubit statevector and
 * initialise the bookkeeping fields of *qureg. Exits the process if any
 * allocation fails.
 * @param qureg     register to populate (output)
 * @param numQubits number of qubits; 2^numQubits amplitudes in total
 * @param env       environment supplying rank/numRanks for chunking
 */
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
    // allocate CPU memory
    // 1LL: a 32-bit `long` would overflow for numQubits >= 31 on LLP64 platforms
    long long int numAmps = 1LL << numQubits;
    long long int numAmpsPerRank = numAmps/env.numRanks;
    // size by the pointee (qreal), not the pointer: sizeof(qureg->stateVec.real)
    // under-allocates whenever sizeof(qreal) != sizeof(qreal*) (e.g. quad precision)
    qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
    if (env.numRanks>1){
        qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
    }
    // check cpu memory allocation was successful
    if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
            && numAmpsPerRank ) {
        printf("Could not allocate memory!\n");
        exit (EXIT_FAILURE);
    }
    if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
            && numAmpsPerRank ) {
        printf("Could not allocate memory!\n");
        exit (EXIT_FAILURE);
    }
    qureg->numQubitsInStateVec = numQubits;
    qureg->numAmpsPerChunk = numAmpsPerRank;
    qureg->numAmpsTotal = numAmps;
    qureg->chunkId = env.rank;
    qureg->numChunks = env.numRanks;
    qureg->isDensityMatrix = 0;
    // allocate GPU memory
    cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
    cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
    // scratch buffers for two-level reductions (e.g. probability sums)
    cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
    cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
            sizeof(qreal));
    // check gpu memory allocation was successful
    if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
        printf("Could not allocate memory on GPU!\n");
        exit (EXIT_FAILURE);
    }
}
/** Release every host and device buffer owned by qureg. */
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
    // Free GPU memory
    cudaFree(qureg.deviceStateVec.real);
    cudaFree(qureg.deviceStateVec.imag);
    cudaFree(qureg.firstLevelReduction);
    cudaFree(qureg.secondLevelReduction);
    // Free CPU memory
    free(qureg.stateVec.real);
    free(qureg.stateVec.imag);
    if (env.numRanks>1){
        free(qureg.pairStateVec.real);
        free(qureg.pairStateVec.imag);
    }
}
/** Allocate a diagonal operator over numQubits qubits with zero-initialised
 * host and device storage. Exits with failure if either allocation fails.
 */
DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) {
    DiagonalOp op;
    op.numQubits = numQubits;
    op.numElemsPerChunk = (1LL << numQubits) / env.numRanks;
    op.chunkId = env.rank;
    op.numChunks = env.numRanks;
    // allocate CPU memory (initialised to zero)
    op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
    op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal));
    // @TODO no handling of rank>1 allocation (no distributed GPU)
    // check cpu memory allocation was successful
    if ( !op.real || !op.imag ) {
        printf("Could not allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    // allocate GPU memory
    size_t arrSize = op.numElemsPerChunk * sizeof(qreal);
    cudaMalloc(&(op.deviceOperator.real), arrSize);
    cudaMalloc(&(op.deviceOperator.imag), arrSize);
    // check gpu memory allocation was successful
    // NOTE(review): checking the pointers rather than cudaMalloc's return code
    // relies on the pointers being nulled on failure — confirm, or test the
    // cudaError_t instead
    if (!op.deviceOperator.real || !op.deviceOperator.imag) {
        printf("Could not allocate memory on GPU!\n");
        exit(EXIT_FAILURE);
    }
    // initialise GPU memory to zero
    cudaMemset(op.deviceOperator.real, 0, arrSize);
    cudaMemset(op.deviceOperator.imag, 0, arrSize);
    return op;
}
/** Release the device and host storage of a diagonal operator. */
void agnostic_destroyDiagonalOp(DiagonalOp op) {
    cudaFree(op.deviceOperator.real);
    cudaFree(op.deviceOperator.imag);
    free(op.real);
    free(op.imag);
}
/** Push the host copies (op.real / op.imag) of a diagonal operator into its
 * device storage. */
void agnostic_syncDiagonalOp(DiagonalOp op) {
    cudaDeviceSynchronize();
    size_t numBytes = op.numElemsPerChunk * sizeof *op.real;
    cudaMemcpy(op.deviceOperator.real, op.real, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(op.deviceOperator.imag, op.imag, numBytes, cudaMemcpyHostToDevice);
}
/** Kernel: compute each diagonal element of a PauliHamil restricted to Z and
 * identity terms. Element i receives sum_t coeff[t] * (+-1), with sign -1
 * exactly when an odd number of Z-targeted qubits are 1 in basis index i.
 */
__global__ void agnostic_initDiagonalOpFromPauliHamilKernel(
    DiagonalOp op, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms
) {
    // each thread processes one diagonal element
    long long int elemInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (elemInd >= op.numElemsPerChunk)
        return;
    qreal elem = 0;
    // elem is (+-) every coefficient, with sign determined by parity
    for (int t=0; t<numSumTerms; t++) {
        // determine the parity of the Z-targeted qubits in the element's corresponding state
        int isOddNumOnes = 0;
        for (int q=0; q<op.numQubits; q++)
            if (pauliCodes[q + t*op.numQubits] == PAULI_Z)
                if (extractBit(q, elemInd))
                    isOddNumOnes = !isOddNumOnes;
        // avoid warp divergence
        int sign = 1 - 2*isOddNumOnes; // (-1 if isOddNumOnes, else +1)
        elem += termCoeffs[t] * sign;
    }
    op.deviceOperator.real[elemInd] = elem;
    op.deviceOperator.imag[elemInd] = 0; // Z/I products have a real diagonal
}
/** Populate a diagonal operator from a Z/identity PauliHamil: upload the
 * Pauli codes and coefficients, run the kernel, then mirror the populated
 * device operator back into the host arrays (op.real / op.imag).
 */
void agnostic_initDiagonalOpFromPauliHamil(DiagonalOp op, PauliHamil hamil) {
    // copy args into GPU memory
    enum pauliOpType* d_pauliCodes;
    size_t mem_pauliCodes = hamil.numSumTerms * op.numQubits * sizeof *d_pauliCodes;
    cudaMalloc(&d_pauliCodes, mem_pauliCodes);
    cudaMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, cudaMemcpyHostToDevice);
    qreal* d_termCoeffs;
    size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs;
    cudaMalloc(&d_termCoeffs, mem_termCoeffs);
    cudaMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, cudaMemcpyHostToDevice);
    int numThreadsPerBlock = 128;
    int numBlocks = ceil(op.numElemsPerChunk / (qreal) numThreadsPerBlock);
    agnostic_initDiagonalOpFromPauliHamilKernel<<<numBlocks, numThreadsPerBlock>>>(
        op, d_pauliCodes, d_termCoeffs, hamil.numSumTerms);
    // copy the populated operator back into RAM
    // (the synchronize also ensures the kernel has finished before the copy)
    cudaDeviceSynchronize();
    size_t mem_elems = op.numElemsPerChunk * sizeof *op.real;
    cudaMemcpy(op.real, op.deviceOperator.real, mem_elems, cudaMemcpyDeviceToHost);
    cudaMemcpy(op.imag, op.deviceOperator.imag, mem_elems, cudaMemcpyDeviceToHost);
    cudaFree(d_pauliCodes);
    cudaFree(d_termCoeffs);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct cudaDeviceProp properties;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
/** Create the single-rank GPU QuEST environment, aborting the process if no
 * CUDA-capable device is available, and seed the default RNG. */
QuESTEnv createQuESTEnv(void) {
    if (!GPUExists()){
        printf("Trying to run GPU code with no GPU available\n");
        exit(EXIT_FAILURE);
    }
    QuESTEnv env;
    env.rank=0;       // GPU build is single-process
    env.numRanks=1;
    env.seeds = NULL; // populated by seedQuESTDefault below
    env.numSeeds = 0;
    seedQuESTDefault(&env);
    return env;
}
/** Block until all queued GPU work has completed. */
void syncQuESTEnv(QuESTEnv env){
    cudaDeviceSynchronize();
}
/** Single-rank build: no cross-rank agreement needed, so the local success
 * code is returned unchanged. */
int syncQuESTSuccess(int successCode){
    return successCode;
}
/** Release environment resources (currently just the RNG seed array). */
void destroyQuESTEnv(QuESTEnv env){
    free(env.seeds);
}
/** Print a human-readable summary of the execution environment to stdout. */
void reportQuESTEnv(QuESTEnv env){
    printf("EXECUTION ENVIRONMENT:\n");
    printf("Running locally on one node with GPU\n");
    printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
    printf("OpenMP enabled\n");
    printf("Number of threads available is %d\n", omp_get_max_threads());
# else
    printf("OpenMP disabled\n");
# endif
}
/** Write a short capability summary (CUDA/OpenMP/MPI/threads/ranks) into str.
 * @param env unused in the GPU build
 * @param str caller-supplied buffer of at least 200 chars
 */
void getEnvironmentString(QuESTEnv env, char str[200]){
    // OpenMP can be hybridised with GPU in future, so this check is safe and worthwhile
    int ompStatus=0;
    int numThreads=1;
# ifdef _OPENMP
    ompStatus=1;
    numThreads=omp_get_max_threads();
# endif
    // there is no reporting of CUDA cores/threads/blocks currently (since non-trivial)
    // snprintf (not sprintf) guarantees the 200-char buffer cannot overflow
    snprintf(str, 200, "CUDA=1 OpenMP=%d MPI=0 threads=%d ranks=1", ompStatus, numThreads);
}
/** Upload the host statevector (qureg.stateVec) into device memory. */
void copyStateToGPU(Qureg qureg)
{
    if (DEBUG) printf("Copying data to GPU\n");
    size_t numBytes = qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real));
    cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, numBytes, cudaMemcpyHostToDevice);
    if (DEBUG) printf("Finished copying data to GPU\n");
}
/** Download the device statevector into the host arrays (qureg.stateVec),
 * waiting first for pending GPU work to finish. */
void copyStateFromGPU(Qureg qureg)
{
    cudaDeviceSynchronize();
    if (DEBUG) printf("Copying data from GPU\n");
    size_t numBytes = qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real));
    cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real, numBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag, numBytes, cudaMemcpyDeviceToHost);
    if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits.
@param reportRank when nonzero, a per-rank header is printed; otherwise only rank 0 opens the report
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
    long long int index;
    int rank;
    copyStateFromGPU(qureg); // amplitudes live on the GPU; mirror them first
    if (qureg.numQubitsInStateVec<=5){
        // ranks take turns so output is not interleaved
        for (rank=0; rank<qureg.numChunks; rank++){
            if (qureg.chunkId==rank){
                if (reportRank) {
                    printf("Reporting state from rank %d [\n", qureg.chunkId);
                    //printf("\trank, index, real, imag\n");
                    printf("real, imag\n");
                } else if (rank==0) {
                    printf("Reporting state [\n");
                    printf("real, imag\n");
                }
                for(index=0; index<qureg.numAmpsPerChunk; index++){
                    printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
                }
                // close the bracket after the final printing rank
                if (reportRank || rank==qureg.numChunks-1) printf("]\n");
            }
            syncQuESTEnv(env);
        }
    }
}
/** Fetch the real part of the index-th amplitude from device memory. */
qreal statevec_getRealAmp(Qureg qureg, long long int index){
    qreal amp = 0;
    cudaMemcpy(&amp, qureg.deviceStateVec.real + index,
            sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
    return amp;
}
/** Fetch the imaginary part of the index-th amplitude from device memory. */
qreal statevec_getImagAmp(Qureg qureg, long long int index){
    qreal amp = 0;
    cudaMemcpy(&amp, qureg.deviceStateVec.imag + index,
            sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
    return amp;
}
__global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
    // zero every amplitude (produces the unnormalised all-zero "state")
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    stateVecReal[ind] = 0.0;
    stateVecImag[ind] = 0.0;
}
/** Zero every amplitude of the GPU statevector. */
void statevec_initBlankState(Qureg qureg)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    statevec_initBlankStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag);
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
    // |0000..0000>: amplitude 1 at index 0, zero elsewhere
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    stateVecReal[ind] = (ind == 0) ? 1.0 : 0.0;
    stateVecImag[ind] = 0.0;
}
/** Initialise the statevector to |00...0>. */
void statevec_initZeroState(Qureg qureg)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    statevec_initZeroStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
    // uniform superposition: every amplitude is 1/sqrt(dim)
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
    stateVecReal[ind] = normFactor;
    stateVecImag[ind] = 0.0;
}
/** Initialise the statevector to the uniform |+>^N superposition. */
void statevec_initPlusState(Qureg qureg)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    statevec_initPlusStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
    // |stateInd>: a single amplitude of 1, zero elsewhere
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    stateVecReal[ind] = (ind == stateInd) ? 1.0 : 0.0;
    stateVecImag[ind] = 0.0;
}
/** Initialise the statevector to the classical basis state |stateInd>. */
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    statevec_initClassicalStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
    // deterministic test pattern: amplitude k = (2k + (2k+1) i) / 10
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    stateVecReal[ind] = (2.0*ind)/10.0;
    stateVecImag[ind] = (2.0*ind + 1.0)/10.0;
}
/** Fill the statevector with a deterministic debug pattern (see kernel). */
void statevec_initDebugState(Qureg qureg)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/numThreads);
    statevec_initDebugStateKernel<<<numBlocks, numThreads>>>(
        qureg.numAmpsPerChunk,
        qureg.deviceStateVec.real,
        qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
    // uniform amplitude over the half of the basis where qubitId equals outcome
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= stateVecSize) return;
    qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
    int matches = (extractBit(qubitId, ind) == outcome);
    stateVecReal[ind] = matches ? normFactor : 0.0;
    stateVecImag[ind] = 0.0;
}
/** Prepare the uniform superposition over all basis states in which qubit
 * qubitId has the given outcome. */
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
    const int numThreads = 128;
    int numBlocks = ceil((qreal)(qureg->numAmpsPerChunk)/numThreads);
    statevec_initStateOfSingleQubitKernel<<<numBlocks, numThreads>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
/** Load a statevector from a text file of "real, imag" lines ('#' lines are
 * comments). Only amplitudes belonging to this chunk are stored; the parsed
 * state is copied to the GPU on success.
 * @return 1 on success; 0 if the file cannot be opened or a line fails to parse
 */
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
    long long int chunkSize, stateVecSize;
    long long int indexInChunk, totalIndex;
    chunkSize = qureg->numAmpsPerChunk;
    stateVecSize = chunkSize*qureg->numChunks;
    qreal *stateVecReal = qureg->stateVec.real;
    qreal *stateVecImag = qureg->stateVec.imag;
    FILE *fp;
    char line[200];
    fp = fopen(filename, "r");
    if (fp == NULL)
        return 0;
    indexInChunk = 0; totalIndex = 0;
    while (fgets(line, sizeof(line), fp) != NULL && totalIndex<stateVecSize){
        if (line[0]!='#'){
            int chunkId = totalIndex/chunkSize;
            if (chunkId==qureg->chunkId){
                int numParsed;
# if QuEST_PREC==1
                numParsed = sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
                        &(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
                numParsed = sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
                        &(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
                // %Lf: quad-precision qreal is long double; "%lf" expects a
                // double pointer and is undefined behaviour here
                numParsed = sscanf(line, "%Lf, %Lf", &(stateVecReal[indexInChunk]),
                        &(stateVecImag[indexInChunk]));
# endif
                // reject malformed lines instead of silently keeping garbage
                if (numParsed != 2) {
                    fclose(fp);
                    return 0;
                }
                indexInChunk += 1;
            }
            totalIndex += 1;
        }
    }
    fclose(fp);
    copyStateToGPU(*qureg);
    // indicate success
    return 1;
}
/** Compare two statevectors element-wise on the CPU (both are first copied
 * down from the GPU).
 * @return 1 if every real and imaginary component differs by at most
 *         precision, else 0
 */
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
    qreal diff;
    // long long, not int: numAmpsPerChunk exceeds int range beyond 31 qubits
    long long int chunkSize = mq1.numAmpsPerChunk;
    copyStateFromGPU(mq1);
    copyStateFromGPU(mq2);
    for (long long int i=0; i<chunkSize; i++){
        diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
        if (diff<0) diff *= -1;
        if (diff>precision) return 0;
        diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
        if (diff<0) diff *= -1;
        if (diff>precision) return 0;
    }
    return 1;
}
/** Apply to rotQubit the 2x2 unitary
 *    [ alpha   -conj(beta) ]
 *    [ beta     conj(alpha) ]
 * One thread updates one pair of amplitudes whose indices differ only in bit
 * rotQubit (the "upper" and "lower" halves of each block of the statevector).
 */
__global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){
    // ----- sizes
    long long int sizeBlock, // size of blocks
        sizeHalfBlock; // size of blocks halved
    // ----- indices
    long long int thisBlock, // current block
        indexUp,indexLo; // current index and corresponding index in lower half block
    // ----- temp variables
    qreal stateRealUp,stateRealLo, // storage for previous state values
        stateImagUp,stateImagLo; // (used in updates)
    // ----- temp variables
    long long int thisTask; // task based approach for expose loop with small granularity
    long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
    sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
    sizeBlock = 2LL * sizeHalfBlock; // size of blocks
    // ---------------------------------------------------------------- //
    // rotate //
    // ---------------------------------------------------------------- //
    //! fix -- no necessary for GPU version
    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;
    qreal alphaImag=alpha.imag, alphaReal=alpha.real;
    qreal betaImag=beta.imag, betaReal=beta.real;
    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;
    // map the flat task index to the pair (indexUp, indexLo = indexUp + half)
    thisBlock = thisTask / sizeHalfBlock;
    indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    indexLo = indexUp + sizeHalfBlock;
    // store current state vector values in temp variables
    stateRealUp = stateVecReal[indexUp];
    stateImagUp = stateVecImag[indexUp];
    stateRealLo = stateVecReal[indexLo];
    stateImagLo = stateVecImag[indexLo];
    // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
    stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
        - betaReal*stateRealLo - betaImag*stateImagLo;
    stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
        - betaReal*stateImagLo + betaImag*stateRealLo;
    // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
    stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
        + alphaReal*stateRealLo + alphaImag*stateImagLo;
    stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
        + alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta)
{
    // Launch one thread per amplitude pair (half the local state vector).
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_compactUnitaryKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit, alpha, beta);
}
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){
    // As statevec_compactUnitaryKernel, but the rotation is applied only to
    // amplitude pairs whose controlQubit bit is 1.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    // pair members share every non-target bit, so one control test covers both
    if (!extractBit(controlQubit, indexUp))
        return;

    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;
    const qreal aRe = alpha.real, aIm = alpha.imag;
    const qreal bRe = beta.real,  bIm = beta.imag;

    // read both amplitudes before writing either
    const qreal reUp = stateVecReal[indexUp], imUp = stateVecImag[indexUp];
    const qreal reLo = stateVecReal[indexLo], imLo = stateVecImag[indexLo];

    // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
    stateVecReal[indexUp] = aRe*reUp - aIm*imUp - bRe*reLo - bIm*imLo;
    stateVecImag[indexUp] = aRe*imUp + aIm*reUp - bRe*imLo + bIm*reLo;

    // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
    stateVecReal[indexLo] = bRe*reUp - bIm*imUp + aRe*reLo + aIm*imLo;
    stateVecImag[indexLo] = bRe*imUp + bIm*reUp + aRe*imLo - aIm*reLo;
}
void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta)
{
    // Launch one thread per amplitude pair; the kernel skips pairs whose
    // control qubit is 0.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_controlledCompactUnitaryKernel<<<numBlocks, threadsPerBlock>>>(qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){
    // Applies the general single-qubit unitary u to targetQubit;
    // one thread handles one (indexUp, indexLo) amplitude pair.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    // read both amplitudes before writing either
    const qreal reUp = stateVecReal[indexUp], imUp = stateVecImag[indexUp];
    const qreal reLo = stateVecReal[indexLo], imLo = stateVecImag[indexLo];

    // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
    stateVecReal[indexUp] = u.r0c0.real*reUp - u.r0c0.imag*imUp
                          + u.r0c1.real*reLo - u.r0c1.imag*imLo;
    stateVecImag[indexUp] = u.r0c0.real*imUp + u.r0c0.imag*reUp
                          + u.r0c1.real*imLo + u.r0c1.imag*reLo;

    // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
    stateVecReal[indexLo] = u.r1c0.real*reUp - u.r1c0.imag*imUp
                          + u.r1c1.real*reLo - u.r1c1.imag*imLo;
    stateVecImag[indexLo] = u.r1c0.real*imUp + u.r1c0.imag*reUp
                          + u.r1c1.real*imLo + u.r1c1.imag*reLo;
}
void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u)
{
    // Launch one thread per amplitude pair; u is flattened into kernel-argument form.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_unitaryKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_multiControlledMultiQubitUnitaryKernel(
    Qureg qureg, long long int ctrlMask, int* targs, int numTargs,
    qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps)
{
    // Applies a general (2^numTargs x 2^numTargs) unitary, supplied as the
    // flattened row-major real/imag arrays uRe/uIm, to the target qubits of
    // every basis state whose control qubits (ctrlMask) are all 1.
    // ampInds/reAmps/imAmps are caller-allocated global scratch arrays giving
    // each thread numTargAmps slots, laid out with the stride/offset below.

    // decide the amplitudes this thread will modify
    long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes
    if (thisTask>=numTasks) return;

    // find this task's start index (where all targs are 0)
    long long int ind00 = insertZeroBits(thisTask, targs, numTargs);

    // this task only modifies amplitudes if control qubits are 1 for this state
    if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
        return;

    qreal *reVec = qureg.deviceStateVec.real;
    qreal *imVec = qureg.deviceStateVec.imag;

    /*
    each thread needs:
        long long int ampInds[numAmps];
        qreal reAmps[numAmps];
        qreal imAmps[numAmps];
    but instead has access to shared arrays, with below stride and offset
    */
    size_t stride = gridDim.x*blockDim.x;
    size_t offset = blockIdx.x*blockDim.x + threadIdx.x;

    // determine the indices and record values of target amps
    long long int ind;
    for (int i=0; i < numTargAmps; i++) {

        // get global index of current target qubit assignment
        // (bit t of i selects whether target qubit targs[t] is flipped on)
        ind = ind00;
        for (int t=0; t < numTargs; t++)
            if (extractBit(t, i))
                ind = flipBit(ind, targs[t]);

        // cache this amplitude in slot i of this thread's scratch
        ampInds[i*stride+offset] = ind;
        reAmps [i*stride+offset] = reVec[ind];
        imAmps [i*stride+offset] = imVec[ind];
    }

    // update the amplitudes
    for (int r=0; r < numTargAmps; r++) {
        // each output amplitude is a complex linear combination of the cached amps
        ind = ampInds[r*stride+offset];
        reVec[ind] = 0;
        imVec[ind] = 0;
        for (int c=0; c < numTargAmps; c++) {
            // row-major element u[r][c]
            qreal uReElem = uRe[c + r*numTargAmps];
            qreal uImElem = uIm[c + r*numTargAmps];
            reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem;
            imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem;
        }
    }
}
void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u)
{
    // Host-side driver: stages the target list, the flattened unitary and
    // per-thread scratch buffers in device memory, launches the kernel,
    // then releases everything.
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock);

    // allocate device space for global {targs} (length: numTargs) and populate
    int *d_targs;
    size_t targMemSize = numTargs * sizeof *d_targs;
    cudaMalloc(&d_targs, targMemSize);
    cudaMemcpy(d_targs, targs, targMemSize, cudaMemcpyHostToDevice);

    // flatten out the u.real and u.imag lists (row-major, to match the kernel's indexing)
    int uNumRows = (1 << u.numQubits);
    qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat);
    qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat);
    long long int i = 0;
    for (int r=0; r < uNumRows; r++)
        for (int c=0; c < uNumRows; c++) {
            uReFlat[i] = u.real[r][c];
            uImFlat[i] = u.imag[r][c];
            i++;
        }

    // allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate
    qreal* d_uRe;
    qreal* d_uIm;
    size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm
    cudaMalloc(&d_uRe, uMemSize);
    cudaMalloc(&d_uIm, uMemSize);
    cudaMemcpy(d_uRe, uReFlat, uMemSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_uIm, uImFlat, uMemSize, cudaMemcpyHostToDevice);

    // allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs)
    // sized gridSize slots per amplitude so every launched thread gets its own column
    long long int *d_ampInds;
    qreal *d_reAmps;
    qreal *d_imAmps;
    size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
    int numTargAmps = uNumRows;
    cudaMalloc(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds);
    cudaMalloc(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps);
    cudaMalloc(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps);

    // call kernel
    statevec_multiControlledMultiQubitUnitaryKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
        qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps);

    // free kernel memory
    // NOTE(review): the launch is asynchronous; this relies on cudaFree
    // synchronizing with the in-flight kernel before the buffers are reclaimed
    free(uReFlat);
    free(uImFlat);
    cudaFree(d_targs);
    cudaFree(d_uRe);
    cudaFree(d_uIm);
    cudaFree(d_ampInds);
    cudaFree(d_reAmps);
    cudaFree(d_imAmps);
}
__global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){
    // Applies the 4x4 unitary u to target qubits (q1, q2) of every basis
    // state whose control qubits (ctrlMask) are all 1. Each thread updates
    // the four amplitudes spanned by the two target qubits.

    // decide the 4 amplitudes this thread will modify
    long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes
    if (thisTask>=numTasks) return;

    qreal *reVec = qureg.deviceStateVec.real;
    qreal *imVec = qureg.deviceStateVec.imag;

    // find indices of amplitudes to modify (treat q1 as the least significant bit)
    long long int ind00, ind01, ind10, ind11;
    ind00 = insertTwoZeroBits(thisTask, q1, q2);

    // modify only if control qubits are 1 for this state
    // (all four amplitudes share every non-target bit, so one test suffices)
    if (ctrlMask && (ctrlMask&ind00) != ctrlMask)
        return;

    ind01 = flipBit(ind00, q1);
    ind10 = flipBit(ind00, q2);
    ind11 = flipBit(ind01, q2);

    // extract statevec amplitudes
    qreal re00, re01, re10, re11;
    qreal im00, im01, im10, im11;
    re00 = reVec[ind00]; im00 = imVec[ind00];
    re01 = reVec[ind01]; im01 = imVec[ind01];
    re10 = reVec[ind10]; im10 = imVec[ind10];
    re11 = reVec[ind11]; im11 = imVec[ind11];

    // apply u * {amp00, amp01, amp10, amp11}
    // (each write expands complex multiply-accumulate of row r of u with the amp vector)
    reVec[ind00] =
        u.r0c0.real*re00 - u.r0c0.imag*im00 +
        u.r0c1.real*re01 - u.r0c1.imag*im01 +
        u.r0c2.real*re10 - u.r0c2.imag*im10 +
        u.r0c3.real*re11 - u.r0c3.imag*im11;
    imVec[ind00] =
        u.r0c0.imag*re00 + u.r0c0.real*im00 +
        u.r0c1.imag*re01 + u.r0c1.real*im01 +
        u.r0c2.imag*re10 + u.r0c2.real*im10 +
        u.r0c3.imag*re11 + u.r0c3.real*im11;
    reVec[ind01] =
        u.r1c0.real*re00 - u.r1c0.imag*im00 +
        u.r1c1.real*re01 - u.r1c1.imag*im01 +
        u.r1c2.real*re10 - u.r1c2.imag*im10 +
        u.r1c3.real*re11 - u.r1c3.imag*im11;
    imVec[ind01] =
        u.r1c0.imag*re00 + u.r1c0.real*im00 +
        u.r1c1.imag*re01 + u.r1c1.real*im01 +
        u.r1c2.imag*re10 + u.r1c2.real*im10 +
        u.r1c3.imag*re11 + u.r1c3.real*im11;
    reVec[ind10] =
        u.r2c0.real*re00 - u.r2c0.imag*im00 +
        u.r2c1.real*re01 - u.r2c1.imag*im01 +
        u.r2c2.real*re10 - u.r2c2.imag*im10 +
        u.r2c3.real*re11 - u.r2c3.imag*im11;
    imVec[ind10] =
        u.r2c0.imag*re00 + u.r2c0.real*im00 +
        u.r2c1.imag*re01 + u.r2c1.real*im01 +
        u.r2c2.imag*re10 + u.r2c2.real*im10 +
        u.r2c3.imag*re11 + u.r2c3.real*im11;
    reVec[ind11] =
        u.r3c0.real*re00 - u.r3c0.imag*im00 +
        u.r3c1.real*re01 - u.r3c1.imag*im01 +
        u.r3c2.real*re10 - u.r3c2.imag*im10 +
        u.r3c3.real*re11 - u.r3c3.imag*im11;
    imVec[ind11] =
        u.r3c0.imag*re00 + u.r3c0.real*im00 +
        u.r3c1.imag*re01 + u.r3c1.real*im01 +
        u.r3c2.imag*re10 + u.r3c2.real*im10 +
        u.r3c3.imag*re11 + u.r3c3.real*im11;
}
void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u)
{
    // Launch one thread per group of 4 amplitudes (the four assignments of q1,q2).
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>2) / (qreal) threadsPerBlock);
    statevec_multiControlledTwoQubitUnitaryKernel<<<numBlocks, threadsPerBlock>>>(qureg, ctrlMask, q1, q2, argifyMatrix4(u));
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){
    // As statevec_unitaryKernel, but u is applied only to amplitude pairs
    // whose controlQubit bit is 1.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    // pair members share every non-target bit, so one control test covers both
    if (!extractBit(controlQubit, indexUp))
        return;

    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    // read both amplitudes before writing either
    const qreal reUp = stateVecReal[indexUp], imUp = stateVecImag[indexUp];
    const qreal reLo = stateVecReal[indexLo], imLo = stateVecImag[indexLo];

    // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
    stateVecReal[indexUp] = u.r0c0.real*reUp - u.r0c0.imag*imUp
                          + u.r0c1.real*reLo - u.r0c1.imag*imLo;
    stateVecImag[indexUp] = u.r0c0.real*imUp + u.r0c0.imag*reUp
                          + u.r0c1.real*imLo + u.r0c1.imag*reLo;

    // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
    stateVecReal[indexLo] = u.r1c0.real*reUp - u.r1c0.imag*imUp
                          + u.r1c1.real*reLo - u.r1c1.imag*imLo;
    stateVecImag[indexLo] = u.r1c0.real*imUp + u.r1c0.imag*reUp
                          + u.r1c1.real*imLo + u.r1c1.imag*reLo;
}
void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u)
{
    // Launch one thread per amplitude pair; the kernel skips pairs whose
    // control qubit is 0.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_controlledUnitaryKernel<<<numBlocks, threadsPerBlock>>>(qureg, controlQubit, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_multiControlledUnitaryKernel(
    Qureg qureg,
    long long int ctrlQubitsMask, long long int ctrlFlipMask,
    int targetQubit, ArgMatrix2 u
){
    // Applies u to targetQubit for amplitude pairs whose control qubits
    // (ctrlQubitsMask) are all 1 after XOR-ing with ctrlFlipMask
    // (i.e. flipped controls trigger on 0).
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    // skip pairs whose (possibly flipped) control qubits are not all set
    if (ctrlQubitsMask != (ctrlQubitsMask & (indexUp ^ ctrlFlipMask)))
        return;

    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    // read both amplitudes before writing either
    const qreal reUp = stateVecReal[indexUp], imUp = stateVecImag[indexUp];
    const qreal reLo = stateVecReal[indexLo], imLo = stateVecImag[indexLo];

    // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
    stateVecReal[indexUp] = u.r0c0.real*reUp - u.r0c0.imag*imUp
                          + u.r0c1.real*reLo - u.r0c1.imag*imLo;
    stateVecImag[indexUp] = u.r0c0.real*imUp + u.r0c0.imag*reUp
                          + u.r0c1.real*imLo + u.r0c1.imag*reLo;

    // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
    stateVecReal[indexLo] = u.r1c0.real*reUp - u.r1c0.imag*imUp
                          + u.r1c1.real*reLo - u.r1c1.imag*imLo;
    stateVecImag[indexLo] = u.r1c0.real*imUp + u.r1c0.imag*reUp
                          + u.r1c1.real*imLo + u.r1c1.imag*reLo;
}
void statevec_multiControlledUnitary(
    Qureg qureg,
    long long int ctrlQubitsMask, long long int ctrlFlipMask,
    int targetQubit, ComplexMatrix2 u
){
    // Launch one thread per amplitude pair; the kernel applies the control
    // mask test itself.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_multiControlledUnitaryKernel<<<numBlocks, threadsPerBlock>>>(
        qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u));
}
__global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){
    // Pauli-X: swaps the amplitudes of each pair of basis states differing
    // only in targetQubit. One thread per pair.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;

    // swap the pair
    const qreal tmpRe = re[indexUp], tmpIm = im[indexUp];
    re[indexUp] = re[indexLo];
    im[indexUp] = im[indexLo];
    re[indexLo] = tmpRe;
    im[indexLo] = tmpIm;
}
void statevec_pauliX(Qureg qureg, int targetQubit)
{
    // Launch one thread per amplitude pair.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_pauliXKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){
    // Applies conjFac * Y to targetQubit, where Y = {{0,-i},{i,0}} and
    // conjFac is +1 (Y) or -1 (conjugated Y). One thread per amplitude pair.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;
    const qreal reUp = re[indexUp], imUp = im[indexUp];

    // update under +-{{0, -i}, {i, 0}}
    re[indexUp] = conjFac *  im[indexLo];
    im[indexUp] = conjFac * -re[indexLo];
    re[indexLo] = conjFac * -imUp;
    im[indexLo] = conjFac *  reUp;
}
void statevec_pauliY(Qureg qureg, int targetQubit)
{
    // conjFac = +1 applies Y; one thread per amplitude pair.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_pauliYKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, int targetQubit)
{
    // conjFac = -1 applies the conjugated Y; one thread per amplitude pair.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_pauliYKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac)
{
    // Applies conjFac * Y to targetQubit for amplitude pairs whose
    // controlQubit bit is 1 (conjFac = +-1 for Y and conjugated Y).
    const long long int numPairs = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numPairs) return;

    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexUp = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock;
    const long long int indexLo = indexUp + halfBlock;

    // pair members share every non-target bit, so one control test covers both
    if (!extractBit(controlQubit, indexUp))
        return;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;
    const qreal reUp = re[indexUp], imUp = im[indexUp];

    // update under +-{{0, -i}, {i, 0}}
    re[indexUp] = conjFac *  im[indexLo];
    im[indexUp] = conjFac * -re[indexLo];
    re[indexLo] = conjFac * -imUp;
    im[indexLo] = conjFac *  reUp;
}
void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit)
{
    // Applies controlled-Y (conjFactor = +1).
    // The kernel operates on amplitude PAIRS and exits for any thread index
    // >= numAmpsPerChunk/2, so only half the amplitudes need a thread;
    // previously twice as many blocks were launched and half did nothing.
    int conjFactor = 1;
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
    statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit)
{
    // Applies the conjugated controlled-Y (conjFactor = -1).
    // The kernel operates on amplitude PAIRS and exits for any thread index
    // >= numAmpsPerChunk/2, so only half the amplitudes need a thread;
    // previously twice as many blocks were launched and half did nothing.
    int conjFactor = -1;
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
    statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) {
    // Multiplies every amplitude with targetQubit == 1 by
    // (cosAngle + i sinAngle); |...0...> amplitudes are untouched.
    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
    const long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    // index of the |...1...> member of this pair
    const long long int halfBlock = 1LL << targetQubit;
    const long long int indexLo = (task/halfBlock)*(2LL*halfBlock) + task%halfBlock + halfBlock;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;

    const qreal r = re[indexLo], i = im[indexLo];
    re[indexLo] = cosAngle*r - sinAngle*i;
    im[indexLo] = sinAngle*r + cosAngle*i;
}
void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term)
{
    // term encodes the phase factor: term = cosAngle + i sinAngle.
    const qreal cosAngle = term.real;
    const qreal sinAngle = term.imag;
    const int threadsPerBlock = 128;
    const int numBlocks = ceil((qureg.numAmpsPerChunk>>1) / (qreal) threadsPerBlock);
    statevec_phaseShiftByTermKernel<<<numBlocks, threadsPerBlock>>>(qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle)
{
    // Multiplies by (cosAngle + i sinAngle) every amplitude whose index has
    // BOTH idQubit1 and idQubit2 set. One thread per amplitude.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    if (!(extractBit(idQubit1, index) && extractBit(idQubit2, index)))
        return;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;
    const qreal r = re[index], i = im[index];
    re[index] = cosAngle*r - sinAngle*i;
    im[index] = sinAngle*r + cosAngle*i;
}
void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle)
{
    // Precompute the phase factor on the host; one thread per amplitude.
    const qreal cosAngle = cos(angle);
    const qreal sinAngle = sin(angle);
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_controlledPhaseShiftKernel<<<numBlocks, threadsPerBlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
    // Multiplies by (cosAngle + i sinAngle) every amplitude whose index has
    // all mask bits set. One thread per amplitude.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    if (mask != (mask & index))
        return;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;
    const qreal r = re[index], i = im[index];
    re[index] = cosAngle*r - sinAngle*i;
    im[index] = sinAngle*r + cosAngle*i;
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
    // Collapse the control-qubit list into a single bitmask, precompute the
    // phase factor, and launch one thread per amplitude.
    const qreal cosAngle = cos(angle);
    const qreal sinAngle = sin(angle);
    const long long int mask = getQubitBitMask(controlQubits, numControlQubits);
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_multiControlledPhaseShiftKernel<<<numBlocks, threadsPerBlock>>>(qureg, mask, cosAngle, sinAngle);
}
__global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
    // Rotates every amplitude by exp(-+ i angle/2), sign chosen by the parity
    // of the index bits selected by mask. One thread per amplitude.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;

    // fac = -1 for odd parity, +1 for even
    const int fac = getBitMaskParity(mask & index)? -1 : 1;
    const qreal r = re[index], i = im[index];
    re[index] = cosAngle*r + fac * sinAngle*i;
    im[index] = - fac * sinAngle*r + cosAngle*i;
}
void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle)
{
    // Precompute cos/sin of the half-angle; one thread per amplitude.
    const qreal cosAngle = cos(angle/2.0);
    const qreal sinAngle = sin(angle/2.0);
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_multiRotateZKernel<<<numBlocks, threadsPerBlock>>>(qureg, mask, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledMultiRotateZKernel(Qureg qureg, long long int ctrlMask, long long int targMask, qreal cosAngle, qreal sinAngle) {
    // As statevec_multiRotateZKernel, but amplitudes are rotated only when
    // all ctrlMask bits of the index are 1.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    // amplitudes corresponding to control qubits not all-in-one are unmodified
    if (ctrlMask && ((ctrlMask & index) != ctrlMask))
        return;

    qreal *re = qureg.deviceStateVec.real;
    qreal *im = qureg.deviceStateVec.imag;

    // avoid warp divergence, setting fac = +- 1 by target-bit parity
    const int fac = 1-2*getBitMaskParity(targMask & index);
    const qreal r = re[index], i = im[index];
    re[index] = cosAngle*r + fac * sinAngle*i;
    im[index] = - fac * sinAngle*r + cosAngle*i;
}
void statevec_multiControlledMultiRotateZ(Qureg qureg, long long int ctrlMask, long long int targMask, qreal angle)
{
    // Precompute cos/sin of the half-angle; one thread per amplitude.
    const qreal cosAngle = cos(angle/2.0);
    const qreal sinAngle = sin(angle/2.0);
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_multiControlledMultiRotateZKernel<<<numBlocks, threadsPerBlock>>>(qureg, ctrlMask, targMask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
    // Returns the trace of the density matrix (the sum of the real parts of
    // its diagonal elements), computed with Kahan compensated summation.
    // The state is first copied from GPU to host memory.
    qreal pTotal=0;
    qreal y, t, c;
    c = 0;

    long long int numCols = 1LL << qureg.numQubitsRepresented;
    long long int diagIndex;

    copyStateFromGPU(qureg);

    // col must be long long int: with an int counter the loop (and diagIndex
    // product) would overflow once numQubitsRepresented >= 31
    for (long long int col=0; col< numCols; col++) {
        diagIndex = col*(numCols + 1);
        y = qureg.stateVec.real[diagIndex] - c;
        t = pTotal + y;
        c = ( t - pTotal ) - y; // brackets are important
        pTotal = t;
    }

    return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
    // Returns the total probability (sum of |amp|^2 over the local chunk),
    // after copying the state from GPU to host memory.
    /* IJB - implemented using Kahan summation for greater accuracy at a slight floating
       point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
    /* Don't change the bracketing in this routine! */
    qreal pTotal=0;
    qreal y, t, c;
    long long int index;
    long long int numAmpsPerRank = qureg.numAmpsPerChunk;

    copyStateFromGPU(qureg);

    c = 0.0;
    for (index=0; index<numAmpsPerRank; index++){
        /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
        // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
        y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
        t = pTotal + y;
        c = ( t - pTotal ) - y;  // compensation term recovers the low-order bits lost in t
        pTotal = t;

        /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
        //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
        y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
        t = pTotal + y;
        c = ( t - pTotal ) - y;
        pTotal = t;
    }
    return pTotal;
}
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2)
{
    // Negates every amplitude whose index has BOTH idQubit1 and idQubit2 set.
    // One thread per amplitude.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    if (extractBit(idQubit1, index) && extractBit(idQubit2, index)) {
        qreal *re = qureg.deviceStateVec.real;
        qreal *im = qureg.deviceStateVec.imag;
        re[index] = - re[index];
        im[index] = - im[index];
    }
}
void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2)
{
    // One thread per amplitude; the kernel applies the two-qubit condition.
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_controlledPhaseFlipKernel<<<numBlocks, threadsPerBlock>>>(qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
    // Negates every amplitude whose index has all mask bits set.
    // One thread per amplitude.
    const long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= qureg.numAmpsPerChunk) return;

    if (mask == (mask & index)) {
        qreal *re = qureg.deviceStateVec.real;
        qreal *im = qureg.deviceStateVec.imag;
        re[index] = - re[index];
        im[index] = - im[index];
    }
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
    // Collapse the control-qubit list into one bitmask; one thread per amplitude.
    const long long int mask = getQubitBitMask(controlQubits, numControlQubits);
    const int threadsPerBlock = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerBlock);
    statevec_multiControlledPhaseFlipKernel<<<numBlocks, threadsPerBlock>>>(qureg, mask);
}
/** Implements the SWAP gate on qubits qb1, qb2 by exchanging each
 * |..0..1..> amplitude with its |..1..0..> partner. States with the two
 * qubits equal are untouched, so only a quarter of the indices need a thread.
 */
__global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) {

    long long int numTasks = qureg.numAmpsPerChunk >> 2; // one task per (01,10) pair
    long long int task = blockIdx.x*blockDim.x + threadIdx.x;
    if (task >= numTasks) return;

    qreal *reVec = qureg.deviceStateVec.real;
    qreal *imVec = qureg.deviceStateVec.imag;

    // indices of |..0..0..>, |..0..1..> and |..1..0..> for this task
    long long int ind00 = insertTwoZeroBits(task, qb1, qb2);
    long long int ind01 = flipBit(ind00, qb1);
    long long int ind10 = flipBit(ind00, qb2);

    // exchange the 01 and 10 amplitudes
    qreal heldRe = reVec[ind01];
    qreal heldIm = imVec[ind01];
    reVec[ind01] = reVec[ind10];
    imVec[ind01] = imVec[ind10];
    reVec[ind10] = heldRe;
    imVec[ind10] = heldIm;
}
/** Host-side launcher for the SWAP-gate kernel: one thread per (01,10) amplitude pair. */
void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2)
{
    const int blockSize = 128;
    const int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/blockSize);
    statevec_swapQubitAmpsKernel<<<numBlocks, blockSize>>>(qureg, qb1, qb2);
}
/** Applies the Hadamard gate to targetQubit: every (target=0, target=1)
 * amplitude pair (u, l) becomes ((u+l)/sqrt2, (u-l)/sqrt2).
 * One thread per amplitude pair.
 */
__global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){
    // ----- sizes
    long long int sizeBlock,        // size of blocks
        sizeHalfBlock;              // size of blocks halved
    // ----- indices
    long long int thisBlock,        // current block
        indexUp,indexLo;            // current index and corresponding index in lower half block
    // ----- temp variables
    qreal stateRealUp,stateRealLo,  // storage for previous state values
        stateImagUp,stateImagLo;    // (used in updates)
    // ----- temp variables
    long long int thisTask;         // task based approach for expose loop with small granularity
    long long int numTasks=qureg.numAmpsPerChunk>>1;    // one task per amplitude pair

    sizeHalfBlock = 1LL << targetQubit;     // size of blocks halved
    sizeBlock     = 2LL * sizeHalfBlock;    // size of blocks

    // ---------------------------------------------------------------- //
    //            rotate                                                //
    // ---------------------------------------------------------------- //

    //! fix -- not necessary for GPU version
    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    qreal recRoot2 = 1.0/sqrt(2.0);     // Hadamard normalisation 1/sqrt(2)

    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;

    // locate this task's upper (target=0) and lower (target=1) pair indices
    thisBlock = thisTask / sizeHalfBlock;
    indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    indexLo   = indexUp + sizeHalfBlock;

    // store current state vector values in temp variables
    stateRealUp = stateVecReal[indexUp];
    stateImagUp = stateVecImag[indexUp];

    stateRealLo = stateVecReal[indexLo];
    stateImagLo = stateVecImag[indexLo];

    stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
    stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);

    stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
    stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
/** Host-side launcher for the Hadamard kernel: one thread per amplitude pair. */
void statevec_hadamard(Qureg qureg, int targetQubit)
{
    const int blockSize = 128;
    const int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/blockSize);
    statevec_hadamardKernel<<<numBlocks, blockSize>>>(qureg, targetQubit);
}
/** Applies CNOT: swaps the (target=0, target=1) amplitude pair of every basis
 * state whose controlQubit is 1. One thread per amplitude pair (so only half
 * of the launched threads, indexed below stateVecSize>>1, do work).
 */
__global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit)
{
    long long int index;
    long long int sizeBlock,    // size of blocks
        sizeHalfBlock;          // size of blocks halved
    long long int stateVecSize;
    int controlBit;

    // ----- temp variables
    qreal stateRealUp,          // storage for previous state values
        stateImagUp;            // (used in updates)
    long long int thisBlock,    // current block
        indexUp,indexLo;        // current index and corresponding index in lower half block
    sizeHalfBlock = 1LL << targetQubit;     // stride between the two pair members
    sizeBlock     = 2LL * sizeHalfBlock;    // size of blocks

    stateVecSize = qureg.numAmpsPerChunk;
    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=(stateVecSize>>1)) return;   // one task per pair => half the amps

    // map this task index to the pair's upper (target=0) and lower (target=1) indices
    thisBlock   = index / sizeHalfBlock;
    indexUp     = thisBlock*sizeBlock + index%sizeHalfBlock;
    indexLo     = indexUp + sizeHalfBlock;

    controlBit = extractBit(controlQubit, indexUp);
    if (controlBit){
        // Pauli-X on the target: exchange the pair's amplitudes
        stateRealUp = stateVecReal[indexUp];
        stateImagUp = stateVecImag[indexUp];

        stateVecReal[indexUp] = stateVecReal[indexLo];
        stateVecImag[indexUp] = stateVecImag[indexLo];

        stateVecReal[indexLo] = stateRealUp;
        stateVecImag[indexLo] = stateImagUp;
    }
}
/** Host-side launcher for the CNOT kernel. A thread is launched per amplitude
 * (the kernel itself only uses the lower half of the indices).
 */
void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit)
{
    const int blockSize = 128;
    const int numBlocks = ceil((qreal)(qureg.numAmpsPerChunk)/blockSize);
    statevec_controlledNotKernel<<<numBlocks, blockSize>>>(qureg, controlQubit, targetQubit);
}
/** Applies a multi-controlled multi-target NOT: for every basis state whose
 * control qubits (ctrlMask) are all 1, the amplitudes at ampInd and
 * ampInd ^ targMask are exchanged. One thread per amplitude; the thread
 * owning the lower index of each pair performs the swap.
 * NOTE(review): the masks are `int` here while similar masks elsewhere in
 * this file are `long long int` — confirm this is safe for the supported
 * qubit counts.
 */
__global__ void statevec_multiControlledMultiQubitNotKernel(Qureg qureg, int ctrlMask, int targMask) {

    qreal* stateRe = qureg.deviceStateVec.real;
    qreal* stateIm = qureg.deviceStateVec.imag;

    // although each thread swaps/updates two amplitudes, we still invoke one thread per amp
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= qureg.numAmpsPerChunk)
        return;

    // modify amplitudes only if control qubits are 1 for this state
    if (ctrlMask && ((ctrlMask & ampInd) != ctrlMask))
        return;

    long long int mateInd = ampInd ^ targMask;

    // if the mate is lower index, another thread is handling it
    if (mateInd < ampInd)
        return;

    /* it may seem wasteful to spawn more threads than are needed, and abort
     * half of them due to the amp pairing above (and potentially abort
     * an exponential number due to ctrlMask). however, since we are moving
     * global memory directly in a potentially non-contiguous fashion, this
     * method is likely to be memory bandwidth bottlenecked anyway
     */

    qreal mateRe = stateRe[mateInd];
    qreal mateIm = stateIm[mateInd];

    // swap amp with mate
    stateRe[mateInd] = stateRe[ampInd];
    stateIm[mateInd] = stateIm[ampInd];
    stateRe[ampInd] = mateRe;
    stateIm[ampInd] = mateIm;
}
/** Host-side launcher: one thread per amplitude (each swap pair is handled by
 * its lower-index thread inside the kernel).
 */
void statevec_multiControlledMultiQubitNot(Qureg qureg, int ctrlMask, int targMask) {
    const int blockSize = 128;
    const int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) blockSize);
    statevec_multiControlledMultiQubitNotKernel<<<numBlocks, blockSize>>>(qureg, ctrlMask, targMask);
}
/** Returns floor(log2(x)) for x > 0, i.e. the index of x's highest set bit;
 * returns 0 for x == 0 or x == 1.
 */
__device__ __host__ unsigned int log2Int( unsigned int x )
{
    unsigned int exponent;
    for (exponent = 0; x >> 1; x >>= 1)
        exponent++;
    return exponent;
}
/** In-place tree summation of the first `length` values of arrayIn, writing
 * this CUDA block's total to reducedArray[blockIdx.x].
 * Assumes `length` is a power of two (callers pass blockDim.x or a
 * power-of-two shared-memory size — TODO confirm for all call sites), and
 * contains __syncthreads, so the participating threads must reach it together.
 */
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
    int i, l, r;
    int threadMax, maxDepth;
    threadMax = length/2;           // number of adder threads active at this depth
    maxDepth = log2Int(length/2);

    for (i=0; i<maxDepth+1; i++){
        if (threadIdx.x<threadMax){
            // fold the upper half of the live range onto the lower half
            l = threadIdx.x;
            r = l + threadMax;
            arrayIn[l] = arrayIn[r] + arrayIn[l];
        }
        threadMax = threadMax >> 1;
        __syncthreads(); // optimise -- use warp shuffle instead
    }

    if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
/** Copies one `length`-sized segment of arrayIn into dynamic shared memory
 * (two values per thread — so this kernel must be launched with length/2
 * threads per block and length*sizeof(qreal) shared bytes), then block-reduces
 * the segment into reducedArray[blockIdx.x].
 */
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
    extern __shared__ qreal tempReductionArray[];
    int blockOffset = blockIdx.x*length;
    // each thread stages two consecutive values
    tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
    tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
    __syncthreads();
    reduceBlock(tempReductionArray, reducedArray, length);
}
/** Collects, per CUDA block, the diagonal density-matrix probabilities of the
 * basis states whose measureQubit is 0, writing each block's partial sum to
 * reducedArray[blockIdx.x]. Requires blockDim.x qreals of dynamic shared memory.
 */
__global__ void densmatr_findProbabilityOfZeroKernel(
    Qureg qureg, int measureQubit, qreal *reducedArray
) {
    // run by each thread
    // use of block here refers to contiguous amplitudes where measureQubit = 0,
    // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads

    long long int densityDim    = 1LL << qureg.numQubitsRepresented;
    long long int numTasks      = densityDim >> 1;
    long long int sizeHalfBlock = 1LL << (measureQubit);
    long long int sizeBlock     = 2LL * sizeHalfBlock;

    long long int thisBlock;    // which block this thread is processing
    long long int thisTask;     // which part of the block this thread is processing
    long long int basisIndex;   // index of this thread's computational basis state
    long long int densityIndex; // " " index of |basis><basis| in the flat density matrix

    // array of each thread's collected probability, to be summed
    extern __shared__ qreal tempReductionArray[];

    // figure out which density matrix prob that this thread is assigned
    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;
    thisBlock = thisTask / sizeHalfBlock;
    basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    densityIndex = (densityDim + 1) * basisIndex;   // diagonal element of the flattened matrix

    // record the probability in the CUDA-BLOCK-wide array
    qreal prob = qureg.deviceStateVec.real[densityIndex];   // im[densityIndex] assumed ~ 0
    tempReductionArray[threadIdx.x] = prob;

    // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
    __syncthreads();
    if (threadIdx.x<blockDim.x/2){
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
    }
}
/** Collects, per CUDA block, the probabilities |amp|^2 of the state-vector
 * amplitudes whose measureQubit bit is 0, writing each block's partial sum to
 * reducedArray[blockIdx.x]. Requires blockDim.x qreals of dynamic shared memory.
 */
__global__ void statevec_findProbabilityOfZeroKernel(
    Qureg qureg, int measureQubit, qreal *reducedArray
) {
    // ----- sizes
    long long int sizeBlock,    // size of blocks
        sizeHalfBlock;          // size of blocks halved
    // ----- indices
    long long int thisBlock,    // current block
        index;                  // current index for first half block
    // ----- temp variables
    long long int thisTask;     // task based approach for expose loop with small granularity
    long long int numTasks=qureg.numAmpsPerChunk>>1;
    // (good for shared memory parallelism)

    extern __shared__ qreal tempReductionArray[];

    // ---------------------------------------------------------------- //
    //            dimensions                                            //
    // ---------------------------------------------------------------- //
    sizeHalfBlock = 1LL << (measureQubit);  // number of state vector elements to sum,
                                            // and then the number to skip
    sizeBlock     = 2LL * sizeHalfBlock;    // size of blocks (pairs of measure and skip entries)

    // ---------------------------------------------------------------- //
    //            find probability                                      //
    // ---------------------------------------------------------------- //

    //
    // --- task-based shared-memory parallel implementation
    //
    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;

    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;

    // index of this task's measureQubit=0 amplitude
    thisBlock = thisTask / sizeHalfBlock;
    index     = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    qreal realVal, imagVal;
    realVal = stateVecReal[index];
    imagVal = stateVecImag[index];
    tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
    __syncthreads();

    if (threadIdx.x<blockDim.x/2){
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
    }
}
/** Returns how many reduction passes are needed to fold numValuesToReduce
 * values down to (below) one, when each pass shrinks the count by a factor of
 * numReducedPerLevel (integer division). Returns 0 when there are no values.
 */
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
    int levels;
    for (levels = 0; numValuesToReduce != 0; levels++)
        numValuesToReduce /= numReducedPerLevel;
    return levels;
}
/** Exchanges two qreal-array pointers (used to ping-pong the reduction buffers). */
void swapDouble(qreal **a, qreal **b){
    qreal *held = *a;
    *a = *b;
    *b = held;
}
/** Returns the probability that measuring measureQubit of the density matrix
 * yields 0, by a multi-level GPU reduction of the relevant diagonal elements
 * into qureg's firstLevelReduction / secondLevelReduction scratch buffers.
 */
qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
    long long int densityDim = 1LL << qureg.numQubitsRepresented;
    long long int numValuesToReduce = densityDim >> 1;  // half of the diagonal has measureQubit=0

    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {

        // need less than one CUDA-BLOCK to reduce
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA-BLOCKS
        else {
            valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        // spawn threads to sum the probs in each block
        if (firstTime) {
            // level 0: read the diagonal probs and block-reduce them
            densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                qureg, measureQubit, qureg.firstLevelReduction);
            firstTime = 0;

        // sum the block probs
        } else {
            // later levels: reduce the previous level's per-block sums,
            // then swap the buffers so the newest sums are always "first"
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }

        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal zeroProb;
    cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return zeroProb;
}
/** Returns the probability of measuring qubit measureQubit of the pure state
 * in outcome 0, i.e. the sum of |amp|^2 over every amplitude whose
 * measureQubit bit is 0, via a multi-level GPU reduction into qureg's
 * scratch buffers.
 */
qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit)
{
    qreal stateProb=0;

    // 1-qubit edge-case breaks below loop logic: the only measureQubit=0
    // amplitude is amp[0], so fetch and square it directly
    if (qureg.numQubitsInStateVec == 1) {
        qreal amp;
        // fix: the host destination (&amp) was missing from these two copies
        cudaMemcpy(&amp, qureg.deviceStateVec.real, sizeof(qreal), cudaMemcpyDeviceToHost);
        stateProb += amp*amp;
        cudaMemcpy(&amp, qureg.deviceStateVec.imag, sizeof(qreal), cudaMemcpyDeviceToHost);
        stateProb += amp*amp;
        return stateProb;
    }

    long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;  // one term per measureQubit=0 amp
    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int firstTime=1;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;

    while(numValuesToReduce>1){
        if (numValuesToReduce<maxReducedPerLevel){
            // Need less than one CUDA block to reduce values
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        } else {
            // Use full CUDA blocks, with block size constrained by shared mem usage
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        if (firstTime){
            // level 0: compute per-amplitude probabilities and block-reduce them
            statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                qureg, measureQubit, qureg.firstLevelReduction);
            firstTime=0;
        } else {
            // later levels: reduce the previous level's per-block sums, then
            // swap buffers so the newest sums are always in firstLevelReduction
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // final total resides in the first element (cudaMemcpy synchronises implicitly)
    cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return stateProb;
}
/** Probability of measuring measureQubit of the pure state in the given outcome (0 or 1). */
qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
    qreal zeroProb = statevec_findProbabilityOfZero(qureg, measureQubit);
    return (outcome==1)? 1.0 - zeroProb : zeroProb;
}
/** Probability of measuring measureQubit of the density matrix in the given outcome (0 or 1). */
qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome)
{
    qreal zeroProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
    return (outcome==1)? 1.0 - zeroProb : zeroProb;
}
// atomicAdd on floats/doubles isn't available on <6 CC devices, so we add it ourselves
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
// compute capability >= 6.0 (or the host compilation pass): the builtin double atomicAdd exists
#else
// emulate double-precision atomicAdd with an atomicCAS loop on the raw 64-bit pattern
static __inline__ __device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;

    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
            __double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
    } while (assumed != old);

    return __longlong_as_double(old);
}
#endif
/** Atomically accumulates each amplitude's probability |amp|^2 into the
 * outcomeProbs entry selected by the measured qubits' bits of its index.
 * outcomeProbs must be zero-initialised and have 2^numQubits entries.
 */
__global__ void statevec_calcProbOfAllOutcomesKernel(
    qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
    // each thread handles one amplitude (all amplitudes are involved)
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= qureg.numAmpsTotal) return;

    qreal prob = (
        qureg.deviceStateVec.real[ampInd]*qureg.deviceStateVec.real[ampInd] +
        qureg.deviceStateVec.imag[ampInd]*qureg.deviceStateVec.imag[ampInd]);

    // each amplitude contributes to one outcome
    long long int outcomeInd = 0;
    for (int q=0; q<numQubits; q++)
        outcomeInd += extractBit(qubits[q], ampInd) * (1LL << q);

    // each thread atomically writes directly to the global output.
    // this beat block-hierarchical atomic reductions in both global and shared memory!
    atomicAdd(&outcomeProbs[outcomeInd], prob);
}
/** Populates outcomeProbs[0 .. 2^numQubits) with the probability of every
 * outcome of measuring the given qubits, by atomically accumulating
 * per-amplitude probabilities on the GPU.
 */
void statevec_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {

    // stage the measured-qubit list in GPU memory
    int* devQubits;
    size_t qubitsBytes = numQubits * sizeof *devQubits;
    cudaMalloc(&devQubits, qubitsBytes);
    cudaMemcpy(devQubits, qubits, qubitsBytes, cudaMemcpyHostToDevice);

    // zero-initialised GPU array into which the outcome probs are accumulated
    qreal* devOutcomeProbs;
    long long int numOutcomes = (1LL << numQubits);
    size_t outcomeBytes = numOutcomes * sizeof *devOutcomeProbs;
    cudaMalloc(&devOutcomeProbs, outcomeBytes);
    cudaMemset(devOutcomeProbs, 0, outcomeBytes);

    // one thread per amplitude
    int blockSize = 128;
    int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) blockSize);
    statevec_calcProbOfAllOutcomesKernel<<<numBlocks, blockSize>>>(
        devOutcomeProbs, qureg, devQubits, numQubits);

    // fetch the results (cudaMemcpy on the default stream syncs with the kernel)
    cudaMemcpy(outcomeProbs, devOutcomeProbs, outcomeBytes, cudaMemcpyDeviceToHost);

    // release GPU memory
    cudaFree(devQubits);
    cudaFree(devOutcomeProbs);
}
/** Atomically accumulates each diagonal density-matrix element into the
 * outcomeProbs entry selected by the measured qubits' bits of its basis index.
 * outcomeProbs must be zero-initialised and have 2^numQubits entries.
 */
__global__ void densmatr_calcProbOfAllOutcomesKernel(
    qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
    // each thread handles one diagonal amplitude
    long long int diagInd = blockIdx.x*blockDim.x + threadIdx.x;
    long long int numDiags = (1LL << qureg.numQubitsRepresented);
    if (diagInd >= numDiags) return;

    long long int flatInd = (1 + numDiags)*diagInd;     // position of the diagonal in the flattened matrix
    qreal prob = qureg.deviceStateVec.real[flatInd];    // im[flatInd] assumed ~ 0

    // each diagonal amplitude contributes to one outcome
    long long int outcomeInd = 0;
    for (int q=0; q<numQubits; q++)
        outcomeInd += extractBit(qubits[q], diagInd) * (1LL << q);

    // each thread atomically writes directly to the global output.
    // this beat block-hierarchical atomic reductions in both global and shared memory!
    atomicAdd(&outcomeProbs[outcomeInd], prob);
}
/** Populates outcomeProbs[0 .. 2^numQubits) with the probability of every
 * outcome of measuring the given qubits of a density matrix, accumulated
 * atomically from its diagonal elements on the GPU.
 */
void densmatr_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {

    // copy qubits to GPU memory
    int* d_qubits;
    size_t mem_qubits = numQubits * sizeof *d_qubits;
    cudaMalloc(&d_qubits, mem_qubits);
    cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);

    // one thread per diagonal element.
    // fix: numDiags must be long long int, lest (1LL << numQubitsRepresented)
    // be truncated for >= 31 represented qubits (matches the kernel's type)
    int numThreadsPerBlock = 128;
    long long int numDiags = (1LL << qureg.numQubitsRepresented);
    int numBlocks = ceil(numDiags / (qreal) numThreadsPerBlock);

    // create zeroed global GPU array for outcomeProbs (atomic accumulation target)
    qreal* d_outcomeProbs;
    long long int numOutcomes = (1LL << numQubits);
    size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
    cudaMalloc(&d_outcomeProbs, mem_outcomeProbs);
    cudaMemset(d_outcomeProbs, 0, mem_outcomeProbs);

    // accumulate the outcome probabilities
    densmatr_calcProbOfAllOutcomesKernel<<<numBlocks, numThreadsPerBlock>>>(
        d_outcomeProbs, qureg, d_qubits, numQubits);

    // copy outcomeProbs from GPU memory (implicitly synchronises with the kernel)
    cudaMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, cudaMemcpyDeviceToHost);

    // free GPU memory
    cudaFree(d_qubits);
    cudaFree(d_outcomeProbs);
}
/** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number.
 * Each thread computes one term; the block's terms are reduced into
 * reducedArray[blockIdx.x] (requires blockDim.x qreals of dynamic shared memory).
 */
__global__ void densmatr_calcInnerProductKernel(
    Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray
) {
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= numTermsToSum) return;

    // Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm
    qreal prod = (
          a.deviceStateVec.real[index]*b.deviceStateVec.real[index]
        + a.deviceStateVec.imag[index]*b.deviceStateVec.imag[index]);

    // array of each thread's collected sum term, to be summed
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = prod;
    __syncthreads();

    // every second thread reduces
    if (threadIdx.x<blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Returns Tr(conjTrans(a) b), the Hilbert-Schmidt inner product of density
 * matrices a and b, via multi-level GPU reduction. The reduction scratch
 * buffers of qureg b are (arbitrarily) used for the partial sums.
 */
qreal densmatr_calcInnerProduct(Qureg a, Qureg b) {

    // we're summing the square of every term in the density matrix
    long long int numValuesToReduce = a.numAmpsTotal;

    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {

        // need less than one CUDA-BLOCK to reduce
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA-BLOCKS
        else {
            valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        // dictates size of reduction array
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        // spawn threads to sum the terms in each block
        // arbitrarily store the reduction in the b qureg's array
        if (firstTime) {
            densmatr_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                a, b, a.numAmpsTotal, b.firstLevelReduction);
            firstTime = 0;
        }
        // sum the block terms
        else {
            // reduce the previous level's per-block sums, then swap buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                b.firstLevelReduction,
                b.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction));
        }

        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal innerprod;
    cudaMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return innerprod;
}
/** computes either a real or imag term in the inner product <vec1|vec2>.
 * getRealComp selects the component; each thread computes one per-amplitude
 * term and the block's terms are reduced into reducedArray[blockIdx.x]
 * (requires blockDim.x qreals of dynamic shared memory).
 */
__global__ void statevec_calcInnerProductKernel(
    int getRealComp,
    qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
    long long int numTermsToSum, qreal* reducedArray)
{
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= numTermsToSum) return;

    // choose whether to calculate the real or imaginary term of the inner product
    qreal innerProdTerm;
    if (getRealComp)
        innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
    else
        innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];

    // array of each thread's collected sum term, to be summed
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = innerProdTerm;
    __syncthreads();

    // every second thread reduces
    if (threadIdx.x<blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes a single component of the inner product <bra|ket>: the real part
 * when getRealComp is true, else the imaginary part. The per-amplitude terms
 * are folded by a multi-level GPU reduction using the bra qureg's reduction
 * scratch buffers (the local copies of its buffer pointers may be ping-ponged;
 * both buffers remain valid device arrays, so this has no caller-visible effect).
 */
static qreal statevec_calcInnerProductComponent(int getRealComp, Qureg bra, Qureg ket) {

    long long int numValuesToReduce = bra.numAmpsPerChunk;
    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {
        if (numValuesToReduce < maxReducedPerLevel) {
            // need less than one CUDA-BLOCK to reduce
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        else {
            // otherwise use only full CUDA-BLOCKS (constrained by shared memory)
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        if (firstTime) {
            // level 0: compute the per-amplitude terms and block-reduce them
            statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                getRealComp,
                bra.deviceStateVec.real, bra.deviceStateVec.imag,
                ket.deviceStateVec.real, ket.deviceStateVec.imag,
                numValuesToReduce,
                bra.firstLevelReduction);
            firstTime = 0;
        } else {
            // later levels: reduce the previous level's per-block sums, then
            // swap buffers so the newest sums are always in firstLevelReduction
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                bra.firstLevelReduction,
                bra.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal component;
    cudaMemcpy(&component, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return component;
}
/** Computes the inner product <bra|ket> of two state vectors.
 * The real and imaginary components are reduced in separate passes (sharing
 * the helper above), so the two sums never need to be kept separated during
 * the shared-memory reduction.
 * @todo could run the kernel twice concurrently, storing real in bra.reduc
 *       and imag in ket.reduc?
 */
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
    Complex innerProd;
    innerProd.real = statevec_calcInnerProductComponent(1, bra, ket);
    innerProd.imag = statevec_calcInnerProductComponent(0, bra, ket);
    return innerProd;
}
/** computes one term of (vec^*T) dens * vec : thread `row` forms the row-th
 * element of the matrix-vector product dens*vec, multiplies it by the row-th
 * element of conj(vec), and contributes the (real) result to the block
 * reduction in reducedArray[blockIdx.x].
 */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {

    // figure out which density matrix row to consider
    long long int col;
    long long int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= dim) return;

    qreal* densReal = dens.deviceStateVec.real;
    qreal* densImag = dens.deviceStateVec.imag;
    qreal* vecReal  = vec.deviceStateVec.real;
    qreal* vecImag  = vec.deviceStateVec.imag;

    // compute the row-th element of the product dens*vec
    qreal prodReal = 0;
    qreal prodImag = 0;
    for (col=0LL; col < dim; col++) {
        // the matrix is stored flat, column-contiguous in basis index
        qreal densElemReal = densReal[dim*col + row];
        qreal densElemImag = densImag[dim*col + row];

        prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
        prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
    }

    // multiply with row-th elem of (vec^*)
    qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];

    // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
    //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];

    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = termReal;
    __syncthreads();

    // every second thread reduces
    if (threadIdx.x<blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Returns the fidelity <pureState| qureg |pureState> of a density matrix
 * against a pure state, via multi-level GPU reduction of one term per
 * density-matrix row. The pureState qureg's reduction buffers hold the
 * partial sums.
 */
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {

    // we're reducing one fidelity term per density-matrix row
    long long int densityDim = 1LL << qureg.numQubitsRepresented;
    long long int numValuesToReduce = densityDim;

    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {

        // need less than one CUDA-BLOCK to reduce
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA-BLOCKS
        else {
            valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        // dictates size of reduction array
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        // spawn threads to sum the probs in each block
        // store the reduction in the pureState array
        if (firstTime) {
            densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                qureg, pureState, densityDim, pureState.firstLevelReduction);
            firstTime = 0;

        // sum the block probs
        } else {
            // reduce the previous level's per-block sums, then swap buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                pureState.firstLevelReduction,
                pureState.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
        }

        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal fidelity;
    cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return fidelity;
}
/** Computes one |a_ij - b_ij|^2 term per thread and block-reduces the terms
 * into reducedArray[blockIdx.x] (requires blockDim.x qreals of dynamic
 * shared memory).
 */
__global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel(
    qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm,
    long long int numAmpsToSum, qreal *reducedArray
) {
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= numAmpsToSum) return;

    // squared magnitude of this element of (a - b)
    qreal deltaRe = aRe[ampInd] - bRe[ampInd];
    qreal deltaIm = aIm[ampInd] - bIm[ampInd];

    // stage the term in shared memory, then the lower half of threads reduces
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = deltaRe*deltaRe + deltaIm*deltaIm;
    __syncthreads();

    if (threadIdx.x < blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/* computes sqrt(Tr( (a-b) conjTrans(a-b) )) = sqrt( sum of |a_ij - b_ij|^2 ),
 * the Hilbert-Schmidt distance between density matrices a and b, via
 * multi-level GPU reduction using a's reduction scratch buffers */
qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) {

    // we're summing the square of every term in (a-b)
    long long int numValuesToReduce = a.numAmpsPerChunk;

    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {

        // need less than one CUDA-BLOCK to reduce
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA-BLOCKS
        else {
            valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        // dictates size of reduction array
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        // spawn threads to sum the probs in each block (store reduction temp values in a's reduction array)
        if (firstTime) {
            densmatr_calcHilbertSchmidtDistanceSquaredKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                a.deviceStateVec.real, a.deviceStateVec.imag,
                b.deviceStateVec.real, b.deviceStateVec.imag,
                numValuesToReduce, a.firstLevelReduction);
            firstTime = 0;

        // sum the block probs
        } else {
            // reduce the previous level's per-block sums, then swap buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                a.firstLevelReduction,
                a.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction));
        }

        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal trace;
    cudaMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);

    qreal sqrtTrace = sqrt(trace);
    return sqrtTrace;
}
/** Computes |amp|^2 for one density-matrix element per thread and block-reduces
 * the terms into reducedArray[blockIdx.x] (requires blockDim.x qreals of
 * dynamic shared memory).
 */
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {

    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= numAmpsToSum) return;

    // squared magnitude of this element
    qreal sqrMag = vecReal[ampInd]*vecReal[ampInd] + vecImag[ampInd]*vecImag[ampInd];

    // stage the term in shared memory, then the lower half of threads reduces
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = sqrMag;
    __syncthreads();

    if (threadIdx.x < blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared, Tr(rho^2) = sum |rho_ij|^2,
 * via multi-level GPU reduction into qureg's reduction scratch buffers. */
qreal densmatr_calcPurity(Qureg qureg) {

    // we're summing the square of every term in the density matrix
    long long int numValuesToReduce = qureg.numAmpsPerChunk;

    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    int firstTime = 1;

    while (numValuesToReduce > 1) {

        // need less than one CUDA-BLOCK to reduce
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA-BLOCKS
        else {
            valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        // dictates size of reduction array
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);

        // spawn threads to sum the probs in each block
        if (firstTime) {
            densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
                numValuesToReduce, qureg.firstLevelReduction);
            firstTime = 0;

        // sum the block probs
        } else {
            // reduce the previous level's per-block sums, then swap buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }

        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }

    // total now resides in the first element (cudaMemcpy synchronises implicitly)
    qreal traceDensSquared;
    cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    return traceDensSquared;
}
/** Renormalises the half of each amplitude-block consistent with the measured
 * outcome (dividing by sqrt(totalProbability)) and zeroes the other half.
 * One thread per amplitude pair (numAmpsPerChunk/2 tasks).
 */
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
    // ----- sizes
    long long int sizeBlock,                // size of blocks
                  sizeHalfBlock;            // size of blocks halved
    // ----- indices
    long long int thisBlock,                // current block
                  index;                    // current index for first half block
    // ----- measured probability
    qreal renorm;                           // probability (returned) value
    // ----- temp variables
    long long int thisTask;                 // task based approach for expose loop with small granularity
    // (good for shared memory parallelism)
    long long int numTasks=qureg.numAmpsPerChunk>>1;
    // ---------------------------------------------------------------- //
    //            dimensions                                            //
    // ---------------------------------------------------------------- //
    sizeHalfBlock = 1LL << (measureQubit);  // number of state vector elements to sum,
                                            // and then the number to skip
    sizeBlock     = 2LL * sizeHalfBlock;    // size of blocks (pairs of measure and skip entries)
    // ---------------------------------------------------------------- //
    //            find probability                                      //
    // ---------------------------------------------------------------- //
    //
    // --- task-based shared-memory parallel implementation
    //
    renorm=1/sqrt(totalProbability);
    qreal *stateVecReal = qureg.deviceStateVec.real;
    qreal *stateVecImag = qureg.deviceStateVec.imag;
    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;
    // map the task index to the first-half-block amplitude index
    thisBlock = thisTask / sizeHalfBlock;
    index     = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    if (outcome==0){
        // keep (renormalise) the qubit=0 half, zero the qubit=1 half
        stateVecReal[index]=stateVecReal[index]*renorm;
        stateVecImag[index]=stateVecImag[index]*renorm;
        stateVecReal[index+sizeHalfBlock]=0;
        stateVecImag[index+sizeHalfBlock]=0;
    } else if (outcome==1){
        // keep (renormalise) the qubit=1 half, zero the qubit=0 half
        stateVecReal[index]=0;
        stateVecImag[index]=0;
        stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
        stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
    }
}
/*
 * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
 * else the state-vector will lose normalisation
 */
void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb)
{
    // one thread per amplitude pair
    const int threadsPerCUDABlock = 128;
    long long int numTasks = qureg.numAmpsPerChunk >> 1;
    int CUDABlocks = ceil(numTasks / (qreal) threadsPerCUDABlock);
    statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb);
}
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
    qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
    long long int part1, long long int part2, long long int part3,
    long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
    long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (scanInd >= numBasesToVisit) return;
    // scatter the contiguous scan index around the two (zeroed) target bits
    long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
    // renormalise desired outcome
    vecReal[base + desired] /= outcomeProb;
    vecImag[base + desired] /= outcomeProb;
    // kill undesired outcome
    vecReal[base + undesired] = 0;
    vecImag[base + undesired] = 0;
    // kill |..0..><..1..| states
    vecReal[base + colBit] = 0;
    vecImag[base + colBit] = 0;
    vecReal[base + rowBit] = 0;
    vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j.
 * One thread visits each |..0..><..0..| basis index (a quarter of all amps).
 */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) {
    int rowQubit = measureQubit + qureg.numQubitsRepresented;
    // BUG FIX: these masks were declared 'int', silently truncating the 1LL
    // shifts and overflowing (UB) once rowQubit >= 31; every sibling routine
    // (e.g. densmatr_oneQubitDegradeOffDiagonal) correctly uses long long int
    long long int colBit = 1LL << measureQubit;
    long long int rowBit = 1LL << rowQubit;
    long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
    // masks which scatter a contiguous scan index around the two fixed zero bits
    long long int part1 = colBit -1;
    long long int part2 = (rowBit >> 1) - colBit;
    long long int part3 = numBasesToVisit - (rowBit >> 1);
    // offsets (from the |..0..><..0..| index) of the renormalised / zeroed diagonals
    long long int desired, undesired;
    if (outcome == 0) {
        desired = 0;
        undesired = colBit | rowBit;
    } else {
        desired = colBit | rowBit;
        undesired = 0;
    }
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
    densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
        part1, part2, part3, rowBit, colBit, desired, undesired);
}
/** Mixes one amplitude: combine <- (1-otherProb)*combine + otherProb*other */
__global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
    long long int ind = blockIdx.x*blockDim.x + threadIdx.x;
    if (ind >= numAmpsToVisit) return;
    qreal* cRe = combineQureg.deviceStateVec.real;
    qreal* cIm = combineQureg.deviceStateVec.imag;
    // shrink the existing amplitude, then add the weighted other amplitude
    cRe[ind] *= 1-otherProb;
    cIm[ind] *= 1-otherProb;
    cRe[ind] += otherProb*otherQureg.deviceStateVec.real[ind];
    cIm[ind] += otherProb*otherQureg.deviceStateVec.imag[ind];
}
/** Forms combineQureg <- (1-otherProb)*combineQureg + otherProb*otherQureg, per amplitude */
void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
    // one thread per density-matrix amplitude
    const int threadsPerCUDABlock = 128;
    long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
    int CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        combineQureg, otherProb, otherQureg, numAmpsToVisit);
}
/** Called once for every 4 amplitudes in density matrix
 * Works by establishing the |..0..><..0..| state (for its given index) then
 * visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
 * From the brain of Simon Benjamin
 */
__global__ void densmatr_mixDephasingKernel(
    qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
    long long int part1, long long int part2, long long int part3,
    long long int colBit, long long int rowBit)
{
    long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (scanInd >= numAmpsToVisit) return;
    // scatter the contiguous scan index around the two (zeroed) target bits
    long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
    // scale both off-diagonal amplitudes of this base state by fac
    vecReal[ampInd + colBit] *= fac;
    vecImag[ampInd + colBit] *= fac;
    vecReal[ampInd + rowBit] *= fac;
    vecImag[ampInd + rowBit] *= fac;
}
/** Scales the target qubit's off-diagonal (|0><1| and |1><0|) amplitudes by dephFac */
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) {
    // one thread per |..0..><..0..| basis index, i.e. a quarter of all amplitudes
    long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
    // bit positions of the target qubit in the column (ket) and row (bra) halves
    long long int colBit = 1LL << targetQubit;
    long long int rowBit = 1LL << (targetQubit + qureg.numQubitsRepresented);
    // masks which scatter a contiguous scan index around the two fixed zero bits
    long long int part1 = colBit - 1;
    long long int part2 = (rowBit >> 1) - colBit;
    long long int part3 = numAmpsToVisit - (rowBit >> 1);
    const int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
        part1, part2, part3, colBit, rowBit);
}
/** Applies one-qubit dephasing by shrinking the off-diagonals by (1 - dephase) */
void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) {
    // scaling by 1 would be a no-op, so skip the kernel launch entirely
    if (dephase != 0)
        densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, 1 - dephase);
}
/** Called 12 times for every 16 amplitudes in density matrix
 * Each sums from the |..0..0..><..0..0..| index to visit either
 * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
 * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
 * From the brain of Simon Benjamin
 */
__global__ void densmatr_mixTwoQubitDephasingKernel(
    qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
    long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
    long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
    long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (outerInd >= numAmpsToVisit) return;
    // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
    // (5 = 0101 and 10 = 1010 are the diagonal patterns, which are NOT dephased)
    int meta = 1 + (outerInd/numBackgroundStates);
    if (meta > 4) meta++;
    if (meta > 9) meta++;
    // offset selecting which of the 12 off-diagonal patterns this thread scales
    long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
    long long int scanInd = outerInd % numBackgroundStates;
    // scatter the scan index around the four fixed (zeroed) target bits
    long long int stateInd = (
        shift +
        (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
    vecReal[stateInd] *= fac;
    vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
/** Applies two-qubit dephasing by scaling 12 of every 16 amplitudes by (1 - dephase) */
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
    if (dephase == 0)
        return;
    // assumes qubit2 > qubit1
    // bit positions of the qubits in the column (ket) and row (bra) halves
    int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
    int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
    long long int colBit1 = 1LL << qubit1;
    long long int rowBit1 = 1LL << rowQubit1;
    long long int colBit2 = 1LL << qubit2;
    long long int rowBit2 = 1LL << rowQubit2;
    // masks which scatter a contiguous scan index around the four fixed zero bits
    long long int part1 = colBit1 - 1;
    long long int part2 = (colBit2 >> 1) - colBit1;
    long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
    long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
    long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
    qreal dephFac = 1 - dephase;
    // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
    long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
    // 12 of these states experience dephasing
    long long int numAmpsToVisit = 12 * numBackgroundStates;
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixTwoQubitDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
        part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like mixDephasing but modifies every other element, and elements are averaged in pairs:
 * the |0><0| and |1><1| diagonal amplitudes of each base state are pulled toward their mean.
 */
__global__ void densmatr_mixDepolarisingKernel(
    qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
    long long int part1, long long int part2, long long int part3,
    long long int bothBits)
{
    long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (scanInd >= numAmpsToVisit) return;
    // baseInd is the |..0..><..0..| amplitude, targetInd its |..1..><..1..| partner
    long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
    long long int targetInd = baseInd + bothBits;
    // weighted average of the pair, computed before either element is modified
    qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
    qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
    vecReal[baseInd]   *= 1 - depolLevel;
    vecImag[baseInd]   *= 1 - depolLevel;
    vecReal[targetInd] *= 1 - depolLevel;
    vecImag[targetInd] *= 1 - depolLevel;
    vecReal[baseInd]   += realAvDepol;
    vecImag[baseInd]   += imagAvDepol;
    vecReal[targetInd] += realAvDepol;
    vecImag[targetInd] += imagAvDepol;
}
/** Amplitude damping on the diagonal pair: transfers a 'damping' fraction of the
 * |..1..><..1..| amplitude (at baseInd + bothBits) onto |..0..><..0..| (at baseInd).
 * The off-diagonals are scaled separately by the caller (see densmatr_mixDamping).
 */
__global__ void densmatr_mixDampingKernel(
    qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
    long long int part1, long long int part2, long long int part3,
    long long int bothBits)
{
    long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (scanInd >= numAmpsToVisit) return;
    // baseInd is the |..0..><..0..| amplitude, targetInd its |..1..><..1..| partner
    long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
    long long int targetInd = baseInd + bothBits;
    // portion of the excited population moved down, read before modification
    qreal realAvDepol = damping * ( vecReal[targetInd]);
    qreal imagAvDepol = damping * ( vecImag[targetInd]);
    vecReal[targetInd] *= 1 - damping;
    vecImag[targetInd] *= 1 - damping;
    vecReal[baseInd]   += realAvDepol;
    vecImag[baseInd]   += imagAvDepol;
}
/** Applies one-qubit depolarising noise: dephases the off-diagonals, then
 * averages the target qubit's diagonal amplitude pairs.
 */
void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) {
    if (depolLevel == 0)
        return;
    // the off-diagonal scaling is identical to dephasing at the same level
    densmatr_mixDephasing(qureg, targetQubit, depolLevel);
    // one thread per |..0..><..0..| basis index, i.e. a quarter of all amplitudes
    long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
    long long int colBit = 1LL << targetQubit;
    long long int rowBit = 1LL << (targetQubit + qureg.numQubitsRepresented);
    long long int bothBits = colBit | rowBit;
    // masks which scatter a contiguous scan index around the two fixed zero bits
    long long int part1 = colBit - 1;
    long long int part2 = (rowBit >> 1) - colBit;
    long long int part3 = numAmpsToVisit - (rowBit >> 1);
    const int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
        part1, part2, part3, bothBits);
}
/** Applies one-qubit amplitude damping: shrinks the off-diagonals by
 * sqrt(1-damping), then moves population from |1><1| to |0><0|.
 */
void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) {
    if (damping == 0)
        return;
    // off-diagonals decay by the square root of the population transfer factor
    qreal dephase = sqrt(1-damping);
    densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase);
    // one thread per |..0..><..0..| basis index, i.e. a quarter of all amplitudes
    long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
    long long int colBit = 1LL << targetQubit;
    long long int rowBit = 1LL << (targetQubit + qureg.numQubitsRepresented);
    long long int bothBits = colBit | rowBit;
    // masks which scatter a contiguous scan index around the two fixed zero bits
    long long int part1 = colBit - 1;
    long long int part2 = (rowBit >> 1) - colBit;
    long long int part3 = numAmpsToVisit - (rowBit >> 1);
    const int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixDampingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
        part1, part2, part3, bothBits);
}
/** Called once for every 16 amplitudes; averages the four diagonal amplitudes
 * |00><00|, |01><01|, |10><10|, |11><11| of each base state toward their mean.
 */
__global__ void densmatr_mixTwoQubitDepolarisingKernel(
    qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
    long long int part1, long long int part2, long long int part3,
    long long int part4, long long int part5,
    long long int rowCol1, long long int rowCol2)
{
    long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (scanInd >= numAmpsToVisit) return;
    // index of |..0..0..><..0..0|
    long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
    long long int ind01 = ind00 + rowCol1;
    long long int ind10 = ind00 + rowCol2;
    long long int ind11 = ind00 + rowCol1 + rowCol2;
    // weighted average of all four diagonals, computed before any is modified
    qreal realAvDepol = depolLevel * 0.25 * (
        vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
    qreal imagAvDepol = depolLevel * 0.25 * (
        vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
    qreal retain = 1 - depolLevel;
    vecReal[ind00] *= retain; vecImag[ind00] *= retain;
    vecReal[ind01] *= retain; vecImag[ind01] *= retain;
    vecReal[ind10] *= retain; vecImag[ind10] *= retain;
    vecReal[ind11] *= retain; vecImag[ind11] *= retain;
    vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
    vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
    vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
    vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
/** Applies two-qubit depolarising noise: dephases the off-diagonals, then
 * averages the four diagonal amplitudes of each base state.
 */
void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
    if (depolLevel == 0)
        return;
    // assumes qubit2 > qubit1
    densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel);
    // bit positions of both qubits in the column (ket) and row (bra) halves
    int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
    int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
    long long int colBit1 = 1LL << qubit1;
    long long int rowBit1 = 1LL << rowQubit1;
    long long int colBit2 = 1LL << qubit2;
    long long int rowBit2 = 1LL << rowQubit2;
    // combined ket+bra masks, used to step between the four diagonal amplitudes
    long long int rowCol1 = colBit1 | rowBit1;
    long long int rowCol2 = colBit2 | rowBit2;
    // one thread per |..0..0..><..0..0..| base state
    long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
    // masks which scatter a contiguous scan index around the four fixed zero bits
    long long int part1 = colBit1 - 1;
    long long int part2 = (colBit2 >> 1) - colBit1;
    long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
    long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
    long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    densmatr_mixTwoQubitDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
        depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
        part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
/** Computes, per amplitude, out <- facOut*out + fac1*qureg1 + fac2*qureg2
 * using complex arithmetic; qureg1's chunk length dictates the range.
 */
__global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
    if (ampInd >= numAmpsToVisit) return;
    // unpack the device arrays of all three registers
    qreal *vecRe1 = qureg1.deviceStateVec.real;
    qreal *vecIm1 = qureg1.deviceStateVec.imag;
    qreal *vecRe2 = qureg2.deviceStateVec.real;
    qreal *vecIm2 = qureg2.deviceStateVec.imag;
    qreal *vecReOut = out.deviceStateVec.real;
    qreal *vecImOut = out.deviceStateVec.imag;
    // unpack the complex weights
    qreal facRe1 = fac1.real;
    qreal facIm1 = fac1.imag;
    qreal facRe2 = fac2.real;
    qreal facIm2 = fac2.imag;
    qreal facReOut = facOut.real;
    qreal facImOut = facOut.imag;
    qreal re1,im1, re2,im2, reOut,imOut;
    long long int index = ampInd;
    // read all inputs before overwriting out (out may alias qureg1/qureg2)
    re1 = vecRe1[index]; im1 = vecIm1[index];
    re2 = vecRe2[index]; im2 = vecIm2[index];
    reOut = vecReOut[index];
    imOut = vecImOut[index];
    // complex multiply-accumulate of the three weighted terms
    vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2);
    vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2);
}
/** Launches the per-amplitude weighted sum out <- facOut*out + fac1*qureg1 + fac2*qureg2 */
void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) {
    // one thread per amplitude; qureg1's chunk length dictates the range
    const int threadsPerCUDABlock = 128;
    long long int numAmpsToVisit = qureg1.numAmpsPerChunk;
    int CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
    statevec_setWeightedQuregKernel<<<CUDABlocks, threadsPerCUDABlock>>>(fac1, qureg1, fac2, qureg2, facOut, out);
}
/** Multiplies each state-vector amplitude by the corresponding diagonal-operator element */
__global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
    // each thread modifies one value; a wasteful and inefficient strategy
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= qureg.numAmpsPerChunk) return;
    qreal* stateRe = qureg.deviceStateVec.real;
    qreal* stateIm = qureg.deviceStateVec.imag;
    // load both complex factors before writing back in place
    qreal stateElemRe = stateRe[ampInd];
    qreal stateElemIm = stateIm[ampInd];
    qreal opElemRe = op.deviceOperator.real[ampInd];
    qreal opElemIm = op.deviceOperator.imag[ampInd];
    // (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
    stateRe[ampInd] = stateElemRe*opElemRe - stateElemIm*opElemIm;
    stateIm[ampInd] = stateElemRe*opElemIm + stateElemIm*opElemRe;
}
/** Launches one thread per state-vector amplitude to apply the diagonal operator */
void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op)
{
    const int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerCUDABlock);
    statevec_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op);
}
/** Multiplies each density-matrix amplitude by the diagonal-operator element at
 * (ampIndex mod 2^numQubits), i.e. the operator repeats along the flattened matrix.
 */
__global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) {
    // each thread modifies one value; a wasteful and inefficient strategy
    long long int numTasks = qureg.numAmpsPerChunk;
    long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask >= numTasks) return;
    qreal* stateRe = qureg.deviceStateVec.real;
    qreal* stateIm = qureg.deviceStateVec.imag;
    qreal* opRe = op.deviceOperator.real;
    qreal* opIm = op.deviceOperator.imag;
    // BUG FIX: compute the operator dimension in 64-bit; the previous
    // 'int opDim = (1 << op.numQubits)' truncated the value and is undefined
    // behaviour for numQubits >= 31
    long long int opDim = (1LL << op.numQubits);
    qreal a = stateRe[thisTask];
    qreal b = stateIm[thisTask];
    // the operator index wraps every opDim amplitudes
    qreal c = opRe[thisTask % opDim];
    qreal d = opIm[thisTask % opDim];
    // (a + b i)(c + d i) = (a c - b d) + i (a d + b c)
    stateRe[thisTask] = a*c - b*d;
    stateIm[thisTask] = a*d + b*c;
}
/** Launches one thread per density-matrix amplitude to apply the diagonal operator */
void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) {
    const int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerCUDABlock);
    densmatr_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op);
}
/** computes either a real or imag term of |vec_i|^2 op_i, then block-reduces
 * the terms into reducedArray (one partial sum per CUDA block)
 */
__global__ void statevec_calcExpecDiagonalOpKernel(
    int getRealComp,
    qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag,
    long long int numTermsToSum, qreal* reducedArray)
{
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= numTermsToSum) return;
    // probability |vec_i|^2 of this amplitude
    qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
    // choose whether to calculate the real or imaginary term of the expec term
    qreal expecVal;
    if (getRealComp)
        expecVal = vecAbs * opReal[index];
    else
        expecVal = vecAbs * opImag[index];
    // array of each thread's collected sum term, to be summed
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = expecVal;
    __syncthreads();
    // every second thread reduces
    if (threadIdx.x<blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes <vec| op |vec> for a diagonal operator, as a complex number, by
 * running the multi-level GPU reduction twice (real then imaginary component).
 */
Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
    /* @TODO: remove all this reduction boilerplate from QuEST GPU
     * (e.g. a func which accepts a pointer to do every-value reduction?)
     */
    qreal expecReal, expecImag;
    int getRealComp;
    long long int numValuesToReduce;
    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel;
    int firstTime;
    // compute real component of inner product
    getRealComp = 1;
    numValuesToReduce = qureg.numAmpsPerChunk;
    maxReducedPerLevel = REDUCE_SHARED_SIZE;
    firstTime = 1;
    while (numValuesToReduce > 1) {
        // fewer values than one full block: one partially-filled block suffices
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA blocks (size limited by shared memory)
        else {
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
        if (firstTime) {
            // first level: compute per-amplitude terms and block-reduce them
            statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                getRealComp,
                qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
                op.deviceOperator.real, op.deviceOperator.imag,
                numValuesToReduce,
                qureg.firstLevelReduction);
            firstTime = 0;
        } else {
            // subsequent levels: reduce the previous level's block sums,
            // ping-ponging between the two reduction buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }
    cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    // compute imag component of inner product
    getRealComp = 0;
    numValuesToReduce = qureg.numAmpsPerChunk;
    maxReducedPerLevel = REDUCE_SHARED_SIZE;
    firstTime = 1;
    while (numValuesToReduce > 1) {
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        else {
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
        if (firstTime) {
            statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                getRealComp,
                qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
                op.deviceOperator.real, op.deviceOperator.imag,
                numValuesToReduce,
                qureg.firstLevelReduction);
            firstTime = 0;
        } else {
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }
    cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    // return complex
    Complex expecVal;
    expecVal.real = expecReal;
    expecVal.imag = expecImag;
    return expecVal;
}
/** Computes either a real or imag term of matr_ii op_i for diagonal density-matrix
 * elements, writing 0 for off-diagonal threads, then block-reduces into reducedArray.
 */
__global__ void densmatr_calcExpecDiagonalOpKernel(
    int getRealComp,
    qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag,
    int numQubits, long long int numTermsToSum, qreal* reducedArray)
{
    /** if the thread represents a diagonal op, then it computes either a
     * real or imag term of matr_{ii} op_i. Otherwise, it writes a 0 to the
     * reduction array
     */
    // index will identy one of the 2^Q diagonals to be summed
    long long int matInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (matInd >= numTermsToSum) return;
    // flattened diagonal elements are diagSpacing = 2^numQubits + 1 apart
    long long int diagSpacing = (1LL << numQubits) + 1LL;
    int isDiag = ((matInd % diagSpacing) == 0);
    long long int opInd = matInd / diagSpacing;
    qreal val = 0;
    if (isDiag) {
        qreal matRe = matReal[matInd];
        qreal matIm = matImag[matInd];
        qreal opRe = opReal[opInd];
        qreal opIm = opImag[opInd];
        // (matRe + matIm i)(opRe + opIm i) =
        // (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe)
        if (getRealComp)
            val = matRe * opRe - matIm * opIm;
        else
            val = matRe * opIm + matIm * opRe;
    }
    // array of each thread's collected sum term, to be summed
    extern __shared__ qreal tempReductionArray[];
    tempReductionArray[threadIdx.x] = val;
    __syncthreads();
    // every second thread reduces
    if (threadIdx.x<blockDim.x/2)
        reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes Tr(rho op) for a diagonal operator, as a complex number, by
 * running the multi-level GPU reduction twice (real then imaginary component).
 */
Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) {
    /* @TODO: remove all this reduction boilerplate from QuEST GPU
     * (e.g. a func which accepts a pointer to do every-value reduction?)
     */
    qreal expecReal, expecImag;
    int getRealComp;
    long long int numValuesToReduce;
    int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
    int maxReducedPerLevel;
    int firstTime;
    // compute real component of inner product
    getRealComp = 1;
    numValuesToReduce = qureg.numAmpsPerChunk;
    maxReducedPerLevel = REDUCE_SHARED_SIZE;
    firstTime = 1;
    while (numValuesToReduce > 1) {
        // fewer values than one full block: one partially-filled block suffices
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        // otherwise use only full CUDA blocks (size limited by shared memory)
        else {
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
        if (firstTime) {
            // first level: compute per-element terms and block-reduce them
            densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                getRealComp,
                qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
                op.deviceOperator.real, op.deviceOperator.imag,
                op.numQubits, numValuesToReduce,
                qureg.firstLevelReduction);
            firstTime = 0;
        } else {
            // subsequent levels: reduce the previous level's block sums,
            // ping-ponging between the two reduction buffers
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }
    cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    // compute imag component of inner product
    getRealComp = 0;
    numValuesToReduce = qureg.numAmpsPerChunk;
    maxReducedPerLevel = REDUCE_SHARED_SIZE;
    firstTime = 1;
    while (numValuesToReduce > 1) {
        if (numValuesToReduce < maxReducedPerLevel) {
            valuesPerCUDABlock = numValuesToReduce;
            numCUDABlocks = 1;
        }
        else {
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
        }
        sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
        if (firstTime) {
            densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
                getRealComp,
                qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
                op.deviceOperator.real, op.deviceOperator.imag,
                op.numQubits, numValuesToReduce,
                qureg.firstLevelReduction);
            firstTime = 0;
        } else {
            cudaDeviceSynchronize();
            copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
                qureg.firstLevelReduction,
                qureg.secondLevelReduction, valuesPerCUDABlock);
            cudaDeviceSynchronize();
            swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
        }
        numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
    }
    cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
    // return complex
    Complex expecVal;
    expecVal.real = expecReal;
    expecVal.imag = expecImag;
    return expecVal;
}
/** Overwrites numElems elements of the diagonal operator, starting at startInd,
 * in both the host (RAM) and device (VRAM) copies so they stay consistent.
 */
void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) {
    size_t numBytes = numElems * sizeof(qreal);
    // update both RAM and VRAM, for consistency
    memcpy(&op.real[startInd], real, numBytes);
    memcpy(&op.imag[startInd], imag, numBytes);
    // block until all prior device work completes before overwriting the device copy
    cudaDeviceSynchronize();
    cudaMemcpy(
        op.deviceOperator.real + startInd, real,
        numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(
        op.deviceOperator.imag + startInd, imag,
        numBytes, cudaMemcpyHostToDevice);
}
/** Multiplies each amplitude by exp(i phase), where phase is a polynomial
 * (sum of coeffs[t] * phaseInd^exponents[t]) of the integer encoded by {qubits}
 * in the amplitude's index, unless that integer appears in {overrideInds},
 * in which case the matching overridePhases value is used instead.
 */
__global__ void statevec_applyPhaseFuncOverridesKernel(
    Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
    qreal* coeffs, qreal* exponents, int numTerms,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    int conj
) {
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=qureg.numAmpsPerChunk) return;
    // determine global amplitude index (non-distributed, so it's just local index)
    long long int globalAmpInd = index;
    // determine phase index of {qubits}
    long long int phaseInd = 0LL;
    if (encoding == UNSIGNED) {
        for (int q=0; q<numQubits; q++)
            phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
    }
    else if (encoding == TWOS_COMPLEMENT) {
        for (int q=0; q<numQubits-1; q++) // use final qubit to indicate sign
            phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd);
        if (extractBit(qubits[numQubits-1], globalAmpInd) == 1)
            phaseInd -= (1LL << (numQubits-1));
    }
    // determine if this phase index has an overriden value (i < numOverrides)
    int i;
    for (i=0; i<numOverrides; i++)
        if (phaseInd == overrideInds[i])
            break;
    // determine phase from {coeffs}, {exponents} (unless overriden)
    qreal phase = 0;
    if (i < numOverrides)
        phase = overridePhases[i];
    else
        // NOTE: pow() semantics apply, e.g. a negative phaseInd with a
        // fractional exponent yields NaN
        for (int t=0; t<numTerms; t++)
            phase += coeffs[t] * pow(phaseInd, exponents[t]);
    // negate phase to conjugate operator
    if (conj)
        phase *= -1;
    // modify amp to amp * exp(i phase)
    qreal c = cos(phase);
    qreal s = sin(phase);
    qreal re = qureg.deviceStateVec.real[index];
    qreal im = qureg.deviceStateVec.imag[index];
    // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
    qureg.deviceStateVec.real[index] = re*c - im*s;
    qureg.deviceStateVec.imag[index] = re*s + im*c;
}
/** Host wrapper: copies the phase-function description ({qubits}, {coeffs},
 * {exponents}, {overrideInds}, {overridePhases}) to device memory, launches
 * the per-amplitude kernel, then frees the temporary device buffers.
 */
void statevec_applyPhaseFuncOverrides(
    Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding,
    qreal* coeffs, qreal* exponents, int numTerms,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    int conj
) {
    // allocate device space for global list of {qubits}, {coeffs}, {exponents}, {overrideInds} and {overridePhases}
    int* d_qubits;                          size_t mem_qubits = numQubits * sizeof *d_qubits;
    qreal* d_coeffs;                        size_t mem_terms = numTerms * sizeof *d_coeffs;
    qreal* d_exponents;                     // same element count/size as d_coeffs (mem_terms)
    long long int* d_overrideInds;          size_t mem_inds = numOverrides * sizeof *d_overrideInds;
    qreal* d_overridePhases;                size_t mem_phas = numOverrides * sizeof *d_overridePhases;
    cudaMalloc(&d_qubits, mem_qubits);      cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);
    cudaMalloc(&d_coeffs, mem_terms);       cudaMemcpy(d_coeffs, coeffs, mem_terms, cudaMemcpyHostToDevice);
    cudaMalloc(&d_exponents, mem_terms);    cudaMemcpy(d_exponents, exponents, mem_terms, cudaMemcpyHostToDevice);
    cudaMalloc(&d_overrideInds, mem_inds);  cudaMemcpy(d_overrideInds, overrideInds, mem_inds, cudaMemcpyHostToDevice);
    cudaMalloc(&d_overridePhases,mem_phas); cudaMemcpy(d_overridePhases, overridePhases, mem_phas, cudaMemcpyHostToDevice);
    // call kernel
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);
    statevec_applyPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
        qureg, d_qubits, numQubits, encoding,
        d_coeffs, d_exponents, numTerms,
        d_overrideInds, d_overridePhases, numOverrides,
        conj);
    // cleanup device memory
    cudaFree(d_qubits);
    cudaFree(d_coeffs);
    cudaFree(d_exponents);
    cudaFree(d_overrideInds);
    cudaFree(d_overridePhases);
}
/* Multiplies each amplitude by exp(i*phase), where phase is a sum, over all registers,
 * of exponential-polynomial terms evaluated at each register's encoded index value —
 * unless that tuple of register indices appears in the override list, in which case
 * the corresponding overridePhases entry is used directly.
 * One thread handles one local amplitude; phaseInds is a scratch array in global
 * memory, allocated by the host, holding numRegs indices per thread.
 */
__global__ void statevec_applyMultiVarPhaseFuncOverridesKernel(
    Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
    qreal* coeffs, qreal* exponents, int* numTermsPerReg,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    long long int *phaseInds,
    int conj
) {
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=qureg.numAmpsPerChunk) return;

    // determine global amplitude index (non-distributed, so it's just local index)
    long long int globalAmpInd = index;

    /*
     * each thread needs to write to a local:
     *      long long int phaseInds[numRegs];
     * but instead has access to the (global-memory) array phaseInds shared between
     * threads, addressed with the below stride and offset
     */
    size_t stride = gridDim.x*blockDim.x;
    size_t offset = blockIdx.x*blockDim.x + threadIdx.x;

    // determine the phase index encoded by each register's qubits
    int flatInd = 0;
    if (encoding == UNSIGNED) {
        for (int r=0; r<numRegs; r++) {
            phaseInds[r*stride+offset] = 0LL;
            // accumulate the unsigned binary value of the register's qubits
            for (int q=0; q<numQubitsPerReg[r]; q++)
                phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
        }
    }
    else if (encoding == TWOS_COMPLEMENT) {
        for (int r=0; r<numRegs; r++) {
            phaseInds[r*stride+offset] = 0LL;
            // all but the final qubit contribute their usual positive weight
            for (int q=0; q<numQubitsPerReg[r]-1; q++)
                phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
            // use final qubit to indicate sign
            if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
                phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
        }
    }

    // determine if this phase index has an overriden value (i < numOverrides on exit iff so)
    int i;
    for (i=0; i<numOverrides; i++) {
        // an override matches only if every register's index agrees
        int found = 1;
        for (int r=0; r<numRegs; r++) {
            if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
                found = 0;
                break;
            }
        }
        if (found)
            break;
    }

    // compute the phase (unless overriden)
    qreal phase = 0;
    if (i < numOverrides)
        phase = overridePhases[i];
    else {
        // sum coeff * ind^exponent over every term of every register
        flatInd = 0;
        for (int r=0; r<numRegs; r++) {
            for (int t=0; t<numTermsPerReg[r]; t++) {
                phase += coeffs[flatInd] * pow(phaseInds[r*stride+offset], exponents[flatInd]);
                flatInd++;
            }
        }
    }

    // negate phase to conjugate operator
    if (conj)
        phase *= -1;

    // modify amp to amp * exp(i phase)
    qreal c = cos(phase);
    qreal s = sin(phase);
    qreal re = qureg.deviceStateVec.real[index];
    qreal im = qureg.deviceStateVec.imag[index];

    // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
    qureg.deviceStateVec.real[index] = re*c - im*s;
    qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyMultiVarPhaseFuncOverrides(
    Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
    qreal* coeffs, qreal* exponents, int* numTermsPerReg,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    int conj
) {
    // Copies every host argument array into GPU memory, allocates a per-thread
    // scratch array for the register phase indices, launches the multi-variable
    // phase-function kernel, then frees all device buffers.

    // byte sizes of the fixed-length argument arrays
    size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
    size_t mem_numTermsPerReg = numRegs * sizeof *numTermsPerReg;
    size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
    size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;

    // the flattened {qubits}, {coeffs} and {exponents} arrays have register-dependent lengths
    size_t mem_qubits = 0;
    size_t mem_coeffs = 0;
    size_t mem_exponents = 0;
    for (int r=0; r<numRegs; r++) {
        mem_qubits += numQubitsPerReg[r] * sizeof *qubits;
        mem_coeffs += numTermsPerReg[r] * sizeof *coeffs;
        mem_exponents += numTermsPerReg[r] * sizeof *exponents;
    }

    // allocate and populate each device clone in turn
    int* d_qubits;
    cudaMalloc(&d_qubits, mem_qubits);
    cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);

    qreal* d_coeffs;
    cudaMalloc(&d_coeffs, mem_coeffs);
    cudaMemcpy(d_coeffs, coeffs, mem_coeffs, cudaMemcpyHostToDevice);

    qreal* d_exponents;
    cudaMalloc(&d_exponents, mem_exponents);
    cudaMemcpy(d_exponents, exponents, mem_exponents, cudaMemcpyHostToDevice);

    int* d_numQubitsPerReg;
    cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
    cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice);

    int* d_numTermsPerReg;
    cudaMalloc(&d_numTermsPerReg, mem_numTermsPerReg);
    cudaMemcpy(d_numTermsPerReg, numTermsPerReg, mem_numTermsPerReg, cudaMemcpyHostToDevice);

    long long int* d_overrideInds;
    cudaMalloc(&d_overrideInds, mem_overrideInds);
    cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice);

    qreal* d_overridePhases;
    cudaMalloc(&d_overridePhases, mem_overridePhases);
    cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice);

    // one thread per local amplitude
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);

    // every thread records numRegs phase indices in this global-memory scratch space
    long long int *d_phaseInds;
    size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
    cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);

    // call kernel
    statevec_applyMultiVarPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
        qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
        d_coeffs, d_exponents, d_numTermsPerReg,
        d_overrideInds, d_overridePhases, numOverrides,
        d_phaseInds,
        conj);

    // free device memory
    cudaFree(d_qubits);
    cudaFree(d_coeffs);
    cudaFree(d_exponents);
    cudaFree(d_numQubitsPerReg);
    cudaFree(d_numTermsPerReg);
    cudaFree(d_overrideInds);
    cudaFree(d_overridePhases);
    cudaFree(d_phaseInds);
}
/* Multiplies each amplitude by exp(i*phase), where phase is a named function
 * (norm, product or Euclidean-distance family, optionally scaled, inverted and/or
 * shifted via params) of the indices encoded in the registers — unless that tuple
 * of register indices appears in the override list.
 * One thread handles one local amplitude; phaseInds is a scratch array in global
 * memory, allocated by the host, holding numRegs indices per thread.
 */
__global__ void statevec_applyParamNamedPhaseFuncOverridesKernel(
    Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
    enum phaseFunc phaseFuncName, qreal* params, int numParams,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    long long int* phaseInds,
    int conj
) {
    long long int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=qureg.numAmpsPerChunk) return;

    // determine global amplitude index (non-distributed, so it's just local index)
    long long int globalAmpInd = index;

    /*
     * each thread needs to write to a local:
     *      long long int phaseInds[numRegs];
     * but instead has access to the (global-memory) array phaseInds shared between
     * threads, addressed with the below stride and offset
     */
    size_t stride = gridDim.x*blockDim.x;
    size_t offset = blockIdx.x*blockDim.x + threadIdx.x;

    // determine the phase index encoded by each register's qubits
    if (encoding == UNSIGNED) {
        int flatInd = 0;
        for (int r=0; r<numRegs; r++) {
            phaseInds[r*stride+offset] = 0LL;
            for (int q=0; q<numQubitsPerReg[r]; q++)
                phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
        }
    }
    else if (encoding == TWOS_COMPLEMENT) {
        int flatInd = 0;
        for (int r=0; r<numRegs; r++) {
            phaseInds[r*stride+offset] = 0LL;
            for (int q=0; q<numQubitsPerReg[r]-1; q++)
                phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd);
            // use final qubit to indicate sign
            if (extractBit(qubits[flatInd++], globalAmpInd) == 1)
                phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1));
        }
    }

    // determine if this phase index has an overriden value (i < numOverrides on exit iff so)
    int i;
    for (i=0; i<numOverrides; i++) {
        // an override matches only if every register's index agrees
        int found = 1;
        for (int r=0; r<numRegs; r++) {
            if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) {
                found = 0;
                break;
            }
        }
        if (found)
            break;
    }

    // compute the phase (unless overriden)
    qreal phase = 0;
    if (i < numOverrides)
        phase = overridePhases[i];
    else {
        // compute norm related phases
        if (phaseFuncName == NORM || phaseFuncName == INVERSE_NORM ||
            phaseFuncName == SCALED_NORM || phaseFuncName == SCALED_INVERSE_NORM ||
            phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
            qreal norm = 0;
            if (phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) {
                // shifted variant: each register index is offset by params[2+r]
                for (int r=0; r<numRegs; r++) {
                    qreal dif = phaseInds[r*stride+offset] - params[2+r];
                    norm += dif*dif;
                }
            }
            else
                for (int r=0; r<numRegs; r++)
                    norm += phaseInds[r*stride+offset]*phaseInds[r*stride+offset];
            norm = sqrt(norm);

            if (phaseFuncName == NORM)
                phase = norm;
            else if (phaseFuncName == INVERSE_NORM)
                phase = (norm == 0.)? params[0] : 1/norm; // smallest non-zero norm is 1
            else if (phaseFuncName == SCALED_NORM)
                phase = params[0] * norm;
            else if (phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM)
                phase = (norm <= REAL_EPS)? params[1] : params[0] / norm; // unless shifted closer to zero
        }
        // compute product related phases
        else if (phaseFuncName == PRODUCT || phaseFuncName == INVERSE_PRODUCT ||
                 phaseFuncName == SCALED_PRODUCT || phaseFuncName == SCALED_INVERSE_PRODUCT) {

            qreal prod = 1;
            for (int r=0; r<numRegs; r++)
                prod *= phaseInds[r*stride+offset];

            if (phaseFuncName == PRODUCT)
                phase = prod;
            else if (phaseFuncName == INVERSE_PRODUCT)
                phase = (prod == 0.)? params[0] : 1/prod; // smallest non-zero prod is +- 1
            else if (phaseFuncName == SCALED_PRODUCT)
                phase = params[0] * prod;
            else if (phaseFuncName == SCALED_INVERSE_PRODUCT)
                phase = (prod == 0.)? params[1] : params[0] / prod;
        }
        // compute Euclidean distance related phases
        else if (phaseFuncName == DISTANCE || phaseFuncName == INVERSE_DISTANCE ||
                 phaseFuncName == SCALED_DISTANCE || phaseFuncName == SCALED_INVERSE_DISTANCE ||
                 phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {

            // registers are consumed in consecutive pairs (assumes numRegs is even)
            qreal dist = 0;
            if (phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) {
                // shifted variant: each pair difference is offset by params[2+r/2]
                for (int r=0; r<numRegs; r+=2) {
                    qreal dif = (phaseInds[r*stride+offset] - phaseInds[(r+1)*stride+offset] - params[2+r/2]);
                    dist += dif*dif;
                }
            }
            else
                for (int r=0; r<numRegs; r+=2) {
                    qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset]);
                    dist += dif*dif;
                }
            dist = sqrt(dist);

            if (phaseFuncName == DISTANCE)
                phase = dist;
            else if (phaseFuncName == INVERSE_DISTANCE)
                phase = (dist == 0.)? params[0] : 1/dist; // smallest non-zero dist is 1
            else if (phaseFuncName == SCALED_DISTANCE)
                phase = params[0] * dist;
            else if (phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE)
                phase = (dist <= REAL_EPS)? params[1] : params[0] / dist; // unless shifted closer
        }
    }

    // negate phase to conjugate operator
    if (conj)
        phase *= -1;

    // modify amp to amp * exp(i phase)
    qreal c = cos(phase);
    qreal s = sin(phase);
    qreal re = qureg.deviceStateVec.real[index];
    qreal im = qureg.deviceStateVec.imag[index];

    // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)}
    qureg.deviceStateVec.real[index] = re*c - im*s;
    qureg.deviceStateVec.imag[index] = re*s + im*c;
}
void statevec_applyParamNamedPhaseFuncOverrides(
    Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding,
    enum phaseFunc phaseFuncName, qreal* params, int numParams,
    long long int* overrideInds, qreal* overridePhases, int numOverrides,
    int conj
) {
    // Copies the host argument arrays into GPU memory (params only when non-empty),
    // allocates per-thread scratch space for the register phase indices, launches
    // the named-phase-function kernel, then releases all device buffers.

    // byte sizes of the fixed-length argument arrays
    size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg;
    size_t mem_overridePhases = numOverrides * sizeof *overridePhases;
    size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds;
    size_t mem_params = numParams * sizeof *params;

    // the flattened {qubits} array's length depends on every register's width
    size_t mem_qubits = 0;
    for (int r=0; r<numRegs; r++)
        mem_qubits += numQubitsPerReg[r] * sizeof *qubits;

    // allocate and populate each device clone in turn
    int* d_qubits;
    cudaMalloc(&d_qubits, mem_qubits);
    cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);

    int* d_numQubitsPerReg;
    cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg);
    cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice);

    long long int* d_overrideInds;
    cudaMalloc(&d_overrideInds, mem_overrideInds);
    cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice);

    qreal* d_overridePhases;
    cudaMalloc(&d_overridePhases, mem_overridePhases);
    cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice);

    // some named functions take no parameters, in which case no buffer is needed
    qreal* d_params = NULL;
    if (numParams > 0) {
        cudaMalloc(&d_params, mem_params);
        cudaMemcpy(d_params, params, mem_params, cudaMemcpyHostToDevice);
    }

    // one thread per local amplitude
    int threadsPerCUDABlock = 128;
    int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock);

    // every thread records numRegs phase indices in this global-memory scratch space
    long long int *d_phaseInds;
    size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks;
    cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds);

    // call kernel
    statevec_applyParamNamedPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>(
        qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding,
        phaseFuncName, d_params, numParams,
        d_overrideInds, d_overridePhases, numOverrides,
        d_phaseInds,
        conj);

    // free device memory
    cudaFree(d_qubits);
    cudaFree(d_numQubitsPerReg);
    cudaFree(d_overrideInds);
    cudaFree(d_overridePhases);
    cudaFree(d_phaseInds);
    if (numParams > 0)
        cudaFree(d_params);
}
void seedQuEST(QuESTEnv *env, unsigned long int *seedArray, int numSeeds) {

    // discard any previously stored seeds
    if (env->seeds != NULL)
        free(env->seeds);

    // persist a copy of the new seeds on the heap
    env->seeds = (unsigned long int*) malloc(numSeeds * sizeof *(env->seeds));
    env->numSeeds = numSeeds;
    for (int s=0; s<numSeeds; s++)
        (env->seeds)[s] = seedArray[s];

    // seed the Mersenne Twister with the full key list
    init_by_array(seedArray, numSeeds);
}
#ifdef __cplusplus
}
#endif | the_stack |
using namespace tvgutil;
#ifdef _MSC_VER
// Suppress some VC++ warnings that are produced when including the Thrust headers.
#pragma warning(disable:4244 4267)
#endif
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#ifdef _MSC_VER
// Reenable the suppressed warnings for the rest of the translation unit.
#pragma warning(default:4244 4267)
#endif
#include <orx/base/MemoryBlockFactory.h>
using namespace orx;
#include "ransac/shared/PreemptiveRansac_Shared.h"
//#################### LOCAL TYPES ####################
/**
 * \brief A Thrust predicate functor that deems a depth value valid iff it is strictly positive.
 */
struct ValidDepth
{
  /** Returns true iff the specified depth value is valid (i.e. strictly positive). */
  __device__ bool operator()(float x) const
  {
    return x > 0.0f;
  }
};
namespace grove {
//#################### CUDA KERNELS ####################
/**
 * \brief Computes the energy of each surviving pose candidate over the sampled inliers.
 *
 * One thread block is launched per candidate; the threads in the block cooperatively
 * sum the per-inlier energies and the final average is written to the candidate.
 */
__global__ void ck_compute_energies(const Keypoint3DColour *keypoints, const ScorePrediction *predictions, const int *inlierRasterIndices,
                                    uint32_t nbInliers, PoseCandidate *poseCandidates, int nbCandidates)
{
  const int tid = threadIdx.x;
  const int threadsPerBlock = blockDim.x;
  const int candidateIdx = blockIdx.x;

  if(candidateIdx >= nbCandidates)
  {
    // The candidate has been culled, so early out. Note that since we are using each thread block to
    // compute the energy for a single candidate, the entire block will return in this case, so the
    // __syncthreads() call later in the kernel is safe.
    return;
  }

  PoseCandidate& currentCandidate = poseCandidates[candidateIdx];

  // For each thread in the block, first compute the sum of the energies for a strided subset of the inliers.
  // In particular, thread tid in the block computes the sum of the energies for the inliers with array indices
  // tid + k * threadsPerBlock.
  float energySum = compute_energy_sum_for_inlier_subset(
    currentCandidate.cameraPose, keypoints, predictions, inlierRasterIndices, nbInliers, tid, threadsPerBlock
  );

  // Then, add up the sums computed by the individual threads to compute the overall energy for the candidate.
  // To do this, we perform an efficient, shuffle-based reduction as described in the following blog post:
  // https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler

  // Step 1: Sum the energies in each warp using downward shuffling, storing the result in the energySum variable of the first thread in the warp.
  for(int offset = warpSize / 2; offset > 0; offset /= 2)
#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
    energySum += __shfl_down_sync(0xFFFFFFFF, energySum, offset);
#else
    energySum += __shfl_down(energySum, offset);
#endif

  // Step 2: If this is the first thread in the warp, add the energy sum for the warp to the candidate's energy.
  if((threadIdx.x & (warpSize - 1)) == 0) atomicAdd(&currentCandidate.energy, energySum);

  // Step 3: Wait for all of the atomic adds to finish.
  __syncthreads();

  // Step 4: If this is the first thread in the entire block, compute the final energy for the candidate by dividing by the number of inliers.
  if(tid == 0) currentCandidate.energy = currentCandidate.energy / static_cast<float>(nbInliers);
}
/**
 * \brief Attempts to generate one pose candidate per thread, compacting the successes
 *        into the output array via an atomically-incremented counter.
 *
 * Note: the compaction means the order of candidates in the output array is
 * non-deterministic across runs.
 */
template <typename RNG>
__global__ void ck_generate_pose_candidates(const Keypoint3DColour *keypoints, const ScorePrediction *predictions,
                                            const Vector2i imgSize, RNG *rngs, PoseCandidate *poseCandidates, int *nbPoseCandidates,
                                            uint32_t maxCandidateGenerationIterations, uint32_t maxPoseCandidates,
                                            bool useAllModesPerLeafInPoseHypothesisGeneration, bool checkMinDistanceBetweenSampledModes,
                                            float minDistanceBetweenSampledModes, bool checkRigidTransformationConstraint,
                                            float translationErrorMaxForCorrectPose)
{
  const int candidateIdx = blockIdx.x * blockDim.x + threadIdx.x;

  // One thread per potential candidate; surplus threads do nothing.
  if(candidateIdx >= maxPoseCandidates) return;

  // Try to generate a valid pose candidate (each thread uses its own RNG).
  PoseCandidate candidate;
  bool valid = generate_pose_candidate(
    keypoints, predictions, imgSize, rngs[candidateIdx], candidate, maxCandidateGenerationIterations, useAllModesPerLeafInPoseHypothesisGeneration,
    checkMinDistanceBetweenSampledModes, minDistanceBetweenSampledModes, checkRigidTransformationConstraint, translationErrorMaxForCorrectPose
  );

  // If we succeed, grab a unique index in the output array and store the candidate into the corresponding array element.
  if(valid)
  {
    const int finalCandidateIdx = atomicAdd(nbPoseCandidates, 1);
    poseCandidates[finalCandidateIdx] = candidate;
  }
}
__global__ void ck_prepare_inliers_for_optimisation(const Keypoint3DColour *keypoints, const ScorePrediction *predictions, const int *inlierIndices, int nbInliers,
                                                    const PoseCandidate *poseCandidates, int nbPoseCandidates, float inlierThreshold, Vector4f *inlierCameraPoints,
                                                    Keypoint3DColourCluster *inlierModes)
{
  // One thread per (candidate, inlier) pair: the y grid dimension indexes the
  // candidate and the x dimension (blocks * threads) indexes the inlier.
  const int inlierIdx = blockIdx.x * blockDim.x + threadIdx.x;
  const int candidateIdx = blockIdx.y;

  // Threads that fall outside either range have nothing to do.
  if(candidateIdx >= nbPoseCandidates || inlierIdx >= nbInliers) return;

  prepare_inlier_for_optimisation(
    candidateIdx, inlierIdx, keypoints, predictions, inlierIndices, nbInliers, poseCandidates, inlierThreshold, inlierCameraPoints, inlierModes
  );
}
__global__ void ck_reset_candidate_energies(PoseCandidate *poseCandidates, int nbPoseCandidates)
{
  // Zero the energy of each candidate, one thread per candidate.
  const int candidateIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if(candidateIdx >= nbPoseCandidates) return;

  poseCandidates[candidateIdx].energy = 0.0f;
}
/**
 * \brief Attempts to sample up to ransacInliersPerIteration inlier keypoints, one per
 *        thread, compacting the successful raster indices via an atomic counter.
 */
template <bool useMask, typename RNG>
__global__ void ck_sample_inliers(const Keypoint3DColour *keypoints, const ScorePrediction *predictions, const Vector2i imgSize, RNG *rngs,
                                  int *inlierRasterIndices, int *nbInliers, uint32_t ransacInliersPerIteration, int *inliersMask = NULL)
{
  const uint32_t sampleIdx = blockIdx.x * blockDim.x + threadIdx.x;

  if(sampleIdx < ransacInliersPerIteration)
  {
    // Try to sample the raster index of a valid keypoint whose prediction has at least one modal cluster, using the mask if necessary.
    const int rasterIdx = sample_inlier<useMask>(keypoints, predictions, imgSize, rngs[sampleIdx], inliersMask);

    // If we succeed, grab a unique index in the output array and store the inlier raster index into the corresponding array element.
    if(rasterIdx >= 0)
    {
      const int arrayIdx = atomicAdd(nbInliers, 1);
      inlierRasterIndices[arrayIdx] = rasterIdx;
    }
  }
}
//#################### CONSTRUCTORS ####################
PreemptiveRansac_CUDA::PreemptiveRansac_CUDA(const SettingsContainer_CPtr& settings, const std::string& settingsNamespace)
: PreemptiveRansac(settings, settingsNamespace)
{
  // Grab the factory with which to allocate the memory blocks.
  MemoryBlockFactory& factory = MemoryBlockFactory::instance();

  // Single-element blocks that mirror device-side counters on the host.
  m_nbInliers_device = factory.make_block<int>(1);
  m_nbPoseCandidates_device = factory.make_block<int>(1);

  // One random number generator per potential pose candidate.
  m_rngs = factory.make_block<CUDARNG>(m_maxPoseCandidates);

  // Set the default random seed and initialise the RNGs accordingly.
  m_rngSeed = 42;
  init_random();
}
//#################### PUBLIC MEMBER FUNCTIONS ####################
uint32_t PreemptiveRansac_CUDA::count_valid_depths(const ORFloatImage *depthImage) const
{
const float *depths = depthImage->GetData(MEMORYDEVICE_CUDA);
const int pixelCount = depthImage->noDims.width * depthImage->noDims.height;
thrust::device_ptr<const float> depthsStart(depths);
thrust::device_ptr<const float> depthsEnd(depths + pixelCount);
return static_cast<uint32_t>(thrust::count_if(depthsStart, depthsEnd, ValidDepth()));
}
//#################### PROTECTED MEMBER FUNCTIONS ####################
void PreemptiveRansac_CUDA::compute_energies_and_sort()
{
  // The raster indices of the currently sampled inlier points.
  const int *inlierRasterIndices = m_inlierRasterIndicesBlock->GetData(MEMORYDEVICE_CUDA);

  const Keypoint3DColour *keypoints = m_keypointsImage->GetData(MEMORYDEVICE_CUDA);

  // The number of currently sampled inlier points (used to compute the energy).
  const uint32_t nbInliers = static_cast<uint32_t>(m_inlierRasterIndicesBlock->dataSize);

  // The number of currently "valid" pose candidates.
  const int nbPoseCandidates = static_cast<int>(m_poseCandidates->dataSize);

  PoseCandidate *poseCandidates = m_poseCandidates->GetData(MEMORYDEVICE_CUDA);
  const ScorePrediction *predictions = m_predictionsImage->GetData(MEMORYDEVICE_CUDA);

  // Reset the energies for all pose candidates.
  {
    dim3 blockSize(256);
    dim3 gridSize((nbPoseCandidates + blockSize.x - 1) / blockSize.x);
    ck_reset_candidate_energies<<<gridSize,blockSize>>>(poseCandidates, nbPoseCandidates);
    ORcudaKernelCheck;
  }

  // Compute the energies for all pose candidates.
  {
    // Launch one block per candidate (in this way, many blocks will exit immediately in the later stages of P-RANSAC).
    dim3 blockSize(128); // Threads to compute the energy for each candidate.
    dim3 gridSize(nbPoseCandidates);
    ck_compute_energies<<<gridSize,blockSize>>>(keypoints, predictions, inlierRasterIndices, nbInliers, poseCandidates, nbPoseCandidates);
    ORcudaKernelCheck;
  }

  // Sort the candidates into non-decreasing order of energy.
  thrust::device_ptr<PoseCandidate> candidatesStart(poseCandidates);
  thrust::device_ptr<PoseCandidate> candidatesEnd(poseCandidates + nbPoseCandidates);
  thrust::sort(candidatesStart, candidatesEnd);
}
void PreemptiveRansac_CUDA::generate_pose_candidates()
{
const Vector2i imgSize = m_keypointsImage->noDims;
const Keypoint3DColour *keypoints = m_keypointsImage->GetData(MEMORYDEVICE_CUDA);
PoseCandidate *poseCandidates = m_poseCandidates->GetData(MEMORYDEVICE_CUDA);
const ScorePrediction *predictions = m_predictionsImage->GetData(MEMORYDEVICE_CUDA);
CUDARNG *rngs = m_rngs->GetData(MEMORYDEVICE_CUDA);
// Reset the number of pose candidates (we do this on the device only at this stage, and update the corresponding host value once we are done generating).
int *nbPoseCandidates_device = m_nbPoseCandidates_device->GetData(MEMORYDEVICE_CUDA);
ORcudaSafeCall(cudaMemsetAsync(nbPoseCandidates_device, 0, sizeof(int)));
// Generate at most m_maxPoseCandidates new pose candidates.
dim3 blockSize(32);
dim3 gridSize((m_maxPoseCandidates + blockSize.x - 1) / blockSize.x);
ck_generate_pose_candidates<<<gridSize,blockSize>>>(
keypoints, predictions, imgSize, rngs, poseCandidates, nbPoseCandidates_device, m_maxCandidateGenerationIterations,
m_maxPoseCandidates, m_useAllModesPerLeafInPoseHypothesisGeneration, m_checkMinDistanceBetweenSampledModes,
m_minSquaredDistanceBetweenSampledModes, m_checkRigidTransformationConstraint, m_maxTranslationErrorForCorrectPose
);
ORcudaKernelCheck;
// Copy all relevant data back across to the host for use by the Kabsch algorithm.
m_poseCandidates->dataSize = m_nbPoseCandidates_device->GetElement(0, MEMORYDEVICE_CUDA);
m_poseCandidates->UpdateHostFromDevice();
// Run Kabsch on all the generated candidates to estimate the rigid transformations.
compute_candidate_poses_kabsch();
// Copy the computed rigid transformations back across to the device.
m_poseCandidates->UpdateDeviceFromHost();
}
void PreemptiveRansac_CUDA::prepare_inliers_for_optimisation()
{
Vector4f *inlierCameraPoints = m_poseOptimisationCameraPoints->GetData(MEMORYDEVICE_CUDA);
Keypoint3DColourCluster *inlierModes = m_poseOptimisationPredictedModes->GetData(MEMORYDEVICE_CUDA);
const int *inlierRasterIndices = m_inlierRasterIndicesBlock->GetData(MEMORYDEVICE_CUDA);
const Keypoint3DColour *keypoints = m_keypointsImage->GetData(MEMORYDEVICE_CUDA);
const uint32_t nbInliers = static_cast<uint32_t>(m_inlierRasterIndicesBlock->dataSize);
const int nbPoseCandidates = static_cast<int>(m_poseCandidates->dataSize);
const PoseCandidate *poseCandidates = m_poseCandidates->GetData(MEMORYDEVICE_CUDA);
const ScorePrediction *predictions = m_predictionsImage->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(256);
dim3 gridSize((nbInliers + blockSize.x - 1) / blockSize.x, nbPoseCandidates);
ck_prepare_inliers_for_optimisation<<<gridSize, blockSize>>>(
keypoints, predictions, inlierRasterIndices, nbInliers, poseCandidates, nbPoseCandidates,
m_poseOptimisationInlierThreshold, inlierCameraPoints, inlierModes
);
ORcudaKernelCheck;
// Compute and set the actual size of the buffers to avoid unnecessary copies.
const size_t bufferSize = static_cast<size_t>(nbInliers * nbPoseCandidates);
m_poseOptimisationCameraPoints->dataSize = bufferSize;
m_poseOptimisationPredictedModes->dataSize = bufferSize;
// Make the buffers available to the optimiser, which runs on the CPU.
m_poseOptimisationCameraPoints->UpdateHostFromDevice();
m_poseOptimisationPredictedModes->UpdateHostFromDevice();
}
void PreemptiveRansac_CUDA::reset_inliers(bool resetMask)
{
PreemptiveRansac::reset_inliers(resetMask);
ORcudaSafeCall(cudaMemsetAsync(m_nbInliers_device->GetData(MEMORYDEVICE_CUDA), 0, sizeof(int)));
}
void PreemptiveRansac_CUDA::sample_inliers(bool useMask)
{
const Vector2i imgSize = m_keypointsImage->noDims;
int *inlierRasterIndices = m_inlierRasterIndicesBlock->GetData(MEMORYDEVICE_CUDA);
int *inliersMask = m_inliersMaskImage->GetData(MEMORYDEVICE_CUDA);
const Keypoint3DColour *keypoints = m_keypointsImage->GetData(MEMORYDEVICE_CUDA);
int *nbInliers_device = m_nbInliers_device->GetData(MEMORYDEVICE_CUDA);
const ScorePrediction *predictions = m_predictionsImage->GetData(MEMORYDEVICE_CUDA);
CUDARNG *rngs = m_rngs->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(128);
dim3 gridSize((m_ransacInliersPerIteration + blockSize.x - 1) / blockSize.x);
if(useMask)
{
ck_sample_inliers<true><<<gridSize,blockSize>>>(
keypoints, predictions, imgSize, rngs, inlierRasterIndices, nbInliers_device, m_ransacInliersPerIteration, inliersMask
);
ORcudaKernelCheck;
}
else
{
ck_sample_inliers<false><<<gridSize,blockSize>>>(
keypoints, predictions, imgSize, rngs, inlierRasterIndices, nbInliers_device, m_ransacInliersPerIteration
);
ORcudaKernelCheck;
}
// Update the host's record of the number of inliers.
m_inlierRasterIndicesBlock->dataSize = static_cast<size_t>(m_nbInliers_device->GetElement(0, MEMORYDEVICE_CUDA));
}
void PreemptiveRansac_CUDA::update_candidate_poses()
{
  // The pose optimisation itself runs on the CPU (see the base class implementation),
  // so the candidates must round-trip via host memory.

  // Copy the pose candidates across to the CPU so that they can be optimised.
  m_poseCandidates->UpdateHostFromDevice();

  // Call the base class implementation to optimise the poses.
  PreemptiveRansac::update_candidate_poses();

  // Copy the optimised pose candidates back across to the GPU.
  m_poseCandidates->UpdateDeviceFromHost();
}
//#################### PRIVATE MEMBER FUNCTIONS ####################
void PreemptiveRansac_CUDA::init_random()
{
CUDARNG *rngs = m_rngs->GetData(MEMORYDEVICE_CUDA);
// Initialize random states
dim3 blockSize(256);
dim3 gridSize((m_maxPoseCandidates + blockSize.x - 1) / blockSize.x);
ck_reinit_rngs<<<gridSize, blockSize>>>(rngs, m_maxPoseCandidates, m_rngSeed);
ORcudaKernelCheck;
}
void PreemptiveRansac_CUDA::update_host_pose_candidates() const
{
  // Make sure the host-side copy of the pose candidates reflects the device-side one.
  m_poseCandidates->UpdateHostFromDevice();
}
} | the_stack |
#include "crys_kernel.cu"
// Aborts the program with a diagnostic if the most recent CUDA call failed.
#define CUDA_ERRCK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
exit (-1); }}

// Device-side buffers.
uint4* d_Block_Work;          // per-block work descriptors
uint2* d_FinalReduce;         // reduction bookkeeping per final element
float *d_Output, *d_ReductionSum; // per-block partial results and reduced sums
cudaArray *d_Coors, *d_Sprms, *d_Wghts; // texture-bound arrays (coordinates, shell params, weights)

// Host-side mirrors and work distribution state.
float4* Coors;
float2* Sprms;
uint4* Block_Work;
int totNumBlocks;             // total blocks distributed across all launches
int MaxBlocks;

// Chemistry inputs: atoms/shells read from the basis and input files.
Atom *ComputeAtom, *BasisAtom;
Shell* ComputeShell;

float Coor[BLOCK_SIZE];
float Alpha[BLOCK_SIZE];
float Coeff[BLOCK_SIZE];
float Wghts[TABLESIZE];

// Reduction outputs.
float *ReductionSum;
int totReductionElements;     // number of final (reduced) elements
uint2 *FinalReduce;

// Forward declarations for the routines defined below / in other TUs.
void AllocateDataOnDevice(int, int, int, int, int, int);
void RunKernel(int, struct pb_TimerSet *, struct pb_Parameters *);
void CalcOnHost(int);
Atom* ReadBasisAtoms(int&, struct pb_Parameters *);
int TotalNumOfShells(char*, int, int&);
void PopulateShells(char*, int);
void PopulateHostData(int, int, int);
int NumOfIntegrals(int);
void DistributeBlockWork(int);
void FreeAllData(struct pb_TimerSet *);
void PopulateWeights();
double root1(double X);
// Driver: reads the basis set and molecule, distributes integral work over CUDA
// blocks, runs the compute + reduction kernels, then writes and verifies output.
int main(int argc, char* argv[])
{
    struct pb_TimerSet timers;
    struct pb_Parameters *params;

    pb_InitializeTimerSet( &timers );
    params = pb_ReadParameters( &argc, argv );

    totNumBlocks = 0;
    MaxBlocks = 0;

    // Read the basis-set atoms (I/O timed separately from compute).
    int numBasisAtoms;
    pb_SwitchToTimer( &timers, pb_TimerID_IO );
    BasisAtom = ReadBasisAtoms(numBasisAtoms, params);
    pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );

    // Count the atoms/shells in the input molecule; the number of unique
    // two-electron integrals over shells is C(n+3, 4) = n(n+1)(n+2)(n+3)/24.
    int totNumAtoms;
    int totNumShells = TotalNumOfShells(params->inpFiles[0], numBasisAtoms,
                                        totNumAtoms);
    totReductionElements = totNumShells * (totNumShells + 1) *
                           (totNumShells + 2) * (totNumShells + 3) / 24;

    ComputeAtom = (Atom*)malloc(totNumAtoms * sizeof(Atom));
    ComputeShell = (Shell*)malloc(totNumShells * sizeof(Shell));

    pb_SwitchToTimer( &timers, pb_TimerID_IO );
    PopulateShells(params->inpFiles[0], numBasisAtoms);
    pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
    // all shells are ready now

    // prepare host data: count the basis primitives, then build the flat
    // coordinate/shell-parameter arrays that get bound to textures.
    int totBasisShells = 0;
    for(int i = 0; i < numBasisAtoms; i ++)
        for(int j = 0; j < BasisAtom[i].numShells; j ++)
            totBasisShells += BasisAtom[i].AtomShell[j].numPrimitives;

    Coors = (float4*)malloc(totNumAtoms * sizeof(float4));
    Sprms = (float2*)malloc(totBasisShells * sizeof(float2));
    PopulateHostData(totNumAtoms, totNumShells, numBasisAtoms);

    // distribute the work now
    FinalReduce = (uint2*)malloc(totReductionElements * sizeof(uint2));
    int numIntegrals = NumOfIntegrals(totNumShells);

    pb_SwitchToTimer( &timers, pb_TimerID_IO );
    printf("Total # of integrals to compute: %d\n", numIntegrals);
    printf("Total # of blocks allocated: %d\n", totNumBlocks);
    printf("Final array size: %d\n", totReductionElements);
    pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );

    Block_Work = (uint4*)malloc(totNumBlocks * sizeof(uint4));
    DistributeBlockWork(totNumShells);

    // Byte sizes of the device-side buffers.
    int d_output_mem = totNumBlocks * sizeof(float);
    int d_work_mem = totNumBlocks * sizeof(uint4);
    int reduction_mem = totReductionElements * sizeof(float);
    int final_mem = totReductionElements * sizeof(uint2);
    ReductionSum = (float*)malloc(reduction_mem);

    // prepare device data (allocation + host-to-device copies are timed as COPY)
    pb_SwitchToTimer( &timers, pb_TimerID_COPY );
    AllocateDataOnDevice(d_output_mem, d_work_mem, reduction_mem,
                         final_mem, totNumAtoms, totBasisShells);
    pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );

    int d_total_mem = d_output_mem + d_work_mem + reduction_mem + final_mem;
    printf("%.2lf MB allocated\n", (double)d_total_mem / 1048576);
    printf("maxblocks = %d\n", MaxBlocks);

    // okay, now ready to do something useful
    RunKernel(numIntegrals, &timers, params);

    // loading data back to the host
    pb_SwitchToTimer( &timers, pb_TimerID_COPY );
    cudaMemcpy(ReductionSum, d_ReductionSum,
               reduction_mem, cudaMemcpyDeviceToHost);
    CUDA_ERRCK
    pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );

    FreeAllData( &timers );

    if(params->outFile)
    {
        pb_SwitchToTimer( &timers, pb_TimerID_IO );
        FILE *file = fopen(params->outFile, "w");
        printf("totReductionElements %d\n", totReductionElements);
        // only printing the first 20000
        for(int i = 0; i < 20000; i ++)
        {
            fprintf(file, "%d\t%e\n", i, ReductionSum[i]);
        }
        fclose(file);
    }

    // Self-check against the reference output.
    if( pb_compareFiles(params->outFile, "data/rpes.out", 1) )
    {
        printf("TEST PASSED\n");
    }
    else
    {
        printf("TEST FAILED\n");
    }

    pb_SwitchToTimer( &timers, pb_TimerID_NONE );
    pb_PrintTimerSet( &timers );
    pb_FreeParameters( params );
    return 0;
}
// Runs the integral-computation kernel over all distributed blocks (at most
// GRID_SIZE blocks per launch), then reduces the per-block partial results
// into the final array. GPU time and host bookkeeping are timed separately.
//
// numIntegrals is currently unused here (the work distribution is encoded in
// d_Block_Work); it is kept for interface compatibility.
void RunKernel(int numIntegrals, struct pb_TimerSet *timers,
               struct pb_Parameters *params)
{
    // Phase 1: compute the integrals, one grid of at most GRID_SIZE blocks at a time.
    int runs = (int)(ceil(1.0 * totNumBlocks / GRID_SIZE));
    printf("%d computation cycles will be performed...\n", runs);

    int RemainingBlocks = totNumBlocks;
    int StartBlock = 0;
    for(int run = 0; run < runs; run ++)
    {
        int numBlocks = min(GRID_SIZE, RemainingBlocks);
        dim3 grid(numBlocks, 1, 1);
        dim3 block(BLOCK_SIZE, 1, 1);

        pb_SwitchToTimer( timers, pb_TimerID_GPU );
        ComputeX <<< grid, block >>> (d_Block_Work, d_Output, StartBlock);
        CUDA_ERRCK
        if (params->synchronizeGpu) cudaThreadSynchronize();
        pb_SwitchToTimer( timers, pb_TimerID_COMPUTE );

        // Advance by the number of blocks actually launched this cycle.
        // (Previously this subtracted GRID_SIZE, which was only correct because
        // numBlocks == GRID_SIZE on every non-final iteration and let the
        // counter go negative on the last one.)
        RemainingBlocks -= numBlocks;
        StartBlock += numBlocks;
    }

    // Phase 2: reduce the per-block partial sums into the final elements,
    // at most GRID_SIZE output elements per launch.
    runs = (int)(ceil(1.0 * totReductionElements / GRID_SIZE));
    printf("done.\n\n%d reduction cycles will be performed...\n", runs);

    int RemainReduction = totReductionElements;
    int Offset = 0;
    for(int run = 0; run < runs; run ++)
    {
        int numBlocks = min(GRID_SIZE, RemainReduction);
        dim3 grid(numBlocks, 1, 1);
        dim3 block(MaxBlocks, 1, 1);

        pb_SwitchToTimer( timers, pb_TimerID_GPU );
        DoReduction <<< grid, block >>> (d_ReductionSum, d_Output,
                                         d_FinalReduce, MaxBlocks, Offset);
        CUDA_ERRCK
        if (params->synchronizeGpu) cudaThreadSynchronize();
        pb_SwitchToTimer( timers, pb_TimerID_COMPUTE );

        // Same fix as above: step by the count actually launched.
        RemainReduction -= numBlocks;
        Offset += numBlocks;
    }
}
// Allocates every device-side buffer and uploads the host data.
// The *_mem arguments are byte sizes computed by the caller; numCoors is
// the number of atoms and numSprms the number of shell primitives backing
// the texture arrays.  Must be called before RunKernel; FreeAllData is the
// matching cleanup.  Call order matters: arrays are filled before the
// textures are bound to them.
void AllocateDataOnDevice(int d_output_mem, int d_work_mem,
int reduction_mem, int final_mem,
int numCoors, int numSprms)
{
// Linear device buffers: per-quartet sums, per-block kernel output, and
// the block work descriptors / reduction table consumed by the kernels.
cudaMalloc((void**)&d_ReductionSum, reduction_mem);
CUDA_ERRCK
cudaMalloc((void**)&d_Output, d_output_mem);
CUDA_ERRCK
cudaMalloc((void**)&d_Block_Work, d_work_mem);
CUDA_ERRCK
cudaMalloc((void**)&d_FinalReduce, final_mem);
CUDA_ERRCK
// CUDA arrays that back the read-only textures (coordinates, shell
// primitives, and the precomputed weight table).
cudaMallocArray(&d_Coors, &texCoors.channelDesc,
numCoors, 1);
CUDA_ERRCK
cudaMallocArray(&d_Sprms, &texSprms.channelDesc,
numSprms, 1);
CUDA_ERRCK
cudaMallocArray(&d_Wghts, &texWghts.channelDesc,
1 << LOG_TABLE_WIDTH, 2);
CUDA_ERRCK
// Upload the host-populated data.
cudaMemcpy(d_Block_Work, Block_Work, d_work_mem,
cudaMemcpyHostToDevice);
CUDA_ERRCK
cudaMemcpy(d_FinalReduce, FinalReduce, final_mem,
cudaMemcpyHostToDevice);
CUDA_ERRCK
cudaMemcpyToArray(d_Coors, 0, 0, (void*)Coors,
numCoors * sizeof(float4),
cudaMemcpyHostToDevice);
CUDA_ERRCK
cudaMemcpyToArray(d_Sprms, 0, 0, (void*)Sprms,
numSprms * sizeof(float2),
cudaMemcpyHostToDevice);
CUDA_ERRCK
cudaMemcpyToArray(d_Wghts, 0, 0, (void*)Wghts,
TABLESIZE * sizeof(float),
cudaMemcpyHostToDevice);
CUDA_ERRCK
// Linear filtering on the weight texture: lookups between table entries
// are interpolated in hardware.
texWghts.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(texCoors, d_Coors, texCoors.channelDesc);
CUDA_ERRCK
cudaBindTextureToArray(texSprms, d_Sprms, texSprms.channelDesc);
CUDA_ERRCK
cudaBindTextureToArray(texWghts, d_Wghts, texWghts.channelDesc);
CUDA_ERRCK
}
// Releases the device buffers/arrays (timed under the COPY timer) and the
// host-side bookkeeping arrays (under COMPUTE).
// NOTE: ReductionSum is deliberately NOT freed here -- the caller still
// reads it after this function returns (to write the output file).
// NOTE(review): Coors, Sprms and Wghts are also not freed -- presumably
// statically allocated rather than malloc'd; verify against their
// declarations.
void FreeAllData( struct pb_TimerSet *timers )
{
pb_SwitchToTimer( timers, pb_TimerID_COPY );
// Linear device buffers.
cudaFree((void*)d_FinalReduce);
CUDA_ERRCK
cudaFree((void*)d_Block_Work);
CUDA_ERRCK
cudaFree((void*)d_Output);
CUDA_ERRCK
cudaFree((void*)d_ReductionSum);
CUDA_ERRCK
// CUDA arrays that backed the textures.
cudaFreeArray(d_Coors);
CUDA_ERRCK
cudaFreeArray(d_Wghts);
CUDA_ERRCK
cudaFreeArray(d_Sprms);
CUDA_ERRCK
pb_SwitchToTimer( timers, pb_TimerID_COMPUTE );
// Host-side arrays built during setup.
free ((void*)Block_Work);
free ((void*)FinalReduce);
free ((void*)ComputeAtom);
free ((void*)BasisAtom);
free ((void*)ComputeShell);
}
// Parses the basis-set file (params->inpFiles[1]) and returns a freshly
// malloc'd array of Atom records; the element count is reported through
// numBasisAtoms.  Echoes the parsed basis set to stdout.  Exits the
// process if the file cannot be opened.  Caller owns the returned array.
Atom* ReadBasisAtoms(int& numBasisAtoms, struct pb_Parameters *params)
{
    FILE* basis = fopen(params->inpFiles[1], "r");
    if(!basis)
    {
        printf("Unable to open file %s\n", params->inpFiles[1]);
        exit(0);
    }
    int numAtoms = 0, numShells = 0;
    fscanf(basis, "%*s %*s %d", &numAtoms);
    fscanf(basis, "%*s %*s %d", &numShells);
    printf("\n>>>>>>> STARTED BASIS SET OUTPUT <<<<<<<\n");
    printf("\n# OF KNOWN ATOMS: %d\n", numAtoms);
    printf("# OF KNOWN SHELLS: %d\n\n", numShells);
    numBasisAtoms = numAtoms;
    Atom* BasisAtom = (Atom*)malloc(numAtoms * sizeof(Atom));
    for(int atom = 0; atom < numAtoms; atom ++)
    {
        char type[4];
        char buff[4];
        // %3s bounds the read to sizeof(type)-1 characters, preventing a
        // stack buffer overflow on a malformed input (was unbounded %s).
        fscanf(basis, "%*s %3s", type);
        // numShells is reused here as the per-atom shell count.
        fscanf(basis, "%*s %d", &numShells);
        BasisAtom[atom].numShells = numShells;
        strcpy(BasisAtom[atom].Type, type);
        printf("\nAtom %s (%d shells)\n", BasisAtom[atom].Type,
               BasisAtom[atom].numShells);
        for(int shell = 0; shell < numShells; shell ++)
        {
            int numPrimitives = 0;
            fscanf(basis, "%*s %*d %*s %d", &numPrimitives);
            BasisAtom[atom].AtomShell[shell].numPrimitives = numPrimitives;
            // Shell label = element symbol + 1-based shell number, e.g. "H1".
            // snprintf cannot overflow buff (was sprintf).
            // NOTE(review): writing at Type + 1 assumes a one-character
            // element symbol -- confirm against the input format.
            snprintf(buff, sizeof(buff), "%d", shell + 1);
            strcpy(BasisAtom[atom].AtomShell[shell].Type , type);
            strcpy(BasisAtom[atom].AtomShell[shell].Type + 1, buff);
            printf("\tShell %s: %d primitives\n",
                   BasisAtom[atom].AtomShell[shell].Type,
                   BasisAtom[atom].AtomShell[shell].numPrimitives);
            // Each primitive is an (exponent, contraction coefficient) pair.
            for(int prim = 0; prim < numPrimitives; prim ++)
            {
                fscanf(basis, "%*s %*s %*s %f %f",
                       &BasisAtom[atom].AtomShell[shell].Alpha[prim],
                       &BasisAtom[atom].AtomShell[shell].Coeff[prim]);
                printf("\t\tprimitive %d: %10.2f %5.2f\n", prim + 1,
                       BasisAtom[atom].AtomShell[shell].Alpha[prim],
                       BasisAtom[atom].AtomShell[shell].Coeff[prim]);
            }
            printf("\n");
        }
    }
    printf(">>>>>>>> DONE BASIS SET OUTPUT <<<<<<<<\n\n\n");
    fclose(basis);
    return BasisAtom;
}
// Scans the molecule file fname and returns the total number of shells the
// computation will use, by matching each atom's element symbol against the
// global BasisAtom table.  The atom count is reported through totNumAtoms.
// Exits the process if the file cannot be opened or an atom type is not in
// the basis set.
int TotalNumOfShells(char* fname, int numBasisAtoms, int& totNumAtoms)
{
    FILE* inp = fopen(fname, "r");
    if(!inp)
    {
        printf("Unable to open %s\n", fname);
        exit(0);
    }
    int numShells = 0;
    fscanf(inp, "%*s %d", &totNumAtoms);
    for(int atom = 0; atom < totNumAtoms; atom ++)
    {
        char type[8];
        // %7s bounds the element-symbol read to sizeof(type)-1 characters,
        // preventing a stack buffer overflow on malformed input (was an
        // unbounded %s).  The three coordinates are skipped here.
        fscanf(inp, "%7s %*s %*s %*s", type);
        int notfound = 1;
        for(int batom = 0; batom < numBasisAtoms; batom ++)
        {
            if(!strcmp(BasisAtom[batom].Type, type))
            {
                numShells += BasisAtom[batom].numShells;
                notfound = 0;
                break;
            }
        }
        if(notfound)
        {
            printf("Unable to find atom \'%s\' in the basis set\n", type);
            exit(0);
        }
    }
    fclose(inp);
    return numShells;
}
// Re-reads the molecule file and expands every atom into the global
// ComputeAtom / ComputeShell arrays.  For each shell, inList[] records the
// global primitive indices into the flat primitive table (Sprms) that
// PopulateHostData later builds in the same basis-set order.
// Exits the process if the file cannot be opened.
void PopulateShells(char* fname, int numBasisAtoms)
{
    FILE* inp = fopen(fname, "r");
    if(!inp)
    {
        printf("Unable to open %s\n", fname);
        exit(0);
    }
    int numAtoms = 0, currentShell = 0;
    fscanf(inp, "%*s %d", &numAtoms);
    for(int atom = 0; atom < numAtoms; atom ++)
    {
        // FIX: Type is a char array, so pass it directly.  The original
        // passed &ComputeAtom[atom].Type, a char(*)[N], which is the wrong
        // vararg type for %s (undefined behavior, though it usually works).
        fscanf(inp, "%s %f %f %f", ComputeAtom[atom].Type,
               &ComputeAtom[atom].X,
               &ComputeAtom[atom].Y, &ComputeAtom[atom].Z);
        // currentInList walks the flat primitive table in basis-set order
        // until we reach this atom's entry.
        int currentInList = 0;
        for(int batom = 0; batom < numBasisAtoms; batom ++)
        {
            if(!strcmp(BasisAtom[batom].Type, ComputeAtom[atom].Type))
            {
                // Matching basis atom: copy its shells and record each
                // primitive's global index.
                for(int shell = 0; shell < BasisAtom[batom].numShells;
                    shell ++)
                {
                    ComputeShell[currentShell] =
                        BasisAtom[batom].AtomShell[shell];
                    ComputeShell[currentShell].myAtom = atom;
                    for(int prim = 0; prim <
                        BasisAtom[batom].AtomShell[shell].numPrimitives;
                        prim ++)
                        ComputeShell[currentShell].inList[prim] =
                            currentInList ++;
                    currentShell ++;
                }
                break;
            }
            else
            {
                // Non-matching basis atom earlier in the table: skip past
                // all of its primitives in the flat table.
                for(int shell = 0; shell < BasisAtom[batom].numShells;
                    shell ++)
                    currentInList +=
                        BasisAtom[batom].AtomShell[shell].numPrimitives;
            }
        }
    }
    fclose(inp);
}
// Builds the host-side staging arrays that AllocateDataOnDevice uploads:
// the weight lookup table (via PopulateWeights), the atom coordinates in
// Coors, and the flat (Alpha, Coeff) primitive pairs in Sprms, laid out in
// basis-set order.  totNumShells is unused (kept for interface stability).
void PopulateHostData(int totNumAtoms, int totNumShells, int numBasisAtoms)
{
    PopulateWeights();
    // Atom coordinates -> float4 staging array (the fourth component is
    // left untouched).
    for(int a = 0; a < totNumAtoms; a ++)
    {
        Coors[a].x = ComputeAtom[a].X;
        Coors[a].y = ComputeAtom[a].Y;
        Coors[a].z = ComputeAtom[a].Z;
    }
    // Primitive (exponent, coefficient) pairs, flattened atom-by-atom,
    // shell-by-shell -- the same order PopulateShells assumed when it
    // assigned inList indices.
    int writePos = 0;
    for(int ba = 0; ba < numBasisAtoms; ba ++)
    {
        for(int sh = 0; sh < BasisAtom[ba].numShells; sh ++)
        {
            const int prims = BasisAtom[ba].AtomShell[sh].numPrimitives;
            for(int pr = 0; pr < prims; pr ++, writePos ++)
            {
                Sprms[writePos].x = BasisAtom[ba].AtomShell[sh].Alpha[pr];
                Sprms[writePos].y = BasisAtom[ba].AtomShell[sh].Coeff[pr];
            }
        }
    }
}
int NumOfIntegrals(int totNumShells)
{
int numIntegrals = 0;
int firstRedElement = 0;
int redElement = 0;
for(int shell1 = 0; shell1 < totNumShells; shell1 ++)
for(int shell2 = shell1; shell2 < totNumShells; shell2 ++)
for(int shell3 = shell2; shell3 < totNumShells; shell3 ++)
for(int shell4 = shell3; shell4 < totNumShells; shell4 ++)
{
int integrals = ComputeShell[shell1].numPrimitives *
ComputeShell[shell2].numPrimitives *
ComputeShell[shell3].numPrimitives *
ComputeShell[shell4].numPrimitives;
numIntegrals += integrals;
int blocks = (int)ceil(1.0 * integrals / BLOCK_SIZE);
totNumBlocks += blocks;
if(blocks > MaxBlocks)
MaxBlocks = blocks;
FinalReduce[redElement].x = firstRedElement;
FinalReduce[redElement].y = blocks;
firstRedElement += blocks;
redElement ++;
}
return numIntegrals;
}
// Fills the host Block_Work array with one uint4 descriptor per GPU block.
// Each unique shell quartet (s1 <= s2 <= s3 <= s4) is split into
// ceil(integrals / BLOCK_SIZE) blocks; every block of a quartet carries the
// same packed descriptor:
//   .x - the four primitive counts, 4 bits each
//   .y - the four owning-atom indices, 8 bits each
//   .z - the four first-primitive offsets (inList[0]), 8 bits each
//   .w - the quartet's first block index, so a block can locate itself
//        within its quartet
// NOTE(review): the packing assumes counts fit in 4 bits and atom indices /
// offsets in 8 bits -- confirm against the maximum input sizes.
// Improvement: the quartet-invariant loads and the bit packing were
// previously recomputed inside the per-block loop; they are hoisted here
// (results are identical).
void DistributeBlockWork(int totNumShells)
{
    int numElements = 0;
    for(int shell1 = 0; shell1 < totNumShells; shell1 ++)
    {
        for(int shell2 = shell1; shell2 < totNumShells; shell2 ++)
        {
            for(int shell3 = shell2; shell3 < totNumShells; shell3 ++)
            {
                for(int shell4 = shell3; shell4 < totNumShells; shell4 ++)
                {
                    int integrals = ComputeShell[shell1].numPrimitives *
                                    ComputeShell[shell2].numPrimitives *
                                    ComputeShell[shell3].numPrimitives *
                                    ComputeShell[shell4].numPrimitives;
                    int blocks = (int)ceil(1.0 * integrals / BLOCK_SIZE);
                    // Quartet-invariant descriptor, built once per quartet.
                    const int firstBlock = numElements;
                    const int a1 = ComputeShell[shell1].numPrimitives;
                    const int a2 = ComputeShell[shell2].numPrimitives;
                    const int a3 = ComputeShell[shell3].numPrimitives;
                    const int a4 = ComputeShell[shell4].numPrimitives;
                    const int offset1 = ComputeShell[shell1].inList[0];
                    const int offset2 = ComputeShell[shell2].inList[0];
                    const int offset3 = ComputeShell[shell3].inList[0];
                    const int offset4 = ComputeShell[shell4].inList[0];
                    const unsigned int packedAtoms =
                        (ComputeShell[shell1].myAtom << 24) |
                        (ComputeShell[shell2].myAtom << 16) |
                        (ComputeShell[shell3].myAtom << 8 ) |
                        (ComputeShell[shell4].myAtom      ) ;
                    const unsigned int packedOffsets =
                        (offset1 << 24) |
                        (offset2 << 16) |
                        (offset3 << 8 ) |
                        (offset4      ) ;
                    const unsigned int packedCounts =
                        (a1 << 12) |
                        (a2 << 8 ) |
                        (a3 << 4 ) |
                        (a4      ) ;
                    for(int block = 0; block < blocks; block ++)
                    {
                        Block_Work[numElements].x = packedCounts;
                        Block_Work[numElements].y = packedAtoms;
                        Block_Work[numElements].z = packedOffsets;
                        Block_Work[numElements].w = firstBlock;
                        numElements ++;
                    }
                }
            }
        }
    }
}
// Piecewise polynomial/rational approximation of the weight function WW1(X)
// sampled into the Wghts lookup table.  Each branch uses coefficients tuned
// for its interval of X; the function equals 1 at X = 0 and decays like
// sqrt(pi/(4X)) for large X.
double root1(double X)
{
    const double PIE4 = 7.85398163397448E-01;   // pi / 4

    if (X < 3.0e-7)
    {
        // Tiny argument: two-term series expansion.
        return 1.0 - 0.333333333 * X;
    }
    if (X < 1.0)
    {
        // Polynomial in X (Horner form).
        const double F1 =
            ((((((((-8.36313918003957E-08*X+1.21222603512827E-06 )*X-
            1.15662609053481E-05 )*X+9.25197374512647E-05 )*X-
            6.40994113129432E-04 )*X+3.78787044215009E-03 )*X-
            1.85185172458485E-02 )*X+7.14285713298222E-02 )*X-
            1.99999999997023E-01 )*X+3.33333333333318E-01;
        return (X + X) * F1 + exp(-X);
    }
    if (X < 3.0)
    {
        // Polynomial in Y = X - 2.
        const double Y = X - 2.0;
        const double F1 =
            ((((((((((-1.61702782425558E-10*Y+1.96215250865776E-09 )*Y-
            2.14234468198419E-08 )*Y+2.17216556336318E-07 )*Y-
            1.98850171329371E-06 )*Y+1.62429321438911E-05 )*Y-
            1.16740298039895E-04 )*Y+7.24888732052332E-04 )*Y-
            3.79490003707156E-03 )*Y+1.61723488664661E-02 )*Y-
            5.29428148329736E-02 )*Y+1.15702180856167E-01;
        return (X + X) * F1 + exp(-X);
    }
    if (X < 5.0)
    {
        // Polynomial in Y = X - 4.
        const double Y = X - 4.0;
        const double F1 =
            ((((((((((-2.62453564772299E-11*Y+3.24031041623823E-10 )*Y-
            3.614965656163E-09)*Y+3.760256799971E-08)*Y-
            3.553558319675E-07)*Y+3.022556449731E-06)*Y-
            2.290098979647E-05)*Y+1.526537461148E-04)*Y-
            8.81947375894379E-04)*Y+4.33207949514611E-03 )*Y-
            1.75257821619926E-02 )*Y+5.28406320615584E-02;
        return (X + X) * F1 + exp(-X);
    }
    // Remaining branches: series in 1/X plus the asymptotic sqrt term.
    if (X < 10.0)
    {
        const double E = exp(-X);
        const double inv = 1 / X;
        return (((((( 4.6897511375022E-01*inv-6.9955602298985E-01)*inv +
            5.3689283271887E-01)*inv-3.2883030418398E-01)*inv +
            2.4645596956002E-01)*inv-4.9984072848436E-01)*inv -
            3.1501078774085E-06)*E + sqrt(PIE4*inv);
    }
    if (X < 15.0)
    {
        const double E = exp(-X);
        const double inv = 1 / X;
        return (((-1.8784686463512E-01*inv+2.2991849164985E-01)*inv -
            4.9893752514047E-01)*inv-2.1916512131607E-05)*E
            + sqrt(PIE4*inv);
    }
    if (X < 33.0)
    {
        const double E = exp(-X);
        const double inv = 1 / X;
        return (( 1.9623264149430E-01*inv-4.9695241464490E-01)*inv -
            6.0156581186481E-05)*E + sqrt(PIE4*inv);
    }
    // Pure asymptotic form.
    return sqrt(PIE4 / X);
}
// Fills the host weight table Wghts with TABLESIZE samples of root1()
// taken at evenly spaced points spanning [0, W_MAX_SIZE]; the table is
// later uploaded and read through texWghts with linear filtering.
void PopulateWeights()
{
    for(int idx = 0; idx < TABLESIZE; idx ++)
    {
        // Keep the exact original expression so the sampled abscissae (and
        // therefore the table contents) are bit-identical.
        const float abscissa = (float)(1.0 * idx * W_MAX_SIZE / (TABLESIZE - 1));
        Wghts[idx] = root1(abscissa);
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.