hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
2fd37c90cb743eea7121739e36158de86af0f23e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename x_t>
struct L2NormFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<1>& tl,
float* output,
float* output_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
__shared__ float s_vals[512];
float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
for(int i = 0; i < ILP; i++)
vals[i] = 0.f;
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
float next = static_cast<float>(x[i]);
vals[ii] += next*next;
}
}
}
float val = 0.f;
for(int i = 0; i < ILP; i++)
val += vals[i];
float final = reduce_block_into_lanes(s_vals, val);
if(threadIdx.x == 0)
{
if(!isfinite(final))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
output[blockIdx.x] += final;
if(per_tensor)
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
}
}
};
// Probably better to template, but since we are not likely to support other norm
template<typename x_t>
struct MaxNormFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<1>& tl,
float* output,
float* output_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
__shared__ float s_vals[512];
float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
for(int i = 0; i < ILP; i++)
vals[i] = 0.f;
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
float next = static_cast<float>(x[i]);
vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next));
}
}
}
float val = 0.f;
for(int i = 0; i < ILP; i++)
val = fmaxf(fabsf(val), fabsf(vals[i]));
float final = reduce_block_into_lanes_max_op(s_vals, val);
if(threadIdx.x == 0)
{
if(!isfinite(final))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final));
if(per_tensor)
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
}
}
};
__global__ void cleanup(
float* output,
float* output_per_tensor,
float* ret,
float* ret_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
__shared__ float vals[512];
if(blockIdx.x == 0)
{
float val = 0;
if(threadIdx.x < 320)
val = output[threadIdx.x];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
*ret = sqrt(final);
}
if(per_tensor)
{
float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = sqrt(final);
}
}
__global__ void cleanup_v2(
float* output,
float* output_per_tensor,
float* ret,
float* ret_per_tensor,
bool per_tensor,
int max_chunks_per_tensor,
int norm_type,
float alpha,
float beta)
{
__shared__ float vals[512];
if(blockIdx.x == 0)
{
float val = 0;
if(threadIdx.x < 320)
val = output[threadIdx.x];
if (norm_type == 0) {
float final = reduce_block_into_lanes_max_op(vals, val);
if(threadIdx.x == 0)
*ret = alpha * (*ret) + beta * final;
}
else {
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
*ret = sqrt(alpha * (*ret) * (*ret) + beta * final);
}
}
if(per_tensor)
{
float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
if (norm_type == 0) {
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val = fmaxf(fabsf(val), fabsf(output_this_tensor[i]));
float final = reduce_block_into_lanes_max_op(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = alpha * ret_per_tensor[blockIdx.x] + beta * final;
}
else {
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = sqrt(alpha * ret_per_tensor[blockIdx.x] * ret_per_tensor[blockIdx.x] + beta * final);
}
}
}
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python)
{
bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
auto output = at::zeros({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
if(per_tensor)
{
for(int t = 0; t < ntensors; t++)
{
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
if(max_chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = max_chunks_this_tensor;
}
output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
ret_per_tensor = at::empty({ntensors}, float_options);
}
else
{
ret_per_tensor = at::empty({0}, float_options);
}
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
L2NormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
per_tensor,
max_chunks_per_tensor);)
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
// This involves one more small kernel launches, but will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cleanup), dim3(per_tensor ? ntensors : 1), dim3(512), 0, stream,
output.DATA_PTR<float>(),
per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
ret.DATA_PTR<float>(),
per_tensor ? ret_per_tensor.DATA_PTR<float>() : nullptr,
per_tensor,
max_chunks_per_tensor);
return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
// Compute and update grad norm
// Here use a per tensor norm, and blend new norm(n) and old norm(gn) by
// L-2: gn = sqrt(a * gn^2 + b * n^2)
// L-inf: gn = a * gn + b * n
void multi_tensor_norm_out_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor out,
const float alpha,
const float beta,
const int norm_type)
{
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
// we don't need global thus uses empty here
auto output = at::empty({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
for(int t = 0; t < ntensors; t++)
{
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
if(max_chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = max_chunks_this_tensor;
}
// Although it is single write then read, still need to be zero
// Since tailing element also participate cleanup
output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
if (norm_type == 0) {
DISPATCH_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "multi_tensor_maxnorm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
MaxNormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
true,
max_chunks_per_tensor);)
}
else {
DISPATCH_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
L2NormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
true,
max_chunks_per_tensor);)
}
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
// This involves one more small kernel launches, but will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( cleanup_v2), dim3(ntensors), dim3(512), 0, stream,
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
ret.DATA_PTR<float>(),
out.DATA_PTR<float>(),
true,
max_chunks_per_tensor,
norm_type,
alpha,
beta);
return ;
}
| 2fd37c90cb743eea7121739e36158de86af0f23e.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename x_t>
struct L2NormFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<1>& tl,
float* output,
float* output_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
__shared__ float s_vals[512];
float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
for(int i = 0; i < ILP; i++)
vals[i] = 0.f;
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
float next = static_cast<float>(x[i]);
vals[ii] += next*next;
}
}
}
float val = 0.f;
for(int i = 0; i < ILP; i++)
val += vals[i];
float final = reduce_block_into_lanes(s_vals, val);
if(threadIdx.x == 0)
{
if(!isfinite(final))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
output[blockIdx.x] += final;
if(per_tensor)
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
}
}
};
// Probably better to template, but since we are not likely to support other norm
template<typename x_t>
struct MaxNormFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<1>& tl,
float* output,
float* output_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
__shared__ float s_vals[512];
float vals[ILP]; // = {0}; // this probably works too but I want to be sure...
for(int i = 0; i < ILP; i++)
vals[i] = 0.f;
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
float next = static_cast<float>(x[i]);
vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next));
}
}
}
float val = 0.f;
for(int i = 0; i < ILP; i++)
val = fmaxf(fabsf(val), fabsf(vals[i]));
float final = reduce_block_into_lanes_max_op(s_vals, val);
if(threadIdx.x == 0)
{
if(!isfinite(final))
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final));
if(per_tensor)
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc)*max_chunks_per_tensor + chunk_idx] = final;
}
}
};
__global__ void cleanup(
float* output,
float* output_per_tensor,
float* ret,
float* ret_per_tensor,
bool per_tensor,
int max_chunks_per_tensor)
{
__shared__ float vals[512];
if(blockIdx.x == 0)
{
float val = 0;
if(threadIdx.x < 320)
val = output[threadIdx.x];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
*ret = sqrt(final);
}
if(per_tensor)
{
float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = sqrt(final);
}
}
__global__ void cleanup_v2(
float* output,
float* output_per_tensor,
float* ret,
float* ret_per_tensor,
bool per_tensor,
int max_chunks_per_tensor,
int norm_type,
float alpha,
float beta)
{
__shared__ float vals[512];
if(blockIdx.x == 0)
{
float val = 0;
if(threadIdx.x < 320)
val = output[threadIdx.x];
if (norm_type == 0) {
float final = reduce_block_into_lanes_max_op(vals, val);
if(threadIdx.x == 0)
*ret = alpha * (*ret) + beta * final;
}
else {
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
*ret = sqrt(alpha * (*ret) * (*ret) + beta * final);
}
}
if(per_tensor)
{
float* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
if (norm_type == 0) {
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val = fmaxf(fabsf(val), fabsf(output_this_tensor[i]));
float final = reduce_block_into_lanes_max_op(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = alpha * ret_per_tensor[blockIdx.x] + beta * final;
}
else {
float val = 0;
for(int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x)
val += output_this_tensor[i];
float final = reduce_block_into_lanes(vals, val);
if(threadIdx.x == 0)
ret_per_tensor[blockIdx.x] = sqrt(alpha * ret_per_tensor[blockIdx.x] * ret_per_tensor[blockIdx.x] + beta * final);
}
}
}
std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::optional<bool> per_tensor_python)
{
bool per_tensor = per_tensor_python.has_value() ? per_tensor_python.value() : false;
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
auto output = at::zeros({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
if(per_tensor)
{
for(int t = 0; t < ntensors; t++)
{
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
if(max_chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = max_chunks_this_tensor;
}
output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
ret_per_tensor = at::empty({ntensors}, float_options);
}
else
{
ret_per_tensor = at::empty({0}, float_options);
}
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
L2NormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
per_tensor,
max_chunks_per_tensor);)
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
// This involves one more small kernel launches, but will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
auto stream = at::cuda::getCurrentCUDAStream();
cleanup<<<per_tensor ? ntensors : 1, 512, 0, stream>>>(
output.DATA_PTR<float>(),
per_tensor ? output_per_tensor.DATA_PTR<float>() : nullptr,
ret.DATA_PTR<float>(),
per_tensor ? ret_per_tensor.DATA_PTR<float>() : nullptr,
per_tensor,
max_chunks_per_tensor);
return std::tuple<at::Tensor, at::Tensor>(ret, ret_per_tensor);
}
// Compute and update grad norm
// Here use a per tensor norm, and blend new norm(n) and old norm(gn) by
// L-2: gn = sqrt(a * gn^2 + b * n^2)
// L-inf: gn = a * gn + b * n
void multi_tensor_norm_out_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor out,
const float alpha,
const float beta,
const int norm_type)
{
auto float_options = tensor_lists[0][0].options().dtype(at::kFloat);
// we don't need global thus uses empty here
auto output = at::empty({320}, float_options);
at::Tensor output_per_tensor;
at::Tensor ret_per_tensor;
int ntensors = tensor_lists[0].size();
int max_chunks_per_tensor = -1;
for(int t = 0; t < ntensors; t++)
{
int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;
if(max_chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = max_chunks_this_tensor;
}
// Although it is single write then read, still need to be zero
// Since tailing element also participate cleanup
output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, float_options);
if (norm_type == 0) {
DISPATCH_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "multi_tensor_maxnorm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
MaxNormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
true,
max_chunks_per_tensor);)
}
else {
DISPATCH_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda",
multi_tensor_apply<1>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
L2NormFunctor<scalar_t_0>(),
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
true,
max_chunks_per_tensor);)
}
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
// This involves one more small kernel launches, but will be negligible end to end.
// I could get rid of these by hacking the functor + multi tensor harness with persistence
// logic, but keeping it simple for now
auto ret = at::empty({1}, output.options());
auto stream = at::cuda::getCurrentCUDAStream();
cleanup_v2<<<ntensors, 512, 0, stream>>>(
output.DATA_PTR<float>(),
output_per_tensor.DATA_PTR<float>(),
ret.DATA_PTR<float>(),
out.DATA_PTR<float>(),
true,
max_chunks_per_tensor,
norm_type,
alpha,
beta);
return ;
}
|
0466c456c949c9544d830a07ed9421b3d9097899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cuda_accumulate_occ(float * device_mapOcc, int numObjs, int numClusters, int clusterStart, int sub_numClusters, float *device_reduceOcc) {
int objIndex = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ float shared_objects[];
if(objIndex < numObjs) {
for(int i = 0;i < sub_numClusters; i++)
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = device_mapOcc[objIndex * numClusters + i + clusterStart];
}
else {
for(int i = 0;i < sub_numClusters; i++)
//for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = 0;
}
__syncthreads();
for(int i = (blockDim.x >> 1); i >= 1; i>>=1) {
if(threadIdx.x < i) {
for(int j = 0;j < sub_numClusters; j++) {
//for(int j = clusterStart + sub_numClusters - 1;j >= clusterStart; j--)
shared_objects[threadIdx.x * sub_numClusters + j] += shared_objects[(threadIdx.x + i) * sub_numClusters + j];
}
}
__syncthreads();
}
if(threadIdx.x == 0) {
for(int i = 0;i < sub_numClusters;i++) {
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
device_reduceOcc[blockIdx.x * numClusters + i + clusterStart] = shared_objects[i];
}
}
} | 0466c456c949c9544d830a07ed9421b3d9097899.cu | #include "includes.h"
__global__ void cuda_accumulate_occ(float * device_mapOcc, int numObjs, int numClusters, int clusterStart, int sub_numClusters, float *device_reduceOcc) {
int objIndex = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ float shared_objects[];
if(objIndex < numObjs) {
for(int i = 0;i < sub_numClusters; i++)
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = device_mapOcc[objIndex * numClusters + i + clusterStart];
}
else {
for(int i = 0;i < sub_numClusters; i++)
//for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = 0;
}
__syncthreads();
for(int i = (blockDim.x >> 1); i >= 1; i>>=1) {
if(threadIdx.x < i) {
for(int j = 0;j < sub_numClusters; j++) {
//for(int j = clusterStart + sub_numClusters - 1;j >= clusterStart; j--)
shared_objects[threadIdx.x * sub_numClusters + j] += shared_objects[(threadIdx.x + i) * sub_numClusters + j];
}
}
__syncthreads();
}
if(threadIdx.x == 0) {
for(int i = 0;i < sub_numClusters;i++) {
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
device_reduceOcc[blockIdx.x * numClusters + i + clusterStart] = shared_objects[i];
}
}
} |
c74893ca6d0abf4a7b0dbd9ecf3bf5a327e847e5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <iostream>
#define DATA_SIZE 1048576
#define THREAD_SIZE 256
#define BLOCK_SIZE 32
int data[DATA_SIZE];
void Generate(int* number, int size) {
for (int i = 0; i < size; ++i)
number[i] = rand() % 10;
}
__global__ static void sumOf(int* data, int* result, clock_t* time) {
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int sum = 0;
clock_t start;
int offset = 1, mask = 1;
if (tid == 0) time[bid] = clock();
shared[tid] = 0;
for (int i = bid * THREAD_SIZE + tid; i < DATA_SIZE;
i += BLOCK_SIZE * THREAD_SIZE) {
shared[tid] += data[i] * data[i];
}
__syncthreads();
offset = THREAD_SIZE / 2;
while (offset > 0) {
if (tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
/*
while (offset < THREAD_SIZE){
if ((tid & mask) == 0) {
shared[tid] += shared[tid + offset];
}
offset += offset;
mask += offset;
__syncthreads();
}*/
if (tid == 0) {
result[bid] = shared[0];
time[bid + BLOCK_SIZE] = clock();
}
/*
if (tid == 0) {
for (int i = 0; i < THREAD_SIZE; ++i)
shared[0] += shared[i];
result[bid] = shared[0];
time[bid + BLOCK_SIZE] = clock();
}
*/
}
int main(int argc, char** argv) {
Generate(data, DATA_SIZE);
int* gpudata, *result;
clock_t* time;
hipMalloc((void**)&gpudata, sizeof(int)* DATA_SIZE);
hipMalloc((void**)&result, sizeof(int)* THREAD_SIZE * BLOCK_SIZE);
hipMalloc((void**)&time, sizeof(int)* 2 * BLOCK_SIZE);
hipMemcpy(gpudata, data, sizeof(int)* DATA_SIZE, hipMemcpyHostToDevice);
sumOf << <BLOCK_SIZE, THREAD_SIZE, THREAD_SIZE * sizeof(int) >> >(gpudata, result, time);
clock_t usetime[BLOCK_SIZE * 2];
int sum[THREAD_SIZE * BLOCK_SIZE];
hipMemcpy(sum, result, sizeof(int)* THREAD_SIZE * BLOCK_SIZE, hipMemcpyDeviceToHost);
hipMemcpy(usetime, time, sizeof(int)* 2 * BLOCK_SIZE, hipMemcpyDeviceToHost);
hipFree(gpudata);
hipFree(result);
hipFree(time);
int finalsum = 0;
for (int i = 0; i < THREAD_SIZE * BLOCK_SIZE; ++i)
finalsum += sum[i];
clock_t minp, maxp;
minp = usetime[0], maxp = usetime[BLOCK_SIZE];
for (int i = 0; i < BLOCK_SIZE; ++i){
if (usetime[i] < minp) minp = usetime[i];
if (usetime[i + BLOCK_SIZE] > maxp) maxp = usetime[i + BLOCK_SIZE];
}
int val = 0;
for (int i = 0; i < DATA_SIZE; ++i)
val += data[i] * data[i];
printf("%d\n", val);
printf("%d %d\n", finalsum, maxp - minp);
system("pause");
return 0;
} | c74893ca6d0abf4a7b0dbd9ecf3bf5a327e847e5.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <iostream>
#define DATA_SIZE 1048576
#define THREAD_SIZE 256
#define BLOCK_SIZE 32
int data[DATA_SIZE];
void Generate(int* number, int size) {
for (int i = 0; i < size; ++i)
number[i] = rand() % 10;
}
__global__ static void sumOf(int* data, int* result, clock_t* time) {
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int sum = 0;
clock_t start;
int offset = 1, mask = 1;
if (tid == 0) time[bid] = clock();
shared[tid] = 0;
for (int i = bid * THREAD_SIZE + tid; i < DATA_SIZE;
i += BLOCK_SIZE * THREAD_SIZE) {
shared[tid] += data[i] * data[i];
}
__syncthreads();
offset = THREAD_SIZE / 2;
while (offset > 0) {
if (tid < offset) {
shared[tid] += shared[tid + offset];
}
offset >>= 1;
__syncthreads();
}
/*
while (offset < THREAD_SIZE){
if ((tid & mask) == 0) {
shared[tid] += shared[tid + offset];
}
offset += offset;
mask += offset;
__syncthreads();
}*/
if (tid == 0) {
result[bid] = shared[0];
time[bid + BLOCK_SIZE] = clock();
}
/*
if (tid == 0) {
for (int i = 0; i < THREAD_SIZE; ++i)
shared[0] += shared[i];
result[bid] = shared[0];
time[bid + BLOCK_SIZE] = clock();
}
*/
}
int main(int argc, char** argv) {
Generate(data, DATA_SIZE);
int* gpudata, *result;
clock_t* time;
cudaMalloc((void**)&gpudata, sizeof(int)* DATA_SIZE);
cudaMalloc((void**)&result, sizeof(int)* THREAD_SIZE * BLOCK_SIZE);
cudaMalloc((void**)&time, sizeof(int)* 2 * BLOCK_SIZE);
cudaMemcpy(gpudata, data, sizeof(int)* DATA_SIZE, cudaMemcpyHostToDevice);
sumOf << <BLOCK_SIZE, THREAD_SIZE, THREAD_SIZE * sizeof(int) >> >(gpudata, result, time);
clock_t usetime[BLOCK_SIZE * 2];
int sum[THREAD_SIZE * BLOCK_SIZE];
cudaMemcpy(sum, result, sizeof(int)* THREAD_SIZE * BLOCK_SIZE, cudaMemcpyDeviceToHost);
cudaMemcpy(usetime, time, sizeof(int)* 2 * BLOCK_SIZE, cudaMemcpyDeviceToHost);
cudaFree(gpudata);
cudaFree(result);
cudaFree(time);
int finalsum = 0;
for (int i = 0; i < THREAD_SIZE * BLOCK_SIZE; ++i)
finalsum += sum[i];
clock_t minp, maxp;
minp = usetime[0], maxp = usetime[BLOCK_SIZE];
for (int i = 0; i < BLOCK_SIZE; ++i){
if (usetime[i] < minp) minp = usetime[i];
if (usetime[i + BLOCK_SIZE] > maxp) maxp = usetime[i + BLOCK_SIZE];
}
int val = 0;
for (int i = 0; i < DATA_SIZE; ++i)
val += data[i] * data[i];
printf("%d\n", val);
printf("%d %d\n", finalsum, maxp - minp);
system("pause");
return 0;
} |
bb4a41261615a7576f4669f0bd590560b850a9e4.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
******************************************************************************/
/******************************************************************************
* Radix Sorting API
******************************************************************************/
#pragma once
#include "radixsort_base.cu"
namespace b40c {
/**
* Storage management structure for multi-CTA-sorting device vectors.
*
* Multi-CTA sorting is performed out-of-core, meaning that sorting passes
* must have two equally sized arrays: one for reading in from, the other for
* writing out to. As such, this structure maintains a pair of device vectors
* for keys (and for values), and a "selector" member to index which vector
* contains valid data (i.e., the data to-be-sorted, or the valid-sorted data
* after a sorting operation).
*
* E.g., consider a MultiCtaRadixSortStorage "device_storage". The valid data
* should always be accessible by:
*
* device_storage.d_keys[device_storage.selector];
*
* The non-selected array(s) can be allocated lazily upon first sorting by the
* sorting enactor if left NULL, or a-priori by the caller. (If user-allocated,
* they should be large enough to accomodate num_elements.)
*
* It is the caller's responsibility to free any non-NULL storage arrays when
* no longer needed. This allows for the storage to be re-used for subsequent
* sorting operations of the same size.
*
* NOTE: After a sorting operation has completed, the selecter member will
* index the key (and value) pointers that contain the final sorted results.
* (E.g., an odd number of sorting passes may leave the results in d_keys[1] if
* the input started in d_keys[0].)
*
*/
template <typename K, typename V = KeysOnlyType>
struct MultiCtaRadixSortStorage
{
// Pair of device vector pointers for keys
K* d_keys[2];
// Pair of device vector pointers for values
V* d_values[2];
// Number of elements for sorting in the above vectors
int num_elements;
// Selector into the pair of device vector pointers indicating valid
// sorting elements (i.e., where the results are)
int selector;
// Constructor
MultiCtaRadixSortStorage(int num_elements = 0) :
num_elements(num_elements),
selector(0)
{
d_keys[0] = NULL;
d_keys[1] = NULL;
d_values[0] = NULL;
d_values[1] = NULL;
}
// Constructor
MultiCtaRadixSortStorage(int num_elements, K* keys, V* values = NULL) :
num_elements(num_elements),
selector(0)
{
d_keys[0] = keys;
d_keys[1] = NULL;
d_values[0] = values;
d_values[1] = NULL;
}
};
/**
 * Base class for multi-CTA sorting enactors
 */
template <typename K, typename V, typename Storage = MultiCtaRadixSortStorage<K, V> >
class MultiCtaRadixSortingEnactor :
	public BaseRadixSortingEnactor<K, V, Storage>
{
private:

	typedef BaseRadixSortingEnactor<K, V, Storage> Base;

protected:

	// Maximum number of threadblocks this enactor will launch
	int max_grid_size;

	// Fixed "tile size" of keys by which threadblocks iterate over
	int tile_elements;

	// Temporary device storage needed for scanning digit histograms produced
	// by separate CTAs
	int *d_spine;

protected:

	/**
	 * Constructor.
	 *
	 * Allocates the spine used to scan per-CTA digit histograms, padded up
	 * to a whole number of spine tiles.  If hipMalloc fails, d_spine stays
	 * NULL (the destructor tolerates this); callers should verify the
	 * allocation before sorting.
	 */
	MultiCtaRadixSortingEnactor(
		int max_grid_size,
		int tile_elements,
		int max_radix_bits,
		const CudaProperties &props = CudaProperties()) :
			Base::BaseRadixSortingEnactor(props),
			max_grid_size(max_grid_size),
			tile_elements(tile_elements),
			d_spine(NULL)
	{
		// Round the spine up to a whole number of spine tiles
		int spine_elements = max_grid_size * (1 << max_radix_bits);
		int spine_tiles = (spine_elements + B40C_RADIXSORT_SPINE_TILE_ELEMENTS - 1) /
				B40C_RADIXSORT_SPINE_TILE_ELEMENTS;
		spine_elements = spine_tiles * B40C_RADIXSORT_SPINE_TILE_ELEMENTS;
		hipMalloc((void**) &d_spine, spine_elements * sizeof(int));
	}

	/**
	 * Computes the work-decomposition amongst CTAs for the given problem
	 * size and grid size
	 */
	void GetWorkDecomposition(
		int num_elements,
		int grid_size,
		CtaDecomposition &work_decomposition)
	{
		// Distribute whole tiles as evenly as possible; the first
		// 'extra_tiles' CTAs each take one additional tile.
		int total_tiles = (num_elements + tile_elements - 1) / tile_elements;
		int tiles_per_block = total_tiles / grid_size;
		int extra_tiles = total_tiles - (tiles_per_block * grid_size);

		work_decomposition.num_big_blocks = extra_tiles;
		work_decomposition.big_block_elements = (tiles_per_block + 1) * tile_elements;
		work_decomposition.normal_block_elements = tiles_per_block * tile_elements;
		// Remainder of keys in the (partial) last tile
		work_decomposition.extra_elements_last_block = num_elements % tile_elements;
		work_decomposition.num_elements = num_elements;
	}

	/**
	 * Pre-sorting logic.  Lazily allocates any of the four ping-pong
	 * device vectors the caller left NULL.
	 *
	 * FIX: previously every hipMalloc result was discarded and hipSuccess
	 * was returned unconditionally, so a failed allocation left NULL
	 * buffers that later kernels would dereference.  The first failure is
	 * now propagated to the caller.
	 */
	virtual hipError_t PreSort(Storage &problem_storage, int passes)
	{
		hipError_t retval = hipSuccess;

		// Allocate device memory for temporary storage (if necessary)
		if (problem_storage.d_keys[0] == NULL) {
			retval = hipMalloc((void**) &problem_storage.d_keys[0], problem_storage.num_elements * sizeof(K));
			if (retval != hipSuccess) return retval;
		}
		if (problem_storage.d_keys[1] == NULL) {
			retval = hipMalloc((void**) &problem_storage.d_keys[1], problem_storage.num_elements * sizeof(K));
			if (retval != hipSuccess) return retval;
		}
		if (!Base::KeysOnly()) {
			if (problem_storage.d_values[0] == NULL) {
				retval = hipMalloc((void**) &problem_storage.d_values[0], problem_storage.num_elements * sizeof(V));
				if (retval != hipSuccess) return retval;
			}
			if (problem_storage.d_values[1] == NULL) {
				retval = hipMalloc((void**) &problem_storage.d_values[1], problem_storage.num_elements * sizeof(V));
				if (retval != hipSuccess) return retval;
			}
		}

		return retval;
	}

	/**
	 * Post-sorting logic.  No-op hook for derived enactors.
	 */
	virtual hipError_t PostSort(MultiCtaRadixSortStorage<K, V> &problem_storage, int passes)
	{
		return hipSuccess;
	}

public:

	/**
	 * Destructor.  Releases the spine allocation (if any).
	 */
	virtual ~MultiCtaRadixSortingEnactor()
	{
		if (d_spine) hipFree(d_spine);
	}
};
}// namespace b40c
| bb4a41261615a7576f4669f0bd590560b850a9e4.cu | /******************************************************************************
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
******************************************************************************/
/******************************************************************************
* Radix Sorting API
******************************************************************************/
#pragma once
#include "radixsort_base.cu"
namespace b40c {
/**
* Storage management structure for multi-CTA-sorting device vectors.
*
* Multi-CTA sorting is performed out-of-core, meaning that sorting passes
* must have two equally sized arrays: one for reading in from, the other for
* writing out to. As such, this structure maintains a pair of device vectors
* for keys (and for values), and a "selector" member to index which vector
* contains valid data (i.e., the data to-be-sorted, or the valid-sorted data
* after a sorting operation).
*
* E.g., consider a MultiCtaRadixSortStorage "device_storage". The valid data
* should always be accessible by:
*
* device_storage.d_keys[device_storage.selector];
*
* The non-selected array(s) can be allocated lazily upon first sorting by the
* sorting enactor if left NULL, or a-priori by the caller. (If user-allocated,
 * they should be large enough to accommodate num_elements.)
*
* It is the caller's responsibility to free any non-NULL storage arrays when
* no longer needed. This allows for the storage to be re-used for subsequent
* sorting operations of the same size.
*
 * NOTE: After a sorting operation has completed, the selector member will
* index the key (and value) pointers that contain the final sorted results.
* (E.g., an odd number of sorting passes may leave the results in d_keys[1] if
* the input started in d_keys[0].)
*
*/
template <typename K, typename V = KeysOnlyType>
struct MultiCtaRadixSortStorage
{
	// Ping-pong key buffers: each sorting pass reads from one and writes the other
	K* d_keys[2];

	// Ping-pong value buffers (ignored for keys-only sorting)
	V* d_values[2];

	// Problem size (number of key/value elements to sort)
	int num_elements;

	// Index (0 or 1) of the buffer pair currently holding the valid data
	int selector;

	// Construct with no backing storage; buffers may be lazily allocated
	// by the sorting enactor
	MultiCtaRadixSortStorage(int num_elements = 0) :
		num_elements(num_elements),
		selector(0)
	{
		for (int i = 0; i < 2; i++) {
			d_keys[i] = NULL;
			d_values[i] = NULL;
		}
	}

	// Construct around caller-provided input buffers; the alternate
	// buffers are left NULL for lazy allocation
	MultiCtaRadixSortStorage(int num_elements, K* keys, V* values = NULL) :
		num_elements(num_elements),
		selector(0)
	{
		d_keys[0] = keys;
		d_values[0] = values;
		d_keys[1] = NULL;
		d_values[1] = NULL;
	}
};
/**
 * Base class for multi-CTA sorting enactors
 */
template <typename K, typename V, typename Storage = MultiCtaRadixSortStorage<K, V> >
class MultiCtaRadixSortingEnactor :
	public BaseRadixSortingEnactor<K, V, Storage>
{
private:

	typedef BaseRadixSortingEnactor<K, V, Storage> Base;

protected:

	// Maximum number of threadblocks this enactor will launch
	int max_grid_size;

	// Fixed "tile size" of keys by which threadblocks iterate over
	int tile_elements;

	// Temporary device storage needed for scanning digit histograms produced
	// by separate CTAs
	int *d_spine;

protected:

	/**
	 * Constructor.
	 *
	 * Allocates the spine used to scan per-CTA digit histograms, padded up
	 * to a whole number of spine tiles.  If cudaMalloc fails, d_spine stays
	 * NULL (the destructor tolerates this); callers should verify the
	 * allocation before sorting.
	 */
	MultiCtaRadixSortingEnactor(
		int max_grid_size,
		int tile_elements,
		int max_radix_bits,
		const CudaProperties &props = CudaProperties()) :
			Base::BaseRadixSortingEnactor(props),
			max_grid_size(max_grid_size),
			tile_elements(tile_elements),
			d_spine(NULL)
	{
		// Round the spine up to a whole number of spine tiles
		int spine_elements = max_grid_size * (1 << max_radix_bits);
		int spine_tiles = (spine_elements + B40C_RADIXSORT_SPINE_TILE_ELEMENTS - 1) /
				B40C_RADIXSORT_SPINE_TILE_ELEMENTS;
		spine_elements = spine_tiles * B40C_RADIXSORT_SPINE_TILE_ELEMENTS;
		cudaMalloc((void**) &d_spine, spine_elements * sizeof(int));
	}

	/**
	 * Computes the work-decomposition amongst CTAs for the given problem
	 * size and grid size
	 */
	void GetWorkDecomposition(
		int num_elements,
		int grid_size,
		CtaDecomposition &work_decomposition)
	{
		// Distribute whole tiles as evenly as possible; the first
		// 'extra_tiles' CTAs each take one additional tile.
		int total_tiles = (num_elements + tile_elements - 1) / tile_elements;
		int tiles_per_block = total_tiles / grid_size;
		int extra_tiles = total_tiles - (tiles_per_block * grid_size);

		work_decomposition.num_big_blocks = extra_tiles;
		work_decomposition.big_block_elements = (tiles_per_block + 1) * tile_elements;
		work_decomposition.normal_block_elements = tiles_per_block * tile_elements;
		// Remainder of keys in the (partial) last tile
		work_decomposition.extra_elements_last_block = num_elements % tile_elements;
		work_decomposition.num_elements = num_elements;
	}

	/**
	 * Pre-sorting logic.  Lazily allocates any of the four ping-pong
	 * device vectors the caller left NULL.
	 *
	 * FIX: previously every cudaMalloc result was discarded and cudaSuccess
	 * was returned unconditionally, so a failed allocation left NULL
	 * buffers that later kernels would dereference.  The first failure is
	 * now propagated to the caller.
	 */
	virtual cudaError_t PreSort(Storage &problem_storage, int passes)
	{
		cudaError_t retval = cudaSuccess;

		// Allocate device memory for temporary storage (if necessary)
		if (problem_storage.d_keys[0] == NULL) {
			retval = cudaMalloc((void**) &problem_storage.d_keys[0], problem_storage.num_elements * sizeof(K));
			if (retval != cudaSuccess) return retval;
		}
		if (problem_storage.d_keys[1] == NULL) {
			retval = cudaMalloc((void**) &problem_storage.d_keys[1], problem_storage.num_elements * sizeof(K));
			if (retval != cudaSuccess) return retval;
		}
		if (!Base::KeysOnly()) {
			if (problem_storage.d_values[0] == NULL) {
				retval = cudaMalloc((void**) &problem_storage.d_values[0], problem_storage.num_elements * sizeof(V));
				if (retval != cudaSuccess) return retval;
			}
			if (problem_storage.d_values[1] == NULL) {
				retval = cudaMalloc((void**) &problem_storage.d_values[1], problem_storage.num_elements * sizeof(V));
				if (retval != cudaSuccess) return retval;
			}
		}

		return retval;
	}

	/**
	 * Post-sorting logic.  No-op hook for derived enactors.
	 */
	virtual cudaError_t PostSort(MultiCtaRadixSortStorage<K, V> &problem_storage, int passes)
	{
		return cudaSuccess;
	}

public:

	/**
	 * Destructor.  Releases the spine allocation (if any).
	 */
	virtual ~MultiCtaRadixSortingEnactor()
	{
		if (d_spine) cudaFree(d_spine);
	}
};
}// namespace b40c
|
0f12f55012e4bc4b4acd9012f5d2848734253038.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "cryptocontexthelper.h"
#include "subgaussian/subgaussian.h"
#include <errno.h>
//#include "palisade.h"
//#include "palisadecore.h"
//#include "cryptocontexthelper.h"
//#include "/usr/local/include/palisade/trapdoor/abe/kp_abe_rns.h"
#include "abe/kp_abe_rns.h"
using namespace std;
using namespace lbcrypto;
// Check a HIP API status code; on failure report the error location and
// (by default) abort the process with the error code as the exit status.
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code == hipSuccess) return;
   fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
   if (abort) exit(code);
}
// Device-to-device copy of m rows, each made of q_amount RNS limbs of n
// 64-bit words, from buffer B into buffer A at matching offsets.
void copyRow(unsigned long long * &A, unsigned long long * &B, int m, unsigned q_amount, unsigned n) {
    unsigned int offset = 0;
    const size_t limb_bytes = sizeof(unsigned long long) * n;
    for (int row = 0; row < m; row++) {
        for (unsigned limb = 0; limb < q_amount; limb++) {
            // Return code intentionally unchecked, matching the file's style.
            hipMemcpy(A + offset, B + offset, limb_bytes, hipMemcpyDeviceToDevice);
            offset += n;
        }
    }
}
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell);
usint EvalNANDTree(usint *x, usint ell);
// First-level NAND gate evaluation kernel for gate index i, with m_m the
// matrix dimension.  Launched over m_m * m_m lanes (one block of m_m threads
// per row in the current launch configuration).
//
// FIX: the output pointers were previously declared as host references
// (unsigned long long*&).  A __global__ kernel cannot dereference a host
// reference -- the device would read a host stack address and fault -- so
// they are now passed by value, which leaves every launch site unchanged.
//
// NOTE(review): multiple lanes map to the same output slot
// (i*m_m + idx/m_m), so the unguarded =/+= updates below race across
// threads; confirm the intended per-row accumulation before relying on the
// results.
__global__ void NAND_circuit_1(int i, int m_m, unsigned long long* wCT, unsigned long long* origCT_device, unsigned long long* wPublicElementB, unsigned long long* p_psi, unsigned long long* wPubElemB_dev){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < m_m*m_m){
		wCT[i*m_m + int(idx/m_m)] = origCT_device[(2*i + 1) *m_m + idx];
		wCT[i*m_m + int(idx/m_m)] += p_psi[idx%m_m * m_m] * origCT_device[(2*i + 2)*m_m + idx%m_m];
		wPublicElementB[i*m_m + int(idx/m_m)] = wPubElemB_dev[(2*i + 2)*m_m + idx%m_m] * p_psi[idx%m_m * m_m];
	}
}
int main() {
// PalisadeParallelControls.Enable();
cout << "BEFORE CALLING THE KPABE BENCH" << endl;
//PseudoRandomNumberGenerator::InitPRNG();
usint iter = 4;
usint att = 2;
usint q_size = 4;
usint n = 1 << 12;
usint base = 1 << 20;
KPABE_BenchmarkCircuitTestDCRT(iter, base,n, q_size,att);
return 0;
}
// Flatten a square Matrix<DCRTPoly> into the HOST buffer `tmp`, one RNS limb
// (n words) at a time.  Despite the H2D-style name, the destination is host
// memory and the copy kind is hipMemcpyHostToHost.
//
// NOTE(review): the inner loop bound is GetRows(), which is only correct for
// square matrices -- confirm GetCols() was not intended.
void copyMatrixH2D2(unsigned long long *&tmp, Matrix<DCRTPoly> src) {
    vector<vector<DCRTPoly>> matrix = src.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < src.GetRows(); i++){
        for (int j = 0; j < src.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            for (int k = 0; k < q_size; k++, index+=n){
                // FIX: was `&tmp + index`, which strides from the ADDRESS OF
                // THE POINTER VARIABLE (an unsigned long long**) and clobbers
                // unrelated host memory; the data destination is tmp + index.
                hipMemcpy(tmp+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n, hipMemcpyHostToHost);
            }
        }
    }
}
// Copy every RNS limb of a square Matrix<DCRTPoly> from host into the flat
// device buffer `dest`, in row-major (i, j, limb) order, n words per limb.
// NOTE(review): the inner loop bound is GetRows(), not GetCols() -- correct
// only for square matrices; confirm against the call sites.
void copyMatrixH2D(unsigned long long *&dest, Matrix<DCRTPoly> src) {
    vector<vector<DCRTPoly>> matrix = src.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < src.GetRows(); i++){
        for (int j = 0; j < src.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            // One host-to-device copy per limb; return codes are not checked.
            for (int k = 0; k < q_size; k++, index+=n)
                //CUDA_CALL(hipMemcpy(dest+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n,hipMemcpyHostToDevice));
                hipMemcpy(dest+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n,hipMemcpyHostToDevice);
        }
    }
}
// Copy a flat device buffer back into a DCRTPoly.
// NOTE(review): `pol` is taken BY VALUE, so the final `pol = v` assignment
// only mutates the local copy and the caller never sees the result; the
// per-limb vectors in the loop are also built and discarded.  As written
// this function is effectively a no-op for the caller -- confirm whether
// the parameter should be a reference before use.
void copyDCRTPolyD2H(DCRTPoly pol, unsigned long long *dev) {
    int q_size = pol.GetParams()->GetParams().size();
    vector<PolyImpl<NativeVector>> rns_poly = pol.GetAllElements();
    int n = rns_poly[0].GetRingDimension();
    // Host staging buffer for all limbs (never freed -- leaks each call).
    unsigned long long * poly_h = (unsigned long long*)malloc(n*q_size*sizeof(unsigned long long));
    hipMemcpy(poly_h, dev, sizeof(unsigned long long) * n * q_size,hipMemcpyDeviceToHost);
    unsigned long long index = 0;
    for (int k = 0; k < q_size; k++, index+=n){
        // Per-limb vector is constructed but never assigned into rns_poly.
        std::vector<unsigned long long> v(poly_h, poly_h + n);
        //rns_poly[k] = v;
    }
    std::vector<int64_t> v(poly_h, poly_h + (n*q_size));
    pol = v;
    std::cout << "Operation Completed" << std::endl;
}
// Print the first `count` entries of `a` as a bracketed, comma-separated
// list (e.g. "[1, 2, ]").  `count` defaults to 1024, preserving the
// original hard-coded behavior while allowing other lengths.
void print_array(unsigned long long a[], int count = 1024)
{
    cout << "[";
    for (int i = 0; i < count; i++)
    {
        cout << a[i] << ", ";
    }
    cout << "]\n";
}
// Dump a flattened matrix buffer limb-by-limb via print_array.
// NOTE(review): the parameters i, m, size, and n are never used -- i and n
// are shadowed by the locals below -- and the inner loop runs over
// GetRows() rather than GetCols() (square matrices only).  print_array
// reads 1024 words per limb regardless of the actual ring dimension, and
// `d_matrix` is named like a device pointer, which the host cannot read
// directly -- confirm both at the call sites.
void printMatrix(unsigned long long int *d_matrix, Matrix<DCRTPoly> h_matrix, unsigned i, unsigned m, unsigned size,
                 unsigned n) {
    vector<vector<DCRTPoly>> matrix = h_matrix.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < h_matrix.GetRows(); i++){
        for (int j = 0; j < h_matrix.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            for (int k = 0; k < q_size; k++, index+=n)
                print_array(d_matrix+index);
        }
    }
}
// GPU-side evaluation of the KP-ABE ciphertext for the first level of NAND
// gates (in-progress port of KPABErns::EvalCT).
//
// NOTE(review): this is debugging-stage code with several flagged defects,
// including a for statement near the end whose only "body" is a
// commented-out printf -- as written that loop does not compile.  None of
// the device allocations below are freed and no hipMalloc/hipMemcpy return
// codes are checked.
void EvalCT_GPU(KPABErns &kpabe, const shared_ptr<ILDCRTParams<BigInteger>> &params, Matrix<DCRTPoly> &pubElemB, /*unsigned long long* &negPubElemB_device,*/
    usint x[], usint *x_device, unsigned long long* &origCT_device, Matrix<DCRTPoly> &origCT, usint *&evalAttributes_dev,
    unsigned long long* &evalCT_device, usint ell, usint M, size_t size) {
  vector<LatticeSubgaussianUtility<NativeInteger>> m_util = kpabe.get_util();
  //usint ell = kpabe.Get_ell();
  // Trapdoor width taken from the caller rather than recomputed locally.
  usint m = M; //params->GetModulus().GetMSB()+2 ; //kpabe.Get_m();
  usint n = params->GetRingDimension();
  usint q_size = params->GetParams().size();
  cout << "m: "<< m<< endl;
  auto zero_alloc = DCRTPoly::Allocator(params, Format::EVALUATION);
  // Part pertaining to A (does not change): copy the first m rows of the
  // original ciphertext into the evaluated-ciphertext buffer.
  copyRow(evalCT_device, origCT_device, m, q_size, n);
  usint gateCnt = ell - 1;
  // w stands for Wire
  unsigned long long* wPublicElementB;
  // Bis associated with internal wires of the circuit
  hipMalloc(&wPublicElementB, sizeof(unsigned long long) * gateCnt * m * q_size * n);
  unsigned long long* wCT;
  // Ciphertexts associated with internal wires of the circuit
  hipMalloc(&wCT, sizeof(unsigned long long) * gateCnt * m * q_size * n);
  // Attribute values associated with internal wires of the circuit
  //TODO check this one
  std::vector<usint> wX(gateCnt);
  // Temporary variables for bit decomposition operation
  Matrix<DCRTPoly> negB(zero_alloc, 1, m); // Format::EVALUATION (NTT domain)
  // Input level of the circuit
  usint t = ell >> 1; // the number of the gates in the first level (the
                      // number of input gates)
  unsigned long long* p_psi;
  // looping to evaluate and calculate w, wB, wC
  // and R for all first level input gates
  for (usint i = 0; i < t; i++){
    wX[i] = x[0] - x[2 * i + 1] * x[2 * i + 2]; // calculating binary wire value
#pragma omp parallel for schedule(dynamic)
    for (usint j = 0; j < m; j++) { // Negating Bis for bit decomposition
      negB(0, j) = pubElemB(2 * i + 1, j).Negate();
      negB(0, j).SwitchFormat();
    }
    auto psi = InverseRingVectorDCRT(m_util, negB, 1);
    psi->SwitchFormat();
    // NOTE(review): allocated anew on every gate iteration; never freed.
    hipMalloc(reinterpret_cast<void **>(&p_psi), sizeof(unsigned long long) * m * m * size * n);
    copyMatrixH2D(p_psi, (*psi));
    printf("THE VALUE OF n=%d\n", n);
    for(usint i=0; i<4 ; i++){
      // NOTE(review): this inner `i` shadows the gate-loop `i` above.
      bigintnat::NativeIntegerT<unsigned long> tmp = (*psi)(0, 0).GetElementAtIndex(i).at(i);
      // NOTE(review): the following for statement has no body (its only
      // statement is commented out); as written this does not compile.
      for (usint j=1; j< n+3; j++)
        //*//printf("%ul\n", tmp.GetGetBitAtIndex(j));
    }
    // Newly Added, copying origCT before calling the kernel
    unsigned long long * origCT_device_n, *wPubElemB_dev;
    hipMalloc(&origCT_device_n, sizeof(unsigned long long) * (1) * m * size * n);
    hipMalloc(&wPubElemB_dev, sizeof(unsigned long long) * (1) * m * size * n);
    // Copying publicElementB
    unsigned long long* publicElemB_device;
    hipMalloc(reinterpret_cast<void **>(&publicElemB_device), sizeof(unsigned long long) * (ell + 1) * m * size * n);
    copyMatrixH2D(publicElemB_device, pubElemB);
    copyMatrixH2D(origCT_device_n, origCT.ExtractRows(1, ell+1));
    // Calling the first NAND circuit kernel ## TESTING PHASE
    if (x[2 * i + 2] != 0) {
      hipLaunchKernelGGL(( NAND_circuit_1), dim3(m), dim3(m), 0, 0, i, m , wCT, origCT_device_n, wPublicElementB, p_psi, publicElemB_device);
      printf("AFTER ENTERING THE KERNEL\n");
      unsigned long long * res_wCT;
      // NOTE(review): res_wCT is an uninitialized host pointer and the
      // source operand `&wCT` is the address of the device-pointer variable
      // on the host stack, not the device buffer; this copy is undefined
      // behavior.  Likely intent: malloc res_wCT and copy from `wCT`.
      hipMemcpy(res_wCT, &wCT, sizeof(unsigned long long) * n, hipMemcpyDeviceToHost);
    }
    printf("RESULTS FROM origCT\n");
    Matrix<DCRTPoly> wCT1(zero_alloc, gateCnt, m);
    // NOTE(review): this OpenMP pragma is not followed by a for loop (the
    // loop below is commented out), which is ill-formed with OpenMP enabled.
#pragma omp parallel for schedule(dynamic)
    /*
    for (usint j = 0; j < m; j++) {
      if (x[2 * i + 2] != 0)
        wCT1(i, j) = origCT(2 * i + 1, j);
      else
        wCT1(i, j).SetValuesToZero();
    }
    for(int j =0; j< 10; j++){
      printf("%u \n", wCT1(i, j));
      // printf("%u \n", origCT(0, j));
    }
    */
  }
}
// Run the KP-ABE NAND-circuit benchmark `iter` times.
//
// Parameters:
//   iter - number of encrypt/eval/keygen/decrypt iterations
//   base - relinearization base used by the trapdoor/gadget
//   n    - ring dimension
//   size - number of CRT moduli
//   ell  - number of attributes (NAND tree leaves)
// Returns 0 in all cases (a decryption failure is reported and aborts the
// loop early).
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell) {
  size_t kRes = 50;  // bit size of each CRT modulus

  std::cout << "Number of attributes: " << ell << std::endl;
  std::cout << "n: " << n << std::endl;

  std::vector<NativeInteger> moduli;
  std::vector<NativeInteger> roots_Of_Unity;
  // makes sure the first integer is less than 2^60-1 to take advantage of
  // NTL optimizations
  NativeInteger firstInteger = FirstPrime<NativeInteger>(kRes, 2 * n);
  NativeInteger q = PreviousPrime<NativeInteger>(firstInteger, 2 * n);
  moduli.push_back(q);
  roots_Of_Unity.push_back(RootOfUnity<NativeInteger>(2 * n, moduli[0]));
  std::cout << "q["<< 0 <<"]_k: " << q.GetMSB() << std::endl;
  NativeInteger prevQ = q;
  for (size_t i = 1; i < size; i++) {
    prevQ = lbcrypto::PreviousPrime<NativeInteger>(prevQ, 2 * n);
    NativeInteger nextRootOfUnity(RootOfUnity<NativeInteger>(2 * n, prevQ));
    moduli.push_back(prevQ);
    std::cout << "q["<< i <<"]_k: " << moduli[i].GetMSB() << std::endl;
    roots_Of_Unity.push_back(nextRootOfUnity);
  }
  auto ilDCRTParams =
      std::make_shared<ILDCRTParams<BigInteger>>(2 * n, moduli, roots_Of_Unity);
  ChineseRemainderTransformFTT<NativeVector>::PreCompute(roots_Of_Unity, 2 * n,
                                                         moduli);
  std::cout << "k: " << ilDCRTParams->GetModulus().GetMSB() << std::endl;
  size_t digitCount = (long)ceil(
      log2(ilDCRTParams->GetParams()[0]->GetModulus().ConvertToDouble()) /
      log2(base));
  size_t k = ilDCRTParams->GetModulus().GetMSB();
  std::cout << "digit count = " << digitCount << std::endl;
  size_t m = k + 2;  // trapdoor matrix width

  auto zero_alloc = DCRTPoly::Allocator(ilDCRTParams, Format::COEFFICIENT);
  DCRTPoly::DggType dgg = DCRTPoly::DggType(SIGMA);
  DCRTPoly::DugType dug = DCRTPoly::DugType();
  DCRTPoly::BugType bug = DCRTPoly::BugType();

  // Trapdoor Generation; trapdoorA.first is the public element A
  std::pair<Matrix<DCRTPoly>, RLWETrapdoorPair<DCRTPoly>> trapdoorA =
      RLWETrapdoorUtility<DCRTPoly>::TrapdoorGen(
          ilDCRTParams, SIGMA, base);
  DCRTPoly pubElemBeta(dug, ilDCRTParams, Format::EVALUATION);
  Matrix<DCRTPoly> publicElementB(zero_alloc, ell + 1, m);
  Matrix<DCRTPoly> ctCin(zero_alloc, ell + 2, m);
  DCRTPoly c1(dug, ilDCRTParams, Format::EVALUATION);
  KPABErns pkg, sender, receiver;
  pkg.Setup(ilDCRTParams, base, ell, dug, &publicElementB);
  sender.Setup(ilDCRTParams, base, ell);
  receiver.Setup(ilDCRTParams, base, ell);

  // Draw random binary attributes (x[0] fixed to 1) until the NAND tree
  // evaluates to 0, i.e. the key will be authorized for the ciphertext.
  std::vector<usint> x(ell + 1);
  x[0] = 1;
  usint found = 0;
  while (found == 0) {
    for (usint i = 1; i < ell + 1; i++)
      x[i] = bug.GenerateInteger().ConvertToInt();
    if (EvalNANDTree(&x[1], ell) == 0) found = 1;
  }
  usint y;
  TimeVar t1;
  double avg_keygen(0.0), avg_evalct(0.0), avg_evalpk(0.0), avg_enc(0.0),
      avg_dec(0.0);
  for (usint i = 0; i < iter; i++) {
    std::cout << "running iter " << i + 1 << std::endl;
    // random binary plaintext
    NativePoly ptext(bug, ilDCRTParams->GetParams()[0], Format::COEFFICIENT);
    // circuit outputs
    Matrix<DCRTPoly> evalBf(
        DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
        m);  // evaluated Bs
    Matrix<DCRTPoly> evalCf(
        DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
        m);  // evaluated Cs
    Matrix<DCRTPoly> ctCA(DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION),
                          1, m);  // CA
    // secret key corresponding to the circuit output
    Matrix<DCRTPoly> sk(zero_alloc, 2, m);
    // decrypted text
    NativePoly dtext;

    TIC(t1);
    sender.Encrypt(ilDCRTParams, trapdoorA.first, publicElementB, pubElemBeta,
                   &x[0], ptext, dgg, dug, bug, &ctCin,
                   &c1);  // Cin and c1 are the ciphertext
    avg_enc += TOC(t1);

    ctCA = ctCin.ExtractRow(0);  // CA is A^T * s + e 0,A

    // Allocate and copy the buffers used by the GPU evaluation path.
    // NOTE(review): these per-iteration device allocations are never freed
    // and the hipMalloc/hipMemcpy return codes are not checked.
    usint* x_device;
    hipMalloc(&x_device,(ell+1) * sizeof(usint));
    hipMemcpy(x_device,&x[0], (ell+1) * sizeof(usint),hipMemcpyHostToDevice);
    unsigned long long* ctCin_device;
    hipMalloc(&ctCin_device, sizeof(unsigned long long) * (ell + 1) * m * size * n);
    unsigned long long* evalCf_device;
    // FIX: this allocation previously targeted &ctCin_device again, leaking
    // the first allocation and passing the uninitialized evalCf_device
    // pointer into EvalCT_GPU below.
    hipMalloc(&evalCf_device, sizeof(unsigned long long) * (1) * m * size * n);
    copyMatrixH2D(ctCin_device, ctCin.ExtractRows(1, ell + 1));
    usint* y_device;
    hipMalloc(&y_device, sizeof(usint));
    EvalCT_GPU(sender, ilDCRTParams, publicElementB, &x[0], x_device, ctCin_device, ctCin, y_device, evalCf_device, ell, m, size);
    hipDeviceSynchronize();

    TIC(t1);
    receiver.EvalCT(ilDCRTParams, publicElementB, &x[0],
                    ctCin.ExtractRows(1, ell + 1), &y, &evalCf);
    avg_evalct += TOC(t1);

    TIC(t1);
    pkg.EvalPK(ilDCRTParams, publicElementB, &evalBf);
    avg_evalpk += TOC(t1);

    TIC(t1);
    pkg.KeyGen(ilDCRTParams, trapdoorA.first, evalBf, pubElemBeta,
               trapdoorA.second, dgg, &sk);
    avg_keygen += TOC(t1);

    TIC(t1);
    receiver.Decrypt(ilDCRTParams, sk, ctCA, evalCf, c1, &dtext);
    avg_dec += TOC_US(t1);  // microseconds, unlike the other timers

    // Verify the decryption bit-by-bit (plaintext reduced mod 2).
    NativeVector ptext2 = ptext.GetValues();
    ptext2.SetModulus(NativeInteger(2));
    if (ptext2 != dtext.GetValues()) {
      std::cout << "Decryption fails at iteration: " << i << std::endl;
      return 0;
    }
  }

  std::cout << "Encryption is successful after " << iter << " iterations!\n";
  std::cout << "Average key generation time :  "
            << "\t" << (avg_keygen) / iter << " ms" << std::endl;
  std::cout << "Average ciphertext Format::EVALUATION time : "
            << "\t" << (avg_evalct) / iter << " ms" << std::endl;
  std::cout << "Average public key Format::EVALUATION time : "
            << "\t" << (avg_evalpk) / iter << " ms" << std::endl;
  std::cout << "Average encryption time :  "
            << "\t" << (avg_enc) / iter << " ms" << std::endl;
  std::cout << "Average decryption time :  "
            << "\t" << (avg_dec) / (iter * 1000) << " ms" << std::endl;
  return 0;
}
// Evaluate a balanced binary NAND tree over `ell` binary attributes.
// `ell` is expected to be a power of two, >= 2; the tree is evaluated by
// recursively halving the attribute span.
usint EvalNANDTree(usint *x, usint ell) {
  if (ell == 2) {
    return 1 - x[0] * x[1];
  }
  usint half = ell >> 1;
  usint left = EvalNANDTree(x, half);
  usint right = EvalNANDTree(x + half, half);
  return 1 - left * right;
}
| 0f12f55012e4bc4b4acd9012f5d2848734253038.cu | #include <iostream>
#include "cryptocontexthelper.h"
#include "subgaussian/subgaussian.h"
#include <errno.h>
//#include "palisade.h"
//#include "palisadecore.h"
//#include "cryptocontexthelper.h"
//#include "/usr/local/include/palisade/trapdoor/abe/kp_abe_rns.h"
#include "abe/kp_abe_rns.h"
using namespace std;
using namespace lbcrypto;
// Check a CUDA API status code; on failure report the error location and
// (by default) abort the process with the error code as the exit status.
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess) return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort) exit(code);
}
// Device-to-device copy of m rows, each made of q_amount RNS limbs of n
// 64-bit words, from buffer B into buffer A at matching offsets.
void copyRow(unsigned long long * &A, unsigned long long * &B, int m, unsigned q_amount, unsigned n) {
    unsigned int offset = 0;
    const size_t limb_bytes = sizeof(unsigned long long) * n;
    for (int row = 0; row < m; row++) {
        for (unsigned limb = 0; limb < q_amount; limb++) {
            // Return code intentionally unchecked, matching the file's style.
            cudaMemcpy(A + offset, B + offset, limb_bytes, cudaMemcpyDeviceToDevice);
            offset += n;
        }
    }
}
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell);
usint EvalNANDTree(usint *x, usint ell);
// First-level NAND gate evaluation kernel for gate index i, with m_m the
// matrix dimension.  Launched over m_m * m_m lanes (one block of m_m threads
// per row in the current launch configuration).
//
// FIX: the output pointers were previously declared as host references
// (unsigned long long*&).  A __global__ kernel cannot dereference a host
// reference -- the device would read a host stack address and fault -- so
// they are now passed by value, which leaves every launch site unchanged.
//
// NOTE(review): multiple lanes map to the same output slot
// (i*m_m + idx/m_m), so the unguarded =/+= updates below race across
// threads; confirm the intended per-row accumulation before relying on the
// results.
__global__ void NAND_circuit_1(int i, int m_m, unsigned long long* wCT, unsigned long long* origCT_device, unsigned long long* wPublicElementB, unsigned long long* p_psi, unsigned long long* wPubElemB_dev){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < m_m*m_m){
		wCT[i*m_m + int(idx/m_m)] = origCT_device[(2*i + 1) *m_m + idx];
		wCT[i*m_m + int(idx/m_m)] += p_psi[idx%m_m * m_m] * origCT_device[(2*i + 2)*m_m + idx%m_m];
		wPublicElementB[i*m_m + int(idx/m_m)] = wPubElemB_dev[(2*i + 2)*m_m + idx%m_m] * p_psi[idx%m_m * m_m];
	}
}
// Entry point: run the KP-ABE benchmark once with a fixed configuration.
int main() {
	cout << "BEFORE CALLING THE KPABE BENCH" << endl;
	// Benchmark configuration: iteration count, radix base, ring
	// dimension, number of CRT moduli, and attribute count.
	const usint iterations = 4;
	const usint attributes = 2;
	const usint crt_moduli = 4;
	const usint ring_dim = 1 << 12;
	const usint radix_base = 1 << 20;
	KPABE_BenchmarkCircuitTestDCRT(iterations, radix_base, ring_dim, crt_moduli, attributes);
	return 0;
}
// Flatten a square Matrix<DCRTPoly> into the HOST buffer `tmp`, one RNS limb
// (n words) at a time.  Despite the H2D-style name, the destination is host
// memory and the copy kind is cudaMemcpyHostToHost.
//
// NOTE(review): the inner loop bound is GetRows(), which is only correct for
// square matrices -- confirm GetCols() was not intended.
void copyMatrixH2D2(unsigned long long *&tmp, Matrix<DCRTPoly> src) {
    vector<vector<DCRTPoly>> matrix = src.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < src.GetRows(); i++){
        for (int j = 0; j < src.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            for (int k = 0; k < q_size; k++, index+=n){
                // FIX: was `&tmp + index`, which strides from the ADDRESS OF
                // THE POINTER VARIABLE (an unsigned long long**) and clobbers
                // unrelated host memory; the data destination is tmp + index.
                cudaMemcpy(tmp+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n, cudaMemcpyHostToHost);
            }
        }
    }
}
// Copy every RNS limb of a square Matrix<DCRTPoly> from host into the flat
// device buffer `dest`, in row-major (i, j, limb) order, n words per limb.
// NOTE(review): the inner loop bound is GetRows(), not GetCols() -- correct
// only for square matrices; confirm against the call sites.
void copyMatrixH2D(unsigned long long *&dest, Matrix<DCRTPoly> src) {
    vector<vector<DCRTPoly>> matrix = src.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < src.GetRows(); i++){
        for (int j = 0; j < src.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            // One host-to-device copy per limb; return codes are not checked.
            for (int k = 0; k < q_size; k++, index+=n)
                //CUDA_CALL(cudaMemcpy(dest+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n,cudaMemcpyHostToDevice));
                cudaMemcpy(dest+index, &rns_poly[k].GetValues()[0], sizeof(unsigned long long) * n,cudaMemcpyHostToDevice);
        }
    }
}
// Copy a flat device buffer back into a DCRTPoly.
// NOTE(review): `pol` is taken BY VALUE, so the final `pol = v` assignment
// only mutates the local copy and the caller never sees the result; the
// per-limb vectors in the loop are also built and discarded.  As written
// this function is effectively a no-op for the caller -- confirm whether
// the parameter should be a reference before use.
void copyDCRTPolyD2H(DCRTPoly pol, unsigned long long *dev) {
    int q_size = pol.GetParams()->GetParams().size();
    vector<PolyImpl<NativeVector>> rns_poly = pol.GetAllElements();
    int n = rns_poly[0].GetRingDimension();
    // Host staging buffer for all limbs (never freed -- leaks each call).
    unsigned long long * poly_h = (unsigned long long*)malloc(n*q_size*sizeof(unsigned long long));
    cudaMemcpy(poly_h, dev, sizeof(unsigned long long) * n * q_size,cudaMemcpyDeviceToHost);
    unsigned long long index = 0;
    for (int k = 0; k < q_size; k++, index+=n){
        // Per-limb vector is constructed but never assigned into rns_poly.
        std::vector<unsigned long long> v(poly_h, poly_h + n);
        //rns_poly[k] = v;
    }
    std::vector<int64_t> v(poly_h, poly_h + (n*q_size));
    pol = v;
    std::cout << "Operation Completed" << std::endl;
}
// Print the first `count` entries of `a` as a bracketed, comma-separated
// list (e.g. "[1, 2, ]").  `count` defaults to 1024, preserving the
// original hard-coded behavior while allowing other lengths.
void print_array(unsigned long long a[], int count = 1024)
{
    cout << "[";
    for (int i = 0; i < count; i++)
    {
        cout << a[i] << ", ";
    }
    cout << "]\n";
}
// Dump a flattened matrix buffer limb-by-limb via print_array.
// NOTE(review): the parameters i, m, size, and n are never used -- i and n
// are shadowed by the locals below -- and the inner loop runs over
// GetRows() rather than GetCols() (square matrices only).  print_array
// reads 1024 words per limb regardless of the actual ring dimension, and
// `d_matrix` is named like a device pointer, which the host cannot read
// directly -- confirm both at the call sites.
void printMatrix(unsigned long long int *d_matrix, Matrix<DCRTPoly> h_matrix, unsigned i, unsigned m, unsigned size,
                 unsigned n) {
    vector<vector<DCRTPoly>> matrix = h_matrix.GetData();
    unsigned long long index = 0;
    for (int i = 0; i < h_matrix.GetRows(); i++){
        for (int j = 0; j < h_matrix.GetRows(); j++){
            int q_size = matrix[i][j].GetParams()->GetParams().size();
            vector<PolyImpl<NativeVector>> rns_poly = matrix[i][j].GetAllElements();
            int n = rns_poly[0].GetRingDimension();
            for (int k = 0; k < q_size; k++, index+=n)
                print_array(d_matrix+index);
        }
    }
}
// GPU-side evaluation of the KP-ABE ciphertext for the first (input) level of a
// NAND-gate circuit. Development/debug version: only level-one gates are processed
// and several debug prints remain.
// Fixes applied in review:
//  - restored the parameter name `&params` (it had been mojibake'd to `¶ms`,
//    while the body references `params`),
//  - the debug bit-print loop had its only statement commented out, leaving a
//    body-less `for` followed by `}` (a compile error) — it now has a braced body,
//  - removed an orphan `#pragma omp parallel for` whose loop was commented out,
//  - `res_wCT` is now allocated before the device-to-host copy, and the copy
//    source is the device buffer `wCT` (it was `&wCT`, a host pointer-to-pointer),
//  - device buffers allocated per gate iteration are now freed.
void EvalCT_GPU(KPABErns &kpabe, const shared_ptr<ILDCRTParams<BigInteger>> &params, Matrix<DCRTPoly> &pubElemB, /*unsigned long long* &negPubElemB_device,*/
                usint x[], usint *x_device, unsigned long long* &origCT_device, Matrix<DCRTPoly> &origCT, usint *&evalAttributes_dev,
                unsigned long long* &evalCT_device, usint ell, usint M, size_t size) {
    vector<LatticeSubgaussianUtility<NativeInteger>> m_util = kpabe.get_util();
    usint m = M; //params->GetModulus().GetMSB()+2 ; //kpabe.Get_m();
    usint n = params->GetRingDimension();
    usint q_size = params->GetParams().size();
    cout << "m: " << m << endl;
    auto zero_alloc = DCRTPoly::Allocator(params, Format::EVALUATION);
    // Part pertaining to A (does not change): copy straight into the evaluated CT.
    copyRow(evalCT_device, origCT_device, m, q_size, n);
    usint gateCnt = ell - 1;
    // w stands for Wire: buffers associated with internal wires of the circuit.
    unsigned long long* wPublicElementB; // Bis associated with internal wires
    cudaMalloc(&wPublicElementB, sizeof(unsigned long long) * gateCnt * m * q_size * n);
    unsigned long long* wCT;             // ciphertexts associated with internal wires
    cudaMalloc(&wCT, sizeof(unsigned long long) * gateCnt * m * q_size * n);
    // Attribute values associated with internal wires of the circuit.
    std::vector<usint> wX(gateCnt);
    // Temporary variable for the bit decomposition operation, Format::EVALUATION (NTT domain).
    Matrix<DCRTPoly> negB(zero_alloc, 1, m);
    // Input level of the circuit: number of gates in the first level (input gates).
    usint t = ell >> 1;
    unsigned long long* p_psi;
    // Evaluate w, wB, wC (and R) for all first-level input gates.
    for (usint i = 0; i < t; i++) {
        wX[i] = x[0] - x[2 * i + 1] * x[2 * i + 2]; // calculating binary wire value
#pragma omp parallel for schedule(dynamic)
        for (usint j = 0; j < m; j++) { // Negating Bis for bit decomposition
            negB(0, j) = pubElemB(2 * i + 1, j).Negate();
            negB(0, j).SwitchFormat();
        }
        auto psi = InverseRingVectorDCRT(m_util, negB, 1);
        psi->SwitchFormat();
        cudaMalloc(reinterpret_cast<void **>(&p_psi), sizeof(unsigned long long) * m * m * size * n);
        copyMatrixH2D(p_psi, (*psi));
        printf("THE VALUE OF n=%d\n", n);
        // Debug inspection of a few diagonal coefficients of psi.
        for (usint d = 0; d < 4; d++) {
            bigintnat::NativeIntegerT<unsigned long> tmp = (*psi)(0, 0).GetElementAtIndex(d).at(d);
            for (usint j = 1; j < n + 3; j++) {
                // debug print disabled:
                //printf("%ul\n", tmp.GetBitAtIndex(j));
            }
        }
        // Copy origCT and allocate device scratch before calling the kernel.
        unsigned long long *origCT_device_n, *wPubElemB_dev;
        cudaMalloc(&origCT_device_n, sizeof(unsigned long long) * (1) * m * size * n);
        cudaMalloc(&wPubElemB_dev, sizeof(unsigned long long) * (1) * m * size * n);
        // Copying publicElementB.
        unsigned long long* publicElemB_device;
        cudaMalloc(reinterpret_cast<void **>(&publicElemB_device), sizeof(unsigned long long) * (ell + 1) * m * size * n);
        copyMatrixH2D(publicElemB_device, pubElemB);
        copyMatrixH2D(origCT_device_n, origCT.ExtractRows(1, ell + 1));
        // First NAND circuit kernel (testing phase); only run when the second
        // input attribute of this gate is non-zero.
        if (x[2 * i + 2] != 0) {
            NAND_circuit_1<<<m, m>>>(i, m, wCT, origCT_device_n, wPublicElementB, p_psi, publicElemB_device);
            printf("AFTER ENTERING THE KERNEL\n");
            // Debug read-back of the first ring's worth of wire-ciphertext data.
            unsigned long long *res_wCT = (unsigned long long *)malloc(sizeof(unsigned long long) * n);
            cudaMemcpy(res_wCT, wCT, sizeof(unsigned long long) * n, cudaMemcpyDeviceToHost);
            free(res_wCT);
        }
        printf("RESULTS FROM origCT\n");
        Matrix<DCRTPoly> wCT1(zero_alloc, gateCnt, m); // host-side wire CT (debug; currently unused)
        // Release per-gate device buffers (previously leaked every iteration).
        cudaFree(p_psi);
        cudaFree(origCT_device_n);
        cudaFree(wPubElemB_dev);
        cudaFree(publicElemB_device);
    }
    cudaFree(wPublicElementB);
    cudaFree(wCT);
}
// End-to-end KP-ABE benchmark over a NAND-tree circuit using DCRT (RNS) arithmetic.
// iter: number of benchmark iterations; base: gadget decomposition base;
// n: ring dimension; size: number of CRT moduli; ell: number of attributes.
// Prints average timings; returns 0 (also on decryption failure, after reporting it).
int KPABE_BenchmarkCircuitTestDCRT(usint iter, int32_t base, usint n, size_t size, usint ell) {
    size_t kRes = 50; // CRT modulus size (bits)
    std::cout << "Number of attributes: " << ell << std::endl;
    std::cout << "n: " << n << std::endl;
    std::vector<NativeInteger> moduli;
    std::vector<NativeInteger> roots_Of_Unity;
    // Make sure the first integer is less than 2^60-1 to take advantage of NTL optimizations.
    NativeInteger firstInteger = FirstPrime<NativeInteger>(kRes, 2 * n);
    NativeInteger q = PreviousPrime<NativeInteger>(firstInteger, 2 * n);
    moduli.push_back(q);
    roots_Of_Unity.push_back(RootOfUnity<NativeInteger>(2 * n, moduli[0]));
    std::cout << "q[" << 0 << "]_k: " << q.GetMSB() << std::endl;
    NativeInteger prevQ = q;
    // Generate the remaining CRT moduli and their roots of unity.
    for (size_t i = 1; i < size; i++) {
        prevQ = lbcrypto::PreviousPrime<NativeInteger>(prevQ, 2 * n);
        NativeInteger nextRootOfUnity(RootOfUnity<NativeInteger>(2 * n, prevQ));
        moduli.push_back(prevQ);
        std::cout << "q[" << i << "]_k: " << moduli[i].GetMSB() << std::endl;
        roots_Of_Unity.push_back(nextRootOfUnity);
    }
    auto ilDCRTParams =
        std::make_shared<ILDCRTParams<BigInteger>>(2 * n, moduli, roots_Of_Unity);
    ChineseRemainderTransformFTT<NativeVector>::PreCompute(roots_Of_Unity, 2 * n,
                                                           moduli);
    std::cout << "k: " << ilDCRTParams->GetModulus().GetMSB() << std::endl;
    size_t digitCount = (long)ceil(
        log2(ilDCRTParams->GetParams()[0]->GetModulus().ConvertToDouble()) /
        log2(base));
    size_t k = ilDCRTParams->GetModulus().GetMSB(); //digitCount * ilDCRTParams->GetParams().size();
    std::cout << "digit count = " << digitCount << std::endl;
    size_t m = k + 2; // trapdoor matrix width
    auto zero_alloc = DCRTPoly::Allocator(ilDCRTParams, Format::COEFFICIENT);
    DCRTPoly::DggType dgg = DCRTPoly::DggType(SIGMA);
    DCRTPoly::DugType dug = DCRTPoly::DugType();
    DCRTPoly::BugType bug = DCRTPoly::BugType();
    // Trapdoor Generation; trapdoorA.first is the public element A.
    std::pair<Matrix<DCRTPoly>, RLWETrapdoorPair<DCRTPoly>> trapdoorA =
        RLWETrapdoorUtility<DCRTPoly>::TrapdoorGen(
            ilDCRTParams, SIGMA, base);
    DCRTPoly pubElemBeta(dug, ilDCRTParams, Format::EVALUATION);
    Matrix<DCRTPoly> publicElementB(zero_alloc, ell + 1, m);
    Matrix<DCRTPoly> ctCin(zero_alloc, ell + 2, m);
    DCRTPoly c1(dug, ilDCRTParams, Format::EVALUATION);
    KPABErns pkg, sender, receiver;
    pkg.Setup(ilDCRTParams, base, ell, dug, &publicElementB);
    sender.Setup(ilDCRTParams, base, ell);
    receiver.Setup(ilDCRTParams, base, ell);
    // Draw random attribute values until the NAND tree evaluates to 0
    // (so decryption with the generated key is expected to succeed).
    std::vector<usint> x(ell + 1);
    x[0] = 1;
    usint found = 0;
    while (found == 0) {
        for (usint i = 1; i < ell + 1; i++)
            x[i] = bug.GenerateInteger().ConvertToInt();
        if (EvalNANDTree(&x[1], ell) == 0) found = 1;
    }
    usint y;
    TimeVar t1;
    double avg_keygen(0.0), avg_evalct(0.0), avg_evalpk(0.0), avg_enc(0.0),
        avg_dec(0.0);
    for (usint i = 0; i < iter; i++) {
        std::cout << "running iter " << i + 1 << std::endl;
        NativePoly ptext(bug, ilDCRTParams->GetParams()[0], Format::COEFFICIENT);
        // circuit outputs
        Matrix<DCRTPoly> evalBf(
            DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
            m); // evaluated Bs
        Matrix<DCRTPoly> evalCf(
            DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION), 1,
            m); // evaluated Cs
        Matrix<DCRTPoly> ctCA(DCRTPoly::Allocator(ilDCRTParams, Format::EVALUATION),
                              1, m); // CA
        // secret key corresponding to the circuit output
        Matrix<DCRTPoly> sk(zero_alloc, 2, m);
        // decrypted text
        NativePoly dtext;
        TIC(t1);
        sender.Encrypt(ilDCRTParams, trapdoorA.first, publicElementB, pubElemBeta,
                       &x[0], ptext, dgg, dug, bug, &ctCin,
                       &c1); // Cin and c1 are the ciphertext
        avg_enc += TOC(t1);
        ctCA = ctCin.ExtractRow(0); // CA is A^T * s + e 0,A
        // Allocate and copy device-side inputs for the GPU evaluation path.
        usint* x_device;
        cudaMalloc(&x_device, (ell + 1) * sizeof(usint));
        cudaMemcpy(x_device, &x[0], (ell + 1) * sizeof(usint), cudaMemcpyHostToDevice);
        unsigned long long* ctCin_device;
        // TODO: revisit this allocation size
        cudaMalloc(&ctCin_device, sizeof(unsigned long long) * (ell + 1) * m * size * n);
        unsigned long long* evalCf_device;
        // BUG FIX: this call previously re-allocated ctCin_device (leaking the
        // first allocation and passing evalCf_device to EvalCT_GPU uninitialized).
        cudaMalloc(&evalCf_device, sizeof(unsigned long long) * (1) * m * size * n);
        copyMatrixH2D(ctCin_device, ctCin.ExtractRows(1, ell + 1));
        usint* y_device;
        cudaMalloc(&y_device, sizeof(usint));
        EvalCT_GPU(sender, ilDCRTParams, publicElementB, &x[0], x_device, ctCin_device, ctCin, y_device, evalCf_device, ell, m, size);
        cudaDeviceSynchronize();
        TIC(t1);
        receiver.EvalCT(ilDCRTParams, publicElementB, &x[0],
                        ctCin.ExtractRows(1, ell + 1), &y, &evalCf);
        avg_evalct += TOC(t1);
        TIC(t1);
        pkg.EvalPK(ilDCRTParams, publicElementB, &evalBf);
        avg_evalpk += TOC(t1);
        TIC(t1);
        pkg.KeyGen(ilDCRTParams, trapdoorA.first, evalBf, pubElemBeta,
                   trapdoorA.second, dgg, &sk);
        avg_keygen += TOC(t1);
        TIC(t1);
        receiver.Decrypt(ilDCRTParams, sk, ctCA, evalCf, c1, &dtext);
        avg_dec += TOC_US(t1); // microseconds; converted to ms when reported
        // Release per-iteration device buffers (previously leaked every iteration).
        cudaFree(x_device);
        cudaFree(ctCin_device);
        cudaFree(evalCf_device);
        cudaFree(y_device);
        // Verify the round trip over the binary plaintext.
        NativeVector ptext2 = ptext.GetValues();
        ptext2.SetModulus(NativeInteger(2));
        if (ptext2 != dtext.GetValues()) {
            std::cout << "Decryption fails at iteration: " << i << std::endl;
            return 0;
        }
    }
    std::cout << "Encryption is successful after " << iter << " iterations!\n";
    std::cout << "Average key generation time : "
              << "\t" << (avg_keygen) / iter << " ms" << std::endl;
    std::cout << "Average ciphertext Format::EVALUATION time : "
              << "\t" << (avg_evalct) / iter << " ms" << std::endl;
    std::cout << "Average public key Format::EVALUATION time : "
              << "\t" << (avg_evalpk) / iter << " ms" << std::endl;
    std::cout << "Average encryption time : "
              << "\t" << (avg_enc) / iter << " ms" << std::endl;
    std::cout << "Average decryption time : "
              << "\t" << (avg_dec) / (iter * 1000) << " ms" << std::endl;
    return 0;
}
// Recursively evaluates a balanced binary tree of NAND gates over the ell
// attribute bits in x (ell is assumed to be a power of two, ell >= 2).
// Base case: a single gate on two leaves; otherwise NAND the two half-trees.
usint EvalNANDTree(usint *x, usint ell) {
    if (ell == 2)
        return 1 - x[0] * x[1];
    usint half = ell >> 1;
    return 1 - (EvalNANDTree(x, half) * EvalNANDTree(x + half, half));
}
|
16e52b786b4ad05862d01164a9bc695c276a4036.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include<iostream>
#include<cstdlib>
#include<cmath>
//#define DEBUG
/* comment following lines for benchmark run*/
#define ENABLE_SINGLE_SHOT_COL_INDEX
#define ENABLE_SINGLE_SHOT_ROW_INDEX
// Computes one element of D = A x B + C per thread over the flattened
// MatrixRows x MatrixColums result; out-of-range threads exit early.
__global__ void kernel(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums) {
    // Unique flat index for this thread across the whole grid.
    const int ThreadIndex = blockDim.x * blockIdx.x + threadIdx.x;
#if defined(DEBUG)
    printf("ThreadIndex: %d\n", ThreadIndex);
#endif
    const int total = MatrixRows * MatrixColums;
    if (ThreadIndex >= total)
        return;
    // Applying the equation D = AxB + C
    ResultMatrix[ThreadIndex] = a_InputArray[ThreadIndex % MatrixRows] * b_InputArray[ThreadIndex % MatrixColums] + AddMatrix[ThreadIndex];
}
// debuging functions
void init_array(int* pArray, const int MatrixRows);
void init_mat(int* pMatrix, const int MatrixRows, const int MatrixColums);
void print_array(int* pArray, const int MatrixRows, char* MatrixName);
void print_mat(int* pMatrix, const int MatrixRows, const int MatrixColums, char* MatrixName);
int calculate_error(int* p_a_Matrix, int* p_b_Matrix, const int MatrixRows, const int MatrixColums);
void cpu_calculate_array(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums);
// Reference CPU implementation of D = AxB + C over the flattened
// MatrixRows x MatrixColums result, mirroring the GPU kernel exactly.
void cpu_calculate_array(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums) {
    const int total = MatrixRows * MatrixColums;
    for (int idx = 0; idx < total; ++idx)
    {
        // Applying the equation D = AxB + C
        ResultMatrix[idx] = a_InputArray[idx % MatrixRows] * b_InputArray[idx % MatrixColums] + AddMatrix[idx];
#if defined(DEBUG)
        printf("[ Indices id:%d, aId:%d, bId:%d ] Inputs a:%d, b:%d, c:%d >> Output d:%d\n", idx, idx % MatrixRows, idx % MatrixColums, a_InputArray[idx % MatrixRows], b_InputArray[idx % MatrixColums], AddMatrix[idx], ResultMatrix[idx]);
#endif
    }
}
// Benchmark driver: computes D = A x B + C on the GPU and on the CPU, compares
// the results, and reports timings for each.
// The ENABLE_SINGLE_SHOT_* macros select, per dimension, either a single fixed
// size (512) or a benchmark sweep; in the sweep configuration the bare { ... }
// block below becomes the body of the corresponding for-loop.
int main(void) {
	srand(time(NULL));
	// Variables Initialization
	int* h_a_InputArray, * h_b_InputArray, * h_c_AddMatrix, * h_d_GpuResultMatrix, * d_CpuResultMatrix;
	int* dev_a_InputArray, * dev_b_InputArray, * dev_c_AddMatrix, * dev_d_GpuResultMatrix;
	hipEvent_t start, end; // GPU timing events
	float time = 0.0;
#ifdef ENABLE_SINGLE_SHOT_ROW_INDEX
	int RowsNum = 512;
#else
	for (int RowsNum = 32; RowsNum <= 4096; RowsNum+=64)
#endif // ENABLE_SINGLE_SHOT_ROW_INDEX
	{
#ifdef ENABLE_SINGLE_SHOT_COL_INDEX
		int ColNum = 512;
#else
		for (int ColNum = 32; ColNum <= 4096; ColNum += 64)
#endif // ENABLE_SINGLE_SHOT_COL_INDEX
		{
			// Using Parameterized Index for testing multiple values
			int MatrixRows = RowsNum;
			int MatrixColums = ColNum;
			printf("Trying with dimention %dx%d :%d\n", MatrixRows, MatrixColums, MatrixRows * MatrixColums);
			// Allocating host memory
			h_a_InputArray = (int*)malloc(sizeof(int) * MatrixRows);
			h_b_InputArray = (int*)malloc(sizeof(int) * MatrixColums);
			h_c_AddMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			h_d_GpuResultMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			d_CpuResultMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			// Input Data Initialization (pseudo-random values in [1, 4])
			init_array(h_a_InputArray, MatrixRows);
			init_array(h_b_InputArray, MatrixColums);
			init_mat(h_c_AddMatrix, MatrixRows, MatrixColums);
#ifdef DEBUG
			printf("<<<<<<<<<< initial data:\n");
			print_array(h_a_InputArray, MatrixRows, "a-vector");
			print_array(h_b_InputArray, MatrixColums, "b-vector");
			print_mat(h_c_AddMatrix, MatrixRows, MatrixColums, "c-matrix");
#endif // !DEBUG
			// Allocating device memory
			hipMalloc((void**)&dev_a_InputArray, sizeof(int) * MatrixRows);
			hipMalloc((void**)&dev_b_InputArray, sizeof(int) * MatrixColums);
			hipMalloc((void**)&dev_c_AddMatrix, sizeof(int) * MatrixRows * MatrixColums);
			hipMalloc((void**)&dev_d_GpuResultMatrix, sizeof(int) * MatrixRows * MatrixColums);
			// Copying input data from Host [Cpu context] to Device/Kernel [Gpu context]
			hipMemcpy(dev_a_InputArray, h_a_InputArray, sizeof(int) * MatrixRows, hipMemcpyHostToDevice);
			hipMemcpy(dev_b_InputArray, h_b_InputArray, sizeof(int) * MatrixColums, hipMemcpyHostToDevice);
			hipMemcpy(dev_c_AddMatrix, h_c_AddMatrix, sizeof(int) * MatrixRows * MatrixColums, hipMemcpyHostToDevice);
			// Creating events used for Gpu Execution time measurments
			// NOTE(review): events are created every loop iteration but never
			// destroyed (missing hipEventDestroy) — leaks in sweep mode.
			hipEventCreate(&start);
			hipEventCreate(&end);
#ifdef DEBUG
			printf("\n\nRunning Kernel...\n\n");
#endif // DEBUG
			// record gpu start time event
			hipEventRecord(start);
			// launch kernel: <<<blocks per grid, threads per block>>>
			// (ceil-divide the element count by the 256-thread block size)
			kernel << <MatrixRows * MatrixColums / 256 + 1, 256 >> > (dev_a_InputArray, dev_b_InputArray, dev_c_AddMatrix, dev_d_GpuResultMatrix, MatrixRows, MatrixColums);
			// record gpu end time event
			hipEventRecord(end);
			hipEventSynchronize(end);
			// get execution time for gpu threads
			hipEventElapsedTime(&time, start, end);
			// Copying output data from Device/Kernel [Gpu context] to Host [Cpu context]
			hipMemcpy(h_d_GpuResultMatrix, dev_d_GpuResultMatrix, sizeof(int) * MatrixRows * MatrixColums, hipMemcpyDeviceToHost);
			printf("\tGpu Return with Error Code: %s\n", hipGetErrorString(hipGetLastError()));
			printf("\tGPU Time Elapsed: %f ms\n", time);
			// freeing Gpu Memory
			hipFree(dev_a_InputArray);
			hipFree(dev_b_InputArray);
			hipFree(dev_c_AddMatrix);
			hipFree(dev_d_GpuResultMatrix);
			// record cpu start time event
			auto cpu_start = std::chrono::high_resolution_clock::now();
			// calculate the output using Cpu performance
			cpu_calculate_array(h_a_InputArray, h_b_InputArray, h_c_AddMatrix, d_CpuResultMatrix, MatrixRows, MatrixColums);
			// record cpu end time event
			auto cpu_done = std::chrono::high_resolution_clock::now();
			printf("\tCPU Time Elapsed: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(cpu_done - cpu_start).count() / 1000.0);
#ifdef DEBUG
			printf(">>>>>>>>>> Gpu final data:\n");
			print_mat(h_d_GpuResultMatrix, MatrixRows, MatrixColums, "d-matrix");
			printf(">>>>>>>>>> Cpu final data:\n");
			print_mat(d_CpuResultMatrix, MatrixRows, MatrixColums, "cpu-matrix");
#endif // DEBUG
			//calculate the gpu and cpu error deviation to ensure that no issue between both results
			int error = calculate_error(h_d_GpuResultMatrix, d_CpuResultMatrix, MatrixRows, MatrixColums);
			printf("\tDeviation between Cpu and Gpu: %d\n",error);
			// freeing Cpu Memory
			free(h_a_InputArray);
			free(h_b_InputArray);
			free(h_c_AddMatrix);
			free(h_d_GpuResultMatrix);
			free(d_CpuResultMatrix);
		}
	}
	return 0;
};
// Fills the array with pseudo-random values in [1, 4].
void init_array(int* pArray, const int ArraySize) {
    for (int idx = 0; idx < ArraySize; ++idx)
        pArray[idx] = rand() % 4 + 1;
}
// Fills the flattened MatrixRows x MatrixColums matrix with pseudo-random
// values in [1, 4].
void init_mat(int* pMatrix, const int MatrixRows, const int MatrixColums) {
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx)
        pMatrix[idx] = rand() % 4 + 1;
}
// Dumps each element as "name[i]: value", one per line (debug helper).
void print_array(int* pArray, const int ArraySize, char* ArrayName) {
    for (int idx = 0; idx < ArraySize; ++idx)
        printf("\n%s[%d]: %d", ArrayName, idx, pArray[idx]);
    printf("\n");
}
// Dumps the flattened matrix row by row, prefixing each row with
// "name[first-index]:" (debug helper).
void print_mat(int* pMatrix, const int MatrixRows, const int MatrixColums, char* MatrixName) {
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx) {
        // Start a new labeled line at each row boundary.
        if (idx % MatrixColums == 0)
        {
            printf("\n%s[%d]:", MatrixName, idx);
        }
        printf("\t%d", pMatrix[idx]);
    }
    printf("\n");
}
// Returns the sum of absolute element-wise differences between the two
// flattened matrices, printing a diagnostic line for every mismatch.
int calculate_error(int* p_a_Matrix, int* p_b_Matrix, const int MatrixRows, const int MatrixColums)
{
    int total_error = 0;
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx)
    {
        const int diff = abs(p_a_Matrix[idx] - p_b_Matrix[idx]);
        if (diff > 0)
        {
            printf("\tIndex %d has error InputA=%d , InputB=%d\n", idx, p_a_Matrix[idx], p_b_Matrix[idx]);
        }
        total_error += diff;
    }
    return total_error;
}
| 16e52b786b4ad05862d01164a9bc695c276a4036.cu | #include <stdio.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include<iostream>
#include<cstdlib>
#include<cmath>
//#define DEBUG
/* comment following lines for benchmark run*/
#define ENABLE_SINGLE_SHOT_COL_INDEX
#define ENABLE_SINGLE_SHOT_ROW_INDEX
// Computes one element of D = A x B + C per thread over the flattened
// MatrixRows x MatrixColums result; out-of-range threads exit early.
__global__ void kernel(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums) {
    // Unique flat index for this thread across the whole grid.
    const int ThreadIndex = blockDim.x * blockIdx.x + threadIdx.x;
#if defined(DEBUG)
    printf("ThreadIndex: %d\n", ThreadIndex);
#endif
    const int total = MatrixRows * MatrixColums;
    if (ThreadIndex >= total)
        return;
    // Applying the equation D = AxB + C
    ResultMatrix[ThreadIndex] = a_InputArray[ThreadIndex % MatrixRows] * b_InputArray[ThreadIndex % MatrixColums] + AddMatrix[ThreadIndex];
}
// debuging functions
void init_array(int* pArray, const int MatrixRows);
void init_mat(int* pMatrix, const int MatrixRows, const int MatrixColums);
void print_array(int* pArray, const int MatrixRows, char* MatrixName);
void print_mat(int* pMatrix, const int MatrixRows, const int MatrixColums, char* MatrixName);
int calculate_error(int* p_a_Matrix, int* p_b_Matrix, const int MatrixRows, const int MatrixColums);
void cpu_calculate_array(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums);
// Reference CPU implementation of D = AxB + C over the flattened
// MatrixRows x MatrixColums result, mirroring the GPU kernel exactly.
void cpu_calculate_array(int* a_InputArray, int* b_InputArray, int* AddMatrix, int* ResultMatrix, const int MatrixRows, const int MatrixColums) {
    const int total = MatrixRows * MatrixColums;
    for (int idx = 0; idx < total; ++idx)
    {
        // Applying the equation D = AxB + C
        ResultMatrix[idx] = a_InputArray[idx % MatrixRows] * b_InputArray[idx % MatrixColums] + AddMatrix[idx];
#if defined(DEBUG)
        printf("[ Indices id:%d, aId:%d, bId:%d ] Inputs a:%d, b:%d, c:%d >> Output d:%d\n", idx, idx % MatrixRows, idx % MatrixColums, a_InputArray[idx % MatrixRows], b_InputArray[idx % MatrixColums], AddMatrix[idx], ResultMatrix[idx]);
#endif
    }
}
// Benchmark driver: computes D = A x B + C on the GPU and on the CPU, compares
// the results, and reports timings for each.
// The ENABLE_SINGLE_SHOT_* macros select, per dimension, either a single fixed
// size (512) or a benchmark sweep; in the sweep configuration the bare { ... }
// block below becomes the body of the corresponding for-loop.
int main(void) {
	srand(time(NULL));
	// Variables Initialization
	int* h_a_InputArray, * h_b_InputArray, * h_c_AddMatrix, * h_d_GpuResultMatrix, * d_CpuResultMatrix;
	int* dev_a_InputArray, * dev_b_InputArray, * dev_c_AddMatrix, * dev_d_GpuResultMatrix;
	cudaEvent_t start, end; // GPU timing events
	float time = 0.0;
#ifdef ENABLE_SINGLE_SHOT_ROW_INDEX
	int RowsNum = 512;
#else
	for (int RowsNum = 32; RowsNum <= 4096; RowsNum+=64)
#endif // ENABLE_SINGLE_SHOT_ROW_INDEX
	{
#ifdef ENABLE_SINGLE_SHOT_COL_INDEX
		int ColNum = 512;
#else
		for (int ColNum = 32; ColNum <= 4096; ColNum += 64)
#endif // ENABLE_SINGLE_SHOT_COL_INDEX
		{
			// Using Parameterized Index for testing multiple values
			int MatrixRows = RowsNum;
			int MatrixColums = ColNum;
			printf("Trying with dimention %dx%d :%d\n", MatrixRows, MatrixColums, MatrixRows * MatrixColums);
			// Allocating host memory
			h_a_InputArray = (int*)malloc(sizeof(int) * MatrixRows);
			h_b_InputArray = (int*)malloc(sizeof(int) * MatrixColums);
			h_c_AddMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			h_d_GpuResultMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			d_CpuResultMatrix = (int*)malloc(sizeof(int) * MatrixRows * MatrixColums);
			// Input Data Initialization (pseudo-random values in [1, 4])
			init_array(h_a_InputArray, MatrixRows);
			init_array(h_b_InputArray, MatrixColums);
			init_mat(h_c_AddMatrix, MatrixRows, MatrixColums);
#ifdef DEBUG
			printf("<<<<<<<<<< initial data:\n");
			print_array(h_a_InputArray, MatrixRows, "a-vector");
			print_array(h_b_InputArray, MatrixColums, "b-vector");
			print_mat(h_c_AddMatrix, MatrixRows, MatrixColums, "c-matrix");
#endif // !DEBUG
			// Allocating device memory
			cudaMalloc((void**)&dev_a_InputArray, sizeof(int) * MatrixRows);
			cudaMalloc((void**)&dev_b_InputArray, sizeof(int) * MatrixColums);
			cudaMalloc((void**)&dev_c_AddMatrix, sizeof(int) * MatrixRows * MatrixColums);
			cudaMalloc((void**)&dev_d_GpuResultMatrix, sizeof(int) * MatrixRows * MatrixColums);
			// Copying input data from Host [Cpu context] to Device/Kernel [Gpu context]
			cudaMemcpy(dev_a_InputArray, h_a_InputArray, sizeof(int) * MatrixRows, cudaMemcpyHostToDevice);
			cudaMemcpy(dev_b_InputArray, h_b_InputArray, sizeof(int) * MatrixColums, cudaMemcpyHostToDevice);
			cudaMemcpy(dev_c_AddMatrix, h_c_AddMatrix, sizeof(int) * MatrixRows * MatrixColums, cudaMemcpyHostToDevice);
			// Creating events used for Gpu Execution time measurments
			// NOTE(review): events are created every loop iteration but never
			// destroyed (missing cudaEventDestroy) — leaks in sweep mode.
			cudaEventCreate(&start);
			cudaEventCreate(&end);
#ifdef DEBUG
			printf("\n\nRunning Kernel...\n\n");
#endif // DEBUG
			// record gpu start time event
			cudaEventRecord(start);
			// launch kernel: <<<blocks per grid, threads per block>>>
			// (ceil-divide the element count by the 256-thread block size)
			kernel << <MatrixRows * MatrixColums / 256 + 1, 256 >> > (dev_a_InputArray, dev_b_InputArray, dev_c_AddMatrix, dev_d_GpuResultMatrix, MatrixRows, MatrixColums);
			// record gpu end time event
			cudaEventRecord(end);
			cudaEventSynchronize(end);
			// get execution time for gpu threads
			cudaEventElapsedTime(&time, start, end);
			// Copying output data from Device/Kernel [Gpu context] to Host [Cpu context]
			cudaMemcpy(h_d_GpuResultMatrix, dev_d_GpuResultMatrix, sizeof(int) * MatrixRows * MatrixColums, cudaMemcpyDeviceToHost);
			printf("\tGpu Return with Error Code: %s\n", cudaGetErrorString(cudaGetLastError()));
			printf("\tGPU Time Elapsed: %f ms\n", time);
			// freeing Gpu Memory
			cudaFree(dev_a_InputArray);
			cudaFree(dev_b_InputArray);
			cudaFree(dev_c_AddMatrix);
			cudaFree(dev_d_GpuResultMatrix);
			// record cpu start time event
			auto cpu_start = std::chrono::high_resolution_clock::now();
			// calculate the output using Cpu performance
			cpu_calculate_array(h_a_InputArray, h_b_InputArray, h_c_AddMatrix, d_CpuResultMatrix, MatrixRows, MatrixColums);
			// record cpu end time event
			auto cpu_done = std::chrono::high_resolution_clock::now();
			printf("\tCPU Time Elapsed: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(cpu_done - cpu_start).count() / 1000.0);
#ifdef DEBUG
			printf(">>>>>>>>>> Gpu final data:\n");
			print_mat(h_d_GpuResultMatrix, MatrixRows, MatrixColums, "d-matrix");
			printf(">>>>>>>>>> Cpu final data:\n");
			print_mat(d_CpuResultMatrix, MatrixRows, MatrixColums, "cpu-matrix");
#endif // DEBUG
			//calculate the gpu and cpu error deviation to ensure that no issue between both results
			int error = calculate_error(h_d_GpuResultMatrix, d_CpuResultMatrix, MatrixRows, MatrixColums);
			printf("\tDeviation between Cpu and Gpu: %d\n",error);
			// freeing Cpu Memory
			free(h_a_InputArray);
			free(h_b_InputArray);
			free(h_c_AddMatrix);
			free(h_d_GpuResultMatrix);
			free(d_CpuResultMatrix);
		}
	}
	return 0;
};
// Fills the array with pseudo-random values in [1, 4].
void init_array(int* pArray, const int ArraySize) {
    for (int idx = 0; idx < ArraySize; ++idx)
        pArray[idx] = rand() % 4 + 1;
}
// Fills the flattened MatrixRows x MatrixColums matrix with pseudo-random
// values in [1, 4].
void init_mat(int* pMatrix, const int MatrixRows, const int MatrixColums) {
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx)
        pMatrix[idx] = rand() % 4 + 1;
}
// Dumps each element as "name[i]: value", one per line (debug helper).
void print_array(int* pArray, const int ArraySize, char* ArrayName) {
    for (int idx = 0; idx < ArraySize; ++idx)
        printf("\n%s[%d]: %d", ArrayName, idx, pArray[idx]);
    printf("\n");
}
// Dumps the flattened matrix row by row, prefixing each row with
// "name[first-index]:" (debug helper).
void print_mat(int* pMatrix, const int MatrixRows, const int MatrixColums, char* MatrixName) {
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx) {
        // Start a new labeled line at each row boundary.
        if (idx % MatrixColums == 0)
        {
            printf("\n%s[%d]:", MatrixName, idx);
        }
        printf("\t%d", pMatrix[idx]);
    }
    printf("\n");
}
// Returns the sum of absolute element-wise differences between the two
// flattened matrices, printing a diagnostic line for every mismatch.
int calculate_error(int* p_a_Matrix, int* p_b_Matrix, const int MatrixRows, const int MatrixColums)
{
    int total_error = 0;
    const int count = MatrixRows * MatrixColums;
    for (int idx = 0; idx < count; ++idx)
    {
        const int diff = abs(p_a_Matrix[idx] - p_b_Matrix[idx]);
        if (diff > 0)
        {
            printf("\tIndex %d has error InputA=%d , InputB=%d\n", idx, p_a_Matrix[idx], p_b_Matrix[idx]);
        }
        total_error += diff;
    }
    return total_error;
}
|
c4a9b3f896d18f1495a281d835c8d1a07b46de61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacob_hip.cuh"
__device__ void eval_jacob (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ jac, const mechanism_memory * d_mem) {
extern __shared__ double y_shared[];
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
double * rlocal_reaction_rates = d_mem->drrate_dT;
double * rlocal_cooling_rates = d_mem->dcrate_dT;
// scale related piece
//double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retreieved from d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
double mdensity = d_mem->density[T_ID];
double inv_mdensity = 1.0 / mdensity;
double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
double * __restrict__ scale = d_mem->scale;
double * __restrict__ inv_scale = d_mem->inv_scale;
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g; y_shared[S_INDEX(%d)]\n", i, y_in[INDEX(i)], i, scale[INDEX(i)], i, y_shared[S_INDEX(i)] );
}
#else
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y_shared, mdensity, rate_data );
interpolate_reaction_rates ( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
interpolate_drrate_dT( rlocal_reaction_rates, T_local, rate_data );
interpolate_dcrate_dT( rlocal_cooling_rates, T_local, rate_data );
/*
if (T_ID == 0 ){
printf("FROM JAC[%ld]: at time = %0.5g, t_local = %0.5g, f(h2): %0.5g\n", T_ID,t, T_local,y[INDEX(0)]/ (y[INDEX(0)] + y[INDEX(2)] ));
for (int i = 0; i < 10; i++){
printf( "y_in[%d] = %0.5g; \n", i, y_in[INDEX(i)] );
}
}
*/
/*
for (int i = 0; i < 23; i++){
printf("reaction rate[%d] = %0.5g\n", i, local_reaction_rates[INDEX(i)]);
}
for (int i = 0; i < 23; i++){
printf("drrate_dT [%d] = %0.5g\n", i, rlocal_reaction_rates[INDEX(i)]);
}
printf("\n");
*/
// *d_mem->jac_call += 1;
// printf("jac_call = %d\n", *d_mem->jac_call );
// }
// df_H2_1 / H2_1:
jac[INDEX(0)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(20)]*pow(y_shared[S_INDEX(2)], 2);
// df_H2_2 / H2_1:
jac[INDEX(1)] = local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)];
// df_H_1 / H2_1:
jac[INDEX(2)] = local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(2)] - 2*local_reaction_rates[INDEX(20)]*pow(y_shared[S_INDEX(2)], 2);
// df_H_2 / H2_1:
jac[INDEX(3)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)];
// df_ge / H2_1:
jac[INDEX(9)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - 0.5*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)]*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0) - 2.01588*local_cooling_rates[INDEX(25)]*mdensity - local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) + 0.5*local_cooling_rates[INDEX(24)]*local_cooling_rates[INDEX(22)]*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2);
jac[INDEX(9)] *= inv_mdensity;
// df_H2_1 / H2_2:
jac[INDEX(10)] = local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H2_2 / H2_2:
jac[INDEX(11)] = -local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H_1 / H2_2:
jac[INDEX(12)] = -local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H_2 / H2_2:
jac[INDEX(13)] = local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)];
// df_H_m0 / H2_2:
jac[INDEX(14)] = -local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_de / H2_2:
jac[INDEX(18)] = -local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)];
// df_H2_1 / H_1:
jac[INDEX(20)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)] + 2*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + 3*local_reaction_rates[INDEX(21)]*pow(y_shared[S_INDEX(2)], 2);
// df_H2_2 / H_1:
jac[INDEX(21)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)];
// df_H_1 / H_1:
jac[INDEX(22)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)] - 4*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] - 6*local_reaction_rates[INDEX(21)]*pow(y_shared[S_INDEX(2)], 2);
// df_H_2 / H_1:
jac[INDEX(23)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)];
// df_H_m0 / H_1:
jac[INDEX(24)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)];
// df_de / H_1:
jac[INDEX(28)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)];
// df_ge / H_1:
jac[INDEX(29)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(12)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(4)]*y_shared[S_INDEX(8)] + 0.5*local_cooling_rates[INDEX(23)]*local_cooling_rates[INDEX(22)]*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2) + 0.5*(-y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(21)] + 3*pow(y_shared[S_INDEX(2)], 2)*local_cooling_rates[INDEX(20)])*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0);
jac[INDEX(29)] *= inv_mdensity;
// df_H2_1 / H_2:
jac[INDEX(30)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)];
// df_H2_2 / H_2:
jac[INDEX(31)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_H_1 / H_2:
jac[INDEX(32)] = local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)];
// df_H_2 / H_2:
jac[INDEX(33)] = -local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_H_m0 / H_2:
jac[INDEX(34)] = -local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_de / H_2:
jac[INDEX(38)] = -local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_ge / H_2:
jac[INDEX(39)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(15)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(7)];
jac[INDEX(39)] *= inv_mdensity;
// df_H2_1 / H_m0:
jac[INDEX(40)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H2_2 / H_m0:
jac[INDEX(41)] = local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H_1 / H_m0:
jac[INDEX(42)] = -local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H_2 / H_m0:
jac[INDEX(43)] = -local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)];
// df_H_m0 / H_m0:
jac[INDEX(44)] = -local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_de / H_m0:
jac[INDEX(48)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)];
// df_He_1 / He_1:
jac[INDEX(55)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_He_2 / He_1:
jac[INDEX(56)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_de / He_1:
jac[INDEX(58)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_ge / He_1:
jac[INDEX(59)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(14)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(5)]*y_shared[S_INDEX(8)];
jac[INDEX(59)] *= inv_mdensity;
// df_He_1 / He_2:
jac[INDEX(65)] = local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)];
// df_He_2 / He_2:
jac[INDEX(66)] = -local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_He_3 / He_2:
jac[INDEX(67)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_de / He_2:
jac[INDEX(68)] = -local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_ge / He_2:
jac[INDEX(69)] = -local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(2)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(1)]*pow(y_shared[S_INDEX(8)], 2) - local_cooling_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(3)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(9)];
jac[INDEX(69)] *= inv_mdensity;
// df_He_2 / He_3:
jac[INDEX(76)] = local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_He_3 / He_3:
jac[INDEX(77)] = -local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_de / He_3:
jac[INDEX(78)] = -local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_ge / He_3:
jac[INDEX(79)] = -4.0*local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(10)];
jac[INDEX(79)] *= inv_mdensity;
// df_H2_1 / de:
jac[INDEX(80)] = -local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)];
// df_H2_2 / de:
jac[INDEX(81)] = -local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_H_1 / de:
jac[INDEX(82)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_H_2 / de:
jac[INDEX(83)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)];
// df_H_m0 / de:
jac[INDEX(84)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)];
// df_He_1 / de:
jac[INDEX(85)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] + local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)];
// df_He_2 / de:
jac[INDEX(86)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] + local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)];
// df_He_3 / de:
jac[INDEX(87)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)];
// df_de / de:
jac[INDEX(88)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_ge / de:
jac[INDEX(89)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(16)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(0)] - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(4)] - y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(7)] - y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(5)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(2)] - 2*y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(1)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(6)] - 2*y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(3)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(9)] - y_shared[S_INDEX(7)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*(y_shared[S_INDEX(3)] + y_shared[S_INDEX(6)] + 4.0*y_shared[S_INDEX(7)]) - local_cooling_rates[INDEX(18)]*(T_local - 2.73);
jac[INDEX(89)] *= inv_mdensity;
// df_H2_1 / ge:
jac[INDEX(90)] = rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
jac[INDEX(90)] *= Tge;
// df_H2_2 / ge:
jac[INDEX(91)] = rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
jac[INDEX(91)] *= Tge;
// df_H_1 / ge:
jac[INDEX(92)] = -rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + 2*rlocal_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] + 2*rlocal_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + 2*rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] + 2*rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] - 2*rlocal_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] - 2*rlocal_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
jac[INDEX(92)] *= Tge;
// df_H_2 / ge:
jac[INDEX(93)] = rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] + rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)];
jac[INDEX(93)] *= Tge;
// df_H_m0 / ge:
jac[INDEX(94)] = rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
jac[INDEX(94)] *= Tge;
// df_He_1 / ge:
jac[INDEX(95)] = -rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)];
jac[INDEX(95)] *= Tge;
// df_He_2 / ge:
jac[INDEX(96)] = rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
jac[INDEX(96)] *= Tge;
// df_He_3 / ge:
jac[INDEX(97)] = rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
jac[INDEX(97)] *= Tge;
// df_de / ge:
jac[INDEX(98)] = rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)];
jac[INDEX(98)] *= Tge;
jac[INDEX(99)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx*(-local_cooling_rates[INDEX(17)]*(-y_shared[S_INDEX(0)]*rlocal_cooling_rates[INDEX(13)] - y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(12)] - y_shared[S_INDEX(3)]*rlocal_cooling_rates[INDEX(15)] - y_shared[S_INDEX(5)]*rlocal_cooling_rates[INDEX(14)] - y_shared[S_INDEX(8)]*rlocal_cooling_rates[INDEX(16)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2) - rlocal_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]* local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]))/pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]* local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2) - y_shared[S_INDEX(0)]*h2_optical_depth_approx*rlocal_cooling_rates[INDEX(17)]/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]* local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) + 0.5*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)* 
local_cooling_rates[INDEX(20)])*(-1.0*local_cooling_rates[INDEX(22)]*(-y_shared[S_INDEX(0)]*rlocal_cooling_rates[INDEX(24)] - y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(23)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2) - 1.0*rlocal_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]* local_cooling_rates[INDEX(23)])) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*rlocal_cooling_rates[INDEX(20)]);
jac[INDEX(99)] *= inv_mdensity;
jac[INDEX(99)] *= Tge;
#ifdef SCALE_INPUT
jac[INDEX(0)] *= inv_scale[INDEX(0)] * scale[INDEX(0)];
jac[INDEX(1)] *= inv_scale[INDEX(1)] * scale[INDEX(0)];
jac[INDEX(2)] *= inv_scale[INDEX(2)] * scale[INDEX(0)];
jac[INDEX(3)] *= inv_scale[INDEX(3)] * scale[INDEX(0)];
jac[INDEX(9)] *= inv_scale[INDEX(9)] * scale[INDEX(0)];
jac[INDEX(10)] *= inv_scale[INDEX(0)] * scale[INDEX(1)];
jac[INDEX(11)] *= inv_scale[INDEX(1)] * scale[INDEX(1)];
jac[INDEX(12)] *= inv_scale[INDEX(2)] * scale[INDEX(1)];
jac[INDEX(13)] *= inv_scale[INDEX(3)] * scale[INDEX(1)];
jac[INDEX(14)] *= inv_scale[INDEX(4)] * scale[INDEX(1)];
jac[INDEX(18)] *= inv_scale[INDEX(8)] * scale[INDEX(1)];
jac[INDEX(20)] *= inv_scale[INDEX(0)] * scale[INDEX(2)];
jac[INDEX(21)] *= inv_scale[INDEX(1)] * scale[INDEX(2)];
jac[INDEX(22)] *= inv_scale[INDEX(2)] * scale[INDEX(2)];
jac[INDEX(23)] *= inv_scale[INDEX(3)] * scale[INDEX(2)];
jac[INDEX(24)] *= inv_scale[INDEX(4)] * scale[INDEX(2)];
jac[INDEX(28)] *= inv_scale[INDEX(8)] * scale[INDEX(2)];
jac[INDEX(29)] *= inv_scale[INDEX(9)] * scale[INDEX(2)];
jac[INDEX(30)] *= inv_scale[INDEX(0)] * scale[INDEX(3)];
jac[INDEX(31)] *= inv_scale[INDEX(1)] * scale[INDEX(3)];
jac[INDEX(32)] *= inv_scale[INDEX(2)] * scale[INDEX(3)];
jac[INDEX(33)] *= inv_scale[INDEX(3)] * scale[INDEX(3)];
jac[INDEX(34)] *= inv_scale[INDEX(4)] * scale[INDEX(3)];
jac[INDEX(38)] *= inv_scale[INDEX(8)] * scale[INDEX(3)];
jac[INDEX(39)] *= inv_scale[INDEX(9)] * scale[INDEX(3)];
jac[INDEX(40)] *= inv_scale[INDEX(0)] * scale[INDEX(4)];
jac[INDEX(41)] *= inv_scale[INDEX(1)] * scale[INDEX(4)];
jac[INDEX(42)] *= inv_scale[INDEX(2)] * scale[INDEX(4)];
jac[INDEX(43)] *= inv_scale[INDEX(3)] * scale[INDEX(4)];
jac[INDEX(44)] *= inv_scale[INDEX(4)] * scale[INDEX(4)];
jac[INDEX(48)] *= inv_scale[INDEX(8)] * scale[INDEX(4)];
jac[INDEX(55)] *= inv_scale[INDEX(5)] * scale[INDEX(5)];
jac[INDEX(56)] *= inv_scale[INDEX(6)] * scale[INDEX(5)];
jac[INDEX(58)] *= inv_scale[INDEX(8)] * scale[INDEX(5)];
jac[INDEX(59)] *= inv_scale[INDEX(9)] * scale[INDEX(5)];
jac[INDEX(65)] *= inv_scale[INDEX(5)] * scale[INDEX(6)];
jac[INDEX(66)] *= inv_scale[INDEX(6)] * scale[INDEX(6)];
jac[INDEX(67)] *= inv_scale[INDEX(7)] * scale[INDEX(6)];
jac[INDEX(68)] *= inv_scale[INDEX(8)] * scale[INDEX(6)];
jac[INDEX(69)] *= inv_scale[INDEX(9)] * scale[INDEX(6)];
jac[INDEX(76)] *= inv_scale[INDEX(6)] * scale[INDEX(7)];
jac[INDEX(77)] *= inv_scale[INDEX(7)] * scale[INDEX(7)];
jac[INDEX(78)] *= inv_scale[INDEX(8)] * scale[INDEX(7)];
jac[INDEX(79)] *= inv_scale[INDEX(9)] * scale[INDEX(7)];
jac[INDEX(80)] *= inv_scale[INDEX(0)] * scale[INDEX(8)];
jac[INDEX(81)] *= inv_scale[INDEX(1)] * scale[INDEX(8)];
jac[INDEX(82)] *= inv_scale[INDEX(2)] * scale[INDEX(8)];
jac[INDEX(83)] *= inv_scale[INDEX(3)] * scale[INDEX(8)];
jac[INDEX(84)] *= inv_scale[INDEX(4)] * scale[INDEX(8)];
jac[INDEX(85)] *= inv_scale[INDEX(5)] * scale[INDEX(8)];
jac[INDEX(86)] *= inv_scale[INDEX(6)] * scale[INDEX(8)];
jac[INDEX(87)] *= inv_scale[INDEX(7)] * scale[INDEX(8)];
jac[INDEX(88)] *= inv_scale[INDEX(8)] * scale[INDEX(8)];
jac[INDEX(89)] *= inv_scale[INDEX(9)] * scale[INDEX(8)];
jac[INDEX(90)] *= inv_scale[INDEX(0)] * scale[INDEX(9)];
jac[INDEX(91)] *= inv_scale[INDEX(1)] * scale[INDEX(9)];
jac[INDEX(92)] *= inv_scale[INDEX(2)] * scale[INDEX(9)];
jac[INDEX(93)] *= inv_scale[INDEX(3)] * scale[INDEX(9)];
jac[INDEX(94)] *= inv_scale[INDEX(4)] * scale[INDEX(9)];
jac[INDEX(95)] *= inv_scale[INDEX(5)] * scale[INDEX(9)];
jac[INDEX(96)] *= inv_scale[INDEX(6)] * scale[INDEX(9)];
jac[INDEX(97)] *= inv_scale[INDEX(7)] * scale[INDEX(9)];
jac[INDEX(98)] *= inv_scale[INDEX(8)] * scale[INDEX(9)];
jac[INDEX(99)] *= inv_scale[INDEX(9)] * scale[INDEX(9)];
#endif
/*
if (T_ID == 0){
// printf("at time = %0.5g, temp = %0.5g\n",t, *T_local);
for (int i = 0; i<10; i++){
printf("y[INDEX(%d)] = %0.5g\n", i, y[INDEX(i)]);
}
}
*/
/*
if (T_ID == 0){
printf("density: %0.5g\n", mdensity);
printf("T_local: %0.5g\n", T_local);
printf("Tge : %0.5g\n", Tge);
for (int i = 0; i<100; i++){
printf("jac[INDEX(%d)] = %0.5g\n", i, jac[INDEX(i)]);
}
}
*/
} // end eval_jacob
| c4a9b3f896d18f1495a281d835c8d1a07b46de61.cu | #include "jacob.cuh"
__device__ void eval_jacob (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ jac, const mechanism_memory * d_mem) {
extern __shared__ double y_shared[];
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
double * rlocal_reaction_rates = d_mem->drrate_dT;
double * rlocal_cooling_rates = d_mem->dcrate_dT;
// scale related piece
//double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retreieved from d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
double mdensity = d_mem->density[T_ID];
double inv_mdensity = 1.0 / mdensity;
double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
double * __restrict__ scale = d_mem->scale;
double * __restrict__ inv_scale = d_mem->inv_scale;
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g; y_shared[S_INDEX(%d)]\n", i, y_in[INDEX(i)], i, scale[INDEX(i)], i, y_shared[S_INDEX(i)] );
}
#else
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y_shared, mdensity, rate_data );
interpolate_reaction_rates ( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
interpolate_drrate_dT( rlocal_reaction_rates, T_local, rate_data );
interpolate_dcrate_dT( rlocal_cooling_rates, T_local, rate_data );
/*
if (T_ID == 0 ){
printf("FROM JAC[%ld]: at time = %0.5g, t_local = %0.5g, f(h2): %0.5g\n", T_ID,t, T_local,y[INDEX(0)]/ (y[INDEX(0)] + y[INDEX(2)] ));
for (int i = 0; i < 10; i++){
printf( "y_in[%d] = %0.5g; \n", i, y_in[INDEX(i)] );
}
}
*/
/*
for (int i = 0; i < 23; i++){
printf("reaction rate[%d] = %0.5g\n", i, local_reaction_rates[INDEX(i)]);
}
for (int i = 0; i < 23; i++){
printf("drrate_dT [%d] = %0.5g\n", i, rlocal_reaction_rates[INDEX(i)]);
}
printf("\n");
*/
// *d_mem->jac_call += 1;
// printf("jac_call = %d\n", *d_mem->jac_call );
// }
// df_H2_1 / H2_1:
jac[INDEX(0)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(20)]*pow(y_shared[S_INDEX(2)], 2);
// df_H2_2 / H2_1:
jac[INDEX(1)] = local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)];
// df_H_1 / H2_1:
jac[INDEX(2)] = local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(2)] - 2*local_reaction_rates[INDEX(20)]*pow(y_shared[S_INDEX(2)], 2);
// df_H_2 / H2_1:
jac[INDEX(3)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(3)];
// df_ge / H2_1:
jac[INDEX(9)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - 0.5*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)]*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0) - 2.01588*local_cooling_rates[INDEX(25)]*mdensity - local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) + 0.5*local_cooling_rates[INDEX(24)]*local_cooling_rates[INDEX(22)]*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2);
jac[INDEX(9)] *= inv_mdensity;
// df_H2_1 / H2_2:
jac[INDEX(10)] = local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H2_2 / H2_2:
jac[INDEX(11)] = -local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H_1 / H2_2:
jac[INDEX(12)] = -local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_H_2 / H2_2:
jac[INDEX(13)] = local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(2)];
// df_H_m0 / H2_2:
jac[INDEX(14)] = -local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(4)];
// df_de / H2_2:
jac[INDEX(18)] = -local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(8)];
// df_H2_1 / H_1:
jac[INDEX(20)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)] + 2*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + 3*local_reaction_rates[INDEX(21)]*pow(y_shared[S_INDEX(2)], 2);
// df_H2_2 / H_1:
jac[INDEX(21)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)];
// df_H_1 / H_1:
jac[INDEX(22)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)] - 4*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] - 6*local_reaction_rates[INDEX(21)]*pow(y_shared[S_INDEX(2)], 2);
// df_H_2 / H_1:
jac[INDEX(23)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)];
// df_H_m0 / H_1:
jac[INDEX(24)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)];
// df_de / H_1:
jac[INDEX(28)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(4)];
// df_ge / H_1:
jac[INDEX(29)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(12)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(0)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(4)]*y_shared[S_INDEX(8)] + 0.5*local_cooling_rates[INDEX(23)]*local_cooling_rates[INDEX(22)]*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2) + 0.5*(-y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(21)] + 3*pow(y_shared[S_INDEX(2)], 2)*local_cooling_rates[INDEX(20)])*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0);
jac[INDEX(29)] *= inv_mdensity;
// df_H2_1 / H_2:
jac[INDEX(30)] = -local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)];
// df_H2_2 / H_2:
jac[INDEX(31)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_H_1 / H_2:
jac[INDEX(32)] = local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)];
// df_H_2 / H_2:
jac[INDEX(33)] = -local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_H_m0 / H_2:
jac[INDEX(34)] = -local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_de / H_2:
jac[INDEX(38)] = -local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(4)];
// df_ge / H_2:
jac[INDEX(39)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(15)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(7)];
jac[INDEX(39)] *= inv_mdensity;
// df_H2_1 / H_m0:
jac[INDEX(40)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H2_2 / H_m0:
jac[INDEX(41)] = local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H_1 / H_m0:
jac[INDEX(42)] = -local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_H_2 / H_m0:
jac[INDEX(43)] = -local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)];
// df_H_m0 / H_m0:
jac[INDEX(44)] = -local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)];
// df_de / H_m0:
jac[INDEX(48)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)];
// df_He_1 / He_1:
jac[INDEX(55)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_He_2 / He_1:
jac[INDEX(56)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_de / He_1:
jac[INDEX(58)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(8)];
// df_ge / He_1:
jac[INDEX(59)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(14)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - local_cooling_rates[INDEX(5)]*y_shared[S_INDEX(8)];
jac[INDEX(59)] *= inv_mdensity;
// df_He_1 / He_2:
jac[INDEX(65)] = local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)];
// df_He_2 / He_2:
jac[INDEX(66)] = -local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_He_3 / He_2:
jac[INDEX(67)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_de / He_2:
jac[INDEX(68)] = -local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(8)];
// df_ge / He_2:
jac[INDEX(69)] = -local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(2)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(1)]*pow(y_shared[S_INDEX(8)], 2) - local_cooling_rates[INDEX(6)]*y_shared[S_INDEX(8)] - local_cooling_rates[INDEX(3)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(9)];
jac[INDEX(69)] *= inv_mdensity;
// df_He_2 / He_3:
jac[INDEX(76)] = local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_He_3 / He_3:
jac[INDEX(77)] = -local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_de / He_3:
jac[INDEX(78)] = -local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(8)];
// df_ge / He_3:
jac[INDEX(79)] = -4.0*local_cooling_rates[INDEX(11)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(10)];
jac[INDEX(79)] *= inv_mdensity;
// df_H2_1 / de:
jac[INDEX(80)] = -local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)];
// df_H2_2 / de:
jac[INDEX(81)] = -local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_H_1 / de:
jac[INDEX(82)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_H_2 / de:
jac[INDEX(83)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)];
// df_H_m0 / de:
jac[INDEX(84)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)];
// df_He_1 / de:
jac[INDEX(85)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] + local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)];
// df_He_2 / de:
jac[INDEX(86)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] + local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)];
// df_He_3 / de:
jac[INDEX(87)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)];
// df_de / de:
jac[INDEX(88)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)];
// df_ge / de:
jac[INDEX(89)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(16)]*pow(local_cooling_rates[INDEX(17)], 2)*h2_optical_depth_approx/(pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2)*pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2)) - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(0)] - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(4)] - y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(7)] - y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(5)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(2)] - 2*y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(1)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(6)] - 2*y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(3)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(9)] - y_shared[S_INDEX(7)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*(y_shared[S_INDEX(3)] + y_shared[S_INDEX(6)] + 4.0*y_shared[S_INDEX(7)]) - local_cooling_rates[INDEX(18)]*(T_local - 2.73);
jac[INDEX(89)] *= inv_mdensity;
// df_H2_1 / ge:
jac[INDEX(90)] = rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
jac[INDEX(90)] *= Tge;
// df_H2_2 / ge:
jac[INDEX(91)] = rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
jac[INDEX(91)] *= Tge;
// df_H_1 / ge:
jac[INDEX(92)] = -rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + 2*rlocal_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] + 2*rlocal_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + 2*rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] + 2*rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] - 2*rlocal_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] - 2*rlocal_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
jac[INDEX(92)] *= Tge;
// df_H_2 / ge:
jac[INDEX(93)] = rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] + rlocal_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - rlocal_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)];
jac[INDEX(93)] *= Tge;
// df_H_m0 / ge:
jac[INDEX(94)] = rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
jac[INDEX(94)] *= Tge;
// df_He_1 / ge:
jac[INDEX(95)] = -rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)];
jac[INDEX(95)] *= Tge;
// df_He_2 / ge:
jac[INDEX(96)] = rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
jac[INDEX(96)] *= Tge;
// df_He_3 / ge:
jac[INDEX(97)] = rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
jac[INDEX(97)] *= Tge;
// df_de / ge:
jac[INDEX(98)] = rlocal_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)] - rlocal_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + rlocal_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + rlocal_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - rlocal_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)];
jac[INDEX(98)] *= Tge;
jac[INDEX(99)] = -y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx*(-local_cooling_rates[INDEX(17)]*(-y_shared[S_INDEX(0)]*rlocal_cooling_rates[INDEX(13)] - y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(12)] - y_shared[S_INDEX(3)]*rlocal_cooling_rates[INDEX(15)] - y_shared[S_INDEX(5)]*rlocal_cooling_rates[INDEX(14)] - y_shared[S_INDEX(8)]*rlocal_cooling_rates[INDEX(16)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)], 2) - rlocal_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]* local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]))/pow(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]* local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0, 2) - y_shared[S_INDEX(0)]*h2_optical_depth_approx*rlocal_cooling_rates[INDEX(17)]/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]* local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) + 0.5*pow(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0, -2.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)* 
local_cooling_rates[INDEX(20)])*(-1.0*local_cooling_rates[INDEX(22)]*(-y_shared[S_INDEX(0)]*rlocal_cooling_rates[INDEX(24)] - y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(23)])/pow(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)], 2) - 1.0*rlocal_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]* local_cooling_rates[INDEX(23)])) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*rlocal_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*rlocal_cooling_rates[INDEX(20)]);
jac[INDEX(99)] *= inv_mdensity;
jac[INDEX(99)] *= Tge;
#ifdef SCALE_INPUT
jac[INDEX(0)] *= inv_scale[INDEX(0)] * scale[INDEX(0)];
jac[INDEX(1)] *= inv_scale[INDEX(1)] * scale[INDEX(0)];
jac[INDEX(2)] *= inv_scale[INDEX(2)] * scale[INDEX(0)];
jac[INDEX(3)] *= inv_scale[INDEX(3)] * scale[INDEX(0)];
jac[INDEX(9)] *= inv_scale[INDEX(9)] * scale[INDEX(0)];
jac[INDEX(10)] *= inv_scale[INDEX(0)] * scale[INDEX(1)];
jac[INDEX(11)] *= inv_scale[INDEX(1)] * scale[INDEX(1)];
jac[INDEX(12)] *= inv_scale[INDEX(2)] * scale[INDEX(1)];
jac[INDEX(13)] *= inv_scale[INDEX(3)] * scale[INDEX(1)];
jac[INDEX(14)] *= inv_scale[INDEX(4)] * scale[INDEX(1)];
jac[INDEX(18)] *= inv_scale[INDEX(8)] * scale[INDEX(1)];
jac[INDEX(20)] *= inv_scale[INDEX(0)] * scale[INDEX(2)];
jac[INDEX(21)] *= inv_scale[INDEX(1)] * scale[INDEX(2)];
jac[INDEX(22)] *= inv_scale[INDEX(2)] * scale[INDEX(2)];
jac[INDEX(23)] *= inv_scale[INDEX(3)] * scale[INDEX(2)];
jac[INDEX(24)] *= inv_scale[INDEX(4)] * scale[INDEX(2)];
jac[INDEX(28)] *= inv_scale[INDEX(8)] * scale[INDEX(2)];
jac[INDEX(29)] *= inv_scale[INDEX(9)] * scale[INDEX(2)];
jac[INDEX(30)] *= inv_scale[INDEX(0)] * scale[INDEX(3)];
jac[INDEX(31)] *= inv_scale[INDEX(1)] * scale[INDEX(3)];
jac[INDEX(32)] *= inv_scale[INDEX(2)] * scale[INDEX(3)];
jac[INDEX(33)] *= inv_scale[INDEX(3)] * scale[INDEX(3)];
jac[INDEX(34)] *= inv_scale[INDEX(4)] * scale[INDEX(3)];
jac[INDEX(38)] *= inv_scale[INDEX(8)] * scale[INDEX(3)];
jac[INDEX(39)] *= inv_scale[INDEX(9)] * scale[INDEX(3)];
jac[INDEX(40)] *= inv_scale[INDEX(0)] * scale[INDEX(4)];
jac[INDEX(41)] *= inv_scale[INDEX(1)] * scale[INDEX(4)];
jac[INDEX(42)] *= inv_scale[INDEX(2)] * scale[INDEX(4)];
jac[INDEX(43)] *= inv_scale[INDEX(3)] * scale[INDEX(4)];
jac[INDEX(44)] *= inv_scale[INDEX(4)] * scale[INDEX(4)];
jac[INDEX(48)] *= inv_scale[INDEX(8)] * scale[INDEX(4)];
jac[INDEX(55)] *= inv_scale[INDEX(5)] * scale[INDEX(5)];
jac[INDEX(56)] *= inv_scale[INDEX(6)] * scale[INDEX(5)];
jac[INDEX(58)] *= inv_scale[INDEX(8)] * scale[INDEX(5)];
jac[INDEX(59)] *= inv_scale[INDEX(9)] * scale[INDEX(5)];
jac[INDEX(65)] *= inv_scale[INDEX(5)] * scale[INDEX(6)];
jac[INDEX(66)] *= inv_scale[INDEX(6)] * scale[INDEX(6)];
jac[INDEX(67)] *= inv_scale[INDEX(7)] * scale[INDEX(6)];
jac[INDEX(68)] *= inv_scale[INDEX(8)] * scale[INDEX(6)];
jac[INDEX(69)] *= inv_scale[INDEX(9)] * scale[INDEX(6)];
jac[INDEX(76)] *= inv_scale[INDEX(6)] * scale[INDEX(7)];
jac[INDEX(77)] *= inv_scale[INDEX(7)] * scale[INDEX(7)];
jac[INDEX(78)] *= inv_scale[INDEX(8)] * scale[INDEX(7)];
jac[INDEX(79)] *= inv_scale[INDEX(9)] * scale[INDEX(7)];
jac[INDEX(80)] *= inv_scale[INDEX(0)] * scale[INDEX(8)];
jac[INDEX(81)] *= inv_scale[INDEX(1)] * scale[INDEX(8)];
jac[INDEX(82)] *= inv_scale[INDEX(2)] * scale[INDEX(8)];
jac[INDEX(83)] *= inv_scale[INDEX(3)] * scale[INDEX(8)];
jac[INDEX(84)] *= inv_scale[INDEX(4)] * scale[INDEX(8)];
jac[INDEX(85)] *= inv_scale[INDEX(5)] * scale[INDEX(8)];
jac[INDEX(86)] *= inv_scale[INDEX(6)] * scale[INDEX(8)];
jac[INDEX(87)] *= inv_scale[INDEX(7)] * scale[INDEX(8)];
jac[INDEX(88)] *= inv_scale[INDEX(8)] * scale[INDEX(8)];
jac[INDEX(89)] *= inv_scale[INDEX(9)] * scale[INDEX(8)];
jac[INDEX(90)] *= inv_scale[INDEX(0)] * scale[INDEX(9)];
jac[INDEX(91)] *= inv_scale[INDEX(1)] * scale[INDEX(9)];
jac[INDEX(92)] *= inv_scale[INDEX(2)] * scale[INDEX(9)];
jac[INDEX(93)] *= inv_scale[INDEX(3)] * scale[INDEX(9)];
jac[INDEX(94)] *= inv_scale[INDEX(4)] * scale[INDEX(9)];
jac[INDEX(95)] *= inv_scale[INDEX(5)] * scale[INDEX(9)];
jac[INDEX(96)] *= inv_scale[INDEX(6)] * scale[INDEX(9)];
jac[INDEX(97)] *= inv_scale[INDEX(7)] * scale[INDEX(9)];
jac[INDEX(98)] *= inv_scale[INDEX(8)] * scale[INDEX(9)];
jac[INDEX(99)] *= inv_scale[INDEX(9)] * scale[INDEX(9)];
#endif
/*
if (T_ID == 0){
// printf("at time = %0.5g, temp = %0.5g\n",t, *T_local);
for (int i = 0; i<10; i++){
printf("y[INDEX(%d)] = %0.5g\n", i, y[INDEX(i)]);
}
}
*/
/*
if (T_ID == 0){
printf("density: %0.5g\n", mdensity);
printf("T_local: %0.5g\n", T_local);
printf("Tge : %0.5g\n", Tge);
for (int i = 0; i<100; i++){
printf("jac[INDEX(%d)] = %0.5g\n", i, jac[INDEX(i)]);
}
}
*/
} // end eval_jacob
|
5f9c31f5d314f4e01734edb26e2320ed1fe7fdbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scc_kernels.h"
// Marks as pivot every unfinished row that won the pivot poll for its
// partition: the pivot is seeded as visited in both BFS directions.
// One thread per row; row ids are 1-based.
__global__ void selectPivots(const uint32_t *range, uint8_t *tags, const uint32_t num_rows, const uint32_t *pivot_field, const int max_pivot_count){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]))
        return;

    // Partitions hash into pivot_field slots by range % max_pivot_count.
    if (pivot_field[range[row] % max_pivot_count] == row) {
        tag = 0;
        setForwardVisitedBit(&tag);
        setBackwardVisitedBit(&tag);
        setPivot(&tag);
        tags[row] = tag;
    }
}
// Candidate election: each unfinished row competes for its partition's slot
// in pivot_field, preferring the row with the larger product of forward and
// backward degree. Concurrent writers race on the slot; any single winner is
// acceptable for this heuristic (presumably intentional — the matching
// selectPivots pass only requires that the stored id be some live row).
__global__ void pollForPivots(const uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t* pivot_field, const int max_pivot_count, const uint32_t *Fr, const uint32_t *Br){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (row > num_rows || isRangeSet(tags[row]))
        return;

    const uint32_t partition = range[row];
    const uint32_t incumbent = pivot_field[partition % max_pivot_count];

    // Degree product from the CSR row extents (Fr/Br hold row offsets).
    const uint32_t incumbentScore = (Fr[incumbent + 1] - Fr[incumbent]) * (Br[incumbent + 1] - Br[incumbent]);
    const uint32_t myScore = (Fr[row + 1] - Fr[row]) * (Br[row + 1] - Br[row]);

    if (myScore > incumbentScore)
        pivot_field[partition % max_pivot_count] = row;
}
// Post-BFS partition update. A row reached both forward and backward from its
// pivot belongs to the pivot's SCC and is retired (range bit set). Otherwise
// its partition is split three ways by visitation status and its tag reset;
// *terminate is cleared so the host runs another round.
__global__ void update(uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]))
        return;

    if (isForwardVisited(tag) && isBackwardVisited(tag)) {
        rangeSet(&tags[row]);   // member of the pivot's SCC: done
    } else {
        *terminate = false;
        // Sub-partition id: 3*old + fwd-bit + 2*bwd-bit (values 0..2; never 3,
        // since fwd&&bwd was handled above).
        const uint32_t split = 3 * range[row]
                             + (uint32_t)isForwardVisited(tag)
                             + ((uint32_t)isBackwardVisited(tag) << 1);
        range[row] = split;
        tags[row] = 0;
    }
}
// trim1: eliminate trivial (single-node) SCCs. A row with no unfinished
// in-neighbour, or no unfinished out-neighbour, inside its own partition
// cannot lie on a cycle, so it is an SCC by itself: retire it and mark it
// trim1. Clearing *terminate tells the host another pass is needed, so trim1
// is re-launched until a fixed point.
//
// Fix: removed a broken shared-memory staging block that referenced undefined
// identifiers (nodeCount, medges, d_offset, d_edges, ii) with a bogus stride
// (id += blockIdx.x), and an enclosing loop over an undefined `threshold`
// whose body unconditionally returned after the first iteration — the code
// could neither compile nor ever iterate.
__global__ void trim1(const uint32_t *range, uint8_t *tags, const uint32_t *Fc, const uint32_t *Fr, const uint32_t *Bc, const uint32_t *Br, const uint32_t num_rows, bool volatile *terminate){
    uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t myTag;
    if (row > num_rows || isRangeSet(myTag = tags[row]))
        return;

    uint32_t myRange = range[row];

    // In-degree test: eliminate unless some live in-neighbour shares our partition.
    uint32_t cnt = Br[row + 1] - Br[row];
    const uint32_t *nbrs = &Bc[Br[row]];
    bool eliminate = true;
    for (uint32_t i = 0; i < cnt; i++) {
        uint32_t index = nbrs[i];
        if (!isRangeSet(tags[index]) && range[index] == myRange) {
            eliminate = false;
            break;
        }
    }

    // Out-degree test, only needed when the in-degree test failed.
    if (!eliminate) {
        eliminate = true;
        cnt = Fr[row + 1] - Fr[row];
        nbrs = &Fc[Fr[row]];
        for (uint32_t i = 0; i < cnt; i++) {
            uint32_t index = nbrs[i];
            if (!isRangeSet(tags[index]) && range[index] == myRange) {
                eliminate = false;
                break;
            }
        }
    }

    if (eliminate) {
        rangeSet(&myTag);
        setTrim1(&myTag);
        tags[row] = myTag;
        *terminate = false;   // at least one row trimmed: run another pass
    }
}
// trim2: detect and retire 2-node SCCs. A row whose only live in-neighbour k
// (within its partition) also has row as its only live in-neighbour — or the
// symmetric condition on out-neighbours — forms a 2-cycle {row, k}. Both are
// retired; the smaller id carries the trim2 mark so the pair is counted once.
// One thread per 1-based row over CSR graphs (Fc/Fr forward, Bc/Br backward).
// NOTE(review): tags[row]/tags[k] are updated non-atomically from both rows of
// a pair; appears tolerated by the algorithm — confirm against the host loop.
__global__ void trim2(const uint32_t *range, uint8_t *tags, const uint32_t *Fc, const uint32_t *Fr, const uint32_t *Bc, const uint32_t *Br, const uint32_t num_rows){
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
// Skip out-of-range threads and rows already assigned to an SCC.
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t myRange = range[row];
uint32_t cnt = Br[row + 1] - Br[row];
const uint32_t *nbrs = &Bc[Br[row]];
uint32_t inDegree = 0;
uint32_t k = 0; //other neighbour
bool eliminate = false;
// Count live in-neighbours in this partition; remember the (sole) one in k.
// Early-exit at 2: only an in-degree of exactly 1 matters below.
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if (!isRangeSet(tags[index]) && range[index] == myRange){
inDegree++;
if(inDegree == 2)
break;
k = index;
}
}
if(inDegree == 1){
// Unique in-neighbour k: the pair is a 2-SCC iff row also points forward
// to k and k's own live in-degree (in k's partition) is exactly 1.
cnt = Fr[row + 1] - Fr[row];
nbrs = &Fc[Fr[row]];
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if(index == k){
uint32_t kCnt = Br[k + 1] - Br[k];
const uint32_t *kNbrs = &Bc[Br[k]];
uint32_t kRange = range[k];
inDegree = 0;
for(uint32_t j = 0; j < kCnt; j++){
uint32_t tindex = kNbrs[j];
if(!isRangeSet(tags[tindex]) && range[tindex] == kRange){
inDegree++;
if(inDegree==2)
break;
}
}
if(inDegree == 1)
eliminate = true;
break;
}
}
}
if(!eliminate){
// Symmetric test on out-neighbours: unique live out-neighbour k with a
// back-edge and out-degree exactly 1 at k.
cnt = Fr[row + 1] - Fr[row];
nbrs = &Fc[Fr[row]];
inDegree=0;
k = 0;
for( uint32_t i = 0; i < cnt; i++ ){
uint32_t index = nbrs[i];
if ( !isRangeSet(tags[index]) && range[index] == myRange){
inDegree++;
if(inDegree == 2)
break;
k = index;
}
}
if(inDegree == 1){
cnt = Br[row + 1] - Br[row];
nbrs = &Bc[Br[row]];
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if(index == k){
uint32_t kCnt = Fr[k + 1] - Fr[k];
const uint32_t *kNbrs = &Fc[Fr[k]];
uint32_t kRange = range[k];
inDegree = 0;
for(uint32_t j = 0; j < kCnt; j++){
uint32_t tindex = kNbrs[j];
if(!isRangeSet(tags[tindex]) && range[tindex] == kRange){
inDegree++;
if(inDegree==2)
break;
}
}
if(inDegree == 1)
eliminate = true;
break;
}
}
}
}
if(eliminate){
// Retire both members; the smaller id represents the 2-SCC.
uint32_t temp = min(row, k);
rangeSet(&tags[row]);
rangeSet(&tags[k]);
setTrim2(&tags[temp]); //Only one of the two will be set as pivot for 2-SCC
}
return;
}
// One level of forward BFS within each partition. A visited-but-not-yet-
// propagated row pushes the forward-visited bit to each unvisited forward
// neighbour sharing its partition, then marks itself propagated. Clearing
// *terminate keeps the host iterating while the frontier grows.
__global__ void fwd(const uint32_t *Fc, const uint32_t *Fr, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]) || isForwardPropagate(tag) || !isForwardVisited(tag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t rowEnd = Fr[row + 1];
    bool pushed = false;

    for (uint32_t e = Fr[row]; e < rowEnd; ++e) {
        const uint32_t nbr = Fc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isForwardVisited(nbrTag) && range[nbr] == myRange) {
            setForwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    setForwardPropagateBit(&tags[row]);
    if (pushed)
        *terminate = false;
}
// One level of backward BFS within each partition — mirror of fwd() over the
// reverse CSR graph (Bc/Br), using the backward visited/propagate bits.
__global__ void bwd(const uint32_t *Bc, const uint32_t *Br, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]) || isBackwardPropagate(tag) || !isBackwardVisited(tag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t rowEnd = Br[row + 1];
    bool pushed = false;

    for (uint32_t e = Br[row]; e < rowEnd; ++e) {
        const uint32_t nbr = Bc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isBackwardVisited(nbrTag) && range[nbr] == myRange) {
            setBackwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    setBackwardPropagateBit(&tags[row]);
    if (pushed)
        *terminate = false;
}
// Reset each unfinished row's partition id to its own row index, giving every
// live row a unique starting range (used before range-propagation phases).
__global__ void assignUniqueRange(uint32_t *range, const uint8_t *tags, const uint32_t num_rows){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (row <= num_rows && !isRangeSet(tags[row]))
        range[row] = row;
}
// Min-label propagation step: each live row adopts the smallest range found
// among its live forward neighbours. Clearing *terminate keeps the host
// iterating until labels stabilise.
__global__ void propagateRange1(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (row > num_rows || isRangeSet(tags[row]))
        return;

    uint32_t best = range[row];
    const uint32_t rowEnd = Fr[row + 1];
    bool improved = false;

    for (uint32_t e = Fr[row]; e < rowEnd; ++e) {
        const uint32_t nbr = Fc[e];
        const uint32_t nbrRange = range[nbr];
        if (!isRangeSet(tags[nbr]) && nbrRange < best) {
            best = nbrRange;
            improved = true;
        }
    }

    if (improved) {
        range[row] = best;
        *terminate = false;
    }
}
// Pointer-jumping (path-halving) step: replace a row's label by its label's
// label, shortcutting chains toward the representative. Runs until no row
// changes (*terminate stays true).
__global__ void propagateRange2(uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (row > num_rows || isRangeSet(tags[row]))
        return;

    const uint32_t label = range[row];
    if (label == row)
        return;   // already a representative

    const uint32_t parent = range[label];
    if (parent != label) {
        range[row] = parent;
        *terminate = false;
    }
}
//Coloring
// Coloring step: each live row takes the maximum of its own id, its current
// color, and the colors of its live forward neighbours. Iterated to a fixed
// point by the host (terminate flag).
__global__ void colorPropagation(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    if (row > num_rows || isRangeSet(tags[row]))
        return;

    uint32_t best = max(row, range[row]);
    const uint32_t rowEnd = Fr[row + 1];
    bool changed = false;

    for (uint32_t e = Fr[row]; e < rowEnd; ++e) {
        const uint32_t nbr = Fc[e];
        const uint32_t nbrColor = range[nbr];
        if (!isRangeSet(tags[nbr]) && nbrColor > best) {
            best = nbrColor;
            changed = true;
        }
    }

    if (changed) {
        range[row] = best;
        *terminate = false;
    }
}
//coloring
// Coloring variant of pivot selection: a row whose color equals its own id is
// a color representative and becomes a pivot, seeded forward-visited only
// (the coloring scheme runs a single forward sweep per color).
__global__ void selectPivotColoring(const uint32_t *range, uint8_t *tags, const uint32_t num_rows){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]))
        return;

    if (range[row] == row) {
        tag = 0;
        setForwardVisitedBit(&tag);
        setPivot(&tag);
        tags[row] = tag;
    }
}
//coloring
// Coloring variant of the forward BFS level: like fwd(), but a row is retired
// (range bit set) immediately after propagating instead of being marked
// forward-propagated.
__global__ void fwdColoring(const uint32_t *Fc, const uint32_t *Fr, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;

    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]) || !isForwardVisited(tag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t rowEnd = Fr[row + 1];
    bool pushed = false;

    for (uint32_t e = Fr[row]; e < rowEnd; ++e) {
        const uint32_t nbr = Fc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isForwardVisited(nbrTag) && range[nbr] == myRange) {
            setForwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    rangeSet(&tags[row]);
    if (pushed)
        *terminate = false;
}
//coloring
// After a coloring round, reset the tag of every row that is still not
// assigned to an SCC and request another iteration from the host.
__global__ void updateColoring(uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > num_rows)
        return;
    if (isRangeSet(tags[row]))
        return;
    *terminate = false;
    tags[row] = 0;
}
// Mark the single row chosen by pollForFirstPivot (stored in pivot_field[0])
// as the initial pivot: reset its tag and set the forward-visited,
// backward-visited and pivot bits so both BFS sweeps start from it.
__global__ void selectFirstPivot(uint8_t *tags, const uint32_t num_rows, const uint32_t *pivot_field){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]))
        return;
    if (row != pivot_field[0])
        return;
    tag = 0;
    setForwardVisitedBit(&tag);
    setBackwardVisitedBit(&tag);
    setPivot(&tag);
    tags[row] = tag;
}
// Poll for the initial pivot: every still-active row compares its degree
// product (out-degree * in-degree) against the current candidate stored in
// pivot_field[0] and overwrites it if its own product is larger.
// NOTE(review): the read/compare/write below is unsynchronized across
// threads, so the final winner is not guaranteed to have the global maximum
// product — this looks like an intentional best-effort heuristic (any
// high-degree row makes a good pivot), but confirm against the host driver.
__global__ void pollForFirstPivot(const uint8_t *tags, const uint32_t num_rows, uint32_t* pivot_field, const uint32_t *Fr, const uint32_t *Br){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t oldRow = pivot_field[0];
// Degree product of the current candidate vs. this row.
uint32_t oldDegree = (Fr[oldRow+1] - Fr[oldRow]) * (Br[oldRow+1] - Br[oldRow]);
uint32_t newDegree = (Fr[row+1] - Fr[row]) * (Br[row+1] - Br[row]);
if(newDegree > oldDegree)
pivot_field[0] = row;
}
// Launch the edge-parallel CC kernel to accumulate the common-neighbour
// count used for the clustering coefficient.
// Relies on file-scope state declared elsewhere in this file: `count`
// (host result pointer), `vn` (vertex count) and `e` (edge count) —
// presumably globals; TODO confirm.
void getClusteringCoeff(Graph& G) {
    /* Compute the in-clustering coefficient of a node..
     * 1. to find the in-CC among the incoming neighbors count the incoming or outgoing edges of the nighbors of the neighbors.
     * 2. to find the out-clustering coeff. (using the out-neighbors), count the outgoing edges of the neighbors.
     */
    std::cout << "call to getClusteringCoeff" << std::endl;
    unsigned long long *d_count = nullptr;
    // BUG FIX: d_count was previously passed to hipMemcpy without ever being
    // allocated on the device (undefined behaviour); allocate it first.
    hipMalloc(&d_count, sizeof(unsigned long long));
    count = (unsigned long long *)malloc(sizeof(unsigned long long));
    *count = 0;
    hipMemcpy(d_count, count, sizeof(unsigned long long), hipMemcpyHostToDevice);
    // One thread per edge, 192 threads per block, ceil-divided grid.
    int BPG = (e + 191) / 192, TPB = 192;
    // NOTE(review): CC is declared with (int*, int*, unsigned long long*,
    // int, int) parameters, while this launch passes the Graph object —
    // verify how Graph maps onto the adjacency/position arrays.
    hipLaunchKernelGGL(( CC), dim3(BPG),dim3(TPB), 0, 0, G,d_count,vn,e);
    // BUG FIX: copy the accumulated count back so the result is observable,
    // and free the device buffer that was previously leaked.
    hipMemcpy(count, d_count, sizeof(unsigned long long), hipMemcpyDeviceToHost);
    hipFree(d_count);
}
// Edge-parallel triangle counting for the clustering coefficient.
// One thread per edge `eid`: binary-search d_pos (per-vertex inclusive end
// offsets into d_adj) to recover the source vertex u of the edge, take the
// destination v = d_adj[eid], then intersect the sorted adjacency lists of
// u and v and add the number of common neighbours to *d_count.
__global__ void CC(int* d_adj,int* d_pos,unsigned long long* d_count,int dvn,int de){
unsigned int num=0;
int eid =(blockDim.x)*(blockIdx.x)+(threadIdx.x);
int u,v;
// Tail guard: only threads mapped to a real edge do any work.
if(eid<de){
int middle;
int begin=0,end=dvn-1;
// Binary search for the vertex whose adjacency range contains edge eid.
// NOTE(review): u and v are assigned only on the break paths below; the
// compiler cannot prove initialization before use and may warn — verify
// that every search outcome reaches a break.
while(1){
middle=(end+begin)/2;
if(d_pos[middle]<eid){
begin=middle+1;
}
else if(d_pos[middle]>=eid){
if(end==begin+1){
u=middle;
v=d_adj[eid];
break;
}
else if(end==begin){
u=middle;
v=d_adj[eid];
break;
}
else{
if(d_pos[middle-1]>=eid){
end=middle-1;
}
else{
u=middle;
v=d_adj[eid];
break;
}
}
}
}
// [us,ue] and [vs,ve] are the inclusive adjacency ranges of u and v;
// d_pos[x] holds the inclusive end offset of vertex x's neighbour list.
int us,ue,vs,ve;
if(u==0){
us=0;
ue=d_pos[u];
}
else{
us=d_pos[u-1]+1;
ue=d_pos[u];
}
if(v==0){
vs=0;
ve=d_pos[v];
}
else{
vs=d_pos[v-1]+1;
ve=d_pos[v];
}
// Merge-style intersection of the two sorted neighbour lists.
while(us<=ue&&vs<=ve){
if(d_adj[us]==d_adj[vs]){
num++;
us++;
vs++;
}
else if(d_adj[us]<d_adj[vs]){
us++;
}
else{
vs++;
}
}
// Fold this edge's contribution into the global 64-bit counter.
atomicAdd(d_count,num);
}
}
| 5f9c31f5d314f4e01734edb26e2320ed1fe7fdbb.cu | #include "scc_kernels.h"
// Pivot selection: the row that won pollForPivots for its range's hash slot
// (range % max_pivot_count) marks itself as pivot and as both forward- and
// backward-visited, seeding the two BFS sweeps for that subgraph.
__global__ void selectPivots(const uint32_t *range, uint8_t *tags, const uint32_t num_rows, const uint32_t *pivot_field, const int max_pivot_count){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    uint8_t tag;
    if (row > num_rows || isRangeSet(tag = tags[row]))
        return;
    if (pivot_field[range[row] % max_pivot_count] != row)
        return;
    tag = 0;
    setForwardVisitedBit(&tag);
    setBackwardVisitedBit(&tag);
    setPivot(&tag);
    tags[row] = tag;
}
// Poll for one pivot per subgraph: each active row hashes its range into
// pivot_field (range % max_pivot_count) and overwrites the slot if its
// degree product (out-degree * in-degree) beats the current candidate's.
// NOTE(review): unsynchronized read/compare/write — the slot winner is a
// best-effort maximum, and distinct ranges can collide on the same slot;
// this appears intentional (any pivot candidate works), but confirm.
__global__ void pollForPivots(const uint32_t *range, const uint8_t *tags, const uint32_t num_rows, uint32_t* pivot_field, const int max_pivot_count, const uint32_t *Fr, const uint32_t *Br){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t index = range[row];
uint32_t oldRow = pivot_field[index % max_pivot_count];
// Degree product of current slot holder vs. this row.
uint32_t oldDegree = (Fr[oldRow+1] - Fr[oldRow]) * (Br[oldRow+1] - Br[oldRow]);
uint32_t newDegree = (Fr[row+1] - Fr[row]) * (Br[row+1] - Br[row]);
if(newDegree > oldDegree)
pivot_field[ index % max_pivot_count ] = row;
}
// Post-BFS update: rows reached by BOTH the forward and backward sweeps form
// the pivot's SCC and are closed out (rangeSet). Every other active row is
// re-partitioned: new range = 3*range + fwd + 2*bwd assigns each of the three
// surviving subsets (neither / fwd-only / bwd-only) a distinct range, since
// the fwd-and-bwd case is excluded above. Clears *terminate so the host
// continues iterating while any row remains open.
__global__ void update(uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
uint8_t myTag;
if (row > num_rows || isRangeSet(myTag = tags[row]))
return;
if ( isForwardVisited(myTag) && isBackwardVisited(myTag)){
// In the pivot's SCC: membership is final.
rangeSet(&tags[row]);
}
else{
*terminate = false;
// Split the remainder into 3 disjoint subgraphs per old range.
uint32_t index = 3 * range[row] + (uint32_t)isForwardVisited(myTag) + ((uint32_t)isBackwardVisited(myTag) << 1);
range[row] = index;
tags[row] = 0;
}
}
// Trim step 1: eliminate trivial SCCs. A row with no still-active
// in-neighbour OR no still-active out-neighbour inside its own range cannot
// belong to a non-trivial SCC, so it is closed out (rangeSet + trim1 mark)
// and *terminate is cleared to request another pass.
// NOTE(review): the shared-memory preamble below references identifiers that
// are declared nowhere in this kernel or its parameter list (nodeCount,
// medges, d_offset, d_edges, ii, threshold) — it looks pasted in from a
// different kernel and cannot compile as-is; confirm against the original
// scc_kernels sources.
// NOTE(review): the outer `for (k < threshold)` loop ends every iteration
// with an unconditional `return`, so at most one iteration ever executes.
__global__ void trim1(const uint32_t *range, uint8_t *tags, const uint32_t *Fc, const uint32_t *Fr, const uint32_t *Bc, const uint32_t *Br, const uint32_t num_rows, bool volatile *terminate){
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
uint8_t myTag;
extern __shared__ unsigned shm[];
unsigned * sh_offset = shm;
unsigned * sh_edges = &shm[nodeCount];
for(unsigned id = threadIdx.x; id < medges; id += blockIdx.x) {
sh_offset[id] = d_offset[threadIdx.x];
for(unsigned e = d_offset[id]; e < d_offset[id+1]; ++e)
sh_edges[ii] = d_edges[e];
}
for(unsigned k=0; k < threshold; ++k){
if (row > num_rows || isRangeSet(myTag = tags[row]))
return;
uint32_t myRange = range[row];
// First test: does any active in-neighbour share this row's range?
uint32_t cnt = Br[row + 1] - Br[row];
const uint32_t *nbrs = &Bc[Br[row]];
bool eliminate = true;
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if ( !isRangeSet(tags[index]) && range[index] == myRange){
eliminate = false;
break;
}
}
if ( !eliminate ) {
// Second test: does any active out-neighbour share the range?
eliminate = true;
cnt = Fr[row + 1] - Fr[row];
nbrs = &Fc[Fr[row]];
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if ( !isRangeSet(tags[index]) && range[index] == myRange){
eliminate = false;
break;
}
}
}
if ( eliminate ) {
// Isolated within its range: this row is a singleton SCC.
rangeSet(&myTag);
setTrim1(&myTag);
tags[row] = myTag;
*terminate = false;
}
return;
}
}
// Trim step 2: eliminate 2-cycles. If row has exactly one active
// in-neighbour k within its range, k lists row among its out-neighbours, and
// k itself has exactly one active in-neighbour, then {row, k} form a 2-node
// SCC; the symmetric test is repeated with the out-edges if the first test
// fails. Both members are closed (rangeSet) and the smaller id gets the
// trim2 mark as the pair's representative.
__global__ void trim2(const uint32_t *range, uint8_t *tags, const uint32_t *Fc, const uint32_t *Fr, const uint32_t *Bc, const uint32_t *Br, const uint32_t num_rows){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t myRange = range[row];
// Pass 1: count active in-neighbours inside myRange (stop at 2).
uint32_t cnt = Br[row + 1] - Br[row];
const uint32_t *nbrs = &Bc[Br[row]];
uint32_t inDegree = 0;
uint32_t k = 0; //other neighbour
bool eliminate = false;
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if (!isRangeSet(tags[index]) && range[index] == myRange){
inDegree++;
if(inDegree == 2)
break;
k = index;
}
}
if(inDegree == 1){
// Exactly one in-neighbour k: check row -> k exists and k's own
// active in-degree within its range is also exactly 1.
cnt = Fr[row + 1] - Fr[row];
nbrs = &Fc[Fr[row]];
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if(index == k){
uint32_t kCnt = Br[k + 1] - Br[k];
const uint32_t *kNbrs = &Bc[Br[k]];
uint32_t kRange = range[k];
inDegree = 0;
for(uint32_t j = 0; j < kCnt; j++){
uint32_t tindex = kNbrs[j];
if(!isRangeSet(tags[tindex]) && range[tindex] == kRange){
inDegree++;
if(inDegree==2)
break;
}
}
if(inDegree == 1)
eliminate = true;
break;
}
}
}
if(!eliminate){
// Pass 2: the mirror-image test using out-edges (Fc/Fr) first.
cnt = Fr[row + 1] - Fr[row];
nbrs = &Fc[Fr[row]];
inDegree=0;
k = 0;
for( uint32_t i = 0; i < cnt; i++ ){
uint32_t index = nbrs[i];
if ( !isRangeSet(tags[index]) && range[index] == myRange){
inDegree++;
if(inDegree == 2)
break;
k = index;
}
}
if(inDegree == 1){
cnt = Br[row + 1] - Br[row];
nbrs = &Bc[Br[row]];
for(uint32_t i = 0; i < cnt; i++){
uint32_t index = nbrs[i];
if(index == k){
uint32_t kCnt = Fr[k + 1] - Fr[k];
const uint32_t *kNbrs = &Fc[Fr[k]];
uint32_t kRange = range[k];
inDegree = 0;
for(uint32_t j = 0; j < kCnt; j++){
uint32_t tindex = kNbrs[j];
if(!isRangeSet(tags[tindex]) && range[tindex] == kRange){
inDegree++;
if(inDegree==2)
break;
}
}
if(inDegree == 1)
eliminate = true;
break;
}
}
}
}
if(eliminate){
// Close both members; the smaller id represents the 2-SCC.
uint32_t temp = min(row, k);
rangeSet(&tags[row]);
rangeSet(&tags[k]);
setTrim2(&tags[temp]); //Only one of the two will be set as pivot for 2-SCC
}
return;
}
// Forward BFS step: a forward-visited row that has not yet propagated pushes
// the forward-visited bit to every unvisited out-neighbour in the same
// range, then marks itself as propagated so it is expanded only once.
// Clears *terminate while the frontier is still growing.
__global__ void fwd(const uint32_t *Fc, const uint32_t *Fr, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    uint8_t myTag;
    if (row > num_rows || isRangeSet(myTag = tags[row]) || isForwardPropagate(myTag) || !isForwardVisited(myTag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t first = Fr[row];
    const uint32_t last = Fr[row + 1];
    bool pushed = false;

    for (uint32_t e = first; e < last; ++e) {
        const uint32_t nbr = Fc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isForwardVisited(nbrTag) && range[nbr] == myRange) {
            setForwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    setForwardPropagateBit(&tags[row]);
    if (pushed)
        *terminate = false;
}
// Backward BFS step: mirror image of fwd over the reverse edges (Bc/Br).
// Spreads the backward-visited bit to unvisited in-neighbours in the same
// range, marks this row as backward-propagated, and clears *terminate while
// the frontier keeps growing.
__global__ void bwd(const uint32_t *Bc, const uint32_t *Br, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    uint8_t myTag;
    if (row > num_rows || isRangeSet(myTag = tags[row]) || isBackwardPropagate(myTag) || !isBackwardVisited(myTag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t first = Br[row];
    const uint32_t last = Br[row + 1];
    bool pushed = false;

    for (uint32_t e = first; e < last; ++e) {
        const uint32_t nbr = Bc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isBackwardVisited(nbrTag) && range[nbr] == myRange) {
            setBackwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    setBackwardPropagateBit(&tags[row]);
    if (pushed)
        *terminate = false;
}
// Re-seed each still-active row's range with its own id, preparing the
// min/max propagation rounds that follow.
__global__ void assignUniqueRange(uint32_t *range, const uint8_t *tags, const uint32_t num_rows){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row <= num_rows && !isRangeSet(tags[row]))
        range[row] = row;
}
// Range propagation, step 1: each active row lowers its range to the minimum
// range found among its still-active forward neighbours. Clears *terminate
// whenever any range changed, so the host iterates to a fixed point.
__global__ void propagateRange1(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > num_rows || isRangeSet(tags[row]))
        return;

    uint32_t best = range[row];
    const uint32_t first = Fr[row];
    const uint32_t degree = Fr[row + 1] - first;
    bool lowered = false;

    for (uint32_t j = 0; j < degree; ++j) {
        const uint32_t nbr = Fc[first + j];
        const uint32_t cand = range[nbr];
        if (!isRangeSet(tags[nbr]) && cand < best) {
            best = cand;
            lowered = true;
        }
    }

    if (lowered) {
        range[row] = best;
        *terminate = false;
    }
}
// Range propagation, step 2: pointer jumping. Collapse chains one level by
// replacing range[row] with range[range[row]] when they differ, clearing
// *terminate so the host keeps shortening until every chain is flat.
__global__ void propagateRange2(uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > num_rows || isRangeSet(tags[row]))
        return;

    const uint32_t parent = range[row];
    if (parent == row)
        return;

    const uint32_t grandparent = range[parent];
    if (grandparent != parent) {
        range[row] = grandparent;
        *terminate = false;
    }
}
//Coloring
// Color propagation step: every still-active row adopts the largest color
// (range value) among itself and its still-active forward neighbours.
// Clears *terminate whenever any color changed, so the host iterates to a
// fixed point.
__global__ void colorPropagation(const uint32_t *Fc, const uint32_t *Fr, uint32_t *range, const uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t mx = max(row, range[row]);
uint32_t cnt = Fr[row + 1] - Fr[row];
const uint32_t *nbrs = &Fc[Fr[row]];
bool end = true;
for ( uint32_t i = 0; i < cnt; i++ ) {
uint32_t index = nbrs[i];
uint32_t nbrRange = range[index];
// Only still-active neighbours contribute their color.
if(!isRangeSet(tags[index]) && mx < nbrRange){
mx = nbrRange;
end = false;
}
}
if(!end){
range[row] = mx;
*terminate = false;
}
}
//coloring
// Pivot selection for the coloring phase: a row whose propagated color
// equals its own id represents its color partition; it resets its tag and
// marks itself forward-visited and pivot so the coloring BFS starts there.
__global__ void selectPivotColoring(const uint32_t *range, uint8_t *tags, const uint32_t num_rows){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
uint8_t myTag;
if (row > num_rows || isRangeSet(myTag = tags[row]))
return;
if(range[row] == row){
myTag = 0;
setForwardVisitedBit(&myTag);
setPivot(&myTag);
tags[row] = myTag;
}
}
//coloring
// Coloring-phase forward BFS: a forward-visited, still-active row spreads the
// forward-visited bit to every unvisited neighbour in the same color
// partition, then closes itself (rangeSet) so it is expanded only once.
// Clears *terminate while the frontier keeps growing.
__global__ void fwdColoring(const uint32_t *Fc, const uint32_t *Fr, const uint32_t *range, uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
    const uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x + 1;
    uint8_t myTag;
    if (row > num_rows || isRangeSet(myTag = tags[row]) || !isForwardVisited(myTag))
        return;

    const uint32_t myRange = range[row];
    const uint32_t first = Fr[row];
    const uint32_t last = Fr[row + 1];
    bool pushed = false;

    for (uint32_t e = first; e < last; ++e) {
        const uint32_t nbr = Fc[e];
        const uint8_t nbrTag = tags[nbr];
        if (!isRangeSet(nbrTag) && !isForwardVisited(nbrTag) && range[nbr] == myRange) {
            setForwardVisitedBit(&tags[nbr]);
            pushed = true;
        }
    }

    // This row's membership is final for the coloring phase.
    rangeSet(&tags[row]);
    if (pushed)
        *terminate = false;
}
//coloring
// After a coloring round, reset the tag of every row that is still not
// assigned to an SCC and request another iteration from the host.
__global__ void updateColoring(uint8_t *tags, const uint32_t num_rows, bool volatile *terminate){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
*terminate = false;
tags[row] = 0;
}
// Mark the row chosen by pollForFirstPivot (stored in pivot_field[0]) as the
// initial pivot: reset its tag and set the forward-visited,
// backward-visited and pivot bits so both BFS sweeps start from it.
__global__ void selectFirstPivot(uint8_t *tags, const uint32_t num_rows, const uint32_t *pivot_field){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
uint8_t myTag;
if (row > num_rows || isRangeSet(myTag = tags[row]))
return;
if( pivot_field[0] == row ) {
myTag = 0;
setForwardVisitedBit(&myTag);
setBackwardVisitedBit(&myTag);
setPivot(&myTag);
tags[row] = myTag;
}
}
// Poll for the initial pivot: every still-active row compares its degree
// product (out-degree * in-degree) against the candidate in pivot_field[0]
// and overwrites it if its own product is larger.
// NOTE(review): the read/compare/write is unsynchronized, so the final
// winner need not have the global maximum product — apparently a deliberate
// best-effort heuristic; confirm against the host driver.
__global__ void pollForFirstPivot(const uint8_t *tags, const uint32_t num_rows, uint32_t* pivot_field, const uint32_t *Fr, const uint32_t *Br){
// 1-based row id flattened over the 2D grid.
uint32_t row = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x + 1;
if (row > num_rows || isRangeSet(tags[row]))
return;
uint32_t oldRow = pivot_field[0];
// Degree product of the current candidate vs. this row.
uint32_t oldDegree = (Fr[oldRow+1] - Fr[oldRow]) * (Br[oldRow+1] - Br[oldRow]);
uint32_t newDegree = (Fr[row+1] - Fr[row]) * (Br[row+1] - Br[row]);
if(newDegree > oldDegree)
pivot_field[0] = row;
}
// Launch the edge-parallel CC kernel to accumulate the common-neighbour
// count used for the clustering coefficient.
// Relies on file-scope state declared elsewhere in this file: `count`
// (host result pointer), `vn` (vertex count) and `e` (edge count) —
// presumably globals; TODO confirm.
void getClusteringCoeff(Graph& G) {
    /* Compute the in-clustering coefficient of a node..
     * 1. to find the in-CC among the incoming neighbors count the incoming or outgoing edges of the nighbors of the neighbors.
     * 2. to find the out-clustering coeff. (using the out-neighbors), count the outgoing edges of the neighbors.
     */
    std::cout << "call to getClusteringCoeff" << std::endl;
    unsigned long long *d_count = nullptr;
    // BUG FIX: d_count was previously passed to cudaMemcpy without ever being
    // allocated on the device (undefined behaviour); allocate it first.
    cudaMalloc(&d_count, sizeof(unsigned long long));
    count = (unsigned long long *)malloc(sizeof(unsigned long long));
    *count = 0;
    cudaMemcpy(d_count, count, sizeof(unsigned long long), cudaMemcpyHostToDevice);
    // One thread per edge, 192 threads per block, ceil-divided grid.
    int BPG = (e + 191) / 192, TPB = 192;
    // NOTE(review): CC is declared with (int*, int*, unsigned long long*,
    // int, int) parameters, while this launch passes the Graph object —
    // verify how Graph maps onto the adjacency/position arrays.
    CC<<<BPG,TPB>>>(G,d_count,vn,e);
    // BUG FIX: copy the accumulated count back so the result is observable,
    // and free the device buffer that was previously leaked.
    cudaMemcpy(count, d_count, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
    cudaFree(d_count);
}
// Edge-parallel triangle counting for the clustering coefficient.
// One thread per edge `eid`: binary-search d_pos (per-vertex inclusive end
// offsets into d_adj) to recover the source vertex u of the edge, take the
// destination v = d_adj[eid], then intersect the sorted adjacency lists of
// u and v and add the number of common neighbours to *d_count.
__global__ void CC(int* d_adj,int* d_pos,unsigned long long* d_count,int dvn,int de){
unsigned int num=0;
int eid =(blockDim.x)*(blockIdx.x)+(threadIdx.x);
int u,v;
// Tail guard: only threads mapped to a real edge do any work.
if(eid<de){
int middle;
int begin=0,end=dvn-1;
// Binary search for the vertex whose adjacency range contains edge eid.
// NOTE(review): u and v are assigned only on the break paths below; the
// compiler cannot prove initialization before use and may warn — verify
// that every search outcome reaches a break.
while(1){
middle=(end+begin)/2;
if(d_pos[middle]<eid){
begin=middle+1;
}
else if(d_pos[middle]>=eid){
if(end==begin+1){
u=middle;
v=d_adj[eid];
break;
}
else if(end==begin){
u=middle;
v=d_adj[eid];
break;
}
else{
if(d_pos[middle-1]>=eid){
end=middle-1;
}
else{
u=middle;
v=d_adj[eid];
break;
}
}
}
}
// [us,ue] and [vs,ve] are the inclusive adjacency ranges of u and v;
// d_pos[x] holds the inclusive end offset of vertex x's neighbour list.
int us,ue,vs,ve;
if(u==0){
us=0;
ue=d_pos[u];
}
else{
us=d_pos[u-1]+1;
ue=d_pos[u];
}
if(v==0){
vs=0;
ve=d_pos[v];
}
else{
vs=d_pos[v-1]+1;
ve=d_pos[v];
}
// Merge-style intersection of the two sorted neighbour lists.
while(us<=ue&&vs<=ve){
if(d_adj[us]==d_adj[vs]){
num++;
us++;
vs++;
}
else if(d_adj[us]<d_adj[vs]){
us++;
}
else{
vs++;
}
}
// Fold this edge's contribution into the global 64-bit counter.
atomicAdd(d_count,num);
}
}
|
fadf47de7eb1c8ca35e12507e82030fb41ce71b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Scatter elements of source_pt into a strided/offset layout in dest_pt.
// NOTE(review): the guard `pos % stride == 0` copies only every stride-th
// source element, writing it to dest_pt[pos*stride + offset]; for a routine
// named "copy full to columns" one would expect every pos < N to be written
// — confirm the guard against the intended matrix layout.
__global__ void copy_full_to_columns_on_device_kernel(float * dest_pt,
float * source_pt, int stride, int offset, int N){
int pos = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Tail guard: the grid may overshoot N.
if(pos >= N) return;
if(pos % stride == 0){
dest_pt[pos * stride + offset] = source_pt[pos];
}
}
// Host wrapper: launch the copy kernel over N elements with BLOCK_SIZE
// threads per block, check for launch errors, then block until completion.
void copy_full_to_column_on_device(float * dest_pt, float * source_pt,
int stride, int offset, int N){
// Ceil-divide N by BLOCK_SIZE via float math.
// NOTE(review): float-based ceil can misround once N exceeds float's exact
// integer range; the integer form (N + BLOCK_SIZE - 1) / BLOCK_SIZE is safer.
int n_blocks = ceil(((float) N ) / ((float) BLOCK_SIZE));
hipLaunchKernelGGL(( copy_full_to_columns_on_device_kernel), dim3(n_blocks), dim3(BLOCK_SIZE), 0, 0, dest_pt,
source_pt, stride, offset, N);
CUDA_CHECK(check_last_error());
CUDA_CHECK(hipDeviceSynchronize());
}
| fadf47de7eb1c8ca35e12507e82030fb41ce71b9.cu | #include "utils.h"
// Scatter elements of source_pt into a strided/offset layout in dest_pt.
// NOTE(review): the guard `pos % stride == 0` copies only every stride-th
// source element, writing it to dest_pt[pos*stride + offset]; for a routine
// named "copy full to columns" one would expect every pos < N to be written
// — confirm the guard against the intended matrix layout.
__global__ void copy_full_to_columns_on_device_kernel(float * dest_pt,
float * source_pt, int stride, int offset, int N){
int pos = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Tail guard: the grid may overshoot N.
if(pos >= N) return;
if(pos % stride == 0){
dest_pt[pos * stride + offset] = source_pt[pos];
}
}
// Host wrapper: launches copy_full_to_columns_on_device_kernel over N
// elements with BLOCK_SIZE threads per block, checks for launch errors, then
// blocks until the copy has finished.
void copy_full_to_column_on_device(float * dest_pt, float * source_pt,
int stride, int offset, int N){
    // BUG FIX: exact integer ceil-division. The previous
    // ceil((float)N / (float)BLOCK_SIZE) can misround once N exceeds the
    // range where float represents integers exactly (2^24).
    int n_blocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    copy_full_to_columns_on_device_kernel<<<n_blocks, BLOCK_SIZE>>>(dest_pt,
        source_pt, stride, offset, N);
    CUDA_CHECK(check_last_error());
    CUDA_CHECK(cudaDeviceSynchronize());
}
|
3d1a324f067b389a42437ac41ce634ae1b01d396.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Grid-stride fill: set every element of a[0..N) to num. Valid for any
// grid/block configuration, including grids smaller than N.
__global__
void initWith(float num, float *a, int N)
{
  const int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
  {
    a[i] = num;
  }
}
// Grid-stride elementwise add: result[i] = a[i] + b[i] for i in [0, N).
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  const int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}
// Host-side verification: every element of vector[0..N) must equal target.
// Prints the first mismatch and exits with status 1; otherwise reports
// success on stdout.
void checkElementsAre(float target, float *vector, int N)
{
  for (int i = 0; i < N; ++i)
  {
    if (vector[i] == target)
      continue;
    printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
    exit(1);
  }
  printf("Success! All values calculated correctly.\n");
}
// Demo driver: initialize three device vectors concurrently (one stream
// each), add them on the GPU, and verify the result on the host.
int main()
{
int deviceId;
int numberOfSMs;
// Size the grid proportionally to the device's multiprocessor count.
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
// a, b, c are device buffers; h_c is pinned host memory for the result.
hipMalloc(&a, size);
hipMalloc(&b, size);
hipMalloc(&c, size);
hipHostMalloc(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream1, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream2, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream3, 0, c, N);
// NOTE(review): launched on the default stream — this relies on legacy
// default-stream semantics to order after the three init streams; confirm.
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
// Blocking copy of the result back to pinned host memory.
hipMemcpy(h_c, c, size, hipMemcpyDeviceToHost);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
// Expect 3 + 4 = 7 in every element.
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipFree(a);
hipFree(b);
hipFree(c);
hipHostFree(h_c);
}
| 3d1a324f067b389a42437ac41ce634ae1b01d396.cu | #include <stdio.h>
// Grid-stride fill: set every element of a[0..N) to num. Valid for any
// grid/block configuration, including grids smaller than N.
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
// Grid-stride elementwise add: result[i] = a[i] + b[i] for i in [0, N).
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
// Host-side verification: every element of vector[0..N) must equal target.
// Prints the first mismatch and exits with status 1; otherwise reports
// success on stdout.
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
// Demo driver: initialize three device vectors concurrently (one stream
// each), add them on the GPU, and verify the result on the host.
int main()
{
int deviceId;
int numberOfSMs;
// Size the grid proportionally to the device's multiprocessor count.
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
// a, b, c are device buffers; h_c is pinned host memory for the result.
cudaMalloc(&a, size);
cudaMalloc(&b, size);
cudaMalloc(&c, size);
cudaMallocHost(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
// NOTE(review): launched on the default stream — this relies on legacy
// default-stream semantics to order after the three init streams; confirm.
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
// Blocking copy of the result back to pinned host memory.
cudaMemcpy(h_c, c, size, cudaMemcpyDeviceToHost);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
// Expect 3 + 4 = 7 in every element.
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaFreeHost(h_c);
}
|
4d60fdbf5d50ddc77b5bdb41f447eba2c924a68a.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseBlasLegacy.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/ThrustAllocator.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/scalar_tensor.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
// Convert the sorted COO row-index vector (int64) of a coalesced sparse
// matrix into an int32 CSR row-pointer array of length dim+1 via cusparse
// Xcoo2csr. `dim` is the number of rows, `nnz` the number of entries.
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
// cusparse expects 32-bit indices; narrow the int64 COO rows first.
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// Shared addmm worker: converts the COO indices of an (m x k) sparse matrix
// to int32 CSR form and dispatches to the CSR-based worker that computes
// r_ = beta * t + alpha * (sparse @ dense).
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
// COO layout: indices[0] = row ids, indices[1] = column ids.
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
// cusparse expects 32-bit column indices.
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
// r_ = beta * t + alpha * (sparse_ @ dense), where sparse_ is a 2-D sparse
// COO matrix of shape (m, k), dense is (k, n) and t is (m, n). All tensors
// must live on the same CUDA device. Returns r_.
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
// The worker assumes sorted, de-duplicated COO indices.
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// Empty sparse operand: result is just beta * t.
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
// addmm out-variant with broadcasting: expand `self` to the result shape
// (mat1.size(0), mat2.size(1)) and delegate to the non-broadcasting kernel.
Tensor& addmm_out_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha,
    Tensor& result
) {
  auto expanded_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_out_sparse_dense_cuda(result, *expanded_self, mat1, mat2, beta, alpha);
}
// Allocating variant: create an empty result with `t`'s options and fill it
// via the out-variant.
Tensor s_addmm_sparse_dense_cuda(
    const Tensor& t,
    const SparseTensor& sparse,
    const Tensor& dense,
    const Scalar& beta,
    const Scalar& alpha
) {
  Tensor out = at::empty({0}, t.options());
  s_addmm_out_sparse_dense_cuda(out, t, sparse, dense, beta, alpha);
  return out;
}
// Broadcasting, allocating addmm: expand `self` to the result shape and
// delegate to the non-broadcasting allocating variant.
Tensor addmm_sparse_dense_cuda(
    const Tensor& self,
    const SparseTensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha
) {
  auto expanded_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_sparse_dense_cuda(*expanded_self, mat1, mat2, beta, alpha);
}
// In-place variant: accumulate into `t` itself by using it as both the
// output and the `t` operand of the out-variant.
Tensor& s_addmm_sparse_dense_cuda_(
    Tensor& t,
    const SparseTensor& sparse,
    const Tensor& dense,
    const Scalar& beta,
    const Scalar& alpha
) {
  Tensor& self_and_out = t;
  return s_addmm_out_sparse_dense_cuda(self_and_out, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
// Hybrid sparse * dense matmul: r_ = sparse_ @ dense, where sparse_ is a
// 2-D sparse COO matrix (m x k) with scalar values and dense is (k x n).
// The result is a hybrid sparse tensor with 1 sparse dim (the original row
// ids) and 1 dense dim (the n output columns per nonzero row).
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
// Output: 1 sparse dim, 1 dense dim, logical shape (m, n).
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
// Shrink the temporary sparse matrix to nnz compacted rows before the
// dense addmm (beta = 0 makes it a pure matmul into `values`).
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
// Allocating hspmm: build an empty sparse result with matching options and
// fill it in place via the out-variant.
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
  auto out = at::empty({0}, sparse.options());
  hspmm_out_sparse_cuda(sparse, dense, out);
  return out;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
// Elementwise functor for sparse-dense add kernels.
// Binary form:  *out += val * (*in)
// Ternary form: *out  = *in1 + val * (*in2)
// where `val` is the scalar multiplier applied to the sparse values.
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
T val;
};
// Computes r_ = dense + value * sparse (dense-sparse addition, no broadcasting).
// Promotes both operands to their common dtype, accumulates into a buffer `r`
// (which aliases r_ when no promotion is needed), and copies back at the end.
// Coalesced inputs take a custom elementwise-kernel path; non-coalesced inputs
// fall back to index_add_ on a 2D view.
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
// No stored values: the result is just the dense operand.
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
// `r` is the accumulation buffer. It aliases r_ unless the output dtype
// differs from the common compute dtype, in which case a temp is used and
// copied back into r_ at the end.
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
// In-place: the kernels below write through raw TensorInfo offsets, so a
// non-contiguous output would be addressed incorrectly.
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
// Zero-sized values (e.g. an empty dense dimension): nothing to accumulate.
// NOTE(review): this returns r_ directly; when dtype promotion created a
// separate `r`, r_ is not resized/copied here — confirm this path is only
// reachable with zero-element outputs.
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
// Scalar values: one thread per nonzero.
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::ComplexHalf, at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar), dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
// Hybrid values: one block's worth of threads per nonzero.
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::ComplexHalf, at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
// Non-coalesced path: flatten sparse-dim indices to 1D row indices and use
// index_add_ on a (sparse dims collapsed) x (dense dims collapsed) view.
// index_add_ handles duplicate indices, so no coalesce is required.
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
AT_CUDA_CHECK(hipGetLastError());
// Cast the accumulation buffer back into the caller's output (no-op when
// r aliases r_).
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
// Computes r_ = t + value * src for two sparse tensors of identical shape.
// Rather than merging, it concatenates indices/values (cheap, avoids a device
// sync), producing a possibly non-coalesced result; see the note below.
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
// dense + sparse is routed to the dense-sparse implementation above.
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
// Trivial cases: one operand contributes nothing.
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
// Pre-scale src's values by `value` (skipped when value == 1).
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
// Output dtype differs from the compute dtype: coalesce (i.e. sum
// duplicates) in the common dtype first, then downcast the values.
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
// Device functor computing elementwise products; the structural counterpart
// of TensorCAddOp above, but with multiplication and no scale factor.
template <typename T>
struct TensorMulOp {
  // In-place form: *out = *out * *in
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out = *out * *in;
  }

  // Out-of-place form: *out = *in1 * *in2
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 * *in2;
  }
};
// Elementwise multiply dispatcher for any sparse/dense operand combination.
// Routes each case (dense*sparse, 0-dim operands, sparse*sparse) to the
// dedicated helper; the order of the checks below is significant.
SparseTensor& mul_out_sparse_cuda(const Tensor& t_, const Tensor& src_, SparseTensor& r_) {
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
// case mul(sparse, dense)
if (!src_.is_sparse()) {
return _mul_dense_sparse_out(src_, t_, r_);
}
// case mul(dense, sparse)
if (!t_.is_sparse()) {
return _mul_dense_sparse_out(t_, src_, r_);
}
// case mul(sparse, sparse) with a 0-dim input.
if (!src_.dim()) {
return _mul_sparse_sparse_zero_dim_out(src_, t_, r_);
}
if (!t_.dim()) {
return _mul_sparse_sparse_zero_dim_out(t_, src_, r_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
// mul(sparse, sparse)
// Short circuit when there is zero nnz.
// Not strictly necessary, but there are tests checking whether
// resize in mul fails if run on tensors coming from .data/.detach.
if (t_.sizes().equals(src_.sizes()) && (!t_._nnz() || !src_._nnz())) {
r_.resize_as_(t_);
return r_.zero_();
}
return _mul_sparse_sparse_out(t_, src_, r_);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
// One thread per input nonzero. Thread i looks up, via the precomputed
// lower-bound position j = input_indices_pos_ti[i], whether the i-th input
// index has a matching grad index; if so it copies that grad value row into
// the output, otherwise it zero-fills the row.
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the grid tail: grids rarely divide total_threads evenly.
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
// Each nonzero owns a contiguous slab of stride-0 elements in the values
// tensor (the flattened dense dims).
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
// Copy the matching grad row.
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
// No grad entry for this index: zero the row.
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
// Backward of sparse.sum() over the dims in `dims_to_sum`.
// See NOTE [ sparse.sum() backward ] in the sparse math sources.
// Two regimes: when all sparse dims were summed, grad is dense and is simply
// expanded back over the input's nonzeros; otherwise grad is sparse and each
// input nonzero must be matched against grad's (flattened) indices.
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
// Partition summed dims into sparse vs dense; dense dims are rebased so that
// dim 0 is the nnz dim of the values tensor.
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
// Every sparse dim was summed, so the forward output was dense; broadcast
// grad back over all of the input's nonzeros.
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
// Re-expand grad's values over any summed dense dims so row shapes match
// input_values rows.
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
// Indices are unchanged; the expanded values are the whole answer.
grad_input_values = grad_values_expand;
}
else {
// Sparse dims were (partially) summed: match each input nonzero to its
// grad nonzero via a vectorized binary search, then scatter on device.
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
// Allocating wrapper for batched sparse-dense matmul. The buffer is created
// as (b, k, i) rather than (b, i, k) because bmm_out_sparse_cuda writes
// column-major cuSPARSE results and calls transpose_(1, 2) before returning.
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
  Tensor out = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
  return bmm_out_sparse_cuda(self, mat2, out);
}
#if defined(USE_ROCM) || !(defined(_MSC_VER) && CUSPARSE_VERSION < 11000)
// One thread per matrix: binary-searches the sorted 1D batch-index tensor for
// the LAST occurrence of its matrix number, writing that position into
// mat_el_end_indices, or -1 if the matrix has no nonzeros.
// Precondition: num_elements > 0 — the initial mid_val read below would be
// out of bounds for an empty indices tensor (callers guard nnz == 0).
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
// Grid may overshoot num_matrices; excess threads exit.
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
// Branch-light binary search: both bounds are updated via selects so all
// lanes follow the same control flow.
while (
start_idx <= end_idx
) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
// A hit only counts if mid_idx is the last element with this matrix number.
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
// Launches one thread per matrix (64-thread blocks) and synchronizes so the
// caller can immediately copy mat_el_end_indices back to the host.
// NOTE(review): hipGetDevice and hipDeviceSynchronize return codes are not
// checked here — consider wrapping them in the usual error-check macro.
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
// Ceil-ish division: may launch one extra block; the kernel bounds-checks.
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
}
// Maps a tensor's scalar type to the hipDataType expected by the
// hipSPARSE generic descriptor APIs used in bmm_out_sparse_cuda.
//
// Only float32 and float64 are supported; any other dtype raises via
// TORCH_CHECK. Takes the tensor by const reference — the original passed
// a full Tensor by value, which needlessly bumps and drops the intrusive
// refcount on every call; existing call sites are source-compatible.
hipDataType getTensorCudaDataType(const Tensor& self) {
  switch (self.scalar_type()) {
    case ScalarType::Float:
      return HIP_R_32F;
    case ScalarType::Double:
      return HIP_R_64F;
    default:
      TORCH_CHECK(false, "Tensor types must be either float32 or float64");
      return HIP_R_32F; // unreachable; keeps -Wreturn-type quiet
  }
}
#endif
// Batched sparse-dense matmul: result[b] = self[b] @ mat2[b] for each batch b.
// Implemented by slicing the coalesced 3D COO tensor into per-batch COO
// matrices and running one hipsparseSpMM per batch. Results are produced in
// column-major order and transposed (dims 1,2) at the end.
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(USE_ROCM) || (defined(CUDART_VERSION) && (CUDART_VERSION >= 10010)) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
// Sized (b, k, i): the SpMM outputs are column-major, so the buffer is laid
// out transposed and flipped back with transpose_(1, 2) before returning.
result.resize_({num_matrices, dim_k, dim_i});
// Empty product: also skips search_end_matrix_indices, whose kernel needs a
// non-empty index tensor.
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
// Per-batch end offsets into the sorted nnz arrays, computed on device and
// copied back to the host so the loop below can slice each batch.
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
// SpMM workspace, grown lazily and reused across batches.
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
bool deterministic = globalContext().deterministicAlgorithms();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
// Convert inclusive end index to an exclusive bound.
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
// COO descriptor for this batch's slice of the nnz arrays.
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
// Query required workspace and grow the shared buffer if needed.
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
// Batch has no nonzeros: its output is all zeros.
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
| 4d60fdbf5d50ddc77b5bdb41f447eba2c924a68a.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh>
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseBlasLegacy.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.h>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_sparse_sum_native.h>
#include <ATen/ops/add_native.h>
#include <ATen/ops/addmm_native.h>
#include <ATen/ops/bmm_native.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/copy_sparse_to_sparse.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/hspmm_native.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/result_type.h>
#include <ATen/ops/scalar_tensor.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
// Converts a 1D tensor of COO row indices (int64) into an int32 CSR
// row-pointer tensor of length dim+1 via cusparse's Xcoo2csr.
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
// cusparse wants 32-bit indices; downcast the int64 COO rows first.
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
// COO-input front end for the CSR addmm worker: splits the 2xnnz index tensor
// into row/col components, converts rows to a CSR pointer array and cols to
// int32, then delegates to s_addmm_out_csr_sparse_dense_cuda_worker.
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
// Column indices also need to be int32 for the cusparse call downstream.
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
// r_ = beta * t + alpha * (sparse_ @ dense), non-broadcasting out-variant.
// Coalesces the sparse operand and hands the actual multiply to the worker;
// an all-zero sparse operand short-circuits to beta * t.
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
// Empty sparse operand: result is just the scaled additive term.
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
// Broadcasting out-variant of addmm: expands `self` to the result shape
// (mat1.size(0), mat2.size(1)) and forwards to the non-broadcasting worker.
Tensor& addmm_out_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Tensor& result
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_out_sparse_dense_cuda(result, *self_expanded, mat1, mat2, beta, alpha);
}
// Allocating, non-broadcasting addmm: builds an empty output with t's options
// and delegates to the out-variant.
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
  Tensor out = at::empty({0}, t.options());
  s_addmm_out_sparse_dense_cuda(out, t, sparse, dense, beta, alpha);
  return out;
}
// Allocating, broadcasting addmm: expands `self` to (mat1.size(0),
// mat2.size(1)) then calls the allocating non-broadcasting variant.
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
  auto self_expanded = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return s_addmm_sparse_dense_cuda(*self_expanded, mat1, mat2, beta, alpha);
}
// In-place, non-broadcasting addmm: accumulates into `t` by passing it as
// both the output and the additive term of the out-variant.
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
// Hybrid sparse-dense matmul: r_ = mat1 (2D sparse, scalar values) @ mat2
// (2D dense), producing a hybrid tensor with 1 sparse dim and 1 dense dim.
// Works by renumbering mat1's row indices to 0..nnz-1, shrinking its first
// dimension to nnz, then running a dense addmm whose output rows become the
// per-nonzero-row dense values of the result.
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
// mxk @ kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
// Output is hybrid: 1 sparse dim (rows) and 1 dense dim (columns).
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
// beta = 0, alpha = 1: values = newSparse @ dense, written column-major.
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
// Functional variant of hspmm: allocates an empty sparse result with the
// same options as `sparse` and delegates to the out-variant, which resizes
// and fills it.
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
  SparseTensor result = at::empty({0}, sparse.options());
  hspmm_out_sparse_cuda(sparse, dense, result);
  return result;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
// Device functor for scaled accumulation (axpy-style): "out (+)= val * in".
// Instantiated by the sparse-into-dense add kernels below.
template <typename T>
struct TensorCAddOp {
  TensorCAddOp(T v) : val(v) {}
  // *out += val * *in
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out += val * *in;
  }
  // *out = *in1 + val * *in2
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 + val * *in2;
  }
  // scale factor applied to the sparse values
  T val;
};
// r_ = dense + value * sparse (dense-sparse addition; no broadcasting).
// Accumulates in the common dtype of (dense, sparse) and casts back into r_
// at the end. Coalesced inputs use a direct scatter kernel; uncoalesced
// inputs go through index_add_ so duplicate indices accumulate correctly.
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
  TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
  TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
  TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
  TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
  TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
    dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
  const int64_t nnz = sparse._nnz();
  // No nonzeros to add: result is just `dense`.
  if (nnz == 0) {
    r_.resize_as_(dense);
    r_.copy_(dense);
    return r_;
  }
  auto commonDtype = at::result_type(dense, sparse);
  TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
  // Accumulate into a commonDtype buffer; use a temporary when r_ is narrower.
  Tensor r = r_;
  if (r_.scalar_type() != commonDtype) {
    r = at::empty_like(dense, r_.options().dtype(commonDtype));
  }
  Tensor dense_buffer = dense.to(commonDtype);
  Tensor values = sparse._values().to(commonDtype);
  if (is_same_tensor(r, dense_buffer)) {
    // In-place path: the kernels below scatter straight into r_, which is
    // only valid when it is contiguous.
    TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
  } else {
    r.resize_as_(dense);
    r.copy_(dense_buffer);
  }
  Tensor indices = sparse._indices();
  int64_t nDim = dense.dim();
  int64_t nDimI = sparse.sparse_dim();
  if (values.numel() == 0) {
    return r_;
  }
  if (sparse.is_coalesced()) {
    // Coalesced fast path: indices are unique, so each nonzero can be
    // scattered independently.
    // TODO benchmark to decide whether to remove this special case
    const dim3 block = cuda::getApplyBlock();
    dim3 grid;
    int curDevice = -1;
    cudaGetDevice(&curDevice);
    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
    if (sparse.dense_dim() == 0) {
      // Scalar values: one thread of work per nonzero.
      TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
      AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
        at::ScalarType::ComplexHalf, at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,
        commonDtype, "add_out_dense_sparse_cuda", [&] {
          apply::sparseElementwiseKernelScalar<<<grid, block, 0, stream>>>(
            TensorCAddOp<scalar_t>(value.to<scalar_t>()),
            V_INFO(r), I_INFO(indices), V_INFO(values),
            static_cast<uint64_t>(nnz));
          C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
    } else {
      // Hybrid values (dense_dim > 0): size the grid for nnz * block.x work.
      TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
      // sparseElementwiseKernel needs values to be contiguous too
      values = values.contiguous();
      AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
        at::ScalarType::ComplexHalf, at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
          apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
            <<<grid, block, 0, stream>>>(
              TensorCAddOp<scalar_t>(value.to<scalar_t>()),
              V_INFO(r), I_INFO(indices), V_INFO(values),
              static_cast<uint64_t>(nnz));
          C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
    }
  } else {
    // Uncoalesced path: duplicate indices may exist, so flatten the sparse
    // dims to a 1D row index and let index_add_ accumulate duplicates.
    Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
    // View r as (prod(sparse dims), prod(dense dims)): one row per flat index.
    int64_t view_rows = 1;
    int64_t view_columns = 1;
    for (int i = 0; i < nDimI; i++) {
      view_rows *= r.size(i);
    }
    for (int i = nDimI; i < nDim; i++) {
      view_columns *= r.size(i);
    }
    Tensor r_view = r.view({view_rows, view_columns});
    values = values.reshape({nnz, view_columns});
    r_view.index_add_(0, indices1D, values, value);
  }
  AT_CUDA_CHECK(cudaGetLastError());
  // Cast the accumulation buffer back into the caller's output dtype
  // (no-op copy when r aliases r_).
  r_.copy_(r);
  return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
// r_ = t + value * src for two same-shaped sparse tensors. Implemented by
// concatenating indices/values rather than merging, so the result is
// generally uncoalesced (see the rationale comment below).
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
  // dense + sparse is routed to the dense kernel above.
  if (!t.is_sparse()) {
    return add_out_dense_sparse_cuda(r_, t, src, value);
  }
  // TODO: This test seems a bit goofy
  TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
  TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
  TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
  TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
  TORCH_CHECK(cuda::check_device({r_, t, src}));
  auto commonDtype = at::result_type(t, src);
  TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
  TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
  // Degenerate cases: one operand has no nonzeros.
  if (src._nnz() == 0) {
    return copy_sparse_to_sparse_(r_, t);
  }
  if (t._nnz() == 0) {
    return mul_out_sparse_scalar(r_, src, value);
  }
  TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
  // We deliberately choose to simply concat the indices and values tensors
  // rather than merging them. This removes the need to synchronously fetch nnz
  // at the end of the operation, at the cost of having a non-coalesced result.
  // This trade-off is preferable for the common use-case of gradient accumulation.
  Tensor t_indices_ = t._indices();
  Tensor s_indices_ = src._indices();
  Tensor t_values_ = t._values().to(commonDtype);
  Tensor s_values_ = src._values().to(commonDtype);
  // Scale src's values by `value`, skipping the multiply when value == 1.
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
      at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
        if (value.to<scalar_t>() != scalar_t(1)) {
          s_values_ = s_values_.mul(value);
        }
      });
  Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
  Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
  if (r_.scalar_type() != commonDtype) {
    // Output dtype is narrower: coalesce (which performs the actual summing
    // of duplicate indices) under commonDtype first, then cast values down.
    SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
    promoted.resize_as_(src);
    alias_into_sparse(promoted, r_indices_, r_values_);
    // performs the addition under the common dtype.
    promoted = promoted.coalesce();
    r_values_ = promoted._values().to(r_.scalar_type());
    r_indices_ = promoted._indices();
  } else {
    r_.resize_as_(src);
  }
  alias_into_sparse(r_, r_indices_, r_values_);
  // Prevent unbounded growth of nnz
  // TODO: Improved heuristic on when to coalesce or remove need to coalesce
  if (r_._nnz() > r_.numel()) {
    auto c = r_.coalesce();
    alias_into_sparse(r_, c._indices(), c._values());
  }
  return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
// Device functor for element-wise multiplication (counterpart of
// TensorCAddOp above).
// NOTE(review): not referenced by the mul path visible in this file
// (mul_out_sparse_cuda delegates to the _mul_* helpers) — confirm whether
// other kernels still instantiate it before removing.
template <typename T>
struct TensorMulOp {
  // *out *= *in
  __device__ __forceinline__ void operator()(T* out, T* in) {
    *out *= *in;
  }
  // *out = *in1 * *in2
  __device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
    *out = *in1 * *in2;
  }
};
// r_ = t_ * src_ (element-wise) where at least one operand is sparse.
// Dispatches on the dense/sparse and 0-dim operand combinations, then
// falls through to the generic sparse * sparse implementation.
SparseTensor& mul_out_sparse_cuda(const Tensor& t_, const Tensor& src_, SparseTensor& r_) {
  TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
  // case mul(sparse, dense)
  if (!src_.is_sparse()) {
    return _mul_dense_sparse_out(src_, t_, r_);
  }
  // case mul(dense, sparse)
  if (!t_.is_sparse()) {
    return _mul_dense_sparse_out(t_, src_, r_);
  }
  // case mul(sparse, sparse) with a 0-dim input.
  if (!src_.dim()) {
    return _mul_sparse_sparse_zero_dim_out(src_, t_, r_);
  }
  if (!t_.dim()) {
    return _mul_sparse_sparse_zero_dim_out(t_, src_, r_);
  }
  TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
  TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
  TORCH_CHECK(cuda::check_device({r_, t_, src_}));
  // mul(sparse, sparse)
  // Short circuit when there is zero nnz.
  // Not strictly necessary, but there are tests checking whether
  // resize in mul fails if run on tensors coming from .data/.detach.
  if (t_.sizes().equals(src_.sizes()) && (!t_._nnz() || !src_._nnz())) {
    r_.resize_as_(t_);
    return r_.zero_();
  }
  return _mul_sparse_sparse_out(t_, src_, r_);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
// One thread per input nonzero (total_threads == input nnz).
// For input nonzero i, input_indices_pos_ti.data[i] holds j, the
// lower_bound position of the input's flattened index within grad's
// flattened (sorted) indices — computed on the host with
// thrust::lower_bound. If grad really has that index, copy grad's value
// row j into grad_input's value row i; otherwise that nonzero gets a zero
// gradient row.
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
    int64_t total_threads,
    const TensorInfo<int64_t, int64_t> grad_indices_ti,
    const TensorInfo<int64_t, int64_t> input_indices_ti,
    const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
    const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
    TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= total_threads) return;
  const int64_t j = input_indices_pos_ti.data[i];
  bool has_match = false;
  if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
    has_match = true;
  }
  // strides[0] spans one whole value row (all kept dense dims).
  int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
  int64_t out_start = i * grad_input_values_stride0;
  int64_t out_end = (i + 1) * grad_input_values_stride0;
  int64_t in_start = j * grad_values_expand_ti.strides[0];
  if (has_match) {
    // Copy the matching grad value row element by element.
    for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
      grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
    }
  }
  else {
    // No matching grad nonzero: zero-fill this row.
    for (int64_t out_i = out_start; out_i < out_end; out_i++) {
      grad_input_values_ti.data[out_i] = scalar_t(0);
    }
  }
}
// Backward of sparse.sum(dims_to_sum): scatters `grad_` back onto the
// sparsity pattern of `input_`. See NOTE [ sparse.sum() backward ].
// Three regimes:
//   * all sparse dims summed  -> grad_ is dense; broadcast it to every nonzero
//   * only dense dims summed  -> expand grad values along the summed dims
//   * some sparse dims summed -> match input nonzeros against grad nonzeros
//     via flattened indices + lower_bound, then run the kernel above.
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
  TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
  TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
  auto input = input_.coalesce();
  const int64_t input_dim = input.dim();
  auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
  auto dims_to_sum_v = dims_to_sum.vec();
  maybe_wrap_dims(dims_to_sum_v, input_dim);
  Tensor input_indices = input._indices();
  Tensor input_values = input._values();
  IntArrayRef input_sizes = input.sizes();
  const int64_t input_sparse_dim = input.sparse_dim();
  const int64_t input_dense_dim = input.dense_dim();
  const int64_t input_nnz = input._nnz();
  // Split the summed dims into sparse vs dense; remember kept sparse dims.
  int64_t sparse_dims_to_sum_size = 0;
  auto sparse_dims_to_keep_v = std::vector<int64_t>();
  auto dense_dims_to_sum_v = std::vector<int64_t>();
  for (int64_t d = 0; d < input_dim; d++) {
    if (dims_to_sum_b[d]) {
      if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
      // +1 converts from tensor dim to values-tensor dim (dim 0 is nnz).
      else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
    }
    else {
      if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
    }
  }
  const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
  const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
  const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
  if (sum_all_sparse_dim) {
    // Every sparse dim was reduced: grad is dense and applies equally to
    // every input nonzero.
    TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
    auto grad_input_values = grad_;
    auto expand_size = input_values.sizes().vec();
    if (sum_dense_dim) {
      auto dense_expand_size = std::vector<int64_t>(expand_size);
      dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
      for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
      grad_input_values = grad_input_values.expand(dense_expand_size);
    }
    grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
  }
  else {
    TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
    auto grad = grad_.coalesce();
    Tensor grad_indices = grad._indices();
    Tensor grad_values = grad._values();
    const int64_t grad_sparse_dim = grad.sparse_dim();
    const int64_t grad_nnz = grad._nnz();
    // Re-expand grad values along any summed dense dims so rows line up
    // with input values rows.
    Tensor grad_values_expand = grad_values;
    if (sum_dense_dim) {
      auto expand_size = input_values.sizes().vec();
      if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
      for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
      grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    }
    Tensor grad_input_values;
    if (!sum_sparse_dim) {
      // Only dense dims were summed: sparsity patterns already match.
      grad_input_values = grad_values_expand;
    }
    else {
      int curDevice = -1;
      cudaGetDevice(&curDevice);
      cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
      at::cuda::ThrustAllocator allocator;
      auto policy = thrust::cuda::par(allocator).on(stream);
      typedef thrust::device_ptr<int64_t> thrust_ptr;
      grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      AT_ASSERT(grad_input_values.is_cuda());
      // get 1D indices
      auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
      std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
      auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
      auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
      thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
      thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
      // store lower_bound of input indices at grad indices
      Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
      thrust::lower_bound(policy,
                          grad_indices_iter, grad_indices_iter + grad_nnz,
                          input_indices_iter, input_indices_iter + input_nnz,
                          input_indices_pos_iter);
      // config to run cuda kernel
      int64_t total_threads = input_nnz;
      const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
      dim3 grid;
      TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
      auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
      auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
      auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
      AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
        auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
        auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
        _sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
          total_threads,
          grad_indices_ti,
          input_indices_ti,
          input_indices_pos_ti,
          grad_values_expand_ti,
          grad_input_values_ti
        );
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
    }
    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
  }
}
// Batched sparse @ dense matmul. The out-variant fills a (b, k, i)
// column-major-style buffer (cusparse writes column-major) and transposes
// dims 1/2 before returning, so the scratch result is allocated in that
// swapped layout here.
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
  Tensor out = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
  return bmm_out_sparse_cuda(self, mat2, out);
}
#if defined(USE_ROCM) || !(defined(_MSC_VER) && CUSPARSE_VERSION < 11000)
// One thread per matrix number. Binary-searches the ascending 1D tensor of
// matrix indices (dim 0 of a coalesced 3D sparse tensor) for the position
// of the LAST element belonging to this thread's matrix, writing it into
// mat_el_end_indices[target_mat_num], or -1 when the matrix has no elements.
// Precondition: num_elements >= 1 (the caller only launches when nnz > 0).
__global__ void search_end_matrix_indices_cuda_kernel(
  int64_t* mat_el_end_indices,
  int64_t num_matrices,
  const TensorInfo<int64_t, int64_t> indices_1D_ti,
  const int64_t num_elements
){
  const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
  if (target_mat_num >= num_matrices) return;

  const int64_t* indices_1D = indices_1D_ti.data;
  const int64_t indices_1D_stride = indices_1D_ti.strides[0];
  int64_t start_idx = 0;
  int64_t end_idx = num_elements - 1;
  int64_t mid_idx = (start_idx + end_idx) >> 1;
  int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
  bool found;

  // Invariant: when the loop exits normally, mid_idx == end_idx and mid_val
  // holds the element at that position, i.e. the candidate last occurrence.
  while (
    start_idx <= end_idx
  ) {
    bool trim_right = mid_val > target_mat_num;
    int64_t mid_idx_minus_1 = mid_idx - 1;
    int64_t mid_idx_plus_1 = mid_idx + 1;

    end_idx = trim_right ? mid_idx_minus_1 : end_idx;
    start_idx = trim_right ? start_idx : mid_idx_plus_1;
    mid_idx = (start_idx + end_idx) >> 1;
    // Bug fix: when every stored index is greater than target_mat_num the
    // window collapses to [0, -1] and mid_idx becomes -1. The original code
    // unconditionally read indices_1D at that point — an out-of-bounds read
    // whose garbage value could also spuriously satisfy the `found` test.
    // The matrix provably has no elements in this case, so record -1.
    if (mid_idx < 0) {
      mat_el_end_indices[target_mat_num] = -1;
      return;
    }
    mid_val = indices_1D[mid_idx*indices_1D_stride];
  }

  // Found iff the candidate matches and is the last occurrence (either the
  // final element overall, or followed by a different matrix number).
  found = (mid_val == target_mat_num)
    && (
      (mid_idx == (num_elements-1))
      || (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
    );

  mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
  int curDevice = -1;
  // Bug fix: these CUDA runtime calls previously ignored their error codes;
  // check them like the rest of the file does (AT_CUDA_CHECK).
  AT_CUDA_CHECK(cudaGetDevice(&curDevice));
  cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);

  auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
  // 64 threads per block; integer-division grid sizing guarantees at least
  // num_matrices threads in total.
  int64_t grid_size = (num_matrices / 64)+1;
  int64_t block_size = 64;
  int64_t num_elements = indices_1D.size(0);

  search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
    mat_el_end_indices,
    num_matrices,
    indices_1D_ti,
    num_elements
  );
  C10_CUDA_KERNEL_LAUNCH_CHECK();

  // The caller reads mat_el_end_indices immediately after this returns, so
  // block until the kernel finishes (and surface any async execution error).
  AT_CUDA_CHECK(cudaDeviceSynchronize());
}
// Maps a tensor's scalar type onto the matching cudaDataType for cusparse.
// Only the two float types the bmm path supports are accepted; anything
// else raises via TORCH_CHECK.
cudaDataType getTensorCudaDataType(Tensor self) {
  cudaDataType dtype;
  const auto scalar_type = self.scalar_type();
  if (scalar_type == ScalarType::Float) {
    dtype = CUDA_R_32F;
  } else if (scalar_type == ScalarType::Double) {
    dtype = CUDA_R_64F;
  } else {
    TORCH_CHECK(false, "Tensor types must be either float32 or float64");
  }
  return dtype;
}
#endif
// result = bmm(self, mat2) where self is a 3D (batch, i, j) sparse COO
// tensor with scalar values and mat2 is dense (batch, j, k). Each batch
// matrix is multiplied separately via cusparseSpMM; since cusparse emits
// column-major output, results are produced as (batch, k, i) and the
// result tensor is transposed back at the end.
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
  TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(USE_ROCM) || (defined(CUDART_VERSION) && (CUDART_VERSION >= 10010)) // linux cuda >= 10.1 or windows cuda >= 11.0
  TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
  TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
  TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
  TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
  TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
  TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
  int64_t num_matrices = self.size(0);
  int64_t dim_i = self.size(1);
  int64_t dim_j = self.size(2);
  int64_t dim_k = mat2.size(2);
  // Sized (b, k, i): column-major per-matrix results; transposed on return.
  result.resize_({num_matrices, dim_k, dim_i});
  if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
    result.zero_().transpose_(1, 2);
    return result;
  }
  Tensor tmp_result;
  bool need_copy_result;
  // If the result tensor is contiguous, we can just write results directly to it.
  // Otherwise, we'll need to write results to a temp buffer and then copy.
  if (result.is_contiguous()) {
    tmp_result = result;
    need_copy_result = false;
  } else {
    tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
    need_copy_result = true;
  }
  // Dense matrices have to be contiguous for cusparseSpMM to work
  const Tensor mat2_contig = mat2.contiguous();
  auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
  // First need to coalesce to get all of the first dimension indices
  // in order since we'll be sending each matrix into the MM operation
  SparseTensor self_coalesced = self.coalesce();
  int64_t nnz = self_coalesced._nnz();
  Tensor indices = self_coalesced._indices();
  Tensor values = self_coalesced._values();
  Tensor indices_dim0 = indices[0];
  // Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
  // only supports 32-bit indices
  Tensor indices_dim1 = indices[1].to(ScalarType::Int);
  Tensor indices_dim2 = indices[2].to(ScalarType::Int);
  // For each matrix b, find the position of its last nonzero in the sorted
  // index list (or -1 if empty) and copy the table to the host.
  std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
  {
    auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
    auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
    int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
    search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
    AT_CUDA_CHECK(cudaMemcpy(
      mat_el_end_indices_host.get(),
      mat_el_end_indices_device,
      num_matrices*sizeof(int64_t),
      cudaMemcpyDeviceToHost
    ));
  }
  // Need a pointer to an array to access within a lambda
  int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
  Scalar beta = 0;
  Scalar alpha = 1;
  int64_t mat_el_begin_idx = 0;
  // Workspace is grown lazily to the largest size any matrix requires and
  // reused across iterations.
  size_t workspace_buffer_size = 0;
  void* workspace_buffer = nullptr;
  auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
  ::c10::DataPtr dataPtr;
  // See Note [Enabling Deterministic Operations]
  bool deterministic = globalContext().deterministicAlgorithms();
  cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
  // Iterate through each set of 2D matrices within the 3D
  // tensor inputs, performing a matrix multiply with each
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
    values.scalar_type(), "bmm_sparse_cuda", [&] {
      scalar_t alpha_val = alpha.to<scalar_t>();
      scalar_t beta_val = beta.to<scalar_t>();
      uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
      uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
      scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
      scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
      scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
      for (
        int64_t cur_mat_num = 0;
        (cur_mat_num < num_matrices);
        cur_mat_num++
      ) {
        int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
        if (mat_el_end_idx != -1) {
          // Convert inclusive last-element index to exclusive end.
          mat_el_end_idx++;
          // Create tensors to view just the current set of matrices
          int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
          cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
          uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
          uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
          scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
          // COO descriptor over this matrix's slice of indices/values.
          cusparseSpMatDescr_t sparse_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
            &sparse_descr,
            dim_i,
            dim_j,
            sparse_nnz,
            reinterpret_cast<void*>(row_indices_ptr),
            reinterpret_cast<void*>(col_indices_ptr),
            reinterpret_cast<void*>(values_ptr),
            CUSPARSE_INDEX_32I,
            CUSPARSE_INDEX_BASE_ZERO,
            cuda_data_type
          ));
          // mat2 slice is row-major (j, k); describe it as a column-major
          // (k, j) matrix and transpose it in the SpMM call below.
          scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
          cusparseDnMatDescr_t dense_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
            &dense_descr,
            dim_k,
            dim_j,
            dim_k,
            reinterpret_cast<void*>(mat2_ptr),
            cuda_data_type,
            CUSPARSE_ORDER_COL
          ));
          scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
          cusparseDnMatDescr_t result_descr;
          TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
            &result_descr,
            dim_i,
            dim_k,
            dim_i,
            reinterpret_cast<void*>(result_ptr),
            cuda_data_type,
            CUSPARSE_ORDER_COL
          ));
          // Query the workspace requirement for this matrix and grow the
          // shared buffer only when needed.
          size_t required_workspace_buffer_size = 0;
          TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
            cusparse_handle,
            CUSPARSE_OPERATION_NON_TRANSPOSE,
            CUSPARSE_OPERATION_TRANSPOSE,
            (void*)&alpha_val,
            sparse_descr,
            dense_descr,
            (void*)&beta_val,
            result_descr,
            cuda_data_type,
            mm_alg,
            &required_workspace_buffer_size
          ));
          if (required_workspace_buffer_size > workspace_buffer_size) {
            workspace_buffer_size = required_workspace_buffer_size;
            dataPtr = allocator.allocate(workspace_buffer_size);
            workspace_buffer = dataPtr.get();
          }
          TORCH_CUDASPARSE_CHECK(cusparseSpMM(
            cusparse_handle,
            CUSPARSE_OPERATION_NON_TRANSPOSE,
            CUSPARSE_OPERATION_TRANSPOSE,
            (void*)&alpha_val,
            sparse_descr,
            dense_descr,
            (void*)&beta_val,
            result_descr,
            cuda_data_type,
            mm_alg,
            workspace_buffer
          ));
          TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
          TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
          TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
          mat_el_begin_idx = mat_el_end_idx;
        } else {
          // This batch matrix has no nonzeros: its product is zero.
          tmp_result[cur_mat_num].zero_();
        }
      }
    }
  );
  if (need_copy_result) {
    result.copy_(tmp_result);
  }
  // Need to transpose the result matrices since cusparse stores
  // them in column-major order in memory
  result.transpose_(1,2);
#else
  TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
  return result;
}
}} // namespace at::native
|
81535a51da1f97ec3daa539dfb1c47bf3b59a2e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#ifdef CUDNN
#pragma comment(lib, "cudnn.lib")
#endif
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    // Flatten the (possibly 2D) grid into one index; one thread per element.
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) {
        // Sign binarization: non-negative -> +1, negative -> -1.
        binary[i] = (x[i] >= 0) ? 1 : -1;
    }
}
// Host launcher: sign-binarizes n floats from x into binary on the GPU.
void binarize_gpu(float *x, int n, float *binary)
{
    hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary);
    check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    // One thread per spatial position s; each thread walks its n entries,
    // which are laid out with stride `size`.
    int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (s >= size) return;
    // Pass 1: mean absolute value over the n entries at position s.
    float mean = 0;
    for(int i = 0; i < n; ++i){
        mean += abs(input[i*size + s]);
    }
    mean /= n;
    // Pass 2: keep only the sign, scaled by that mean magnitude.
    // (Zero maps to -mean, matching the strict `> 0` test.)
    for(int i = 0; i < n; ++i){
        binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
    }
}
// Host launcher: binarizes input (n x size, stride `size` per position)
// into binary, one thread per spatial position.
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
    check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    // One thread per filter f; each filter owns `size` contiguous weights.
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    // Mean absolute weight of this filter.
    float mean = 0;
    for(int i = 0; i < size; ++i){
        mean += abs(weights[f*size + i]);
    }
    mean /= size;
    // Binarize: sign(w) * mean (zero maps to -mean, matching `> 0`).
    for(int i = 0; i < size; ++i){
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
    }
}
// Host launcher: binarizes n filters of `size` weights each, one thread
// per filter.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
    check_error(hipPeekAtLastError());
}
// Element-wise float32 -> float16 conversion, one thread per element.
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
    //if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
// Host launcher for f32 -> f16 conversion; output_f16 actually points at
// half data but is typed float* at the API boundary.
void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) {
    hipLaunchKernelGGL(( cuda_f32_to_f16) , dim3(size / BLOCK + 1), dim3(BLOCK), 0, get_cuda_stream() , input_f32, size, (half *)output_f16);
}
// Element-wise float16 -> float32 conversion, one thread per element.
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
    //if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
// Host launcher for f16 -> f32 conversion; input_f16 actually points at
// half data but is typed float* at the API boundary.
void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) {
    hipLaunchKernelGGL(( cuda_f16_to_f32) , dim3(size / BLOCK + 1), dim3(BLOCK), 0, get_cuda_stream() , (half *)input_f16, size, output_f32);
}
// Allocates a device buffer of n halfs; when src (device f32) is non-NULL,
// converts it into the new buffer. Returns the device pointer.
half *cuda_make_f16_from_f32_array(float *src, size_t n)
{
    half *dst16;
    size_t size = sizeof(half)*n;
    check_error(hipMalloc((void **)&dst16, size));
    if (src) {
        cuda_convert_f32_to_f16(src, n, (float *)dst16);
    }
    // NOTE(review): dst16 is uninitialized if hipMalloc fails; this relies
    // on check_error above aborting in that case — confirm check_error's
    // behavior before trusting this fallback.
    if (!dst16) error("Cuda malloc failed\n");
    return dst16;
}
// Zeroes every channel of the pixels inside box [x1,x2] x [y1,y2] of one
// (channel x height x width) image. `size` is the box area; one thread per
// box pixel, each clearing that pixel across all channels.
__global__ void apply_mask_gpu(float* data_im,
        const int height, const int width, const int channel, const int batchNum,
        const int x1, const int y1, const int x2, const int y2, const int size) {
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int boxWidth = x2 - x1 + 1;
    int boxHeight = y2 - y1 + 1;
    //fprintf(stderr, "x1=%d y1=%d x2=%d y2=%d\n",x1,y1,x2,y2);
    if (index >= size)
        return;
    // Decompose the flat index into (col, row) inside the box.
    int col = (index % boxWidth) + x1;
    index = (index / boxWidth);
    int row = (index % boxHeight) + y1;
    index = (index / boxHeight);
    // NOTE(review): batchNum is never used to offset data_im here, so writes
    // always land in the first image of the buffer — verify that callers
    // pass a pointer already offset to the right batch image.
    for (int c = 0; c < channel; c++){
        data_im[col + width*(row + height*c)] = 0;
    }
}
// For each ground-truth box of image `batchNum`, zeroes the corresponding
// region of data_im across all channels. net.gt is laid out as
// net.gtMax boxes per image, 4 floats each (xc, yc, w, h, all normalized);
// an xc of -1 marks an unused slot.
void mask_gpu(float* data_im,
        const int height, const int width, const int channel, const int batchNum,
        network net) {
    int gtCount;
    int total = 0;
    for(int k = 0; k < net.gtMax; k++){
        // Offset of box k of image batchNum inside the flat gt array.
        gtCount = net.gtMax*4*batchNum + k*4;
        if(net.gt[gtCount] == -1)
            continue;
        //fprintf(stderr, "x1=%f y1=%f x2=%f y2=%f\n",net.gt[gtCount],net.gt[gtCount+1],net.gt[gtCount+2],net.gt[gtCount+3]);
        // Convert normalized (xc, yc, w, h) to pixel corner coordinates,
        // clamping the bottom-right corner to the image bounds.
        int boxXc = (int) (width * (float)(net.gt[gtCount]));
        int boxYc = (int) (height * (float)(net.gt[gtCount + 1]));
        int boxWidth = (int) (width * (float)(net.gt[gtCount + 2]));
        int boxHeight = (int) (height * (float)(net.gt[gtCount + 3]));
        int x1 = (int) (boxXc - (boxWidth / 2));
        int y1 = (int) (boxYc - (boxHeight / 2));
        int x2 = min((int) (boxXc + (boxWidth / 2)), width);
        int y2 = min((int) (boxYc + (boxHeight / 2)), height);
        //fprintf(stderr, "x1=%d y1=%d x2=%d y2=%d\n",x1,y1,x2,y2);
        //fprintf(stderr, "width=%d height=%d channel=%d \n",width,height, channel);
        // One thread per pixel of the box area.
        total = (x2 - x1 + 1) * (y2 - y1 + 1);
        //fprintf(stderr, "total=%d ",total);
        hipLaunchKernelGGL(( apply_mask_gpu), dim3(cuda_gridsize(total)), dim3(BLOCK), 0, 0, data_im, height, width, channel, batchNum, x1, y1, x2, y2, total);
    }
    //fprintf(stderr, "\n");
}
// Forward pass of a convolutional layer on the GPU.
// With CUDNN the convolution runs through cudnnConvolutionForward — in fp16
// when CUDNN_HALF is set (static scratch buffers, grown on demand) — and
// otherwise falls back to per-image im2col + GEMM. Afterwards: optional
// batchnorm, bias add, activation, binary/XNOR weight restore, and an
// optional ground-truth mask that zeroes boxed regions of the output.
//
// Fix: the trailing `if (l.mask)` loop reused an index `i` that is declared
// only inside the non-CUDNN #else branch, so the function did not compile
// when CUDNN was defined; the loop now declares its own index.
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    if(l.binary){
        // binarize the weights and temporarily swap them in for this pass
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
    }
    if(l.xnor){
        // XNOR-net: binarize both the weights and the layer input
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
        binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
        state.input = l.binary_input_gpu;
    }
#ifdef CUDNN
    float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT
    float alpha = 1, beta = 0;
#ifdef CUDNN_HALF
    // Note: For improved performance it is advised to use beta[0] = 0.0.
    // For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH;
    // 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF
    // 2. or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED
    // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
    // fp16 scratch buffers are static, shared by every layer, grown on demand.
    // NOTE(review): statics make this non-reentrant — confirm single-threaded use.
    const size_t input16_size = l.batch*l.c*l.w*l.h;
    static size_t max_input16_size = input16_size;
    static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
    const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;
    static size_t max_output16_size = output16_size;
    static half* output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
    if (max_input16_size < input16_size) {
        max_input16_size = input16_size;
        cuda_free((float *)input16);
        input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
    }
    if (max_output16_size < output16_size) {
        max_output16_size = output16_size;
        cuda_free((float *)output16);
        output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
    }
    cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
    //fill_ongpu(output16_size / 2, 0, (float *)output16, 1);
    cudnnConvolutionForward(cudnn_handle(),
        &alpha,
        l.srcTensorDesc,
        input16,
        l.weightDesc,
        l.weights_gpu16,
        l.convDesc,
        l.fw_algo,
        state.workspace,
        l.workspace_size,
        &beta,
        l.dstTensorDesc,
        output16);
    cuda_convert_f16_to_f32((float *)output16, output16_size, l.output_gpu);
#else
    cudnnConvolutionForward(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        state.input,
        l.weightDesc,
        l.weights_gpu,
        l.convDesc,
        l.fw_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dstTensorDesc,
        l.output_gpu);
#endif
#else
    // fallback: one im2col + GEMM per batch image
    int i;
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = l.out_w*l.out_h;
    for(i = 0; i < l.batch; ++i){
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        float * a = l.weights_gpu;
        float * b = state.workspace;
        float * c = l.output_gpu;
        gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
    }
#endif
    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
    //if(l.dot > 0) dot_error_gpu(l);
    if(l.binary || l.xnor) swap_binary(&l);
    //hipDeviceSynchronize(); // for correct profiling of performance
    if (l.mask)
        for(int i = 0; i < l.batch; ++i){
            mask_gpu(l.output_gpu + i*l.out_h*l.out_w*l.out_c, l.out_h, l.out_w, l.out_c, i, state.net);
        }
}
// Backward pass of a convolutional layer on the GPU: activation gradient,
// bias gradients, optional batchnorm backward, then the weight-update and
// input-delta convolutions (cuDNN when available, im2col/GEMM otherwise).
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    // delta *= activation'(output)
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
    } else {
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
    }
    // keep the raw input around: the XNOR path rebinds state.input below but
    // still needs the original values for the HARDTAN gradient at the end
    float *original_input = state.input;
    if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
    float one = 1;
    float alpha = 1, beta = 0;
#ifdef CUDNN_HALF
    // fp16 path: static scratch buffers are shared by every layer in the
    // process and grown on demand.
    // NOTE(review): statics make this non-reentrant — confirm single-threaded use.
    const size_t input16_size = l.batch*l.c*l.w*l.h;
    static size_t max_input16_size = input16_size;
    static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
    const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h;
    static size_t max_delta16_size = delta16_size;
    static half* delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
    if (max_input16_size < input16_size) {
        max_input16_size = input16_size;
        cuda_free((float *)input16);
        input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
    }
    if (max_delta16_size < delta16_size) {
        max_delta16_size = delta16_size;
        cuda_free((float *)delta16);
        delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
    }
    cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
    cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, (float *)delta16);
    // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
    // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
    // calculate conv weight updates
    // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum
    // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m
    cuda_convert_f32_to_f16(l.weight_updates_gpu, l.c*l.n*l.size*l.size, l.weight_updates_gpu16);
    cudnnConvolutionBackwardFilter(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        input16, //state.input,
        l.ddstTensorDesc,
        delta16, //l.delta_gpu,
        l.convDesc,
        l.bf_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dweightDesc,
        l.weight_updates_gpu16); // l.weight_updates_gpu);
    cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.c*l.n*l.size*l.size, l.weight_updates_gpu);
    if (state.delta) {
        if (l.binary || l.xnor) swap_binary(&l);
        // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
        // calculate delta for the next layer
        // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16
        // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16)
        // dx is written into input16 (reused as scratch), then converted out
        cudnnConvolutionBackwardData(cudnn_handle(),
            &alpha,
            l.weightDesc,
            l.weights_gpu16, //l.weights_gpu,
            l.ddstTensorDesc,
            delta16, //l.delta_gpu,
            l.convDesc,
            l.bd_algo,
            state.workspace,
            l.workspace_size,
            &beta,
            l.dsrcTensorDesc,
            input16); // state.delta);
        cuda_convert_f16_to_f32((float *)input16, input16_size, state.delta);
        if (l.binary || l.xnor) swap_binary(&l);
        if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
    }
#else // CUDNN_HALF
    // calculate conv weight updates
    // if used: beta=1 then loss decreases faster
    cudnnConvolutionBackwardFilter(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        state.input,
        l.ddstTensorDesc,
        l.delta_gpu,
        l.convDesc,
        l.bf_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dweightDesc,
        l.weight_updates_gpu);
    if(state.delta){
        if(l.binary || l.xnor) swap_binary(&l);
        // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
        // calculate delta for the next layer
        cudnnConvolutionBackwardData(cudnn_handle(),
            &one,
            l.weightDesc,
            l.weights_gpu,
            l.ddstTensorDesc,
            l.delta_gpu,
            l.convDesc,
            l.bd_algo,
            state.workspace,
            l.workspace_size,
            &one,
            l.dsrcTensorDesc,
            state.delta);
        if(l.binary || l.xnor) swap_binary(&l);
        if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
    }
#endif // CUDNN_HALF
#else // CUDNN
    // fallback: per-image GEMM; dW += delta * im2col(input)^T, and
    // dX = col2im(W^T * delta) when the previous layer wants a delta
    int m = l.n;
    int n = l.size*l.size*l.c;
    int k = l.out_w*l.out_h;
    int i;
    for(i = 0; i < l.batch; ++i){
        float * a = l.delta_gpu;
        float * b = state.workspace;
        float * c = l.weight_updates_gpu;
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
        if(state.delta){
            if(l.binary || l.xnor) swap_binary(&l);
            float * a = l.weights_gpu;
            float * b = l.delta_gpu;
            float * c = state.workspace;
            gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
            col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
            if(l.binary || l.xnor) {
                swap_binary(&l);
            }
            if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
        }
    }
#endif
}
// Copy a convolutional layer's parameters and accumulated updates from the
// GPU buffers back into their host-side mirrors.
void pull_convolutional_layer(convolutional_layer layer)
{
    cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
    cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
    cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
    cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
    if (layer.batch_normalize){
        // batchnorm parameters and running statistics
        cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
        cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
        cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
    }
    if (layer.adam){
        // Adam first/second moment estimates
        cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
        cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
    }
}
// Copy a convolutional layer's parameters and accumulated updates from the
// host-side mirrors into the GPU buffers; with CUDNN_HALF also refresh the
// fp16 copy of the weights used by the half-precision cuDNN paths.
void push_convolutional_layer(convolutional_layer layer)
{
    cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
#ifdef CUDNN_HALF
    // keep the fp16 shadow weights in sync with the fp32 master copy
    cuda_convert_f32_to_f16(layer.weights_gpu, layer.c*layer.n*layer.size*layer.size, layer.weights_gpu16);
#endif
    cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
    cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
    cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
    if (layer.batch_normalize){
        // batchnorm parameters and running statistics
        cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
        cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
        cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
    }
    if (layer.adam){
        // Adam first/second moment estimates
        cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
        cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
    }
}
// Apply one optimizer step (SGD with momentum + weight decay, or Adam) to a
// convolutional layer's biases, batchnorm scales, and weights on the GPU.
// `batch` is the effective batch size used to scale the learning rate.
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
    int size = layer.size*layer.size*layer.c*layer.n;
    // biases (and batchnorm scales) always use plain SGD with momentum
    axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
    scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
    if(layer.scales_gpu){
        axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
        scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
    }
    if(layer.adam){
        // Adam moments: m = B1*m - (1-B1)*g ; v = B2*v + (1-B2)*g^2.
        // mul_ongpu appears to square weight_updates IN PLACE — presumably
        // safe only because the buffer is zeroed by fill_ongpu right after;
        // TODO confirm mul_ongpu is an elementwise product.
        scal_ongpu(size, layer.B1, layer.m_gpu, 1);
        scal_ongpu(size, layer.B2, layer.v_gpu, 1);
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
        mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
        adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
        fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
    }else{
        // update weights:
        // weights_gpu = weights_gpu*(1 - decay*lr) + weight_updates_gpu*lr / (batch*subdivision) =
        // weights_gpu*(1 - 0.0005*0.001) + weight_updates_gpu*0.001/(64*8) =
        // weights_gpu * 0.999 999 5 + weight_updates_gpu * 0.000 001 953125
        //
        // weight_updates_gpu = (weight_updates_gpu - weights_gpu*decay*batch*subdivision)*momentum =
        // (weight_updates_gpu - weights_gpu * 0.0005 * 64 * 8) * 0.9 =
        // weight_updates_gpu*0.9 - weights_gpu*0.2304
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
        scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
    }
}
| 81535a51da1f97ec3daa539dfb1c47bf3b59a2e1.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#ifdef CUDNN
#pragma comment(lib, "cudnn.lib")
#endif
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
// Map each of the n input values to its sign: +1 for values >= 0, -1
// otherwise. One thread per element, launched over a (possibly 2D) grid
// of 1D blocks; excess threads simply do nothing.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x);
    if (idx < n)
    {
        float sign = -1;
        if (x[idx] >= 0) sign = 1;
        binary[idx] = sign;
    }
}
// Host wrapper: binarize n floats into +1/-1 on the GPU
// (BLOCK threads per block, grid sized by cuda_gridsize).
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
    // peek (not get) so the sticky error state is left for later handlers
    check_error(cudaPeekAtLastError());
}
// Binarize an input treated as an n x size matrix, one thread per column s:
// compute the mean absolute value down column s (across the n rows), then
// write +mean / -mean depending on the sign of each original element.
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    int s = threadIdx.x + blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x);
    if (s >= size) return;
    float sum = 0;
    for (int row = 0; row < n; ++row) {
        sum += abs(input[row * size + s]);
    }
    const float mean = sum / n;
    for (int row = 0; row < n; ++row) {
        binary[row * size + s] = (input[row * size + s] > 0) ? mean : -mean;
    }
}
// Host wrapper: binarize an n x size input per spatial position (one thread
// per column; BLOCK threads per block, grid sized by cuda_gridsize).
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// Per-filter weight binarization (XNOR-net style): each thread handles one
// of the n filters, computes the mean absolute weight of its `size` values,
// and writes +mean / -mean depending on the sign of each original weight.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = threadIdx.x + blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x);
    if (f < n)
    {
        float *w = weights + f * size;
        float *b = binary + f * size;
        float sum = 0;
        for (int j = 0; j < size; ++j) {
            sum += abs(w[j]);
        }
        const float scale = sum / size;
        for (int j = 0; j < size; ++j) {
            b[j] = (w[j] > 0) ? scale : -scale;
        }
    }
}
// Host wrapper: binarize n filters of `size` weights each on the GPU
// (one thread per filter; grid sized by cuda_gridsize).
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// Elementwise fp32 -> fp16 conversion kernel; one thread per element,
// launched over a 1D grid with excess threads idling.
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size)
    {
        output_f16[i] = __float2half(input_f32[i]);
    }
}
// Host wrapper: convert `size` floats to half precision on the current CUDA
// stream. `output_f16` actually points at a half buffer; it is typed float*
// so fp32 call sites can pass their buffers unchanged.
void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) {
    cuda_f32_to_f16 <<< size / BLOCK + 1, BLOCK, 0, get_cuda_stream() >>> (input_f32, size, (half *)output_f16);
}
// Elementwise fp16 -> fp32 conversion kernel; one thread per element,
// launched over a 1D grid with excess threads idling.
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size)
    {
        output_f32[i] = __half2float(input_f16[i]);
    }
}
// Host wrapper: convert `size` halfs back to float on the current CUDA
// stream. `input_f16` actually points at a half buffer; it is typed float*
// so fp32 call sites can pass their buffers unchanged.
void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) {
    cuda_f16_to_f32 <<< size / BLOCK + 1, BLOCK, 0, get_cuda_stream() >>> ((half *)input_f16, size, output_f32);
}
// Allocate a device buffer of n halfs; when `src` (a device fp32 array of n
// elements) is non-NULL, also fill the new buffer with its fp16 conversion.
// Returns the device pointer; aborts via error() if the allocation yields
// no usable pointer.
//
// Fixes: `dst16` was left uninitialized (so the failure check could read an
// indeterminate value if cudaMalloc did not write it), and the NULL check
// ran only AFTER the pointer had already been handed to the conversion
// kernel. The pointer is now zero-initialized and validated before use.
half *cuda_make_f16_from_f32_array(float *src, size_t n)
{
    half *dst16 = NULL;
    size_t size = sizeof(half)*n;
    check_error(cudaMalloc((void **)&dst16, size));
    if (!dst16) error("Cuda malloc failed\n");
    if (src) {
        cuda_convert_f32_to_f16(src, n, (float *)dst16);
    }
    return dst16;
}
// Kernel: zero one ground-truth box region across all channels of a feature
// map. One thread per (col,row) position inside the box; `size` is the
// number of box pixels, so surplus threads exit early.
// NOTE(review): `batchNum` is unused here — data_im is presumably already
// offset to the right batch item by the caller; confirm.
__global__ void apply_mask_gpu(float* data_im,
const int height, const int width, const int channel, const int batchNum,
const int x1, const int y1, const int x2, const int y2, const int size) {
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    int boxWidth = x2 - x1 + 1;
    int boxHeight = y2 - y1 + 1;
    //fprintf(stderr, "x1=%d y1=%d x2=%d y2=%d\n",x1,y1,x2,y2);
    if (index >= size)
        return;
    // decompose the flat thread index into box-local (col,row)
    int col = (index % boxWidth) + x1;
    index = (index / boxWidth);
    int row = (index % boxHeight) + y1;
    index = (index / boxHeight);
    // NOTE(review): col/row are not re-clamped here — a negative x1/y1 or an
    // x2 equal to width from the caller would index out of bounds; verify
    // the caller's clamping.
    for (int c = 0; c < channel; c++){
        data_im[col + width*(row + height*c)] = 0;
    }
}
// Zero out (mask) the regions of one image's feature maps that are covered
// by the ground-truth boxes of batch item `batchNum`.
// net.gt appears to hold gtMax boxes per batch item, 4 floats per box
// (xc, yc, w, h normalized to [0,1]) with a leading -1 marking an unused
// slot — TODO confirm against the code that fills net.gt.
void mask_gpu(float* data_im,
const int height, const int width, const int channel, const int batchNum,
network net) {
    int gtCount;
    int total = 0;
    for(int k = 0; k < net.gtMax; k++){
        // flat offset of box k for this batch item
        gtCount = net.gtMax*4*batchNum + k*4;
        if(net.gt[gtCount] == -1)
            continue;
        //fprintf(stderr, "x1=%f y1=%f x2=%f y2=%f\n",net.gt[gtCount],net.gt[gtCount+1],net.gt[gtCount+2],net.gt[gtCount+3]);
        // convert normalized center/size to pixel corner coordinates
        int boxXc = (int) (width * (float)(net.gt[gtCount]));
        int boxYc = (int) (height * (float)(net.gt[gtCount + 1]));
        int boxWidth = (int) (width * (float)(net.gt[gtCount + 2]));
        int boxHeight = (int) (height * (float)(net.gt[gtCount + 3]));
        int x1 = (int) (boxXc - (boxWidth / 2));
        int y1 = (int) (boxYc - (boxHeight / 2));
        // NOTE(review): x1/y1 are not clamped to 0 and x2/y2 are clamped to
        // width/height instead of width-1/height-1, so the kernel may index
        // outside the map for boxes touching the image border — verify intent.
        int x2 = min((int) (boxXc + (boxWidth / 2)), width);
        int y2 = min((int) (boxYc + (boxHeight / 2)), height);
        //fprintf(stderr, "x1=%d y1=%d x2=%d y2=%d\n",x1,y1,x2,y2);
        //fprintf(stderr, "width=%d height=%d channel=%d \n",width,height, channel);
        // one thread per pixel inside the box
        total = (x2 - x1 + 1) * (y2 - y1 + 1);
        //fprintf(stderr, "total=%d ",total);
        apply_mask_gpu<<<cuda_gridsize(total), BLOCK>>>(data_im, height, width, channel, batchNum, x1, y1, x2, y2, total);
    }
    //fprintf(stderr, "\n");
}
// Forward pass of a convolutional layer on the GPU.
// With CUDNN the convolution runs through cudnnConvolutionForward — in fp16
// when CUDNN_HALF is set (static scratch buffers, grown on demand) — and
// otherwise falls back to per-image im2col + GEMM. Afterwards: optional
// batchnorm, bias add, activation, binary/XNOR weight restore, and an
// optional ground-truth mask that zeroes boxed regions of the output.
//
// Fix: the trailing `if (l.mask)` loop reused an index `i` that is declared
// only inside the non-CUDNN #else branch, so the function did not compile
// when CUDNN was defined; the loop now declares its own index.
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
    if(l.binary){
        // binarize the weights and temporarily swap them in for this pass
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
    }
    if(l.xnor){
        // XNOR-net: binarize both the weights and the layer input
        binarize_weights_gpu(l.weights_gpu, l.n, l.c*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
        binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
        state.input = l.binary_input_gpu;
    }
#ifdef CUDNN
    float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT
    float alpha = 1, beta = 0;
#ifdef CUDNN_HALF
    // Note: For improved performance it is advised to use beta[0] = 0.0.
    // For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH;
    // 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF
    // 2. or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED
    // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
    // fp16 scratch buffers are static, shared by every layer, grown on demand.
    // NOTE(review): statics make this non-reentrant — confirm single-threaded use.
    const size_t input16_size = l.batch*l.c*l.w*l.h;
    static size_t max_input16_size = input16_size;
    static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
    const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;
    static size_t max_output16_size = output16_size;
    static half* output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
    if (max_input16_size < input16_size) {
        max_input16_size = input16_size;
        cuda_free((float *)input16);
        input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
    }
    if (max_output16_size < output16_size) {
        max_output16_size = output16_size;
        cuda_free((float *)output16);
        output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
    }
    cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
    //fill_ongpu(output16_size / 2, 0, (float *)output16, 1);
    cudnnConvolutionForward(cudnn_handle(),
        &alpha,
        l.srcTensorDesc,
        input16,
        l.weightDesc,
        l.weights_gpu16,
        l.convDesc,
        l.fw_algo,
        state.workspace,
        l.workspace_size,
        &beta,
        l.dstTensorDesc,
        output16);
    cuda_convert_f16_to_f32((float *)output16, output16_size, l.output_gpu);
#else
    cudnnConvolutionForward(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        state.input,
        l.weightDesc,
        l.weights_gpu,
        l.convDesc,
        l.fw_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dstTensorDesc,
        l.output_gpu);
#endif
#else
    // fallback: one im2col + GEMM per batch image
    int i;
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = l.out_w*l.out_h;
    for(i = 0; i < l.batch; ++i){
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        float * a = l.weights_gpu;
        float * b = state.workspace;
        float * c = l.output_gpu;
        gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n);
    }
#endif
    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
    //if(l.dot > 0) dot_error_gpu(l);
    if(l.binary || l.xnor) swap_binary(&l);
    //cudaDeviceSynchronize(); // for correct profiling of performance
    if (l.mask)
        for(int i = 0; i < l.batch; ++i){
            mask_gpu(l.output_gpu + i*l.out_h*l.out_w*l.out_c, l.out_h, l.out_w, l.out_c, i, state.net);
        }
}
// Backward pass of a convolutional layer on the GPU: activation gradient,
// bias gradients, optional batchnorm backward, then the weight-update and
// input-delta convolutions (cuDNN when available, im2col/GEMM otherwise).
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    // delta *= activation'(output)
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
    } else {
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
    }
    // keep the raw input around: the XNOR path rebinds state.input below but
    // still needs the original values for the HARDTAN gradient at the end
    float *original_input = state.input;
    if(l.xnor) state.input = l.binary_input_gpu;
#ifdef CUDNN
    float one = 1;
    float alpha = 1, beta = 0;
#ifdef CUDNN_HALF
    // fp16 path: static scratch buffers are shared by every layer in the
    // process and grown on demand.
    // NOTE(review): statics make this non-reentrant — confirm single-threaded use.
    const size_t input16_size = l.batch*l.c*l.w*l.h;
    static size_t max_input16_size = input16_size;
    static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
    const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h;
    static size_t max_delta16_size = delta16_size;
    static half* delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
    if (max_input16_size < input16_size) {
        max_input16_size = input16_size;
        cuda_free((float *)input16);
        input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
    }
    if (max_delta16_size < delta16_size) {
        max_delta16_size = delta16_size;
        cuda_free((float *)delta16);
        delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
    }
    cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
    cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, (float *)delta16);
    // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
    // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
    // calculate conv weight updates
    // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum
    // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m
    cuda_convert_f32_to_f16(l.weight_updates_gpu, l.c*l.n*l.size*l.size, l.weight_updates_gpu16);
    cudnnConvolutionBackwardFilter(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        input16, //state.input,
        l.ddstTensorDesc,
        delta16, //l.delta_gpu,
        l.convDesc,
        l.bf_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dweightDesc,
        l.weight_updates_gpu16); // l.weight_updates_gpu);
    cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.c*l.n*l.size*l.size, l.weight_updates_gpu);
    if (state.delta) {
        if (l.binary || l.xnor) swap_binary(&l);
        // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
        // calculate delta for the next layer
        // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16
        // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16)
        // dx is written into input16 (reused as scratch), then converted out
        cudnnConvolutionBackwardData(cudnn_handle(),
            &alpha,
            l.weightDesc,
            l.weights_gpu16, //l.weights_gpu,
            l.ddstTensorDesc,
            delta16, //l.delta_gpu,
            l.convDesc,
            l.bd_algo,
            state.workspace,
            l.workspace_size,
            &beta,
            l.dsrcTensorDesc,
            input16); // state.delta);
        cuda_convert_f16_to_f32((float *)input16, input16_size, state.delta);
        if (l.binary || l.xnor) swap_binary(&l);
        if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
    }
#else // CUDNN_HALF
    // calculate conv weight updates
    // if used: beta=1 then loss decreases faster
    cudnnConvolutionBackwardFilter(cudnn_handle(),
        &one,
        l.srcTensorDesc,
        state.input,
        l.ddstTensorDesc,
        l.delta_gpu,
        l.convDesc,
        l.bf_algo,
        state.workspace,
        l.workspace_size,
        &one,
        l.dweightDesc,
        l.weight_updates_gpu);
    if(state.delta){
        if(l.binary || l.xnor) swap_binary(&l);
        // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData
        // calculate delta for the next layer
        cudnnConvolutionBackwardData(cudnn_handle(),
            &one,
            l.weightDesc,
            l.weights_gpu,
            l.ddstTensorDesc,
            l.delta_gpu,
            l.convDesc,
            l.bd_algo,
            state.workspace,
            l.workspace_size,
            &one,
            l.dsrcTensorDesc,
            state.delta);
        if(l.binary || l.xnor) swap_binary(&l);
        if(l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);
    }
#endif // CUDNN_HALF
#else // CUDNN
    // fallback: per-image GEMM; dW += delta * im2col(input)^T, and
    // dX = col2im(W^T * delta) when the previous layer wants a delta
    int m = l.n;
    int n = l.size*l.size*l.c;
    int k = l.out_w*l.out_h;
    int i;
    for(i = 0; i < l.batch; ++i){
        float * a = l.delta_gpu;
        float * b = state.workspace;
        float * c = l.weight_updates_gpu;
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
        gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
        if(state.delta){
            if(l.binary || l.xnor) swap_binary(&l);
            float * a = l.weights_gpu;
            float * b = l.delta_gpu;
            float * c = state.workspace;
            gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
            col2im_ongpu(state.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
            if(l.binary || l.xnor) {
                swap_binary(&l);
            }
            if(l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w);
        }
    }
#endif
}
// Copy a convolutional layer's parameters and accumulated updates from the
// GPU buffers back into their host-side mirrors.
void pull_convolutional_layer(convolutional_layer layer)
{
    cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
    cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
    cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
    cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
    if (layer.batch_normalize){
        // batchnorm parameters and running statistics
        cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
        cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
        cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
    }
    if (layer.adam){
        // Adam first/second moment estimates
        cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
        cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
    }
}
// Copy a convolutional layer's parameters and accumulated updates from the
// host-side mirrors into the GPU buffers; with CUDNN_HALF also refresh the
// fp16 copy of the weights used by the half-precision cuDNN paths.
void push_convolutional_layer(convolutional_layer layer)
{
    cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
#ifdef CUDNN_HALF
    // keep the fp16 shadow weights in sync with the fp32 master copy
    cuda_convert_f32_to_f16(layer.weights_gpu, layer.c*layer.n*layer.size*layer.size, layer.weights_gpu16);
#endif
    cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
    cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
    cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
    if (layer.batch_normalize){
        // batchnorm parameters and running statistics
        cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
        cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
        cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
    }
    if (layer.adam){
        // Adam first/second moment estimates
        cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
        cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
    }
}
// Apply one optimizer step (SGD with momentum + weight decay, or Adam) to a
// convolutional layer's biases, batchnorm scales, and weights on the GPU.
// `batch` is the effective batch size used to scale the learning rate.
void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
{
    int size = layer.size*layer.size*layer.c*layer.n;
    // biases (and batchnorm scales) always use plain SGD with momentum
    axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
    scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
    if(layer.scales_gpu){
        axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1);
        scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
    }
    if(layer.adam){
        // Adam moments: m = B1*m - (1-B1)*g ; v = B2*v + (1-B2)*g^2.
        // mul_ongpu appears to square weight_updates IN PLACE — presumably
        // safe only because the buffer is zeroed by fill_ongpu right after;
        // TODO confirm mul_ongpu is an elementwise product.
        scal_ongpu(size, layer.B1, layer.m_gpu, 1);
        scal_ongpu(size, layer.B2, layer.v_gpu, 1);
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
        mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
        adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
        fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
    }else{
        // update weights:
        // weights_gpu = weights_gpu*(1 - decay*lr) + weight_updates_gpu*lr / (batch*subdivision) =
        // weights_gpu*(1 - 0.0005*0.001) + weight_updates_gpu*0.001/(64*8) =
        // weights_gpu * 0.999 999 5 + weight_updates_gpu * 0.000 001 953125
        //
        // weight_updates_gpu = (weight_updates_gpu - weights_gpu*decay*batch*subdivision)*momentum =
        // (weight_updates_gpu - weights_gpu * 0.0005 * 64 * 8) * 0.9 =
        // weight_updates_gpu*0.9 - weights_gpu*0.2304
        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
        axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
        scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
    }
}
|
5efbb916522d46b24f54d8f2afacf97cb171615b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_hip.cuh"
#include "sobel.cuh"
#include "cpu_functions.cu"
/* prototype for call below that wraps launching the filter kernel */
inline void filter ( const std::string in, const std::string oute);
/**
 * Entry point: expects exactly two arguments — the input 512x512 .pgm path
 * and the output path — then runs the GPU Sobel filter on the image.
 *
 * Fix: the original checked `argc != 3` (i.e. two user arguments) but then
 * read argv[1], argv[2] AND argv[3]; with the advertised two arguments
 * argv[3] is NULL and constructing std::string from a null char* is
 * undefined behavior. The (unused) first read is removed and the paths are
 * taken from argv[1] and argv[2].
 */
int main (int argc, char* argv[])
{
    if ( argc != 3 )
    {
        std::cout << "Incorrect usage, execute with parameters: <input 512x512 .pgm image path> <output path>" << std::endl;
        return 1;
    }
    std::string input_file_path = std::string ( argv[ 1 ] );
    std::string outpu_file_path = std::string ( argv[ 2 ] );
    /* perform sobel filter with GPU */
    filter ( input_file_path, outpu_file_path );
    return 0;
}
/* Run the Sobel filter on one 512x512 .pgm image on the GPU: load the image,
   copy it to the device, launch sobel_kernel, copy the result back, report
   the elapsed time, and save the output image. */
void filter ( std::string inputfilename, std::string outputfilename )
{
    unsigned char * host_lena = NULL;
    unsigned char * dev_input = 0;
    unsigned char * dev_output = 0;
    hipSetDevice ( 0 );
    /* load up lena, allocates memory if not given */
    unsigned int width;
    unsigned int height;
    sdkLoadPGM<unsigned char> ( inputfilename.c_str (), &host_lena, &width, &height );
    start ( "gpu timer" );
    /* create space on card for lena IN */
    hipMalloc ( ( void** )&dev_input, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ) );
    /* create space on card for lena OUT */
    hipMalloc ( ( void** )&dev_output, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ) );
    /* copy host lena into card space */
    hipMemcpy ( dev_input, host_lena, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ), hipMemcpyHostToDevice );
    /* define kernel parameters: dim3(32) gives 1D blocks of 32 threads with
       y == 1, so the grid is (IMAGE_SIZE/32) x IMAGE_SIZE blocks — one
       thread per pixel of the IMAGE_SIZE x IMAGE_SIZE image */
    dim3 threadsPerBlock ( 32 );
    dim3 numBlocks ( IMAGE_SIZE / threadsPerBlock.x, IMAGE_SIZE / threadsPerBlock.y );
    /* Launch a kernel on the GPU with 32 threads for each block */
    hipLaunchKernelGGL(( sobel_kernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, dev_input, dev_output );
    /* finish up.
       NOTE(review): `error` is assigned twice but never inspected —
       launch/execution failures are silently dropped */
    hipError_t error = hipDeviceSynchronize ();
    error = hipGetLastError ();
    /* copy the data off.
       NOTE(review): 512*512 bytes = 256 KB on the stack — large for some
       targets; consider heap allocation */
    unsigned char out[ IMAGE_SIZE*IMAGE_SIZE ] = { 0 };
    hipMemcpy ( out, dev_output, IMAGE_SIZE*IMAGE_SIZE * sizeof ( unsigned char ), hipMemcpyDeviceToHost );
    auto time = get_time ( "gpu timer" );
    std::cout << "Time: " << time << std::endl;
    /* (disabled) accuracy check against the CPU reference:
       float accuracy = calculate_accuracy ( &out[0], host_lena );
       std::cout << "Accuracy: " << accuracy*100 << "% of pixels were correct" << std::endl; */
    /* save output file */
    sdkSavePGM ( outputfilename.c_str (), out, width, height );
    /* cleanup */
    free ( host_lena ); host_lena = NULL;
    hipFree ( dev_input );
    hipFree ( dev_output );
    hipDeviceReset ();
}
| 5efbb916522d46b24f54d8f2afacf97cb171615b.cu | #include "common.cuh"
#include "sobel.cuh"
#include "cpu_functions.cu"
/* prototype for call below that wraps launching the filter kernel */
inline void filter ( const std::string in, const std::string oute);
/**
 * Entry point: expects exactly two arguments — the input 512x512 .pgm path
 * and the output path — then runs the GPU Sobel filter on the image.
 *
 * Fix: the original checked `argc != 3` (i.e. two user arguments) but then
 * read argv[1], argv[2] AND argv[3]; with the advertised two arguments
 * argv[3] is NULL and constructing std::string from a null char* is
 * undefined behavior. The (unused) first read is removed and the paths are
 * taken from argv[1] and argv[2].
 */
int main (int argc, char* argv[])
{
    if ( argc != 3 )
    {
        std::cout << "Incorrect usage, execute with parameters: <input 512x512 .pgm image path> <output path>" << std::endl;
        return 1;
    }
    std::string input_file_path = std::string ( argv[ 1 ] );
    std::string outpu_file_path = std::string ( argv[ 2 ] );
    /* perform sobel filter with GPU */
    filter ( input_file_path, outpu_file_path );
    return 0;
}
/* Run the Sobel filter on one 512x512 .pgm image on the GPU: load the image,
   copy it to the device, launch sobel_kernel, copy the result back, report
   the elapsed time, and save the output image. */
void filter ( std::string inputfilename, std::string outputfilename )
{
    unsigned char * host_lena = NULL;
    unsigned char * dev_input = 0;
    unsigned char * dev_output = 0;
    cudaSetDevice ( 0 );
    /* load up lena, allocates memory if not given */
    unsigned int width;
    unsigned int height;
    sdkLoadPGM<unsigned char> ( inputfilename.c_str (), &host_lena, &width, &height );
    start ( "gpu timer" );
    /* create space on card for lena IN */
    cudaMalloc ( ( void** )&dev_input, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ) );
    /* create space on card for lena OUT */
    cudaMalloc ( ( void** )&dev_output, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ) );
    /* copy host lena into card space */
    cudaMemcpy ( dev_input, host_lena, IMAGE_SIZE * IMAGE_SIZE * sizeof ( unsigned char ), cudaMemcpyHostToDevice );
    /* define kernel parameters: dim3(32) gives 1D blocks of 32 threads with
       y == 1, so the grid is (IMAGE_SIZE/32) x IMAGE_SIZE blocks — one
       thread per pixel of the IMAGE_SIZE x IMAGE_SIZE image */
    dim3 threadsPerBlock ( 32 );
    dim3 numBlocks ( IMAGE_SIZE / threadsPerBlock.x, IMAGE_SIZE / threadsPerBlock.y );
    /* Launch a kernel on the GPU with 32 threads for each block */
    sobel_kernel<<<numBlocks, threadsPerBlock >>>( dev_input, dev_output );
    /* finish up.
       NOTE(review): `error` is assigned twice but never inspected —
       launch/execution failures are silently dropped */
    cudaError_t error = cudaDeviceSynchronize ();
    error = cudaGetLastError ();
    /* copy the data off.
       NOTE(review): 512*512 bytes = 256 KB on the stack — large for some
       targets; consider heap allocation */
    unsigned char out[ IMAGE_SIZE*IMAGE_SIZE ] = { 0 };
    cudaMemcpy ( out, dev_output, IMAGE_SIZE*IMAGE_SIZE * sizeof ( unsigned char ), cudaMemcpyDeviceToHost );
    auto time = get_time ( "gpu timer" );
    std::cout << "Time: " << time << std::endl;
    /* (disabled) accuracy check against the CPU reference:
       float accuracy = calculate_accuracy ( &out[0], host_lena );
       std::cout << "Accuracy: " << accuracy*100 << "% of pixels were correct" << std::endl; */
    /* save output file */
    sdkSavePGM ( outputfilename.c_str (), out, width, height );
    /* cleanup */
    free ( host_lena ); host_lena = NULL;
    cudaFree ( dev_input );
    cudaFree ( dev_output );
    cudaDeviceReset ();
}
|
deaac142657f0aa9df4111118dc0405d940a0803.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cudaDefs.h>
#include <glew.h>
#include <freeglut.h>
#include <imageManager.h>
// includes, cuda
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
#define BLOCK_DIM 32
#define TEXTURE_REFERENCE_API 0
#define TEXTURE_OBJECT_API 1
//#define USED_API TEXTURE_REFERENCE_API
#define USED_API TEXTURE_OBJECT_API
// Configuration
unsigned int startPositionX = 4030;
unsigned int startPositionY = 1450;
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
//OpenGL
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 618;//1024;
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
unsigned int pboID;
unsigned int textureID;
static int fpsCount = 0;
static int fpsLimit = 1;
StopWatchInterface* totalTimer = nullptr;
StopWatchInterface* workerTimer = nullptr;
char windowTitle[256];
float fps;
const int cbTimerDelay = 10; //10 ms
//CUDA
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
#if USED_API == TEXTURE_REFERENCE_API
hipChannelFormatDesc cudaTexChannelDesc;
texture<uchar4, 2, hipReadModeElementType> cudaTexRef;
#else
hipChannelFormatDesc cudaTexChannelDesc;
hipResourceDesc resDesc;
hipTextureDesc texDesc;
hipTextureObject_t cudaTex = 0;
#endif
unsigned char run_time = 0;
//OpenGL
void initGL(int argc, char** argv);
void releaseOpenGL();
void prepareTexture(const char* imageFileName);
void preparePBO();
//OpenGL Callback functions
void my_display();
void my_resize(GLsizei w, GLsizei h);
void my_idle();
void my_timer(int value);
//CUDA
void initCUDAtex();
void cudaWorker();
void releaseCUDA();
int main(int argc, char* argv[])
{
	// Set up CUDA/HIP, timers, GL window, texture, PBO and interop, then run
	// the freeglut main loop.
	initializeCUDA(deviceProp);
	sdkCreateTimer(&totalTimer);
	sdkResetTimer(&totalTimer);
	sdkCreateTimer(&workerTimer);
	sdkResetTimer(&workerTimer);
	initGL(argc, argv);
	//prepareTexture("D:/Documents/Projekty/kola/PA2/cv8/assets/lena.png");
	prepareTexture("D:/Documents/Projekty/kola/PA2/cv8/assets/world.png");
	preparePBO();
	initCUDAtex();
	// Fix: register the cleanup handler BEFORE entering the main loop.
	// freeglut's default window-close action calls exit(), so glutMainLoop()
	// normally never returns and an atexit() placed after it (as the
	// original did) is never registered and never runs.
	atexit([]()
	{
		sdkDeleteTimer(&totalTimer);
		sdkDeleteTimer(&workerTimer);
		releaseCUDA();
		releaseOpenGL();
	});
	//start rendering mainloop
	glutMainLoop();
}
#pragma region OpenGL Routines
// Creates the freeglut window, registers the GLUT callbacks and sets basic
// GL state. Must run before prepareTexture/preparePBO (they need a context).
void initGL(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("Freeglut window");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
// The render tick is driven by a repeating timer, not the idle callback.
//glutIdleFunc(my_idle);
glutTimerFunc(cbTimerDelay, my_timer, 0);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
// (GLEW must be initialized after a GL context exists)
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void prepareTexture(const char* imageFileName)
{
	// Loads an image from disk, converts it to 32-bit BGRA and uploads it as
	// the GL texture that CUDA/HIP will later map. Sets the global
	// imageWidth/imageHeight/imageBPP/imagePitch from the converted bitmap.
	FreeImage_Initialise();
	FIBITMAP *loaded = ImageManager::GenericLoader(imageFileName, 0);
	// Fix: FreeImage_ConvertTo32Bits returns a NEW bitmap; the original
	// overwrote the only pointer to the loaded bitmap and leaked it.
	FIBITMAP *tmp = FreeImage_ConvertTo32Bits(loaded);
	FreeImage_Unload(loaded);
	imageWidth = FreeImage_GetWidth(tmp);
	imageHeight = FreeImage_GetHeight(tmp);
	imageBPP = FreeImage_GetBPP(tmp);
	imagePitch = FreeImage_GetPitch(tmp);
	//OpenGL Texture
	glEnable(GL_TEXTURE_2D);
	glGenTextures(1,&textureID);
	glBindTexture( GL_TEXTURE_2D, textureID);
	//WARNING: Just some of inner format are supported by CUDA!!!
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
	FreeImage_Unload(tmp);
	FreeImage_DeInitialise();
}
void preparePBO()
{
	// Creates the pixel-unpack PBO the kernel writes into each frame,
	// sized for a 4-channel 8-bit image of imageWidth x imageHeight.
	glGenBuffers(1, &pboID);
	// OpenGL is state-based: make this the current UNPACK buffer first.
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
	const unsigned int pboBytes = imageWidth * imageHeight * 4;
	glBufferData(GL_PIXEL_UNPACK_BUFFER, pboBytes, NULL, GL_DYNAMIC_COPY);
}
void releaseOpenGL()
{
	// Deletes the GL texture and PBO if they were ever created
	// (GL object names are non-zero once generated).
	if (textureID > 0) {
		glDeleteTextures(1, &textureID);
	}
	if (pboID > 0) {
		glDeleteBuffers(1, &pboID);
	}
}
#pragma endregion
#pragma region OpenGL Callbacs
void my_display()
{
	// Draws the current texture as a single quad covering the viewport,
	// then swaps the double buffer.
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glEnable(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, textureID);
	const double w = (double)viewportWidth;
	const double h = (double)viewportHeight;
	const double texCoords[4][2] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
	const double vertices[4][2] = { {0, 0}, {w, 0}, {w, h}, {0, h} };
	glBegin(GL_QUADS);
	for (int corner = 0; corner < 4; ++corner)
	{
		glTexCoord2d(texCoords[corner][0], texCoords[corner][1]);
		glVertex2d(vertices[corner][0], vertices[corner][1]);
	}
	glEnd();
	glDisable(GL_TEXTURE_2D);
	glFlush();
	glutSwapBuffers();
}
// Reshape callback: tracks the new window size and rebuilds an orthographic
// projection mapping GL units 1:1 to pixels, then requests a redraw.
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth=w;
viewportHeight=h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0,0,viewportWidth,viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,viewportWidth, 0,viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
	// Idle-driven render tick (currently unused; see the commented-out
	// glutIdleFunc registration in initGL). Times one cudaWorker() pass and
	// periodically updates the window title with FPS / worker stats.
	sdkStartTimer(&totalTimer);
	sdkStartTimer(&workerTimer);
	cudaWorker();
	sdkStopTimer(&workerTimer);
	glutPostRedisplay();
	sdkStopTimer(&totalTimer);
	if (++fpsCount == fpsLimit)
	{
		fps = 1000.0f / sdkGetAverageTimerValue(&totalTimer);
		sprintf(windowTitle, "Freeglut window (%d x %d): %.1f fps worker: %.5f ms", viewportWidth, viewportHeight, fps, sdkGetAverageTimerValue(&workerTimer)/1000.f);
		glutSetWindowTitle(windowTitle);
		fpsCount = 0;
		fpsLimit = (int)((fps > 1.0f) ? fps : 1.0f);
		sdkResetTimer(&totalTimer);
		// Fix: also reset the worker timer each reporting interval, as
		// my_timer does; otherwise the displayed worker average spans the
		// entire run instead of the last interval.
		sdkResetTimer(&workerTimer);
	}
}
void my_timer(int value)
{
	// Timer-driven render tick, re-armed every cbTimerDelay ms. Times one
	// cudaWorker() pass and periodically updates the window title.
	sdkStartTimer(&totalTimer);
	sdkStartTimer(&workerTimer);
	cudaWorker();
	sdkStopTimer(&workerTimer);
	glutPostRedisplay();
	sdkStopTimer(&totalTimer);
	if (++fpsCount == fpsLimit)
	{
		// Fix: assign the file-scope fps instead of shadowing it with a
		// local `float fps` — the original left the global stale on this
		// (the active) code path, inconsistent with my_idle.
		fps = 1000.0f / sdkGetAverageTimerValue(&totalTimer);
		sprintf(windowTitle, "Freeglut window (%d x %d): %.1f fps worker: %.5f ms", viewportWidth, viewportHeight, fps, sdkGetAverageTimerValue(&workerTimer) / 1000.f);
		glutSetWindowTitle(windowTitle);
		fpsCount = 0;
		fpsLimit = (int)((fps > 1.0f) ? fps : 1.0f);
		sdkResetTimer(&totalTimer);
		sdkResetTimer(&workerTimer);
	}
	glutTimerFunc(cbTimerDelay, my_timer, 0);
}
#pragma endregion
#pragma region CUDA Routines
#if USED_API == TEXTURE_REFERENCE_API
// Legacy texture-reference variant (only compiled when USED_API ==
// TEXTURE_REFERENCE_API): samples the globally bound cudaTexRef and writes a
// 4-byte pixel per thread into the PBO, replacing the first channel with the
// animated someValue. Expects a 2-D launch covering pboWidth x pboHeight.
__global__ void kernelRefAPI(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char* pbo)
{
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
// Byte offset of this thread's 4-channel pixel in the PBO.
unsigned int pboElementOffset = (ty * pboWidth + tx) * 4;
if ((tx < pboWidth) && (ty < pboHeight))
{
uchar4 texel = tex2D(cudaTexRef, tx, ty);
pbo[pboElementOffset++] = someValue;
pbo[pboElementOffset++] = texel.y;
pbo[pboElementOffset++] = texel.z;
pbo[pboElementOffset++] = texel.w;
}
}
#endif
#if USED_API == TEXTURE_OBJECT_API
// One seed-grow ("flood fill") step per frame: reads the current texture and
// writes the next frame's 4-channel pixels into the PBO. Expects a 2-D
// launch covering pboWidth x pboHeight; the `time` parameter is unused.
__global__ void kernelObjAPI(const unsigned char time, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char* pbo, hipTextureObject_t tex, const unsigned int startX, const unsigned int startY)
{
	unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int pboElementOffset = (ty * pboWidth + tx) * 4;
	if ((tx < pboWidth) && (ty < pboHeight))
	{
		uchar4 texel = tex2D<uchar4>(tex, tx, ty);
		// Fix: removed the leftover per-pixel debug printf
		// (`if(texel.x || texel.z) printf(...)`) — a printf in a kernel run
		// once per pixel per frame serializes the grid and floods stdout.
		// A pixel is "fillable" when channels y and z are saturated; a
		// non-zero x channel marks it as already filled.
		bool valid = texel.y == 255 && texel.z == 255;
		bool filled = texel.x;
		if (tx == startX && ty == startY)
		{
			// Seed pixel: unconditionally marked (x=255, y=z=0).
			texel.x = 255;
			texel.y = 0;
			texel.z = 0;
		}
		else if(valid && !filled)
		{
			// Fill if any pixel in the 3x3 neighbourhood has all three
			// channels at 255 (clamp addressing handles image borders).
			bool fill = false;
			for (unsigned int x = 0; x < 3; x++)
				for (unsigned int y = 0; y < 3; y++)
				{
					uchar4 next = tex2D<uchar4>(tex, tx + x - 1, ty + y - 1);
					fill = fill || (next.x == 255 && next.y == 255 && next.z == 255);
				}
			// NOTE(review): the !fill fallbacks below copy texel.x into the
			// y and z channels — possibly meant to be texel.y / texel.z.
			// Kept byte-for-byte; confirm intent before changing.
			texel.x = fill ? 255 : texel.x;
			texel.y = fill ? 0 : texel.x;
			texel.z = fill ? 0 : texel.x;
		}
		pbo[pboElementOffset++] = texel.x;
		pbo[pboElementOffset++] = texel.y;
		pbo[pboElementOffset++] = texel.z;
		pbo[pboElementOffset++] = texel.w;
	}
}
#endif
// One-time HIP <-> OpenGL interop setup: registers the GL texture (read-only)
// and the PBO (write-discard) as HIP graphics resources and configures the
// texture sampling descriptor used each frame by cudaWorker().
void initCUDAtex()
{
hipGLSetGLDevice(0);
checkError();
//Register main texture
hipGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, hipGraphicsRegisterFlagsReadOnly); // Map the GL texture resource with the CUDA resource
checkError();
#if USED_API == TEXTURE_REFERENCE_API
//cudaTexChannelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned);
cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cudaTexRef.filterMode = hipFilterModePoint; //Otherwise texRef.filterMode = hipFilterModeLinear; for Linear interpolation of texels
cudaTexRef.addressMode[0] = hipAddressModeClamp; //No repeat texture pattern
cudaTexRef.addressMode[1] = hipAddressModeClamp; //No repeat texture pattern
#else
// Texture-object path: unnormalized integer coordinates, nearest-point
// sampling, and clamped borders (kernelObjAPI relies on clamping when it
// samples the 3x3 neighbourhood at image edges).
memset(&texDesc, 0, sizeof(hipTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = hipFilterModePoint;
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.readMode = hipReadModeElementType;
#endif
//Register PBO
hipGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, hipGraphicsRegisterFlagsWriteDiscard);
checkError();
}
// Per-frame worker: maps the GL texture into a HIP array, runs the kernel
// writing into the mapped PBO, unmaps both resources, then streams the PBO
// back into the texture via glTexSubImage2D. Order matters throughout:
// resources must be unmapped before GL touches them again.
void cudaWorker()
{
hipArray* array;
hipGraphicsMapResources(1, &cudaTexResource, 0);
hipGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0);
hipGetChannelDesc(&cudaTexChannelDesc, array);
#if USED_API == TEXTURE_REFERENCE_API
// NOTE(review): this branch references `someValue`, which is not declared
// anywhere in this file — it would not compile if USED_API were switched
// back to TEXTURE_REFERENCE_API.
hipBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc);
checkError();
hipGraphicsMapResources(1, &cudaPBOResource, 0);
unsigned char* pboData;
size_t pboSize;
hipGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
someValue++;
if (someValue > 255) someValue = 0;
dim3 block = dim3(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
kernelRefAPI << <grid, block >> > (someValue, imageWidth, imageHeight, pboData);
hipUnbindTexture(&cudaTexRef);
#else
// Texture-object path: a short-lived texture object is created over the
// mapped array each frame and destroyed after the launch.
memset(&resDesc, 0, sizeof(hipResourceDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = array;
checkCudaErrors(hipCreateTextureObject(&cudaTex, &resDesc, &texDesc, NULL));
checkError();
hipGraphicsMapResources(1, &cudaPBOResource, 0);
unsigned char* pboData;
size_t pboSize;
hipGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
run_time++;
// NOTE(review): run_time is an unsigned char (max 255), so this check can
// never fire — the counter simply wraps at 256. Widen the type if a reset
// at 1024 was intended.
if (run_time > 1024) run_time = 0;
// Ceil-divide so partial edge tiles are covered; kernel bounds-checks.
dim3 block = dim3(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
hipLaunchKernelGGL(( kernelObjAPI), dim3(grid), dim3(block), 0, 0, run_time, imageWidth, imageHeight, pboData, cudaTex, startPositionX, startPositionY);
checkCudaErrors(hipDestroyTextureObject(cudaTex));
#endif
hipGraphicsUnmapResources(1, &cudaPBOResource, 0);
hipGraphicsUnmapResources(1, &cudaTexResource, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
//printf(".");
}
void releaseCUDA()
{
	// Unregisters both GL objects from HIP; the GL side itself is
	// released separately in releaseOpenGL().
	cudaGraphicsResource_t resources[2] = { cudaPBOResource, cudaTexResource };
	for (int i = 0; i < 2; ++i)
	{
		hipGraphicsUnregisterResource(resources[i]);
	}
}
#pragma endregion
| deaac142657f0aa9df4111118dc0405d940a0803.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cudaDefs.h>
#include <glew.h>
#include <freeglut.h>
#include <imageManager.h>
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#define BLOCK_DIM 32
#define TEXTURE_REFERENCE_API 0
#define TEXTURE_OBJECT_API 1
//#define USED_API TEXTURE_REFERENCE_API
#define USED_API TEXTURE_OBJECT_API
// Configuration
unsigned int startPositionX = 4030;
unsigned int startPositionY = 1450;
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
//OpenGL
unsigned int viewportWidth = 1024;
unsigned int viewportHeight = 618;//1024;
unsigned int imageWidth;
unsigned int imageHeight;
unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int imagePitch;
unsigned int pboID;
unsigned int textureID;
static int fpsCount = 0;
static int fpsLimit = 1;
StopWatchInterface* totalTimer = nullptr;
StopWatchInterface* workerTimer = nullptr;
char windowTitle[256];
float fps;
const int cbTimerDelay = 10; //10 ms
//CUDA
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
#if USED_API == TEXTURE_REFERENCE_API
cudaChannelFormatDesc cudaTexChannelDesc;
texture<uchar4, 2, cudaReadModeElementType> cudaTexRef;
#else
cudaChannelFormatDesc cudaTexChannelDesc;
cudaResourceDesc resDesc;
cudaTextureDesc texDesc;
cudaTextureObject_t cudaTex = 0;
#endif
unsigned char run_time = 0;
//OpenGL
void initGL(int argc, char** argv);
void releaseOpenGL();
void prepareTexture(const char* imageFileName);
void preparePBO();
//OpenGL Callback functions
void my_display();
void my_resize(GLsizei w, GLsizei h);
void my_idle();
void my_timer(int value);
//CUDA
void initCUDAtex();
void cudaWorker();
void releaseCUDA();
int main(int argc, char* argv[])
{
	// Set up CUDA, timers, GL window, texture, PBO and interop, then run the
	// freeglut main loop.
	initializeCUDA(deviceProp);
	sdkCreateTimer(&totalTimer);
	sdkResetTimer(&totalTimer);
	sdkCreateTimer(&workerTimer);
	sdkResetTimer(&workerTimer);
	initGL(argc, argv);
	//prepareTexture("D:/Documents/Projekty/Škola/PA2/cv8/assets/lena.png");
	prepareTexture("D:/Documents/Projekty/Škola/PA2/cv8/assets/world.png");
	preparePBO();
	initCUDAtex();
	// Fix: register the cleanup handler BEFORE entering the main loop.
	// freeglut's default window-close action calls exit(), so glutMainLoop()
	// normally never returns and an atexit() placed after it (as the
	// original did) is never registered and never runs.
	atexit([]()
	{
		sdkDeleteTimer(&totalTimer);
		sdkDeleteTimer(&workerTimer);
		releaseCUDA();
		releaseOpenGL();
	});
	//start rendering mainloop
	glutMainLoop();
}
#pragma region OpenGL Routines
// Creates the freeglut window, registers the GLUT callbacks and sets basic
// GL state. Must run before prepareTexture/preparePBO (they need a context).
void initGL(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("Freeglut window");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
// The render tick is driven by a repeating timer, not the idle callback.
//glutIdleFunc(my_idle);
glutTimerFunc(cbTimerDelay, my_timer, 0);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
// initialize necessary OpenGL extensions
// (GLEW must be initialized after a GL context exists)
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void prepareTexture(const char* imageFileName)
{
	// Loads an image from disk, converts it to 32-bit BGRA and uploads it as
	// the GL texture that CUDA will later map. Sets the global
	// imageWidth/imageHeight/imageBPP/imagePitch from the converted bitmap.
	FreeImage_Initialise();
	FIBITMAP *loaded = ImageManager::GenericLoader(imageFileName, 0);
	// Fix: FreeImage_ConvertTo32Bits returns a NEW bitmap; the original
	// overwrote the only pointer to the loaded bitmap and leaked it.
	FIBITMAP *tmp = FreeImage_ConvertTo32Bits(loaded);
	FreeImage_Unload(loaded);
	imageWidth = FreeImage_GetWidth(tmp);
	imageHeight = FreeImage_GetHeight(tmp);
	imageBPP = FreeImage_GetBPP(tmp);
	imagePitch = FreeImage_GetPitch(tmp);
	//OpenGL Texture
	glEnable(GL_TEXTURE_2D);
	glGenTextures(1,&textureID);
	glBindTexture( GL_TEXTURE_2D, textureID);
	//WARNING: Just some of inner format are supported by CUDA!!!
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp));
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
	FreeImage_Unload(tmp);
	FreeImage_DeInitialise();
}
void preparePBO()
{
	// Creates the pixel-unpack PBO the kernel writes into each frame,
	// sized for a 4-channel 8-bit image of imageWidth x imageHeight.
	glGenBuffers(1, &pboID);
	// OpenGL is state-based: make this the current UNPACK buffer first.
	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
	const unsigned int pboBytes = imageWidth * imageHeight * 4;
	glBufferData(GL_PIXEL_UNPACK_BUFFER, pboBytes, NULL, GL_DYNAMIC_COPY);
}
void releaseOpenGL()
{
	// Deletes the GL texture and PBO if they were ever created
	// (GL object names are non-zero once generated).
	if (textureID > 0) {
		glDeleteTextures(1, &textureID);
	}
	if (pboID > 0) {
		glDeleteBuffers(1, &pboID);
	}
}
#pragma endregion
#pragma region OpenGL Callbacs
void my_display()
{
	// Draws the current texture as a single quad covering the viewport,
	// then swaps the double buffer.
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glEnable(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, textureID);
	const double w = (double)viewportWidth;
	const double h = (double)viewportHeight;
	const double texCoords[4][2] = { {0, 0}, {1, 0}, {1, 1}, {0, 1} };
	const double vertices[4][2] = { {0, 0}, {w, 0}, {w, h}, {0, h} };
	glBegin(GL_QUADS);
	for (int corner = 0; corner < 4; ++corner)
	{
		glTexCoord2d(texCoords[corner][0], texCoords[corner][1]);
		glVertex2d(vertices[corner][0], vertices[corner][1]);
	}
	glEnd();
	glDisable(GL_TEXTURE_2D);
	glFlush();
	glutSwapBuffers();
}
// Reshape callback: tracks the new window size and rebuilds an orthographic
// projection mapping GL units 1:1 to pixels, then requests a redraw.
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth=w;
viewportHeight=h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0,0,viewportWidth,viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,viewportWidth, 0,viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
void my_idle()
{
	// Idle-driven render tick (currently unused; see the commented-out
	// glutIdleFunc registration in initGL). Times one cudaWorker() pass and
	// periodically updates the window title with FPS / worker stats.
	sdkStartTimer(&totalTimer);
	sdkStartTimer(&workerTimer);
	cudaWorker();
	sdkStopTimer(&workerTimer);
	glutPostRedisplay();
	sdkStopTimer(&totalTimer);
	if (++fpsCount == fpsLimit)
	{
		fps = 1000.0f / sdkGetAverageTimerValue(&totalTimer);
		sprintf(windowTitle, "Freeglut window (%d x %d): %.1f fps worker: %.5f ms", viewportWidth, viewportHeight, fps, sdkGetAverageTimerValue(&workerTimer)/1000.f);
		glutSetWindowTitle(windowTitle);
		fpsCount = 0;
		fpsLimit = (int)((fps > 1.0f) ? fps : 1.0f);
		sdkResetTimer(&totalTimer);
		// Fix: also reset the worker timer each reporting interval, as
		// my_timer does; otherwise the displayed worker average spans the
		// entire run instead of the last interval.
		sdkResetTimer(&workerTimer);
	}
}
void my_timer(int value)
{
	// Timer-driven render tick, re-armed every cbTimerDelay ms. Times one
	// cudaWorker() pass and periodically updates the window title.
	sdkStartTimer(&totalTimer);
	sdkStartTimer(&workerTimer);
	cudaWorker();
	sdkStopTimer(&workerTimer);
	glutPostRedisplay();
	sdkStopTimer(&totalTimer);
	if (++fpsCount == fpsLimit)
	{
		// Fix: assign the file-scope fps instead of shadowing it with a
		// local `float fps` — the original left the global stale on this
		// (the active) code path, inconsistent with my_idle.
		fps = 1000.0f / sdkGetAverageTimerValue(&totalTimer);
		sprintf(windowTitle, "Freeglut window (%d x %d): %.1f fps worker: %.5f ms", viewportWidth, viewportHeight, fps, sdkGetAverageTimerValue(&workerTimer) / 1000.f);
		glutSetWindowTitle(windowTitle);
		fpsCount = 0;
		fpsLimit = (int)((fps > 1.0f) ? fps : 1.0f);
		sdkResetTimer(&totalTimer);
		sdkResetTimer(&workerTimer);
	}
	glutTimerFunc(cbTimerDelay, my_timer, 0);
}
#pragma endregion
#pragma region CUDA Routines
#if USED_API == TEXTURE_REFERENCE_API
// Legacy texture-reference variant (only compiled when USED_API ==
// TEXTURE_REFERENCE_API): samples the globally bound cudaTexRef and writes a
// 4-byte pixel per thread into the PBO, replacing the first channel with the
// animated someValue. Expects a 2-D launch covering pboWidth x pboHeight.
__global__ void kernelRefAPI(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char* pbo)
{
unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
// Byte offset of this thread's 4-channel pixel in the PBO.
unsigned int pboElementOffset = (ty * pboWidth + tx) * 4;
if ((tx < pboWidth) && (ty < pboHeight))
{
uchar4 texel = tex2D(cudaTexRef, tx, ty);
pbo[pboElementOffset++] = someValue;
pbo[pboElementOffset++] = texel.y;
pbo[pboElementOffset++] = texel.z;
pbo[pboElementOffset++] = texel.w;
}
}
#endif
#if USED_API == TEXTURE_OBJECT_API
// One seed-grow ("flood fill") step per frame: reads the current texture and
// writes the next frame's 4-channel pixels into the PBO. Expects a 2-D
// launch covering pboWidth x pboHeight; the `time` parameter is unused.
__global__ void kernelObjAPI(const unsigned char time, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char* pbo, cudaTextureObject_t tex, const unsigned int startX, const unsigned int startY)
{
	unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int pboElementOffset = (ty * pboWidth + tx) * 4;
	if ((tx < pboWidth) && (ty < pboHeight))
	{
		uchar4 texel = tex2D<uchar4>(tex, tx, ty);
		// Fix: removed the leftover per-pixel debug printf
		// (`if(texel.x || texel.z) printf(...)`) — a printf in a kernel run
		// once per pixel per frame serializes the grid and floods stdout.
		// A pixel is "fillable" when channels y and z are saturated; a
		// non-zero x channel marks it as already filled.
		bool valid = texel.y == 255 && texel.z == 255;
		bool filled = texel.x;
		if (tx == startX && ty == startY)
		{
			// Seed pixel: unconditionally marked (x=255, y=z=0).
			texel.x = 255;
			texel.y = 0;
			texel.z = 0;
		}
		else if(valid && !filled)
		{
			// Fill if any pixel in the 3x3 neighbourhood has all three
			// channels at 255 (clamp addressing handles image borders).
			bool fill = false;
			for (unsigned int x = 0; x < 3; x++)
				for (unsigned int y = 0; y < 3; y++)
				{
					uchar4 next = tex2D<uchar4>(tex, tx + x - 1, ty + y - 1);
					fill = fill || (next.x == 255 && next.y == 255 && next.z == 255);
				}
			// NOTE(review): the !fill fallbacks below copy texel.x into the
			// y and z channels — possibly meant to be texel.y / texel.z.
			// Kept byte-for-byte; confirm intent before changing.
			texel.x = fill ? 255 : texel.x;
			texel.y = fill ? 0 : texel.x;
			texel.z = fill ? 0 : texel.x;
		}
		pbo[pboElementOffset++] = texel.x;
		pbo[pboElementOffset++] = texel.y;
		pbo[pboElementOffset++] = texel.z;
		pbo[pboElementOffset++] = texel.w;
	}
}
#endif
// One-time CUDA <-> OpenGL interop setup: registers the GL texture (read-only)
// and the PBO (write-discard) as CUDA graphics resources and configures the
// texture sampling descriptor used each frame by cudaWorker().
void initCUDAtex()
{
cudaGLSetGLDevice(0);
checkError();
//Register main texture
cudaGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsReadOnly); // Map the GL texture resource with the CUDA resource
checkError();
#if USED_API == TEXTURE_REFERENCE_API
//cudaTexChannelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned);
cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates
cudaTexRef.filterMode = cudaFilterModePoint; //Otherwise texRef.filterMode = cudaFilterModeLinear; for Linear interpolation of texels
cudaTexRef.addressMode[0] = cudaAddressModeClamp; //No repeat texture pattern
cudaTexRef.addressMode[1] = cudaAddressModeClamp; //No repeat texture pattern
#else
// Texture-object path: unnormalized integer coordinates, nearest-point
// sampling, and clamped borders (kernelObjAPI relies on clamping when it
// samples the 3x3 neighbourhood at image edges).
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.normalizedCoords = false;
texDesc.filterMode = cudaFilterModePoint;
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.readMode = cudaReadModeElementType;
#endif
//Register PBO
cudaGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, cudaGraphicsRegisterFlagsWriteDiscard);
checkError();
}
// Per-frame worker: maps the GL texture into a CUDA array, runs the kernel
// writing into the mapped PBO, unmaps both resources, then streams the PBO
// back into the texture via glTexSubImage2D. Order matters throughout:
// resources must be unmapped before GL touches them again.
void cudaWorker()
{
cudaArray* array;
cudaGraphicsMapResources(1, &cudaTexResource, 0);
cudaGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0);
cudaGetChannelDesc(&cudaTexChannelDesc, array);
#if USED_API == TEXTURE_REFERENCE_API
// NOTE(review): this branch references `someValue`, which is not declared
// anywhere in this file — it would not compile if USED_API were switched
// back to TEXTURE_REFERENCE_API.
cudaBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc);
checkError();
cudaGraphicsMapResources(1, &cudaPBOResource, 0);
unsigned char* pboData;
size_t pboSize;
cudaGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
someValue++;
if (someValue > 255) someValue = 0;
dim3 block = dim3(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
kernelRefAPI << <grid, block >> > (someValue, imageWidth, imageHeight, pboData);
cudaUnbindTexture(&cudaTexRef);
#else
// Texture-object path: a short-lived texture object is created over the
// mapped array each frame and destroyed after the launch.
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = array;
checkCudaErrors(cudaCreateTextureObject(&cudaTex, &resDesc, &texDesc, NULL));
checkError();
cudaGraphicsMapResources(1, &cudaPBOResource, 0);
unsigned char* pboData;
size_t pboSize;
cudaGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource);
checkError();
run_time++;
// NOTE(review): run_time is an unsigned char (max 255), so this check can
// never fire — the counter simply wraps at 256. Widen the type if a reset
// at 1024 was intended.
if (run_time > 1024) run_time = 0;
// Ceil-divide so partial edge tiles are covered; kernel bounds-checks.
dim3 block = dim3(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1);
kernelObjAPI<<<grid, block>>>(run_time, imageWidth, imageHeight, pboData, cudaTex, startPositionX, startPositionY);
checkCudaErrors(cudaDestroyTextureObject(cudaTex));
#endif
cudaGraphicsUnmapResources(1, &cudaPBOResource, 0);
cudaGraphicsUnmapResources(1, &cudaTexResource, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
//printf(".");
}
void releaseCUDA()
{
	// Unregisters both GL objects from CUDA; the GL side itself is
	// released separately in releaseOpenGL().
	cudaGraphicsResource_t resources[2] = { cudaPBOResource, cudaTexResource };
	for (int i = 0; i < 2; ++i)
	{
		cudaGraphicsUnregisterResource(resources[i]);
	}
}
#pragma endregion
|
02c2733e2cb7690376d18448efae7a67a73f65e8.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
// includes, kernels
#include "common.hip"
#include "mummergpu.h"
#include "mummergpu_kernel.cu"
int USE_PRINT_KERNEL = 1;
#define BREATHING_ROOM (16 * 1024 * 1024)
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
#define BLOCKSIZE 256
unsigned int cuda_calls = 0;
void trap_dbg() { fprintf(stderr, "Trapped\n"); }
#define CUDA_SAFE_CALL(call) \
do { \
cuda_calls++; \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \
__FILE__, __LINE__, err, hipGetErrorString(err)); \
trap_dbg(); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CU_SAFE_CALL_NO_SYNC(call) \
do { \
hipError_t err = call; \
if (hipSuccess != err) { \
fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \
err, __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
} while (0)
#define CUT_DEVICE_INIT_DRV(cuDevice) \
do { \
cuDevice = 0; \
int deviceCount = 0; \
hipError_t err = hipInit(0); \
if (hipSuccess == err) \
CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \
if (deviceCount == 0) { \
fprintf(stderr, "There is no device.\n"); \
exit(EXIT_FAILURE); \
} \
int dev; \
for (dev = 0; dev < deviceCount; ++dev) { \
int major, minor; \
CU_SAFE_CALL_NO_SYNC( \
hipDeviceComputeCapability(&major, &minor, dev)); \
if (major >= 1) \
break; \
} \
if (dev == deviceCount) { \
fprintf(stderr, "There is no device supporting CUDA.\n"); \
exit(EXIT_FAILURE); \
} else \
CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \
} while (0)
unsigned int num_bind_tex_calls = 0;
#define BIND_TEX(offset, tex, arr, desc, len) \
do { \
CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \
++num_bind_tex_calls; \
} while (0)
#define BIND_TEX_ARRAY(tex, arr, desc) \
do { \
CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC(ptr, size) \
do { \
hipMalloc(ptr, size); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) \
do { \
hipMallocPitch(ptr, out_pitch, rowsize, numrows); \
++num_bind_tex_calls; \
} while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) \
do { \
hipMallocArray(ptr, desc, pitch, rows); \
++num_bind_tex_calls; \
} while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
extern "C" void computeGold(MatchResults *results, char *refstr, char *queries,
int *queryAddrs, int *queryLengths,
PixelOfNode *nodeTexture,
PixelOfChildren *childrenTexture, int numQueries,
int mismatch_length, int rc);
extern "C" void getReferenceString(const char *filename, char **refstr,
size_t *reflen);
extern "C" void
createTreeTexture(const char *filename, PixelOfNode **nodeTexture,
PixelOfChildren **childrenTexture, unsigned int *width,
unsigned int *node_height, unsigned int *children_height,
AuxiliaryNodeData **aux_data, int *num_match_coords,
int min_match_len, Statistics *statistics,
const char *dotfilename, const char *texfilename);
extern "C" void getQueriesTexture(int qfile, char **queryTexture,
size_t *queryLength, int **queryAddrs,
char ***queryNames, int **queryLengths,
unsigned int *numQueries,
unsigned int *num_match_coords,
unsigned int device_memory_avail,
int min_match_length, bool rc);
extern "C" int lookupNumLeaves(ReferencePage *page, TextureAddress addr);
void printAlignments(ReferencePage *page, Alignment *alignments, char *query,
int qrylen, TextureAddress nodeid, int qrypos,
int edge_depth, int min_match, bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext *ctx, ReferencePage *page,
MatchInfo *h_matches,
unsigned int numMatches,
Alignment *h_alignments,
unsigned int numAligments);
char *createTimer() {
  // Allocates a zero-initialized Timer_t and returns it as an opaque
  // char* handle (the type used by the start/stop/get/delete API).
  Timer_t *timer = (Timer_t *)calloc(1, sizeof(struct Timer_t));
  return (char *)timer;
}
void startTimer(char *ptr) {
  // Records the start timestamp on the opaque timer handle.
  Timer_t *timer = (Timer_t *)ptr;
  gettimeofday(&timer->start_m, NULL);
}
void stopTimer(char *ptr) {
  // Records the end timestamp on the opaque timer handle.
  Timer_t *timer = (Timer_t *)ptr;
  gettimeofday(&timer->end_m, NULL);
}
float getTimerValue(char *ptr) {
  // Returns the elapsed wall-clock time in milliseconds. A timer that was
  // never stopped (end seconds still zero) is stopped implicitly here.
  Timer_t *timer = (Timer_t *)ptr;
  if (timer == NULL) {
    fprintf(stderr, "Uninitialized timer!!!\n");
    return 0.0;
  }
  if (timer->end_m.tv_sec == 0) {
    stopTimer(ptr);
  }
  double millis = 1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) +
                  0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec);
  return (float)millis;
}
void deleteTimer(char *ptr) { free((Timer_t *)ptr); }
extern "C" int createReference(const char *fromFile, Reference *ref) {
  // Loads the reference string from disk into ref, accumulating the load
  // time into ref->t_load_from_disk. Returns -1 on null arguments, 0 on
  // success.
  if (fromFile == NULL || ref == NULL)
    return -1;
  char *timer = createTimer();
  startTimer(timer);
  getReferenceString(fromFile, &(ref->str), &(ref->len));
  stopTimer(timer);
  ref->t_load_from_disk += getTimerValue(timer);
  deleteTimer(timer);
  return 0;
}
// Frees all host-side buffers owned by the Reference (tree textures, the
// reference string, auxiliary data and, when compiled in, the reordered
// string and access histograms), then resets str/len. Always returns 0.
extern "C" int destroyReference(Reference *ref) {
free(ref->h_node_tex_array);
free(ref->h_children_tex_array);
free(ref->str);
#if REORDER_REF
free(ref->h_ref_array);
#endif
free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
free(ref->h_node_hist);
free(ref->h_child_hist);
#endif
ref->str = NULL;
ref->len = 0;
return 0;
}
extern "C" int createQuerySet(const char *fromFile, QuerySet *queries) {
  // Opens the query file read-only and stashes the descriptor on the set.
  // Terminates the process if the file cannot be opened.
  fprintf(stderr, "Opening %s...\n", fromFile);
  queries->qfile = open(fromFile, O_RDONLY);
  if (queries->qfile == -1) {
    fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
    exit(1);
  }
  return 0;
}
extern "C" int destroyQuerySet(QuerySet *queries) {
  // Closes the query file descriptor if one was opened (0 means "none").
  if (queries->qfile != 0) {
    close(queries->qfile);
  }
  return 0;
}
extern "C" void printStringForError(int err) {}
extern "C" int createMatchContext(Reference *ref, QuerySet *queries,
                                  MatchResults *matches, bool on_cpu,
                                  int min_match_length, char *stats_file,
                                  bool reverse, bool forwardreverse,
                                  bool forwardcoordinates, bool showQueryLength,
                                  char *dotfilename, char *texfilename,
                                  MatchContext *ctx) {
  // Populates the match context from the caller's options. No allocation
  // happens here: the context aliases the reference string (full_ref points
  // at ref->str) and the query set. Always returns 0; `matches` is unused.
  ctx->ref = ref;
  ctx->full_ref = ref->str;
  ctx->full_ref_len = ref->len;
  ctx->queries = queries;
  ctx->on_cpu = on_cpu;
  ctx->min_match_length = min_match_length;
  ctx->stats_file = stats_file;
  ctx->reverse = reverse;
  ctx->forwardreverse = forwardreverse;
  ctx->forwardcoordinates = forwardcoordinates;
  ctx->show_query_length = showQueryLength;
  ctx->dotfilename = dotfilename;
  ctx->texfilename = texfilename;
  return 0;
}
// Tears down a match context: frees the full reference string and closes the
// query set. destroyReference is left commented out — full_ref aliases
// ref->str (see createMatchContext), so calling it here after the free below
// would double-free that buffer.
extern "C" int destroyMatchContext(MatchContext *ctx) {
free(ctx->full_ref);
// destroyReference(ctx->ref);
destroyQuerySet(ctx->queries);
return 0;
}
// Builds the suffix-tree texture for one reference page [begin, end):
// copies the page of full_ref into ref->str (wrapped as "s...$\0"), builds
// the node/children textures via createTreeTexture, and (when REORDER_REF)
// reorders the reference string into a blocked layout for coalesced access.
// Timing is accumulated into `statistics`.
void buildReferenceTexture(Reference *ref, char *full_ref, size_t begin,
size_t end, int min_match_len, char *dotfilename,
char *texfilename, Statistics *statistics) {
fprintf(stderr, "Building reference texture...\n");
PixelOfNode *nodeTexture = NULL;
PixelOfChildren *childrenTexture = NULL;
unsigned int width = 0;
unsigned int node_height = 0;
unsigned int children_height = 0;
AuxiliaryNodeData *aux_data = NULL;
int num_nodes;
char *loadreftimer = createTimer();
startTimer(loadreftimer);
// +3 accounts for the leading 's' sentinel, trailing '$' and the NUL.
ref->len = end - begin + 3;
ref->str = (char *)malloc(ref->len);
ref->str[0] = 's';
strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
strcpy(ref->str + ref->len - 2, "$");
stopTimer(loadreftimer);
statistics->t_ref_from_disk +=
getTimerValue(loadreftimer) + ref->t_load_from_disk;
deleteTimer(loadreftimer);
createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width,
&node_height, &children_height, &aux_data, &num_nodes,
min_match_len, statistics, dotfilename, texfilename);
ref->h_node_tex_array = nodeTexture;
ref->h_children_tex_array = childrenTexture;
ref->tex_width = width;
ref->tex_node_height = node_height;
ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
ref->h_node_hist = (int *)calloc(width * node_height, sizeof(int));
ref->h_child_hist = (int *)calloc(width * children_height, sizeof(int));
#endif
ref->aux_data = aux_data;
ref->num_nodes = num_nodes;
ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
(width * children_height * sizeof(PixelOfChildren));
fprintf(stderr, "This tree will need %d bytes on the board\n",
ref->bytes_on_board);
#if REORDER_REF
// Reorder the reference string into blocksize-row tiles of width refpitch
// so neighbouring characters land in neighbouring texture rows.
char *reordertimer = createTimer();
startTimer(reordertimer);
unsigned int refpitch = ref->pitch = 65536;
int numrows = ceil(ref->len / ((float)refpitch));
int blocksize = 4;
numrows += blocksize;
int refstrsize = numrows * refpitch;
ref->h_ref_array = (char *)malloc(refstrsize);
ref->bytes_on_board += refstrsize;
fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
// 'Z' pads slots past the end of the actual reference.
int z_max = numrows * refpitch;
for (int z = 0; z < z_max; z++) {
ref->h_ref_array[z] = 'Z';
}
int x, y;
int maxx = 0, maxy = 0;
size_t reflen = ref->len;
char *refstr = ref->str;
int block_dim = refpitch * blocksize;
for (int i = 0; i < reflen; i++) {
int bigx = i % (block_dim); // ref string reorder
int bigy = i / (block_dim);
y = bigy * blocksize + bigx % blocksize;
x = bigx / blocksize;
// printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
assert(x < refpitch);
assert(y < numrows);
ref->h_ref_array[y * refpitch + x] = refstr[i];
if (x > maxx) {
maxx = x;
}
if (y > maxy) {
maxy = y;
}
}
// Sanity check: the reordering must stay inside the allocated array.
if ((maxx >= refpitch) || (maxy >= numrows)) {
fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
maxx, refpitch, maxy, numrows);
exit(1);
}
stopTimer(reordertimer);
if (statistics)
statistics->t_reorder_ref_str += getTimerValue(reordertimer);
deleteTimer(reordertimer);
#else
fprintf(stderr, "The refstr requires %d bytes\n", ref->len);
ref->bytes_on_board += ref->len;
#endif
}
// Queries the device for free and total memory in bytes.
void boardMemory(size_t *free_mem, size_t *total_mem) {
    // The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
    // Report fixed, plausible values when running under device emulation.
    *free_mem = 512 * 1024 * 1024;
    *total_mem = 768 * 1024 * 1024;
#else
    *free_mem = 512 * 1024 * 1024;
    *total_mem = 768 * 1024 * 1024;
#else
    CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
// Copies the (possibly reordered) reference string to the device and binds
// it to the 'reftex' texture when REFTEX is enabled. On the CPU path no
// device buffer is created. Accumulates transfer time into
// ctx->statistics.t_ref_str_to_board and byte counts into bytes_on_board.
void loadReferenceTexture(MatchContext *ctx) {
    Reference *ref = ctx->ref;
    int numrows = ceil(ref->len / ((float)ref->pitch));
    int blocksize = 4;
    // Padding rows added by the REORDER_REF layout in buildReferenceTexture.
    numrows += blocksize;
    // 8-bit single-channel texels: one reference character per texel.
    hipChannelFormatDesc refTextureDesc =
        hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned);
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
        // 2D texture over a hipArray holding the tile-reordered string.
        CUDA_MALLOC_ARRAY((hipArray **)(&ref->d_ref_array), &refTextureDesc,
                          ref->pitch, numrows);
        CUDA_SAFE_CALL(hipMemcpyToArray((hipArray *)(ref->d_ref_array), 0, 0,
                                        ref->h_ref_array, numrows * ref->pitch,
                                        hipMemcpyHostToDevice));
        reftex.addressMode[0] = hipAddressModeClamp;
        reftex.addressMode[1] = hipAddressModeClamp;
        reftex.filterMode = hipFilterModePoint;
        reftex.normalized = false;
        BIND_TEX_ARRAY(reftex, (hipArray *)ref->d_ref_array, refTextureDesc);
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        // 1D linear texture over the raw (non-reordered) string.
        CUDA_MALLOC((void **)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(hipMemcpy((void *)(ref->d_ref_array), ref->str,
                                 ref->len, hipMemcpyHostToDevice));
        reftex.addressMode[0] = hipAddressModeClamp;
        reftex.filterMode = hipFilterModePoint;
        reftex.normalized = false; // access with normalized texture coordinates
        hipChannelFormatDesc refDesc =
            hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
        BIND_TEX(0, reftex, (void *)(ref->d_ref_array), refDesc, ref->len);
        ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
        // No texture: pitched linear memory, accessed directly by the kernel.
        size_t refpitch;
        CUDA_MALLOC_PITCH((void **)(&ref->d_ref_array), &refpitch,
                          ref->pitch * sizeof(char), numrows);
        CUDA_SAFE_CALL(hipMemcpy2D(
            (ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch,
            ref->pitch * sizeof(char), numrows, hipMemcpyHostToDevice));
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        // No texture, no reordering: plain flat copy of the string.
        CUDA_MALLOC((void **)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(hipMemcpy((void *)(ref->d_ref_array), ref->str,
                                 ref->len, hipMemcpyHostToDevice));
        ctx->ref->bytes_on_board += ref->len;
#endif
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    } else {
        // CPU path: matching reads ref->str directly on the host.
        ref->d_ref_array = NULL;
    }
}
// Releases the device-side reference string, unbinding the texture first
// when REFTEX is enabled. Mirrors the allocation paths of
// loadReferenceTexture (hipArray vs linear memory).
void unloadReferenceString(Reference *ref) {
#if REFTEX
    CUDA_SAFE_CALL(hipUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
    // Only the reordered+texture path allocated a hipArray.
    CUDA_SAFE_CALL(hipFreeArray((hipArray *)(ref->d_ref_array)));
#else
    CUDA_SAFE_CALL(hipFree((ref->d_ref_array)));
#endif
    ref->d_ref_array = NULL;
}
// Releases the device-side suffix-tree node/children textures (and the
// access histograms when TREE_ACCESS_HISTOGRAM is on). Mirrors the
// allocation paths of loadReference: hipArray when the corresponding
// texture flag is set under REORDER_TREE, linear memory otherwise.
void unloadReferenceTree(MatchContext *ctx) {
    Reference *ref = ctx->ref;
#if REORDER_TREE
    // Unload nodetex
#if NODETEX
    CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
    CUDA_SAFE_CALL(hipFreeArray((hipArray *)(ref->d_node_tex_array)));
#else
    CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
#endif
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
        CUDA_SAFE_CALL(hipFreeArray((hipArray *)(ref->d_children_tex_array)));
#else
        CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
#endif
    }
    ref->d_children_tex_array = NULL;
#else
#if NODETEX
    CUDA_SAFE_CALL(hipUnbindTexture(nodetex));
#endif
    CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array));
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(hipUnbindTexture(childrentex));
#endif
        CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array));
        ref->d_children_tex_array = NULL;
    }
#endif
#if TREE_ACCESS_HISTOGRAM
    CUDA_SAFE_CALL(hipFree(ref->d_node_hist));
    ref->d_node_hist = NULL;
    CUDA_SAFE_CALL(hipFree(ref->d_child_hist));
    ref->d_child_hist = NULL;
#endif
}
// loads a tree and text for [begin, end) in the reference
//
// Transfers the reference string (via loadReferenceTexture) and the
// suffix-tree node/children textures to the device, then seeds the
// constant-memory caches of the top tree levels (TWO_LEVEL_*_TREE).
// Layout depends on REORDER_TREE / NODETEX / CHILDTEX / MERGETEX.
void loadReference(MatchContext *ctx) {
    Reference *ref = ctx->ref;
    ref->bytes_on_board = 0;
    loadReferenceTexture(ctx);
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        // node texels
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
        // children texels
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
        // 2D texture over a hipArray of 128-bit (uint4) node texels.
        hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(
            32, 32, 32, 32, hipChannelFormatKindUnsigned);
        CUDA_MALLOC_ARRAY((hipArray **)(&ref->d_node_tex_array),
                          &nodeTextureDesc, ref->tex_width,
                          ref->tex_node_height);
        CUDA_SAFE_CALL(hipMemcpyToArray(
            (hipArray *)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array,
            ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
            hipMemcpyHostToDevice));
        nodetex.addressMode[0] = hipAddressModeClamp;
        nodetex.addressMode[1] = hipAddressModeClamp;
        nodetex.filterMode = hipFilterModePoint;
        nodetex.normalized =
            false; // access with normalized texture coordinates
        BIND_TEX_ARRAY(nodetex, (hipArray *)ref->d_node_tex_array,
                       nodeTextureDesc);
#else
        size_t nodepitch;
        CUDA_MALLOC_PITCH((void **)(&ref->d_node_tex_array), &nodepitch,
                          ref->tex_width * sizeof(PixelOfNode),
                          ref->tex_node_height);
        // NOTE(review): the source pitch passed here is the *device* pitch;
        // if h_node_tex_array is tightly packed its pitch is
        // tex_width * sizeof(PixelOfNode) — confirm against createTreeTexture.
        CUDA_SAFE_CALL(hipMemcpy2D(
            (ref->d_node_tex_array), nodepitch, ref->h_node_tex_array,
            nodepitch, ref->tex_width * sizeof(PixelOfNode),
            ref->tex_node_height, hipMemcpyHostToDevice));
#endif
        if (ref->tex_children_height) {
#if CHILDTEX
            hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(
                32, 32, 32, 32, hipChannelFormatKindUnsigned);
            CUDA_MALLOC_ARRAY((hipArray **)(&ref->d_children_tex_array),
                              &childrenTextureDesc, ref->tex_width,
                              ref->tex_children_height);
            CUDA_SAFE_CALL(
                hipMemcpyToArray((hipArray *)(ref->d_children_tex_array), 0,
                                 0, ref->h_children_tex_array,
                                 ref->tex_width * ref->tex_children_height *
                                     sizeof(PixelOfChildren),
                                 hipMemcpyHostToDevice));
            childrentex.addressMode[0] = hipAddressModeClamp;
            childrentex.addressMode[1] = hipAddressModeClamp;
            childrentex.filterMode = hipFilterModePoint;
            childrentex.normalized =
                false; // access with normalized texture coordinates
            BIND_TEX_ARRAY(childrentex,
                           (hipArray *)(ref->d_children_tex_array),
                           childrenTextureDesc);
#else
            size_t childpitch;
            CUDA_MALLOC_PITCH((void **)(&ref->d_children_tex_array),
                              &childpitch,
                              ref->tex_width * sizeof(PixelOfChildren),
                              ref->tex_children_height);
            // FIX: row width used sizeof(PixelOfNode) (copy-paste from the
            // node path above); the element type here is PixelOfChildren.
            CUDA_SAFE_CALL(hipMemcpy2D((ref->d_children_tex_array), childpitch,
                                       ref->h_children_tex_array, childpitch,
                                       ref->tex_width * sizeof(PixelOfChildren),
                                       ref->tex_children_height,
                                       hipMemcpyHostToDevice));
#endif
        }
#if TREE_ACCESS_HISTOGRAM
        // node hist
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_node_height * sizeof(int);
        CUDA_MALLOC((void **)(&ref->d_node_hist),
                    ref->tex_width * ref->tex_node_height * sizeof(int));
        CUDA_SAFE_CALL(
            hipMemset((ref->d_node_hist), 0,
                      ref->tex_width * ref->tex_node_height * sizeof(int)));
        if (ref->tex_children_height) {
            // children hist
            ref->bytes_on_board +=
                ref->tex_width * ref->tex_children_height * sizeof(int);
            fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n",
                    ref->bytes_on_board);
            CUDA_MALLOC((void **)(&ref->d_child_hist),
                        ref->tex_width * ref->tex_children_height *
                            sizeof(int));
            CUDA_SAFE_CALL(hipMemset(
                (ref->d_child_hist), 0,
                ref->tex_width * ref->tex_children_height * sizeof(int)));
        }
#endif
#else // NO TREE REORDERING
        // Node tex, 1-dimensional
        CUDA_MALLOC((void **)(&ref->d_node_tex_array),
                    ref->tex_node_height * sizeof(PixelOfNode));
        CUDA_SAFE_CALL(hipMemcpy((ref->d_node_tex_array),
                                 ref->h_node_tex_array,
                                 ref->tex_node_height * sizeof(PixelOfNode),
                                 hipMemcpyHostToDevice));
#if NODETEX
        hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(
            32, 32, 32, 32, hipChannelFormatKindUnsigned);
        nodetex.addressMode[0] = hipAddressModeClamp;
        nodetex.filterMode = hipFilterModePoint;
        nodetex.normalized =
            false; // access with normalized texture coordinates
        BIND_TEX(0, nodetex, (void *)(ref->d_node_tex_array), nodeTextureDesc,
                 ref->tex_node_height * sizeof(PixelOfNode));
#endif
        if (ref->tex_children_height) {
            // Child tex, 1-dimensional
            CUDA_MALLOC((void **)(&ref->d_children_tex_array),
                        ref->tex_children_height * sizeof(PixelOfChildren));
            CUDA_SAFE_CALL(hipMemcpy(
                (ref->d_children_tex_array), ref->h_children_tex_array,
                ref->tex_children_height * sizeof(PixelOfChildren),
                hipMemcpyHostToDevice));
#if CHILDTEX
            hipChannelFormatDesc childTextureDesc = hipCreateChannelDesc(
                32, 32, 32, 32, hipChannelFormatKindUnsigned);
            childrentex.addressMode[0] = hipAddressModeClamp;
            childrentex.filterMode = hipFilterModePoint;
            childrentex.normalized =
                false; // access with normalized texture coordinates
            BIND_TEX(0, childrentex, (void *)(ref->d_children_tex_array),
                     childTextureDesc,
                     ref->tex_children_height * sizeof(PixelOfChildren));
#endif
        }
#if TREE_ACCESS_HISTOGRAM
        ref->bytes_on_board += ref->tex_node_height * sizeof(int);
        CUDA_MALLOC((void **)(&ref->d_node_hist),
                    ref->tex_node_height * sizeof(int));
        CUDA_SAFE_CALL(hipMemset((ref->d_node_hist), 0,
                                 ref->tex_node_height * sizeof(int)));
        if (ref->tex_children_height) {
            ref->bytes_on_board += ref->tex_children_height * sizeof(int);
            CUDA_MALLOC((void **)(&ref->d_child_hist),
                        ref->tex_children_height * sizeof(int));
            CUDA_SAFE_CALL(hipMemset((ref->d_child_hist), 0,
                                     ref->tex_children_height * sizeof(int)));
        }
#endif
#endif
#if TWO_LEVEL_NODE_TREE
        // Cache the first NODE_THRESH tree nodes in constant memory so the
        // kernels can skip texture fetches for the hottest (top) levels.
        PixelOfNode node_buf[NODE_THRESH];
        memset(node_buf, 0, sizeof(node_buf));
        for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes);
             ++i) {
            TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
            // Merged layout: node/children interleave, so x is doubled.
            myaddress.x &= 0x7FF;
            myaddress.x *= 2;
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[loc];
#elif MERGETEX
            node_buf[i] =
                ((PixelOfNode *)(ref->h_node_tex_array))[myaddress.x * 2];
#else
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[myaddress.x];
#endif
        }
        CUDA_SAFE_CALL(
            hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
        // Same idea for the children texels of the top tree levels.
        PixelOfChildren child_buf[CHILD_THRESH];
        memset(child_buf, 0, sizeof(child_buf));
        for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes);
             ++i) {
            TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
            myaddress.x &= 0x7FF;
            myaddress.x *= 2;
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            child_buf[i] =
                ((PixelOfChildren *)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            // FIX: read from h_children_tex_array; 'ref->h_children' does not
            // exist anywhere else in this file (all other sites use
            // h_children_tex_array).
            child_buf[i] = ((PixelOfChildren *)(ref->h_children_tex_array))[loc];
#elif MERGETEX
            child_buf[i] = ((
                PixelOfChildren *)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
            child_buf[i] =
                ((PixelOfChildren *)(ref->h_children_tex_array))[myaddress.x];
#endif
        }
        CUDA_SAFE_CALL(
            hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
        fprintf(stderr, "done\n");
    } else {
        // CPU path: the kernels read the host arrays directly.
        ref->d_node_tex_array = NULL;
        ref->d_children_tex_array = NULL;
    }
}
void dumpQueryBlockInfo(QuerySet *queries) {
    // Log the name range (first..last) of the query block being processed.
    const char *first_name = queries->h_names[0];
    const char *last_name = queries->h_names[queries->count - 1];
    fprintf(stderr, "\tProcessing queries %s to %s\n", first_name, last_name);
}
// Copies the current query block (packed query text, per-query start
// addresses, and lengths) to the device, binding the text to 'qrytex' when
// QRYTEX is on. On the CPU path only logs the byte count.
void loadQueries(MatchContext *ctx) {
    QuerySet *queries = ctx->queries;
    queries->bytes_on_board = 0;
    unsigned int numQueries = queries->count;
    if (!ctx->on_cpu) {
        fprintf(stderr, "Allocating device memory for queries... ");
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        dumpQueryBlockInfo(queries);
        // Packed query characters; the copy starts at the first query's
        // address within h_tex_array.
        CUDA_MALLOC((void **)&queries->d_tex_array, queries->texlen);
        queries->bytes_on_board += queries->texlen;
        CUDA_SAFE_CALL(
            hipMemcpy((void *)queries->d_tex_array,
                      queries->h_tex_array + queries->h_addrs_tex_array[0],
                      queries->texlen, hipMemcpyHostToDevice));
#if QRYTEX
        qrytex.addressMode[0] = hipAddressModeClamp;
        qrytex.filterMode = hipFilterModePoint;
        qrytex.normalized = false; // access with normalized texture coordinates
        hipChannelFormatDesc qryDesc =
            hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
        BIND_TEX(0, qrytex, (void *)(queries->d_tex_array), qryDesc,
                 queries->texlen);
#endif
        // Per-query start offsets into the packed text.
        CUDA_MALLOC((void **)&queries->d_addrs_tex_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(hipMemcpy(
            (void *)queries->d_addrs_tex_array, queries->h_addrs_tex_array,
            numQueries * sizeof(int), hipMemcpyHostToDevice));
        // Per-query lengths.
        CUDA_MALLOC((void **)&queries->d_lengths_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(hipMemcpy(
            (void *)queries->d_lengths_array, queries->h_lengths_array,
            numQueries * sizeof(int), hipMemcpyHostToDevice));
        stopTimer(toboardtimer);
        ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
        fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
    } else {
        queries->d_addrs_tex_array = NULL;
        queries->d_tex_array = NULL;
        queries->d_lengths_array = NULL;
        fprintf(stderr, " allocated %ld bytes\n",
                2 * numQueries * sizeof(int) + queries->texlen);
    }
}
void unloadQueries(MatchContext *ctx) {
    // Release all device-side query buffers and reset the bookkeeping.
    QuerySet *q = ctx->queries;
    CUDA_SAFE_CALL(hipFree(q->d_tex_array));
    CUDA_SAFE_CALL(hipFree(q->d_addrs_tex_array));
    CUDA_SAFE_CALL(hipFree(q->d_lengths_array));
    q->d_tex_array = NULL;
    q->d_addrs_tex_array = NULL;
    q->d_lengths_array = NULL;
    q->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) {
    // Each preceding query occupies (match_length + 1) fewer coord slots
    // than characters, so subtracting that per-query overhead converts a
    // packed text address into a coord-array offset.
    const int per_query_overhead = match_length + 1;
    return qry_addrs - qryid * per_query_overhead;
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
//
// Outputs: *h_coord_offset_array receives a calloc'd per-query offset array
// (caller frees); *num_coords receives the total MatchCoord count.
void buildCoordOffsetArray(MatchContext *ctx, int **h_coord_offset_array,
                           unsigned int *num_coords) {
    int numCoords = 0;
    int match_length = ctx->min_match_length;
    int numQueries = ctx->queries->count;
    int *lengths = ctx->queries->h_lengths_array;
    int *coord_offsets = (int *)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
    // Queries are processed in warp-sized groups; every query in a group is
    // padded to the group's maximum coord count so accesses coalesce.
    // NOTE(review): this branch writes into ctx->results.h_coord_tex_array
    // directly while coord_offsets stays zeroed and is still returned below —
    // verify the caller's expectations before using COALESCED_QUERIES.
    for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
        // Every query in this warp will need at least this many coords
        int max_num_coords = 0;
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            int num_coords = lengths[i + j] - match_length + 1;
            if (max_num_coords < num_coords)
                max_num_coords = num_coords;
        }
        unsigned int block_size = max_num_coords * WARP_SIZE;
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            ctx->results.h_coord_tex_array[i + j] = numCoords + j;
        }
        numCoords += block_size;
    }
#else
    // One coord per query position that can start a match; offsets are
    // derived from the packed text addresses minus per-query overhead.
    for (unsigned int i = 0; i < numQueries; ++i) {
        int qryoffset = ctx->queries->h_addrs_tex_array[i];
        coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
    }
    if (numQueries > 0) {
        unsigned int last_qry = numQueries - 1;
        unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
        numCoords = coord_offsets[last_qry] + last_qry_len;
        fprintf(stderr, "Need %d match coords for this result array\n",
                numCoords);
    }
#endif
    *num_coords = numCoords;
    *h_coord_offset_array = coord_offsets;
}
// Allocates the host and (unless on_cpu) device MatchCoord result arrays
// for the current query block, zero-initialized, and builds the per-query
// coord offset table.
void loadResultBuffer(MatchContext *ctx) {
    unsigned int numQueries = ctx->queries->count;
    assert(numQueries);
    char *offsettimer = createTimer();
    startTimer(offsettimer);
    buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array),
                          &(ctx->results.numCoords));
    stopTimer(offsettimer);
    ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
    deleteTimer(offsettimer);
    unsigned int numCoords = ctx->results.numCoords;
    // FIX: numCoords * sizeof(MatchCoord) is size_t — %d was undefined
    // behavior on LP64; use %u/%zu to match the argument types.
    fprintf(stderr, "Allocating result array for %u queries (%zu bytes) ...",
            numQueries, numCoords * sizeof(MatchCoord));
    size_t boardFreeMemory = 0;
    size_t total_mem = 0;
    boardMemory(&boardFreeMemory, &total_mem);
    // FIX: size_t printed with %u — use %zu.
    fprintf(stderr, "board free memory: %zu total memory: %zu\n",
            boardFreeMemory, total_mem);
    ctx->results.h_match_coords =
        (MatchCoord *)calloc(numCoords, sizeof(MatchCoord));
    if (ctx->results.h_match_coords == NULL) {
        trap_dbg();
        exit(EXIT_FAILURE);
    }
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        ctx->results.bytes_on_board = 0;
        // Device result array, zeroed so unmatched coords read as empty.
        CUDA_MALLOC((void **)&ctx->results.d_match_coords,
                    numCoords * sizeof(MatchCoord));
        ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
        CUDA_SAFE_CALL(hipMemset((void *)ctx->results.d_match_coords, 0,
                                 numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
        // The coalesced kernel also needs the per-query coord offsets.
        CUDA_MALLOC((void **)&ctx->results.d_coord_tex_array,
                    numQueries * sizeof(int));
        ctx->results.bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(hipMemcpy((void *)ctx->results.d_coord_tex_array,
                                 ctx->results.h_coord_tex_array,
                                 numQueries * sizeof(int),
                                 hipMemcpyHostToDevice));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    } else {
        ctx->results.d_match_coords = NULL;
    }
    fprintf(stderr, "done\n");
}
// Frees the device-side result buffers allocated by loadResultBuffer.
void unloadResultBuffer(MatchContext *ctx) {
    CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords));
    ctx->results.d_match_coords = NULL;
    ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
    // FIX: this freed d_match_coords a second time (double free). The buffer
    // allocated under COALESCED_QUERIES in loadResultBuffer is
    // d_coord_tex_array, so free that instead.
    CUDA_SAFE_CALL(hipFree(ctx->results.d_coord_tex_array));
    ctx->results.d_coord_tex_array = NULL;
#endif
}
// Copies the match coordinates (and, with TREE_ACCESS_HISTOGRAM, the tree
// access histograms) back from the device, growing and accumulating the
// per-run histogram totals in ctx->statistics. No-op on the CPU path.
void transferResultsFromDevice(MatchContext *ctx) {
    if (!ctx->on_cpu) {
        char *fromboardtimer = createTimer();
        startTimer(fromboardtimer);
        CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords,
                                 ctx->results.d_match_coords,
                                 ctx->results.numCoords * sizeof(MatchCoord),
                                 hipMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
        CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist, ctx->ref->d_node_hist,
                                 ctx->ref->tex_node_height *
                                     ctx->ref->tex_width * sizeof(int),
                                 hipMemcpyDeviceToHost));
        CUDA_SAFE_CALL(hipMemcpy(
            ctx->ref->h_child_hist, ctx->ref->d_child_hist,
            ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
            hipMemcpyDeviceToHost));
        // Grow the accumulated node histogram if this page's tree is larger.
        if (ctx->statistics.node_hist_size <
            ctx->ref->tex_width * ctx->ref->tex_node_height) {
            int *temp = (int *)calloc(
                ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
            if (ctx->statistics.node_hist_size)
                memcpy(temp, ctx->statistics.node_hist,
                       ctx->statistics.node_hist_size * sizeof(int));
            ctx->statistics.node_hist = temp;
            ctx->statistics.node_hist_size =
                ctx->ref->tex_width * ctx->ref->tex_node_height;
        }
        // Grow the accumulated child histogram likewise.
        if (ctx->statistics.child_hist_size <
            ctx->ref->tex_width * ctx->ref->tex_children_height) {
            // FIX: 'temp' was used here undeclared (it was scoped to the node
            // block above), and the copy referenced a nonexistent
            // 'statistics.hist_size' field; mirror the node path using a
            // local and child_hist_size.
            int *temp = (int *)calloc(ctx->ref->tex_width *
                                          ctx->ref->tex_children_height,
                                      sizeof(int));
            if (ctx->statistics.child_hist_size)
                memcpy(temp, ctx->statistics.child_hist,
                       ctx->statistics.child_hist_size * sizeof(int));
            ctx->statistics.child_hist = temp;
            ctx->statistics.child_hist_size =
                ctx->ref->tex_width * ctx->ref->tex_children_height;
        }
        // Accumulate this page's counts into the run totals.
        for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
            ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
        }
        for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
            ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
        }
#endif
        stopTimer(fromboardtimer);
        ctx->statistics.t_match_coords_from_board +=
            getTimerValue(fromboardtimer);
        deleteTimer(fromboardtimer);
    }
}
int flushOutput();
int addToBuffer(char *string);
char numbuffer[32];
// Resolve the MatchCoord slot for position 'qrychar' of query 'qryid'.
MatchCoord *coordForQueryChar(MatchContext *ctx, unsigned int qryid,
                              unsigned int qrychar) {
    MatchResults *res = &(ctx->results);
    MatchCoord *query_base = res->h_match_coords + res->h_coord_tex_array[qryid];
#if COALESCED_QUERIES
    // Coalesced layout interleaves the coords of a warp's queries.
    return query_base + qrychar * WARP_SIZE;
#else
    return query_base + qrychar;
#endif
}
// Converts as many pending match coordinates as fit in mem_avail into a
// MatchInfo array (*matches, calloc'd here) and allocates the matching
// Alignment output buffer (*alignments). The cursors *coord_idx, *nextqry,
// and *nextqrychar are advanced so successive calls resume where the
// previous round stopped; *match_idx and *align_idx report how many
// matches/alignments were emitted. Caller frees both buffers.
void coordsToPrintBuffers(MatchContext *ctx, ReferencePage *page,
                          MatchInfo **matches, Alignment **alignments,
                          unsigned int mem_avail, unsigned int *coord_idx,
                          unsigned int *match_idx, unsigned int *align_idx,
                          unsigned int *nextqry, unsigned int *nextqrychar) {
    unsigned int numQueries = ctx->queries->count;
    int match_length = ctx->min_match_length;
    unsigned int cidx = *coord_idx;
    unsigned int midx = 0;
    unsigned int numCoords = ctx->results.numCoords;
    unsigned int numMatches = 0;
    unsigned int numAlignments = 0;
    int DEBUG = 0;
    if (DEBUG && cidx == 0) {
        // Debug dump: print every forward match coord and its leaf count,
        // then exit.
        for (int j = 0; j < numCoords; ++j) {
            MatchCoord *coord = ctx->results.h_match_coords + j;
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                // fprintf(stdout, "node: %d\n",
                //        coord->node);
                fprintf(stdout, "node: %d leaves:%d\n", coord->node.data,
                        lookupNumLeaves(page, coord->node));
            }
        }
        exit(0);
    }
    // How much can we fit into mem_avail?
    // First pass: count matches/alignments until the memory budget or the
    // kernel grid limit would be exceeded.
    for (int j = cidx; j < numCoords; ++j) {
        MatchCoord *coord = ctx->results.h_match_coords + j;
        int queryAlignments = 0;
        int queryMatches = 0;
        // A coord participates only if it hit a tree node and is a forward
        // (non-FRMASK) match.
        if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
            int numLeaves = lookupNumLeaves(page, coord->node);
            queryAlignments += numLeaves;
            queryMatches++;
        }
        int allMatches = numMatches + queryMatches;
        int allAlignments = numAlignments + queryAlignments;
        int neededSize =
            allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
        if (neededSize > mem_avail ||
            (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
            // adding this match won't fit on the board
            break;
        }
        ++cidx;
        numMatches = allMatches;
        numAlignments = allAlignments;
    }
    MatchInfo *M = (MatchInfo *)calloc(numMatches, sizeof(MatchInfo));
    unsigned int alignmentOffset = 0;
    // Second pass: walk (query, position) pairs from the saved cursor and
    // fill in the MatchInfo records up to the counted budget.
    int qry = *nextqry;
    int qrychar = *nextqrychar;
    bool set_full = false;
    while (qry < numQueries) {
        // h_lengths_array doesn't count the 'q' at the beginning of each query
        int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
        while (qrychar < qlen) {
            if (midx >= numMatches) {
                set_full = true;
                break;
            }
            MatchCoord *coord = coordForQueryChar(ctx, qry, qrychar);
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                MatchInfo m;
                m.resultsoffset = alignmentOffset;
                m.qrystartpos = qrychar;
                m.matchnode = coord->node;
                m.edgematch = coord->edge_match_length;
                m.numLeaves = lookupNumLeaves(page, m.matchnode);
                m.queryid = qry;
                alignmentOffset += m.numLeaves;
                M[midx++] = m;
            }
            ++qrychar;
        }
        if (set_full)
            break;
        ++qry;
        qrychar = 0;
    }
    *coord_idx = cidx;
    *match_idx = midx;
    *align_idx = alignmentOffset;
    *matches = M;
    *nextqry = qry;
    *nextqrychar = qrychar;
    // NOTE(review): alignmentOffset * sizeof(Alignment) is size_t but is
    // printed with %d — consider %zu.
    fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n",
            alignmentOffset * sizeof(Alignment), numAlignments);
    *alignments =
        (struct Alignment *)calloc(alignmentOffset, sizeof(Alignment));
    // hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment));
}
// Uploads the prepared MatchInfo records, launches printKernel (one thread
// per match, BLOCKSIZE threads per block), and copies the resulting
// Alignment records back into 'alignments'. Exits the process on kernel
// failure. The argument list varies with the texture/layout compile flags.
void runPrintKernel(MatchContext *ctx, ReferencePage *page,
                    MatchInfo *h_matches, unsigned int numMatches,
                    Alignment *alignments, unsigned int numAlignments) {
    MatchInfo *d_matches;
    size_t matchesSize = numMatches * sizeof(MatchInfo);
    CUDA_MALLOC((void **)&d_matches, matchesSize);
    struct Alignment *d_alignments;
    size_t alignmentSize = numAlignments * sizeof(Alignment);
    CUDA_MALLOC((void **)&d_alignments, alignmentSize);
    CUDA_SAFE_CALL(hipMemset((void *)d_alignments, 0, alignmentSize));
    char *atimer = createTimer();
    startTimer(atimer);
    // Copy matches to card
    fprintf(stderr, "prepared %d matches %d alignments\n", numMatches,
            numAlignments);
    // NOTE(review): numAlignments * sizeof(Alignment) is size_t but printed
    // with %d — consider %zu.
    fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n",
            numAlignments * sizeof(Alignment), numAlignments);
    int DEBUG = 0;
    if (DEBUG) {
        // Debug dump of every MatchInfo record, then exit.
        for (int i = 0; i < numMatches; i++) {
            printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i,
                   h_matches[i].resultsoffset, h_matches[i].queryid,
                   h_matches[i].matchnode.data, h_matches[i].numLeaves,
                   h_matches[i].edgematch, h_matches[i].qrystartpos);
        }
        exit(0);
    }
    CUDA_SAFE_CALL(
        hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice));
    stopTimer(atimer);
    float mtime = getTimerValue(atimer);
    // Launch the kernel
    int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
    fprintf(stderr, " Calling print kernel... ");
    // Arguments wrapped in #if blocks are only passed when the matching
    // data is not available through a bound texture.
    hipLaunchKernelGGL(( printKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
        d_matches, numMatches, d_alignments,
#if COALESCED_QUERIES
        ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
        (int *)
#endif
            ctx->queries->d_tex_array,
#endif
#if !NODETEX
        (_PixelOfNode *)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
        (_PixelOfChildren *)ctx->ref->d_children_tex_array,
#endif
        ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
        page->begin, page->end, page->shadow_left, page->shadow_right,
        ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
        ,
        ctx->ref->d_node_hist, ctx->ref->d_child_hist
#endif
        );
    // Synchronize so kernel faults are surfaced by hipGetLastError below.
    hipDeviceSynchronize();
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "Kernel execution failed: %s.\n",
                hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    startTimer(atimer);
    // Copy the results back to the host
    CUDA_SAFE_CALL(hipMemcpy((void *)alignments, (void *)d_alignments,
                             alignmentSize, hipMemcpyDeviceToHost));
    hipDeviceSynchronize();
    stopTimer(atimer);
    float atime = getTimerValue(atimer);
    fprintf(stderr, "memcpy time= %f\n", atime + mtime);
    deleteTimer(atimer);
    // Cleanup
    CUDA_SAFE_CALL(hipFree(d_alignments));
    CUDA_SAFE_CALL(hipFree(d_matches));
}
// TODO: need reverse-complement printing support
//
// CPU fallback for runPrintKernel: iterates the MatchInfo records and calls
// printAlignments for each forward (non-FRMASK) match, reusing the cached
// query length while consecutive matches belong to the same query.
void runPrintOnCPU(MatchContext *ctx, ReferencePage *page, MatchInfo *h_matches,
                   unsigned int numMatches, Alignment *alignments,
                   unsigned int numAlignments) {
    unsigned int min_match_length = ctx->min_match_length;
    int *addrs = ctx->queries->h_addrs_tex_array;
    int *lengths = ctx->queries->h_lengths_array;
    char *qrychars = ctx->queries->h_tex_array;
    if (!numMatches)
        return;
    int qry = -1;
    unsigned int qrylen;
    for (int i = 0; i < numMatches; ++i) {
        MatchInfo &match = h_matches[i];
        // Matches are grouped by query; refresh the length on a new query.
        if (match.queryid != qry) {
            qry = match.queryid;
            qrylen = lengths[qry];
        }
        if (!(match.edgematch & FRMASK)) {
            printAlignments(page, alignments + match.resultsoffset,
#if COALESCED_QUERIES
                            // Coalesced layout stores addresses in int units.
                            qrychars + sizeof(int) * addrs[qry],
#else
                            qrychars + addrs[qry],
#endif
                            qrylen, match.matchnode, match.qrystartpos,
                            match.edgematch, min_match_length, 0,
                            ctx->forwardcoordinates);
        }
    }
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
// Drains all match coordinates for this reference page in bounded-memory
// rounds: each round packs as many coords as fit (coordsToPrintBuffers),
// produces alignments on the GPU or CPU, and streams them to the output
// buffer. Forward-only: asserts no reverse modes are active.
void getExactAlignments(MatchContext *ctx, ReferencePage *page, bool on_cpu) {
    assert(!ctx->reverse && !ctx->forwardreverse);
    size_t boardFreeMemory;
    size_t total_mem;
    if (!on_cpu) {
        boardMemory(&boardFreeMemory, &total_mem);
        // FIX: size_t printed with %u — use %zu.
        fprintf(stderr, "board free memory: %zu total memory: %zu\n",
                boardFreeMemory, total_mem);
    } else {
        // Pretend the CPU path has a fixed budget so rounds stay bounded.
        boardFreeMemory = 256 * 1024 * 1024;
        total_mem = boardFreeMemory;
    }
#ifdef __DEVICE_EMULATION__
    boardFreeMemory = 512 * 1024 * 1024;
#endif
    // Keep headroom for the kernel's own allocations.
    boardFreeMemory -= BREATHING_ROOM;
    // FIX: size_t printed with %u — use %zu.
    fprintf(stderr, "board free memory: %zu\n", boardFreeMemory);
    int rTotalMatches = 0;
    int rTotalAlignments = 0;
    int totalRounds = 0;
    unsigned int last_coord = ctx->results.numCoords;
    unsigned int next_coord = 0;
    unsigned int nextqry = 0;
    unsigned int nextqrychar = 0;
    int lastqry = -1;
    while (next_coord < last_coord) {
        // see how many queries will fit on the board
        totalRounds++;
        unsigned int numMatches = 0;
        unsigned int numAlignments = 0;
        MatchInfo *h_matches = NULL;
        Alignment *h_alignments = NULL;
        int coord_left = next_coord;
        char *btimer = createTimer();
        startTimer(btimer);
        coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments,
                             boardFreeMemory, &next_coord, &numMatches,
                             &numAlignments, &nextqry, &nextqrychar);
        stopTimer(btimer);
        float btime = getTimerValue(btimer);
        ctx->statistics.t_coords_to_buffers += btime;
        fprintf(stderr, "buffer prep time= %f\n", btime);
        deleteTimer(btimer);
        fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) "
                        "of %d using %d matches and %d alignments\n",
                totalRounds, coord_left, next_coord, last_coord, numMatches,
                numAlignments);
        if (numMatches == 0) {
            // FIX: coordsToPrintBuffers always callocs both buffers; the
            // bare 'continue' leaked them every empty round.
            // NOTE(review): if next_coord also failed to advance this loop
            // never terminates — confirm coordsToPrintBuffers guarantees
            // progress when nothing fits.
            free(h_matches);
            free(h_alignments);
            continue;
        }
        char buf[256];
        // assert(qryend > qrystart);
        rTotalAlignments += numAlignments;
        rTotalMatches += numMatches;
        // Periodically reset the device to work around texture-binding
        // resource exhaustion, then re-upload reference and queries.
        if (num_bind_tex_calls > 100) {
            hipDeviceReset();
            num_bind_tex_calls = 0;
            loadReference(ctx);
            loadQueries(ctx);
        }
        char *ktimer = createTimer();
        startTimer(ktimer);
        if (on_cpu) {
            runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments,
                          numAlignments);
        } else {
            runPrintKernel(ctx, page, h_matches, numMatches, h_alignments,
                           numAlignments);
        }
        stopTimer(ktimer);
        float ktime = getTimerValue(ktimer);
        ctx->statistics.t_print_kernel += ktime;
        fprintf(stderr, "print kernel time= %f\n", ktime);
        deleteTimer(ktimer);
        // Process the alignments
        char *otimer = createTimer();
        startTimer(otimer);
        for (int m = 0; m < numMatches; m++) {
            int base = h_matches[m].resultsoffset;
            for (int i = 0; i < h_matches[m].numLeaves; i++) {
                // See if there are any more left maximal alignments for this
                // match
                if (h_alignments[base + i].left_in_ref == 0) {
                    break;
                }
                // Emit a header line whenever the query changes.
                if (h_matches[m].queryid != lastqry) {
                    lastqry = h_matches[m].queryid;
                    addToBuffer("> ");
                    addToBuffer(*(ctx->queries->h_names + lastqry));
                    addToBuffer("\n");
                }
                sprintf(buf, "%d\t%d\t%d\n", h_alignments[base + i].left_in_ref,
                        h_matches[m].qrystartpos + 1,
                        h_alignments[base + i].matchlen);
                addToBuffer(buf);
            }
        }
        flushOutput();
        stopTimer(otimer);
        ctx->statistics.t_results_to_disk += getTimerValue(otimer);
        deleteTimer(otimer);
        free(h_matches);
        free(h_alignments);
        // hipHostFree((void*)h_alignments);
    }
    free(ctx->results.h_coord_tex_array);
    free(ctx->results.h_match_coords);
    ctx->results.h_coord_tex_array = NULL;
    ctx->results.h_match_coords = NULL;
    fprintf(stderr, "Finished processing %d matches and %d potential "
                    "alignments in %d rounds\n",
            rTotalMatches, rTotalAlignments, totalRounds);
}
// Reads the next block of queries from the query file that fits within
// device_mem_avail and installs it in ctx->queries; returns the number of
// queries read.
int getQueryBlock(MatchContext *ctx, size_t device_mem_avail) {
    QuerySet *queries = ctx->queries;
    char *tex = NULL;
    int *addrs = NULL;
    int *lens = NULL;
    char **names = NULL;
    unsigned int count;
    unsigned int coords;
    size_t texlen;
    fprintf(stderr, "Loading query block... ");
    char *readtimer = createTimer();
    startTimer(readtimer);
    getQueriesTexture(queries->qfile, &tex, &texlen, &addrs, &names,
                      &lens, &count, &coords, device_mem_avail,
                      ctx->min_match_length,
                      ctx->reverse || ctx->forwardreverse);
    stopTimer(readtimer);
    ctx->statistics.t_queries_from_disk += getTimerValue(readtimer);
    deleteTimer(readtimer);
    // Install the freshly-read block into the query set.
    queries->h_tex_array = tex;
    queries->h_addrs_tex_array = addrs;
    queries->h_lengths_array = lens;
    queries->h_names = names;
    queries->texlen = texlen;
    queries->count = count;
    ctx->results.numCoords = coords;
    fprintf(stderr, "done.\n");
    return count;
}
// Releases every host-side buffer owned by a query block and resets the
// set's bookkeeping so the next block can be loaded.
void destroyQueryBlock(QuerySet *queries) {
    // Per-query name strings must go before the name table itself.
    for (int i = 0; i < queries->count; ++i)
        free(queries->h_names[i]);
    free(queries->h_names);

    free(queries->h_tex_array);
    queries->h_tex_array = NULL;

    free(queries->h_addrs_tex_array);
    queries->h_addrs_tex_array = NULL;

    free(queries->h_lengths_array);
    queries->h_lengths_array = NULL;

    queries->count = 0;
    queries->texlen = 0;
}
// Clears every accumulated statistic so a fresh run starts from zero, and
// releases any tree-access histograms left over from a previous run.
void resetStats(Statistics *stats) {
    // Disk I/O timers.
    stats->t_queries_from_disk = 0.0;
    stats->t_ref_from_disk = 0.0;
    stats->t_results_to_disk = 0.0;

    // Host <-> device transfer timers.
    stats->t_queries_to_board = 0.0;
    stats->t_match_coords_to_board = 0.0;
    stats->t_match_coords_from_board = 0.0;
    stats->t_tree_to_board = 0.0;
    stats->t_ref_str_to_board = 0.0;

    // Kernel timers.
    stats->t_match_kernel = 0.0;
    stats->t_print_kernel = 0.0;

    // Tree construction / layout timers.
    stats->t_tree_construction = 0.0;
    stats->t_tree_reorder = 0.0;
    stats->t_tree_flatten = 0.0;
    stats->t_reorder_ref_str = 0.0;

    // Result post-processing timers.
    stats->t_build_coord_offsets = 0.0;
    stats->t_coords_to_buffers = 0.0;

    // Whole-run totals.
    stats->t_end_to_end = 0.0;
    stats->bp_avg_query_length = 0.0;

#if TREE_ACCESS_HISTOGRAM
    if (stats->node_hist_size) {
        free(stats->node_hist);
        stats->node_hist = NULL;
        stats->node_hist_size = 0;
    }
    if (stats->child_hist_size) {
        free(stats->child_hist);
        stats->child_hist = NULL;
        stats->child_hist_size = 0;
    }
#endif
}
// Writes run statistics as a two-line CSV (build-flag columns, then timer
// columns) to stats_filename.  Under TREE_ACCESS_HISTOGRAM it also dumps the
// node/child tree-access histograms and logs what share of accesses hit the
// top 256 texels of each table.  Any NULL filename skips that output.
void writeStatisticsFile(Statistics *stats, char *stats_filename,
                         char *node_hist_filename = NULL,
                         char *child_hist_filename = NULL) {
    if (stats_filename) {
        FILE *f = fopen(stats_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    stats_filename);
        } else {
            // Header row: build-configuration flags first, then the timers.
            fprintf(f, "Q");
            fprintf(f, ",R");
            fprintf(f, ",T");
            fprintf(f, ",m");
            fprintf(f, ",r");
            fprintf(f, ",t");
            fprintf(f, ",n");
            fprintf(f, ",Total");
            fprintf(f, ",Match kernel");
            fprintf(f, ",Print Kernel");
            fprintf(f, ",Queries to board");
            fprintf(f, ",Match coords to board");
            fprintf(f, ",Match coords from board");
            fprintf(f, ",Tree to board");
            fprintf(f, ",Ref str to board");
            fprintf(f, ",Queries from disk");
            fprintf(f, ",Ref from disk");
            fprintf(f, ",Output to disk");
            fprintf(f, ",Tree construction");
            fprintf(f, ",Tree reorder");
            fprintf(f, ",Tree flatten");
            fprintf(f, ",Ref reorder");
            fprintf(f, ",Build coord table");
            fprintf(f, ",Coords to buffers");
            fprintf(f, ",Avg qry length");
            fprintf(f, "\n");
            // Value row, in the same column order as above.
            fprintf(f, "%d", QRYTEX);
            fprintf(f, ",%d", REFTEX);
            fprintf(f, ",%d", TREETEX);
            fprintf(f, ",%d", MERGETEX);
            fprintf(f, ",%d", REORDER_REF);
            fprintf(f, ",%d", REORDER_TREE);
            fprintf(f, ",%d", RENUMBER_TREE);
            fprintf(f, ",%f", stats->t_end_to_end);
            fprintf(f, ",%f", stats->t_match_kernel);
            fprintf(f, ",%f", stats->t_print_kernel);
            fprintf(f, ",%f", stats->t_queries_to_board);
            fprintf(f, ",%f", stats->t_match_coords_to_board);
            fprintf(f, ",%f", stats->t_match_coords_from_board);
            fprintf(f, ",%f", stats->t_tree_to_board);
            fprintf(f, ",%f", stats->t_ref_str_to_board);
            fprintf(f, ",%f", stats->t_queries_from_disk);
            fprintf(f, ",%f", stats->t_ref_from_disk);
            fprintf(f, ",%f", stats->t_results_to_disk);
            fprintf(f, ",%f", stats->t_tree_construction);
            fprintf(f, ",%f", stats->t_tree_reorder);
            fprintf(f, ",%f", stats->t_tree_flatten);
            fprintf(f, ",%f", stats->t_reorder_ref_str);
            fprintf(f, ",%f", stats->t_build_coord_offsets);
            fprintf(f, ",%f", stats->t_coords_to_buffers);
            fprintf(f, ",%f", stats->bp_avg_query_length);
            fprintf(f, "\n");
            fclose(f);
        }
    }
#if TREE_ACCESS_HISTOGRAM
    // BUG FIX: this section referenced `ctx->statistics`, but no `ctx`
    // exists in this scope, so it failed to compile with the histogram flag
    // enabled.  It now uses the `stats` parameter, and the histogram files
    // are closed instead of leaked.
    if (node_hist_filename) {
        FILE *f = fopen(node_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    node_hist_filename);
        } else {
            for (unsigned int i = 0; i < stats->node_hist_size; ++i)
                fprintf(f, "%u\t%d\n", i, stats->node_hist[i]);
            fclose(f);
        }
    }
    if (child_hist_filename) {
        FILE *f = fopen(child_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    child_hist_filename);
        } else {
            for (unsigned int i = 0; i < stats->child_hist_size; ++i)
                fprintf(f, "%u\t%d\n", i, stats->child_hist[i]);
            fclose(f);
        }
    }
    // Summarize how top-heavy tree accesses are (first 256 texels).
    float total_node_hits = 0;
    float tree_top_node_hits = 0;
    float total_child_hits = 0;
    float tree_top_child_hits = 0;
    for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
        total_node_hits += stats->node_hist[i];
        if (i < 256) {
            tree_top_node_hits += stats->node_hist[i];
        }
    }
    for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
        total_child_hits += stats->child_hist[i];
        if (i < 256) {
            tree_top_child_hits += stats->child_hist[i];
        }
    }
    // BUG FIX: scale to an actual percentage (the message says "percent"
    // but a bare fraction was printed) and avoid 0/0 = NaN on empty runs.
    if (total_node_hits > 0)
        fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",
                (int)tree_top_node_hits, (int)total_node_hits,
                100.0f * tree_top_node_hits / total_node_hits);
    if (total_child_hits > 0)
        fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",
                (int)tree_top_child_hits, (int)total_child_hits,
                100.0f * tree_top_child_hits / total_child_hits);
#endif
}
// Runs the reference CPU matcher over the current query block; doRC selects
// matching the reverse complement of the queries instead of the forward
// strand.  Both paths call computeGold identically except for the direction.
void matchOnCPU(MatchContext *ctx, bool doRC) {
    // TODO: CPU matching is disabled.
    int direction = doRC ? REVERSE : FORWARD;
    computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array,
                ctx->queries->h_addrs_tex_array,
                ctx->queries->h_lengths_array,
                (PixelOfNode *)(ctx->ref->h_node_tex_array),
                (PixelOfChildren *)(ctx->ref->h_children_tex_array),
                ctx->queries->count, ctx->min_match_length, direction);
}
// Launches the device match kernel over the current query block: the grid is
// sized to cover one thread per query, with blocks of up to BLOCKSIZE
// threads.  doRC selects the (currently disabled) reverse-complement kernel.
// Launch-configuration errors are caught here via hipGetLastError();
// asynchronous execution errors surface at the caller's device synchronize.
void matchOnGPU(MatchContext *ctx, bool doRC) {
    int numQueries = ctx->queries->count;
    int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
    // Match the reverse complement of the queries to the ref
    if (doRC) {
        // TODO: GPU RC is disabled
        hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
            ctx->results.d_match_coords, ctx->queries->d_tex_array,
            ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
            numQueries, ctx->min_match_length);
    } else {
        // The argument list varies with the build flags: inputs bound as
        // textures (QRYTEX/NODETEX/CHILDTEX/REFTEX) are dropped from the
        // parameter list.
        hipLaunchKernelGGL(( mummergpuKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
            ctx->results.d_match_coords,
#if COALESCED_QUERIES
            ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
            (int *)
#endif
                ctx->queries->d_tex_array,
#endif
#if !NODETEX
            (_PixelOfNode *)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
            (_PixelOfChildren *)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
            (char *)ctx->ref->d_ref_array,
#endif
            ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
            numQueries, ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
            ,
            ctx->ref->d_node_hist, ctx->ref->d_child_hist
#endif
        );
    }
    // check if kernel execution generated an error
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "Kernel execution failed: %s.\n",
                hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Copies match results back from the device for the current context.
// NOTE(review): page_num is currently unused; transferResultsFromDevice
// pulls everything for the active context regardless of page.
void getMatchResults(MatchContext *ctx, unsigned int page_num) {
    transferResultsFromDevice(ctx);
}
// Runs the match pass for the current query block against one reference
// page, times the kernel, pulls the results back from the device, and
// releases the device-side result buffer.
void matchQueryBlockToReferencePage(MatchContext *ctx, ReferencePage *page,
                                    bool reverse_complement) {
    char *ktimer = createTimer();
    fprintf(stderr,
            "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
            ctx->queries->bytes_on_board, ctx->ref->bytes_on_board,
            ctx->results.bytes_on_board);
    startTimer(ktimer);
    if (ctx->on_cpu) {
        matchOnCPU(ctx, reverse_complement);
    } else {
        matchOnGPU(ctx, reverse_complement);
        // BUG FIX: kernel launches are asynchronous and matchOnGPU only
        // checks for launch errors; check the synchronize result too so
        // execution failures aren't silently ignored.
        hipError_t serr = hipDeviceSynchronize();
        if (hipSuccess != serr) {
            fprintf(stderr, "Kernel execution failed: %s.\n",
                    hipGetErrorString(serr));
            exit(EXIT_FAILURE);
        }
    }
    stopTimer(ktimer);

    float ktime = getTimerValue(ktimer);
    ctx->statistics.t_match_kernel += ktime;
    fprintf(stderr, "match kernel time= %f\n", ktime);
    deleteTimer(ktimer);

    getMatchResults(ctx, page->id);
    unloadResultBuffer(ctx);
}
// Matches one loaded query block against a reference page and emits its
// alignments, then frees the block's device-side query storage.
int matchSubset(MatchContext *ctx, ReferencePage *page) {
    loadQueries(ctx);
    fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n",
            ctx->queries->h_names[0],
            ctx->queries->h_names[ctx->queries->count - 1], page->begin,
            page->end);
    loadResultBuffer(ctx);

    // TODO: renable RC support by calling this twice /w reverse/fwdreverse
    // idiom.
    matchQueryBlockToReferencePage(ctx, page, false);

    // The on-device print kernel only applies on the GPU path; otherwise
    // fall back to host-side alignment output.
    bool host_side_output = !(USE_PRINT_KERNEL && !ctx->on_cpu);
    getExactAlignments(ctx, page, host_side_output);

    flushOutput();
    unloadQueries(ctx);
    return 0;
}
// Returns the number of bytes of free device memory, or a fixed pretend
// value on the CPU path.  NOTE(review): the int return type truncates on
// boards with more than ~2 GB free; callers rely on this signature, so it
// is left unchanged.
int getFreeDeviceMemory(bool on_cpu) {
    size_t free_mem = 0;
    size_t total_mem = 0;

    // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
    // will return zeroes until we do a malloc.
    int *p = NULL;
    CUDA_SAFE_CALL(hipMalloc((void **)&p, sizeof(int)));
    CUDA_SAFE_CALL(hipFree(p));
    if (!on_cpu) {
        boardMemory(&free_mem, &total_mem);
        // BUG FIX: free_mem/total_mem are size_t; %u is wrong on LP64
        // platforms -- use %zu.
        fprintf(stderr, "board free memory: %zu total memory: %zu\n",
                free_mem, total_mem);
    } else {
        total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
    }
    return free_mem;
}
// Streams every query block in the query file against one reference page,
// then unloads the page and rewinds the query file so the next page sees
// all queries again.
int matchQueriesToReferencePage(MatchContext *ctx, ReferencePage *page) {
    fprintf(stderr, "Beginning reference page %p\n", page);
    int free_mem = getFreeDeviceMemory(ctx->on_cpu);
    // Query budget: free memory minus the tree page and a safety margin so
    // allocations don't fail right at the limit.
    int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
    ctx->ref = &(page->ref);
    loadReference(ctx);
    while (getQueryBlock(ctx, available_mem)) {
        matchSubset(ctx, page);
        // NOTE(review): the "- 2" presumably discounts per-query sentinel
        // characters in the packed query texture -- confirm against
        // getQueriesTexture.
        ctx->statistics.bp_avg_query_length =
            ctx->queries->texlen / (float)(ctx->queries->count) - 2;
        destroyQueryBlock(ctx->queries);
        // After many texture binds/allocations, reset the device and rebind
        // the reference from scratch (num_bind_tex_calls is a global counter
        // incremented by the bind/alloc macros).
        if (num_bind_tex_calls > 100) {
            hipDeviceReset();
            num_bind_tex_calls = 0;
            loadReference(ctx);
        }
    }
    unloadReferenceString(ctx->ref);
    unloadReferenceTree(ctx);
    // Rewind so the next reference page re-streams all queries.
    lseek(ctx->queries->qfile, 0, SEEK_SET);
    return 0;
}
// Splits the reference into overlapping pages of roughly
// BASES_PER_TREE_PAGE bases.  Adjacent pages overlap by
// page_overlap = MAX_QUERY_LEN + 1 bases so matches spanning a page
// boundary are still found; shadow_left/shadow_right record where the
// neighbor's coordinates begin/end so duplicated matches can be filtered.
void initReferencePages(MatchContext *ctx, int *num_pages,
                        ReferencePage **pages_out) {
    // NOTE(review): the -3 presumably discounts the leading 's' and the
    // trailing "$\0" sentinels placed around the reference string --
    // confirm against buildReferenceTexture.
    unsigned int bases_in_ref = ctx->full_ref_len - 3;
    unsigned int page_size =
        BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
    unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
    fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
            num_reference_pages, bases_in_ref, page_size);
    unsigned int page_overlap = MAX_QUERY_LEN + 1;
    // calloc zeroes the array, so fields not assigned below (e.g.
    // shadow_right of a lone page) start at 0.
    ReferencePage *pages =
        (ReferencePage *)calloc(num_reference_pages, sizeof(ReferencePage));
    pages[0].begin = 1;
    pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) +
                   1; // the 1 is for the 's' at the beginning
    pages[0].shadow_left = -1;
    pages[0].id = 0;
    // Interior pages start page_overlap bases before the previous page ends.
    for (int i = 1; i < num_reference_pages - 1; ++i) {
        pages[i].begin = pages[i - 1].end - page_overlap;
        pages[i].end = pages[i].begin + page_size + page_overlap;
        pages[i - 1].shadow_right = pages[i].begin;
        pages[i].shadow_left = pages[i - 1].end;
        pages[i].id = i;
    }
    // The final page simply runs to the end of the reference.
    if (num_reference_pages > 1) {
        int last_page = num_reference_pages - 1;
        pages[last_page].begin = pages[last_page - 1].end - page_overlap;
        pages[last_page].end = ctx->full_ref_len - 1;
        pages[last_page - 1].shadow_right = pages[last_page].begin;
        pages[last_page].shadow_right = -1;
        pages[last_page].shadow_left = pages[last_page - 1].end;
        pages[last_page].id = last_page;
    }
    *pages_out = pages;
    *num_pages = num_reference_pages;
}
// Builds a suffix-tree page for each reference slice in turn and streams the
// full query set against it.  Pages are built and destroyed one at a time to
// bound device memory use.
int streamReferenceAgainstQueries(MatchContext *ctx) {
    int num_reference_pages = 0;
    ReferencePage *pages = NULL;
    initReferencePages(ctx, &num_reference_pages, &pages);

    for (int i = 0; i < num_reference_pages; ++i) {
        // Only the first page emits the optional dot/texture debug files.
        char *dotfile = (i == 0) ? ctx->dotfilename : NULL;
        char *texfile = (i == 0) ? ctx->texfilename : NULL;
        buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin,
                              pages[i].end, ctx->min_match_length, dotfile,
                              texfile, &(ctx->statistics));
        matchQueriesToReferencePage(ctx, &pages[i]);
        destroyReference(&(pages[i].ref));
    }

    free(pages);
    return 0;
}
// Entry point: matches the whole query file against the whole reference,
// accumulating statistics and writing them out at the end.
extern "C" int matchQueries(MatchContext *ctx) {
    // The device kernels reinterpret tree texels as uint4; make sure the
    // host-side structs still line up.
    assert(sizeof(struct PixelOfNode) == sizeof(uint4));
    assert(sizeof(struct PixelOfChildren) == sizeof(uint4));

#if TREE_ACCESS_HISTOGRAM
    ctx->statistics.node_hist_size = 0;
    ctx->statistics.child_hist_size = 0;
#endif
    resetStats(&(ctx->statistics));

    // Time the entire end-to-end run.
    char *totalTimer = createTimer();
    startTimer(totalTimer);

    fprintf(stderr, "Streaming reference pages against all queries\n");
    int ret = streamReferenceAgainstQueries(ctx);

    stopTimer(totalTimer);
    ctx->statistics.t_end_to_end += getTimerValue(totalTimer);
    deleteTimer(totalTimer);

    writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out",
                        "child_hist.out");
    return ret;
}
| 02c2733e2cb7690376d18448efae7a67a73f65e8.cu | // Includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <cuda.h>
#include <vector_types.h>
// includes, kernels
#include "common.cu"
#include "mummergpu.h"
#include "mummergpu_kernel.cu"
// When nonzero, alignment output is produced by the on-device print kernel
// rather than the host-side fallback (see matchSubset).
int USE_PRINT_KERNEL = 1;
// Device-memory headroom deliberately left unallocated so buffer/texture
// allocations don't fail right at the limit.
#define BREATHING_ROOM (16 * 1024 * 1024)
// Reference bases covered by a single suffix-tree page.
#define BASES_PER_TREE_PAGE 8388608
//#define BASES_PER_TREE_PAGE 7000000
// Threads per block for the match kernels.
#define BLOCKSIZE 256
// Count of CUDA runtime calls made through CUDA_SAFE_CALL (diagnostics).
unsigned int cuda_calls = 0;
void trap_dbg() { fprintf(stderr, "Trapped\n"); }
// Checked CUDA runtime call: on failure, report file/line, invoke the
// trap_dbg() breakpoint hook, and abort.  Every call is also counted in
// cuda_calls.
#define CUDA_SAFE_CALL(call)                                                  \
    do {                                                                      \
        cuda_calls++;                                                         \
        cudaError err = call;                                                 \
        if (cudaSuccess != err) {                                             \
            fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n",\
                    __FILE__, __LINE__, err, cudaGetErrorString(err));        \
            trap_dbg();                                                       \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)
// Checked CUDA driver-API call (no device synchronization implied).
#define CU_SAFE_CALL_NO_SYNC(call)                                            \
    do {                                                                      \
        CUresult err = call;                                                  \
        if (CUDA_SUCCESS != err) {                                            \
            fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n",\
                    err, __FILE__, __LINE__);                                 \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)
// Driver-API device initialization: selects the first device with compute
// capability >= 1.0 into cuDevice, exiting if no CUDA device exists.
#define CUT_DEVICE_INIT_DRV(cuDevice)                                         \
    do {                                                                      \
        cuDevice = 0;                                                         \
        int deviceCount = 0;                                                  \
        CUresult err = cuInit(0);                                             \
        if (CUDA_SUCCESS == err)                                              \
            CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount));             \
        if (deviceCount == 0) {                                               \
            fprintf(stderr, "There is no device.\n");                         \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
        int dev;                                                              \
        for (dev = 0; dev < deviceCount; ++dev) {                             \
            int major, minor;                                                 \
            CU_SAFE_CALL_NO_SYNC(                                             \
                cuDeviceComputeCapability(&major, &minor, dev));              \
            if (major >= 1)                                                   \
                break;                                                        \
        }                                                                     \
        if (dev == deviceCount) {                                             \
            fprintf(stderr, "There is no device supporting CUDA.\n");         \
            exit(EXIT_FAILURE);                                               \
        } else                                                                \
            CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev));                \
    } while (0)
// Number of texture binds / device allocations since the last device reset;
// when it passes a threshold the host code resets the device and rebinds
// the reference from scratch.
unsigned int num_bind_tex_calls = 0;
// Checked linear-memory texture bind; counted in num_bind_tex_calls.
#define BIND_TEX(offset, tex, arr, desc, len)                                 \
    do {                                                                      \
        CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len));         \
        ++num_bind_tex_calls;                                                 \
    } while (0)
// Checked cudaArray texture bind; counted in num_bind_tex_calls.
#define BIND_TEX_ARRAY(tex, arr, desc)                                        \
    do {                                                                      \
        CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc));               \
        ++num_bind_tex_calls;                                                 \
    } while (0)
// NOTE(review): the allocation wrappers below are unchecked (no
// CUDA_SAFE_CALL) but still feed num_bind_tex_calls so the periodic device
// reset fires -- confirm whether leaving them unchecked is intentional.
#define CUDA_MALLOC(ptr, size)                                                \
    do {                                                                      \
        cudaMalloc(ptr, size);                                                \
        ++num_bind_tex_calls;                                                 \
    } while (0)
#define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows)                   \
    do {                                                                      \
        cudaMallocPitch(ptr, out_pitch, rowsize, numrows);                    \
        ++num_bind_tex_calls;                                                 \
    } while (0)
#define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows)                             \
    do {                                                                      \
        cudaMallocArray(ptr, desc, pitch, rows);                              \
        ++num_bind_tex_calls;                                                 \
    } while (0)
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
extern "C" void computeGold(MatchResults *results, char *refstr, char *queries,
int *queryAddrs, int *queryLengths,
PixelOfNode *nodeTexture,
PixelOfChildren *childrenTexture, int numQueries,
int mismatch_length, int rc);
extern "C" void getReferenceString(const char *filename, char **refstr,
size_t *reflen);
extern "C" void
createTreeTexture(const char *filename, PixelOfNode **nodeTexture,
PixelOfChildren **childrenTexture, unsigned int *width,
unsigned int *node_height, unsigned int *children_height,
AuxiliaryNodeData **aux_data, int *num_match_coords,
int min_match_len, Statistics *statistics,
const char *dotfilename, const char *texfilename);
extern "C" void getQueriesTexture(int qfile, char **queryTexture,
size_t *queryLength, int **queryAddrs,
char ***queryNames, int **queryLengths,
unsigned int *numQueries,
unsigned int *num_match_coords,
unsigned int device_memory_avail,
int min_match_length, bool rc);
extern "C" int lookupNumLeaves(ReferencePage *page, TextureAddress addr);
void printAlignments(ReferencePage *page, Alignment *alignments, char *query,
int qrylen, TextureAddress nodeid, int qrypos,
int edge_depth, int min_match, bool rc,
bool forwardcoordinates);
int countLeafNodes(int nodeid);
extern "C" void mapQueriesEndToEnd(MatchContext *ctx, ReferencePage *page,
MatchInfo *h_matches,
unsigned int numMatches,
Alignment *h_alignments,
unsigned int numAligments);
// Allocates a zero-initialized Timer_t and returns it as an opaque char*
// handle (callers never inspect it; see startTimer/stopTimer/getTimerValue).
char *createTimer() {
    return (char *)calloc(1, sizeof(struct Timer_t));
}
// Records the current wall-clock time as the timer's start point.
void startTimer(char *ptr) {
    gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL);
}
// Records the current wall-clock time as the timer's end point.
void stopTimer(char *ptr) {
    gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL);
}
// Elapsed milliseconds between startTimer and stopTimer for this handle.
// A timer that was never stopped is stopped on first read; a NULL handle
// logs a warning and reports 0.
float getTimerValue(char *ptr) {
    Timer_t *timer = (Timer_t *)ptr;
    if (timer == NULL) {
        fprintf(stderr, "Uninitialized timer!!!\n");
        return 0.0;
    }
    // end_m is zeroed by createTimer, so tv_sec == 0 means "never stopped".
    if (timer->end_m.tv_sec == 0) {
        stopTimer(ptr);
    }
    double elapsed_ms =
        1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) +
        0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec);
    return (float)elapsed_ms;
}
void deleteTimer(char *ptr) { free((Timer_t *)ptr); }
// Loads the reference sequence from disk into ref->str / ref->len and
// accumulates the read time on the reference.  Returns 0 on success, -1 on
// bad arguments.
extern "C" int createReference(const char *fromFile, Reference *ref) {
    if (fromFile == NULL || ref == NULL)
        return -1;

    char *diskTimer = createTimer();
    startTimer(diskTimer);
    getReferenceString(fromFile, &(ref->str), &(ref->len));
    stopTimer(diskTimer);
    ref->t_load_from_disk += getTimerValue(diskTimer);
    deleteTimer(diskTimer);

    return 0;
}
// Frees every host-side buffer owned by a Reference and clears its string
// fields so it reads as empty afterwards.
extern "C" int destroyReference(Reference *ref) {
    free(ref->h_node_tex_array);
    free(ref->h_children_tex_array);
#if REORDER_REF
    free(ref->h_ref_array);
#endif
    free(ref->aux_data);
#if TREE_ACCESS_HISTOGRAM
    free(ref->h_node_hist);
    free(ref->h_child_hist);
#endif
    free(ref->str);
    ref->str = NULL;
    ref->len = 0;
    return 0;
}
// Opens the query file read-only and stores the descriptor on the query
// set.  Exits the process if the file cannot be opened.
extern "C" int createQuerySet(const char *fromFile, QuerySet *queries) {
    fprintf(stderr, "Opening %s...\n", fromFile);

    int fd = open(fromFile, O_RDONLY);
    if (fd == -1) {
        fprintf(stderr, "Can't open %s: %d\n", fromFile, errno);
        exit(1);
    }

    queries->qfile = fd;
    return 0;
}
// Closes the query file descriptor opened by createQuerySet.
// NOTE(review): a descriptor value of 0 is treated as "not open" here.
extern "C" int destroyQuerySet(QuerySet *queries) {
    if (queries->qfile)
        close(queries->qfile);
    return 0;
}
extern "C" void printStringForError(int err) {}
// Wires up a MatchContext from pre-built components and run options.
// Pure field initialization; no allocation happens here.
// NOTE: the `matches` parameter is currently unused.
extern "C" int createMatchContext(Reference *ref, QuerySet *queries,
                                  MatchResults *matches, bool on_cpu,
                                  int min_match_length, char *stats_file,
                                  bool reverse, bool forwardreverse,
                                  bool forwardcoordinates, bool showQueryLength,
                                  char *dotfilename, char *texfilename,
                                  MatchContext *ctx) {
    // Input data.
    ctx->ref = ref;
    ctx->full_ref = ref->str;
    ctx->full_ref_len = ref->len;
    ctx->queries = queries;

    // Matching options.
    ctx->on_cpu = on_cpu;
    ctx->min_match_length = min_match_length;
    ctx->reverse = reverse;
    ctx->forwardreverse = forwardreverse;

    // Output options.
    ctx->forwardcoordinates = forwardcoordinates;
    ctx->show_query_length = showQueryLength;
    ctx->stats_file = stats_file;
    ctx->dotfilename = dotfilename;
    ctx->texfilename = texfilename;

    return 0;
}
// Tears down a match context: frees the full reference string and closes
// the query set.  NOTE(review): the Reference's own buffers are
// deliberately not destroyed here (see the commented-out call); confirm
// who owns them at shutdown.
extern "C" int destroyMatchContext(MatchContext *ctx) {
    free(ctx->full_ref);
    // destroyReference(ctx->ref);
    destroyQuerySet(ctx->queries);
    return 0;
}
// Builds the suffix-tree texture for the reference slice [begin, end):
// copies the slice into ref->str bracketed by a leading 's' and a trailing
// "$" sentinel, builds the node/children texels via createTreeTexture, and
// (under REORDER_REF) lays the reference string out in the blocked order
// the kernels expect.  `statistics` may be NULL; timings are then skipped.
void buildReferenceTexture(Reference *ref, char *full_ref, size_t begin,
                           size_t end, int min_match_len, char *dotfilename,
                           char *texfilename, Statistics *statistics) {
    fprintf(stderr, "Building reference texture...\n");
    PixelOfNode *nodeTexture = NULL;
    PixelOfChildren *childrenTexture = NULL;
    unsigned int width = 0;
    unsigned int node_height = 0;
    unsigned int children_height = 0;
    AuxiliaryNodeData *aux_data = NULL;
    int num_nodes;
    char *loadreftimer = createTimer();
    startTimer(loadreftimer);
    // Layout: str[0]='s', str[1..len-3] = reference slice,
    // str[len-2]='$', str[len-1]='\0'.
    ref->len = end - begin + 3;
    ref->str = (char *)malloc(ref->len);
    ref->str[0] = 's';
    strncpy(ref->str + 1, full_ref + begin, ref->len - 3);
    strcpy(ref->str + ref->len - 2, "$");
    stopTimer(loadreftimer);
    // BUG FIX: `statistics` is NULL-checked elsewhere in this function;
    // guard this dereference the same way.
    if (statistics)
        statistics->t_ref_from_disk +=
            getTimerValue(loadreftimer) + ref->t_load_from_disk;
    deleteTimer(loadreftimer);
    createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width,
                      &node_height, &children_height, &aux_data, &num_nodes,
                      min_match_len, statistics, dotfilename, texfilename);
    ref->h_node_tex_array = nodeTexture;
    ref->h_children_tex_array = childrenTexture;
    ref->tex_width = width;
    ref->tex_node_height = node_height;
    ref->tex_children_height = children_height;
#if TREE_ACCESS_HISTOGRAM
    ref->h_node_hist = (int *)calloc(width * node_height, sizeof(int));
    ref->h_child_hist = (int *)calloc(width * children_height, sizeof(int));
#endif
    ref->aux_data = aux_data;
    ref->num_nodes = num_nodes;
    ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) +
                          (width * children_height * sizeof(PixelOfChildren));
    // BUG FIX: bytes_on_board is printed with %ld elsewhere in this file;
    // %d truncates/misreads size-like values.
    fprintf(stderr, "This tree will need %ld bytes on the board\n",
            (long)ref->bytes_on_board);
#if REORDER_REF
    char *reordertimer = createTimer();
    startTimer(reordertimer);
    unsigned int refpitch = ref->pitch = 65536;
    int numrows = ceil(ref->len / ((float)refpitch));
    int blocksize = 4;
    numrows += blocksize;
    int refstrsize = numrows * refpitch;
    ref->h_ref_array = (char *)malloc(refstrsize);
    ref->bytes_on_board += refstrsize;
    fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize);
    // Pad the whole buffer so cells past the reference end hold a non-base
    // character.
    int z_max = numrows * refpitch;
    for (int z = 0; z < z_max; z++) {
        ref->h_ref_array[z] = 'Z';
    }
    int x, y;
    int maxx = 0, maxy = 0;
    size_t reflen = ref->len;
    char *refstr = ref->str;
    // Blocked reorder: consecutive reference characters are distributed
    // across blocksize rows in stripes of refpitch columns.
    int block_dim = refpitch * blocksize;
    for (int i = 0; i < reflen; i++) {
        int bigx = i % (block_dim); // ref string reorder
        int bigy = i / (block_dim);
        y = bigy * blocksize + bigx % blocksize;
        x = bigx / blocksize;
        // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]);
        assert(x < refpitch);
        assert(y < numrows);
        ref->h_ref_array[y * refpitch + x] = refstr[i];
        if (x > maxx) {
            maxx = x;
        }
        if (y > maxy) {
            maxy = y;
        }
    }
    if ((maxx >= refpitch) || (maxy >= numrows)) {
        fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n",
                maxx, refpitch, maxy, numrows);
        exit(1);
    }
    stopTimer(reordertimer);
    if (statistics)
        statistics->t_reorder_ref_str += getTimerValue(reordertimer);
    deleteTimer(reordertimer);
#else
    // BUG FIX: ref->len is size_t; print via %ld with an explicit cast.
    fprintf(stderr, "The refstr requires %ld bytes\n", (long)ref->len);
    ref->bytes_on_board += ref->len;
#endif
}
// Reports free/total device memory via the driver API.  Under device
// emulation, fixed values are returned because cuMemGetInfo is unavailable.
void boardMemory(size_t *free_mem, size_t *total_mem) {
    // The emulator doesn't allow calls to cuMemGetInfo
#ifdef __DEVICE_EMULATION__
    *free_mem = 512 * 1024 * 1024;
    *total_mem = 768 * 1024 * 1024;
#else
    CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem));
#endif
}
// Copies the (possibly reordered) reference string onto the device and,
// when REFTEX is set, binds it to the `reftex` texture.  On the CPU path
// the device pointer is left NULL.
void loadReferenceTexture(MatchContext *ctx) {
    Reference *ref = ctx->ref;
    // NOTE(review): ref->pitch is only assigned on the REORDER_REF path of
    // buildReferenceTexture -- confirm it is initialized before use here
    // in non-REORDER_REF builds.
    int numrows = ceil(ref->len / ((float)ref->pitch));
    int blocksize = 4;
    numrows += blocksize;
    cudaChannelFormatDesc refTextureDesc =
        cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned);
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
#if REFTEX
#if REORDER_REF
        // Texture path, reordered: 2D cudaArray of numrows x pitch bytes.
        CUDA_MALLOC_ARRAY((cudaArray **)(&ref->d_ref_array), &refTextureDesc,
                          ref->pitch, numrows);
        CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray *)(ref->d_ref_array), 0, 0,
                                         ref->h_ref_array, numrows * ref->pitch,
                                         cudaMemcpyHostToDevice));
        reftex.addressMode[0] = cudaAddressModeClamp;
        reftex.addressMode[1] = cudaAddressModeClamp;
        reftex.filterMode = cudaFilterModePoint;
        reftex.normalized = false;
        BIND_TEX_ARRAY(reftex, (cudaArray *)ref->d_ref_array, refTextureDesc);
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        // Texture path, linear: 1D device buffer bound to reftex.
        CUDA_MALLOC((void **)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(cudaMemcpy((void *)(ref->d_ref_array), ref->str,
                                  ref->len, cudaMemcpyHostToDevice));
        reftex.addressMode[0] = cudaAddressModeClamp;
        reftex.filterMode = cudaFilterModePoint;
        reftex.normalized = false; // access with normalized texture coordinates
        cudaChannelFormatDesc refDesc =
            cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
        BIND_TEX(0, reftex, (void *)(ref->d_ref_array), refDesc, ref->len);
        ctx->ref->bytes_on_board += ref->len;
#endif
#else
#if REORDER_REF
        // No texture, reordered: pitched device memory + 2D copy.
        size_t refpitch;
        CUDA_MALLOC_PITCH((void **)(&ref->d_ref_array), &refpitch,
                          ref->pitch * sizeof(char), numrows);
        CUDA_SAFE_CALL(cudaMemcpy2D(
            (ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch,
            ref->pitch * sizeof(char), numrows, cudaMemcpyHostToDevice));
        ctx->ref->bytes_on_board += numrows * ref->pitch;
#else
        // No texture, linear: plain device buffer.
        CUDA_MALLOC((void **)(&ref->d_ref_array), ref->len);
        CUDA_SAFE_CALL(cudaMemcpy((void *)(ref->d_ref_array), ref->str,
                                  ref->len, cudaMemcpyHostToDevice));
        ctx->ref->bytes_on_board += ref->len;
#endif
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_ref_str_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    } else {
        ref->d_ref_array = NULL;
    }
}
// Unbinds the reference texture (when bound) and frees the device-side
// reference string.  The storage type mirrors loadReferenceTexture: a
// cudaArray when both REORDER_REF and REFTEX are set, linear memory
// otherwise.
void unloadReferenceString(Reference *ref) {
#if REFTEX
    CUDA_SAFE_CALL(cudaUnbindTexture(reftex));
#endif
#if REORDER_REF && REFTEX
    CUDA_SAFE_CALL(cudaFreeArray((cudaArray *)(ref->d_ref_array)));
#else
    CUDA_SAFE_CALL(cudaFree((ref->d_ref_array)));
#endif
    ref->d_ref_array = NULL;
}
// Unbinds and frees the device-side node/children tree storage and the
// optional access histograms.  Storage types mirror loadReference:
// cudaArrays under REORDER_TREE with the texture flags, linear memory
// otherwise.
void unloadReferenceTree(MatchContext *ctx) {
    Reference *ref = ctx->ref;
#if REORDER_TREE
    // Unload nodetex
#if NODETEX
    CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
    CUDA_SAFE_CALL(cudaFreeArray((cudaArray *)(ref->d_node_tex_array)));
#else
    CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
#endif
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
        CUDA_SAFE_CALL(cudaFreeArray((cudaArray *)(ref->d_children_tex_array)));
#else
        CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
#endif
    }
    ref->d_children_tex_array = NULL;
#else
#if NODETEX
    CUDA_SAFE_CALL(cudaUnbindTexture(nodetex));
#endif
    CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array));
    ref->d_node_tex_array = NULL;
    // Unload childrentex
    if (ref->d_children_tex_array) {
#if CHILDTEX
        CUDA_SAFE_CALL(cudaUnbindTexture(childrentex));
#endif
        CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array));
        ref->d_children_tex_array = NULL;
    }
#endif
#if TREE_ACCESS_HISTOGRAM
    CUDA_SAFE_CALL(cudaFree(ref->d_node_hist));
    ref->d_node_hist = NULL;
    CUDA_SAFE_CALL(cudaFree(ref->d_child_hist));
    ref->d_child_hist = NULL;
#endif
}
// Loads a tree and text for [begin, end) in the reference onto the device:
// the reference string (via loadReferenceTexture), the node and children
// texels (2D cudaArrays / pitched memory under REORDER_TREE, linear memory
// otherwise), optional access histograms, and a cached copy of the top of
// the tree in constant memory for the TWO_LEVEL_* fast paths.  On the CPU
// path the device pointers are left NULL.
void loadReference(MatchContext *ctx) {
    Reference *ref = ctx->ref;
    ref->bytes_on_board = 0;
    loadReferenceTexture(ctx);
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        // node texels
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode));
        // children texels
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren);
#if REORDER_TREE
#if NODETEX
        cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(
            32, 32, 32, 32, cudaChannelFormatKindUnsigned);
        CUDA_MALLOC_ARRAY((cudaArray **)(&ref->d_node_tex_array),
                          &nodeTextureDesc, ref->tex_width,
                          ref->tex_node_height);
        CUDA_SAFE_CALL(cudaMemcpyToArray(
            (cudaArray *)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array,
            ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode),
            cudaMemcpyHostToDevice));
        nodetex.addressMode[0] = cudaAddressModeClamp;
        nodetex.addressMode[1] = cudaAddressModeClamp;
        nodetex.filterMode = cudaFilterModePoint;
        nodetex.normalized =
            false; // access with normalized texture coordinates
        BIND_TEX_ARRAY(nodetex, (cudaArray *)ref->d_node_tex_array,
                       nodeTextureDesc);
#else
        size_t nodepitch;
        CUDA_MALLOC_PITCH((void **)(&ref->d_node_tex_array), &nodepitch,
                          ref->tex_width * sizeof(PixelOfNode),
                          ref->tex_node_height);
        // BUG FIX: the source pitch must describe the densely packed host
        // array (row = tex_width * sizeof(PixelOfNode)), not the device
        // pitch returned by cudaMallocPitch; they differ whenever the
        // device pads rows.
        CUDA_SAFE_CALL(cudaMemcpy2D(
            (ref->d_node_tex_array), nodepitch, ref->h_node_tex_array,
            ref->tex_width * sizeof(PixelOfNode),
            ref->tex_width * sizeof(PixelOfNode),
            ref->tex_node_height, cudaMemcpyHostToDevice));
#endif
        if (ref->tex_children_height) {
#if CHILDTEX
            cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(
                32, 32, 32, 32, cudaChannelFormatKindUnsigned);
            CUDA_MALLOC_ARRAY((cudaArray **)(&ref->d_children_tex_array),
                              &childrenTextureDesc, ref->tex_width,
                              ref->tex_children_height);
            CUDA_SAFE_CALL(
                cudaMemcpyToArray((cudaArray *)(ref->d_children_tex_array), 0,
                                  0, ref->h_children_tex_array,
                                  ref->tex_width * ref->tex_children_height *
                                      sizeof(PixelOfChildren),
                                  cudaMemcpyHostToDevice));
            childrentex.addressMode[0] = cudaAddressModeClamp;
            childrentex.addressMode[1] = cudaAddressModeClamp;
            childrentex.filterMode = cudaFilterModePoint;
            childrentex.normalized =
                false; // access with normalized texture coordinates
            BIND_TEX_ARRAY(childrentex,
                           (cudaArray *)(ref->d_children_tex_array),
                           childrenTextureDesc);
#else
            size_t childpitch;
            CUDA_MALLOC_PITCH((void **)(&ref->d_children_tex_array),
                              &childpitch,
                              ref->tex_width * sizeof(PixelOfChildren),
                              ref->tex_children_height);
            // BUG FIX: source pitch and row width must use
            // PixelOfChildren and the host row size, not the device pitch
            // / sizeof(PixelOfNode) as before.
            CUDA_SAFE_CALL(cudaMemcpy2D(
                (ref->d_children_tex_array), childpitch,
                ref->h_children_tex_array,
                ref->tex_width * sizeof(PixelOfChildren),
                ref->tex_width * sizeof(PixelOfChildren),
                ref->tex_children_height, cudaMemcpyHostToDevice));
#endif
        }
#if TREE_ACCESS_HISTOGRAM
        // node hist
        ref->bytes_on_board +=
            ref->tex_width * ref->tex_node_height * sizeof(int);
        CUDA_MALLOC((void **)(&ref->d_node_hist),
                    ref->tex_width * ref->tex_node_height * sizeof(int));
        CUDA_SAFE_CALL(
            cudaMemset((ref->d_node_hist), 0,
                       ref->tex_width * ref->tex_node_height * sizeof(int)));
        if (ref->tex_children_height) {
            // children hist
            ref->bytes_on_board +=
                ref->tex_width * ref->tex_children_height * sizeof(int);
            fprintf(stderr, "after child_hist ref->bytes_on_board:%ld\n",
                    ref->bytes_on_board);
            CUDA_MALLOC((void **)(&ref->d_child_hist),
                        ref->tex_width * ref->tex_children_height *
                            sizeof(int));
            CUDA_SAFE_CALL(cudaMemset(
                (ref->d_child_hist), 0,
                ref->tex_width * ref->tex_children_height * sizeof(int)));
        }
#endif
#else // NO TREE REORDERING
        // Node tex, 1-dimensional
        CUDA_MALLOC((void **)(&ref->d_node_tex_array),
                    ref->tex_node_height * sizeof(PixelOfNode));
        CUDA_SAFE_CALL(cudaMemcpy((ref->d_node_tex_array),
                                  ref->h_node_tex_array,
                                  ref->tex_node_height * sizeof(PixelOfNode),
                                  cudaMemcpyHostToDevice));
#if NODETEX
        cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(
            32, 32, 32, 32, cudaChannelFormatKindUnsigned);
        nodetex.addressMode[0] = cudaAddressModeClamp;
        nodetex.filterMode = cudaFilterModePoint;
        nodetex.normalized =
            false; // access with normalized texture coordinates
        BIND_TEX(0, nodetex, (void *)(ref->d_node_tex_array), nodeTextureDesc,
                 ref->tex_node_height * sizeof(PixelOfNode));
#endif
        if (ref->tex_children_height) {
            // Child tex, 1-dimensional
            CUDA_MALLOC((void **)(&ref->d_children_tex_array),
                        ref->tex_children_height * sizeof(PixelOfChildren));
            CUDA_SAFE_CALL(cudaMemcpy(
                (ref->d_children_tex_array), ref->h_children_tex_array,
                ref->tex_children_height * sizeof(PixelOfChildren),
                cudaMemcpyHostToDevice));
#if CHILDTEX
            cudaChannelFormatDesc childTextureDesc = cudaCreateChannelDesc(
                32, 32, 32, 32, cudaChannelFormatKindUnsigned);
            childrentex.addressMode[0] = cudaAddressModeClamp;
            childrentex.filterMode = cudaFilterModePoint;
            childrentex.normalized =
                false; // access with normalized texture coordinates
            BIND_TEX(0, childrentex, (void *)(ref->d_children_tex_array),
                     childTextureDesc,
                     ref->tex_children_height * sizeof(PixelOfChildren));
#endif
        }
#if TREE_ACCESS_HISTOGRAM
        ref->bytes_on_board += ref->tex_node_height * sizeof(int);
        CUDA_MALLOC((void **)(&ref->d_node_hist),
                    ref->tex_node_height * sizeof(int));
        CUDA_SAFE_CALL(cudaMemset((ref->d_node_hist), 0,
                                  ref->tex_node_height * sizeof(int)));
        if (ref->tex_children_height) {
            ref->bytes_on_board += ref->tex_children_height * sizeof(int);
            CUDA_MALLOC((void **)(&ref->d_child_hist),
                        ref->tex_children_height * sizeof(int));
            CUDA_SAFE_CALL(cudaMemset((ref->d_child_hist), 0,
                                      ref->tex_children_height * sizeof(int)));
        }
#endif
#endif
#if TWO_LEVEL_NODE_TREE
        // Cache the first NODE_THRESH node texels in constant memory
        // (node_tree_top) so accesses near the tree root skip the texture.
        PixelOfNode node_buf[NODE_THRESH];
        memset(node_buf, 0, sizeof(node_buf));
        for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes);
             ++i) {
            TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
            myaddress.x &= 0x7FF;
            myaddress.x *= 2;
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[loc];
#elif REORDER_TREE
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[loc];
#elif MERGETEX
            node_buf[i] =
                ((PixelOfNode *)(ref->h_node_tex_array))[myaddress.x * 2];
#else
            node_buf[i] = ((PixelOfNode *)(ref->h_node_tex_array))[myaddress.x];
#endif
        }
        CUDA_SAFE_CALL(
            cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf)));
#endif
#if TWO_LEVEL_CHILD_TREE
        // Same caching trick for the children texels (child_tree_top).
        PixelOfChildren child_buf[CHILD_THRESH];
        memset(child_buf, 0, sizeof(child_buf));
        for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes);
             ++i) {
            TextureAddress myaddress(id2addr(i));
#if MERGETEX && REORDER_TREE
            myaddress.x &= 0x7FF;
            myaddress.x *= 2;
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            child_buf[i] =
                ((PixelOfChildren *)(ref->h_node_tex_array))[loc + 1];
#elif REORDER_TREE
            int loc = myaddress.x + myaddress.y * MAX_TEXTURE_DIMENSION;
            // BUG FIX: the Reference struct has no `h_children` member used
            // anywhere else in this file; every other branch reads
            // h_children_tex_array, so use it here too.
            child_buf[i] =
                ((PixelOfChildren *)(ref->h_children_tex_array))[loc];
#elif MERGETEX
            child_buf[i] = ((
                PixelOfChildren *)(ref->h_node_tex_array))[myaddress.x * 2 + 1];
#else
            child_buf[i] =
                ((PixelOfChildren *)(ref->h_children_tex_array))[myaddress.x];
#endif
        }
        CUDA_SAFE_CALL(
            cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf)));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_tree_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
        fprintf(stderr, "done\n");
    } else {
        ref->d_node_tex_array = NULL;
        ref->d_children_tex_array = NULL;
    }
}
// Report (to stderr) the span of query names contained in this query block.
void dumpQueryBlockInfo(QuerySet *queries) {
    const char *first_name = queries->h_names[0];
    const char *last_name = queries->h_names[queries->count - 1];
    fprintf(stderr, "\tProcessing queries %s to %s\n", first_name, last_name);
}
// Copy the current query block (flat text array, per-query start offsets,
// and per-query lengths) to the device, optionally binding the text to a
// texture. On the CPU path, device pointers are set to NULL instead.
// Updates queries->bytes_on_board and the t_queries_to_board statistic.
void loadQueries(MatchContext *ctx) {
    QuerySet *queries = ctx->queries;
    queries->bytes_on_board = 0;
    unsigned int numQueries = queries->count;
    if (!ctx->on_cpu) {
        fprintf(stderr, "Allocating device memory for queries... ");
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        dumpQueryBlockInfo(queries);
        // Query text: one flat char array covering the whole block.
        CUDA_MALLOC((void **)&queries->d_tex_array, queries->texlen);
        queries->bytes_on_board += queries->texlen;
        // NOTE(review): the copy source is offset by h_addrs_tex_array[0]
        // while texlen bytes are copied -- assumes the first query begins at
        // the first used byte of h_tex_array; confirm against the loader.
        CUDA_SAFE_CALL(
            cudaMemcpy((void *)queries->d_tex_array,
                       queries->h_tex_array + queries->h_addrs_tex_array[0],
                       queries->texlen, cudaMemcpyHostToDevice));
#if QRYTEX
        // Bind the query text to a 1-D 8-bit unsigned texture for the kernel.
        qrytex.addressMode[0] = cudaAddressModeClamp;
        qrytex.filterMode = cudaFilterModePoint;
        qrytex.normalized = false; // access with normalized texture coordinates
        cudaChannelFormatDesc qryDesc =
            cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
        BIND_TEX(0, qrytex, (void *)(queries->d_tex_array), qryDesc,
                 queries->texlen);
#endif
        // Per-query start offsets into the text array.
        CUDA_MALLOC((void **)&queries->d_addrs_tex_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy(
            (void *)queries->d_addrs_tex_array, queries->h_addrs_tex_array,
            numQueries * sizeof(int), cudaMemcpyHostToDevice));
        // Per-query lengths.
        CUDA_MALLOC((void **)&queries->d_lengths_array,
                    numQueries * sizeof(int));
        queries->bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy(
            (void *)queries->d_lengths_array, queries->h_lengths_array,
            numQueries * sizeof(int), cudaMemcpyHostToDevice));
        stopTimer(toboardtimer);
        ctx->statistics.t_queries_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
        fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board);
    } else {
        // CPU path: nothing lives on the board.
        queries->d_addrs_tex_array = NULL;
        queries->d_tex_array = NULL;
        queries->d_lengths_array = NULL;
        fprintf(stderr, " allocated %ld bytes\n",
                2 * numQueries * sizeof(int) + queries->texlen);
    }
}
// Free the device-side query buffers allocated by loadQueries and reset the
// bookkeeping. cudaFree(NULL) is a no-op, so this is safe on the CPU path.
void unloadQueries(MatchContext *ctx) {
    QuerySet *queries = ctx->queries;
    CUDA_SAFE_CALL(cudaFree(queries->d_tex_array));
    queries->d_tex_array = NULL;
    CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array));
    queries->d_addrs_tex_array = NULL;
    CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array));
    queries->d_lengths_array = NULL;
    queries->bytes_on_board = 0;
}
// Computes the location of the first MatchCoord for a given query. NOTE:
// Do NOT use this function if COALESCED_QUERIES == 1
// Map a query's text offset to the index of its first MatchCoord: each
// earlier query contributes (match_length + 1) fewer coords than characters.
inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) {
    const int overhead_per_query = match_length + 1;
    return qry_addrs - overhead_per_query * qryid;
}
// Construct the offset table for a set of queries. This table will be used
// by the printing functions, and if COALESCED_QUERIES == 1, by the matching
// kernel.
// Construct the per-query MatchCoord offset table. The table is used by the
// printing functions and, if COALESCED_QUERIES == 1, by the matching kernel.
// On return, *h_coord_offset_array owns a calloc'd array of one int per
// query and *num_coords holds the total number of MatchCoords required.
void buildCoordOffsetArray(MatchContext *ctx, int **h_coord_offset_array,
                           unsigned int *num_coords) {
    int numCoords = 0;
    int match_length = ctx->min_match_length;
    int numQueries = ctx->queries->count;
    int *lengths = ctx->queries->h_lengths_array;
    int *coord_offsets = (int *)calloc(numQueries, sizeof(int));
#if COALESCED_QUERIES
    for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) {
        // Every query in this warp will need at least this many coords
        int max_num_coords = 0;
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            int num_coords = lengths[i + j] - match_length + 1;
            if (max_num_coords < num_coords)
                max_num_coords = num_coords;
        }
        unsigned int block_size = max_num_coords * WARP_SIZE;
        for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) {
            // BUGFIX: write into the freshly allocated table that this
            // function returns. The old code wrote through
            // ctx->results.h_coord_tex_array -- which is exactly what the
            // caller (loadResultBuffer) passes as h_coord_offset_array and is
            // NULL at that point -- and then discarded the computed offsets
            // by overwriting the pointer with the all-zero coord_offsets.
            coord_offsets[i + j] = numCoords + j;
        }
        numCoords += block_size;
    }
#else
    // One coord per query character that can start a match of match_length.
    for (unsigned int i = 0; i < numQueries; ++i) {
        int qryoffset = ctx->queries->h_addrs_tex_array[i];
        coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length);
    }
    if (numQueries > 0) {
        // Total = offset of the last query plus its own coord count.
        unsigned int last_qry = numQueries - 1;
        unsigned int last_qry_len = lengths[last_qry] - match_length + 1;
        numCoords = coord_offsets[last_qry] + last_qry_len;
        fprintf(stderr, "Need %d match coords for this result array\n",
                numCoords);
    }
#endif
    *num_coords = numCoords;
    *h_coord_offset_array = coord_offsets;
}
// Build the coordinate offset table, then allocate the host and (unless on
// the CPU path) device result arrays that the match kernel fills in.
// Updates ctx->results and the relevant timing statistics.
void loadResultBuffer(MatchContext *ctx) {
    unsigned int numQueries = ctx->queries->count;
    assert(numQueries);
    char *offsettimer = createTimer();
    startTimer(offsettimer);
    buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array),
                          &(ctx->results.numCoords));
    stopTimer(offsettimer);
    ctx->statistics.t_build_coord_offsets += getTimerValue(offsettimer);
    deleteTimer(offsettimer);
    unsigned int numCoords = ctx->results.numCoords;
    // BUGFIX: the byte count is a size_t and the free/total amounts below are
    // size_t as well; printing them with %d/%u is undefined behavior on LP64
    // platforms. Use %u for the unsigned query count and %zu for sizes.
    fprintf(stderr, "Allocating result array for %u queries (%zu bytes) ...",
            numQueries, numCoords * sizeof(MatchCoord));
    size_t boardFreeMemory = 0;
    size_t total_mem = 0;
    boardMemory(&boardFreeMemory, &total_mem);
    fprintf(stderr, "board free memory: %zu total memory: %zu\n",
            boardFreeMemory, total_mem);
    // Host-side result array, zero-initialized.
    ctx->results.h_match_coords =
        (MatchCoord *)calloc(numCoords, sizeof(MatchCoord));
    if (ctx->results.h_match_coords == NULL) {
        trap_dbg();
        exit(EXIT_FAILURE);
    }
    if (!ctx->on_cpu) {
        char *toboardtimer = createTimer();
        startTimer(toboardtimer);
        ctx->results.bytes_on_board = 0;
        // Device-side result array, zeroed so unmatched coords read as empty.
        CUDA_MALLOC((void **)&ctx->results.d_match_coords,
                    numCoords * sizeof(MatchCoord));
        ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord);
        CUDA_SAFE_CALL(cudaMemset((void *)ctx->results.d_match_coords, 0,
                                  numCoords * sizeof(MatchCoord)));
#if COALESCED_QUERIES
        // The kernel needs the per-query coord offsets on the device too.
        CUDA_MALLOC((void **)&ctx->results.d_coord_tex_array,
                    numQueries * sizeof(int));
        ctx->results.bytes_on_board += numQueries * sizeof(int);
        CUDA_SAFE_CALL(cudaMemcpy((void *)ctx->results.d_coord_tex_array,
                                  ctx->results.h_coord_tex_array,
                                  numQueries * sizeof(int),
                                  cudaMemcpyHostToDevice));
#endif
        stopTimer(toboardtimer);
        ctx->statistics.t_match_coords_to_board += getTimerValue(toboardtimer);
        deleteTimer(toboardtimer);
    } else {
        ctx->results.d_match_coords = NULL;
    }
    fprintf(stderr, "done\n");
}
// Free the device-side result buffers allocated by loadResultBuffer.
// Host-side arrays are released later (see getExactAlignments).
void unloadResultBuffer(MatchContext *ctx) {
    CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords));
    ctx->results.d_match_coords = NULL;
    ctx->results.bytes_on_board = 0;
#if COALESCED_QUERIES
    // BUGFIX: this freed d_match_coords a second time (double free on the
    // device allocator). The buffer still outstanding on this path is the
    // coalesced coordinate table.
    CUDA_SAFE_CALL(cudaFree(ctx->results.d_coord_tex_array));
    ctx->results.d_coord_tex_array = NULL;
#endif
}
// Copy the match coordinates (and, when TREE_ACCESS_HISTOGRAM is enabled,
// the per-texel tree-access histograms) back from the device. The running
// host histograms are grown as needed and this round's counts accumulated
// into them. No-op on the CPU path.
void transferResultsFromDevice(MatchContext *ctx) {
    if (!ctx->on_cpu) {
        char *fromboardtimer = createTimer();
        startTimer(fromboardtimer);
        CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords,
                                  ctx->results.d_match_coords,
                                  ctx->results.numCoords * sizeof(MatchCoord),
                                  cudaMemcpyDeviceToHost));
#if TREE_ACCESS_HISTOGRAM
        CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist, ctx->ref->d_node_hist,
                                  ctx->ref->tex_node_height *
                                      ctx->ref->tex_width * sizeof(int),
                                  cudaMemcpyDeviceToHost));
        CUDA_SAFE_CALL(cudaMemcpy(
            ctx->ref->h_child_hist, ctx->ref->d_child_hist,
            ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int),
            cudaMemcpyDeviceToHost));
        // Grow the running node histogram, preserving accumulated counts.
        if (ctx->statistics.node_hist_size <
            ctx->ref->tex_width * ctx->ref->tex_node_height) {
            int *temp = (int *)calloc(
                ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int));
            if (ctx->statistics.node_hist_size)
                memcpy(temp, ctx->statistics.node_hist,
                       ctx->statistics.node_hist_size * sizeof(int));
            // BUGFIX: release the old array before replacing it (was leaked).
            free(ctx->statistics.node_hist);
            ctx->statistics.node_hist = temp;
            ctx->statistics.node_hist_size =
                ctx->ref->tex_width * ctx->ref->tex_node_height;
        }
        // Grow the running child histogram likewise.
        if (ctx->statistics.child_hist_size <
            ctx->ref->tex_width * ctx->ref->tex_children_height) {
            // BUGFIX: 'temp' was used here without a declaration (the one
            // above is scoped to the node-histogram block), and the copy size
            // read 'hist_size' instead of 'child_hist_size'. Both broke the
            // build/behavior whenever TREE_ACCESS_HISTOGRAM was enabled.
            int *temp = (int *)calloc(ctx->ref->tex_width *
                                          ctx->ref->tex_children_height,
                                      sizeof(int));
            if (ctx->statistics.child_hist_size)
                memcpy(temp, ctx->statistics.child_hist,
                       ctx->statistics.child_hist_size * sizeof(int));
            free(ctx->statistics.child_hist);
            ctx->statistics.child_hist = temp;
            ctx->statistics.child_hist_size =
                ctx->ref->tex_width * ctx->ref->tex_children_height;
        }
        // Accumulate this round's per-texel hit counts.
        for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) {
            ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i];
        }
        for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) {
            ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i];
        }
#endif
        stopTimer(fromboardtimer);
        ctx->statistics.t_match_coords_from_board +=
            getTimerValue(fromboardtimer);
        deleteTimer(fromboardtimer);
    }
}
int flushOutput();
int addToBuffer(char *string);
char numbuffer[32];
// Return the MatchCoord for character position qrychar of query qryid.
// With COALESCED_QUERIES, a warp's coords are interleaved with stride
// WARP_SIZE; otherwise each query's coords are contiguous.
MatchCoord *coordForQueryChar(MatchContext *ctx, unsigned int qryid,
                              unsigned int qrychar) {
    MatchResults *results = &(ctx->results);
    MatchCoord *coords = results->h_match_coords;
#if COALESCED_QUERIES
    return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE;
#else
    return coords + results->h_coord_tex_array[qryid] + qrychar;
#endif
}
// Pack as many pending matches (and their alignments) as will fit in
// mem_avail bytes into freshly allocated host buffers for one print round.
// First pass: walk the coord array from *coord_idx sizing how many matches/
// alignments fit. Second pass: resume at (*nextqry, *nextqrychar) filling
// the MatchInfo array. All five cursors are advanced for the next round.
// Outputs: *matches (calloc'd, *match_idx entries), *alignments (calloc'd,
// room for *align_idx Alignment results).
void coordsToPrintBuffers(MatchContext *ctx, ReferencePage *page,
                          MatchInfo **matches, Alignment **alignments,
                          unsigned int mem_avail, unsigned int *coord_idx,
                          unsigned int *match_idx, unsigned int *align_idx,
                          unsigned int *nextqry, unsigned int *nextqrychar) {
    unsigned int numQueries = ctx->queries->count;
    int match_length = ctx->min_match_length;
    unsigned int cidx = *coord_idx;
    unsigned int midx = 0;
    unsigned int numCoords = ctx->results.numCoords;
    unsigned int numMatches = 0;
    unsigned int numAlignments = 0;
    int DEBUG = 0;
    // Debug dump: print every matched node with its leaf count, then quit.
    if (DEBUG && cidx == 0) {
        for (int j = 0; j < numCoords; ++j) {
            MatchCoord *coord = ctx->results.h_match_coords + j;
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                // fprintf(stdout, "node: %d\n",
                // coord->node);
                fprintf(stdout, "node: %d leaves:%d\n", coord->node.data,
                        lookupNumLeaves(page, coord->node));
            }
        }
        exit(0);
    }
    // How much can we fit into mem_avail?
    for (int j = cidx; j < numCoords; ++j) {
        MatchCoord *coord = ctx->results.h_match_coords + j;
        int queryAlignments = 0;
        int queryMatches = 0;
        // A coord is a real (forward) match if its node is set and the
        // FRMASK flag is clear; it yields one alignment per subtree leaf.
        if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
            int numLeaves = lookupNumLeaves(page, coord->node);
            queryAlignments += numLeaves;
            queryMatches++;
        }
        int allMatches = numMatches + queryMatches;
        int allAlignments = numAlignments + queryAlignments;
        int neededSize =
            allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment);
        if (neededSize > mem_avail ||
            (allMatches / BLOCKSIZE) >= MAX_GRID_DIMENSION) {
            // adding this match won't fit on the board
            break;
        }
        ++cidx;
        numMatches = allMatches;
        numAlignments = allAlignments;
    }
    MatchInfo *M = (MatchInfo *)calloc(numMatches, sizeof(MatchInfo));
    unsigned int alignmentOffset = 0;
    // Second pass: resume filling MatchInfo entries where the previous
    // round left off.
    int qry = *nextqry;
    int qrychar = *nextqrychar;
    bool set_full = false;
    while (qry < numQueries) {
        // h_lengths_array doesn't count the 'q' at the beginning of each query
        int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length;
        while (qrychar < qlen) {
            if (midx >= numMatches) {
                // Buffer quota reached; remember position for next round.
                set_full = true;
                break;
            }
            MatchCoord *coord = coordForQueryChar(ctx, qry, qrychar);
            if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) {
                MatchInfo m;
                m.resultsoffset = alignmentOffset;
                m.qrystartpos = qrychar;
                m.matchnode = coord->node;
                m.edgematch = coord->edge_match_length;
                m.numLeaves = lookupNumLeaves(page, m.matchnode);
                m.queryid = qry;
                alignmentOffset += m.numLeaves;
                M[midx++] = m;
            }
            ++qrychar;
        }
        if (set_full)
            break;
        ++qry;
        qrychar = 0;
    }
    *coord_idx = cidx;
    *match_idx = midx;
    *align_idx = alignmentOffset;
    *matches = M;
    *nextqry = qry;
    *nextqrychar = qrychar;
    // NOTE(review): the buffer is sized by alignmentOffset (alignments
    // actually reserved in pass two) while the message prints numAlignments
    // from the sizing pass; these can differ if the passes disagree.
    fprintf(stderr, "Allocing %d bytes of host memory for %d alignments\n",
            alignmentOffset * sizeof(Alignment), numAlignments);
    *alignments =
        (struct Alignment *)calloc(alignmentOffset, sizeof(Alignment));
    // cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment));
}
// Copy one round's MatchInfo array to the device, launch printKernel to
// expand each match into per-leaf Alignments, and copy the Alignment array
// back to the host. The kernel's argument list varies with the texture/
// coalescing compile flags. Exits the process on kernel failure.
void runPrintKernel(MatchContext *ctx, ReferencePage *page,
                    MatchInfo *h_matches, unsigned int numMatches,
                    Alignment *alignments, unsigned int numAlignments) {
    MatchInfo *d_matches;
    size_t matchesSize = numMatches * sizeof(MatchInfo);
    CUDA_MALLOC((void **)&d_matches, matchesSize);
    struct Alignment *d_alignments;
    size_t alignmentSize = numAlignments * sizeof(Alignment);
    CUDA_MALLOC((void **)&d_alignments, alignmentSize);
    // Zeroed so the host can detect unused slots (left_in_ref == 0).
    CUDA_SAFE_CALL(cudaMemset((void *)d_alignments, 0, alignmentSize));
    char *atimer = createTimer();
    startTimer(atimer);
    // Copy matches to card
    fprintf(stderr, "prepared %d matches %d alignments\n", numMatches,
            numAlignments);
    fprintf(stderr, "Copying %d bytes to host memory for %d alignments\n",
            numAlignments * sizeof(Alignment), numAlignments);
    int DEBUG = 0;
    // Debug dump of every MatchInfo entry, then quit.
    if (DEBUG) {
        for (int i = 0; i < numMatches; i++) {
            printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i,
                   h_matches[i].resultsoffset, h_matches[i].queryid,
                   h_matches[i].matchnode.data, h_matches[i].numLeaves,
                   h_matches[i].edgematch, h_matches[i].qrystartpos);
        }
        exit(0);
    }
    CUDA_SAFE_CALL(
        cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice));
    stopTimer(atimer);
    float mtime = getTimerValue(atimer);
    // Launch the kernel
    int blocksize = (numMatches > BLOCKSIZE) ? BLOCKSIZE : numMatches;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1);
    fprintf(stderr, " Calling print kernel... ");
    // Buffers not bound to textures are passed as plain pointers instead.
    printKernel<<<dimGrid, dimBlock, 0>>>(
        d_matches, numMatches, d_alignments,
#if COALESCED_QUERIES
        ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
        (int *)
#endif
            ctx->queries->d_tex_array,
#endif
#if !NODETEX
        (_PixelOfNode *)ctx->ref->d_node_tex_array,
#endif
#if !CHILDTEX
        (_PixelOfChildren *)ctx->ref->d_children_tex_array,
#endif
        ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
        page->begin, page->end, page->shadow_left, page->shadow_right,
        ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
        ,
        ctx->ref->d_node_hist, ctx->ref->d_child_hist
#endif
    );
    // Surface any launch/execution error before copying results back.
    cudaThreadSynchronize();
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "Kernel execution failed: %s.\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    startTimer(atimer);
    // Copy the results back to the host
    CUDA_SAFE_CALL(cudaMemcpy((void *)alignments, (void *)d_alignments,
                              alignmentSize, cudaMemcpyDeviceToHost));
    cudaThreadSynchronize();
    stopTimer(atimer);
    float atime = getTimerValue(atimer);
    fprintf(stderr, "memcpy time= %f\n", atime + mtime);
    deleteTimer(atimer);
    // Cleanup
    CUDA_SAFE_CALL(cudaFree(d_alignments));
    CUDA_SAFE_CALL(cudaFree(d_matches));
}
// TODO: need reverse-complement printing support
// CPU fallback for the print phase: expand each forward MatchInfo into its
// alignments via printAlignments. h_matches is assumed grouped by queryid
// (qrylen is only refreshed when queryid changes).
void runPrintOnCPU(MatchContext *ctx, ReferencePage *page, MatchInfo *h_matches,
                   unsigned int numMatches, Alignment *alignments,
                   unsigned int numAlignments) {
    unsigned int min_match_length = ctx->min_match_length;
    int *addrs = ctx->queries->h_addrs_tex_array;
    int *lengths = ctx->queries->h_lengths_array;
    char *qrychars = ctx->queries->h_tex_array;
    if (!numMatches)
        return;
    int qry = -1;
    unsigned int qrylen;
    for (int i = 0; i < numMatches; ++i) {
        MatchInfo &match = h_matches[i];
        // Refresh the cached query length only when we move to a new query.
        if (match.queryid != qry) {
            qry = match.queryid;
            qrylen = lengths[qry];
        }
        // Skip reverse-flagged matches (FRMASK bit set).
        if (!(match.edgematch & FRMASK)) {
            printAlignments(page, alignments + match.resultsoffset,
#if COALESCED_QUERIES
                            // Coalesced layout stores offsets in int units.
                            qrychars + sizeof(int) * addrs[qry],
#else
                            qrychars + addrs[qry],
#endif
                            qrylen, match.matchnode, match.qrystartpos,
                            match.edgematch, min_match_length, 0,
                            ctx->forwardcoordinates);
        }
    }
}
int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen);
// Drive the print phase in rounds: repeatedly pack as many match coords as
// fit in device memory (coordsToPrintBuffers), expand them to alignments on
// the GPU or CPU, and stream the alignments to the output buffer. Frees the
// host-side result arrays when all coords are consumed.
void getExactAlignments(MatchContext *ctx, ReferencePage *page, bool on_cpu) {
    assert(!ctx->reverse && !ctx->forwardreverse);
    size_t boardFreeMemory;
    size_t total_mem;
    if (!on_cpu) {
        boardMemory(&boardFreeMemory, &total_mem);
        fprintf(stderr, "board free memory: %u total memory: %u\n",
                boardFreeMemory, total_mem);
    } else {
        // CPU path: pretend we have a fixed 256 MB budget.
        boardFreeMemory = 256 * 1024 * 1024;
        total_mem = boardFreeMemory;
    }
#ifdef __DEVICE_EMULATION__
    boardFreeMemory = 512 * 1024 * 1024;
#endif
    // Keep some headroom so the print buffers don't exhaust the device.
    boardFreeMemory -= BREATHING_ROOM;
    fprintf(stderr, "board free memory: %u\n", boardFreeMemory);
    int rTotalMatches = 0;
    int rTotalAlignments = 0;
    int totalRounds = 0;
    unsigned int last_coord = ctx->results.numCoords;
    unsigned int next_coord = 0;
    unsigned int nextqry = 0;
    unsigned int nextqrychar = 0;
    int lastqry = -1;
    while (next_coord < last_coord) {
        // see how many queries will fit on the board
        totalRounds++;
        unsigned int numMatches = 0;
        unsigned int numAlignments = 0;
        MatchInfo *h_matches = NULL;
        Alignment *h_alignments = NULL;
        int coord_left = next_coord;
        char *btimer = createTimer();
        startTimer(btimer);
        coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments,
                             boardFreeMemory, &next_coord, &numMatches,
                             &numAlignments, &nextqry, &nextqrychar);
        stopTimer(btimer);
        float btime = getTimerValue(btimer);
        ctx->statistics.t_coords_to_buffers += btime;
        fprintf(stderr, "buffer prep time= %f\n", btime);
        deleteTimer(btimer);
        fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) "
                        "of %d using %d matches and %d alignments\n",
                totalRounds, coord_left, next_coord, last_coord, numMatches,
                numAlignments);
        // NOTE(review): if no match fits in the budget, next_coord does not
        // advance and this loop spins forever -- confirm a single match
        // always fits under boardFreeMemory.
        if (numMatches == 0)
            continue;
        char buf[256];
        // assert(qryend > qrystart);
        rTotalAlignments += numAlignments;
        rTotalMatches += numMatches;
        // Periodically recycle the CUDA context to work around texture
        // binding accumulation, then restore the reference and queries.
        if (num_bind_tex_calls > 100) {
            cudaThreadExit();
            num_bind_tex_calls = 0;
            loadReference(ctx);
            loadQueries(ctx);
        }
        char *ktimer = createTimer();
        startTimer(ktimer);
        if (on_cpu) {
            runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments,
                          numAlignments);
        } else {
            runPrintKernel(ctx, page, h_matches, numMatches, h_alignments,
                           numAlignments);
        }
        stopTimer(ktimer);
        float ktime = getTimerValue(ktimer);
        ctx->statistics.t_print_kernel += ktime;
        fprintf(stderr, "print kernel time= %f\n", ktime);
        deleteTimer(ktimer);
        // char* stimer = createTimer();
        // startTimer(stimer);
        // mapQueriesEndToEnd(ctx,
        // page,
        // h_matches,
        // numMatches,
        // h_alignments,
        // numAlignments);
        //
        // stopTimer(stimer);
        //
        // float stime = getTimerValue(stimer);
        // fprintf(stderr, "postprocess time= %f\n", stime);
        // deleteTimer(stimer);
        // flushOutput();
        // Process the alignments
        char *otimer = createTimer();
        startTimer(otimer);
        for (int m = 0; m < numMatches; m++) {
            int base = h_matches[m].resultsoffset;
            for (int i = 0; i < h_matches[m].numLeaves; i++) {
                // See if there are any more left maximal alignments for this
                // match
                if (h_alignments[base + i].left_in_ref == 0) {
                    break;
                }
                // Emit a FASTA-style header once per query.
                if (h_matches[m].queryid != lastqry) {
                    lastqry = h_matches[m].queryid;
                    addToBuffer("> ");
                    addToBuffer(*(ctx->queries->h_names + lastqry));
                    addToBuffer("\n");
                }
                sprintf(buf, "%d\t%d\t%d\n", h_alignments[base + i].left_in_ref,
                        h_matches[m].qrystartpos + 1,
                        h_alignments[base + i].matchlen);
                addToBuffer(buf);
                // addMatchToBuffer(h_alignments[base+i].left_in_ref,
                // h_matches[m].qrystartpos
                // + 1,
                // h_alignments[base+i].matchlen);
            }
        }
        flushOutput();
        stopTimer(otimer);
        ctx->statistics.t_results_to_disk += getTimerValue(otimer);
        deleteTimer(otimer);
        free(h_matches);
        free(h_alignments);
        // cudaFreeHost((void*)h_alignments);
    }
    // All coords consumed: release the host-side result arrays.
    free(ctx->results.h_coord_tex_array);
    free(ctx->results.h_match_coords);
    ctx->results.h_coord_tex_array = NULL;
    ctx->results.h_match_coords = NULL;
    fprintf(stderr, "Finished processing %d matches and %d potential "
                    "alignments in %d rounds\n",
            rTotalMatches, rTotalAlignments, totalRounds);
}
// Read the next block of queries from the query file, sized to fit in
// device_mem_avail, and install it in ctx->queries. Returns the number of
// queries read (0 signals end of input, terminating the caller's loop).
int getQueryBlock(MatchContext *ctx, size_t device_mem_avail) {
    QuerySet *queries = ctx->queries;
    char *queryTex = NULL;
    int *queryAddrs = NULL;
    int *queryLengths = NULL;
    unsigned int numQueries;
    unsigned int num_match_coords;
    size_t queryLen;
    char **names;
    fprintf(stderr, "Loading query block... ");
    char *queryreadtimer = createTimer();
    startTimer(queryreadtimer);
    // Reverse/forward-reverse matching needs extra space for RC queries.
    getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names,
                      &queryLengths, &numQueries, &num_match_coords,
                      device_mem_avail, ctx->min_match_length,
                      ctx->reverse || ctx->forwardreverse);
    stopTimer(queryreadtimer);
    ctx->statistics.t_queries_from_disk += getTimerValue(queryreadtimer);
    deleteTimer(queryreadtimer);
    // Hand ownership of the freshly read arrays to the query set.
    queries->h_tex_array = queryTex;
    queries->count = numQueries;
    queries->h_addrs_tex_array = queryAddrs;
    queries->texlen = queryLen;
    queries->h_names = names;
    queries->h_lengths_array = queryLengths;
    ctx->results.numCoords = num_match_coords;
    fprintf(stderr, "done.\n");
    return numQueries;
}
// Release all host-side storage owned by a query block and reset its
// bookkeeping fields so the set can be refilled by getQueryBlock.
void destroyQueryBlock(QuerySet *queries) {
    free(queries->h_tex_array);
    queries->h_tex_array = NULL;
    // Per-query name strings first, then the table that held them.
    for (int qry = 0; qry < queries->count; ++qry) {
        free(queries->h_names[qry]);
    }
    free(queries->h_names);
    free(queries->h_addrs_tex_array);
    queries->h_addrs_tex_array = NULL;
    free(queries->h_lengths_array);
    queries->h_lengths_array = NULL;
    queries->count = 0;
    queries->texlen = 0;
}
// Zero every statistics accumulator and, when histogramming is compiled in,
// release any accumulated tree-access histograms.
void resetStats(Statistics *stats) {
    // Kernel and transfer timings.
    stats->t_end_to_end = 0.0;
    stats->t_match_kernel = 0.0;
    stats->t_print_kernel = 0.0;
    stats->t_queries_to_board = 0.0;
    stats->t_match_coords_to_board = 0.0;
    stats->t_match_coords_from_board = 0.0;
    stats->t_tree_to_board = 0.0;
    stats->t_ref_str_to_board = 0.0;
    // Disk I/O timings.
    stats->t_queries_from_disk = 0.0;
    stats->t_ref_from_disk = 0.0;
    stats->t_results_to_disk = 0.0;
    // Preprocessing timings.
    stats->t_tree_construction = 0.0;
    stats->t_tree_reorder = 0.0;
    stats->t_tree_flatten = 0.0;
    stats->t_reorder_ref_str = 0.0;
    stats->t_build_coord_offsets = 0.0;
    stats->t_coords_to_buffers = 0.0;
    // Misc metrics.
    stats->bp_avg_query_length = 0.0;
#if TREE_ACCESS_HISTOGRAM
    // Drop accumulated histograms so the next run starts from zero.
    if (stats->node_hist_size != 0) {
        free(stats->node_hist);
        stats->node_hist = NULL;
        stats->node_hist_size = 0;
    }
    if (stats->child_hist_size != 0) {
        free(stats->child_hist);
        stats->child_hist = NULL;
        stats->child_hist_size = 0;
    }
#endif
}
// Write the collected statistics as a one-row CSV to stats_filename, and
// (when TREE_ACCESS_HISTOGRAM is enabled) dump the node/child access
// histograms to their own files plus a tree-top hit summary to stderr.
// Missing filenames are skipped with a warning.
void writeStatisticsFile(Statistics *stats, char *stats_filename,
                         char *node_hist_filename = NULL,
                         char *child_hist_filename = NULL) {
    if (stats_filename) {
        FILE *f = fopen(stats_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    stats_filename);
        } else {
            // Header row: build-configuration flags, then timing columns.
            fprintf(f, "Q");
            fprintf(f, ",R");
            fprintf(f, ",T");
            fprintf(f, ",m");
            fprintf(f, ",r");
            fprintf(f, ",t");
            fprintf(f, ",n");
            fprintf(f, ",Total");
            fprintf(f, ",Match kernel");
            fprintf(f, ",Print Kernel");
            fprintf(f, ",Queries to board");
            fprintf(f, ",Match coords to board");
            fprintf(f, ",Match coords from board");
            fprintf(f, ",Tree to board");
            fprintf(f, ",Ref str to board");
            fprintf(f, ",Queries from disk");
            fprintf(f, ",Ref from disk");
            fprintf(f, ",Output to disk");
            fprintf(f, ",Tree construction");
            fprintf(f, ",Tree reorder");
            fprintf(f, ",Tree flatten");
            fprintf(f, ",Ref reorder");
            fprintf(f, ",Build coord table");
            fprintf(f, ",Coords to buffers");
            fprintf(f, ",Avg qry length");
            fprintf(f, "\n");
            // Data row, in the same order as the header.
            fprintf(f, "%d", QRYTEX);
            fprintf(f, ",%d", REFTEX);
            fprintf(f, ",%d", TREETEX);
            fprintf(f, ",%d", MERGETEX);
            fprintf(f, ",%d", REORDER_REF);
            fprintf(f, ",%d", REORDER_TREE);
            fprintf(f, ",%d", RENUMBER_TREE);
            fprintf(f, ",%f", stats->t_end_to_end);
            fprintf(f, ",%f", stats->t_match_kernel);
            fprintf(f, ",%f", stats->t_print_kernel);
            fprintf(f, ",%f", stats->t_queries_to_board);
            fprintf(f, ",%f", stats->t_match_coords_to_board);
            fprintf(f, ",%f", stats->t_match_coords_from_board);
            fprintf(f, ",%f", stats->t_tree_to_board);
            fprintf(f, ",%f", stats->t_ref_str_to_board);
            fprintf(f, ",%f", stats->t_queries_from_disk);
            fprintf(f, ",%f", stats->t_ref_from_disk);
            fprintf(f, ",%f", stats->t_results_to_disk);
            fprintf(f, ",%f", stats->t_tree_construction);
            fprintf(f, ",%f", stats->t_tree_reorder);
            fprintf(f, ",%f", stats->t_tree_flatten);
            fprintf(f, ",%f", stats->t_reorder_ref_str);
            fprintf(f, ",%f", stats->t_build_coord_offsets);
            fprintf(f, ",%f", stats->t_coords_to_buffers);
            fprintf(f, ",%f", stats->bp_avg_query_length);
            fprintf(f, "\n");
            fclose(f);
        }
    }
#if TREE_ACCESS_HISTOGRAM
    // BUGFIX: this section referenced ctx->statistics.*, but 'ctx' is not a
    // parameter of this function -- it failed to compile whenever
    // TREE_ACCESS_HISTOGRAM was enabled. The histograms live on the 'stats'
    // parameter.
    if (node_hist_filename) {
        FILE *f = fopen(node_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    node_hist_filename);
        } else {
            for (unsigned int i = 0; i < stats->node_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->node_hist[i]);
        }
    }
    if (child_hist_filename) {
        FILE *f = fopen(child_hist_filename, "w");
        if (!f) {
            fprintf(stderr, "WARNING: could not open %s for writing\n",
                    child_hist_filename);
        } else {
            for (unsigned int i = 0; i < stats->child_hist_size; ++i)
                fprintf(f, "%d\t%d\n", i, stats->child_hist[i]);
        }
    }
    // Summarize what fraction of tree accesses hit the first 256 texels
    // ("tree top"). NOTE(review): the printed value is a fraction, not a
    // percentage, despite the message wording.
    float total_node_hits = 0;
    float tree_top_node_hits = 0;
    float total_child_hits = 0;
    float tree_top_child_hits = 0;
    for (unsigned int i = 0; i < stats->node_hist_size; ++i) {
        total_node_hits += stats->node_hist[i];
        if (i < 256) {
            tree_top_node_hits += stats->node_hist[i];
        }
    }
    for (unsigned int i = 0; i < stats->child_hist_size; ++i) {
        total_child_hits += stats->child_hist[i];
        if (i < 256) {
            tree_top_child_hits += stats->child_hist[i];
        }
    }
    fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",
            (int)tree_top_node_hits, (int)total_node_hits,
            tree_top_node_hits / total_node_hits);
    fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",
            (int)tree_top_child_hits, (int)total_child_hits,
            tree_top_child_hits / total_child_hits);
#endif
}
// CPU matching path: run the reference (gold) matcher over the current query
// block, in the reverse-complement or forward direction as requested.
void matchOnCPU(MatchContext *ctx, bool doRC) {
    // TODO: CPU is matching is disabled.
    // Both directions take identical arguments except for the final
    // direction flag, so dispatch with a single call.
    computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array,
                ctx->queries->h_addrs_tex_array,
                ctx->queries->h_lengths_array,
                (PixelOfNode *)(ctx->ref->h_node_tex_array),
                (PixelOfChildren *)(ctx->ref->h_children_tex_array),
                ctx->queries->count, ctx->min_match_length,
                doRC ? REVERSE : FORWARD);
}
// GPU matching path: launch the forward (or reverse-complement) match kernel
// over the current query block, one thread per query. The forward kernel's
// argument list varies with the texture/coalescing compile flags. Exits the
// process if the launch fails.
void matchOnGPU(MatchContext *ctx, bool doRC) {
    int numQueries = ctx->queries->count;
    // One thread per query, capped at BLOCKSIZE threads per block.
    int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries;
    dim3 dimBlock(blocksize, 1, 1);
    dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1);
    // Match the reverse complement of the queries to the ref
    if (doRC) {
        // TODO: GPU RC is disabled
        mummergpuRCKernel<<<dimGrid, dimBlock, 0>>>(
            ctx->results.d_match_coords, ctx->queries->d_tex_array,
            ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
            numQueries, ctx->min_match_length);
    } else {
        // Buffers not bound to textures are passed as plain pointers.
        mummergpuKernel<<<dimGrid, dimBlock, 0>>>(
            ctx->results.d_match_coords,
#if COALESCED_QUERIES
            ctx->results.d_coord_tex_array,
#endif
#if !QRYTEX
#if COALESCED_QUERIES
            (int *)
#endif
                ctx->queries->d_tex_array,
#endif
#if !NODETEX
            (_PixelOfNode *)(ctx->ref->d_node_tex_array),
#endif
#if !CHILDTEX
            (_PixelOfChildren *)(ctx->ref->d_children_tex_array),
#endif
#if !REFTEX
            (char *)ctx->ref->d_ref_array,
#endif
            ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array,
            numQueries, ctx->min_match_length
#if TREE_ACCESS_HISTOGRAM
            ,
            ctx->ref->d_node_hist, ctx->ref->d_child_hist
#endif
        );
    }
    // check if kernel execution generated an error
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "Kernel execution failed: %s.\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Pull the match coordinates for the current page back to the host.
// page_num is currently unused; results are read straight from ctx->results.
void getMatchResults(MatchContext *ctx, unsigned int page_num) {
    transferResultsFromDevice(ctx);
}
// Run the match kernel (GPU or CPU) for the current query block against one
// reference page, fetch the results to the host, and release the device
// result buffer. Accumulates the match-kernel time statistic.
void matchQueryBlockToReferencePage(MatchContext *ctx, ReferencePage *page,
                                    bool reverse_complement) {
    char *ktimer = createTimer();
    // NOTE(review): bytes_on_board fields are printed with %d here but with
    // %ld elsewhere in this file -- confirm the field's actual type.
    fprintf(stderr,
            "Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n",
            ctx->queries->bytes_on_board, ctx->ref->bytes_on_board,
            ctx->results.bytes_on_board);
    startTimer(ktimer);
    if (ctx->on_cpu) {
        matchOnCPU(ctx, reverse_complement);
    } else {
        matchOnGPU(ctx, reverse_complement);
        // Kernel launches are asynchronous; wait so the timer measures the
        // full kernel execution.
        cudaThreadSynchronize();
    }
    stopTimer(ktimer);
    float ktime = getTimerValue(ktimer);
    ctx->statistics.t_match_kernel += ktime;
    fprintf(stderr, "match kernel time= %f\n", ktime);
    deleteTimer(ktimer);
    getMatchResults(ctx, page->id);
    unloadResultBuffer(ctx);
}
// Match the current query block against one reference page, then print the
// resulting exact alignments. Always returns 0.
int matchSubset(MatchContext *ctx, ReferencePage *page) {
    loadQueries(ctx);
    fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n",
            ctx->queries->h_names[0],
            ctx->queries->h_names[ctx->queries->count - 1], page->begin,
            page->end);
    loadResultBuffer(ctx);
    // TODO: renable RC support by calling this twice /w reverse/fwdreverse
    // idiom.
    matchQueryBlockToReferencePage(ctx, page, false);
    // The print kernel is only usable on the GPU path; otherwise fall back
    // to CPU-side alignment printing.
    const bool print_on_cpu = !(USE_PRINT_KERNEL && !ctx->on_cpu);
    getExactAlignments(ctx, page, print_on_cpu);
    flushOutput();
    unloadQueries(ctx);
    return 0;
}
// Return the free device memory in bytes (or a fixed 8800 GTX-sized value on
// the CPU path). NOTE(review): the int return truncates/overflows above
// 2 GiB; callers use the result as an allocation budget.
int getFreeDeviceMemory(bool on_cpu) {
    size_t free_mem = 0;
    size_t total_mem = 0;
    // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo
    // will return zeroes until we do a malloc.
    int *p = NULL;
    CUDA_SAFE_CALL(cudaMalloc((void **)&p, sizeof(int)));
    CUDA_SAFE_CALL(cudaFree(p));
    if (!on_cpu) {
        boardMemory(&free_mem, &total_mem);
        // BUGFIX: free_mem/total_mem are size_t; printing them with %u is
        // undefined behavior on LP64 platforms. Use %zu.
        fprintf(stderr, "board free memory: %zu total memory: %zu\n", free_mem,
                total_mem);
    } else {
        total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX
    }
    return free_mem;
}
// Stream every query block in the query file against one reference page:
// load the page's reference data, then loop getQueryBlock/matchSubset until
// the file is exhausted, recycling the CUDA context periodically. Rewinds
// the query file for the next page. Always returns 0.
int matchQueriesToReferencePage(MatchContext *ctx, ReferencePage *page) {
    fprintf(stderr, "Beginning reference page %p\n", page);
    int free_mem = getFreeDeviceMemory(ctx->on_cpu);
    // Budget for queries = free memory minus the reference and headroom.
    int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM;
    ctx->ref = &(page->ref);
    loadReference(ctx);
    while (getQueryBlock(ctx, available_mem)) {
        matchSubset(ctx, page);
        // NOTE(review): the "- 2" presumably discounts per-query sentinel
        // characters in texlen -- confirm against the query loader.
        ctx->statistics.bp_avg_query_length =
            ctx->queries->texlen / (float)(ctx->queries->count) - 2;
        destroyQueryBlock(ctx->queries);
        // Periodically recycle the CUDA context to work around texture
        // binding accumulation, then restore the reference.
        if (num_bind_tex_calls > 100) {
            cudaThreadExit();
            num_bind_tex_calls = 0;
            loadReference(ctx);
        }
    }
    unloadReferenceString(ctx->ref);
    unloadReferenceTree(ctx);
    // Rewind so the next reference page sees all queries again.
    lseek(ctx->queries->qfile, 0, SEEK_SET);
    return 0;
}
// Partition the full reference into overlapping pages of at most
// BASES_PER_TREE_PAGE bases. Adjacent pages overlap by MAX_QUERY_LEN + 1 so
// matches spanning a boundary are found on at least one page; shadow_left/
// shadow_right record the neighbor overlap edges (-1 at the ends). Returns
// a calloc'd page array via *pages_out and its length via *num_pages.
void initReferencePages(MatchContext *ctx, int *num_pages,
                        ReferencePage **pages_out) {
    // NOTE(review): the "- 3" presumably excludes sentinel characters around
    // the reference string -- confirm against the reference loader.
    unsigned int bases_in_ref = ctx->full_ref_len - 3;
    unsigned int page_size =
        BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref;
    unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size);
    fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n",
            num_reference_pages, bases_in_ref, page_size);
    unsigned int page_overlap = MAX_QUERY_LEN + 1;
    ReferencePage *pages =
        (ReferencePage *)calloc(num_reference_pages, sizeof(ReferencePage));
    // First page starts right after the leading sentinel.
    pages[0].begin = 1;
    pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) +
                   1; // the 1 is for the 's' at the beginning
    pages[0].shadow_left = -1;
    pages[0].id = 0;
    // Interior pages: each starts page_overlap before its predecessor ends.
    for (int i = 1; i < num_reference_pages - 1; ++i) {
        pages[i].begin = pages[i - 1].end - page_overlap;
        pages[i].end = pages[i].begin + page_size + page_overlap;
        pages[i - 1].shadow_right = pages[i].begin;
        pages[i].shadow_left = pages[i - 1].end;
        pages[i].id = i;
    }
    // Last page runs to the end of the reference (when there is more than
    // one page; a single page keeps the bounds set above).
    if (num_reference_pages > 1) {
        int last_page = num_reference_pages - 1;
        pages[last_page].begin = pages[last_page - 1].end - page_overlap;
        pages[last_page].end = ctx->full_ref_len - 1;
        pages[last_page - 1].shadow_right = pages[last_page].begin;
        pages[last_page].shadow_right = -1;
        pages[last_page].shadow_left = pages[last_page - 1].end;
        pages[last_page].id = last_page;
    }
    *pages_out = pages;
    *num_pages = num_reference_pages;
}
int streamReferenceAgainstQueries(MatchContext *ctx) {
int num_reference_pages = 0;
ReferencePage *pages = NULL;
initReferencePages(ctx, &num_reference_pages, &pages);
buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin,
pages[0].end, ctx->min_match_length, ctx->dotfilename,
ctx->texfilename, &(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[0]);
destroyReference(&(pages[0].ref));
for (int i = 1; i < num_reference_pages - 1; ++i) {
buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin,
pages[i].end, ctx->min_match_length, NULL, NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[i]);
destroyReference(&(pages[i].ref));
}
if (num_reference_pages > 1) {
int last_page = num_reference_pages - 1;
buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref,
pages[last_page].begin, pages[last_page].end,
ctx->min_match_length, NULL, NULL,
&(ctx->statistics));
matchQueriesToReferencePage(ctx, &pages[last_page]);
destroyReference(&(pages[last_page].ref));
}
free(pages);
return 0;
}
extern "C" int matchQueries(MatchContext *ctx) {
assert(sizeof(struct PixelOfNode) == sizeof(uint4));
assert(sizeof(struct PixelOfChildren) == sizeof(uint4));
#if TREE_ACCESS_HISTOGRAM
ctx->statistics.node_hist_size = 0;
ctx->statistics.child_hist_size = 0;
#endif
resetStats(&(ctx->statistics));
char *ttimer = createTimer();
startTimer(ttimer);
int ret;
fprintf(stderr, "Streaming reference pages against all queries\n");
ret = streamReferenceAgainstQueries(ctx);
stopTimer(ttimer);
ctx->statistics.t_end_to_end += getTimerValue(ttimer);
deleteTimer(ttimer);
writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out",
"child_hist.out");
return ret;
}
|
07494239ea935a05e76b7385096fd1051e998a63.hip | // !!! This is a file automatically generated by hipify!!!
// The MIT License (MIT)
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
#include <iostream>
#include <vector>
// #define VERBOSE_OUTPUT
// Define some error checking macros.
#define cudaErrCheck(stat) \
{ \
cudaErrCheck_((stat), __FILE__, __LINE__); \
}
void
cudaErrCheck_(hipError_t stat, const char* file, int line)
{
if (stat != hipSuccess) {
fprintf(
stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#ifdef VERBOSE_OUTPUT
static inline void
print_device_vector(float* vec, size_t size)
{
std::vector<float> debugPrintFloat(size);
cudaErrCheck(hipMemcpy(
&debugPrintFloat[0], vec, size * sizeof(float), hipMemcpyDeviceToHost));
for (size_t i = 0; i < size; ++i) {
std::cout << debugPrintFloat[i] << ", ";
}
std::cout << std::endl;
}
static inline void
print_device_vectors(float** vec, size_t numvecs, size_t size)
{
std::vector<float*> debugPrintFloat(numvecs);
cudaErrCheck(hipMemcpy(
debugPrintFloat.data(), vec, numvecs * sizeof(float*),
hipMemcpyDeviceToHost));
for (int i = 0; i < numvecs; ++i) {
std::cout << debugPrintFloat[i] << ": ";
print_device_vector(debugPrintFloat[i], size);
}
}
static inline void
print_device_vector(int* vec, size_t size)
{
std::vector<int> debugPrintInt(size);
cudaErrCheck(hipMemcpy(
&debugPrintInt[0], vec, size * sizeof(int), hipMemcpyDeviceToHost));
for (size_t i = 0; i < size; ++i) {
std::cout << debugPrintInt[i] << ", ";
}
std::cout << std::endl;
}
#endif
__global__ void
storeStates_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
float* pDst = storage[stateId] + storeId * sizeX * sizeY;
float* pSrc = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = pSrc[i];
}
pDst += sizeY;
pSrc += batchStride * sizeY;
}
}
__global__ void
restoreStates_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
float* pSrc = storage[stateId] + storeId * sizeX * sizeY;
float* pDst = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = pSrc[i];
}
pSrc += sizeY;
pDst += batchStride * sizeY;
}
}
__global__ void
storeStates_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
__half* pDst = storage[stateId] + storeId * sizeX * sizeY;
float* pSrc = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = __float2half(pSrc[i]);
}
pDst += sizeY;
pSrc += batchStride * sizeY;
}
}
__global__ void
restoreStates_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
__half* pSrc = storage[stateId] + storeId * sizeX * sizeY;
float* pDst = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = __half2float(pSrc[i]);
}
pSrc += sizeY;
pDst += batchStride * sizeY;
}
}
void
launchRestoreGPUKernel_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, hipStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
hipLaunchKernelGGL(( restoreStates_FP32), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Restoring the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchStoreGPUKernel_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, hipStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
hipLaunchKernelGGL(( storeStates_FP32), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Storing the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchRestoreGPUKernel_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, hipStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
hipLaunchKernelGGL(( restoreStates_FP16), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Restoring the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchStoreGPUKernel_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, hipStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
hipLaunchKernelGGL(( storeStates_FP16), dim3(numBlocks), dim3(threadsPerBlock), 0, stream,
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Storing the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
| 07494239ea935a05e76b7385096fd1051e998a63.cu | // The MIT License (MIT)
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
#include <iostream>
#include <vector>
// #define VERBOSE_OUTPUT
// Define some error checking macros.
#define cudaErrCheck(stat) \
{ \
cudaErrCheck_((stat), __FILE__, __LINE__); \
}
void
cudaErrCheck_(cudaError_t stat, const char* file, int line)
{
if (stat != cudaSuccess) {
fprintf(
stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#ifdef VERBOSE_OUTPUT
static inline void
print_device_vector(float* vec, size_t size)
{
std::vector<float> debugPrintFloat(size);
cudaErrCheck(cudaMemcpy(
&debugPrintFloat[0], vec, size * sizeof(float), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < size; ++i) {
std::cout << debugPrintFloat[i] << ", ";
}
std::cout << std::endl;
}
static inline void
print_device_vectors(float** vec, size_t numvecs, size_t size)
{
std::vector<float*> debugPrintFloat(numvecs);
cudaErrCheck(cudaMemcpy(
debugPrintFloat.data(), vec, numvecs * sizeof(float*),
cudaMemcpyDeviceToHost));
for (int i = 0; i < numvecs; ++i) {
std::cout << debugPrintFloat[i] << ": ";
print_device_vector(debugPrintFloat[i], size);
}
}
static inline void
print_device_vector(int* vec, size_t size)
{
std::vector<int> debugPrintInt(size);
cudaErrCheck(cudaMemcpy(
&debugPrintInt[0], vec, size * sizeof(int), cudaMemcpyDeviceToHost));
for (size_t i = 0; i < size; ++i) {
std::cout << debugPrintInt[i] << ", ";
}
std::cout << std::endl;
}
#endif
__global__ void
storeStates_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
float* pDst = storage[stateId] + storeId * sizeX * sizeY;
float* pSrc = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = pSrc[i];
}
pDst += sizeY;
pSrc += batchStride * sizeY;
}
}
__global__ void
restoreStates_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
float* pSrc = storage[stateId] + storeId * sizeX * sizeY;
float* pDst = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = pSrc[i];
}
pSrc += sizeY;
pDst += batchStride * sizeY;
}
}
__global__ void
storeStates_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
__half* pDst = storage[stateId] + storeId * sizeX * sizeY;
float* pSrc = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = __float2half(pSrc[i]);
}
pDst += sizeY;
pSrc += batchStride * sizeY;
}
}
__global__ void
restoreStates_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int* storeids,
int batchStride)
{
int stateId = blockIdx.x;
int batchId = blockIdx.y;
int storeId = storeids[batchId];
if (storeId < 0)
return; // no empty slots
int sizeX = sizesX[stateId];
int sizeY = sizesY[stateId];
__half* pSrc = storage[stateId] + storeId * sizeX * sizeY;
float* pDst = states[stateId] + batchId * sizeY;
for (int x = 0; x < sizeX; ++x) {
for (int i = threadIdx.x; i < sizeY; i += blockDim.x) {
pDst[i] = __half2float(pSrc[i]);
}
pSrc += sizeY;
pDst += batchStride * sizeY;
}
}
void
launchRestoreGPUKernel_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, cudaStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
restoreStates_FP32<<<numBlocks, threadsPerBlock, 0, stream>>>(
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Restoring the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchStoreGPUKernel_FP32(
float** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, cudaStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
storeStates_FP32<<<numBlocks, threadsPerBlock, 0, stream>>>(
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Storing the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchRestoreGPUKernel_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, cudaStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
restoreStates_FP16<<<numBlocks, threadsPerBlock, 0, stream>>>(
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Restoring the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
void
launchStoreGPUKernel_FP16(
__half** storage, float** states, int* sizesX, int* sizesY, int numStates,
int* storeids, int batchSize, int batchStride, cudaStream_t stream)
{
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(numStates, batchSize);
storeStates_FP16<<<numBlocks, threadsPerBlock, 0, stream>>>(
storage, states, sizesX, sizesY, storeids, batchStride);
#ifdef VERBOSE_OUTPUT
std::cout << "Storing the instances:" << std::endl;
print_device_vector(storeids, batchSize);
print_device_vector(sizes, numStates);
print_device_vectors(states, numStates, 2);
#endif
}
|
53fb7dc0a8e267a2feb4bccad0b9ec1f1ed5d5b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Attention : Choix du nom est impotant!
// VagueDevice.cu et non Vague.cu
// Dans ce dernier cas, problme de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host)
// On a donc ajouter Device (ou n'importequoi) pour que les noms soient diffrents!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
int pixelI;
int pixelJ;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &pixelI, &pixelJ);
ripplingMath.colorIJ(&color, pixelI, pixelJ, t);
ptrDevPixels[s] = color;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 53fb7dc0a8e267a2feb4bccad0b9ec1f1ed5d5b6.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Attention : Choix du nom est impotant!
// VagueDevice.cu et non Vague.cu
// Dans ce dernier cas, probl�me de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host)
// On a donc ajouter Device (ou n'importequoi) pour que les noms soient diff�rents!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
int pixelI;
int pixelJ;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &pixelI, &pixelJ);
ripplingMath.colorIJ(&color, pixelI, pixelJ, t);
ptrDevPixels[s] = color;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
7790ddfce8e6f039ec39b6600514c15d6fa6227e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int Row, Col;
int j;
Row = blockIdx.y * blockDim.y + threadIdx.y;
Col = blockIdx.x * blockDim.x + threadIdx.x;
if(Row < numCRows && Col < numCColumns)
{
float PValue = 0;
for(j = 0;j < numAColumns;j++){
PValue += A[Row*numAColumns+j] * B[j*numBColumns + Col];}
C[Row*numCColumns + Col] = PValue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
int hostALength;
int hostBLength;
int hostCLength;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostALength = numARows * numAColumns;
hostBLength = numBRows * numBColumns;
hostCLength = numCRows * numCColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(sizeof(float) * numCColumns * numCRows);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **)&deviceA, hostALength * sizeof(float));
hipMalloc((void **)&deviceB, hostBLength * sizeof(float));
hipMalloc((void **)&deviceC, hostCLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA, hostA, hostALength * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, hostBLength * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid(ceil(float(numCColumns) / 16.0), ceil(float(numCRows) / 16.0), 1);
wbLog(TRACE, "The dimensions of grid are ",ceil(float(numCRows) / 16.0), ceil(float(numCColumns) / 16.0));
dim3 dimBlock(16, 16, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows,
numAColumns, numBRows,
numBColumns, numCRows,
numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, hostCLength * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
| 7790ddfce8e6f039ec39b6600514c15d6fa6227e.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows,
int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int Row, Col;
int j;
Row = blockIdx.y * blockDim.y + threadIdx.y;
Col = blockIdx.x * blockDim.x + threadIdx.x;
if(Row < numCRows && Col < numCColumns)
{
float PValue = 0;
for(j = 0;j < numAColumns;j++){
PValue += A[Row*numAColumns+j] * B[j*numBColumns + Col];}
C[Row*numCColumns + Col] = PValue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
int hostALength;
int hostBLength;
int hostCLength;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
hostALength = numARows * numAColumns;
hostBLength = numBRows * numBColumns;
hostCLength = numCRows * numCColumns;
//@@ Allocate the hostC matrix
hostC = (float *)malloc(sizeof(float) * numCColumns * numCRows);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceA, hostALength * sizeof(float));
cudaMalloc((void **)&deviceB, hostBLength * sizeof(float));
cudaMalloc((void **)&deviceC, hostCLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA, hostA, hostALength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, hostBLength * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 dimGrid(ceil(float(numCColumns) / 16.0), ceil(float(numCRows) / 16.0), 1);
wbLog(TRACE, "The dimensions of grid are ",ceil(float(numCRows) / 16.0), ceil(float(numCColumns) / 16.0));
dim3 dimBlock(16, 16, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows,
numAColumns, numBRows,
numBColumns, numCRows,
numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, hostCLength * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
2783a7080ab36d54892a22573c6c39361364b6a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "math/MandelbrotMath.h"
#include "DomaineMath_GPU.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, uint t, DomaineMath domaineMath)
{
MandelbrotMath mandelbrotMath = MandelbrotMath(t);
const int WH = w * h;
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
int i = 0;
int j = 0;
double x = 0;
double y = 0;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
domaineMath.toXY(i, j, &x, &y);
mandelbrotMath.colorXY(&ptrDevPixels[s], (float)x, (float)y);
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2783a7080ab36d54892a22573c6c39361364b6a0.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "math/MandelbrotMath.h"
#include "DomaineMath_GPU.h"
#include "IndiceTools_GPU.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, uint t, DomaineMath domaineMath)
{
MandelbrotMath mandelbrotMath = MandelbrotMath(t);
const int WH = w * h;
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
int i = 0;
int j = 0;
double x = 0;
double y = 0;
int s = TID;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
domaineMath.toXY(i, j, &x, &y);
mandelbrotMath.colorXY(&ptrDevPixels[s], (float)x, (float)y);
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
25cac5e4e72529a3b16c730df0af971d93f74576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if 0
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#endif
#define BOOST_TEST_MODULE example
#include <assert.h>
#include <boost/functional/hash.hpp>
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/test/included/unit_test.hpp>
#include <cuv/ndarray.hpp>
#include "random_tree_image_gpu.h"
#include "random_tree_image.h"
#include "score.h"
#include "test_common.h"
#include "utils.h"
using namespace curfil;
#define DUMP_IMAGE 0
static const int SEED = 4711;
// Test fixture: runs before every test case in the suite so that each test
// starts with an empty GPU image cache and cannot be affected by images
// cached by a previously executed test.
class Fixture {
public:
Fixture() {
clearImageCache();
}
};
BOOST_FIXTURE_TEST_SUITE(RandomTreeImageGPUTest, Fixture)
// Single-thread device kernel (launched <<<1,1>>>): evaluates the normalized
// information gain score for one candidate split on the GPU and stores it in
// *result, so host code can compare the device code path against the CPU one.
template<class W>
__global__
static void calculcateScoreKernel(ScoreType* result, const size_t numClasses,
        const W* leftClasses, const W* rightClasses, const unsigned int leftRightStride,
        const W* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
    *result = NormalizedInformationGainScore::calculateScore(numClasses,
            leftClasses, rightClasses, leftRightStride,
            allClasses, totalLeft, totalRight);
}
// Computes the normalized information gain score on the GPU for the given
// per-class histograms (left/right split partitions plus the combined
// histogram) and returns it to the host. Used to verify that the device
// implementation matches the CPU reference.
static ScoreType scoreOnGPU(const size_t numClasses, const cuv::ndarray<int, cuv::host_memory_space>& leftClasses,
        const cuv::ndarray<int, cuv::host_memory_space>& rightClasses,
        const cuv::ndarray<int, cuv::host_memory_space>& allClasses,
        const ScoreType totalLeft, const ScoreType totalRight) {
    // Device-side scalar that the kernel writes its result into.
    cuv::ndarray<ScoreType, cuv::dev_memory_space> result(1);
    // Copy the host histograms to device memory.
    const cuv::ndarray<int, cuv::dev_memory_space> leftClassesDevice(leftClasses);
    const cuv::ndarray<int, cuv::dev_memory_space> rightClassesDevice(rightClasses);
    const cuv::ndarray<int, cuv::dev_memory_space> allClassesDevice(allClasses);
    // The kernel uses a single stride for both arrays, so they must agree.
    const unsigned int leftRightStride = leftClassesDevice.stride(0);
    BOOST_REQUIRE_EQUAL(leftRightStride, rightClassesDevice.stride(0));
    hipLaunchKernelGGL(( calculcateScoreKernel), dim3(1),dim3(1), 0, 0, result.ptr(), numClasses, leftClassesDevice.ptr(), rightClassesDevice.ptr(),
            leftRightStride, allClassesDevice.ptr(), totalLeft, totalRight);
    // Surface launch-configuration errors immediately; the synchronize then
    // blocks until the kernel finishes and reports any execution error.
    cudaSafeCall(hipGetLastError());
    cudaSafeCall(hipDeviceSynchronize());
    // Element read of a device ndarray performs the device-to-host copy.
    double res = result[0];
    return res;
}
// Convenience overload for raw histogram arrays: stages the three histograms
// in host-side ndarrays and delegates to the ndarray-based overload, which
// handles the device transfer and the kernel launch.
static ScoreType scoreOnGPU(const size_t size, const WeightType* leftClass, const WeightType* rightClass,
        const WeightType* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
    cuv::ndarray<int, cuv::host_memory_space> leftClassArray(size);
    cuv::ndarray<int, cuv::host_memory_space> rightClassArray(size);
    cuv::ndarray<int, cuv::host_memory_space> allClassesArray(size);
    for (size_t pos = 0; pos < size; pos++) {
        leftClassArray[pos] = leftClass[pos];
        rightClassArray[pos] = rightClass[pos];
        allClassesArray[pos] = allClasses[pos];
    }
    return scoreOnGPU(size, leftClassArray, rightClassArray, allClassesArray, totalLeft, totalRight);
}
// Verifies NormalizedInformationGainScore (and InformationGainScore) on the
// CPU against hand-computed values, and checks that the GPU implementation
// (scoreOnGPU) agrees with the CPU result in every case.
BOOST_AUTO_TEST_CASE(testInformationGainScore) {
const int numClasses = 2;
cuv::ndarray<int, cuv::host_memory_space> left(numClasses);
cuv::ndarray<int, cuv::host_memory_space> right(numClasses);
cuv::ndarray<int, cuv::host_memory_space> allClass(numClasses);
for (size_t num = 1; num < 10; num++) {
// degenerate split: identical class distributions on both sides => score 0
left[0] = num;
right[0] = num;
left[1] = num;
right[1] = num;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
ScoreType totalLeft = 2 * num;
ScoreType totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// perfect split: each side contains exactly one class => score 1
left[0] = 0;
right[0] = 2 * num;
left[1] = 2 * num;
right[1] = 0;
allClass[0] = 2 * num;
allClass[1] = 2 * num;
totalLeft = 2 * num;
totalRight = 2 * num;
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(1, score, 0);
BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
}
// Two intermediate splits with precomputed expected scores; the second split
// separates the classes better, so its score must be strictly greater.
left[0] = 5;
right[0] = 3;
left[1] = 8;
right[1] = 1;
allClass[0] = 8;
allClass[1] = 9;
double totalLeft = left[0] + left[1];
double totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score1 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_CLOSE(0.080185, score1, 1e-4);
BOOST_CHECK_CLOSE(score1, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-5);
left[0] = 2;
right[0] = 6;
left[1] = 8;
right[1] = 1;
totalLeft = left[0] + left[1];
totalRight = right[0] + right[1];
BOOST_REQUIRE_EQUAL(left.stride(0), 1);
ScoreType score2 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
left.stride(0), allClass.ptr(), totalLeft, totalRight);
BOOST_CHECK_GT(score2, score1);
BOOST_CHECK_CLOSE(0.33339, score2, 1e-3);
BOOST_CHECK_CLOSE(score2, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
// case 1 (a real case, 5 classes, histograms taken from an actual training run)
// histogram: [ 86 241 291 3 267 ]
// histogram left: [ 56 241 290 3 18 ]
// histogram right: [ 30 0 1 0 249 ]
{
const size_t size = 5;
const WeightType all[] = { 86, 241, 291, 3, 267 };
const WeightType left[] = { 56, 241, 290, 3, 18 };
const WeightType right[] = { 30, 0, 1, 0, 249 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
// sanity check: left and right partitions must add up to the full histogram
BOOST_REQUIRE_EQUAL(totalLeft + totalRight, std::accumulate(all, all + size, 0));
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.491311, 1e-3);
BOOST_CHECK_CLOSE(score, scoreOnGPU(size, left, right, all, totalLeft, totalRight), 1e-6);
// the unnormalized variant of the score yields a different expected value
score = InformationGainScore::calculateScore(size, left, right,
leftRightStride, all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.690912, 1e-3);
}
{
// case 2 (constructed, obviously): a perfect class separation, so the
// plain information gain is 1.0 while the normalized score stays below 1
// histogram: [ 50 100 50 0 100 ]
// histogram left: [ 50 100 0 0 0 ]
// histogram right: [ 0 0 50 0 100 ]
const size_t size = 5;
const WeightType all[] = { 50, 100, 50, 0, 100 };
const WeightType left[] = { 50, 100, 0, 0, 0 };
const WeightType right[] = { 0, 0, 50, 0, 100 };
const unsigned int leftRightStride = 1;
const size_t totalLeft = std::accumulate(left, left + size, 0);
const size_t totalRight = std::accumulate(right, right + size, 0);
ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 0.68533, 1e-3);
score = InformationGainScore::calculateScore(size, left, right, leftRightStride,
all, totalLeft, totalRight);
BOOST_CHECK_CLOSE(score, 1.0, 1e-3);
}
}
// Folds 'value' into the running checksum 'hash'. The mixing formula
// (golden-ratio constant 0x9e3779b9 plus shifts) is extracted from
// boost::hash_combine in the boost headers, so checksums stay comparable
// across runs as long as the iteration order of the caller is unchanged.
template<class T>
static void updateHash(const boost::hash<size_t>& hasher, size_t& hash, const T& value) {
hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2);
}
// Validates a (thresholds x features) score matrix: checks its shape, checks
// that every score lies in [0, 1], and folds all scores into a checksum that
// callers can compare against a known value to detect regressions.
// The feature-major iteration order must not change, or checksums break.
template<class T>
static size_t checkScores(const cuv::ndarray<ScoreType, cuv::host_memory_space>& scores, T numFeatures,
        T numThresholds) {
    BOOST_CHECK_EQUAL(2, static_cast<int>(scores.ndim()));
    BOOST_CHECK_EQUAL(static_cast<size_t>(numThresholds), static_cast<size_t>(scores.shape(0)));
    BOOST_CHECK_EQUAL(static_cast<size_t>(numFeatures), static_cast<size_t>(scores.shape(1)));

    boost::hash<size_t> hasher;
    size_t checksum = 0;

    for (T feature = 0; feature < numFeatures; feature++) {
        for (T threshold = 0; threshold < numThresholds; threshold++) {
            const ScoreType score = scores(threshold, feature);
            // normalized scores must stay within [0, 1]
            BOOST_CHECK_GE(score, 0.0);
            BOOST_CHECK_LE(score, 1.0);
            updateHash(hasher, checksum, score);
        }
    }
    return checksum;
}
// Validates the per-split histogram counters produced on the device.
// Layout expected: (feature, threshold, label, left/right). For every
// (feature, threshold) split, the left+right partition sizes of a label must
// equal the number of samples carrying that label. All counter values are
// folded into a checksum for regression comparison; the label-major
// iteration order must not change, or checksums break.
static size_t checkCounters(TrainingConfiguration& configuration,
        const cuv::ndarray<WeightType, cuv::dev_memory_space> countersDevice,
        const std::vector<PixelInstance>& samples) {

    // Work on a host copy of the device counters.
    const cuv::ndarray<WeightType, cuv::host_memory_space> counters(countersDevice);

    boost::hash<size_t> hasher;
    size_t checksum = 0;

    // Tally how many samples carry each label.
    std::map<size_t, size_t> samplesPerLabel;
    for (size_t sampleIdx = 0; sampleIdx < samples.size(); sampleIdx++) {
        samplesPerLabel[samples[sampleIdx].getLabel()]++;
    }

    size_t numLabels = samplesPerLabel.size();
    assert(numLabels > 0);

    const size_t features = configuration.getFeatureCount();
    const size_t thresholds = configuration.getThresholds();

    BOOST_CHECK_EQUAL(4, static_cast<int>(counters.ndim()));
    BOOST_CHECK_EQUAL(features, static_cast<size_t>(counters.shape(0)));
    BOOST_CHECK_EQUAL(thresholds, static_cast<size_t>(counters.shape(1)));
    BOOST_CHECK_EQUAL(numLabels, static_cast<size_t>(counters.shape(2)));
    BOOST_CHECK_EQUAL(2lu, static_cast<size_t>(counters.shape()[3]));

    for (size_t label = 0; label < numLabels; label++) {
        for (size_t threshold = 0; threshold < thresholds; threshold++) {
            for (size_t feature = 0; feature < features; feature++) {
                const size_t left = counters(feature, threshold, label, 0);
                const size_t right = counters(feature, threshold, label, 1);
                const size_t numSamples = samplesPerLabel[label];
                // every sample of this label ends up on exactly one side
                BOOST_CHECK_EQUAL(numSamples, left + right);
                updateHash(hasher, checksum, left);
                updateHash(hasher, checksum, right);
            }
        }
    }
    return checksum;
}
// Exercises a single hand-constructed DEPTH feature on a tiny image in three
// scenarios: an empty depth image (response must be NaN), a sparse image with
// four known depth values (response checked against a hand-computed mean
// difference), and a constant-depth image (response must be 0). Counter
// contents are checked exhaustively for each threshold.
// accelerationMode GPU_AND_CPU_COMPARE makes the library cross-check
// GPU results against the CPU implementation internally.
BOOST_AUTO_TEST_CASE(testDepthFeatureSimple) {
const int NUM_FEAT = 1;
const int NUM_THRESH = 100;
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
// thresholds span -5.0 .. +4.9 in steps of 0.1
for (int i = 0; i < NUM_THRESH; i++) {
featuresAndThresholds.thresholds()(i, 0) = (i - 50) / 10.0f;
}
// one DEPTH feature comparing two offset regions around the sample pixel
featuresAndThresholds.types()[0] = DEPTH;
featuresAndThresholds.offset1X()[0] = 1;
featuresAndThresholds.offset1Y()[0] = 1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -1;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 1;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
// two samples at the same pixel but with different labels
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 3));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 3));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// scenario 1: image without any depth data -> feature response is NaN
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(0, 0))));
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkScores(scores, NUM_FEAT, NUM_THRESH);
}
// scenario 2: four known depth values; response is the difference of the
// two region means: (1.5+1.7+4.5)/3 - 3.9
images[0].reset();
clearImageCache();
images[0].setDepth(7, 3, Depth(1.5f));
images[0].setDepth(7, 4, Depth(1.7f));
images[0].setDepth(8, 4, Depth(4.5f));
images[0].setDepth(3, 2, Depth(3.9f));
#if DUMP_IMAGE
image.dumpDepth(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
CURFIL_INFO("integral:");
image.dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
checkCounters(configuration, counters, samples);
checkScores(scores, NUM_FEAT, NUM_THRESH);
BOOST_CHECK_CLOSE((1.5 + 1.7 + 4.5) / 3 - 3.9, static_cast<FeatureResponseType>(featureResponses(0, 0)), 1e-6);
// response is about -1.33, so thresholds from index 37 (value -1.3)
// upward send the sample to the left side
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < NUM_THRESH; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 37),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
// scenario 3 (nested in this scope): constant depth everywhere -> the
// region means cancel and the feature response must be exactly 0
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setDepth(x, y, Depth(1.0f));
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
image.dumpDepth(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkScores(scores, NUM_FEAT, NUM_THRESH);
checkCounters(configuration, counters, samples);
// response 0: only thresholds >= 0 (index 50 upward) go left
for (int label = 0; label < NUM_LABELS; label++) {
for (int thresh = 0; thresh < 100; thresh++) {
for (int feat = 0; feat < NUM_FEAT; feat++) {
BOOST_CHECK_EQUAL(1,
static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 50),
static_cast<unsigned int>(counters(feat, thresh, label, 0)));
}
}
}
}
}
}
// Exercises a single hand-constructed COLOR feature (three thresholds -1/0/+1)
// on a tiny image in three scenarios: an all-black image (response 0), one
// non-zero pixel inside region 1 (response 0.5), and a constant-color image
// (response is the area difference of the two regions). Counter contents are
// checked exhaustively per threshold; GPU_AND_CPU_COMPARE makes the library
// cross-check the GPU path against the CPU implementation internally.
BOOST_AUTO_TEST_CASE(testColorFeatureSimple) {
unsigned int samplesPerImage = 500;
unsigned int NUM_FEAT = 1;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const uint16_t NUM_THRESH = 3;
static const int NUM_THREADS = 1;
static const int maxImages = 0;
static const int imageCacheSize = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
static const int width = 16;
static const int height = 20;
std::vector<RGBDImage> images(1, RGBDImage(width, height));
std::vector<PixelInstance> samples;
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
// one COLOR feature on channel 0 comparing two offset regions
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 3;
featuresAndThresholds.offset1Y()[0] = -2;
featuresAndThresholds.region1X()[0] = 4;
featuresAndThresholds.region1Y()[0] = 2;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = -2;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 0;
featuresAndThresholds.thresholds()(0, 0) = -1.0f;
featuresAndThresholds.thresholds()(1, 0) = 0.0f;
featuresAndThresholds.thresholds()(2, 0) = 1.0f;
// two samples at the same pixel but with different labels
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// scenario 1: all-black image -> response 0; only the -1 threshold sends
// the samples to the right
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
assert(static_cast<int>(samples.size()) == NUM_LABELS);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
// scenario 2: one pixel with value 0.5 inside region 1 -> response 0.5;
// thresholds -1 and 0 now send the samples to the right
images[0].reset();
clearImageCache();
images[0].setColor(7, 2, 0, 0.5f);
#if DUMP_IMAGE
image.dump(std::cout);
#endif
images[0].calculateIntegral();
#if DUMP_IMAGE
std::cout << "integral" << std::endl;
image.dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
checkCounters(configuration, counters, samples);
BOOST_CHECK_CLOSE(0.5, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
}
}
// scenario 3: constant color 1.0 everywhere -> response is the region area
// difference (8*4 - 4*2); all three thresholds send the samples right
images[0].reset();
clearImageCache();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
images[0].setColor(x, y, 0, 1.0f);
}
}
images[0].calculateIntegral();
#if DUMP_IMAGE
image.dump(std::cout);
#endif
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
BOOST_CHECK_CLOSE(8 * 4 - 4 * 2.0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
checkCounters(configuration, counters, samples);
for (size_t label = 0; label < samples.size(); label++) {
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 1)));
}
}
}
// Exercises three hand-constructed COLOR features with two thresholds each on
// two gradient images (the second is the first halved), using four samples
// with different depths and positions. Every feature response and every
// counter cell is checked against manually calculated values.
BOOST_AUTO_TEST_CASE(testColorFeatureComplex) {
const size_t NUM_THRESH = 2;
const size_t NUM_FEAT = 3;
const int maxImages = 5;
const int imageCacheSize = 5; // make sure the cache is at least as big as #images
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
unsigned int maxSamplesPerBatch = 5000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
const int width = 12;
const int height = 15;
std::vector<RGBDImage> images(2, RGBDImage(width, height));
std::vector<PixelInstance> samples;
// image 0: per-channel gradient v = 100*c + y*width + x; image 1: v/2
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 100 * c + y * width + x;
images[0].setColor(x, y, c, v);
images[1].setColor(x, y, c, v / 2.0f);
}
}
}
// spot-check raw pixel values before computing the integral images
BOOST_CHECK_CLOSE(179, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(179 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
images[0].calculateIntegral();
images[1].calculateIntegral();
// after calculateIntegral(), getColor returns integral-image sums
BOOST_CHECK_CLOSE(16110, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(16110 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
// feature 0: channel 0 vs channel 2
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 2;
featuresAndThresholds.offset1Y()[0] = -1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 1;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = 4;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 2;
// feature 1: channel 1 vs channel 2, regions swapped relative to feature 0
featuresAndThresholds.types()[1] = COLOR;
featuresAndThresholds.offset1X()[1] = 2;
featuresAndThresholds.offset1Y()[1] = -1;
featuresAndThresholds.region1X()[1] = 2;
featuresAndThresholds.region1Y()[1] = 2;
featuresAndThresholds.offset2X()[1] = -3;
featuresAndThresholds.offset2Y()[1] = 4;
featuresAndThresholds.region2X()[1] = 1;
featuresAndThresholds.region2Y()[1] = 1;
featuresAndThresholds.channel1()[1] = 1;
featuresAndThresholds.channel2()[1] = 2;
// feature 2: channel 1 vs channel 0, mirrored offsets
featuresAndThresholds.types()[2] = COLOR;
featuresAndThresholds.offset1X()[2] = -2;
featuresAndThresholds.offset1Y()[2] = 1;
featuresAndThresholds.region1X()[2] = 3;
featuresAndThresholds.region1Y()[2] = 1;
featuresAndThresholds.offset2X()[2] = 3;
featuresAndThresholds.offset2Y()[2] = -4;
featuresAndThresholds.region2X()[2] = 3;
featuresAndThresholds.region2Y()[2] = 3;
featuresAndThresholds.channel1()[2] = 1;
featuresAndThresholds.channel2()[2] = 0;
// thresholds are indexed (threshold, feature)
featuresAndThresholds.thresholds()(0, 0) = 0.0f;
featuresAndThresholds.thresholds()(1, 0) = -500.0f;
featuresAndThresholds.thresholds()(0, 1) = -300.0f;
featuresAndThresholds.thresholds()(1, 1) = 0.0f;
featuresAndThresholds.thresholds()(0, 2) = 0.0f;
featuresAndThresholds.thresholds()(1, 2) = 500.0f;
const int NUM_LABELS = 2;
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[1], 0, Depth(2.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.5), 5, 5));
samples.push_back(PixelInstance(&images[1], 1, Depth(3.1), 3, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// 2 images, 3 features, 4 samples
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
// featureResponses is indexed (feature, sample)
BOOST_CHECK_EQUAL(2, static_cast<int>(featureResponses.ndim()));
BOOST_CHECK_EQUAL(3, static_cast<int>(featureResponses.shape(0)));
BOOST_CHECK_EQUAL(4, static_cast<int>(featureResponses.shape(1)));
checkScores(scores, NUM_FEAT, NUM_THRESH);
// values verified by manual calculation
// sample 0, feat 0
BOOST_CHECK_CLOSE(-2040, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
// sample 0, feat 1
BOOST_CHECK_CLOSE(1186, static_cast<FeatureResponseType>(featureResponses(1, 0)), 0);
// sample 0, feat 2: region falls outside the image -> NaN
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(2, 0))));
// sample 1, feat 0
BOOST_CHECK_CLOSE(-444, static_cast<FeatureResponseType>(featureResponses(0, 1)), 0);
// sample 1, feat 1
BOOST_CHECK_CLOSE(-244, static_cast<FeatureResponseType>(featureResponses(1, 1)), 0);
// sample 1, feat 2
BOOST_CHECK_CLOSE(244, static_cast<FeatureResponseType>(featureResponses(2, 1)), 0);
// sample 2, feat 0
BOOST_CHECK_CLOSE(-884, static_cast<FeatureResponseType>(featureResponses(0, 2)), 0);
// sample 2, feat 1
BOOST_CHECK_CLOSE(-484, static_cast<FeatureResponseType>(featureResponses(1, 2)), 0);
// sample 2, feat 2
BOOST_CHECK_CLOSE(572, static_cast<FeatureResponseType>(featureResponses(2, 2)), 0);
// sample 3, feat 0
BOOST_CHECK_CLOSE(-424, static_cast<FeatureResponseType>(featureResponses(0, 3)), 0);
// sample 3, feat 1
BOOST_CHECK_CLOSE(-224, static_cast<FeatureResponseType>(featureResponses(1, 3)), 0);
// sample 3, feat 2
BOOST_CHECK_CLOSE(224, static_cast<FeatureResponseType>(featureResponses(2, 3)), 0);
checkCounters(configuration, counters, samples);
// Summary of the responses above, used to derive the expected counters
// (counters are indexed (feature, threshold, label, left/right)):
// -2040 sample 0, feat 0 0
// -444 sample 1, feat 0 0
// -884 sample 2, feat 0 1
// -424 sample 3, feat 0 1
// 1186 sample 0, feat 1 0
// -244 sample 1, feat 1 0
// -484 sample 2, feat 1 1
// -224 sample 3, feat 1 1
// 1551 sample 0, feat 2 0
// 244 sample 1, feat 2 0
// 572 sample 2, feat 2 1
// 224 sample 3, feat 2 1
// feat 0, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 0, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 1, 1)));
// feat 0, thresh 1 (-500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 1)));
// feat 1, thresh 0 (-300.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 0, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 1)));
// feat 1, thresh 1 (0.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 1, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 1, 1, 1)));
// feat 2, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 0, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 1, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 1, 1)));
// feat 2, thresh 1 (+500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 1)));
}
// Large-scale regression test: 1000 randomly generated features evaluated on
// 1000 samples drawn from 10 synthetic gradient images. Results are not
// checked individually; instead the counter and score checksums are compared
// against previously recorded magic numbers to detect any behavioral change.
BOOST_AUTO_TEST_CASE(testColorFeatureManySamples) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
// deterministic per-image gradients so the checksums are reproducible
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 10;
const int NUM_SAMPLES = samplesPerImage * images.size();
// 100 samples per image with deterministic label/depth/position patterns
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
i / 100, // label
Depth((i % 20) / 10.0 + 1.0), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
// features are generated from the fixed SEED, so they are deterministic
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds =
featureFunction.generateRandomFeatures(batches[0], configuration.getRandomSeed(),
true, cuv::dev_memory_space());
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(
node, batches, featuresAndThresholds);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
size_t scoreHash = checkScores(scores, NUM_FEAT, NUM_THRESH);
size_t counterHash = checkCounters(configuration, counters, samples);
// magic number. used to check for regressions
BOOST_CHECK_EQUAL(4437303196209240250lu, counterHash);
BOOST_CHECK_EQUAL(13702092111133522162lu, scoreHash);
}
}
/**
 * Compares one CPU-side random tree node against its flattened GPU
 * representation stored in 'treeData'.
 *
 * Leaf nodes must carry no split in the GPU record (leftNodeOffset == -1,
 * NaN threshold) and the normalized class histogram verbatim. For inner
 * nodes the split that created the children must be supplied, and every
 * feature parameter of the GPU record must match it.
 */
static void checkNode(boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> > node,
        const boost::shared_ptr<const TreeNodes>& treeData,
        const SplitFunction<PixelInstance, ImageFeatureFunction>* split = 0) {

    const size_t labelCount = node->getHistogram().size();
    const size_t nodeNumber = node->getNodeId();
    // node ids are offset by the tree id; the normalized index must fit the flat array
    assert(nodeNumber - node->getTreeId() < treeData->numNodes());

    TreeNodeData gpuNode = getTreeNode(nodeNumber, treeData);

    if (node->isLeaf()) {
        // a leaf has no children and no threshold
        BOOST_CHECK_EQUAL(-1, static_cast<int>(gpuNode.leftNodeOffset));
        BOOST_CHECK(isnan(static_cast<float>(gpuNode.threshold)));
        // the normalized per-class histogram must be transferred verbatim
        for (size_t label = 0; label < labelCount; label++) {
            BOOST_CHECK_EQUAL(static_cast<float>(node->getNormalizedHistogram()[label]),
                    static_cast<float>(gpuNode.histogram(label)));
        }
        return;
    }

    // inner node: the caller must pass the split that produced the children
    BOOST_REQUIRE(split);

    const ImageFeatureFunction& feature = split->getFeature();
    const float expectedThreshold = split->getThreshold();
    // children are addressed relative to the parent in the GPU layout
    const int expectedLeftNodeOffset = node->getLeft()->getNodeId() - node->getNodeId();

    BOOST_CHECK_EQUAL(expectedLeftNodeOffset, static_cast<int>(gpuNode.leftNodeOffset));
    BOOST_CHECK_EQUAL(expectedThreshold, static_cast<float>(gpuNode.threshold));
    BOOST_CHECK_EQUAL(static_cast<int>(feature.getType()), static_cast<int>(gpuNode.type));
    BOOST_CHECK_EQUAL(feature.getOffset1().getX(), static_cast<int>(gpuNode.offset1X));
    BOOST_CHECK_EQUAL(feature.getOffset1().getY(), static_cast<int>(gpuNode.offset1Y));
    BOOST_CHECK_EQUAL(feature.getRegion1().getX(), static_cast<int>(gpuNode.region1X));
    BOOST_CHECK_EQUAL(feature.getRegion1().getY(), static_cast<int>(gpuNode.region1Y));
    BOOST_CHECK_EQUAL(feature.getOffset2().getX(), static_cast<int>(gpuNode.offset2X));
    BOOST_CHECK_EQUAL(feature.getOffset2().getY(), static_cast<int>(gpuNode.offset2Y));
    BOOST_CHECK_EQUAL(feature.getRegion2().getX(), static_cast<int>(gpuNode.region2X));
    BOOST_CHECK_EQUAL(feature.getRegion2().getY(), static_cast<int>(gpuNode.region2Y));
    BOOST_CHECK_EQUAL(feature.getChannel1(), static_cast<uint8_t>(gpuNode.channel1));
    BOOST_CHECK_EQUAL(feature.getChannel2(), static_cast<uint8_t>(gpuNode.channel2));
}
// Builds a small hand-crafted 5-node tree, converts it to the flat GPU node
// layout via convertTree() and verifies the conversion node by node. Finally
// runs a GPU classification pass over an (empty) image as a smoke test.
BOOST_AUTO_TEST_CASE(testRecallOnGPU) {
    const int NUM_FEAT = 1000;
    const int NUM_THRESH = 50;
    unsigned int samplesPerImage = 100;
    unsigned int minSampleCount = 32;
    int maxDepth = 15;
    uint16_t boxRadius = 127;
    uint16_t regionSize = 50;
    static const int NUM_THREADS = 1;
    static const int maxImages = 5;
    static const int imageCacheSize = 5;
    unsigned int maxSamplesPerBatch = 100000;
    // GPU_AND_CPU_COMPARE: run both implementations and compare their results
    AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
    TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
            regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
    ImageFeatureEvaluation featureFunction(0, configuration);
    std::vector<PixelInstance> samples;
    const int width = 640;
    const int height = 480;
    std::vector<RGBDImage> images(10, RGBDImage(width, height));
    // fill every image with a deterministic per-pixel color pattern so that
    // feature responses are reproducible
    for (size_t image = 0; image < images.size(); image++) {
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                for (int c = 0; c < 3; c++) {
                    float v = 10000 * image + 100 * c + y * width + x;
                    images[image].setColor(x, y, c, v);
                }
            }
        }
        images[image].calculateIntegral();
    }
    const size_t NUM_LABELS = 3;
    const int NUM_SAMPLES = samplesPerImage * images.size();
    // distribute samples evenly over images, labels, positions and depths
    for (int i = 0; i < NUM_SAMPLES; i++) {
        PixelInstance sample(
                &images.at(i / (NUM_SAMPLES / images.size())), // image
                (i / 100) % NUM_LABELS, // label
                Depth((i % 20) / 10.0 + 0.1), // depth
                i % width, // x
                i % height // y
                );
        samples.push_back(sample);
    }
    /**
     * Tree under test:
     *
     *          n0
     *         /  \
     *        n1   n2
     *       /  \
     *      n3   n4
     */
    boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n0 =
            boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(0, 0, getPointers(samples),
                    NUM_LABELS);
    std::vector<WeightType> histN1(NUM_LABELS, 0);
    histN1[0] = 10;
    histN1[1] = 10;
    histN1[2] = 10;
    boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n1 =
            boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(1, 1, n0, histN1);
    std::vector<WeightType> histN2(NUM_LABELS, 0);
    histN2[0] = 60;
    histN2[1] = 60;
    histN2[2] = 20;
    boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n2 =
            boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(2, 1, n0, histN2);
    std::vector<WeightType> histN3(NUM_LABELS, 0);
    histN3[0] = 10;
    histN3[2] = 20;
    boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n3 =
            boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(3, 2, n1, histN3);
    std::vector<WeightType> histN4(NUM_LABELS, 0);
    histN4[1] = 50;
    histN4[2] = 20;
    boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n4 =
            boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(4, 2, n1, histN4);
    // split for inner node n1 (children n3/n4): an arbitrary COLOR feature
    size_t featureId1 = 1;
    float threshold1 = 28.391;
    ScoreType score1 = 0.392;
    ImageFeatureFunction feature1(COLOR,
            Offset(-10, 5), Region(7, 3), 1,
            Offset(27, -19), Region(65, 73), 2);
    SplitFunction<PixelInstance, ImageFeatureFunction> split1(featureId1, feature1, threshold1, score1);
    n1->addChildren(split1, n3, n4);
    // split for the root n0 (children n1/n2): an arbitrary DEPTH feature
    size_t featureId2 = 2;
    float threshold2 = -29.1245;
    ScoreType score2 = 0.9371;
    ImageFeatureFunction feature2(DEPTH,
            Offset(-18, 25), Region(4, 19), 0,
            Offset(9, 28), Region(1, 16), 0);
    SplitFunction<PixelInstance, ImageFeatureFunction> split2(featureId2, feature2, threshold2, score2);
    n0->addChildren(split2, n1, n2);
    BOOST_CHECK(n0->isRoot());
    BOOST_CHECK_EQUAL(5, n0->countNodes());
    // uniform class prior so normalization does not favor any label
    cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
    for (size_t i = 0; i < NUM_LABELS; i++) {
        classLabelPriorDistribution[i] = 100;
    }
    boost::shared_ptr<RandomTreeImage> randomTreeImage = boost::make_shared<RandomTreeImage>(n0, configuration,
            classLabelPriorDistribution);
    randomTreeImage->normalizeHistograms(0.0);
    // convert to the flat GPU layout and verify the metadata and every node
    boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
    BOOST_CHECK_EQUAL(n0->countNodes(), static_cast<size_t>(treeData->numNodes()));
    BOOST_CHECK_EQUAL(n0->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
    checkNode(n0, treeData, &split2);
    checkNode(n1, treeData, &split1);
    BOOST_REQUIRE(n2->isLeaf());
    BOOST_REQUIRE(n3->isLeaf());
    BOOST_REQUIRE(n4->isLeaf());
    checkNode(n2, treeData);
    checkNode(n3, treeData);
    checkNode(n4, treeData);
    // do classify: smoke test of the GPU recall path on a blank image
    // (only checks that classifyImage runs; output values are not inspected)
    const size_t treeCacheSize = 3;
    {
        RGBDImage image(640, 480);
        image.calculateIntegral();
        {
            utils::Profile classifyImageTimer("classifyImage");
            cuv::ndarray<float, cuv::dev_memory_space> output(
                    cuv::extents[NUM_LABELS][image.getHeight()][image.getWidth()]);
            cudaSafeCall(hipMemset(output.ptr(), 0, static_cast<size_t>(output.size() * sizeof(float))));
            classifyImage(treeCacheSize, output, image, NUM_LABELS, treeData);
        }
    }
}
// Builds three degenerate (left-leaning) trees of increasing size — the last
// one deliberately exceeds NODES_PER_TREE_LAYER — converts each to the flat
// GPU layout and verifies every node of the conversion.
BOOST_AUTO_TEST_CASE(testRecallLargeForest) {
    unsigned int samplesPerImage = 100;
    const int NUM_FEAT = 1000;
    const int NUM_THRESH = 50;
    unsigned int minSampleCount = 32;
    int maxDepth = 15;
    uint16_t boxRadius = 127;
    uint16_t regionSize = 50;
    static const int NUM_THREADS = 1;
    static const int maxImages = 5;
    static const int imageCacheSize = 5;
    unsigned int maxSamplesPerBatch = 100000;
    AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
    TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
            regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
    std::vector<PixelInstance> samples;
    const size_t NUM_LABELS = 3;
    const int width = 640;
    const int height = 480;
    // images are left blank here: only the tree structure is under test
    std::vector<RGBDImage> images(10, RGBDImage(width, height));
    const int NUM_SAMPLES = samplesPerImage * images.size();
    for (int i = 0; i < NUM_SAMPLES; i++) {
        PixelInstance sample(
                &images.at(i / (NUM_SAMPLES / images.size())), // image
                (i / 100) % NUM_LABELS, // label
                Depth((i % 20) / 10.0 + 0.1), // depth
                i % width, // x
                i % height // y
                );
        samples.push_back(sample);
    }
    // explicitly test a tree that exceeds the maximal number of nodes per layer
    const size_t numNodes[] = { 10, 100, 3 * NODES_PER_TREE_LAYER + 2 };
    // deterministic samplers (fixed seed 4711) for feature parameters
    Sampler sampler(4711, 0, 1000);
    Sampler typeSampler(4711, 0, 1);
    Sampler channelSampler(4711, 0, 5);
    Sampler offsetSampler(4711, -120, 120);
    Sampler regionSampler(4711, 0, 20);
    for (size_t treeId = 0; treeId < 3; treeId++) {
        boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rootNode =
                boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(treeId, 0, getPointers(samples),
                        NUM_LABELS);
        std::vector<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > > nodes;
        // remember which split was attached to which inner node for later verification
        std::map<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >,
                SplitFunction<PixelInstance, ImageFeatureFunction> > splits;
        nodes.push_back(rootNode);
        boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > previousNode = rootNode;
        /**
         * creates a degenerated tree with N nodes: each left child becomes the
         * next inner node, every right child stays a leaf
         *
         *        n0
         *       /  \
         *      n1   n2
         *     /  \
         *    n3   n4
         *   /
         *  n5
         *  / \
         * n6  n7
         * /
         * ...
         */
        for (size_t nodeId = 1; nodeId < numNodes[treeId]; nodeId += 2) {
            const size_t level = (nodeId + 1) / 2;
            assert(level > 0 && level < numNodes[treeId]);
            // node ids are offset by treeId, mirroring the global numbering scheme
            boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > leftNode =
                    boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + treeId, level,
                            getPointers(samples), NUM_LABELS, previousNode);
            boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rightNode =
                    boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + 1 + treeId, level,
                            getPointers(samples), NUM_LABELS, previousNode);
            size_t featureId = sampler.getNext();
            float threshold = sampler.getNext() / 200.0 - 100.0;
            ScoreType score = sampler.getNext() / 1000.0;
            assertProbability(score);
            // random but reproducible feature parameters; sampler call order
            // must not change or the constructed trees change
            ImageFeatureFunction feature(static_cast<FeatureType>(typeSampler.getNext()),
                    Offset(offsetSampler.getNext(), offsetSampler.getNext()),
                    Region(regionSampler.getNext(), regionSampler.getNext()),
                    channelSampler.getNext(),
                    Offset(offsetSampler.getNext(), offsetSampler.getNext()),
                    Region(regionSampler.getNext(), regionSampler.getNext()),
                    channelSampler.getNext());
            SplitFunction<PixelInstance, ImageFeatureFunction> split(featureId, feature, threshold, score);
            previousNode->addChildren(split, leftNode, rightNode);
            splits[previousNode] = split;
            nodes.push_back(leftNode);
            nodes.push_back(rightNode);
            previousNode = leftNode;
        }
        BOOST_CHECK_EQUAL(treeId, rootNode->getTreeId());
        BOOST_CHECK(rootNode->isRoot());
        // the loop adds 2 nodes per iteration over ceil(numNodes/2) iterations → numNodes + root
        BOOST_CHECK_EQUAL(numNodes[treeId] + 1, rootNode->countNodes());
        // uniform class prior
        cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
        for (size_t i = 0; i < NUM_LABELS; i++) {
            classLabelPriorDistribution[i] = 100;
        }
        boost::shared_ptr<RandomTreeImage> randomTreeImage =
                boost::make_shared<RandomTreeImage>(rootNode, configuration, classLabelPriorDistribution);
        randomTreeImage->normalizeHistograms(0.0);
        boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
        BOOST_CHECK_EQUAL(rootNode->countNodes(), static_cast<size_t>(treeData->numNodes()));
        BOOST_CHECK_EQUAL(rootNode->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
        CURFIL_INFO("checking nodes");
        assert(nodes.size() == numNodes[treeId] + 1);
        // NOTE(review): loop bound is numNodes, so the very last node in
        // 'nodes' (index numNodes) is never checked — confirm this is intended
        for (size_t nodeId = 0; nodeId < numNodes[treeId]; nodeId++) {
            const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > node = nodes[nodeId];
            if (node->isLeaf()) {
                checkNode(node, treeData);
            } else {
                checkNode(node, treeData, &splits[node]);
            }
        }
        CURFIL_INFO("checked " << numNodes[treeId] << " nodes of tree " << treeId);
    }
}
BOOST_AUTO_TEST_SUITE_END()
| 25cac5e4e72529a3b16c730df0af971d93f74576.cu | #if 0
#######################################################################################
# The MIT License
# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######################################################################################
#endif
#define BOOST_TEST_MODULE example
#include <assert.h>
#include <boost/functional/hash.hpp>
#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/test/included/unit_test.hpp>
#include <cuv/ndarray.hpp>
#include "random_tree_image_gpu.h"
#include "random_tree_image.h"
#include "score.h"
#include "test_common.h"
#include "utils.h"
using namespace curfil;
#define DUMP_IMAGE 0
static const int SEED = 4711;
// Test fixture: clears the global GPU image cache before every test case so
// that cached image data from a previous test cannot leak into the next one.
class Fixture {
public:
    Fixture() { clearImageCache(); }
};
BOOST_FIXTURE_TEST_SUITE(RandomTreeImageGPUTest, Fixture)
// Device-side wrapper around NormalizedInformationGainScore::calculateScore:
// evaluates the score for one left/right/total histogram triple and stores it
// in *result. Launched with a single thread (<<<1,1>>>) by scoreOnGPU() so the
// device implementation can be compared against the host one.
// (Name typo "calculcate" is kept intentionally: callers use this spelling.)
template<class W>
__global__
static void calculcateScoreKernel(ScoreType* result, const size_t numClasses,
        const W* leftClasses, const W* rightClasses, const unsigned int leftRightStride,
        const W* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
    *result = NormalizedInformationGainScore::calculateScore(numClasses, leftClasses, rightClasses,
            leftRightStride, allClasses, totalLeft, totalRight);
}
// Computes the normalized information gain score on the GPU for the given
// host-side class histograms and returns the result to the host.
//
// Copies the histograms to the device, runs calculcateScoreKernel with a
// single thread, synchronizes, and reads back the single score value.
static ScoreType scoreOnGPU(const size_t numClasses, const cuv::ndarray<int, cuv::host_memory_space>& leftClasses,
        const cuv::ndarray<int, cuv::host_memory_space>& rightClasses,
        const cuv::ndarray<int, cuv::host_memory_space>& allClasses,
        const ScoreType totalLeft, const ScoreType totalRight) {
    cuv::ndarray<ScoreType, cuv::dev_memory_space> result(1);
    const cuv::ndarray<int, cuv::dev_memory_space> leftClassesDevice(leftClasses);
    const cuv::ndarray<int, cuv::dev_memory_space> rightClassesDevice(rightClasses);
    const cuv::ndarray<int, cuv::dev_memory_space> allClassesDevice(allClasses);
    // the kernel assumes left and right histograms share one stride
    const unsigned int leftRightStride = leftClassesDevice.stride(0);
    BOOST_REQUIRE_EQUAL(leftRightStride, rightClassesDevice.stride(0));
    calculcateScoreKernel<<<1,1>>>(result.ptr(), numClasses, leftClassesDevice.ptr(), rightClassesDevice.ptr(),
            leftRightStride, allClassesDevice.ptr(), totalLeft, totalRight);
    // surface launch-configuration errors immediately instead of at the next API call
    cudaSafeCall(cudaGetLastError());
    // cudaThreadSynchronize() is deprecated since CUDA 4.0 and removed in
    // recent toolkits; cudaDeviceSynchronize() is the drop-in replacement and
    // also surfaces asynchronous kernel execution errors
    cudaSafeCall(cudaDeviceSynchronize());
    double res = result[0];
    return res;
}
// Convenience overload: wraps raw WeightType histogram arrays into host
// ndarrays and delegates to the ndarray-based scoreOnGPU() above.
static ScoreType scoreOnGPU(const size_t size, const WeightType* leftClass, const WeightType* rightClass,
        const WeightType* allClasses, const ScoreType totalLeft, const ScoreType totalRight) {
    cuv::ndarray<int, cuv::host_memory_space> leftHistogram(size);
    cuv::ndarray<int, cuv::host_memory_space> rightHistogram(size);
    cuv::ndarray<int, cuv::host_memory_space> totalHistogram(size);
    // element-wise copy (WeightType -> int)
    for (size_t klass = 0; klass < size; klass++) {
        leftHistogram[klass] = leftClass[klass];
        rightHistogram[klass] = rightClass[klass];
        totalHistogram[klass] = allClasses[klass];
    }
    return scoreOnGPU(size, leftHistogram, rightHistogram, totalHistogram, totalLeft, totalRight);
}
// Validates NormalizedInformationGainScore (and InformationGainScore) on
// hand-constructed class histograms, and cross-checks every CPU score against
// the identical computation executed on the GPU via scoreOnGPU().
BOOST_AUTO_TEST_CASE(testInformationGainScore) {
    const int numClasses = 2;
    cuv::ndarray<int, cuv::host_memory_space> left(numClasses);
    cuv::ndarray<int, cuv::host_memory_space> right(numClasses);
    cuv::ndarray<int, cuv::host_memory_space> allClass(numClasses);
    for (size_t num = 1; num < 10; num++) {
        // worst case scenario: both children carry the same distribution, the
        // split gains no information → score=0
        left[0] = num;
        right[0] = num;
        left[1] = num;
        right[1] = num;
        allClass[0] = 2 * num;
        allClass[1] = 2 * num;
        ScoreType totalLeft = 2 * num;
        ScoreType totalRight = 2 * num;
        BOOST_REQUIRE_EQUAL(left.stride(0), 1);
        ScoreType score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
                left.stride(0), allClass.ptr(), totalLeft, totalRight);
        BOOST_CHECK_CLOSE(0, score, 0);
        BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
        // best case scenario: the split separates the two classes perfectly → score=1
        left[0] = 0;
        right[0] = 2 * num;
        left[1] = 2 * num;
        right[1] = 0;
        allClass[0] = 2 * num;
        allClass[1] = 2 * num;
        totalLeft = 2 * num;
        totalRight = 2 * num;
        BOOST_REQUIRE_EQUAL(left.stride(0), 1);
        score = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
                left.stride(0), allClass.ptr(), totalLeft, totalRight);
        BOOST_CHECK_CLOSE(1, score, 0);
        BOOST_CHECK_CLOSE(score, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
    }
    // an intermediate split: expected value is a regression constant
    left[0] = 5;
    right[0] = 3;
    left[1] = 8;
    right[1] = 1;
    allClass[0] = 8;
    allClass[1] = 9;
    double totalLeft = left[0] + left[1];
    double totalRight = right[0] + right[1];
    BOOST_REQUIRE_EQUAL(left.stride(0), 1);
    ScoreType score1 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
            left.stride(0), allClass.ptr(), totalLeft, totalRight);
    BOOST_CHECK_CLOSE(0.080185, score1, 1e-4);
    BOOST_CHECK_CLOSE(score1, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-5);
    // a better split must score strictly higher than the previous one
    left[0] = 2;
    right[0] = 6;
    left[1] = 8;
    right[1] = 1;
    totalLeft = left[0] + left[1];
    totalRight = right[0] + right[1];
    BOOST_REQUIRE_EQUAL(left.stride(0), 1);
    ScoreType score2 = NormalizedInformationGainScore::calculateScore(numClasses, left.ptr(), right.ptr(),
            left.stride(0), allClass.ptr(), totalLeft, totalRight);
    BOOST_CHECK_GT(score2, score1);
    BOOST_CHECK_CLOSE(0.33339, score2, 1e-3);
    BOOST_CHECK_CLOSE(score2, scoreOnGPU(numClasses, left, right, allClass, totalLeft, totalRight), 1e-6);
    // case 1 (a real case)
    // histogram: [ 86 241 291 3 267 ]
    // histogram left: [ 56 241 290 3 18 ]
    // histogram right: [ 30 0 1 0 249 ]
    {
        const size_t size = 5;
        const WeightType all[] = { 86, 241, 291, 3, 267 };
        const WeightType left[] = { 56, 241, 290, 3, 18 };
        const WeightType right[] = { 30, 0, 1, 0, 249 };
        const unsigned int leftRightStride = 1;
        const size_t totalLeft = std::accumulate(left, left + size, 0);
        const size_t totalRight = std::accumulate(right, right + size, 0);
        // sanity: the split must partition the parent histogram
        BOOST_REQUIRE_EQUAL(totalLeft + totalRight, std::accumulate(all, all + size, 0));
        ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right,
                leftRightStride, all, totalLeft, totalRight);
        BOOST_CHECK_CLOSE(score, 0.491311, 1e-3);
        BOOST_CHECK_CLOSE(score, scoreOnGPU(size, left, right, all, totalLeft, totalRight), 1e-6);
        // the unnormalized variant yields a different (regression-pinned) value
        score = InformationGainScore::calculateScore(size, left, right,
                leftRightStride, all, totalLeft, totalRight);
        BOOST_CHECK_CLOSE(score, 0.690912, 1e-3);
    }
    {
        // case 2 (constructed, obviously)
        // histogram: [ 50 100 50 0 100 ]
        // histogram left: [ 50 100 0 0 0 ]
        // histogram right: [ 0 0 50 0 100 ]
        const size_t size = 5;
        const WeightType all[] = { 50, 100, 50, 0, 100 };
        const WeightType left[] = { 50, 100, 0, 0, 0 };
        const WeightType right[] = { 0, 0, 50, 0, 100 };
        const unsigned int leftRightStride = 1;
        const size_t totalLeft = std::accumulate(left, left + size, 0);
        const size_t totalRight = std::accumulate(right, right + size, 0);
        ScoreType score = NormalizedInformationGainScore::calculateScore(size, left, right, leftRightStride,
                all, totalLeft, totalRight);
        BOOST_CHECK_CLOSE(score, 0.68533, 1e-3);
        // perfect separation: the unnormalized information gain is maximal
        score = InformationGainScore::calculateScore(size, left, right, leftRightStride,
                all, totalLeft, totalRight);
        BOOST_CHECK_CLOSE(score, 1.0, 1e-3);
    }
}
// Folds 'value' into the running checksum 'hash'.
// The combining formula is extracted from the boost headers
// (boost::hash_combine). Do NOT change it: the resulting checksums are
// compared against hard-coded magic numbers in the regression tests above,
// so the arithmetic must stay bit-identical across runs and revisions.
template<class T>
static void updateHash(const boost::hash<size_t>& hasher, size_t& hash, const T& value) {
    // extract from boost headers
    hash ^= hasher(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2);
}
// Validates the score matrix produced by calculateScores(): it must be a
// (numThresholds x numFeatures) 2D array whose entries all lie in [0, 1].
// Returns a checksum over all scores (fold order: feature-major, threshold
// inner — must stay fixed because callers compare it against magic numbers).
template<class T>
static size_t checkScores(const cuv::ndarray<ScoreType, cuv::host_memory_space>& scores, T numFeatures,
        T numThresholds) {
    BOOST_CHECK_EQUAL(2, static_cast<int>(scores.ndim()));
    BOOST_CHECK_EQUAL(static_cast<size_t>(numThresholds), static_cast<size_t>(scores.shape(0)));
    BOOST_CHECK_EQUAL(static_cast<size_t>(numFeatures), static_cast<size_t>(scores.shape(1)));

    boost::hash<size_t> hasher;
    size_t checksum = 0;

    for (T feature = 0; feature < numFeatures; feature++) {
        for (T threshold = 0; threshold < numThresholds; threshold++) {
            const ScoreType score = scores(threshold, feature);
            // normalized scores must lie in the unit interval
            BOOST_CHECK_GE(score, 0.0);
            BOOST_CHECK_LE(score, 1.0);
            updateHash(hasher, checksum, score);
        }
    }
    return checksum;
}
// Validates the per-feature/threshold/label split counters computed on the
// device: the array must have shape (features x thresholds x labels x 2) and,
// for every cell, left + right must equal the number of samples of that label.
// Returns a checksum over all counters (fold order: label-major, then
// threshold, then feature, left before right — must stay fixed because
// callers compare it against hard-coded regression values).
static size_t checkCounters(TrainingConfiguration& configuration,
        const cuv::ndarray<WeightType, cuv::dev_memory_space> countersDevice,
        const std::vector<PixelInstance>& samples) {

    // pull the histograms back to the host for inspection
    const cuv::ndarray<WeightType, cuv::host_memory_space> counters(countersDevice);

    boost::hash<size_t> hasher;
    size_t checksum = 0;

    // count how many samples carry each label
    std::map<size_t, size_t> samplesPerLabel;
    for (size_t sample = 0; sample < samples.size(); sample++) {
        samplesPerLabel[samples[sample].getLabel()]++;
    }

    const size_t numLabels = samplesPerLabel.size();
    assert(numLabels > 0);

    const size_t features = configuration.getFeatureCount();
    const size_t thresholds = configuration.getThresholds();

    BOOST_CHECK_EQUAL(4, static_cast<int>(counters.ndim()));
    BOOST_CHECK_EQUAL(features, static_cast<size_t>(counters.shape(0)));
    BOOST_CHECK_EQUAL(thresholds, static_cast<size_t>(counters.shape(1)));
    BOOST_CHECK_EQUAL(numLabels, static_cast<size_t>(counters.shape(2)));
    BOOST_CHECK_EQUAL(2lu, static_cast<size_t>(counters.shape()[3]));

    for (size_t label = 0; label < numLabels; label++) {
        for (size_t threshold = 0; threshold < thresholds; threshold++) {
            for (size_t feature = 0; feature < features; feature++) {
                const size_t leftCount = counters(feature, threshold, label, 0);
                const size_t rightCount = counters(feature, threshold, label, 1);
                // every sample of this label must land on exactly one side
                const size_t numSamples = samplesPerLabel[label];
                BOOST_CHECK_EQUAL(numSamples, leftCount + rightCount);
                updateHash(hasher, checksum, leftCount);
                updateHash(hasher, checksum, rightCount);
            }
        }
    }
    return checksum;
}
// Exercises a single hand-crafted DEPTH feature against a tiny image in three
// phases: (1) an empty image (feature response must be NaN), (2) an image with
// four hand-set depth values (response must be the mean-depth difference of
// the two feature regions), (3) a constant-depth image (response must be 0).
BOOST_AUTO_TEST_CASE(testDepthFeatureSimple) {
    const int NUM_FEAT = 1;
    const int NUM_THRESH = 100;
    unsigned int samplesPerImage = 500;
    unsigned int minSampleCount = 32;
    int maxDepth = 15;
    uint16_t boxRadius = 127;
    uint16_t regionSize = 16;
    static const int NUM_THREADS = 1;
    static const int maxImages = 0;
    static const int imageCacheSize = 1;
    unsigned int maxSamplesPerBatch = 5000;
    AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
    TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
            regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
    ImageFeatureEvaluation featureFunction(0, configuration);
    static const int width = 16;
    static const int height = 20;
    std::vector<RGBDImage> images(1, RGBDImage(width, height));
    std::vector<PixelInstance> samples;
    ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
            boost::make_shared<cuv::default_allocator>());
    // thresholds range linearly from -5.0 to +4.9 in steps of 0.1
    for (int i = 0; i < NUM_THRESH; i++) {
        featuresAndThresholds.thresholds()(i, 0) = (i - 50) / 10.0f;
    }
    // the single feature under test: a DEPTH feature comparing region (2,2)
    // at offset (1,1) against region (1,1) at offset (-3,-1)
    featuresAndThresholds.types()[0] = DEPTH;
    featuresAndThresholds.offset1X()[0] = 1;
    featuresAndThresholds.offset1Y()[0] = 1;
    featuresAndThresholds.region1X()[0] = 2;
    featuresAndThresholds.region1Y()[0] = 2;
    featuresAndThresholds.offset2X()[0] = -3;
    featuresAndThresholds.offset2Y()[0] = -1;
    featuresAndThresholds.region2X()[0] = 1;
    featuresAndThresholds.region2Y()[0] = 1;
    featuresAndThresholds.channel1()[0] = 0;
    featuresAndThresholds.channel2()[0] = 0;
    const int NUM_LABELS = 2;
    // two samples at the same pixel (6,3), one per label
    samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 3));
    samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 3));
    RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
    cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
    // phase 1: image without depth values → feature response must be NaN
    {
        std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
                cuv::dev_memory_space(), false);
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
        cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                        &featureResponses);
        BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(0, 0))));
        cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
                featuresAndThresholds, histogram));
        checkScores(scores, NUM_FEAT, NUM_THRESH);
    }
    // phase 2: four hand-set depth values; cache must be cleared after mutation
    images[0].reset();
    clearImageCache();
    images[0].setDepth(7, 3, Depth(1.5f));
    images[0].setDepth(7, 4, Depth(1.7f));
    images[0].setDepth(8, 4, Depth(4.5f));
    images[0].setDepth(3, 2, Depth(3.9f));
#if DUMP_IMAGE
    image.dumpDepth(std::cout);
#endif
    images[0].calculateIntegral();
#if DUMP_IMAGE
    CURFIL_INFO("integral:");
    image.dumpDepth(std::cout);
#endif
    {
        std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
                node, cuv::dev_memory_space(), false);
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
        cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                        &featureResponses);
        cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
                featuresAndThresholds, histogram));
        checkCounters(configuration, counters, samples);
        checkScores(scores, NUM_FEAT, NUM_THRESH);
        // expected response: mean depth of region 1 minus depth of region 2
        BOOST_CHECK_CLOSE((1.5 + 1.7 + 4.5) / 3 - 3.9, static_cast<FeatureResponseType>(featureResponses(0, 0)), 1e-6);
        for (int label = 0; label < NUM_LABELS; label++) {
            for (int thresh = 0; thresh < NUM_THRESH; thresh++) {
                for (int feat = 0; feat < NUM_FEAT; feat++) {
                    // each label has exactly one sample → left + right == 1,
                    // and it lands left iff threshold >= response (index 37)
                    BOOST_CHECK_EQUAL(1,
                            static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
                    BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 37),
                            static_cast<unsigned int>(counters(feat, thresh, label, 0)));
                }
            }
        }
        // phase 3: constant depth everywhere → feature response must be 0
        images[0].reset();
        clearImageCache();
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                images[0].setDepth(x, y, Depth(1.0f));
            }
        }
        images[0].calculateIntegral();
#if DUMP_IMAGE
        image.dumpDepth(std::cout);
#endif
        {
            std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
                    node, cuv::dev_memory_space(), false);
            cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
            cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                    featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                            &featureResponses);
            cuv::ndarray<ScoreType, cuv::host_memory_space> scores(featureFunction.calculateScores(counters,
                    featuresAndThresholds, histogram));
            BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
            checkScores(scores, NUM_FEAT, NUM_THRESH);
            checkCounters(configuration, counters, samples);
            for (int label = 0; label < NUM_LABELS; label++) {
                for (int thresh = 0; thresh < 100; thresh++) {
                    for (int feat = 0; feat < NUM_FEAT; feat++) {
                        // response 0 → sample goes left for thresholds >= 0 (index 50)
                        BOOST_CHECK_EQUAL(1,
                                static_cast<int>(counters(feat, thresh, label, 0) + counters(feat, thresh, label, 1)));
                        BOOST_CHECK_EQUAL(static_cast<unsigned int>(thresh >= 50),
                                static_cast<unsigned int>(counters(feat, thresh, label, 0)));
                    }
                }
            }
        }
    }
}
// Exercises a single hand-crafted COLOR feature in three phases: (1) an empty
// image (response 0), (2) one non-zero pixel inside region 1 (response 0.5),
// (3) a constant-color image (response equals the region-size difference).
// For each phase the per-threshold split counters are checked exhaustively.
BOOST_AUTO_TEST_CASE(testColorFeatureSimple) {
    unsigned int samplesPerImage = 500;
    unsigned int NUM_FEAT = 1;
    unsigned int minSampleCount = 32;
    int maxDepth = 15;
    uint16_t boxRadius = 127;
    uint16_t regionSize = 16;
    static const uint16_t NUM_THRESH = 3;
    static const int NUM_THREADS = 1;
    static const int maxImages = 0;
    static const int imageCacheSize = 1;
    unsigned int maxSamplesPerBatch = 5000;
    AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
    TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
            regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
    ImageFeatureEvaluation featureFunction(0, configuration);
    static const int width = 16;
    static const int height = 20;
    std::vector<RGBDImage> images(1, RGBDImage(width, height));
    std::vector<PixelInstance> samples;
    ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
            boost::make_shared<cuv::default_allocator>());
    // the single feature under test: a COLOR feature comparing region (4,2)
    // at offset (3,-2) against region (1,2) at offset (-3,-2), both channel 0
    featuresAndThresholds.types()[0] = COLOR;
    featuresAndThresholds.offset1X()[0] = 3;
    featuresAndThresholds.offset1Y()[0] = -2;
    featuresAndThresholds.region1X()[0] = 4;
    featuresAndThresholds.region1Y()[0] = 2;
    featuresAndThresholds.offset2X()[0] = -3;
    featuresAndThresholds.offset2Y()[0] = -2;
    featuresAndThresholds.region2X()[0] = 1;
    featuresAndThresholds.region2Y()[0] = 2;
    featuresAndThresholds.channel1()[0] = 0;
    featuresAndThresholds.channel2()[0] = 0;
    // three thresholds bracketing the expected responses
    featuresAndThresholds.thresholds()(0, 0) = -1.0f;
    featuresAndThresholds.thresholds()(1, 0) = 0.0f;
    featuresAndThresholds.thresholds()(2, 0) = 1.0f;
    const int NUM_LABELS = 2;
    // two samples at the same pixel (6,4), one per label
    samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
    samples.push_back(PixelInstance(&images[0], 1, Depth(1.0), 6, 4));
    RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
    cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
    // phase 1: all-black image → response 0 → left only for thresholds >= 0
    {
        std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples), node,
                cuv::dev_memory_space(), false);
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
        cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                        &featureResponses);
        BOOST_CHECK_CLOSE(0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
        checkCounters(configuration, counters, samples);
        assert(static_cast<int>(samples.size()) == NUM_LABELS);
        for (size_t label = 0; label < samples.size(); label++) {
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 0)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 1)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
        }
    }
    // phase 2: one pixel (7,2) in region 1 set to 0.5 → response 0.5
    images[0].reset();
    clearImageCache();
    images[0].setColor(7, 2, 0, 0.5f);
#if DUMP_IMAGE
    image.dump(std::cout);
#endif
    images[0].calculateIntegral();
#if DUMP_IMAGE
    std::cout << "integral" << std::endl;
    image.dump(std::cout);
#endif
    {
        std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
                node, cuv::dev_memory_space(), false);
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
        cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                        &featureResponses);
        checkCounters(configuration, counters, samples);
        BOOST_CHECK_CLOSE(0.5, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
        // response 0.5 → sample goes left only for threshold 1.0 (index 2)
        for (size_t label = 0; label < samples.size(); label++) {
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 0)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 1)));
        }
    }
    // phase 3: constant color 1.0 everywhere → response is the difference of
    // the region pixel counts (8*4 - 4*2)
    images[0].reset();
    clearImageCache();
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            images[0].setColor(x, y, 0, 1.0f);
        }
    }
    images[0].calculateIntegral();
#if DUMP_IMAGE
    image.dump(std::cout);
#endif
    {
        std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
                node, cuv::dev_memory_space(), false);
        cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
        cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
                featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
                        &featureResponses);
        BOOST_CHECK_CLOSE(8 * 4 - 4 * 2.0, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
        checkCounters(configuration, counters, samples);
        // response 24 exceeds every threshold → sample always goes right
        for (size_t label = 0; label < samples.size(); label++) {
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 0, label, 1)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 1, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, label, 1)));
            BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 2, label, 0)));
            BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 2, label, 1)));
        }
    }
}
// Verifies COLOR feature responses and per-threshold histogram counters on a
// hand-constructed setup: two 12x15 images (image 1 holds half the pixel
// values of image 0), three manually specified COLOR features with two
// thresholds each, and four samples. The expected feature-response values
// asserted below were verified by manual calculation (see inline comments).
BOOST_AUTO_TEST_CASE(testColorFeatureComplex) {
const size_t NUM_THRESH = 2;
const size_t NUM_FEAT = 3;
const int maxImages = 5;
const int imageCacheSize = 5; // make sure the cache is at least as big as #images
unsigned int samplesPerImage = 500;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 16;
static const int NUM_THREADS = 1;
unsigned int maxSamplesPerBatch = 5000;
// run on GPU and CPU and compare the two results against each other
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
const int width = 12;
const int height = 15;
std::vector<RGBDImage> images(2, RGBDImage(width, height));
std::vector<PixelInstance> samples;
// deterministic fill: channel c of image 0 holds 100*c + y*width + x;
// image 1 holds exactly half of that value
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 100 * c + y * width + x;
images[0].setColor(x, y, c, v);
images[1].setColor(x, y, c, v / 2.0f);
}
}
}
// spot-check raw pixel values at the bottom-right corner before integration
BOOST_CHECK_CLOSE(179, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(179 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(279 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(379 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
images[0].calculateIntegral();
images[1].calculateIntegral();
// after integration the bottom-right pixel holds the per-channel sum
BOOST_CHECK_CLOSE(16110, images[0].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110, images[0].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110, images[0].getColor(width - 1, height - 1, 2), 0);
BOOST_CHECK_CLOSE(16110 / 2.0, images[1].getColor(width - 1, height - 1, 0), 0);
BOOST_CHECK_CLOSE(34110 / 2.0, images[1].getColor(width - 1, height - 1, 1), 0);
BOOST_CHECK_CLOSE(52110 / 2.0, images[1].getColor(width - 1, height - 1, 2), 0);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds(NUM_FEAT, NUM_THRESH,
boost::make_shared<cuv::default_allocator>());
// feature 0: COLOR feature comparing a channel-0 region against a channel-2 region
featuresAndThresholds.types()[0] = COLOR;
featuresAndThresholds.offset1X()[0] = 2;
featuresAndThresholds.offset1Y()[0] = -1;
featuresAndThresholds.region1X()[0] = 2;
featuresAndThresholds.region1Y()[0] = 1;
featuresAndThresholds.offset2X()[0] = -3;
featuresAndThresholds.offset2Y()[0] = 4;
featuresAndThresholds.region2X()[0] = 1;
featuresAndThresholds.region2Y()[0] = 2;
featuresAndThresholds.channel1()[0] = 0;
featuresAndThresholds.channel2()[0] = 2;
// feature 1: COLOR feature comparing a channel-1 region against a channel-2 region
featuresAndThresholds.types()[1] = COLOR;
featuresAndThresholds.offset1X()[1] = 2;
featuresAndThresholds.offset1Y()[1] = -1;
featuresAndThresholds.region1X()[1] = 2;
featuresAndThresholds.region1Y()[1] = 2;
featuresAndThresholds.offset2X()[1] = -3;
featuresAndThresholds.offset2Y()[1] = 4;
featuresAndThresholds.region2X()[1] = 1;
featuresAndThresholds.region2Y()[1] = 1;
featuresAndThresholds.channel1()[1] = 1;
featuresAndThresholds.channel2()[1] = 2;
// feature 2: COLOR feature comparing a channel-1 region against a channel-0 region
featuresAndThresholds.types()[2] = COLOR;
featuresAndThresholds.offset1X()[2] = -2;
featuresAndThresholds.offset1Y()[2] = 1;
featuresAndThresholds.region1X()[2] = 3;
featuresAndThresholds.region1Y()[2] = 1;
featuresAndThresholds.offset2X()[2] = 3;
featuresAndThresholds.offset2Y()[2] = -4;
featuresAndThresholds.region2X()[2] = 3;
featuresAndThresholds.region2Y()[2] = 3;
featuresAndThresholds.channel1()[2] = 1;
featuresAndThresholds.channel2()[2] = 0;
// thresholds(t, f) = threshold t of feature f; values referenced again in the
// counter checks below
featuresAndThresholds.thresholds()(0, 0) = 0.0f;
featuresAndThresholds.thresholds()(1, 0) = -500.0f;
featuresAndThresholds.thresholds()(0, 1) = -300.0f;
featuresAndThresholds.thresholds()(1, 1) = 0.0f;
featuresAndThresholds.thresholds()(0, 2) = 0.0f;
featuresAndThresholds.thresholds()(1, 2) = 500.0f;
const int NUM_LABELS = 2;
// four samples: two per label, alternating between the two images
samples.push_back(PixelInstance(&images[0], 0, Depth(1.0), 6, 4));
samples.push_back(PixelInstance(&images[1], 0, Depth(2.0), 6, 4));
samples.push_back(PixelInstance(&images[0], 1, Depth(1.5), 5, 5));
samples.push_back(PixelInstance(&images[1], 1, Depth(3.1), 3, 4));
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
// 2 images, 3 features, 4 samples
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
cuv::ndarray<FeatureResponseType, cuv::host_memory_space> featureResponses;
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(node, batches, featuresAndThresholds,
&featureResponses);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
// featureResponses is laid out as (feature, sample)
BOOST_CHECK_EQUAL(2, static_cast<int>(featureResponses.ndim()));
BOOST_CHECK_EQUAL(3, static_cast<int>(featureResponses.shape(0)));
BOOST_CHECK_EQUAL(4, static_cast<int>(featureResponses.shape(1)));
checkScores(scores, NUM_FEAT, NUM_THRESH);
// values verified by manual calculation
// sample 0, feat 0
BOOST_CHECK_CLOSE(-2040, static_cast<FeatureResponseType>(featureResponses(0, 0)), 0);
// sample 0, feat 1
BOOST_CHECK_CLOSE(1186, static_cast<FeatureResponseType>(featureResponses(1, 0)), 0);
// sample 0, feat 2
BOOST_CHECK(isnan(static_cast<FeatureResponseType>(featureResponses(2, 0))));
// sample 1, feat 0
BOOST_CHECK_CLOSE(-444, static_cast<FeatureResponseType>(featureResponses(0, 1)), 0);
// sample 1, feat 1
BOOST_CHECK_CLOSE(-244, static_cast<FeatureResponseType>(featureResponses(1, 1)), 0);
// sample 1, feat 2
BOOST_CHECK_CLOSE(244, static_cast<FeatureResponseType>(featureResponses(2, 1)), 0);
// sample 2, feat 0
BOOST_CHECK_CLOSE(-884, static_cast<FeatureResponseType>(featureResponses(0, 2)), 0);
// sample 2, feat 1
BOOST_CHECK_CLOSE(-484, static_cast<FeatureResponseType>(featureResponses(1, 2)), 0);
// sample 2, feat 2
BOOST_CHECK_CLOSE(572, static_cast<FeatureResponseType>(featureResponses(2, 2)), 0);
// sample 3, feat 0
BOOST_CHECK_CLOSE(-424, static_cast<FeatureResponseType>(featureResponses(0, 3)), 0);
// sample 3, feat 1
BOOST_CHECK_CLOSE(-224, static_cast<FeatureResponseType>(featureResponses(1, 3)), 0);
// sample 3, feat 2
BOOST_CHECK_CLOSE(224, static_cast<FeatureResponseType>(featureResponses(2, 3)), 0);
checkCounters(configuration, counters, samples);
// the counter checks below follow from the responses listed above and the
// thresholds set earlier; "→ 0/1" marks which side of the split a sample falls
// -2040 sample 0, feat 0 → 0
// -444 sample 1, feat 0 → 0
// -884 sample 2, feat 0 → 1
// -424 sample 3, feat 0 → 1
// 1186 sample 0, feat 1 → 0
// -244 sample 1, feat 1 → 0
// -484 sample 2, feat 1 → 1
// -224 sample 3, feat 1 → 1
// 1551 sample 0, feat 2 → 0
// 244 sample 1, feat 2 → 0
// 572 sample 2, feat 2 → 1
// 224 sample 3, feat 2 → 1
// feat 0, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 0, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(0, 0, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(0, 0, 1, 1)));
// feat 0, thresh 1 (-500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(0, 1, 1, 1)));
// feat 1, thresh 0 (-300.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 0, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 0, 1, 1)));
// feat 1, thresh 1 (0.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(1, 1, 0, 1)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(1, 1, 1, 0)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(1, 1, 1, 1)));
// feat 2, thresh 0 (0.0f)
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 0, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 0, 1)));
BOOST_CHECK_EQUAL(0, static_cast<int>(counters(2, 0, 1, 0)));
BOOST_CHECK_EQUAL(2, static_cast<int>(counters(2, 0, 1, 1)));
// feat 2, thresh 1 (+500.0f)
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 0, 1)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 0)));
BOOST_CHECK_EQUAL(1, static_cast<int>(counters(2, 1, 1, 1)));
}
// Regression test: generates 1000 random features with 50 thresholds each,
// evaluates them over 1000 samples drawn from ten synthetic 640x480 images,
// and compares hashes of the resulting counters and scores against previously
// recorded magic numbers. GPU_AND_CPU_COMPARE additionally cross-checks the
// GPU result against the CPU implementation.
BOOST_AUTO_TEST_CASE(testColorFeatureManySamples) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
// fill every image with a deterministic per-image, per-channel gradient
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 10;
const int NUM_SAMPLES = samplesPerImage * images.size();
// samples are distributed evenly over the images; label, depth and position
// are all derived deterministically from the sample index
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
i / 100, // label
Depth((i % 20) / 10.0 + 1.0), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
RandomTree<PixelInstance, ImageFeatureFunction> node(0, 0, getPointers(samples), NUM_LABELS);
cuv::ndarray<WeightType, cuv::dev_memory_space> histogram(node.getHistogram());
{
std::vector<std::vector<const PixelInstance*> > batches = featureFunction.prepare(getPointers(samples),
node, cuv::dev_memory_space(), false);
ImageFeaturesAndThresholds<cuv::dev_memory_space> featuresAndThresholds =
featureFunction.generateRandomFeatures(batches[0], configuration.getRandomSeed(),
true, cuv::dev_memory_space());
cuv::ndarray<WeightType, cuv::dev_memory_space> counters =
featureFunction.calculateFeatureResponsesAndHistograms(
node, batches, featuresAndThresholds);
cuv::ndarray<ScoreType, cuv::host_memory_space> scores(
featureFunction.calculateScores(counters,
featuresAndThresholds, histogram));
size_t scoreHash = checkScores(scores, NUM_FEAT, NUM_THRESH);
size_t counterHash = checkCounters(configuration, counters, samples);
// magic number. used to check for regressions
BOOST_CHECK_EQUAL(4437303196209240250lu, counterHash);
BOOST_CHECK_EQUAL(13702092111133522162lu, scoreHash);
}
}
// Verifies that the flat device-side representation of a single tree node
// (stored in treeData) matches the host-side RandomTree node. Leaves must
// carry a left-offset of -1, a NaN threshold and the normalized histogram;
// inner nodes must carry the split's threshold, feature type, offsets,
// regions and channels. For inner nodes the caller must supply the split.
static void checkNode(boost::shared_ptr<const RandomTree<PixelInstance, ImageFeatureFunction> > node,
        const boost::shared_ptr<const TreeNodes>& treeData,
        const SplitFunction<PixelInstance, ImageFeatureFunction>* split = 0) {
    const size_t labelCount = node->getHistogram().size();
    const size_t nodeId = node->getNodeId();
    assert(nodeId - node->getTreeId() < treeData->numNodes());
    const TreeNodeData nodeData = getTreeNode(nodeId, treeData);
    if (node->isLeaf()) {
        // leaves are marked by a left-offset of -1 and a NaN threshold
        BOOST_CHECK_EQUAL(-1, static_cast<int>(nodeData.leftNodeOffset));
        BOOST_CHECK(isnan(static_cast<float>(nodeData.threshold)));
        for (size_t label = 0; label < labelCount; label++) {
            BOOST_CHECK_EQUAL(static_cast<float>(node->getNormalizedHistogram()[label]),
                    static_cast<float>(nodeData.histogram(label)));
        }
        return;
    }
    // inner node: the stored split parameters must match the host split
    BOOST_REQUIRE(split);
    const ImageFeatureFunction& feature = split->getFeature();
    const float expectedThreshold = split->getThreshold();
    const int expectedLeftNodeOffset = node->getLeft()->getNodeId() - node->getNodeId();
    BOOST_CHECK_EQUAL(expectedLeftNodeOffset, static_cast<int>(nodeData.leftNodeOffset));
    BOOST_CHECK_EQUAL(expectedThreshold, static_cast<float>(nodeData.threshold));
    BOOST_CHECK_EQUAL(static_cast<int>(feature.getType()), static_cast<int>(nodeData.type));
    BOOST_CHECK_EQUAL(feature.getOffset1().getX(), static_cast<int>(nodeData.offset1X));
    BOOST_CHECK_EQUAL(feature.getOffset1().getY(), static_cast<int>(nodeData.offset1Y));
    BOOST_CHECK_EQUAL(feature.getRegion1().getX(), static_cast<int>(nodeData.region1X));
    BOOST_CHECK_EQUAL(feature.getRegion1().getY(), static_cast<int>(nodeData.region1Y));
    BOOST_CHECK_EQUAL(feature.getOffset2().getX(), static_cast<int>(nodeData.offset2X));
    BOOST_CHECK_EQUAL(feature.getOffset2().getY(), static_cast<int>(nodeData.offset2Y));
    BOOST_CHECK_EQUAL(feature.getRegion2().getX(), static_cast<int>(nodeData.region2X));
    BOOST_CHECK_EQUAL(feature.getRegion2().getY(), static_cast<int>(nodeData.region2Y));
    BOOST_CHECK_EQUAL(feature.getChannel1(), static_cast<uint8_t>(nodeData.channel1));
    BOOST_CHECK_EQUAL(feature.getChannel2(), static_cast<uint8_t>(nodeData.channel2));
}
// Builds a small 5-node tree by hand (see ASCII diagram below), converts it
// to the flat device representation via convertTree(), verifies every node
// with checkNode(), and finally runs classifyImage() on a blank 640x480
// image to exercise the GPU recall path.
BOOST_AUTO_TEST_CASE(testRecallOnGPU) {
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int samplesPerImage = 100;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
ImageFeatureEvaluation featureFunction(0, configuration);
std::vector<PixelInstance> samples;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
// fill every image with a deterministic per-image, per-channel gradient
for (size_t image = 0; image < images.size(); image++) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
for (int c = 0; c < 3; c++) {
float v = 10000 * image + 100 * c + y * width + x;
images[image].setColor(x, y, c, v);
}
}
}
images[image].calculateIntegral();
}
const size_t NUM_LABELS = 3;
const int NUM_SAMPLES = samplesPerImage * images.size();
// samples are distributed evenly over the images; label, depth and position
// are all derived deterministically from the sample index
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
/**
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
*/
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n0 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(0, 0, getPointers(samples),
NUM_LABELS);
// hand-set per-node histograms (one weight per label)
std::vector<WeightType> histN1(NUM_LABELS, 0);
histN1[0] = 10;
histN1[1] = 10;
histN1[2] = 10;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n1 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(1, 1, n0, histN1);
std::vector<WeightType> histN2(NUM_LABELS, 0);
histN2[0] = 60;
histN2[1] = 60;
histN2[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n2 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(2, 1, n0, histN2);
std::vector<WeightType> histN3(NUM_LABELS, 0);
histN3[0] = 10;
histN3[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n3 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(3, 2, n1, histN3);
std::vector<WeightType> histN4(NUM_LABELS, 0);
histN4[1] = 50;
histN4[2] = 20;
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > n4 =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(4, 2, n1, histN4);
// split at n1 routes samples to n3 / n4 using a COLOR feature
size_t featureId1 = 1;
float threshold1 = 28.391;
ScoreType score1 = 0.392;
ImageFeatureFunction feature1(COLOR,
Offset(-10, 5), Region(7, 3), 1,
Offset(27, -19), Region(65, 73), 2);
SplitFunction<PixelInstance, ImageFeatureFunction> split1(featureId1, feature1, threshold1, score1);
n1->addChildren(split1, n3, n4);
// split at the root routes samples to n1 / n2 using a DEPTH feature
size_t featureId2 = 2;
float threshold2 = -29.1245;
ScoreType score2 = 0.9371;
ImageFeatureFunction feature2(DEPTH,
Offset(-18, 25), Region(4, 19), 0,
Offset(9, 28), Region(1, 16), 0);
SplitFunction<PixelInstance, ImageFeatureFunction> split2(featureId2, feature2, threshold2, score2);
n0->addChildren(split2, n1, n2);
BOOST_CHECK(n0->isRoot());
BOOST_CHECK_EQUAL(5, n0->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage = boost::make_shared<RandomTreeImage>(n0, configuration,
classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
// convert to flat device representation and verify it node by node
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(n0->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(n0->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
checkNode(n0, treeData, &split2);
checkNode(n1, treeData, &split1);
BOOST_REQUIRE(n2->isLeaf());
BOOST_REQUIRE(n3->isLeaf());
BOOST_REQUIRE(n4->isLeaf());
checkNode(n2, treeData);
checkNode(n3, treeData);
checkNode(n4, treeData);
// do classify
const size_t treeCacheSize = 3;
{
RGBDImage image(640, 480);
image.calculateIntegral();
{
utils::Profile classifyImageTimer("classifyImage");
cuv::ndarray<float, cuv::dev_memory_space> output(
cuv::extents[NUM_LABELS][image.getHeight()][image.getWidth()]);
// zero the per-label probability maps before classification
cudaSafeCall(cudaMemset(output.ptr(), 0, static_cast<size_t>(output.size() * sizeof(float))));
classifyImage(treeCacheSize, output, image, NUM_LABELS, treeData);
}
}
}
// Builds three degenerated trees (left-descending chains; see diagram below),
// the last of which deliberately exceeds NODES_PER_TREE_LAYER to exercise the
// multi-layer tree conversion; every node of each converted tree is then
// verified with checkNode().
BOOST_AUTO_TEST_CASE(testRecallLargeForest) {
unsigned int samplesPerImage = 100;
const int NUM_FEAT = 1000;
const int NUM_THRESH = 50;
unsigned int minSampleCount = 32;
int maxDepth = 15;
uint16_t boxRadius = 127;
uint16_t regionSize = 50;
static const int NUM_THREADS = 1;
static const int maxImages = 5;
static const int imageCacheSize = 5;
unsigned int maxSamplesPerBatch = 100000;
AccelerationMode accelerationMode = GPU_AND_CPU_COMPARE;
TrainingConfiguration configuration(SEED, samplesPerImage, NUM_FEAT, minSampleCount, maxDepth, boxRadius,
regionSize, NUM_THRESH, NUM_THREADS, maxImages, imageCacheSize, maxSamplesPerBatch, accelerationMode);
std::vector<PixelInstance> samples;
const size_t NUM_LABELS = 3;
const int width = 640;
const int height = 480;
std::vector<RGBDImage> images(10, RGBDImage(width, height));
const int NUM_SAMPLES = samplesPerImage * images.size();
// samples are distributed evenly over the (blank) images; label, depth and
// position are all derived deterministically from the sample index
for (int i = 0; i < NUM_SAMPLES; i++) {
PixelInstance sample(
&images.at(i / (NUM_SAMPLES / images.size())), // image
(i / 100) % NUM_LABELS, // label
Depth((i % 20) / 10.0 + 0.1), // depth
i % width, // x
i % height // y
);
samples.push_back(sample);
}
// explicitly test a tree that exceeds the maximal number of nodes per layer
const size_t numNodes[] = { 10, 100, 3 * NODES_PER_TREE_LAYER + 2 };
// deterministic samplers for split parameters (fixed seed 4711)
Sampler sampler(4711, 0, 1000);
Sampler typeSampler(4711, 0, 1);
Sampler channelSampler(4711, 0, 5);
Sampler offsetSampler(4711, -120, 120);
Sampler regionSampler(4711, 0, 20);
for (size_t treeId = 0; treeId < 3; treeId++) {
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rootNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(treeId, 0, getPointers(samples),
NUM_LABELS);
std::vector<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > > nodes;
std::map<boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >,
SplitFunction<PixelInstance, ImageFeatureFunction> > splits;
nodes.push_back(rootNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > previousNode = rootNode;
/**
* creates a degenerated tree with N nodes
*
* n0
* / \
* / \
* n1 n2
* / \
* / \
* n3 n4
* /
* /
* n5
* / \
* / \
* n6 n7
* /
* /
* ...
*/
for (size_t nodeId = 1; nodeId < numNodes[treeId]; nodeId += 2) {
const size_t level = (nodeId + 1) / 2;
assert(level > 0 && level < numNodes[treeId]);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > leftNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > rightNode =
boost::make_shared<RandomTree<PixelInstance, ImageFeatureFunction> >(nodeId + 1 + treeId, level,
getPointers(samples), NUM_LABELS, previousNode);
// draw a pseudo-random split for the current parent node
size_t featureId = sampler.getNext();
float threshold = sampler.getNext() / 200.0 - 100.0;
ScoreType score = sampler.getNext() / 1000.0;
assertProbability(score);
ImageFeatureFunction feature(static_cast<FeatureType>(typeSampler.getNext()),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext(),
Offset(offsetSampler.getNext(), offsetSampler.getNext()),
Region(regionSampler.getNext(), regionSampler.getNext()),
channelSampler.getNext());
SplitFunction<PixelInstance, ImageFeatureFunction> split(featureId, feature, threshold, score);
previousNode->addChildren(split, leftNode, rightNode);
splits[previousNode] = split;
nodes.push_back(leftNode);
nodes.push_back(rightNode);
// always descend into the left child → degenerated (chain-like) tree
previousNode = leftNode;
}
BOOST_CHECK_EQUAL(treeId, rootNode->getTreeId());
BOOST_CHECK(rootNode->isRoot());
BOOST_CHECK_EQUAL(numNodes[treeId] + 1, rootNode->countNodes());
cuv::ndarray<WeightType, cuv::host_memory_space> classLabelPriorDistribution(NUM_LABELS);
for (size_t i = 0; i < NUM_LABELS; i++) {
classLabelPriorDistribution[i] = 100;
}
boost::shared_ptr<RandomTreeImage> randomTreeImage =
boost::make_shared<RandomTreeImage>(rootNode, configuration, classLabelPriorDistribution);
randomTreeImage->normalizeHistograms(0.0);
// convert to flat device representation and verify every node
boost::shared_ptr<const TreeNodes> treeData = convertTree(randomTreeImage);
BOOST_CHECK_EQUAL(rootNode->countNodes(), static_cast<size_t>(treeData->numNodes()));
BOOST_CHECK_EQUAL(rootNode->getNumClasses(), static_cast<size_t>(treeData->numLabels()));
CURFIL_INFO("checking nodes");
assert(nodes.size() == numNodes[treeId] + 1);
for (size_t nodeId = 0; nodeId < numNodes[treeId]; nodeId++) {
const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> > node = nodes[nodeId];
if (node->isLeaf()) {
checkNode(node, treeData);
} else {
checkNode(node, treeData, &splits[node]);
}
}
CURFIL_INFO("checked " << numNodes[treeId] << " nodes of tree " << treeId);
}
}
BOOST_AUTO_TEST_SUITE_END()
|
27a1495304a67157f3ffabc6abe4cb5afbf3d3f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
using namespace std;
// Plain particle record: 3D position and velocity (array-of-structures layout).
struct Particle {
float pos[3] = {0,0,0}; // position (x, y, z)
float vel[3] = {0,0,0}; // velocity (x, y, z)
};
//kernel code to run on the GPU
// kernel code to run on the GPU: advance each particle's position by one
// explicit Euler step (pos += vel * dt). One thread per particle; launch
// with at least N threads in total.
__global__ void gpu_push(Particle *particles, float dt, size_t N) {
    // use size_t so the global index cannot overflow a 32-bit int when N
    // (and therefore the grid) is large; also avoids a signed/unsigned
    // comparison against the size_t bound
    size_t p = (size_t)blockIdx.x*blockDim.x + threadIdx.x;
    if (p < N) {
        Particle &part = particles[p];
        for (int i = 0; i < 3; i++)
            part.pos[i] += part.vel[i]*dt;
    }
}
//code to push a single particle
// code to push a single particle on the host: pos += vel * dt (Euler step)
void push(Particle *part, double dt) {
    for (int axis = 0; axis < 3; ++axis) {
        part->pos[axis] = part->pos[axis] + part->vel[axis] * dt;
    }
}
int main(int n_args, char *args[]) {
hipFree(0);
size_t num_particles = 1000000;
//Particle *particles = new Particle[num_particles];
Particle *particles;
hipHostMalloc(&particles,sizeof(Particle)*num_particles,hipHostMallocDefault); //allocate pinned memory
//set some initial values
for (size_t i=0;i<num_particles;i++)
particles[i].vel[0]=1/(double)num_particles;
const float dt = 0.1;
//*** CPU particle push ***
auto start_cpu = chrono::system_clock::now();
for (size_t i=0;i<num_particles;i++) push(&particles[i],dt);
auto end_cpu = chrono::system_clock::now();
//*** GPU particle push ***
auto start_gpu = chrono::system_clock::now();
Particle *devParticles;
hipMalloc((void**)&devParticles, sizeof(Particle)*num_particles);
hipMemcpy(devParticles,particles,sizeof(Particle)*num_particles,hipMemcpyHostToDevice);
const int threads_per_block = 1024;
int num_blocks = (num_particles-1)/threads_per_block + 1;
cout<<"Creating "<<num_blocks*threads_per_block<<" threads"<<endl;
hipLaunchKernelGGL(( gpu_push), dim3(num_blocks),dim3(threads_per_block), 0, 0, devParticles, dt, num_particles);
hipMemcpy(particles,devParticles,sizeof(Particle)*num_particles,hipMemcpyDeviceToHost);
auto end_gpu = chrono::system_clock::now();
//output timing info
std::chrono::duration<double,std::nano> elapsed_cpu = end_cpu - start_cpu;
std::chrono::duration<double,std::nano> elapsed_gpu = end_gpu - start_gpu;
cout<<"Time per particle on CPU: "<<elapsed_cpu.count()/num_particles<<" (ns)"<<endl;
cout<<"Time per particle on GPU: "<<elapsed_gpu.count()/num_particles<<" (ns)"<<endl;
// delete[] particles;
hipHostFree(particles);
return 0;
}
| 27a1495304a67157f3ffabc6abe4cb5afbf3d3f9.cu | #include <iostream>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
// Plain particle record: 3D position and velocity (array-of-structures layout).
struct Particle {
float pos[3] = {0,0,0}; // position (x, y, z)
float vel[3] = {0,0,0}; // velocity (x, y, z)
};
//kernel code to run on the GPU
// kernel code to run on the GPU: advance each particle's position by one
// explicit Euler step (pos += vel * dt). One thread per particle; launch
// with at least N threads in total.
__global__ void gpu_push(Particle *particles, float dt, size_t N) {
    // use size_t so the global index cannot overflow a 32-bit int when N
    // (and therefore the grid) is large; also avoids a signed/unsigned
    // comparison against the size_t bound
    size_t p = (size_t)blockIdx.x*blockDim.x + threadIdx.x;
    if (p < N) {
        Particle &part = particles[p];
        for (int i = 0; i < 3; i++)
            part.pos[i] += part.vel[i]*dt;
    }
}
//code to push a single particle
// code to push a single particle on the host: pos += vel * dt (Euler step)
void push(Particle *part, double dt) {
    for (int axis = 0; axis < 3; ++axis) {
        part->pos[axis] = part->pos[axis] + part->vel[axis] * dt;
    }
}
// Benchmarks an Euler particle push on CPU vs GPU and prints the time per
// particle for each. GPU timing deliberately includes allocation and the
// host<->device transfers.
int main(int n_args, char *args[]) {
    cudaFree(0); // force lazy context creation so it is not timed below

    size_t num_particles = 1000000;
    Particle *particles;
    // allocate pinned host memory (required for fast transfers)
    cudaError_t err = cudaHostAlloc(&particles,sizeof(Particle)*num_particles,cudaHostAllocDefault);
    if (err != cudaSuccess) {
        cout<<"cudaHostAlloc failed: "<<cudaGetErrorString(err)<<endl;
        return 1;
    }

    // set some initial values
    for (size_t i=0;i<num_particles;i++)
        particles[i].vel[0]=1/(double)num_particles;

    const float dt = 0.1;

    //*** CPU particle push ***
    auto start_cpu = chrono::system_clock::now();
    for (size_t i=0;i<num_particles;i++) push(&particles[i],dt);
    auto end_cpu = chrono::system_clock::now();

    //*** GPU particle push ***
    auto start_gpu = chrono::system_clock::now();
    Particle *devParticles;
    cudaMalloc((void**)&devParticles, sizeof(Particle)*num_particles);
    cudaMemcpy(devParticles,particles,sizeof(Particle)*num_particles,cudaMemcpyHostToDevice);
    const int threads_per_block = 1024;
    int num_blocks = (num_particles-1)/threads_per_block + 1; // ceiling division
    cout<<"Creating "<<num_blocks*threads_per_block<<" threads"<<endl;
    gpu_push<<<num_blocks,threads_per_block>>>(devParticles, dt, num_particles);
    err = cudaGetLastError(); // catch bad launch configurations
    if (err != cudaSuccess) {
        cout<<"kernel launch failed: "<<cudaGetErrorString(err)<<endl;
        return 1;
    }
    // blocking device->host copy; also synchronizes with the kernel
    cudaMemcpy(particles,devParticles,sizeof(Particle)*num_particles,cudaMemcpyDeviceToHost);
    auto end_gpu = chrono::system_clock::now();

    //output timing info
    std::chrono::duration<double,std::nano> elapsed_cpu = end_cpu - start_cpu;
    std::chrono::duration<double,std::nano> elapsed_gpu = end_gpu - start_gpu;
    cout<<"Time per particle on CPU: "<<elapsed_cpu.count()/num_particles<<" (ns)"<<endl;
    cout<<"Time per particle on GPU: "<<elapsed_gpu.count()/num_particles<<" (ns)"<<endl;

    cudaFree(devParticles); // fix: device buffer was previously leaked
    cudaFreeHost(particles);
    return 0;
}
|
dc051ec75401af0b2906429eb3a6799e0b5061aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
const int LEN = 4;
// get a line centered at *ctrp, along the direction of *vec
// Sample 2*LEN+1 points of the line through *ctrp along direction *vec.
// ctrp is given as {y, x}; vec is given as {gx, gy}. On return, out[0]
// holds the x coordinates and out[1] the y coordinates of the samples.
__device__ void line2D(double out[][2*LEN+1], double *ctrp, double *vec) {
    const double cx = ctrp[1];
    const double cy = ctrp[0];
    const double dx = vec[0];
    const double dy = vec[1];
    for (int step = -LEN; step <= LEN; step++) {
        const int k = step + LEN;
        out[0][k] = cx + step * dx;
        out[1][k] = cy + step * dy;
    }
}
// single point calculation
// Direction weight: dot product of the two 2D vectors x and y.
__device__ double dire_weight(double *x, double *y) {
    return x[0] * y[0] + x[1] * y[1];
}
// Magnitude weight: 0.5 * (1 + y - x).
__device__ double mag_weight(double x, double y) {
    return (1.0 + y - x) * 0.5;
}
// Quantize x onto one of three threshold levels: 0.1 for x <= 0.1,
// 0.2 for x <= 0.2, otherwise 0.3.
__device__ double thresholding(double x) {
    // set 3 thresholds
    const double th1 = 0.1;
    const double th2 = 0.2;
    const double th3 = 0.3;
    if (x <= th1) {
        return th1;
    }
    if (x <= th2) {
        return th2;
    }
    return th3;
}
// One ETF (edge tangent flow) smoothing pass: for each pixel, samples
// 2*LEN+1 points along its current tangent direction (tx, ty), accumulates
// the neighbours' tangents weighted by direction agreement (dire_weight)
// and gradient-magnitude difference (mag_weight), and writes the normalized
// result to (xout, yout) with its magnitude in outmag.
// Expects a 2D launch covering width x height; out-of-range threads return.
// NOTE(review): the `im` parameter is only used by the commented-out
// cross-check section at the bottom.
__global__ void etfStraight(
double *xout, double *yout, double *outmag,
double *tx, double *ty,
double *im, double *gmag,
int height, int width) {
// calculate pixels' location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Only execute valid pixels
if (x>=width || y>=height) {
return;
}
// get parallel line centered at current point
// ctrp stores {row, col}; line2D reads ctrp[1] as x and ctrp[0] as y
double ctrp[2] = {y,x};
// direction vector
double ctrV[2] = {tx[y*width+x],ty[y*width+x]};
double p_line[2][2*LEN+1] = {0.0};
line2D(p_line, ctrp, ctrV);
/*
int idx_line[2][2*LEN+1] = {0};
for (int i = 0; i < 2*LEN+1; i++){
idx_line[0][i] = (int)(p_line[0][i]+0.5);
idx_line[1][i] = (int)(p_line[1][i]+0.5);
}
*/
// accumulate weighted tangents of the in-bounds line samples
double sum_wd = 0.0;
double temp[2] = {0.0};
for (int i = 0; i < 2*LEN+1; i++){
if (p_line[0][i]>=0 && p_line[0][i]<width &&
p_line[1][i]>=0 && p_line[1][i]<height){
// round the sampled line point to the nearest pixel
int posx = (int)(p_line[0][i]+0.5);
int posy = (int)(p_line[1][i]+0.5);
int ind = posy*width+posx;
//double ctrV[2] = {tx[y*width+x],ty[y*width+x]};
double winV[2] = {tx[ind],ty[ind]};
double wd = dire_weight(ctrV, winV);
sum_wd += wd;
double wm = mag_weight(gmag[y*width+x], gmag[ind]);
temp[0] += wd*wm*tx[ind];
temp[1] += wd*wm*ty[ind];
}
}
// flip the accumulated vector when the average direction agreement is
// clearly negative
// NOTE(review): the divisor 9 equals 2*LEN+1 for LEN=4 — presumably meant
// as the sample count; confirm if LEN ever changes
if (sum_wd/9<-0.1){
temp[0] = -temp[0];
temp[1] = -temp[1];
}
// normalize and write back; pixels with a zero vector keep their old output
double temp_mag = sqrt(temp[0]*temp[0]+temp[1]*temp[1]);
if (temp_mag != 0){
outmag[y*width+x] = temp_mag;
xout[y*width+x] = temp[0]/temp_mag;
yout[y*width+x] = temp[1]/temp_mag;
}
__syncthreads();
/*
// cross check
vec[0] = -yout[y*width+x];
vec[1] = xout[y*width+x]; // new direction vector
line2D(p_line, ctrp, vec);
double lmax = -1.0;
double rmax = -1.0;
int lind = 0;
int rind = 0;
for (int i = 0; i < 2*LEN+1; i++){
if (p_line[0][i]>=0 && p_line[0][i]<width &&
p_line[1][i]>=0 && p_line[1][i]<height){
int posx = (int)(p_line[0][i]+0.5);
int posy = (int)(p_line[1][i]+0.5);
int ind = posy*width+posx;
double tmp = thresholding(im[ind]);
if (i < LEN) {
if (tmp>=lmax){
lmax = tmp;
lind = i;
}
}
else if (i > LEN) {
if (tmp>rmax){
rmax = tmp;
rind = i;
}
}
}
}
// check condition
double scale = 1.0;
double tmp = thresholding(im[y*width+x]);
/*if (tmp < lmax && tmp < rmax) {
scale = 0.0;
}
else if (tmp > lmax && tmp > rmax) {
scale = 1.0;
}
else if (tmp == lmax && tmp == rmax) {
if
else if (LEN-lind < rind-LEN) {
if (tmp == lmax) {
}
else if (tmp >= lmax && tmp < rmax) {
}*
if (tmp == 0.1) {
scale = 0.0;
}
else if (tmp == 0.3) {
scale = 1.0;
}
else {
if (tmp < lmax && tmp < rmax){
scale = 0.0;
}
else if (tmp > lmax && tmp > rmax){
scale = 1.0;
}
else if (tmp == lmax && tmp == rmax){
scale = 1.0;
}
else if (tmp >= lmax && tmp <= rmax){
if (LEN-lind < rind-LEN){
scale = 0.0;
}
else {
scale = 1.0;
}
}
else if (tmp <= lmax && tmp >= rmax){
if (LEN-lind <= rind-LEN){
scale = 1.0;
}
else{
scale = 0.0;
}
}
}
xout[y*width+x] = xout[y*width+x]*scale;
yout[y*width+x] = yout[y*width+x]*scale;
__syncthreads();
*/
}
| dc051ec75401af0b2906429eb3a6799e0b5061aa.cu | #include <math.h>
const int LEN = 4;
// get a line centered at *ctrp, along the direction of *vec
// Sample 2*LEN+1 points of the line through *ctrp along direction *vec.
// ctrp is given as {y, x}; vec is given as {gx, gy}. On return, out[0]
// holds the x coordinates and out[1] the y coordinates of the samples.
__device__ void line2D(double out[][2*LEN+1], double *ctrp, double *vec) {
    const double cx = ctrp[1];
    const double cy = ctrp[0];
    const double dx = vec[0];
    const double dy = vec[1];
    for (int step = -LEN; step <= LEN; step++) {
        const int k = step + LEN;
        out[0][k] = cx + step * dx;
        out[1][k] = cy + step * dy;
    }
}
// Directional weight: dot product of two 2-D tangent vectors.
// Positive when the directions agree, negative when they oppose.
__device__ double dire_weight(double *x, double *y) {
    double dot = y[0]*x[0];
    dot += y[1]*x[1];
    return dot;
}
// Magnitude weight: favors the neighbor magnitude y over the center
// magnitude x; equals 0.5 when the two magnitudes match.
__device__ double mag_weight(double x, double y) {
    const double w = 1 + y - x;  // same evaluation order as original (1 + y) - x
    return 0.5*w;
}
// Quantize a pixel intensity into one of three levels:
// x <= 0.1 -> 0.1, x <= 0.2 -> 0.2, otherwise -> 0.3.
__device__ double thresholding(double x) {
    const double th1 = 0.1;
    const double th2 = 0.2;
    const double th3 = 0.3;
    if (x <= th1) return th1;   // same comparison order as original (NaN falls through to th3)
    if (x <= th2) return th2;
    return th3;
}
// Edge-tangent-flow smoothing step ("straight" variant), one thread per pixel.
// tx/ty: current unit tangent field; gmag: gradient magnitude; im: image.
// Outputs: xout/yout = smoothed unit tangents, outmag = pre-normalization
// magnitude. All arrays are height*width, row-major.
__global__ void etfStraight(
double *xout, double *yout, double *outmag,
double *tx, double *ty,
double *im, double *gmag,
int height, int width) {
// calculate pixels' location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Only execute valid pixels
if (x>=width || y>=height) {
return;
}
// NOTE(review): out-of-range threads return above, so the __syncthreads()
// further down is not reached by every thread of the block — confirm this
// divergent barrier is safe for the launch configurations used.
// get parallel line centered at current point
double ctrp[2] = {y,x};
// direction vector
double ctrV[2] = {tx[y*width+x],ty[y*width+x]};
double p_line[2][2*LEN+1] = {0.0};
line2D(p_line, ctrp, ctrV);
/*
int idx_line[2][2*LEN+1] = {0};
for (int i = 0; i < 2*LEN+1; i++){
idx_line[0][i] = (int)(p_line[0][i]+0.5);
idx_line[1][i] = (int)(p_line[1][i]+0.5);
}
*/
// weighted sum of the 2*LEN+1 sampled neighbor tangents along the line
double sum_wd = 0.0;
double temp[2] = {0.0};
for (int i = 0; i < 2*LEN+1; i++){
if (p_line[0][i]>=0 && p_line[0][i]<width &&
p_line[1][i]>=0 && p_line[1][i]<height){
int posx = (int)(p_line[0][i]+0.5);
int posy = (int)(p_line[1][i]+0.5);
int ind = posy*width+posx;
//double ctrV[2] = {tx[y*width+x],ty[y*width+x]};
double winV[2] = {tx[ind],ty[ind]};
double wd = dire_weight(ctrV, winV);
sum_wd += wd;
double wm = mag_weight(gmag[y*width+x], gmag[ind]);
temp[0] += wd*wm*tx[ind];
temp[1] += wd*wm*ty[ind];
}
}
// flip the accumulated vector when the average directional agreement
// over the 9-sample window is strongly negative
if (sum_wd/9<-0.1){
temp[0] = -temp[0];
temp[1] = -temp[1];
}
// normalize; pixels whose accumulated vector vanished keep their old output
double temp_mag = sqrt(temp[0]*temp[0]+temp[1]*temp[1]);
if (temp_mag != 0){
outmag[y*width+x] = temp_mag;
xout[y*width+x] = temp[0]/temp_mag;
yout[y*width+x] = temp[1]/temp_mag;
}
__syncthreads();
// Disabled "cross check" pass below: would re-sample perpendicular to the new
// tangent and zero out non-maximal pixels. Left as-is (contains an unclosed
// inner /* which makes it one single comment block).
/*
// cross check
vec[0] = -yout[y*width+x];
vec[1] = xout[y*width+x]; // new direction vector
line2D(p_line, ctrp, vec);
double lmax = -1.0;
double rmax = -1.0;
int lind = 0;
int rind = 0;
for (int i = 0; i < 2*LEN+1; i++){
if (p_line[0][i]>=0 && p_line[0][i]<width &&
p_line[1][i]>=0 && p_line[1][i]<height){
int posx = (int)(p_line[0][i]+0.5);
int posy = (int)(p_line[1][i]+0.5);
int ind = posy*width+posx;
double tmp = thresholding(im[ind]);
if (i < LEN) {
if (tmp>=lmax){
lmax = tmp;
lind = i;
}
}
else if (i > LEN) {
if (tmp>rmax){
rmax = tmp;
rind = i;
}
}
}
}
// check condition
double scale = 1.0;
double tmp = thresholding(im[y*width+x]);
/*if (tmp < lmax && tmp < rmax) {
scale = 0.0;
}
else if (tmp > lmax && tmp > rmax) {
scale = 1.0;
}
else if (tmp == lmax && tmp == rmax) {
if
else if (LEN-lind < rind-LEN) {
if (tmp == lmax) {
}
else if (tmp >= lmax && tmp < rmax) {
}*
if (tmp == 0.1) {
scale = 0.0;
}
else if (tmp == 0.3) {
scale = 1.0;
}
else {
if (tmp < lmax && tmp < rmax){
scale = 0.0;
}
else if (tmp > lmax && tmp > rmax){
scale = 1.0;
}
else if (tmp == lmax && tmp == rmax){
scale = 1.0;
}
else if (tmp >= lmax && tmp <= rmax){
if (LEN-lind < rind-LEN){
scale = 0.0;
}
else {
scale = 1.0;
}
}
else if (tmp <= lmax && tmp >= rmax){
if (LEN-lind <= rind-LEN){
scale = 1.0;
}
else{
scale = 0.0;
}
}
}
xout[y*width+x] = xout[y*width+x]*scale;
yout[y*width+x] = yout[y*width+x]*scale;
__syncthreads();
*/
}
|
6fa0cd1c9fd3bfaffa17871953a89c48a13c23a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Sorted-segment mean for a rank-1 input.
// Blocks are mapped onto segments; starts[s]/lengths[s] give the contiguous
// index range of segment s in the sorted input. Thread 0 seeds the output
// with firstElement/count, the rest of the block accumulates the remaining
// elements with atomics.
// Fixes vs. previous revision: (1) seeding no longer divides by zero for an
// empty segment — the output is set to 0, matching unsortedSegmentMeanLinearKernel;
// (2) start/finish are zero-initialized so blocks mapped past the last
// segment do not loop over uninitialized shared values.
template <typename T, typename I>
static __global__ void segmentMeanLinearKernel(void* input, Nd4jLong const* inputShape, int* starts, int* lengths, Nd4jLong numOfClasses, void* output, Nd4jLong const* outputShape) {
    __shared__ Nd4jLong segment, zIndex;
    __shared__ T* x;
    __shared__ T* z;
    __shared__ int threadsPerSegment, start, finish;

    if (threadIdx.x == 0) {
        threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
        segment = blockIdx.x / threadsPerSegment;
        x = reinterpret_cast<T*>(input);
        z = reinterpret_cast<T*>(output);
        start = 0;
        finish = 0;   // empty range => accumulation loop below is a no-op
        if (segment < numOfClasses) {
            zIndex = shape::getIndexOffset(segment, outputShape);
            start = starts[segment];
            finish = start + lengths[segment];
            if (lengths[segment] > 0)
                z[zIndex] = T(x[shape::getIndexOffset(start, inputShape)] / lengths[segment]);
            else
                z[zIndex] = T(0);   // empty segment: mean defined as 0, as in the unsorted variant
        }
    }
    __syncthreads();

    // accumulate the remaining elements of the segment cooperatively
    for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputShape);
        if (lengths[segment])
            sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
    }
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted-segment mean for a rank-1 input: one block per segment id.
// Thread 0 seeds the output with firstElement/count (or 0 for an empty
// segment); then every thread strides over the whole input and accumulates
// the elements whose index value matches this block's segment.
template <typename T, typename I>
static __global__ void unsortedSegmentMeanLinearKernel(void* input, Nd4jLong const* inputShape, void* indices, Nd4jLong const* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, void* output, Nd4jLong const* outputShape) {
__shared__ T* val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ I* y; //int threadsPerSegment, start, finish;
auto segment = blockIdx.x;// /
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<I*>(indices);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
// if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
//start = starts[segment];
//finish = start + lengths[segment];
// seed with the first element of the segment; empty segments produce 0
if (lengths[segment] > 0)
z[zIndex] = T(x[shape::getIndexOffset(starts[segment], inputShape)] / T(lengths[segment]));
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
// val[segment] = z[zIndex];
// }
}
__syncthreads();
// full scan: only elements whose index equals this segment contribute;
// starts[segment] is skipped because thread 0 already seeded it above
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/T(lengths[segment])));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentMean kernel
// Segment mean over TADs ("rows") of a rank >= 2 input: one block per row.
// indices[blockIdx.x] is the segment of the row this block processes; each
// row's elements are divided by the segment length and atomically added into
// the segment's output row.
// Fix vs. previous revision: the row-bounds check used `blockIdx.x <= total`,
// off by one — valid row indices are [0, total); index == total would read
// one entry past the TAD-offset table.
template <typename T, typename I>
static __global__ void segmentMeanTadKernel(void* inputBuf, Nd4jLong const* inputShape, Nd4jLong const* inputTads, Nd4jLong const* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong const* outputShape, Nd4jLong const* outputTads, Nd4jLong const* outputTadOffsets) {
    __shared__ Nd4jLong len, total;
    __shared__ T* z;
    __shared__ int start, finish;

    auto segment = indices[blockIdx.x];   // segment id of this block's row
    if (threadIdx.x == 0) {
        z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
        len = shape::length(inputTads);          // elements per row (TAD)
        start = starts[segment];                 // first row of this segment
        finish = start + lengths[segment];       // one past the last row of this segment
        total = shape::sizeAt(inputShape, 0);    // number of rows in the input
    }
    __syncthreads();

    auto idx = blockIdx.x;
    if (blockIdx.x < total) {
        auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
        if (blockIdx.x == start) {
            // first row of the segment seeds the running mean
            for (auto e = threadIdx.x; e < len; e += blockDim.x) {
                auto xIndex = shape::getIndexOffset(e, inputTads);
                auto zIndex = shape::getIndexOffset(e, outputTads);
                sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/lengths[segment]));
            }
        }
        else {
            for (auto e = threadIdx.x; e < len; e += blockDim.x) {
                auto xIndex = shape::getIndexOffset(e, inputTads);
                auto zIndex = shape::getIndexOffset(e, outputTads);
                if (lengths[segment])
                    sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/lengths[segment]));
            }
        }
    }
}
// -------------------------------------------------------------------------------------------------------------- //
// segmen mean
// Host dispatcher for sorted segment mean: derives per-class [start, length)
// ranges from the sorted `indices` array, then launches the linear kernel
// (vector input) or the TAD kernel (rank >= 2 input).
template <typename T, typename I>
static void segmentMeanFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
// number of segments: last index value + 1 (indices assumed sorted ascending)
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
// sentinel initial values: begin = lengthOf ("not seen"), length = 0
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
NDArray::prepareSpecialUse({output}, {input, indices});
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
// fills begins/lengths with the contiguous range of each segment id
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
if (input->isVector()) {
// NOTE(review): blockDim = input->lengthOf() — exceeds the per-block thread
// limit for long vectors; confirm inputs are bounded accordingly
hipLaunchKernelGGL(( segmentMeanLinearKernel<T,I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
// exclude dim 0 -> one TAD per row; one block per row
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
hipLaunchKernelGGL(( segmentMeanTadKernel<T,I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry point for sorted segment mean: dispatches on data types.
// NOTE(review): selects on output->dataType() while unsortedSegmentMeanFunctor
// below selects on input->dataType() — confirm which is intended.
void segmentMeanFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentMeanFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for unsorted segment mean: numOfClasses is supplied by the
// caller (indices need not be sorted). Builds per-class [start, length)
// ranges, then launches the unsorted linear kernel or the TAD kernel.
template <typename T, typename I>
static void unsortedSegmentMeanFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
// sentinel initial values: begin = lengthOf ("not seen"), length = 0
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
// NOTE(review): dims.y = indices->lengthOf() used as blockDim — confirm it
// stays within the per-block thread limit for large inputs
hipLaunchKernelGGL(( unsortedSegmentMeanLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
// TAD kernel accumulates with atomics, so the output must start from zero
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
hipLaunchKernelGGL(( segmentMeanTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry point for unsorted segment mean: dispatches on data types.
void unsortedSegmentMeanFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMeanFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop of segment mean for a rank-1 input:
// dL/dx[e] = dL/dz[class(e)] / count(class(e)), via a grid-stride loop.
template <typename T, typename I>
static __global__ void segmentMeanBPLinearKernel(void* inputBuf, Nd4jLong const* inputShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
int* lengths, void* outputBuf, Nd4jLong const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn; // NOTE(review): declared but never used in this kernel
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
// grid-stride loop over every input element
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
// NOTE(review): no guard against lengths[classIndex] == 0 — confirm every
// referenced class is non-empty on this path
z[zOffset] = T(gradOut[gradOffsetO] / float(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop of segment mean over TADs: for each input row i (segment = y[i]),
// the output row receives the segment's incoming gradient row divided by the
// segment length. One block per row (grid-stride over rows).
template <typename T, typename I>
static __global__ void segmentMeanBPTadKernel(void* inputBuf, Nd4jLong const* inputShape, void* eps, Nd4jLong const* epsShape,
void* indicesBuf, Nd4jLong const* indicesShape, int* lengths, void* outputBuf, Nd4jLong const* outputShape,Nd4jLong const* inputTad,
Nd4jLong const* inputOffsets, Nd4jLong const* gradOutTad, Nd4jLong const* gradOutOffsets, Nd4jLong const* outTad, Nd4jLong const* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad); // elements per output row
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i]; //yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
// empty segments contribute no gradient (guarded division)
if (lengths[segment] > 0)
currentOut[zIndex] = T(outGrad[gradIndex] / float(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// backrop for mean
// Host dispatcher for sorted segment-mean backprop: rebuilds per-class
// lengths from the sorted indices, then launches the linear or TAD BP kernel.
template <typename T, typename I>
int segmentMeanFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// number of segments: last index value + 1 (indices assumed sorted ascending)
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
// NOTE(review): blockDim = input->lengthOf() — confirm it stays within the
// per-block thread limit for large inputs
hipLaunchKernelGGL(( segmentMeanBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentMeanBPTadKernel<T,I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// segmen mean bp main
// Public entry point for sorted segment-mean backprop.
// NOTE(review): the selector macro embeds `return segmentMeanFunctorBP_`, so
// the registerSpecialUse call below appears unreachable and the function may
// fall off the end without a return if no type case matches — confirm the
// macro's expansion semantics.
int segmentMeanFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMeanFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for unsorted segment-mean backprop.
// NOTE(review): the numOfClasses parameter is not used — numClasses is
// recomputed from the last index value, which assumes sorted indices;
// confirm this is intended for the unsorted op.
template <typename T, typename I>
static int unsortedSegmentMeanFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentMeanBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentMeanBPTadKernel<T,I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry point for unsorted segment-mean backprop.
// NOTE(review): as in segmentMeanFunctorBP, the embedded `return` in the
// selector makes the trailing registerSpecialUse appear unreachable — confirm.
int unsortedSegmentMeanFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMeanFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} | 6fa0cd1c9fd3bfaffa17871953a89c48a13c23a6.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Sorted-segment mean for a rank-1 input (CUDA twin of the HIP version).
// Blocks are mapped onto segments; starts[s]/lengths[s] give the contiguous
// index range of segment s in the sorted input. Thread 0 seeds the output
// with firstElement/count, the rest of the block accumulates the remaining
// elements with atomics.
// Fixes vs. previous revision: (1) seeding no longer divides by zero for an
// empty segment — the output is set to 0, matching unsortedSegmentMeanLinearKernel;
// (2) start/finish are zero-initialized so blocks mapped past the last
// segment do not loop over uninitialized shared values.
template <typename T, typename I>
static __global__ void segmentMeanLinearKernel(void* input, Nd4jLong const* inputShape, int* starts, int* lengths, Nd4jLong numOfClasses, void* output, Nd4jLong const* outputShape) {
    __shared__ Nd4jLong segment, zIndex;
    __shared__ T* x;
    __shared__ T* z;
    __shared__ int threadsPerSegment, start, finish;

    if (threadIdx.x == 0) {
        threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
        segment = blockIdx.x / threadsPerSegment;
        x = reinterpret_cast<T*>(input);
        z = reinterpret_cast<T*>(output);
        start = 0;
        finish = 0;   // empty range => accumulation loop below is a no-op
        if (segment < numOfClasses) {
            zIndex = shape::getIndexOffset(segment, outputShape);
            start = starts[segment];
            finish = start + lengths[segment];
            if (lengths[segment] > 0)
                z[zIndex] = T(x[shape::getIndexOffset(start, inputShape)] / lengths[segment]);
            else
                z[zIndex] = T(0);   // empty segment: mean defined as 0, as in the unsorted variant
        }
    }
    __syncthreads();

    // accumulate the remaining elements of the segment cooperatively
    for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputShape);
        if (lengths[segment])
            sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
    }
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted-segment mean for a rank-1 input (CUDA twin of the HIP version):
// one block per segment id; thread 0 seeds with firstElement/count (0 for an
// empty segment), then every thread strides over the whole input and
// accumulates the elements whose index value matches this segment.
template <typename T, typename I>
static __global__ void unsortedSegmentMeanLinearKernel(void* input, Nd4jLong const* inputShape, void* indices, Nd4jLong const* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, void* output, Nd4jLong const* outputShape) {
__shared__ T* val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ I* y; //int threadsPerSegment, start, finish;
auto segment = blockIdx.x;// /
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<I*>(indices);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
// if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
//start = starts[segment];
//finish = start + lengths[segment];
// seed with the first element of the segment; empty segments produce 0
if (lengths[segment] > 0)
z[zIndex] = T(x[shape::getIndexOffset(starts[segment], inputShape)] / T(lengths[segment]));
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
// val[segment] = z[zIndex];
// }
}
__syncthreads();
// full scan: starts[segment] is skipped because thread 0 already seeded it
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/T(lengths[segment])));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentMean kernel
// Segment mean over TADs (CUDA twin of the HIP version): one block per row.
// indices[blockIdx.x] is the segment of the row this block processes; each
// row's elements are divided by the segment length and atomically added into
// the segment's output row.
// Fix vs. previous revision: the row-bounds check used `blockIdx.x <= total`,
// off by one — valid row indices are [0, total); index == total would read
// one entry past the TAD-offset table.
template <typename T, typename I>
static __global__ void segmentMeanTadKernel(void* inputBuf, Nd4jLong const* inputShape, Nd4jLong const* inputTads, Nd4jLong const* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong const* outputShape, Nd4jLong const* outputTads, Nd4jLong const* outputTadOffsets) {
    __shared__ Nd4jLong len, total;
    __shared__ T* z;
    __shared__ int start, finish;

    auto segment = indices[blockIdx.x];   // segment id of this block's row
    if (threadIdx.x == 0) {
        z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
        len = shape::length(inputTads);          // elements per row (TAD)
        start = starts[segment];                 // first row of this segment
        finish = start + lengths[segment];       // one past the last row of this segment
        total = shape::sizeAt(inputShape, 0);    // number of rows in the input
    }
    __syncthreads();

    auto idx = blockIdx.x;
    if (blockIdx.x < total) {
        auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
        if (blockIdx.x == start) {
            // first row of the segment seeds the running mean
            for (auto e = threadIdx.x; e < len; e += blockDim.x) {
                auto xIndex = shape::getIndexOffset(e, inputTads);
                auto zIndex = shape::getIndexOffset(e, outputTads);
                sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/lengths[segment]));
            }
        }
        else {
            for (auto e = threadIdx.x; e < len; e += blockDim.x) {
                auto xIndex = shape::getIndexOffset(e, inputTads);
                auto zIndex = shape::getIndexOffset(e, outputTads);
                if (lengths[segment])
                    sd::math::atomics::nd4j_atomicAdd(&z[zIndex], T(x[xIndex]/lengths[segment]));
            }
        }
    }
}
// -------------------------------------------------------------------------------------------------------------- //
// segmen mean
// Host dispatcher for sorted segment mean (CUDA twin of the HIP version):
// derives per-class [start, length) ranges from the sorted indices, then
// launches the linear kernel (vector input) or the TAD kernel (rank >= 2).
template <typename T, typename I>
static void segmentMeanFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
// number of segments: last index value + 1 (indices assumed sorted ascending)
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
// sentinel initial values: begin = lengthOf ("not seen"), length = 0
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
NDArray::prepareSpecialUse({output}, {input, indices});
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
// fills begins/lengths with the contiguous range of each segment id
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
if (input->isVector()) {
// NOTE(review): blockDim = input->lengthOf() — exceeds the per-block thread
// limit for long vectors; confirm inputs are bounded accordingly
segmentMeanLinearKernel<T,I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
// exclude dim 0 -> one TAD per row; one block per row
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
segmentMeanTadKernel<T,I><<<input->sizeAt(0), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentMeanFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentMeanFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentMeanFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentMeanLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
segmentMeanTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentMeanFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMeanFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentMeanBPLinearKernel(void* inputBuf, Nd4jLong const* inputShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
int* lengths, void* outputBuf, Nd4jLong const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = T(gradOut[gradOffsetO] / float(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentMeanBPTadKernel(void* inputBuf, Nd4jLong const* inputShape, void* eps, Nd4jLong const* epsShape,
void* indicesBuf, Nd4jLong const* indicesShape, int* lengths, void* outputBuf, Nd4jLong const* outputShape,Nd4jLong const* inputTad,
Nd4jLong const* inputOffsets, Nd4jLong const* gradOutTad, Nd4jLong const* gradOutOffsets, Nd4jLong const* outTad, Nd4jLong const* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i]; //yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
if (lengths[segment] > 0)
currentOut[zIndex] = T(outGrad[gradIndex] / float(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// backrop for mean
template <typename T, typename I>
int segmentMeanFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentMeanBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
segmentMeanBPTadKernel<T,I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// segmen mean bp main
int segmentMeanFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMeanFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static int unsortedSegmentMeanFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentMeanBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
segmentMeanBPTadKernel<T,I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentMeanFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMeanFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} |
9f0cc479be62bc87d3ca3db3bf7522d13f5d5d94.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
}
}
}
| 9f0cc479be62bc87d3ca3db3bf7522d13f5d5d94.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
timer().startGpuTimer();
// TODO
timer().endGpuTimer();
}
}
}
|
6de9e2b138fcaac137f353629310c8ec2536d5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, kernels
#include "scan_naive_kernel.cu"
void runTest( int argc, char** argv);
extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
void checkCUDAError(const char *msg);
int checkResults(float *, float *, int, float);
int
main( int argc, char** argv)
{
runTest( argc, argv);
exit(0);
}
void
runTest( int argc, char** argv)
{
unsigned int num_elements = 512;
const unsigned int mem_size = sizeof( float) * num_elements;
const unsigned int shared_mem_size = sizeof(float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc(mem_size);
// initialize the input data on the host to be integer values
// between 0 and 10
for( unsigned int i = 0; i < num_elements; ++i){
h_data[i] = floorf(10*(rand()/(float)RAND_MAX));
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
hipMalloc( (void**) &d_idata, mem_size);
hipMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice);
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
dim3 grid(1, 1, 1);
dim3 threads(512, 1, 1);
printf("Running parallel prefix sum (scan) of %d elements\n", num_elements);
hipLaunchKernelGGL(( scan_naive), dim3(grid), dim3(threads), 2 * shared_mem_size , 0, d_odata, d_idata, num_elements);
hipDeviceSynchronize();
// copy result from device to host
hipMemcpy( h_data, d_odata, sizeof(float) * num_elements, hipMemcpyDeviceToHost);
float epsilon = 0.0f;
unsigned int result_regtest = checkResults( reference, h_data, num_elements, epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
hipFree(d_idata);
hipFree(d_odata);
}
void
checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
for(int i = 0; i < num_elements; i++)
if((reference[i] - gpu_result[i]) > threshold){
checkMark = 0;
break;
}
return checkMark;
}
| 6de9e2b138fcaac137f353629310c8ec2536d5d5.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, kernels
#include "scan_naive_kernel.cu"
void runTest( int argc, char** argv);
extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
void checkCUDAError(const char *msg);
int checkResults(float *, float *, int, float);
int
main( int argc, char** argv)
{
runTest( argc, argv);
exit(0);
}
void
runTest( int argc, char** argv)
{
unsigned int num_elements = 512;
const unsigned int mem_size = sizeof( float) * num_elements;
const unsigned int shared_mem_size = sizeof(float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc(mem_size);
// initialize the input data on the host to be integer values
// between 0 and 10
for( unsigned int i = 0; i < num_elements; ++i){
h_data[i] = floorf(10*(rand()/(float)RAND_MAX));
}
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
cudaMalloc( (void**) &d_idata, mem_size);
cudaMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
dim3 grid(1, 1, 1);
dim3 threads(512, 1, 1);
printf("Running parallel prefix sum (scan) of %d elements\n", num_elements);
scan_naive<<< grid, threads, 2 * shared_mem_size >>>(d_odata, d_idata, num_elements);
cudaThreadSynchronize();
// copy result from device to host
cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost);
float epsilon = 0.0f;
unsigned int result_regtest = checkResults( reference, h_data, num_elements, epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cudaFree(d_idata);
cudaFree(d_odata);
}
void
checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
for(int i = 0; i < num_elements; i++)
if((reference[i] - gpu_result[i]) > threshold){
checkMark = 0;
break;
}
return checkMark;
}
|
35c3e7300eaa4df3190c3f063730e78e01019c20.hip | // !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define CHUNK_K 4
#define SKEW 1
#define WARPS_PER_BLOCK 8
#define WARP_SIZE 32
#define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK
#define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4)
#define WARP_COPY_BYTES WARP_SIZE * sizeof(int4)
#define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES
#define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS
#define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS
#define M 8
#define N 8
#define K 128
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
typedef union {
int4 vec;
int a[4];
} U4;
// Assume that Kernel size is 3x3.
// Assume CIN is 128.
__global__ void APConv_w1a2_pack_pool(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) {
// GEMM Configuration
int X_bit_offset = (Height+2) * (Width+2) * CIN/128;
// int W_bit_offset = 9*CIN*COUT/128;
int BIT=2;
int X_ROW_BIT = (Width+2)*CIN/128;
int W_ROW_BIT = 9*(CIN/128);
// if (blockIdx.x == 0 && threadIdx.x == 0) {
// // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) {
// // printf("X[%d]: %x\n", i, *((int*)X+i));
// // }
// for(int i = 0; i<COUT*9*CIN/32; i++) {
// printf("W[%d]: %x\n", i, *((int*)W+i));
// }
// }
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4;
const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8;
const unsigned int block_z = block_pos % (COUT/64) * 64;
if (block_i >= Height) {
break;
}
int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128;
for(int i=0; i < WARP_COL_TILES; i++)
for(int j=0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// On the K dimension, there are 9*CIN/128 element to solve.
// This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1]
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT); // bit_flag = 0/1, indicates
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k+t)/(3*CIN/128);
int sub_col = (tile_k+t)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
// if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) {
// printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx);
// printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3));
// }
shmem[SHMEM_i][t] = X[GL_idx];
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t;
shmem[SHMEM_i][t] = W[weight_load_idx];
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 128; i++) {
// for(int j = 0; j < 16; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*20 + j;
// printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr);
// }
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a);
// for(int t = 0; t<a[i].num_elements; t++) {
// printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]);
// }
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
// if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) {
// for(int t = 0; t<b[j].num_elements; t++) {
// printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]);
// }
// }
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
#pragma unroll
for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT);
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k)/(3*CIN/128);
int sub_col = (tile_k)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t);
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][0];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][0];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
__syncthreads();
}
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// for(int t = 0; t<c[0][0].num_elements; t++) {
// printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]);
// }
// }
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 8 * 2 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i*64*8 + j*8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, wmma::mem_row_major);
}
}
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*64 + j;
// printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr);
// }
// }
// }
int val[4];
int final_val;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+(warpId/4)*16*64 + (warpId%4)*2*64 + laneId;
int mask = 1;
int bit0, bit1;
unsigned r0, r1;
int tmp0, tmp1;
int Output_row = warpId/4;
int Output_col = warpId%4;
int* dst_gmem_warp_stream_ptr;
tmp0 = *shmem_warp_stream_ptr;
tmp1 = *(shmem_warp_stream_ptr+32*64);
val[0] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+64);
tmp1 = *(shmem_warp_stream_ptr+64+32*64);
val[1] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+8*64);
tmp1 = *(shmem_warp_stream_ptr+8*64+32*64);
val[2] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+9*64);
tmp1 = *(shmem_warp_stream_ptr+9*64+32*64);
val[3] = tmp0 + 2*tmp1;
final_val = (val[0]+val[1]+val[2]+val[3])/4;
bit0 = final_val & (mask << 0);
bit1 = (final_val & (mask << 1)) >> 1;
r0 = __ballot_sync(0xFFFFFFFF, bit0);
r1 = __ballot_sync(0xFFFFFFFF, bit1);
if (laneId == 0) {
// printf("r0: %x, r1: %x\n", r0, r1);
dst_gmem_warp_stream_ptr = Output + block_i/2 * Width/2 * COUT/32 + block_j/2*COUT/32 + block_z/32
+ Output_row*Width/2*COUT/32 + Output_col*COUT/32;
*dst_gmem_warp_stream_ptr = __brev(r0);
*(dst_gmem_warp_stream_ptr+Width/2*Height/2*COUT/32) = __brev(r1);
}
tmp0 = *(shmem_warp_stream_ptr+32);
tmp1 = *(shmem_warp_stream_ptr+32+32*64);
val[0] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+64);
tmp1 = *(shmem_warp_stream_ptr+32+64+32*64);
val[1] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+8*64);
tmp1 = *(shmem_warp_stream_ptr+32+8*64+32*64);
val[2] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+9*64);
tmp1 = *(shmem_warp_stream_ptr+32+9*64+32*64);
val[3] = tmp0 + 2*tmp1;
final_val = (val[0]+val[1]+val[2]+val[3])/4;
bit0 = final_val & (mask << 0);
bit1 = (final_val & (mask << 1)) >> 1;
r0 = __ballot_sync(0xFFFFFFFF, bit0);
r1 = __ballot_sync(0xFFFFFFFF, bit1);
if (laneId == 0) {
dst_gmem_warp_stream_ptr = Output + block_i/2 * Width/2 * COUT/32 + block_j/2*COUT/32 + block_z/32
+ Output_row*Width/2*COUT/32 + Output_col*COUT/32+1;
*dst_gmem_warp_stream_ptr = __brev(r0);
*(dst_gmem_warp_stream_ptr+Width/2*Height/2*COUT/32) = __brev(r1);
}
__syncthreads();
}
}
void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){
int *X_int = (int*) X;
int *W_int = (int*) W;
for(int b = 0; b<X_BIT; b++) {
for(int i=0; i < Height+2; i++) {
for(int j=0; j < Width+2; j++) {
for(int k = 0; k < CIN/32; k++) {
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j;
X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand();
}
}
}
}
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < COUT; i++) {
for(int j = 0; j < 9*CIN/32; j++) {
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF;
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i;
W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand();
}
}
}
}
// int popcnt(int i) {
// // Java: use int, and use >>> instead of >>
// // C or C++: use int
// i = i - ((i >> 1) & 0x55555555);
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
// return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
// }
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT) {
int *X_int = (int*) X;
int *W_int = (int*) W;
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
ref_C[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT, int OUT_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[Height*Width*COUT];
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
C_ref_before_decompose[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
for(int m=0; m<Height; m++) {
for(int n=0; n<Width; n++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) {
val[b] = 0;
}
for(int co_tile = 0; co_tile<COUT/32; co_tile++) {
for(int co=0; co<32; co++) {
int tmp = C_ref_before_decompose[m*Width*COUT + n*COUT + co_tile*32+co];
tmp = (tmp - 0); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | (((mask<<b) & tmp) >> b);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*Height*Width*COUT/32+m*Width*COUT/32+n*COUT/32 + co_tile] = val[b];
}
}
}
}
}
void compute_ref_pack_pool(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT, int OUT_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[Height*Width*COUT];
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
C_ref_before_decompose[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
int size_after_pool = (int)((float)Height /2 * (float)Width/2 * COUT);
// printf("Height: %d, Width: %d, COUT: %d, size_after_pool: %d\n", Height, Width, COUT, (int)size_after_pool);
int half_width = Width/2;
int half_height = Height/2;
int C_ref_after_pool[size_after_pool];
for(int m=0; m<half_height; m++) {
for(int n=0; n<half_width; n++) {
for(int co=0; co<COUT; co++) {
int val1 = C_ref_before_decompose[2*m*Width*COUT+2*n*COUT+co];
int val2 = C_ref_before_decompose[2*m*Width*COUT+(2*n+1)*COUT+co];
int val3 = C_ref_before_decompose[(2*m+1)*Width*COUT+2*n*COUT+co];
int val4 = C_ref_before_decompose[(2*m+1)*Width*COUT+(2*n+1)*COUT+co];
C_ref_after_pool[m*half_width*COUT+n*COUT+co] = (val1+val2+val3+val4)/4;
}
}
}
// for(int co=32; co<64; co++) {
// int val1 = C_ref_before_decompose[12*Width*COUT+ 8*COUT+co];
// int val2 = C_ref_before_decompose[12*Width*COUT+ 9*COUT+co];
// int val3 = C_ref_before_decompose[13*Width*COUT+ 8*COUT+co];
// int val4 = C_ref_before_decompose[13*Width*COUT+ 9*COUT+co];
// int val = C_ref_after_pool[6*half_width*COUT+4*COUT+co];
// printf("co: %d, val1: %d, val2: %d, val3: %d, val4: %d, val: %x\n", co, val1, val2, val3, val4, val);
// }
for(int m=0; m<half_height; m++) {
for(int n=0; n<half_width; n++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) {
val[b] = 0;
}
for(int co_tile = 0; co_tile<COUT/32; co_tile++) {
for(int co=0; co<32; co++) {
int tmp = C_ref_after_pool[m*half_width*COUT + n*COUT + co_tile*32+co];
tmp = (tmp - 0); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | (((mask<<b) & tmp) >> b);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*half_height*half_width*COUT/32+m*half_width*COUT/32+n*COUT/32 + co_tile] = val[b];
}
}
}
}
}
// 452ccfff = 01000101001011001100111111111111
// fff334a2 = 11111111111100110011010010100010
void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT; co++) {
int idx = i*Width*COUT+j*COUT+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int Height, int Width, int COUT, int OUT_BIT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT/32; co++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*Height*Width*COUT/32 + i*Width*COUT/32+j*COUT/32+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("xb: %d, i: %d, j: %d, co: %d, C: %x, ref_C: %x\n", b, i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
#define verify_output
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int Height = 32;
int Width = 32;
int X_BIT = 2;
int W_BIT = 1;
// for(int CIN = 128; CIN <= 2048; CIN+=128) {
int CIN = 256;
int COUT = CIN;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT ));
#ifdef verify_output
printf("Preparing validation data for GPU...\n");
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT);
W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT);
Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT);
init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT);
checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, hipMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(hipFuncSetAttribute(
APConv_w1a2_pack_pool, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
checkKernelErrors(
hipLaunchKernelGGL(( (APConv_w1a2_pack_pool), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, W, X, Output, Height, Width, CIN, COUT)));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES;
printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, hipMemcpyDeviceToHost));
int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT * X_BIT);
/* Copmpute reference matrix on CPU */
compute_ref_pack_pool(W_h, X_h, C_ref, Height, Width, CIN, COUT, W_BIT, X_BIT, X_BIT);
/* validation results */
validate_results_pack(Output_h, C_ref, Height/2, Width/2, COUT, X_BIT);
free(C_ref);
free(X_h);
free(W_h);
free(Output_h);
#endif
checkCudaErrors(hipFree(reinterpret_cast<void *>(W)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(X)));
checkCudaErrors(hipFree(reinterpret_cast<void *>(Output)));
// }
return EXIT_SUCCESS;
}
// 758190d5 = 1110101100000011001000011010101
// bbac6 = 0000000000010111011101011000110 | 35c3e7300eaa4df3190c3f063730e78e01019c20.cu | /*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define CHUNK_K 4
#define SKEW 1
#define WARPS_PER_BLOCK 8
#define WARP_SIZE 32
#define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK
#define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4)
#define WARP_COPY_BYTES WARP_SIZE * sizeof(int4)
#define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES
#define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS
#define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS
#define M 8
#define N 8
#define K 128
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
typedef union {
int4 vec;
int a[4];
} U4;
// Assume that Kernel size is 3x3.
// Assume CIN is 128.
__global__ void APConv_w1a2_pack_pool(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) {
// GEMM Configuration
int X_bit_offset = (Height+2) * (Width+2) * CIN/128;
// int W_bit_offset = 9*CIN*COUT/128;
int BIT=2;
int X_ROW_BIT = (Width+2)*CIN/128;
int W_ROW_BIT = 9*(CIN/128);
// if (blockIdx.x == 0 && threadIdx.x == 0) {
// // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) {
// // printf("X[%d]: %x\n", i, *((int*)X+i));
// // }
// for(int i = 0; i<COUT*9*CIN/32; i++) {
// printf("W[%d]: %x\n", i, *((int*)W+i));
// }
// }
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4;
const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8;
const unsigned int block_z = block_pos % (COUT/64) * 64;
if (block_i >= Height) {
break;
}
int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128;
for(int i=0; i < WARP_COL_TILES; i++)
for(int j=0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// On the K dimension, there are 9*CIN/128 element to solve.
// This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1]
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT); // bit_flag = 0/1, indicates
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k+t)/(3*CIN/128);
int sub_col = (tile_k+t)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
// if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) {
// printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx);
// printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3));
// }
shmem[SHMEM_i][t] = X[GL_idx];
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t;
shmem[SHMEM_i][t] = W[weight_load_idx];
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 128; i++) {
// for(int j = 0; j < 16; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*20 + j;
// printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr);
// }
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a);
// for(int t = 0; t<a[i].num_elements; t++) {
// printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]);
// }
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
// if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) {
// for(int t = 0; t<b[j].num_elements; t++) {
// printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]);
// }
// }
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
#pragma unroll
for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT);
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k)/(3*CIN/128);
int sub_col = (tile_k)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t);
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][0];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][0];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
__syncthreads();
}
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// for(int t = 0; t<c[0][0].num_elements; t++) {
// printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]);
// }
// }
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 8 * 2 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i*64*8 + j*8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, wmma::mem_row_major);
}
}
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*64 + j;
// printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr);
// }
// }
// }
int val[4];
int final_val;
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+(warpId/4)*16*64 + (warpId%4)*2*64 + laneId;
int mask = 1;
int bit0, bit1;
unsigned r0, r1;
int tmp0, tmp1;
int Output_row = warpId/4;
int Output_col = warpId%4;
int* dst_gmem_warp_stream_ptr;
tmp0 = *shmem_warp_stream_ptr;
tmp1 = *(shmem_warp_stream_ptr+32*64);
val[0] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+64);
tmp1 = *(shmem_warp_stream_ptr+64+32*64);
val[1] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+8*64);
tmp1 = *(shmem_warp_stream_ptr+8*64+32*64);
val[2] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+9*64);
tmp1 = *(shmem_warp_stream_ptr+9*64+32*64);
val[3] = tmp0 + 2*tmp1;
final_val = (val[0]+val[1]+val[2]+val[3])/4;
bit0 = final_val & (mask << 0);
bit1 = (final_val & (mask << 1)) >> 1;
r0 = __ballot_sync(0xFFFFFFFF, bit0);
r1 = __ballot_sync(0xFFFFFFFF, bit1);
if (laneId == 0) {
// printf("r0: %x, r1: %x\n", r0, r1);
dst_gmem_warp_stream_ptr = Output + block_i/2 * Width/2 * COUT/32 + block_j/2*COUT/32 + block_z/32
+ Output_row*Width/2*COUT/32 + Output_col*COUT/32;
*dst_gmem_warp_stream_ptr = __brev(r0);
*(dst_gmem_warp_stream_ptr+Width/2*Height/2*COUT/32) = __brev(r1);
}
tmp0 = *(shmem_warp_stream_ptr+32);
tmp1 = *(shmem_warp_stream_ptr+32+32*64);
val[0] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+64);
tmp1 = *(shmem_warp_stream_ptr+32+64+32*64);
val[1] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+8*64);
tmp1 = *(shmem_warp_stream_ptr+32+8*64+32*64);
val[2] = tmp0 + 2*tmp1;
tmp0 = *(shmem_warp_stream_ptr+32+9*64);
tmp1 = *(shmem_warp_stream_ptr+32+9*64+32*64);
val[3] = tmp0 + 2*tmp1;
final_val = (val[0]+val[1]+val[2]+val[3])/4;
bit0 = final_val & (mask << 0);
bit1 = (final_val & (mask << 1)) >> 1;
r0 = __ballot_sync(0xFFFFFFFF, bit0);
r1 = __ballot_sync(0xFFFFFFFF, bit1);
if (laneId == 0) {
dst_gmem_warp_stream_ptr = Output + block_i/2 * Width/2 * COUT/32 + block_j/2*COUT/32 + block_z/32
+ Output_row*Width/2*COUT/32 + Output_col*COUT/32+1;
*dst_gmem_warp_stream_ptr = __brev(r0);
*(dst_gmem_warp_stream_ptr+Width/2*Height/2*COUT/32) = __brev(r1);
}
__syncthreads();
}
}
void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){
int *X_int = (int*) X;
int *W_int = (int*) W;
for(int b = 0; b<X_BIT; b++) {
for(int i=0; i < Height+2; i++) {
for(int j=0; j < Width+2; j++) {
for(int k = 0; k < CIN/32; k++) {
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j;
X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand();
}
}
}
}
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < COUT; i++) {
for(int j = 0; j < 9*CIN/32; j++) {
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF;
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i;
W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand();
}
}
}
}
// int popcnt(int i) {
// // Java: use int, and use >>> instead of >>
// // C or C++: use int
// i = i - ((i >> 1) & 0x55555555);
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
// return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
// }
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT) {
int *X_int = (int*) X;
int *W_int = (int*) W;
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
ref_C[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
}
void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT, int OUT_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[Height*Width*COUT];
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
C_ref_before_decompose[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
for(int m=0; m<Height; m++) {
for(int n=0; n<Width; n++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) {
val[b] = 0;
}
for(int co_tile = 0; co_tile<COUT/32; co_tile++) {
for(int co=0; co<32; co++) {
int tmp = C_ref_before_decompose[m*Width*COUT + n*COUT + co_tile*32+co];
tmp = (tmp - 0); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | (((mask<<b) & tmp) >> b);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*Height*Width*COUT/32+m*Width*COUT/32+n*COUT/32 + co_tile] = val[b];
}
}
}
}
}
void compute_ref_pack_pool(int4 *W, int4 *X, int *ref_C, int Height, int Width, int CIN, int COUT, int W_BIT, int X_BIT, int OUT_BIT) {
int *W_int = (int*) W;
int *X_int = (int*) X;
int C_ref_before_decompose[Height*Width*COUT];
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
C_ref_before_decompose[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
int size_after_pool = (int)((float)Height /2 * (float)Width/2 * COUT);
// printf("Height: %d, Width: %d, COUT: %d, size_after_pool: %d\n", Height, Width, COUT, (int)size_after_pool);
int half_width = Width/2;
int half_height = Height/2;
int C_ref_after_pool[size_after_pool];
for(int m=0; m<half_height; m++) {
for(int n=0; n<half_width; n++) {
for(int co=0; co<COUT; co++) {
int val1 = C_ref_before_decompose[2*m*Width*COUT+2*n*COUT+co];
int val2 = C_ref_before_decompose[2*m*Width*COUT+(2*n+1)*COUT+co];
int val3 = C_ref_before_decompose[(2*m+1)*Width*COUT+2*n*COUT+co];
int val4 = C_ref_before_decompose[(2*m+1)*Width*COUT+(2*n+1)*COUT+co];
C_ref_after_pool[m*half_width*COUT+n*COUT+co] = (val1+val2+val3+val4)/4;
}
}
}
// for(int co=32; co<64; co++) {
// int val1 = C_ref_before_decompose[12*Width*COUT+ 8*COUT+co];
// int val2 = C_ref_before_decompose[12*Width*COUT+ 9*COUT+co];
// int val3 = C_ref_before_decompose[13*Width*COUT+ 8*COUT+co];
// int val4 = C_ref_before_decompose[13*Width*COUT+ 9*COUT+co];
// int val = C_ref_after_pool[6*half_width*COUT+4*COUT+co];
// printf("co: %d, val1: %d, val2: %d, val3: %d, val4: %d, val: %x\n", co, val1, val2, val3, val4, val);
// }
for(int m=0; m<half_height; m++) {
for(int n=0; n<half_width; n++) {
int val[OUT_BIT];
for(int b=0; b<OUT_BIT; b++) {
val[b] = 0;
}
for(int co_tile = 0; co_tile<COUT/32; co_tile++) {
for(int co=0; co<32; co++) {
int tmp = C_ref_after_pool[m*half_width*COUT + n*COUT + co_tile*32+co];
tmp = (tmp - 0); // Can be modified for other quantized parameters.
for(int b=0; b<OUT_BIT; b++) {
int mask = 1;
val[b] = val[b] << 1;
val[b] = val[b] | (((mask<<b) & tmp) >> b);
}
}
for(int b=0; b<OUT_BIT; b++) {
ref_C[b*half_height*half_width*COUT/32+m*half_width*COUT/32+n*COUT/32 + co_tile] = val[b];
}
}
}
}
}
// 452ccfff = 01000101001011001100111111111111
// fff334a2 = 11111111111100110011010010100010
void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT; co++) {
int idx = i*Width*COUT+j*COUT+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
void validate_results_pack(int *C, int* ref_C, int Height, int Width, int COUT, int OUT_BIT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT/32; co++) {
for(int b=0; b<OUT_BIT; b++) {
int idx = b*Height*Width*COUT/32 + i*Width*COUT/32+j*COUT/32+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("xb: %d, i: %d, j: %d, co: %d, C: %x, ref_C: %x\n", b, i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
#define verify_output
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int Height = 32;
int Width = 32;
int X_BIT = 2;
int W_BIT = 1;
// for(int CIN = 128; CIN <= 2048; CIN+=128) {
int CIN = 256;
int COUT = CIN;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT ));
#ifdef verify_output
printf("Preparing validation data for GPU...\n");
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT);
W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT);
Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT);
init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT);
checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, cudaMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(cudaFuncSetAttribute(
APConv_w1a2_pack_pool, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(APConv_w1a2_pack_pool<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(W, X, Output, Height, Width, CIN, COUT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES;
printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12);
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, cudaMemcpyDeviceToHost));
int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT * X_BIT);
/* Copmpute reference matrix on CPU */
compute_ref_pack_pool(W_h, X_h, C_ref, Height, Width, CIN, COUT, W_BIT, X_BIT, X_BIT);
/* validation results */
validate_results_pack(Output_h, C_ref, Height/2, Width/2, COUT, X_BIT);
free(C_ref);
free(X_h);
free(W_h);
free(Output_h);
#endif
checkCudaErrors(cudaFree(reinterpret_cast<void *>(W)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(X)));
checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output)));
// }
return EXIT_SUCCESS;
}
// 758190d5 = 1110101100000011001000011010101
// bbac6 = 0000000000010111011101011000110 |
856e41ec55c80ebd767d4da34a055fbac8168ebe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <include/labwork.h>
#include <hip/hip_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
hipMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
Timer timerLab1; // We use another timer in lab1, so better to use another one here
timer.start();
switch (lwNum) {
case 1:
timerLab1.start();
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timerLab1.getElapsedTimeInMilliSec());
timerLab1.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
// Speedup of shared memory
//timerLab1.start();
//labwork.labwork5_CPU();
//printf("labwork 5 CPU ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
// Speedup of global memory
timerLab1.start();
labwork.labwork5_GPU();
printf("labwork 5 GPU non-shared memory ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
// Speedup of shared memory
timerLab1.start();
labwork.labwork5_GPUShared();
printf("labwork 5 GPU shared memory ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
// Loads the input JPEG into inputImage via the jpegLoader helper.
void Labwork::loadInputImage(std::string inputFileName) {
    inputImage = jpegLoader.load(inputFileName);
}
// Writes outputImage as a JPEG (quality 90) with the input image's dimensions.
void Labwork::saveOutputImage(std::string outputFileName) {
    jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
// Sequential CPU grayscale: each output pixel is the integer average of the
// three RGB bytes of the corresponding input pixel, replicated on all three
// channels. The whole image is processed 100 times so the run lasts long
// enough to be timed meaningfully.
void Labwork::labwork1_CPU() {
    int pixelCount = inputImage->width * inputImage->height;
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    for (int pass = 0; pass < 100; pass++) {
        for (int p = 0; p < pixelCount; p++) {
            int r = (int) inputImage->buffer[p * 3];
            int g = (int) inputImage->buffer[p * 3 + 1];
            int b = (int) inputImage->buffer[p * 3 + 2];
            char gray = (char) ((r + g + b) / 3);
            outputImage[p * 3] = gray;
            outputImage[p * 3 + 1] = gray;
            outputImage[p * 3 + 2] = gray;
        }
    }
}
// OpenMP grayscale: same computation as labwork1_CPU, with the per-pixel
// loop distributed across threads.
void Labwork::labwork1_OpenMP() {
    int pixelCount = inputImage->width * inputImage->height;
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
        // BUGFIX: the pragma was misspelled ("#pragma opm parallele for") and
        // was silently ignored by the compiler, so this "OpenMP" version was
        // actually running sequentially. Parallelizing the inner loop keeps
        // every thread writing disjoint pixels (no race).
        #pragma omp parallel for
        for (int i = 0; i < pixelCount; i++) {
            outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
                                          (int) inputImage->buffer[i * 3 + 2]) / 3);
            outputImage[i * 3 + 1] = outputImage[i * 3];
            outputImage[i * 3 + 2] = outputImage[i * 3];
        }
    }
}
// Returns the number of scalar (FP32) cores of the device, derived from the
// compute capability and the multiprocessor count. Cores-per-SM values follow
// NVIDIA's helper_cuda _ConvertSMVer2Cores table. Returns 0 (after printing
// a message) for unknown architectures.
int getSPcores(hipDeviceProp_t devProp) {
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major) {
        case 2: // Fermi
            if (devProp.minor == 1) cores = mp * 48;
            else cores = mp * 32;
            break;
        case 3: // Kepler
            cores = mp * 192;
            break;
        case 5: // Maxwell
            cores = mp * 128;
            break;
        case 6: // Pascal
            // 6.1 and 6.2 have 128 cores/SM, 6.0 (P100) has 64
            if (devProp.minor == 1 || devProp.minor == 2) cores = mp * 128;
            else if (devProp.minor == 0) cores = mp * 64;
            else printf("Unknown device type\n");
            break;
        case 7: // Volta (7.0/7.2) and Turing (7.5): 64 FP32 cores/SM
            cores = mp * 64;
            break;
        case 8: // Ampere: 8.0 (A100) has 64, 8.6/8.7 have 128
            if (devProp.minor == 0) cores = mp * 64;
            else cores = mp * 128;
            break;
        default:
            printf("Unknown device type\n");
            break;
    }
    return cores;
}
// Labwork 2: enumerate the GPUs and print their main properties
// (name, core count, SM count, warp size, memory clock/bus/bandwidth).
void Labwork::labwork2_GPU() {
    int nbDevices;
    printf("Scanning devices ..\n");
    hipGetDeviceCount(&nbDevices); // Get the number of devices
    printf("We got %d devices here\n\n",nbDevices);
    for (int i = 0; i < nbDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        printf("Device name: %s\n", prop.name); // Display the name of the device
        printf("Device Number: %d\n", i); // Display the id of the device
        printf("Number of core : %d\n",getSPcores(prop)); // Display number of core
        printf("Multiprocessor count: %d\n", prop.multiProcessorCount); // Display the number of Multi processor
        printf("Warp Size : %d threads\n", prop.warpSize); // Display the wrapSize
        printf("Memory Clock Rate : %d kHz\n",
               prop.memoryClockRate); // Display Memory ClockRate
        printf("Memory Bus Width : %d bits\n",
               prop.memoryBusWidth); // Display Memory bus Width
        // peak bandwidth (GB/s) = 2 (DDR) * clock (kHz) * bus width (bytes) / 1e6
        printf("Peak Memory Bandwidth : %f GB/s\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1e6); //Display memory Brandwith
    }
}
// 1D grayscale kernel: one thread per pixel, averaging the three channels.
// NOTE(review): there is no bounds check on tid, so the launch configuration
// must not create more threads than allocated elements — extra threads would
// read and write past the buffers.
__global__ void grayscale(uchar3 *input, uchar3 *output) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
    output[tid].z = output[tid].y = output[tid].x;
}
// Labwork 3: grayscale on the GPU with a 1D grid.
// BUGFIX: numBlock used to be pixelCount / blockSize (floor division), so up
// to blockSize-1 trailing pixels were never processed. We now round the block
// count up; because the grayscale kernel has no bounds check, the device
// buffers are allocated to the padded size so the extra threads stay in bounds.
void Labwork::labwork3_GPU() {
    int pixelCount = inputImage->width * inputImage->height;
    int blockSize = 1024;
    int numBlock = (pixelCount + blockSize - 1) / blockSize; // ceil-div
    int paddedCount = numBlock * blockSize;  // the unguarded kernel touches this many elements
    uchar3 *devInput;
    uchar3 *devGray;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Padded allocations keep every launched thread inside the buffers
    hipMalloc(&devInput, paddedCount * sizeof(uchar3));
    hipMalloc(&devGray, paddedCount * sizeof(uchar3));
    // Copy only the real pixels from the host input
    hipMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3), hipMemcpyHostToDevice);
    // Convert to grayscale on the device
    hipLaunchKernelGGL(( grayscale), dim3(numBlock), dim3(blockSize), 0, 0, devInput, devGray);
    // Copy only the real pixels back (padding is discarded)
    hipMemcpy(outputImage, devGray, pixelCount * sizeof(uchar3), hipMemcpyDeviceToHost);
    // Don't forget to free
    hipFree(devInput);
    hipFree(devGray);
}
// 2D grayscale kernel: one thread per (x, y) pixel; threads outside the
// image bounds return early, so the grid may safely overshoot the image.
__global__ void grayscale2D(uchar3 *input, uchar3 *output, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Checking if we are still in the image
    if(x>=imageWidth || y>=imageHeight) return;
    int tid = imageWidth * y + x; // RowSize * y + x
    // We turn the pixel gray
    unsigned char value = (input[tid].x + input[tid].y + input[tid].z) / 3;
    output[tid].z = output[tid].y = output[tid].x = value;
}
// Labwork 4: grayscale on the GPU with a 2D grid of 32x32 blocks.
// BUGFIX: the grid size used ceil(width/blockSize.x), but the division is
// integer division, so ceil() received an already-truncated value and images
// whose dimensions are not multiples of 32 lost their right/bottom edges.
// Integer ceil-division launches enough blocks; grayscale2D's bounds check
// discards the overshoot threads.
void Labwork::labwork4_GPU() {
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devGray;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    hipMalloc(&devInput, pixelCount * sizeof(uchar3));
    hipMalloc(&devGray, pixelCount * sizeof(uchar3));
    // Copy from the HostInput to the devInput (here, the image)
    hipMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
    // Convert to grayscale on the device
    hipLaunchKernelGGL(( grayscale2D), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devGray, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the image in grayscale)
    hipMemcpy(outputImage, devGray,pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
    // Don't forget to free
    hipFree(devInput);
    hipFree(devGray);
}
// CPU Gaussian blur with a 7x7 integer kernel. Each output pixel is the
// weighted average of the grayscale values of its in-bounds neighbours,
// normalized by the sum of the coefficients actually used — which is how
// the image borders are handled.
void Labwork::labwork5_CPU() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int width = inputImage->width;
    int height = inputImage->height;
    outputImage = (char*) malloc(width * height * sizeof(char) * 3);
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            int weighted = 0;
            int totalCoeff = 0;
            for (int dy = -3; dy <= 3; dy++) {
                int j = row + dy;
                if (j < 0 || j >= height) continue;   // skip rows outside the image
                for (int dx = -3; dx <= 3; dx++) {
                    int i = col + dx;
                    if (i < 0 || i >= width) continue; // skip columns outside the image
                    int tid = j * width + i;
                    // grayscale value of the neighbour
                    unsigned char gray = (inputImage->buffer[tid * 3] + inputImage->buffer[tid * 3 + 1] + inputImage->buffer[tid * 3 + 2])/3;
                    int coeff = kernel[(dy+3) * 7 + dx + 3];
                    weighted += gray * coeff;
                    totalCoeff += coeff;
                }
            }
            weighted /= totalCoeff;
            int posOut = row * width + col;
            outputImage[posOut * 3] = outputImage[posOut * 3 + 1] = outputImage[posOut * 3 + 2] = weighted;
        }
    }
}
// 7x7 Gaussian blur kernel, one thread per output pixel. The weight table
// is read from global memory on every access (compare blurSharedMemory).
// Out-of-image neighbours are skipped and the sum is normalized by the
// coefficients actually used, which handles the borders.
__global__ void blur(uchar3 *input, uchar3 *output, int* kernel, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    // Checking if we are still in the image
    if(tidx>=imageWidth || tidy>=imageHeight) return;
    int sum = 0;
    int c = 0;
    // moving on in the matrix
    for (int y = -3; y <= 3; y++) {
        for (int x = -3; x <= 3; x++) {
            int i = tidx + x;
            int j = tidy + y;
            //We won't take pixel that's outside the image.
            if (i < 0 || j < 0 || i >= imageWidth || j >= imageHeight) continue;
            int tid = imageWidth * j + i; // RowSize * j + i, get the position of our pixel
            //Aplying gray filter on the pixel
            unsigned char gray = (input[tid].x + input[tid].y + input[tid].z) / 3;
            //Applying Gaussian blur and stuff
            int coefficient = kernel[(y+3) * 7 + x + 3];
            sum = sum + gray * coefficient;
            c += coefficient;
        }
    }
    sum /= c;
    int posOut = tidy * imageWidth + tidx;
    output[posOut].y = output[posOut].x = output[posOut].z = sum;
}
// Same 7x7 blur as blur(), but the 49 kernel coefficients are first staged
// into shared memory by the first 49 threads of the block, so each block
// reads the weight table from global memory only once.
__global__ void blurSharedMemory(uchar3 *input, uchar3 *output, int *kernel, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    int tid = threadIdx.x + threadIdx.y * blockDim.x; // flat in-block index
    // Creating the shared matrice
    __shared__ int sharedKernel[49];
    if (tid < 49) sharedKernel[tid] = kernel[tid];
    // Barrier is deliberately placed BEFORE the bounds-check return below,
    // so every thread of the block reaches it (no divergent barrier).
    __syncthreads();
    // Checking if we are still in the image
    if(tidx>=imageWidth || tidy>=imageHeight) return;
    int sum = 0;
    int c = 0;
    // moving on in the matrix
    for (int y = -3; y <= 3; y++) {
        for (int x = -3; x <= 3; x++) {
            int i = tidx + x;
            int j = tidy + y;
            //We won't take pixel that's outside the image.
            if (i < 0 || j < 0 || i >= imageWidth || j >= imageHeight) continue;
            // NOTE: this inner tid shadows the flat in-block index above
            int tid = imageWidth * j + i; // RowSize * j + i, get the position of our pixel
            //Aplying gray filter on the pixel
            unsigned char gray = (input[tid].x + input[tid].y + input[tid].z) / 3;
            //Applying Gaussian blur and stuff
            int coefficient = sharedKernel[(y+3) * 7 + x + 3];
            sum = sum + gray * coefficient;
            c += coefficient;
        }
    }
    sum /= c;
    int posOut = tidy * imageWidth + tidx;
    output[posOut].y = output[posOut].x = output[posOut].z = sum;
}
// Labwork 5 (global-memory variant): 7x7 Gaussian blur on the GPU.
// BUGFIX: the grid size used ceil() on an integer division, so non-multiple-
// of-32 dimensions lost their right/bottom edges. Integer ceil-division
// launches enough blocks; the blur kernel's bounds check handles the overshoot.
void Labwork::labwork5_GPU() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devBlur;
    // Device copy of the 7x7 weight table
    int *devKernel;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    hipMalloc(&devInput, pixelCount * sizeof(uchar3));
    hipMalloc(&devBlur, pixelCount * sizeof(uchar3));
    // Allocate memory for the matrice
    hipMalloc(&devKernel, sizeof(kernel));
    // Copy from the HostInput to the devInput (here, the image)
    hipMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
    // Copy the kernel matrice into the device
    hipMemcpy(devKernel, kernel,sizeof(kernel),hipMemcpyHostToDevice);
    // Run the blur on the device
    hipLaunchKernelGGL(( blur), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devBlur, devKernel, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the blurred image)
    hipMemcpy(outputImage, devBlur,pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
    // Don't forget to free
    hipFree(devInput);
    hipFree(devBlur);
    hipFree(devKernel);
}
// Labwork 5 (shared-memory variant): same blur, but the kernel stages the
// weight table in shared memory (see blurSharedMemory).
// BUGFIX: same integer-ceil grid-size bug as labwork5_GPU — the right and
// bottom edges were dropped for dimensions not divisible by 32.
void Labwork::labwork5_GPUShared() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devBlur;
    // Device copy of the 7x7 weight table
    int *devKernel;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    hipMalloc(&devInput, pixelCount * sizeof(uchar3));
    hipMalloc(&devBlur, pixelCount * sizeof(uchar3));
    // Allocate memory for the matrice
    hipMalloc(&devKernel, sizeof(kernel));
    // Copy from the HostInput to the devInput (here, the image)
    hipMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);
    // Copy the kernel matrice into the device
    hipMemcpy(devKernel, kernel,sizeof(kernel),hipMemcpyHostToDevice);
    // Run the shared-memory blur on the device
    hipLaunchKernelGGL(( blurSharedMemory), dim3(gridSize), dim3(blockSize), 0, 0, devInput, devBlur, devKernel, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the blurred image)
    hipMemcpy(outputImage, devBlur,pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
    // Don't forget to free
    hipFree(devInput);
    hipFree(devBlur);
    hipFree(devKernel);
}
void Labwork::labwork6_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork7_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork8_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork9_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork10_GPU() {
    // TODO: not implemented yet
}
| 856e41ec55c80ebd767d4da34a055fbac8168ebe.cu | #include <stdio.h>
#include <include/labwork.h>
#include <cuda_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
cudaMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
Timer timerLab1; // We use another timer in lab1, so better to use another one here
timer.start();
switch (lwNum) {
case 1:
timerLab1.start();
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timerLab1.getElapsedTimeInMilliSec());
timerLab1.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
// Speedup of shared memory
//timerLab1.start();
//labwork.labwork5_CPU();
//printf("labwork 5 CPU ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
// Speedup of global memory
timerLab1.start();
labwork.labwork5_GPU();
printf("labwork 5 GPU non-shared memory ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
// Speedup of shared memory
timerLab1.start();
labwork.labwork5_GPUShared();
printf("labwork 5 GPU shared memory ellapsed %.1fms\n",lwNum, timerLab1.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
// Loads the input JPEG into inputImage via the jpegLoader helper.
void Labwork::loadInputImage(std::string inputFileName) {
    inputImage = jpegLoader.load(inputFileName);
}
// Writes outputImage as a JPEG (quality 90) with the input image's dimensions.
void Labwork::saveOutputImage(std::string outputFileName) {
    jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
// Sequential CPU grayscale baseline: averages the RGB bytes of each pixel;
// repeated 100 times so the run is long enough to time.
void Labwork::labwork1_CPU() {
    int pixelCount = inputImage->width * inputImage->height;
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
        for (int i = 0; i < pixelCount; i++) {
            outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
                                          (int) inputImage->buffer[i * 3 + 2]) / 3);
            outputImage[i * 3 + 1] = outputImage[i * 3];
            outputImage[i * 3 + 2] = outputImage[i * 3];
        }
    }
}
// OpenMP grayscale: same computation as labwork1_CPU, with the per-pixel
// loop distributed across threads.
void Labwork::labwork1_OpenMP() {
    int pixelCount = inputImage->width * inputImage->height;
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
        // BUGFIX: the pragma was misspelled ("#pragma opm parallele for") and
        // was silently ignored by the compiler, so this "OpenMP" version was
        // actually running sequentially. Parallelizing the inner loop keeps
        // every thread writing disjoint pixels (no race).
        #pragma omp parallel for
        for (int i = 0; i < pixelCount; i++) {
            outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
                                          (int) inputImage->buffer[i * 3 + 2]) / 3);
            outputImage[i * 3 + 1] = outputImage[i * 3];
            outputImage[i * 3 + 2] = outputImage[i * 3];
        }
    }
}
// Returns the number of scalar (FP32) cores of the device, derived from the
// compute capability and the multiprocessor count. Cores-per-SM values follow
// NVIDIA's helper_cuda _ConvertSMVer2Cores table. Returns 0 (after printing
// a message) for unknown architectures.
int getSPcores(cudaDeviceProp devProp) {
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major) {
        case 2: // Fermi
            if (devProp.minor == 1) cores = mp * 48;
            else cores = mp * 32;
            break;
        case 3: // Kepler
            cores = mp * 192;
            break;
        case 5: // Maxwell
            cores = mp * 128;
            break;
        case 6: // Pascal
            // 6.1 and 6.2 have 128 cores/SM, 6.0 (P100) has 64
            if (devProp.minor == 1 || devProp.minor == 2) cores = mp * 128;
            else if (devProp.minor == 0) cores = mp * 64;
            else printf("Unknown device type\n");
            break;
        case 7: // Volta (7.0/7.2) and Turing (7.5): 64 FP32 cores/SM
            cores = mp * 64;
            break;
        case 8: // Ampere: 8.0 (A100) has 64, 8.6/8.7 have 128
            if (devProp.minor == 0) cores = mp * 64;
            else cores = mp * 128;
            break;
        default:
            printf("Unknown device type\n");
            break;
    }
    return cores;
}
// Labwork 2: enumerate the GPUs and print their main properties
// (name, core count, SM count, warp size, memory clock/bus/bandwidth).
void Labwork::labwork2_GPU() {
    int nbDevices;
    printf("Scanning devices ..\n");
    cudaGetDeviceCount(&nbDevices); // Get the number of devices
    printf("We got %d devices here\n\n",nbDevices);
    for (int i = 0; i < nbDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device name: %s\n", prop.name); // Display the name of the device
        printf("Device Number: %d\n", i); // Display the id of the device
        printf("Number of core : %d\n",getSPcores(prop)); // Display number of core
        printf("Multiprocessor count: %d\n", prop.multiProcessorCount); // Display the number of Multi processor
        printf("Warp Size : %d threads\n", prop.warpSize); // Display the wrapSize
        printf("Memory Clock Rate : %d kHz\n",
               prop.memoryClockRate); // Display Memory ClockRate
        printf("Memory Bus Width : %d bits\n",
               prop.memoryBusWidth); // Display Memory bus Width
        // peak bandwidth (GB/s) = 2 (DDR) * clock (kHz) * bus width (bytes) / 1e6
        printf("Peak Memory Bandwidth : %f GB/s\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1e6); //Display memory Brandwith
    }
}
// 1D grayscale kernel: one thread per pixel, averaging the three channels.
// NOTE(review): there is no bounds check on tid, so the launch configuration
// must not create more threads than allocated elements — extra threads would
// read and write past the buffers.
__global__ void grayscale(uchar3 *input, uchar3 *output) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3;
    output[tid].z = output[tid].y = output[tid].x;
}
// Labwork 3: grayscale on the GPU with a 1D grid.
// BUGFIX: numBlock used to be pixelCount / blockSize (floor division), so up
// to blockSize-1 trailing pixels were never processed. We now round the block
// count up; because the grayscale kernel has no bounds check, the device
// buffers are allocated to the padded size so the extra threads stay in bounds.
void Labwork::labwork3_GPU() {
    int pixelCount = inputImage->width * inputImage->height;
    int blockSize = 1024;
    int numBlock = (pixelCount + blockSize - 1) / blockSize; // ceil-div
    int paddedCount = numBlock * blockSize;  // the unguarded kernel touches this many elements
    uchar3 *devInput;
    uchar3 *devGray;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Padded allocations keep every launched thread inside the buffers
    cudaMalloc(&devInput, paddedCount * sizeof(uchar3));
    cudaMalloc(&devGray, paddedCount * sizeof(uchar3));
    // Copy only the real pixels from the host input
    cudaMemcpy(devInput, inputImage->buffer, pixelCount * sizeof(uchar3), cudaMemcpyHostToDevice);
    // Convert to grayscale on the device
    grayscale<<<numBlock, blockSize>>>(devInput, devGray);
    // Copy only the real pixels back (padding is discarded)
    cudaMemcpy(outputImage, devGray, pixelCount * sizeof(uchar3), cudaMemcpyDeviceToHost);
    // Don't forget to free
    cudaFree(devInput);
    cudaFree(devGray);
}
// 2D grayscale kernel: one thread per (x, y) pixel; threads outside the
// image bounds return early, so the grid may safely overshoot the image.
__global__ void grayscale2D(uchar3 *input, uchar3 *output, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Checking if we are still in the image
    if(x>=imageWidth || y>=imageHeight) return;
    int tid = imageWidth * y + x; // RowSize * y + x
    // We turn the pixel gray
    unsigned char value = (input[tid].x + input[tid].y + input[tid].z) / 3;
    output[tid].z = output[tid].y = output[tid].x = value;
}
// Labwork 4: grayscale on the GPU with a 2D grid of 32x32 blocks.
// BUGFIX: the grid size used ceil(width/blockSize.x), but the division is
// integer division, so ceil() received an already-truncated value and images
// whose dimensions are not multiples of 32 lost their right/bottom edges.
// Integer ceil-division launches enough blocks; grayscale2D's bounds check
// discards the overshoot threads.
void Labwork::labwork4_GPU() {
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devGray;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    cudaMalloc(&devInput, pixelCount * sizeof(uchar3));
    cudaMalloc(&devGray, pixelCount * sizeof(uchar3));
    // Copy from the HostInput to the devInput (here, the image)
    cudaMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
    // Convert to grayscale on the device
    grayscale2D<<<gridSize, blockSize>>>(devInput, devGray, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the image in grayscale)
    cudaMemcpy(outputImage, devGray,pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
    // Don't forget to free
    cudaFree(devInput);
    cudaFree(devGray);
}
// CPU implementation of Gaussian Blur: 7x7 integer kernel; each output pixel
// is the weighted average of the grayscale values of its in-bounds
// neighbours, normalized by the coefficients actually used (border handling).
void Labwork::labwork5_CPU() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int pixelCount = inputImage->width * inputImage->height;
    outputImage = (char*) malloc(pixelCount * sizeof(char) * 3);
    for (int row = 0; row < inputImage->height; row++) {
        for (int col = 0; col < inputImage->width; col++) {
            int sum = 0;
            int c = 0;
            for (int y = -3; y <= 3; y++) {
                for (int x = -3; x <= 3; x++) {
                    int i = col + x;
                    int j = row + y;
                    // skip neighbours outside the image
                    if (i < 0) continue;
                    if (i >= inputImage->width) continue;
                    if (j < 0) continue;
                    if (j >= inputImage->height) continue;
                    int tid = j * inputImage->width + i;
                    // grayscale value of the neighbour
                    unsigned char gray = (inputImage->buffer[tid * 3] + inputImage->buffer[tid * 3 + 1] + inputImage->buffer[tid * 3 + 2])/3;
                    int coefficient = kernel[(y+3) * 7 + x + 3];
                    sum = sum + gray * coefficient;
                    c += coefficient;
                }
            }
            sum /= c; // normalize by the weights actually used
            int posOut = row * inputImage->width + col;
            outputImage[posOut * 3] = outputImage[posOut * 3 + 1] = outputImage[posOut * 3 + 2] = sum;
        }
    }
}
// 7x7 Gaussian blur kernel, one thread per output pixel. The weight table
// is read from global memory on every access (compare blurSharedMemory).
// Out-of-image neighbours are skipped and the sum is normalized by the
// coefficients actually used, which handles the borders.
__global__ void blur(uchar3 *input, uchar3 *output, int* kernel, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    // Checking if we are still in the image
    if(tidx>=imageWidth || tidy>=imageHeight) return;
    int sum = 0;
    int c = 0;
    // moving on in the matrix
    for (int y = -3; y <= 3; y++) {
        for (int x = -3; x <= 3; x++) {
            int i = tidx + x;
            int j = tidy + y;
            //We won't take pixel that's outside the image.
            if (i < 0 || j < 0 || i >= imageWidth || j >= imageHeight) continue;
            int tid = imageWidth * j + i; // RowSize * j + i, get the position of our pixel
            //Aplying gray filter on the pixel
            unsigned char gray = (input[tid].x + input[tid].y + input[tid].z) / 3;
            //Applying Gaussian blur and stuff
            int coefficient = kernel[(y+3) * 7 + x + 3];
            sum = sum + gray * coefficient;
            c += coefficient;
        }
    }
    sum /= c;
    int posOut = tidy * imageWidth + tidx;
    output[posOut].y = output[posOut].x = output[posOut].z = sum;
}
// Same 7x7 blur as blur(), but the 49 kernel coefficients are first staged
// into shared memory by the first 49 threads of the block, so each block
// reads the weight table from global memory only once.
__global__ void blurSharedMemory(uchar3 *input, uchar3 *output, int *kernel, int imageWidth, int imageHeight) {
    // We need to know where we are rigth now
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    int tidy = threadIdx.y + blockIdx.y * blockDim.y;
    int tid = threadIdx.x + threadIdx.y * blockDim.x; // flat in-block index
    // Creating the shared matrice
    __shared__ int sharedKernel[49];
    if (tid < 49) sharedKernel[tid] = kernel[tid];
    // Barrier is deliberately placed BEFORE the bounds-check return below,
    // so every thread of the block reaches it (no divergent barrier).
    __syncthreads();
    // Checking if we are still in the image
    if(tidx>=imageWidth || tidy>=imageHeight) return;
    int sum = 0;
    int c = 0;
    // moving on in the matrix
    for (int y = -3; y <= 3; y++) {
        for (int x = -3; x <= 3; x++) {
            int i = tidx + x;
            int j = tidy + y;
            //We won't take pixel that's outside the image.
            if (i < 0 || j < 0 || i >= imageWidth || j >= imageHeight) continue;
            // NOTE: this inner tid shadows the flat in-block index above
            int tid = imageWidth * j + i; // RowSize * j + i, get the position of our pixel
            //Aplying gray filter on the pixel
            unsigned char gray = (input[tid].x + input[tid].y + input[tid].z) / 3;
            //Applying Gaussian blur and stuff
            int coefficient = sharedKernel[(y+3) * 7 + x + 3];
            sum = sum + gray * coefficient;
            c += coefficient;
        }
    }
    sum /= c;
    int posOut = tidy * imageWidth + tidx;
    output[posOut].y = output[posOut].x = output[posOut].z = sum;
}
// Labwork 5 (global-memory variant): 7x7 Gaussian blur on the GPU.
// BUGFIX: the grid size used ceil() on an integer division, so non-multiple-
// of-32 dimensions lost their right/bottom edges. Integer ceil-division
// launches enough blocks; the blur kernel's bounds check handles the overshoot.
void Labwork::labwork5_GPU() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devBlur;
    // Device copy of the 7x7 weight table
    int *devKernel;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    cudaMalloc(&devInput, pixelCount * sizeof(uchar3));
    cudaMalloc(&devBlur, pixelCount * sizeof(uchar3));
    // Allocate memory for the matrice
    cudaMalloc(&devKernel, sizeof(kernel));
    // Copy from the HostInput to the devInput (here, the image)
    cudaMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
    // Copy the kernel matrice into the device
    cudaMemcpy(devKernel, kernel,sizeof(kernel),cudaMemcpyHostToDevice);
    // Run the blur on the device
    blur<<<gridSize, blockSize>>>(devInput, devBlur, devKernel, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the blurred image)
    cudaMemcpy(outputImage, devBlur,pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
    // Don't forget to free
    cudaFree(devInput);
    cudaFree(devBlur);
    cudaFree(devKernel);
}
// Labwork 5 (shared-memory variant): same blur, but the kernel stages the
// weight table in shared memory (see blurSharedMemory).
// BUGFIX: same integer-ceil grid-size bug as labwork5_GPU — the right and
// bottom edges were dropped for dimensions not divisible by 32.
void Labwork::labwork5_GPUShared() {
    int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
                     0, 3, 13, 22, 13, 3, 0,
                     1, 13, 59, 97, 59,13, 1,
                     2, 22, 97,159, 97,22, 2,
                     1, 13, 59, 97, 59, 13,1,
                     0, 3, 13, 22, 13, 3,0,
                     0, 0, 1, 2, 1, 0,0 };
    int pixelCount = inputImage->width * inputImage->height;
    dim3 blockSize = dim3(32,32);
    dim3 gridSize = dim3((inputImage->width + blockSize.x - 1) / blockSize.x,
                         (inputImage->height + blockSize.y - 1) / blockSize.y);
    uchar3 *devInput;
    uchar3 *devBlur;
    // Device copy of the 7x7 weight table
    int *devKernel;
    // Initialize the output image
    outputImage = static_cast<char *>(malloc(pixelCount * 3));
    // Allocate the memory in the device for the Deviceinput and the Deviceouput
    cudaMalloc(&devInput, pixelCount * sizeof(uchar3));
    cudaMalloc(&devBlur, pixelCount * sizeof(uchar3));
    // Allocate memory for the matrice
    cudaMalloc(&devKernel, sizeof(kernel));
    // Copy from the HostInput to the devInput (here, the image)
    cudaMemcpy(devInput, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice);
    // Copy the kernel matrice into the device
    cudaMemcpy(devKernel, kernel,sizeof(kernel),cudaMemcpyHostToDevice);
    // Run the shared-memory blur on the device
    blurSharedMemory<<<gridSize, blockSize>>>(devInput, devBlur, devKernel, inputImage->width, inputImage->height);
    // Copy from the DeviceOutput to the HostOutput (here the blurred image)
    cudaMemcpy(outputImage, devBlur,pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
    // Don't forget to free
    cudaFree(devInput);
    cudaFree(devBlur);
    cudaFree(devKernel);
}
void Labwork::labwork6_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork7_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork8_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork9_GPU() {
    // TODO: not implemented yet
}
void Labwork::labwork10_GPU() {
    // TODO: not implemented yet
}
|
af13a0d0fdc0cbe8b21e431cca906fb22d466210.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <loops/type_conversions.h>
#include <types/types.h>
#include <helpers/DebugHelper.h>
namespace sd {
// Launches the device-side conversion kernel that casts N elements of type S
// at dx into type T at dz, on the stream packed into the extras array.
// NOTE(review): the stream is obtained by reinterpreting &extras[1] as a
// hipStream_t* — assumes the caller stores the stream handle at extras[1];
// confirm against the call sites.
template<typename S, typename T>
void TypeCast::convertGenericCuda(Nd4jPointer *extras, void *dx, Nd4jLong N, void *dz) {
    auto stream = reinterpret_cast<hipStream_t *>(&extras[1]);
    // fixed launch shape: 256 blocks x 1024 threads, 1024 bytes dynamic shared mem
    hipLaunchKernelGGL(( sd::convertKernel<S, T>), dim3(256), dim3(1024), 1024, *stream, dx, N, dz);
    sd::DebugHelper::checkErrorCode(stream, "convertGeneric(...) failed");
};
// Casts every element of x[0..N) to T and stores it into z.
// Uses a grid-stride loop so that any launch geometry covers the full range.
// Values are deliberately routed through float: despite being wasteful, it
// simplifies conversion to the narrower destination types.
// FIXME: get rid of the through-float conversion.
template<typename S, typename T>
__device__ void convertKernelGeneric(S *x, Nd4jLong N, T *z) {
    const Nd4jLong step = static_cast<Nd4jLong>(blockDim.x) * gridDim.x;
    for (Nd4jLong idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step) {
        z[idx] = static_cast<T>(static_cast<float>(x[idx]));
    }
};
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif
// Loads a 2*blockDim.x-element window of g_idata into shared memory s_data,
// applying CONFLICT_FREE_OFFSET padding to the shared-memory indices to avoid
// bank conflicts.  Returns (by reference) the unpadded shared indices ai/bi,
// their pad amounts bankOffsetA/B, and the global indices mem_ai/mem_bi, so
// the matching store routine can mirror the exact same layout.
// isNP2 ("is non-power-of-2"): when true, the second element is zero-padded
// once bi reaches n.
// NOTE(review): the first load (g_idata[mem_ai]) is unguarded in both paths,
// so the first blockDim.x elements of the window are assumed to be in range.
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
// global indices of this thread's two elements
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
// shared-memory indices before padding
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
// Writes the scanned shared-memory window back to g_odata, mirroring the
// padded layout produced by loadSharedChunkFromMem (same ai/bi/mem_ai/mem_bi
// and bank offsets must be passed through unchanged).
// The leading __syncthreads() ensures the in-place scan is complete before
// any thread writes results out.
// isNP2: when true the second store is guarded by bi < n; the first store is
// always performed (same in-range assumption as the load side).
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) { // compile-time decision
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
// Zeroes the last element of the block's shared-memory scan buffer -- the
// precondition for the down-sweep of an exclusive scan.  When storeSum is
// true, the block's total (which sits in that last slot after the up-sweep)
// is first saved to g_blockSums[blockIndex] so a later pass can add it back
// across blocks.  Only thread 0 does the work; callers rely on a subsequent
// barrier before reading s_data.
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0)
{
// index of the last element of the 2*blockDim.x window, with padding
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
// Up-sweep (reduce) phase of the work-efficient block scan: builds a binary
// tree of partial sums in place in shared memory s_data.
// Must be reached by every thread of the block (contains __syncthreads()
// inside a uniform loop).
// Returns the final stride value, which the down-sweep (scanRootToLeaves)
// takes as its starting stride.
__device__ unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
// each active thread folds the left child (ai) into the right child (bi)
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
// bank-conflict-avoidance padding; must match the load/store routines
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the work-efficient (Blelloch) scan: walks back down the
// reduction tree built by buildSum(), turning the partial sums in shared
// memory s_data into an exclusive prefix scan, in place.
// `stride` is the value returned by buildSum().  Must be reached by every
// thread of the block (contains __syncthreads() in a uniform loop).
__device__ void scanRootToLeaves(int *s_data, unsigned int stride) {
    unsigned int thid = threadIdx.x;
    // traverse down the tree building the scan in place
    for (int d = 1; d <= blockDim.x; d *= 2) {
        stride >>= 1;
        __syncthreads();
        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            // bank-conflict-avoidance padding; must match buildSum()
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // BUGFIX: the temporary was declared float for int data, which
            // silently rounds values above 2^24; use int to keep it exact.
            int t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
    }
}
// Scans one block's window (2*blockDim.x elements) in place in shared memory:
// up-sweep (buildSum), save/zero the last element (clearLastElement), then
// down-sweep (scanRootToLeaves).  When blockIndex is 0, blockIdx.x is used as
// the index into blockSums instead.
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(reinterpret_cast<int *>(s_data), g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
// "Uniform add" pass of a multi-block prefix scan: adds the scanned block sum
// uniforms[blockIdx.x + blockOffset] to every element of this block's
// 2*blockDim.x-wide window of g_data (two adds per thread).
// NOTE(review): the first add is unguarded; the first blockDim.x elements of
// each window are assumed to be within bounds -- confirm against the caller.
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) {
    // BUGFIX: was "__shared__ float uni" for int data -- int values above
    // 2^24 would be rounded when passed through float.  Keep it exact.
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];
    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
    __syncthreads(); // uni must be visible to the whole block before use
    // note two adds per thread
    g_data[address] += uni;
    // the 0/1 comparison result masks out the second add past the end of data
    g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
/*
* This kernel does prefix sum in parallel, to calculate offsets for each block
*/
template<typename T>
__device__ inline void encoderKernelP2Generic(void *dx, Nd4jLong n, void *dz) {
// TODO: to be remove
}
//////////////////////////////////////////////////////////////////////////
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*/
// Phase 1 of the threshold encoder: counts how many elements of x satisfy
// |x[tid]| >= threshold.
// One thread per element -- no grid-stride loop here; the grid must cover N
// (see the "grid will be huge" note above).
// Output layout in z (ints): z[0] = total count across all blocks
// (accumulated with atomicAdd), z[blockIdx.x + 1] = count for this block.
template<typename T>
__global__ static void execEncoderKernelP1(const void *dx, Nd4jLong N, void *dz, float threshold) {
auto x = reinterpret_cast<const T *> (dx);
auto z = reinterpret_cast<int *> (dz);
//basically, for phase One we want do calculation: how many eligible values we have, and which blocks will be holding data
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
// 1 if this thread's element passes the threshold test, 0 otherwise
// (out-of-range threads contribute 0)
int pass = tid < N && sd::math::nd4j_abs<T>(x[tid]) >= static_cast<T>(threshold) ? 1 : 0;
// block-wide barrier that simultaneously counts threads with pass != 0
int bp=__syncthreads_count(pass);
if (threadIdx.x == 0) {
// saving out per-block passes
z[blockIdx.x+1] = bp;
// saving out sum
atomicAdd(&z[0], bp);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void encoderKernelP1Generic(dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *dz, float threshold) {
hipLaunchKernelGGL(( execEncoderKernelP1<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, N, dz, threshold);
sd::DebugHelper::checkErrorCode(stream, "encoderP1(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT encoderKernelP1Generic, (dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *dz, float threshold), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*
* Based on: https://github.com/knotman90/cuStreamComp <-- efficient CUDA stream compaction algorithm
*/
template<typename T>
__global__ static void execEncoderKernelP3(void *dx, int *offsets, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<T *> (dx);
auto z = reinterpret_cast<int *> (dz);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int warpTotals[];
// fetch block offset only once
__shared__ float threshold;
__shared__ FloatBits fb;
__shared__ int bo;
__shared__ int limit;
if (threadIdx.x == 0) {
limit = z[0];
fb.i_ = z[2];
threshold = fb.f_;
bo = offsets[blockIdx.x];
}
__syncthreads();
// out-of-limit threads do not play here
auto value = tid < N ? x[tid] : (T) 0.f;
// out-of-limit threads just declare they have no changes
auto pred = tid >= N ? 0 : sd::math::nd4j_abs<T>(value) >= static_cast<T>(threshold) ? 1 : 0;
auto w_i = threadIdx.x / warpSize; // warp index (or, warp number) - index of the Warp within TOTAL_WARPS
auto t_i = threadIdx.x % warpSize; // thread index within a warp
unsigned int t_m = INT_MAX >> (warpSize - t_i - 1); //thread mask (ERROR IN THE PAPER minus one is required)
int b = __ballot_sync(t_m, pred); // balres = number whose ith bit isone if the ith's thread pred is true masked up to the current index in warp
auto t_u = __popc(b); // popc count the number of bit one. simply count the number predicated true BEFORE MY INDEX
if (t_i == warpSize - 1)
warpTotals[w_i] = t_u + pred;
__syncthreads();
int w_i_u = 0;
for (int j = 0; j <= 5; j++) {
unsigned int b_j = __ballot_sync(t_m, warpTotals[t_i] & pow2i(j)); //# of the ones in the j'th digit of the warp offsets
w_i_u += (__popc(b_j) << j);
}
// we just ignore all results coming from non-0 threads
if (w_i == 0 && t_i < blockDim.x / warpSize)
warpTotals[t_i] = w_i_u;
__syncthreads();
// pred is always false if we're out-of-limits
if (pred) {
int idx = t_u + warpTotals[w_i] + bo + 4;
if (idx < limit + 4) {
z[idx] = value > static_cast<T>(0.0f) ? tid + 1 : -(tid + 1);
x[tid] = value > static_cast<T>(0.0f) ? x[tid] - threshold : x[tid] + threshold;
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void encoderKernelP3Generic(dim3 &launchDims, hipStream_t *stream, void *dx, int *offsets, Nd4jLong N, void *dz) {
hipLaunchKernelGGL(( execEncoderKernelP3<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, offsets, N, dz);
sd::DebugHelper::checkErrorCode(stream, "encoderP3(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT encoderKernelP3Generic, (dim3 &launchDims, hipStream_t *stream, void *dx, int *offsets, Nd4jLong N, void *dz), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
/*
* This kernel handles decode from sparse threshold array, to dense array
*
* PLEASE NOTE: Z is expected to be memset to 0
*/
template<typename T>
__global__ static void execDecoderKernel(const void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<const int *> (dx);
auto z = reinterpret_cast<T *> (dz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float threshold;
__shared__ int limit;
__shared__ FloatBits fb;
if (threadIdx.x == 0) {
limit = x[0];
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
for (int e = tid; e < limit; e += blockDim.x * gridDim.x) {
int el = x[e+4];
int ael = sd::math::nd4j_abs<int>(el) - 1;
// TODO: investigate, if += would work better here, as in "decoded accumulation"
z[ael] += el > 0 ? threshold : -threshold;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void decoderKernelGeneric(dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *dz) {
hipLaunchKernelGGL(( execDecoderKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, N, dz);
sd::DebugHelper::checkErrorCode(stream, "execDecoder(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT decoderKernelGeneric, (dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *dz), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void execCudaEncodeBitmapKernel(void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
auto dx = reinterpret_cast<T *>(vdx);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
T off(0.0f);
__shared__ int counter;
__shared__ int *shmem;
__shared__ T *vals;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<int*>(mem);
vals = reinterpret_cast<T *>(shmem + blockDim.x);
counter = 0;
}
__syncthreads();
Nd4jLong loopRemainder = N % (blockDim.x * gridDim.x);
Nd4jLong loopLimit = N + (blockDim.x * gridDim.x - loopRemainder);
for (Nd4jLong i = tid; i < loopLimit; i += blockDim.x * gridDim.x) {
// all threads in block reading stuff
T val = i < N ? dx[i] : off;
T abs = sd::math::nd4j_abs<T>(val);
int byteId = i / 16 + 4;
int bitId = i % 16;
shmem[threadIdx.x] = 0;
vals[threadIdx.x] = val;
if (abs >= static_cast<T>(threshold) && i < N) {
shmem[threadIdx.x] = 1 << (bitId);
atomicAdd(&counter, 1);
if (val < static_cast<T>(0.0f)) {
shmem[threadIdx.x] |= 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold);
} else {
vals[threadIdx.x] -= static_cast<T>(threshold);
}
} else if (abs >= static_cast<T>(threshold) / static_cast<T>(2.0f) && val < static_cast<T>(0.0f) && i < N) {
atomicAdd(&counter, 1);
shmem[threadIdx.x] = 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold) / static_cast<T>(2.0f);
}
__syncthreads();
if (threadIdx.x % 16 == 0 && i < N) {
int byte = 0;
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
byte |= shmem[threadIdx.x + e];
}
dz[byteId] = byte;
}
__syncthreads();
if (i < N)
dx[i] = vals[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(scalar, counter);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void cudaEncodeBitmapGeneric(dim3 &launchDims, hipStream_t *stream, void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
hipLaunchKernelGGL(( execCudaEncodeBitmapKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vdx, N, dz, scalar, reductionBuffer, threshold);
sd::DebugHelper::checkErrorCode(stream, "encodeBitmap(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT cudaEncodeBitmapGeneric, (dim3 &launchDims, hipStream_t *stream, void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void execCudaDecodeBitmapKernel(const void *dx, Nd4jLong N, void *vdz) {
auto dz = static_cast<T*>(vdz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T *shmem;
__shared__ FloatBits fb;
__shared__ float threshold;
__shared__ const int *x;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<T*>(mem);
x = reinterpret_cast<const int *>(dx);
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
int lim = N / 16 + 5;
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
int byteId = i / 16 + 4;
// printf("I: [%i]; byteId: [%i]\n", i, byteId);
shmem[threadIdx.x] = dz[i];
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = x[byteId];
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
int bitId = (i + e) % 16;
bool hasBit = (byte & 1 << (bitId) ) != 0;
bool hasSign = (byte & 1 << (bitId + 16) ) != 0;
if (hasBit) {
if (hasSign)
shmem[threadIdx.x + bitId] -= threshold;
else
shmem[threadIdx.x + bitId] += threshold;
} else if (hasSign) {
shmem[threadIdx.x + bitId] -= threshold / 2;
}
}
}
__syncthreads();
dz[i] = shmem[threadIdx.x];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void cudaDecodeBitmapGeneric(dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *vdz) {
hipLaunchKernelGGL(( execCudaDecodeBitmapKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, N, vdz);
sd::DebugHelper::checkErrorCode(stream, "cudeDecodeBitmap(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT cudaDecodeBitmapGeneric, (dim3 &launchDims, hipStream_t *stream, const void *dx, Nd4jLong N, void *vdz), FLOAT_TYPES);
// Host-side launcher for the prescan kernel.  The dynamic shared-memory
// request is clamped to at least 16 KB before launching.
// Template flags select the kernel specialization: storeSum = save per-block
// totals into g_blockSums; isNP2 = handle a non-power-of-2 tail.
template <bool storeSum, bool isNP2>
__host__ void prescanLauncher(dim3 &blocks, dim3 &threads, int shmem, hipStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
//printf("Prescan grid: <%i/%i/%i>; threads: <%i/%i/%i>; shareMemSize: %i\n", blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z, shmem);
shmem = sd::math::nd4j_max<int>(shmem, 16384);
hipLaunchKernelGGL(( prescan<storeSum, isNP2>), dim3(blocks), dim3(threads), shmem, *stream, g_odata, g_idata, g_blockSums, n, blockIndex, baseIndex);
};
template <typename S, typename T>
__global__ void convertKernel(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<S *>(dx);
auto z = reinterpret_cast<T *>(dz);
sd::convertKernelGeneric(x, N, z);
}
#define LIBND4J_BOOLS_LOCAL \
(randomName0, 0), \
(randomName1, 1)
BUILD_DOUBLE_TEMPLATE(template void TypeCast::convertGenericCuda, (Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz), LIBND4J_TYPES_EXTENDED, LIBND4J_TYPES_EXTENDED);
BUILD_DOUBLE_TEMPLATE(template void prescanLauncher, (dim3 &blocks, dim3 &threads, int shmem, hipStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex), LIBND4J_BOOLS_LOCAL, LIBND4J_BOOLS_LOCAL);
#undef LIBND4J_BOOLS_LOCAL
} | af13a0d0fdc0cbe8b21e431cca906fb22d466210.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <loops/type_conversions.h>
#include <types/types.h>
#include <helpers/DebugHelper.h>
namespace sd {
template<typename S, typename T>
void TypeCast::convertGenericCuda(Nd4jPointer *extras, void *dx, Nd4jLong N, void *dz) {
auto stream = reinterpret_cast<cudaStream_t *>(&extras[1]);
sd::convertKernel<S, T><<<256, 1024, 1024, *stream>>>(dx, N, dz);
sd::DebugHelper::checkErrorCode(stream, "convertGeneric(...) failed");
};
// Casts every element of x[0..N) to T and stores it into z.
// Uses a grid-stride loop so that any launch geometry covers the full range.
// Values are deliberately routed through float: despite being wasteful, it
// simplifies conversion to the narrower destination types.
// FIXME: get rid of the through-float conversion.
template<typename S, typename T>
__device__ void convertKernelGeneric(S *x, Nd4jLong N, T *z) {
    const Nd4jLong step = static_cast<Nd4jLong>(blockDim.x) * gridDim.x;
    for (Nd4jLong idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step) {
        z[idx] = static_cast<T>(static_cast<float>(x[idx]));
    }
};
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index) temp[index]
#endif
template <bool isNP2>
__device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
// pad values beyond n with zeros
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) { // compile-time decision
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
__device__ void storeSharedChunkToMem(int* g_odata, int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
// write results to global memory
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) { // compile-time decision
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) { // compile-time decision
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
__device__ unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the work-efficient (Blelloch) scan: walks back down the
// reduction tree built by buildSum(), turning the partial sums in shared
// memory s_data into an exclusive prefix scan, in place.
// `stride` is the value returned by buildSum().  Must be reached by every
// thread of the block (contains __syncthreads() in a uniform loop).
__device__ void scanRootToLeaves(int *s_data, unsigned int stride) {
    unsigned int thid = threadIdx.x;
    // traverse down the tree building the scan in place
    for (int d = 1; d <= blockDim.x; d *= 2) {
        stride >>= 1;
        __syncthreads();
        if (thid < d) {
            int i = __mul24(__mul24(2, stride), thid);
            int ai = i + stride - 1;
            int bi = ai + stride;
            // bank-conflict-avoidance padding; must match buildSum()
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            // BUGFIX: the temporary was declared float for int data, which
            // silently rounds values above 2^24; use int to keep it exact.
            int t = s_data[ai];
            s_data[ai] = s_data[bi];
            s_data[bi] += t;
        }
    }
}
template <bool storeSum>
__device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(reinterpret_cast<int *>(s_data), g_idata, n, (baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
// "Uniform add" pass of a multi-block prefix scan: adds the scanned block sum
// uniforms[blockIdx.x + blockOffset] to every element of this block's
// 2*blockDim.x-wide window of g_data (two adds per thread).
// NOTE(review): the first add is unguarded; the first blockDim.x elements of
// each window are assumed to be within bounds -- confirm against the caller.
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) {
    // BUGFIX: was "__shared__ float uni" for int data -- int values above
    // 2^24 would be rounded when passed through float.  Keep it exact.
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];
    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
    __syncthreads(); // uni must be visible to the whole block before use
    // note two adds per thread
    g_data[address] += uni;
    // the 0/1 comparison result masks out the second add past the end of data
    g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
/*
* This kernel does prefix sum in parallel, to calculate offsets for each block
*/
template<typename T>
__device__ inline void encoderKernelP2Generic(void *dx, Nd4jLong n, void *dz) {
// TODO: to be remove
}
//////////////////////////////////////////////////////////////////////////
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*/
template<typename T>
__global__ static void execEncoderKernelP1(const void *dx, Nd4jLong N, void *dz, float threshold) {
auto x = reinterpret_cast<const T *> (dx);
auto z = reinterpret_cast<int *> (dz);
//basically, for phase One we want do calculation: how many eligible values we have, and which blocks will be holding data
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
int pass = tid < N && sd::math::nd4j_abs<T>(x[tid]) >= static_cast<T>(threshold) ? 1 : 0;
int bp=__syncthreads_count(pass);
if (threadIdx.x == 0) {
// saving out per-block passes
z[blockIdx.x+1] = bp;
// saving out sum
atomicAdd(&z[0], bp);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void encoderKernelP1Generic(dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *dz, float threshold) {
execEncoderKernelP1<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, N, dz, threshold);
sd::DebugHelper::checkErrorCode(stream, "encoderP1(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT encoderKernelP1Generic, (dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *dz, float threshold), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
/*
* PLEASE NOTE: This kernel doesn't allow loop for data. Basically: grid will be huge.
*
* Based on: https://github.com/knotman90/cuStreamComp <-- efficient CUDA stream compaction algorithm
*/
template<typename T>
__global__ static void execEncoderKernelP3(void *dx, int *offsets, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<T *> (dx);
auto z = reinterpret_cast<int *> (dz);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int warpTotals[];
// fetch block offset only once
__shared__ float threshold;
__shared__ FloatBits fb;
__shared__ int bo;
__shared__ int limit;
if (threadIdx.x == 0) {
limit = z[0];
fb.i_ = z[2];
threshold = fb.f_;
bo = offsets[blockIdx.x];
}
__syncthreads();
// out-of-limit threads do not play here
auto value = tid < N ? x[tid] : (T) 0.f;
// out-of-limit threads just declare they have no changes
auto pred = tid >= N ? 0 : sd::math::nd4j_abs<T>(value) >= static_cast<T>(threshold) ? 1 : 0;
auto w_i = threadIdx.x / warpSize; // warp index (or, warp number) - index of the Warp within TOTAL_WARPS
auto t_i = threadIdx.x % warpSize; // thread index within a warp
unsigned int t_m = INT_MAX >> (warpSize - t_i - 1); //thread mask (ERROR IN THE PAPER minus one is required)
int b = __ballot_sync(t_m, pred); // balres = number whose ith bit isone if the ith's thread pred is true masked up to the current index in warp
auto t_u = __popc(b); // popc count the number of bit one. simply count the number predicated true BEFORE MY INDEX
if (t_i == warpSize - 1)
warpTotals[w_i] = t_u + pred;
__syncthreads();
int w_i_u = 0;
for (int j = 0; j <= 5; j++) {
unsigned int b_j = __ballot_sync(t_m, warpTotals[t_i] & pow2i(j)); //# of the ones in the j'th digit of the warp offsets
w_i_u += (__popc(b_j) << j);
}
// we just ignore all results coming from non-0 threads
if (w_i == 0 && t_i < blockDim.x / warpSize)
warpTotals[t_i] = w_i_u;
__syncthreads();
// pred is always false if we're out-of-limits
if (pred) {
int idx = t_u + warpTotals[w_i] + bo + 4;
if (idx < limit + 4) {
z[idx] = value > static_cast<T>(0.0f) ? tid + 1 : -(tid + 1);
x[tid] = value > static_cast<T>(0.0f) ? x[tid] - threshold : x[tid] + threshold;
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void encoderKernelP3Generic(dim3 &launchDims, cudaStream_t *stream, void *dx, int *offsets, Nd4jLong N, void *dz) {
execEncoderKernelP3<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, offsets, N, dz);
sd::DebugHelper::checkErrorCode(stream, "encoderP3(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT encoderKernelP3Generic, (dim3 &launchDims, cudaStream_t *stream, void *dx, int *offsets, Nd4jLong N, void *dz), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
/*
* This kernel handles decode from sparse threshold array, to dense array
*
* PLEASE NOTE: Z is expected to be memset to 0
*/
template<typename T>
__global__ static void execDecoderKernel(const void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<const int *> (dx);
auto z = reinterpret_cast<T *> (dz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float threshold;
__shared__ int limit;
__shared__ FloatBits fb;
if (threadIdx.x == 0) {
limit = x[0];
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
for (int e = tid; e < limit; e += blockDim.x * gridDim.x) {
int el = x[e+4];
int ael = sd::math::nd4j_abs<int>(el) - 1;
// TODO: investigate, if += would work better here, as in "decoded accumulation"
z[ael] += el > 0 ? threshold : -threshold;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void decoderKernelGeneric(dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *dz) {
execDecoderKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, N, dz);
sd::DebugHelper::checkErrorCode(stream, "execDecoder(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT decoderKernelGeneric, (dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *dz), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void execCudaEncodeBitmapKernel(void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
auto dx = reinterpret_cast<T *>(vdx);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
T off(0.0f);
__shared__ int counter;
__shared__ int *shmem;
__shared__ T *vals;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<int*>(mem);
vals = reinterpret_cast<T *>(shmem + blockDim.x);
counter = 0;
}
__syncthreads();
Nd4jLong loopRemainder = N % (blockDim.x * gridDim.x);
Nd4jLong loopLimit = N + (blockDim.x * gridDim.x - loopRemainder);
for (Nd4jLong i = tid; i < loopLimit; i += blockDim.x * gridDim.x) {
// all threads in block reading stuff
T val = i < N ? dx[i] : off;
T abs = sd::math::nd4j_abs<T>(val);
int byteId = i / 16 + 4;
int bitId = i % 16;
shmem[threadIdx.x] = 0;
vals[threadIdx.x] = val;
if (abs >= static_cast<T>(threshold) && i < N) {
shmem[threadIdx.x] = 1 << (bitId);
atomicAdd(&counter, 1);
if (val < static_cast<T>(0.0f)) {
shmem[threadIdx.x] |= 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold);
} else {
vals[threadIdx.x] -= static_cast<T>(threshold);
}
} else if (abs >= static_cast<T>(threshold) / static_cast<T>(2.0f) && val < static_cast<T>(0.0f) && i < N) {
atomicAdd(&counter, 1);
shmem[threadIdx.x] = 1 << (bitId + 16);
vals[threadIdx.x] += static_cast<T>(threshold) / static_cast<T>(2.0f);
}
__syncthreads();
if (threadIdx.x % 16 == 0 && i < N) {
int byte = 0;
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
byte |= shmem[threadIdx.x + e];
}
dz[byteId] = byte;
}
__syncthreads();
if (i < N)
dx[i] = vals[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(scalar, counter);
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void cudaEncodeBitmapGeneric(dim3 &launchDims, cudaStream_t *stream, void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold) {
execCudaEncodeBitmapKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vdx, N, dz, scalar, reductionBuffer, threshold);
sd::DebugHelper::checkErrorCode(stream, "encodeBitmap(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT cudaEncodeBitmapGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vdx, Nd4jLong N, int *dz, int *scalar, int *reductionBuffer, float threshold), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void execCudaDecodeBitmapKernel(const void *dx, Nd4jLong N, void *vdz) {
auto dz = static_cast<T*>(vdz);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T *shmem;
__shared__ FloatBits fb;
__shared__ float threshold;
__shared__ const int *x;
if (threadIdx.x == 0){
extern __shared__ char mem[];
shmem = reinterpret_cast<T*>(mem);
x = reinterpret_cast<const int *>(dx);
fb.i_ = x[2];
threshold = fb.f_;
}
__syncthreads();
int lim = N / 16 + 5;
for (int i = tid; i < N; i += blockDim.x * gridDim.x) {
int byteId = i / 16 + 4;
// printf("I: [%i]; byteId: [%i]\n", i, byteId);
shmem[threadIdx.x] = dz[i];
__syncthreads();
if (threadIdx.x % 16 == 0) {
int byte = x[byteId];
for (int e = 0; e < 16; e++) {
if (i + e >= N)
continue;
int bitId = (i + e) % 16;
bool hasBit = (byte & 1 << (bitId) ) != 0;
bool hasSign = (byte & 1 << (bitId + 16) ) != 0;
if (hasBit) {
if (hasSign)
shmem[threadIdx.x + bitId] -= threshold;
else
shmem[threadIdx.x + bitId] += threshold;
} else if (hasSign) {
shmem[threadIdx.x + bitId] -= threshold / 2;
}
}
}
__syncthreads();
dz[i] = shmem[threadIdx.x];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void cudaDecodeBitmapGeneric(dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *vdz) {
execCudaDecodeBitmapKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, N, vdz);
sd::DebugHelper::checkErrorCode(stream, "cudeDecodeBitmap(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT cudaDecodeBitmapGeneric, (dim3 &launchDims, cudaStream_t *stream, const void *dx, Nd4jLong N, void *vdz), FLOAT_TYPES);
template <bool storeSum, bool isNP2>
__host__ void prescanLauncher(dim3 &blocks, dim3 &threads, int shmem, cudaStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) {
//printf("Prescan grid: <%i/%i/%i>; threads: <%i/%i/%i>; shareMemSize: %i\n", blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z, shmem);
shmem = sd::math::nd4j_max<int>(shmem, 16384);
prescan<storeSum, isNP2><<<blocks, threads, shmem, *stream>>>(g_odata, g_idata, g_blockSums, n, blockIndex, baseIndex);
};
template <typename S, typename T>
__global__ void convertKernel(void *dx, Nd4jLong N, void *dz) {
auto x = reinterpret_cast<S *>(dx);
auto z = reinterpret_cast<T *>(dz);
sd::convertKernelGeneric(x, N, z);
}
#define LIBND4J_BOOLS_LOCAL \
(randomName0, 0), \
(randomName1, 1)
BUILD_DOUBLE_TEMPLATE(template void TypeCast::convertGenericCuda, (Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz), LIBND4J_TYPES_EXTENDED, LIBND4J_TYPES_EXTENDED);
BUILD_DOUBLE_TEMPLATE(template void prescanLauncher, (dim3 &blocks, dim3 &threads, int shmem, cudaStream_t *stream, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex), LIBND4J_BOOLS_LOCAL, LIBND4J_BOOLS_LOCAL);
#undef LIBND4J_BOOLS_LOCAL
} |
d488c5697e31db14e5c079f56e3add56be3d4c8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GPU_BUFFER1 gpu_buffers[gpu_ind][buffer_ind1]
#define GPU_BUFFER2 gpu_buffers[gpu_ind][buffer_ind2]
#define GPU_BUFFER_OUT gpu_buffers[gpu_ind][out_buffer_ind]
#define BUFFER_SZ1 buffer_sz[gpu_ind][buffer_ind1]
#define BUFFER_SZ2 buffer_sz[gpu_ind][buffer_ind2]
#define OUT_BUFFER_SZ buffer_sz[gpu_ind][out_buffer_ind]
#define DATA_OUT(A, B) data_out[(A)*buffer2_dim2 + (B)]
#define DATA_OUT_IND(A, B) ((A)*buffer2_dim2 + (B))
#define DATA1(A, B) data1[(A)*buffer1_dim2 + (B)]
#define DATA1_IND(A, B) ((A)*buffer1_dim2 + (B))
#define DATA2(A, B) data2[(A)*buffer2_dim2 + (B)]
#define DATA2_IND(A, B) ((A)*buffer2_dim2 + (B))
#define DATA_OUT_SZ (buffer1_dim1*buffer2_dim2*sizeof(DATA_TYPE))
#define DATA_OUT_NUMEL (buffer1_dim1*buffer2_dim2)
__global__ void dot_kernel(float * data1, float * data2, float * data_out, int buffer1_dim1, int buffer1_dim2, int buffer2_dim1,
int buffer2_dim2, int data_out_numel, int increment){
int ind = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
int min_duplicates_per_thread = (int)floor((double)data_out_numel / THREAD_CAPACITY);
int n_additional_duplicates = data_out_numel % THREAD_CAPACITY;
int n_duplicates = min_duplicates_per_thread;
if(ind < n_additional_duplicates) n_duplicates++;
unsigned ind_g, data1_ind, data2_ind;
for(int dup = 0; dup < n_duplicates; dup++){
ind_g = dup*THREAD_CAPACITY + ind;
#ifdef DEBUG
if(ind_g >= data_out_numel) assert(0); // out of bounds
#endif
// we are computing the output data_out[i,j]... determine start indices of data1 & data2 for summation:
data1_ind = buffer1_dim2 * (ind_g / buffer2_dim2); // i = ind_g / buffer2_dim2; data1_ind = DATA1_IND(i,0);
data2_ind = ind_g % buffer2_dim2; // j = ind_g % buffer2_dim2; DATA2_IND(0,j);
if(increment != 1)
data_out[ind_g] = 0;
for(int k = 0; k < buffer1_dim2; k++){
data_out[ind_g] += data1[data1_ind] * data2[data2_ind];
data1_ind ++;
data2_ind += buffer2_dim2;
}
}
}
static PyObject *dot(PyObject *self, PyObject *args){
hipError_t err;
int gpu_ind, buffer_ind1, buffer_ind2, out_buffer_ind, increment;
PyObject *buffer_shape1, *buffer_shape2;
if (!PyArg_ParseTuple(args, "iO!iO!iii", &buffer_ind1, &PyTuple_Type, &buffer_shape1, &buffer_ind2,
&PyTuple_Type, &buffer_shape2, &out_buffer_ind, &increment, &gpu_ind))
return NULL;
if(buffer_ind1 >= N_BUFFERS || buffer_ind1 < 0 ||
out_buffer_ind >= N_BUFFERS || out_buffer_ind < 0 ||
buffer_ind2 >= N_BUFFERS || buffer_ind2 < 0){
printf("buffer index incorrect, set_buffers().\n");
return NULL;
}
if(gpu_ind >= N_GPUS || gpu_ind < 0){
printf("gpu index incorrect, set_buffers().\n");
return NULL;
}
if(increment != 0 && increment != 1){
printf("increment value not set to zero or one\n");
return NULL;
}
// get sizes
long buffer1_dim1 = PyLong_AsLong(PyTuple_GetItem(buffer_shape1,0));
long buffer1_dim2 = PyLong_AsLong(PyTuple_GetItem(buffer_shape1,1));
long buffer2_dim1 = PyLong_AsLong(PyTuple_GetItem(buffer_shape2,0));
long buffer2_dim2 = PyLong_AsLong(PyTuple_GetItem(buffer_shape2,1));
if(buffer1_dim2 != buffer2_dim1){
printf("inner dot product dimensions do not match, (%li, %li), (%li, %li)\n", buffer1_dim1, buffer1_dim2, buffer2_dim1, buffer2_dim2);
return NULL;
}
if(buffer1_dim1*buffer1_dim2*sizeof(DATA_TYPE) != BUFFER_SZ1 || buffer2_dim1*buffer2_dim2*sizeof(DATA_TYPE) != BUFFER_SZ2){
printf("specified input sizes do not equal to stored gpu buffer. dot_cpu()\n");
printf("%li %li %li %li", buffer1_dim1*buffer1_dim2*sizeof(DATA_TYPE), BUFFER_SZ1, buffer2_dim1*buffer2_dim2*sizeof(DATA_TYPE), BUFFER_SZ2);
return NULL;
}
if(OUT_BUFFER_SZ == 0){ // init output buffer
err = hipMalloc((void**) &GPU_BUFFER_OUT, DATA_OUT_SZ); MALLOC_ERR_CHECK
OUT_BUFFER_SZ = DATA_OUT_SZ;
}else if(DATA_OUT_SZ != OUT_BUFFER_SZ){ // does the output size match the buffer size?
printf("output buffer size not allocated to correct size\n");
return NULL;
}
hipSetDevice(gpu_ind); CHECK_CUDA_ERR
// determine number of blocks
int n_blocks = (int)ceil((double)DATA_OUT_NUMEL/MAX_THREADS_PER_BLOCK);
if(n_blocks >= MAX_BLOCKS) n_blocks = MAX_BLOCKS;
// run kernel
hipLaunchKernelGGL(( dot_kernel) , dim3(n_blocks), dim3(MAX_THREADS_PER_BLOCK) , 0, 0, GPU_BUFFER1, GPU_BUFFER2, GPU_BUFFER_OUT, buffer1_dim1, buffer1_dim2,
buffer2_dim1, buffer2_dim2, DATA_OUT_NUMEL, increment);
hipSetDevice(0); CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
| d488c5697e31db14e5c079f56e3add56be3d4c8e.cu | #define GPU_BUFFER1 gpu_buffers[gpu_ind][buffer_ind1]
#define GPU_BUFFER2 gpu_buffers[gpu_ind][buffer_ind2]
#define GPU_BUFFER_OUT gpu_buffers[gpu_ind][out_buffer_ind]
#define BUFFER_SZ1 buffer_sz[gpu_ind][buffer_ind1]
#define BUFFER_SZ2 buffer_sz[gpu_ind][buffer_ind2]
#define OUT_BUFFER_SZ buffer_sz[gpu_ind][out_buffer_ind]
#define DATA_OUT(A, B) data_out[(A)*buffer2_dim2 + (B)]
#define DATA_OUT_IND(A, B) ((A)*buffer2_dim2 + (B))
#define DATA1(A, B) data1[(A)*buffer1_dim2 + (B)]
#define DATA1_IND(A, B) ((A)*buffer1_dim2 + (B))
#define DATA2(A, B) data2[(A)*buffer2_dim2 + (B)]
#define DATA2_IND(A, B) ((A)*buffer2_dim2 + (B))
#define DATA_OUT_SZ (buffer1_dim1*buffer2_dim2*sizeof(DATA_TYPE))
#define DATA_OUT_NUMEL (buffer1_dim1*buffer2_dim2)
__global__ void dot_kernel(float * data1, float * data2, float * data_out, int buffer1_dim1, int buffer1_dim2, int buffer2_dim1,
int buffer2_dim2, int data_out_numel, int increment){
int ind = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
int min_duplicates_per_thread = (int)floor((double)data_out_numel / THREAD_CAPACITY);
int n_additional_duplicates = data_out_numel % THREAD_CAPACITY;
int n_duplicates = min_duplicates_per_thread;
if(ind < n_additional_duplicates) n_duplicates++;
unsigned ind_g, data1_ind, data2_ind;
for(int dup = 0; dup < n_duplicates; dup++){
ind_g = dup*THREAD_CAPACITY + ind;
#ifdef DEBUG
if(ind_g >= data_out_numel) assert(0); // out of bounds
#endif
// we are computing the output data_out[i,j]... determine start indices of data1 & data2 for summation:
data1_ind = buffer1_dim2 * (ind_g / buffer2_dim2); // i = ind_g / buffer2_dim2; data1_ind = DATA1_IND(i,0);
data2_ind = ind_g % buffer2_dim2; // j = ind_g % buffer2_dim2; DATA2_IND(0,j);
if(increment != 1)
data_out[ind_g] = 0;
for(int k = 0; k < buffer1_dim2; k++){
data_out[ind_g] += data1[data1_ind] * data2[data2_ind];
data1_ind ++;
data2_ind += buffer2_dim2;
}
}
}
static PyObject *dot(PyObject *self, PyObject *args){
cudaError_t err;
int gpu_ind, buffer_ind1, buffer_ind2, out_buffer_ind, increment;
PyObject *buffer_shape1, *buffer_shape2;
if (!PyArg_ParseTuple(args, "iO!iO!iii", &buffer_ind1, &PyTuple_Type, &buffer_shape1, &buffer_ind2,
&PyTuple_Type, &buffer_shape2, &out_buffer_ind, &increment, &gpu_ind))
return NULL;
if(buffer_ind1 >= N_BUFFERS || buffer_ind1 < 0 ||
out_buffer_ind >= N_BUFFERS || out_buffer_ind < 0 ||
buffer_ind2 >= N_BUFFERS || buffer_ind2 < 0){
printf("buffer index incorrect, set_buffers().\n");
return NULL;
}
if(gpu_ind >= N_GPUS || gpu_ind < 0){
printf("gpu index incorrect, set_buffers().\n");
return NULL;
}
if(increment != 0 && increment != 1){
printf("increment value not set to zero or one\n");
return NULL;
}
// get sizes
long buffer1_dim1 = PyLong_AsLong(PyTuple_GetItem(buffer_shape1,0));
long buffer1_dim2 = PyLong_AsLong(PyTuple_GetItem(buffer_shape1,1));
long buffer2_dim1 = PyLong_AsLong(PyTuple_GetItem(buffer_shape2,0));
long buffer2_dim2 = PyLong_AsLong(PyTuple_GetItem(buffer_shape2,1));
if(buffer1_dim2 != buffer2_dim1){
printf("inner dot product dimensions do not match, (%li, %li), (%li, %li)\n", buffer1_dim1, buffer1_dim2, buffer2_dim1, buffer2_dim2);
return NULL;
}
if(buffer1_dim1*buffer1_dim2*sizeof(DATA_TYPE) != BUFFER_SZ1 || buffer2_dim1*buffer2_dim2*sizeof(DATA_TYPE) != BUFFER_SZ2){
printf("specified input sizes do not equal to stored gpu buffer. dot_cpu()\n");
printf("%li %li %li %li", buffer1_dim1*buffer1_dim2*sizeof(DATA_TYPE), BUFFER_SZ1, buffer2_dim1*buffer2_dim2*sizeof(DATA_TYPE), BUFFER_SZ2);
return NULL;
}
if(OUT_BUFFER_SZ == 0){ // init output buffer
err = cudaMalloc((void**) &GPU_BUFFER_OUT, DATA_OUT_SZ); MALLOC_ERR_CHECK
OUT_BUFFER_SZ = DATA_OUT_SZ;
}else if(DATA_OUT_SZ != OUT_BUFFER_SZ){ // does the output size match the buffer size?
printf("output buffer size not allocated to correct size\n");
return NULL;
}
cudaSetDevice(gpu_ind); CHECK_CUDA_ERR
// determine number of blocks
int n_blocks = (int)ceil((double)DATA_OUT_NUMEL/MAX_THREADS_PER_BLOCK);
if(n_blocks >= MAX_BLOCKS) n_blocks = MAX_BLOCKS;
// run kernel
dot_kernel <<< n_blocks, MAX_THREADS_PER_BLOCK >>> (GPU_BUFFER1, GPU_BUFFER2, GPU_BUFFER_OUT, buffer1_dim1, buffer1_dim2,
buffer2_dim1, buffer2_dim2, DATA_OUT_NUMEL, increment);
cudaSetDevice(0); CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
|
9264702c21c579ea8fc65b6e734684749426d60f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 40.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = - scale *(float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale *(float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex arig(0.0,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (2.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex ft(14.0,0.0);
hipComplex sev(7.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
// This is the baby attempt to make a quasiconformal transformation
// I have some reasoning a few days ago I need to append to my math diary
/*for(v=0;v<30;v++)
{
the way of playing the game has a way of changing the rules: James Gleick's Chaos
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;*/
cue = (thess(cue/ft,fixon) + moux*thess(cue*vro/ft,fixon) )*(thess(cue/sev,fixon) + mouy*thess(vro*cue/sev,fixon) + thess(arig,fixon))-( thess(cue/ft,fixon) + moux*thess(vro*cue/ft,fixon)+thess(cue/sev,fixon) + mouy*thess(vro*sev/ft,fixon) + thess(arig,fixon));
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
} | 9264702c21c579ea8fc65b6e734684749426d60f.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 40.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = - scale *(float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale *(float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex arig(0.0,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (2.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex ft(14.0,0.0);
cuComplex sev(7.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
// This is the baby attempt to make a quasiconformal transformation
// I have some reasoning a few days ago I need to append to my math diary
/*for(v=0;v<30;v++)
{
the way of playing the game has a way of changing the rules: James Gleick's Chaos
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;*/
cue = (thess(cue/ft,fixon) + moux*thess(cue*vro/ft,fixon) )*(thess(cue/sev,fixon) + mouy*thess(vro*cue/sev,fixon) + thess(arig,fixon))-( thess(cue/ft,fixon) + moux*thess(vro*cue/ft,fixon)+thess(cue/sev,fixon) + mouy*thess(vro*sev/ft,fixon) + thess(arig,fixon));
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
} |
c64f35fe9abb8abb9be8d99e38896bb6876d7f55.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx/transforms/transpose.h"
#include <cassert>
#include <cstdio>
#include <math.h>
#include <memory>
using namespace matx;
#define FFT_TYPE HIPFFT_C2C
/** Create a spectrogram of a signal
*
* This example creates a set of data representing signal power versus frequency
* and time. Traditionally the signal power is plotted as the Z dimension using
* color, and time/frequency are the X/Y axes. The time taken to run the
* spectrogram is computed, and a simple scatter plot is output. This version
* does uses CUDA graphs, and records the workload on the second iteration of
* the intialization loop. The first iteration is used only for plan caching and
* should not include any graph recording.
*/
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
hipGraph_t graph;
hipGraphExec_t instance;
hipStream_t stream;
hipStreamCreate(&stream);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float fs = 10000;
index_t N = 100000;
float amp = static_cast<float>(2 * sqrt(2));
index_t nperseg = 256;
index_t nfft = 256;
index_t noverlap = nperseg / 8;
index_t nstep = nperseg - noverlap;
constexpr uint32_t num_iterations = 20;
float time_ms;
std::array<index_t, 1> num_samps{N};
std::array<index_t, 1> half_win{nfft / 2 + 1};
std::array<index_t, 1> s_time_shape{(N - noverlap) / nstep};
tensor_t<float, 1> time({N});
tensor_t<float, 1> modulation({N});
tensor_t<float, 1> carrier({N});
tensor_t<float, 1> noise({N});
tensor_t<float, 1> x({N});
auto freqs = make_tensor<float>(half_win);
tensor_t<complex, 2> fftStackedMatrix(
{(N - noverlap) / nstep, nfft / 2 + 1});
tensor_t<float, 1> s_time({(N - noverlap) / nstep});
// Set up all static buffers
// time = np.arange(N) / float(fs)
(time = linspace<0>(num_samps, 0.0f, static_cast<float>(N) - 1.0f) / fs)
.run(stream);
// mod = 500 * np.cos(2*np.pi*0.25*time)
(modulation = 500 * cos(2 * M_PI * 0.25 * time)).run(stream);
// carrier = amp * np.sin(2*np.pi*3e3*time + modulation)
(carrier = amp * sin(2 * M_PI * 3000 * time + modulation)).run(stream);
// noise = 0.01 * fs / 2 * np.random.randn(time.shape)
(noise = sqrt(0.01 * fs / 2) * random<float>({N}, NORMAL)).run(stream);
// noise *= np.exp(-time/5)
(noise = noise * exp(-1.0f * time / 5.0f)).run(stream);
// x = carrier + noise
(x = carrier + noise).run(stream);
for (uint32_t i = 0; i < 2; i++) {
// Record graph on second loop to get rid of plan caching in the graph
if (i == 1) {
hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
}
// DFT Sample Frequencies (rfftfreq)
(freqs = (1.0 / (static_cast<float>(nfft) * 1 / fs)) *
linspace<0>(half_win, 0.0f, static_cast<float>(nfft) / 2.0f))
.run(stream);
// Create overlapping matrix of segments.
auto stackedMatrix = overlap(x, {nperseg}, {nstep});
// FFT along rows
(fftStackedMatrix = fft(stackedMatrix)).run(stream);
// Absolute value
(fftStackedMatrix = conj(fftStackedMatrix) * fftStackedMatrix)
.run(stream);
// Get real part and transpose
auto Sxx = fftStackedMatrix.RealView().Permute({1, 0});
// Spectral time axis
(s_time = linspace<0>(s_time_shape, static_cast<float>(nperseg) / 2.0f,
static_cast<float>(N - nperseg) / 2.0f + 1) /
fs)
.run(stream);
if (i == 1) {
hipStreamEndCapture(stream, &graph);
hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
#if MATX_ENABLE_VIZ
// Generate a spectrogram visualization using a contour plot
viz::contour(time, freqs, Sxx);
#else
printf("Not outputting plot since visualizations disabled\n");
#endif
}
}
hipStreamSynchronize(0);
// Time graph execution of same kernels
hipEventRecord(start, stream);
for (uint32_t i = 0; i < 10; i++) {
hipGraphLaunch(instance, stream);
}
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&time_ms, start, stop);
printf("Spectrogram Time With Graphs = %.2fus per iteration\n",
time_ms * 1e3 / num_iterations);
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream);
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
| c64f35fe9abb8abb9be8d99e38896bb6876d7f55.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx/transforms/transpose.h"
#include <cassert>
#include <cstdio>
#include <math.h>
#include <memory>
using namespace matx;
#define FFT_TYPE CUFFT_C2C
/** Create a spectrogram of a signal
*
* This example creates a set of data representing signal power versus frequency
* and time. Traditionally the signal power is plotted as the Z dimension using
* color, and time/frequency are the X/Y axes. The time taken to run the
* spectrogram is computed, and a simple scatter plot is output. This version
* does uses CUDA graphs, and records the workload on the second iteration of
* the intialization loop. The first iteration is used only for plan caching and
* should not include any graph recording.
*/
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
cudaGraph_t graph;
cudaGraphExec_t instance;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float fs = 10000;
index_t N = 100000;
float amp = static_cast<float>(2 * sqrt(2));
index_t nperseg = 256;
index_t nfft = 256;
index_t noverlap = nperseg / 8;
index_t nstep = nperseg - noverlap;
constexpr uint32_t num_iterations = 20;
float time_ms;
std::array<index_t, 1> num_samps{N};
std::array<index_t, 1> half_win{nfft / 2 + 1};
std::array<index_t, 1> s_time_shape{(N - noverlap) / nstep};
tensor_t<float, 1> time({N});
tensor_t<float, 1> modulation({N});
tensor_t<float, 1> carrier({N});
tensor_t<float, 1> noise({N});
tensor_t<float, 1> x({N});
auto freqs = make_tensor<float>(half_win);
tensor_t<complex, 2> fftStackedMatrix(
{(N - noverlap) / nstep, nfft / 2 + 1});
tensor_t<float, 1> s_time({(N - noverlap) / nstep});
// Set up all static buffers
// time = np.arange(N) / float(fs)
(time = linspace<0>(num_samps, 0.0f, static_cast<float>(N) - 1.0f) / fs)
.run(stream);
// mod = 500 * np.cos(2*np.pi*0.25*time)
(modulation = 500 * cos(2 * M_PI * 0.25 * time)).run(stream);
// carrier = amp * np.sin(2*np.pi*3e3*time + modulation)
(carrier = amp * sin(2 * M_PI * 3000 * time + modulation)).run(stream);
// noise = 0.01 * fs / 2 * np.random.randn(time.shape)
(noise = sqrt(0.01 * fs / 2) * random<float>({N}, NORMAL)).run(stream);
// noise *= np.exp(-time/5)
(noise = noise * exp(-1.0f * time / 5.0f)).run(stream);
// x = carrier + noise
(x = carrier + noise).run(stream);
for (uint32_t i = 0; i < 2; i++) {
// Record graph on second loop to get rid of plan caching in the graph
if (i == 1) {
cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
}
// DFT Sample Frequencies (rfftfreq)
(freqs = (1.0 / (static_cast<float>(nfft) * 1 / fs)) *
linspace<0>(half_win, 0.0f, static_cast<float>(nfft) / 2.0f))
.run(stream);
// Create overlapping matrix of segments.
auto stackedMatrix = overlap(x, {nperseg}, {nstep});
// FFT along rows
(fftStackedMatrix = fft(stackedMatrix)).run(stream);
// Absolute value
(fftStackedMatrix = conj(fftStackedMatrix) * fftStackedMatrix)
.run(stream);
// Get real part and transpose
auto Sxx = fftStackedMatrix.RealView().Permute({1, 0});
// Spectral time axis
(s_time = linspace<0>(s_time_shape, static_cast<float>(nperseg) / 2.0f,
static_cast<float>(N - nperseg) / 2.0f + 1) /
fs)
.run(stream);
if (i == 1) {
cudaStreamEndCapture(stream, &graph);
cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
#if MATX_ENABLE_VIZ
// Generate a spectrogram visualization using a contour plot
viz::contour(time, freqs, Sxx);
#else
printf("Not outputting plot since visualizations disabled\n");
#endif
}
}
cudaStreamSynchronize(0);
// Time graph execution of same kernels
cudaEventRecord(start, stream);
for (uint32_t i = 0; i < 10; i++) {
cudaGraphLaunch(instance, stream);
}
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&time_ms, start, stop);
printf("Spectrogram Time With Graphs = %.2fus per iteration\n",
time_ms * 1e3 / num_iterations);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream);
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
|
b21d7114849cc9064ae17eb839e0b9e21be98686.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "hip/hip_runtime_api.h"
#include <hipcub/hipcub.hpp>
#include <functional>
#include <stdint.h>
#include <stdio.h>
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
// CUB's bug workaround:
// To work properly for large batch size CUB segmented sort needs ridiculous
// workspace alignment.
const uintptr_t ALIGNMENT = 1 << 20;
// IOU
template <typename TFloat>
__device__ __host__ inline float IoU(const Bbox<TFloat>& a, const Bbox<TFloat>& b)
{
TFloat left = max(a.xmin, b.xmin), right = min(a.xmax, b.xmax);
TFloat top = max(a.ymin, b.ymin), bottom = min(a.ymax, b.ymax);
TFloat width = max((TFloat)(right - left + (TFloat) 1.0), (TFloat) 0.0);
TFloat height = max((TFloat)(bottom - top + (TFloat) 1.0), (TFloat) 0.0);
TFloat interS = width * height;
TFloat Sa = (a.xmax - a.xmin + (TFloat) 1) * (a.ymax - a.ymin + (TFloat) 1);
TFloat Sb = (b.xmax - b.xmin + (TFloat) 1) * (b.ymax - b.ymin + (TFloat) 1);
return (float) interS / (float) (Sa + Sb - interS);
}
// NMS KERNEL FOR SMALL BATCH SIZE
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel1(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ preNmsProposals,
T_ROIS* __restrict__ afterNmsProposals,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
__shared__ bool kept_boxes[TSIZE * DIM];
int kept = 0;
int batch_offset = blockIdx.x * propSize;
int max_box_idx = batch_offset + preNmsTopN;
int batch_offset_out = blockIdx.x * afterNmsTopN;
int flag_idx[TSIZE];
int boxes_idx[TSIZE];
Bbox<T_PROPOSALS> cur_boxes[TSIZE];
// initialize kept_boxes
#pragma unroll
for (int i = 0; i < TSIZE; i++)
{
boxes_idx[i] = threadIdx.x + batch_offset + DIM * i;
flag_idx[i] = threadIdx.x + DIM * i;
if (boxes_idx[i] < max_box_idx)
{
cur_boxes[i] = preNmsProposals[boxes_idx[i]];
kept_boxes[flag_idx[i]] = true;
}
else
{
kept_boxes[flag_idx[i]] = false;
boxes_idx[i] = -1.0f;
flag_idx[i] = -1.0f;
}
}
int ref_box_idx = 0 + batch_offset;
// remove the overlapped boxes
while ((kept < afterNmsTopN) && (ref_box_idx < max_box_idx))
{
Bbox<T_PROPOSALS> ref_box;
ref_box = preNmsProposals[ref_box_idx];
#pragma unroll
for (int i = 0; i < TSIZE; i++)
{
if (boxes_idx[i] > ref_box_idx)
{
if (IoU(ref_box, cur_boxes[i]) > nmsThres)
{
kept_boxes[flag_idx[i]] = false;
}
}
else if (boxes_idx[i] == ref_box_idx)
{
afterNmsProposals[(batch_offset_out + kept) * 4 + 0] = ref_box.xmin;
afterNmsProposals[(batch_offset_out + kept) * 4 + 1] = ref_box.ymin;
afterNmsProposals[(batch_offset_out + kept) * 4 + 2] = ref_box.xmax;
afterNmsProposals[(batch_offset_out + kept) * 4 + 3] = ref_box.ymax;
}
}
__syncthreads();
do
{
ref_box_idx++;
} while (!kept_boxes[ref_box_idx - batch_offset] && ref_box_idx < max_box_idx);
kept++;
}
}
// NMS KERNEL FOR LARGE BATCH SIZE
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel2(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ proposals,
T_ROIS* __restrict__ filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
Bbox<T_PROPOSALS> const* cProposals = proposals + blockIdx.x * propSize;
Bbox<T_PROPOSALS> t[TSIZE];
uint64_t del = 0;
for (int i = 0; i < TSIZE; i++)
{
if (i < TSIZE - 1 || i * DIM + threadIdx.x < preNmsTopN)
{
t[i] = cProposals[i * DIM + threadIdx.x];
}
}
__shared__ Bbox<T_PROPOSALS> last;
__shared__ bool kept;
__shared__ int foundBatch;
if (threadIdx.x == 0)
foundBatch = 0;
for (int i = 0; i < TSIZE; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = i * DIM;
int index = offset + j;
if (index >= preNmsTopN)
break;
__syncthreads();
if (threadIdx.x == j)
{
kept = 0 == (del & ((uint64_t) 1 << i));
last = t[i];
if (kept)
{
int cnt = blockIdx.x * afterNmsTopN + foundBatch;
filtered[cnt * 4 + 0] = t[i].xmin;
filtered[cnt * 4 + 1] = t[i].ymin;
filtered[cnt * 4 + 2] = t[i].xmax;
filtered[cnt * 4 + 3] = t[i].ymax;
foundBatch++;
}
}
__syncthreads();
if (foundBatch == afterNmsTopN)
{
return;
}
if (kept)
{
Bbox<T_PROPOSALS> test = last;
for (int k = 0; k < TSIZE; k++)
{
if (index < k * DIM + threadIdx.x
&& IoU<T_PROPOSALS>(test, t[k]) > nmsThres)
{
del |= (uint64_t) 1 << k;
}
}
}
}
}
}
// NMS LAUNCH
template <typename T_PROPOSALS, DLayout_t L_PROPOSALS, typename T_ROIS>
pluginStatus_t nmsLaunch(hipStream_t stream,
const int batch,
const int propSize,
void* proposals,
void* filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
const int blockSize = 1024;
#define P1(tsize) nmsKernel1<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
#define P2(tsize) nmsKernel2<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
void (*kernel[64])(int, Bbox<T_PROPOSALS> const*, T_ROIS*, int, float, int) = {
P1(1), P1(2), P1(3), P1(4), P1(5), P1(6), P1(7), P1(8), P1(9), P1(10), P1(11), P1(12), P2(13), P2(14), P2(15), P2(16),
P2(17), P2(18), P2(19), P2(20), P2(21), P2(22), P2(23), P2(24), P2(25), P2(26), P2(27), P2(28), P2(29), P2(30), P2(31), P2(32),
P2(33), P2(34), P2(35), P2(36), P2(37), P2(38), P2(39), P2(40), P2(41), P2(42), P2(43), P2(44), P2(45), P2(46), P2(47), P2(48),
P2(49), P2(50), P2(51), P2(52), P2(53), P2(54), P2(55), P2(56), P2(57), P2(58), P2(59), P2(60), P2(61), P2(62), P2(63), P2(64)};
ASSERT_PARAM(preNmsTopN < 64 * blockSize);
CSC(hipMemsetAsync(filtered, 0, batch * afterNmsTopN * 4 * sizeof(T_ROIS), stream), STATUS_FAILURE);
kernel[(preNmsTopN + blockSize - 1) / blockSize -hipLaunchKernelGGL(( 1)], dim3(batch), dim3(blockSize), 0, stream, propSize,
(Bbox<T_PROPOSALS>*) proposals,
(T_ROIS*) filtered,
preNmsTopN,
nmsThres,
afterNmsTopN);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// SET OFFSET
// Works for up to 2Gi elements (cub's limitation)!
__global__ void setOffset(int stride, int size, int* output)
{
// One block, because batch size shouldn't be too large.
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
output[i] = i * stride;
}
}
// NMS GPU
template <typename T_SCORES, typename T_ROIS>
pluginStatus_t nmsGpu(hipStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
//const float minBoxSize,
//const float * imInfo,
void* fgScores,
const void* proposals,
void* workspace,
void* rois)
{
int8_t* vworkspace = alignPtr((int8_t*) workspace, ALIGNMENT);
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
pluginStatus_t error;
DEBUG_PRINTF("&&&& [NMS] DISCARD\n");
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
// Generate offsets
int* offsets = (int*) vworkspace;
hipLaunchKernelGGL(( setOffset), dim3(1), dim3(1024), 0, stream, R, N + 1, offsets);
CSC(hipGetLastError(), STATUS_FAILURE);
vworkspace = vworkspace + N + 1;
vworkspace = alignPtr(vworkspace, ALIGNMENT);
// Sort (batched)
std::size_t tempStorageBytes = 0;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
NULL, tempStorageBytes,
(T_SCORES*) fgScores, (T_SCORES*) fgScores,
(Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposals,
N * R, N,
offsets, offsets + 1, 0, 8 * sizeof(T_SCORES), stream);
CSC(hipGetLastError(), STATUS_FAILURE);
T_SCORES* scoresOut = (T_SCORES*) vworkspace;
vworkspace = (int8_t*) (scoresOut + N * R);
vworkspace = alignPtr(vworkspace, ALIGNMENT);
Bbox<T_ROIS>* proposalsOut = (Bbox<T_ROIS>*) vworkspace;
vworkspace = (int8_t*) (proposalsOut + N * R);
vworkspace = alignPtr(vworkspace, ALIGNMENT);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
vworkspace, tempStorageBytes,
(T_SCORES*) fgScores, (T_SCORES*) scoresOut,
(Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposalsOut,
N * R, N,
offsets, offsets + 1,
0, 8 * sizeof(T_SCORES), stream);
CSC(hipGetLastError(), STATUS_FAILURE);
DEBUG_PRINTF("&&&& [NMS] POST CUB\n");
DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposalsOut, N * R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(scoresOut, N * R * sizeof(float)));
error = nmsLaunch<T_ROIS, NC4HW, T_ROIS>(stream,
N,
R,
proposalsOut,
rois,
preNmsTop,
iouThreshold,
nmsMaxOut);
DEBUG_PRINTF("&&&& [NMS] POST LAUNCH\n");
DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(rois, N * nmsMaxOut * 4 * sizeof(float)));
if (error != STATUS_SUCCESS)
{
return error;
}
return STATUS_SUCCESS;
}
// NMS LAUNCH CONFIG
typedef pluginStatus_t (*nmsFun)(hipStream_t,
const int, // N
const int, // R
const int, // preNmsTop
const int, // nmsMaxOut
const float, // iouThreshold
//const float, // minBoxSize
//const float *, // imInfo
void*, // fgScores
const void*, // proposals,
void*, // workspace,
void*); // rois
struct nmsLaunchConfig
{
DataType t_fgScores;
DLayout_t l_fgScores;
DataType t_proposals;
DLayout_t l_proposals;
DataType t_rois;
nmsFun function;
nmsLaunchConfig(DataType t_fgScores,
DLayout_t l_fgScores,
DataType t_proposals,
DLayout_t l_proposals,
DataType t_rois,
nmsFun function)
: t_fgScores(t_fgScores)
, l_fgScores(l_fgScores)
, t_proposals(t_proposals)
, l_proposals(l_proposals)
, t_rois(t_rois)
, function(function)
{
}
nmsLaunchConfig(DataType t_fgScores,
DLayout_t l_fgScores,
DataType t_proposals,
DLayout_t l_proposals,
DataType t_rois)
: t_fgScores(t_fgScores)
, l_fgScores(l_fgScores)
, t_proposals(t_proposals)
, l_proposals(l_proposals)
, t_rois(t_rois)
{
}
bool operator==(const nmsLaunchConfig& other)
{
return (t_fgScores == other.t_fgScores) && (l_fgScores == other.l_fgScores) && (t_proposals == other.t_proposals) && (l_proposals == other.l_proposals) && (t_rois == other.t_rois);
}
};
static std::vector<nmsLaunchConfig> nmsLCVec;
#define FLOAT32 nvinfer1::DataType::kFLOAT
bool initNmsLC()
{
nmsLCVec.reserve(1);
nmsLCVec.push_back(nmsLaunchConfig(FLOAT32, NCHW,
FLOAT32, NC4HW,
FLOAT32,
nmsGpu<float, float>));
return true;
}
static bool initializedNmsLC = initNmsLC();
// NMS
pluginStatus_t nms(hipStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
const DataType t_fgScores,
const DLayout_t l_fgScores,
void* fgScores,
const DataType t_proposals,
const DLayout_t l_proposals,
const void* proposals,
void* workspace,
const DataType t_rois,
void* rois)
{
if (!initializedNmsLC)
return STATUS_NOT_INITIALIZED;
nmsLaunchConfig lc(t_fgScores, l_fgScores, t_proposals, l_proposals, t_rois);
for (unsigned i = 0; i < nmsLCVec.size(); i++)
{
if (nmsLCVec[i] == lc)
{
DEBUG_PRINTF("NMS KERNEL %d\n", i);
return nmsLCVec[i].function(stream,
N, R,
preNmsTop,
nmsMaxOut,
iouThreshold,
fgScores,
proposals,
workspace,
rois);
}
}
return STATUS_BAD_PARAM;
}
| b21d7114849cc9064ae17eb839e0b9e21be98686.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "cuda_runtime_api.h"
#include <cub/cub.cuh>
#include <functional>
#include <stdint.h>
#include <stdio.h>
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
// CUB's bug workaround:
// To work properly for large batch size CUB segmented sort needs ridiculous
// workspace alignment.
const uintptr_t ALIGNMENT = 1 << 20;
// IOU
template <typename TFloat>
__device__ __host__ inline float IoU(const Bbox<TFloat>& a, const Bbox<TFloat>& b)
{
TFloat left = max(a.xmin, b.xmin), right = min(a.xmax, b.xmax);
TFloat top = max(a.ymin, b.ymin), bottom = min(a.ymax, b.ymax);
TFloat width = max((TFloat)(right - left + (TFloat) 1.0), (TFloat) 0.0);
TFloat height = max((TFloat)(bottom - top + (TFloat) 1.0), (TFloat) 0.0);
TFloat interS = width * height;
TFloat Sa = (a.xmax - a.xmin + (TFloat) 1) * (a.ymax - a.ymin + (TFloat) 1);
TFloat Sb = (b.xmax - b.xmin + (TFloat) 1) * (b.ymax - b.ymin + (TFloat) 1);
return (float) interS / (float) (Sa + Sb - interS);
}
// NMS KERNEL FOR SMALL BATCH SIZE
// NMS for small batches: one block per batch item, DIM threads, each thread
// caching TSIZE candidate boxes in registers (requires preNmsTopN <= DIM * TSIZE).
// Proposals are assumed sorted by descending score; up to afterNmsTopN
// surviving boxes per item are written as [xmin, ymin, xmax, ymax].
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel1(const int propSize,
    Bbox<T_PROPOSALS> const* __restrict__ preNmsProposals,
    T_ROIS* __restrict__ afterNmsProposals,
    const int preNmsTopN,
    const float nmsThres,
    const int afterNmsTopN)
{
    // One survival flag per candidate slot of this batch item.
    __shared__ bool kept_boxes[TSIZE * DIM];
    int kept = 0;
    int batch_offset = blockIdx.x * propSize;
    int max_box_idx = batch_offset + preNmsTopN;
    int batch_offset_out = blockIdx.x * afterNmsTopN;
    int flag_idx[TSIZE];
    int boxes_idx[TSIZE];
    Bbox<T_PROPOSALS> cur_boxes[TSIZE];
    // Load this thread's boxes and initialize the kept flags.
#pragma unroll
    for (int i = 0; i < TSIZE; i++)
    {
        boxes_idx[i] = threadIdx.x + batch_offset + DIM * i;
        flag_idx[i] = threadIdx.x + DIM * i;
        if (boxes_idx[i] < max_box_idx)
        {
            cur_boxes[i] = preNmsProposals[boxes_idx[i]];
            kept_boxes[flag_idx[i]] = true;
        }
        else
        {
            kept_boxes[flag_idx[i]] = false;
            // Mark unused slots with an invalid index. (These are ints; the
            // original assigned the float literal -1.0f.)
            boxes_idx[i] = -1;
            flag_idx[i] = -1;
        }
    }
    int ref_box_idx = 0 + batch_offset;
    // Walk the score-sorted surviving boxes; each accepted reference box
    // suppresses every later box whose IoU exceeds the threshold.
    while ((kept < afterNmsTopN) && (ref_box_idx < max_box_idx))
    {
        Bbox<T_PROPOSALS> ref_box;
        ref_box = preNmsProposals[ref_box_idx];
#pragma unroll
        for (int i = 0; i < TSIZE; i++)
        {
            if (boxes_idx[i] > ref_box_idx)
            {
                if (IoU(ref_box, cur_boxes[i]) > nmsThres)
                {
                    kept_boxes[flag_idx[i]] = false;
                }
            }
            else if (boxes_idx[i] == ref_box_idx)
            {
                // The thread that owns the reference box emits it.
                afterNmsProposals[(batch_offset_out + kept) * 4 + 0] = ref_box.xmin;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 1] = ref_box.ymin;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 2] = ref_box.xmax;
                afterNmsProposals[(batch_offset_out + kept) * 4 + 3] = ref_box.ymax;
            }
        }
        __syncthreads();
        // Advance to the next box still marked as kept. Bound is checked
        // first: the original evaluated kept_boxes[] before the range test,
        // reading one slot past the shared array when preNmsTopN == TSIZE * DIM.
        do
        {
            ref_box_idx++;
        } while (ref_box_idx < max_box_idx && !kept_boxes[ref_box_idx - batch_offset]);
        kept++;
    }
}
// NMS KERNEL FOR LARGE BATCH SIZE
// NMS for large batches: one block per batch item; each thread caches up to
// TSIZE boxes in registers and tracks suppression of its own boxes in a
// 64-bit mask `del` (so TSIZE <= 64). Boxes are visited in score order.
template <typename T_PROPOSALS, typename T_ROIS, int DIM, int TSIZE>
__global__ __launch_bounds__(DIM) void nmsKernel2(const int propSize,
Bbox<T_PROPOSALS> const* __restrict__ proposals,
T_ROIS* __restrict__ filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
Bbox<T_PROPOSALS> const* cProposals = proposals + blockIdx.x * propSize;
// Per-thread box cache; bit i of `del` set == t[i] has been suppressed.
Bbox<T_PROPOSALS> t[TSIZE];
uint64_t del = 0;
// NOTE(review): only the last register slot is bounds-checked against
// preNmsTopN — assumes slots 0..TSIZE-2 are always in range (i.e.
// preNmsTopN > (TSIZE - 1) * DIM); confirm against the dispatch in nmsLaunch.
for (int i = 0; i < TSIZE; i++)
{
if (i < TSIZE - 1 || i * DIM + threadIdx.x < preNmsTopN)
{
t[i] = cProposals[i * DIM + threadIdx.x];
}
}
// Block-wide broadcast slots: the currently accepted box and its status.
__shared__ Bbox<T_PROPOSALS> last;
__shared__ bool kept;
__shared__ int foundBatch;
if (threadIdx.x == 0)
foundBatch = 0;
// Iterate boxes in global (score) order: index = i * DIM + j.
for (int i = 0; i < TSIZE; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = i * DIM;
int index = offset + j;
if (index >= preNmsTopN)
break;
__syncthreads();
// The owning thread decides whether box `index` survived and, if so,
// appends it to the output and publishes it for overlap testing.
if (threadIdx.x == j)
{
kept = 0 == (del & ((uint64_t) 1 << i));
last = t[i];
if (kept)
{
int cnt = blockIdx.x * afterNmsTopN + foundBatch;
filtered[cnt * 4 + 0] = t[i].xmin;
filtered[cnt * 4 + 1] = t[i].ymin;
filtered[cnt * 4 + 2] = t[i].xmax;
filtered[cnt * 4 + 3] = t[i].ymax;
foundBatch++;
}
}
__syncthreads();
// Whole block exits together once enough boxes were emitted.
if (foundBatch == afterNmsTopN)
{
return;
}
// Every thread suppresses its own strictly-later boxes that overlap
// the accepted box above the threshold.
if (kept)
{
Bbox<T_PROPOSALS> test = last;
for (int k = 0; k < TSIZE; k++)
{
if (index < k * DIM + threadIdx.x
&& IoU<T_PROPOSALS>(test, t[k]) > nmsThres)
{
del |= (uint64_t) 1 << k;
}
}
}
}
}
}
// NMS LAUNCH
// Dispatches to nmsKernel1/nmsKernel2 specialized on the per-thread slot
// count TSIZE = ceil(preNmsTopN / blockSize); one block per batch item.
// The output buffer is zeroed first so short results come out zero-padded.
template <typename T_PROPOSALS, DLayout_t L_PROPOSALS, typename T_ROIS>
pluginStatus_t nmsLaunch(cudaStream_t stream,
const int batch,
const int propSize,
void* proposals,
void* filtered,
const int preNmsTopN,
const float nmsThres,
const int afterNmsTopN)
{
const int blockSize = 1024;
// Kernel table indexed by TSIZE - 1: shared-flag kernel for TSIZE <= 12,
// bitmask kernel for 13..64.
#define P1(tsize) nmsKernel1<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
#define P2(tsize) nmsKernel2<T_PROPOSALS, T_ROIS, blockSize, (tsize)>
void (*kernel[64])(int, Bbox<T_PROPOSALS> const*, T_ROIS*, int, float, int) = {
P1(1), P1(2), P1(3), P1(4), P1(5), P1(6), P1(7), P1(8), P1(9), P1(10), P1(11), P1(12), P2(13), P2(14), P2(15), P2(16),
P2(17), P2(18), P2(19), P2(20), P2(21), P2(22), P2(23), P2(24), P2(25), P2(26), P2(27), P2(28), P2(29), P2(30), P2(31), P2(32),
P2(33), P2(34), P2(35), P2(36), P2(37), P2(38), P2(39), P2(40), P2(41), P2(42), P2(43), P2(44), P2(45), P2(46), P2(47), P2(48),
P2(49), P2(50), P2(51), P2(52), P2(53), P2(54), P2(55), P2(56), P2(57), P2(58), P2(59), P2(60), P2(61), P2(62), P2(63), P2(64)};
ASSERT_PARAM(preNmsTopN < 64 * blockSize);
// Zero the whole output so entries beyond the kept count are well-defined.
CSC(cudaMemsetAsync(filtered, 0, batch * afterNmsTopN * 4 * sizeof(T_ROIS), stream), STATUS_FAILURE);
kernel[(preNmsTopN + blockSize - 1) / blockSize - 1]<<<batch, blockSize, 0, stream>>>(propSize,
(Bbox<T_PROPOSALS>*) proposals,
(T_ROIS*) filtered,
preNmsTopN,
nmsThres,
afterNmsTopN);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// SET OFFSET
// Works for up to 2Gi elements (cub's limitation)!
// Fills output[i] = i * stride for i in [0, size).
// Intended for a single-block launch; threads stride over the range.
__global__ void setOffset(int stride, int size, int* output)
{
    int idx = threadIdx.x;
    while (idx < size)
    {
        output[idx] = idx * stride;
        idx += blockDim.x;
    }
}
// NMS GPU
// Batched NMS driver: sorts each batch item's R proposals by foreground
// score (descending) with CUB's segmented radix sort, then runs NMS on the
// sorted boxes. N = batch size, R = proposals per image; rois receives
// N * nmsMaxOut boxes. All temporaries live in `workspace`.
template <typename T_SCORES, typename T_ROIS>
pluginStatus_t nmsGpu(cudaStream_t stream,
    const int N,
    const int R,
    const int preNmsTop,
    const int nmsMaxOut,
    const float iouThreshold,
    //const float minBoxSize,
    //const float * imInfo,
    void* fgScores,
    const void* proposals,
    void* workspace,
    void* rois)
{
    // Every carve-out below is re-aligned to ALIGNMENT (1 MiB) to work
    // around CUB segmented sort's workspace alignment requirement.
    int8_t* vworkspace = alignPtr((int8_t*) workspace, ALIGNMENT);
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
    pluginStatus_t error;
    DEBUG_PRINTF("&&&& [NMS] DISCARD\n");
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposals, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(fgScores, N * R * sizeof(float)));
    // Segment offsets for the batched sort: offsets[i] = i * R, N + 1 entries.
    int* offsets = (int*) vworkspace;
    setOffset<<<1, 1024, 0, stream>>>(R, N + 1, offsets);
    CSC(cudaGetLastError(), STATUS_FAILURE);
    // Reserve the offsets array: (N + 1) ints. The original advanced by only
    // N + 1 *bytes*; the 1 MiB re-alignment masked that for realistic N, but
    // account for the full byte size to be correct for any N.
    vworkspace = vworkspace + (N + 1) * sizeof(int);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    // First call with a NULL workspace only queries tempStorageBytes.
    std::size_t tempStorageBytes = 0;
    cub::DeviceSegmentedRadixSort::SortPairsDescending(
        NULL, tempStorageBytes,
        (T_SCORES*) fgScores, (T_SCORES*) fgScores,
        (Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposals,
        N * R, N,
        offsets, offsets + 1, 0, 8 * sizeof(T_SCORES), stream);
    CSC(cudaGetLastError(), STATUS_FAILURE);
    // Carve out the sorted outputs; the remainder of the workspace becomes
    // CUB's temporary storage.
    T_SCORES* scoresOut = (T_SCORES*) vworkspace;
    vworkspace = (int8_t*) (scoresOut + N * R);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    Bbox<T_ROIS>* proposalsOut = (Bbox<T_ROIS>*) vworkspace;
    vworkspace = (int8_t*) (proposalsOut + N * R);
    vworkspace = alignPtr(vworkspace, ALIGNMENT);
    cub::DeviceSegmentedRadixSort::SortPairsDescending(
        vworkspace, tempStorageBytes,
        (T_SCORES*) fgScores, (T_SCORES*) scoresOut,
        (Bbox<T_ROIS>*) proposals, (Bbox<T_ROIS>*) proposalsOut,
        N * R, N,
        offsets, offsets + 1,
        0, 8 * sizeof(T_SCORES), stream);
    CSC(cudaGetLastError(), STATUS_FAILURE);
    DEBUG_PRINTF("&&&& [NMS] POST CUB\n");
    DEBUG_PRINTF("&&&& [NMS] PROPOSALS %u\n", hash(proposalsOut, N * R * 4 * sizeof(float)));
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(scoresOut, N * R * sizeof(float)));
    // NMS on the score-sorted proposals; results go straight to rois.
    error = nmsLaunch<T_ROIS, NC4HW, T_ROIS>(stream,
        N,
        R,
        proposalsOut,
        rois,
        preNmsTop,
        iouThreshold,
        nmsMaxOut);
    DEBUG_PRINTF("&&&& [NMS] POST LAUNCH\n");
    DEBUG_PRINTF("&&&& [NMS] SCORES %u\n", hash(rois, N * nmsMaxOut * 4 * sizeof(float)));
    if (error != STATUS_SUCCESS)
    {
        return error;
    }
    return STATUS_SUCCESS;
}
// NMS LAUNCH CONFIG
// Function-pointer signature implemented by every registered NMS kernel.
typedef pluginStatus_t (*nmsFun)(cudaStream_t,
    const int, // N
    const int, // R
    const int, // preNmsTop
    const int, // nmsMaxOut
    const float, // iouThreshold
    //const float, // minBoxSize
    //const float *, // imInfo
    void*, // fgScores
    const void*, // proposals,
    void*, // workspace,
    void*); // rois
// Binds a (score dtype/layout, proposal dtype/layout, roi dtype) key to the
// kernel that handles it; also usable, without a function, as a lookup key.
struct nmsLaunchConfig
{
    DataType t_fgScores;
    DLayout_t l_fgScores;
    DataType t_proposals;
    DLayout_t l_proposals;
    DataType t_rois;
    nmsFun function;
    nmsLaunchConfig(DataType t_fgScores,
        DLayout_t l_fgScores,
        DataType t_proposals,
        DLayout_t l_proposals,
        DataType t_rois,
        nmsFun function)
        : t_fgScores(t_fgScores)
        , l_fgScores(l_fgScores)
        , t_proposals(t_proposals)
        , l_proposals(l_proposals)
        , t_rois(t_rois)
        , function(function)
    {
    }
    // Key-only constructor: explicitly null the function pointer (it was
    // previously left uninitialized).
    nmsLaunchConfig(DataType t_fgScores,
        DLayout_t l_fgScores,
        DataType t_proposals,
        DLayout_t l_proposals,
        DataType t_rois)
        : t_fgScores(t_fgScores)
        , l_fgScores(l_fgScores)
        , t_proposals(t_proposals)
        , l_proposals(l_proposals)
        , t_rois(t_rois)
        , function(nullptr)
    {
    }
    // Equality compares only the type/layout key, never the function pointer.
    // Marked const so it can be called on const configs.
    bool operator==(const nmsLaunchConfig& other) const
    {
        return (t_fgScores == other.t_fgScores) && (l_fgScores == other.l_fgScores) && (t_proposals == other.t_proposals) && (l_proposals == other.l_proposals) && (t_rois == other.t_rois);
    }
};
static std::vector<nmsLaunchConfig> nmsLCVec;
#define FLOAT32 nvinfer1::DataType::kFLOAT
// Registers the single supported combination: float scores (NCHW),
// float proposals (NC4HW), float rois. Returns true so it can drive a
// static one-time initializer.
bool initNmsLC()
{
    nmsLCVec.reserve(1);
    nmsLCVec.emplace_back(FLOAT32, NCHW,
        FLOAT32, NC4HW,
        FLOAT32,
        nmsGpu<float, float>);
    return true;
}
static bool initializedNmsLC = initNmsLC();
// NMS
// Public entry point: looks up the launch configuration matching the
// requested dtypes/layouts and forwards to its kernel. Returns
// STATUS_BAD_PARAM when no registered combination matches.
pluginStatus_t nms(cudaStream_t stream,
const int N,
const int R,
const int preNmsTop,
const int nmsMaxOut,
const float iouThreshold,
const DataType t_fgScores,
const DLayout_t l_fgScores,
void* fgScores,
const DataType t_proposals,
const DLayout_t l_proposals,
const void* proposals,
void* workspace,
const DataType t_rois,
void* rois)
{
if (!initializedNmsLC)
return STATUS_NOT_INITIALIZED;
// Key-only config: compares type/layout fields, not the function pointer.
nmsLaunchConfig lc(t_fgScores, l_fgScores, t_proposals, l_proposals, t_rois);
for (unsigned i = 0; i < nmsLCVec.size(); i++)
{
if (nmsLCVec[i] == lc)
{
DEBUG_PRINTF("NMS KERNEL %d\n", i);
return nmsLCVec[i].function(stream,
N, R,
preNmsTop,
nmsMaxOut,
iouThreshold,
fgScores,
proposals,
workspace,
rois);
}
}
// No registered (dtype, layout) combination matched.
return STATUS_BAD_PARAM;
}
|
4c8af5d8f41c2b3eaea85d71dd2e069d6e6203c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
namespace at { namespace native {
// Out-of-place unary foreach: applies Op element-wise to every tensor in
// `tensors` and returns freshly allocated results (one per input).
// Depth 2 = {inputs, results}; results are written at res_arg_index 1.
template <typename scalar_t, template<class> class Op> std::vector<Tensor> foreach_unary_op(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
// Compute in the op-math type (e.g. float for Half) for precision.
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<2>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>());
return tensor_lists[1];
}
// In-place unary foreach: applies Op element-wise, overwriting each input
// tensor. Depth 1 = {inputs}; results are written back at res_arg_index 0.
template <typename scalar_t, template<class> class Op> void foreach_unary_op_(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
// Compute in the op-math type (e.g. float for Half) for precision.
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>());
}
// Dispatch helpers: each pair instantiates foreach_unary_op (out-of-place)
// and foreach_unary_op_ (in-place) for the set of scalar types its name
// describes. The dtype of tensors[0] drives the dispatch in every case.
// Floating + complex + Half.
template <template<class> class Op>
std::vector<Tensor> floating_complex_half(TensorList tensors) {
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_complex_half_(TensorList tensors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// All types + complex + Half + BFloat16 + Bool.
template <template<class> class Op>
std::vector<Tensor> all_types_complex_bfloat16_half_bool(TensorList tensors) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
// NOTE(review): this in-place variant reuses the "foreach_unary_op_cuda"
// dispatch name (no trailing underscore), unlike its siblings.
template <template<class> class Op>
void all_types_complex_bfloat16_half_bool_(TensorList tensors) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + complex + Half + BFloat16.
template <template<class> class Op>
std::vector<Tensor> floating_complex_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_complex_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// All types + complex + Half + BFloat16 (no Bool).
template <template<class> class Op>
std::vector<Tensor> all_types_half_complex_bfloat16(TensorList tensors) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void all_types_half_complex_bfloat16_(TensorList tensors) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + Half. (The two variants spell the same dispatch set with
// different macros: _AND(Half) vs _AND_HALF.)
template <template<class> class Op>
std::vector<Tensor> floating_half(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_half_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + Half + BFloat16.
template <template<class> class Op>
std::vector<Tensor> floating_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// makes the functor
// STD_FUNCTOR(op_name, functor_name): defines a device functor whose
// operator() forwards to std::op_name. (The trailing backslash on the
// closing "};" line splices the following comment line into the macro; the
// comment is then stripped, so the definition still ends there.)
#define STD_FUNCTOR(op_name, functor_name) \
template<typename T> \
struct functor_name { \
__device__ T operator()(T t) const { return std::op_name(t); } \
}; \
// given a functor and a "dispatch function", creates the outplace and inplace operations
// OP_CUSTOM_FUNCTOR(function, op_name, functor_name): emits both foreach
// entry points. The fast multi-tensor path is used only when the foreach
// restrictions hold and no tensor is integral/bool; otherwise it falls back
// to the slow per-tensor implementation.
#define OP_CUSTOM_FUNCTOR(function, op_name, functor_name) \
std::vector<Tensor> foreach_tensor_##op_name##_cuda(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow(tensors); \
} \
return function<functor_name>(tensors); \
} \
void foreach_tensor_##op_name##_cuda_(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow_(tensors); \
} \
\
function##_<functor_name>(tensors); \
}
// creates a functor, outplace version, and inplace version.
// OP(function, op_name, functor_name): emits the std::-based functor plus
// both foreach entry points in one go.
// The last line must NOT end in a backslash-continuation: as written, the
// continuation spliced the first OP(...) invocation below into this macro's
// body, leaving a literal non-expandable OP(...) token in every expansion.
#define OP(function, op_name, functor_name) \
STD_FUNCTOR(op_name, functor_name); \
OP_CUSTOM_FUNCTOR(function, op_name, functor_name);
OP(floating_half_bfloat16, erfc, Erfc);
OP(floating_half_bfloat16, expm1, Expm1);
OP(floating_half, lgamma, Lgamma);
OP(floating_half_bfloat16, trunc, Truncf);
OP(floating_half_bfloat16, floor, Floor);
OP(floating_half_bfloat16, ceil, Ceil);
OP(floating_complex_half_bfloat16, acos, Acos);
OP(floating_complex_half_bfloat16, asin, Asin);
OP(floating_complex_half_bfloat16, atan, Atan);
OP(floating_complex_half_bfloat16, cosh, Cosh);
OP(floating_complex_half_bfloat16, tan, Tan);
OP(floating_complex_half_bfloat16, sin, Sin);
OP(floating_complex_half_bfloat16, sinh, Sinh);
OP(floating_complex_half_bfloat16, exp, Exp);
OP(floating_complex_half_bfloat16, tanh, Tanh);
OP(floating_complex_half_bfloat16, log, Log);
OP(floating_complex_half_bfloat16, log10, Log10);
OP(floating_complex_half_bfloat16, log2, Log2);
OP(floating_complex_half_bfloat16, cos, Cos);
OP(floating_complex_half_bfloat16, sqrt, Sqrt);
OP(floating_half_bfloat16, log1p, Log1p);
OP(floating_half_bfloat16, erf, Erf);
//
// Special cases
// These functions must be special cased as they can't be written as std::functor_name in OP macro
//
// Functors that cannot be expressed as std::<name> through the OP macro.
template<typename T>
struct Sigmoid {
T one = T(1);
// 1 / (1 + e^-t); uses the global-namespace exp (HIP device overload).
__device__ T operator()(T t) const { return (one / (one + ::exp(-t))); }
};
template<typename T>
struct Round {
__device__ T operator()(T t) const { return std::nearbyint(t); }
};
// NOTE: despite the name, this computes the fractional part t - trunc(t);
// it backs foreach_tensor_frac_* below.
template<typename T>
struct Trunc {
__device__ T operator()(T t) const { return t - std::trunc(t); }
};
template<typename T>
struct Reciprocal {
T one = T(1);
__device__ T operator()(T t) const { return (one / t); }
};
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sigmoid, Sigmoid)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, round, Round)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, frac, Trunc)
OP_CUSTOM_FUNCTOR(floating_complex_half_bfloat16, reciprocal, Reciprocal)
// note(mkozuki): tensor dtype checks of `neg` kernels.
// Since `check_foreach_api_restrictions` don't require all the tensors to have the same dtype,
// I think it safer to check every single tensor's dtype inside negation kernels.
// Out-of-place foreach negation. Bool tensors are rejected explicitly.
// NOTE(review): despite the comment above about checking every tensor, only
// tensors[0] is checked here — confirm whether mixed-dtype lists can reach
// the fast route.
std::vector<Tensor> foreach_tensor_neg_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow(tensors);
}
TORCH_CHECK(tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
return all_types_half_complex_bfloat16<std::negate>(tensors);
}
// In-place foreach negation; same dtype gating as the out-of-place variant.
void foreach_tensor_neg_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow_(tensors);
}
TORCH_CHECK(tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
all_types_half_complex_bfloat16_<std::negate>(tensors);
}
// Abs have to go via slow path in case of a complex type.
// This is because foreach kernels can't return a different dtype than passed, while
// abs with complex inputs will produce float output.
// Element-wise absolute value functor for the foreach kernels.
template<typename T>
struct Abs {
__device__ T operator()(T t) const { return std::abs(t); }
};
// Out-of-place foreach abs. Complex inputs take the slow path because the
// fast kernels cannot return a dtype different from the input (abs of a
// complex tensor yields a real tensor).
std::vector<Tensor> foreach_tensor_abs_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex = std::any_of(
tensors.begin(), tensors.end(),
[](const auto & t) { return at::isComplexType(t.scalar_type()); });
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow(tensors);
}
return all_types_complex_bfloat16_half_bool<Abs>(tensors);
}
// In-place foreach abs; same complex-input fallback as above.
void foreach_tensor_abs_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex = std::any_of(
tensors.begin(), tensors.end(),
[](const auto & t) { return at::isComplexType(t.scalar_type()); });
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow_(tensors);
}
all_types_complex_bfloat16_half_bool_<Abs>(tensors);
}
// In-place foreach zero-fill via a depth-1 ZeroFunctor pass.
// NOTE(review): this dispatch covers floating/complex + Half only — no
// BFloat16/Bool, unlike the unary ops above; confirm against
// can_use_fast_route's dtype gating.
void foreach_tensor_zero_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_zero_slow_(tensors);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_zero_cuda_", [&]() {
multi_tensor_apply<1>(tensor_lists,
ZeroFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>());
});
}
}} // namespace at::native
| 4c8af5d8f41c2b3eaea85d71dd2e069d6e6203c5.cu | #include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
namespace at { namespace native {
// Out-of-place unary foreach: applies Op element-wise to every tensor in
// `tensors` and returns freshly allocated results (one per input).
// Depth 2 = {inputs, results}; results are written at res_arg_index 1.
template <typename scalar_t, template<class> class Op> std::vector<Tensor> foreach_unary_op(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
vec_res.reserve(tensors.size());
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(tensors.vec());
tensor_lists.emplace_back(std::move(vec_res));
// Compute in the op-math type (e.g. float for Half) for precision.
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<2>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 2,
/* r_args_depth */ 1,
/* res_arg_index */ 1>(),
Op<opmath_t>());
return tensor_lists[1];
}
// In-place unary foreach: applies Op element-wise, overwriting each input.
// Depth 1 = {inputs}; results are written back at res_arg_index 0.
template <typename scalar_t, template<class> class Op> void foreach_unary_op_(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(tensor_lists,
UnaryOpFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>(),
Op<opmath_t>());
}
// Dispatch helpers: each pair instantiates foreach_unary_op (out-of-place)
// and foreach_unary_op_ (in-place) for the set of scalar types its name
// describes. The dtype of tensors[0] drives the dispatch in every case.
template <template<class> class Op>
std::vector<Tensor> floating_complex_half(TensorList tensors) {
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_complex_half_(TensorList tensors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// All types + complex + Half + BFloat16 + Bool.
template <template<class> class Op>
std::vector<Tensor> all_types_complex_bfloat16_half_bool(TensorList tensors) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
// NOTE(review): this in-place variant reuses the "foreach_unary_op_cuda"
// dispatch name (no trailing underscore), unlike its siblings.
template <template<class> class Op>
void all_types_complex_bfloat16_half_bool_(TensorList tensors) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + complex + Half + BFloat16.
template <template<class> class Op>
std::vector<Tensor> floating_complex_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_complex_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// All types + complex + Half + BFloat16 (no Bool).
template <template<class> class Op>
std::vector<Tensor> all_types_half_complex_bfloat16(TensorList tensors) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void all_types_half_complex_bfloat16_(TensorList tensors) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + Half. (The two variants spell the same dispatch set with
// different macros: _AND(Half) vs _AND_HALF.)
template <template<class> class Op>
std::vector<Tensor> floating_half(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_half_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// Floating + Half + BFloat16.
template <template<class> class Op>
std::vector<Tensor> floating_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda", [&]() {
return foreach_unary_op<scalar_t, Op>(tensors);
});
}
template <template<class> class Op>
void floating_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
// makes the functor
// STD_FUNCTOR(op_name, functor_name): defines a device functor whose
// operator() forwards to std::op_name. (The trailing backslash on the
// closing "};" line splices the following comment line into the macro; the
// comment is then stripped, so the definition still ends there.)
#define STD_FUNCTOR(op_name, functor_name) \
template<typename T> \
struct functor_name { \
__device__ T operator()(T t) const { return std::op_name(t); } \
}; \
// given a functor and a "dispatch function", creates the outplace and inplace operations
// OP_CUSTOM_FUNCTOR(function, op_name, functor_name): emits both foreach
// entry points. The fast multi-tensor path is used only when the foreach
// restrictions hold and no tensor is integral/bool; otherwise it falls back
// to the slow per-tensor implementation.
#define OP_CUSTOM_FUNCTOR(function, op_name, functor_name) \
std::vector<Tensor> foreach_tensor_##op_name##_cuda(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow(tensors); \
} \
return function<functor_name>(tensors); \
} \
void foreach_tensor_##op_name##_cuda_(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow_(tensors); \
} \
\
function##_<functor_name>(tensors); \
}
// creates a functor, outplace version, and inplace version.
// OP(function, op_name, functor_name): emits the std::-based functor plus
// both foreach entry points in one go.
// The last line must NOT end in a backslash-continuation: as written, the
// continuation spliced the first OP(...) invocation below into this macro's
// body, leaving a literal non-expandable OP(...) token in every expansion.
#define OP(function, op_name, functor_name) \
STD_FUNCTOR(op_name, functor_name); \
OP_CUSTOM_FUNCTOR(function, op_name, functor_name);
OP(floating_half_bfloat16, erfc, Erfc);
OP(floating_half_bfloat16, expm1, Expm1);
OP(floating_half, lgamma, Lgamma);
OP(floating_half_bfloat16, trunc, Truncf);
OP(floating_half_bfloat16, floor, Floor);
OP(floating_half_bfloat16, ceil, Ceil);
OP(floating_complex_half_bfloat16, acos, Acos);
OP(floating_complex_half_bfloat16, asin, Asin);
OP(floating_complex_half_bfloat16, atan, Atan);
OP(floating_complex_half_bfloat16, cosh, Cosh);
OP(floating_complex_half_bfloat16, tan, Tan);
OP(floating_complex_half_bfloat16, sin, Sin);
OP(floating_complex_half_bfloat16, sinh, Sinh);
OP(floating_complex_half_bfloat16, exp, Exp);
OP(floating_complex_half_bfloat16, tanh, Tanh);
OP(floating_complex_half_bfloat16, log, Log);
OP(floating_complex_half_bfloat16, log10, Log10);
OP(floating_complex_half_bfloat16, log2, Log2);
OP(floating_complex_half_bfloat16, cos, Cos);
OP(floating_complex_half_bfloat16, sqrt, Sqrt);
OP(floating_half_bfloat16, log1p, Log1p);
OP(floating_half_bfloat16, erf, Erf);
//
// Special cases
// These functions must be special cased as they can't be written as std::functor_name in OP macro
//
// Functors that cannot be expressed as std::<name> through the OP macro.
template<typename T>
struct Sigmoid {
T one = T(1);
// 1 / (1 + e^-t).
__device__ T operator()(T t) const { return (one / (one + std::exp(-t))); }
};
template<typename T>
struct Round {
__device__ T operator()(T t) const { return std::nearbyint(t); }
};
// NOTE: despite the name, this computes the fractional part t - trunc(t);
// it backs foreach_tensor_frac_* below.
template<typename T>
struct Trunc {
__device__ T operator()(T t) const { return t - std::trunc(t); }
};
template<typename T>
struct Reciprocal {
T one = T(1);
__device__ T operator()(T t) const { return (one / t); }
};
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sigmoid, Sigmoid)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, round, Round)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, frac, Trunc)
OP_CUSTOM_FUNCTOR(floating_complex_half_bfloat16, reciprocal, Reciprocal)
// note(mkozuki): tensor dtype checks of `neg` kernels.
// Since `check_foreach_api_restrictions` don't require all the tensors to have the same dtype,
// I think it safer to check every single tensor's dtype inside negation kernels.
// Out-of-place foreach negation. Bool tensors are rejected explicitly.
// NOTE(review): despite the comment above about checking every tensor, only
// tensors[0] is checked here — confirm whether mixed-dtype lists can reach
// the fast route.
std::vector<Tensor> foreach_tensor_neg_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow(tensors);
}
TORCH_CHECK(tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
return all_types_half_complex_bfloat16<std::negate>(tensors);
}
// In-place foreach negation; same dtype gating as the out-of-place variant.
void foreach_tensor_neg_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow_(tensors);
}
TORCH_CHECK(tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
all_types_half_complex_bfloat16_<std::negate>(tensors);
}
// Abs have to go via slow path in case of a complex type.
// This is because foreach kernels can't return a different dtype than passed, while
// abs with complex inputs will produce float output.
// Element-wise absolute value functor for the foreach kernels.
template<typename T>
struct Abs {
__device__ T operator()(T t) const { return std::abs(t); }
};
// Out-of-place foreach abs. Complex inputs take the slow path because the
// fast kernels cannot return a dtype different from the input (abs of a
// complex tensor yields a real tensor).
std::vector<Tensor> foreach_tensor_abs_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex = std::any_of(
tensors.begin(), tensors.end(),
[](const auto & t) { return at::isComplexType(t.scalar_type()); });
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow(tensors);
}
return all_types_complex_bfloat16_half_bool<Abs>(tensors);
}
// In-place foreach abs; same complex-input fallback as above.
void foreach_tensor_abs_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex = std::any_of(
tensors.begin(), tensors.end(),
[](const auto & t) { return at::isComplexType(t.scalar_type()); });
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow_(tensors);
}
all_types_complex_bfloat16_half_bool_<Abs>(tensors);
}
// In-place foreach zero-fill via a depth-1 ZeroFunctor pass.
// NOTE(review): this dispatch covers floating/complex + Half only — no
// BFloat16/Bool, unlike the unary ops above; confirm against
// can_use_fast_route's dtype gating.
void foreach_tensor_zero_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_zero_slow_(tensors);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, tensors[0].scalar_type(), "foreach_zero_cuda_", [&]() {
multi_tensor_apply<1>(tensor_lists,
ZeroFunctor<scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>());
});
}
}} // namespace at::native
|
f769e08f630fd4a1a4a534d641ddc3a5fb1ba4f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019-2020 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//
// @author George A. Shulinok <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/image_resize.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
namespace sd {
namespace ops {
namespace helpers {
// Precomputed per-output-pixel data for 1-D linear interpolation.
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear interpolation weight (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
// resize gives different results then a resize then a flip.
struct LegacyScaler {
    _CUDA_HD LegacyScaler(){};
    // Maps an output coordinate to an input coordinate by plain scaling.
    // No half-pixel offset is applied — pixel centers are assumed to sit on
    // integer coordinates, which is why this scaler is considered "legacy".
    inline _CUDA_HD float operator()(const int x, const float scale) const {
        const float outCoord = static_cast<float>(x);
        return scale * outCoord;
    }
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top,left pixel is 0.5,0.5.
struct HalfPixelScaler {
    _CUDA_HD HalfPixelScaler(){};
    // Maps an output coordinate to an input coordinate assuming pixel centers
    // sit at half-integer positions (top-left pixel center at 0.5, 0.5).
    // The 0.5 is subtracted back out because the downstream sampling code
    // works in the old integer-center coordinate system.
    inline _CUDA_HD float operator()(const int x, const float scale) const {
        const float center = static_cast<float>(x) + 0.5f;
        return center * scale - 0.5f;
    }
};
// Utility functions
// calculateResizeScale determines the float scaling factor.
// calculateResizeScale determines the float scaling factor.
// With alignCorners (and more than one output sample) the first and last
// samples of input and output coincide, giving (inSize-1)/(outSize-1);
// otherwise the plain ratio inSize/outSize is used.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
                                  bool alignCorners) {
    if (alignCorners && outSize > 1) {
        return (inSize - 1) / static_cast<float>(outSize - 1);
    }
    return inSize / static_cast<float>(outSize);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// interporationData - result
//
// computeInterpolationWeights kernel: fills interpolationData[0..outSize]
// with the bracketing source indices and fractional weight for each output
// coordinate, using the Scaler policy (legacy or half-pixel).
// NOTE(review): every thread writes the sentinel element
// interpolationData[outSize] (a benign race — all write the same zeros), and
// the thread with tid == 0 then overwrites that sentinel inside the loop,
// since i starts at outSize - tid. TODO confirm the sentinel is never read.
template <class Scaler>
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
                                                   Nd4jLong inSize,
                                                   double scale,
                                                   Nd4jLong channels,
                                                   BilinearInterpolationData* interpolationData) {
    interpolationData[outSize].bottomIndex = 0;
    interpolationData[outSize].topIndex = 0;
    auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    auto step = blockDim.x * gridDim.x;
    Scaler scaler;
    // Grid-stride loop walking downwards from outSize - tid to 0.
    for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
        double in = scaler(i, scale);
//        interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
//        interpolationData[i].topIndex = sd::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
//        interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
        double const in_f = sd::math::p_floor<double>(in);
        double const in_c = sd::math::p_ceil<double>(in);
        // Clamp the bracketing indices into [0, inSize - 1].
        interpolationData[i].bottomIndex = sd::math::nd4j_max(static_cast<Nd4jLong>(in_f), (Nd4jLong)0LL);//static_cast<Nd4jLong>(in);
        interpolationData[i].topIndex = sd::math::nd4j_min(static_cast<Nd4jLong>(in_c), inSize - 1);
        interpolationData[i].interpolarValue = in - in_f;
        // When channels != 0 the indices are pre-scaled to element offsets.
        // NOTE(review): each element is touched by exactly one thread here, so
        // the atomic multiply looks unnecessary — presumably defensive; verify.
        if (channels) {
            math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
            math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
// Forward declaration: dispatches the typed bilinear resize implementation
// over the (input, output) data-type pair (definition below).
static void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
                        Nd4jLong outWidth, Nd4jLong channels,
                        BilinearInterpolationData* xs_,
                        BilinearInterpolationData* ys_,
                        NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
// Bilinear resize kernel. One block per batch image (grid-stride over batch);
// threads stride over output rows; each thread serially walks the columns and
// channels of its row, blending the four bracketing input samples using the
// precomputed xs_/ys_ interpolation tables.
template <typename T, typename Z>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, Z* outputYptr,
                                         Nd4jLong const* outputShape, Nd4jLong batchSize, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels,
                                         Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
                                         BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
    for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) {  // grid-stride over batch images
        auto pBatchIn = input + b * inBatchNumValues;
        for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
            const T* pRowLow = pBatchIn + ys_[y].bottomIndex * inRowSize;
            const T* pRowHigh = pBatchIn + ys_[y].topIndex * inRowSize;
            double yLerp = ys_[y].interpolarValue;
            auto pRowOut = outputYptr + (b * outHeight + y) * outRowSize;
            for (Nd4jLong x = 0; x < outWidth; x++) {
                auto xLow = xs_[x].bottomIndex;
                auto xHigh = xs_[x].topIndex;
                auto xLerp = xs_[x].interpolarValue;
                // blend the four neighbours for every channel
                for (int c = 0; c < channels; c++) {
                    Z topLeft(pRowLow[xLow + c]);
                    Z topRight(pRowLow[xHigh + c]);
                    Z bottomLeft(pRowHigh[xLow + c]);
                    Z bottomRight(pRowHigh[xHigh + c]);
                    Z top = topLeft + (topRight - topLeft) * xLerp;
                    Z bottom = bottomLeft + (bottomRight - bottomLeft) * xLerp;
                    pRowOut[x * channels + c] = Z(top + (bottom - top) * yLerp);
                }
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
// resizeImage_ - launches resizeImageKernel over the precomputed per-axis
// interpolation tables (xs_/ys_, device memory) and synchronizes the stream,
// so the caller may free xs_/ys_ immediately after return.
// Throws cuda_exception on kernel/stream failure.
// (Removed dead locals batchSizeBlock/pictureBlock — they were never used.)
template <typename T, typename F>
static void resizeImage_(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
                         Nd4jLong outWidth, Nd4jLong channels,
                         BilinearInterpolationData* xs_,
                         BilinearInterpolationData* ys_,
                         NDArray* output) {
    Nd4jLong inRowSize = inWidth * channels;
    Nd4jLong inBatchNumValues = inHeight * inRowSize;
    Nd4jLong outRowSize = outWidth * channels;
    auto stream = context->getCudaStream();
    // buffers are expected to be in 'c' order
    T const* pInput = images->getDataBuffer()->specialAsT<T>();
    F* pOutput = output->dataBuffer()->specialAsT<F>();
    hipLaunchKernelGGL((resizeImageKernel<T, F>), dim3(256), dim3(256), 256, *stream, pInput, images->specialShapeInfo(), pOutput,
                       output->specialShapeInfo(), batchSize, outWidth, outHeight, channels, inRowSize, outRowSize,
                       inBatchNumValues, xs_, ys_);
    auto err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeBilinearFunctor_ - typed bilinear resize implementation.
// Precomputes per-axis interpolation tables on device, runs the bilinear
// kernel, then frees the temporary tables. Returns an ND4J status code.
// Fixes: removed a leftover debug printf (it also used %d for Nd4jLong,
// which is a format-specifier mismatch) and checked the previously-ignored
// stream-synchronize result.
template <typename T, typename F>
static int resizeBilinearFunctor_(sd::LaunchContext* context, NDArray const* images, int const width,
                                  int const height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    const Nd4jLong batchSize = images->sizeAt(0);
    const Nd4jLong inHeight = images->sizeAt(1);
    const Nd4jLong inWidth = images->sizeAt(2);
    const Nd4jLong channels = images->sizeAt(3);
    const Nd4jLong outHeight = output->sizeAt(1);
    const Nd4jLong outWidth = output->sizeAt(2);
    // Handle no-op resizes efficiently.
    if (outHeight == inHeight && outWidth == inWidth) {
        output->assign(images);
        return ND4J_STATUS_OK;
    }
    float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
    float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
    // Temporary per-axis interpolation tables (one extra sentinel element each).
    BilinearInterpolationData* xs_;
    BilinearInterpolationData* ys_;
    hipError_t err = hipMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
    }
    err = hipMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
    }
    auto stream = context->getCudaStream();
    // Compute the cached interpolation weights on the x and y dimensions.
    if (halfPixelCenter) {
        hipLaunchKernelGGL((computeInterpolationWeights<HalfPixelScaler>), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
        hipLaunchKernelGGL((computeInterpolationWeights<HalfPixelScaler>), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
    }
    else {
        hipLaunchKernelGGL((computeInterpolationWeights<LegacyScaler>), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
        hipLaunchKernelGGL((computeInterpolationWeights<LegacyScaler>), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
    }
    NDArray::prepareSpecialUse({output}, {images});
    resizeImage_<T, F>(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
    err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot synchronize kernel execution", err);
    }
    NDArray::registerSpecialUse({output}, {images});
    err = hipFree(xs_);
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
    }
    err = hipFree(ys_);
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangulars", err);
    }
    return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
// resize by interpolation nearest neighbor algorithm kernel.
// One block per batch image (blockIdx.x); threadIdx.x strides over output
// rows, threadIdx.y over output columns, and the z dimension over channels.
// Rounds the source coordinate when alignCorners, floors otherwise;
// halfPixelCenters shifts the output coordinate by 0.5 before scaling.
// Fix: the inner coordinate arrays were named posX/posZ, shadowing the outer
// 'posX' — renamed to inCoords/outCoords.
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape,
                                            Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool alignCorners, bool halfPixelCenters) {
    if (blockIdx.x < batchSize)
    {
        auto b = blockIdx.x;
        for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
            auto posY = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters ? ((float)y + 0.5f) * heightScale : (float)y * heightScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
                    halfPixelCenters ? ((float)y + 0.5f) * heightScale : (float)y * heightScale));
            Nd4jLong inY = sd::math::nd4j_min(posY, inHeight - 1);
            if (halfPixelCenters) {
                inY = sd::math::nd4j_max(0LL, inY);
            }
            for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
                auto posX = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters ? ((float)x + 0.5f) * widthScale : (float)x * widthScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
                        halfPixelCenters ? ((float)x + 0.5f) * widthScale : (float)x * widthScale));
                Nd4jLong inX = sd::math::nd4j_min(posX, inWidth - 1);
                if (halfPixelCenters) {
                    inX = sd::math::nd4j_max(0LL, inX);
                }
                auto start = blockIdx.z * blockDim.z + threadIdx.z;
                auto step = blockDim.z * gridDim.z;
                for (Nd4jLong e = start; e < channels; e += step) {
                    Nd4jLong inCoords[] = {b, inY, inX, e};
                    Nd4jLong outCoords[] = {b, y, x, e};
                    auto xIndex = shape::getOffset(inputShape, inCoords);
                    auto zIndex = shape::getOffset(outputShape, outCoords);
                    output[zIndex] = input[xIndex];
                }
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
// resizeNeighborFunctor_ - main algorithm by nearest neighbor.
// Returns an ND4J status code; throws cuda_exception on stream failure.
template <typename T>
int resizeNeighborFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
                           bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
    const Nd4jLong batchSize = images->sizeAt(0);
    const Nd4jLong inHeight = images->sizeAt(1);
    const Nd4jLong inWidth = images->sizeAt(2);
    const Nd4jLong channels = images->sizeAt(3);
    const Nd4jLong outHeight = output->sizeAt(1);
    const Nd4jLong outWidth = output->sizeAt(2);
    // Handle no-op resizes efficiently.
    if (outHeight == inHeight && outWidth == inWidth) {
        output->assign(images);
        return ND4J_STATUS_OK;
    }
    float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
    float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
    auto imagesBuffer = images->getDataBuffer()->specialAsT<T>();
    auto outputBuffer = output->dataBuffer()->specialAsT<T>();
    auto stream = context->getCudaStream();
    NDArray::prepareSpecialUse({output}, {images});
    // One block per batch image. The previous launch used outHeight * outWidth
    // threads per block, which exceeds the device limit (1024) for outputs
    // larger than 32x32 and made the launch fail silently; the kernel strides
    // over rows (threadIdx.x) and columns (threadIdx.y) anyway, so a fixed
    // 32x32 block covers any output size.
    hipLaunchKernelGGL((resizeNeighborKernel<T>), dim3(batchSize), dim3(32, 32), 512, *stream, imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(),
                       batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, alignCorners, halfPixelCenters);
    auto err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::resizeNeighborFunctor_: Cannot synchronize kernel execution", err);
    }
    NDArray::registerSpecialUse({output}, {images});
    return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
// Dispatches resizeImage_<T, F> on the (input, output) data-type pair.
void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
                 Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
                 BilinearInterpolationData* ys_, NDArray* output) {
    BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(),
                          resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels,
            xs_, ys_, output), NUMERIC_TYPES, FLOAT_TYPES);
}
// Explicit instantiations of resizeImage_ for all supported type pairs.
BUILD_DOUBLE_TEMPLATE(template void resizeImage_,(sd::LaunchContext* context, NDArray const* images,
        Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
        Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output),
                      NUMERIC_TYPES, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Public entry for bilinear resize: dispatches the typed implementation on
// the (input, output) data-type pair and returns its status code.
int resizeBilinearFunctor(sd::LaunchContext* context, NDArray const* images, int width, int height,
                          bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(), return resizeBilinearFunctor_, (context, images,
            width, height, alignCorners, halfPixelCenter, output), NUMERIC_TYPES, FLOAT_TYPES);
}
//    BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (sd::LaunchContext* context,
//            NDArray const* images, int const width, int const height, bool const alignCorners,
//            bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Public entry for nearest-neighbor resize: dispatches the typed
// implementation on the input data type and returns its status code.
int resizeNeighborFunctor(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
                          bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_,
                          (context, images, width, height, alignCorners, halfPixelCenter, output), LIBND4J_TYPES);
}
//    BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (sd::LaunchContext* context, NDArray const* images,
//            int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Holds the geometry and scale factors shared by the bicubic resize kernels.
struct ImageResizerState {
    explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
            : _alignCorners(alignCorners),
              _halfPixelCenters(halfPixelCenters) {}

    // ValidateAndCalculateOutputSize checks the bounds on the input tensors
    // and requested size, sets up some of the resizing state such as the
    // heightScale and widthScale, and calculates the output size.
    // If any of these operations fails, it sets an error status in
    // the context, which the caller must check.
    int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
        batchSize = input->sizeAt(0);
        outHeight = height;
        outWidth = width;
        inHeight = static_cast<int32_t>(input->sizeAt(1));
        inWidth = static_cast<int32_t>(input->sizeAt(2));
        channels = input->sizeAt(3);
        heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
        widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);
        // Guard against overflows
        if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
            nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
            return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
        }
        // Fixed copy-paste bug: this check previously used heightScale and
        // reported the height value instead of the width.
        if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
            nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
            return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
        }
        return Status::OK();
    }

    // Calculates all the required variables, and allocates the output.
    int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
        return validateAndCalculateOutputSize(input, width, height);
    }

    Nd4jLong batchSize;
    Nd4jLong outHeight;
    Nd4jLong outWidth;
    Nd4jLong inHeight;
    Nd4jLong inWidth;
    Nd4jLong channels;
    float heightScale;
    float widthScale;
    NDArray* output = nullptr;
    hipStream_t* stream;  // stream used by the resize kernels (set by caller)
private:
    bool _alignCorners;
    bool _halfPixelCenters;
};
// The four bicubic tap weights and their (clamped) source indices for one
// output coordinate; _advance is how many cached tap values can be reused
// from the previous coordinate (see CachedInterpolationCalculator::Advance).
struct WeightsAndIndices {
    float _weight0;
    float _weight1;
    float _weight2;
    float _weight3;
    Nd4jLong _index0;
    Nd4jLong _index1;
    Nd4jLong _index2;
    Nd4jLong _index3;
    int _advance;  // advance value.
};
// Tracks the previous four tap indices so consecutive output columns can
// reuse already-computed vertical interpolations.
class CachedInterpolationCalculator {
public:
    _CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}
    // Advances iteration. Returns the number of values that should be copied from
    // the current point to the next point. The copying should always be done by
    // copying the last <retval> values from the old point to the first <retval>
    // values of the new point.
    inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
                                const Nd4jLong x3) {
        // We use 2 hands and walk through, copying from one to another where
        // we already have values.
        // Invariant, new_indicies_hand <= cached_values_hand
        const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
        int cachedValuesHand = 0;
        int newIndiciesHand = 0;
        while (cachedValuesHand < 4) {
            if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
                if (newIndiciesHand < cachedValuesHand) {
                    _indexes[newIndiciesHand] = _indexes[cachedValuesHand];
                }
                newIndiciesHand++;
            }
            cachedValuesHand++;
        }
        // Intentional fallthrough: starting at case 'newIndiciesHand', every
        // later index slot is refreshed with the new x values.
        switch (newIndiciesHand) {
            case 0:
                _indexes[0] = x0;
            case 1:
                _indexes[1] = x1;
            case 2:
                _indexes[2] = x2;
            case 3:
                _indexes[3] = x3;
                break;
        }
        return newIndiciesHand;
    }

private:
    Nd4jLong _indexes[4];  // tap indices from the previous Advance call
};
// Fills the bicubic coefficient lookup table for kernel parameter 'a':
// for each of tableSize+1 sample points, entry [2*i] holds the near-tap
// weight and [2*i+1] the far-tap weight of the cubic convolution kernel.
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
    auto first = blockIdx.x * blockDim.x + threadIdx.x;
    auto stride = blockDim.x * gridDim.x;
    for (int i = first; i <= tableSize; i += stride) {
        float x = i * 1.0 / tableSize;
        table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
        x += 1.0;
        table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
    }
}
static const Nd4jLong kTableSize = (1 << 10);
// Allocates (on device) and initializes the bicubic coefficients table using
// the bicubic convolution algorithm
// (https://en.wikipedia.org/wiki/Bicubic_interpolation).
// The caller owns the returned device buffer of (kTableSize + 1) * 2 floats.
// Fixed error messages: the allocation failure previously reported
// "vertical parts rectangulars" (copy-pasted from resize_image) and the sync
// failure misspelled "synchronize".
float* initCoeffsTable(const double a, hipStream_t* stream) {
    float* coeffs_table;
    auto err = hipMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
    if (err != 0) {
        throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for coefficients table", err);
    }
    hipLaunchKernelGGL((initCoefTableKernel), dim3(128), dim3(128), 128, *stream, a, coeffs_table, kTableSize);
    err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::initCoeffsTable: Cannot synchronize kernel", err);
    }
    return coeffs_table;
}
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
// // Processing, 29(6):11531160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
// Clamps val into the valid index range [0, limit - 1].
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
    const Nd4jLong nonNegative = math::nd4j_max(Nd4jLong{0}, val);
    return math::nd4j_min(limit - 1ll, nonNegative);
}
// Weighted sum of four taps, accumulated in float (same left-to-right
// addition order as a single four-term expression).
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
                                    const T value0, const T value1, const T value2, const T value3) {
    float acc = static_cast<float>(value0) * weight0;
    acc += static_cast<float>(value1) * weight1;
    acc += static_cast<float>(value2) * weight2;
    acc += static_cast<float>(value3) * weight3;
    return acc;
}
// Compute the 1D interpolation for a given X index using the y_weights
// (values[] holds the four vertically pre-interpolated taps).
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
    return interpolate1D(xW0, xW1, xW2, xW3, values[0], values[1], values[2], values[3]);
}
// Computes the four bicubic tap indices (clamped into [0, limit)) and their
// weights for one output coordinate, looking the weights up in the
// precomputed coefficients table.
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
    const Scaler scaler;
    const float in_loc_f = scaler(out_loc, scale);
    const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
    const float delta = in_loc_f - in_loc;
    // quantize the fractional offset to a row of the coefficients table
    const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
    //const float* coeffs_table = getCoeffsTable(use_keys_cubic);
    if (use_keys_cubic) {
        // The legacy code placed more weight on the edge pixels, since bounding
        // the set of inputs to sample could cause an edge pixel to be repeated.
        // Here we change the behavior at borders to match that used by the
        // scale_and_translate_op, where sampling locations outside the image have
        // their weight set to 0, and the weights are renormalized so that their sum
        // is 1.0.
        out->_index0 = bound(in_loc - 1, limit);
        out->_weight0 =
                (out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
        out->_index1 = bound(in_loc, limit);
        out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
        out->_index2 = bound(in_loc + 1, limit);
        out->_weight2 =
                (out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
                                            : 0.0f);
        out->_index3 = bound(in_loc + 2, limit);
        out->_weight3 = (out->_index3 == in_loc + 2
                         ? coeffs_table[(kTableSize - offset) * 2 + 1]
                         : 0.0f);
        // Renormalize so the kept (in-bounds) weights sum to 1.
        const float weight_sum =
                out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
        if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
            const float one_over_weight_sum = 1.0f / weight_sum;
            out->_weight0 *= one_over_weight_sum;
            out->_weight1 *= one_over_weight_sum;
            out->_weight2 *= one_over_weight_sum;
            out->_weight3 *= one_over_weight_sum;
        }
    } else {
        out->_weight0 = coeffs_table[offset * 2 + 1];
        out->_weight1 = coeffs_table[offset * 2];
        out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
        out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
        out->_index0 = bound(in_loc - 1, limit);
        out->_index1 = bound(in_loc, limit);
        out->_index2 = bound(in_loc + 1, limit);
        out->_index3 = bound(in_loc + 2, limit);
    }
}
// Converts the per-column tap indices into element offsets by scaling each
// of the four indices with the channel count (grid-stride over outWidth).
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
    auto first = blockIdx.x * blockDim.x + threadIdx.x;
    auto stride = blockDim.x * gridDim.x;
    for (auto x = first; x < outWidth; x += stride) {
        WeightsAndIndices& wai = pXWais[x];
        wai._index0 *= channels;
        wai._index1 *= channels;
        wai._index2 *= channels;
        wai._index3 *= channels;
    }
}
// Computes the bicubic x-axis weights/indices for every output column and the
// per-column '_advance' reuse count.
// NOTE(review): 'calc' is a single CachedInterpolationCalculator shared by all
// threads of the launch, but Advance() mutates its state and is meaningful
// only when columns are processed strictly in order — the parallel grid-stride
// loop here races on it, so _advance values look unreliable; confirm against
// the caller / the upstream TensorFlow implementation.
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
                                                     Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
    auto start = blockIdx.x * blockDim.x + threadIdx.x;
    auto step = blockDim.x * gridDim.x;
    for (auto x = start; x < outWidth; x += step) {
        if (halfPixelCenters)
            getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
        else
            getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
        pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
    }
}
// resizerState and xWais are device allocated.
// Computes the per-column bicubic weights/indices on device, then scales the
// indices by the channel count so they can be used as buffer offsets.
// Fixes: pCalcD was freed BEFORE synchronizing the stream, releasing device
// memory the in-flight kernel still reads; and the cuda_exception::build
// results were constructed but never thrown.
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
                                      const bool halfPixelCenters,
                                      WeightsAndIndices* pXWais) {
    auto stream = resizerState.stream;
    auto outWidth = resizerState.outWidth;
    // Device-side copy of a fresh interpolation calculator.
    CachedInterpolationCalculator calc;
    CachedInterpolationCalculator* pCalcD;
    auto err = hipMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
    }
    err = hipMemcpyAsync(pCalcD, &calc, sizeof(CachedInterpolationCalculator), hipMemcpyHostToDevice, *stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
    }
    hipLaunchKernelGGL((advaceWeightsAndIndicesKernel), dim3(128), dim3(128), 128, *stream, coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
    // Synchronize BEFORE freeing pCalcD — the kernel reads it asynchronously.
    err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
    }
    err = hipFree(pCalcD);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
    }
    // Scale the values so they can be used as offsets into buffers.
    hipLaunchKernelGGL((accumulateChannelsKernel), dim3(128), dim3(128), 512, *stream, pXWais, outWidth, resizerState.channels);
    err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
    }
}
// Performs one vertical bicubic interpolation for tap 'which' (0..3) of the
// given channel, using the y weights and the x index selected by 'which'.
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
        int which, int channelNum, const WeightsAndIndices& yWai,
        const T* pY0, const T* pY1, const T* pY2, const T* pY3,
        const WeightsAndIndices& xWai) {
    // Select the horizontal tap index for this column.
    int xIndex;
    switch (which) {
        case 0: xIndex = xWai._index0; break;
        case 1: xIndex = xWai._index1; break;
        case 2: xIndex = xWai._index2; break;
        default: xIndex = xWai._index3; break;
    }
    const Nd4jLong ptIndex = xIndex + channelNum;
    return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
                            yWai._weight3, pY0[ptIndex], pY1[ptIndex],
                            pY2[ptIndex], pY3[ptIndex]);
}
// Bicubic resize kernel with per-row caching of vertical interpolations.
// Grid layout: one block per batch image (grid-stride over batch); threads
// stride over output rows. Dynamic shared memory backs 'cachedValue' for the
// generic-channel path.
// NOTE(review): 'cachedValue' is assigned only when threadIdx.x == 0, yet
// every thread dereferences it in the generic (channels != 3) branch — for
// the other threads the pointer is uninitialized, and all threads would share
// the same shared-memory region without synchronization anyway; confirm this
// path against the upstream implementation before relying on it.
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, float* outputPtr) {
//        auto numChannels = pResizerState->channels;
    for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
        auto pInput = inputPtr + b * inBatchWidth;
        float* cachedValue;
        for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
            if (threadIdx.x == 0) {
                extern __shared__ char sharedChar[];
                cachedValue = reinterpret_cast<float*>(sharedChar);
            }
            auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
            auto pOutput = &outputPtr[pos];
            struct WeightsAndIndices yWai;
            if (halfPixelCenters) {
                getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
            } else {
                getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
            }
            // Make pointers represent offsets of data in inputBPtr.
            const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
            const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
            const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
            const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;
            if (pResizerState->channels == 3) {
                // Manually unroll case of 3 channels.
                float cached_value_0[4] = {0};
                float cached_value_1[4] = {0};
                float cached_value_2[4] = {0};
                for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
                    const WeightsAndIndices& xWai = xWais[x];
                    // Shift values in cached_value_* to fill first '_advance' values.
                    switch (xWai._advance) {
                        case 3:
                            cached_value_0[0] = cached_value_0[1];
                            cached_value_0[1] = cached_value_0[2];
                            cached_value_0[2] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[1];
                            cached_value_1[1] = cached_value_1[2];
                            cached_value_1[2] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[1];
                            cached_value_2[1] = cached_value_2[2];
                            cached_value_2[2] = cached_value_2[3];
                            break;
                        case 2:
                            cached_value_0[0] = cached_value_0[2];
                            cached_value_0[1] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[2];
                            cached_value_1[1] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[2];
                            cached_value_2[1] = cached_value_2[3];
                            break;
                        case 1: {
                            cached_value_0[0] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[3];
                            break;
                        }
                    }
                    // Set the remaining '4-_advance' values by computing.
                    // (Intentional fallthrough: starting at case '_advance',
                    // every later tap is recomputed.)
                    switch (xWai._advance) {
                        case 0:
                            cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 1:
                            cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 2:
                            cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 3:
                            cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            // break;
                    }
                    pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
                                                                      xWai._weight2, xWai._weight3);
                    pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
                                                                      xWai._weight2, xWai._weight3);
                    pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
                                                                      xWai._weight2, xWai._weight3);
                }
            } else {
                for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
                    const WeightsAndIndices& xWai = xWais[x];
                    // Shift values in cachedValue to fill first '_advance' values.
                    switch (xWai._advance) {
                        case 3:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
                                cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
                                cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
                            }
                            break;
                        case 2:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
                                cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
                            }
                            break;
                        case 1: {
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
                            }
                            break;
                        }
                    }
                    // Set the remaining '4-_advance' values by computing.
                    // (Intentional fallthrough, as in the 3-channel path.)
                    switch (xWai._advance) {
                        case 0:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 1:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 2:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 3:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                            // break;
                    }
                    for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                        pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
                    }
                }
            }
        }
    }
}
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
    // Bicubic resize driver: stages the resizer state on the device, builds the
    // per-output-column weights/indices table, then launches the caching kernel.
    // image/output are expected to have their device ("special") buffers prepared
    // by the caller (see resizeBicubicFunctorA_).
    const auto numChannels = resizerState.channels;
    const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
    const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
    auto stream = resizerState.stream; //output->getContext()->getCudaStream();

    ImageResizerState* resizerStateD;
    auto err = hipMalloc(&resizerStateD, sizeof(ImageResizerState));
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
    }
    err = hipMemcpyAsync(resizerStateD, &resizerState, sizeof(ImageResizerState), hipMemcpyHostToDevice, *stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
    }

    WeightsAndIndices* xWais; //(resizerState.outWidth);
    err = hipMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
    }

    auto coeffsTable = halfPixelCenters ? initCoeffsTable(-0.5, stream) : initCoeffsTable(-0.75, stream);
    // initCoeffsTable performs device work; pick up any pending error explicitly.
    // (The previous code re-tested the stale 'err' from the hipMalloc above here,
    // which had already been checked and therefore could never fire.)
    err = hipGetLastError();
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: initCoeffsTable finished with error", err);
    }
    computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
    // NOTE(review): hipStreamQuery returns 'not ready' for a merely-busy stream;
    // this assumes computeXWeightsAndIndices synchronized internally -- confirm.
    err = hipStreamQuery(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
    }

    const T* pInput = image->getDataBuffer()->specialAsT<T>();
    float* pOutput = output->dataBuffer()->specialAsT<float>();
    hipLaunchKernelGGL(( bicubicInterpolateWithCachingKernel<T>), dim3(128), dim3(1), 512, *stream, coeffsTable, pInput,
            resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
    err = hipStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
    }

    // Release device temporaries. NOTE(review): these leak when any check above
    // throws -- consider RAII wrappers.
    err = hipFree(resizerStateD);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
    }
    err = hipFree(xWais);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
    }
    err = hipFree(coeffsTable);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stub: bicubic resize with preserveAspectRatio/antialias options is not
// implemented in this backend -- this overload does no work and reports success
// unconditionally. 'output' is left untouched.
template <typename T>
int resizeBicubicFunctor_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
// Type-dispatched entry point: selects resizeBicubicFunctor_<T> from the
// image's data type (NUMERIC_TYPES). Note the selected implementation is
// currently a no-op stub that always returns Status::OK().
int resizeBicubicFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// Per-output-column interpolation span for area resize: the range of source
// columns [start, end) contributing to one output column, with fractional
// weights for the first and last source column in the span (see
// fillInterpolationCache for how these are computed).
struct CachedInterpolation {
Nd4jLong start; // first contributing source column: floor(x * widthScale)
Nd4jLong end; // one past the last contributing column: ceil((x + 1) * widthScale)
float startScale; // fractional coverage of column 'start'
float endMinusOneScale; // fractional coverage of column 'end - 1'
bool needsBounding; // true when start or end-1 fall outside [0, inWidth) and must be clamped
};
// Fills one CachedInterpolation per output column for area resize.
// Grid-stride loop: any launch geometry covers cacheLen entries.
static __global__ void fillInterpolationCache(CachedInterpolation* xCached, Nd4jLong cacheLen, Nd4jLong inWidth, float widthScale) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto increment = blockDim.x * gridDim.x;
for (auto x = start; x < cacheLen; x += increment) {
auto& xCache = xCached[x];
// Source-column interval [inX, inX1) covered by output column x.
const float inX = x * widthScale;
const float inX1 = (x + 1) * widthScale;
Nd4jLong v = math::nd4j_floor<float, Nd4jLong>(inX);
xCache.start = v;
// Fractional coverage of the first source column (the full widthScale when
// the whole interval fits inside a single column).
xCache.startScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
v = math::nd4j_ceil<float, Nd4jLong>(inX1);
xCache.end = v--;
// Fractional coverage of the last source column (end - 1).
xCache.endMinusOneScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
// Whether any index in [start, end) needs clamping into [0, inWidth).
xCache.needsBounding = bound(xCache.start, inWidth) != xCache.start || bound(xCache.end - 1, inWidth) != (xCache.end - 1);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Cached weight and row pointer for one contributing source row in area resize.
template <typename T>
struct ScaleCache {
float yScale; // vertical weight of this source row
T const* yPtr; // start of the source row's pixel data
};
// Computes, for one output pixel, the weighted sum of all source pixels covered
// by <xCache> across the row pointers/weights in <yScaleCache> -- specialized
// for exactly 3 channels so the channel accumulation is hand-unrolled.
// Precondition: st.channels == 3 (guaranteed by resizeAreaKernel's dispatch).
//
// Bounding is decided once per call (xCache.needsBounding), avoiding a dynamic
// check in the inner loops.
template <typename T>
static __device__ void computePatchSumOf3Channels(float scale,
                                                  const ImageResizerState& st,
                                                  ScaleCache<T> const* yScaleCache,
                                                  Nd4jLong ptrsLen,
                                                  const CachedInterpolation& xCache,
                                                  float* outputPtr) {
    bool const needsXBounding = xCache.needsBounding;
    auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
        return (needsXBounding ? bound(x, y) : (x));
    };

    float sum_0 = 0;
    float sum_1 = 0;
    float sum_2 = 0;
    for (int i = 0; i < ptrsLen; ++i) {
        const T* ptr = yScaleCache[i].yPtr;
        // First (possibly fractional) source column of the span.
        float scaleX = xCache.startScale;
        Nd4jLong offset = 3 * boundIfNeeded(xCache.start, st.inWidth);
        float sum_y_0 = static_cast<float>(ptr[offset + 0]) * scaleX;
        float sum_y_1 = static_cast<float>(ptr[offset + 1]) * scaleX;
        float sum_y_2 = static_cast<float>(ptr[offset + 2]) * scaleX;
        if (xCache.start + 1 != xCache.end) {
            // Fully covered interior columns contribute with weight 1.
            for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
                Nd4jLong innerOffset = 3 * boundIfNeeded(x, st.inWidth);
                sum_y_0 += static_cast<float>(ptr[innerOffset + 0]);
                sum_y_1 += static_cast<float>(ptr[innerOffset + 1]);
                sum_y_2 += static_cast<float>(ptr[innerOffset + 2]);
            }
            // Last (possibly fractional) source column. Uses the literal 3 for the
            // stride, matching the rest of this 3-channel specialization (the
            // original mixed 'st.channels' and '3' here; they are equal in this
            // path, so this is purely a consistency fix).
            scaleX = xCache.endMinusOneScale;
            offset = 3 * boundIfNeeded(xCache.end - 1, st.inWidth);
            sum_y_0 += static_cast<float>(ptr[offset + 0]) * scaleX;
            sum_y_1 += static_cast<float>(ptr[offset + 1]) * scaleX;
            sum_y_2 += static_cast<float>(ptr[offset + 2]) * scaleX;
        }
        // Weight the row sum by the row's vertical coverage.
        sum_0 += sum_y_0 * yScaleCache[i].yScale;
        sum_1 += sum_y_1 * yScaleCache[i].yScale;
        sum_2 += sum_y_2 * yScaleCache[i].yScale;
    }
    // 'scale' normalizes by the total covered area (1 / (heightScale * widthScale),
    // see resizeArea).
    outputPtr[0] = sum_0 * scale;
    outputPtr[1] = sum_1 * scale;
    outputPtr[2] = sum_2 * scale;
}
// Computes, for one output pixel, the weighted sum of all source pixels covered
// by <xCache> across the row pointers/weights in <yScaleCache> -- generic
// any-channel-count variant of computePatchSumOf3Channels (channels iterated in
// an outer loop instead of being unrolled).
//
// Bounding is decided once per call (xCache.needsBounding), avoiding a dynamic
// check in the hot loops.
template <typename T>
static __device__ void computePatchSum(float scale, const ImageResizerState& st,
ScaleCache<T> const* yScaleCache, Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
const auto numChannels = st.channels;
for (Nd4jLong c = 0; c < numChannels; ++c) {
float sum = 0;
for (int i = 0; i < ptrsLen; ++i) {
T const* ptr = yScaleCache[i].yPtr;
// First column of the span carries a fractional weight.
float scaleX = xCache.startScale;
float sumY = static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.start, st.inWidth) + c]) * scaleX;
if (xCache.start + 1 != xCache.end) {
// Interior columns are fully covered (weight 1).
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
sumY += static_cast<float>(
ptr[numChannels * boundIfNeeded(x, st.inWidth) + c]);
}
// Last column again carries a fractional weight.
scaleX = xCache.endMinusOneScale;
sumY += static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.end - 1, st.inWidth) + c]) * scaleX;
}
sum += sumY * yScaleCache[i].yScale;
}
// 'scale' normalizes by the total covered area (1 / (heightScale * widthScale)).
outputPtr[c] = sum * scale;
}
}
// Area resize kernel. Grid mapping: blockIdx.x strides batch images,
// threadIdx.x strides output rows; each thread processes every output column of
// its row. cachePool supplies per-(batch, row) scratch for the contributing
// source rows' pointers and weights.
template <typename T>
static __global__ void resizeAreaKernel(ImageResizerState const* pSt, CachedInterpolation const* caches, float scale,
T const* inputPtr, Nd4jLong const* inputShape, float* outputPtr, Nd4jLong const* outputShape, ScaleCache<T>* cachePool) { //batch * outWidth * outHeight
for (auto batch = blockIdx.x; batch < pSt->batchSize; batch += gridDim.x) {
for (auto y = threadIdx.x; y < pSt->outHeight; y += blockDim.x) {
const float inY = y * pSt->heightScale;
const float inY1 = (y + 1) * pSt->heightScale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
const Nd4jLong yStart = math::nd4j_floor<float, Nd4jLong>(inY);
const Nd4jLong yEnd = math::nd4j_ceil<float, Nd4jLong>(inY1);
auto scalesDim = yEnd - yStart;
// NOTE(review): the pool slice per (batch, y) is outWidth entries long but
// scalesDim entries are written below; this assumes scalesDim <= outWidth.
// TODO confirm for extreme downscale factors.
auto yScaleCache = cachePool + (batch * pSt->outHeight + y) * pSt->outWidth;
float* output = outputPtr + (batch * pSt->outHeight + y) * pSt->channels * pSt->outWidth;
// Fill the row cache: vertical weight and source-row pointer for each
// contributing input row (row index clamped via bound()).
for (Nd4jLong i = yStart, k = 0; i < yEnd; ++i, ++k) {
float scaleY;
if (i < inY) {
scaleY = (i + 1 > inY1 ? pSt->heightScale : i + 1 - inY);
} else {
scaleY = (i + 1 > inY1 ? inY1 - i : 1.0);
}
yScaleCache[k].yScale = scaleY;
yScaleCache[k].yPtr = inputPtr + (batch * pSt->inHeight * pSt->inWidth * pSt->channels + bound(i, pSt->inHeight) * pSt->inWidth * pSt->channels);
}
// 3-channel images take the hand-unrolled fast path.
if (pSt->channels == 3) {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation& xCache = caches[x];
computePatchSumOf3Channels<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
} else {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation &xCache = caches[x];
computePatchSum<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
}
}
}
}
// Host-side driver for area resize: stages the resizer state and a per-row
// scale cache on the device, launches resizeAreaKernel and waits for completion.
template <typename T>
static void resizeArea(hipStream_t* stream, ImageResizerState const& st, CachedInterpolation* cache,
NDArray const* input, NDArray* output) {
T const* inputPtr = reinterpret_cast<T const*>(input->specialBuffer());
// Normalization factor applied per output pixel (inverse of covered area).
float scale = 1.f / (st.heightScale * st.widthScale);
auto outputPtr = reinterpret_cast<float*>(output->specialBuffer()); // output is always float. TO DO: provide another float types also with template <typename X, typename Z> declaration
ImageResizerState* pSt;
auto err = hipMalloc(&pSt, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for ImageResizerState", err);
}
err = hipMemcpyAsync(pSt, &st, sizeof(ImageResizerState), hipMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot copy to device memory", err);
}
// One ScaleCache slot per (batch, outY, outX) -- scratch consumed by the kernel.
ScaleCache<T>* cachePool;
auto cachePoolSize = sizeof(ScaleCache<T>) * st.batchSize * st.outWidth * st.outHeight;
err = hipMalloc(&cachePool, cachePoolSize);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for cache", err);
}
hipLaunchKernelGGL(( resizeAreaKernel<T>), dim3(128), dim3(128), 2048, *stream, pSt, cache, scale, inputPtr, input->specialShapeInfo(), outputPtr,
output->specialShapeInfo(), cachePool);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: An error occured with kernel running", err);
}
// NOTE(review): the device temporaries leak if any check above throws --
// consider RAII wrappers.
err = hipFree(cachePool);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for cache", err);
}
err = hipFree(pSt);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for ImageResizeState", err);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Area resize entry point for one data type T: validates the requested size,
// precomputes per-output-column interpolation spans on the device, then runs
// the area-resize kernels. Returns the validation status.
template <typename T>
int resizeAreaFunctor_(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
ImageResizerState st(alignCorners, false); // Create resize info
auto res = st.validateAndCalculateOutputSize(image, width, height);
auto stream = context->getCudaStream();
if (Status::OK() == res) {
CachedInterpolation* xCached;
auto err = hipMalloc(&xCached, sizeof(CachedInterpolation) * st.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot allocate memory for cached interpolations", err);
}
NDArray::prepareSpecialUse({output}, {image});
// Launch errors from this kernel surface at the synchronization below
// (resizeArea also synchronizes internally before using xCached results).
hipLaunchKernelGGL(( fillInterpolationCache), dim3(128), dim3(128), 256, *stream, xCached, st.outWidth, st.inWidth, st.widthScale);
resizeArea<T>(stream, st, xCached, image, output);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Error occured when kernel was running", err);
}
// NOTE(review): xCached leaks when resizeArea or the check above throws.
err = hipFree(xCached);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot deallocate memory for cached interpolations", err);
}
NDArray::registerSpecialUse({output}, {image});
}
return res;
}
// Type-dispatched entry point for area resize: selects resizeAreaFunctor_<T>
// from the image's data type (NUMERIC_TYPES).
int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), NUMERIC_TYPES);
}
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
// Simplified bicubic resize (no antialiasing) for one data type T: validates
// and creates the output, then runs the caching bicubic interpolator on the
// device. Returns the validation status.
template <typename T>
int resizeBicubicFunctorA_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
// Type-dispatched entry point for the simplified bicubic resize: selects
// resizeBicubicFunctorA_<T> from the image's data type (NUMERIC_TYPES).
int resizeBicubicFunctorA(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (sd::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// Generic image-resize entry point: routes to the concrete implementation
// selected by 'method'. Bilinear/nearest/bicubic run with antialiasing
// disabled; an unknown method is rejected loudly.
int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
                        ImageResizeMethods method, bool alignCorners, NDArray* output) {
    if (method == kResizeBilinear)
        return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeNearest)
        return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeBicubic)
        return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeArea)
        return resizeAreaFunctor(context, image, width, height, alignCorners, output);
    throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method.");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel type of input(images) and output should be the same
//
// Crop-and-resize kernel. Grid mapping: blockIdx.x strides boxes, threadIdx.x
// strides output rows; threadIdx.y strides columns on the extrapolation path.
// The z launch dimension (if any) strides channels; with the current 1D
// launches blockIdx.z/threadIdx.z are 0 and each thread covers every channel.
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong const* imagesShape, Z const* boxes, Nd4jLong const* boxesShape,
        I const* indices, Nd4jLong const* indexShape, I const* cropSize, Nd4jLong const* cropShape, int method,
        double extrapolationVal, T* output, Nd4jLong const* outputShape, int numBoxes, int cropHeight, int cropWidth,
        int batchSize, int imageHeight, int imageWidth, int depth) {
    for (int b = blockIdx.x; b < numBoxes; b += gridDim.x) {
        // Boxes are stored as [y1, x1, y2, x2] in normalized image coordinates.
        Nd4jLong x1Pos[] = {b, 1};
        Nd4jLong y1Pos[] = {b, 0};
        Nd4jLong y2Pos[] = {b, 2};
        Nd4jLong x2Pos[] = {b, 3};
        Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];
        Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
        Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
        Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];

        int bIn = indices[b];
        if (bIn >= batchSize) {
            continue;  // box references an image outside the batch - skip it
        }

        Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
        Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);

        for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
            const float inY = (cropHeight > 1)
                              ? y1 * (imageHeight - 1) + y * heightScale
                              : 0.5 * (y1 + y2) * (imageHeight - 1);
            if (inY < 0 || inY > imageHeight - 1) {
                // Whole output row maps outside the source image: extrapolate.
                for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
                    // fixed: the depth stride used blockDim.x here (and in the four
                    // analogous spots below); it must be blockDim.z to pair with
                    // threadIdx.z/gridDim.z. Behavior is identical for the current
                    // 1D launches where blockDim.z == 1.
                    auto start = blockIdx.z * blockDim.z + threadIdx.z;
                    auto step = blockDim.z * gridDim.z;
                    for (int d = start; d < depth; d += step) {
                        Nd4jLong zPos[] = {b, y, x, d};
                        auto zIndex = shape::getOffset(outputShape, zPos);
                        output[zIndex] = (Z)extrapolationVal;
                    }
                }
                continue;
            }

            if (method == 0 /* bilinear */) {
                const int topYIndex = sd::math::p_floor(inY);
                const int bottomYIndex = sd::math::p_ceil(inY);
                const float y_lerp = inY - topYIndex;

                for (int x = 0; x < cropWidth; ++x) {
                    const float in_x = (cropWidth > 1)
                                       ? x1 * (imageWidth - 1) + x * widthScale
                                       : 0.5 * (x1 + x2) * (imageWidth - 1);
                    if (in_x < 0 || in_x > imageWidth - 1) {
                        auto start = blockIdx.z * blockDim.z + threadIdx.z;  // fixed: was blockDim.x
                        auto step = blockDim.z * gridDim.z;
                        for (int d = start; d < depth; d += step) {
                            Nd4jLong zPos[] = {b, y, x, d};
                            auto zIndex = shape::getOffset(outputShape, zPos);
                            output[zIndex] = (Z)extrapolationVal;
                        }
                        continue;
                    }

                    int left_x_index = math::p_floor(in_x);
                    int right_x_index = math::p_ceil(in_x);
                    // NOTE(review): x_lerp is typed T; for integer T the fractional
                    // part truncates to 0 and the x-axis degenerates to nearest
                    // neighbor. Left unchanged to preserve existing outputs --
                    // confirm whether float was intended (compare y_lerp above).
                    T x_lerp = in_x - left_x_index;

                    auto start = blockIdx.z * blockDim.z + threadIdx.z;  // fixed: was blockDim.x
                    auto step = blockDim.z * gridDim.z;
                    for (int d = start; d < depth; d += step) {
                        Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
                        Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
                        Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
                        Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
                        const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]);
                        const T topRight(images[shape::getOffset(imagesShape, topRightPos)]);
                        const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);
                        const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]);
                        // Standard bilinear blend: horizontal lerp twice, then vertical.
                        const T top = topLeft + (topRight - topLeft) * x_lerp;
                        const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
                        Nd4jLong zPos[] = {b, y, x, d};
                        auto zIndex = shape::getOffset(outputShape, zPos);
                        output[zIndex] = Z(top + (bottom - top) * y_lerp);
                    }
                }
            } else {  // method is "nearest neighbor"
                for (int x = 0; x < cropWidth; ++x) {
                    const float inX = (cropWidth > 1)
                                      ? x1 * (imageWidth - 1) + x * widthScale
                                      : 0.5 * (x1 + x2) * (imageWidth - 1);
                    if (inX < 0 || inX > imageWidth - 1) {
                        auto start = blockIdx.z * blockDim.z + threadIdx.z;  // fixed: was blockDim.x
                        auto step = blockDim.z * gridDim.z;
                        for (int d = start; d < depth; d += step) {
                            Nd4jLong zPos[] = {b, y, x, d};
                            auto zIndex = shape::getOffset(outputShape, zPos);
                            output[zIndex] = (Z)extrapolationVal;
                        }
                        continue;
                    }

                    const int closestXIndex = roundf(inX);
                    const int closestYIndex = roundf(inY);
                    auto start = blockIdx.z * blockDim.z + threadIdx.z;  // fixed: was blockDim.x
                    auto step = blockDim.z * gridDim.z;
                    for (int d = start; d < depth; d += step) {
                        Nd4jLong zPos[] = {b, y, x, d};
                        Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
                        auto zIndex = shape::getOffset(outputShape, zPos);
                        auto xIndex = shape::getOffset(imagesShape, xPos);
                        output[zIndex] = images[xIndex];
                    }
                }
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, width, height, pixels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [batch, outWidth, outHeight, pixels])
//
// Crop-and-resize driver for one (image, box, index) type combination: derives
// launch geometry from the tensor shapes and launches cropAndResizeKernel with
// one block per box.
template <typename T, typename Z, typename I>
void cropAndResizeFunctor_(sd::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->specialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->specialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->specialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->specialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
// Thread-count heuristic: cover the larger of the source image and the crop,
// capped at a quarter of the device thread maximum.
int threadsPerBlock = math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth);
if(threadsPerBlock > MAX_NUM_THREADS/4)
threadsPerBlock = MAX_NUM_THREADS/4;
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
// NOTE(review): 256 bytes of dynamic shared memory are requested but the
// kernel declares no extern shared array -- confirm this is intentional.
hipLaunchKernelGGL(( cropAndResizeKernel<T,Z,I>), dim3(batchSize), dim3(threadsPerBlock), 256, *stream, imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), indexBuf, indices->specialShapeInfo(),
cropSizes, cropSize->specialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Type-dispatched entry point for crop-and-resize: selects
// cropAndResizeFunctor_<T, Z, I> from the image, box and index data types.
void cropAndResizeFunctor(sd::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(sd::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} | f769e08f630fd4a1a4a534d641ddc3a5fb1ba4f7.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019-2020 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//
// @author George A. Shulinok <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/image_resize.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
namespace sd {
namespace ops {
namespace helpers {
// Precomputed 1-D linear interpolation data for a single output coordinate.
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear interpolation weight (fractional distance from bottomIndex; see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
// resize gives different results then a resize then a flip.
struct LegacyScaler {
_CUDA_HD LegacyScaler(){};
// Maps output coordinate x to a source coordinate as x * scale (pixel centers
// assumed at integer coordinates).
inline _CUDA_HD float operator()(const int x, const float scale) const {
return static_cast<float>(x) * scale;
}
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top,left pixel is 0.5,0.5.
struct HalfPixelScaler {
_CUDA_HD HalfPixelScaler(){};
// Maps output coordinate x to a source coordinate with pixel centers at 0.5.
inline _CUDA_HD float operator()(const int x, const float scale) const {
// Note that we subtract 0.5 from the return value, as the existing bilinear
// sampling code etc assumes pixels are in the old coordinate system.
return (static_cast<float>(x) + 0.5f) * scale - 0.5f;
}
};
// Utility functions
// calculateResizeScale determines the float scaling factor.
// Determines the floating-point factor that maps output coordinates back into
// the input. With align_corners (and more than one output sample) the corner
// pixels of input and output coincide, hence the (size - 1) ratio; otherwise
// the plain size ratio is used.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
                                  bool alignCorners) {
    if (alignCorners && outSize > 1) {
        return (inSize - 1) / static_cast<float>(outSize - 1);
    }
    return inSize / static_cast<float>(outSize);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// channels - when non-zero, indices are pre-multiplied into element offsets
// interpolationData - result (outSize + 1 entries; the extra entry is a sentinel)
//
template <class Scaler>
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
// Sentinel entry one past the last output column; every thread writes the
// same zeros here (benign race).
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
Scaler scaler;
// NOTE(review): thread 0 starts at i == outSize, so the sentinel set above is
// immediately overwritten with computed values -- confirm whether the loop was
// meant to start at outSize - 1 - tid.
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = scaler(i, scale);
// Clamp floor/ceil of the source coordinate into [0, inSize - 1].
double const in_f = sd::math::p_floor<double>(in);
double const in_c = sd::math::p_ceil<double>(in);
interpolationData[i].bottomIndex = sd::math::nd4j_max(static_cast<Nd4jLong>(in_f), (Nd4jLong)0LL);//static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = sd::math::nd4j_min(static_cast<Nd4jLong>(in_c), inSize - 1);
interpolationData[i].interpolarValue = in - in_f;
if (channels) {
// Pre-scale indices into element offsets (index * channels). Each element is
// written by exactly one thread, so the atomics are not strictly required,
// but they are kept as-is to preserve behavior.
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
// Grid mapping: blockIdx.x strides batch images, threadIdx.x strides output
// rows; each thread walks every output column and channel of its row.
//
template <typename T, typename Z>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, Z* outputYptr,
Nd4jLong const* outputShape, Nd4jLong batchSize, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels,
Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x ) { // blockIdx.x as batch index
auto pX = input + batch * inBatchNumValues;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
// Source rows above and below the output row, plus the vertical blend weight.
const T* ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T* ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + (batch * outHeight + y) * outRowSize;
for (Nd4jLong x = 0; x < outWidth; x++) {
// xs_ indices are used directly as element offsets below, so they are
// expected to be pre-scaled by the channel count -- see the 'channels'
// parameter of computeInterpolationWeights.
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = 0; c < channels; c++) {
Z topLeft(ys_input_lower_ptr[xsBottom + c]);
Z topRight(ys_input_lower_ptr[xsTop + c]);
Z bottomLeft(ys_input_upper_ptr[xsBottom + c]);
Z bottomRight(ys_input_upper_ptr[xsTop + c]);
// Standard bilinear blend: horizontal lerp twice, then vertical.
Z top = topLeft + (topRight - topLeft) * xVal;
Z bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
Z resVal = Z(top + (bottom - top) * yVal);
pZ[x * channels + c] = resVal;
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
// Host launcher for the bilinear resize kernel.
// Preconditions: images/output are 'c'-ordered NHWC; xs_/ys_ are device
// arrays already filled by the interpolation-weights kernels.
// Throws cuda_exception on kernel failure (detected at the stream sync).
template <typename T, typename F>
static void resizeImage_(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
                  Nd4jLong outWidth, Nd4jLong channels,
                  BilinearInterpolationData* xs_,
                  BilinearInterpolationData* ys_,
                  NDArray* output) {
    // row/batch strides in elements for 'c'-ordered NHWC data
    Nd4jLong inRowSize = inWidth * channels;
    Nd4jLong inBatchNumValues = inHeight * inRowSize;
    Nd4jLong outRowSize = outWidth * channels;
    auto stream = context->getCudaStream();
    T const* pInput = images->getDataBuffer()->specialAsT<T>(); // this works only with 'c' direction
    F* pOutput = output->dataBuffer()->specialAsT<F>();
    // The kernel grid-strides over batches and rows, so 256x256 covers any size.
    // It uses no dynamic shared memory — request 0 bytes (the previous 256-byte
    // request was unused); the unused dim3 locals were removed as well.
    resizeImageKernel<T,F><<<256, 256, 0, *stream>>>(pInput, images->specialShapeInfo(), pOutput,
            output->specialShapeInfo(), batchSize, outWidth, outHeight, channels, inRowSize, outRowSize,
            inBatchNumValues, xs_, ys_);
    auto err = cudaStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bilinear resize driver: allocates device caches of per-row/per-column
// interpolation data, fills them with the appropriate scaler (half-pixel or
// legacy), then runs the resize kernel. Returns ND4J status; throws
// cuda_exception on CUDA errors.
template <typename T, typename F>
static int resizeBilinearFunctor_(sd::LaunchContext* context, NDArray const* images, int const width,
        int const height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    const Nd4jLong batchSize = images->sizeAt(0);
    const Nd4jLong inHeight = images->sizeAt(1);
    const Nd4jLong inWidth = images->sizeAt(2);
    const Nd4jLong channels = images->sizeAt(3);

    const Nd4jLong outHeight = output->sizeAt(1);
    const Nd4jLong outWidth = output->sizeAt(2);

    // Handle no-op resizes efficiently.
    if (outHeight == inHeight && outWidth == inWidth) {
        output->assign(images);
        return ND4J_STATUS_OK;
    }

    float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
    float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);

    // device caches of per-column (xs_) and per-row (ys_) interpolation data
    BilinearInterpolationData* xs_;
    BilinearInterpolationData* ys_;

    cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
    }

    err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
    if (err != 0) {
        cudaFree(xs_); // BUG FIX: xs_ was leaked when this allocation failed
        throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
    }
    auto stream = context->getCudaStream();
    // Compute the cached interpolation weights on the x and y dimensions.
    if (halfPixelCenter) {
        computeInterpolationWeights <
        HalfPixelScaler ><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
        computeInterpolationWeights <
        HalfPixelScaler ><<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
    }
    else {
        computeInterpolationWeights <
        LegacyScaler ><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
        computeInterpolationWeights <
        LegacyScaler ><<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
    }
    // BUG FIX: removed a leftover debug printf here — it passed Nd4jLong (64-bit)
    // arguments to "%d" conversions, which is undefined behavior.
    NDArray::prepareSpecialUse({output}, {images});
    resizeImage_<T,F>(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
    err = cudaStreamSynchronize(*stream);
    if (err != 0) { // BUG FIX: this error was previously computed but never checked
        throw cuda_exception::build("helpers::resize_image: Cannot synchronize stream after resize", err);
    }
    NDArray::registerSpecialUse({output}, {images});

    err = cudaFree(xs_);
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
    }

    err = cudaFree(ys_);
    if (err != 0) {
        throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontical parts rectangulars", err);
    }

    return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
// Nearest-neighbor resize kernel. Expected launch: gridDim.x >= batchSize
// (one block per image) with a 1-D block — the y loop strides by blockDim.x,
// while the x loop uses threadIdx.y/blockDim.y and degenerates to a serial
// walk for 1-D blocks (as launched by resizeNeighborFunctor_ below).
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape,
        Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool alignCorners, bool halfPixelCenters) {
    //for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
    if (blockIdx.x < batchSize)
    {
        auto b = blockIdx.x;
        for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
            // align-corners rounds to nearest; otherwise floor. half-pixel mode
            // samples at pixel centers (+0.5 before scaling).
            auto posY = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
                    halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale));
            Nd4jLong inY = sd::math::nd4j_min(posY, inHeight - 1);
            if (halfPixelCenters) {
                // half-pixel sampling can go negative near the top edge
                inY = sd::math::nd4j_max(0LL, inY);
            }
            for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
                auto posX = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
                        halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale));
                Nd4jLong inX = sd::math::nd4j_min(posX, inWidth - 1);
                if (halfPixelCenters) {
                    inX = sd::math::nd4j_max(0LL, inX);
                }
                auto start = blockIdx.z * blockDim.z + threadIdx.z;
                auto step = blockDim.z * gridDim.z;
                // NOTE(review): the coordinate array below shadows the scalar
                // `posX` above — legal, but easy to misread.
                for (Nd4jLong e = start; e < channels; e += step) {
                    Nd4jLong posX[] = {b, inY, inX, e};
                    Nd4jLong posZ[] = {b, y, x, e};
                    auto xIndex = shape::getOffset(inputShape, posX);
                    auto zIndex = shape::getOffset(outputShape, posZ);
                    output[zIndex] = input[xIndex];
                }
            }
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
// resizeNeighborFunctor_ - nearest-neighbor resize driver.
// Launches one block per batch image; the kernel block-strides over output
// rows, so the block size only affects parallelism, never coverage.
template <typename T>
int resizeNeighborFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
        bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
    const Nd4jLong batchSize = images->sizeAt(0);
    const Nd4jLong inHeight = images->sizeAt(1);
    const Nd4jLong inWidth = images->sizeAt(2);
    const Nd4jLong channels = images->sizeAt(3);

    const Nd4jLong outHeight = output->sizeAt(1);
    const Nd4jLong outWidth = output->sizeAt(2);

    // Handle no-op resizes efficiently.
    if (outHeight == inHeight && outWidth == inWidth) {
        output->assign(images);
        return ND4J_STATUS_OK;
    }

    float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
    float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);

    auto imagesBuffer = images->getDataBuffer()->specialAsT<T>();
    auto outputBuffer = output->dataBuffer()->specialAsT<T>();
    auto stream = context->getCudaStream();

    NDArray::prepareSpecialUse({output}, {images});
    // BUG FIX: the block size was outHeight * outWidth, which exceeds the CUDA
    // per-block thread limit (1024) for outputs larger than 32x32 and made the
    // launch fail silently. Clamp it; the kernel's row loop strides by
    // blockDim.x, so a smaller block still covers every row.
    const int threadsPerBlock = static_cast<int>(sd::math::nd4j_min<Nd4jLong>(outHeight * outWidth, 1024LL));
    resizeNeighborKernel<T><<<batchSize, threadsPerBlock, 512, *stream>>>(imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(),
            batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, alignCorners, halfPixelCenters);
    auto err = cudaStreamSynchronize(*stream);
    if (err != 0) { // surface kernel/launch failures instead of ignoring them
        throw cuda_exception::build("helpers::resizeNeighborFunctor_: Cannot synchronize kernel execution", err);
    }
    NDArray::registerSpecialUse({output}, {images});

    return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
// resizeImage - bilinear resize caller: dispatches on the (input, output)
// data-type pair to the templated resizeImage_<T,F> implementation above.
void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
        Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
        BilinearInterpolationData* ys_, NDArray* output) {
    BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(),
            resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels,
            xs_, ys_, output), NUMERIC_TYPES, FLOAT_TYPES);
}
// Explicit instantiations of resizeImage_ for every (NUMERIC, FLOAT) type pair.
BUILD_DOUBLE_TEMPLATE(template void resizeImage_,(sd::LaunchContext* context, NDArray const* images,
        Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
        Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output),
        NUMERIC_TYPES, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Public entry for bilinear resize: dispatches on the (input, output)
// data-type pair to resizeBilinearFunctor_<T,F>.
int resizeBilinearFunctor(sd::LaunchContext* context, NDArray const* images, int width, int height,
        bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(), return resizeBilinearFunctor_, (context, images,
            width, height, alignCorners, halfPixelCenter, output), NUMERIC_TYPES, FLOAT_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (sd::LaunchContext* context,
// NDArray const* images, int const width, int const height, bool const alignCorners,
// bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Public entry for nearest-neighbor resize: dispatches on the input data type
// (output shares the same type) to resizeNeighborFunctor_<T>.
int resizeNeighborFunctor(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
        bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
    BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_,
            (context, images, width, height, alignCorners, halfPixelCenter, output), LIBND4J_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (sd::LaunchContext* context, NDArray const* images,
// int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Holds the geometry and scale factors shared by the bicubic resize path.
// Filled by validateAndCalculateOutputSize; copied to device memory before
// the interpolation kernel runs.
struct ImageResizerState {
    explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
            : _alignCorners(alignCorners),
              _halfPixelCenters(halfPixelCenters) {}

    // ValidateAndCalculateOutputSize checks the bounds on the input tensors
    // and requested size, sets up some of the resizing state such as the
    // heightScale and widthScale, and calculates the output size.
    // If any of these operations fails, it sets an error status in
    // the context, which the caller must check.
    int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
        //
        batchSize = input->sizeAt(0);//.dim_size(0);
        outHeight = height;
        outWidth = width; //internal::SubtleMustCopy(Svec(1));
        inHeight = static_cast<int32_t>(input->sizeAt(1));
        inWidth = static_cast<int32_t>(input->sizeAt(2));
        channels = input->sizeAt(3); //.dim_size(3);
        heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
        widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);

        // Guard against overflows
        if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
            nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
            return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
        }
        // BUG FIX: this check previously multiplied outWidth by heightScale (and
        // printed the height product), so width overflow was never detected.
        if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
            nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
            return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
        }

        return Status::OK();
    }

    // Calculates all the required variables, and allocates the output.
    int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
        return validateAndCalculateOutputSize(input, width, height);
    }

    Nd4jLong batchSize;
    Nd4jLong outHeight;
    Nd4jLong outWidth;
    Nd4jLong inHeight;
    Nd4jLong inWidth;
    Nd4jLong channels;
    float heightScale;  // input rows per output row (align-corners aware)
    float widthScale;   // input cols per output col (align-corners aware)
    NDArray* output = nullptr;
    cudaStream_t* stream;
private:
    bool _alignCorners;
    bool _halfPixelCenters;
};
// Per-output-pixel bicubic sampling data: the four source indices and their
// cubic-kernel weights, plus the cache-reuse hint computed by
// CachedInterpolationCalculator::Advance.
struct WeightsAndIndices {
    float _weight0;
    float _weight1;
    float _weight2;
    float _weight3;
    Nd4jLong _index0;
    Nd4jLong _index1;
    Nd4jLong _index2;
    Nd4jLong _index3;

    int _advance;  // advance value: how many cached values survive from the previous pixel
};
// Tracks the last four source indices so consecutive output pixels can reuse
// already-interpolated column values instead of recomputing them.
class CachedInterpolationCalculator {
public:
    _CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}

    // Advances iteration. Returns the number of values that should be copied from
    // the current point to the next point. The copying should always be done by
    // copying the last <retval> values from the old point to the first <retval>
    // values of the new point.
    inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
                       const Nd4jLong x3) {
        // We use 2 hands and walk through, copying from one to another where
        // we already have values.
        // Invariant, new_indicies_hand <= cached_values_hand
        const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
        int cachedValuesHand = 0;
        int newIndiciesHand = 0;

        while (cachedValuesHand < 4) {
            if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
                if (newIndiciesHand < cachedValuesHand) {
                    _indexes[newIndiciesHand] = _indexes[cachedValuesHand];
                }
                newIndiciesHand++;
            }
            cachedValuesHand++;
        }
        // Intentional fallthrough: starting at case `newIndiciesHand`, every
        // later index slot is overwritten with the new value, so after this
        // switch _indexes == {x0, x1, x2, x3}.
        switch (newIndiciesHand) {
            case 0:
                _indexes[0] = x0;
            case 1:
                _indexes[1] = x1;
            case 2:
                _indexes[2] = x2;
            case 3:
                _indexes[3] = x3;
                break;
        }
        return newIndiciesHand;
    }

private:
    Nd4jLong _indexes[4];  // last four source indices handed to Advance (-1 = empty)
};
// Fills the bicubic coefficient lookup table, two weights per slot:
//   table[i*2]     — kernel weight for |x| <= 1 at x = i / tableSize
//   table[i*2 + 1] — kernel weight for 1 < |x| <= 2 at x = i / tableSize + 1
// The loop bound is inclusive, so tableSize + 1 slots are written — matching
// the (kTableSize + 1) * 2 allocation in initCoeffsTable below.
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
    auto start = blockIdx.x * blockDim.x + threadIdx.x;
    auto step = blockDim.x * gridDim.x;
    // grid-stride loop over table slots (note: inclusive upper bound)
    for (int i = start; i <= tableSize; i += step) {
        float x = i * 1.0 / tableSize;
        table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
        x += 1.0;
        table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
    }
}
// Number of quantization steps in the precomputed bicubic coefficient table.
static const Nd4jLong kTableSize = (1 << 10);

// Allocate and initialize coefficients table using Bicubic
// convolution algorithm.
// https://en.wikipedia.org/wiki/Bicubic_interpolation
// Returns a device pointer to (kTableSize + 1) * 2 floats; ownership passes to
// the caller, who must release it with cudaFree (see
// bicubicInterpolateWithCaching). Throws cuda_exception on failure.
float* initCoeffsTable(const double a, cudaStream_t* stream) {
    float* coeffs_table; // = new float[(kTableSize + 1) * 2];
    auto err = cudaMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
    if (err != 0) {
        throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for vertical parts rectangulars", err);
    }

    initCoefTableKernel<<<128,128,128, *stream>>>(a, coeffs_table, kTableSize);
    err = cudaStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::initCoeffsTable: Cannot syncronize kernel", err);
    }

    return coeffs_table;
}
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
// // Processing, 29(6):1153–1160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
// Clamp an index into the valid range [0, limit - 1].
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
    if (val < 0)
        return 0;
    const Nd4jLong last = limit - 1ll;
    return val > last ? last : val;
}
// Weighted sum of four cubic-kernel taps, accumulated in float precision.
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
        const T value0, const T value1, const T value2, const T value3) {
    float accum = static_cast<float>(value0) * weight0;
    accum += static_cast<float>(value1) * weight1;
    accum += static_cast<float>(value2) * weight2;
    accum += static_cast<float>(value3) * weight3;
    return accum;
}
// Compute the 1D interpolation for a given X index using the y_weights
// Compute the 1D interpolation for a given X index using the y_weights:
// folds the four cached column values with the x-axis weights.
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
    const float* cached = values;
    return interpolate1D<float>(xW0, xW1, xW2, xW3, cached[0], cached[1], cached[2], cached[3]);
}
// Computes the four source indices and cubic weights for one output location.
// Scaler maps output coordinate -> input coordinate (half-pixel or legacy);
// use_keys_cubic selects the Keys (a = -0.5) edge handling, where out-of-range
// taps get zero weight and the remaining weights are renormalized.
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
    const Scaler scaler;
    const float in_loc_f = scaler(out_loc, scale);
    const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
    const float delta = in_loc_f - in_loc;
    // quantize the fractional part to a row of the precomputed table
    const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
    //const float* coeffs_table = getCoeffsTable(use_keys_cubic);
    if (use_keys_cubic) {
        // The legacy code placed more weight on the edge pixels, since bounding
        // the set of inputs to sample could cause an edge pixel to be repeated.
        // Here we change the behavior at borders to match that used by the
        // scale_and_translate_op, where sampling locations outside the image have
        // their weight set to 0, and the weights are renormalized so that their sum
        // is 1.0.
        out->_index0 = bound(in_loc - 1, limit);
        out->_weight0 =
                (out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
        out->_index1 = bound(in_loc, limit);
        out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
        out->_index2 = bound(in_loc + 1, limit);
        out->_weight2 =
                (out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
                                            : 0.0f);
        out->_index3 = bound(in_loc + 2, limit);
        out->_weight3 = (out->_index3 == in_loc + 2
                         ? coeffs_table[(kTableSize - offset) * 2 + 1]
                         : 0.0f);

        const float weight_sum =
                out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
        // renormalize unless the sum is numerically negligible
        if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
            const float one_over_weight_sum = 1.0f / weight_sum;
            out->_weight0 *= one_over_weight_sum;
            out->_weight1 *= one_over_weight_sum;
            out->_weight2 *= one_over_weight_sum;
            out->_weight3 *= one_over_weight_sum;
        }
    } else {
        // legacy mode: weights come straight from the table, indices are clamped
        out->_weight0 = coeffs_table[offset * 2 + 1];
        out->_weight1 = coeffs_table[offset * 2];
        out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
        out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
        out->_index0 = bound(in_loc - 1, limit);
        out->_index1 = bound(in_loc, limit);
        out->_index2 = bound(in_loc + 1, limit);
        out->_index3 = bound(in_loc + 2, limit);
    }
}
// Scales each pixel's four x-indices by the channel count so they can be used
// directly as element offsets into an interleaved (NHWC) row buffer.
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto stride = blockDim.x * gridDim.x;

    // grid-stride loop over output columns
    for (auto x = tid; x < outWidth; x += stride) {
        WeightsAndIndices& wai = pXWais[x];
        wai._index0 *= channels;
        wai._index1 *= channels;
        wai._index2 *= channels;
        wai._index3 *= channels;
    }
}
// Fills pXWais[x] for every output column (grid-stride loop): four source
// indices + weights via getWeightsAndIndices, then the cache-reuse hint from
// calc->Advance.
// NOTE(review): all threads of the <<<128,128>>> launch share the single
// device-side `calc` object, and Advance mutates its internal state — the
// concurrent, strided calls race on it, so the _advance values computed here
// may not match a sequential left-to-right walk. Confirm whether the bicubic
// kernel's cache-shift logic tolerates this.
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
        Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
    auto start = blockIdx.x * blockDim.x + threadIdx.x;
    auto step = blockDim.x * gridDim.x;

    for (auto x = start; x < outWidth; x += step) {
        if (halfPixelCenters)
            getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
        else
            getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
        pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
    }
}
// resizerState and xWais are device allocated
// resizerState and xWais are device allocated
// Fills pXWais[0 .. outWidth-1] with bicubic x-axis weights/indices, then
// rescales the indices by channel count so they act as buffer offsets.
// Throws cuda_exception on any CUDA failure.
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
        const bool halfPixelCenters,
        WeightsAndIndices* pXWais) {
    auto stream = resizerState.stream;
    auto outWidth = resizerState.outWidth;
    CachedInterpolationCalculator calc; // host template, copied to device below
    CachedInterpolationCalculator* pCalcD;
    auto err = cudaMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
    if (err != 0) { // BUG FIX: exceptions were built but never thrown
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
    }
    err = cudaMemcpyAsync(pCalcD, &calc, sizeof(CachedInterpolationCalculator), cudaMemcpyHostToDevice, *stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
    }

    advaceWeightsAndIndicesKernel<<<128, 128, 128, *stream>>>(coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
    // BUG FIX: synchronize BEFORE freeing pCalcD. The kernel above runs
    // asynchronously and dereferences pCalcD; the original code freed it
    // immediately after the launch, a use-after-free race.
    err = cudaStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
    }
    err = cudaFree(pCalcD);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
    }
    // Scale the values so they can be used as offsets into buffers.
    accumulateChannelsKernel<<<128, 128, 512, *stream>>>(pXWais, outWidth, resizerState.channels);
    err = cudaStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
    }
}
// Interpolates one column (tap `which` of 0..3, channel `channelNum`) along
// the y axis: picks the x-index for that tap from xWai, then folds the four
// bracketing rows with the y-axis weights.
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
        int which, int channelNum, const WeightsAndIndices& yWai,
        const T* pY0, const T* pY1, const T* pY2, const T* pY3,
        const WeightsAndIndices& xWai) {
    int xIndex;
    switch (which) {
        case 0:
            xIndex = xWai._index0;
            break;
        case 1:
            xIndex = xWai._index1;
            break;
        case 2:
            xIndex = xWai._index2;
            break;
        default:
            xIndex = xWai._index3;
            break;
    }
    // x-indices were premultiplied by the channel count, so adding the
    // channel number yields the element offset within the row
    const Nd4jLong pt_index = xIndex + channelNum;
    return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
                            yWai._weight3, pY0[pt_index], pY1[pt_index],
                            pY2[pt_index], pY3[pt_index]);
}
// Bicubic resize kernel with per-row caching of interpolated column values.
// Launched as <<<128, 1, 512>>> (see bicubicInterpolateWithCaching): blocks
// grid-stride over batch images; with blockDim.x == 1 each block's single
// thread walks its rows serially, so the x-wise cache reuse (driven by
// xWai._advance) sees columns strictly left-to-right as required.
// NOTE(review): `cachedValue` is only assigned inside the threadIdx.x == 0
// branch but is dereferenced in the generic path below; this is safe solely
// because blockDim.x == 1 in the current launch — confirm before changing the
// launch configuration.
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, float* outputPtr) {
//    auto numChannels = pResizerState->channels;
    for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
        auto pInput = inputPtr + b * inBatchWidth;
        float* cachedValue;
        for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
            if (threadIdx.x == 0) {
                // dynamic shared memory holds 4 floats per channel (generic path)
                extern __shared__ char sharedChar[];
                cachedValue = reinterpret_cast<float*>(sharedChar);
            }
            auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
            auto pOutput = &outputPtr[pos];
            struct WeightsAndIndices yWai;
            if (halfPixelCenters) {
                getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
            } else {
                getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
            }
            // Make pointers represent offsets of data in inputBPtr.
            const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
            const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
            const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
            const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;

            if (pResizerState->channels == 3) {
                // Manually unroll case of 3 channels.
                float cached_value_0[4] = {0};
                float cached_value_1[4] = {0};
                float cached_value_2[4] = {0};

                for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
                    const WeightsAndIndices& xWai = xWais[x];
                    // Shift values in cached_value_* to fill first '_advance' values.
                    switch (xWai._advance) {
                        case 3:
                            cached_value_0[0] = cached_value_0[1];
                            cached_value_0[1] = cached_value_0[2];
                            cached_value_0[2] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[1];
                            cached_value_1[1] = cached_value_1[2];
                            cached_value_1[2] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[1];
                            cached_value_2[1] = cached_value_2[2];
                            cached_value_2[2] = cached_value_2[3];
                            break;
                        case 2:
                            cached_value_0[0] = cached_value_0[2];
                            cached_value_0[1] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[2];
                            cached_value_1[1] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[2];
                            cached_value_2[1] = cached_value_2[3];
                            break;
                        case 1: {
                            cached_value_0[0] = cached_value_0[3];
                            cached_value_1[0] = cached_value_1[3];
                            cached_value_2[0] = cached_value_2[3];
                            break;
                        }
                    }

                    // Set the remaining '4-_advance' values by computing.
                    // (intentional fallthrough: starting at case `_advance`, all
                    // later taps are recomputed)
                    switch (xWai._advance) {
                        case 0:
                            cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 1:
                            cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 2:
                            cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                        case 3:
                            cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
//                            break;
                    }

                    pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
                                                          xWai._weight2, xWai._weight3);
                    pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
                                                          xWai._weight2, xWai._weight3);
                    pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
                                                          xWai._weight2, xWai._weight3);
                }
            } else {
                // generic channel count: cache lives in dynamic shared memory
                for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
                    const WeightsAndIndices& xWai = xWais[x];
                    // Shift values in cachedValue to fill first '_advance' values.
                    switch (xWai._advance) {
                        case 3:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
                                cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
                                cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
                            }
                            break;
                        case 2:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
                                cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
                            }
                            break;
                        case 1: {
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
                            }
                            break;
                        }
                    }

                    // Set the remaining '4-_advance' values by computing.
                    // (intentional fallthrough, as in the 3-channel path)
                    switch (xWai._advance) {
                        case 0:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 1:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 2:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
                        case 3:
                            for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                                cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
                            }
//                            break;
                    }
                    for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
                        pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
                    }
                }
            }
        }
    }
}
// Bicubic resize driver: copies the resizer state to the device, builds the
// coefficient table and x-axis weight cache, then launches the caching
// interpolation kernel. Output is always float. Throws cuda_exception on any
// CUDA failure.
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
    const auto numChannels = resizerState.channels;
    const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
    const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
    auto stream = resizerState.stream; //output->getContext()->getCudaStream();

    // device copy of the resizer state, read by the kernel
    ImageResizerState* resizerStateD;
    auto err = cudaMalloc(&resizerStateD, sizeof(ImageResizerState));
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
    }
    err = cudaMemcpyAsync(resizerStateD, &resizerState, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
    }

    WeightsAndIndices* xWais; //(resizerState.outWidth);
    err = cudaMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
    }

    // Keys cubic (a = -0.5) for half-pixel centers, legacy (a = -0.75)
    // otherwise. initCoeffsTable throws on failure itself; the stale re-check
    // of `err` that used to sit here was dead code and has been removed.
    auto coeffsTable = halfPixelCenters?initCoeffsTable(-0.5, stream): initCoeffsTable(-0.75, stream);
    computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
    err = cudaStreamQuery(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
    }

    const T* pInput = image->getDataBuffer()->specialAsT<T>();
    float* pOutput = output->dataBuffer()->specialAsT<float>(); //_data.data();
    // NOTE(review): launched with blockDim.x == 1 (the kernel's shared-memory
    // cache pointer is only set by thread 0) and 512 bytes of dynamic shared
    // memory, i.e. 4 floats per channel for up to 32 channels — confirm inputs
    // with more channels are rejected upstream.
    bicubicInterpolateWithCachingKernel<T><<<128, 1, 512, *stream>>>(coeffsTable, pInput,
            resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
    err = cudaStreamSynchronize(*stream);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
    }

    err = cudaFree(resizerStateD);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
    }
    err = cudaFree(xWais);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
    }
    err = cudaFree(coeffsTable);
    if (err != 0) {
        throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic resize with preserve-aspect-ratio / antialias options.
// NOTE(review): this is an unimplemented stub -- it returns Status::OK()
// without touching `output`. Presumably the antialiased bicubic path was
// never ported to CUDA; confirm against the CPU implementation.
template <typename T>
int resizeBicubicFunctor_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
// Type dispatcher: routes to resizeBicubicFunctor_<T> based on the image's data type.
int resizeBicubicFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
// Explicit instantiations of the typed resize stub for every numeric type.
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// Per-output-column data for area resize: the range of source columns that
// contribute to one destination column, plus the fractional weights of the
// first and last columns of that range (filled by fillInterpolationCache).
struct CachedInterpolation {
Nd4jLong start;         // first contributing source column (floor(x * widthScale))
Nd4jLong end;           // one past the last contributing source column (ceil((x+1) * widthScale))
float startScale;       // weight of column `start`
float endMinusOneScale; // weight of column `end - 1`
bool needsBounding;     // true when start or end-1 fall outside [0, inWidth) and must be clamped
};
// One thread per destination column (grid-stride loop): precomputes, for each
// destination column x, the contributing source-column range [start, end) and
// the fractional weights of its first and last columns.
static __global__ void fillInterpolationCache(CachedInterpolation* xCached, Nd4jLong cacheLen, Nd4jLong inWidth, float widthScale) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto increment = blockDim.x * gridDim.x;
for (auto x = start; x < cacheLen; x += increment) {
auto& xCache = xCached[x];
// source-space interval covered by destination column x: [inX, inX1)
const float inX = x * widthScale;
const float inX1 = (x + 1) * widthScale;
Nd4jLong v = math::nd4j_floor<float, Nd4jLong>(inX);
xCache.start = v;
// weight of the first column: overlap of source cell [v, v+1) with [inX, inX1)
xCache.startScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
v = math::nd4j_ceil<float, Nd4jLong>(inX1);
xCache.end = v--; // end = ceil(inX1); v now indexes the last contributing column
// weight of the last column (end - 1), same overlap formula
xCache.endMinusOneScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
// does any contributing column fall outside [0, inWidth)?
xCache.needsBounding = bound(xCache.start, inWidth) != xCache.start || bound(xCache.end - 1, inWidth) != (xCache.end - 1);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Per-source-row data for area resize: the vertical weight of the row and a
// pointer to its (already bounds-clamped) pixel data.
template <typename T>
struct ScaleCache {
float yScale;  // vertical weight of this source row
T const* yPtr; // pointer to the start of the source row
};
// Computes the sum of all x values defined by <x_interp> taken across
// the y offsets and scales defined by y_ptrs and y_scales, for channel c.
//
// Note that <NeedsXBounding> is a template parameter to avoid a performance
// penalty from dynamically checking it.
// Computes, for channels 0..2, the area-weighted patch sum across the source
// rows in yScaleCache and the source-column range described by xCache, writing
// scale * sum into outputPtr[0..2]. This routine is the unrolled fast path for
// exactly three channels; computePatchSum handles the general case.
template <typename T>
static __device__ void computePatchSumOf3Channels(float scale,
const ImageResizerState& st,
ScaleCache<T> const* yScaleCache,
Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
// clamp the column index only when xCache says the range may leave the image
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
for (int i = 0; i < ptrsLen; ++i) {
const T* ptr = yScaleCache[i].yPtr;
// first contributing column, weighted by its fractional overlap
float scaleX = xCache.startScale;
Nd4jLong offset = 3 * boundIfNeeded(xCache.start, st.inWidth);
float sum_y_0 = static_cast<float>(ptr[offset + 0]) * scaleX;
float sum_y_1 = static_cast<float>(ptr[offset + 1]) * scaleX;
float sum_y_2 = static_cast<float>(ptr[offset + 2]) * scaleX;
if (xCache.start + 1 != xCache.end) {
// interior columns contribute with full weight
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
Nd4jLong offset = 3 * boundIfNeeded(x, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]);
sum_y_1 += static_cast<float>(ptr[offset + 1]);
sum_y_2 += static_cast<float>(ptr[offset + 2]);
}
// last contributing column, weighted by its fractional overlap.
// Consistency fix: use the literal 3 here as above -- this routine is
// 3-channel only (callers gate on st.channels == 3); the original mixed
// `3 *` for the first column with `st.channels *` for the last.
scaleX = xCache.endMinusOneScale;
offset = 3 * boundIfNeeded(xCache.end - 1, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]) * scaleX;
sum_y_1 += static_cast<float>(ptr[offset + 1]) * scaleX;
sum_y_2 += static_cast<float>(ptr[offset + 2]) * scaleX;
}
// accumulate the weighted row sum with the row's vertical weight
sum_0 += sum_y_0 * yScaleCache[i].yScale;
sum_1 += sum_y_1 * yScaleCache[i].yScale;
sum_2 += sum_y_2 * yScaleCache[i].yScale;
}
outputPtr[0] = sum_0 * scale;
outputPtr[1] = sum_1 * scale;
outputPtr[2] = sum_2 * scale;
}
// Computes the sum of all x values defined by <x_interp> taken across
// the y offsets and scales defined by y_ptrs and y_scales, for channel c.
//
// Note that <NeedsXBounding> is a template parameter to avoid a performance
// penalty from dynamically checking it.
// Generic (any channel count) counterpart of computePatchSumOf3Channels:
// outputPtr[c] = scale * sum_i yScale[i] * (weighted column sum over xCache range).
template <typename T>
static __device__ void computePatchSum(float scale, const ImageResizerState& st,
ScaleCache<T> const* yScaleCache, Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
// clamp the column index only when xCache says the range may leave the image
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
const auto numChannels = st.channels;
for (Nd4jLong c = 0; c < numChannels; ++c) {
float sum = 0;
for (int i = 0; i < ptrsLen; ++i) {
T const* ptr = yScaleCache[i].yPtr;
// first contributing column carries the fractional startScale weight
float scaleX = xCache.startScale;
float sumY = static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.start, st.inWidth) + c]) * scaleX;
if (xCache.start + 1 != xCache.end) {
// interior columns contribute with full weight
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
sumY += static_cast<float>(
ptr[numChannels * boundIfNeeded(x, st.inWidth) + c]);
}
// last contributing column carries the fractional end weight
scaleX = xCache.endMinusOneScale;
sumY += static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.end - 1, st.inWidth) + c]) * scaleX;
}
sum += sumY * yScaleCache[i].yScale;
}
outputPtr[c] = sum * scale;
}
}
// Area-resize kernel: gridDim.x strides over batches, blockDim.x strides over
// output rows; each thread produces one full output row (all columns and
// channels). cachePool provides per-(batch, row) scratch of ScaleCache entries
// (outWidth slots per row) holding the contributing source rows and weights.
template <typename T>
static __global__ void resizeAreaKernel(ImageResizerState const* pSt, CachedInterpolation const* caches, float scale,
T const* inputPtr, Nd4jLong const* inputShape, float* outputPtr, Nd4jLong const* outputShape, ScaleCache<T>* cachePool) { //batch * outWidth * outHeight
for (auto batch = blockIdx.x; batch < pSt->batchSize; batch += gridDim.x) {
for (auto y = threadIdx.x; y < pSt->outHeight; y += blockDim.x) {
const float inY = y * pSt->heightScale;
const float inY1 = (y + 1) * pSt->heightScale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
const Nd4jLong yStart = math::nd4j_floor<float, Nd4jLong>(inY);
const Nd4jLong yEnd = math::nd4j_ceil<float, Nd4jLong>(inY1);
auto scalesDim = yEnd - yStart;
auto yScaleCache = cachePool + (batch * pSt->outHeight + y) * pSt->outWidth;
//auto startPtr = sharedPtr + y * scalesDim * sizeof(float);
//float* yScales = yScalesShare + y * sizeof(float) * scalesDim;//reinterpret_cast<float*>(startPtr); //shared + y * scalesDim * y + scalesDim * sizeof(T const *) [scalesDim];
//T const** yPtrs = yPtrsShare + y * sizeof(T const*) * scalesDim; //[scalesDim];
//yPtrs = reinterpret_cast<T const**>(sharedBuf);
float* output = outputPtr + (batch * pSt->outHeight + y) * pSt->channels * pSt->outWidth;
//int k = 0;
// fill the per-row cache: each contributing source row i gets its vertical
// weight (fractional at the interval edges, 1 for fully covered rows) and
// a pointer to the clamped source row
for (Nd4jLong i = yStart, k = 0; i < yEnd; ++i, ++k) {
float scaleY;
if (i < inY) {
scaleY = (i + 1 > inY1 ? pSt->heightScale : i + 1 - inY);
} else {
scaleY = (i + 1 > inY1 ? inY1 - i : 1.0);
}
yScaleCache[k].yScale = scaleY;
yScaleCache[k].yPtr = inputPtr + (batch * pSt->inHeight * pSt->inWidth * pSt->channels + bound(i, pSt->inHeight) * pSt->inWidth * pSt->channels);
}
// 3-channel images take the unrolled fast path
if (pSt->channels == 3) {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation& xCache = caches[x];
computePatchSumOf3Channels<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
} else {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation &xCache = caches[x];
computePatchSum<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
}
}
}
}
// Host-side driver for area resize: copies the resizer state to the device,
// allocates the per-(batch, row) ScaleCache scratch pool, launches
// resizeAreaKernel and releases the temporaries.
// NOTE(review): if a later step throws, the earlier cudaMalloc'd buffers leak
// (the frees are skipped) -- consistent with the rest of this file, but an
// RAII wrapper would be safer.
template <typename T>
static void resizeArea(cudaStream_t* stream, ImageResizerState const& st, CachedInterpolation* cache,
NDArray const* input, NDArray* output) {
T const* inputPtr = reinterpret_cast<T const*>(input->specialBuffer());
// float* yScales;
// T const** yPtrs;
// normalization factor: inverse of the source area covered by one output cell
float scale = 1.f / (st.heightScale * st.widthScale);
auto outputPtr = reinterpret_cast<float*>(output->specialBuffer()); // output is always float. TO DO: provide another float types also with template <typename X, typename Z> declaration
ImageResizerState* pSt;
auto err = cudaMalloc(&pSt, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for ImageResizerState", err);
}
err = cudaMemcpyAsync(pSt, &st, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot copy to device memory", err);
}
// one ScaleCache slot per (batch, outRow, outCol) -- scratch for the kernel
ScaleCache<T>* cachePool;
auto cachePoolSize = sizeof(ScaleCache<T>) * st.batchSize * st.outWidth * st.outHeight;
err = cudaMalloc(&cachePool, cachePoolSize);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for cache", err);
}
resizeAreaKernel<T><<<128, 128, 2048, *stream>>>(pSt, cache, scale, inputPtr, input->specialShapeInfo(), outputPtr,
output->specialShapeInfo(), cachePool);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: An error occured with kernel running", err);
}
err = cudaFree(cachePool);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for cache", err);
}
err = cudaFree(pSt);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for ImageResizeState", err);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Typed entry point for area resize: validates the target size, builds the
// per-column interpolation cache on the device, then runs the resize.
template <typename T>
int resizeAreaFunctor_(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
ImageResizerState st(alignCorners, false); // Create resize info
auto res = st.validateAndCalculateOutputSize(image, width, height);
auto stream = context->getCudaStream();
if (Status::OK() == res) {
CachedInterpolation* xCached;
//(st.outWidth)
auto err = cudaMalloc(&xCached, sizeof(CachedInterpolation) * st.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot allocate memory for cached interpolations", err);
}
NDArray::prepareSpecialUse({output}, {image});
// NOTE(review): 256 bytes of dynamic shared memory are requested but the
// kernel declares none -- presumably a leftover; harmless but confirm.
fillInterpolationCache<<<128, 128, 256, *stream>>>(xCached, st.outWidth, st.inWidth, st.widthScale);
resizeArea<T>(stream, st, xCached, image, output);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Error occured when kernel was running", err);
}
err = cudaFree(xCached);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot deallocate memory for cached interpolations", err);
}
NDArray::registerSpecialUse({output}, {image});
}
return res;
}
// Type dispatcher: routes to resizeAreaFunctor_<T> based on the image's data type.
int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), NUMERIC_TYPES);
}
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
// Typed entry point for the non-antialiased bicubic resize: validates/creates
// the output, then runs the cached bicubic interpolation on the device stream.
template <typename T>
int resizeBicubicFunctorA_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
// Type dispatcher for the non-antialiased bicubic resize, plus explicit
// instantiations of the typed implementation for all numeric input types.
int resizeBicubicFunctorA(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (sd::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// Unified resize entry point: dispatches to the helper implementing the
// requested method (bilinear / nearest / bicubic / area). Throws for any
// method value outside the supported set.
int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
ImageResizeMethods method, bool alignCorners, NDArray* output) {
    if (method == kResizeBilinear)
        return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeNearest)
        return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeBicubic)
        return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
    if (method == kResizeArea)
        return resizeAreaFunctor(context, image, width, height, alignCorners, output);
    throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method.");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel type of input(images) and output should be the same
//
// Crop-and-resize kernel: blockIdx.x strides over boxes, threadIdx.x over crop
// rows, (threadIdx.y, z-dimension) over crop columns / channels. For each box
// the source batch index comes from `indices`; pixels outside the source image
// receive extrapolationVal. method 0 = bilinear, anything else = nearest.
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong const* imagesShape, Z const* boxes, Nd4jLong const* boxesShape,
I const* indices, Nd4jLong const* indexShape, I const* cropSize, Nd4jLong const* cropShape, int method,
double extrapolationVal, T* output, Nd4jLong const* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
// box b is stored as (y1, x1, y2, x2) in normalized coordinates
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue; // skip boxes that reference a non-existent batch entry
}
// scale from crop coordinates to source-image coordinates
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
// whole row is outside the source image: extrapolate
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = sd::math::p_floor(inY);
const int bottomYIndex = sd::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
// NOTE(review): x_lerp has type T -- for integer image types the
// fractional part truncates to 0, degenerating the horizontal lerp
// to nearest-left sampling; confirm whether float was intended.
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
// fetch the four neighbors and lerp horizontally then vertically
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
// round to the nearest source pixel and copy it across channels
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, width, height, pixels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [batch, outWidth, outHeight, pixels])
//
// Host-side launcher for cropAndResizeKernel: extracts tensor dimensions,
// sizes the launch (one block per box) and runs the kernel on the context's
// stream.
template <typename T, typename Z, typename I>
void cropAndResizeFunctor_(sd::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->specialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->specialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->specialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->specialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
// block size follows the larger of source/crop areas, capped to avoid
// exceeding the per-block thread limit
int threadsPerBlock = math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth);
if(threadsPerBlock > MAX_NUM_THREADS/4)
threadsPerBlock = MAX_NUM_THREADS/4;
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
cropAndResizeKernel<T,Z,I><<<batchSize, threadsPerBlock, 256, *stream>>>(imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), indexBuf, indices->specialShapeInfo(),
cropSizes, cropSize->specialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Triple-type dispatcher (image type, box type, index type) for crop-and-resize,
// plus explicit instantiations for the supported type combinations.
void cropAndResizeFunctor(sd::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(sd::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} |
7f34446ac2d59b1eccb93be6cfee13ee411244ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define p_Nthreads 128
// Naive tree reduction (interleaved addressing): each block sums its chunk of
// x in shared memory and writes one partial sum to xout[blockIdx.x]. The
// `tid % (2*s)` test diverges within warps and idles most threads.
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*blockDim.x + tid;
// load smem
s_x[tid] = 0;
if (i < N){
s_x[tid] = x[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2){
// strided access
// s = 1 -> [0, 2, 4, 8, ...]
// s = 2 -> [0, 4, 8, 16, ...]
if (tid % (2*s)==0){
s_x[tid] += s_x[tid + s];
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Tree reduction, variant 2: replaces the modulo test of `reduce` with an
// explicit index computation so active threads stay contiguous; the strided
// shared-memory access still causes bank conflicts (see inline comment).
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce1(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*blockDim.x + tid;
// load smem
s_x[tid] = 0;
if (i < N){
s_x[tid] = x[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2){
int index = 2*s*tid;
if (index < blockDim.x){
s_x[index] += s_x[index+s]; // bank conflicts
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Tree reduction, variant 3 (sequential addressing): each round the active
// half of the block folds the upper half of shared memory onto the lower half,
// keeping active threads contiguous and reducing bank conflicts. Writes one
// partial sum per block to xout[blockIdx.x].
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce2(int N, float *x, float *xout){
  __shared__ float partials[p_Nthreads];
  const int lane = threadIdx.x;
  const int gid = blockIdx.x*blockDim.x + lane;
  // stage one element per thread; out-of-range tail contributes zero
  partials[lane] = (gid < N) ? x[gid] : 0.f;
  __syncthreads();
  // halve the number of active threads each round
  for (unsigned int active = blockDim.x >> 1; active > 0; active >>= 1){
    if (lane < active)
      partials[lane] += partials[lane + active];
    __syncthreads();
  }
  if (lane == 0)
    xout[blockIdx.x] = partials[0];
}
// use all threads
// Tree reduction, variant 4 (first add during load): each block of T threads
// reduces 2*T input elements, so no thread idles on the first round.
// Launch with gridDim.x = ceil(N / (2*blockDim.x)); assumes a power-of-two
// block with blockDim.x <= p_Nthreads (main uses 128).
__global__ void reduce3(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*(2*blockDim.x) + tid;
// Stage x[i] + x[i + blockDim.x], guarding BOTH reads: the original read
// x[i + blockDim.x] whenever i < N, which is out of bounds when N is not a
// multiple of 2*blockDim.x.
float v = 0.f;
if (i < N) v = x[i];
if (i + blockDim.x < N) v += x[i + blockDim.x];
s_x[tid] = v;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s /= 2){
if (tid < s){
s_x[tid] += s_x[tid+s]; // no wasted threads on first iteration
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// use all threads
// Tree reduction, variant 5: first add during load plus a manually unrolled
// final warp; s_x is volatile so intra-warp updates bypass register caching.
// NOTE(review): the tid<32 tail relies on implicit warp-lockstep execution
// (and on a 32-wide warp); on NVIDIA Volta+ independent thread scheduling this
// needs __syncwarp() between steps -- confirm the target architecture.
// Launch with gridDim.x = ceil(N / (2*blockDim.x)); assumes a power-of-two
// block, 64 <= blockDim.x <= p_Nthreads (main uses 128).
__global__ void reduce4(int N, float *x, float *xout){
__shared__ volatile float s_x[p_Nthreads]; // volatile for in-warp smem mods
const int tid = threadIdx.x;
const int i = blockIdx.x*(2*blockDim.x) + tid;
// Guard BOTH staged reads: the original read x[i + blockDim.x] whenever
// i < N, which is out of bounds when N is not a multiple of 2*blockDim.x.
float v = 0.f;
if (i < N) v = x[i];
if (i + blockDim.x < N) v += x[i + blockDim.x];
s_x[tid] = v;
__syncthreads();
// stop at s = 64
for (unsigned int s = blockDim.x/2; s > 32; s /= 2){
if (tid < s){
s_x[tid] += s_x[tid+s];
}
__syncthreads();
}
// manually reduce within a warp
if (tid < 32){
s_x[tid] += s_x[tid + 32];
s_x[tid] += s_x[tid + 16];
s_x[tid] += s_x[tid + 8];
s_x[tid] += s_x[tid + 4];
s_x[tid] += s_x[tid + 2];
s_x[tid] += s_x[tid + 1];
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Host driver: fills x with ones, runs each reduction variant, and checks that
// the block partial sums add back up to N. Fixes over the original:
//  * xouthalf_c was hipMalloc'd twice (the first allocation leaked);
//  * all device and host buffers are now released before exit.
int main(void)
{
int N = 1048576;
float*x = (float*)malloc(N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1;
}
// alloc GPU mem and copy over
float *x_c, *xout_c, *xouthalf_c;
hipMalloc(&x_c, N*sizeof(float));
hipMemcpy(x_c, x, N*sizeof(float), hipMemcpyHostToDevice);
// run kernel, copy result back to CPU
int Nthreads = p_Nthreads; // good if it's a multiple of 32, can't have more than 1024
int Nblocks = (N+Nthreads-1)/Nthreads;
dim3 threadsPerBlock(Nthreads,1,1);
dim3 blocks(Nblocks,1,1);
float*xout = (float*)malloc(Nblocks*sizeof(float));
hipMalloc(&xout_c, Nblocks*sizeof(float));
// allocated exactly once (the original duplicated this hipMalloc below, leaking)
hipMalloc(&xouthalf_c, Nblocks/2*sizeof(float));
// version 1: slow
hipLaunchKernelGGL(( reduce) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, N, x_c, xout_c);
// version 2: remove % operator
hipLaunchKernelGGL(( reduce1) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, N, x_c, xout_c);
// version 3: reduce bank conflicts
hipLaunchKernelGGL(( reduce2) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, N, x_c, xout_c);
hipMemcpy(xout, xout_c, Nblocks*sizeof(float), hipMemcpyDeviceToHost);
// check result
int reduction = 0;
for (int i = 0; i < Nblocks; i++){
reduction += xout[i];
}
printf("error = %d\n",reduction-N);
// --- the following versions use only 1/2 the number of blocks
dim3 halfblocks(Nblocks/2,1,1);
float*xouthalf = (float*)malloc(Nblocks/2*sizeof(float));
// version 4: fewer idle threads
hipLaunchKernelGGL(( reduce3) , dim3(halfblocks), dim3(threadsPerBlock) , 0, 0, N, x_c, xouthalf_c);
hipMemcpy(xouthalf, xouthalf_c, Nblocks/2*sizeof(float), hipMemcpyDeviceToHost);
// version 5: manually unrolled last warp
hipLaunchKernelGGL(( reduce4) , dim3(halfblocks), dim3(threadsPerBlock) , 0, 0, N, x_c, xouthalf_c);
hipMemcpy(xouthalf, xouthalf_c, Nblocks/2*sizeof(float), hipMemcpyDeviceToHost);
// check result
reduction = 0;
for (int i = 0; i < Nblocks/2; i++){
reduction += xouthalf[i];
}
printf("error = %d\n",reduction-N);
// release device and host buffers (the original leaked everything)
hipFree(x_c);
hipFree(xout_c);
hipFree(xouthalf_c);
free(x);
free(xout);
free(xouthalf);
return 0;
}
| 7f34446ac2d59b1eccb93be6cfee13ee411244ae.cu | #include <stdio.h>
#define p_Nthreads 128
// Naive tree reduction (interleaved addressing): each block sums its chunk of
// x in shared memory and writes one partial sum to xout[blockIdx.x]. The
// `tid % (2*s)` test diverges within warps and idles most threads.
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*blockDim.x + tid;
// load smem
s_x[tid] = 0;
if (i < N){
s_x[tid] = x[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2){
// strided access
// s = 1 -> [0, 2, 4, 8, ...]
// s = 2 -> [0, 4, 8, 16, ...]
if (tid % (2*s)==0){
s_x[tid] += s_x[tid + s];
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Tree reduction, variant 2: replaces the modulo test of `reduce` with an
// explicit index computation so active threads stay contiguous; the strided
// shared-memory access still causes bank conflicts (see inline comment).
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce1(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*blockDim.x + tid;
// load smem
s_x[tid] = 0;
if (i < N){
s_x[tid] = x[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2){
int index = 2*s*tid;
if (index < blockDim.x){
s_x[index] += s_x[index+s]; // bank conflicts
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Tree reduction, variant 3 (sequential addressing): each round the active
// half of the block folds the upper half of shared memory onto the lower half,
// keeping active threads contiguous and reducing bank conflicts. Writes one
// partial sum per block to xout[blockIdx.x].
// Assumes blockDim.x <= p_Nthreads and a power-of-two block (main uses 128).
__global__ void reduce2(int N, float *x, float *xout){
  __shared__ float partials[p_Nthreads];
  const int lane = threadIdx.x;
  const int gid = blockIdx.x*blockDim.x + lane;
  // stage one element per thread; out-of-range tail contributes zero
  partials[lane] = (gid < N) ? x[gid] : 0.f;
  __syncthreads();
  // halve the number of active threads each round
  for (unsigned int active = blockDim.x >> 1; active > 0; active >>= 1){
    if (lane < active)
      partials[lane] += partials[lane + active];
    __syncthreads();
  }
  if (lane == 0)
    xout[blockIdx.x] = partials[0];
}
// use all threads
// Tree reduction, variant 4 (first add during load): each block of T threads
// reduces 2*T input elements, so no thread idles on the first round.
// Launch with gridDim.x = ceil(N / (2*blockDim.x)); assumes a power-of-two
// block with blockDim.x <= p_Nthreads (main uses 128).
__global__ void reduce3(int N, float *x, float *xout){
__shared__ float s_x[p_Nthreads];
const int tid = threadIdx.x;
const int i = blockIdx.x*(2*blockDim.x) + tid;
// Stage x[i] + x[i + blockDim.x], guarding BOTH reads: the original read
// x[i + blockDim.x] whenever i < N, which is out of bounds when N is not a
// multiple of 2*blockDim.x.
float v = 0.f;
if (i < N) v = x[i];
if (i + blockDim.x < N) v += x[i + blockDim.x];
s_x[tid] = v;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s /= 2){
if (tid < s){
s_x[tid] += s_x[tid+s]; // no wasted threads on first iteration
}
__syncthreads();
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// use all threads
// Tree reduction, variant 5: first add during load plus a manually unrolled
// final warp; s_x is volatile so intra-warp updates bypass register caching.
// NOTE(review): the tid<32 tail relies on implicit warp-lockstep execution;
// on Volta+ independent thread scheduling this needs __syncwarp() between
// steps -- confirm the target architecture.
// Launch with gridDim.x = ceil(N / (2*blockDim.x)); assumes a power-of-two
// block, 64 <= blockDim.x <= p_Nthreads (main uses 128).
__global__ void reduce4(int N, float *x, float *xout){
__shared__ volatile float s_x[p_Nthreads]; // volatile for in-warp smem mods
const int tid = threadIdx.x;
const int i = blockIdx.x*(2*blockDim.x) + tid;
// Guard BOTH staged reads: the original read x[i + blockDim.x] whenever
// i < N, which is out of bounds when N is not a multiple of 2*blockDim.x.
float v = 0.f;
if (i < N) v = x[i];
if (i + blockDim.x < N) v += x[i + blockDim.x];
s_x[tid] = v;
__syncthreads();
// stop at s = 64
for (unsigned int s = blockDim.x/2; s > 32; s /= 2){
if (tid < s){
s_x[tid] += s_x[tid+s];
}
__syncthreads();
}
// manually reduce within a warp
if (tid < 32){
s_x[tid] += s_x[tid + 32];
s_x[tid] += s_x[tid + 16];
s_x[tid] += s_x[tid + 8];
s_x[tid] += s_x[tid + 4];
s_x[tid] += s_x[tid + 2];
s_x[tid] += s_x[tid + 1];
}
if (tid==0){
xout[blockIdx.x] = s_x[0];
}
}
// Host driver: fills x with ones, runs each reduction variant, and checks that
// the block partial sums add back up to N. Fixes over the original:
//  * xouthalf_c was cudaMalloc'd twice (the first allocation leaked);
//  * all device and host buffers are now released before exit.
int main(void)
{
int N = 1048576;
float*x = (float*)malloc(N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1;
}
// alloc GPU mem and copy over
float *x_c, *xout_c, *xouthalf_c;
cudaMalloc(&x_c, N*sizeof(float));
cudaMemcpy(x_c, x, N*sizeof(float), cudaMemcpyHostToDevice);
// run kernel, copy result back to CPU
int Nthreads = p_Nthreads; // good if it's a multiple of 32, can't have more than 1024
int Nblocks = (N+Nthreads-1)/Nthreads;
dim3 threadsPerBlock(Nthreads,1,1);
dim3 blocks(Nblocks,1,1);
float*xout = (float*)malloc(Nblocks*sizeof(float));
cudaMalloc(&xout_c, Nblocks*sizeof(float));
// allocated exactly once (the original duplicated this cudaMalloc below, leaking)
cudaMalloc(&xouthalf_c, Nblocks/2*sizeof(float));
// version 1: slow
reduce <<< blocks, threadsPerBlock >>> (N, x_c, xout_c);
// version 2: remove % operator
reduce1 <<< blocks, threadsPerBlock >>> (N, x_c, xout_c);
// version 3: reduce bank conflicts
reduce2 <<< blocks, threadsPerBlock >>> (N, x_c, xout_c);
cudaMemcpy(xout, xout_c, Nblocks*sizeof(float), cudaMemcpyDeviceToHost);
// check result
int reduction = 0;
for (int i = 0; i < Nblocks; i++){
reduction += xout[i];
}
printf("error = %d\n",reduction-N);
// --- the following versions use only 1/2 the number of blocks
dim3 halfblocks(Nblocks/2,1,1);
float*xouthalf = (float*)malloc(Nblocks/2*sizeof(float));
// version 4: fewer idle threads
reduce3 <<< halfblocks, threadsPerBlock >>> (N, x_c, xouthalf_c);
cudaMemcpy(xouthalf, xouthalf_c, Nblocks/2*sizeof(float), cudaMemcpyDeviceToHost);
// version 5: manually unrolled last warp
reduce4 <<< halfblocks, threadsPerBlock >>> (N, x_c, xouthalf_c);
cudaMemcpy(xouthalf, xouthalf_c, Nblocks/2*sizeof(float), cudaMemcpyDeviceToHost);
// check result
reduction = 0;
for (int i = 0; i < Nblocks/2; i++){
reduction += xouthalf[i];
}
printf("error = %d\n",reduction-N);
// release device and host buffers (the original leaked everything)
cudaFree(x_c);
cudaFree(xout_c);
cudaFree(xouthalf_c);
free(x);
free(xout);
free(xouthalf);
return 0;
}
|
384b5f42a10b275023cbddd4cd705f6850fe6dc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "quickshift_cmn.h"
#include "common.h"
// 3-D texture over the input image (x, y, channel) and 2-D texture over the
// density map; read by the kernels below when their with_texture flag is set.
texture<float, 3, hipReadModeElementType> texture_pixels;
texture<float, 2, hipReadModeElementType> texture_density;
// get pixel value from data matrix or texture memory (base on texture mode)
__device__ float get_pixel(int with_texture, int x, int y, int ch, int height, int width, const float * data){
if(with_texture) return tex3D(texture_pixels, x, y, ch);
else return data[x + height*y + width*height*ch];
}
// get density from density matrix or texture memory (base on texture mode)
__device__ float get_density(int with_texture, int x, int y, int height, float * E){
if(with_texture) return tex2D(texture_density, x, y);
else return E[x + height*y];
}
// distance between data at pixel i and j along K channels and adding the distance between i and j
__device__ float distance(const float * data, int height, int width, int channels, float * v, int x_col, int x_row, int y_col, int y_row, int with_texture){
int d1 = y_col - x_col;
int d2 = y_row - x_row;
int k;
float dist = d1*d1 + d2*d2;
for (k = 0; k < channels; ++k) {
float d = v[k] - get_pixel(with_texture,y_col,y_row,k,height,width,data);
dist += d*d;
}
return dist;
}
// divide grid base on block size
// Ceiling division: how many blocks of size `den` are needed to cover `num`.
int divide_grid(int num, int den){
    int quot = num / den;
    if (num % den == 0) return quot;
    return quot + 1;
}
// For each pixel x, search the clamped (2*Rd+1)^2 window for the nearest
// pixel with strictly higher density E and link x to it (the quickshift
// "climb" step).  Outputs, both height*width column-major float arrays:
//   map[x]  = flat index of the chosen parent (x itself if none qualifies)
//   gaps[x] = Euclidean distance to the parent, or INF when x is a root
// NOTE(review): v[3] caps the channel cache at 3 — assumes channels <= 3;
// confirm against callers.
__global__ void find_neighbors(const float * data, int height, int width, int channels, float * E, float alpha, int Rd, float * map, float * gaps, int with_texture){
    // thread index (block y -> image column, block x -> image row)
    int x_col = blockIdx.y * blockDim.y + threadIdx.y;
    int x_row = blockIdx.x * blockDim.x + threadIdx.x;
    if (x_col >= height || x_row >= width) return; // out of bounds
    // variables for best neighbor
    int y_col,y_row;
    float E0 = get_density(with_texture,x_col,x_row,height,E);
    float d_best = INF;
    float y_col_best = x_col;
    float y_row_best = x_row;
    // initialize boundaries from alpha (search window clamped to the image)
    int y_col_min = MAX(x_col - Rd, 0);
    int y_col_max = MIN(x_col + Rd, height-1);
    int y_row_min = MAX(x_row - Rd, 0);
    int y_row_max = MIN(x_row + Rd, width-1);
    // cache the center pixel's channel values
    float v[3];
    for (int k = 0; k < channels; ++k)
        v[k] = get_pixel(with_texture,x_col,x_row,k,height,width,data);
    // for each pixel in the area (alpha) find the best root
    for (y_row = y_row_min; y_row <= y_row_max; ++ y_row) {
        for (y_col = y_col_min; y_col <= y_col_max; ++ y_col) {
            // only climb towards strictly higher density
            if (get_density(with_texture,y_col,y_row,height,E) > E0) {
                float Dij = distance(data,height,width,channels,v,x_col,x_row,y_col,y_row,with_texture);
                // accept if within the alpha radius and closer than current best
                if (Dij <= alpha*alpha && Dij < d_best) {
                    d_best = Dij;
                    y_col_best = y_col;
                    y_row_best = y_row;
                }
            }
        }
    }
    // map is the index of the best pair
    // gaps is the minimal distance, INF = root
    // NOTE(review): sqrt() on a float promotes to double; sqrtf would avoid it.
    map [x_col + height * x_row] = y_col_best + height * y_row_best;
    if (map[x_col + height * x_row] != x_col + height * x_row) gaps[x_col + height * x_row] = sqrt(d_best);
    else gaps[x_col + height * x_row] = d_best;
}
// Parzen (Gaussian kernel) density estimate at each pixel, computed over a
// (2R+1)^2 window clamped to the image, written to E (column-major).
// NOTE(review): v[3] assumes channels <= 3 — confirm.
__global__ void compute_density(const float * data, int height, int width, int channels, int R, float sigma, float * E, int with_texture){
    // thread index (block y -> image column, block x -> image row)
    int x_col = blockIdx.y * blockDim.y + threadIdx.y;
    int x_row = blockIdx.x * blockDim.x + threadIdx.x;
    if (x_col >= height || x_row >= width) return; // out of bounds
    // initialize boundaries from sigma (window clamped to the image)
    int y_col,y_row;
    int y_col_min = MAX(x_col - R, 0);
    int y_col_max = MIN(x_col + R, height-1);
    int y_row_min = MAX(x_row - R, 0);
    int y_row_max = MIN(x_row + R, width-1);
    float Ei = 0;
    // cache the center value in registers
    float v[3];
    for (int k = 0; k < channels; ++k)
        v[k] = get_pixel(with_texture,x_col,x_row,k,height,width,data);
    // for each pixel in the area (sigma) compute the density (between it and the source pixel)
    for (y_row = y_row_min; y_row <= y_row_max; ++ y_row) {
        for (y_col = y_col_min; y_col <= y_col_max; ++ y_col) {
            float Dij = distance(data,height,width,channels,v,x_col,x_row,y_col,y_row,with_texture);
            // Gaussian weight; exp() on float promotes to double (expf would not)
            float Fij = exp(-Dij / (2*sigma*sigma));
            Ei += Fij;
        }
    }
    // normalize
    // NOTE(review): the window holds (max-min+1) pixels per axis; the divisor
    // omits the +1 — presumably intentional, but verify against the reference.
    E[x_col + height * x_row] = Ei / ((y_col_max-y_col_min)*(y_row_max-y_row_min));
}
// Run quickshift image segmentation on the GPU (HIP).
//
// Steps: (1) optionally upload the image into a 3D texture, (2) compute the
// density estimate E with compute_density, (3) optionally upload E into a 2D
// texture, (4) link every pixel to its best higher-density neighbour with
// find_neighbors.  map, gaps and E are host buffers filled by this call;
// *time receives the elapsed GPU time in seconds.
//
// Fixes vs. previous version: timing events are now destroyed (they leaked),
// the garbled "©Params" is restored to &copyParams, texture unbinding only
// happens when the textures were actually bound, and the elapsed-time query
// is error-checked like every other runtime call.
void quickshift_gpu(qs_image image, float sigma, float alpha, float * map, float * gaps, float * E, int with_texture, float * time){
    CHECK( hipSetDevice(2) );  // NOTE(review): device index is hard-coded
    hipEvent_t start, stop;
    CHECK( hipEventCreate(&start) );
    CHECK( hipEventCreate(&stop) );
    hipArray * cuda_array_pixels = NULL;
    hipArray * cuda_array_density = NULL;
    // texture for the image
    if(with_texture){
        hipChannelFormatDesc descr_pixels = hipCreateChannelDesc<float>();
        texture_pixels.normalized = false;
        texture_pixels.filterMode = hipFilterModePoint;
        hipExtent const ext = {image.height, image.width, image.channels};
        CHECK( hipMalloc3DArray(&cuda_array_pixels, &descr_pixels, ext) );
        hipMemcpy3DParms copyParams = {0};
        copyParams.extent = make_hipExtent(image.height, image.width, image.channels);
        copyParams.kind = hipMemcpyHostToDevice;
        copyParams.dstArray = cuda_array_pixels;
        copyParams.srcPtr = make_hipPitchedPtr((void*)&image.data[0], ext.width*sizeof(float), ext.width, ext.height);
        CHECK( hipMemcpy3D(&copyParams) );
        CHECK( hipBindTextureToArray(texture_pixels, cuda_array_pixels, descr_pixels) );
    }
    // variables
    float *map_cuda, *E_cuda, *gaps_cuda, *data;
    int height = image.height;
    int width = image.width;
    int channels = image.channels;
    int R = (int) ceil (3 * sigma);   // density window radius
    int Rd = (int) ceil (alpha);      // neighbour-search radius
    // allocate memory on device
    unsigned int size = image.height*image.width * sizeof(float);
    CHECK( hipMalloc((void**) &data, size*image.channels) );
    CHECK( hipMalloc((void**) &map_cuda, size) );
    CHECK( hipMalloc((void**) &gaps_cuda, size) );
    CHECK( hipMalloc((void**) &E_cuda, size) );
    CHECK( hipMemcpy(data, image.data, size*image.channels, hipMemcpyHostToDevice) );
    CHECK( hipMemset(E_cuda, 0, size) );
    // compute density (and copy result to host)
    dim3 dimBlock(16,16,1);
    dim3 dimGrid(divide_grid(width, dimBlock.x), divide_grid(height, dimBlock.y), 1);
    CHECK( hipEventRecord(start) );
    hipLaunchKernelGGL(compute_density, dimGrid, dimBlock, 0, 0, data, height, width, channels, R, sigma, E_cuda, with_texture);
    CHECK( hipDeviceSynchronize() );
    CHECK( hipMemcpy(E, E_cuda, size, hipMemcpyDeviceToHost) );
    // texture for density
    if(with_texture){
        hipChannelFormatDesc descr_density = hipCreateChannelDesc<float>();
        texture_density.normalized = false;
        texture_density.filterMode = hipFilterModePoint;
        CHECK( hipMallocArray(&cuda_array_density, &descr_density, image.height, image.width) );
        CHECK( hipMemcpyToArray(cuda_array_density, 0, 0, E, sizeof(float)*image.height*image.width, hipMemcpyHostToDevice) );
        CHECK( hipBindTextureToArray(texture_density, cuda_array_density, descr_density) );
        CHECK( hipDeviceSynchronize() );
    }
    // find neighbors (and copy result to host)
    hipLaunchKernelGGL(find_neighbors, dimGrid, dimBlock, 0, 0, data, height, width, channels, E_cuda, alpha, Rd, map_cuda, gaps_cuda, with_texture);
    CHECK( hipDeviceSynchronize() );
    CHECK( hipMemcpy(map, map_cuda, size, hipMemcpyDeviceToHost) );
    CHECK( hipMemcpy(gaps, gaps_cuda, size, hipMemcpyDeviceToHost) );
    // time elapsed (milliseconds -> seconds)
    CHECK( hipEventRecord(stop) );
    CHECK( hipEventSynchronize(stop) );
    CHECK( hipEventElapsedTime(time, start, stop) );
    *time /= 1000.0f;
    // cleanup
    CHECK( hipFree(data) );
    CHECK( hipFree(map_cuda) );
    CHECK( hipFree(gaps_cuda) );
    CHECK( hipFree(E_cuda) );
    if(with_texture){
        // only unbind/free what was actually bound/allocated
        CHECK( hipUnbindTexture(texture_pixels) );
        CHECK( hipUnbindTexture(texture_density) );
        CHECK( hipFreeArray(cuda_array_pixels) );
        CHECK( hipFreeArray(cuda_array_density) );
    }
    // FIX: events were previously leaked
    CHECK( hipEventDestroy(start) );
    CHECK( hipEventDestroy(stop) );
}
| 384b5f42a10b275023cbddd4cd705f6850fe6dc1.cu | #include <stdio.h>
#include <stdlib.h>
#include "quickshift_cmn.h"
#include "common.h"
texture<float, 3, cudaReadModeElementType> texture_pixels;
texture<float, 2, cudaReadModeElementType> texture_density;
// get pixel value from data matrix or texture memory (base on texture mode)
// Fetch channel `ch` of pixel (x, y): from the bound 3D texture when texture
// mode is enabled, otherwise from the column-major linear data array.
__device__ float get_pixel(int with_texture, int x, int y, int ch, int height, int width, const float * data){
    return with_texture ? tex3D(texture_pixels, x, y, ch)
                        : data[x + height * y + width * height * ch];
}
// get density from density matrix or texture memory (base on texture mode)
// Fetch the density estimate at (x, y): from the bound 2D texture when
// texture mode is enabled, otherwise from the column-major E matrix.
__device__ float get_density(int with_texture, int x, int y, int height, float * E){
    return with_texture ? tex2D(texture_density, x, y)
                        : E[x + height * y];
}
// distance between data at pixel i and j along K channels and adding the distance between i and j
// Squared distance between pixel i = (x_col, x_row) and j = (y_col, y_row):
// spatial squared offset plus the squared per-channel value difference.
// `v` caches the channel values of pixel i so they are read only once.
__device__ float distance(const float * data, int height, int width, int channels, float * v, int x_col, int x_row, int y_col, int y_row, int with_texture){
    const int dc = y_col - x_col;
    const int dr = y_row - x_row;
    float acc = (float)(dc * dc + dr * dr);
    for (int ch = 0; ch < channels; ++ch) {
        const float diff = v[ch] - get_pixel(with_texture, y_col, y_row, ch, height, width, data);
        acc += diff * diff;
    }
    return acc;
}
// divide grid base on block size
// Ceiling division: how many blocks of size `den` are needed to cover `num`.
int divide_grid(int num, int den){
    int quot = num / den;
    if (num % den == 0) return quot;
    return quot + 1;
}
// For each pixel x, search the clamped (2*Rd+1)^2 window for the nearest
// pixel with strictly higher density E and link x to it (the quickshift
// "climb" step).  Outputs, both height*width column-major float arrays:
//   map[x]  = flat index of the chosen parent (x itself if none qualifies)
//   gaps[x] = Euclidean distance to the parent, or INF when x is a root
// NOTE(review): v[3] caps the channel cache at 3 — assumes channels <= 3;
// confirm against callers.
__global__ void find_neighbors(const float * data, int height, int width, int channels, float * E, float alpha, int Rd, float * map, float * gaps, int with_texture){
    // thread index (block y -> image column, block x -> image row)
    int x_col = blockIdx.y * blockDim.y + threadIdx.y;
    int x_row = blockIdx.x * blockDim.x + threadIdx.x;
    if (x_col >= height || x_row >= width) return; // out of bounds
    // variables for best neighbor
    int y_col,y_row;
    float E0 = get_density(with_texture,x_col,x_row,height,E);
    float d_best = INF;
    float y_col_best = x_col;
    float y_row_best = x_row;
    // initialize boundaries from alpha (search window clamped to the image)
    int y_col_min = MAX(x_col - Rd, 0);
    int y_col_max = MIN(x_col + Rd, height-1);
    int y_row_min = MAX(x_row - Rd, 0);
    int y_row_max = MIN(x_row + Rd, width-1);
    // cache the center pixel's channel values
    float v[3];
    for (int k = 0; k < channels; ++k)
        v[k] = get_pixel(with_texture,x_col,x_row,k,height,width,data);
    // for each pixel in the area (alpha) find the best root
    for (y_row = y_row_min; y_row <= y_row_max; ++ y_row) {
        for (y_col = y_col_min; y_col <= y_col_max; ++ y_col) {
            // only climb towards strictly higher density
            if (get_density(with_texture,y_col,y_row,height,E) > E0) {
                float Dij = distance(data,height,width,channels,v,x_col,x_row,y_col,y_row,with_texture);
                // accept if within the alpha radius and closer than current best
                if (Dij <= alpha*alpha && Dij < d_best) {
                    d_best = Dij;
                    y_col_best = y_col;
                    y_row_best = y_row;
                }
            }
        }
    }
    // map is the index of the best pair
    // gaps is the minimal distance, INF = root
    // NOTE(review): sqrt() on a float promotes to double; sqrtf would avoid it.
    map [x_col + height * x_row] = y_col_best + height * y_row_best;
    if (map[x_col + height * x_row] != x_col + height * x_row) gaps[x_col + height * x_row] = sqrt(d_best);
    else gaps[x_col + height * x_row] = d_best;
}
// Parzen (Gaussian kernel) density estimate at each pixel, computed over a
// (2R+1)^2 window clamped to the image, written to E (column-major).
// NOTE(review): v[3] assumes channels <= 3 — confirm.
__global__ void compute_density(const float * data, int height, int width, int channels, int R, float sigma, float * E, int with_texture){
    // thread index (block y -> image column, block x -> image row)
    int x_col = blockIdx.y * blockDim.y + threadIdx.y;
    int x_row = blockIdx.x * blockDim.x + threadIdx.x;
    if (x_col >= height || x_row >= width) return; // out of bounds
    // initialize boundaries from sigma (window clamped to the image)
    int y_col,y_row;
    int y_col_min = MAX(x_col - R, 0);
    int y_col_max = MIN(x_col + R, height-1);
    int y_row_min = MAX(x_row - R, 0);
    int y_row_max = MIN(x_row + R, width-1);
    float Ei = 0;
    // cache the center value in registers
    float v[3];
    for (int k = 0; k < channels; ++k)
        v[k] = get_pixel(with_texture,x_col,x_row,k,height,width,data);
    // for each pixel in the area (sigma) compute the density (between it and the source pixel)
    for (y_row = y_row_min; y_row <= y_row_max; ++ y_row) {
        for (y_col = y_col_min; y_col <= y_col_max; ++ y_col) {
            float Dij = distance(data,height,width,channels,v,x_col,x_row,y_col,y_row,with_texture);
            // Gaussian weight; exp() on float promotes to double (expf would not)
            float Fij = exp(-Dij / (2*sigma*sigma));
            Ei += Fij;
        }
    }
    // normalize
    // NOTE(review): the window holds (max-min+1) pixels per axis; the divisor
    // omits the +1 — presumably intentional, but verify against the reference.
    E[x_col + height * x_row] = Ei / ((y_col_max-y_col_min)*(y_row_max-y_row_min));
}
// Run quickshift image segmentation on the GPU (CUDA).
//
// Steps: (1) optionally upload the image into a 3D texture, (2) compute the
// density estimate E with compute_density, (3) optionally upload E into a 2D
// texture, (4) link every pixel to its best higher-density neighbour with
// find_neighbors.  map, gaps and E are host buffers filled by this call;
// *time receives the elapsed GPU time in seconds.
//
// Fixes vs. previous version: timing events are now destroyed (they leaked),
// the garbled "©Params" is restored to &copyParams, deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(), texture
// unbinding only happens when the textures were actually bound, and the
// elapsed-time query is error-checked like every other runtime call.
void quickshift_gpu(qs_image image, float sigma, float alpha, float * map, float * gaps, float * E, int with_texture, float * time){
    CHECK( cudaSetDevice(2) );  // NOTE(review): device index is hard-coded
    cudaEvent_t start, stop;
    CHECK( cudaEventCreate(&start) );
    CHECK( cudaEventCreate(&stop) );
    cudaArray * cuda_array_pixels = NULL;
    cudaArray * cuda_array_density = NULL;
    // texture for the image
    if(with_texture){
        cudaChannelFormatDesc descr_pixels = cudaCreateChannelDesc<float>();
        texture_pixels.normalized = false;
        texture_pixels.filterMode = cudaFilterModePoint;
        cudaExtent const ext = {image.height, image.width, image.channels};
        CHECK( cudaMalloc3DArray(&cuda_array_pixels, &descr_pixels, ext) );
        cudaMemcpy3DParms copyParams = {0};
        copyParams.extent = make_cudaExtent(image.height, image.width, image.channels);
        copyParams.kind = cudaMemcpyHostToDevice;
        copyParams.dstArray = cuda_array_pixels;
        copyParams.srcPtr = make_cudaPitchedPtr((void*)&image.data[0], ext.width*sizeof(float), ext.width, ext.height);
        CHECK( cudaMemcpy3D(&copyParams) );
        CHECK( cudaBindTextureToArray(texture_pixels, cuda_array_pixels, descr_pixels) );
    }
    // variables
    float *map_cuda, *E_cuda, *gaps_cuda, *data;
    int height = image.height;
    int width = image.width;
    int channels = image.channels;
    int R = (int) ceil (3 * sigma);   // density window radius
    int Rd = (int) ceil (alpha);      // neighbour-search radius
    // allocate memory on device
    unsigned int size = image.height*image.width * sizeof(float);
    CHECK( cudaMalloc((void**) &data, size*image.channels) );
    CHECK( cudaMalloc((void**) &map_cuda, size) );
    CHECK( cudaMalloc((void**) &gaps_cuda, size) );
    CHECK( cudaMalloc((void**) &E_cuda, size) );
    CHECK( cudaMemcpy(data, image.data, size*image.channels, cudaMemcpyHostToDevice) );
    CHECK( cudaMemset(E_cuda, 0, size) );
    // compute density (and copy result to host)
    dim3 dimBlock(16,16,1);
    dim3 dimGrid(divide_grid(width, dimBlock.x), divide_grid(height, dimBlock.y), 1);
    CHECK( cudaEventRecord(start) );
    compute_density <<<dimGrid,dimBlock>>> (data, height, width, channels, R, sigma, E_cuda, with_texture);
    CHECK( cudaDeviceSynchronize() );
    CHECK( cudaMemcpy(E, E_cuda, size, cudaMemcpyDeviceToHost) );
    // texture for density
    if(with_texture){
        cudaChannelFormatDesc descr_density = cudaCreateChannelDesc<float>();
        texture_density.normalized = false;
        texture_density.filterMode = cudaFilterModePoint;
        CHECK( cudaMallocArray(&cuda_array_density, &descr_density, image.height, image.width) );
        CHECK( cudaMemcpyToArray(cuda_array_density, 0, 0, E, sizeof(float)*image.height*image.width, cudaMemcpyHostToDevice) );
        CHECK( cudaBindTextureToArray(texture_density, cuda_array_density, descr_density) );
        CHECK( cudaDeviceSynchronize() );
    }
    // find neighbors (and copy result to host)
    find_neighbors <<<dimGrid,dimBlock>>> (data, height, width, channels, E_cuda, alpha, Rd, map_cuda, gaps_cuda, with_texture);
    CHECK( cudaDeviceSynchronize() );
    CHECK( cudaMemcpy(map, map_cuda, size, cudaMemcpyDeviceToHost) );
    CHECK( cudaMemcpy(gaps, gaps_cuda, size, cudaMemcpyDeviceToHost) );
    // time elapsed (milliseconds -> seconds)
    CHECK( cudaEventRecord(stop) );
    CHECK( cudaEventSynchronize(stop) );
    CHECK( cudaEventElapsedTime(time, start, stop) );
    *time /= 1000.0f;
    // cleanup
    CHECK( cudaFree(data) );
    CHECK( cudaFree(map_cuda) );
    CHECK( cudaFree(gaps_cuda) );
    CHECK( cudaFree(E_cuda) );
    if(with_texture){
        // only unbind/free what was actually bound/allocated
        CHECK( cudaUnbindTexture(texture_pixels) );
        CHECK( cudaUnbindTexture(texture_density) );
        CHECK( cudaFreeArray(cuda_array_pixels) );
        CHECK( cudaFreeArray(cuda_array_density) );
    }
    // FIX: events were previously leaked
    CHECK( cudaEventDestroy(start) );
    CHECK( cudaEventDestroy(stop) );
}
|
2e361561a7608e16cc926172236f489107972de3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "include/SWE.cuh"
#include "include/utils.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
// Compute the nine equilibrium distribution functions of a D2Q9
// shallow-water lattice-Boltzmann cell.
//   feq               out: 9 equilibrium values (rest + 4 axis + 4 diagonal)
//   localMacroscopic  in:  {h, ux, uy} — depth and velocity at this cell
//   e                 in:  lattice speed
// Gravity (9.8) is hard-coded.  NOTE(review): the coefficients match the
// usual SWE-LBM equilibrium form (diagonal links carry a 1/4 weight) —
// verify against the reference formulation before altering.
__device__ void calculateFeqSWE(prec* feq, prec* localMacroscopic, prec e){
    prec factor = 1 / (9 * e*e);
    prec localh = localMacroscopic[0];
    prec localux = localMacroscopic[1];
    prec localuy = localMacroscopic[2];
    // common sub-expressions of the equilibrium polynomial
    prec gh = 1.5 * 9.8 * localh;
    prec usq = 1.5 * (localux * localux + localuy * localuy);
    prec ux3 = 3.0 * e * localux;
    prec uy3 = 3.0 * e * localuy;
    prec uxuy5 = ux3 + uy3;   // projection on the (+1,+1) diagonal
    prec uxuy6 = uy3 - ux3;   // projection on the (-1,+1) diagonal
    feq[0] = localh * (1 - factor * (5.0 * gh + 4.0 * usq));
    feq[1] = localh * factor * (gh + ux3 + 4.5 * ux3*ux3 * factor - usq);
    feq[2] = localh * factor * (gh + uy3 + 4.5 * uy3*uy3 * factor - usq);
    feq[3] = localh * factor * (gh - ux3 + 4.5 * ux3*ux3 * factor - usq);
    feq[4] = localh * factor * (gh - uy3 + 4.5 * uy3*uy3 * factor - usq);
    feq[5] = localh * factor * 0.25 * (gh + uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
    feq[6] = localh * factor * 0.25 * (gh + uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
    feq[7] = localh * factor * 0.25 * (gh - uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
    feq[8] = localh * factor * 0.25 * (gh - uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
}
// Bed-slope forcing term for the 8 moving directions of cell i, written to
// forcing[8*i .. 8*i+7].  IDX(i, j, Lx, ex, ey) presumably maps cell i to
// its neighbour in lattice direction j (defined in macros.h — confirm).
// Diagonal directions (j = 4..7) carry the 1/4 lattice weight.
// Gravity (9.8) is hard-coded.
__device__ void calculateForcingSWE(prec* forcing, prec* h, const prec* __restrict__ b, prec e,
    int i, int Lx, int* ex, int* ey){
    prec factor = 1 / (6 * e*e);
    prec localh = h[i];
    prec localb = b[i];
    // axis-aligned links: g * (h_i + h_j)/2 * (b_j - b_i) / (3 e^2)
    for (int j = 0; j < 4; j++){
        int index = IDX(i, j, Lx, ex, ey);
        forcing[8*i+j] = factor * 9.8 * (localh + h[index]) * (b[index] - localb);
    }
    // diagonal links: same term scaled by the 1/4 lattice weight
    for (int j = 4; j < 8; j++){
        int index = IDX(i, j, Lx, ex, ey);
        forcing[8*i+j] = factor * 0.25 * 9.8 * (localh + h[index]) * (b[index] - localb);
    }
}
// h = w - b : recover water depth from free-surface elevation w and bed
// elevation b, one thread per cell.
__global__ void hKernel(const configStruct config, const prec* __restrict__ w,
    const prec* __restrict__ b, prec* h){
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    const int n_cells = config.Lx * config.Ly;
    if (idx >= n_cells) return;  // guard the grid tail
    h[idx] = w[idx] - b[idx];
}
// w = h + b : recover free-surface elevation from water depth h and bed
// elevation b, one thread per cell.
__global__ void wKernel(const configStruct config, const prec* __restrict__ h,
    const prec* __restrict__ b, prec* w){
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    const int n_cells = config.Lx * config.Ly;
    if (idx >= n_cells) return;  // guard the grid tail
    w[idx] = h[idx] + b[idx];
}
| 2e361561a7608e16cc926172236f489107972de3.cu | #include <cuda_runtime.h>
#include "include/SWE.cuh"
#include "include/utils.cuh"
#include "../include/structs.h"
#include "../include/macros.h"
// Compute the nine equilibrium distribution functions of a D2Q9
// shallow-water lattice-Boltzmann cell.
//   feq               out: 9 equilibrium values (rest + 4 axis + 4 diagonal)
//   localMacroscopic  in:  {h, ux, uy} — depth and velocity at this cell
//   e                 in:  lattice speed
// Gravity (9.8) is hard-coded.  NOTE(review): the coefficients match the
// usual SWE-LBM equilibrium form (diagonal links carry a 1/4 weight) —
// verify against the reference formulation before altering.
__device__ void calculateFeqSWE(prec* feq, prec* localMacroscopic, prec e){
    prec factor = 1 / (9 * e*e);
    prec localh = localMacroscopic[0];
    prec localux = localMacroscopic[1];
    prec localuy = localMacroscopic[2];
    // common sub-expressions of the equilibrium polynomial
    prec gh = 1.5 * 9.8 * localh;
    prec usq = 1.5 * (localux * localux + localuy * localuy);
    prec ux3 = 3.0 * e * localux;
    prec uy3 = 3.0 * e * localuy;
    prec uxuy5 = ux3 + uy3;   // projection on the (+1,+1) diagonal
    prec uxuy6 = uy3 - ux3;   // projection on the (-1,+1) diagonal
    feq[0] = localh * (1 - factor * (5.0 * gh + 4.0 * usq));
    feq[1] = localh * factor * (gh + ux3 + 4.5 * ux3*ux3 * factor - usq);
    feq[2] = localh * factor * (gh + uy3 + 4.5 * uy3*uy3 * factor - usq);
    feq[3] = localh * factor * (gh - ux3 + 4.5 * ux3*ux3 * factor - usq);
    feq[4] = localh * factor * (gh - uy3 + 4.5 * uy3*uy3 * factor - usq);
    feq[5] = localh * factor * 0.25 * (gh + uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
    feq[6] = localh * factor * 0.25 * (gh + uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
    feq[7] = localh * factor * 0.25 * (gh - uxuy5 + 4.5 * uxuy5*uxuy5 * factor - usq);
    feq[8] = localh * factor * 0.25 * (gh - uxuy6 + 4.5 * uxuy6*uxuy6 * factor - usq);
}
// Bed-slope forcing term for the 8 moving directions of cell i, written to
// forcing[8*i .. 8*i+7].  IDX(i, j, Lx, ex, ey) presumably maps cell i to
// its neighbour in lattice direction j (defined in macros.h — confirm).
// Diagonal directions (j = 4..7) carry the 1/4 lattice weight.
// Gravity (9.8) is hard-coded.
__device__ void calculateForcingSWE(prec* forcing, prec* h, const prec* __restrict__ b, prec e,
    int i, int Lx, int* ex, int* ey){
    prec factor = 1 / (6 * e*e);
    prec localh = h[i];
    prec localb = b[i];
    // axis-aligned links: g * (h_i + h_j)/2 * (b_j - b_i) / (3 e^2)
    for (int j = 0; j < 4; j++){
        int index = IDX(i, j, Lx, ex, ey);
        forcing[8*i+j] = factor * 9.8 * (localh + h[index]) * (b[index] - localb);
    }
    // diagonal links: same term scaled by the 1/4 lattice weight
    for (int j = 4; j < 8; j++){
        int index = IDX(i, j, Lx, ex, ey);
        forcing[8*i+j] = factor * 0.25 * 9.8 * (localh + h[index]) * (b[index] - localb);
    }
}
// h = w - b : recover water depth from free-surface elevation w and bed
// elevation b, one thread per cell.
__global__ void hKernel(const configStruct config, const prec* __restrict__ w,
    const prec* __restrict__ b, prec* h){
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    const int n_cells = config.Lx * config.Ly;
    if (idx >= n_cells) return;  // guard the grid tail
    h[idx] = w[idx] - b[idx];
}
// w = h + b : recover free-surface elevation from water depth h and bed
// elevation b, one thread per cell.
__global__ void wKernel(const configStruct config, const prec* __restrict__ h,
    const prec* __restrict__ b, prec* w){
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    const int n_cells = config.Lx * config.Ly;
    if (idx >= n_cells) return;  // guard the grid tail
    w[idx] = h[idx] + b[idx];
}
|
518582b8a87a2d5bf46864e88068f2e8279411ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "subgradinputAtomic.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations to benchmark: {BLOCKX, BLOCKY} pairs.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes to benchmark: {XSIZE, YSIZE} pairs.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark the subgradinputAtomic kernel over every (matrix size, block
// shape) combination: one warm-up launch + 10 untimed launches, then 1000
// timed launches.  argv[1] = number of matrix sizes to sweep (max 7).
//
// Fixes vs. previous version: device allocations were XSIZE*YSIZE *bytes*
// (missing sizeof(float)), the buffers were leaked on every iteration, the
// CLI argument was read without an argc check and could index past
// matrices_[7].
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        printf("usage: %s <num_matrix_sizes>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    if (matrix_len > 7) matrix_len = 7;  // matrices_ has 7 entries
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // FIX: allocate XSIZE*YSIZE floats, not XSIZE*YSIZE bytes
            float *gradInput = NULL;
            hipMalloc(&gradInput, XSIZE*YSIZE*sizeof(float));
            float *gradOutput = NULL;
            hipMalloc(&gradOutput, XSIZE*YSIZE*sizeof(float));
            int input_n = 1;
            int input_h = 1;
            int input_w = 1;
            int kH = 1;
            int kW = 1;
            int dH = 1;
            int dW = 1;
            // round launch extents up to a multiple of the block shape
            int iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX;
            int iYSIZE = ((YSIZE + BLOCKY - 1) / BLOCKY) * BLOCKY;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            // warm-up launch
            hipLaunchKernelGGL(subgradinputAtomic, gridBlock, threadBlock, 0, 0, gradInput, gradOutput, input_n, input_h, input_w, kH, kW, dH, dW);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(subgradinputAtomic, gridBlock, threadBlock, 0, 0, gradInput, gradOutput, input_n, input_h, input_w, kH, kW, dH, dW);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(subgradinputAtomic, gridBlock, threadBlock, 0, 0, gradInput, gradOutput, input_n, input_h, input_w, kH, kW, dH, dW);
            }
            auto end = steady_clock::now();
            // NOTE(review): no synchronize before `end` — this times kernel
            // *enqueue*, not execution; kept as-is to preserve reported numbers.
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: buffers were previously leaked on every iteration
            hipFree(gradInput);
            hipFree(gradOutput);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "subgradinputAtomic.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations to benchmark: {BLOCKX, BLOCKY} pairs.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes to benchmark: {XSIZE, YSIZE} pairs.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark the subgradinputAtomic kernel over every (matrix size, block
// shape) combination: one warm-up launch + 10 untimed launches, then 1000
// timed launches.  argv[1] = number of matrix sizes to sweep (max 7).
//
// Fixes vs. previous version: device allocations were XSIZE*YSIZE *bytes*
// (missing sizeof(float)), the buffers were leaked on every iteration, the
// CLI argument was read without an argc check and could index past
// matrices_[7].
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        printf("usage: %s <num_matrix_sizes>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    if (matrix_len > 7) matrix_len = 7;  // matrices_ has 7 entries
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // FIX: allocate XSIZE*YSIZE floats, not XSIZE*YSIZE bytes
            float *gradInput = NULL;
            cudaMalloc(&gradInput, XSIZE*YSIZE*sizeof(float));
            float *gradOutput = NULL;
            cudaMalloc(&gradOutput, XSIZE*YSIZE*sizeof(float));
            int input_n = 1;
            int input_h = 1;
            int input_w = 1;
            int kH = 1;
            int kW = 1;
            int dH = 1;
            int dW = 1;
            // round launch extents up to a multiple of the block shape
            int iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX;
            int iYSIZE = ((YSIZE + BLOCKY - 1) / BLOCKY) * BLOCKY;
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            // warm-up launch
            subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                subgradinputAtomic<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
            }
            auto end = steady_clock::now();
            // NOTE(review): no synchronize before `end` — this times kernel
            // *enqueue*, not execution; kept as-is to preserve reported numbers.
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: buffers were previously leaked on every iteration
            cudaFree(gradInput);
            cudaFree(gradOutput);
        }
    }
}
0dec625c5506822f86f5308f52d836ae1565aab8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
// Forward constant padding, NCHW layout: one thread per *output* element;
// positions outside the input image receive `value`.
template <typename T>
__global__ void PadImageConstNCHW(
    const int nthreads, const T* const bottom_data, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T value, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n*c, ph, pw)
    int nc = index / padded_width;
    const int pw = index % padded_width;
    const int ph = nc % padded_height;
    nc /= padded_height;
    // corresponding (possibly out-of-range) input coordinates
    const int h = ph - pad_t;
    const int w = pw - pad_l;
    top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
        ? value
        : bottom_data[(nc * height + h) * width + w];
  }
}
// Forward reflection padding, NCHW layout: out-of-range coordinates are
// mirrored back into the image without duplicating the border pixel
// (h = -1 -> 1, h = height -> height - 2).
template <typename T>
__global__ void PadImageReflectNCHW(
    const int nthreads, const T* const bottom_data, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n*c, ph, pw)
    int nc = index / padded_width;
    const int pw = index % padded_width;
    const int ph = nc % padded_height;
    nc /= padded_height;
    int h = ph - pad_t;
    int w = pw - pad_l;
    h = max(h, -h);                  // mirror across the near edge
    w = max(w, -w);
    h = min(h, 2 * height - h - 2);  // mirror across the far edge
    w = min(w, 2 * width - w - 2);
    top_data[index] = bottom_data[(nc * height + h) * width + w];
  }
}
// Forward edge (replicate) padding, NCHW layout: out-of-range coordinates
// are clamped to the nearest valid pixel.
template <typename T>
__global__ void PadImageEdgeNCHW(
    const int nthreads, const T* const bottom_data, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n*c, ph, pw)
    int nc = index / padded_width;
    const int pw = index % padded_width;
    const int ph = nc % padded_height;
    nc /= padded_height;
    // clamp to [0, height-1] x [0, width-1]
    const int h = min(height - 1, max(ph - pad_t, 0));
    const int w = min(width - 1, max(pw - pad_l, 0));
    top_data[index] = bottom_data[(nc * height + h) * width + w];
  }
}
// Forward constant padding, NHWC layout (channel fastest): one thread per
// output element; positions outside the input image receive `value`.
template <typename T>
__global__ void PadImageConstNHWC(
    const int nthreads, const T* const bottom_data, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T value, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n, ph, pw, c), channel fastest
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % padded_width;
    n /= padded_width;
    const int ph = n % padded_height;
    n /= padded_height;
    // corresponding (possibly out-of-range) input coordinates
    const int h = ph - pad_t;
    const int w = pw - pad_l;
    top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
        ? value
        : bottom_data[((n * height + h) * width + w) * channels + c];
  }
}
// Forward reflection padding, NHWC layout (channel fastest): out-of-range
// coordinates are mirrored back into the image without duplicating the
// border pixel.
template <typename T>
__global__ void PadImageReflectNHWC(
    const int nthreads, const T* const bottom_data, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n, ph, pw, c), channel fastest
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % padded_width;
    n /= padded_width;
    const int ph = n % padded_height;
    n /= padded_height;
    int h = ph - pad_t;
    int w = pw - pad_l;
    h = max(h, -h);                  // mirror across the near edge
    w = max(w, -w);
    h = min(h, 2 * height - h - 2);  // mirror across the far edge
    w = min(w, 2 * width - w - 2);
    top_data[index] =
        bottom_data[((n * height + h) * width + w) * channels + c];
  }
}
// Forward edge (replicate) padding, NHWC layout (channel fastest):
// out-of-range coordinates are clamped to the nearest valid pixel.
template <typename T>
__global__ void PadImageEdgeNHWC(
    const int nthreads, const T* const bottom_data, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const top_data) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat output index into (n, ph, pw, c), channel fastest
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % padded_width;
    n /= padded_width;
    const int ph = n % padded_height;
    n /= padded_height;
    // clamp to [0, height-1] x [0, width-1]
    const int h = min(height - 1, max(ph - pad_t, 0));
    const int w = min(width - 1, max(pw - pad_l, 0));
    top_data[index] =
        bottom_data[((n * height + h) * width + w) * channels + c];
  }
}
// Backward pass of constant padding, NCHW layout: one thread per *input*
// element; copies the gradient from the matching interior position of the
// padded output (gradients of the padded border are dropped).
template <typename T>
__global__ void PadImageGradientConstNCHW(
    const int nthreads, const T* const top_diff, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *input* index and shift into padded coordinates
    int nc = index / width;
    const int pw = index % width + pad_l;
    const int ph = nc % height + pad_t;
    nc /= height;
    bottom_diff[index] =
        top_diff[(nc * padded_height + ph) * padded_width + pw];
  }
}
// Backward pass of reflection padding, NCHW layout: one thread per *output*
// element; scatters its gradient into the mirrored source pixel.  atomicAdd
// is required because several padded positions can map to the same input
// pixel; assumes bottom_diff is pre-zeroed by the caller (the visible
// caller uses math::Set).
template <typename T>
__global__ void PadImageGradientReflectNCHW(
    const int nthreads, const T* const top_diff, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *output* index into (n*c, ph, pw)
    int nc = index / padded_width;
    const int pw = index % padded_width;
    const int ph = nc % padded_height;
    nc /= padded_height;
    int h = ph - pad_t;
    int w = pw - pad_l;
    h = max(h, -h);                  // same mirroring as the forward pass
    w = max(w, -w);
    h = min(h, 2 * height - h - 2);
    w = min(w, 2 * width - w - 2);
    atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
  }
}
// Backward pass of edge (replicate) padding, NCHW layout: one thread per
// *output* element; scatters its gradient into the clamped source pixel.
// atomicAdd is required because border pixels absorb many padded positions;
// assumes bottom_diff is pre-zeroed by the caller.
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
    const int nthreads, const T* const top_diff, const int num,
    const int channels, const int height, const int width,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *output* index into (n*c, ph, pw)
    int nc = index / padded_width;
    const int pw = index % padded_width;
    const int ph = nc % padded_height;
    nc /= padded_height;
    // clamp to the nearest valid pixel, as in the forward pass
    const int h = min(height - 1, max(ph - pad_t, 0));
    const int w = min(width - 1, max(pw - pad_l, 0));
    atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
  }
}
// Backward pass of constant padding, NHWC layout (channel fastest): one
// thread per *input* element; copies the gradient from the matching
// interior position of the padded output.
template <typename T>
__global__ void PadImageGradientConstNHWC(
    const int nthreads, const T* const top_diff, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *input* index and shift into padded coordinates
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % width + pad_l;
    n /= width;
    const int ph = n % height + pad_t;
    n /= height;
    bottom_diff[index] =
        top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
  }
}
// Backward pass of reflection padding, NHWC layout (channel fastest): one
// thread per *output* element; scatters its gradient into the mirrored
// source pixel via atomicAdd.  Assumes bottom_diff is pre-zeroed by the
// caller.
template <typename T>
__global__ void PadImageGradientReflectNHWC(
    const int nthreads, const T* const top_diff, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *output* index into (n, ph, pw, c), channel fastest
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % padded_width;
    n /= padded_width;
    const int ph = n % padded_height;
    n /= padded_height;
    int h = ph - pad_t;
    int w = pw - pad_l;
    h = max(h, -h);                  // same mirroring as the forward pass
    w = max(w, -w);
    h = min(h, 2 * height - h - 2);
    w = min(w, 2 * width - w - 2);
    atomicAdd(
        &bottom_diff[((n * height + h) * width + w) * channels + c],
        top_diff[index]);
  }
}
// Backward pass of edge (replicate) padding, NHWC layout (channel fastest):
// one thread per *output* element; scatters its gradient into the clamped
// source pixel via atomicAdd.  Assumes bottom_diff is pre-zeroed by the
// caller.
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
    const int nthreads, const T* const top_diff, const int num,
    const int height, const int width, const int channels,
    const int padded_height, const int padded_width,
    const int pad_t, const int pad_l, T* const bottom_diff) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // decompose the flat *output* index into (n, ph, pw, c), channel fastest
    int n = index / channels;
    const int c = index % channels;
    const int pw = n % padded_width;
    n /= padded_width;
    const int ph = n % padded_height;
    n /= padded_height;
    // clamp to the nearest valid pixel, as in the forward pass
    const int h = min(height - 1, max(ph - pad_t, 0));
    const int w = min(width - 1, max(pw - pad_l, 0));
    atomicAdd(
        &bottom_diff[((n * height + h) * width + w) * channels + c],
        top_diff[index]);
  }
}
} // namespace
// Forward pad for float tensors in NCHW order: sizes the output via
// ConvPoolOpBase, then launches the kernel matching the configured pad mode
// with one thread per output element.  The hipLaunchKernelGGL argument
// lists are hipify-generated; kept byte-identical.
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
  auto& X = Input(0);
  auto* Y = Output(0);
  const int num = X.dim32(0);
  const int channels = X.dim32(1);
  const int height = X.dim32(2);
  const int width = X.dim32(3);
  // computes the padded output shape and resizes Y
  ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
  const int output_size = Y->size();
  const int padded_height = Y->dim32(2);
  const int padded_width = Y->dim32(3);
  const float* Xdata = X.data<float>();
  float* Ydata = Y->template mutable_data<float>();
  switch (mode_) {
    case PadMode::CONSTANT:
      hipLaunchKernelGGL(( PadImageConstNCHW<float>),
          dim3(CAFFE_GET_BLOCKS(output_size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context_.cuda_stream(),
          output_size, Xdata, num, channels, height, width,
          padded_height, padded_width, pad_t(), pad_l(), value_, Ydata);
      break;
    case PadMode::REFLECT:
      hipLaunchKernelGGL(( PadImageReflectNCHW<float>),
          dim3(CAFFE_GET_BLOCKS(output_size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context_.cuda_stream(),
          output_size, Xdata, num, channels, height, width,
          padded_height, padded_width, pad_t(), pad_l(), Ydata);
      break;
    case PadMode::EDGE:
      hipLaunchKernelGGL(( PadImageEdgeNCHW<float>),
          dim3(CAFFE_GET_BLOCKS(output_size)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context_.cuda_stream(),
          output_size, Xdata, num, channels, height, width,
          padded_height, padded_width, pad_t(), pad_l(), Ydata);
      break;
  }
  return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
| 0dec625c5506822f86f5308f52d836ae1565aab8.cu | #include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
// Constant-pad forward, NCHW layout. One thread per element of the padded
// output; `index` decomposes as (nc, ph, pw) over the padded spatial dims.
// Positions that fall outside the source image get `value`; the rest copy
// from bottom_data at the unpadded coordinate.
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Decompose the flat padded index: index = (nc * padded_height + ph) * padded_width + pw
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
// Shift back into source coordinates; negative / >= dims means "in the pad".
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
// Reflection-pad forward, NCHW layout. Out-of-range coordinates are mirrored
// across the image borders (without repeating the border pixel itself).
// Assumes pad sizes are smaller than the corresponding image dimension so a
// single reflection suffices — TODO confirm the op validates this upstream.
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
// Reflect below-zero coordinates: h < 0 -> -h.
h = max(h, -h);
w = max(w, -w);
// Reflect past-the-end coordinates: h >= height -> 2*height - h - 2.
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
// Constant-pad backward, NCHW layout. One thread per element of the UNPADDED
// gradient dX; each simply gathers the matching interior element of the padded
// top_diff (gradient w.r.t. the constant fill region is discarded). No atomics
// needed because the mapping is one-to-one.
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// Decompose over the unpadded dims, then offset into the padded tensor.
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, channels);
const int output_size = Y->size();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r());
const int input_size = dY.size();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0);
dX->Resize(
dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3));
const int input_size = dY.size();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->size();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
c6b00d88b414ac327fbc84fcd41cdc8101eb2559.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define DIM 8
const int THREADS_PER_BLOCK = 8;
const int NUM_BLOCKS = 8;
// Block-wise sum reduction: each block loads blockDim.x elements of `a` into
// shared memory, tree-reduces them, and writes the block total to
// c[blockIdx.x]. Assumes blockDim.x == THREADS_PER_BLOCK, a power of two, and
// that grid*block exactly covers the input (there is no bounds guard on tid).
__global__ void add(int *a, int *c)
{
    __shared__ int cache[THREADS_PER_BLOCK];
    int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    int cacheIndex = threadIdx.x;
    int temp = a[tid];
    cache[cacheIndex] = temp;
    // BUG FIX: all threads must publish their value before any thread reads a
    // neighbour's slot below; the original code omitted this barrier, racing
    // the first round of the reduction against the shared-memory stores.
    __syncthreads();
    int i = blockDim.x / 2;
    while (i > 0)
    {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads(); // outside the divergent if, as required for barriers
        i /= 2;
    }
    if (threadIdx.x == 0) // if at thread 0 in this block
        c[blockIdx.x] = cache[0]; // save the sum in global memory
}
// Demo driver: fills an 8x8 matrix with 7s, reduces it on the GPU (one partial
// sum per block), then finishes the reduction on the host. Expected output is
// DIM*DIM*7 = 448. NOTE(review): no HIP API return codes are checked, so a
// failed malloc/copy/launch would silently print a wrong total.
int main()
{
int a[DIM][DIM], c[DIM]; // host input matrix and per-block partial sums
int *dev_a, *dev_c;
hipMalloc((void **)&dev_a, DIM * DIM * sizeof(int));
hipMalloc((void **)&dev_c, DIM * sizeof(int));
for (int y = 0; y < DIM; y++) // Fill Arrays
for (int x = 0; x < DIM; x++)
a[y][x] = 7;
for (int i = 0; i < DIM; ++i)
{
c[i] = 0;
}
hipMemcpy(dev_a, a, DIM * DIM * sizeof(int), hipMemcpyHostToDevice);
// One block per row's worth of elements; kernel writes one partial per block.
hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_a, dev_c);
// Blocking copy also synchronizes with the kernel before c is read.
hipMemcpy(c, dev_c, DIM * sizeof(int), hipMemcpyDeviceToHost);
int total = 0;
for (int i = 0; i < DIM; ++i)
{
total += c[i];
}
printf("Total sum of all elements is: %d\n", total);
hipFree(dev_a);
hipFree(dev_c);
return 0;
}
| c6b00d88b414ac327fbc84fcd41cdc8101eb2559.cu | #include "stdio.h"
#define DIM 8
const int THREADS_PER_BLOCK = 8;
const int NUM_BLOCKS = 8;
// Block-wise sum reduction: each block loads blockDim.x elements of `a` into
// shared memory, tree-reduces them, and writes the block total to
// c[blockIdx.x]. Assumes blockDim.x == THREADS_PER_BLOCK, a power of two, and
// that grid*block exactly covers the input (there is no bounds guard on tid).
__global__ void add(int *a, int *c)
{
    __shared__ int cache[THREADS_PER_BLOCK];
    int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    int cacheIndex = threadIdx.x;
    int temp = a[tid];
    cache[cacheIndex] = temp;
    // BUG FIX: all threads must publish their value before any thread reads a
    // neighbour's slot below; the original code omitted this barrier, racing
    // the first round of the reduction against the shared-memory stores.
    __syncthreads();
    int i = blockDim.x / 2;
    while (i > 0)
    {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads(); // outside the divergent if, as required for barriers
        i /= 2;
    }
    if (threadIdx.x == 0) // if at thread 0 in this block
        c[blockIdx.x] = cache[0]; // save the sum in global memory
}
int main()
{
int a[DIM][DIM], c[DIM];
int *dev_a, *dev_c;
cudaMalloc((void **)&dev_a, DIM * DIM * sizeof(int));
cudaMalloc((void **)&dev_c, DIM * sizeof(int));
for (int y = 0; y < DIM; y++) // Fill Arrays
for (int x = 0; x < DIM; x++)
a[y][x] = 7;
for (int i = 0; i < DIM; ++i)
{
c[i] = 0;
}
cudaMemcpy(dev_a, a, DIM * DIM * sizeof(int), cudaMemcpyHostToDevice);
add<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(dev_a, dev_c);
cudaMemcpy(c, dev_c, DIM * sizeof(int), cudaMemcpyDeviceToHost);
int total = 0;
for (int i = 0; i < DIM; ++i)
{
total += c[i];
}
printf("Total sum of all elements is: %d\n", total);
cudaFree(dev_a);
cudaFree(dev_c);
return 0;
}
|
d53512d9c709af0943d4cd89f13503ccc89e2fae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// cuda_dot.cu
// Cuda GMRES
//
// Created by Tim Ioannidis on 2/18/12.
// Copyright 2012 Chemeng NTUA. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_config.h"
#include "cuda_methods.h"
//dot product dot_res=a<dot>b me diastasi dim
// Per-block dot product: each block accumulates a grid-stride partial sum of
// a·b in shared memory and writes it to dot_res[blockIdx.x]; the caller must
// finish the reduction over blocks. Requires blockDim.x == threadsPerBlock
// and a power-of-two block size.
__global__ void cuda_dot_kernel(int n, double *a, double *b, double *dot_res)
{
    __shared__ double cache[threadsPerBlock]; // per-thread partials for this block
    int global_tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    double temp = 0;
    // Grid-stride loop so any grid size covers all n elements.
    while (global_tid < n) {
        temp += a[global_tid] * b[global_tid];
        global_tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    __syncthreads();
    // Tree reduction. BUG FIX: __syncthreads() must be reached by EVERY thread
    // of the block, so the barrier now sits outside the thread-id guard; the
    // original called it inside `threadIdx.x < k` branches, which is undefined
    // behavior (potential deadlock) whenever threads diverge on the condition.
    if (blockDim.x >= 1024) {
        if (threadIdx.x < 512) cache[threadIdx.x] += cache[threadIdx.x + 512];
        __syncthreads();
    }
    if (blockDim.x >= 512) {
        if (threadIdx.x < 256) cache[threadIdx.x] += cache[threadIdx.x + 256];
        __syncthreads();
    }
    if (blockDim.x >= 256) {
        if (threadIdx.x < 128) cache[threadIdx.x] += cache[threadIdx.x + 128];
        __syncthreads();
    }
    if (blockDim.x >= 128) {
        if (threadIdx.x < 64) cache[threadIdx.x] += cache[threadIdx.x + 64];
        __syncthreads();
    }
    // Unrolled final warp. A volatile view forces each shared-memory access to
    // be re-issued instead of cached in registers (classic pre-Volta idiom;
    // NOTE(review): on Volta+ independent thread scheduling this also needs
    // __syncwarp() between steps to be strictly correct).
    if (threadIdx.x < 32) {
        volatile double *vc = cache;
        if (blockDim.x >= 64) vc[threadIdx.x] += vc[threadIdx.x + 32];
        if (blockDim.x >= 32) vc[threadIdx.x] += vc[threadIdx.x + 16];
        if (blockDim.x >= 16) vc[threadIdx.x] += vc[threadIdx.x + 8];
        if (blockDim.x >= 8)  vc[threadIdx.x] += vc[threadIdx.x + 4];
        if (blockDim.x >= 4)  vc[threadIdx.x] += vc[threadIdx.x + 2];
        if (blockDim.x >= 2)  vc[threadIdx.x] += vc[threadIdx.x + 1];
    }
    if (cacheIndex == 0) {
        dot_res[blockIdx.x] = cache[0];
    }
}
| d53512d9c709af0943d4cd89f13503ccc89e2fae.cu | //
// cuda_dot.cu
// Cuda GMRES
//
// Created by Tim Ioannidis on 2/18/12.
// Copyright 2012 Chemeng NTUA. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_config.h"
#include "cuda_methods.h"
//dot product dot_res=a<dot>b me diastasi dim
// Per-block dot product: each block accumulates a grid-stride partial sum of
// a·b in shared memory and writes it to dot_res[blockIdx.x]; the caller must
// finish the reduction over blocks. Requires blockDim.x == threadsPerBlock
// and a power-of-two block size.
__global__ void cuda_dot_kernel(int n, double *a, double *b, double *dot_res)
{
    __shared__ double cache[threadsPerBlock]; // per-thread partials for this block
    int global_tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    double temp = 0;
    // Grid-stride loop so any grid size covers all n elements.
    while (global_tid < n) {
        temp += a[global_tid] * b[global_tid];
        global_tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    __syncthreads();
    // Tree reduction. BUG FIX: __syncthreads() must be reached by EVERY thread
    // of the block, so the barrier now sits outside the thread-id guard; the
    // original called it inside `threadIdx.x < k` branches, which is undefined
    // behavior (potential deadlock) whenever threads diverge on the condition.
    if (blockDim.x >= 1024) {
        if (threadIdx.x < 512) cache[threadIdx.x] += cache[threadIdx.x + 512];
        __syncthreads();
    }
    if (blockDim.x >= 512) {
        if (threadIdx.x < 256) cache[threadIdx.x] += cache[threadIdx.x + 256];
        __syncthreads();
    }
    if (blockDim.x >= 256) {
        if (threadIdx.x < 128) cache[threadIdx.x] += cache[threadIdx.x + 128];
        __syncthreads();
    }
    if (blockDim.x >= 128) {
        if (threadIdx.x < 64) cache[threadIdx.x] += cache[threadIdx.x + 64];
        __syncthreads();
    }
    // Unrolled final warp. A volatile view forces each shared-memory access to
    // be re-issued instead of cached in registers (classic pre-Volta idiom;
    // NOTE(review): on Volta+ independent thread scheduling this also needs
    // __syncwarp() between steps to be strictly correct).
    if (threadIdx.x < 32) {
        volatile double *vc = cache;
        if (blockDim.x >= 64) vc[threadIdx.x] += vc[threadIdx.x + 32];
        if (blockDim.x >= 32) vc[threadIdx.x] += vc[threadIdx.x + 16];
        if (blockDim.x >= 16) vc[threadIdx.x] += vc[threadIdx.x + 8];
        if (blockDim.x >= 8)  vc[threadIdx.x] += vc[threadIdx.x + 4];
        if (blockDim.x >= 4)  vc[threadIdx.x] += vc[threadIdx.x + 2];
        if (blockDim.x >= 2)  vc[threadIdx.x] += vc[threadIdx.x + 1];
    }
    if (cacheIndex == 0) {
        dot_res[blockIdx.x] = cache[0];
    }
}
|
b3189cf8bc797a6c45ae7e8f4a4c07b8844f6b57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author : H.M.Gamaarachchi
Mandelbrot set in CUDA
command line arguments are WIDTH HEIGHT REAL_MIN REAL_MAX IMAGINARY_MIN IMAGINARY_MAX OUT.txt OUT.ppm
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "myhelpers.h"
#define BLOCK 16
#define INF 4
#define MAXN 3000
#define max(a,b) (((a)>(b))?(a):(b))
//Transform a pixel to complex plane
__device__ void transform_to_x(int x,float *x_dev,int WIDTH,float XMIN,float XMAX){
*x_dev=(XMIN+x*(XMAX-XMIN)/(float)WIDTH);
}
__device__ void transform_to_y(int y,float *y_dev,int HEIGHT,float YMIN,float YMAX){
*y_dev=(YMAX-y*(YMAX-YMIN)/(float)HEIGHT);
}
// Iterate z <- z^2 + c (complex arithmetic on (realz, imagz)) starting from 0.
// Writes 0 to *ans if c appears to be in the Mandelbrot set (no divergence
// within MAXN iterations), otherwise the iteration count at which |z|^2
// exceeded INF — used by the callers as a colour index.
__device__ void isin_mandelbrot(float realc,float imagc,int *ans){
int i=0;
float realz_next=0,imagz_next=0;
float abs=0; // squared magnitude |z|^2, compared against INF (=4)
float realz=0;
float imagz=0;
while(i<MAXN && abs<INF){
// (x + iy)^2 = (x^2 - y^2) + i(2xy), then add the constant c
realz_next=realz*realz-imagz*imagz+realc;
imagz_next=2*realz*imagz+imagc;
abs=realz*realz+imagz*imagz;
realz=realz_next;
imagz=imagz_next;
i++;
}
if (i==MAXN)
*ans= 0;
else
*ans= i;
}
/* Map a divergence count to the R channel. Points inside the set (i == 0)
   stay black; every other count cycles through 0..255 with an offset of 10. */
unsigned char red(int i){
    return (i == 0) ? 0 : (unsigned char)((i + 10) % 256);
}
/*Calculate B value in RGB based on divergence: 7 colour buckets of width 255/7*/
unsigned char blue(int i){
    if (i == 0) {
        return 0;
    }
    const int bucket = (i + 234) % 7;
    return (unsigned char)(bucket * (255 / 7));
}
/*Calculate G value in RGB based on divergence: 9 colour buckets of width 255/9*/
unsigned char green(int i){
    if (i == 0) {
        return 0;
    }
    const int bucket = (i + 100) % 9;
    return (unsigned char)(bucket * (255 / 9));
}
// Kernel: one thread per pixel of a 2D launch. Maps pixel (x, y) into the
// complex plane, runs the Mandelbrot divergence test, and stores the iteration
// count (0 = in the set) into the row-major WIDTH x HEIGHT matrix `blank`.
__global__ void plot(int *blank,int WIDTH,int HEIGHT,float XMIN,float XMAX,float YMIN,float YMAX){
int y=blockDim.y*blockIdx.y+threadIdx.y;
int x=blockDim.x*blockIdx.x+threadIdx.x;
// Bounds guard: the grid is rounded up, so edge blocks overhang the image.
if (x<WIDTH && y<HEIGHT){
int n=y*WIDTH+x;
float x_trans;
float y_trans;
int ans;
transform_to_x(x,&x_trans,WIDTH,XMIN,XMAX);
transform_to_y(y,&y_trans,HEIGHT,YMIN,YMAX);
isin_mandelbrot(x_trans,y_trans,&ans);
blank[n]=ans;
}
}
// Convert the divergence matrix into a packed RGB byte image (3 bytes per
// pixel, row-major), using the red/green/blue colour ramps defined above.
void createimage(int *mandel_set,unsigned char *image,int WIDTH,int HEIGHT) {
    unsigned char *out = image;
    for (int y = 0; y < HEIGHT; y++) {
        for (int x = 0; x < WIDTH; x++) {
            const int color = mandel_set[y * WIDTH + x];
            *out++ = red(color);
            *out++ = green(color);
            *out++ = blue(color);
        }
    }
}
int main(int argc, char** argv) {
//check values
if (argc<9){
fprintf(stderr,"Enter arguments as ./binary WIDTH HEIGHT REAL_MIN REAL_MAX IMAGINARY_MIN IMAGINARY_MAX OUT.txt out.ppm\n");
exit(EXIT_FAILURE);
}
int WIDTH=atoi(argv[1]);
int HEIGHT=atoi(argv[2]);
float XMIN=atof(argv[3]);
float XMAX=atof(argv[4]);
float YMIN=atof(argv[5]);
float YMAX=atof(argv[6]);
printf("\nwidth : %d height : %d xmin : %f xmax:%f ymin : %f ymax : %f \n",WIDTH,HEIGHT,XMIN,XMAX,YMIN,YMAX);
//Memory allocation
int *dev_mandel;
checkCudaError(hipMalloc((void**)&dev_mandel, HEIGHT* WIDTH * sizeof(int)));
//CUDA function calling
float tdx=(float)BLOCK; //max possible threads per block
float tdy=(float)BLOCK;
dim3 grid(ceil(WIDTH/tdx),ceil(HEIGHT/tdy));
dim3 block(tdx,tdy);
//time calculations
hipEvent_t start,stop;
float elapsedtime;
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( plot), dim3(grid), dim3(block), 0, 0, dev_mandel,WIDTH,HEIGHT,XMIN,XMAX,YMIN,YMAX);
checkCudaError(hipGetLastError());
//time calculation
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime,start,stop);
printf("Time spent for calculation in CUDA : %.10f s\n",elapsedtime/(float)1000);
//copy back and clear
int *mandel_set=(int *)malloc(sizeof(int)*WIDTH*HEIGHT);
checkCudaError(hipMemcpy(mandel_set, dev_mandel, HEIGHT* WIDTH * sizeof(int), hipMemcpyDeviceToHost));
hipFree(dev_mandel);
FILE *fp;
fp=fopen(argv[7],"w");
isFileOK(fp);
//printing results
int x,y;
for (y=0;y<HEIGHT;y++){
for(x=0;x<WIDTH;x++){
fprintf(fp,"%d ",mandel_set[y*WIDTH+x]);
//printf("%d ",mandel_set[y][x]);
}
fprintf(fp,"\n");
//printf("\n");
}
fclose(fp);
//Getting image
unsigned char *image=(unsigned char *)malloc(sizeof(unsigned char)*WIDTH*HEIGHT*3);
createimage(mandel_set,image,WIDTH,HEIGHT);
//Writing jpg
// color component ( R or G or B) is coded from 0 to 255
// it is 24 bit color RGB file
const int MaxColorComponentValue=255;
char *filename=argv[8];
char *comment="# ";//comment should start with #
//unsigned char color[3];
//create new file,give it a name and open it in binary mode
fp= fopen(filename,"wb"); // b - binary mode
isFileOK(fp);
//write ASCII header to the file
fprintf(fp,"P6\n %s\n %d\n %d\n %d\n",comment,WIDTH,HEIGHT,MaxColorComponentValue);
// compute and write image data bytes to the file
fwrite(image,1,WIDTH *HEIGHT * 3,fp);
fclose(fp);
free(mandel_set);
free(image);
return 0;
}
| b3189cf8bc797a6c45ae7e8f4a4c07b8844f6b57.cu | /*
Author : H.M.Gamaarachchi
Mandelbrot set in CUDA
command line arguments are WIDTH HEIGHT REAL_MIN REAL_MAX IMAGINARY_MIN IMAGINARY_MAX OUT.txt OUT.ppm
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "myhelpers.h"
#define BLOCK 16
#define INF 4
#define MAXN 3000
#define max(a,b) (((a)>(b))?(a):(b))
//Transform a pixel to complex plane
__device__ void transform_to_x(int x,float *x_dev,int WIDTH,float XMIN,float XMAX){
*x_dev=(XMIN+x*(XMAX-XMIN)/(float)WIDTH);
}
__device__ void transform_to_y(int y,float *y_dev,int HEIGHT,float YMIN,float YMAX){
*y_dev=(YMAX-y*(YMAX-YMIN)/(float)HEIGHT);
}
//check whether is in mandelbrot set
__device__ void isin_mandelbrot(float realc,float imagc,int *ans){
int i=0;
float realz_next=0,imagz_next=0;
float abs=0;
float realz=0;
float imagz=0;
while(i<MAXN && abs<INF){
realz_next=realz*realz-imagz*imagz+realc;
imagz_next=2*realz*imagz+imagc;
abs=realz*realz+imagz*imagz;
realz=realz_next;
imagz=imagz_next;
i++;
}
if (i==MAXN)
*ans= 0;
else
*ans= i;
}
unsigned char red(int i){
if (i==0 )
return 0 ;
else
return ((i+10)%256);
}
/*Calculate B value in RGB based on divergence*/
unsigned char blue(int i){
if (i==0)
return 0;
else
return ((i + 234) % 7 * (255/7));
}
/*Calculate G value in RGB based on divergence*/
unsigned char green(int i){
if (i==0)
return 0 ;
else
return ((i+100) % 9 * (255/9));
}
//Make the plotting matrix of colors depending on the presence in mandelbrot
__global__ void plot(int *blank,int WIDTH,int HEIGHT,float XMIN,float XMAX,float YMIN,float YMAX){
int y=blockDim.y*blockIdx.y+threadIdx.y;
int x=blockDim.x*blockIdx.x+threadIdx.x;
if (x<WIDTH && y<HEIGHT){
int n=y*WIDTH+x;
float x_trans;
float y_trans;
int ans;
transform_to_x(x,&x_trans,WIDTH,XMIN,XMAX);
transform_to_y(y,&y_trans,HEIGHT,YMIN,YMAX);
isin_mandelbrot(x_trans,y_trans,&ans);
blank[n]=ans;
}
}
//create the image matrix
void createimage(int *mandel_set,unsigned char *image,int WIDTH,int HEIGHT) {
int x=0,y=0,n=0;int color;
for (y=0;y<HEIGHT;y++){
for(x=0;x<WIDTH;x++){
color=mandel_set[y*WIDTH+x];
image[n]=red(color);
image[n+1]=green(color);
image[n+2]=blue(color);
n=n+3;
}
}
}
// Entry point: parse the image size and complex-plane window from argv,
// compute the Mandelbrot divergence matrix on the GPU, then write both a
// text dump (argv[7]) and a 24-bit PPM image (argv[8]).
int main(int argc, char** argv) {
//validate arguments: program name + 8 parameters are required
if (argc<9){
fprintf(stderr,"Enter arguments as ./binary WIDTH HEIGHT REAL_MIN REAL_MAX IMAGINARY_MIN IMAGINARY_MAX OUT.txt out.ppm\n");
exit(EXIT_FAILURE);
}
int WIDTH=atoi(argv[1]);
int HEIGHT=atoi(argv[2]);
float XMIN=atof(argv[3]);
float XMAX=atof(argv[4]);
float YMIN=atof(argv[5]);
float YMAX=atof(argv[6]);
printf("\nwidth : %d height : %d xmin : %f xmax:%f ymin : %f ymax : %f \n",WIDTH,HEIGHT,XMIN,XMAX,YMIN,YMAX);
//Device memory for the divergence matrix
int *dev_mandel;
checkCudaError(cudaMalloc((void**)&dev_mandel, HEIGHT* WIDTH * sizeof(int)));
//Launch configuration: BLOCK x BLOCK threads, enough blocks to cover the image
float tdx=(float)BLOCK; //max possible threads per block
float tdy=(float)BLOCK;
dim3 grid(ceil(WIDTH/tdx),ceil(HEIGHT/tdy));
dim3 block(tdx,tdy);
//time the kernel with CUDA events
cudaEvent_t start,stop;
float elapsedtime;
checkCudaError(cudaEventCreate(&start));
checkCudaError(cudaEventCreate(&stop));
checkCudaError(cudaEventRecord(start,0));
plot<<<grid, block>>>(dev_mandel,WIDTH,HEIGHT,XMIN,XMAX,YMIN,YMAX);
checkCudaError(cudaGetLastError());
checkCudaError(cudaEventRecord(stop,0));
checkCudaError(cudaEventSynchronize(stop));
checkCudaError(cudaEventElapsedTime(&elapsedtime,start,stop));
//BUGFIX: the events were never destroyed; release them once timing is done
checkCudaError(cudaEventDestroy(start));
checkCudaError(cudaEventDestroy(stop));
printf("Time spent for calculation in CUDA : %.10f s\n",elapsedtime/(float)1000);
//copy back and clear
int *mandel_set=(int *)malloc(sizeof(int)*WIDTH*HEIGHT);
if (mandel_set==NULL){ //BUGFIX: malloc result was unchecked
fprintf(stderr,"malloc failed\n");
exit(EXIT_FAILURE);
}
checkCudaError(cudaMemcpy(mandel_set, dev_mandel, HEIGHT* WIDTH * sizeof(int), cudaMemcpyDeviceToHost));
checkCudaError(cudaFree(dev_mandel));
FILE *fp;
fp=fopen(argv[7],"w");
isFileOK(fp);
//dump the raw divergence matrix as text, one row per line
int x,y;
for (y=0;y<HEIGHT;y++){
for(x=0;x<WIDTH;x++){
fprintf(fp,"%d ",mandel_set[y*WIDTH+x]);
}
fprintf(fp,"\n");
}
fclose(fp);
//Getting image: 3 bytes (R,G,B) per pixel
unsigned char *image=(unsigned char *)malloc(sizeof(unsigned char)*WIDTH*HEIGHT*3);
if (image==NULL){ //BUGFIX: malloc result was unchecked
fprintf(stderr,"malloc failed\n");
exit(EXIT_FAILURE);
}
createimage(mandel_set,image,WIDTH,HEIGHT);
//Writing the PPM (P6): color components coded 0..255, 24-bit RGB
const int MaxColorComponentValue=255;
char *filename=argv[8];
const char *comment="# ";//comment should start with #
//create new file, give it a name and open it in binary mode
fp= fopen(filename,"wb"); // b - binary mode
isFileOK(fp);
//write ASCII header to the file
fprintf(fp,"P6\n %s\n %d\n %d\n %d\n",comment,WIDTH,HEIGHT,MaxColorComponentValue);
// compute and write image data bytes to the file
fwrite(image,1,WIDTH *HEIGHT * 3,fp);
fclose(fp);
free(mandel_set);
free(image);
return 0;
}
|
b528db3a9878f9db3782f90c8c225c713f3e00ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "SGFKM.cuh"
#include "hip/hip_runtime.h"
#include "Util.h"
#define DIM_MAX 16
#define MMAX 2
#define NSTREAM 5
#define BlockSizeLimit 1024
#pragma region Inline utility functions
// Ceiling division: smallest integer >= x/y for positive x.
// Note: yields 1 when x == 0 (the formula 1 + (x-1)/y), matching callers
// that always want at least one block.
inline __host__ int roundup(int x, int y)
{
int quotient = (x - 1) / y;
return quotient + 1;
}
// Persist results to text files under G.path: the K x D centroid matrix and
// the N x M table of each point's nearest-centroid ids.
inline __host__ void writeToFiles(double * centroids, int * NNT, GFKM & G)
{
Util::write<double>(centroids, G.K, G.D, G.path + "centroids.GPU.txt");
Util::write<int>(NNT, G.N, G.M, G.path + "NNT.GPU.txt");
}
#pragma endregion
#pragma region Update Membership Kernel
// Update memberships, variant v1a (memberships laid out N x M, dense in M).
// One thread per data point; centroids are read straight from global memory.
// Launch: 1-D grid with >= N threads. Requires D <= DIM_MAX and M <= MMAX.
__global__ void update_memberships_kernel_v1a(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return; // guard the grid tail
int i, j;
int * pNNT = NNT + idx*M;                    // this point's M nearest-centroid ids
double * pMemberships = memberships + idx*M; // this point's M membership degrees
double * pCentroids = centroids;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
// scan all K centroids, keeping the M closest via insertion into DNNT/pNNT
// (idx is reused below as the insertion slot; the thread id is no longer needed)
for (i = 0; i < K; ++i, pCentroids += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership: proportional to d^(-1/(m-1)), normalized over the M nearest
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.; // point coincides with a centroid: full membership there
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Update memberships, variant v1b: identical to v1a except the centroids are
// staged into dynamic shared memory, one element per thread.
// Assumes blockDim.x >= K*D — the host selects this kernel only when the
// whole centroid array fits that way (step == 1); shared size = K*D doubles.
__global__ void update_memberships_kernel_v1b(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
// first K*D threads stage the centroid matrix into shared memory
if (tid < K*D){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return; // guard the grid tail (after the barrier)
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*M;
double * pC = C;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
// keep the M closest centroids via insertion sort (idx reused as insert slot)
for (i = 0; i < K; ++i, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership: proportional to d^(-1/(m-1)), normalized over the M nearest
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.; // point coincides with a centroid: full membership there
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Update memberships, variant v1c: like v1b, but each thread stages 'step'
// consecutive centroid entries into shared memory, so blockDim.x*step must
// cover K*D. Shared memory size = K*D doubles.
__global__ void update_memberships_kernel_v1c(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int step, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
int i, j;
// thread t copies centroid entries [t*step, t*step+step) into shared memory
for (i = 0, j = K*D, tid *= step; tid < j && i < step; ++i, ++tid){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return; // guard the grid tail (after the barrier)
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*M;
double * pCentroids = C;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
// keep the M closest centroids via insertion sort (idx reused as insert slot)
for (i = 0; i < K; ++i, pCentroids += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership: proportional to d^(-1/(m-1)), normalized over the M nearest
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.; // point coincides with a centroid: full membership there
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Update memberships, variant v1d (memberships N x M): centroids are
// processed in tiles staged through dynamic shared memory (tile_size
// centroids = tile_size*D doubles per tile), so arbitrarily large K fits.
// Assumes blockDim.x >= tile_size*D so one element per thread suffices.
__global__ void update_memberships_kernel_v1d(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int num_tiles, int tile_size, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int pid = blockIdx.x * blockDim.x + tid;
int i = pid * M, j, t, cSize = K*D, idx;
int * pNNT = NNT + i;
int x = tile_size;     // centroids in the current tile
int y = num_tiles - 1; // full tiles handled before the (possibly short) last one
int z = x * D;         // doubles in the current tile
int cid = 0;           // global id of the next centroid examined
int offsetC = 0;       // read offset into the global centroid array
double * pMemberships = memberships + i;
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
// BUGFIX: the loads/initialization below ran unconditionally, causing
// out-of-bounds global reads (points) and writes (memberships) for threads
// with pid >= N. They are now guarded; those threads still take part in
// every __syncthreads() below (the tile loop trip count is uniform).
if (pid < N){
for (i = 0, j = pid*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
}
#pragma region load (num_tiles - 1) tiles first
for (t = 0; t < y; ++t, offsetC += z)
{
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N)
{
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// insertion slot among the current M best (idx reused)
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
}
// BUGFIX: barrier before the next tile overwrites C — without it a fast
// thread could reload C while a slow one is still reading the old tile.
__syncthreads();
}
#pragma endregion
#pragma region load last tile
// the final tile may hold fewer than tile_size centroids
if (offsetC + z > cSize){
z = cSize - offsetC;
x = z / D;
}
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
#pragma region calculate memberships
// fuzzy membership: proportional to d^(-1/(m-1)), normalized over the M nearest
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.; // point coincides with a centroid: full membership there
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
#pragma endregion
}
#pragma endregion
}
// Update memberships, variant v2a: memberships laid out N x K (one slot per
// centroid, zeroed during the scan); only the M nearest centroids receive a
// nonzero membership, indexed through pNNT. One thread per point, centroids
// read from global memory. Requires D <= DIM_MAX and M <= MMAX.
__global__ void update_memberships_kernel_v2a(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return; // guard the grid tail
int i, j;
int * pNNT = NNT + idx*M;                    // this point's M nearest-centroid ids
double * pMemberships = memberships + idx*K; // this point's K membership slots
double * pCentroids = centroids;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
// scan all K centroids: zero this point's slot, then track the M closest
// via insertion into DNNT/pNNT (idx reused as the insertion slot)
for (i = 0; i < K; ++i, pCentroids += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership written into the slot of each of the M nearest centroids
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.; // point coincides with a centroid
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Update memberships, variant v2b: v2a layout (memberships N x K) with the
// centroids staged into dynamic shared memory, one element per thread.
// Assumes blockDim.x >= K*D; shared memory size = K*D doubles.
__global__ void update_memberships_kernel_v2b(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
// first K*D threads stage the centroid matrix into shared memory
if (tid < K*D){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return; // guard the grid tail (after the barrier)
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*K;
double * pC = C;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
// zero each slot, then track the M closest (idx reused as insertion slot)
for (i = 0; i < K; ++i, pC += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership written into the slot of each of the M nearest centroids
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.; // point coincides with a centroid
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Update memberships, variant v2c: v2a layout (memberships N x K); each
// thread stages 'step' consecutive centroid entries into shared memory, so
// blockDim.x*step must cover K*D. Shared memory size = K*D doubles.
__global__ void update_memberships_kernel_v2c(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int step, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
int i, j;
// thread t copies centroid entries [t*step, t*step+step) into shared memory
for (i = 0, j = K*D, tid *= step; tid < j && i < step; ++i, ++tid){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return; // guard the grid tail (after the barrier)
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*K;
double * pC = C;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
// zero each slot, then track the M closest (idx reused as insertion slot)
for (i = 0; i < K; ++i, pC += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
// fuzzy membership written into the slot of each of the M nearest centroids
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.; // point coincides with a centroid
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Update memberships, variant v2d: tiled centroids (as in v1d) combined with
// the v2 membership layout, i.e. memberships is N x K with one slot per
// centroid and only the M nearest getting a nonzero value (via pNNT).
// Assumes blockDim.x >= tile_size*D so one element per thread suffices.
__global__ void update_memberships_kernel_v2d(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int num_tiles, int tile_size, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int pid = blockIdx.x * blockDim.x + tid;
int i = pid * M, j, t, cSize = K*D, idx;
int x = tile_size;     // centroids in the current tile
int y = num_tiles - 1; // full tiles handled before the (possibly short) last one
int z = x * D;         // doubles in the current tile
int cid = 0;           // global id of the next centroid examined
int offsetC = 0;       // read offset into the global centroid array
int * pNNT = NNT + i;
// BUGFIX: the v2 family stores memberships as N x K and indexes this
// pointer with centroid ids from pNNT (see v2a/b/c), but the original
// offset here was pid*M, overlapping neighbouring points' rows.
double * pMemberships = memberships + pid * K;
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
double X[DIM_MAX];   // local copy of the point's coordinates
double DNNT[MMAX];   // squared distances to the M nearest centroids, ascending
// BUGFIX: point load was unguarded (out-of-bounds read for pid >= N), and
// the K membership slots were never zeroed (v2a/b/c zero them); guarded
// threads still reach every __syncthreads() below.
if (pid < N){
for (i = 0, j = pid*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < K; ++i) pMemberships[i] = 0.;
}
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
#pragma region load (num_tiles - 1) tiles first
for (t = 0; t < y; ++t, offsetC += z)
{
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// insertion slot among the current M best (idx reused)
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
}
// BUGFIX: barrier before the next tile overwrites C — without it a fast
// thread could reload C while a slow one is still reading the old tile.
__syncthreads();
}
#pragma endregion
#pragma region load last tile
// the final tile may hold fewer than tile_size centroids
if (offsetC + z > cSize){
z = cSize - offsetC;
x = z / D;
}
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
#pragma region calculate memberships
// fuzzy membership written into the slot of each of the M nearest centroids
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.; // point coincides with a centroid
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
#pragma endregion
}
#pragma endregion
}
#pragma endregion
#pragma region Calculating New Centroids (FKM) Kernels
// Grid-stride block reduction: sums memberships[0..N) and writes one partial
// result per block into sumU[blockIdx.x].
// Dynamic shared memory: blockDim.x doubles; the tree reduction below
// assumes blockDim.x is a power of two >= 4.
__global__ void reduce_memberships_kernel_FKM(double * memberships, double * sumU, int N)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
// grid-stride accumulation into a per-thread partial
while(i < N){
temp = temp + memberships[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
// BUGFIX: the sub-warp steps relied on implicit warp-synchronous execution
// over non-volatile shared memory, which is unsafe under independent
// thread scheduling (Volta+) and on 64-wide wavefronts; explicit barriers
// make each exchange visible before the next step reads it.
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncthreads();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncthreads();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncthreads();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncthreads();
if (tid == 0) sumU[blockIdx.x] = sdata[0] + sdata[1];
}
// Grid-stride block reduction of the membership-weighted coordinate products
// points[i]*memberships[i]; one partial per block into sumC[blockIdx.x].
// Dynamic shared memory: blockDim.x doubles; blockDim.x must be a power of
// two >= 4.
__global__ void reduce_centroids_kernel_FKM
(double * points, double * memberships, double * sumC, int N)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
// grid-stride accumulation into a per-thread partial
while(i < N){
temp = temp + points[i] * memberships[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
// BUGFIX: the sub-warp steps relied on implicit warp-synchronous execution
// over non-volatile shared memory (unsafe on Volta+ independent thread
// scheduling); explicit barriers added. Also removed a stray ';;'.
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncthreads();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncthreads();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncthreads();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncthreads();
if (tid == 0) sumC[blockIdx.x] = sdata[0] + sdata[1];
}
// Combine per-block partial sums into the final centroids.
// sumU holds K x num_reduction_blocks membership partials and sumC holds
// K x D x num_reduction_blocks weighted-coordinate partials; each centroid
// coordinate becomes (sum of its partials) / (total membership weight).
__host__ void reduce_centroids
(double * centroids, double * sumC, double * sumU, int num_reduction_blocks, int D, int K)
{
double * c = centroids;
double * pu = sumU;
double * pc = sumC;
for (int cl = 0; cl < K; ++cl, pu += num_reduction_blocks, c += D){
// total membership weight of this centroid
double weight = 0.0;
for (int b = 0; b < num_reduction_blocks; ++b) weight += pu[b];
for (int d = 0; d < D; ++d, pc += num_reduction_blocks){
double coord = 0.0;
for (int b = 0; b < num_reduction_blocks; ++b) coord += pc[b];
c[d] = coord / weight;
}
}
}
// Divide every centroid coordinate by its accumulated membership weight.
// Launch layout: one block per centroid, one thread per coordinate, with
// memberships[blockIdx.x] holding that centroid's weight sum.
__global__ void calculate_new_centroids(double * centroids, double * memberships)
{
int coord = blockDim.x * blockIdx.x + threadIdx.x;
centroids[coord] /= memberships[blockIdx.x];
}
// CPU path: accumulate membership-weighted coordinates per centroid and
// normalize by each centroid's membership mass.
// NNT (N x M) lists every point's M nearest centroids, memberships (N x M)
// the matching weights; newCentroids (K x D) is fully overwritten.
// NOTE(review): a centroid that receives no points divides by zero and
// yields NaN/inf coordinates, exactly as the original did — confirm callers
// guarantee every centroid is referenced.
__host__ void calculate_new_centroids(
double * points, double * memberships, double * newCentroids,
int * NNT, int N, int D, int K, int M)
{
int i, j, k, idx;
int * pNNT = NNT;
double * pMemberships = memberships;
double * pPoints = points;
double * pCentroids;
double * sum = new double[K]();   // per-centroid membership mass, zero-initialized
memset(newCentroids, 0, K*D*sizeof(double));
for (i = 0; i < N; ++i, pMemberships += M, pNNT += M, pPoints += D){
for (j = 0; j < M; ++j){
idx = pNNT[j];
sum[idx] = sum[idx] + pMemberships[j];
pCentroids = newCentroids + idx*D;
for (k = 0; k < D; ++k)
pCentroids[k] = pCentroids[k] + pMemberships[j]*pPoints[k];
}
}
pCentroids = newCentroids;
for (i = 0; i < K; ++i, pCentroids += D)
for (j = 0; j < D; ++j)
pCentroids[j] = pCentroids[j] / sum[i];
delete[] sum;   // BUGFIX: 'sum' was leaked on every call
}
#pragma endregion
#pragma region Calculating New Centroids (GFKM) Kernels
// Count, per centroid, how many (point, neighbor) entries in NNT reference
// it. Counts land in histo[c+1] (histo needs K+1 slots) so a later prefix
// scan turns histo into bucket start offsets.
__global__ void histogram_kernel(int * NNT, int * histo, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size) return;
atomicAdd(&histo[NNT[tid] + 1], 1);
}
// Sequential inclusive prefix scan over histo[0..K-1].
// NOTE(review): there is no parallel decomposition here — this appears to be
// intended for a single-thread launch; multiple threads would race. Confirm
// the launch configuration at the call site.
__global__ void scan_kernel(int * histo, int K)
{
for (int i = 1; i < K; ++i)
histo[i] = histo[i] + histo[i-1];
}
// Scatter pass of a counting sort keyed by centroid id: every (point,
// neighbor) entry atomically claims the next slot of its centroid's bucket
// in histo, then records the owning point index (i/M) and its membership.
__global__ void counting_sort_kernel(
int * histo, int * NNT, int * sNNT, double * memberships, double * sU,
int size, int M)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size) return;
int slot = atomicAdd(&histo[NNT[tid]], 1);   // next free slot in this bucket
sNNT[slot] = tid / M;                        // owning point index
sU[slot] = memberships[tid];
}
// Gather memberships into sU following 'indices', then replace each index
// in place by the owning point id (index / M).
__global__ void gather_kernel(
int * indices, double * memberships, double * sU,
int size, int M)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size){
int src = indices[tid];
sU[tid] = memberships[src];
indices[tid] = src / M;
}
}
// GFKM variant of the grid-stride membership sum: one partial per block into
// sumU[blockIdx.x]. Dynamic shared memory: blockDim.x doubles; blockDim.x
// must be a power of two >= 4.
__global__ void reduce_memberships_kernel_GFKM(double * memberships, double * sumU, int N)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
// grid-stride accumulation into a per-thread partial
while(i < N){
temp = temp + memberships[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
// BUGFIX: the sub-warp steps relied on implicit warp-synchronous execution
// over non-volatile shared memory (unsafe on Volta+ and on 64-wide
// wavefronts); explicit barriers added between the exchanges.
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncthreads();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncthreads();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncthreads();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncthreads();
if (tid == 0) sumU[blockIdx.x] = sdata[0] + sdata[1];
}
// GFKM weighted-coordinate reduction over the counting-sorted arrays:
// sums points[sNNT[i]] * sU[i] with a grid-stride loop, one partial per
// block into sumC[blockIdx.x]. Dynamic shared memory: blockDim.x doubles;
// blockDim.x must be a power of two >= 4.
__global__ void reduce_centroids_kernel_GFKM
(double * points, double * sU, int * sNNT, double * sumC, int size)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
// grid-stride accumulation into a per-thread partial (gather via sNNT)
while(i < size){
temp = temp + points[sNNT[i]] * sU[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
// BUGFIX: the sub-warp steps relied on implicit warp-synchronous execution
// over non-volatile shared memory (unsafe on Volta+ and on 64-wide
// wavefronts); explicit barriers added. Dead commented-out code removed.
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncthreads();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncthreads();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncthreads();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncthreads();
if (tid == 0) sumC[blockIdx.x] = sdata[0] + sdata[1];
}
// GFKM variant: combine per-bucket partial sums into the final centroids.
// histo[i] gives the number of reduction partials emitted for centroid i;
// sumU and sumC are laid out bucket after bucket in centroid order.
__host__ void reduce_centroids
(double * centroids, double * sumC, double * sumU, int * histo, int D, int K)
{
double * c = centroids;
double * pu = sumU;
double * pc = sumC;
for (int cl = 0; cl < K; ++cl, c += D){
int parts = histo[cl];   // partial sums belonging to this centroid
double weight = 0.0;
for (int b = 0; b < parts; ++b) weight += pu[b];
for (int d = 0; d < D; ++d, pc += parts){
double coord = 0.0;
for (int b = 0; b < parts; ++b) coord += pc[b];
c[d] = coord / weight;
}
pu += parts;
}
}
#pragma endregion
#pragma region Main Methods
// Stop-condition test: writes into flag[0] whether a centroid coordinate
// moved by at least epsilon. Launched as <<<K, D>>> — one thread per
// centroid coordinate.
// NOTE(review): every thread overwrites flag[0] without synchronization, so
// the final value is whichever thread's comparison lands last — NOT the OR
// over all coordinates. The commented-out sequential version below suggests
// the race is known; confirm whether this approximate test is intentional
// (a proper fix needs the host to clear the flag and threads to only set it).
__global__ void check_convergence(double * centroids, double * newCentroids, bool * flag, double epsilon)
{
int cid = blockDim.x * blockIdx.x + threadIdx.x;
flag[0] = fabs(centroids[cid] - newCentroids[cid]) >= epsilon;
//__threadfence();
/*flag[0] = false;
int n = blockDim.x;
for (int i = 0; i < n; ++i){
if (fabs(centroids[i] - newCentroids[i]) >= epsilon){
flag[0] = true;
return;
}
}*/
}
#pragma region Version 1
// GPU GFKM driver (v1): updates memberships on the GPU — choosing among the
// v1a/v1b/v1c kernels based on 'step', i.e. how many centroid entries each
// thread would have to stage into shared memory — computes new centroids on
// the CPU, and checks convergence on the GPU.
// Returns a caller-owned new[] array {t1, t2, t3, 0, last_iteration}.
__host__ double * GFKM_GPU_v1(FILE * f, GFKM & G, BlockSizeV1 block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = KD_SIZE * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
// centroid entries each thread would stage into shared memory
int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
double t1 = 0.0, t2 = 0.0, t3 = 0.0; // membership / transfer+CPU / convergence timers
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
if (step > 4)
{
// centroids too large to stage: global-memory kernel
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), 0, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else if (step == 1)
{
// whole centroid array fits with one element per thread
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else
{
// each thread stages 'step' consecutive centroid entries
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
}
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_memberships, d_memberships, memberships_size, hipMemcpyDeviceToHost));
// BUGFIX: the async copies into pinned buffers may still be in flight;
// the CPU reads p_NNT/p_memberships next, so wait for them first.
CudaSafeCall(hipDeviceSynchronize());
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_newCentroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_newCentroids, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// BUGFIX: wait for the flag copy (and pending device work) before
// reading p_flags[0] on the host.
CudaSafeCall(hipDeviceSynchronize());
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--; // clamp to the last executed iteration index
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_centroids);
hipFree(d_newCentroids);
hipFree(d_memberships);
hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
hipDeviceReset();
return rs;
}
// GPU GFKM driver (v1a): same structure as GFKM_GPU_v1 but always uses the
// global-memory membership kernel (v1a). Returns a caller-owned new[] array
// {t1, t2, t3, 0, last_iteration}.
// NOTE(review): p_NNT/p_memberships (and later p_flags) are read by the host
// immediately after hipMemcpyAsync into pinned buffers, with no intervening
// synchronize — the copies may still be in flight. Confirm whether a
// hipDeviceSynchronize is needed before the CPU consumes them.
__host__ double * GFKM_GPU_v1a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
block_size = getBlockSizeForMembershipKkernelV1a(block_size);
int num_blocks = roundup(G.N, block_size);
double t1 = 0.0, t2 = 0.0, t3 = 0.0; // membership / transfer+CPU / convergence timers
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_blocks), dim3(block_size), 0, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_memberships, d_memberships, memberships_size, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start()
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_newCentroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_newCentroids, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--; // clamp to the last executed iteration index
#pragma endregion
#pragma region Writing results to files
Util::write<double>(p_centroids, G.K, G.D, G.path + "centroids.GPU.txt");
Util::write<int>(p_NNT, G.N, G.M, G.path + "NNT.GPU.txt");
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_centroids);
hipFree(d_newCentroids);
hipFree(d_memberships);
hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
hipDeviceReset();
return rs;
}
// GFKM on GPU, variant 1b: memberships are updated on the GPU
// (update_memberships_kernel_v1b, which stages the centroid table in
// centroids_size bytes of dynamic shared memory) while the new centroids are
// recomputed on the CPU every iteration.
//
// f          file handle passed to Util::print_times for the timing report.
// G          problem description (points, centroids, N/D/K/M, fuzzifier,
//            epsilon, max_iter).
// block_size requested block size; adjusted by
//            getBlockSizeForMembershipKkernelV1b so the shared-memory staging fits.
// stop_iter  if >= 0, forces the loop to stop at that iteration index.
//
// Returns a heap-allocated double[5]:
//   [0] t1 membership-update time, [1] t2 centroid-update time,
//   [2] t3 convergence-check time, [3] unused (0.0), [4] last iteration index.
// The caller owns (and must delete[]) the returned array.
__host__ double * GFKM_GPU_v1b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
    int i;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int NM_SIZE = G.N * G.M;
    int flag_size = sizeof(bool);
    int points_size = G.N * G.D * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, block_size);
    int num_blocks = roundup(G.N, block_size);
    double t1 = 0.0, t2 = 0.0, t3 = 0.0;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_centroids;
    double * d_newCentroids;
    double * d_memberships;
    int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
    // Pinned host buffers so the hipMemcpyAsync calls below can run
    // asynchronously at full transfer bandwidth.
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(hipMalloc(&d_flags, flag_size));
    CudaSafeCall(hipMalloc(&d_points, points_size));
    CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_newCentroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
    CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
    CudaSafeCall(hipHostMalloc(&p_points, points_size));
    CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
    CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
    CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
    for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_blocks), dim3(block_size), centroids_size, 0,
            d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
        CudaSafeCall(hipMemcpyAsync(p_memberships, d_memberships, memberships_size, hipMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        // Explicit barrier: the two copies above are asynchronous and the CPU
        // reads p_NNT/p_memberships next. Correctness must not depend on
        // TimingGPU::GetCounter synchronizing as a side effect.
        CudaSafeCall(hipDeviceSynchronize());
        tmr_CPU.start();
        calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_newCentroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_newCentroids, d_flags, G.epsilon);
        CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_centroids, d_newCentroids, centroids_size, hipMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Ensure the asynchronous copy into p_flags has landed before the
        // host inspects it.
        CudaSafeCall(hipDeviceSynchronize());
        // NOTE(review): p_flags[0] appears to mean "not yet converged", and a
        // non-negative stop_iter forces a stop at that iteration -- confirm.
        if ((!p_flags[0] && (stop_iter < 0 || i == stop_iter)) || i == stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last executed iteration
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    hipFree(d_flags);
    hipFree(d_points);
    hipFree(d_centroids);
    hipFree(d_newCentroids);
    hipFree(d_memberships);
    hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    hipHostFree(p_flags);
    hipHostFree(p_points);
    hipHostFree(p_centroids);
    hipHostFree(p_memberships);
    hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = 0.0;
    rs[4] = (double)i;
#pragma endregion
    hipDeviceReset();
    return rs;
}
// GFKM on GPU, variant 1c: like v1b, but the membership kernel
// (update_memberships_kernel_v1c) loads the centroid table into shared memory
// in `step` chunks, allowing centroid tables larger than one block's worth of
// cooperative loads. New centroids are still recomputed on the CPU.
//
// f          file handle passed to Util::print_times for the timing report.
// G          problem description (points, centroids, N/D/K/M, fuzzifier,
//            epsilon, max_iter).
// block_size requested block size; adjusted by getBlockSizeForMembershipKkernelV1c.
// stop_iter  if >= 0, forces the loop to stop at that iteration index.
// step       number of chunks used by the kernel to stage the centroid table.
//
// Returns a heap-allocated double[5]:
//   [0] t1 membership-update time, [1] t2 centroid-update time,
//   [2] t3 convergence-check time, [3] unused (0.0), [4] last iteration index.
// The caller owns (and must delete[]) the returned array.
__host__ double * GFKM_GPU_v1c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
    int i;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;
    int KD_SIZE = G.K * G.D;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroids_size = KD_SIZE * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, block_size);
    int num_blocks = roundup(G.N, block_size);
    //int step = roundup(KD_SIZE, block_size);
    double t1 = 0.0, t2 = 0.0, t3 = 0.0;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_centroids;
    double * d_newCentroids;
    double * d_memberships;
    int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
    // Pinned host buffers so the hipMemcpyAsync calls below can run
    // asynchronously at full transfer bandwidth.
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(hipMalloc(&d_flags, flag_size));
    CudaSafeCall(hipMalloc(&d_points, points_size));
    CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_newCentroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
    CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
    CudaSafeCall(hipHostMalloc(&p_points, points_size));
    CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
    CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
    CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
    for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_blocks), dim3(block_size), centroids_size, 0,
            d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
        CudaSafeCall(hipMemcpyAsync(p_memberships, d_memberships, memberships_size, hipMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        // Explicit barrier: the two copies above are asynchronous and the CPU
        // reads p_NNT/p_memberships next. Correctness must not depend on
        // TimingGPU::GetCounter synchronizing as a side effect.
        CudaSafeCall(hipDeviceSynchronize());
        tmr_CPU.start();
        calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_newCentroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_newCentroids, d_flags, G.epsilon);
        CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_centroids, d_newCentroids, centroids_size, hipMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Ensure the asynchronous copy into p_flags has landed before the
        // host inspects it.
        CudaSafeCall(hipDeviceSynchronize());
        // NOTE(review): p_flags[0] appears to mean "not yet converged", and a
        // non-negative stop_iter forces a stop at that iteration -- confirm.
        if ((!p_flags[0] && (stop_iter < 0 || i == stop_iter)) || i == stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last executed iteration
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    hipFree(d_flags);
    hipFree(d_points);
    hipFree(d_centroids);
    hipFree(d_newCentroids);
    hipFree(d_memberships);
    hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    hipHostFree(p_flags);
    hipHostFree(p_points);
    hipHostFree(p_centroids);
    hipHostFree(p_memberships);
    hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = 0.0;
    rs[4] = (double)i;
#pragma endregion
    hipDeviceReset();
    return rs;
}
// GFKM on GPU, variant 1d: like v1b, but the membership kernel
// (update_memberships_kernel_v1d) processes the centroid table in tiles of
// tile_size centroids, staging each tile (usm_size bytes) in dynamic shared
// memory. New centroids are still recomputed on the CPU.
//
// f          file handle passed to Util::print_times for the timing report.
// G          problem description (points, centroids, N/D/K/M, fuzzifier,
//            epsilon, max_iter).
// block_size block size used as-is (no helper adjustment in this variant).
// stop_iter  if >= 0, forces the loop to stop at that iteration index.
//
// Returns a heap-allocated double[5]:
//   [0] t1 membership-update time, [1] t2 centroid-update time,
//   [2] t3 convergence-check time, [3] unused (0.0), [4] last iteration index.
// The caller owns (and must delete[]) the returned array.
__host__ double * GFKM_GPU_v1d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
    int i;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int NM_SIZE = G.N * G.M;
    int flag_size = sizeof(bool);
    int points_size = G.N * G.D * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int num_blocks = roundup(G.N, block_size);
    // NOTE(review): assumes block_size >= G.D; otherwise tile_size == 0 and
    // roundup(G.K, 0) would divide by zero -- confirm callers guarantee this.
    int tile_size = block_size / G.D;
    int usm_size = (tile_size * G.D) * DBL_SIZE;
    int num_tiles = roundup(G.K, tile_size);
    double t1 = 0.0, t2 = 0.0, t3 = 0.0;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_centroids;
    double * d_newCentroids;
    double * d_memberships;
    int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
    // Pinned host buffers so the hipMemcpyAsync calls below can run
    // asynchronously at full transfer bandwidth.
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(hipMalloc(&d_flags, flag_size));
    CudaSafeCall(hipMalloc(&d_points, points_size));
    CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_newCentroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
    CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
    CudaSafeCall(hipHostMalloc(&p_points, points_size));
    CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
    CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
    CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
    for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( update_memberships_kernel_v1d), dim3(num_blocks), dim3(block_size), usm_size, 0,
            d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
        CudaSafeCall(hipMemcpyAsync(p_memberships, d_memberships, memberships_size, hipMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        // Explicit barrier: the two copies above are asynchronous and the CPU
        // reads p_NNT/p_memberships next. Correctness must not depend on
        // TimingGPU::GetCounter synchronizing as a side effect.
        CudaSafeCall(hipDeviceSynchronize());
        tmr_CPU.start();
        calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_newCentroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_newCentroids, d_flags, G.epsilon);
        CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_centroids, d_newCentroids, centroids_size, hipMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Ensure the asynchronous copy into p_flags has landed before the
        // host inspects it.
        CudaSafeCall(hipDeviceSynchronize());
        // NOTE(review): p_flags[0] appears to mean "not yet converged", and a
        // non-negative stop_iter forces a stop at that iteration -- confirm.
        if ((!p_flags[0] && (stop_iter < 0 || i == stop_iter)) || i == stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last executed iteration
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    hipFree(d_flags);
    hipFree(d_points);
    hipFree(d_centroids);
    hipFree(d_newCentroids);
    hipFree(d_memberships);
    hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    hipHostFree(p_flags);
    hipHostFree(p_points);
    hipHostFree(p_centroids);
    hipHostFree(p_memberships);
    hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = 0.0;
    rs[4] = (double)i;
#pragma endregion
    hipDeviceReset();
    return rs;
}
#pragma endregion
#pragma region Version 2
// GFKM on GPU, variant 2: both phases run on the GPU. Memberships are updated
// by one of three kernels chosen from `step` (how many chunks the centroid
// table needs), then memberships are transposed with hipblasDgeam and the
// centroid numerators/denominators are reduced per cluster (and per dimension)
// with block-level reduction kernels spread over NSTREAM streams. Block
// partial sums are combined either on the CPU (num_reduction_blocks > 1) or
// by a small GPU kernel.
//
// f          file handle passed to Util::print_times for the timing report.
// G          problem description (points, centroids, N/D/K/M, fuzzifier,
//            epsilon, max_iter).
// block_size per-kernel block sizes (BlockSizeV2 aggregate).
// stop_iter  if >= 0, forces the loop to stop at that iteration index.
//
// Returns a heap-allocated double[5]:
//   [0] t1 membership time, [1] t2 centroid time, [2] t3 convergence time,
//   [3] t4 final device-to-host copy time, [4] last iteration index.
// The caller owns (and must delete[]) the returned array.
__host__ double * GFKM_GPU_v2(FILE * f, GFKM & G, BlockSizeV2 block_size, int stop_iter)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;
    int KD_SIZE = G.K * G.D;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = G.N * G.K * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
    int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
    int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;
    // Each reduction block consumes 4x its thread count (<< 2), hence the grid size.
    int num_reduction_blocks = roundup(G.N, reduction_block_size << 2);
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_pointsT;      // transposed points (column-major per dimension)
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT; // transposed memberships (column-major per cluster)
    double * d_sumU;         // per-block membership sums;
    double * d_sumC;         // per-block weighted point sums; also holds the new centroids
    int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
    // Pinned host buffers so the hipMemcpyAsync calls below can run
    // asynchronously at full transfer bandwidth.
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(hipMalloc(&d_flags, flag_size));
    CudaSafeCall(hipMalloc(&d_points, points_size));
    CudaSafeCall(hipMalloc(&d_pointsT, points_size));
    CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
    CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
    CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
    CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
    CudaSafeCall(hipHostMalloc(&p_points, points_size));
    CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
    CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
    CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
    CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
    CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    hipblasHandle_t handle;
    hipStream_t * streams = new hipStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        hipStreamCreate(&streams[i]);
    CublasSafeCall(hipblasCreate(&handle));
    // Out-of-place transpose via geam (alpha*op(A) + beta*op(B), beta = 0).
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
        // Pick the membership kernel by how many chunks the centroid table
        // needs: v2a (no staging), v2b (single shared-memory load), or v2c
        // (staged in `step` chunks).
        if (step > 4)
        {
            tmr_GPU.StartCounter();
            hipLaunchKernelGGL(( update_memberships_kernel_v2a), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), 0, 0,
                d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else if (step == 1)
        {
            tmr_GPU.StartCounter();
            hipLaunchKernelGGL(( update_memberships_kernel_v2b), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
                d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else
        {
            tmr_GPU.StartCounter();
            hipLaunchKernelGGL(( update_memberships_kernel_v2c), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
                d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
        }
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
        alpha = 1.;
        beta = 0.;
        tmr_GPU.StartCounter();
        CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.K,
            &alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
        tmr_GPU.StartCounter();
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        for (j = 0; j < G.K; ++j){
            hipLaunchKernelGGL(( reduce_memberships_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[0],
                d_membershipsT + offset, d_sumU + offset_sumU, G.N);
            offset_pointsT = 0;
            for (k = 0; k < G.D; ++k){
                // Round-robin the per-dimension reductions over streams 1..NSTREAM-1.
                hipLaunchKernelGGL(( reduce_centroids_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
                    d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
                offset_pointsT += G.N;
                offset_sumC += num_reduction_blocks;
            }
            offset_sumU += num_reduction_blocks;
            offset += G.N;
        }
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
            tmr_GPU.StartCounter();
            CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, sumU_size, hipMemcpyDeviceToHost));
            CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, sumC_size, hipMemcpyDeviceToHost));
            t2 = t2 + tmr_GPU.GetCounter();
            // Explicit barrier: the copies above are asynchronous and the CPU
            // reads p_sumU/p_sumC next. Correctness must not depend on
            // TimingGPU::GetCounter synchronizing as a side effect.
            CudaSafeCall(hipDeviceSynchronize());
            tmr_CPU.start();
            reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
            tmr_CPU.stop();
            t2 = t2 + tmr_CPU.elapsed();
            tmr_GPU.StartCounter();
            CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
            t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        }
        else{
#pragma region Calculate centroids by GPU
            tmr_GPU.StartCounter();
            hipLaunchKernelGGL(( calculate_new_centroids), dim3(G.K), dim3(G.D), 0, 0, d_sumC, d_sumU);
            t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        }
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Ensure the asynchronous copy into p_flags has landed before the
        // host inspects it.
        CudaSafeCall(hipDeviceSynchronize());
        // NOTE(review): p_flags[0] appears to mean "not yet converged", and a
        // non-negative stop_iter forces a stop at that iteration -- confirm.
        if ((!p_flags[0] && (stop_iter < 0 || i == stop_iter)) || i == stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last executed iteration
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
    // The results are read by writeToFiles below; wait for the async copies.
    CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    hipFree(d_flags);
    hipFree(d_points);
    hipFree(d_pointsT);
    hipFree(d_centroids);
    hipFree(d_memberships);
    hipFree(d_membershipsT);
    hipFree(d_sumU);
    hipFree(d_sumC);
    hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    hipHostFree(p_flags);
    hipHostFree(p_points);
    hipHostFree(p_centroids);
    hipHostFree(p_memberships);
    hipHostFree(p_sumU);
    hipHostFree(p_sumC);
    hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(hipblasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        hipStreamDestroy(streams[i]);
    delete[] streams; // was leaked: allocated with new[] and never freed
    hipDeviceReset();
#pragma endregion
    return rs;
}
// GFKM on GPU, variant 2a: fully GPU pipeline using the un-staged membership
// kernel (update_memberships_kernel_v2a). Memberships are transposed with
// hipblasDgeam, per-cluster/per-dimension partial sums are produced by
// block-reduction kernels over NSTREAM streams, and the partial sums are
// combined either on the CPU (num_reduction_blocks > 1) or by a small GPU kernel.
//
// f          file handle passed to Util::print_times for the timing report.
// G          problem description (points, centroids, N/D/K/M, fuzzifier,
//            epsilon, max_iter).
// block_size requested block size; split into membership/reduction sizes by
//            getBlockSizeForMembershipKkernelV2a / getBlockSizeForCentroidKernelV2.
// stop_iter  if >= 0, forces the loop to stop at that iteration index.
//
// Returns a heap-allocated double[5]:
//   [0] t1 membership time, [1] t2 centroid time, [2] t3 convergence time,
//   [3] t4 final device-to-host copy time, [4] last iteration index.
// The caller owns (and must delete[]) the returned array.
__host__ double * GFKM_GPU_v2a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = G.N * G.K * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int initBlockSize = block_size;
    block_size = getBlockSizeForMembershipKkernelV2a(initBlockSize);
    int num_blocks = roundup(G.N, block_size);
    BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
    int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;
    // Each reduction block consumes 4x its thread count (<< 2), hence the grid size.
    int num_reduction_blocks = roundup(G.N, reduction_block_size << 2);
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_pointsT;      // transposed points (column-major per dimension)
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT; // transposed memberships (column-major per cluster)
    double * d_sumU;         // per-block membership sums
    double * d_sumC;         // per-block weighted point sums; also holds the new centroids
    int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
    // Pinned host buffers so the hipMemcpyAsync calls below can run
    // asynchronously at full transfer bandwidth.
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(hipMalloc(&d_flags, flag_size));
    CudaSafeCall(hipMalloc(&d_points, points_size));
    CudaSafeCall(hipMalloc(&d_pointsT, points_size));
    CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
    CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
    CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
    CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
    CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
    CudaSafeCall(hipHostMalloc(&p_points, points_size));
    CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
    CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
    CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
    CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
    CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    hipblasHandle_t handle;
    hipStream_t * streams = new hipStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        hipStreamCreate(&streams[i]);
    CublasSafeCall(hipblasCreate(&handle));
    // Out-of-place transpose via geam (alpha*op(A) + beta*op(B), beta = 0).
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( update_memberships_kernel_v2a), dim3(num_blocks), dim3(block_size), 0, 0,
            d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
        alpha = 1.;
        beta = 0.;
        tmr_GPU.StartCounter();
        CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.K,
            &alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
        tmr_GPU.StartCounter();
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        for (j = 0; j < G.K; ++j){
            hipLaunchKernelGGL(( reduce_memberships_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[0],
                d_membershipsT + offset, d_sumU + offset_sumU, G.N);
            offset_pointsT = 0;
            for (k = 0; k < G.D; ++k){
                // Round-robin the per-dimension reductions over streams 1..NSTREAM-1.
                hipLaunchKernelGGL(( reduce_centroids_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
                    d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
                offset_pointsT += G.N;
                offset_sumC += num_reduction_blocks;
            }
            offset_sumU += num_reduction_blocks;
            offset += G.N;
        }
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
            tmr_GPU.StartCounter();
            CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, sumU_size, hipMemcpyDeviceToHost));
            CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, sumC_size, hipMemcpyDeviceToHost));
            t2 = t2 + tmr_GPU.GetCounter();
            // Explicit barrier: the copies above are asynchronous and the CPU
            // reads p_sumU/p_sumC next. Correctness must not depend on
            // TimingGPU::GetCounter synchronizing as a side effect.
            CudaSafeCall(hipDeviceSynchronize());
            tmr_CPU.start();
            reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
            tmr_CPU.stop();
            t2 = t2 + tmr_CPU.elapsed();
            tmr_GPU.StartCounter();
            CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
            t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        }
        else{
#pragma region Calculate centroids by GPU
            tmr_GPU.StartCounter();
            hipLaunchKernelGGL(( calculate_new_centroids), dim3(G.K), dim3(G.D), 0, 0, d_sumC, d_sumU);
            t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
        }
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Ensure the asynchronous copy into p_flags has landed before the
        // host inspects it.
        CudaSafeCall(hipDeviceSynchronize());
        // NOTE(review): p_flags[0] appears to mean "not yet converged", and a
        // non-negative stop_iter forces a stop at that iteration -- confirm.
        if ((!p_flags[0] && (stop_iter < 0 || i == stop_iter)) || i == stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last executed iteration
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
    // The results are read by writeToFiles below; wait for the async copies.
    CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    hipFree(d_flags);
    hipFree(d_points);
    hipFree(d_pointsT);
    hipFree(d_centroids);
    hipFree(d_memberships);
    hipFree(d_membershipsT);
    hipFree(d_sumU);
    hipFree(d_sumC);
    hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    hipHostFree(p_flags);
    hipHostFree(p_points);
    hipHostFree(p_centroids);
    hipHostFree(p_memberships);
    hipHostFree(p_sumU);
    hipHostFree(p_sumC);
    hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(hipblasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        hipStreamDestroy(streams[i]);
    delete[] streams; // was leaked: allocated with new[] and never freed
    hipDeviceReset();
#pragma endregion
    return rs;
}
__host__ double * GFKM_GPU_v2b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = G.N * G.K * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int initBlockSize = block_size;
block_size = getBlockSizeForMembershipKkernelV2b(centroids_size, initBlockSize);
int num_blocks = roundup(G.N, block_size);
BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
int num_reduction_blocks = roundup(G.N, reduction_block_size << 2);
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sumU;
double * d_sumC;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v2b), dim3(num_blocks), dim3(block_size), centroids_size, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.K,
&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
tmr_GPU.StartCounter();
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
for (j = 0; j < G.K; ++j){
hipLaunchKernelGGL(( reduce_memberships_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[0],
d_membershipsT + offset, d_sumU + offset_sumU, G.N);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
offset_pointsT += G.N;
offset_sumC += num_reduction_blocks;
}
offset_sumU += num_reduction_blocks;
offset += G.N;
}
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, sumU_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, sumC_size, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
}
else{
#pragma region Calculate centroids by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( calculate_new_centroids), dim3(G.K), dim3(G.D), 0, 0, d_sumC, d_sumU);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
}
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
// GFKM on GPU, variant v2c.
// Per iteration: (1) membership-update kernel (shared-memory, strided by `step`),
// (2) hipBLAS out-of-place transpose of the membership matrix, (3) per-cluster
// block reductions of membership sums and centroid numerators, (4) centroid
// finalization (on CPU when there are several reduction blocks, otherwise on
// GPU), (5) convergence check against G.epsilon.
// Returns a caller-owned double[5]: {t1 membership ms, t2 centroid ms,
// t3 convergence ms, t4 final copy-back ms, index of last executed iteration}.
// NOTE(review): all buffer sizes are 32-bit ints; very large N*D / N*K would
// overflow — presumed within range for the intended datasets.
__host__ double * GFKM_GPU_v2c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int KD_SIZE = G.K * G.D;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = KD_SIZE * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int initBlockSize = block_size;
	block_size = getBlockSizeForMembershipKkernelV2c(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
	int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared mem: one double per reduction thread
	// Each reduction thread consumes 4 elements, hence the << 2.
	int num_reduction_blocks = roundup(G.N, reduction_block_size << 2);
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;       // N x D transpose of d_points (coalesced column access)
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;  // N x K transpose of d_memberships
	double * d_sumU;          // per-block partial sums of memberships, per cluster
	double * d_sumC;          // per-block partial centroid numerators
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	// Pinned so the hipMemcpyAsync calls below are truly asynchronous.
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// (The original also allocated a pinned p_memberships buffer that was
	// never read or written; it has been removed.)
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	// Stage through pinned buffers, then blocking H2D copies.
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i])); // was unchecked in the original
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( update_memberships_kernel_v2c), dim3(num_blocks), dim3(block_size), centroids_size, 0,
			d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// One membership reduction per cluster on streams[0]; the D per-dimension
		// centroid reductions are fanned out over the remaining streams.
		for (j = 0; j < G.K; ++j){
			hipLaunchKernelGGL(( reduce_memberships_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[0],
				d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				hipLaunchKernelGGL(( reduce_centroids_kernel_FKM), dim3(num_reduction_blocks), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, sumU_size, hipMemcpyDeviceToHost));
			CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, sumC_size, hipMemcpyDeviceToHost));
			// Pinned destinations make these copies truly asynchronous: the
			// original read p_sumU/p_sumC below without waiting (a data race).
			CudaSafeCall(hipDeviceSynchronize());
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids go to d_sumC; check_convergence below compares
			// d_centroids (old) against d_sumC (new).
			CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			hipLaunchKernelGGL(( calculate_new_centroids), dim3(G.K), dim3(G.D), 0, 0, d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		// Wait for the async flag copy before the host inspects p_flags[0];
		// the original read it immediately (a data race on pinned memory).
		CudaSafeCall(hipDeviceSynchronize());
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipDeviceSynchronize()); // results must be resident before writeToFiles
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_membershipsT);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		hipStreamDestroy(streams[i]);
	delete[] streams; // the original leaked this new[] allocation
	hipDeviceReset();
#pragma endregion
	return rs;
}
// GFKM on GPU, variant v2d.
// Like v2c but the membership-update kernel tiles centroids through dynamic
// shared memory: tile_size = block_size / G.D centroids per tile, usm_size
// bytes of shared memory per block, num_tiles passes over the K centroids.
// Returns a caller-owned double[5]: {t1 membership ms, t2 centroid ms,
// t3 convergence ms, t4 final copy-back ms, index of last executed iteration}.
// NOTE(review): tile_size = block_size / G.D is 0 when G.D > block_size,
// which would divide by zero in roundup(G.K, tile_size) — callers are
// presumed to pass block_size >= G.D; verify at the call sites.
__host__ double * GFKM_GPU_v2d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = G.K * G.D * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sm_size = block_size * DBL_SIZE; // reduction shared mem: one double per thread
	int num_blocks = roundup(G.N, block_size);
	// (unused local num_histo_blocks from the original removed)
	// Each reduction block of block_size threads covers block_size*4 elements.
	int reduction_block_size = block_size << 2;
	int num_reduction_blocks = roundup(G.N, reduction_block_size);
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int tile_size = block_size / G.D;              // centroids per shared-memory tile
	int usm_size = (tile_size * G.D) * DBL_SIZE;   // bytes of shared memory per tile
	int num_tiles = roundup(G.K, tile_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;       // N x D transpose of d_points
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;  // N x K transpose of d_memberships
	double * d_sumU;          // per-block partial membership sums
	double * d_sumC;          // per-block partial centroid numerators
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	// Pinned so the hipMemcpyAsync calls below are truly asynchronous.
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// (The original also allocated a pinned p_memberships buffer that was
	// never read or written; it has been removed.)
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i])); // was unchecked in the original
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( update_memberships_kernel_v2d), dim3(num_blocks), dim3(block_size), usm_size, 0,
			d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// Membership reductions on streams[0]; per-dimension centroid
		// reductions fanned out over the remaining streams.
		for (j = 0; j < G.K; ++j){
			hipLaunchKernelGGL(( reduce_memberships_kernel_FKM), dim3(num_reduction_blocks), dim3(block_size), sm_size, streams[0],
				d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				hipLaunchKernelGGL(( reduce_centroids_kernel_FKM), dim3(num_reduction_blocks), dim3(block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, sumU_size, hipMemcpyDeviceToHost));
			CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, sumC_size, hipMemcpyDeviceToHost));
			// Pinned destinations make these copies truly asynchronous: the
			// original read p_sumU/p_sumC below without waiting (a data race).
			CudaSafeCall(hipDeviceSynchronize());
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids land in d_sumC; check_convergence compares them
			// with the previous d_centroids below.
			CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			hipLaunchKernelGGL(( calculate_new_centroids), dim3(G.K), dim3(G.D), 0, 0, d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		// Wait for the async flag copy before the host inspects p_flags[0];
		// the original read it immediately (a data race on pinned memory).
		CudaSafeCall(hipDeviceSynchronize());
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipDeviceSynchronize()); // results must be resident before writeToFiles
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_membershipsT);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		hipStreamDestroy(streams[i]);
	delete[] streams; // the original leaked this new[] allocation
	hipDeviceReset();
#pragma endregion
	return rs;
}
#pragma endregion
#pragma region Version 3
// GFKM on GPU, version 3.
// Per iteration: (1) membership update (kernel variant v1a/v1b/v1c chosen by
// `step` = roundup(K*D, block size)), (2) counting sort of the N*M
// (point, nearest-cluster) pairs so each cluster's members are contiguous,
// (3) per-cluster GPU block reductions sized by the sorted histogram,
// (4) CPU finalization of centroids, (5) convergence check.
// Returns a caller-owned double[5]: {t1 membership ms, t2 centroid ms,
// t3 convergence ms, t4 final copy-back ms, index of last executed iteration}.
__host__ double * GFKM_GPU_v3(FILE * f, GFKM & G, BlockSizeV3 block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int KD_SIZE = G.K * G.D;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = KD_SIZE * DBL_SIZE;
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;      // sorted membership values
	int histo_size = (G.K+1)*INT_SIZE;     // K counts + leading slot for the scan
	int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
	int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
	int num_histo_blocks = roundup(NM_SIZE, block_size.histogramKernelBlockSize);
	int num_counting_sort_blocks = roundup(NM_SIZE, block_size.countingSortKernelBlockSize);
	int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE; // one double of shared mem per reduction thread
	// Each reduction thread consumes 4 elements (<< 2); + G.K covers per-cluster rounding.
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size << 2) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	// (d_membershipsT from the original was allocated and freed but never
	// used by this version; it has been removed.)
	bool * d_flags;
	double * d_points;
	double * d_pointsT;   // N x D transpose of d_points
	double * d_centroids;
	double * d_memberships;
	double * d_sU;        // memberships permuted by the counting sort
	double * d_sumU;      // per-block partial membership sums
	double * d_sumC;      // per-block partial centroid numerators
	int * d_histo;
	int * d_NNT;
	int * d_sNNT;         // NNT permuted by the counting sort
#pragma endregion
#pragma region Declare host pinned memories
	// Pinned so the hipMemcpyAsync calls below are truly asynchronous.
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_sU, sU_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_histo, histo_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// (The original also allocated a pinned p_memberships buffer that was
	// never read or written; it has been removed.)
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
	CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i])); // was unchecked in the original
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
		// Kernel variant selection: v1a (global memory) when centroids do not
		// fit shared memory in few strides, v1b when they fit in one stride,
		// v1c (strided shared memory) otherwise.
		if (step > 4)
		{
			tmr_GPU.StartCounter();
			hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), 0, 0,
				d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		}
		else if (step == 1)
		{
			tmr_GPU.StartCounter();
			hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
				d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		}
		else
		{
			tmr_GPU.StartCounter();
			hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
				d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
		}
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemset(d_histo, 0, histo_size));
		hipLaunchKernelGGL(( histogram_kernel), dim3(num_histo_blocks), dim3(block_size.histogramKernelBlockSize), 0, 0, d_NNT, d_histo, NM_SIZE);
		// Copy the raw (un-scanned) histogram to the host; the device copy is
		// prefix-scanned afterwards for the sort.
		CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
		hipLaunchKernelGGL(( scan_kernel), dim3(1), dim3(1), 0, 0, d_histo, G.K);
		hipLaunchKernelGGL(( counting_sort_kernel), dim3(num_counting_sort_blocks), dim3(block_size.countingSortKernelBlockSize), 0, 0, d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
			NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		// p_histo was filled by an async copy above (pinned destination): wait
		// for it before the host reads the counts — the original did not (a
		// data race).
		CudaSafeCall(hipDeviceSynchronize());
		// p_histo[j+1] holds cluster j's member count; p_histo[j] is
		// overwritten in place with the number of reduction blocks for that
		// cluster (ascending j, so each slot is read before it is clobbered).
		for (j = 0; j < G.K; ++j){
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
			hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
				d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
		CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
		// Same pinned-async hazard: wait before the CPU reduction below reads
		// p_sumU/p_sumC.
		CudaSafeCall(hipDeviceSynchronize());
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// New centroids go to d_sumC; check_convergence compares them with the
		// previous d_centroids below.
		CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		// Wait for the async flag copy before the host inspects p_flags[0].
		CudaSafeCall(hipDeviceSynchronize());
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipDeviceSynchronize()); // results must be resident before writeToFiles
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_sU);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_histo);
	hipFree(d_NNT);
	hipFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_histo);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		hipStreamDestroy(streams[i]);
	delete[] streams; // the original leaked this new[] allocation
	hipDeviceReset();
#pragma endregion
	return rs;
}
__host__ double * GFKM_GPU_v3a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
int histo_size = (G.K+1)*INT_SIZE;
int initBlockSize = block_size;
block_size = getBlockSizeForMembershipKkernelV1a(initBlockSize);
int num_blocks = roundup(G.N, block_size);
BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo;
int * d_NNT;
int * d_sNNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_blocks), dim3(block_size), 0, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemset(d_histo, 0, histo_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(num_histo_blocks), dim3(blockSizeV3.histogramKernelBlockSize), 0, 0, d_NNT, d_histo, NM_SIZE);
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( scan_kernel), dim3(1), dim3(1), 0, 0, d_histo, G.K);
hipLaunchKernelGGL(( counting_sort_kernel), dim3(num_counting_sort_blocks), dim3(blockSizeV3.countingSortKernelBlockSize), 0, 0, d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
for (j = 0; j < G.K; ++j){
p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
Util::write<double>(p_centroids, G.K, G.D, G.path + "centroids.GPU.txt");
Util::write<int>(p_NNT, G.N, G.M, G.path + "NNT.GPU.txt");
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
// GFKM clustering on the GPU, variant v3b: membership update with kernel v1b
// (centroids staged in dynamic shared memory of size centroids_size), a
// histogram + counting-sort grouping of memberships by nearest centroid, and a
// hybrid GPU/CPU centroid reduction fanned out over NSTREAM streams.
//
// Parameters:
//   f          - open FILE* that per-phase timings are printed to.
//   G          - problem description (points, centroids, N/D/K/M, fuzzifier,
//                max_iter, epsilon, output path).
//   block_size - requested block size; actual launch sizes are derived via
//                getBlockSizeForMembershipKkernelV1b / getBlockSizeForCentroidKernelV3.
//   stop_iter  - iteration index at which to force a stop (< 0: run until
//                convergence or G.max_iter).
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0] membership-update time, [1] centroid-update time, [2] convergence-check
//   time, [3] final device->host copy time, [4] index of the last iteration.
__host__ double * GFKM_GPU_v3b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
// Histogram has K+1 slots: slot 0 stays a base offset, slots 1..K are per-cluster.
int histo_size = (G.K+1)*INT_SIZE;
int initBlockSize = block_size;
block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, initBlockSize);
int num_blocks = roundup(G.N, block_size);
BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
// << 2: each reduction thread apparently consumes 4 elements (see the
// roundup(..., reduction_block_size << 2) grid sizing in the main loop).
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo;
int * d_NNT;
int * d_sNNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
// Pinned host buffers so the hipMemcpyAsync calls below are truly asynchronous.
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
// Transpose points (row-major NxD -> d_pointsT as DxN) so the per-dimension
// centroid reductions read coalesced columns.
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_blocks), dim3(block_size), centroids_size, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemset(d_histo, 0, histo_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(num_histo_blocks), dim3(blockSizeV3.histogramKernelBlockSize), 0, 0, d_NNT, d_histo, NM_SIZE);
// Snapshot d_histo before scan_kernel turns it into prefix sums: the host
// loop below reads p_histo[j+1] as the raw size of cluster j's segment.
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( scan_kernel), dim3(1), dim3(1), 0, 0, d_histo, G.K);
// Fix: grid must cover NM_SIZE with the counting-sort block size. This
// previously reused num_histo_blocks (copy-paste from the histogram launch),
// under-covering the input whenever countingSortKernelBlockSize is smaller
// than histogramKernelBlockSize; v3a and v3c already use the correct grid.
hipLaunchKernelGGL(( counting_sort_kernel), dim3(num_counting_sort_blocks), dim3(blockSizeV3.countingSortKernelBlockSize), 0, 0, d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
// NOTE(review): p_histo was filled by hipMemcpyAsync above with no explicit
// stream synchronization before this host-side read; presumably
// TimingGPU::GetCounter() synchronizes the device — confirm.
for (j = 0; j < G.K; ++j){
// Overwrite slot j with the block count for cluster j (its size is in j+1).
p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
// One launch per dimension, round-robined over streams[1..NSTREAM-1].
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
// Final (small) reduction of the per-block partial sums happens on the CPU.
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
// d_sumC is reused as scratch to hold the NEW centroids for the convergence check.
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
// Promote the new centroids for the next iteration.
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
// Fix: the stream handle array itself was leaked (allocated with new[] above).
delete [] streams;
hipDeviceReset();
#pragma endregion
return rs;
}
// GFKM clustering on the GPU, variant v3c: membership update with kernel v1c
// (centroids staged in dynamic shared memory, processed in 'step'-sized
// chunks), histogram + counting-sort grouping, and a hybrid GPU/CPU centroid
// reduction spread over NSTREAM streams.
//
// Parameters:
//   f          - open FILE* receiving per-phase timings.
//   G          - problem description (points, centroids, N/D/K/M, fuzzifier,
//                max_iter, epsilon, output path).
//   block_size - requested block size; real launch sizes are derived from it.
//   stop_iter  - iteration index at which to force a stop (< 0: run until
//                convergence or G.max_iter).
//   step       - chunking factor forwarded to update_memberships_kernel_v1c
//                (replaces the commented-out roundup(KD_SIZE, block_size)).
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0] membership-update time, [1] centroid-update time, [2] convergence-check
//   time, [3] final device->host copy time, [4] index of the last iteration.
__host__ double * GFKM_GPU_v3c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = KD_SIZE * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
// K+1 slots: slot 0 is a base offset, slots 1..K hold per-cluster counts.
int histo_size = (G.K+1)*INT_SIZE;
int initBlockSize = block_size;
block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, initBlockSize);
int num_blocks = roundup(G.N, block_size);
BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
// << 2: grid sizing below assumes each reduction thread consumes 4 elements.
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
//int step = roundup(KD_SIZE, block_size);
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo;
int * d_NNT;
int * d_sNNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
// Pinned host buffers so hipMemcpyAsync below is genuinely asynchronous.
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
// Transpose points (NxD -> d_pointsT as DxN) so per-dimension reductions
// read contiguous columns.
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_blocks), dim3(block_size), centroids_size, 0,
d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemset(d_histo, 0, histo_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(num_histo_blocks), dim3(blockSizeV3.histogramKernelBlockSize), 0, 0, d_NNT, d_histo, NM_SIZE);
// Snapshot taken BEFORE scan_kernel: the host loop below needs the raw
// per-cluster counts while the device gets prefix sums.
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( scan_kernel), dim3(1), dim3(1), 0, 0, d_histo, G.K);
hipLaunchKernelGGL(( counting_sort_kernel), dim3(num_counting_sort_blocks), dim3(blockSizeV3.countingSortKernelBlockSize), 0, 0, d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
// NOTE(review): p_histo was filled via hipMemcpyAsync with no explicit
// stream synchronization before this host-side read; presumably
// TimingGPU::GetCounter() synchronizes the device — confirm.
for (j = 0; j < G.K; ++j){
// Slot j is overwritten with the reduction block count for cluster j,
// whose raw element count lives in slot j+1.
p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
// One launch per dimension, round-robined over streams[1..NSTREAM-1].
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
// Final reduction of the (small) per-block partial sums happens on the CPU.
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
// d_sumC is reused to hold the NEW centroids for the convergence check.
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
// Promote the new centroids for the next iteration.
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
// NOTE(review): the 'streams' array allocated with new[] is never deleted.
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
// GFKM clustering on the GPU, variant v3d: membership update with kernel v1d
// (tile-based shared-memory staging of centroids, tile_size centroids per
// tile), histogram + counting-sort grouping, and a hybrid GPU/CPU centroid
// reduction spread over NSTREAM streams. Unlike v3a-c, every kernel here
// is launched with the single caller-supplied block_size.
//
// Parameters:
//   f          - open FILE* receiving per-phase timings.
//   G          - problem description (points, centroids, N/D/K/M, fuzzifier,
//                max_iter, epsilon, output path).
//   block_size - block size used directly for all kernel launches.
//   stop_iter  - iteration index at which to force a stop (< 0: run until
//                convergence or G.max_iter).
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0] membership-update time, [1] centroid-update time, [2] convergence-check
//   time, [3] final device->host copy time, [4] index of the last iteration.
__host__ double * GFKM_GPU_v3d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
int sm_size = block_size * DBL_SIZE;
int num_blocks = roundup(G.N, block_size);
int num_histo_blocks = roundup(NM_SIZE, block_size);
// K+1 slots: slot 0 is a base offset, slots 1..K hold per-cluster counts.
int histo_size = (G.K+1)*INT_SIZE;
// <<2: each reduction thread consumes 4 elements, so segments are rounded
// up to 4*block_size when sizing reduction grids below.
int reduction_block_size = block_size<<2;
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
// NOTE(review): integer division — tile_size is 0 when G.D > block_size,
// which would break usm_size/num_tiles; presumably callers guarantee
// G.D <= block_size — confirm.
int tile_size = block_size / G.D;
int usm_size = (tile_size * G.D) * DBL_SIZE;
int num_tiles = roundup(G.K, tile_size);
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo;
int * d_NNT;
int * d_sNNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
// Pinned host buffers so hipMemcpyAsync below is genuinely asynchronous.
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
// Transpose points (NxD -> d_pointsT as DxN) so per-dimension reductions
// read contiguous columns.
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1d), dim3(num_blocks), dim3(block_size), usm_size, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemset(d_histo, 0, histo_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(num_histo_blocks), dim3(block_size), 0, 0, d_NNT, d_histo, NM_SIZE);
// Snapshot taken BEFORE scan_kernel: the host loop below needs the raw
// per-cluster counts while the device gets prefix sums.
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( scan_kernel), dim3(1), dim3(1), 0, 0, d_histo, G.K);
hipLaunchKernelGGL(( counting_sort_kernel), dim3(num_histo_blocks), dim3(block_size), 0, 0, d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
// NOTE(review): p_histo was filled via hipMemcpyAsync with no explicit
// stream synchronization before this host-side read; presumably
// TimingGPU::GetCounter() synchronizes the device — confirm.
for (j = 0; j < G.K; ++j){
// Slot j is overwritten with the reduction block count for cluster j,
// whose raw element count lives in slot j+1.
p_histo[j] = roundup(p_histo[j+1], reduction_block_size);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
// One launch per dimension, round-robined over streams[1..NSTREAM-1].
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
// Final reduction of the (small) per-block partial sums happens on the CPU.
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
// d_sumC is reused to hold the NEW centroids for the convergence check.
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
// Promote the new centroids for the next iteration.
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
// NOTE(review): the 'streams' array allocated with new[] is never deleted.
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
#pragma endregion
#pragma region Version 4
__host__ double * GFKM_GPU_v4(FILE * f, GFKM & G, BlockSizeV4 block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
//int segmentation_size;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = KD_SIZE * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
int histo_size = (G.K+1)*INT_SIZE;
int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
int num_gather_blocks = roundup(NM_SIZE, block_size.gatherKernelBlockSize);
int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo_values;
int * d_histo;
int * d_NNT;
int * d_sNNT;
int * d_indices;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo_values, histo_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
CudaSafeCall(hipMalloc(&d_indices, NNT_size));
CudaSafeCall(hipMemset(d_histo, 0, INT_SIZE));
thrust::device_ptr<int> dev_indices_ptr(d_indices);
thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
if (step > 4)
{
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), 0, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else if (step == 1)
{
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else
{
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_update_memberships_blocks), dim3(block_size.updateMembershipsKernelBlockSize), centroids_size, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
}
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_sNNT, d_NNT, NNT_size, hipMemcpyDeviceToDevice));
thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( gather_kernel), dim3(num_gather_blocks), dim3(block_size.gatherKernelBlockSize), 0, 0, d_indices, d_memberships, d_sU, NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
for (j = 0; j < G.K; ++j){
//segmentation_size = p_histo[j+1] - p_histo[j];
p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo_values);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
hipFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
// GFKM clustering driver, variant v4a: plain membership kernel (v1a, no
// shared-memory staging of centroids) + sort/histogram/segmented-reduction
// centroid update.
//
// f          : open file used for timing output (Util::print_times).
// G          : problem description (N points, D dims, K clusters, M nearest
//              clusters per point, fuzzifier, epsilon, max_iter). G.points and
//              G.centroids are read; results are written out via writeToFiles.
// block_size : requested block size; adjusted per kernel by getBlockSizeFor*.
// stop_iter  : if >= 0, forces termination at that iteration index.
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0]=t1 membership-kernel time, [1]=t2 centroid-update time,
//   [2]=t3 convergence-check time, [3]=t4 copy-back time,
//   [4]=index of the last executed iteration.
//
// Fixes vs. previous revision: explicit device synchronization before every
// host read of data produced by hipMemcpyAsync (correctness previously relied
// on TimingGPU::GetCounter() happening to synchronize -- TODO confirm), the
// stream array is now delete[]d, and stream create/destroy are error-checked.
__host__ double * GFKM_GPU_v4a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;                       // total (point, nearest-cluster) entries
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;            // one double per cluster
	int centroids_size = G.K * G.D * DBL_SIZE;     // full centroid matrix
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;
	int histo_size = (G.K+1)*INT_SIZE;             // K+1 slots: cumulative bounds then counts
	int initBlockSize = block_size;
	block_size = getBlockSizeForMembershipKkernelV1a(initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
	int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
	int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared memory per reduction block
	// Each reduction block consumes reduction_block_size*4 input elements;
	// "+ G.K" covers the per-cluster round-up done inside the main loop.
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size << 2) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;      // column-major (transposed) copy of the points
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;
	double * d_sU;           // memberships gathered into cluster-sorted order
	double * d_sumU;         // per-block partial sums of memberships
	double * d_sumC;         // per-block partial sums of weighted coordinates
	int * d_histo_values;
	int * d_histo;
	int * d_NNT;             // nearest-cluster table
	int * d_sNNT;            // sorted copy of d_NNT
	int * d_indices;         // permutation produced by the sort
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(hipMalloc(&d_sU, sU_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_histo_values, histo_size));
	CudaSafeCall(hipMalloc(&d_histo, histo_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_indices, NNT_size));
	CudaSafeCall(hipMemset(d_histo, 0, INT_SIZE)); // slot 0 stays 0; upper_bound fills 1..K
	thrust::device_ptr<int> dev_indices_ptr(d_indices);
	thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
	thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
	thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
	thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers: required for truly asynchronous hipMemcpyAsync.
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
	CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i]));
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose (row-major DxN view -> column-major) via geam.
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( update_memberships_kernel_v1a), dim3(num_blocks), dim3(block_size), 0, 0,
			d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_sNNT, d_NNT, NNT_size, hipMemcpyDeviceToDevice));
		thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
		// Stable sort keeps equal cluster ids in point order; indices record the permutation.
		thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
		// Cumulative counts into slots 1..K, then difference -> per-cluster counts.
		thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
		thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
		CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
		hipLaunchKernelGGL(( gather_kernel), dim3(num_gather_blocks), dim3(blockSizeV4.gatherKernelBlockSize), 0, 0, d_indices, d_memberships, d_sU, NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
		// p_histo was filled by an async copy on the default stream; make it
		// visible before the host loop below reads it.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		for (j = 0; j < G.K; ++j){
			// p_histo[j+1] = member count of cluster j; p_histo[j] (already
			// consumed) is reused to hold the grid size for this cluster.
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
			hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
				d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				// Dimensions fan out over streams 1..NSTREAM-1.
				hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
		CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
		t2 = t2 + tmr_GPU.GetCounter();
		// Wait for the kernels on all streams and both partial-sum transfers
		// before the CPU-side reduction touches p_sumU/p_sumC.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// New centroids go into d_sumC first so convergence can compare
		// old (d_centroids) against new (d_sumC) on device.
		CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		// p_flags comes from an async copy -- synchronize before testing it.
		CudaSafeCall(hipDeviceSynchronize());
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // loop ran out: report the last real iteration index
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
	// Finish the copy-back before the host writes the results out.
	CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_membershipsT);
	hipFree(d_sU);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_histo_values);
	hipFree(d_histo);
	hipFree(d_NNT);
	hipFree(d_sNNT);
	hipFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_memberships);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_histo);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5]; // ownership transfers to the caller
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (int s = 0; s < NSTREAM; ++s)
		CudaSafeCall(hipStreamDestroy(streams[s]));
	delete[] streams; // was leaked previously
	hipDeviceReset();
#pragma endregion
	return rs;
}
// GFKM clustering driver, variant v4b: membership kernel v1b stages the full
// centroid matrix in dynamic shared memory (centroids_size bytes per block);
// centroid update is the same sort/histogram/segmented-reduction scheme as v4a.
//
// f          : open file used for timing output (Util::print_times).
// G          : problem description (N, D, K, M, fuzzifier, epsilon, max_iter);
//              G.points and G.centroids are read, results written via writeToFiles.
// block_size : requested block size; adjusted per kernel by getBlockSizeFor*.
// stop_iter  : if >= 0, forces termination at that iteration index.
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0]=t1 membership time, [1]=t2 centroid-update time,
//   [2]=t3 convergence-check time, [3]=t4 copy-back time,
//   [4]=index of the last executed iteration.
//
// Fixes vs. previous revision: explicit synchronization before every host read
// of hipMemcpyAsync results (previously relied on TimingGPU::GetCounter()
// happening to synchronize -- TODO confirm), delete[] of the stream array,
// and error-checked stream create/destroy.
__host__ double * GFKM_GPU_v4b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;                       // total (point, nearest-cluster) entries
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;            // one double per cluster
	int centroids_size = G.K * G.D * DBL_SIZE;     // full centroid matrix (also the v1b shared-mem size)
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;
	int histo_size = (G.K+1)*INT_SIZE;             // K+1 slots: cumulative bounds then counts
	int initBlockSize = block_size;
	// v1b needs a block size compatible with centroids_size bytes of shared memory.
	block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
	int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
	int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared memory per reduction block
	// Each reduction block consumes reduction_block_size*4 input elements;
	// "+ G.K" covers the per-cluster round-up done inside the main loop.
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size << 2) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;      // column-major (transposed) copy of the points
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;
	double * d_sU;           // memberships gathered into cluster-sorted order
	double * d_sumU;         // per-block partial sums of memberships
	double * d_sumC;         // per-block partial sums of weighted coordinates
	int * d_histo_values;
	int * d_histo;
	int * d_NNT;             // nearest-cluster table
	int * d_sNNT;            // sorted copy of d_NNT
	int * d_indices;         // permutation produced by the sort
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(hipMalloc(&d_sU, sU_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_histo_values, histo_size));
	CudaSafeCall(hipMalloc(&d_histo, histo_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_indices, NNT_size));
	CudaSafeCall(hipMemset(d_histo, 0, INT_SIZE)); // slot 0 stays 0; upper_bound fills 1..K
	thrust::device_ptr<int> dev_indices_ptr(d_indices);
	thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
	thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
	thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
	thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers: required for truly asynchronous hipMemcpyAsync.
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
	CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i]));
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose (row-major DxN view -> column-major) via geam.
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		// v1b: centroids_size bytes of dynamic shared memory per block.
		hipLaunchKernelGGL(( update_memberships_kernel_v1b), dim3(num_blocks), dim3(block_size), centroids_size, 0,
			d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_sNNT, d_NNT, NNT_size, hipMemcpyDeviceToDevice));
		thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
		// Stable sort keeps equal cluster ids in point order; indices record the permutation.
		thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
		// Cumulative counts into slots 1..K, then difference -> per-cluster counts.
		thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
		thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
		CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
		hipLaunchKernelGGL(( gather_kernel), dim3(num_gather_blocks), dim3(blockSizeV4.gatherKernelBlockSize), 0, 0, d_indices, d_memberships, d_sU, NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
		// p_histo was filled by an async copy on the default stream; make it
		// visible before the host loop below reads it.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		for (j = 0; j < G.K; ++j){
			// p_histo[j+1] = member count of cluster j; p_histo[j] (already
			// consumed) is reused to hold the grid size for this cluster.
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
			hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
				d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				// Dimensions fan out over streams 1..NSTREAM-1.
				hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
		CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
		t2 = t2 + tmr_GPU.GetCounter();
		// Wait for the kernels on all streams and both partial-sum transfers
		// before the CPU-side reduction touches p_sumU/p_sumC.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// New centroids go into d_sumC first so convergence can compare
		// old (d_centroids) against new (d_sumC) on device.
		CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		// p_flags comes from an async copy -- synchronize before testing it.
		CudaSafeCall(hipDeviceSynchronize());
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // loop ran out: report the last real iteration index
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
	// Finish the copy-back before the host writes the results out.
	CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_membershipsT);
	hipFree(d_sU);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_histo_values);
	hipFree(d_histo);
	hipFree(d_NNT);
	hipFree(d_sNNT);
	hipFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_memberships);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_histo);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5]; // ownership transfers to the caller
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (int s = 0; s < NSTREAM; ++s)
		CudaSafeCall(hipStreamDestroy(streams[s]));
	delete[] streams; // was leaked previously
	hipDeviceReset();
#pragma endregion
	return rs;
}
// GFKM clustering driver, variant v4c: membership kernel v1c stages centroids
// in shared memory in chunks controlled by the extra `step` parameter;
// centroid update is the same sort/histogram/segmented-reduction scheme as v4a.
//
// f          : open file used for timing output (Util::print_times).
// G          : problem description (N, D, K, M, fuzzifier, epsilon, max_iter);
//              G.points and G.centroids are read, results written via writeToFiles.
// block_size : requested block size; adjusted per kernel by getBlockSizeFor*.
// stop_iter  : if >= 0, forces termination at that iteration index.
// step       : chunking parameter forwarded to update_memberships_kernel_v1c
//              (semantics defined by the kernel -- TODO confirm).
//
// Returns a heap-allocated double[5] owned by the caller:
//   [0]=t1 membership time, [1]=t2 centroid-update time,
//   [2]=t3 convergence-check time, [3]=t4 copy-back time,
//   [4]=index of the last executed iteration.
//
// Fixes vs. previous revision: explicit synchronization before every host read
// of hipMemcpyAsync results (previously relied on TimingGPU::GetCounter()
// happening to synchronize -- TODO confirm), delete[] of the stream array,
// and error-checked stream create/destroy.
__host__ double * GFKM_GPU_v4c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;                       // total (point, nearest-cluster) entries
	int KD_SIZE = G.K * G.D;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;            // one double per cluster
	int centroids_size = KD_SIZE * DBL_SIZE;       // full centroid matrix (also the v1c shared-mem size)
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;
	int histo_size = (G.K+1)*INT_SIZE;             // K+1 slots: cumulative bounds then counts
	int initBlockSize = block_size;
	// v1c needs a block size compatible with centroids_size bytes of shared memory.
	block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
	int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
	int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared memory per reduction block
	// Each reduction block consumes reduction_block_size*4 input elements;
	// "+ G.K" covers the per-cluster round-up done inside the main loop.
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size << 2) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	//int step = roundup(KD_SIZE, block_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;      // column-major (transposed) copy of the points
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;
	double * d_sU;           // memberships gathered into cluster-sorted order
	double * d_sumU;         // per-block partial sums of memberships
	double * d_sumC;         // per-block partial sums of weighted coordinates
	int * d_histo_values;
	int * d_histo;
	int * d_NNT;             // nearest-cluster table
	int * d_sNNT;            // sorted copy of d_NNT
	int * d_indices;         // permutation produced by the sort
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(hipMalloc(&d_flags, flag_size));
	CudaSafeCall(hipMalloc(&d_points, points_size));
	CudaSafeCall(hipMalloc(&d_pointsT, points_size));
	CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
	CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
	CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(hipMalloc(&d_sU, sU_size));
	CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
	CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
	CudaSafeCall(hipMalloc(&d_histo_values, histo_size));
	CudaSafeCall(hipMalloc(&d_histo, histo_size));
	CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
	CudaSafeCall(hipMalloc(&d_indices, NNT_size));
	CudaSafeCall(hipMemset(d_histo, 0, INT_SIZE)); // slot 0 stays 0; upper_bound fills 1..K
	thrust::device_ptr<int> dev_indices_ptr(d_indices);
	thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
	thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
	thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
	thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers: required for truly asynchronous hipMemcpyAsync.
	CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
	CudaSafeCall(hipHostMalloc(&p_points, points_size));
	CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
	CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
	CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
	CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
	CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
	CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	hipblasHandle_t handle;
	hipStream_t * streams = new hipStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		CudaSafeCall(hipStreamCreate(&streams[i]));
	CublasSafeCall(hipblasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose (row-major DxN view -> column-major) via geam.
	CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		// v1c: centroids_size bytes of dynamic shared memory; `step` controls chunking.
		hipLaunchKernelGGL(( update_memberships_kernel_v1c), dim3(num_blocks), dim3(block_size), centroids_size, 0,
			d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_sNNT, d_NNT, NNT_size, hipMemcpyDeviceToDevice));
		thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
		// Stable sort keeps equal cluster ids in point order; indices record the permutation.
		thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
		// Cumulative counts into slots 1..K, then difference -> per-cluster counts.
		thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
		thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
		CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
		hipLaunchKernelGGL(( gather_kernel), dim3(num_gather_blocks), dim3(blockSizeV4.gatherKernelBlockSize), 0, 0, d_indices, d_memberships, d_sU, NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
		// p_histo was filled by an async copy on the default stream; make it
		// visible before the host loop below reads it.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		for (j = 0; j < G.K; ++j){
			// p_histo[j+1] = member count of cluster j; p_histo[j] (already
			// consumed) is reused to hold the grid size for this cluster.
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
			hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[0],
				d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				// Dimensions fan out over streams 1..NSTREAM-1.
				hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(reduction_block_size), sm_size, streams[k % (NSTREAM-1)+1],
					d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
		CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
		t2 = t2 + tmr_GPU.GetCounter();
		// Wait for the kernels on all streams and both partial-sum transfers
		// before the CPU-side reduction touches p_sumU/p_sumC.
		CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// New centroids go into d_sumC first so convergence can compare
		// old (d_centroids) against new (d_sumC) on device.
		CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		// p_flags comes from an async copy -- synchronize before testing it.
		CudaSafeCall(hipDeviceSynchronize());
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--; // loop ran out: report the last real iteration index
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
	CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
	// Finish the copy-back before the host writes the results out.
	CudaSafeCall(hipDeviceSynchronize());
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	hipFree(d_flags);
	hipFree(d_points);
	hipFree(d_pointsT);
	hipFree(d_centroids);
	hipFree(d_memberships);
	hipFree(d_membershipsT);
	hipFree(d_sU);
	hipFree(d_sumU);
	hipFree(d_sumC);
	hipFree(d_histo_values);
	hipFree(d_histo);
	hipFree(d_NNT);
	hipFree(d_sNNT);
	hipFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
	hipHostFree(p_flags);
	hipHostFree(p_points);
	hipHostFree(p_centroids);
	hipHostFree(p_memberships);
	hipHostFree(p_sumU);
	hipHostFree(p_sumC);
	hipHostFree(p_histo);
	hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5]; // ownership transfers to the caller
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(hipblasDestroy(handle));
	for (int s = 0; s < NSTREAM; ++s)
		CudaSafeCall(hipStreamDestroy(streams[s]));
	delete[] streams; // was leaked previously
	hipDeviceReset();
#pragma endregion
	return rs;
}
// GFKM variant v4d: memberships are computed with the shared-memory tiled
// kernel (v1d); per-cluster centroid sums are then built by sorting the flat
// (point, cluster) membership list with Thrust and reducing each cluster's
// segment, with per-dimension reductions spread over NSTREAM streams.
// Returns a heap-allocated double[5] owned by the caller:
//   {t1: membership-update time, t2: reduction/transfer time,
//    t3: convergence-check time, t4: final copy-back time, last iteration}.
// NOTE(review): `streams` (new[]) is never delete[]'d — small leak, mirrored
// in the sibling GFKM_GPU_v* variants; left unchanged here.
__host__ double * GFKM_GPU_v4d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
//int segmentation_size;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
int sm_size = block_size * DBL_SIZE;
int num_blocks = roundup(G.N, block_size);
int num_histo_blocks = roundup(NM_SIZE, block_size);
int histo_size = (G.K+1)*INT_SIZE;
// reduction blocks process 4 elements per thread (block_size << 2)
int reduction_block_size = block_size<<2;
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
// tile_size = centroid rows per shared-memory tile in the v1d kernel
int tile_size = block_size / G.D;
int usm_size = (tile_size * G.D) * DBL_SIZE;
int num_tiles = roundup(G.K, tile_size);
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo_values;
int * d_histo;
int * d_NNT;
int * d_sNNT;
int * d_indices;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(hipMalloc(&d_flags, flag_size));
CudaSafeCall(hipMalloc(&d_points, points_size));
CudaSafeCall(hipMalloc(&d_pointsT, points_size));
CudaSafeCall(hipMalloc(&d_centroids, centroids_size));
CudaSafeCall(hipMalloc(&d_memberships, memberships_size));
CudaSafeCall(hipMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(hipMalloc(&d_sU, sU_size));
CudaSafeCall(hipMalloc(&d_sumU, sumU_size));
CudaSafeCall(hipMalloc(&d_sumC, sumC_size));
CudaSafeCall(hipMalloc(&d_histo_values, histo_size));
CudaSafeCall(hipMalloc(&d_histo, histo_size));
CudaSafeCall(hipMalloc(&d_NNT, NNT_size));
CudaSafeCall(hipMalloc(&d_sNNT, NNT_size));
CudaSafeCall(hipMalloc(&d_indices, NNT_size));
// Only histo[0] is zeroed: entries 1..K are rewritten by upper_bound each
// iteration, so the leading zero is the only value that must persist.
CudaSafeCall(hipMemset(d_histo, 0, INT_SIZE));
thrust::device_ptr<int> dev_indices_ptr(d_indices);
thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
CudaSafeCall(hipHostMalloc(&p_flags, flag_size));
CudaSafeCall(hipHostMalloc(&p_points, points_size));
CudaSafeCall(hipHostMalloc(&p_centroids, centroids_size));
CudaSafeCall(hipHostMalloc(&p_memberships, memberships_size));
CudaSafeCall(hipHostMalloc(&p_sumU, sumU_size));
CudaSafeCall(hipHostMalloc(&p_sumC, sumC_size));
CudaSafeCall(hipHostMalloc(&p_NNT, NNT_size));
CudaSafeCall(hipHostMalloc(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(hipMemcpy(d_points, p_points, points_size, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_centroids, p_centroids, centroids_size, hipMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
hipblasHandle_t handle;
hipStream_t * streams = new hipStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
hipStreamCreate(&streams[i]);
CublasSafeCall(hipblasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
// Transpose points (row-major N x D -> D x N) so per-dimension reductions
// read coalesced columns.
CublasSafeCall(hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( update_memberships_kernel_v1d), dim3(num_blocks), dim3(block_size), usm_size, 0,
d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_sNNT, d_NNT, NNT_size, hipMemcpyDeviceToDevice));
thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
// Cluster segment boundaries -> d_histo[1..K], then per-cluster counts.
thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
CudaSafeCall(hipMemcpyAsync(p_histo, d_histo, histo_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( gather_kernel), dim3(num_histo_blocks), dim3(block_size), 0, 0, d_indices, d_memberships, d_sU, NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
// NOTE(review): p_histo is consumed on the host right below, but it was
// filled by hipMemcpyAsync above with no explicit synchronization in
// between — relies on an implicit null-stream sync; verify.
for (j = 0; j < G.K; ++j){
//segmentation_size = p_histo[j+1] - p_histo[j];
// p_histo[j] is repurposed: it now holds the block count for cluster j.
p_histo[j] = roundup(p_histo[j+1], reduction_block_size);
hipLaunchKernelGGL(( reduce_memberships_kernel_GFKM), dim3(p_histo[j]), dim3(block_size), sm_size, streams[0],
d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
hipLaunchKernelGGL(( reduce_centroids_kernel_GFKM), dim3(p_histo[j]), dim3(block_size), sm_size, streams[k % (NSTREAM-1)+1],
d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(hipMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, hipMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
// d_sumC is reused here as scratch for the new centroids.
CudaSafeCall(hipMemcpyAsync(d_sumC, p_centroids, centroids_size, hipMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
hipLaunchKernelGGL(( check_convergence), dim3(G.K), dim3(G.D), 0, 0, d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(hipMemcpyAsync(p_flags, d_flags, flag_size, hipMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(d_centroids, d_sumC, centroids_size, hipMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// NOTE(review): p_flags[0] is read without an explicit sync after the
// async D2H copy above — potential race on the pinned flag; confirm.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
// Clamp so rs[4] reports the last executed iteration index.
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(hipMemcpyAsync(p_centroids, d_centroids, centroids_size, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpyAsync(p_NNT, d_NNT, NNT_size, hipMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
hipFree(d_flags);
hipFree(d_points);
hipFree(d_pointsT);
hipFree(d_centroids);
hipFree(d_memberships);
hipFree(d_membershipsT);
hipFree(d_sU);
hipFree(d_sumU);
hipFree(d_sumC);
hipFree(d_histo_values);
hipFree(d_histo);
hipFree(d_NNT);
hipFree(d_sNNT);
hipFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
hipHostFree(p_flags);
hipHostFree(p_points);
hipHostFree(p_centroids);
hipHostFree(p_memberships);
hipHostFree(p_sumU);
hipHostFree(p_sumC);
hipHostFree(p_histo);
hipHostFree(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(hipblasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
hipStreamDestroy(streams[i]);
hipDeviceReset();
#pragma endregion
return rs;
}
#pragma endregion
// Public entry point: dispatch to the GFKM GPU variant selected by `mode`
// (1, 2 or 3; anything else falls through to version 4). A non-positive
// block_size lets each variant auto-tune its kernels via the occupancy API.
// Returns the variant's heap-allocated timing array (caller owns it).
// (The former per-step v1a..v4c dispatch logic was dead, commented-out
// code and has been removed.)
__host__ double * GFKM_GPU(FILE * f, GFKM & G, int block_size, int stop_iter, int mode)
{
switch (mode)
{
case 1:
{
BlockSizeV1 sizesV1 = getBlockSizesForVersion1(block_size);
return GFKM_GPU_v1(f, G, sizesV1, stop_iter);
}
case 2:
{
BlockSizeV2 sizesV2 = getBlockSizesForVersion2(block_size);
return GFKM_GPU_v2(f, G, sizesV2, stop_iter);
}
case 3:
{
BlockSizeV3 sizesV3 = getBlockSizesForVersion3(block_size);
return GFKM_GPU_v3(f, G, sizesV3, stop_iter);
}
default:
{
BlockSizeV4 sizesV4 = getBlockSizesForVersion4(block_size);
return GFKM_GPU_v4(f, G, sizesV4, stop_iter);
}
}
}
#pragma region Getting block size functions
// Choose the launch block size for update_memberships_kernel_v1a.
// A positive initBlockSize is taken as-is; otherwise the occupancy API
// picks the size that maximizes occupancy (capped at BlockSizeLimit).
inline __host__ int getBlockSizeForMembershipKkernelV1a(int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v1a, 0, BlockSizeLimit);
printf("Updating memberships kernel block size version 1a = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Choose the launch block size for update_memberships_kernel_v1b, taking
// its dynamic shared-memory requirement into account. A positive
// initBlockSize overrides the occupancy-based choice.
inline __host__ int getBlockSizeForMembershipKkernelV1b(int dynamicSMemSize, int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v1b, dynamicSMemSize, BlockSizeLimit);
printf("Updating memberships kernel block size version 1b = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Choose the launch block size for update_memberships_kernel_v1c, taking
// its dynamic shared-memory requirement into account. A positive
// initBlockSize overrides the occupancy-based choice.
inline __host__ int getBlockSizeForMembershipKkernelV1c(int dynamicSMemSize, int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v1c, dynamicSMemSize, BlockSizeLimit);
printf("Updating memberships kernel block size version 1c = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Choose the launch block size for update_memberships_kernel_v2a.
// A positive initBlockSize is taken as-is; otherwise the occupancy API
// picks the size that maximizes occupancy (capped at BlockSizeLimit).
inline __host__ int getBlockSizeForMembershipKkernelV2a(int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v2a, 0, BlockSizeLimit);
printf("Updating memberships kernel block size version 2a = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Choose the launch block size for update_memberships_kernel_v2b, taking
// its dynamic shared-memory requirement into account. A positive
// initBlockSize overrides the occupancy-based choice.
inline __host__ int getBlockSizeForMembershipKkernelV2b(int dynamicSMemSize, int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v2b, dynamicSMemSize, BlockSizeLimit);
printf("Updating memberships kernel block size version 2b = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Choose the launch block size for update_memberships_kernel_v2c, taking
// its dynamic shared-memory requirement into account. A positive
// initBlockSize overrides the occupancy-based choice.
inline __host__ int getBlockSizeForMembershipKkernelV2c(int dynamicSMemSize, int initBlockSize)
{
if (initBlockSize > 0)
{
return initBlockSize;
}
int unusedMinGrid = 0;
int chosenBlockSize = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &chosenBlockSize,
update_memberships_kernel_v2c, dynamicSMemSize, BlockSizeLimit);
printf("Updating memberships kernel block size version 2c = %d\n", chosenBlockSize);
return chosenBlockSize;
}
// Dynamic shared-memory bytes for a reduction kernel launch:
// one double (8 bytes) per thread in the block.
inline __host__ int blockSizeToDynamicSMemSize(int blockSize)
{
return blockSize * 8;
}
// Block sizes for the FKM reduction kernels (version 2).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
// NOTE(review): appears to duplicate getBlockSizesForVersion2 — confirm
// which call sites still use this one.
inline __host__ BlockSizeV2 getBlockSizeForCentroidKernelV2(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV2(initBlockSize);
if (initBlockSize == 0) return BlockSizeV2();
BlockSizeV2 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 2 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 2 = %d\n", sizes.reduceCentroidsKernelBlockSize);
return sizes;
}
// Block sizes for the GFKM reduction pipeline (version 3: histogram,
// counting sort, membership/centroid reductions).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
// NOTE(review): appears to duplicate getBlockSizesForVersion3 — confirm
// which call sites still use this one.
inline __host__ BlockSizeV3 getBlockSizeForCentroidKernelV3(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV3(initBlockSize);
if (initBlockSize == 0) return BlockSizeV3();
BlockSizeV3 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.histogramKernelBlockSize),
histogram_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.countingSortKernelBlockSize),
counting_sort_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 3 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 3 = %d\n", sizes.reduceCentroidsKernelBlockSize);
printf("HistogramKernelBlockSize version 3 = %d\n", sizes.histogramKernelBlockSize);
printf("CountingSortKernelBlockSize version 3 = %d\n", sizes.countingSortKernelBlockSize);
return sizes;
}
// Block sizes for the Thrust-based GFKM pipeline (version 4: gather plus
// membership/centroid reductions).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
// NOTE(review): appears to duplicate getBlockSizesForVersion4 — confirm
// which call sites still use this one.
inline __host__ BlockSizeV4 getBlockSizeForCentroidKernelV4(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV4(initBlockSize);
if (initBlockSize == 0) return BlockSizeV4();
BlockSizeV4 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.gatherKernelBlockSize),
gather_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 4 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 4 = %d\n", sizes.reduceCentroidsKernelBlockSize);
printf("GatherKernelBlockSize version 4 = %d\n", sizes.gatherKernelBlockSize);
return sizes;
}
// Block sizes for version 1: a positive request is honored, anything else
// falls back to the struct's defaults.
inline __host__ BlockSizeV1 getBlockSizesForVersion1(int initBlockSize)
{
return (initBlockSize > 0) ? BlockSizeV1(initBlockSize) : BlockSizeV1();
}
// Block sizes for version 2 (FKM reduction kernels).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
inline __host__ BlockSizeV2 getBlockSizesForVersion2(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV2(initBlockSize);
if (initBlockSize == 0) return BlockSizeV2();
BlockSizeV2 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 2 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 2 = %d\n", sizes.reduceCentroidsKernelBlockSize);
return sizes;
}
// Block sizes for version 3 (histogram, counting sort, and GFKM
// reduction kernels).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
inline __host__ BlockSizeV3 getBlockSizesForVersion3(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV3(initBlockSize);
if (initBlockSize == 0) return BlockSizeV3();
BlockSizeV3 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.histogramKernelBlockSize),
histogram_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.countingSortKernelBlockSize),
counting_sort_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 3 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 3 = %d\n", sizes.reduceCentroidsKernelBlockSize);
printf("HistogramKernelBlockSize version 3 = %d\n", sizes.histogramKernelBlockSize);
printf("CountingSortKernelBlockSize version 3 = %d\n", sizes.countingSortKernelBlockSize);
return sizes;
}
// Block sizes for version 4 (gather kernel plus GFKM reduction kernels).
// initBlockSize > 0: use it everywhere; == 0: struct defaults;
// < 0: let the occupancy API choose per kernel.
inline __host__ BlockSizeV4 getBlockSizesForVersion4(int initBlockSize)
{
if (initBlockSize > 0) return BlockSizeV4(initBlockSize);
if (initBlockSize == 0) return BlockSizeV4();
BlockSizeV4 sizes;
int unusedMinGrid = 0;
hipOccupancyMaxPotentialBlockSize(&unusedMinGrid, &(sizes.gatherKernelBlockSize),
gather_kernel, 0, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceMembershipsKernelBlockSize),
reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
hipOccupancyMaxPotentialBlockSizeVariableSMem(&unusedMinGrid, &(sizes.reduceCentroidsKernelBlockSize),
reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit);
printf("ReduceMembershipsKernelBlockSize version 4 = %d\n", sizes.reduceMembershipsKernelBlockSize);
printf("ReduceCentroidsKernelBlockSize version 4 = %d\n", sizes.reduceCentroidsKernelBlockSize);
printf("GatherKernelBlockSize version 4 = %d\n", sizes.gatherKernelBlockSize);
return sizes;
}
#pragma endregion
#pragma endregion | b528db3a9878f9db3782f90c8c225c713f3e00ee.cu | #include "SGFKM.cuh"
#include "cuda_runtime.h"
#include "Util.h"
#define DIM_MAX 16
#define MMAX 2
#define NSTREAM 5
#define BlockSizeLimit 1024
#pragma region Inline utility functions
// Ceiling division: smallest b such that b*y >= x, valid for x >= 1, y >= 1.
// NOTE(review): returns 1 (not 0) for x == 0 — callers only pass positive
// sizes, so this never matters in practice; confirm before reusing elsewhere.
inline __host__ int roundup(int x, int y)
{
return 1 + (x-1)/y;
}
// Persist the final clustering results: the K x D centroid matrix and the
// N x M nearest-centroid table, written as text next to the dataset path.
inline __host__ void writeToFiles(double * centroids, int * NNT, GFKM & G)
{
Util::write<double>(centroids, G.K, G.D, G.path + "centroids.GPU.txt");
Util::write<int>(NNT, G.N, G.M, G.path + "NNT.GPU.txt");
}
#pragma endregion
#pragma region Update Membership Kernel
// One thread per point: find the M nearest centroids (indices in pNNT,
// squared distances in DNNT, kept sorted ascending via insertion) and turn
// the distances into fuzzy membership degrees (row layout: point*M).
// Centroids are read directly from global memory (no shared-memory tiling).
// Assumes D <= DIM_MAX and M <= MMAX (fixed-size register arrays).
__global__ void update_memberships_kernel_v1a(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*M;
double * pCentroids = centroids;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
// Cache this point's coordinates in registers.
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
for (i = 0; i < K; ++i, pCentroids += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
// idx is reused from here on as the insertion position in the sorted
// top-M list (the thread's point index is no longer needed).
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
// Exact hit on a centroid: full membership there, rest stay 0.
if (DNNT[i] == 0.){
pMemberships[i] = 1.;
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Same as v1a, but the entire centroid matrix is first staged in shared
// memory (one element per thread) — requires K*D <= blockDim.x; the host
// dispatcher selects this variant only when that holds (verify at call
// sites). Dynamic shared memory: K*D doubles.
__global__ void update_memberships_kernel_v1b(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
if (tid < K*D){
C[tid] = centroids[tid];
}
// Barrier before any divergence: out-of-range threads return only after
// the shared tile is fully populated.
__syncthreads();
if (idx >= N) return;
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*M;
double * pC = C;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
for (i = 0; i < K; ++i, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.;
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Same as v1b, but each thread copies `step` consecutive centroid elements
// into shared memory, so K*D may exceed blockDim.x as long as
// blockDim.x * step >= K*D (the dispatcher computes step accordingly —
// confirm at call sites). Dynamic shared memory: K*D doubles.
__global__ void update_memberships_kernel_v1c(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int step, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
int i, j;
// Thread t loads elements [t*step, t*step + step) of the centroid matrix.
for (i = 0, j = K*D, tid *= step; tid < j && i < step; ++i, ++tid){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*M;
double * pCentroids = C;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
for (i = 0; i < K; ++i, pCentroids += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
if (DNNT[i] == 0.){
pMemberships[i] = 1.;
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
}
// Tiled variant of v1a: centroids are streamed through shared memory in
// tiles of tile_size rows (tile_size * D doubles of dynamic shared memory),
// so K*D may exceed blockDim.x. One thread per point; maintains the M
// nearest centroids (pNNT/DNNT, sorted ascending by insertion), then turns
// the distances into fuzzy membership degrees (row layout: point*M).
// Assumes D <= DIM_MAX, M <= MMAX, and blockDim.x >= tile_size * D.
// All threads — including those with pid >= N — execute the tile loop so
// every __syncthreads() is reached uniformly.
// BUGFIX: added a __syncthreads() at the end of each tile iteration.
// Without it, fast threads could start overwriting the shared tile C[]
// (next load) while slow threads were still reading the previous tile —
// a shared-memory data race.
__global__ void update_memberships_kernel_v1d(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int num_tiles, int tile_size, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int pid = blockIdx.x * blockDim.x + tid;
int i = pid * M, j, t, cSize = K*D, idx;
int * pNNT = NNT + i;
int x = tile_size;
int y = num_tiles - 1;
int z = x * D;
int cid = 0;
int offsetC = 0;
double * pMemberships = memberships + i;
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
double X[DIM_MAX];
double DNNT[MMAX];
for (i = 0, j = pid*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i){
DNNT[i] = DBL_MAX;
pMemberships[i] = 0.;
}
#pragma region load (num_tiles - 1) tiles first
for (t = 0; t < y; ++t, offsetC += z)
{
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N)
{
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
}
// BUGFIX: barrier before the next iteration (or the final tile load)
// overwrites C[]; reached by all threads of the block.
__syncthreads();
}
#pragma endregion
#pragma region load last tile
if (offsetC + z > cSize){
z = cSize - offsetC;
x = z / D;
}
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
#pragma region calculate memberships
for (i = 0; i < M; ++i){
// Exact hit on a centroid: full membership there, rest stay 0.
if (DNNT[i] == 0.){
pMemberships[i] = 1.;
return;
}
diff = pow(DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[i] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[i] = pow(pMemberships[i]*sum, -fuzzifier);
}
#pragma endregion
}
#pragma endregion
}
// Variant of v1a with a dense membership layout: each point owns a full row
// of K membership values (point*K), zeroed and then filled only at the M
// nearest centroid columns. pNNT still records the M nearest indices.
// Assumes D <= DIM_MAX and M <= MMAX.
__global__ void update_memberships_kernel_v2a(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*K;
double * pCentroids = centroids;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
for (i = 0; i < K; ++i, pCentroids += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pCentroids[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.;
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Variant of v2a with the whole centroid matrix staged in shared memory
// (one element per thread) — requires K*D <= blockDim.x; the dispatcher
// selects this variant only when that holds (verify at call sites).
// Dense membership layout: row of K values per point (point*K).
__global__ void update_memberships_kernel_v2b(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
if (tid < K*D){
C[tid] = centroids[tid];
}
// Barrier before any divergence: out-of-range threads return only after
// the shared tile is fully populated.
__syncthreads();
if (idx >= N) return;
int i, j;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*K;
double * pC = C;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
for (i = 0; i < K; ++i, pC += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.;
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Variant of v2b where each thread copies `step` consecutive centroid
// elements into shared memory, so K*D may exceed blockDim.x as long as
// blockDim.x * step >= K*D (the dispatcher computes step — confirm).
// Dense membership layout: row of K values per point (point*K).
__global__ void update_memberships_kernel_v2c(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int step, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + tid;
int i, j;
// Thread t loads elements [t*step, t*step + step) of the centroid matrix.
for (i = 0, j = K*D, tid *= step; tid < j && i < step; ++i, ++tid){
C[tid] = centroids[tid];
}
__syncthreads();
if (idx >= N) return;
int * pNNT = NNT + idx*M;
double * pMemberships = memberships + idx*K;
double * pC = C;
double X[DIM_MAX];
double DNNT[MMAX];
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
for (i = 0, j = idx*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
for (i = 0; i < K; ++i, pC += D){
pMemberships[i] = 0.;
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = i;
}
}
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.;
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
}
// Tiled variant of v2a: centroids streamed through shared memory in tiles
// of tile_size rows (tile_size * D doubles of dynamic shared memory).
// All threads — including pid >= N — run the tile loop so barriers are
// reached uniformly.
// BUGFIX: added a __syncthreads() at the end of each tile iteration.
// Without it, fast threads could start overwriting the shared tile C[]
// (next load) while slow threads were still reading the previous tile —
// a shared-memory data race.
// NOTE(review): memberships here is indexed pid*M + pNNT[i], while
// v2a/b/c use pid*K + pNNT[i]; if K > M this overruns the row — confirm
// the intended layout before using this variant. Left unchanged.
__global__ void update_memberships_kernel_v2d(
double * points, double * centroids, double * memberships, int * NNT,
int N, int D, int K, int M, int num_tiles, int tile_size, double fuzzifier)
{
extern __shared__ double C[];
int tid = threadIdx.x;
int pid = blockIdx.x * blockDim.x + tid;
int i = pid * M, j, t, cSize = K*D, idx;
int x = tile_size;
int y = num_tiles - 1;
int z = x * D;
int cid = 0;
int offsetC = 0;
int * pNNT = NNT + i;
double * pMemberships = memberships + i;
double f = 1. / (fuzzifier - 1.);
double diff, temp, sum = 0.;
double X[DIM_MAX];
double DNNT[MMAX];
for (i = 0, j = pid*D; i < D; ++i, ++j) X[i] = points[j];
for (i = 0; i < M; ++i) DNNT[i] = DBL_MAX;
#pragma region load (num_tiles - 1) tiles first
for (t = 0; t < y; ++t, offsetC += z)
{
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
// idx reused as the insertion position in the sorted top-M list.
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
}
// BUGFIX: barrier before the next iteration (or the final tile load)
// overwrites C[]; reached by all threads of the block.
__syncthreads();
}
#pragma endregion
#pragma region load last tile
if (offsetC + z > cSize){
z = cSize - offsetC;
x = z / D;
}
if (tid < z) C[tid] = centroids[offsetC + tid];
__syncthreads();
if (pid < N){
double * pC = C;
for (i = 0; i < x; ++i, ++cid, pC += D){
diff = 0.;
for (j = 0; j < D; ++j){
temp = X[j] - pC[j];
diff = diff + temp*temp;
}
idx = 0;
for (; idx < M; ++idx){
if (DNNT[idx] > diff) break;
}
for (j = M-1; j > idx; --j){
DNNT[j] = DNNT[j-1];
pNNT[j] = pNNT[j-1];
}
if (idx < M){
DNNT[idx] = diff;
pNNT[idx] = cid;
}
}
#pragma region calculate memberships
for (i = 0; i < M; ++i){
if ( DNNT[i] == 0.){
pMemberships[pNNT[i]] = 1.;
return;
}
diff = pow( DNNT[i], f);
//if (__isinf(diff)) diff = DBL_MAX;
pMemberships[pNNT[i]] = diff;
sum = sum + 1. / diff;
}
for (i = 0; i < M; ++i){
pMemberships[pNNT[i]] = pow(pMemberships[pNNT[i]]*sum, -fuzzifier);
}
#pragma endregion
}
#pragma endregion
}
#pragma endregion
#pragma region Calculating New Centroids (FKM) Kernels
// Grid-stride sum of memberships[0..N) with a shared-memory tree reduction;
// each block writes its partial sum to sumU[blockIdx.x].
// Dynamic shared memory: blockDim.x doubles. Requires blockDim.x to be a
// power of two, >= 2 (the final combine reads sdata[1]).
// BUGFIX: the last-warp stage of the tree reduction relied on implicit
// warp-synchronous execution over non-volatile shared memory, which is not
// guaranteed on Volta+ (independent thread scheduling) and can read stale
// values. Added __syncwarp() between the warp-level steps; all threads in
// the block reach them (no early returns), so each warp syncs safely.
__global__ void reduce_memberships_kernel_FKM(double * memberships, double * sumU, int N)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
while(i < N){
temp = temp + memberships[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncwarp();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncwarp();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncwarp();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncwarp();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncwarp();
if (tid == 0) sumU[blockIdx.x] = sdata[0] + sdata[1];
}
// Grid-stride sum of points[i] * memberships[i] over [0, N) with a
// shared-memory tree reduction; each block writes its partial sum to
// sumC[blockIdx.x]. Dynamic shared memory: blockDim.x doubles. Requires
// blockDim.x to be a power of two, >= 2 (final combine reads sdata[1]).
// BUGFIX: as in reduce_memberships_kernel_FKM, the last-warp stage relied
// on implicit warp synchrony over non-volatile shared memory (unsafe on
// Volta+); added __syncwarp() between the warp-level steps. Also removed a
// stray double semicolon after the fourth barrier.
__global__ void reduce_centroids_kernel_FKM
(double * points, double * memberships, double * sumC, int N)
{
extern __shared__ double sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + tid;
int gridSize = blockDim.x*gridDim.x;
double temp = 0.0;
while(i < N){
temp = temp + points[i] * memberships[i];
i += gridSize;
}
sdata[tid] = temp;
__syncthreads();
if (blockDim.x > 1023 && tid < 512)
sdata[tid] = sdata[tid] + sdata[tid+512];
__syncthreads();
if (blockDim.x > 511 && tid < 256)
sdata[tid] = sdata[tid] + sdata[tid+256];
__syncthreads();
if (blockDim.x > 255 && tid < 128)
sdata[tid] = sdata[tid] + sdata[tid+128];
__syncthreads();
if (blockDim.x > 127 && tid < 64)
sdata[tid] = sdata[tid] + sdata[tid+64];
__syncthreads();
if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncwarp();
if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
__syncwarp();
if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
__syncwarp();
if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
__syncwarp();
if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
__syncwarp();
if (tid == 0) sumC[blockIdx.x] = sdata[0] + sdata[1];
}
// Host-side final reduction: for each of the K centroids, sum its
// num_reduction_blocks partial membership sums (sumU) and, per dimension,
// its partial weighted-coordinate sums (sumC), then divide to obtain the
// new centroid coordinates. Layout: sumU is K rows of
// num_reduction_blocks; sumC is K*D rows of num_reduction_blocks.
__host__ void reduce_centroids
(double * centroids, double * sumC, double * sumU, int num_reduction_blocks, int D, int K)
{
double * outRow = centroids;
double * uRow = sumU;
double * cRow = sumC;
for (int c = 0; c < K; ++c){
double uTotal = 0.0;
for (int b = 0; b < num_reduction_blocks; ++b)
uTotal += uRow[b];
for (int d = 0; d < D; ++d){
double acc = 0.0;
for (int b = 0; b < num_reduction_blocks; ++b)
acc += cRow[b];
outRow[d] = acc / uTotal;
cRow += num_reduction_blocks;
}
uRow += num_reduction_blocks;
outRow += D;
}
}
// One thread per centroid coordinate: divide the accumulated weighted sum by
// the owning cluster's membership total. The launch shape makes blockIdx.x
// the cluster id and threadIdx.x the coordinate within it.
__global__ void calculate_new_centroids(double * centroids, double * memberships)
{
	int coord = blockIdx.x*blockDim.x + threadIdx.x;
	centroids[coord] /= memberships[blockIdx.x];
}
// CPU recomputation of cluster centroids from fuzzy memberships.
// points:       N x D coordinates (row-major).
// memberships:  N x M membership weights, parallel to NNT.
// newCentroids: K x D output, overwritten here.
// NNT:          N x M table; NNT[i][j] is the cluster id of point i's j-th
//               nearest cluster.
// Each centroid becomes (sum of u*x over contributing points) / (sum of u).
// A cluster no point references divides 0/0 and yields NaN coordinates,
// matching the original behavior.
__host__ void calculate_new_centroids(
	double * points, double * memberships, double * newCentroids,
	int * NNT, int N, int D, int K, int M)
{
	int i, j, k, idx;
	int * pNNT = NNT;
	double * pMemberships = memberships;
	double * pPoints = points;
	double * pCentroids;
	double * sum = new double[K]();	// zero-initialized per-cluster weight totals
	memset(newCentroids, 0, K*D*sizeof(double));
	// Accumulate weighted coordinates and per-cluster membership totals.
	for (i = 0; i < N; ++i, pMemberships += M, pNNT += M, pPoints += D){
		for (j = 0; j < M; ++j){
			idx = pNNT[j];
			sum[idx] = sum[idx] + pMemberships[j];
			pCentroids = newCentroids + idx*D;
			for (k = 0; k < D; ++k)
				pCentroids[k] = pCentroids[k] + pMemberships[j]*pPoints[k];
		}
	}
	// Normalize each centroid by its cluster's total membership.
	pCentroids = newCentroids;
	for (i = 0; i < K; ++i, pCentroids += D)
		for (j = 0; j < D; ++j)
			pCentroids[j] = pCentroids[j] / sum[i];
	delete[] sum;	// fix: the original leaked this allocation every call
}
#pragma endregion
#pragma region Calculating New Centroids (GFKM) Kernels
// Count, per cluster, how many (point, neighbour) entries reference it.
// The count for cluster c is accumulated into histo[c+1] (shifted by one so
// a later prefix sum turns the table into segment start offsets).
__global__ void histogram_kernel(int * NNT, int * histo, int size)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= size)
		return;
	atomicAdd(&histo[NNT[idx] + 1], 1);
}
// Sequential inclusive prefix sum over histo[0..K-1], turning the shifted
// per-cluster counts into start offsets for the counting sort.
// NOTE(review): the loop carries a dependency between iterations and uses no
// thread indexing, so this kernel is only correct when launched with a single
// thread (<<<1, 1>>>) — confirm at the call site (not visible in this chunk).
__global__ void scan_kernel(int * histo, int K)
{
int i = 1;
for (int i = 1; i < K; ++i)
histo[i] += histo[i-1];
}
// Scatter pass of a counting sort: each (point, neighbour) membership entry
// claims the next free slot of its cluster's output segment.
// histo holds the running output offset per cluster and is advanced in place
// via atomicAdd (so it is consumed by this kernel); sNNT receives the owning
// point index (i/M) and sU the membership value at the claimed slot.
__global__ void counting_sort_kernel(
	int * histo, int * NNT, int * sNNT, double * memberships, double * sU,
	int size, int M)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= size)
		return;
	// Reserve the next slot in this entry's cluster segment.
	int slot = atomicAdd(&histo[NNT[tid]], 1);
	sNNT[slot] = tid / M;
	sU[slot] = memberships[tid];
}
// Gather memberships according to a precomputed permutation, then replace
// each permutation entry with the owning point index (entry / M) in place.
__global__ void gather_kernel(
	int * indices, double * memberships, double * sU,
	int size, int M)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid < size){
		int src = indices[tid];
		sU[tid] = memberships[src];
		indices[tid] = src / M;
	}
}
// Block-wise partial reduction: sums memberships[] over a grid-stride range
// and writes one partial sum per block to sumU[blockIdx.x].
// Launch with blockDim.x * sizeof(double) bytes of dynamic shared memory.
// NOTE: the tree reduction assumes blockDim.x is a power of two — confirm at
// the call sites.
__global__ void reduce_memberships_kernel_GFKM(double * memberships, double * sumU, int N)
{
	extern __shared__ double sdata[];
	int tid = threadIdx.x;
	int i = blockIdx.x*blockDim.x + tid;
	int gridSize = blockDim.x*gridDim.x;
	double temp = 0.0;
	while(i < N){
		temp = temp + memberships[i];
		i += gridSize;
	}
	sdata[tid] = temp;
	__syncthreads();
	// Shared-memory tree reduction. A barrier is required after EVERY stage,
	// including the intra-warp ones: the original relied on implicit
	// warp-synchronous execution, which is not guaranteed since Volta
	// (independent thread scheduling) and constitutes a data race.
	if (blockDim.x > 1023 && tid < 512)
		sdata[tid] = sdata[tid] + sdata[tid+512];
	__syncthreads();
	if (blockDim.x > 511 && tid < 256)
		sdata[tid] = sdata[tid] + sdata[tid+256];
	__syncthreads();
	if (blockDim.x > 255 && tid < 128)
		sdata[tid] = sdata[tid] + sdata[tid+128];
	__syncthreads();
	if (blockDim.x > 127 && tid < 64)
		sdata[tid] = sdata[tid] + sdata[tid+64];
	__syncthreads();
	if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
	__syncthreads();
	if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
	__syncthreads();
	if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
	__syncthreads();
	if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
	__syncthreads();
	if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
	__syncthreads();
	if (tid == 0) sumU[blockIdx.x] = sdata[0] + sdata[1];
}
// Block-wise partial reduction over cluster-sorted data (GFKM path):
// sums points[sNNT[i]] * sU[i] for a grid-stride range and writes one partial
// sum per block to sumC[blockIdx.x]. sNNT holds point indices produced by the
// counting-sort/gather kernels; the gather through sNNT is uncoalesced by
// nature of the sort.
// Launch with blockDim.x * sizeof(double) bytes of dynamic shared memory.
// NOTE: the tree reduction assumes blockDim.x is a power of two — confirm at
// the call sites.
__global__ void reduce_centroids_kernel_GFKM
	(double * points, double * sU, int * sNNT, double * sumC, int size)
{
	extern __shared__ double sdata[];
	int tid = threadIdx.x;
	int i = blockIdx.x*blockDim.x + tid;
	int gridSize = blockDim.x*gridDim.x;
	double temp = 0.0;
	while(i < size){
		temp = temp + points[sNNT[i]] * sU[i];
		i += gridSize;
	}
	sdata[tid] = temp;
	__syncthreads();
	// Shared-memory tree reduction. A barrier is required after EVERY stage,
	// including the intra-warp ones: the original relied on implicit
	// warp-synchronous execution, which is not guaranteed since Volta
	// (independent thread scheduling) and constitutes a data race.
	if (blockDim.x > 1023 && tid < 512)
		sdata[tid] = sdata[tid] + sdata[tid+512];
	__syncthreads();
	if (blockDim.x > 511 && tid < 256)
		sdata[tid] = sdata[tid] + sdata[tid+256];
	__syncthreads();
	if (blockDim.x > 255 && tid < 128)
		sdata[tid] = sdata[tid] + sdata[tid+128];
	__syncthreads();
	if (blockDim.x > 127 && tid < 64)
		sdata[tid] = sdata[tid] + sdata[tid+64];
	__syncthreads();
	if (blockDim.x > 63 && tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
	__syncthreads();
	if (blockDim.x > 31 && tid < 16) sdata[tid] = sdata[tid] + sdata[tid + 16];
	__syncthreads();
	if (blockDim.x > 15 && tid < 8) sdata[tid] = sdata[tid] + sdata[tid + 8];
	__syncthreads();
	if (blockDim.x > 7 && tid < 4) sdata[tid] = sdata[tid] + sdata[tid + 4];
	__syncthreads();
	if (blockDim.x > 3 && tid < 2) sdata[tid] = sdata[tid] + sdata[tid + 2];
	__syncthreads();
	if (tid == 0) sumC[blockIdx.x] = sdata[0] + sdata[1];
}
// Combine per-block partial sums into final centroids (GFKM path).
// histo[i] gives the length of cluster i's segment in both sumU and sumC;
// sumU holds one variable-length segment per cluster, and sumC one segment
// per (cluster, dimension) pair in cluster-major order.
// A cluster whose membership total is 0.0 divides by zero, exactly as the
// original did.
__host__ void reduce_centroids
	(double * centroids, double * sumC, double * sumU, int * histo, int D, int K)
{
	double * centroidRow = centroids;
	double * uSeg = sumU;
	double * cSeg = sumC;
	for (int cluster = 0; cluster < K; ++cluster){
		int segLen = histo[cluster];
		// Total membership weight of this cluster.
		double weight = 0.0;
		for (int b = 0; b < segLen; ++b)
			weight = weight + uSeg[b];
		for (int dim = 0; dim < D; ++dim){
			// Sum the block partials for this (cluster, dimension) segment.
			double acc = 0.0;
			for (int b = 0; b < segLen; ++b)
				acc = acc + cSeg[b];
			centroidRow[dim] = acc / weight;
			cSeg += segLen;	// advance to the next segment
		}
		uSeg += segLen;
		centroidRow += D;
	}
}
#pragma endregion
#pragma region Main Methods
// Convergence test, launched as <<<K, D>>> (one thread per centroid
// coordinate): flag[0] is meant to end up true iff ANY coordinate moved by
// at least epsilon.
// NOTE(review): every thread unconditionally stores its own comparison into
// flag[0], so the final value is whichever store happens to land last — a
// data race that can report "converged" while other coordinates are still
// moving. A correct fix (write only when the threshold is exceeded) requires
// the callers to clear the flag before each launch, so all call sites would
// have to change together; flagged here rather than silently altered.
__global__ void check_convergence(double * centroids, double * newCentroids, bool * flag, double epsilon)
{
int cid = blockDim.x * blockIdx.x + threadIdx.x;
flag[0] = fabs(centroids[cid] - newCentroids[cid]) >= epsilon;
//__threadfence();
/*flag[0] = false;
int n = blockDim.x;
for (int i = 0; i < n; ++i){
if (fabs(centroids[i] - newCentroids[i]) >= epsilon){
flag[0] = true;
return;
}
}*/
}
#pragma region Version 1
// GFKM GPU driver, version 1 (auto-selects the membership kernel variant).
// Each iteration: update memberships/NNT on the GPU, copy them to pinned host
// buffers, recompute centroids on the CPU, then test convergence on the GPU.
// Stops at iteration stop_iter, or on convergence when stop_iter < 0.
// Writes results via writeToFiles/Util::print_times and returns a
// heap-allocated double[5] = {t1, t2, t3, 0.0, last iteration index}
// (accumulated timing counters); the caller owns the array.
__host__ double * GFKM_GPU_v1(FILE * f, GFKM & G, BlockSizeV1 block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = KD_SIZE * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
// step drives the kernel-variant choice below (presumably the number of
// block-sized chunks covering the K*D centroid table; roundup is defined
// elsewhere — confirm).
int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
double t1 = 0.0, t2 = 0.0, t3 = 0.0;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
// v1a: no dynamic shared memory; v1b/v1c: centroids_size bytes of dynamic
// shared memory (v1c additionally takes `step`).
if (step > 4)
{
tmr_GPU.StartCounter();
update_memberships_kernel_v1a<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else if (step == 1)
{
tmr_GPU.StartCounter();
update_memberships_kernel_v1b<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else
{
tmr_GPU.StartCounter();
update_memberships_kernel_v1c<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
}
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
// NOTE(review): p_NNT/p_memberships are consumed by the CPU right after
// async D2H copies with no explicit synchronization; this is only safe if
// GetCounter() synchronizes the device — confirm TimingGPU's behavior.
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_memberships, d_memberships, memberships_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_newCentroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
// NOTE(review): p_flags[0] is read below without an explicit sync after
// the async copy, and check_convergence itself races on flag[0].
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_newCentroids, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// Break at the forced-stop iteration, or (when stop_iter < 0) as soon as
// the convergence flag clears.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;	// clamp to the index of the last executed iteration
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_centroids);
cudaFree(d_newCentroids);
cudaFree(d_memberships);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
cudaDeviceReset();
return rs;
}
// GFKM GPU driver, variant v1a: always uses update_memberships_kernel_v1a
// (launched without dynamic shared memory). Per iteration: GPU membership
// update, D2H copy, CPU centroid recomputation, GPU convergence check.
// Stops at stop_iter, or on convergence when stop_iter < 0. Writes centroids
// and NNT with Util::write and returns a heap-allocated double[5] =
// {t1, t2, t3, 0.0, last iteration index}; the caller owns the array.
__host__ double * GFKM_GPU_v1a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
// Clamp/adjust the requested block size for the v1a kernel (helper defined
// elsewhere).
block_size = getBlockSizeForMembershipKkernelV1a(block_size);
int num_blocks = roundup(G.N, block_size);
double t1 = 0.0, t2 = 0.0, t3 = 0.0;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
update_memberships_kernel_v1a<<<num_blocks, block_size>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
// NOTE(review): p_NNT/p_memberships are consumed by the CPU right after
// async D2H copies with no explicit synchronization; only safe if
// GetCounter() synchronizes the device — confirm TimingGPU's behavior.
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_memberships, d_memberships, memberships_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_newCentroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
// NOTE(review): p_flags[0] is read below without an explicit sync after
// the async copy, and check_convergence itself races on flag[0].
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_newCentroids, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// Break at the forced-stop iteration, or (when stop_iter < 0) as soon as
// the convergence flag clears.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;	// clamp to the index of the last executed iteration
#pragma endregion
#pragma region Writing results to files
Util::write<double>(p_centroids, G.K, G.D, G.path + "centroids.GPU.txt");
Util::write<int>(p_NNT, G.N, G.M, G.path + "NNT.GPU.txt");
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_centroids);
cudaFree(d_newCentroids);
cudaFree(d_memberships);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
cudaDeviceReset();
return rs;
}
// GFKM GPU driver, variant v1b: always uses update_memberships_kernel_v1b,
// launched with centroids_size bytes of dynamic shared memory. Per iteration:
// GPU membership update, D2H copy, CPU centroid recomputation, GPU
// convergence check. Stops at stop_iter, or on convergence when
// stop_iter < 0. Writes results with writeToFiles and returns a
// heap-allocated double[5] = {t1, t2, t3, 0.0, last iteration index};
// the caller owns the array.
__host__ double * GFKM_GPU_v1b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
// Adjust the block size for the v1b kernel given its shared-memory need
// (helper defined elsewhere).
block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, block_size);
int num_blocks = roundup(G.N, block_size);
double t1 = 0.0, t2 = 0.0, t3 = 0.0;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
update_memberships_kernel_v1b<<<num_blocks, block_size, centroids_size>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
// NOTE(review): p_NNT/p_memberships are consumed by the CPU right after
// async D2H copies with no explicit synchronization; only safe if
// GetCounter() synchronizes the device — confirm TimingGPU's behavior.
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_memberships, d_memberships, memberships_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_newCentroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
// NOTE(review): p_flags[0] is read below without an explicit sync after
// the async copy, and check_convergence itself races on flag[0].
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_newCentroids, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// Break at the forced-stop iteration, or (when stop_iter < 0) as soon as
// the convergence flag clears.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;	// clamp to the index of the last executed iteration
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_centroids);
cudaFree(d_newCentroids);
cudaFree(d_memberships);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
cudaDeviceReset();
return rs;
}
// GFKM GPU driver, variant v1c: always uses update_memberships_kernel_v1c,
// launched with centroids_size bytes of dynamic shared memory plus the
// caller-supplied `step` parameter. Per iteration: GPU membership update,
// D2H copy, CPU centroid recomputation, GPU convergence check. Stops at
// stop_iter, or on convergence when stop_iter < 0. Writes results with
// writeToFiles and returns a heap-allocated double[5] =
// {t1, t2, t3, 0.0, last iteration index}; the caller owns the array.
__host__ double * GFKM_GPU_v1c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = KD_SIZE * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
// Adjust the block size for the v1c kernel given its shared-memory need
// (helper defined elsewhere).
block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, block_size);
int num_blocks = roundup(G.N, block_size);
//int step = roundup(KD_SIZE, block_size);
double t1 = 0.0, t2 = 0.0, t3 = 0.0;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
update_memberships_kernel_v1c<<<num_blocks, block_size, centroids_size>>>
(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
// NOTE(review): p_NNT/p_memberships are consumed by the CPU right after
// async D2H copies with no explicit synchronization; only safe if
// GetCounter() synchronizes the device — confirm TimingGPU's behavior.
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_memberships, d_memberships, memberships_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_newCentroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
// NOTE(review): p_flags[0] is read below without an explicit sync after
// the async copy, and check_convergence itself races on flag[0].
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_newCentroids, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// Break at the forced-stop iteration, or (when stop_iter < 0) as soon as
// the convergence flag clears.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;	// clamp to the index of the last executed iteration
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_centroids);
cudaFree(d_newCentroids);
cudaFree(d_memberships);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
cudaDeviceReset();
return rs;
}
// GFKM GPU driver, variant v1d: always uses update_memberships_kernel_v1d,
// which stages centroids in shared memory tiles of tile_size rows
// (usm_size = tile_size * D doubles of dynamic shared memory). Per iteration:
// GPU membership update, D2H copy, CPU centroid recomputation, GPU
// convergence check. Stops at stop_iter, or on convergence when
// stop_iter < 0. Writes results with writeToFiles and returns a
// heap-allocated double[5] = {t1, t2, t3, 0.0, last iteration index};
// the caller owns the array.
__host__ double * GFKM_GPU_v1d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int NM_SIZE = G.N * G.M;
int flag_size = sizeof(bool);
int points_size = G.N * G.D * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int num_blocks = roundup(G.N, block_size);
// NOTE(review): tile_size is 0 when G.D > block_size, which would make
// usm_size 0 and feed a zero tile size to roundup/the kernel — confirm the
// callers guarantee G.D <= block_size.
int tile_size = block_size / G.D;
int usm_size = (tile_size * G.D) * DBL_SIZE;
int num_tiles = roundup(G.K, tile_size);
double t1 = 0.0, t2 = 0.0, t3 = 0.0;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_centroids;
double * d_newCentroids;
double * d_memberships;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_newCentroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
update_memberships_kernel_v1d<<<num_blocks, block_size, usm_size>>>
(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Calculate new centroids by CPU
// NOTE(review): p_NNT/p_memberships are consumed by the CPU right after
// async D2H copies with no explicit synchronization; only safe if
// GetCounter() synchronizes the device — confirm TimingGPU's behavior.
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_memberships, d_memberships, memberships_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
calculate_new_centroids(p_points, p_memberships, p_centroids, p_NNT, G.N, G.D, G.K, G.M);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_newCentroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
// NOTE(review): p_flags[0] is read below without an explicit sync after
// the async copy, and check_convergence itself races on flag[0].
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_newCentroids, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_newCentroids, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
// Break at the forced-stop iteration, or (when stop_iter < 0) as soon as
// the convergence flag clears.
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;	// clamp to the index of the last executed iteration
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_centroids);
cudaFree(d_newCentroids);
cudaFree(d_memberships);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = 0.0;
rs[4] = (double)i;
#pragma endregion
cudaDeviceReset();
return rs;
}
#pragma endregion
#pragma region Version 2
__host__ double * GFKM_GPU_v2(FILE * f, GFKM & G, BlockSizeV2 block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int KD_SIZE = G.K * G.D;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = G.N * G.K * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
int num_reduction_blocks = roundup(G.N, reduction_block_size << 2);
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sumU;
double * d_sumC;
int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
cublasHandle_t handle;
cudaStream_t * streams = new cudaStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
cudaStreamCreate(&streams[i]);
CublasSafeCall(cublasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
if (step > 4)
{
tmr_GPU.StartCounter();
update_memberships_kernel_v2a<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else if (step == 1)
{
tmr_GPU.StartCounter();
update_memberships_kernel_v2b<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
}
else
{
tmr_GPU.StartCounter();
update_memberships_kernel_v2c<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
}
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.K,
&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
tmr_GPU.StartCounter();
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
for (j = 0; j < G.K; ++j){
reduce_memberships_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[0]>>>
(d_membershipsT + offset, d_sumU + offset_sumU, G.N);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
reduce_centroids_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
(d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
offset_pointsT += G.N;
offset_sumC += num_reduction_blocks;
}
offset_sumU += num_reduction_blocks;
offset += G.N;
}
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, sumU_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, sumC_size, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
}
else{
#pragma region Calculate centroids by GPU
tmr_GPU.StartCounter();
calculate_new_centroids<<<G.K, G.D>>>(d_sumC, d_sumU);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
}
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_pointsT);
cudaFree(d_centroids);
cudaFree(d_memberships);
cudaFree(d_membershipsT);
cudaFree(d_sumU);
cudaFree(d_sumC);
cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_sumU);
cudaFreeHost(p_sumC);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(cublasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
cudaStreamDestroy(streams[i]);
cudaDeviceReset();
#pragma endregion
return rs;
}
// GFKM clustering on the GPU, variant v2a: membership-update kernel with no
// shared-memory staging of centroids.
// Returns a heap-allocated double[5] (caller must delete[]):
//   rs[0] = t1  accumulated membership-update kernel time
//   rs[1] = t2  accumulated transpose / reduction / centroid-update time
//   rs[2] = t3  accumulated convergence-check time
//   rs[3] = t4  final device->host copy-back time
//   rs[4] = index of the last executed iteration
// Fix vs. previous revision: the host-side `streams` array allocated with
// new[] was leaked; it is now delete[]-ed before returning.
__host__ double * GFKM_GPU_v2a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = G.K * G.D * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int initBlockSize = block_size;
	// Project helper picks the actual launch block size for this kernel variant.
	block_size = getBlockSizeForMembershipKkernelV2a(initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
	int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE;               // dynamic shared memory per reduction block
	int num_reduction_blocks = roundup(G.N, reduction_block_size << 2); // each reduction block consumes 4*blockDim elements
	int sumU_size = num_reduction_blocks * centroid_size;        // per-block partial membership sums
	int sumC_size = num_reduction_blocks * centroids_size;       // per-block partial weighted coordinate sums
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;        // transposed points (D rows of length N) for coalesced reduction
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;   // transposed memberships (K rows of length N)
	double * d_sumU;
	double * d_sumC;
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers enable async copies and higher H2D/D2H bandwidth.
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose via cublasDgeam (beta = 0, both operands transposed).
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v2a<<<num_blocks, block_size>>>
			(d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// Per cluster j: membership sums run on streams[0]; the D per-dimension
		// weighted coordinate reductions are spread over streams[1..NSTREAM-1].
		for (j = 0; j < G.K; ++j){
			reduce_memberships_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[0]>>>
				(d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, sumU_size, cudaMemcpyDeviceToHost));
			CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, sumC_size, cudaMemcpyDeviceToHost));
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids are staged in d_sumC; copied into d_centroids below.
			CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			calculate_new_centroids<<<G.K, G.D>>>(d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		// NOTE(review): p_flags[0] is read below right after an async copy on the
		// default stream; correctness relies on TimingGPU::GetCounter() synchronizing
		// the device before the read — confirm that assumption holds.
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--;   // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	delete[] streams;   // was leaked: new[] with no matching delete[]
	cudaDeviceReset();
#pragma endregion
	return rs;
}
// GFKM clustering on the GPU, variant v2b: membership-update kernel that
// stages the whole centroid matrix in dynamic shared memory (centroids_size
// bytes passed as the third launch argument).
// Returns a heap-allocated double[5] (caller must delete[]):
//   rs[0]=t1 membership time, rs[1]=t2 transpose/reduction/centroid time,
//   rs[2]=t3 convergence time, rs[3]=t4 copy-back time, rs[4]=last iteration.
// Fix vs. previous revision: the `streams` array allocated with new[] was
// leaked; it is now delete[]-ed before returning.
__host__ double * GFKM_GPU_v2b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = G.K * G.D * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int initBlockSize = block_size;
	// Block size also depends on the shared-memory footprint of the centroids.
	block_size = getBlockSizeForMembershipKkernelV2b(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
	int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE;               // dynamic shared memory per reduction block
	int num_reduction_blocks = roundup(G.N, reduction_block_size << 2); // each reduction block consumes 4*blockDim elements
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;        // transposed points for coalesced reductions
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;   // transposed memberships
	double * d_sumU;
	double * d_sumC;
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers enable async copies and higher transfer bandwidth.
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose via cublasDgeam (beta = 0, both operands transposed).
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v2b<<<num_blocks, block_size, centroids_size>>>
			(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// Membership sums on streams[0]; per-dimension weighted coordinate
		// reductions distributed round-robin over streams[1..NSTREAM-1].
		for (j = 0; j < G.K; ++j){
			reduce_memberships_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[0]>>>
				(d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, sumU_size, cudaMemcpyDeviceToHost));
			CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, sumC_size, cudaMemcpyDeviceToHost));
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids are staged in d_sumC; copied into d_centroids below.
			CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			calculate_new_centroids<<<G.K, G.D>>>(d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		// NOTE(review): p_flags[0] is read below right after an async copy on the
		// default stream; correctness relies on TimingGPU::GetCounter() synchronizing
		// the device before the read — confirm that assumption holds.
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--;   // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	delete[] streams;   // was leaked: new[] with no matching delete[]
	cudaDeviceReset();
#pragma endregion
	return rs;
}
// GFKM clustering on the GPU, variant v2c: membership-update kernel that loads
// the centroid matrix into shared memory in `step`-sized chunks (step is a
// caller-supplied parameter; the commented-out line below shows it was once
// derived as roundup(K*D, block_size)).
// Returns a heap-allocated double[5] (caller must delete[]):
//   rs[0]=t1 membership time, rs[1]=t2 transpose/reduction/centroid time,
//   rs[2]=t3 convergence time, rs[3]=t4 copy-back time, rs[4]=last iteration.
// Fix vs. previous revision: the `streams` array allocated with new[] was
// leaked; it is now delete[]-ed before returning.
__host__ double * GFKM_GPU_v2c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int KD_SIZE = G.K * G.D;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = KD_SIZE * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int initBlockSize = block_size;
	// Block size also depends on the shared-memory footprint of the centroids.
	block_size = getBlockSizeForMembershipKkernelV2c(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV2 blockSizeV2 = getBlockSizeForCentroidKernelV2(initBlockSize);
	int reduction_block_size = min(blockSizeV2.reduceCentroidsKernelBlockSize, blockSizeV2.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE;               // dynamic shared memory per reduction block
	int num_reduction_blocks = roundup(G.N, reduction_block_size << 2); // each reduction block consumes 4*blockDim elements
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	//int step = roundup(KD_SIZE, block_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;        // transposed points for coalesced reductions
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;   // transposed memberships
	double * d_sumU;
	double * d_sumC;
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers enable async copies and higher transfer bandwidth.
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose via cublasDgeam (beta = 0, both operands transposed).
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v2c<<<num_blocks, block_size, centroids_size>>>
			(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// Membership sums on streams[0]; per-dimension weighted coordinate
		// reductions distributed round-robin over streams[1..NSTREAM-1].
		for (j = 0; j < G.K; ++j){
			reduce_memberships_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[0]>>>
				(d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_FKM<<<num_reduction_blocks, reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, sumU_size, cudaMemcpyDeviceToHost));
			CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, sumC_size, cudaMemcpyDeviceToHost));
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids are staged in d_sumC; copied into d_centroids below.
			CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			calculate_new_centroids<<<G.K, G.D>>>(d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		// NOTE(review): p_flags[0] is read below right after an async copy on the
		// default stream; correctness relies on TimingGPU::GetCounter() synchronizing
		// the device before the read — confirm that assumption holds.
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--;   // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	delete[] streams;   // was leaked: new[] with no matching delete[]
	cudaDeviceReset();
#pragma endregion
	return rs;
}
// GFKM clustering on the GPU, variant v2d: membership-update kernel that tiles
// the centroid matrix through dynamic shared memory (tile_size centroids of D
// doubles per tile, num_tiles tiles, usm_size bytes per block).
// Returns a heap-allocated double[5] (caller must delete[]):
//   rs[0]=t1 membership time, rs[1]=t2 transpose/reduction/centroid time,
//   rs[2]=t3 convergence time, rs[3]=t4 copy-back time, rs[4]=last iteration.
// Fixes vs. previous revision: the `streams` array allocated with new[] was
// leaked (now delete[]-ed); the unused local `num_histo_blocks` was removed.
__host__ double * GFKM_GPU_v2d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = G.K * G.D * DBL_SIZE;
	int memberships_size = G.N * G.K * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sm_size = block_size * DBL_SIZE;                 // dynamic shared memory per reduction block
	int num_blocks = roundup(G.N, block_size);
	int num_reduction_blocks;
	int reduction_block_size = block_size<<2;            // each reduction block consumes 4*blockDim elements
	num_reduction_blocks = roundup(G.N, reduction_block_size);
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	int tile_size = block_size / G.D;                    // centroids per shared-memory tile
	int usm_size = (tile_size * G.D) * DBL_SIZE;         // bytes of shared memory per membership block
	int num_tiles = roundup(G.K, tile_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;        // transposed points for coalesced reductions
	double * d_centroids;
	double * d_memberships;
	double * d_membershipsT;   // transposed memberships
	double * d_sumU;
	double * d_sumC;
	int * d_NNT;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
#pragma endregion
#pragma region Malloc host
	// Pinned host buffers enable async copies and higher transfer bandwidth.
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Out-of-place transpose via cublasDgeam (beta = 0, both operands transposed).
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v2d<<<num_blocks, block_size, usm_size>>>
			(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Transpose memberships
		alpha = 1.;
		beta = 0.;
		tmr_GPU.StartCounter();
		CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.K,
			&alpha, d_memberships, G.K, &beta, d_memberships, G.K, d_membershipsT, G.N));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reduce centroids by GPU
		tmr_GPU.StartCounter();
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		// Membership sums on streams[0]; per-dimension weighted coordinate
		// reductions distributed round-robin over streams[1..NSTREAM-1].
		for (j = 0; j < G.K; ++j){
			reduce_memberships_kernel_FKM<<<num_reduction_blocks, block_size, sm_size, streams[0]>>>
				(d_membershipsT + offset, d_sumU + offset_sumU, G.N);
			offset_pointsT = 0;
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_FKM<<<num_reduction_blocks, block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_membershipsT + offset, d_sumC + offset_sumC, G.N);
				offset_pointsT += G.N;
				offset_sumC += num_reduction_blocks;
			}
			offset_sumU += num_reduction_blocks;
			offset += G.N;
		}
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		if (num_reduction_blocks > 1){
#pragma region Reduce memberships and centroids block sums by CPU
			tmr_GPU.StartCounter();
			CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, sumU_size, cudaMemcpyDeviceToHost));
			CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, sumC_size, cudaMemcpyDeviceToHost));
			t2 = t2 + tmr_GPU.GetCounter();
			tmr_CPU.start();
			reduce_centroids(p_centroids, p_sumC, p_sumU, num_reduction_blocks, G.D, G.K);
			tmr_CPU.stop();
			t2 = t2 + tmr_CPU.elapsed();
			tmr_GPU.StartCounter();
			// New centroids are staged in d_sumC; copied into d_centroids below.
			CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
		else{
#pragma region Calculate centroids by GPU
			tmr_GPU.StartCounter();
			calculate_new_centroids<<<G.K, G.D>>>(d_sumC, d_sumU);
			t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
		}
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		// NOTE(review): p_flags[0] is read below right after an async copy on the
		// default stream; correctness relies on TimingGPU::GetCounter() synchronizing
		// the device before the read — confirm that assumption holds.
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	if (i == G.max_iter) i--;   // clamp so rs[4] reports the last executed iteration
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_NNT);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	delete[] streams;   // was leaked: new[] with no matching delete[]
	cudaDeviceReset();
#pragma endregion
	return rs;
}
#pragma endregion
#pragma region Version 3
// GFKM GPU "version 3": runs the fuzzy-k-means main loop on the GPU.  Each
// iteration (1) updates memberships / nearest-cluster table (NNT), (2)
// counting-sorts memberships by nearest-cluster index, (3) reduces membership
// sums (sumU) and weighted point sums (sumC) per cluster with multi-stream
// block reductions, (4) finishes the centroid division on the CPU, and (5)
// checks convergence against G.epsilon.  One of three membership-kernel
// variants is selected from `step` (= roundup(K*D, membership block size)).
//
// Parameters:
//   f          - open file that receives the timing report
//   G          - problem data: points, initial centroids, sizes N/D/K/M,
//                fuzzifier, epsilon, max_iter, output path
//   block_size - per-kernel block sizes for this version
//   stop_iter  - if >= 0, force termination at this iteration index
//
// Returns a heap-allocated double[5]:
//   [0] membership-update time, [1] centroid-update time,
//   [2] convergence-check time, [3] final device->host copy time,
//   [4] last iteration index.  Caller must delete[] it.
//
// NOTE(review): calls cudaDeviceReset() before returning, so any other device
// allocations owned by the caller are invalidated — confirm this is intended.
__host__ double * GFKM_GPU_v3(FILE * f, GFKM & G, BlockSizeV3 block_size, int stop_iter)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;                 // N points x M nearest clusters per point
    int KD_SIZE = G.K * G.D;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;      // one double per cluster
    int centroids_size = KD_SIZE * DBL_SIZE; // full K x D centroid matrix
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;       // K bins + 1 extra slot for the scan
    // step selects the membership-kernel variant in the main loop below.
    int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
    int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
    int num_histo_blocks = roundup(NM_SIZE, block_size.histogramKernelBlockSize);
    int num_counting_sort_blocks = roundup(NM_SIZE, block_size.countingSortKernelBlockSize);
    int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared memory per reduction block
    // Each reduction block folds reduction_block_size*4 elements; the +G.K
    // absorbs the per-cluster rounding slack.
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;          // convergence flag written by check_convergence
    double * d_points;       // points (N x D)
    double * d_pointsT;      // transposed points (D x N) for the reductions
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;           // memberships rearranged by counting sort
    double * d_sumU;         // per-block partial membership sums
    double * d_sumC;         // per-block partial weighted point sums
    int * d_histo;           // per-cluster counts, then scan offsets
    int * d_NNT;             // nearest-cluster table
    int * d_sNNT;            // NNT sorted by cluster
#pragma endregion
#pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    // Pinned host buffers so the cudaMemcpyAsync calls below are truly async.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    // NOTE(review): the streams[] array itself is never delete[]d (leak);
    // the streams are destroyed at the end but the host array is not freed.
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Out-of-place transpose d_pointsT = d_points^T via cuBLAS geam, giving
    // contiguous per-dimension columns for reduce_centroids_kernel_GFKM.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
        // Variant choice: v1b/v1c stage centroids in `centroids_size` bytes of
        // dynamic shared memory (v1c in `step` chunks); v1a uses none.
        if (step > 4)
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1a<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize>>>
                (d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else if (step == 1)
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1b<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
                (d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1c<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
                (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
        }
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemset(d_histo, 0, histo_size));
        histogram_kernel<<<num_histo_blocks, block_size.histogramKernelBlockSize>>>(d_NNT, d_histo, NM_SIZE);
        // Raw per-cluster counts are copied out before the in-place scan turns
        // d_histo into sort offsets.
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        scan_kernel<<<1, 1>>>(d_histo, G.K);
        counting_sort_kernel<<<num_counting_sort_blocks, block_size.countingSortKernelBlockSize>>>(d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
            NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        // NOTE(review): p_histo is read on the host just below; this relies on
        // tmr_GPU.GetCounter() synchronizing the default stream (e.g. via
        // cudaEventSynchronize) so the async copy has completed -- confirm
        // TimingGPU's semantics.
#pragma endregion
#pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            // p_histo[j+1] presumably holds cluster j's element count; slot j is
            // recycled to hold cluster j's reduction-block count -- verify against
            // histogram_kernel's bin layout.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, round-robined over streams 1..NSTREAM-1.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
        // Final fold of the per-block partial sums into new centroids on the host.
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as scratch holding the freshly computed centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        // Promote the new centroids for the next iteration.
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Stop when converged (no forced iteration pending) or when the forced
        // stop iteration is reached.  Same GetCounter-synchronization caveat as
        // above applies to reading p_flags[0] here.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last valid index
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5]; // ownership transfers to the caller (delete[])
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    cudaDeviceReset();
#pragma endregion
    return rs;
}
// GFKM GPU "version 3a": same counting-sort / per-cluster-reduction pipeline
// as GFKM_GPU_v3, but always uses membership kernel variant v1a (no shared-
// memory staging of centroids) and derives all block sizes from a single
// `block_size` hint via the getBlockSizeFor* helpers.  Results are written
// with Util::write instead of writeToFiles.
//
// Parameters:
//   f          - open file that receives the timing report
//   G          - problem data: points, initial centroids, sizes N/D/K/M,
//                fuzzifier, epsilon, max_iter, output path
//   block_size - initial block-size hint, adjusted by the helpers below
//   stop_iter  - if >= 0, force termination at this iteration index
//
// Returns a heap-allocated double[5]: {membership time, centroid time,
// convergence time, copy-back time, last iteration index}.  Caller delete[]s.
__host__ double * GFKM_GPU_v3a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;                    // N points x M nearest clusters
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;         // one double per cluster
    int centroids_size = G.K * G.D * DBL_SIZE;  // full K x D centroid matrix
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;          // K bins + 1 slot for the scan
    int initBlockSize = block_size;
    // Adjust the membership block size for variant v1a, then derive the
    // counting-sort / reduction block sizes from the same hint.
    block_size = getBlockSizeForMembershipKkernelV1a(initBlockSize);
    int num_blocks = roundup(G.N, block_size);
    BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
    int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
    int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
    int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;  // dynamic shared mem per reduction block
    // Each reduction block folds reduction_block_size*4 elements; +G.K absorbs
    // per-cluster rounding slack.
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;          // convergence flag
    double * d_points;       // points (N x D)
    double * d_pointsT;      // transposed points (D x N)
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;           // memberships rearranged by counting sort
    double * d_sumU;         // per-block partial membership sums
    double * d_sumC;         // per-block partial weighted point sums
    int * d_histo;           // per-cluster counts, then scan offsets
    int * d_NNT;             // nearest-cluster table
    int * d_sNNT;            // NNT sorted by cluster
#pragma endregion
#pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    // Pinned host buffers so the cudaMemcpyAsync calls below are truly async.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    // NOTE(review): the streams[] array itself is never delete[]d (leak).
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Out-of-place transpose d_pointsT = d_points^T via cuBLAS geam.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        update_memberships_kernel_v1a<<<num_blocks, block_size>>>
            (d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemset(d_histo, 0, histo_size));
        histogram_kernel<<<num_histo_blocks, blockSizeV3.histogramKernelBlockSize>>>(d_NNT, d_histo, NM_SIZE);
        // Raw counts are copied out before the in-place scan overwrites d_histo
        // with the sort offsets.
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        scan_kernel<<<1, 1>>>(d_histo, G.K);
        counting_sort_kernel<<<num_counting_sort_blocks, blockSizeV3.countingSortKernelBlockSize>>>(d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
            NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        // NOTE(review): the host reads p_histo below; this relies on
        // tmr_GPU.GetCounter() synchronizing the default stream -- confirm
        // TimingGPU's semantics.
#pragma endregion
#pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            // p_histo[j+1] presumably holds cluster j's element count; slot j is
            // recycled to hold cluster j's reduction-block count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, round-robined over streams 1..NSTREAM-1.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
        // Final fold of the per-block partial sums into new centroids on the host.
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as scratch holding the freshly computed centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        // Promote the new centroids for the next iteration.
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Stop when converged (no forced iteration pending) or at the forced
        // stop iteration.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last valid index
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
    Util::write<double>(p_centroids, G.K, G.D, G.path + "centroids.GPU.txt");
    Util::write<int>(p_NNT, G.N, G.M, G.path + "NNT.GPU.txt");
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5]; // ownership transfers to the caller (delete[])
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    cudaDeviceReset();
#pragma endregion
    return rs;
}
// GFKM GPU "version 3b": same counting-sort / per-cluster-reduction pipeline
// as GFKM_GPU_v3, but always uses membership kernel variant v1b (centroids
// staged in dynamic shared memory) and derives all block sizes from a single
// `block_size` hint via the getBlockSizeFor* helpers.
//
// Fix: the counting-sort launch previously used `num_histo_blocks` (grid size
// computed from histogramKernelBlockSize) while launching with
// countingSortKernelBlockSize threads, leaving `num_counting_sort_blocks`
// unused.  If the counting-sort block size is smaller than the histogram
// block size, the grid covers fewer than NM_SIZE threads and some elements
// are never sorted.  v3, v3a and v3c all use num_counting_sort_blocks; this
// version now matches them.
//
// Parameters:
//   f          - open file that receives the timing report
//   G          - problem data: points, initial centroids, sizes N/D/K/M,
//                fuzzifier, epsilon, max_iter, output path
//   block_size - initial block-size hint, adjusted by the helpers below
//   stop_iter  - if >= 0, force termination at this iteration index
//
// Returns a heap-allocated double[5]: {membership time, centroid time,
// convergence time, copy-back time, last iteration index}.  Caller delete[]s.
__host__ double * GFKM_GPU_v3b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;                    // N points x M nearest clusters
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;         // one double per cluster
    int centroids_size = G.K * G.D * DBL_SIZE;  // full K x D centroid matrix
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;          // K bins + 1 slot for the scan
    int initBlockSize = block_size;
    // Adjust the membership block size for variant v1b (must leave room for
    // centroids_size bytes of shared memory), then derive the remaining sizes.
    block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, initBlockSize);
    int num_blocks = roundup(G.N, block_size);
    BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
    int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
    int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
    int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;  // dynamic shared mem per reduction block
    // Each reduction block folds reduction_block_size*4 elements; +G.K absorbs
    // per-cluster rounding slack.
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;          // convergence flag
    double * d_points;       // points (N x D)
    double * d_pointsT;      // transposed points (D x N)
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;           // memberships rearranged by counting sort
    double * d_sumU;         // per-block partial membership sums
    double * d_sumC;         // per-block partial weighted point sums
    int * d_histo;           // per-cluster counts, then scan offsets
    int * d_NNT;             // nearest-cluster table
    int * d_sNNT;            // NNT sorted by cluster
#pragma endregion
#pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    // Pinned host buffers so the cudaMemcpyAsync calls below are truly async.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    // NOTE(review): the streams[] array itself is never delete[]d (leak).
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Out-of-place transpose d_pointsT = d_points^T via cuBLAS geam.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        // Variant v1b stages all centroids in centroids_size bytes of dynamic
        // shared memory.
        update_memberships_kernel_v1b<<<num_blocks, block_size, centroids_size>>>
            (d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemset(d_histo, 0, histo_size));
        histogram_kernel<<<num_histo_blocks, blockSizeV3.histogramKernelBlockSize>>>(d_NNT, d_histo, NM_SIZE);
        // Raw counts are copied out before the in-place scan overwrites d_histo
        // with the sort offsets.
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        scan_kernel<<<1, 1>>>(d_histo, G.K);
        // FIX: grid size must match the counting-sort block size (was
        // num_histo_blocks, which under-covers NM_SIZE whenever
        // countingSortKernelBlockSize < histogramKernelBlockSize).
        counting_sort_kernel<<<num_counting_sort_blocks, blockSizeV3.countingSortKernelBlockSize>>>(d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
            NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        // NOTE(review): the host reads p_histo below; this relies on
        // tmr_GPU.GetCounter() synchronizing the default stream -- confirm
        // TimingGPU's semantics.
#pragma endregion
#pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            // p_histo[j+1] presumably holds cluster j's element count; slot j is
            // recycled to hold cluster j's reduction-block count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, round-robined over streams 1..NSTREAM-1.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
        // Final fold of the per-block partial sums into new centroids on the host.
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as scratch holding the freshly computed centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        // Promote the new centroids for the next iteration.
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Stop when converged (no forced iteration pending) or at the forced
        // stop iteration.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last valid index
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5]; // ownership transfers to the caller (delete[])
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    cudaDeviceReset();
#pragma endregion
    return rs;
}
// GFKM GPU "version 3c": same counting-sort / per-cluster-reduction pipeline
// as GFKM_GPU_v3, but always uses membership kernel variant v1c (centroids
// staged in shared memory in `step` chunks, `step` supplied by the caller)
// and derives all block sizes from a single `block_size` hint.
//
// Parameters:
//   f          - open file that receives the timing report
//   G          - problem data: points, initial centroids, sizes N/D/K/M,
//                fuzzifier, epsilon, max_iter, output path
//   block_size - initial block-size hint, adjusted by the helpers below
//   stop_iter  - if >= 0, force termination at this iteration index
//   step       - chunking factor forwarded to update_memberships_kernel_v1c
//
// Returns a heap-allocated double[5]: {membership time, centroid time,
// convergence time, copy-back time, last iteration index}.  Caller delete[]s.
__host__ double * GFKM_GPU_v3c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;                 // N points x M nearest clusters
    int KD_SIZE = G.K * G.D;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;      // one double per cluster
    int centroids_size = KD_SIZE * DBL_SIZE; // full K x D centroid matrix
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;       // K bins + 1 slot for the scan
    int initBlockSize = block_size;
    // Adjust the membership block size for variant v1c, then derive the
    // counting-sort / reduction block sizes from the same hint.
    block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, initBlockSize);
    int num_blocks = roundup(G.N, block_size);
    BlockSizeV3 blockSizeV3 = getBlockSizeForCentroidKernelV3(initBlockSize);
    int num_histo_blocks = roundup(NM_SIZE, blockSizeV3.histogramKernelBlockSize);
    int num_counting_sort_blocks = roundup(NM_SIZE, blockSizeV3.countingSortKernelBlockSize);
    int reduction_block_size = min(blockSizeV3.reduceCentroidsKernelBlockSize, blockSizeV3.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE; // dynamic shared mem per reduction block
    // Each reduction block folds reduction_block_size*4 elements; +G.K absorbs
    // per-cluster rounding slack.
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    //int step = roundup(KD_SIZE, block_size);
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
    bool * d_flags;          // convergence flag
    double * d_points;       // points (N x D)
    double * d_pointsT;      // transposed points (D x N)
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;           // memberships rearranged by counting sort
    double * d_sumU;         // per-block partial membership sums
    double * d_sumC;         // per-block partial weighted point sums
    int * d_histo;           // per-cluster counts, then scan offsets
    int * d_NNT;             // nearest-cluster table
    int * d_sNNT;            // NNT sorted by cluster
#pragma endregion
#pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
#pragma endregion
#pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
#pragma endregion
#pragma region Malloc host
    // Pinned host buffers so the cudaMemcpyAsync calls below are truly async.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    // NOTE(review): the streams[] array itself is never delete[]d (leak).
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Out-of-place transpose d_pointsT = d_points^T via cuBLAS geam.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        // Variant v1c stages centroids in centroids_size bytes of dynamic
        // shared memory, processed in `step` chunks.
        update_memberships_kernel_v1c<<<num_blocks, block_size, centroids_size>>>
            (d_points, d_centroids, d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Counting sort by GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemset(d_histo, 0, histo_size));
        histogram_kernel<<<num_histo_blocks, blockSizeV3.histogramKernelBlockSize>>>(d_NNT, d_histo, NM_SIZE);
        // Raw counts are copied out before the in-place scan overwrites d_histo
        // with the sort offsets.
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        scan_kernel<<<1, 1>>>(d_histo, G.K);
        counting_sort_kernel<<<num_counting_sort_blocks, blockSizeV3.countingSortKernelBlockSize>>>(d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
            NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        // NOTE(review): the host reads p_histo below; this relies on
        // tmr_GPU.GetCounter() synchronizing the default stream -- confirm
        // TimingGPU's semantics.
#pragma endregion
#pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            // p_histo[j+1] presumably holds cluster j's element count; slot j is
            // recycled to hold cluster j's reduction-block count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, round-robined over streams 1..NSTREAM-1.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
        // Final fold of the per-block partial sums into new centroids on the host.
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as scratch holding the freshly computed centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        // Promote the new centroids for the next iteration.
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Stop when converged (no forced iteration pending) or at the forced
        // stop iteration.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
#pragma endregion
    }
    if (i == G.max_iter) i--; // loop ran to completion: report last valid index
#pragma endregion
#pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
#pragma endregion
#pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
    double *rs = new double[5]; // ownership transfers to the caller (delete[])
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    cudaDeviceReset();
#pragma endregion
    return rs;
}
// Runs one complete GFKM (generalized fuzzy k-means) clustering pass on the GPU,
// variant "v3d": a tiled membership-update kernel, a counting sort of the
// nearest-neighbour table (NNT), multi-stream per-cluster reductions, and a
// CPU-side centroid finalization each iteration.
//
// Parameters:
//   f          - open file that per-phase timings are printed to.
//   G          - problem instance (points, centroids, N/D/K/M, fuzzifier,
//                epsilon, max_iter).
//   block_size - CUDA block size for the membership/histogram/sort kernels;
//                each reduction block consumes block_size << 2 elements.
//   stop_iter  - iteration index at which to force termination (< 0 disables
//                the forced stop).
//
// Returns a heap-allocated double[5] that the CALLER must delete[]:
//   [0] membership-update time, [1] reduction/transfer time,
//   [2] convergence-check time, [3] final copy-back time,
//   [4] index of the last executed iteration.
//
// NOTE(review): several host reads of pinned buffers (p_histo, p_sumU, p_sumC,
// p_flags) follow cudaMemcpyAsync calls with no explicit synchronization in
// this function; correctness appears to rely on TimingGPU::GetCounter()
// synchronizing the device — confirm against TimingGPU's implementation.
__host__ double * GFKM_GPU_v3d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
    #pragma region Declare common variables
    int i, j, k;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;                       // one membership entry per (point, neighbour)
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int sm_size = block_size * DBL_SIZE;           // shared memory: one double per thread
    int num_blocks = roundup(G.N, block_size);
    int num_histo_blocks = roundup(NM_SIZE, block_size);
    int histo_size = (G.K+1)*INT_SIZE;
    // Each reduction block processes 4 elements per thread.
    int reduction_block_size = block_size<<2;
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int tile_size = block_size / G.D;
    int usm_size = (tile_size * G.D) * DBL_SIZE;   // shared memory for the tiled membership kernel
    int num_tiles = roundup(G.K, tile_size);
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
    #pragma endregion
    #pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_pointsT;
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;
    double * d_sumU;
    double * d_sumC;
    int * d_histo;
    int * d_NNT;
    int * d_sNNT;
    #pragma endregion
    #pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
    #pragma endregion
    #pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
    #pragma endregion
    #pragma region Malloc host
    // Pinned host memory so the cudaMemcpyAsync calls below are truly asynchronous.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
    #pragma endregion
    #pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
    #pragma endregion
    #pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Transpose d_points (row-major N x D) into d_pointsT (column-major layout)
    // so the reduction kernels can read one dimension contiguously.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
        #pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        update_memberships_kernel_v1d<<<num_blocks, block_size, usm_size>>>
            (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Counting sort by GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemset(d_histo, 0, histo_size));
        histogram_kernel<<<num_histo_blocks, block_size>>>(d_NNT, d_histo, NM_SIZE);
        // p_histo is copied BEFORE scan_kernel runs, so the host sees raw
        // per-cluster counts while the device keeps the prefix-scanned version.
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        scan_kernel<<<1, 1>>>(d_histo, G.K);
        counting_sort_kernel<<<num_histo_blocks, block_size>>>(d_histo, d_NNT, d_sNNT, d_memberships, d_sU,
            NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            // p_histo[j] is repurposed as the number of reduction blocks for
            // cluster j; p_histo[j+1] still holds that cluster's element count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size);
            reduce_memberships_kernel_GFKM<<<p_histo[j], block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, rotated over the non-membership streams.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_sNNT + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by CPU
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as the staging buffer for the new centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Convergence (p_flags[0] == false) only ends the loop when no forced
        // stop iteration is set; i == stop_iter always forces the break.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
        #pragma endregion
    }
    if (i == G.max_iter) i--;  // report the index of the last iteration actually run
    #pragma endregion
    #pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
    #pragma endregion
    #pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
    #pragma endregion
    #pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
    #pragma endregion
    #pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
    #pragma endregion
    #pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    delete[] streams;  // fix: the stream-handle array was leaked on every call
    cudaDeviceReset();
    #pragma endregion
    return rs;
}
#pragma endregion
#pragma region Version 4
// Runs one complete GFKM clustering pass on the GPU, variant "v4": the
// membership kernel is chosen from three implementations based on how many
// threads each centroid row needs, and the counting sort of variant v3 is
// replaced by a Thrust stable sort + upper_bound histogram + gather.
//
// Parameters:
//   f          - open file that per-phase timings are printed to.
//   G          - problem instance (points, centroids, N/D/K/M, fuzzifier,
//                epsilon, max_iter).
//   block_size - per-kernel block sizes (membership, gather, reductions).
//   stop_iter  - iteration index at which to force termination (< 0 disables
//                the forced stop).
//
// Returns a heap-allocated double[5] that the CALLER must delete[]:
//   [0] membership-update time, [1] reduction/transfer time,
//   [2] convergence-check time, [3] final copy-back time,
//   [4] index of the last executed iteration.
//
// NOTE(review): host reads of pinned buffers (p_histo, p_sumU, p_sumC,
// p_flags) follow cudaMemcpyAsync calls with no explicit synchronization in
// this function; correctness appears to rely on TimingGPU::GetCounter()
// synchronizing the device — confirm against TimingGPU's implementation.
__host__ double * GFKM_GPU_v4(FILE * f, GFKM & G, BlockSizeV4 block_size, int stop_iter)
{
    #pragma region Declare common variables
    int i, j, k;
    //int segmentation_size;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;
    int KD_SIZE = G.K * G.D;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;
    int centroids_size = KD_SIZE * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;
    // step = how many membership blocks it takes to tile the K*D centroid
    // matrix; it selects which membership-kernel variant is launched below.
    int step = roundup(KD_SIZE, block_size.updateMembershipsKernelBlockSize);
    int num_update_memberships_blocks = roundup(G.N, block_size.updateMembershipsKernelBlockSize);
    int num_gather_blocks = roundup(NM_SIZE, block_size.gatherKernelBlockSize);
    int reduction_block_size = min(block_size.reduceCentroidsKernelBlockSize, block_size.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;
    // Each reduction block consumes reduction_block_size << 2 elements (4 per thread).
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
    #pragma endregion
    #pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_pointsT;
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;
    double * d_sumU;
    double * d_sumC;
    int * d_histo_values;
    int * d_histo;
    int * d_NNT;
    int * d_sNNT;
    int * d_indices;
    #pragma endregion
    #pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
    #pragma endregion
    #pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo_values, histo_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_indices, NNT_size));
    // Zero only the leading counter of d_histo; the remaining entries are
    // overwritten by thrust::upper_bound each iteration.
    CudaSafeCall(cudaMemset(d_histo, 0, INT_SIZE));
    thrust::device_ptr<int> dev_indices_ptr(d_indices);
    thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
    thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
    thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
    thrust::counting_iterator<int> search(0);
    #pragma endregion
    #pragma region Malloc host
    // Pinned host memory so the cudaMemcpyAsync calls below are truly asynchronous.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
    #pragma endregion
    #pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
    #pragma endregion
    #pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Transpose d_points so the reduction kernels can read one dimension contiguously.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
        #pragma region Update memberships by GPU
        // Dispatch by how the centroid matrix fits into shared memory:
        //  step > 4  -> v1a: no shared-memory caching of centroids,
        //  step == 1 -> v1b: whole centroid matrix cached per block,
        //  else      -> v1c: centroids staged into shared memory in `step` passes.
        if (step > 4)
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1a<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize>>>
                (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else if (step == 1)
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1b<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
                (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        }
        else
        {
            tmr_GPU.StartCounter();
            update_memberships_kernel_v1c<<<num_update_memberships_blocks, block_size.updateMembershipsKernelBlockSize, centroids_size>>>
                (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
        }
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemcpyAsync(d_sNNT, d_NNT, NNT_size, cudaMemcpyDeviceToDevice));
        thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
        // Stable sort keeps equal keys in original order so d_indices is a
        // valid gather map into the unsorted membership array.
        thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
        thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
        thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        gather_kernel<<<num_gather_blocks, block_size.gatherKernelBlockSize>>>(d_indices, d_memberships, d_sU, NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            //segmentation_size = p_histo[j+1] - p_histo[j];
            // p_histo[j] is repurposed as the number of reduction blocks for
            // cluster j; p_histo[j+1] still holds that cluster's element count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, rotated over the non-membership streams.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by CPU
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as the staging buffer for the new centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Convergence only ends the loop when no forced stop iteration is set;
        // i == stop_iter always forces the break.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
        #pragma endregion
    }
    if (i == G.max_iter) i--;  // report the index of the last iteration actually run
    #pragma endregion
    #pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
    #pragma endregion
    #pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo_values);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
    cudaFree(d_indices);
    #pragma endregion
    #pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
    #pragma endregion
    #pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
    #pragma endregion
    #pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    delete[] streams;  // fix: the stream-handle array was leaked on every call
    cudaDeviceReset();
    #pragma endregion
    return rs;
}
// Runs one complete GFKM clustering pass on the GPU, variant "v4a": like v4
// but always uses membership kernel v1a (no shared-memory centroid caching)
// and derives the individual kernel block sizes from a single requested
// block_size via the getBlockSizeFor* helpers.
//
// Parameters:
//   f          - open file that per-phase timings are printed to.
//   G          - problem instance (points, centroids, N/D/K/M, fuzzifier,
//                epsilon, max_iter).
//   block_size - requested block size; adjusted per kernel by the helpers.
//   stop_iter  - iteration index at which to force termination (< 0 disables
//                the forced stop).
//
// Returns a heap-allocated double[5] that the CALLER must delete[]:
//   [0] membership-update time, [1] reduction/transfer time,
//   [2] convergence-check time, [3] final copy-back time,
//   [4] index of the last executed iteration.
//
// NOTE(review): host reads of pinned buffers (p_histo, p_sumU, p_sumC,
// p_flags) follow cudaMemcpyAsync calls with no explicit synchronization in
// this function; correctness appears to rely on TimingGPU::GetCounter()
// synchronizing the device — confirm against TimingGPU's implementation.
__host__ double * GFKM_GPU_v4a(FILE * f, GFKM & G, int block_size, int stop_iter)
{
    #pragma region Declare common variables
    int i, j, k;
    //int segmentation_size;
    int DBL_SIZE = sizeof(double);
    int INT_SIZE = sizeof(int);
    int flag_size = sizeof(bool);
    int NM_SIZE = G.N * G.M;
    int points_size = G.N * G.D * DBL_SIZE;
    int centroid_size = G.K * DBL_SIZE;
    int centroids_size = G.K * G.D * DBL_SIZE;
    int memberships_size = NM_SIZE * DBL_SIZE;
    int NNT_size = NM_SIZE * INT_SIZE;
    int sU_size = NM_SIZE * DBL_SIZE;
    int histo_size = (G.K+1)*INT_SIZE;
    int initBlockSize = block_size;
    // The requested size is adapted per kernel by the project helpers.
    block_size = getBlockSizeForMembershipKkernelV1a(initBlockSize);
    int num_blocks = roundup(G.N, block_size);
    BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
    int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
    int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
    int sm_size = reduction_block_size * DBL_SIZE;
    // Each reduction block consumes reduction_block_size << 2 elements (4 per thread).
    int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
    int sumU_size = num_reduction_blocks * centroid_size;
    int sumC_size = num_reduction_blocks * centroids_size;
    int offset;
    int offset_sumU;
    int offset_sumC;
    int offset_pointsT;
    TimingCPU tmr_CPU;
    TimingGPU tmr_GPU;
    double alpha, beta;
    double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
    #pragma endregion
    #pragma region Declare device memories
    bool * d_flags;
    double * d_points;
    double * d_pointsT;
    double * d_centroids;
    double * d_memberships;
    double * d_membershipsT;
    double * d_sU;
    double * d_sumU;
    double * d_sumC;
    int * d_histo_values;
    int * d_histo;
    int * d_NNT;
    int * d_sNNT;
    int * d_indices;
    #pragma endregion
    #pragma region Declare host pinned memories
    bool * p_flags;
    double * p_points;
    double * p_centroids;
    double * p_memberships;
    double * p_sumU;
    double * p_sumC;
    int * p_histo;
    int * p_NNT;
    #pragma endregion
    #pragma region Malloc device
    CudaSafeCall(cudaMalloc(&d_flags, flag_size));
    CudaSafeCall(cudaMalloc(&d_points, points_size));
    CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
    CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
    CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
    CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
    CudaSafeCall(cudaMalloc(&d_sU, sU_size));
    CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
    CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
    CudaSafeCall(cudaMalloc(&d_histo_values, histo_size));
    CudaSafeCall(cudaMalloc(&d_histo, histo_size));
    CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
    CudaSafeCall(cudaMalloc(&d_indices, NNT_size));
    // Zero only the leading counter of d_histo; the remaining entries are
    // overwritten by thrust::upper_bound each iteration.
    CudaSafeCall(cudaMemset(d_histo, 0, INT_SIZE));
    thrust::device_ptr<int> dev_indices_ptr(d_indices);
    thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
    thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
    thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
    thrust::counting_iterator<int> search(0);
    #pragma endregion
    #pragma region Malloc host
    // Pinned host memory so the cudaMemcpyAsync calls below are truly asynchronous.
    CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
    CudaSafeCall(cudaMallocHost(&p_points, points_size));
    CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
    CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
    CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
    CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
    CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
    CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
    #pragma endregion
    #pragma region Memories copy
    memcpy(p_points, G.points, points_size);
    memcpy(p_centroids, G.centroids, centroids_size);
    CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
    #pragma endregion
    #pragma region Declare cuda streams and transpose points
    cublasHandle_t handle;
    cudaStream_t * streams = new cudaStream_t[NSTREAM];
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamCreate(&streams[i]);
    CublasSafeCall(cublasCreate(&handle));
    alpha = 1.;
    beta = 0.;
    tmr_GPU.StartCounter();
    // Transpose d_points so the reduction kernels can read one dimension contiguously.
    CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
        &alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
    t2 = t2 + tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Main loop
    for (i = 0; i< G.max_iter; ++i){
        #pragma region Update memberships by GPU
        tmr_GPU.StartCounter();
        update_memberships_kernel_v1a<<<num_blocks, block_size>>>
            (d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
        //CudaCheckError();
        t1 = t1 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemcpyAsync(d_sNNT, d_NNT, NNT_size, cudaMemcpyDeviceToDevice));
        thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
        // Stable sort keeps equal keys in original order so d_indices is a
        // valid gather map into the unsorted membership array.
        thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
        thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
        thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
        CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
        gather_kernel<<<num_gather_blocks, blockSizeV4.gatherKernelBlockSize>>>(d_indices, d_memberships, d_sU, NM_SIZE, G.M);
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by GPU
        offset = 0;
        offset_sumU = 0;
        offset_sumC = 0;
        tmr_GPU.StartCounter();
        for (j = 0; j < G.K; ++j){
            //segmentation_size = p_histo[j+1] - p_histo[j];
            // p_histo[j] is repurposed as the number of reduction blocks for
            // cluster j; p_histo[j+1] still holds that cluster's element count.
            p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
            reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
                (d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
            offset_pointsT = 0;
            // One reduction per dimension, rotated over the non-membership streams.
            for (k = 0; k < G.D; ++k){
                reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
                    (d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
                offset_sumC += p_histo[j];
                offset_pointsT += G.N;
            }
            offset_sumU += p_histo[j];
            offset += p_histo[j+1];
        }
        CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Reducing centroids by CPU
        tmr_CPU.start();
        reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
        tmr_CPU.stop();
        t2 = t2 + tmr_CPU.elapsed();
        tmr_GPU.StartCounter();
        // d_sumC is reused as the staging buffer for the new centroids.
        CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        #pragma endregion
        #pragma region Getting and checking stop-condition
        tmr_GPU.StartCounter();
        check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
        CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
        t3 = t3 + tmr_GPU.GetCounter();
        tmr_GPU.StartCounter();
        CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
        t2 = t2 + tmr_GPU.GetCounter();
        // Convergence only ends the loop when no forced stop iteration is set;
        // i == stop_iter always forces the break.
        if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
            break;
        #pragma endregion
    }
    if (i == G.max_iter) i--;  // report the index of the last iteration actually run
    #pragma endregion
    #pragma region Copying device back to host
    tmr_GPU.StartCounter();
    CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
    t4 = tmr_GPU.GetCounter();
    #pragma endregion
    #pragma region Writing results to files
    writeToFiles(p_centroids, p_NNT, G);
    Util::print_times(f, t1, t2, t3, i+1);
    #pragma endregion
    #pragma region Cuda free device memories
    cudaFree(d_flags);
    cudaFree(d_points);
    cudaFree(d_pointsT);
    cudaFree(d_centroids);
    cudaFree(d_memberships);
    cudaFree(d_membershipsT);
    cudaFree(d_sU);
    cudaFree(d_sumU);
    cudaFree(d_sumC);
    cudaFree(d_histo_values);
    cudaFree(d_histo);
    cudaFree(d_NNT);
    cudaFree(d_sNNT);
    cudaFree(d_indices);
    #pragma endregion
    #pragma region Cuda free host pinned memories
    cudaFreeHost(p_flags);
    cudaFreeHost(p_points);
    cudaFreeHost(p_centroids);
    cudaFreeHost(p_memberships);
    cudaFreeHost(p_sumU);
    cudaFreeHost(p_sumC);
    cudaFreeHost(p_histo);
    cudaFreeHost(p_NNT);
    #pragma endregion
    #pragma region Get total time and last iteration index
    double *rs = new double[5];
    rs[0] = t1;
    rs[1] = t2;
    rs[2] = t3;
    rs[3] = t4;
    rs[4] = (double)i;
    #pragma endregion
    #pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
    CublasSafeCall(cublasDestroy(handle));
    for (i = 0; i < NSTREAM; ++i)
        cudaStreamDestroy(streams[i]);
    delete[] streams;  // fix: the stream-handle array was leaked on every call
    cudaDeviceReset();
    #pragma endregion
    return rs;
}
__host__ double * GFKM_GPU_v4b(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
int i, j, k;
//int segmentation_size;
int DBL_SIZE = sizeof(double);
int INT_SIZE = sizeof(int);
int flag_size = sizeof(bool);
int NM_SIZE = G.N * G.M;
int points_size = G.N * G.D * DBL_SIZE;
int centroid_size = G.K * DBL_SIZE;
int centroids_size = G.K * G.D * DBL_SIZE;
int memberships_size = NM_SIZE * DBL_SIZE;
int NNT_size = NM_SIZE * INT_SIZE;
int sU_size = NM_SIZE * DBL_SIZE;
int histo_size = (G.K+1)*INT_SIZE;
int initBlockSize = block_size;
block_size = getBlockSizeForMembershipKkernelV1b(centroids_size, initBlockSize);
int num_blocks = roundup(G.N, block_size);
BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
int sm_size = reduction_block_size * DBL_SIZE;
int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
int sumU_size = num_reduction_blocks * centroid_size;
int sumC_size = num_reduction_blocks * centroids_size;
int offset;
int offset_sumU;
int offset_sumC;
int offset_pointsT;
TimingCPU tmr_CPU;
TimingGPU tmr_GPU;
double alpha, beta;
double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
bool * d_flags;
double * d_points;
double * d_pointsT;
double * d_centroids;
double * d_memberships;
double * d_membershipsT;
double * d_sU;
double * d_sumU;
double * d_sumC;
int * d_histo_values;
int * d_histo;
int * d_NNT;
int * d_sNNT;
int * d_indices;
#pragma endregion
#pragma region Declare host pinned memories
bool * p_flags;
double * p_points;
double * p_centroids;
double * p_memberships;
double * p_sumU;
double * p_sumC;
int * p_histo;
int * p_NNT;
#pragma endregion
#pragma region Malloc device
CudaSafeCall(cudaMalloc(&d_flags, flag_size));
CudaSafeCall(cudaMalloc(&d_points, points_size));
CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
CudaSafeCall(cudaMalloc(&d_sU, sU_size));
CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
CudaSafeCall(cudaMalloc(&d_histo_values, histo_size));
CudaSafeCall(cudaMalloc(&d_histo, histo_size));
CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
CudaSafeCall(cudaMalloc(&d_indices, NNT_size));
CudaSafeCall(cudaMemset(d_histo, 0, INT_SIZE));
thrust::device_ptr<int> dev_indices_ptr(d_indices);
thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
CudaSafeCall(cudaMallocHost(&p_points, points_size));
CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
memcpy(p_points, G.points, points_size);
memcpy(p_centroids, G.centroids, centroids_size);
CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
cublasHandle_t handle;
cudaStream_t * streams = new cudaStream_t[NSTREAM];
for (i = 0; i < NSTREAM; ++i)
cudaStreamCreate(&streams[i]);
CublasSafeCall(cublasCreate(&handle));
alpha = 1.;
beta = 0.;
tmr_GPU.StartCounter();
CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
tmr_GPU.StartCounter();
update_memberships_kernel_v1b<<<num_blocks, block_size, centroids_size>>>
(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, G.fuzzifier);
//CudaCheckError();
t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_sNNT, d_NNT, NNT_size, cudaMemcpyDeviceToDevice));
thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
gather_kernel<<<num_gather_blocks, blockSizeV4.gatherKernelBlockSize>>>(d_indices, d_memberships, d_sU, NM_SIZE, G.M);
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
offset = 0;
offset_sumU = 0;
offset_sumC = 0;
tmr_GPU.StartCounter();
for (j = 0; j < G.K; ++j){
//segmentation_size = p_histo[j+1] - p_histo[j];
p_histo[j] = roundup(p_histo[j+1], reduction_block_size << 2);
reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
(d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
offset_pointsT = 0;
for (k = 0; k < G.D; ++k){
reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
(d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
offset_sumC += p_histo[j];
offset_pointsT += G.N;
}
offset_sumU += p_histo[j];
offset += p_histo[j+1];
}
CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
tmr_CPU.start();
reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
tmr_CPU.stop();
t2 = t2 + tmr_CPU.elapsed();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
tmr_GPU.StartCounter();
check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
t3 = t3 + tmr_GPU.GetCounter();
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
t2 = t2 + tmr_GPU.GetCounter();
if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
break;
#pragma endregion
}
if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
tmr_GPU.StartCounter();
CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
writeToFiles(p_centroids, p_NNT, G);
Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
cudaFree(d_flags);
cudaFree(d_points);
cudaFree(d_pointsT);
cudaFree(d_centroids);
cudaFree(d_memberships);
cudaFree(d_membershipsT);
cudaFree(d_sU);
cudaFree(d_sumU);
cudaFree(d_sumC);
cudaFree(d_histo_values);
cudaFree(d_histo);
cudaFree(d_NNT);
cudaFree(d_sNNT);
cudaFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
cudaFreeHost(p_flags);
cudaFreeHost(p_points);
cudaFreeHost(p_centroids);
cudaFreeHost(p_memberships);
cudaFreeHost(p_sumU);
cudaFreeHost(p_sumC);
cudaFreeHost(p_histo);
cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
double *rs = new double[5];
rs[0] = t1;
rs[1] = t2;
rs[2] = t3;
rs[3] = t4;
rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
CublasSafeCall(cublasDestroy(handle));
for (i = 0; i < NSTREAM; ++i)
cudaStreamDestroy(streams[i]);
cudaDeviceReset();
#pragma endregion
return rs;
}
// GFKM (Generalized Fuzzy K-Means) GPU driver, variant "v4c".
// Per iteration:
//   1. membership-update kernel v1c (centroid array staged through shared
//      memory in `step` chunks),
//   2. Thrust stable-sort of the nearest-neighbour table (NNT) to group
//      membership entries by cluster, plus a histogram of segment sizes
//      built via upper_bound + adjacent_difference,
//   3. per-cluster segmented reduction kernels spread across NSTREAM streams,
//   4. final centroid combination on the CPU and a convergence check on GPU.
//
// Parameters:
//   f          - open file the timing summary is printed to.
//   G          - problem instance (points, centroids, N/D/K/M, fuzzifier,
//                max_iter, epsilon); source and sink of the host-side data.
//   block_size - requested CUDA block size; <= 0 defers to the occupancy API.
//   stop_iter  - iteration index to force a stop at (< 0: run to convergence).
//   step       - chunk count used by membership kernel v1c to tile the
//                centroid array through shared memory.
//
// Returns a heap-allocated double[5]: {t1 membership time, t2 centroid time,
// t3 convergence-check time, t4 final copy-back time, last iteration index}.
// The caller owns the array and must delete[] it.
__host__ double * GFKM_GPU_v4c(FILE * f, GFKM & G, int block_size, int stop_iter, int step)
{
#pragma region Declare common variables
	int i, j, k;
	//int segmentation_size;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int KD_SIZE = G.K * G.D;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = KD_SIZE * DBL_SIZE;
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;
	int histo_size = (G.K+1)*INT_SIZE;
	int initBlockSize = block_size;
	// The v1c membership kernel needs centroids_size bytes of dynamic shared
	// memory, so the occupancy query accounts for it when initBlockSize <= 0.
	block_size = getBlockSizeForMembershipKkernelV1c(centroids_size, initBlockSize);
	int num_blocks = roundup(G.N, block_size);
	BlockSizeV4 blockSizeV4 = getBlockSizeForCentroidKernelV4(initBlockSize);
	int num_gather_blocks = roundup(NM_SIZE, blockSizeV4.gatherKernelBlockSize);
	// Both reduction kernels are launched with one shared configuration, so
	// take the smaller of the two occupancy-derived block sizes.
	int reduction_block_size = min(blockSizeV4.reduceCentroidsKernelBlockSize, blockSizeV4.reduceMembershipsKernelBlockSize);
	int sm_size = reduction_block_size * DBL_SIZE;
	// Each reduction block covers 4x its thread count in elements
	// (reduction_block_size << 2); "+ G.K" adds slack for the per-cluster
	// segment-boundary rounding done inside the main loop.
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size<< 2) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	//int step = roundup(KD_SIZE, block_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;
	double * d_centroids;
	double * d_memberships;
	// NOTE(review): d_membershipsT and d_histo_values are allocated and freed
	// but never otherwise used in this variant — candidates for removal.
	double * d_membershipsT;
	double * d_sU;
	double * d_sumU;
	double * d_sumC;
	int * d_histo_values;
	int * d_histo;
	int * d_NNT;
	int * d_sNNT;
	int * d_indices;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sU, sU_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_histo_values, histo_size));
	CudaSafeCall(cudaMalloc(&d_histo, histo_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
	CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
	CudaSafeCall(cudaMalloc(&d_indices, NNT_size));
	// Only d_histo[0] needs zeroing: upper_bound fills slots 1..K each
	// iteration, and adjacent_difference keeps slot 0 as its own input.
	CudaSafeCall(cudaMemset(d_histo, 0, INT_SIZE));
	thrust::device_ptr<int> dev_indices_ptr(d_indices);
	thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
	thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
	thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
	thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
	CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	// NOTE(review): `streams` is never delete[]d before return — the array
	// itself (not the stream handles, which are destroyed below) leaks.
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Build d_pointsT, the dimension-major copy of the points: dimension k
	// occupies elements [k*G.N, (k+1)*G.N) — see offset_pointsT += G.N below.
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i< G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v1c<<<num_blocks, block_size, centroids_size>>>
			(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, step, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_sNNT, d_NNT, NNT_size, cudaMemcpyDeviceToDevice));
		thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
		thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
		// After these two calls, d_histo[j+1] holds the number of NNT entries
		// belonging to cluster j (cumulative ends differenced in place).
		thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
		thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
		// NOTE(review): p_histo is copied with cudaMemcpyAsync on the default
		// stream and read on the host in the reduction loop below without an
		// explicit synchronize; this appears to rely on tmr_GPU.GetCounter()
		// synchronizing the device — confirm against TimingGPU's implementation.
		CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
		gather_kernel<<<num_gather_blocks, blockSizeV4.gatherKernelBlockSize>>>(d_indices, d_memberships, d_sU, NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		for (j = 0; j < G.K; ++j){
			//segmentation_size = p_histo[j+1] - p_histo[j];
			// p_histo[j] is repurposed: it now holds the reduction grid size
			// for cluster j — the segment size p_histo[j+1] rounded up to
			// 4x the reduction block size.
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size <<2);
			reduce_memberships_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[0]>>>
				(d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			// One launch per dimension, round-robined over streams[1..NSTREAM-1].
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_GFKM<<<p_histo[j], reduction_block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		// NOTE(review): these default-stream async copies are read by the CPU
		// reduction below; ordering with the non-default-stream kernels above
		// presumably relies on legacy default-stream semantics plus the timer
		// sync — confirm.
		CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
		CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// d_sumC is reused as scratch for the freshly reduced centroids.
		CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		// One block per cluster, one thread per dimension.
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		// Stop when converged (p_flags[0] false, presumably "some centroid
		// moved more than epsilon") with no forced stop pending, or
		// unconditionally at i == stop_iter. p_flags was copied
		// asynchronously above — NOTE(review): confirm the timer sync covers it.
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	// Clamp so the reported last-iteration index stays in range when the
	// loop ran to max_iter without breaking.
	if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sU);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_histo_values);
	cudaFree(d_histo);
	cudaFree(d_NNT);
	cudaFree(d_sNNT);
	cudaFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_histo);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	// `i` is safely reused here: its value was captured into rs[4] above.
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	// NOTE(review): cudaDeviceReset() tears down the CUDA context for the
	// whole process — any device state held by the caller is lost.
	cudaDeviceReset();
#pragma endregion
	return rs;
}
// GFKM (Generalized Fuzzy K-Means) GPU driver, variant "v4d".
// Same iteration structure as v4c, but the membership kernel is v1d: it
// tiles the centroid array through usm_size bytes of shared memory in
// num_tiles tiles of tile_size (= block_size / G.D) centroids each, and all
// kernels reuse the caller-supplied block_size directly (no occupancy query).
//
// Parameters:
//   f          - open file the timing summary is printed to.
//   G          - problem instance (points, centroids, N/D/K/M, fuzzifier,
//                max_iter, epsilon); source and sink of the host-side data.
//   block_size - CUDA block size used by every kernel in this variant;
//                must be >= G.D for tile_size to be non-zero.
//   stop_iter  - iteration index to force a stop at (< 0: run to convergence).
//
// Returns a heap-allocated double[5]: {t1 membership time, t2 centroid time,
// t3 convergence-check time, t4 final copy-back time, last iteration index}.
// The caller owns the array and must delete[] it.
__host__ double * GFKM_GPU_v4d(FILE * f, GFKM & G, int block_size, int stop_iter)
{
#pragma region Declare common variables
	int i, j, k;
	//int segmentation_size;
	int DBL_SIZE = sizeof(double);
	int INT_SIZE = sizeof(int);
	int flag_size = sizeof(bool);
	int NM_SIZE = G.N * G.M;
	int points_size = G.N * G.D * DBL_SIZE;
	int centroid_size = G.K * DBL_SIZE;
	int centroids_size = G.K * G.D * DBL_SIZE;
	int memberships_size = NM_SIZE * DBL_SIZE;
	int NNT_size = NM_SIZE * INT_SIZE;
	int sU_size = NM_SIZE * DBL_SIZE;
	int sm_size = block_size * DBL_SIZE;
	int num_blocks = roundup(G.N, block_size);
	int num_histo_blocks = roundup(NM_SIZE, block_size);
	int histo_size = (G.K+1)*INT_SIZE;
	// Each reduction block covers 4x its thread count in elements.
	int reduction_block_size = block_size<<2;
	int num_reduction_blocks = roundup(NM_SIZE, reduction_block_size) + G.K;
	int sumU_size = num_reduction_blocks * centroid_size;
	int sumC_size = num_reduction_blocks * centroids_size;
	// Shared-memory tiling of the centroid array for kernel v1d.
	int tile_size = block_size / G.D;
	int usm_size = (tile_size * G.D) * DBL_SIZE;
	int num_tiles = roundup(G.K, tile_size);
	int offset;
	int offset_sumU;
	int offset_sumC;
	int offset_pointsT;
	TimingCPU tmr_CPU;
	TimingGPU tmr_GPU;
	double alpha, beta;
	double t1 = 0.0, t2 = 0.0, t3 = 0.0, t4;
#pragma endregion
#pragma region Declare device memories
	bool * d_flags;
	double * d_points;
	double * d_pointsT;
	double * d_centroids;
	double * d_memberships;
	// NOTE(review): d_membershipsT and d_histo_values are allocated and freed
	// but never otherwise used in this variant — candidates for removal.
	double * d_membershipsT;
	double * d_sU;
	double * d_sumU;
	double * d_sumC;
	int * d_histo_values;
	int * d_histo;
	int * d_NNT;
	int * d_sNNT;
	int * d_indices;
#pragma endregion
#pragma region Declare host pinned memories
	bool * p_flags;
	double * p_points;
	double * p_centroids;
	double * p_memberships;
	double * p_sumU;
	double * p_sumC;
	int * p_histo;
	int * p_NNT;
#pragma endregion
#pragma region Malloc device
	CudaSafeCall(cudaMalloc(&d_flags, flag_size));
	CudaSafeCall(cudaMalloc(&d_points, points_size));
	CudaSafeCall(cudaMalloc(&d_pointsT, points_size));
	CudaSafeCall(cudaMalloc(&d_centroids, centroids_size));
	CudaSafeCall(cudaMalloc(&d_memberships, memberships_size));
	CudaSafeCall(cudaMalloc(&d_membershipsT, memberships_size));
	CudaSafeCall(cudaMalloc(&d_sU, sU_size));
	CudaSafeCall(cudaMalloc(&d_sumU, sumU_size));
	CudaSafeCall(cudaMalloc(&d_sumC, sumC_size));
	CudaSafeCall(cudaMalloc(&d_histo_values, histo_size));
	CudaSafeCall(cudaMalloc(&d_histo, histo_size));
	CudaSafeCall(cudaMalloc(&d_NNT, NNT_size));
	CudaSafeCall(cudaMalloc(&d_sNNT, NNT_size));
	CudaSafeCall(cudaMalloc(&d_indices, NNT_size));
	// Only d_histo[0] needs zeroing: upper_bound fills slots 1..K each
	// iteration, and adjacent_difference keeps slot 0 as its own input.
	CudaSafeCall(cudaMemset(d_histo, 0, INT_SIZE));
	thrust::device_ptr<int> dev_indices_ptr(d_indices);
	thrust::device_ptr<int> dev_sNNT_ptr(d_sNNT);
	thrust::device_ptr<int> dev_histo_values_ptr(d_histo_values);
	thrust::device_ptr<int> dev_histo_counts_ptr(d_histo);
	thrust::counting_iterator<int> search(0);
#pragma endregion
#pragma region Malloc host
	CudaSafeCall(cudaMallocHost(&p_flags, flag_size));
	CudaSafeCall(cudaMallocHost(&p_points, points_size));
	CudaSafeCall(cudaMallocHost(&p_centroids, centroids_size));
	CudaSafeCall(cudaMallocHost(&p_memberships, memberships_size));
	CudaSafeCall(cudaMallocHost(&p_sumU, sumU_size));
	CudaSafeCall(cudaMallocHost(&p_sumC, sumC_size));
	CudaSafeCall(cudaMallocHost(&p_NNT, NNT_size));
	CudaSafeCall(cudaMallocHost(&p_histo, histo_size));
#pragma endregion
#pragma region Memories copy
	memcpy(p_points, G.points, points_size);
	memcpy(p_centroids, G.centroids, centroids_size);
	CudaSafeCall(cudaMemcpy(d_points, p_points, points_size, cudaMemcpyHostToDevice));
	CudaSafeCall(cudaMemcpy(d_centroids, p_centroids, centroids_size, cudaMemcpyHostToDevice));
#pragma endregion
#pragma region Declare cuda streams and transpose points
	cublasHandle_t handle;
	// NOTE(review): `streams` is never delete[]d before return — the array
	// itself (not the stream handles, which are destroyed below) leaks.
	cudaStream_t * streams = new cudaStream_t[NSTREAM];
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamCreate(&streams[i]);
	CublasSafeCall(cublasCreate(&handle));
	alpha = 1.;
	beta = 0.;
	tmr_GPU.StartCounter();
	// Build d_pointsT, the dimension-major copy of the points: dimension k
	// occupies elements [k*G.N, (k+1)*G.N) — see offset_pointsT += G.N below.
	CublasSafeCall(cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, G.N, G.D,
		&alpha, d_points, G.D, &beta, d_points, G.D, d_pointsT, G.N));
	t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Main loop
	for (i = 0; i < G.max_iter; ++i){
#pragma region Update memberships by GPU
		tmr_GPU.StartCounter();
		update_memberships_kernel_v1d<<<num_blocks, block_size, usm_size>>>
			(d_points, d_centroids,d_memberships, d_NNT, G.N, G.D, G.K, G.M, num_tiles, tile_size, G.fuzzifier);
		//CudaCheckError();
		t1 = t1 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Sort NNT, calculate histogram, and rearrange data using Thrust on GPU
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_sNNT, d_NNT, NNT_size, cudaMemcpyDeviceToDevice));
		thrust::sequence(dev_indices_ptr, dev_indices_ptr + NM_SIZE);
		thrust::stable_sort_by_key(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, dev_indices_ptr);
		// After these two calls, d_histo[j+1] holds the number of NNT entries
		// belonging to cluster j (cumulative ends differenced in place).
		thrust::upper_bound(dev_sNNT_ptr, dev_sNNT_ptr + NM_SIZE, search, search + G.K, dev_histo_counts_ptr+1);
		thrust::adjacent_difference(dev_histo_counts_ptr, dev_histo_counts_ptr + G.K + 1, dev_histo_counts_ptr);
		// NOTE(review): p_histo is copied with cudaMemcpyAsync on the default
		// stream and read on the host in the reduction loop below without an
		// explicit synchronize; this appears to rely on tmr_GPU.GetCounter()
		// synchronizing the device — confirm against TimingGPU's implementation.
		CudaSafeCall(cudaMemcpyAsync(p_histo, d_histo, histo_size, cudaMemcpyDeviceToHost));
		gather_kernel<<<num_histo_blocks, block_size>>>(d_indices, d_memberships, d_sU, NM_SIZE, G.M);
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by GPU
		offset = 0;
		offset_sumU = 0;
		offset_sumC = 0;
		tmr_GPU.StartCounter();
		for (j = 0; j < G.K; ++j){
			//segmentation_size = p_histo[j+1] - p_histo[j];
			// p_histo[j] is repurposed: it now holds the reduction grid size
			// for cluster j — the segment size p_histo[j+1] rounded up to
			// reduction_block_size (4x the thread block size).
			p_histo[j] = roundup(p_histo[j+1], reduction_block_size);
			reduce_memberships_kernel_GFKM<<<p_histo[j], block_size, sm_size, streams[0]>>>
				(d_sU + offset, d_sumU + offset_sumU, p_histo[j+1]);
			offset_pointsT = 0;
			// One launch per dimension, round-robined over streams[1..NSTREAM-1].
			for (k = 0; k < G.D; ++k){
				reduce_centroids_kernel_GFKM<<<p_histo[j], block_size, sm_size, streams[k % (NSTREAM-1)+1]>>>
					(d_pointsT + offset_pointsT, d_sU + offset, d_indices + offset, d_sumC + offset_sumC, p_histo[j+1]);
				offset_sumC += p_histo[j];
				offset_pointsT += G.N;
			}
			offset_sumU += p_histo[j];
			offset += p_histo[j+1];
		}
		// NOTE(review): these default-stream async copies are read by the CPU
		// reduction below; ordering with the non-default-stream kernels above
		// presumably relies on legacy default-stream semantics plus the timer
		// sync — confirm.
		CudaSafeCall(cudaMemcpyAsync(p_sumU, d_sumU, offset_sumU * DBL_SIZE, cudaMemcpyDeviceToHost));
		CudaSafeCall(cudaMemcpyAsync(p_sumC, d_sumC, offset_sumC * DBL_SIZE, cudaMemcpyDeviceToHost));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Reducing centroids by CPU
		tmr_CPU.start();
		reduce_centroids(p_centroids, p_sumC, p_sumU, p_histo, G.D, G.K);
		tmr_CPU.stop();
		t2 = t2 + tmr_CPU.elapsed();
		tmr_GPU.StartCounter();
		// d_sumC is reused as scratch for the freshly reduced centroids.
		CudaSafeCall(cudaMemcpyAsync(d_sumC, p_centroids, centroids_size, cudaMemcpyHostToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
#pragma endregion
#pragma region Getting and checking stop-condition
		tmr_GPU.StartCounter();
		// One block per cluster, one thread per dimension.
		check_convergence<<<G.K, G.D>>>(d_centroids, d_sumC, d_flags, G.epsilon);
		CudaSafeCall(cudaMemcpyAsync(p_flags, d_flags, flag_size, cudaMemcpyDeviceToHost));
		t3 = t3 + tmr_GPU.GetCounter();
		tmr_GPU.StartCounter();
		CudaSafeCall(cudaMemcpyAsync(d_centroids, d_sumC, centroids_size, cudaMemcpyDeviceToDevice));
		t2 = t2 + tmr_GPU.GetCounter();
		// Stop when converged (p_flags[0] false, presumably "some centroid
		// moved more than epsilon") with no forced stop pending, or
		// unconditionally at i == stop_iter. p_flags was copied
		// asynchronously above — NOTE(review): confirm the timer sync covers it.
		if ((!p_flags[0] && (stop_iter < 0 || i==stop_iter)) || i==stop_iter)
			break;
#pragma endregion
	}
	// Clamp so the reported last-iteration index stays in range when the
	// loop ran to max_iter without breaking.
	if (i == G.max_iter) i--;
#pragma endregion
#pragma region Copying device back to host
	tmr_GPU.StartCounter();
	CudaSafeCall(cudaMemcpyAsync(p_centroids, d_centroids, centroids_size, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaMemcpyAsync(p_NNT, d_NNT, NNT_size, cudaMemcpyDeviceToHost));
	t4 = tmr_GPU.GetCounter();
#pragma endregion
#pragma region Writing results to files
	writeToFiles(p_centroids, p_NNT, G);
	Util::print_times(f, t1, t2, t3, i+1);
#pragma endregion
#pragma region Cuda free device memories
	cudaFree(d_flags);
	cudaFree(d_points);
	cudaFree(d_pointsT);
	cudaFree(d_centroids);
	cudaFree(d_memberships);
	cudaFree(d_membershipsT);
	cudaFree(d_sU);
	cudaFree(d_sumU);
	cudaFree(d_sumC);
	cudaFree(d_histo_values);
	cudaFree(d_histo);
	cudaFree(d_NNT);
	cudaFree(d_sNNT);
	cudaFree(d_indices);
#pragma endregion
#pragma region Cuda free host pinned memories
	cudaFreeHost(p_flags);
	cudaFreeHost(p_points);
	cudaFreeHost(p_centroids);
	cudaFreeHost(p_memberships);
	cudaFreeHost(p_sumU);
	cudaFreeHost(p_sumC);
	cudaFreeHost(p_histo);
	cudaFreeHost(p_NNT);
#pragma endregion
#pragma region Get total time and last iteration index
	double *rs = new double[5];
	rs[0] = t1;
	rs[1] = t2;
	rs[2] = t3;
	rs[3] = t4;
	rs[4] = (double)i;
#pragma endregion
#pragma region CublasDestroy, CudaStreamDestroy, and DeviceReset
	CublasSafeCall(cublasDestroy(handle));
	// `i` is safely reused here: its value was captured into rs[4] above.
	for (i = 0; i < NSTREAM; ++i)
		cudaStreamDestroy(streams[i]);
	// NOTE(review): cudaDeviceReset() tears down the CUDA context for the
	// whole process — any device state held by the caller is lost.
	cudaDeviceReset();
#pragma endregion
	return rs;
}
#pragma endregion
// Public entry point: dispatches to one of the four GPU implementations of
// Generalized Fuzzy K-Means based on `mode` (1..3 select v1..v3; any other
// value selects v4). Each branch first resolves its kernel block sizes —
// either the caller-supplied block_size or occupancy-derived defaults.
//
// Parameters:
//   f          - open file the selected implementation prints timings to.
//   G          - problem instance handed through unchanged.
//   block_size - forwarded block-size request (<= 0 means auto-select).
//   stop_iter  - forwarded forced-stop iteration (< 0: run to convergence).
//   mode       - implementation selector.
//
// Returns the heap-allocated double[5] timing/iteration array produced by
// the selected implementation; the caller owns it (delete[]).
//
// (The large block of commented-out legacy step-based dispatch code that
// used to live here was dead and has been removed.)
__host__ double * GFKM_GPU(FILE * f, GFKM & G, int block_size, int stop_iter, int mode)
{
	if (mode == 1)
	{
		BlockSizeV1 blockSizeV1 = getBlockSizesForVersion1(block_size);
		return GFKM_GPU_v1(f, G, blockSizeV1, stop_iter);
	}
	else if (mode == 2)
	{
		BlockSizeV2 blockSizeV2 = getBlockSizesForVersion2(block_size);
		return GFKM_GPU_v2(f, G, blockSizeV2, stop_iter);
	}
	else if (mode == 3)
	{
		BlockSizeV3 blockSizeV3 = getBlockSizesForVersion3(block_size);
		return GFKM_GPU_v3(f, G, blockSizeV3, stop_iter);
	}
	else
	{
		BlockSizeV4 blockSizeV4 = getBlockSizesForVersion4(block_size);
		return GFKM_GPU_v4(f, G, blockSizeV4, stop_iter);
	}
}
#pragma region Getting block size functions
// Picks the block size for update_memberships_kernel_v1a.
// A positive caller-supplied size wins; otherwise the occupancy API chooses
// the block size that maximises potential occupancy (capped at BlockSizeLimit).
inline __host__ int getBlockSizeForMembershipKkernelV1a(int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v1a, 0, BlockSizeLimit));
	printf("Updating memberships kernel block size version 1a = %d\n", blockSize);
	return blockSize;
}
// Picks the block size for update_memberships_kernel_v1b.
// dynamicSMemSize is the per-block dynamic shared-memory requirement the
// occupancy calculation must honour; a positive initBlockSize overrides.
inline __host__ int getBlockSizeForMembershipKkernelV1b(int dynamicSMemSize, int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v1b, dynamicSMemSize, BlockSizeLimit));
	printf("Updating memberships kernel block size version 1b = %d\n", blockSize);
	return blockSize;
}
// Picks the block size for update_memberships_kernel_v1c.
// dynamicSMemSize is the per-block dynamic shared-memory requirement the
// occupancy calculation must honour; a positive initBlockSize overrides.
inline __host__ int getBlockSizeForMembershipKkernelV1c(int dynamicSMemSize, int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v1c, dynamicSMemSize, BlockSizeLimit));
	printf("Updating memberships kernel block size version 1c = %d\n", blockSize);
	return blockSize;
}
// Picks the block size for update_memberships_kernel_v2a.
// A positive caller-supplied size wins; otherwise the occupancy API chooses
// the block size that maximises potential occupancy (capped at BlockSizeLimit).
inline __host__ int getBlockSizeForMembershipKkernelV2a(int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v2a, 0, BlockSizeLimit));
	printf("Updating memberships kernel block size version 2a = %d\n", blockSize);
	return blockSize;
}
// Picks the block size for update_memberships_kernel_v2b.
// dynamicSMemSize is the per-block dynamic shared-memory requirement the
// occupancy calculation must honour; a positive initBlockSize overrides.
inline __host__ int getBlockSizeForMembershipKkernelV2b(int dynamicSMemSize, int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v2b, dynamicSMemSize, BlockSizeLimit));
	printf("Updating memberships kernel block size version 2b = %d\n", blockSize);
	return blockSize;
}
// Picks the block size for update_memberships_kernel_v2c.
// dynamicSMemSize is the per-block dynamic shared-memory requirement the
// occupancy calculation must honour; a positive initBlockSize overrides.
inline __host__ int getBlockSizeForMembershipKkernelV2c(int dynamicSMemSize, int initBlockSize)
{
	if (initBlockSize > 0) return initBlockSize;
	int minGridSize;
	int blockSize;
	// Check the API result (it was previously discarded): a failure would
	// leave blockSize uninitialized and silently poison every launch.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
		update_memberships_kernel_v2c, dynamicSMemSize, BlockSizeLimit));
	printf("Updating memberships kernel block size version 2c = %d\n", blockSize);
	return blockSize;
}
// Maps a block size to the dynamic shared-memory byte count the reduction
// kernels need: one double per thread. Used as the size functor for
// cudaOccupancyMaxPotentialBlockSizeVariableSMem.
inline __host__ int blockSizeToDynamicSMemSize(int blockSize)
{
	// Equivalent to the previous `blockSize << 3`, but states the intent:
	// sizeof(double) (= 8) bytes of shared memory per thread.
	return blockSize * static_cast<int>(sizeof(double));
}
// Block sizes for the version-2 centroid-update kernels.
//   initBlockSize > 0  : use it for every kernel;
//   initBlockSize == 0 : BlockSizeV2 defaults;
//   initBlockSize < 0  : query the occupancy API per kernel (shared memory
//                        grows with block size via blockSizeToDynamicSMemSize).
inline __host__ BlockSizeV2 getBlockSizeForCentroidKernelV2(int initBlockSize)
{
	if (initBlockSize > 0) {
		return BlockSizeV2(initBlockSize);
	}
	else if (initBlockSize == 0) return BlockSizeV2();
	BlockSizeV2 blockSize;
	int minGridSize;
	// Check the API results (previously discarded): a failure would leave
	// the selected block sizes uninitialized.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceMembershipsKernelBlockSize),
		reduce_memberships_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceCentroidsKernelBlockSize),
		reduce_centroids_kernel_FKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	printf("ReduceMembershipsKernelBlockSize version 2 = %d\n", blockSize.reduceMembershipsKernelBlockSize);
	printf("ReduceCentroidsKernelBlockSize version 2 = %d\n", blockSize.reduceCentroidsKernelBlockSize);
	return blockSize;
}
// Block sizes for the version-3 centroid-update pipeline (histogram,
// counting sort, and the two GFKM reduction kernels).
//   initBlockSize > 0  : use it for every kernel;
//   initBlockSize == 0 : BlockSizeV3 defaults;
//   initBlockSize < 0  : query the occupancy API per kernel.
inline __host__ BlockSizeV3 getBlockSizeForCentroidKernelV3(int initBlockSize)
{
	if (initBlockSize > 0) {
		return BlockSizeV3(initBlockSize);
	}
	else if (initBlockSize == 0) return BlockSizeV3();
	BlockSizeV3 blockSize;
	int minGridSize;
	// Check the API results (previously discarded): a failure would leave
	// the selected block sizes uninitialized.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &(blockSize.histogramKernelBlockSize),
		histogram_kernel, 0, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &(blockSize.countingSortKernelBlockSize),
		counting_sort_kernel, 0, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceMembershipsKernelBlockSize),
		reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceCentroidsKernelBlockSize),
		reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	printf("ReduceMembershipsKernelBlockSize version 3 = %d\n", blockSize.reduceMembershipsKernelBlockSize);
	printf("ReduceCentroidsKernelBlockSize version 3 = %d\n", blockSize.reduceCentroidsKernelBlockSize);
	printf("HistogramKernelBlockSize version 3 = %d\n", blockSize.histogramKernelBlockSize);
	printf("CountingSortKernelBlockSize version 3 = %d\n", blockSize.countingSortKernelBlockSize);
	return blockSize;
}
// Block sizes for the version-4 centroid-update pipeline (gather kernel and
// the two GFKM reduction kernels).
//   initBlockSize > 0  : use it for every kernel;
//   initBlockSize == 0 : BlockSizeV4 defaults;
//   initBlockSize < 0  : query the occupancy API per kernel.
inline __host__ BlockSizeV4 getBlockSizeForCentroidKernelV4(int initBlockSize)
{
	if (initBlockSize > 0) {
		return BlockSizeV4(initBlockSize);
	}
	else if (initBlockSize == 0) return BlockSizeV4();
	BlockSizeV4 blockSize;
	int minGridSize;
	// Check the API results (previously discarded): a failure would leave
	// the selected block sizes uninitialized.
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSize( &minGridSize, &(blockSize.gatherKernelBlockSize),
		gather_kernel, 0, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceMembershipsKernelBlockSize),
		reduce_memberships_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	CudaSafeCall(cudaOccupancyMaxPotentialBlockSizeVariableSMem( &minGridSize, &(blockSize.reduceCentroidsKernelBlockSize),
		reduce_centroids_kernel_GFKM, blockSizeToDynamicSMemSize, BlockSizeLimit));
	printf("ReduceMembershipsKernelBlockSize version 4 = %d\n", blockSize.reduceMembershipsKernelBlockSize);
	printf("ReduceCentroidsKernelBlockSize version 4 = %d\n", blockSize.reduceCentroidsKernelBlockSize);
	printf("GatherKernelBlockSize version 4 = %d\n", blockSize.gatherKernelBlockSize);
	return blockSize;
}
// Block sizes for version 1: honour a positive caller-supplied size,
// otherwise fall back to the BlockSizeV1 defaults.
inline __host__ BlockSizeV1 getBlockSizesForVersion1(int initBlockSize)
{
	return (initBlockSize > 0) ? BlockSizeV1(initBlockSize) : BlockSizeV1();
}
// Block sizes for version 2. The previous body was a byte-for-byte copy of
// getBlockSizeForCentroidKernelV2 (defined earlier in this file), so
// delegate to it instead of maintaining the same occupancy logic twice.
inline __host__ BlockSizeV2 getBlockSizesForVersion2(int initBlockSize)
{
	return getBlockSizeForCentroidKernelV2(initBlockSize);
}
// Block sizes for version 3. The previous body was a byte-for-byte copy of
// getBlockSizeForCentroidKernelV3 (defined earlier in this file), so
// delegate to it instead of maintaining the same occupancy logic twice.
inline __host__ BlockSizeV3 getBlockSizesForVersion3(int initBlockSize)
{
	return getBlockSizeForCentroidKernelV3(initBlockSize);
}
// Block sizes for version 4. The previous body was a byte-for-byte copy of
// getBlockSizeForCentroidKernelV4 (defined earlier in this file), so
// delegate to it instead of maintaining the same occupancy logic twice.
inline __host__ BlockSizeV4 getBlockSizesForVersion4(int initBlockSize)
{
	return getBlockSizeForCentroidKernelV4(initBlockSize);
}
#pragma endregion
#pragma endregion |
4ebaa6a9e2420185297e8b37b3443be27e2f3008.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <cassert>
#include <cuda_gl_interop.h>
#include "CResource.hh"
#include "helper_cuda.h" // for checkCudaErrors
#include "cfloat4x4.h"
struct CResourceImp
{
unsigned buffer_id ;
size_t bufsize ;
unsigned flags ;
hipStream_t stream ;
struct cudaGraphicsResource* resource ;
void* dev_ptr ;
CResourceImp(unsigned buffer_id_, unsigned flags_, hipStream_t stream_)
:
buffer_id(buffer_id_),
bufsize(0),
flags(flags_),
stream(stream_),
resource(NULL),
dev_ptr(NULL)
{
}
// HMM : stream_ arg was previously ignored with steam(NULL) : Changed to taking copy of stream arg Jun 26, 2016
const char* getFlagDescription()
{
const char* ret(NULL);
switch(flags)
{
case hipGraphicsMapFlagsNone: ret="hipGraphicsMapFlagsNone: Default; Assume resource can be read/written " ; break ;
case hipGraphicsMapFlagsReadOnly: ret="hipGraphicsMapFlagsReadOnly: CUDA will not write to this resource " ; break ;
case hipGraphicsMapFlagsWriteDiscard: ret="hipGraphicsMapFlagsWriteDiscard: CUDA will only write to and will not read from this resource " ; break ;
}
return ret ;
}
void registerBuffer()
{
//printf("Resource::registerBuffer %d : %s \n", buffer_id, getFlagDescription() );
checkCudaErrors( hipGraphicsGLRegisterBuffer(&resource, buffer_id, flags) );
}
void unregisterBuffer()
{
//printf("Resource::unregisterBuffer %d \n", buffer_id );
checkCudaErrors( hipGraphicsUnregisterResource(resource) );
}
void* mapGLToCUDA()
{
checkCudaErrors( hipGraphicsMapResources(1, &resource, stream) );
checkCudaErrors( hipGraphicsResourceGetMappedPointer((void **)&dev_ptr, &bufsize, resource) );
//printf("Resource::mapGLToCUDA bufsize %lu dev_ptr %p \n", bufsize, dev_ptr );
return dev_ptr ;
}
void unmapGLToCUDA()
{
//printf("Resource::unmapGLToCUDA\n");
checkCudaErrors( hipGraphicsUnmapResources(1, &resource, stream));
}
void streamSync()
{
//printf("Resource::streamSync\n");
checkCudaErrors( hipStreamSynchronize(stream));
}
};
void CResource::init()
{
unsigned flgs(0) ;
switch(m_access)
{
case RW: flgs = hipGraphicsMapFlagsNone ;break;
case R: flgs = hipGraphicsMapFlagsReadOnly ;break;
case W: flgs = hipGraphicsMapFlagsWriteDiscard ;break;
}
//hipStream_t stream1 ;
//hipStreamCreate ( &stream1) ;
m_imp = new CResourceImp(m_buffer_id, flgs, (hipStream_t)0 );
}
void CResource::streamSync()
{
m_imp->streamSync();
}
template <typename T>
CBufSpec CResource::mapGLToCUDA()
{
m_mapped = true ;
m_imp->registerBuffer();
m_imp->mapGLToCUDA();
unsigned int size = m_imp->bufsize/sizeof(T) ;
//printf("CResource::mapGLToCUDA buffer_id %d imp.bufsize %lu sizeof(T) %lu size %d \n", m_buffer_id, m_imp->bufsize, sizeof(T), size );
return CBufSpec( m_imp->dev_ptr, size, m_imp->bufsize ); // number of items only defined when decide on item size
}
void CResource::unmapGLToCUDA()
{
m_mapped = false ;
//printf("CResource::unmapGLToCUDA\n");
m_imp->unmapGLToCUDA();
m_imp->unregisterBuffer();
}
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned char>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned int>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned long long>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<short>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<int>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<float>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<cfloat4x4>();
| 4ebaa6a9e2420185297e8b37b3443be27e2f3008.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <cassert>
#include <cuda_gl_interop.h>
#include "CResource.hh"
#include "helper_cuda.h" // for checkCudaErrors
#include "cfloat4x4.h"
struct CResourceImp
{
unsigned buffer_id ;
size_t bufsize ;
unsigned flags ;
cudaStream_t stream ;
struct cudaGraphicsResource* resource ;
void* dev_ptr ;
CResourceImp(unsigned buffer_id_, unsigned flags_, cudaStream_t stream_)
:
buffer_id(buffer_id_),
bufsize(0),
flags(flags_),
stream(stream_),
resource(NULL),
dev_ptr(NULL)
{
}
// HMM : stream_ arg was previously ignored with steam(NULL) : Changed to taking copy of stream arg Jun 26, 2016
const char* getFlagDescription()
{
const char* ret(NULL);
switch(flags)
{
case cudaGraphicsMapFlagsNone: ret="cudaGraphicsMapFlagsNone: Default; Assume resource can be read/written " ; break ;
case cudaGraphicsMapFlagsReadOnly: ret="cudaGraphicsMapFlagsReadOnly: CUDA will not write to this resource " ; break ;
case cudaGraphicsMapFlagsWriteDiscard: ret="cudaGraphicsMapFlagsWriteDiscard: CUDA will only write to and will not read from this resource " ; break ;
}
return ret ;
}
void registerBuffer()
{
//printf("Resource::registerBuffer %d : %s \n", buffer_id, getFlagDescription() );
checkCudaErrors( cudaGraphicsGLRegisterBuffer(&resource, buffer_id, flags) );
}
void unregisterBuffer()
{
//printf("Resource::unregisterBuffer %d \n", buffer_id );
checkCudaErrors( cudaGraphicsUnregisterResource(resource) );
}
void* mapGLToCUDA()
{
checkCudaErrors( cudaGraphicsMapResources(1, &resource, stream) );
checkCudaErrors( cudaGraphicsResourceGetMappedPointer((void **)&dev_ptr, &bufsize, resource) );
//printf("Resource::mapGLToCUDA bufsize %lu dev_ptr %p \n", bufsize, dev_ptr );
return dev_ptr ;
}
void unmapGLToCUDA()
{
//printf("Resource::unmapGLToCUDA\n");
checkCudaErrors( cudaGraphicsUnmapResources(1, &resource, stream));
}
void streamSync()
{
//printf("Resource::streamSync\n");
checkCudaErrors( cudaStreamSynchronize(stream));
}
};
void CResource::init()
{
unsigned flgs(0) ;
switch(m_access)
{
case RW: flgs = cudaGraphicsMapFlagsNone ;break;
case R: flgs = cudaGraphicsMapFlagsReadOnly ;break;
case W: flgs = cudaGraphicsMapFlagsWriteDiscard ;break;
}
//cudaStream_t stream1 ;
//cudaStreamCreate ( &stream1) ;
m_imp = new CResourceImp(m_buffer_id, flgs, (cudaStream_t)0 );
}
void CResource::streamSync()
{
m_imp->streamSync();
}
template <typename T>
CBufSpec CResource::mapGLToCUDA()
{
m_mapped = true ;
m_imp->registerBuffer();
m_imp->mapGLToCUDA();
unsigned int size = m_imp->bufsize/sizeof(T) ;
//printf("CResource::mapGLToCUDA buffer_id %d imp.bufsize %lu sizeof(T) %lu size %d \n", m_buffer_id, m_imp->bufsize, sizeof(T), size );
return CBufSpec( m_imp->dev_ptr, size, m_imp->bufsize ); // number of items only defined when decide on item size
}
void CResource::unmapGLToCUDA()
{
m_mapped = false ;
//printf("CResource::unmapGLToCUDA\n");
m_imp->unmapGLToCUDA();
m_imp->unregisterBuffer();
}
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned char>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned int>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<unsigned long long>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<short>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<int>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<float>();
template CUDARAP_API CBufSpec CResource::mapGLToCUDA<cfloat4x4>();
|
7896fc3714f5080a2a09ec938ee6aab926a48f9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void orcu_kernel17975(const int sites_on_node, double* A, double* y, double* x) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ci, ai, bi, ar, br, cr;
int j, k;
for (int i=tid; i<=sites_on_node-1; i+=gsize) {
{
#pragma unroll 2
for (j=0; j<=5; j=j+2) {
cr=ci=0.0;
for (k=0; k<=5; k=k+2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr=cr+ar*br-ai*bi;
ci=ci+ar*bi+ai*br;
}
y[6*i+j]=cr;
y[6*i+j+1]=ci;
}
}
}
}
/**************** m_matvec.c (in su3.a) *******************************
* *
* matrix vector multiply *
* y[i] <- A[i]*x[i] *
*/
void mult_su3_mat_vec(double A[], double x[], double y[]) {
const int sites_on_node = 10; // or some other global constant value
register int i,j,k;
register double ar,ai,br,bi,cr,ci;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,113,14);
param UIF[] = range(1,6);
param PL[] = [16,48];
param CFLAGS[] = map(join, product(['-O0', '-O1', '-O2', '-O3']));
}
def input_params {
param SITES[] = [2,4,6,8,10,12,14,16];
}
def input_vars {
decl dynamic double A[18*SITES] = random;
decl dynamic double x[6*SITES] = random;
decl dynamic double y[6*SITES] = 0;
}
def build {
arg build_command = 'nvcc -arch=sm_20 @CFLAGS';
}
def performance_counter {
arg method = 'basic timer';
arg repetitions = 6;
}
def search {
arg algorithm = 'Exhaustive';
arg resume = True;
arg exhaustive_start_coord = [25, 4, 1, 1, 1]; }
) @*/
/**-- (Generated by Orio)
Best performance cost:
[0.044575999999999998, 0.019136, 0.018464000000000001, 0.015167999999999999, 0.0184, 0.018079999999999999]
Tuned for specific problem sizes:
SITES = 2
Best performance parameters:
BC = 28
CFLAGS = -O2
PL = 48
TC = 128
UIF = 2
--**/
int sites_on_node=SITES;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL, unrollInner=UIF)
for(i=0; i<=sites_on_node-1; i++) {
for(j=0; j<=5; j+=2) {
cr = ci = 0.0;
for(k=0; k<=5; k+=2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr += ar*br - ai*bi;
ci += ar*bi + ai*br;
}
y[6*i+j] =cr;
y[6*i+j+1]=ci;
}
}
) @*/
{
hipDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_y, *dev_x;
int nthreads=128;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=28;
/*allocate device memory*/
hipMalloc(&dev_A,18 *SITES*sizeof(double));
hipMalloc(&dev_x,6 *SITES*sizeof(double));
hipMalloc(&dev_y,6 *SITES*sizeof(double));
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
/*copy data from host to device*/
hipEventRecord(tstart,0);
hipMemcpy(dev_A,A,18 *SITES*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_x,x,6 *SITES*sizeof(double),hipMemcpyHostToDevice);
hipEventRecord(tstop,0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&orcu_transfer,tstart,tstop);
hipEventRecord(start,0);
/*invoke device kernel*/
hipLaunchKernelGGL(( orcu_kernel17975), dim3(dimGrid),dim3(dimBlock), 0, 0, sites_on_node,dev_A,dev_y,dev_x);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
hipMemcpy(y,dev_y,6 *SITES*sizeof(double),hipMemcpyDeviceToHost);
hipDeviceSetCacheConfig(hipFuncCachePreferNone);
/*free allocated memory*/
hipFree(dev_A);
hipFree(dev_y);
hipFree(dev_x);
hipError_t err=hipGetLastError();
if (hipSuccess!=err)
printf("CUDA runtime error: %s@",hipGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
| 7896fc3714f5080a2a09ec938ee6aab926a48f9b.cu | __global__ void orcu_kernel17975(const int sites_on_node, double* A, double* y, double* x) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ci, ai, bi, ar, br, cr;
int j, k;
for (int i=tid; i<=sites_on_node-1; i+=gsize) {
{
#pragma unroll 2
for (j=0; j<=5; j=j+2) {
cr=ci=0.0;
for (k=0; k<=5; k=k+2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr=cr+ar*br-ai*bi;
ci=ci+ar*bi+ai*br;
}
y[6*i+j]=cr;
y[6*i+j+1]=ci;
}
}
}
}
/**************** m_matvec.c (in su3.a) *******************************
* *
* matrix vector multiply *
* y[i] <- A[i]*x[i] *
*/
void mult_su3_mat_vec(double A[], double x[], double y[]) {
const int sites_on_node = 10; // or some other global constant value
register int i,j,k;
register double ar,ai,br,bi,cr,ci;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,113,14);
param UIF[] = range(1,6);
param PL[] = [16,48];
param CFLAGS[] = map(join, product(['-O0', '-O1', '-O2', '-O3']));
}
def input_params {
param SITES[] = [2,4,6,8,10,12,14,16];
}
def input_vars {
decl dynamic double A[18*SITES] = random;
decl dynamic double x[6*SITES] = random;
decl dynamic double y[6*SITES] = 0;
}
def build {
arg build_command = 'nvcc -arch=sm_20 @CFLAGS';
}
def performance_counter {
arg method = 'basic timer';
arg repetitions = 6;
}
def search {
arg algorithm = 'Exhaustive';
arg resume = True;
arg exhaustive_start_coord = [25, 4, 1, 1, 1]; }
) @*/
/**-- (Generated by Orio)
Best performance cost:
[0.044575999999999998, 0.019136, 0.018464000000000001, 0.015167999999999999, 0.0184, 0.018079999999999999]
Tuned for specific problem sizes:
SITES = 2
Best performance parameters:
BC = 28
CFLAGS = -O2
PL = 48
TC = 128
UIF = 2
--**/
int sites_on_node=SITES;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL, unrollInner=UIF)
for(i=0; i<=sites_on_node-1; i++) {
for(j=0; j<=5; j+=2) {
cr = ci = 0.0;
for(k=0; k<=5; k+=2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr += ar*br - ai*bi;
ci += ar*bi + ai*br;
}
y[6*i+j] =cr;
y[6*i+j+1]=ci;
}
}
) @*/
{
cudaDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_y, *dev_x;
int nthreads=128;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=28;
/*allocate device memory*/
cudaMalloc(&dev_A,18 *SITES*sizeof(double));
cudaMalloc(&dev_x,6 *SITES*sizeof(double));
cudaMalloc(&dev_y,6 *SITES*sizeof(double));
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
/*copy data from host to device*/
cudaEventRecord(tstart,0);
cudaMemcpy(dev_A,A,18 *SITES*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_x,x,6 *SITES*sizeof(double),cudaMemcpyHostToDevice);
cudaEventRecord(tstop,0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&orcu_transfer,tstart,tstop);
cudaEventRecord(start,0);
/*invoke device kernel*/
orcu_kernel17975<<<dimGrid,dimBlock>>>(sites_on_node,dev_A,dev_y,dev_x);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
cudaMemcpy(y,dev_y,6 *SITES*sizeof(double),cudaMemcpyDeviceToHost);
cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
/*free allocated memory*/
cudaFree(dev_A);
cudaFree(dev_y);
cudaFree(dev_x);
cudaError_t err=cudaGetLastError();
if (cudaSuccess!=err)
printf("CUDA runtime error: %s@",cudaGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
|
cbaa660ed783564b700010973702dd16c7f008e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ### Adrian's
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ### Dennis Mack, dennis.mack@tum.de, p060
// ### Adrian Haarbach, haarbach@in.tum.de, p077
// ### Markus Schlaffer, markus.schlaffer@in.tum.de, p070
//invoce like: ./ex11/main -i ../images/flowers.png -C 2 -delay 1
#include "helper.h"
#include <iostream>
#include <math.h>
#include "common_kernels.cuh"
#include "opencv_helpers.h"
//#include <stdio.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
//the update step for the image u_n -> u_n+1
__global__ void update(float tau, float *u_n, float *div, int w, int h, int nc){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
for (int i = 0; i < nc; ++i){
u_n[x+ y*w +i*w*h]=u_n[x+ y*w +i*w*h]+tau*div[x+ y*w +i*w*h];
}
}
//result stored again in v1,v2
__global__ void diffusivity(float* v1, float* v2, float* d_diffusionTensor, int w, int h, int nc){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
float4 G;
G.x = d_diffusionTensor[x + y*w];
G.y = G.z = d_diffusionTensor[x + y*w + w*h];
G.w = d_diffusionTensor[x + y*w + 2*w*h];
for (int i = 0; i < nc; ++i)
{
float2 nabla_u;
nabla_u.x=v1[x+ y*w +i*w*h];
nabla_u.y=v2[x+ y*w +i*w*h];
float2 vec= G * nabla_u; //matrix -> vector product
//store result again in gradient
v1[x+ y*w +i*w*h]=vec.x;
v2[x+ y*w +i*w*h]=vec.y;
}
}
__host__ __device__ float4 calcG(float lambda1, float lambda2, float2 e1, float2 e2, float C, float alpha){
float4 e1_2; outer(e1,&e1_2);
float4 e2_2; outer(e2,&e2_2);
mul(alpha,&e1_2); //mu1 = alpha
float mu2=alpha;
float lambdaDiff=lambda1-lambda2; //always positive since l1>l2;
if(lambdaDiff>0.000001){ //alpha since we use floating point arithmetic
mu2 = alpha + (1-alpha) * expf( -C / (lambdaDiff*lambdaDiff) );
}
mul(mu2,&e2_2);
float4 G = e1_2 + e2_2; //own operator in common_kernels.cuh
//add(e1_2,&e2_2);G=e2_2;
return G;
}
//13.1
__global__ void diffusionTensorFromStructureTensor(float* d_structSmooth, float* d_diffusionTensor, int w, int h, float C, float alpha){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
//b)
float4 m;
m.x=d_structSmooth[x + w*y]; //m11
m.y=d_structSmooth[x + w*y + w*h]; //m12
m.z=m.y; //m21 == m12
m.w=d_structSmooth[x + w*y + w*h*2]; //m22
float lambda1,lambda2;
float2 e1,e2;
compute_eig(m, &lambda1, &lambda2, &e1, &e2);
//c)
float4 G = calcG(lambda1,lambda2,e1,e2,C,alpha);
d_diffusionTensor[x + y*w] = G.x;
d_diffusionTensor[x + y*w + w*h] = G.y; //==G.z ??
d_diffusionTensor[x + y*w + 2*w*h] = G.w;
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-N <N>] [-tau <tau>] [-delay <delay>] [-C <1,2,3>] [-gray]" << endl; return 1; }
#endif
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
//iteration steps on CPU
int N = 2000;
getParam("N", N, argc, argv);
cout << "N: " << N <<" [CPU iterations] "<<endl;
float tau = 0.25;
getParam("tau", tau, argc, argv);
cout << "tau: " << tau << endl;
float sigma = 0.5f;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << endl;
float rho = 3.0f;
getParam("rho", rho, argc, argv);
cout << "rho: " << rho << endl;
int delay = 1;
getParam("delay", delay, argc, argv);
cout << "delay: " << delay << " ms"<<" [use -delay 0 to step with keys]"<<endl;
float C = 5e-6f;
getParam("C", C, argc, argv);
cout << "C: " << C << " [ G_CONSTANT_1 = 1 , G_INVERSE = 2 , G_EXP = 3 ]"<<endl;
float alpha=0.01; //define alpha
getParam("alpha", alpha, argc, argv);
cout << "alpha: " << alpha << endl;
//check if tau is not too large;
float tauMax=0.25f;
if(tau>tauMax){
cout << "tau: " << tau <<" is to big for convergence, setting tau to 0.25*g(0) new tau: "<<tauMax<< endl;
tau=tauMax;
}
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << " nc="<<nc <<endl;
cv::Mat G_sigma=kernel(sigma);
//imagesc("Kernel sigma", G_sigma, 100, 200);
float *imgKernel_sigma = new float[G_sigma.rows * G_sigma.cols];
convert_mat_to_layered(imgKernel_sigma,G_sigma);
cv::Mat G_rho=kernel(rho);
//imagesc("Kernel rho", G_rho, 100, 200);
float *imgKernel_rho = new float[G_rho.rows * G_rho.cols];
convert_mat_to_layered(imgKernel_rho,G_rho);
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
size_t n = (size_t)w*h*nc;
float *imgIn = new float[n];
size_t n3 = (size_t)w*h*3;
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
float *tg;
tg=(float*)malloc(N*sizeof(float));
//GPU:
int i=0;
Timer timergpu;
float *d_imgIn, *d_imgCur, *d_v2, *d_v1, *d_divergence, *d_struct, *d_structSmooth, *d_imgKernel_sigma, *d_imgS, *d_imgKernel_rho;
hipMalloc(&d_imgIn, n * sizeof(float) );CUDA_CHECK;
hipMemcpy(d_imgIn, imgIn, n * sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK;
// u0 = imgIn
hipMalloc(&d_imgCur, n * sizeof(float) );CUDA_CHECK;
hipMemcpy(d_imgCur, imgIn, n * sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK;
hipMalloc(&d_imgS, n * sizeof(float) );CUDA_CHECK;
hipMalloc(&d_v1, n * sizeof(float) ); CUDA_CHECK;
hipMalloc(&d_v2, n * sizeof(float) ); CUDA_CHECK;
hipMalloc(&d_divergence, n * sizeof(float) ); CUDA_CHECK;
hipMalloc(&d_struct, n3 * sizeof(float) ); CUDA_CHECK;
hipMalloc(&d_structSmooth, n3 * sizeof(float) ); CUDA_CHECK;
hipMalloc(&d_imgKernel_sigma, (size_t) G_sigma.cols * G_sigma.rows * sizeof(float) ); CUDA_CHECK;
hipMemcpy(d_imgKernel_sigma, imgKernel_sigma, G_sigma.cols * G_sigma.rows * sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK;
//d_imagesc("d_imgKernel_sigma",d_imgKernel_sigma,G_sigma.cols,G_sigma.rows,1,false,true);
hipMalloc(&d_imgKernel_rho, (size_t) G_rho.cols * G_rho.rows * sizeof(float) );CUDA_CHECK;
hipMemcpy(d_imgKernel_rho, imgKernel_rho, (size_t) G_rho.cols * G_rho.rows * sizeof(float), hipMemcpyHostToDevice);CUDA_CHECK;
//d_imagesc("d_imgKernel_rho",d_imgKernel_rho,G_rho.cols,G_rho.rows,1,false,true);
dim3 block = dim3(32,8,1);
dim3 grid = dim3((w + block.x - 1 ) / block.x,(h + block.y - 1 ) / block.y, 1);
bool isRunning=true;
//presmooth input image with sigma
hipLaunchKernelGGL(( convolutionGPU), dim3(grid), dim3(block), 0, 0, d_imgIn, d_imgKernel_sigma, d_imgS, w, h, nc, G_sigma.cols); CUDA_CHECK;
hipDeviceSynchronize();CUDA_CHECK;
//d_imagesc("d_imgS",d_imgS, w, h, nc);
//cv::waitKey(0);
hipLaunchKernelGGL(( computeSpatialDerivatives), dim3(grid), dim3(block), 0, 0, d_imgS,d_v1,d_v2, w, h, nc);
hipDeviceSynchronize();CUDA_CHECK;
//d_imagesc("d_v1",d_v1, w, h, nc);
//d_imagesc("d_v2",d_v1, w, h, nc);
//cv::waitKey(0);
//a)
hipLaunchKernelGGL(( createStructureTensorLayered), dim3(grid), dim3(block), 0, 0, d_v1,d_v2,d_struct, w, h, nc);
hipDeviceSynchronize();CUDA_CHECK;
//d_imagesc("d_struct",d_struct, w, h, nc, true);
//d_imagesc("d_imgKernel_rho",d_imgKernel_rho,G_rho.cols,G_rho.rows,1,false,true);
//cv::waitKey(0);
//postsmooth structure tensor with rho
hipLaunchKernelGGL(( convolutionGPU), dim3(grid), dim3(block), 0, 0, d_struct,d_imgKernel_rho, d_structSmooth, w, h, nc, G_rho.cols); CUDA_CHECK;
hipDeviceSynchronize();CUDA_CHECK;
//d_imagesc("d_structSmooth",d_structSmooth, w, h, nc, true);
//cv::waitKey(0);
//b and c)
// creates diffusion tensor - this should be done only once on the input image and
// is then constant for all following iterations
float *d_diffusionTensor;
d_diffusionTensor = d_struct; //missusing unsmoothed structure tensor to hold diffusionTensor since not needed anymore
hipLaunchKernelGGL(( diffusionTensorFromStructureTensor), dim3(grid), dim3(block), 0, 0, d_structSmooth, d_diffusionTensor, w, h, C, alpha);
hipDeviceSynchronize();CUDA_CHECK;
d_imagesc("d_diffusionTensor",d_diffusionTensor, w, h, nc, true);
cv::waitKey(0);
for (; i < N && isRunning; ++i)
{
timergpu.start();
hipLaunchKernelGGL(( convolutionGPU), dim3(grid), dim3(block), 0, 0, d_imgCur, d_imgKernel_sigma, d_imgS, w, h, nc, G_sigma.cols); CUDA_CHECK;
hipDeviceSynchronize();CUDA_CHECK;
hipLaunchKernelGGL(( computeSpatialDerivatives), dim3(grid), dim3(block), 0, 0, d_imgS,d_v1,d_v2, w, h, nc);
hipDeviceSynchronize();CUDA_CHECK;
hipLaunchKernelGGL(( diffusivity), dim3(grid),dim3(block), 0, 0, d_v1,d_v2,d_diffusionTensor,w,h,nc);
hipDeviceSynchronize();CUDA_CHECK;
d_imagesc("d_v1",d_v1, w, h, nc);
d_imagesc("d_v2",d_v1, w, h, nc);
cv::waitKey(0);
hipLaunchKernelGGL(( divergence), dim3(grid),dim3(block), 0, 0, d_v1,d_v2,d_divergence, w, h, nc);
hipDeviceSynchronize();CUDA_CHECK;
d_imagesc("d_divergence",d_divergence, w, h, nc);
cv::waitKey(0);
hipLaunchKernelGGL(( update), dim3(grid),dim3(block), 0, 0, tau,d_imgIn,d_divergence,w,h,nc);
hipDeviceSynchronize();CUDA_CHECK;
timergpu.end();
tg[i] = timergpu.get();
d_imagesc("d_imgIn",d_imgIn, w, h, nc);
imagescReset();
cout<<"iteration: "<<i<<endl;
char key=cv::waitKey(delay);
int keyN=key;
//cout<<"-----------"<<key<<" "<<keyN<<endl;
if(keyN == 27 || key == 'q' || key == 'Q'){
cout<<"leaving iteration loop at i: "<<i<<" total iterations: "<<i<<endl;
isRunning=false;
}
}
hipFree(d_imgIn);CUDA_CHECK;
hipFree(d_imgS);CUDA_CHECK;
hipFree(d_v1);CUDA_CHECK;
hipFree(d_v2);CUDA_CHECK;
hipFree(d_struct);CUDA_CHECK;
hipFree(d_structSmooth);CUDA_CHECK;
hipFree(d_divergence);CUDA_CHECK;
hipFree(d_imgKernel_sigma);CUDA_CHECK;
hipFree(d_imgKernel_rho);CUDA_CHECK;
float ms=GetAverage(tg, i)*1000;
cout << "avg time for one gpu iteration: "<<ms<<" ms"<<endl;
#ifdef CAMERA
// end of camera loop
}
#else
cout<<"---[pres any key to exit]---"<<endl;
// wait for key inputs
cv::waitKey(0);
#endif
cout<<"exiting"<<endl;
// free allocated arrays
delete[] imgIn;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| cbaa660ed783564b700010973702dd16c7f008e3.cu | // ### Adrian's
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ### Dennis Mack, dennis.mack@tum.de, p060
// ### Adrian Haarbach, haarbach@in.tum.de, p077
// ### Markus Schlaffer, markus.schlaffer@in.tum.de, p070
//invoce like: ./ex11/main -i ../images/flowers.png -C 2 -delay 1
#include "helper.h"
#include <iostream>
#include <math.h>
#include "common_kernels.cuh"
#include "opencv_helpers.h"
//#include <stdio.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
//the update step for the image u_n -> u_n+1
__global__ void update(float tau, float *u_n, float *div, int w, int h, int nc){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
for (int i = 0; i < nc; ++i){
u_n[x+ y*w +i*w*h]=u_n[x+ y*w +i*w*h]+tau*div[x+ y*w +i*w*h];
}
}
//result stored again in v1,v2
__global__ void diffusivity(float* v1, float* v2, float* d_diffusionTensor, int w, int h, int nc){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
float4 G;
G.x = d_diffusionTensor[x + y*w];
G.y = G.z = d_diffusionTensor[x + y*w + w*h];
G.w = d_diffusionTensor[x + y*w + 2*w*h];
for (int i = 0; i < nc; ++i)
{
float2 nabla_u;
nabla_u.x=v1[x+ y*w +i*w*h];
nabla_u.y=v2[x+ y*w +i*w*h];
float2 vec= G * nabla_u; //matrix -> vector product
//store result again in gradient
v1[x+ y*w +i*w*h]=vec.x;
v2[x+ y*w +i*w*h]=vec.y;
}
}
__host__ __device__ float4 calcG(float lambda1, float lambda2, float2 e1, float2 e2, float C, float alpha){
float4 e1_2; outer(e1,&e1_2);
float4 e2_2; outer(e2,&e2_2);
mul(alpha,&e1_2); //mu1 = alpha
float mu2=alpha;
float lambdaDiff=lambda1-lambda2; //always positive since l1>l2;
if(lambdaDiff>0.000001){ //alpha since we use floating point arithmetic
mu2 = alpha + (1-alpha) * expf( -C / (lambdaDiff*lambdaDiff) );
}
mul(mu2,&e2_2);
float4 G = e1_2 + e2_2; //own operator in common_kernels.cuh
//add(e1_2,&e2_2);G=e2_2;
return G;
}
//13.1
__global__ void diffusionTensorFromStructureTensor(float* d_structSmooth, float* d_diffusionTensor, int w, int h, float C, float alpha){
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
size_t y = threadIdx.y + blockDim.y * blockIdx.y;
if(x>=w || y>=h) return;
//b)
float4 m;
m.x=d_structSmooth[x + w*y]; //m11
m.y=d_structSmooth[x + w*y + w*h]; //m12
m.z=m.y; //m21 == m12
m.w=d_structSmooth[x + w*y + w*h*2]; //m22
float lambda1,lambda2;
float2 e1,e2;
compute_eig(m, &lambda1, &lambda2, &e1, &e2);
//c)
float4 G = calcG(lambda1,lambda2,e1,e2,C,alpha);
d_diffusionTensor[x + y*w] = G.x;
d_diffusionTensor[x + y*w + w*h] = G.y; //==G.z ??
d_diffusionTensor[x + y*w + 2*w*h] = G.w;
}
// Entry point: coherence-enhancing anisotropic diffusion on the GPU.
// Parses CLI options (-i image, -N iterations, -tau, -sigma, -rho, -C,
// -alpha, -delay, -gray), builds Gaussian convolution kernels, computes a
// diffusion tensor from the rho-smoothed structure tensor of the input once,
// then runs up to N explicit diffusion iterations, displaying intermediates.
int main(int argc, char **argv)
{
    // Before the GPU can process your kernels, a so called "CUDA context" must be initialized
    // This happens on the very first call to a CUDA function, and takes some time (around half a second)
    // We will do it right here, so that the run time measurements are accurate
    cudaDeviceSynchronize(); CUDA_CHECK;

    // Reading command line parameters:
    // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
    // If "-param" is not specified, the value of "var" remains unchanged
    //
    // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
    // input image
    string image = "";
    bool ret = getParam("i", image, argc, argv);
    if (!ret) cerr << "ERROR: no image specified" << endl;
    if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-N <N>] [-tau <tau>] [-delay <delay>] [-C <1,2,3>] [-gray]" << endl; return 1; }
#endif

    // load the input image as grayscale if "-gray" is specifed
    bool gray = false;
    getParam("gray", gray, argc, argv);
    cout << "gray: " << gray << endl;

    // ### Define your own parameters here as needed
    // iteration steps on CPU
    int N = 2000;
    getParam("N", N, argc, argv);
    cout << "N: " << N <<" [CPU iterations] "<<endl;

    float tau = 0.25;
    getParam("tau", tau, argc, argv);
    cout << "tau: " << tau << endl;

    float sigma = 0.5f;
    getParam("sigma", sigma, argc, argv);
    cout << "sigma: " << sigma << endl;

    float rho = 3.0f;
    getParam("rho", rho, argc, argv);
    cout << "rho: " << rho << endl;

    int delay = 1;
    getParam("delay", delay, argc, argv);
    cout << "delay: " << delay << " ms"<<" [use -delay 0 to step with keys]"<<endl;

    float C = 5e-6f;
    getParam("C", C, argc, argv);
    cout << "C: " << C << " [ G_CONSTANT_1 = 1 , G_INVERSE = 2 , G_EXP = 3 ]"<<endl;

    float alpha=0.01; //define alpha
    getParam("alpha", alpha, argc, argv);
    cout << "alpha: " << alpha << endl;

    // check if tau is not too large (stability bound of the explicit scheme)
    float tauMax=0.25f;
    if(tau>tauMax){
        // BUGFIX: message typo "is to big" -> "is too big"
        cout << "tau: " << tau <<" is too big for convergence, setting tau to 0.25*g(0) new tau: "<<tauMax<< endl;
        tau=tauMax;
    }

    // Init camera / Load input image
#ifdef CAMERA
    // Init camera
    cv::VideoCapture camera(0);
    if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
    int camW = 640;
    int camH = 480;
    camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
    camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
    // read in first frame to get the dimensions
    cv::Mat mIn;
    camera >> mIn;
#else
    // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
    cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
    // check
    if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif

    // convert to float representation (opencv loads image values as single bytes by default)
    mIn.convertTo(mIn,CV_32F);
    // convert range of each channel to [0,1] (opencv default is [0,255])
    mIn /= 255.f;
    // get image dimensions
    int w = mIn.cols;        // width
    int h = mIn.rows;        // height
    int nc = mIn.channels(); // number of channels
    cout << "image: " << w << " x " << h << " nc="<<nc <<endl;

    // Gaussian kernels: sigma for pre-smoothing the image, rho for smoothing
    // the structure tensor
    cv::Mat G_sigma=kernel(sigma);
    //imagesc("Kernel sigma", G_sigma, 100, 200);
    float *imgKernel_sigma = new float[G_sigma.rows * G_sigma.cols];
    convert_mat_to_layered(imgKernel_sigma,G_sigma);

    cv::Mat G_rho=kernel(rho);
    //imagesc("Kernel rho", G_rho, 100, 200);
    float *imgKernel_rho = new float[G_rho.rows * G_rho.cols];
    convert_mat_to_layered(imgKernel_rho,G_rho);

    // Allocate arrays
    // input/output image width: w
    // input/output image height: h
    // input image number of channels: nc
    // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)

    // allocate raw input image array
    size_t n = (size_t)w*h*nc;
    float *imgIn = new float[n];
    size_t n3 = (size_t)w*h*3; // 3-channel buffers for the symmetric 2x2 tensors

    // For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
    // Read a camera image frame every 30 milliseconds:
    // cv::waitKey(30) waits 30 milliseconds for a keyboard input,
    // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
    while (cv::waitKey(30) < 0)
    {
    // Get camera image
    camera >> mIn;
    // convert to float representation (opencv loads image values as single bytes by default)
    mIn.convertTo(mIn,CV_32F);
    // convert range of each channel to [0,1] (opencv default is [0,255])
    mIn /= 255.f;
#endif

    // Init raw input image array
    // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
    // But for CUDA it's better to work with layered images: rrr... ggg... bbb...
    // So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
    convert_mat_to_layered (imgIn, mIn);

    // per-iteration GPU timings
    float *tg;
    tg=(float*)malloc(N*sizeof(float));

    //GPU:
    int i=0;
    Timer timergpu;
    float *d_imgIn, *d_imgCur, *d_v2, *d_v1, *d_divergence, *d_struct, *d_structSmooth, *d_imgKernel_sigma, *d_imgS, *d_imgKernel_rho;
    cudaMalloc(&d_imgIn, n * sizeof(float) );CUDA_CHECK;
    cudaMemcpy(d_imgIn, imgIn, n * sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK;
    // u0 = imgIn
    cudaMalloc(&d_imgCur, n * sizeof(float) );CUDA_CHECK;
    cudaMemcpy(d_imgCur, imgIn, n * sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK;
    cudaMalloc(&d_imgS, n * sizeof(float) );CUDA_CHECK;
    cudaMalloc(&d_v1, n * sizeof(float) ); CUDA_CHECK;
    cudaMalloc(&d_v2, n * sizeof(float) ); CUDA_CHECK;
    cudaMalloc(&d_divergence, n * sizeof(float) ); CUDA_CHECK;
    cudaMalloc(&d_struct, n3 * sizeof(float) ); CUDA_CHECK;
    cudaMalloc(&d_structSmooth, n3 * sizeof(float) ); CUDA_CHECK;
    cudaMalloc(&d_imgKernel_sigma, (size_t) G_sigma.cols * G_sigma.rows * sizeof(float) ); CUDA_CHECK;
    cudaMemcpy(d_imgKernel_sigma, imgKernel_sigma, G_sigma.cols * G_sigma.rows * sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK;
    //d_imagesc("d_imgKernel_sigma",d_imgKernel_sigma,G_sigma.cols,G_sigma.rows,1,false,true);
    cudaMalloc(&d_imgKernel_rho, (size_t) G_rho.cols * G_rho.rows * sizeof(float) );CUDA_CHECK;
    cudaMemcpy(d_imgKernel_rho, imgKernel_rho, (size_t) G_rho.cols * G_rho.rows * sizeof(float), cudaMemcpyHostToDevice);CUDA_CHECK;
    //d_imagesc("d_imgKernel_rho",d_imgKernel_rho,G_rho.cols,G_rho.rows,1,false,true);

    dim3 block = dim3(32,8,1);
    dim3 grid = dim3((w + block.x - 1 ) / block.x,(h + block.y - 1 ) / block.y, 1);
    bool isRunning=true;

    // presmooth input image with sigma
    convolutionGPU<<<grid, block>>>(d_imgIn, d_imgKernel_sigma, d_imgS, w, h, nc, G_sigma.cols); CUDA_CHECK;
    cudaDeviceSynchronize();CUDA_CHECK;
    //d_imagesc("d_imgS",d_imgS, w, h, nc);
    //cv::waitKey(0);
    computeSpatialDerivatives<<<grid, block>>>(d_imgS,d_v1,d_v2, w, h, nc);
    cudaDeviceSynchronize();CUDA_CHECK;
    //d_imagesc("d_v1",d_v1, w, h, nc);
    //d_imagesc("d_v2",d_v1, w, h, nc);
    //cv::waitKey(0);
    //a)
    createStructureTensorLayered<<<grid, block>>>(d_v1,d_v2,d_struct, w, h, nc);
    cudaDeviceSynchronize();CUDA_CHECK;
    //d_imagesc("d_struct",d_struct, w, h, nc, true);
    //d_imagesc("d_imgKernel_rho",d_imgKernel_rho,G_rho.cols,G_rho.rows,1,false,true);
    //cv::waitKey(0);
    // postsmooth structure tensor with rho
    convolutionGPU<<<grid, block>>>(d_struct,d_imgKernel_rho, d_structSmooth, w, h, nc, G_rho.cols); CUDA_CHECK;
    cudaDeviceSynchronize();CUDA_CHECK;
    //d_imagesc("d_structSmooth",d_structSmooth, w, h, nc, true);
    //cv::waitKey(0);
    //b and c)
    // creates diffusion tensor - this should be done only once on the input image and
    // is then constant for all following iterations
    float *d_diffusionTensor;
    d_diffusionTensor = d_struct; // aliases the unsmoothed structure tensor buffer (not needed anymore); free only d_struct
    diffusionTensorFromStructureTensor<<<grid, block>>>(d_structSmooth, d_diffusionTensor, w, h, C, alpha);
    cudaDeviceSynchronize();CUDA_CHECK;
    d_imagesc("d_diffusionTensor",d_diffusionTensor, w, h, nc, true);
    cv::waitKey(0);

    for (; i < N && isRunning; ++i)
    {
        timergpu.start();
        // NOTE(review): this smoothing step reads d_imgCur, but the update
        // kernel below writes d_imgIn and d_imgCur is never refreshed —
        // confirm whether the convolution was meant to read d_imgIn.
        convolutionGPU<<<grid, block>>>(d_imgCur, d_imgKernel_sigma, d_imgS, w, h, nc, G_sigma.cols); CUDA_CHECK;
        cudaDeviceSynchronize();CUDA_CHECK;
        computeSpatialDerivatives<<<grid, block>>>(d_imgS,d_v1,d_v2, w, h, nc);
        cudaDeviceSynchronize();CUDA_CHECK;
        diffusivity<<<grid,block>>>(d_v1,d_v2,d_diffusionTensor,w,h,nc);
        cudaDeviceSynchronize();CUDA_CHECK;
        d_imagesc("d_v1",d_v1, w, h, nc);
        d_imagesc("d_v2",d_v2, w, h, nc); // BUGFIX: displayed d_v1 under the name "d_v2" (copy-paste)
        cv::waitKey(0);
        divergence<<<grid,block>>>(d_v1,d_v2,d_divergence, w, h, nc);
        cudaDeviceSynchronize();CUDA_CHECK;
        d_imagesc("d_divergence",d_divergence, w, h, nc);
        cv::waitKey(0);
        // explicit Euler step: u <- u + tau * div(G * grad u)
        update<<<grid,block>>>(tau,d_imgIn,d_divergence,w,h,nc);
        cudaDeviceSynchronize();CUDA_CHECK;
        timergpu.end();
        tg[i] = timergpu.get();
        d_imagesc("d_imgIn",d_imgIn, w, h, nc);
        imagescReset();
        cout<<"iteration: "<<i<<endl;
        char key=cv::waitKey(delay);
        int keyN=key;
        //cout<<"-----------"<<key<<" "<<keyN<<endl;
        if(keyN == 27 || key == 'q' || key == 'Q'){
            cout<<"leaving iteration loop at i: "<<i<<" total iterations: "<<i<<endl;
            isRunning=false;
        }
    }

    // release device buffers (d_diffusionTensor aliases d_struct, freed once)
    // NOTE(review): in CAMERA builds these frees sit inside the capture loop
    // while the mallocs are outside it, so a second frame would use freed
    // pointers — restructure if camera mode is actually used.
    cudaFree(d_imgIn);CUDA_CHECK;
    cudaFree(d_imgCur);CUDA_CHECK; // BUGFIX: was leaked
    cudaFree(d_imgS);CUDA_CHECK;
    cudaFree(d_v1);CUDA_CHECK;
    cudaFree(d_v2);CUDA_CHECK;
    cudaFree(d_struct);CUDA_CHECK;
    cudaFree(d_structSmooth);CUDA_CHECK;
    cudaFree(d_divergence);CUDA_CHECK;
    cudaFree(d_imgKernel_sigma);CUDA_CHECK;
    cudaFree(d_imgKernel_rho);CUDA_CHECK;

    float ms=GetAverage(tg, i)*1000;
    cout << "avg time for one gpu iteration: "<<ms<<" ms"<<endl;

#ifdef CAMERA
    // end of camera loop
    }
#else
    // BUGFIX: message typo "pres" -> "press"
    cout<<"---[press any key to exit]---"<<endl;
    // wait for key inputs
    cv::waitKey(0);
#endif
    cout<<"exiting"<<endl;

    // free allocated host arrays
    free(tg);                 // BUGFIX: was leaked
    delete[] imgKernel_sigma; // BUGFIX: was leaked
    delete[] imgKernel_rho;   // BUGFIX: was leaked
    delete[] imgIn;

    // close all opencv windows
    cvDestroyAllWindows();
    return 0;
}
|
f9925f7d9b955513eaf137e7512f319c953c997d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
| f9925f7d9b955513eaf137e7512f319c953c997d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
|
dfede005163ec350d14199d9fe1fb93ebcc3b07d.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/Dispatch.h"
#include "ATen/ExpandUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/AccumulateType.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <nvfunctional>
#include "ATen/native/Distributions.h"
#include <THH/THHGeneral.h>
#include <THH/THHTensorRandom.h>
#include <THH/THHGenerator.hpp>
#include <THH/THHApply.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// increment should be at least the number of hiprand() random numbers used in
// each thread.
// Reserves `increment` Philox offsets for the calling kernel and returns the
// (seed, starting offset) pair it should use. Per the note above, `increment`
// must be at least the number of hiprand() draws any single thread performs.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto thc_gen = THCRandom_getGenerator(at::globalContext().getTHCState());
  const uint64_t start_offset = thc_gen->state.philox_seed_offset.fetch_add(increment);
  return {thc_gen->state.initial_seed, start_offset};
}
// Fills `ret` with Poisson(lambda) samples, elementwise over `lambda`.
// `seeds` is the (philox seed, offset) pair reserved via next_philox_seed;
// each thread seeds its own Philox state with its global thread id as the
// subsequence, keeping per-thread streams independent and reproducible.
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda));
      });
}
// Fills `ret` with Gamma(alpha, 1) samples, elementwise over `alpha`.
// `seeds` is the (philox seed, offset) pair reserved via next_philox_seed;
// each thread seeds its own Philox state from it so draws are reproducible.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  // Accumulate in a wider type (e.g. float for half inputs) for accuracy.
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        BaseSampler<accscalar_t> standard_uniform([&state] __device__ () {
          return hiprand_uniform(&state);
        });
        BaseSampler<accscalar_t> standard_normal([&state] __device__ () {
          return hiprand_normal(&state);
        });
        auto sample = sample_gamma<scalar_t, accscalar_t>(alpha, standard_uniform, standard_normal);
        // BUGFIX: clamp to the smallest positive normal value, not lowest().
        // lowest() is the most-negative finite value, so the old clamp could
        // never fire and underflow could yield 0, which lies outside the
        // open support (0, inf) of the Gamma distribution.
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Backward helper for the standard-gamma sampler:
// ret[i] = d(sample)/d(alpha) evaluated at (self[i], output[i]) via the
// project's standard_gamma_grad_one device function.
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  // Compute in a wider accumulator type for numerical accuracy.
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Elementwise in-place Bernoulli: for each element draw u ~ U(0,1] and set
// the output to (u <= p). Four elements are processed per thread so that a
// single hiprand_uniform4 call covers them; `n` (1..4) tells the lambda how
// many of the four slots are valid at the tail of the tensor.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        float4 rand = hiprand_uniform4(&state);
        // Intentional fallthrough: case `n` handles elements n, n-1, ..., 1.
        // Device-side asserts validate each probability is in [0, 1].
        switch (n) {
          case 4: {
            assert(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            assert(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            assert(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            assert(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
  );
}
// In-place Bernoulli with a single scalar probability `p_` for all elements.
// `p_` is narrowed to float to match hiprand_uniform4's output; range
// validation happens in the host wrapper (bernoulli_scalar_cuda_), so no
// device-side asserts are needed here.
template<typename scalar_t>
void bernoulli_scalar_cuda_kernel(
    at::Tensor& ret, double p_,
    std::pair<uint64_t, uint64_t> seeds) {
  float p = static_cast<float>(p_);
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply1<scalar_t, 4>(
      ret, [seeds, p] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) {
        hiprandStatePhilox4_32_10_t state;
        hiprand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        float4 rand = hiprand_uniform4(&state);
        // Intentional fallthrough: case `n` handles elements n, n-1, ..., 1.
        switch (n) {
          case 4: {
            v4 = static_cast<scalar_t>(rand.w <= p);
            // fallthrough
          }
          case 3: {
            v3 = static_cast<scalar_t>(rand.z <= p);
            // fallthrough
          }
          case 2: {
            v2 = static_cast<scalar_t>(rand.y <= p);
            // fallthrough
          }
          case 1: {
            v1 = static_cast<scalar_t>(rand.x <= p);
          }
        }
      }
  );
}
} // namespace
namespace at { namespace native {

// Samples ret[i] ~ Poisson(lambda[i]) on the current device.
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
  Tensor ret = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "poisson", [&] {
    // increment 20 reserves Philox offsets per thread — presumably the max
    // draws hiprand_poisson can make; TODO confirm against hiprand docs.
    poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20));
  });
  return ret;
}

// Samples ret[i] ~ Gamma(alpha[i], 1).
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
  Tensor ret = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "gamma", [&] {
    gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10));
  });
  return ret;
}

// Gradient of the standard-gamma sampler w.r.t. its concentration parameter.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
  Tensor ret = at::empty(self.sizes(), self.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "_standard_gamma_grad", [&] {
    gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
  });
  return ret;
}

// In-place Bernoulli draw with per-element probabilities from `p_`
// (moved to CUDA and expanded against `self` first).
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
  auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
  AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_tensor_cuda_self_", [&] {
    const at::Type& p_type = p.type();  // NOTE(review): unused
    using self_t = scalar_t;
    auto seeds = next_philox_seed(gen, 10);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.type(), "bernoulli_tensor_cuda_p_", [&] {
      using p_t = scalar_t;
      return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
    });
  });
  return self;
}

// In-place Bernoulli draw with one scalar probability; validates p on host.
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
  AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_scalar_cuda_", [&] {
    auto seeds = next_philox_seed(gen, 10);
    bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds);
  });
  return self;
}

}} // namespace at::native
| dfede005163ec350d14199d9fe1fb93ebcc3b07d.cu | #include "ATen/Dispatch.h"
#include "ATen/ExpandUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/AccumulateType.h"
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <nvfunctional>
#include "ATen/native/Distributions.h"
#include <THC/THCGeneral.h>
#include <THC/THCTensorRandom.h>
#include <THC/THCGenerator.hpp>
#include <THC/THCApply.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
THCGenerator* THCRandom_getGenerator(THCState* state);
namespace {
// increment should be at least the number of curand() random numbers used in
// each thread.
// Reserves `increment` Philox offsets and returns the (seed, starting offset)
// pair for the calling kernel. Per the note above, `increment` must cover the
// maximum number of curand() draws made by any single thread.
std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) {
  auto thc_gen = THCRandom_getGenerator(at::globalContext().getTHCState());
  const uint64_t start_offset = thc_gen->state.philox_seed_offset.fetch_add(increment);
  return {thc_gen->state.initial_seed, start_offset};
}
// Fills `ret` with Poisson(lambda) samples, elementwise over `lambda`.
// Each thread seeds a Philox state from `seeds` using its global thread id
// as the subsequence, so per-thread streams are independent and reproducible.
template <typename scalar_t>
void poisson_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& lambda,
    std::pair<uint64_t, uint64_t> seeds) {
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      lambda,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& lambda) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda));
      });
}
// Fills `ret` with Gamma(alpha, 1) samples, elementwise over `alpha`.
// Each thread seeds its own Philox state from `seeds` (reserved by
// next_philox_seed) so draws are reproducible.
template <typename scalar_t>
void gamma_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& alpha,
    std::pair<uint64_t, uint64_t> seeds) {
  // Accumulate in a wider type (e.g. float for half inputs) for accuracy.
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
      ret,
      alpha,
      [seeds] __device__(
          scalar_t & ret_val, const scalar_t& alpha) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        BaseSampler<accscalar_t> standard_uniform([&state] __device__ () {
          return curand_uniform(&state);
        });
        BaseSampler<accscalar_t> standard_normal([&state] __device__ () {
          return curand_normal(&state);
        });
        auto sample = sample_gamma<scalar_t, accscalar_t>(alpha, standard_uniform, standard_normal);
        // BUGFIX: clamp to the smallest positive normal value, not lowest().
        // lowest() is the most-negative finite value, so the old clamp could
        // never fire and underflow could yield 0, which lies outside the
        // open support (0, inf) of the Gamma distribution.
        auto min_value = std::numeric_limits<scalar_t>::min();
        ret_val = (min_value > sample) ? min_value : sample;
      });
}
// Backward helper for the standard-gamma sampler:
// ret[i] = d(sample)/d(alpha) evaluated at (self[i], output[i]).
template <typename scalar_t>
void gamma_grad_cuda_kernel(
    at::Tensor& ret,
    const at::Tensor& self,
    const at::Tensor& output) {
  // Compute in a wider accumulator type for numerical accuracy.
  using accscalar_t = at::acc_type<scalar_t, true>;
  at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(
      ret, self, output,
      [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) {
        ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val);
      });
}
// Elementwise in-place Bernoulli: for each element draw u ~ U(0,1] and set
// the output to (u <= p). Four elements are handled per thread so that one
// curand_uniform4 call covers them; `n` (1..4) is how many of the four slots
// are valid at the tail of the tensor.
template<typename scalar_t, typename prob_t>
void bernoulli_tensor_cuda_kernel(
    at::Tensor& ret, const at::Tensor& p,
    std::pair<uint64_t, uint64_t> seeds) {
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>(
      ret, p,
      [seeds] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4,
          const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        float4 rand = curand_uniform4(&state);
        // Intentional fallthrough: case `n` handles elements n, n-1, ..., 1.
        // Device-side asserts validate each probability is in [0, 1].
        switch (n) {
          case 4: {
            assert(0 <= p4 && p4 <= 1);
            v4 = static_cast<scalar_t>(rand.w <= p4);
            // fallthrough
          }
          case 3: {
            assert(0 <= p3 && p3 <= 1);
            v3 = static_cast<scalar_t>(rand.z <= p3);
            // fallthrough
          }
          case 2: {
            assert(0 <= p2 && p2 <= 1);
            v2 = static_cast<scalar_t>(rand.y <= p2);
            // fallthrough
          }
          case 1: {
            assert(0 <= p1 && p1 <= 1);
            v1 = static_cast<scalar_t>(rand.x <= p1);
          }
        }
      }
  );
}
// In-place Bernoulli with a single scalar probability `p_` for all elements.
// `p_` is narrowed to float to match curand_uniform4's output; range
// validation happens in the host wrapper (bernoulli_scalar_cuda_), so no
// device-side asserts are needed here.
template<typename scalar_t>
void bernoulli_scalar_cuda_kernel(
    at::Tensor& ret, double p_,
    std::pair<uint64_t, uint64_t> seeds) {
  float p = static_cast<float>(p_);
  // The template argument `4` below indicates that we want to operate on four
  // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details.
  at::cuda::CUDA_tensor_apply1<scalar_t, 4>(
      ret, [seeds, p] __device__(
          int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) {
        curandStatePhilox4_32_10_t state;
        curand_init(
            seeds.first,
            blockIdx.x * blockDim.x + threadIdx.x,
            seeds.second,
            &state);
        float4 rand = curand_uniform4(&state);
        // Intentional fallthrough: case `n` handles elements n, n-1, ..., 1.
        switch (n) {
          case 4: {
            v4 = static_cast<scalar_t>(rand.w <= p);
            // fallthrough
          }
          case 3: {
            v3 = static_cast<scalar_t>(rand.z <= p);
            // fallthrough
          }
          case 2: {
            v2 = static_cast<scalar_t>(rand.y <= p);
            // fallthrough
          }
          case 1: {
            v1 = static_cast<scalar_t>(rand.x <= p);
          }
        }
      }
  );
}
} // namespace
namespace at { namespace native {

// Samples ret[i] ~ Poisson(lambda[i]) on the current CUDA device.
Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) {
  Tensor ret = at::empty(lambda.sizes(), lambda.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "poisson", [&] {
    // increment 20 reserves Philox offsets per thread — presumably the max
    // draws curand_poisson can make; TODO confirm against cuRAND docs.
    poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20));
  });
  return ret;
}

// Samples ret[i] ~ Gamma(alpha[i], 1).
Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) {
  Tensor ret = at::empty(alpha.sizes(), alpha.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "gamma", [&] {
    gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10));
  });
  return ret;
}

// Gradient of the standard-gamma sampler w.r.t. its concentration parameter.
Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) {
  Tensor ret = at::empty(self.sizes(), self.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "_standard_gamma_grad", [&] {
    gamma_grad_cuda_kernel<scalar_t>(ret, self, output);
  });
  return ret;
}

// In-place Bernoulli draw with per-element probabilities from `p_`
// (moved to CUDA and expanded against `self` first).
Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) {
  auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA)));
  AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_tensor_cuda_self_", [&] {
    const at::Type& p_type = p.type();  // NOTE(review): unused
    using self_t = scalar_t;
    auto seeds = next_philox_seed(gen, 10);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.type(), "bernoulli_tensor_cuda_p_", [&] {
      using p_t = scalar_t;
      return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds);
    });
  });
  return self;
}

// In-place Bernoulli draw with one scalar probability; validates p on host.
Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) {
  AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  AT_DISPATCH_ALL_TYPES_AND_HALF(self.type(), "bernoulli_scalar_cuda_", [&] {
    auto seeds = next_philox_seed(gen, 10);
    bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds);
  });
  return self;
}

}} // namespace at::native
|
52339fae280bc4e3d878fdc8a733ae1be30faffd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front;
int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front;
int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front;
int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front;
int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_front * \
ydim0_update_halo_kernel2_xvel_plus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_front * \
ydim1_update_halo_kernel2_xvel_plus_2_front * (z))
// user function
// Copies the x-velocity value from two planes inside the domain (z offset -2)
// into this halo point (z offset 0), for each field enabled in `fields`.
// Indexing goes through the OPS_ACC0/OPS_ACC1 macros above, which use the
// per-dat pitches held in __constant__ memory.
__device__
inline void
update_halo_kernel2_xvel_plus_2_front_gpu(double *xvel0, double *xvel1,
                                          const int *fields) {
  if (fields[FIELD_XVEL0] == 1)
    xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)];
  if (fields[FIELD_XVEL1] == 1)
    xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// 3D launch wrapper: one thread per halo point. Offsets arg0/arg1 to this
// thread's element using the row (xdim*) and slab (xdim*ydim*) pitches held
// in __constant__ memory, then applies the user function.
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
    double *__restrict arg0, double *__restrict arg1,
    const int *__restrict arg2, int size0, int size1, int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front +
          idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front *
              ydim0_update_halo_kernel2_xvel_plus_2_front;
  arg1 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front +
          idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front *
              ydim1_update_halo_kernel2_xvel_plus_2_front;

  // bounds guard: the grid is rounded up to whole blocks in x and y
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel2_xvel_plus_2_front_gpu(arg0, arg1, arg2);
  }
}
// host stub function
// Host stub generated by ops.py for kernel 79
// (update_halo_kernel2_xvel_plus_2_front). Computes the locally owned
// iteration range, refreshes the __constant__ dat pitches when they change,
// stages the `fields` flag array in the constant scratch buffer, offsets the
// device base pointers, and launches the wrapper kernel.
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
    char const *name, ops_block block, int dim, int *range, ops_arg arg0,
    ops_arg arg1, ops_arg arg2) {

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[3] = {arg0, arg1, arg2};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 79))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_2_front");
    OPS_kernels[79].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  // clip the global range against this rank's decomposition; ranks at the
  // physical domain boundary (MPI_PROC_NULL neighbours) keep the halo extent
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];

  // upload the __constant__ pitch values only when they changed (cached in
  // the *_h host shadows to avoid redundant transfers).
  // NOTE(review): newer hipify wraps the symbol argument in HIP_SYMBOL();
  // confirm this bare-symbol form compiles with the targeted HIP version.
  if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h ||
      ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h ||
      xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h ||
      ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) {
    hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0,
                      sizeof(int));
    xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0;
    hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0,
                      sizeof(int));
    ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0;
    hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1,
                      sizeof(int));
    xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1;
    hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1,
                      sizeof(int));
    ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1;
  }

  int *arg2h = (int *)arg2.data;

  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  // stage the `fields` flag array into the shared constant scratch buffer
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;

  char *p_a[3];

  // set up initial pointers: byte offset of (start[0], start[1], start[2])
  // within each dat, accounting for base offsets and intra-block halos (d_m)
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;

#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  // make device copies current and exchange MPI halos before the launch
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args, 3, range);

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[79].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_front), dim3(grid), dim3(tblock), 0, 0,
      (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
      z_size);

  if (OPS_diags > 1) {
    // synchronise so the recorded time covers the kernel itself
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[79].time += t1 - t2;
  }

  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[79].mpi_time += t2 - t1;
    OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
| 52339fae280bc4e3d878fdc8a733ae1be30faffd.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front;
int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front;
int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front;
int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front;
int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_front * \
ydim0_update_halo_kernel2_xvel_plus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_front * \
ydim1_update_halo_kernel2_xvel_plus_2_front * (z))
// user function
// Copies the x-velocity value from two planes inside the domain (z offset -2)
// into this halo point (z offset 0), for each field enabled in `fields`.
// Indexing goes through the OPS_ACC0/OPS_ACC1 macros above, which use the
// per-dat pitches held in __constant__ memory.
__device__
inline void
update_halo_kernel2_xvel_plus_2_front_gpu(double *xvel0, double *xvel1,
                                          const int *fields) {
  if (fields[FIELD_XVEL0] == 1)
    xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)];
  if (fields[FIELD_XVEL1] == 1)
    xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// 3D launch wrapper: one thread per halo point. Offsets arg0/arg1 to this
// thread's element using the row (xdim*) and slab (xdim*ydim*) pitches held
// in __constant__ memory, then applies the user function.
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
    double *__restrict arg0, double *__restrict arg1,
    const int *__restrict arg2, int size0, int size1, int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front +
          idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front *
              ydim0_update_halo_kernel2_xvel_plus_2_front;
  arg1 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front +
          idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front *
              ydim1_update_halo_kernel2_xvel_plus_2_front;

  // bounds guard: the grid is rounded up to whole blocks in x and y
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel2_xvel_plus_2_front_gpu(arg0, arg1, arg2);
  }
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 79))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_2_front");
OPS_kernels[79].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_2_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[79].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[79].mpi_time += t2 - t1;
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
dedef7803a05453fcde97d3a2da39742cd9b0368.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include <cmath>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(const T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(
T* dst,
const T* src,
int dst_offset = 0,
int src_offset = 0){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((const LT*)src)[src_offset];
}
// (1-t)*x + t*y
__device__ __forceinline__ float lerp(float t, float x, float y) {
// See https://developer.nvidia.com/blog/lerp-faster-cuda/
return fma(t, y, fma(-t, x, x));
}
typedef enum{
ADAM_MODE_0 =0, // L2 regularization mode
ADAM_MODE_1 =1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
/* Multi-tensor Adam
*
* Updates params in-place and outputs a copy with a desired datatype.
*/
template <typename T, typename GRAD_T, typename PARAM_OUT_T>
struct DistAdamFunctor
{
// Vectorized local compute
__device__ __forceinline__ static void local_step(
T p[ILP],
T m[ILP],
T v[ILP],
const GRAD_T g[ILP],
const float grad_scale,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) {
if (mode == ADAM_MODE_0) { // L2
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
float scaled_grad = (g[ii] * grad_scale) + (weight_decay * p[ii]);
float next_m = lerp(beta1, scaled_grad, m[ii]);
float next_v = lerp(beta2, scaled_grad*scaled_grad, v[ii]);
float next_m_unbiased = next_m / beta1_correction;
float next_v_unbiased = next_v / beta2_correction;
float denom = sqrtf(next_v_unbiased) + eps;
float update = next_m_unbiased / denom;
m[ii] = next_m;
v[ii] = next_v;
p[ii] -= lr * update;
}
} else { // weight decay
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
float scaled_grad = g[ii] * grad_scale;
float next_m = lerp(beta1, scaled_grad, m[ii]);
float next_v = lerp(beta2, scaled_grad*scaled_grad, v[ii]);
float next_m_unbiased = next_m / beta1_correction;
float next_v_unbiased = next_v / beta2_correction;
float denom = sqrtf(next_v_unbiased) + eps;
float update = (next_m_unbiased / denom) + (weight_decay * p[ii]);
m[ii] = next_m;
v[ii] = next_v;
p[ii] -= lr * update;
}
}
}
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const float* grad_scale_ptr,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) const
{
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
const float grad_scale = *grad_scale_ptr;
T* p_in = (T *)tl.addresses[0][tensor_loc];
p_in += chunk_idx*chunk_size;
T* m = (T *)tl.addresses[1][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T *)tl.addresses[2][tensor_loc];
v += chunk_idx*chunk_size;
const GRAD_T* g = (GRAD_T *)tl.addresses[3][tensor_loc];
g += chunk_idx*chunk_size;
PARAM_OUT_T* p_out = (PARAM_OUT_T *)tl.addresses[4][tensor_loc];
p_out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
n = chunk_size < n ? chunk_size : n;
const bool aligned = (n % ILP == 0 &&
is_aligned(p_in) &&
is_aligned(m) &&
is_aligned(v) &&
is_aligned(g) &&
is_aligned(p_out));
for (int i_start = threadIdx.x*ILP; i_start < n; i_start += blockDim.x*ILP) {
T local_p[ILP];
T local_m[ILP];
T local_v[ILP];
GRAD_T local_g[ILP];
PARAM_OUT_T local_p_out[ILP];
// Load
if (aligned) {
load_store(local_p, p_in + i_start);
load_store(local_m, m + i_start);
load_store(local_v, v + i_start);
load_store(local_g, g + i_start);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
local_p[ii] = p_in[i];
local_m[ii] = m[i];
local_v[ii] = v[i];
local_g[ii] = g[i];
} else {
local_p[ii] = 0;
local_m[ii] = 0;
local_v[ii] = 0;
local_g[ii] = 0;
}
}
}
// Local compute
local_step(
local_p, local_m, local_v, local_g, grad_scale,
beta1, beta2, beta1_correction, beta2_correction,
eps, lr, mode, weight_decay);
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
local_p_out[ii] = static_cast<PARAM_OUT_T>(local_p[ii]);
}
// Store
if (aligned) {
load_store(p_in + i_start, local_p);
load_store(m + i_start, local_m);
load_store(v + i_start, local_v);
load_store(p_out + i_start, local_p_out);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
p_in[i] = local_p[ii];
m[i] = local_m[ii];
v[i] = local_v[ii];
p_out[i] = local_p_out[ii];
}
}
}
}
}
};
/* Functor for multi-tensor Adam with implicit main params
*
* If params are BF16 and optimizer state is FP32, it is not necessary
* to store FP32 main params. Instead, store 16-bit param remainder
* and combine with BF16 param to reconstruct the FP32 main param.
*/
struct DistAdamWithParamRemaindersFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<6>& tl,
const float* grad_scale_ptr,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) const
{
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
const float grad_scale = *grad_scale_ptr;
int16_t* p_in = (int16_t *)tl.addresses[0][tensor_loc];
p_in += chunk_idx*chunk_size;
int16_t* p_rem = (int16_t *)tl.addresses[1][tensor_loc];
p_rem += chunk_idx*chunk_size;
float* m = (float *)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
float* v = (float *)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
float* g = (float *)tl.addresses[4][tensor_loc];
g += chunk_idx*chunk_size;
int16_t* p_out = (int16_t *)tl.addresses[5][tensor_loc];
p_out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
n = chunk_size < n ? chunk_size : n;
const bool aligned = (n % ILP == 0 &&
is_aligned(p_in) &&
is_aligned(p_rem) &&
is_aligned(m) &&
is_aligned(v) &&
is_aligned(g) &&
is_aligned(p_out));
for (int i_start = threadIdx.x*ILP; i_start < n; i_start += blockDim.x*ILP) {
union fp32_or_int162 {
float fp32;
int16_t int16[2];
};
fp32_or_int162 local_p[ILP];
int16_t local_p_bf16[ILP];
int16_t local_p_rem[ILP];
float local_m[ILP];
float local_v[ILP];
float local_g[ILP];
// Load
if (aligned) {
load_store(local_p_bf16, p_in + i_start);
load_store(local_p_rem, p_rem + i_start);
load_store(local_m, m + i_start);
load_store(local_v, v + i_start);
load_store(local_g, g + i_start);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
local_p_bf16[ii] = p_in[i];
local_p_rem[ii] = p_rem[i];
local_m[ii] = m[i];
local_v[ii] = v[i];
local_g[ii] = g[i];
} else {
local_p_bf16[ii] = 0;
local_p_rem[ii] = 0;
local_m[ii] = 0;
local_v[ii] = 0;
local_g[ii] = 0;
}
}
}
// Reconstruct FP32 params
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
if (local_p_rem[ii] < 0)
local_p_bf16[ii]--; // Undo rounding
local_p[ii].int16[1] = local_p_bf16[ii];
local_p[ii].int16[0] = local_p_rem[ii];
}
// Local compute
using LocalFunctor = DistAdamFunctor<float, float, void>;
LocalFunctor::local_step(
reinterpret_cast<float *>(local_p), local_m, local_v, local_g, grad_scale,
beta1, beta2, beta1_correction, beta2_correction,
eps, lr, mode, weight_decay);
// Split into BF16 params (rounded-to-nearest) and remainders
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
local_p_bf16[ii] = local_p[ii].int16[1];
local_p_rem[ii] = local_p[ii].int16[0];
if (local_p_rem[ii] < 0)
local_p_bf16[ii]++; // Round up
}
// Store
if (aligned) {
load_store(p_rem + i_start, local_p_rem);
load_store(m + i_start, local_m);
load_store(v + i_start, local_v);
load_store(p_out + i_start, local_p_bf16);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
p_rem[i] = local_p_rem[ii];
m[i] = local_m[ii];
v[i] = local_v[ii];
p_out[i] = local_p_bf16[ii];
}
}
}
}
}
};
void multi_tensor_fused_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists, // p_in, m, v, g, p_out
at::Tensor grad_scale,
float lr,
float beta1,
float beta2,
float eps,
int step,
int mode,
int bias_correction,
float weight_decay)
{
using namespace at;
// Expect p_in, m, v, g, p_out
size_t tl_sz = tensor_lists.size();
TORCH_CHECK(tl_sz == 5, "expected tensor lists of size 5");
// Assume p_in and g have same type
auto p_in_type = tensor_lists[0][0].scalar_type();
auto g_type = tensor_lists[3][0].scalar_type();
auto p_out_type = tensor_lists[4][0].scalar_type();
TORCH_CHECK(p_in_type == g_type, "expected main params and grads to have same type");
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - ::pow(beta1, step);
beta2_correction = 1 - ::pow(beta2, step);
}
DISPATCH_FLOAT_HALF_AND_BFLOAT(p_in_type, 0, "dist_adam_cuda_kernel",
DISPATCH_FLOAT_HALF_AND_BFLOAT(p_out_type, 1, "dist_adam_cuda_kernel",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistAdamFunctor<scalar_t_0, scalar_t_0, scalar_t_1>(),
grad_scale.DATA_PTR<float>(),
beta1,
beta2,
beta1_correction,
beta2_correction,
eps,
lr,
(adamMode_t) mode,
weight_decay);
));
C10_HIP_CHECK(hipGetLastError());
}
void multi_tensor_fused_adam_with_param_remainders_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists, // p_in, p_rem, m, v, g, p_out
at::Tensor grad_scale,
float lr,
float beta1,
float beta2,
float eps,
int step,
int mode,
int bias_correction,
float weight_decay)
{
using namespace at;
// Expect p_in, p_rem, m, v, g, p_out
size_t tl_sz = tensor_lists.size();
TORCH_CHECK(tl_sz == 6, "expected tensor lists of size 6");
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - ::pow(beta1, step);
beta2_correction = 1 - ::pow(beta2, step);
}
multi_tensor_apply<6>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistAdamWithParamRemaindersFunctor(),
grad_scale.DATA_PTR<float>(),
beta1,
beta2,
beta1_correction,
beta2_correction,
eps,
lr,
(adamMode_t) mode,
weight_decay);
C10_HIP_CHECK(hipGetLastError());
}
| dedef7803a05453fcde97d3a2da39742cd9b0368.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include <cmath>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(const T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(
T* dst,
const T* src,
int dst_offset = 0,
int src_offset = 0){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((const LT*)src)[src_offset];
}
// (1-t)*x + t*y
__device__ __forceinline__ float lerp(float t, float x, float y) {
// See https://developer.nvidia.com/blog/lerp-faster-cuda/
return fma(t, y, fma(-t, x, x));
}
typedef enum{
ADAM_MODE_0 =0, // L2 regularization mode
ADAM_MODE_1 =1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
/* Multi-tensor Adam
*
* Updates params in-place and outputs a copy with a desired datatype.
*/
template <typename T, typename GRAD_T, typename PARAM_OUT_T>
struct DistAdamFunctor
{
// Vectorized local compute
__device__ __forceinline__ static void local_step(
T p[ILP],
T m[ILP],
T v[ILP],
const GRAD_T g[ILP],
const float grad_scale,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) {
if (mode == ADAM_MODE_0) { // L2
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
float scaled_grad = (g[ii] * grad_scale) + (weight_decay * p[ii]);
float next_m = lerp(beta1, scaled_grad, m[ii]);
float next_v = lerp(beta2, scaled_grad*scaled_grad, v[ii]);
float next_m_unbiased = next_m / beta1_correction;
float next_v_unbiased = next_v / beta2_correction;
float denom = sqrtf(next_v_unbiased) + eps;
float update = next_m_unbiased / denom;
m[ii] = next_m;
v[ii] = next_v;
p[ii] -= lr * update;
}
} else { // weight decay
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
float scaled_grad = g[ii] * grad_scale;
float next_m = lerp(beta1, scaled_grad, m[ii]);
float next_v = lerp(beta2, scaled_grad*scaled_grad, v[ii]);
float next_m_unbiased = next_m / beta1_correction;
float next_v_unbiased = next_v / beta2_correction;
float denom = sqrtf(next_v_unbiased) + eps;
float update = (next_m_unbiased / denom) + (weight_decay * p[ii]);
m[ii] = next_m;
v[ii] = next_v;
p[ii] -= lr * update;
}
}
}
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const float* grad_scale_ptr,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) const
{
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
const float grad_scale = *grad_scale_ptr;
T* p_in = (T *)tl.addresses[0][tensor_loc];
p_in += chunk_idx*chunk_size;
T* m = (T *)tl.addresses[1][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T *)tl.addresses[2][tensor_loc];
v += chunk_idx*chunk_size;
const GRAD_T* g = (GRAD_T *)tl.addresses[3][tensor_loc];
g += chunk_idx*chunk_size;
PARAM_OUT_T* p_out = (PARAM_OUT_T *)tl.addresses[4][tensor_loc];
p_out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
n = chunk_size < n ? chunk_size : n;
const bool aligned = (n % ILP == 0 &&
is_aligned(p_in) &&
is_aligned(m) &&
is_aligned(v) &&
is_aligned(g) &&
is_aligned(p_out));
for (int i_start = threadIdx.x*ILP; i_start < n; i_start += blockDim.x*ILP) {
T local_p[ILP];
T local_m[ILP];
T local_v[ILP];
GRAD_T local_g[ILP];
PARAM_OUT_T local_p_out[ILP];
// Load
if (aligned) {
load_store(local_p, p_in + i_start);
load_store(local_m, m + i_start);
load_store(local_v, v + i_start);
load_store(local_g, g + i_start);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
local_p[ii] = p_in[i];
local_m[ii] = m[i];
local_v[ii] = v[i];
local_g[ii] = g[i];
} else {
local_p[ii] = 0;
local_m[ii] = 0;
local_v[ii] = 0;
local_g[ii] = 0;
}
}
}
// Local compute
local_step(
local_p, local_m, local_v, local_g, grad_scale,
beta1, beta2, beta1_correction, beta2_correction,
eps, lr, mode, weight_decay);
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
local_p_out[ii] = static_cast<PARAM_OUT_T>(local_p[ii]);
}
// Store
if (aligned) {
load_store(p_in + i_start, local_p);
load_store(m + i_start, local_m);
load_store(v + i_start, local_v);
load_store(p_out + i_start, local_p_out);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
p_in[i] = local_p[ii];
m[i] = local_m[ii];
v[i] = local_v[ii];
p_out[i] = local_p_out[ii];
}
}
}
}
}
};
/* Functor for multi-tensor Adam with implicit main params
*
* If params are BF16 and optimizer state is FP32, it is not necessary
* to store FP32 main params. Instead, store 16-bit param remainder
* and combine with BF16 param to reconstruct the FP32 main param.
*/
struct DistAdamWithParamRemaindersFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<6>& tl,
const float* grad_scale_ptr,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float eps,
const float lr,
adamMode_t mode,
const float weight_decay) const
{
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
const float grad_scale = *grad_scale_ptr;
int16_t* p_in = (int16_t *)tl.addresses[0][tensor_loc];
p_in += chunk_idx*chunk_size;
int16_t* p_rem = (int16_t *)tl.addresses[1][tensor_loc];
p_rem += chunk_idx*chunk_size;
float* m = (float *)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
float* v = (float *)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
float* g = (float *)tl.addresses[4][tensor_loc];
g += chunk_idx*chunk_size;
int16_t* p_out = (int16_t *)tl.addresses[5][tensor_loc];
p_out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
n = chunk_size < n ? chunk_size : n;
const bool aligned = (n % ILP == 0 &&
is_aligned(p_in) &&
is_aligned(p_rem) &&
is_aligned(m) &&
is_aligned(v) &&
is_aligned(g) &&
is_aligned(p_out));
for (int i_start = threadIdx.x*ILP; i_start < n; i_start += blockDim.x*ILP) {
union fp32_or_int162 {
float fp32;
int16_t int16[2];
};
fp32_or_int162 local_p[ILP];
int16_t local_p_bf16[ILP];
int16_t local_p_rem[ILP];
float local_m[ILP];
float local_v[ILP];
float local_g[ILP];
// Load
if (aligned) {
load_store(local_p_bf16, p_in + i_start);
load_store(local_p_rem, p_rem + i_start);
load_store(local_m, m + i_start);
load_store(local_v, v + i_start);
load_store(local_g, g + i_start);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
local_p_bf16[ii] = p_in[i];
local_p_rem[ii] = p_rem[i];
local_m[ii] = m[i];
local_v[ii] = v[i];
local_g[ii] = g[i];
} else {
local_p_bf16[ii] = 0;
local_p_rem[ii] = 0;
local_m[ii] = 0;
local_v[ii] = 0;
local_g[ii] = 0;
}
}
}
// Reconstruct FP32 params
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
if (local_p_rem[ii] < 0)
local_p_bf16[ii]--; // Undo rounding
local_p[ii].int16[1] = local_p_bf16[ii];
local_p[ii].int16[0] = local_p_rem[ii];
}
// Local compute
using LocalFunctor = DistAdamFunctor<float, float, void>;
LocalFunctor::local_step(
reinterpret_cast<float *>(local_p), local_m, local_v, local_g, grad_scale,
beta1, beta2, beta1_correction, beta2_correction,
eps, lr, mode, weight_decay);
// Split into BF16 params (rounded-to-nearest) and remainders
#pragma unroll
for (int ii = 0; ii < ILP; ii++) {
local_p_bf16[ii] = local_p[ii].int16[1];
local_p_rem[ii] = local_p[ii].int16[0];
if (local_p_rem[ii] < 0)
local_p_bf16[ii]++; // Round up
}
// Store
if (aligned) {
load_store(p_rem + i_start, local_p_rem);
load_store(m + i_start, local_m);
load_store(v + i_start, local_v);
load_store(p_out + i_start, local_p_bf16);
} else {
#pragma unroll
for (int ii = 0, i = i_start; ii < ILP; ii++, i++) {
if (i < n) {
p_rem[i] = local_p_rem[ii];
m[i] = local_m[ii];
v[i] = local_v[ii];
p_out[i] = local_p_bf16[ii];
}
}
}
}
}
};
void multi_tensor_fused_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists, // p_in, m, v, g, p_out
at::Tensor grad_scale,
float lr,
float beta1,
float beta2,
float eps,
int step,
int mode,
int bias_correction,
float weight_decay)
{
using namespace at;
// Expect p_in, m, v, g, p_out
size_t tl_sz = tensor_lists.size();
TORCH_CHECK(tl_sz == 5, "expected tensor lists of size 5");
// Assume p_in and g have same type
auto p_in_type = tensor_lists[0][0].scalar_type();
auto g_type = tensor_lists[3][0].scalar_type();
auto p_out_type = tensor_lists[4][0].scalar_type();
TORCH_CHECK(p_in_type == g_type, "expected main params and grads to have same type");
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - std::pow(beta1, step);
beta2_correction = 1 - std::pow(beta2, step);
}
DISPATCH_FLOAT_HALF_AND_BFLOAT(p_in_type, 0, "dist_adam_cuda_kernel",
DISPATCH_FLOAT_HALF_AND_BFLOAT(p_out_type, 1, "dist_adam_cuda_kernel",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistAdamFunctor<scalar_t_0, scalar_t_0, scalar_t_1>(),
grad_scale.DATA_PTR<float>(),
beta1,
beta2,
beta1_correction,
beta2_correction,
eps,
lr,
(adamMode_t) mode,
weight_decay);
));
C10_CUDA_CHECK(cudaGetLastError());
}
void multi_tensor_fused_adam_with_param_remainders_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists, // p_in, p_rem, m, v, g, p_out
at::Tensor grad_scale,
float lr,
float beta1,
float beta2,
float eps,
int step,
int mode,
int bias_correction,
float weight_decay)
{
using namespace at;
// Expect p_in, p_rem, m, v, g, p_out
size_t tl_sz = tensor_lists.size();
TORCH_CHECK(tl_sz == 6, "expected tensor lists of size 6");
float beta1_correction = 1.0f, beta2_correction = 1.0f;
if (bias_correction == 1) {
beta1_correction = 1 - std::pow(beta1, step);
beta2_correction = 1 - std::pow(beta2, step);
}
multi_tensor_apply<6>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistAdamWithParamRemaindersFunctor(),
grad_scale.DATA_PTR<float>(),
beta1,
beta2,
beta1_correction,
beta2_correction,
eps,
lr,
(adamMode_t) mode,
weight_decay);
C10_CUDA_CHECK(cudaGetLastError());
}
|
e901d2480d626f298533bd18b089efb7bf2daa80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int* a,int* b,int* c,int* n)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
if(id<*n)
c[id]=a[id]+b[id];
}
int main()
{
int a[100],b[100],c[100],n,*da,*db,*dc;
int *dn;
printf("Enter size: ");
scanf("%d",&n);
printf("Enter elements for A: ");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter elements for B: ");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
hipMalloc((void**)&da,n*sizeof(int));
hipMalloc((void**)&db,n*sizeof(int));
hipMalloc((void**)&dc,n*sizeof(int));
hipMalloc((void**)&dn,sizeof(int));
hipMemcpy(da,a,n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(db,b,n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dn,&n,sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(n),dim3(1), 0, 0, da,db,dc,dn);
hipMemcpy(c,dc,n*sizeof(int),hipMemcpyDeviceToHost);
printf("\nBlock size as N: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
hipLaunchKernelGGL(( add), dim3(1),dim3(n), 0, 0, da,db,dc,dn);
hipMemcpy(c,dc,n*sizeof(int),hipMemcpyDeviceToHost);
printf("\nN Threads: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
int tsize=256;
hipLaunchKernelGGL(( add), dim3((n+tsize-1)/tsize),dim3(tsize), 0, 0, da,db,dc,dn);
hipMemcpy(c,dc,n*sizeof(int),hipMemcpyDeviceToHost);
printf("\n256 threads: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
hipFree(da);
hipFree(db);
hipFree(dc);
hipFree(dn);
} | e901d2480d626f298533bd18b089efb7bf2daa80.cu | #include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int* a,int* b,int* c,int* n)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
if(id<*n)
c[id]=a[id]+b[id];
}
int main()
{
int a[100],b[100],c[100],n,*da,*db,*dc;
int *dn;
printf("Enter size: ");
scanf("%d",&n);
printf("Enter elements for A: ");
for(int i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter elements for B: ");
for(int i=0;i<n;i++)
scanf("%d",&b[i]);
cudaMalloc((void**)&da,n*sizeof(int));
cudaMalloc((void**)&db,n*sizeof(int));
cudaMalloc((void**)&dc,n*sizeof(int));
cudaMalloc((void**)&dn,sizeof(int));
cudaMemcpy(da,a,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(db,b,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dn,&n,sizeof(int),cudaMemcpyHostToDevice);
add<<<n,1>>>(da,db,dc,dn);
cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nBlock size as N: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
add<<<1,n>>>(da,db,dc,dn);
cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nN Threads: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
int tsize=256;
add<<<(n+tsize-1)/tsize,tsize>>>(da,db,dc,dn);
cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
printf("\n256 threads: ");
for(int i=0;i<n;i++)
printf("%d+%d=%d\n",a[i],b[i],c[i]);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
cudaFree(dn);
} |
501389784f346b8f716352e5c1efa84227b7236b.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
__global__ void Asum(int *a, int *b, int *c){
*c = *a + *b;
}
int main(void){
int a, b, c;
int *dev_a, *dev_b, *dev_c; //These are pointers to a memory slot ON DEVICE
int size = sizeof(int); //memory size in bytes
hipMalloc((void**)&dev_a,size); //hipMalloc() allocates a memory slot on device (GPU memory)
//this slot equals size bytes
//void** assures that pointers won't have trouble getting a variable that is not an int type
//dev_a now points to the allocated slot
hipMalloc((void**)&dev_b,size);
hipMalloc((void**)&dev_c,size); //conclusion: pointers are referencing a position that is avaliable from DEVICE
//a, b and c positions are not avaliable from device, a priori
a = 2;
b = 7;
c = 8;
hipMemcpy(dev_a,&a,size, hipMemcpyHostToDevice); //note that &a is used
hipMemcpy(dev_b,&b,size, hipMemcpyHostToDevice); //hipMemcpy(*destiny, *source, size, hipMemcpyKind)
// Asum<<<1,1>>>(dev_a,dev_b,dev_c);
ESBMC_verify_kernel(Asum, 1,2,dev_a,dev_b, dev_c);
hipMemcpy(&c,dev_c,size,hipMemcpyDeviceToHost);
printf("a + b = %d\n", c);
assert(c != a+b);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 501389784f346b8f716352e5c1efa84227b7236b.cu | #include <call_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <assert.h>
__global__ void Asum(int *a, int *b, int *c){
*c = *a + *b;
}
int main(void){
int a, b, c;
int *dev_a, *dev_b, *dev_c; //These are pointers to a memory slot ON DEVICE
int size = sizeof(int); //memory size in bytes
cudaMalloc((void**)&dev_a,size); //cudaMalloc() allocates a memory slot on device (GPU memory)
//this slot equals size bytes
//void** assures that pointers won't have trouble getting a variable that is not an int type
//dev_a now points to the allocated slot
cudaMalloc((void**)&dev_b,size);
cudaMalloc((void**)&dev_c,size); //conclusion: pointers are referencing a position that is avaliable from DEVICE
//a, b and c positions are not avaliable from device, a priori
a = 2;
b = 7;
c = 8;
cudaMemcpy(dev_a,&a,size, cudaMemcpyHostToDevice); //note that &a is used
cudaMemcpy(dev_b,&b,size, cudaMemcpyHostToDevice); //cudaMemcpy(*destiny, *source, size, cudaMemcpyKind)
// Asum<<<1,1>>>(dev_a,dev_b,dev_c);
ESBMC_verify_kernel(Asum, 1,2,dev_a,dev_b, dev_c);
cudaMemcpy(&c,dev_c,size,cudaMemcpyDeviceToHost);
printf("a + b = %d\n", c);
assert(c != a+b);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
aa8363ab40b554c291117f7be8dc2cfcf8943a1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "labeling.h"
#define BLOCK_SIZE 512
__global__
void labling_kernel(const char *cuStr, int *cuPos, const int strLen) {
__shared__ int local_pos[BLOCK_SIZE];
int pos_index = threadIdx.x + blockIdx.x*blockDim.x;
int index = threadIdx.x;
if (pos_index >= strLen) {
return;
}
// thrust::tabulate, mark_spaces
local_pos[index] = (cuStr[pos_index] != ' ') ? -1 : index;
__syncthreads();
// thrust::inclusive_scan, thrust::maximum<int>
for (int offset = 1; offset <= index; offset *= 2) {
if (local_pos[index] < local_pos[index-offset]) {
local_pos[index] = local_pos[index-offset];
}
__syncthreads();
}
// thrust::tabulate, sub_offset
cuPos[pos_index] = index - local_pos[index];
}
__global__
void patch_kernel(int *cuPos, const int strLen) {
int pos_index = threadIdx.x + blockIdx.x*blockDim.x;
int index = threadIdx.x;
if (pos_index >= strLen) {
return;
}
// cross blocks
if (blockIdx.x > 0 && cuPos[pos_index] == (index+1)) {
cuPos[pos_index] += cuPos[blockIdx.x*blockDim.x-1];
}
}
void labeling(const char *cuStr, int *cuPos, int strLen) {
int n_blocks = (strLen + BLOCK_SIZE-1) / BLOCK_SIZE;
hipLaunchKernelGGL(( labling_kernel), dim3(n_blocks), dim3(BLOCK_SIZE), 0, 0, cuStr, cuPos, strLen);
hipLaunchKernelGGL(( patch_kernel), dim3(n_blocks), dim3(BLOCK_SIZE), 0, 0, cuPos, strLen);
}
| aa8363ab40b554c291117f7be8dc2cfcf8943a1f.cu | #include "labeling.h"
#define BLOCK_SIZE 512
__global__
void labling_kernel(const char *cuStr, int *cuPos, const int strLen) {
    // For each character, compute its offset since the most recent space in
    // this block: a block-local inclusive max-scan of space positions followed
    // by a subtraction. Positions with no preceding space in the block get
    // index+1, which patch_kernel later uses to detect cross-block words.
    __shared__ int local_pos[BLOCK_SIZE];
    int pos_index = threadIdx.x + blockIdx.x*blockDim.x;
    int index = threadIdx.x;
    bool active = (pos_index < strLen);
    // thrust::tabulate, mark_spaces: a space marks its own local index,
    // anything else -1. Inactive tail lanes also hold -1 instead of
    // returning early, so every thread reaches every barrier below
    // (an early return before __syncthreads() is undefined behavior).
    local_pos[index] = (active && cuStr[pos_index] == ' ') ? index : -1;
    __syncthreads();
    // thrust::inclusive_scan, thrust::maximum<int> (Hillis-Steele).
    // The loop trip count is uniform across the block, and the read of the
    // neighbour slot is separated from the write by a barrier, so no thread
    // can observe a partially updated value from the same pass.
    for (int offset = 1; offset < blockDim.x; offset *= 2) {
        int prev = (index >= offset) ? local_pos[index-offset] : -1;
        __syncthreads();
        if (prev > local_pos[index]) {
            local_pos[index] = prev;
        }
        __syncthreads();
    }
    // thrust::tabulate, sub_offset: distance from the last space (index+1
    // when no space precedes this position within the block).
    if (active) {
        cuPos[pos_index] = index - local_pos[index];
    }
}
// Second pass: fix up labels that cross a block boundary. A label equal to
// index+1 means no space was found before this position inside the block, so
// the true distance continues into the previous block; the distance already
// computed for the previous block's last character is added on.
__global__
void patch_kernel(int *cuPos, const int strLen) {
    int pos_index = threadIdx.x + blockIdx.x*blockDim.x;  // global character index
    int index = threadIdx.x;                              // index within the block
    if (pos_index >= strLen) {
        return;
    }
    // cross blocks
    // NOTE(review): cuPos[blockIdx.x*blockDim.x-1] is the previous block's last
    // element, which this same kernel may be patching concurrently when a run of
    // non-space characters fills an entire block — looks like an inter-block
    // read/write race for words longer than blockDim.x; confirm inputs never
    // contain such runs, or serialize/iterate this pass.
    if (blockIdx.x > 0 && cuPos[pos_index] == (index+1)) {
        cuPos[pos_index] += cuPos[blockIdx.x*blockDim.x-1];
    }
}
// Labels each character of the device string with its offset since the most
// recent space, in two passes: a per-block scan followed by a cross-block patch.
void labeling(const char *cuStr, int *cuPos, int strLen) {
    const int gridSize = (strLen + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div
    labling_kernel<<<gridSize, BLOCK_SIZE>>>(cuStr, cuPos, strLen);
    patch_kernel<<<gridSize, BLOCK_SIZE>>>(cuPos, strLen);
}
|
dd88ae5025a4bf6afd3f0e5034f8e222d6ad0e71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_2_back;
int xdim0_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_2_back;
int ydim0_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_2_back;
int xdim1_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_2_back;
int ydim1_update_halo_kernel3_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_plus_2_back*(y)+xdim0_update_halo_kernel3_plus_2_back*ydim0_update_halo_kernel3_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_plus_2_back*(y)+xdim1_update_halo_kernel3_plus_2_back*ydim1_update_halo_kernel3_plus_2_back*(z))
//user function
// Copies vol_flux_x / mass_flux_x from two planes inward in z (offset (0,0,2))
// onto the current halo point, for whichever fields are enabled in the
// `fields` flag array. Indexing goes through OPS_ACC0/OPS_ACC1, which
// linearise (x,y,z) using the __constant__ xdim/ydim extents declared above.
__device__
inline void update_halo_kernel3_plus_2_back_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) {
  if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = vol_flux_x[OPS_ACC0(0,0,2)];
  if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = mass_flux_x[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Device-side wrapper: one thread per grid point of the iteration range.
// Each thread offsets the two data pointers to its own (x,y,z) element using
// the __constant__ x/y extents, then applies the user function if it lies
// inside the (size0,size1,size2) range.
__global__ void ops_update_halo_kernel3_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel3_plus_2_back * ydim0_update_halo_kernel3_plus_2_back;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel3_plus_2_back * ydim1_update_halo_kernel3_plus_2_back;
  // The grid is rounded up to whole blocks, so trailing threads are masked off.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel3_plus_2_back_gpu(arg0, arg1, arg2);
  }
}
// host stub function
// Direct (non-lazy) entry point: computes the locally owned iteration range,
// refreshes the __constant__ extents, stages the global constant array,
// derives device base pointers for the range start, and launches the wrapper
// kernel. Under OPS_LAZY the same body compiles as the deferred _execute
// callback invoked by the lazy-execution queue with a stored descriptor.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_2_back_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[3] = { arg0, arg1, arg2};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,3,range,68)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(68,"update_halo_kernel3_plus_2_back");
    OPS_kernels[68].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  // Under MPI, clip the global range to the portion owned by this rank.
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  // Re-upload the __constant__ dimension symbols only when a dat size changed
  // since the last launch (the *_h shadows cache the device-side values).
  if (xdim0 != xdim0_update_halo_kernel3_plus_2_back_h || ydim0 != ydim0_update_halo_kernel3_plus_2_back_h || xdim1 != xdim1_update_halo_kernel3_plus_2_back_h || ydim1 != ydim1_update_halo_kernel3_plus_2_back_h) {
    hipMemcpyToSymbol( xdim0_update_halo_kernel3_plus_2_back, &xdim0, sizeof(int) );
    xdim0_update_halo_kernel3_plus_2_back_h = xdim0;
    hipMemcpyToSymbol( ydim0_update_halo_kernel3_plus_2_back, &ydim0, sizeof(int) );
    ydim0_update_halo_kernel3_plus_2_back_h = ydim0;
    hipMemcpyToSymbol( xdim1_update_halo_kernel3_plus_2_back, &xdim1, sizeof(int) );
    xdim1_update_halo_kernel3_plus_2_back_h = xdim1;
    hipMemcpyToSymbol( ydim1_update_halo_kernel3_plus_2_back, &ydim1, sizeof(int) );
    ydim1_update_halo_kernel3_plus_2_back_h = ydim1;
  }
  int *arg2h = (int *)arg2.data;
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  // Stage the global int array (the field flags) into the shared OPS constant
  // staging buffer and copy it to the device.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  char *p_a[3];
  //set up initial pointers
  // Byte offset of the range start inside dat 0: x, then y, then z strides.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args,3,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[68].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  // Skip the launch entirely for an empty local range.
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
         (int *)arg2.data_d,x_size, y_size, z_size);
  cutilSafeCall(hipGetLastError());
  if (OPS_diags>1) {
    // Synchronize only when diagnostics are on, so timings are accurate.
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[68].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[68].mpi_time += t2-t1;
    OPS_kernels[68].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[68].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point: records the loop as a kernel descriptor (with a
// djb2-style hash over kernel index, range and dat indices) and enqueues it;
// the stored _execute callback performs the actual launch later.
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 68;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 68;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 3;
  desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  // Deep-copy the global int array so the caller's buffer may change or be
  // freed before the deferred execution runs.
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
  desc->args[2].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel3_plus_2_back_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(68,"update_halo_kernel3_plus_2_back");
  }
  ops_enqueue_kernel(desc);
}
#endif
| dd88ae5025a4bf6afd3f0e5034f8e222d6ad0e71.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_2_back;
int xdim0_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_2_back;
int ydim0_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_2_back;
int xdim1_update_halo_kernel3_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_2_back;
int ydim1_update_halo_kernel3_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_plus_2_back*(y)+xdim0_update_halo_kernel3_plus_2_back*ydim0_update_halo_kernel3_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_plus_2_back*(y)+xdim1_update_halo_kernel3_plus_2_back*ydim1_update_halo_kernel3_plus_2_back*(z))
//user function
// Copies vol_flux_x / mass_flux_x from two planes inward in z (offset (0,0,2))
// onto the current halo point, for whichever fields are enabled in the
// `fields` flag array. Indexing goes through OPS_ACC0/OPS_ACC1, which
// linearise (x,y,z) using the __constant__ xdim/ydim extents declared above.
__device__
inline void update_halo_kernel3_plus_2_back_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) {
  if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = vol_flux_x[OPS_ACC0(0,0,2)];
  if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = mass_flux_x[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Device-side wrapper: one thread per grid point of the iteration range.
// Each thread offsets the two data pointers to its own (x,y,z) element using
// the __constant__ x/y extents, then applies the user function if it lies
// inside the (size0,size1,size2) range.
__global__ void ops_update_halo_kernel3_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel3_plus_2_back * ydim0_update_halo_kernel3_plus_2_back;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel3_plus_2_back * ydim1_update_halo_kernel3_plus_2_back;
  // The grid is rounded up to whole blocks, so trailing threads are masked off.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel3_plus_2_back_gpu(arg0, arg1, arg2);
  }
}
// host stub function
// Direct (non-lazy) entry point: computes the locally owned iteration range,
// refreshes the __constant__ extents, stages the global constant array,
// derives device base pointers for the range start, and launches the wrapper
// kernel. Under OPS_LAZY the same body compiles as the deferred _execute
// callback invoked by the lazy-execution queue with a stored descriptor.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_2_back_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[3] = { arg0, arg1, arg2};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,3,range,68)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(68,"update_halo_kernel3_plus_2_back");
    OPS_kernels[68].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  // Under MPI, clip the global range to the portion owned by this rank.
  int start[3];
  int end[3];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned) return;
  for ( int n=0; n<3; n++ ){
    start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
    if (start[n] >= range[2*n]) {
      start[n] = 0;
    }
    else {
      start[n] = range[2*n] - start[n];
    }
    if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
    if (end[n] >= range[2*n+1]) {
      end[n] = range[2*n+1] - sb->decomp_disp[n];
    }
    else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
      end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
  }
  #else
  for ( int n=0; n<3; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  int z_size = MAX(0,end[2]-start[2]);
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  // Re-upload the __constant__ dimension symbols only when a dat size changed
  // since the last launch (the *_h shadows cache the device-side values).
  if (xdim0 != xdim0_update_halo_kernel3_plus_2_back_h || ydim0 != ydim0_update_halo_kernel3_plus_2_back_h || xdim1 != xdim1_update_halo_kernel3_plus_2_back_h || ydim1 != ydim1_update_halo_kernel3_plus_2_back_h) {
    cudaMemcpyToSymbol( xdim0_update_halo_kernel3_plus_2_back, &xdim0, sizeof(int) );
    xdim0_update_halo_kernel3_plus_2_back_h = xdim0;
    cudaMemcpyToSymbol( ydim0_update_halo_kernel3_plus_2_back, &ydim0, sizeof(int) );
    ydim0_update_halo_kernel3_plus_2_back_h = ydim0;
    cudaMemcpyToSymbol( xdim1_update_halo_kernel3_plus_2_back, &xdim1, sizeof(int) );
    xdim1_update_halo_kernel3_plus_2_back_h = xdim1;
    cudaMemcpyToSymbol( ydim1_update_halo_kernel3_plus_2_back, &ydim1, sizeof(int) );
    ydim1_update_halo_kernel3_plus_2_back_h = ydim1;
  }
  int *arg2h = (int *)arg2.data;
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  // Stage the global int array (the field flags) into the shared OPS constant
  // staging buffer and copy it to the device.
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  char *p_a[3];
  //set up initial pointers
  // Byte offset of the range start inside dat 0: x, then y, then z strides.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    args[0].dat->size[1] *
    (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    args[1].dat->size[1] *
    (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args,3,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[68].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  // Skip the launch entirely for an empty local range.
  if (x_size > 0 && y_size > 0 && z_size > 0)
    ops_update_halo_kernel3_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
         (int *)arg2.data_d,x_size, y_size, z_size);
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags>1) {
    // Synchronize only when diagnostics are on, so timings are accurate.
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[68].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0],range);
  ops_set_halo_dirtybit3(&args[1],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[68].mpi_time += t2-t1;
    OPS_kernels[68].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[68].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
#ifdef OPS_LAZY
// Lazy-execution entry point: records the loop as a kernel descriptor (with a
// djb2-style hash over kernel index, range and dat indices) and enqueues it;
// the stored _execute callback performs the actual launch later.
void ops_par_loop_update_halo_kernel3_plus_2_back(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 68;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 68;
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 3;
  desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  // Deep-copy the global int array so the caller's buffer may change or be
  // freed before the deferred execution runs.
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
  desc->args[2].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel3_plus_2_back_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(68,"update_halo_kernel3_plus_2_back");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
3454e2f25cb2a750e24419fb8a300223951ef006.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
// Element-wise in-place vector addition: x[:] += y[:].
// One thread per element; the grid is rounded up, so the tail is guarded.
__global__ void kernel(int* x, int* y, int n) {
  // Global thread ID across the 1D grid.
  size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  // Guard: threads past the end of the arrays must not touch memory.
  if (tid < (size_t)n) {
    // x[:] += y[:] (add each element of y to the corresponding element of x,
    // and store the result in x)
    x[tid] += y[tid];
  }
}
// Host driver: fills two length-1000 vectors with ones, runs the in-place
// addition kernel on the GPU, then verifies every result element equals 2.
int main(int argc, char** argv) {
  const size_t n = 1000;
  const size_t bytes = sizeof(int) * n;
  std::vector<int> x(n, 1);
  std::vector<int> y(n, 1);
  // Allocate device buffers and upload both inputs.
  int* d_x;
  hipMalloc(&d_x, bytes);
  int* d_y;
  hipMalloc(&d_y, bytes);
  hipMemcpy(d_x, x.data(), bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_y, y.data(), bytes, hipMemcpyHostToDevice);
  hipDeviceSynchronize();
  // Round the grid up so every element is covered by some thread.
  const size_t block_size = 256;
  dim3 grid((n + block_size - 1) / block_size);
  dim3 block(block_size);
  hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_x, d_y, n);
  // Copy the result back and check it.
  hipMemcpy(x.data(), d_x, bytes, hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  bool all_twos = true;
  for (int v : x) {
    if (v != 2) {
      all_twos = false;
      break;
    }
  }
  if (all_twos) {
    printf("OK!\n");
  } else {
    printf("FAILED.\n");
  }
  hipFree(d_x);
  hipFree(d_y);
  return 0;
}
| 3454e2f25cb2a750e24419fb8a300223951ef006.cu | #include <cuda.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
// Element-wise in-place vector addition: x[:] += y[:].
// One thread per element; the grid is rounded up, so the tail is guarded.
__global__ void kernel(int* x, int* y, int n) {
  // Global thread ID across the 1D grid.
  size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  // Guard: threads past the end of the arrays must not touch memory.
  if (tid < (size_t)n) {
    // x[:] += y[:] (add each element of y to the corresponding element of x,
    // and store the result in x)
    x[tid] += y[tid];
  }
}
// Host driver: fills two length-1000 vectors with ones, runs the in-place
// addition kernel on the GPU, then verifies every result element equals 2.
int main(int argc, char** argv) {
  const size_t n = 1000;
  const size_t bytes = sizeof(int) * n;
  std::vector<int> x(n, 1);
  std::vector<int> y(n, 1);
  // Allocate device buffers and upload both inputs.
  int* d_x;
  cudaMalloc(&d_x, bytes);
  int* d_y;
  cudaMalloc(&d_y, bytes);
  cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y.data(), bytes, cudaMemcpyHostToDevice);
  cudaDeviceSynchronize();
  // Round the grid up so every element is covered by some thread.
  const size_t block_size = 256;
  dim3 grid((n + block_size - 1) / block_size);
  dim3 block(block_size);
  kernel<<<grid, block>>>(d_x, d_y, n);
  // Copy the result back and check it.
  cudaMemcpy(x.data(), d_x, bytes, cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  bool all_twos = true;
  for (int v : x) {
    if (v != 2) {
      all_twos = false;
      break;
    }
  }
  if (all_twos) {
    printf("OK!\n");
  } else {
    printf("FAILED.\n");
  }
  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}
|
0ce093cb8b0f291915d11750e230c716cdafeb70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Per-atom record staged in shared memory by the interaction kernels.
typedef struct {
    real x, y, z;        // position
    real q;              // charge
    real fx, fy, fz;     // force accumulated for this atom within the tile
    ATOM_PARAMETER_DATA  // extra per-atom fields spliced in by the host code generator
#ifndef PARAMETER_SIZE_IS_EVEN
    real padding;        // presumably keeps the struct size even -- TODO confirm intent
#endif
} AtomData;
/**
 * Butterfly-reduce the maximum of val across all 32 lanes of the warp so that
 * every lane ends up holding the warp-wide maximum. This is only needed on
 * Volta and later; on earlier architectures the value is returned unchanged.
 */
__device__ int reduceMax(int val) {
#if __CUDA_ARCH__ >= 700
    for (int offset = 16; offset > 0; offset >>= 1) {
        int other = __shfl_xor_sync(0xffffffff, val, offset);
        val = max(val, other);
    }
#endif
    return val;
}
/**
 * Compute forces and energy for explicitly listed interaction groups. Each
 * warp walks a contiguous range of tiles from groupData; every int4 entry
 * packs (atom1, atom2, rangeStart | rangeEnd<<16, exclusion bits). Forces are
 * accumulated into forceBuffers in 64-bit fixed point (scale 2^32) via
 * atomics. COMPUTE_INTERACTION and the *_PARAMETERS/DERIVATIVES macros are
 * spliced in by the host-side code generator.
 */
extern "C" __global__ void computeInteractionGroups(
        unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const int4* __restrict__ groupData,
        const int* __restrict__ numGroupTiles, bool useNeighborList,
        real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
        PARAMETER_ARGUMENTS) {
    const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
    const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
    const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
    const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
    mixed energy = 0;
    INIT_DERIVATIVES
    __shared__ AtomData localData[LOCAL_MEMORY_SIZE];
    // Tiles are split evenly between warps; with a neighbor list only the
    // filtered tile count (numGroupTiles[0]) is processed.
    const unsigned int startTile = (useNeighborList ? warp*numGroupTiles[0]/totalWarps : FIRST_TILE+warp*(LAST_TILE-FIRST_TILE)/totalWarps);
    const unsigned int endTile = (useNeighborList ? (warp+1)*numGroupTiles[0]/totalWarps : FIRST_TILE+(warp+1)*(LAST_TILE-FIRST_TILE)/totalWarps);
    for (int tile = startTile; tile < endTile; tile++) {
        const int4 atomData = groupData[TILE_SIZE*tile+tgx];
        const int atom1 = atomData.x;
        const int atom2 = atomData.y;
        const int rangeStart = atomData.z&0xFFFF;     // first lane index this lane pairs with
        const int rangeEnd = (atomData.z>>16)&0xFFFF; // one past the last lane index
        const int exclusions = atomData.w;            // per-lane interaction bitmask
        real4 posq1 = posq[atom1];
        LOAD_ATOM1_PARAMETERS
        real3 force = make_real3(0);
        // Stage this lane's second atom into shared memory for the whole warp.
        real4 posq2 = posq[atom2];
        localData[threadIdx.x].x = posq2.x;
        localData[threadIdx.x].y = posq2.y;
        localData[threadIdx.x].z = posq2.z;
        localData[threadIdx.x].q = posq2.w;
        LOAD_LOCAL_PARAMETERS
        localData[threadIdx.x].fx = 0.0f;
        localData[threadIdx.x].fy = 0.0f;
        localData[threadIdx.x].fz = 0.0f;
        int tj = tgx;
        // All lanes iterate the warp-wide maximum range so the loop count is
        // uniform; shorter ranges are masked by the j < rangeEnd test below.
        int rangeStop = rangeStart + reduceMax(rangeEnd-rangeStart);
        SYNC_WARPS;
        for (int j = rangeStart; j < rangeStop; j++) {
            if (j < rangeEnd) {
                bool isExcluded = (((exclusions>>tj)&1) == 0);
                int localIndex = tbx+tj;
                posq2 = make_real4(localData[localIndex].x, localData[localIndex].y, localData[localIndex].z, localData[localIndex].q);
                real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(delta)
#endif
                real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
                if (!isExcluded && r2 < CUTOFF_SQUARED) {
#endif
                    real invR = RSQRT(r2);
                    real r = r2*invR;
                    LOAD_ATOM2_PARAMETERS
                    real dEdR = 0.0f;
                    real tempEnergy = 0.0f;
                    const real interactionScale = 1.0f;
                    COMPUTE_INTERACTION
                    energy += tempEnergy;
                    delta *= dEdR;
                    force.x -= delta.x;
                    force.y -= delta.y;
                    force.z -= delta.z;
                    localData[localIndex].fx += delta.x;
                    localData[localIndex].fy += delta.y;
                    localData[localIndex].fz += delta.z;
#ifdef USE_CUTOFF
                }
#endif
                // Rotate through the range so no two lanes touch the same
                // shared-memory slot in the same iteration.
                tj = (tj == rangeEnd-1 ? rangeStart : tj+1);
            }
            SYNC_WARPS;
        }
        // NOTE(review): exclusions == 0 appears to mark entries with no
        // interactions, whose atom1 force is necessarily zero — confirm
        // against the host-side tile construction.
        if (exclusions != 0) {
            atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
            atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
            atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
        }
        atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
        atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
        atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
        SYNC_WARPS;
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
    SAVE_DERIVATIVES
}
/**
 * Reset the filtered tile count to zero when the neighbor list has been
 * flagged for a rebuild. Intended to be executed by a single thread.
 */
extern "C" __global__ void prepareToBuildNeighborList(int* __restrict__ rebuildNeighborList, int* __restrict__ numGroupTiles) {
    const bool needsRebuild = (rebuildNeighborList[0] == 1);
    if (needsRebuild) {
        numGroupTiles[0] = 0;
    }
}
/**
 * Filter the list of tiles to include only ones that have interactions within the
 * padded cutoff. Kept tiles are compacted into filteredGroupData and counted
 * in numGroupTiles[0]; one warp processes one tile at a time.
 */
extern "C" __global__ void buildNeighborList(int* __restrict__ rebuildNeighborList, int* __restrict__ numGroupTiles,
        const real4* __restrict__ posq, const int4* __restrict__ groupData, int4* __restrict__ filteredGroupData,
        real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
    // If the neighbor list doesn't need to be rebuilt on this step, return immediately.
    if (rebuildNeighborList[0] == 0)
        return;
    const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
    const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
    const unsigned int local_warp = threadIdx.x/TILE_SIZE; // local warpIndex
    const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
    const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
    __shared__ real4 localPos[LOCAL_MEMORY_SIZE];
    __shared__ volatile bool anyInteraction[WARPS_IN_BLOCK];
    __shared__ volatile int tileIndex[WARPS_IN_BLOCK];
    const unsigned int startTile = warp*NUM_TILES/totalWarps;
    const unsigned int endTile = (warp+1)*NUM_TILES/totalWarps;
    for (int tile = startTile; tile < endTile; tile++) {
        // Same packed tile layout as computeInteractionGroups:
        // (atom1, atom2, rangeStart | rangeEnd<<16, exclusion bits).
        const int4 atomData = groupData[TILE_SIZE*tile+tgx];
        const int atom1 = atomData.x;
        const int atom2 = atomData.y;
        const int rangeStart = atomData.z&0xFFFF;
        const int rangeEnd = (atomData.z>>16)&0xFFFF;
        const int exclusions = atomData.w;
        real4 posq1 = posq[atom1];
        localPos[threadIdx.x] = posq[atom2];
        if (tgx == 0)
            anyInteraction[local_warp] = false;
        int tj = tgx;
        int rangeStop = rangeStart + reduceMax(rangeEnd-rangeStart);
        SYNC_WARPS;
        // Scan lane pairings until any lane in the warp finds one pair inside
        // the padded cutoff; one hit is enough to keep the whole tile.
        for (int j = rangeStart; j < rangeStop && !anyInteraction[local_warp]; j++) {
            SYNC_WARPS;
            if (j < rangeEnd && tj < rangeEnd) {
                bool isExcluded = (((exclusions>>tj)&1) == 0);
                int localIndex = tbx+tj;
                real3 delta = make_real3(localPos[localIndex].x-posq1.x, localPos[localIndex].y-posq1.y, localPos[localIndex].z-posq1.z);
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(delta)
#endif
                real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
                if (!isExcluded && r2 < PADDED_CUTOFF_SQUARED)
                    anyInteraction[local_warp] = true;
            }
            tj = (tj == rangeEnd-1 ? rangeStart : tj+1);
            SYNC_WARPS;
        }
        if (anyInteraction[local_warp]) {
            SYNC_WARPS;
            // Lane 0 reserves a slot in the compacted list; all lanes then
            // copy their entry of the tile into it.
            if (tgx == 0)
                tileIndex[local_warp] = atomicAdd(numGroupTiles, 1);
            SYNC_WARPS;
            filteredGroupData[TILE_SIZE*tileIndex[local_warp]+tgx] = atomData;
        }
    }
}
| 0ce093cb8b0f291915d11750e230c716cdafeb70.cu | typedef struct {
real x, y, z;
real q;
real fx, fy, fz;
ATOM_PARAMETER_DATA
#ifndef PARAMETER_SIZE_IS_EVEN
real padding;
#endif
} AtomData;
/**
 * Butterfly-reduce the maximum of val across all 32 lanes of the warp so that
 * every lane ends up holding the warp-wide maximum. This is only needed on
 * Volta and later; on earlier architectures the value is returned unchanged.
 */
__device__ int reduceMax(int val) {
#if __CUDA_ARCH__ >= 700
    for (int offset = 16; offset > 0; offset >>= 1) {
        int other = __shfl_xor_sync(0xffffffff, val, offset);
        val = max(val, other);
    }
#endif
    return val;
}
/**
 * Compute forces and energy for explicitly listed interaction groups. Each
 * warp walks a contiguous range of tiles from groupData; every int4 entry
 * packs (atom1, atom2, rangeStart | rangeEnd<<16, exclusion bits). Forces are
 * accumulated into forceBuffers in 64-bit fixed point (scale 2^32) via
 * atomics. COMPUTE_INTERACTION and the *_PARAMETERS/DERIVATIVES macros are
 * spliced in by the host-side code generator.
 */
extern "C" __global__ void computeInteractionGroups(
        unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const int4* __restrict__ groupData,
        const int* __restrict__ numGroupTiles, bool useNeighborList,
        real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
        PARAMETER_ARGUMENTS) {
    const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
    const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
    const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
    const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
    mixed energy = 0;
    INIT_DERIVATIVES
    __shared__ AtomData localData[LOCAL_MEMORY_SIZE];
    // Tiles are split evenly between warps; with a neighbor list only the
    // filtered tile count (numGroupTiles[0]) is processed.
    const unsigned int startTile = (useNeighborList ? warp*numGroupTiles[0]/totalWarps : FIRST_TILE+warp*(LAST_TILE-FIRST_TILE)/totalWarps);
    const unsigned int endTile = (useNeighborList ? (warp+1)*numGroupTiles[0]/totalWarps : FIRST_TILE+(warp+1)*(LAST_TILE-FIRST_TILE)/totalWarps);
    for (int tile = startTile; tile < endTile; tile++) {
        const int4 atomData = groupData[TILE_SIZE*tile+tgx];
        const int atom1 = atomData.x;
        const int atom2 = atomData.y;
        const int rangeStart = atomData.z&0xFFFF;     // first lane index this lane pairs with
        const int rangeEnd = (atomData.z>>16)&0xFFFF; // one past the last lane index
        const int exclusions = atomData.w;            // per-lane interaction bitmask
        real4 posq1 = posq[atom1];
        LOAD_ATOM1_PARAMETERS
        real3 force = make_real3(0);
        // Stage this lane's second atom into shared memory for the whole warp.
        real4 posq2 = posq[atom2];
        localData[threadIdx.x].x = posq2.x;
        localData[threadIdx.x].y = posq2.y;
        localData[threadIdx.x].z = posq2.z;
        localData[threadIdx.x].q = posq2.w;
        LOAD_LOCAL_PARAMETERS
        localData[threadIdx.x].fx = 0.0f;
        localData[threadIdx.x].fy = 0.0f;
        localData[threadIdx.x].fz = 0.0f;
        int tj = tgx;
        // All lanes iterate the warp-wide maximum range so the loop count is
        // uniform; shorter ranges are masked by the j < rangeEnd test below.
        int rangeStop = rangeStart + reduceMax(rangeEnd-rangeStart);
        SYNC_WARPS;
        for (int j = rangeStart; j < rangeStop; j++) {
            if (j < rangeEnd) {
                bool isExcluded = (((exclusions>>tj)&1) == 0);
                int localIndex = tbx+tj;
                posq2 = make_real4(localData[localIndex].x, localData[localIndex].y, localData[localIndex].z, localData[localIndex].q);
                real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
                APPLY_PERIODIC_TO_DELTA(delta)
#endif
                real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
                if (!isExcluded && r2 < CUTOFF_SQUARED) {
#endif
                    real invR = RSQRT(r2);
                    real r = r2*invR;
                    LOAD_ATOM2_PARAMETERS
                    real dEdR = 0.0f;
                    real tempEnergy = 0.0f;
                    const real interactionScale = 1.0f;
                    COMPUTE_INTERACTION
                    energy += tempEnergy;
                    delta *= dEdR;
                    force.x -= delta.x;
                    force.y -= delta.y;
                    force.z -= delta.z;
                    localData[localIndex].fx += delta.x;
                    localData[localIndex].fy += delta.y;
                    localData[localIndex].fz += delta.z;
#ifdef USE_CUTOFF
                }
#endif
                // Rotate through the range so no two lanes touch the same
                // shared-memory slot in the same iteration.
                tj = (tj == rangeEnd-1 ? rangeStart : tj+1);
            }
            SYNC_WARPS;
        }
        // NOTE(review): exclusions == 0 appears to mark entries with no
        // interactions, whose atom1 force is necessarily zero — confirm
        // against the host-side tile construction.
        if (exclusions != 0) {
            atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
            atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
            atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
        }
        atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
        atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
        atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
        SYNC_WARPS;
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
    SAVE_DERIVATIVES
}
/**
 * Reset the filtered tile count to zero when the neighbor list has been
 * flagged for a rebuild. Intended to be executed by a single thread.
 */
extern "C" __global__ void prepareToBuildNeighborList(int* __restrict__ rebuildNeighborList, int* __restrict__ numGroupTiles) {
    const bool needsRebuild = (rebuildNeighborList[0] == 1);
    if (needsRebuild) {
        numGroupTiles[0] = 0;
    }
}
/**
* Filter the list of tiles to include only ones that have interactions within the
* padded cutoff.
*/
extern "C" __global__ void buildNeighborList(int* __restrict__ rebuildNeighborList, int* __restrict__ numGroupTiles,
const real4* __restrict__ posq, const int4* __restrict__ groupData, int4* __restrict__ filteredGroupData,
real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
// If the neighbor list doesn't need to be rebuilt on this step, return immediately.
if (rebuildNeighborList[0] == 0)
return;
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
const unsigned int local_warp = threadIdx.x/TILE_SIZE; // local warpIndex
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
__shared__ real4 localPos[LOCAL_MEMORY_SIZE];
__shared__ volatile bool anyInteraction[WARPS_IN_BLOCK];
__shared__ volatile int tileIndex[WARPS_IN_BLOCK];
const unsigned int startTile = warp*NUM_TILES/totalWarps;
const unsigned int endTile = (warp+1)*NUM_TILES/totalWarps;
for (int tile = startTile; tile < endTile; tile++) {
const int4 atomData = groupData[TILE_SIZE*tile+tgx];
const int atom1 = atomData.x;
const int atom2 = atomData.y;
const int rangeStart = atomData.z&0xFFFF;
const int rangeEnd = (atomData.z>>16)&0xFFFF;
const int exclusions = atomData.w;
real4 posq1 = posq[atom1];
localPos[threadIdx.x] = posq[atom2];
if (tgx == 0)
anyInteraction[local_warp] = false;
int tj = tgx;
int rangeStop = rangeStart + reduceMax(rangeEnd-rangeStart);
SYNC_WARPS;
for (int j = rangeStart; j < rangeStop && !anyInteraction[local_warp]; j++) {
SYNC_WARPS;
if (j < rangeEnd && tj < rangeEnd) {
bool isExcluded = (((exclusions>>tj)&1) == 0);
int localIndex = tbx+tj;
real3 delta = make_real3(localPos[localIndex].x-posq1.x, localPos[localIndex].y-posq1.y, localPos[localIndex].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (!isExcluded && r2 < PADDED_CUTOFF_SQUARED)
anyInteraction[local_warp] = true;
}
tj = (tj == rangeEnd-1 ? rangeStart : tj+1);
SYNC_WARPS;
}
if (anyInteraction[local_warp]) {
SYNC_WARPS;
if (tgx == 0)
tileIndex[local_warp] = atomicAdd(numGroupTiles, 1);
SYNC_WARPS;
filteredGroupData[TILE_SIZE*tileIndex[local_warp]+tgx] = atomData;
}
}
}
|
ebe7c710ec78690c8ebc23a7eabdecef3b392327.hip | // !!! This is a file automatically generated by hipify!!!
#include "distconv/runtime_gpu.hpp"
#include "distconv/tensor/tensor.hpp"
#include "distconv/tensor/tensor_mpi_cuda.hpp"
#include "distconv/util/util_gpu.hpp"
#include "distconv/util/util_mpi.hpp"
#include "test_tensor.hpp"
#include <iostream>
#include <vector>
using namespace distconv;
using namespace distconv::tensor;
using namespace distconv::util;
template <>
inline LocaleMPI get_locale() {
LocaleMPI loc(MPI_COMM_WORLD);
return loc;
}
template <typename TensorType>
inline int test_concat(const Shape &dst_shape,
const Distribution &dst_dist,
const Shape &src1_shape,
const Shape &src2_shape,
const Distribution &src_dist) {
const int num_dims = dst_shape.num_dims();
using DataType = typename TensorType::data_type;
using LocaleType = typename TensorType::locale_type;
LocaleType loc = get_locale<LocaleType>();
TensorType dst = get_tensor<TensorType>(dst_shape, loc, dst_dist);
TensorType src1 = get_tensor<TensorType>(src1_shape, loc, src_dist);
TensorType src2 = get_tensor<TensorType>(src2_shape, loc, src_dist);
assert0(dst.allocate());
assert0(src1.allocate());
assert0(src2.allocate());
dst.zero();
src1.zero();
src2.zero();
int concat_dim = -1;
for (int i = 0; i < num_dims; ++i) {
if (dst_shape[i] == src1_shape[i] && dst_shape[i] == src2_shape[i]) {
continue;
} else {
assert_always(dst_shape[i] == src1_shape[i] + src2_shape[i]);
concat_dim = i;
}
}
assert_always(concat_dim >= 0);
util::MPIRootPrintStreamInfo()
<< "Concatenating tensors along dimension " << concat_dim;
// init src1
DataType src1_init_val = 1;
auto src1_size = src1.get_local_real_size();
std::vector<DataType> src1_init(src1_size, src1_init_val);
h2::gpu::mem_copy(src1.get_buffer(), src1_init.data(), src1_size);
// init src2
DataType src2_init_val = 2;
auto src2_size = src2.get_local_real_size();
std::vector<DataType> src2_init(src2_size, src2_init_val);
h2::gpu::mem_copy(src2.get_buffer(), src2_init.data(), src2_size);
Concatenate(dst, src1, src2, 0);
h2::gpu::sync();
MPI_Barrier(MPI_COMM_WORLD);
util::MPIRootPrintStreamInfo() << "Concatenation done";
using TensorProcType = Tensor< DataType, LocaleProcess,
BaseAllocator>;
auto proc_dist = Distribution::make_localized_distribution(num_dims);
TensorProcType dst_host(LocaleProcess(), proc_dist);
assert0(tensor::Copy(dst_host, dst, 0));
int num_errors = 0;
if (loc.get_rank() == 0) {
for (auto it = dst_host.get_shape().index_begin();
it != dst_host.get_shape().index_end(); ++it) {
auto idx = *it;
auto computed = dst_host.get(idx);
DataType ref = 0;
if (idx[concat_dim] < src1_shape[concat_dim]) {
ref = src1_init_val;
} else {
ref = src2_init_val;
}
if (computed != ref) {
util::MPIPrintStreamError() << "Error! Mismatch at " << *it
<< ". Computed: " << computed
<< ", ref: " << ref;
++num_errors;
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
Slice(src1, src2, dst, 0);
h2::gpu::sync();
MPI_Barrier(MPI_COMM_WORLD);
util::MPIRootPrintStreamInfo() << "Split done";
std::vector<TensorType*> src_tensors = {&src1, &src2};
for (int i = 0; i < src_tensors.size(); ++i) {
const auto &src = *src_tensors[i];
TensorProcType host(LocaleProcess(), proc_dist);
assert0(tensor::Copy(host, src, 0));
int num_errors = 0;
if (loc.get_rank() == 0) {
for (auto it = host.get_shape().index_begin();
it != host.get_shape().index_end(); ++it) {
auto idx = *it;
auto computed = host.get(idx);
DataType ref = i == 0 ? src1_init_val : src2_init_val;
if (computed != ref) {
util::MPIPrintStreamError() << "Error! Mismatch at " << *it
<< ". Computed: " << computed
<< ", ref: " << ref;
++num_errors;
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
return num_errors;
}
int main(int argc, char *argv[]) {
h2::gpu::set_gpu(util::choose_gpu());
MPI_Init(&argc, &argv);
int pid;
int np;
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &np);
MPIPrintStreamInfo() << "Using device " << h2::gpu::current_gpu();
using DataType = int;
using TensorType = Tensor<DataType, LocaleMPI, HIPAllocator>;
Shape dst_shape({32, 32, 32, 5, np});
Shape src1_shape({32, 32, 32, 2, np});
Shape src2_shape({32, 32, 32, 3, np});
auto overlapped_dist = Distribution::make_overlapped_distribution(
{1, 2, 2, 1, np / 4}, {0, 1, 1, 0, 0});
auto non_overlapped_dist =
Distribution::make_distribution({1, 2, 2, 1, np / 4});
assert_always((np % 4) == 0 && (np / 4 > 0));
// concat tensors with no halo to tensor with halo
assert0(test_concat<TensorType>(dst_shape,
overlapped_dist,
src1_shape,
src2_shape,
non_overlapped_dist));
// concat tensors with no halo to tensor with halo
assert0(test_concat<TensorType>(dst_shape,
non_overlapped_dist,
src1_shape,
src2_shape,
overlapped_dist));
MPI_Barrier(MPI_COMM_WORLD);
MPIRootPrintStreamInfo() << "Completed successfully.";
MPI_Finalize();
static_cast<void>(GPU_DEVICE_RESET());
return 0;
}
| ebe7c710ec78690c8ebc23a7eabdecef3b392327.cu | #include "distconv/runtime_gpu.hpp"
#include "distconv/tensor/tensor.hpp"
#include "distconv/tensor/tensor_mpi_cuda.hpp"
#include "distconv/util/util_gpu.hpp"
#include "distconv/util/util_mpi.hpp"
#include "test_tensor.hpp"
#include <iostream>
#include <vector>
using namespace distconv;
using namespace distconv::tensor;
using namespace distconv::util;
template <>
inline LocaleMPI get_locale() {
LocaleMPI loc(MPI_COMM_WORLD);
return loc;
}
template <typename TensorType>
inline int test_concat(const Shape &dst_shape,
const Distribution &dst_dist,
const Shape &src1_shape,
const Shape &src2_shape,
const Distribution &src_dist) {
const int num_dims = dst_shape.num_dims();
using DataType = typename TensorType::data_type;
using LocaleType = typename TensorType::locale_type;
LocaleType loc = get_locale<LocaleType>();
TensorType dst = get_tensor<TensorType>(dst_shape, loc, dst_dist);
TensorType src1 = get_tensor<TensorType>(src1_shape, loc, src_dist);
TensorType src2 = get_tensor<TensorType>(src2_shape, loc, src_dist);
assert0(dst.allocate());
assert0(src1.allocate());
assert0(src2.allocate());
dst.zero();
src1.zero();
src2.zero();
int concat_dim = -1;
for (int i = 0; i < num_dims; ++i) {
if (dst_shape[i] == src1_shape[i] && dst_shape[i] == src2_shape[i]) {
continue;
} else {
assert_always(dst_shape[i] == src1_shape[i] + src2_shape[i]);
concat_dim = i;
}
}
assert_always(concat_dim >= 0);
util::MPIRootPrintStreamInfo()
<< "Concatenating tensors along dimension " << concat_dim;
// init src1
DataType src1_init_val = 1;
auto src1_size = src1.get_local_real_size();
std::vector<DataType> src1_init(src1_size, src1_init_val);
h2::gpu::mem_copy(src1.get_buffer(), src1_init.data(), src1_size);
// init src2
DataType src2_init_val = 2;
auto src2_size = src2.get_local_real_size();
std::vector<DataType> src2_init(src2_size, src2_init_val);
h2::gpu::mem_copy(src2.get_buffer(), src2_init.data(), src2_size);
Concatenate(dst, src1, src2, 0);
h2::gpu::sync();
MPI_Barrier(MPI_COMM_WORLD);
util::MPIRootPrintStreamInfo() << "Concatenation done";
using TensorProcType = Tensor< DataType, LocaleProcess,
BaseAllocator>;
auto proc_dist = Distribution::make_localized_distribution(num_dims);
TensorProcType dst_host(LocaleProcess(), proc_dist);
assert0(tensor::Copy(dst_host, dst, 0));
int num_errors = 0;
if (loc.get_rank() == 0) {
for (auto it = dst_host.get_shape().index_begin();
it != dst_host.get_shape().index_end(); ++it) {
auto idx = *it;
auto computed = dst_host.get(idx);
DataType ref = 0;
if (idx[concat_dim] < src1_shape[concat_dim]) {
ref = src1_init_val;
} else {
ref = src2_init_val;
}
if (computed != ref) {
util::MPIPrintStreamError() << "Error! Mismatch at " << *it
<< ". Computed: " << computed
<< ", ref: " << ref;
++num_errors;
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
Slice(src1, src2, dst, 0);
h2::gpu::sync();
MPI_Barrier(MPI_COMM_WORLD);
util::MPIRootPrintStreamInfo() << "Split done";
std::vector<TensorType*> src_tensors = {&src1, &src2};
for (int i = 0; i < src_tensors.size(); ++i) {
const auto &src = *src_tensors[i];
TensorProcType host(LocaleProcess(), proc_dist);
assert0(tensor::Copy(host, src, 0));
int num_errors = 0;
if (loc.get_rank() == 0) {
for (auto it = host.get_shape().index_begin();
it != host.get_shape().index_end(); ++it) {
auto idx = *it;
auto computed = host.get(idx);
DataType ref = i == 0 ? src1_init_val : src2_init_val;
if (computed != ref) {
util::MPIPrintStreamError() << "Error! Mismatch at " << *it
<< ". Computed: " << computed
<< ", ref: " << ref;
++num_errors;
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
return num_errors;
}
int main(int argc, char *argv[]) {
h2::gpu::set_gpu(util::choose_gpu());
MPI_Init(&argc, &argv);
int pid;
int np;
MPI_Comm_rank(MPI_COMM_WORLD, &pid);
MPI_Comm_size(MPI_COMM_WORLD, &np);
MPIPrintStreamInfo() << "Using device " << h2::gpu::current_gpu();
using DataType = int;
using TensorType = Tensor<DataType, LocaleMPI, CUDAAllocator>;
Shape dst_shape({32, 32, 32, 5, np});
Shape src1_shape({32, 32, 32, 2, np});
Shape src2_shape({32, 32, 32, 3, np});
auto overlapped_dist = Distribution::make_overlapped_distribution(
{1, 2, 2, 1, np / 4}, {0, 1, 1, 0, 0});
auto non_overlapped_dist =
Distribution::make_distribution({1, 2, 2, 1, np / 4});
assert_always((np % 4) == 0 && (np / 4 > 0));
// concat tensors with no halo to tensor with halo
assert0(test_concat<TensorType>(dst_shape,
overlapped_dist,
src1_shape,
src2_shape,
non_overlapped_dist));
// concat tensors with no halo to tensor with halo
assert0(test_concat<TensorType>(dst_shape,
non_overlapped_dist,
src1_shape,
src2_shape,
overlapped_dist));
MPI_Barrier(MPI_COMM_WORLD);
MPIRootPrintStreamInfo() << "Completed successfully.";
MPI_Finalize();
static_cast<void>(GPU_DEVICE_RESET());
return 0;
}
|
6fb2d791e27be04626cd1152de231fd727516caf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define BLOCK_SIZE 1024
__device__ float f(float x){
//these are integrals chosen to be very difficult to solve;
//I was not able to solve them myself, but I verified the value
//against several sources online
//return sqrt(1 - (x * x * x * x));
return pow(x,x);
//these are very simple integrals, chosen to be "simple" and allow
//for testing of accuracy with a known value.
//return x;
//return x * x;
}
__global__ void integrate(unsigned long long *counters_d, float a, float b, int top, int trials){
//implement shared to allow for reduction tree approach
__shared__ unsigned long long successes_shared[BLOCK_SIZE + 1];
unsigned long long successes = 0;
//only need the block ID and the thread id. Each thread will work independantly of the others, and
//so thread id is only important in the context of the block for the reduction tree
int bidx = blockIdx.x;
int tidx = threadIdx.x;
//the following is based in part on an example I found in a presentation online from
//a tech conference on GPU methods, in addition to the api
hiprandState_t state;
/* Each thread gets same seed, a different sequence number, no offset. */
hiprand_init ( 0, blockIdx.x + threadIdx.x, 0, &state );
//implement cuda rng
float domain = b - a;
float x;
float y;
float fx;
for(int i = 0; i < trials; i++){
//get random number for x
x = a + (domain * hiprand_uniform(&state));
//get random number for y
y = (float)top * hiprand_uniform(&state);
//check if f(x) <= y
fx = f(x);
//if(f(x) <= Y), successes++
if(y < fx){
successes++;
}
}
//sync
__syncthreads();
//atomicAdd(&counters_d[bidx], successes);
//__syncthreads();
//the following is an attempt to use a reduction tree to parallelize the addition of all the
//counters in a block. I was unable to get it functional, I kept getting an error with
//and ran out of time to debug it.
//add successes to local memory
successes_shared[tidx] = successes;
//atomicAdd(counters_d[bidx], (float)successes);
//use a basic reduction implementation to add all the shared variables together
for(int stride = 1; stride < BLOCK_SIZE; stride <<= 1) {
__syncthreads();
if(threadIdx.x % stride == 0){
successes_shared[threadIdx.x] += successes_shared[threadIdx.x + stride];
//write block sum of successes to the global array
}
}
if(threadIdx.x == 0)
counters_d[bidx] = successes_shared[0];
}
| 6fb2d791e27be04626cd1152de231fd727516caf.cu | #include <stdio.h>
#define BLOCK_SIZE 1024
__device__ float f(float x){
//these are integrals chosen to be very difficult to solve;
//I was not able to solve them myself, but I verified the value
//against several sources online
//return sqrt(1 - (x * x * x * x));
return pow(x,x);
//these are very simple integrals, chosen to be "simple" and allow
//for testing of accuracy with a known value.
//return x;
//return x * x;
}
__global__ void integrate(unsigned long long *counters_d, float a, float b, int top, int trials){
//implement shared to allow for reduction tree approach
__shared__ unsigned long long successes_shared[BLOCK_SIZE + 1];
unsigned long long successes = 0;
//only need the block ID and the thread id. Each thread will work independantly of the others, and
//so thread id is only important in the context of the block for the reduction tree
int bidx = blockIdx.x;
int tidx = threadIdx.x;
//the following is based in part on an example I found in a presentation online from
//a tech conference on GPU methods, in addition to the api
curandState state;
/* Each thread gets same seed, a different sequence number, no offset. */
curand_init ( 0, blockIdx.x + threadIdx.x, 0, &state );
//implement cuda rng
float domain = b - a;
float x;
float y;
float fx;
for(int i = 0; i < trials; i++){
//get random number for x
x = a + (domain * curand_uniform(&state));
//get random number for y
y = (float)top * curand_uniform(&state);
//check if f(x) <= y
fx = f(x);
//if(f(x) <= Y), successes++
if(y < fx){
successes++;
}
}
//sync
__syncthreads();
//atomicAdd(&counters_d[bidx], successes);
//__syncthreads();
//the following is an attempt to use a reduction tree to parallelize the addition of all the
//counters in a block. I was unable to get it functional, I kept getting an error with
//and ran out of time to debug it.
//add successes to local memory
successes_shared[tidx] = successes;
//atomicAdd(counters_d[bidx], (float)successes);
//use a basic reduction implementation to add all the shared variables together
for(int stride = 1; stride < BLOCK_SIZE; stride <<= 1) {
__syncthreads();
if(threadIdx.x % stride == 0){
successes_shared[threadIdx.x] += successes_shared[threadIdx.x + stride];
//write block sum of successes to the global array
}
}
if(threadIdx.x == 0)
counters_d[bidx] = successes_shared[0];
}
|
9d62f28daed7fb07388211be7a5a22c2a1e1e09c.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUDA_WRAPPER.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
| 9d62f28daed7fb07388211be7a5a22c2a1e1e09c.cu | #include "CUDA_WRAPPER.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
|
4958917309247387ca47e88ed5d612bc1443a8fb.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file
* z = a.*x+b where a,x, and b are gpu based arrays and .* is the matlab-style
* multiplication operation.
*/
#include "../core.h"
#include "../ops.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <float.h>
#include <algorithm>
#include "macros.h"
TYPEDEFS; ///< Typedef aliases for basic types. See generic/macros.h
#define ENDL "\n"
#define LOG(...) ndLogError(z,__VA_ARGS__)
#define REPORT(msg1,msg2) LOG("%s(%d): %s()"ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,msg1,msg2)
#define TRY(e) do{if(!(e)) {REPORT("Expression evaluated as failure.",#e); goto Error; }}while(0)
#define CUTRY(e) do{hipError_t ecode=(e); if(ecode!=hipSuccess) {REPORT(#e,hipGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL do{REPORT("Execution should not have reached here.","Failing."); goto Error; } while(0)
#ifndef restrict
#define restrict __restrict__
#endif
#define max_(a,b) (((a)<(b))?(b):(a))
#define min_(a,b) (((a)<(b))?(a):(b))
// std::numeric_limits<T>::min() is a host function; can't be used on device.
// template static const init is not allowed in cuda (for the device code),
// so this:
#define min_u8 0
#define min_u16 0
#define min_u32 0
#define min_u64 0
#define min_i8 CHAR_MIN
#define min_i16 SHRT_MIN
#define min_i32 INT_MIN
#define min_i64 LLONG_MIN
#define min_f32 (-FLT_MAX)
#define min_f64 (-DBL_MAX)
#define max_u8 UCHAR_MAX
#define max_u16 USHRT_MAX
#define max_u32 ULONG_MAX
#define max_u64 ULLONG_MAX
#define max_i8 CHAR_MAX
#define max_i16 SHRT_MAX
#define max_i32 LONG_MAX
#define max_i64 LLONG_MAX
#define max_f32 FLT_MAX
#define max_f64 DBL_MAX
template<typename T> inline __device__ T saturate(float v);
#define DECL(T) \
template<> inline T saturate<T>(float v) \
{ const float mn=min_(v,max_##T); \
return max_(min_##T,mn); \
}
DECL(u8); DECL(u16); DECL(u32); DECL(u64);
DECL(i8); DECL(i16); DECL(i32); DECL(i64);
DECL(f32); DECL(f64);
#undef DECL
typedef struct shape_t_
{ u8 ndim; ///< The number of dimensions
u32 nelem; ///< The total number of elements
size_t *restrict shape; ///< Buffer of length ndim, ordered [w,h,d,...]. Always agrees with stride. Maintained for convenience.
} shape_t;
template<typename T>
struct vol_t
{ size_t *restrict strides; ///< Buffer of length ndim+1, strides[i] is the number of bytes layed out between unit steps along dimension i
T *restrict data;
};
static shape_t make_shape(const nd_t a)
{ shape_t out =
{ (u8) ndndim(a),
(u32) ndnelem(a),
(size_t*)ndCudaShape(a)
};
return out;
}
template<typename T>
static vol_t<T> make_vol(const nd_t a)
{ vol_t<T> out =
{ (size_t*)ndCudaStrides(a),
(T*)nddata(a)
};
return out;
}
#if 0
#define __launch_bounds__ (...)
#endif
inline __device__ unsigned prod(dim3 a) {return a.x*a.y*a.z;}
inline __device__ unsigned stride(uint3 a, dim3 b) {return a.x+b.x*(a.y+b.y*a.z);}
inline __device__ unsigned sum(uint3 a) {return a.x+a.y+a.z;}
// i = s[0]*(r[0]+s[1]*(r[1]+s[2]*(r[2]+...)))
// r[0]=[i/sh[0]]%r[1]
// r[1]=[i/(sh[0]sh[1])]%sh[2]
// ...
//
// [ ] Accomidate possible overlap between src/dst arrays.
// [x] Do work elements per thread
// [ ] requires shape[0] aligned to WORK
// [ ] bad memory access pattern - want to do BX*WORK (Bx=32) loads, and then distribute over BY elements.
// - then we'd require shape[0] aligned to BX*WORK (or do bounds checking)
template<typename TDST,typename TSRC,unsigned BX,unsigned BY,unsigned WORK>
__global__ void __launch_bounds__(BX*BY,1)
fmad_kernel(vol_t<TDST> z, vol_t<TSRC> a, vol_t<TSRC> x, vol_t<TSRC> b, shape_t shape)
{ unsigned i = WORK*(sum(threadIdx)+stride(blockIdx,gridDim)*prod(blockDim));
#if 1
if(i<shape.nelem)
{ unsigned st=1;
TDST *zz=z.data;
TSRC *aa=a.data,
*xx=x.data,
*bb=b.data;
for(u8 dim=0;dim<shape.ndim;++dim)
{ unsigned r=(i/st)%shape.shape[dim];
st*=shape.shape[dim];
zz+=r*z.strides[dim]/z.strides[0];
aa+=r*a.strides[dim]/a.strides[0];
xx+=r*x.strides[dim]/x.strides[0];
bb+=r*b.strides[dim]/b.strides[0];
}
if(i<(((int)shape.nelem)-BX*WORK))
{
#pragma unroll
for(unsigned j=0;j<WORK;++j)
zz[j*BX]=aa[j*BX]*xx[j*BX]+bb[j*BX];
} else
{
for(unsigned j=0;j<(shape.nelem-i)/BX;++j)
zz[j*BX]=aa[j*BX]*xx[j*BX]+bb[j*BX];
}
}
#endif
}
// Treat this as a 1d problem, each thread does WORK elements.
// [ ] FIXME - use shape properly
extern "C" unsigned fmad_cuda(nd_t z,nd_t a,nd_t x,nd_t b,size_t ndim,size_t *shape)
{ unsigned n=ndnelem(z);
const unsigned BX=32,BY=32,WORK=8;
dim3 blocks((unsigned)ceil(n/(float)(WORK*BX*BY))),
threads(BX*BY);
/// @cond DEFINES
#define V(a,T) make_vol<T>(a)
#define S(a) make_shape(a)
#define CASE2(TDST,TSRC)hipLaunchKernelGGL(( fmad_kernel<TDST,TSRC,BX,BY,WORK>), dim3(blocks),dim3(threads),0,(hipStream_t)ndCudaStream(z), V(z,TDST),V(a,TSRC),V(x,TSRC),V(b,TSRC),S(z));break
#define CASE(TSRC) TYPECASE2(ndtype(z),TSRC); break
{TYPECASE(ndtype(x));}
#undef CASE
#undef CASE2
#undef V
#undef S
/// @endcond
CUTRY(hipGetLastError());
return 1;
Error:
return 0;
} | 4958917309247387ca47e88ed5d612bc1443a8fb.cu | /**
* \file
* z = a.*x+b where a,x, and b are gpu based arrays and .* is the matlab-style
* multiplication operation.
*/
#include "../core.h"
#include "../ops.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <float.h>
#include <algorithm>
#include "macros.h"
TYPEDEFS; ///< Typedef aliases for basic types. See generic/macros.h
#define ENDL "\n"
#define LOG(...) ndLogError(z,__VA_ARGS__)
#define REPORT(msg1,msg2) LOG("%s(%d): %s()"ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,msg1,msg2)
#define TRY(e) do{if(!(e)) {REPORT("Expression evaluated as failure.",#e); goto Error; }}while(0)
#define CUTRY(e) do{cudaError_t ecode=(e); if(ecode!=cudaSuccess) {REPORT(#e,cudaGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL do{REPORT("Execution should not have reached here.","Failing."); goto Error; } while(0)
#ifndef restrict
#define restrict __restrict__
#endif
#define max_(a,b) (((a)<(b))?(b):(a))
#define min_(a,b) (((a)<(b))?(a):(b))
// std::numeric_limits<T>::min() is a host function; can't be used on device.
// template static const init is not allowed in cuda (for the device code),
// so this:
#define min_u8 0
#define min_u16 0
#define min_u32 0
#define min_u64 0
#define min_i8 CHAR_MIN
#define min_i16 SHRT_MIN
#define min_i32 INT_MIN
#define min_i64 LLONG_MIN
#define min_f32 (-FLT_MAX)
#define min_f64 (-DBL_MAX)
#define max_u8 UCHAR_MAX
#define max_u16 USHRT_MAX
#define max_u32 ULONG_MAX
#define max_u64 ULLONG_MAX
#define max_i8 CHAR_MAX
#define max_i16 SHRT_MAX
#define max_i32 LONG_MAX
#define max_i64 LLONG_MAX
#define max_f32 FLT_MAX
#define max_f64 DBL_MAX
template<typename T> inline __device__ T saturate(float v);
#define DECL(T) \
template<> inline T saturate<T>(float v) \
{ const float mn=min_(v,max_##T); \
return max_(min_##T,mn); \
}
DECL(u8); DECL(u16); DECL(u32); DECL(u64);
DECL(i8); DECL(i16); DECL(i32); DECL(i64);
DECL(f32); DECL(f64);
#undef DECL
typedef struct shape_t_
{ u8 ndim; ///< The number of dimensions
u32 nelem; ///< The total number of elements
size_t *restrict shape; ///< Buffer of length ndim, ordered [w,h,d,...]. Always agrees with stride. Maintained for convenience.
} shape_t;
template<typename T>
struct vol_t
{ size_t *restrict strides; ///< Buffer of length ndim+1, strides[i] is the number of bytes layed out between unit steps along dimension i
T *restrict data;
};
static shape_t make_shape(const nd_t a)
{ shape_t out =
{ (u8) ndndim(a),
(u32) ndnelem(a),
(size_t*)ndCudaShape(a)
};
return out;
}
template<typename T>
static vol_t<T> make_vol(const nd_t a)
{ vol_t<T> out =
{ (size_t*)ndCudaStrides(a),
(T*)nddata(a)
};
return out;
}
#if 0
#define __launch_bounds__ (...)
#endif
inline __device__ unsigned prod(dim3 a) {return a.x*a.y*a.z;}
inline __device__ unsigned stride(uint3 a, dim3 b) {return a.x+b.x*(a.y+b.y*a.z);}
inline __device__ unsigned sum(uint3 a) {return a.x+a.y+a.z;}
// i = s[0]*(r[0]+s[1]*(r[1]+s[2]*(r[2]+...)))
// r[0]=[i/sh[0]]%r[1]
// r[1]=[i/(sh[0]sh[1])]%sh[2]
// ...
//
// [ ] Accomidate possible overlap between src/dst arrays.
// [x] Do work elements per thread
// [ ] requires shape[0] aligned to WORK
// [ ] bad memory access pattern - want to do BX*WORK (Bx=32) loads, and then distribute over BY elements.
// - then we'd require shape[0] aligned to BX*WORK (or do bounds checking)
template<typename TDST,typename TSRC,unsigned BX,unsigned BY,unsigned WORK>
__global__ void __launch_bounds__(BX*BY,1)
fmad_kernel(vol_t<TDST> z, vol_t<TSRC> a, vol_t<TSRC> x, vol_t<TSRC> b, shape_t shape)
{ unsigned i = WORK*(sum(threadIdx)+stride(blockIdx,gridDim)*prod(blockDim));
#if 1
if(i<shape.nelem)
{ unsigned st=1;
TDST *zz=z.data;
TSRC *aa=a.data,
*xx=x.data,
*bb=b.data;
for(u8 dim=0;dim<shape.ndim;++dim)
{ unsigned r=(i/st)%shape.shape[dim];
st*=shape.shape[dim];
zz+=r*z.strides[dim]/z.strides[0];
aa+=r*a.strides[dim]/a.strides[0];
xx+=r*x.strides[dim]/x.strides[0];
bb+=r*b.strides[dim]/b.strides[0];
}
if(i<(((int)shape.nelem)-BX*WORK))
{
#pragma unroll
for(unsigned j=0;j<WORK;++j)
zz[j*BX]=aa[j*BX]*xx[j*BX]+bb[j*BX];
} else
{
for(unsigned j=0;j<(shape.nelem-i)/BX;++j)
zz[j*BX]=aa[j*BX]*xx[j*BX]+bb[j*BX];
}
}
#endif
}
// Treat this as a 1d problem, each thread does WORK elements.
// [ ] FIXME - use shape properly
extern "C" unsigned fmad_cuda(nd_t z,nd_t a,nd_t x,nd_t b,size_t ndim,size_t *shape)
{ unsigned n=ndnelem(z);
const unsigned BX=32,BY=32,WORK=8;
dim3 blocks((unsigned)ceil(n/(float)(WORK*BX*BY))),
threads(BX*BY);
/// @cond DEFINES
#define V(a,T) make_vol<T>(a)
#define S(a) make_shape(a)
#define CASE2(TDST,TSRC) fmad_kernel<TDST,TSRC,BX,BY,WORK><<<blocks,threads,0,(cudaStream_t)ndCudaStream(z)>>>(V(z,TDST),V(a,TSRC),V(x,TSRC),V(b,TSRC),S(z));break
#define CASE(TSRC) TYPECASE2(ndtype(z),TSRC); break
{TYPECASE(ndtype(x));}
#undef CASE
#undef CASE2
#undef V
#undef S
/// @endcond
CUTRY(cudaGetLastError());
return 1;
Error:
return 0;
} |
0d140591c8c2b8789b9adec2f3088bfb0f781a15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define BLOCK_QUEUE_SIZE 8192
__global__ void kernel_cuda_frontier_queue(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
int iteration,
int *input_queue,
int *output_queue)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
//assert(false);
// TODO: Read to shared memory? Maybe also the input queue?
__shared__ int input_queue_size;
if (threadIdx.x == 0)
{
input_queue_size = *input_queue;
}
__syncthreads();
__shared__ int queue_size;
__shared__ int next_queue[BLOCK_QUEUE_SIZE];
if (threadIdx.x == 0)
{
queue_size = 0;
}
__syncthreads();
for (int v = 0; v < input_queue_size; v += num_threads)
{
if (v + tid < input_queue_size)
{
int vertex = input_queue[v + tid + 1];
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = iteration + 1;
// Add to queue (atomicAdd returns original value)
int position = atomicAdd(&queue_size, 1);
next_queue[position] = neighbor;
}
}
}
__syncthreads();
__shared__ int global_offset;
if (threadIdx.x == 0)
{
// First value is size of queue
global_offset = atomicAdd(output_queue, queue_size);
}
__syncthreads();
// Copy queue to global memory
for (int i = 0; i < queue_size; i += blockDim.x)
{
if (i + threadIdx.x < queue_size)
{
output_queue[global_offset + i + threadIdx.x + 1] = next_queue[i + threadIdx.x];
}
}
__syncthreads();
queue_size = 0;
__syncthreads();
}
}
int bfs_cuda_frontier_queue(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
int *k_queue_1;
int *k_queue_2;
int kernel_runs = 0;
int zero_value = 0;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
int *input_queue_size = new int;
hipMalloc(&k_v_adj_list, sizeof(int) * num_edges);
hipMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
hipMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
hipMalloc(&k_result, sizeof(int) * num_vertices);
hipMalloc(&k_queue_1, sizeof(int) * num_vertices * 8); // Not sure how big?
hipMalloc(&k_queue_2, sizeof(int) * num_vertices * 8); // Not sure how big?
hipMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_result, result, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
int *k_input_queue = k_queue_1;
int *k_output_queue = k_queue_2;
int first_queue[] = { 1, start_vertex };
*input_queue_size = 1;
hipMemcpy(k_input_queue, first_queue, sizeof(int) * 2, hipMemcpyHostToDevice);
do
{
hipMemcpy(k_output_queue, &zero_value, sizeof(int) * 1, hipMemcpyHostToDevice);
int blocks = min(BLOCKS, max(1, *input_queue_size / THREADS));
int threads = *input_queue_size <= THREADS ? *input_queue_size : THREADS;
hipLaunchKernelGGL(( kernel_cuda_frontier_queue), dim3(blocks), dim3(threads), 0, 0,
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
kernel_runs,
k_input_queue,
k_output_queue);
kernel_runs++;
if (kernel_runs > MAX_KERNEL_RUNS)
{
return -1;
}
// Swap queues
int *tmp = k_input_queue;
k_input_queue = k_output_queue;
k_output_queue = tmp;
hipMemcpy(input_queue_size, k_input_queue, sizeof(int) * 1, hipMemcpyDeviceToHost);
} while (*input_queue_size > 0);
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
hipMemcpy(result, k_result, sizeof(int) * num_vertices, hipMemcpyDeviceToHost);
hipFree(k_v_adj_list);
hipFree(k_v_adj_begin);
hipFree(k_v_adj_length);
hipFree(k_result);
hipFree(k_queue_1);
hipFree(k_queue_2);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
| 0d140591c8c2b8789b9adec2f3088bfb0f781a15.cu | // Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define BLOCK_QUEUE_SIZE 8192
// One BFS level over an explicit frontier queue.
// Queue layout: element 0 is the queue length, vertices start at index 1.
// Each thread expands one frontier vertex, collects newly discovered
// neighbours in a per-block shared staging queue, then the block appends its
// staging queue to the global output queue with a single atomicAdd on the
// queue-size slot.
__global__ void kernel_cuda_frontier_queue(
    int *v_adj_list,
    int *v_adj_begin,
    int *v_adj_length,
    int num_vertices,
    int *result,
    int iteration,
    int *input_queue,
    int *output_queue)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int num_threads = blockDim.x * gridDim.x;
    //assert(false);
    // TODO: Read to shared memory? Maybe also the input queue?
    __shared__ int input_queue_size;
    if (threadIdx.x == 0)
    {
        input_queue_size = *input_queue;  // element 0 holds the size
    }
    __syncthreads();
    __shared__ int queue_size;                    // vertices gathered by this block
    __shared__ int next_queue[BLOCK_QUEUE_SIZE];  // per-block staging queue
    // NOTE(review): there is no overflow check against BLOCK_QUEUE_SIZE when
    // pushing into next_queue — confirm one block can never discover more
    // than BLOCK_QUEUE_SIZE vertices per outer iteration.
    if (threadIdx.x == 0)
    {
        queue_size = 0;
    }
    __syncthreads();
    for (int v = 0; v < input_queue_size; v += num_threads)
    {
        if (v + tid < input_queue_size)
        {
            int vertex = input_queue[v + tid + 1];  // +1 skips the size slot
            for (int n = 0; n < v_adj_length[vertex]; n++)
            {
                int neighbor = v_adj_list[v_adj_begin[vertex] + n];
                if (result[neighbor] == MAX_DIST)  // not yet visited
                {
                    result[neighbor] = iteration + 1;
                    // Add to queue (atomicAdd returns original value)
                    int position = atomicAdd(&queue_size, 1);
                    next_queue[position] = neighbor;
                }
            }
        }
        __syncthreads();
        __shared__ int global_offset;
        if (threadIdx.x == 0)
        {
            // First value is size of queue; reserve a contiguous region.
            global_offset = atomicAdd(output_queue, queue_size);
        }
        __syncthreads();
        // Copy queue to global memory
        for (int i = 0; i < queue_size; i += blockDim.x)
        {
            if (i + threadIdx.x < queue_size)
            {
                output_queue[global_offset + i + threadIdx.x + 1] = next_queue[i + threadIdx.x];
            }
        }
        __syncthreads();
        queue_size = 0;  // all threads store the same value, between barriers
        __syncthreads();
    }
}
// Host driver for frontier-queue BFS: uploads the CSR graph, seeds the input
// queue with start_vertex, and relaunches the kernel once per BFS level until
// the produced frontier is empty.
// Queue layout: element 0 holds the queue size, vertices start at index 1.
// Returns the elapsed time from get_elapsed_time(), or -1 if the number of
// levels exceeds MAX_KERNEL_RUNS.
int bfs_cuda_frontier_queue(
    int *v_adj_list,
    int *v_adj_begin,
    int *v_adj_length,
    int num_vertices,
    int num_edges,
    int start_vertex,
    int *result)
{
    int *k_v_adj_list;
    int *k_v_adj_begin;
    int *k_v_adj_length;
    int *k_result;
    int *k_queue_1;
    int *k_queue_2;
    int kernel_runs = 0;
    int zero_value = 0;
    fill_n(result, num_vertices, MAX_DIST);
    result[start_vertex] = 0;
    int *input_queue_size = new int;
    cudaMalloc(&k_v_adj_list, sizeof(int) * num_edges);
    cudaMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
    cudaMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
    cudaMalloc(&k_result, sizeof(int) * num_vertices);
    cudaMalloc(&k_queue_1, sizeof(int) * num_vertices * 8); // Not sure how big?
    cudaMalloc(&k_queue_2, sizeof(int) * num_vertices * 8); // Not sure how big?
    cudaMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, cudaMemcpyHostToDevice);
    cudaMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
    cudaMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
    cudaMemcpy(k_result, result, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
    // --- START MEASURE TIME ---
    struct timeval t1, t2;
    gettimeofday(&t1, NULL);
    int *k_input_queue = k_queue_1;
    int *k_output_queue = k_queue_2;
    // Initial queue: size 1, containing only the start vertex.
    int first_queue[] = { 1, start_vertex };
    *input_queue_size = 1;
    cudaMemcpy(k_input_queue, first_queue, sizeof(int) * 2, cudaMemcpyHostToDevice);
    do
    {
        // Reset the output queue's size slot (element 0) before each level.
        cudaMemcpy(k_output_queue, &zero_value, sizeof(int) * 1, cudaMemcpyHostToDevice);
        int blocks = min(BLOCKS, max(1, *input_queue_size / THREADS));
        int threads = *input_queue_size <= THREADS ? *input_queue_size : THREADS;
        kernel_cuda_frontier_queue<<<blocks, threads>>>(
            k_v_adj_list,
            k_v_adj_begin,
            k_v_adj_length,
            num_vertices,
            k_result,
            kernel_runs,
            k_input_queue,
            k_output_queue);
        kernel_runs++;
        if (kernel_runs > MAX_KERNEL_RUNS)
        {
            // Fix: the original leaked every device buffer and the host
            // counter on this early-exit path.
            cudaFree(k_v_adj_list);
            cudaFree(k_v_adj_begin);
            cudaFree(k_v_adj_length);
            cudaFree(k_result);
            cudaFree(k_queue_1);
            cudaFree(k_queue_2);
            delete input_queue_size;
            return -1;
        }
        // Swap queues
        int *tmp = k_input_queue;
        k_input_queue = k_output_queue;
        k_output_queue = tmp;
        // Read back the new frontier size (blocking copy also synchronizes
        // with the kernel just launched).
        cudaMemcpy(input_queue_size, k_input_queue, sizeof(int) * 1, cudaMemcpyDeviceToHost);
    } while (*input_queue_size > 0);
    cudaDeviceSynchronize(); // fix: cudaThreadSynchronize() is deprecated
    gettimeofday(&t2, NULL);
    long long time = get_elapsed_time(&t1, &t2);
    if (report_time)
    {
        printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
    }
    // --- END MEASURE TIME ---
    cudaMemcpy(result, k_result, sizeof(int) * num_vertices, cudaMemcpyDeviceToHost);
    cudaFree(k_v_adj_list);
    cudaFree(k_v_adj_begin);
    cudaFree(k_v_adj_length);
    cudaFree(k_result);
    cudaFree(k_queue_1);
    cudaFree(k_queue_2);
    delete input_queue_size; // fix: was leaked
    // printf("%i kernel runs\n", kernel_runs);
    return time;
}
|
df6832a891e0087ed4bd0f62f220a957c6483567.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "gpu_matching.h"
// Copy from OpenCV
#define F_MAX FLT_MAX
#define F_MIN -FLT_MAX
#define NUM_SECTOR 9
texture<float, hipTextureType1D, hipReadModeElementType> texRef;
texture<float, hipTextureType1D, hipReadModeElementType> texMap;
texture<float, hipTextureType1D, hipReadModeElementType> texFi;
// Thin device-side wrapper so HOG accumulation call sites read uniformly;
// forwards to the hardware float atomicAdd and returns the prior value.
__device__ static inline float atomicAdd_float(float * __restrict__ address, float val)
{
    float previous = atomicAdd(address, val); // atomicAdd must be called from "__device__" code
    return previous;
}
// 1-D generalized distance transform along one image row (lower-envelope-of-
// parabolas algorithm, as in Felzenszwalb & Huttenlocher).
// y:    row index; size = x * y is then the linear offset of this row, so
//       score[i + size] addresses column i of row y.
// x:    row length (number of columns).
// a, b: linear and quadratic deformation coefficients of the parabolas.
// work: scratch; envelope entries (.v = parabola root column, .z = left
//       boundary of its interval) live at work[indx ...]; per-column results
//       (.internalPoints = argmin column, .internalDistTrans = minimum) are
//       written at work[size + i].
// indx: start of this row's private envelope region (rows use stride x + 1).
// Always returns 0.
__device__
int DistanceTransformOneDimensionalProblemX
(
    const int y,
    const int x,
    const float * __restrict__ score,
    const float a,
    const float b,
    DistTransWork * __restrict__ work,
    int indx
)
{
    int i, k;
    int diff;
    float pointIntersection;
    float tmp;
    const int size = x * y;
    // Envelope construction; k indexes the rightmost parabola kept so far.
    k = 0;
    work[indx].v = 0;
    work[indx].z = (float)F_MIN;
    work[indx+1].z = (float)F_MAX;
    for (i = 1; i < x; i++)
    {
        tmp = ( score[i + size] - a * i + b * i * i );
        // Intersection of parabola i with the current rightmost parabola.
        pointIntersection = ( tmp
            - ( score[work[indx + k].v + size] - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ) )
            / (2 * b * (i - work[indx + k].v));
        while (pointIntersection <= work[indx + k].z)
        {
            // Parabola i dominates the previous one over its interval: pop it.
            k--;
            pointIntersection = ( tmp
                - ( score[work[indx + k].v + size] - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ) )
                / (2 * b * (i - work[indx + k].v));
        }
        // Addition parabola to the envelope
        k++;
        work[indx + k].v = i;
        work[indx + k].z = pointIntersection;
        work[indx + k + 1].z = (float)F_MAX;
    }
    // Computation values of generalized distance transform at all grid points
    k = 0;
    for (i = 0; i < x; i++)
    {
        while (work[indx + k + 1].z < i)
        {
            k++;
        }
        work[size + i].internalPoints = work[indx + k].v;
        diff = i - work[indx + k].v;
        work[size + i].internalDistTrans = a * diff + b * diff * diff + score[work[indx + k].v + size];
    }
    return 0;
}
// Second (column-wise) pass of the 2-D generalized distance transform: runs
// the lower-envelope algorithm down column x over the per-row minima left by
// DistanceTransformOneDimensionalProblemX (work[x + i*diffX].internalDistTrans).
// x:     column index; y: number of rows; diffX: row stride.
// a, b:  linear and quadratic deformation coefficients.
// points[x + i*diffX]            receives the argmin row for cell (x, i);
// distanceTransform[x + i*diffX] receives the transform value.
// indx:  start of this column's private envelope region (stride y + 1).
// Always returns 0.
__device__ int DistanceTransformOneDimensionalProblemY
(
    const int x,
    const int y,
    const int diffX,
    const float a,
    const float b,
    float * __restrict__ distanceTransform,
    int * __restrict__ points,
    DistTransWork * __restrict__ work,
    int indx
)
{
    int i, k;
    int diff;
    float pointIntersection;
    float tmp;
    // Envelope construction; k indexes the rightmost parabola kept so far.
    k = 0;
    work[indx].v = 0;
    work[indx].z = (float)F_MIN;
    work[indx+1].z = (float)F_MAX;
    for (i = 1; i < y; i++)
    {
        tmp = ( work[x + i * diffX].internalDistTrans - a * i + b * i * i );
        pointIntersection = ( tmp
            - ( work[x + work[indx + k].v * diffX].internalDistTrans - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ))
            / (2 * b * (i - work[indx + k].v));
        while (pointIntersection <= work[indx + k].z)
        {
            // Parabola i dominates the previous one over its interval: pop it.
            k--;
            pointIntersection = ( tmp
                - ( work[x + work[indx + k].v * diffX].internalDistTrans - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ))
                / (2 * b * (i - work[indx + k].v));
        }
        // Addition parabola to the envelope
        k++;
        work[indx + k].v = i;
        work[indx + k].z = pointIntersection;
        work[indx + k + 1].z = (float)F_MAX;
    }
    // Computation values of generalized distance transform at all grid points
    k = 0;
    for (i = 0; i < y; i++)
    {
        while (work[indx + k + 1].z < i)
        {
            k++;
        }
        points[x + i * diffX] = work[indx + k].v;
        diff = i - work[indx + k].v;
        distanceTransform[x + i * diffX] = a * diff + b * diff * diff + work[x + work[indx + k].v * diffX].internalDistTrans;
    }
    return 0;
}
extern "C"
{
// 2-D generalized distance transform of an x-by-y score map, computed as two
// separable 1-D passes (rows, then columns).
// coeff0/coeff2: linear/quadratic x coefficients; coeff1/coeff3: same for y.
// resalt receives the transform values, pointsX/pointsY the argmin coordinates.
// NOTE(review): only threadIdx.x is used for work distribution; launching
// this kernel with more than one block would negate `score` repeatedly —
// confirm it is always launched as a single block of TRANS_THREAD threads.
__global__
void DistanceTransformTwoDimensionalProblemKernel
(
    float * __restrict__ score,
    const int x,
    const int y,
    const float coeff0,
    const float coeff1,
    const float coeff2,
    const float coeff3,
    DistTransWork * __restrict__ work,
    float * __restrict__ resalt,
    int * __restrict__ pointsX,
    int * __restrict__ pointsY
)
{
    int t = threadIdx.x;
    int i;
    int size = x * y;
    // Negate so the maximization problem becomes the minimization the
    // distance transform solves.
    for (i = t; i < size; i+=TRANS_THREAD)
    {
        score[i] = -score[i];
    }
    __syncthreads();
    // Pass 1: one thread per row.
    for (i = t; i < y; i+=TRANS_THREAD)
    {
        DistanceTransformOneDimensionalProblemX(i,x,
            score,
            coeff0, coeff2,
            work,
            i * ( x + 1 ));
    }
    __syncthreads();
    // Pass 2: one thread per column, over the per-row minima from pass 1.
    for (i = t; i < x; i+=TRANS_THREAD)
    {
        DistanceTransformOneDimensionalProblemY(i,y,x,
            coeff1, coeff3,
            resalt,
            pointsY,
            work,
            i * ( y + 1 ));
    }
    __syncthreads();
    // Recover the x argmin of each output cell from the pass-1 scratch.
    // NOTE(review): pass 1 stored row r's argmin for column c at
    // work[r * x + c], but the index below is pointsY[i]*(i % x) + (i / x),
    // which only matches that layout in special cases — verify against the
    // upstream latentsvm implementation.
    for (i = t; i < size; i+=TRANS_THREAD)
    {
        pointsX[i] = work[pointsY[i] * (i % x) + (i / x)].internalPoints;
    }
}
// Correlates the feature map (texMap texture, indirected through
// dev_filterIdxTbl) with one filter (texFi texture, starting at offset idx)
// and writes one score per output position into dev_score. One block per
// score position; the block's threads cooperate on the dot product through a
// shared-memory tree reduction.
__global__
void ConvolutionKernel
(
    const int idx,
    float * __restrict__ dev_score,
    const unsigned int * __restrict__ dev_filterIdxTbl,
    const ConvolutionParam prm
)
{
    __shared__ float cache[CONV_THREAD];
    unsigned int score_idx; // score index
    unsigned int fi_idx; // filter element index
    unsigned int t = threadIdx.x;
    unsigned int b = gridDim.x;
    unsigned int mtable;
    score_idx = blockIdx.x;
    // Offset of this score position inside the feature map.
    // NOTE(review): mtable is derived from the block's first score_idx only;
    // if prm.scoreSize > gridDim.x, later loop iterations reuse a stale
    // offset — confirm the grid is always sized to cover scoreSize.
    mtable = (( score_idx / prm.scoreX ) * prm.mapX ) + (( score_idx % prm.scoreX ) * prm.numSectors );
    // Each block computes dev_score[score_idx]
    for( score_idx = blockIdx.x; score_idx < prm.scoreSize; score_idx += b )
    {
        // Initialize this thread's partial sum
        cache[t] = 0.0f;
        // Each thread accumulates its strided share of the dot product
        for( fi_idx = t; fi_idx < prm.filterSize; fi_idx += CONV_THREAD)
        {
            cache[t] += tex1Dfetch(texMap,dev_filterIdxTbl[fi_idx] + mtable) * tex1Dfetch(texFi,fi_idx + idx);
        }
        //cache[t] = score;
        // Tree-reduce the partial sums into cache[0]
        for( unsigned int i = CONV_THREAD >> 1; i > 0; i >>= 1)
        {
            __syncthreads();
            if( t < i )
            {
                cache[t] += cache[t + i];
            }
        }
        // Store the final score
        if(t == 0)
        {
            dev_score[score_idx] = cache[0];
        }
    }
}
// Bilinear resize of a 32-bit float image bound to texRef (interleaved
// channels) into `out` (row stride widthStepOut bytes).
// Grid: one thread per output pixel in x/y, one z-slice per channel.
// NOTE(review): widthStepIn is unused — texture fetches assume input rows
// are packed as widthIn * channels floats; confirm.
__global__
void BilinearKernelTex32F(
    float * __restrict__ out,
    const int widthIn,
    const int heightIn,
    const int widthOut,
    const int heightOut,
    const int channels,
    const int widthStepIn,
    const int widthStepOut
)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const int c = blockIdx.z;
    if(x < widthOut && y < heightOut)
    {
        // Map the output pixel back into input coordinates.
        const float fx = ((float)widthIn / widthOut);
        const float fy = ((float)heightIn / heightOut);
        const float src_x = x * fx;
        const float src_y = y * fy;
        const int x1 = __float2int_rd(src_x);
        const int y1 = __float2int_rd(src_y);
        const int x2 = x1 + 1;
        const int y2 = y1 + 1;
        // Clamp the lower/right samples to the image border.
        const int x2_read = min(x2, widthIn - 1);
        const int y2_read = min(y2, heightIn - 1);
        int width_step_out_u = widthStepOut / 4;  // byte stride -> float stride
        // Bilinear weights of the four neighbouring input pixels.
        float cell1 = (x2 - src_x) * (y2 - src_y);
        float cell2 = (src_x - x1) * (y2 - src_y);
        float cell3 = (x2 - src_x) * (src_y - y1);
        float cell4 = (src_x - x1) * (src_y - y1);
        out[y * width_step_out_u + x * channels + c] = (float)(
            cell1 * (float)tex1Dfetch(texRef, y1 * widthIn * channels + x1 * channels + c)
            + cell2 * (float)tex1Dfetch(texRef, y1 * widthIn * channels + x2_read * channels + c)
            + cell3 * (float)tex1Dfetch(texRef, y2_read * widthIn * channels + x1 * channels + c)
            + cell4 * (float)tex1Dfetch(texRef, y2_read * widthIn * channels + x2_read * channels + c));
    }
}
// Per-pixel gradients for HOG feature extraction: for each interior pixel,
// keep the channel with the largest gradient magnitude, store that magnitude
// in r and the best orientation sector in alfa (alfa[..2] =
// contrast-insensitive sector in [0, NUM_SECTOR), alfa[..2+1] =
// contrast-sensitive sector in [0, 2*NUM_SECTOR)).
// One thread per pixel; border pixels are left untouched.
__global__
void calculateHistogram(
    const float * __restrict__ in,
    float * __restrict__ r,
    int * __restrict__ alfa,
    const int widthIn,
    const int heightIn,
    const int widthStep,
    const int channels
)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;  // column
    const int j = blockDim.y * blockIdx.y + threadIdx.y;  // row
    int height, width, numChannels;
    int kk, c;
    float magnitude, x, y, tx, ty;
    // cos/sin of the NUM_SECTOR orientation-bin boundary directions.
    const float boundary_x[NUM_SECTOR + 1] =
    { 1.000000, 0.939693, 0.766044, 0.500000, 0.173648, -0.173648, -0.500000, -0.766045, -0.939693, -1.000000};
    const float boundary_y[NUM_SECTOR + 1] =
    { 0.000000, 0.342020, 0.642788, 0.866025, 0.984808, 0.984808, 0.866025, 0.642787, 0.342020, 0.000000};
    float max, dotProd;
    int maxi;
    height = heightIn;
    width = widthIn;
    numChannels = channels;
    int width_step_u = widthStep / 4;  // byte stride -> float stride
    if(j >= 1 && j < height - 1)
    {
        if(i >= 1 && i < width - 1)
        {
            // Central-difference gradient of channel 0 as the initial best.
            // NOTE(review): this path uses width * numChannels as the row
            // stride while the loop below uses widthStep / 4 — equivalent
            // only when rows are packed; confirm.
            c = 0;
            x = (-in[(j * width + (i - 1)) * numChannels + c]) + in[(j * width + (i + 1)) * numChannels + c];
            y = (-in[((j - 1) * width + i) * numChannels + c]) + in[((j + 1) * width + i) * numChannels + c];
            r[j * width + i] = sqrtf(x * x + y * y);
            for(int ch = 1; ch < numChannels; ch++)
            {
                // Fix: sample the candidate channel `ch` (the original
                // indexed with `c`, so channels past 0 were never examined).
                tx = (-in[j * width_step_u + (i - 1) * numChannels + ch]) + in[j * width_step_u + (i + 1) * numChannels + ch];
                ty = (-in[(j - 1) * width_step_u + i * numChannels + ch]) + in[(j + 1) * width_step_u + i * numChannels + ch];
                magnitude = sqrtf(tx * tx + ty * ty);
                if(magnitude > r[j * width + i])
                {
                    r[j * width + i] = magnitude;
                    c = ch;
                    x = tx;
                    y = ty;
                }
            }
            // Pick the sector whose boundary direction best matches (x, y);
            // a negative dot product maps to the opposite contrast sign.
            max = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++)
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max)
                {
                    max = dotProd;
                    maxi = kk;
                }
                else
                {
                    if (-dotProd > max)
                    {
                        max = -dotProd;
                        maxi = kk + NUM_SECTOR;
                    }
                }
            }
            alfa[j * width * 2 + i * 2 ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;
        }
    }
}
// Accumulates per-pixel gradient magnitudes (r) into the HOG cell histograms
// in `map`, bilinearly splatting each pixel into its own cell and up to three
// neighbouring cells using the precomputed spatial weights `w` and neighbour
// offsets `nearest`.
// Grid: x covers cell columns, y cell rows; blockIdx.z enumerates the k*k
// pixels inside a cell (ii = pixel row, jj = pixel column within the cell).
// alfa[d*2] is the contrast-insensitive bin; alfa[d*2+1] the
// contrast-sensitive bin (stored NUM_SECTOR slots later in the cell).
// atomicAdd is required because different z-slices update the same cells.
__global__
void getFeatureMaps(
    const float * __restrict__ r,
    const int * __restrict__ alfa,
    const int * __restrict__ nearest,
    const float * __restrict__ w,
    float * __restrict__ map,
    const int widthMap,
    const int heightMap,
    const int k,
    const int numFeatures
)
{
    const int j = blockDim.x * blockIdx.x + threadIdx.x;
    const int i = blockDim.y * blockIdx.y + threadIdx.y;
    const int ii = blockIdx.z / k;
    const int jj = blockIdx.z % k;
    int sizeX, sizeY;
    int p, px, stringSize;
    int height, width;
    int d;
    height = heightMap;
    width = widthMap;
    sizeX = width / k;      // cells per row
    sizeY = height / k;     // cells per column
    px = 3 * NUM_SECTOR;    // features per cell
    p = px;
    stringSize = sizeX * p; // features per cell row
    if(i < sizeY)
    {
        if(j < sizeX)
        {
            if(ii < k)
            {
                if(jj < k)
                {
                    // Skip the one-pixel image border (no gradients there).
                    if ((i * k + ii > 0) &&
                        (i * k + ii < height - 1) &&
                        (j * k + jj > 0) &&
                        (j * k + jj < width - 1))
                    {
                        d = (k * i + ii) * width + (j * k + jj); // pixel index
                        // Own cell.
                        atomicAdd_float(
                            (float *)(map + (i * stringSize + j * numFeatures + alfa[d * 2 ])),
                            (float)(r[d] * w[ii * 2] * w[jj * 2])
                        );
                        atomicAdd_float(
                            (float *)(map + (i * stringSize + j * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                            (float)(r[d] * w[ii * 2] * w[jj * 2])
                        );
                        // Vertical neighbour cell.
                        if ((i + nearest[ii] >= 0) &&
                            (i + nearest[ii] <= sizeY - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + j * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2])
                            );
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + j * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2])
                            );
                        }
                        // Horizontal neighbour cell.
                        if ((j + nearest[jj] >= 0) &&
                            (j + nearest[jj] <= sizeX - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + (i * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2] * w[jj * 2 + 1])
                            );
                            atomicAdd_float(
                                (float *)(map + (i * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2] * w[jj * 2 + 1])
                            );
                        }
                        // Diagonal neighbour cell.
                        if ((i + nearest[ii] >= 0) &&
                            (i + nearest[ii] <= sizeY - 1) &&
                            (j + nearest[jj] >= 0) &&
                            (j + nearest[jj] <= sizeX - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2 + 1])
                            );
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2 + 1])
                            );
                        }
                    }
                }
            }
        }
    }
}
// One thread per cell: accumulate the squared first NUM_SECTOR features of
// cell (x, y) into partOfNorm[cell] (the per-cell histogram energy used
// later for block normalization).
__global__
void calculateNorm(
    const float * __restrict__ map,
    float * __restrict__ partOfNorm,
    const int sizeX,
    const int sizeY,
    const int numFeatures
)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (y > sizeY || x > sizeX)
    {
        return;
    }
    const int cell = y * sizeX + x;
    const int base = cell * numFeatures;
    float sumSq = 0.0f;
    for (int f = 0; f < NUM_SECTOR; f++)
    {
        const float v = map[base + f];
        sumSq += v * v;
    }
    partOfNorm[cell] = sumSq;
}
// Normalizes each interior HOG cell against the energies (partOfNorm) of its
// four overlapping 2x2 cell neighbourhoods and truncates every value at
// `alfa`, writing the 12*NUM_SECTOR-feature descriptor to newData. The
// one-cell border is dropped, so the output grid is sizeX x sizeY.
// Grid: x/y cover cells; blockIdx.z selects the feature index.
// NOTE(review): the unconditional stores at offsets p*4..p*10 assume
// gridDim.z == 2 * NUM_SECTOR — confirm the launch configuration.
__global__
void normalizeAndTruncate(
    const float * __restrict__ map,
    const float * __restrict__ partOfNorm,
    float * __restrict__ newData,
    const int mapSizeX,
    const int mapSizeY,
    const float alfa
)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int ii = blockIdx.z;
    int sizeX, sizeY, p, pp, xp, pos1, pos2;
    float valOfNorm1, valOfNorm2, valOfNorm3, valOfNorm4;
    p = NUM_SECTOR;
    xp = NUM_SECTOR * 3;   // input features per cell
    pp = NUM_SECTOR * 12;  // output features per cell
    sizeX = mapSizeX - 2;
    sizeY = mapSizeY - 2;
    if(y >= 1 && y <= sizeY)
    {
        if(x >= 1 && x <= sizeX)
        {
            // Energies of the four 2x2 neighbourhoods that contain this cell.
            valOfNorm1 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x + 1)] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x + 1)]) + FLT_EPSILON;
            valOfNorm2 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x + 1)] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x + 1)]) + FLT_EPSILON;
            valOfNorm3 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x - 1)] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x - 1)]) + FLT_EPSILON;
            valOfNorm4 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x - 1)] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x - 1)]) + FLT_EPSILON;
            pos1 = (y ) * (sizeX + 2) * xp + (x ) * xp;  // input cell offset
            pos2 = (y-1) * (sizeX ) * pp + (x-1) * pp;   // output cell offset
            if(ii < p)
            {
                // First NUM_SECTOR input features: one output copy per norm.
                newData[pos2 + ii ] = fminf(map[pos1 + ii] / valOfNorm1, alfa);
                newData[pos2 + ii + p ] = fminf(map[pos1 + ii] / valOfNorm2, alfa);
                newData[pos2 + ii + p * 2] = fminf(map[pos1 + ii] / valOfNorm3, alfa);
                newData[pos2 + ii + p * 3] = fminf(map[pos1 + ii] / valOfNorm4, alfa);
            }
            // Remaining 2*NUM_SECTOR features (map[pos1 + ii + p]): one
            // output copy per norm, laid out after the first 4*p slots.
            newData[pos2 + ii + p * 4 ] = fminf(map[pos1 + ii + p] / valOfNorm1, alfa);
            newData[pos2 + ii + p * 6 ] = fminf(map[pos1 + ii + p] / valOfNorm2, alfa);
            newData[pos2 + ii + p * 8 ] = fminf(map[pos1 + ii + p] / valOfNorm3, alfa);
            newData[pos2 + ii + p * 10] = fminf(map[pos1 + ii + p] / valOfNorm4, alfa);
        }
    }
}
// Analytic reduction of the 12*NUM_SECTOR normalized HOG descriptor to
// NUM_SECTOR*3 + 4 features per cell (sums over normalization copies scaled
// by 1/sqrt(count)), writing the result inside a bx/by-cell border region of
// newData (border cells themselves are not written here).
// Grid: one thread per input cell.
__global__
void PCAFeatureMapsAddNullableBorder(
    const float * __restrict__ map,
    float * __restrict__ newData,
    const int borderMapSizeX,
    const int borderMapSizeY,
    const int numFeatures,
    const int bx,
    const int by
)
{
    const int j = blockDim.x * blockIdx.x + threadIdx.x;
    const int i = blockDim.y * blockIdx.y + threadIdx.y;
    int ii, jj, k;
    int sizeX, sizeY, p, pp, xp, yp, pos1, pos2;
    float val;
    float nx, ny;
    sizeX = borderMapSizeX;
    sizeY = borderMapSizeY;
    p = numFeatures;          // input features per cell
    pp = NUM_SECTOR * 3 + 4;  // output features per cell
    yp = 4;                   // normalization copies per bin
    xp = NUM_SECTOR;
    nx = 1.0f / sqrtf((float)(xp * 2));
    ny = 1.0f / sqrtf((float)(yp ));
    if(i < sizeY)
    {
        if(j < sizeX)
        {
            pos1 = ((i)*sizeX + j)*p;                        // input cell
            pos2 = ((i + by)*(sizeX + 2 * bx) + j + bx)*pp;  // border-shifted output cell
            k = 0;
            // 2*NUM_SECTOR features: sum over the 4 normalization copies.
            for(jj = 0; jj < xp * 2; jj++)
            {
                newData[pos2 + k] = ( map[pos1 + yp * xp + jj]
                    + map[pos1 + (yp + 2) * xp + jj]
                    + map[pos1 + (yp + 4) * xp + jj]
                    + map[pos1 + (yp + 6) * xp + jj] ) * ny;
                k++;
            }
            // NUM_SECTOR features: sum over the first 4 copies.
            for(jj = 0; jj < xp; jj++)
            {
                newData[pos2 + k] = ( map[pos1 + jj]
                    + map[pos1 + xp + jj]
                    + map[pos1 + 2 * xp + jj]
                    + map[pos1 + 3 * xp + jj]) * ny;
                k++;
            }
            // 4 features: per-normalization sums over all 2*NUM_SECTOR bins.
            for(ii = 0; ii < yp; ii++)
            {
                val = 0;
                for(jj = 0; jj < 2 * xp; jj++)
                {
                    val += map[pos1 + yp * xp + ii * xp * 2 + jj];
                }
                newData[pos2 + k] = val * nx;
                k++;
            }
        }
    }
}
} // extern "C"
| df6832a891e0087ed4bd0f62f220a957c6483567.cu | #include <stdio.h>
#include <float.h>
#include <math.h>
#include <cuda.h>
#include "gpu_matching.h"
// Copy from OpenCV
#define F_MAX FLT_MAX
#define F_MIN -FLT_MAX
#define NUM_SECTOR 9
texture<float, cudaTextureType1D, cudaReadModeElementType> texRef;
texture<float, cudaTextureType1D, cudaReadModeElementType> texMap;
texture<float, cudaTextureType1D, cudaReadModeElementType> texFi;
// Thin device-side wrapper so HOG accumulation call sites read uniformly;
// forwards to the hardware float atomicAdd and returns the prior value.
__device__ static inline float atomicAdd_float(float * __restrict__ address, float val)
{
    float previous = atomicAdd(address, val); // atomicAdd must be called from "__device__" code
    return previous;
}
// 1-D generalized distance transform along one image row (lower-envelope-of-
// parabolas algorithm, as in Felzenszwalb & Huttenlocher).
// y:    row index; size = x * y is then the linear offset of this row, so
//       score[i + size] addresses column i of row y.
// x:    row length (number of columns).
// a, b: linear and quadratic deformation coefficients of the parabolas.
// work: scratch; envelope entries (.v = parabola root column, .z = left
//       boundary of its interval) live at work[indx ...]; per-column results
//       (.internalPoints = argmin column, .internalDistTrans = minimum) are
//       written at work[size + i].
// indx: start of this row's private envelope region (rows use stride x + 1).
// Always returns 0.
__device__
int DistanceTransformOneDimensionalProblemX
(
    const int y,
    const int x,
    const float * __restrict__ score,
    const float a,
    const float b,
    DistTransWork * __restrict__ work,
    int indx
)
{
    int i, k;
    int diff;
    float pointIntersection;
    float tmp;
    const int size = x * y;
    // Envelope construction; k indexes the rightmost parabola kept so far.
    k = 0;
    work[indx].v = 0;
    work[indx].z = (float)F_MIN;
    work[indx+1].z = (float)F_MAX;
    for (i = 1; i < x; i++)
    {
        tmp = ( score[i + size] - a * i + b * i * i );
        // Intersection of parabola i with the current rightmost parabola.
        pointIntersection = ( tmp
            - ( score[work[indx + k].v + size] - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ) )
            / (2 * b * (i - work[indx + k].v));
        while (pointIntersection <= work[indx + k].z)
        {
            // Parabola i dominates the previous one over its interval: pop it.
            k--;
            pointIntersection = ( tmp
                - ( score[work[indx + k].v + size] - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ) )
                / (2 * b * (i - work[indx + k].v));
        }
        // Addition parabola to the envelope
        k++;
        work[indx + k].v = i;
        work[indx + k].z = pointIntersection;
        work[indx + k + 1].z = (float)F_MAX;
    }
    // Computation values of generalized distance transform at all grid points
    k = 0;
    for (i = 0; i < x; i++)
    {
        while (work[indx + k + 1].z < i)
        {
            k++;
        }
        work[size + i].internalPoints = work[indx + k].v;
        diff = i - work[indx + k].v;
        work[size + i].internalDistTrans = a * diff + b * diff * diff + score[work[indx + k].v + size];
    }
    return 0;
}
// Second (column-wise) pass of the 2-D generalized distance transform: runs
// the lower-envelope algorithm down column x over the per-row minima left by
// DistanceTransformOneDimensionalProblemX (work[x + i*diffX].internalDistTrans).
// x:     column index; y: number of rows; diffX: row stride.
// a, b:  linear and quadratic deformation coefficients.
// points[x + i*diffX]            receives the argmin row for cell (x, i);
// distanceTransform[x + i*diffX] receives the transform value.
// indx:  start of this column's private envelope region (stride y + 1).
// Always returns 0.
__device__ int DistanceTransformOneDimensionalProblemY
(
    const int x,
    const int y,
    const int diffX,
    const float a,
    const float b,
    float * __restrict__ distanceTransform,
    int * __restrict__ points,
    DistTransWork * __restrict__ work,
    int indx
)
{
    int i, k;
    int diff;
    float pointIntersection;
    float tmp;
    // Envelope construction; k indexes the rightmost parabola kept so far.
    k = 0;
    work[indx].v = 0;
    work[indx].z = (float)F_MIN;
    work[indx+1].z = (float)F_MAX;
    for (i = 1; i < y; i++)
    {
        tmp = ( work[x + i * diffX].internalDistTrans - a * i + b * i * i );
        pointIntersection = ( tmp
            - ( work[x + work[indx + k].v * diffX].internalDistTrans - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ))
            / (2 * b * (i - work[indx + k].v));
        while (pointIntersection <= work[indx + k].z)
        {
            // Parabola i dominates the previous one over its interval: pop it.
            k--;
            pointIntersection = ( tmp
                - ( work[x + work[indx + k].v * diffX].internalDistTrans - a * work[indx + k].v + b * work[indx + k].v * work[indx + k].v ))
                / (2 * b * (i - work[indx + k].v));
        }
        // Addition parabola to the envelope
        k++;
        work[indx + k].v = i;
        work[indx + k].z = pointIntersection;
        work[indx + k + 1].z = (float)F_MAX;
    }
    // Computation values of generalized distance transform at all grid points
    k = 0;
    for (i = 0; i < y; i++)
    {
        while (work[indx + k + 1].z < i)
        {
            k++;
        }
        points[x + i * diffX] = work[indx + k].v;
        diff = i - work[indx + k].v;
        distanceTransform[x + i * diffX] = a * diff + b * diff * diff + work[x + work[indx + k].v * diffX].internalDistTrans;
    }
    return 0;
}
extern "C"
{
// 2-D generalized distance transform of an x-by-y score map, computed as two
// separable 1-D passes (rows, then columns).
// coeff0/coeff2: linear/quadratic x coefficients; coeff1/coeff3: same for y.
// resalt receives the transform values, pointsX/pointsY the argmin coordinates.
// NOTE(review): only threadIdx.x is used for work distribution; launching
// this kernel with more than one block would negate `score` repeatedly —
// confirm it is always launched as a single block of TRANS_THREAD threads.
__global__
void DistanceTransformTwoDimensionalProblemKernel
(
    float * __restrict__ score,
    const int x,
    const int y,
    const float coeff0,
    const float coeff1,
    const float coeff2,
    const float coeff3,
    DistTransWork * __restrict__ work,
    float * __restrict__ resalt,
    int * __restrict__ pointsX,
    int * __restrict__ pointsY
)
{
    int t = threadIdx.x;
    int i;
    int size = x * y;
    // Negate so the maximization problem becomes the minimization the
    // distance transform solves.
    for (i = t; i < size; i+=TRANS_THREAD)
    {
        score[i] = -score[i];
    }
    __syncthreads();
    // Pass 1: one thread per row.
    for (i = t; i < y; i+=TRANS_THREAD)
    {
        DistanceTransformOneDimensionalProblemX(i,x,
            score,
            coeff0, coeff2,
            work,
            i * ( x + 1 ));
    }
    __syncthreads();
    // Pass 2: one thread per column, over the per-row minima from pass 1.
    for (i = t; i < x; i+=TRANS_THREAD)
    {
        DistanceTransformOneDimensionalProblemY(i,y,x,
            coeff1, coeff3,
            resalt,
            pointsY,
            work,
            i * ( y + 1 ));
    }
    __syncthreads();
    // Recover the x argmin of each output cell from the pass-1 scratch.
    // NOTE(review): pass 1 stored row r's argmin for column c at
    // work[r * x + c], but the index below is pointsY[i]*(i % x) + (i / x),
    // which only matches that layout in special cases — verify against the
    // upstream latentsvm implementation.
    for (i = t; i < size; i+=TRANS_THREAD)
    {
        pointsX[i] = work[pointsY[i] * (i % x) + (i / x)].internalPoints;
    }
}
// Correlates the feature map (texMap texture, indirected through
// dev_filterIdxTbl) with one filter (texFi texture, starting at offset idx)
// and writes one score per output position into dev_score. One block per
// score position; the block's threads cooperate on the dot product through a
// shared-memory tree reduction.
__global__
void ConvolutionKernel
(
    const int idx,
    float * __restrict__ dev_score,
    const unsigned int * __restrict__ dev_filterIdxTbl,
    const ConvolutionParam prm
)
{
    __shared__ float cache[CONV_THREAD];
    unsigned int score_idx; // score index
    unsigned int fi_idx; // filter element index
    unsigned int t = threadIdx.x;
    unsigned int b = gridDim.x;
    unsigned int mtable;
    score_idx = blockIdx.x;
    // Offset of this score position inside the feature map.
    // NOTE(review): mtable is derived from the block's first score_idx only;
    // if prm.scoreSize > gridDim.x, later loop iterations reuse a stale
    // offset — confirm the grid is always sized to cover scoreSize.
    mtable = (( score_idx / prm.scoreX ) * prm.mapX ) + (( score_idx % prm.scoreX ) * prm.numSectors );
    // Each block computes dev_score[score_idx]
    for( score_idx = blockIdx.x; score_idx < prm.scoreSize; score_idx += b )
    {
        // Initialize this thread's partial sum
        cache[t] = 0.0f;
        // Each thread accumulates its strided share of the dot product
        for( fi_idx = t; fi_idx < prm.filterSize; fi_idx += CONV_THREAD)
        {
            cache[t] += tex1Dfetch(texMap,dev_filterIdxTbl[fi_idx] + mtable) * tex1Dfetch(texFi,fi_idx + idx);
        }
        //cache[t] = score;
        // Tree-reduce the partial sums into cache[0]
        for( unsigned int i = CONV_THREAD >> 1; i > 0; i >>= 1)
        {
            __syncthreads();
            if( t < i )
            {
                cache[t] += cache[t + i];
            }
        }
        // Store the final score
        if(t == 0)
        {
            dev_score[score_idx] = cache[0];
        }
    }
}
// Bilinear resize of a 32-bit float image bound to texRef (interleaved
// channels) into `out` (row stride widthStepOut bytes).
// Grid: one thread per output pixel in x/y, one z-slice per channel.
// NOTE(review): widthStepIn is unused — texture fetches assume input rows
// are packed as widthIn * channels floats; confirm.
__global__
void BilinearKernelTex32F(
    float * __restrict__ out,
    const int widthIn,
    const int heightIn,
    const int widthOut,
    const int heightOut,
    const int channels,
    const int widthStepIn,
    const int widthStepOut
)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const int c = blockIdx.z;
    if(x < widthOut && y < heightOut)
    {
        // Map the output pixel back into input coordinates.
        const float fx = ((float)widthIn / widthOut);
        const float fy = ((float)heightIn / heightOut);
        const float src_x = x * fx;
        const float src_y = y * fy;
        const int x1 = __float2int_rd(src_x);
        const int y1 = __float2int_rd(src_y);
        const int x2 = x1 + 1;
        const int y2 = y1 + 1;
        // Clamp the lower/right samples to the image border.
        const int x2_read = min(x2, widthIn - 1);
        const int y2_read = min(y2, heightIn - 1);
        int width_step_out_u = widthStepOut / 4;  // byte stride -> float stride
        // Bilinear weights of the four neighbouring input pixels.
        float cell1 = (x2 - src_x) * (y2 - src_y);
        float cell2 = (src_x - x1) * (y2 - src_y);
        float cell3 = (x2 - src_x) * (src_y - y1);
        float cell4 = (src_x - x1) * (src_y - y1);
        out[y * width_step_out_u + x * channels + c] = (float)(
            cell1 * (float)tex1Dfetch(texRef, y1 * widthIn * channels + x1 * channels + c)
            + cell2 * (float)tex1Dfetch(texRef, y1 * widthIn * channels + x2_read * channels + c)
            + cell3 * (float)tex1Dfetch(texRef, y2_read * widthIn * channels + x1 * channels + c)
            + cell4 * (float)tex1Dfetch(texRef, y2_read * widthIn * channels + x2_read * channels + c));
    }
}
// Per-pixel gradients for HOG feature extraction: for each interior pixel,
// keep the channel with the largest gradient magnitude, store that magnitude
// in r and the best orientation sector in alfa (alfa[..2] =
// contrast-insensitive sector in [0, NUM_SECTOR), alfa[..2+1] =
// contrast-sensitive sector in [0, 2*NUM_SECTOR)).
// One thread per pixel; border pixels are left untouched.
__global__
void calculateHistogram(
    const float * __restrict__ in,
    float * __restrict__ r,
    int * __restrict__ alfa,
    const int widthIn,
    const int heightIn,
    const int widthStep,
    const int channels
)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;  // column
    const int j = blockDim.y * blockIdx.y + threadIdx.y;  // row
    int height, width, numChannels;
    int kk, c;
    float magnitude, x, y, tx, ty;
    // cos/sin of the NUM_SECTOR orientation-bin boundary directions.
    const float boundary_x[NUM_SECTOR + 1] =
    { 1.000000, 0.939693, 0.766044, 0.500000, 0.173648, -0.173648, -0.500000, -0.766045, -0.939693, -1.000000};
    const float boundary_y[NUM_SECTOR + 1] =
    { 0.000000, 0.342020, 0.642788, 0.866025, 0.984808, 0.984808, 0.866025, 0.642787, 0.342020, 0.000000};
    float max, dotProd;
    int maxi;
    height = heightIn;
    width = widthIn;
    numChannels = channels;
    int width_step_u = widthStep / 4;  // byte stride -> float stride
    if(j >= 1 && j < height - 1)
    {
        if(i >= 1 && i < width - 1)
        {
            // Central-difference gradient of channel 0 as the initial best.
            // NOTE(review): this path uses width * numChannels as the row
            // stride while the loop below uses widthStep / 4 — equivalent
            // only when rows are packed; confirm.
            c = 0;
            x = (-in[(j * width + (i - 1)) * numChannels + c]) + in[(j * width + (i + 1)) * numChannels + c];
            y = (-in[((j - 1) * width + i) * numChannels + c]) + in[((j + 1) * width + i) * numChannels + c];
            r[j * width + i] = sqrtf(x * x + y * y);
            for(int ch = 1; ch < numChannels; ch++)
            {
                // Fix: sample the candidate channel `ch` (the original
                // indexed with `c`, so channels past 0 were never examined).
                tx = (-in[j * width_step_u + (i - 1) * numChannels + ch]) + in[j * width_step_u + (i + 1) * numChannels + ch];
                ty = (-in[(j - 1) * width_step_u + i * numChannels + ch]) + in[(j + 1) * width_step_u + i * numChannels + ch];
                magnitude = sqrtf(tx * tx + ty * ty);
                if(magnitude > r[j * width + i])
                {
                    r[j * width + i] = magnitude;
                    c = ch;
                    x = tx;
                    y = ty;
                }
            }
            // Pick the sector whose boundary direction best matches (x, y);
            // a negative dot product maps to the opposite contrast sign.
            max = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++)
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max)
                {
                    max = dotProd;
                    maxi = kk;
                }
                else
                {
                    if (-dotProd > max)
                    {
                        max = -dotProd;
                        maxi = kk + NUM_SECTOR;
                    }
                }
            }
            alfa[j * width * 2 + i * 2 ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;
        }
    }
}
// Accumulates per-pixel gradient magnitudes (r) into the HOG cell histograms
// in `map`, bilinearly splatting each pixel into its own cell and up to three
// neighbouring cells using the precomputed spatial weights `w` and neighbour
// offsets `nearest`.
// Grid: x covers cell columns, y cell rows; blockIdx.z enumerates the k*k
// pixels inside a cell (ii = pixel row, jj = pixel column within the cell).
// alfa[d*2] is the contrast-insensitive bin; alfa[d*2+1] the
// contrast-sensitive bin (stored NUM_SECTOR slots later in the cell).
// atomicAdd is required because different z-slices update the same cells.
__global__
void getFeatureMaps(
    const float * __restrict__ r,
    const int * __restrict__ alfa,
    const int * __restrict__ nearest,
    const float * __restrict__ w,
    float * __restrict__ map,
    const int widthMap,
    const int heightMap,
    const int k,
    const int numFeatures
)
{
    const int j = blockDim.x * blockIdx.x + threadIdx.x;
    const int i = blockDim.y * blockIdx.y + threadIdx.y;
    const int ii = blockIdx.z / k;
    const int jj = blockIdx.z % k;
    int sizeX, sizeY;
    int p, px, stringSize;
    int height, width;
    int d;
    height = heightMap;
    width = widthMap;
    sizeX = width / k;      // cells per row
    sizeY = height / k;     // cells per column
    px = 3 * NUM_SECTOR;    // features per cell
    p = px;
    stringSize = sizeX * p; // features per cell row
    if(i < sizeY)
    {
        if(j < sizeX)
        {
            if(ii < k)
            {
                if(jj < k)
                {
                    // Skip the one-pixel image border (no gradients there).
                    if ((i * k + ii > 0) &&
                        (i * k + ii < height - 1) &&
                        (j * k + jj > 0) &&
                        (j * k + jj < width - 1))
                    {
                        d = (k * i + ii) * width + (j * k + jj); // pixel index
                        // Own cell.
                        atomicAdd_float(
                            (float *)(map + (i * stringSize + j * numFeatures + alfa[d * 2 ])),
                            (float)(r[d] * w[ii * 2] * w[jj * 2])
                        );
                        atomicAdd_float(
                            (float *)(map + (i * stringSize + j * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                            (float)(r[d] * w[ii * 2] * w[jj * 2])
                        );
                        // Vertical neighbour cell.
                        if ((i + nearest[ii] >= 0) &&
                            (i + nearest[ii] <= sizeY - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + j * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2])
                            );
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + j * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2])
                            );
                        }
                        // Horizontal neighbour cell.
                        if ((j + nearest[jj] >= 0) &&
                            (j + nearest[jj] <= sizeX - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + (i * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2] * w[jj * 2 + 1])
                            );
                            atomicAdd_float(
                                (float *)(map + (i * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2] * w[jj * 2 + 1])
                            );
                        }
                        // Diagonal neighbour cell.
                        if ((i + nearest[ii] >= 0) &&
                            (i + nearest[ii] <= sizeY - 1) &&
                            (j + nearest[jj] >= 0) &&
                            (j + nearest[jj] <= sizeX - 1))
                        {
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 ])),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2 + 1])
                            );
                            atomicAdd_float(
                                (float *)(map + ((i + nearest[ii]) * stringSize + (j + nearest[jj]) * numFeatures + alfa[d * 2 + 1] + NUM_SECTOR)),
                                (float)(r[d] * w[ii * 2 + 1] * w[jj * 2 + 1])
                            );
                        }
                    }
                }
            }
        }
    }
}
// One thread per cell: accumulate the squared first NUM_SECTOR features of
// cell (x, y) into partOfNorm[cell] (the per-cell histogram energy used
// later for block normalization).
__global__
void calculateNorm(
    const float * __restrict__ map,
    float * __restrict__ partOfNorm,
    const int sizeX,
    const int sizeY,
    const int numFeatures
)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (y > sizeY || x > sizeX)
    {
        return;
    }
    const int cell = y * sizeX + x;
    const int base = cell * numFeatures;
    float sumSq = 0.0f;
    for (int f = 0; f < NUM_SECTOR; f++)
    {
        const float v = map[base + f];
        sumSq += v * v;
    }
    partOfNorm[cell] = sumSq;
}
// Normalizes each interior HOG cell against the energies (partOfNorm) of its
// four overlapping 2x2 cell neighbourhoods and truncates every value at
// `alfa`, writing the 12*NUM_SECTOR-feature descriptor to newData. The
// one-cell border is dropped, so the output grid is sizeX x sizeY.
// Grid: x/y cover cells; blockIdx.z selects the feature index.
// NOTE(review): the unconditional stores at offsets p*4..p*10 assume
// gridDim.z == 2 * NUM_SECTOR — confirm the launch configuration.
__global__
void normalizeAndTruncate(
    const float * __restrict__ map,
    const float * __restrict__ partOfNorm,
    float * __restrict__ newData,
    const int mapSizeX,
    const int mapSizeY,
    const float alfa
)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int ii = blockIdx.z;
    int sizeX, sizeY, p, pp, xp, pos1, pos2;
    float valOfNorm1, valOfNorm2, valOfNorm3, valOfNorm4;
    p = NUM_SECTOR;
    xp = NUM_SECTOR * 3;   // input features per cell
    pp = NUM_SECTOR * 12;  // output features per cell
    sizeX = mapSizeX - 2;
    sizeY = mapSizeY - 2;
    if(y >= 1 && y <= sizeY)
    {
        if(x >= 1 && x <= sizeX)
        {
            // Energies of the four 2x2 neighbourhoods that contain this cell.
            valOfNorm1 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x + 1)] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x + 1)]) + FLT_EPSILON;
            valOfNorm2 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x + 1)] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x + 1)]) + FLT_EPSILON;
            valOfNorm3 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x - 1)] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y + 1)*(sizeX + 2) + (x - 1)]) + FLT_EPSILON;
            valOfNorm4 = sqrtf(
                partOfNorm[(y )*(sizeX + 2) + (x )] +
                partOfNorm[(y )*(sizeX + 2) + (x - 1)] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x )] +
                partOfNorm[(y - 1)*(sizeX + 2) + (x - 1)]) + FLT_EPSILON;
            pos1 = (y ) * (sizeX + 2) * xp + (x ) * xp;  // input cell offset
            pos2 = (y-1) * (sizeX ) * pp + (x-1) * pp;   // output cell offset
            if(ii < p)
            {
                // First NUM_SECTOR input features: one output copy per norm.
                newData[pos2 + ii ] = fminf(map[pos1 + ii] / valOfNorm1, alfa);
                newData[pos2 + ii + p ] = fminf(map[pos1 + ii] / valOfNorm2, alfa);
                newData[pos2 + ii + p * 2] = fminf(map[pos1 + ii] / valOfNorm3, alfa);
                newData[pos2 + ii + p * 3] = fminf(map[pos1 + ii] / valOfNorm4, alfa);
            }
            // Remaining 2*NUM_SECTOR features (map[pos1 + ii + p]): one
            // output copy per norm, laid out after the first 4*p slots.
            newData[pos2 + ii + p * 4 ] = fminf(map[pos1 + ii + p] / valOfNorm1, alfa);
            newData[pos2 + ii + p * 6 ] = fminf(map[pos1 + ii + p] / valOfNorm2, alfa);
            newData[pos2 + ii + p * 8 ] = fminf(map[pos1 + ii + p] / valOfNorm3, alfa);
            newData[pos2 + ii + p * 10] = fminf(map[pos1 + ii + p] / valOfNorm4, alfa);
        }
    }
}
// Analytic reduction of the 12*NUM_SECTOR normalized HOG descriptor to
// NUM_SECTOR*3 + 4 features per cell (sums over normalization copies scaled
// by 1/sqrt(count)), writing the result inside a bx/by-cell border region of
// newData (border cells themselves are not written here).
// Grid: one thread per input cell.
__global__
void PCAFeatureMapsAddNullableBorder(
    const float * __restrict__ map,
    float * __restrict__ newData,
    const int borderMapSizeX,
    const int borderMapSizeY,
    const int numFeatures,
    const int bx,
    const int by
)
{
    const int j = blockDim.x * blockIdx.x + threadIdx.x;
    const int i = blockDim.y * blockIdx.y + threadIdx.y;
    int ii, jj, k;
    int sizeX, sizeY, p, pp, xp, yp, pos1, pos2;
    float val;
    float nx, ny;
    sizeX = borderMapSizeX;
    sizeY = borderMapSizeY;
    p = numFeatures;          // input features per cell
    pp = NUM_SECTOR * 3 + 4;  // output features per cell
    yp = 4;                   // normalization copies per bin
    xp = NUM_SECTOR;
    nx = 1.0f / sqrtf((float)(xp * 2));
    ny = 1.0f / sqrtf((float)(yp ));
    if(i < sizeY)
    {
        if(j < sizeX)
        {
            pos1 = ((i)*sizeX + j)*p;                        // input cell
            pos2 = ((i + by)*(sizeX + 2 * bx) + j + bx)*pp;  // border-shifted output cell
            k = 0;
            // 2*NUM_SECTOR features: sum over the 4 normalization copies.
            for(jj = 0; jj < xp * 2; jj++)
            {
                newData[pos2 + k] = ( map[pos1 + yp * xp + jj]
                    + map[pos1 + (yp + 2) * xp + jj]
                    + map[pos1 + (yp + 4) * xp + jj]
                    + map[pos1 + (yp + 6) * xp + jj] ) * ny;
                k++;
            }
            // NUM_SECTOR features: sum over the first 4 copies.
            for(jj = 0; jj < xp; jj++)
            {
                newData[pos2 + k] = ( map[pos1 + jj]
                    + map[pos1 + xp + jj]
                    + map[pos1 + 2 * xp + jj]
                    + map[pos1 + 3 * xp + jj]) * ny;
                k++;
            }
            // 4 features: per-normalization sums over all 2*NUM_SECTOR bins.
            for(ii = 0; ii < yp; ii++)
            {
                val = 0;
                for(jj = 0; jj < 2 * xp; jj++)
                {
                    val += map[pos1 + yp * xp + ii * xp * 2 + jj];
                }
                newData[pos2 + k] = val * nx;
                k++;
            }
        }
    }
}
} // extern "C"
|
91179220433f7e2ca6fb55bcc198de28bb8e4ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <conio.h>
#define NUM_NODES 5
typedef struct
{
int start; // Index of first adjacent node in Ea
int length; // Number of adjacent nodes
} Node;
__global__ void CUDA_BFS_KERNEL(Node *Va, int *Ea, bool *Fa, bool *Xa, int *Ca,bool *done)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id > NUM_NODES)
*done = false;
if (Fa[id] == true && Xa[id] == false)
{
printf("%d ", id); //This printf gives the order of vertices in BFS
Fa[id] = false;
Xa[id] = true;
__syncthreads();
int k = 0;
int i;
int start = Va[id].start;
int end = start + Va[id].length;
for (int i = start; i < end; i++)
{
int nid = Ea[i];
if (Xa[nid] == false)
{
Ca[nid] = Ca[id] + 1;
Fa[nid] = true;
*done = false;
}
}
}
}
// The BFS frontier corresponds to all the nodes being processed at the current level.
int main()
{
Node node[NUM_NODES];
//int edgesSize = 2 * NUM_NODES;
int edges[NUM_NODES];
node[0].start = 0;
node[0].length = 2;
node[1].start = 2;
node[1].length = 1;
node[2].start = 3;
node[2].length = 1;
node[3].start = 4;
node[3].length = 1;
node[4].start = 5;
node[4].length = 0;
edges[0] = 1;
edges[1] = 2;
edges[2] = 4;
edges[3] = 3;
edges[4] = 4;
bool frontier[NUM_NODES] = { false };
bool visited[NUM_NODES] = { false };
int cost[NUM_NODES] = { 0 };
int source = 0;
frontier[source] = true;
Node* Va;
hipMalloc((void**)&Va, sizeof(Node)*NUM_NODES);
hipMemcpy(Va, node, sizeof(Node)*NUM_NODES, hipMemcpyHostToDevice);
int* Ea;
hipMalloc((void**)&Ea, sizeof(Node)*NUM_NODES);
hipMemcpy(Ea, edges, sizeof(Node)*NUM_NODES, hipMemcpyHostToDevice);
bool* Fa;
hipMalloc((void**)&Fa, sizeof(bool)*NUM_NODES);
hipMemcpy(Fa, frontier, sizeof(bool)*NUM_NODES, hipMemcpyHostToDevice);
bool* Xa;
hipMalloc((void**)&Xa, sizeof(bool)*NUM_NODES);
hipMemcpy(Xa, visited, sizeof(bool)*NUM_NODES, hipMemcpyHostToDevice);
int* Ca;
hipMalloc((void**)&Ca, sizeof(int)*NUM_NODES);
hipMemcpy(Ca, cost, sizeof(int)*NUM_NODES, hipMemcpyHostToDevice);
int num_blks = 1;
int threads = 5;
bool done;
bool* d_done;
hipMalloc((void**)&d_done, sizeof(bool));
printf("\n\n");
int count = 0;
printf("Order: \n\n");
do {
count++;
done = true;
hipMemcpy(d_done, &done, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( CUDA_BFS_KERNEL) , dim3(num_blks), dim3(threads) , 0, 0, Va, Ea, Fa, Xa, Ca,d_done);
hipMemcpy(&done, d_done , sizeof(bool), hipMemcpyDeviceToHost);
} while (!done);
hipMemcpy(cost, Ca, sizeof(int)*NUM_NODES, hipMemcpyDeviceToHost);
printf("Number of times the kernel is called : %d \n", count);
printf("\nCost: ");
for (int i = 0; i<NUM_NODES; i++)
printf( "%d ", cost[i]);
printf("\n");
_getch();
}
| 91179220433f7e2ca6fb55bcc198de28bb8e4ad6.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <conio.h>
#define NUM_NODES 5
typedef struct
{
int start; // Index of first adjacent node in Ea
int length; // Number of adjacent nodes
} Node;
__global__ void CUDA_BFS_KERNEL(Node *Va, int *Ea, bool *Fa, bool *Xa, int *Ca,bool *done)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id > NUM_NODES)
*done = false;
if (Fa[id] == true && Xa[id] == false)
{
printf("%d ", id); //This printf gives the order of vertices in BFS
Fa[id] = false;
Xa[id] = true;
__syncthreads();
int k = 0;
int i;
int start = Va[id].start;
int end = start + Va[id].length;
for (int i = start; i < end; i++)
{
int nid = Ea[i];
if (Xa[nid] == false)
{
Ca[nid] = Ca[id] + 1;
Fa[nid] = true;
*done = false;
}
}
}
}
// The BFS frontier corresponds to all the nodes being processed at the current level.
int main()
{
Node node[NUM_NODES];
//int edgesSize = 2 * NUM_NODES;
int edges[NUM_NODES];
node[0].start = 0;
node[0].length = 2;
node[1].start = 2;
node[1].length = 1;
node[2].start = 3;
node[2].length = 1;
node[3].start = 4;
node[3].length = 1;
node[4].start = 5;
node[4].length = 0;
edges[0] = 1;
edges[1] = 2;
edges[2] = 4;
edges[3] = 3;
edges[4] = 4;
bool frontier[NUM_NODES] = { false };
bool visited[NUM_NODES] = { false };
int cost[NUM_NODES] = { 0 };
int source = 0;
frontier[source] = true;
Node* Va;
cudaMalloc((void**)&Va, sizeof(Node)*NUM_NODES);
cudaMemcpy(Va, node, sizeof(Node)*NUM_NODES, cudaMemcpyHostToDevice);
int* Ea;
cudaMalloc((void**)&Ea, sizeof(Node)*NUM_NODES);
cudaMemcpy(Ea, edges, sizeof(Node)*NUM_NODES, cudaMemcpyHostToDevice);
bool* Fa;
cudaMalloc((void**)&Fa, sizeof(bool)*NUM_NODES);
cudaMemcpy(Fa, frontier, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
bool* Xa;
cudaMalloc((void**)&Xa, sizeof(bool)*NUM_NODES);
cudaMemcpy(Xa, visited, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
int* Ca;
cudaMalloc((void**)&Ca, sizeof(int)*NUM_NODES);
cudaMemcpy(Ca, cost, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
int num_blks = 1;
int threads = 5;
bool done;
bool* d_done;
cudaMalloc((void**)&d_done, sizeof(bool));
printf("\n\n");
int count = 0;
printf("Order: \n\n");
do {
count++;
done = true;
cudaMemcpy(d_done, &done, sizeof(bool), cudaMemcpyHostToDevice);
CUDA_BFS_KERNEL <<<num_blks, threads >>>(Va, Ea, Fa, Xa, Ca,d_done);
cudaMemcpy(&done, d_done , sizeof(bool), cudaMemcpyDeviceToHost);
} while (!done);
cudaMemcpy(cost, Ca, sizeof(int)*NUM_NODES, cudaMemcpyDeviceToHost);
printf("Number of times the kernel is called : %d \n", count);
printf("\nCost: ");
for (int i = 0; i<NUM_NODES; i++)
printf( "%d ", cost[i]);
printf("\n");
_getch();
}
|
5f17a1fd5962edb5cd7bd642ae33a210d8b7339e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include <vector>
#include <random>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "cuda_utils.h"
#include <algorithm>
using namespace std;
typedef float TYPE;
__global__ void convolution(const TYPE *signal, const int signalLen,
const TYPE *kernel, const int kernelLen,
TYPE *result)
{
for (int t = blockIdx.x * blockDim.x + threadIdx.x;
t < signalLen * kernelLen;
t += blockDim.x * gridDim.x) {
int i = t / kernelLen;
int j = t % kernelLen;
atomicAdd(&result[i + j], signal[i] * kernel[j]);
}
}
template <typename T>
void print_vector(vector<T> &v) {
for (auto &i : v) cout << i << "\n";
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_signal = 1000;
int n_kernel = 1000;
int blocks = 512;
int threads = 64;
// int n_threads = 1;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_signal = atoi(argv[2]);
if (argc > 3) n_kernel = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
// uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<int> signal, kernel;
vector<int> result;
signal.resize(n_signal);
kernel.resize(n_kernel);
result.resize(n_signal + n_kernel - 1);
for (int i = 0; i < n_signal; ++i) {
// signal[i] = d(gen);
signal[i] = i + 1;
}
for (int i = 0; i < n_kernel; ++i) {
// kernel[i] = d(gen);
kernel[i] = n_signal + i + 1;
}
thrust::device_vector<TYPE> d_signal(result.size(), 0.);
thrust::device_vector<TYPE> d_kernel(result.size(), 0.);
thrust::copy(signal.begin(), signal.end(), d_signal.begin());
thrust::copy(kernel.begin(), kernel.end(), d_kernel.begin());
thrust::device_vector<TYPE> d_result(result.size());
TYPE *d_signal_ptr = thrust::raw_pointer_cast(d_signal.data());
TYPE *d_kernel_ptr = thrust::raw_pointer_cast(d_kernel.data());
TYPE *d_result_ptr = thrust::raw_pointer_cast(d_result.data());
// auto papiprof = new PAPIProf();
// papiprof->start_counters("convolution");
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
hipLaunchKernelGGL(( convolution) , dim3(blocks), dim3(threads), 0, 0, d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr);
hipDeviceSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_result.begin(), d_result.end(), result.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("convolution_gpu_v10\ttime(ms)\t%d\t0\t1\n", duration);
// print_vector(result);
printf("result: %lf\n", accumulate(result.begin(), result.end(), 0.0) / (n_signal + n_kernel - 1));
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} | 5f17a1fd5962edb5cd7bd642ae33a210d8b7339e.cu | #include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include <vector>
#include <random>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "cuda_utils.h"
#include <algorithm>
using namespace std;
typedef float TYPE;
__global__ void convolution(const TYPE *signal, const int signalLen,
const TYPE *kernel, const int kernelLen,
TYPE *result)
{
for (int t = blockIdx.x * blockDim.x + threadIdx.x;
t < signalLen * kernelLen;
t += blockDim.x * gridDim.x) {
int i = t / kernelLen;
int j = t % kernelLen;
atomicAdd(&result[i + j], signal[i] * kernel[j]);
}
}
template <typename T>
void print_vector(vector<T> &v) {
for (auto &i : v) cout << i << "\n";
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_signal = 1000;
int n_kernel = 1000;
int blocks = 512;
int threads = 64;
// int n_threads = 1;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_signal = atoi(argv[2]);
if (argc > 3) n_kernel = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
// uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<int> signal, kernel;
vector<int> result;
signal.resize(n_signal);
kernel.resize(n_kernel);
result.resize(n_signal + n_kernel - 1);
for (int i = 0; i < n_signal; ++i) {
// signal[i] = d(gen);
signal[i] = i + 1;
}
for (int i = 0; i < n_kernel; ++i) {
// kernel[i] = d(gen);
kernel[i] = n_signal + i + 1;
}
thrust::device_vector<TYPE> d_signal(result.size(), 0.);
thrust::device_vector<TYPE> d_kernel(result.size(), 0.);
thrust::copy(signal.begin(), signal.end(), d_signal.begin());
thrust::copy(kernel.begin(), kernel.end(), d_kernel.begin());
thrust::device_vector<TYPE> d_result(result.size());
TYPE *d_signal_ptr = thrust::raw_pointer_cast(d_signal.data());
TYPE *d_kernel_ptr = thrust::raw_pointer_cast(d_kernel.data());
TYPE *d_result_ptr = thrust::raw_pointer_cast(d_result.data());
// auto papiprof = new PAPIProf();
// papiprof->start_counters("convolution");
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
convolution <<< blocks, threads>>>(d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr);
cudaThreadSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_result.begin(), d_result.end(), result.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("convolution_gpu_v10\ttime(ms)\t%d\t0\t1\n", duration);
// print_vector(result);
printf("result: %lf\n", accumulate(result.begin(), result.end(), 0.0) / (n_signal + n_kernel - 1));
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} |
8c10c36d10d24ccfceba01a92a5cef72881cb0b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include "cuda_guard.cuh"
#include "subnet_shortest_path_hip.cuh"
#include "index_.h"
void SubnetBF::handle_unresolved(int* cells, int cell_num, int start_cell, int start_vertex, int start_edge, int distance, SBfResult* result, int edge_num) {
int grid_to_result[G_Grid::kCellNum];
for (int i = 0; i < G_Grid::kCellNum; ++i) {
grid_to_result[i] = -1;
}
int result_idx = 0;
for (int i = 0; i < cell_num; ++i) {
grid_to_result[cells[i]] = i;
for (int j = 0; j < G_Grid::kMaxVerticesPerCell; ++j) {
auto& r = result[result_idx++];
r.id_ = G_Grid::grid_[cells[i]].vertex_[j].id_;
r.distance_ = std::numeric_limits<int>::max();
r.previous_vertex_id = 0;
}
}
auto& edge = G_Grid::grid_[start_cell].vertex_[start_vertex].edges_[start_edge];
result[G_Grid::kMaxVerticesPerCell * grid_to_result[edge.to_cell_] + edge.to_vertex_pos_].distance_ = edge.length_ - distance;
for (int i = 0; i < edge_num; ++i) {
for (int j = 0; j < cell_num; ++j) {
auto& c = G_Grid::grid_[cells[j]];
for (int k = 0; k < G_Grid::kMaxVerticesPerCell; ++k) {
auto& v = c.vertex_[k];
int v_dist = result[G_Grid::kMaxVerticesPerCell * grid_to_result[cells[j]] + k].distance_;
if (v_dist < std::numeric_limits<int>::max()) {
for (int l = 0; l < G_Grid::kMaxEdgesPerVertex; ++l) {
auto& e = v.edges_[l];
if (e.id_ != 0 && grid_to_result[e.to_cell_] != -1) {
auto& r = result[G_Grid::kMaxVerticesPerCell * grid_to_result[e.to_cell_] + e.to_vertex_pos_];
if (r.distance_ > v_dist + e.length_) {
r.distance_ = v_dist + e.length_;
r.previous_vertex_id = v.id_;
}
}
}
}
}
}
}
}
__global__ void fst_k_knl(int* cells, int cell_num, int * grid_to_result, SubnetBF::SBfResult* result, G_Grid::Cell* grid_, int edge_num, int int_max) {
int cells_per_loop = SubnetBF::kMaxThreadsPerBlock / G_Grid::kMaxVerticesPerCell;
int loop_num = (cell_num + cells_per_loop - 1) / cells_per_loop;
for (int i = 0; i < edge_num; ++i) {
for (int j = 0; j < loop_num; ++j) {
int current_cell = threadIdx.y + cells_per_loop * j;
if (current_cell < cell_num) {
auto& cell = grid_[cells[current_cell]];
auto& vertex = cell.vertex_[threadIdx.x];
int v_dist = result[G_Grid::kMaxVerticesPerCell * grid_to_result[cells[current_cell]] + threadIdx.x].distance_;
if (v_dist < int_max) {
for (int l = 0; l < G_Grid::kMaxEdgesPerVertex; ++l) {
auto& e = vertex.edges_[l];
if (e.id_ != 0 && grid_to_result[e.to_cell_] != -1) {
auto& r = result[G_Grid::kMaxVerticesPerCell * grid_to_result[e.to_cell_] + e.to_vertex_pos_];
if (r.distance_ > v_dist + e.length_) {
r.distance_ = v_dist + e.length_;
r.previous_vertex_id = vertex.id_;
}
}
}
}
}
}
__syncthreads();
}
}
void SubnetBF::find_first_k(int* cells, int cell_num, int start_cell, int start_vertex, int start_edge, int distance, SBfResult* result, int edge_num) {
int grid_to_result[G_Grid::kCellNum];
for (int i = 0; i < G_Grid::kCellNum; ++i) {
grid_to_result[i] = -1;
}
int result_idx = 0;
for (int i = 0; i < cell_num; ++i) {
grid_to_result[cells[i]] = i;
for (int j = 0; j < G_Grid::kMaxVerticesPerCell; ++j) {
auto& r = result[result_idx++];
r.id_ = G_Grid::grid_[cells[i]].vertex_[j].id_;
r.distance_ = std::numeric_limits<int>::max();
r.previous_vertex_id = 0;
}
}
auto& edge = G_Grid::grid_[start_cell].vertex_[start_vertex].edges_[start_edge];
result[G_Grid::kMaxVerticesPerCell * grid_to_result[edge.to_cell_] + edge.to_vertex_pos_].distance_ = edge.length_ - distance;
int * d_cells;
hipMalloc(&d_cells, sizeof(int) * cell_num);
hipMemcpy(d_cells, cells, sizeof(int) * cell_num, hipMemcpyHostToDevice);
int * d_grid_to_result;
hipMalloc(&d_grid_to_result, sizeof(int) * G_Grid::kCellNum);
hipMemcpy(d_grid_to_result, grid_to_result, sizeof(int) * G_Grid::kCellNum, hipMemcpyHostToDevice);
SBfResult * d_result;
hipMalloc(&d_result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell);
hipMemcpy(d_result, result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell, hipMemcpyHostToDevice);
dim3 block(G_Grid::kMaxVerticesPerCell, kMaxThreadsPerBlock / G_Grid::kMaxVerticesPerCell);
hipLaunchKernelGGL(( fst_k_knl) , dim3(1), dim3(block), 0, 0, d_cells, cell_num, d_grid_to_result, d_result, CudaGuard::pd_grid_, edge_num, std::numeric_limits<int>::max());
hipMemcpy(result, d_result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell, hipMemcpyDeviceToHost);
hipFree(d_grid_to_result);
hipFree(d_result);
hipFree(d_cells);
}
| 8c10c36d10d24ccfceba01a92a5cef72881cb0b2.cu | #include "stdafx.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include "cuda_guard.cuh"
#include "subnet_shortest_path.cuh"
#include "index_.h"
void SubnetBF::handle_unresolved(int* cells, int cell_num, int start_cell, int start_vertex, int start_edge, int distance, SBfResult* result, int edge_num) {
int grid_to_result[G_Grid::kCellNum];
for (int i = 0; i < G_Grid::kCellNum; ++i) {
grid_to_result[i] = -1;
}
int result_idx = 0;
for (int i = 0; i < cell_num; ++i) {
grid_to_result[cells[i]] = i;
for (int j = 0; j < G_Grid::kMaxVerticesPerCell; ++j) {
auto& r = result[result_idx++];
r.id_ = G_Grid::grid_[cells[i]].vertex_[j].id_;
r.distance_ = std::numeric_limits<int>::max();
r.previous_vertex_id = 0;
}
}
auto& edge = G_Grid::grid_[start_cell].vertex_[start_vertex].edges_[start_edge];
result[G_Grid::kMaxVerticesPerCell * grid_to_result[edge.to_cell_] + edge.to_vertex_pos_].distance_ = edge.length_ - distance;
for (int i = 0; i < edge_num; ++i) {
for (int j = 0; j < cell_num; ++j) {
auto& c = G_Grid::grid_[cells[j]];
for (int k = 0; k < G_Grid::kMaxVerticesPerCell; ++k) {
auto& v = c.vertex_[k];
int v_dist = result[G_Grid::kMaxVerticesPerCell * grid_to_result[cells[j]] + k].distance_;
if (v_dist < std::numeric_limits<int>::max()) {
for (int l = 0; l < G_Grid::kMaxEdgesPerVertex; ++l) {
auto& e = v.edges_[l];
if (e.id_ != 0 && grid_to_result[e.to_cell_] != -1) {
auto& r = result[G_Grid::kMaxVerticesPerCell * grid_to_result[e.to_cell_] + e.to_vertex_pos_];
if (r.distance_ > v_dist + e.length_) {
r.distance_ = v_dist + e.length_;
r.previous_vertex_id = v.id_;
}
}
}
}
}
}
}
}
__global__ void fst_k_knl(int* cells, int cell_num, int * grid_to_result, SubnetBF::SBfResult* result, G_Grid::Cell* grid_, int edge_num, int int_max) {
int cells_per_loop = SubnetBF::kMaxThreadsPerBlock / G_Grid::kMaxVerticesPerCell;
int loop_num = (cell_num + cells_per_loop - 1) / cells_per_loop;
for (int i = 0; i < edge_num; ++i) {
for (int j = 0; j < loop_num; ++j) {
int current_cell = threadIdx.y + cells_per_loop * j;
if (current_cell < cell_num) {
auto& cell = grid_[cells[current_cell]];
auto& vertex = cell.vertex_[threadIdx.x];
int v_dist = result[G_Grid::kMaxVerticesPerCell * grid_to_result[cells[current_cell]] + threadIdx.x].distance_;
if (v_dist < int_max) {
for (int l = 0; l < G_Grid::kMaxEdgesPerVertex; ++l) {
auto& e = vertex.edges_[l];
if (e.id_ != 0 && grid_to_result[e.to_cell_] != -1) {
auto& r = result[G_Grid::kMaxVerticesPerCell * grid_to_result[e.to_cell_] + e.to_vertex_pos_];
if (r.distance_ > v_dist + e.length_) {
r.distance_ = v_dist + e.length_;
r.previous_vertex_id = vertex.id_;
}
}
}
}
}
}
__syncthreads();
}
}
void SubnetBF::find_first_k(int* cells, int cell_num, int start_cell, int start_vertex, int start_edge, int distance, SBfResult* result, int edge_num) {
int grid_to_result[G_Grid::kCellNum];
for (int i = 0; i < G_Grid::kCellNum; ++i) {
grid_to_result[i] = -1;
}
int result_idx = 0;
for (int i = 0; i < cell_num; ++i) {
grid_to_result[cells[i]] = i;
for (int j = 0; j < G_Grid::kMaxVerticesPerCell; ++j) {
auto& r = result[result_idx++];
r.id_ = G_Grid::grid_[cells[i]].vertex_[j].id_;
r.distance_ = std::numeric_limits<int>::max();
r.previous_vertex_id = 0;
}
}
auto& edge = G_Grid::grid_[start_cell].vertex_[start_vertex].edges_[start_edge];
result[G_Grid::kMaxVerticesPerCell * grid_to_result[edge.to_cell_] + edge.to_vertex_pos_].distance_ = edge.length_ - distance;
int * d_cells;
cudaMalloc(&d_cells, sizeof(int) * cell_num);
cudaMemcpy(d_cells, cells, sizeof(int) * cell_num, cudaMemcpyHostToDevice);
int * d_grid_to_result;
cudaMalloc(&d_grid_to_result, sizeof(int) * G_Grid::kCellNum);
cudaMemcpy(d_grid_to_result, grid_to_result, sizeof(int) * G_Grid::kCellNum, cudaMemcpyHostToDevice);
SBfResult * d_result;
cudaMalloc(&d_result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell);
cudaMemcpy(d_result, result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell, cudaMemcpyHostToDevice);
dim3 block(G_Grid::kMaxVerticesPerCell, kMaxThreadsPerBlock / G_Grid::kMaxVerticesPerCell);
fst_k_knl <<<1, block>>>(d_cells, cell_num, d_grid_to_result, d_result, CudaGuard::pd_grid_, edge_num, std::numeric_limits<int>::max());
cudaMemcpy(result, d_result, sizeof(SBfResult) * cell_num * G_Grid::kMaxVerticesPerCell, cudaMemcpyDeviceToHost);
cudaFree(d_grid_to_result);
cudaFree(d_result);
cudaFree(d_cells);
}
|
09f987be1ab0c596e3aa389f546287235a270c93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlacpy_conj.cu, normal z -> c, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
/******************************************************************************/
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old cswap routine. Update to new clacpy code for 2D matrix?
__global__ void clacpy_conj_kernel(
int n,
magmaFloatComplex *A1, int lda1,
magmaFloatComplex *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
if ( x < n )
{
A2[offset2] = MAGMA_C_CONJ( A1[offset1] );
}
}
/******************************************************************************/
extern "C" void
magmablas_clacpy_conj(
magma_int_t n,
magmaFloatComplex_ptr dA1, magma_int_t lda1,
magmaFloatComplex_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( clacpy_conj_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dA1, lda1, dA2, lda2 );
}
| 09f987be1ab0c596e3aa389f546287235a270c93.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zlacpy_conj.cu, normal z -> c, Sun Nov 20 20:20:28 2016
*/
#include "magma_internal.h"
#define BLOCK_SIZE 64
/******************************************************************************/
// copy & conjugate a single vector of length n.
// TODO: this was modeled on the old cswap routine. Update to new clacpy code for 2D matrix?
__global__ void clacpy_conj_kernel(
int n,
magmaFloatComplex *A1, int lda1,
magmaFloatComplex *A2, int lda2 )
{
int x = threadIdx.x + blockDim.x*blockIdx.x;
int offset1 = x*lda1;
int offset2 = x*lda2;
if ( x < n )
{
A2[offset2] = MAGMA_C_CONJ( A1[offset1] );
}
}
/******************************************************************************/
extern "C" void
magmablas_clacpy_conj(
magma_int_t n,
magmaFloatComplex_ptr dA1, magma_int_t lda1,
magmaFloatComplex_ptr dA2, magma_int_t lda2,
magma_queue_t queue )
{
dim3 threads( BLOCK_SIZE );
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
clacpy_conj_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( n, dA1, lda1, dA2, lda2 );
}
|
226be986bce44adc40d8103ef1b1a329c17823ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2010 Rich Townsend <townsend@astro.wisc.edu>
//
// This file is part of CULSP.
//
// CULSP is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// CULSP is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with CULSP. If not, see <http://www.gnu.org/licenses/>.
#ifndef _CULSP_KERNEL_
#define _CULSP_KERNEL_
#define TWOPI 6.2831853071796f
// Kernel
__global__ void
__launch_bounds__(BLOCK_SIZE)
culsp_kernel(float *d_t, float *d_X, float *d_P, float df, int N_t)
{
__shared__ float s_t[BLOCK_SIZE];
__shared__ float s_X[BLOCK_SIZE];
// Calculate the frequency
float f = (blockIdx.x*BLOCK_SIZE+threadIdx.x+1)*df;
// Calculate the various sums
float XC = 0.f;
float XS = 0.f;
float CC = 0.f;
float CS = 0.f;
float XC_chunk = 0.f;
float XS_chunk = 0.f;
float CC_chunk = 0.f;
float CS_chunk = 0.f;
int j;
for(j = 0; j < N_t-BLOCK_SIZE; j += BLOCK_SIZE) {
// Load the chunk into shared memory
__syncthreads();
s_t[threadIdx.x] = d_t[j+threadIdx.x];
s_X[threadIdx.x] = d_X[j+threadIdx.x];
__syncthreads();
// Update the sums
#pragma unroll
for(int k = 0; k < BLOCK_SIZE; k++) {
// Range reduction
float ft = f*s_t[k];
ft -= rintf(ft);
float c;
float s;
__sincosf(TWOPI*ft, &s, &c);
XC_chunk += s_X[k]*c;
XS_chunk += s_X[k]*s;
CC_chunk += c*c;
CS_chunk += c*s;
}
XC += XC_chunk;
XS += XS_chunk;
CC += CC_chunk;
CS += CS_chunk;
XC_chunk = 0.f;
XS_chunk = 0.f;
CC_chunk = 0.f;
CS_chunk = 0.f;
}
// Handle the final chunk
__syncthreads();
if(j+threadIdx.x < N_t) {
s_t[threadIdx.x] = d_t[j+threadIdx.x];
s_X[threadIdx.x] = d_X[j+threadIdx.x];
}
__syncthreads();
for(int k = 0; k < N_t-j; k++) {
// Range reduction
float ft = f*s_t[k];
ft -= rintf(ft);
float c;
float s;
__sincosf(TWOPI*ft, &s, &c);
XC_chunk += s_X[k]*c;
XS_chunk += s_X[k]*s;
CC_chunk += c*c;
CS_chunk += c*s;
}
XC += XC_chunk;
XS += XS_chunk;
CC += CC_chunk;
CS += CS_chunk;
float SS = (float) N_t - CC;
// Calculate the tau terms
float ct;
float st;
__sincosf(0.5f*atan2(2.f*CS, CC-SS), &st, &ct);
// Calculate P
d_P[blockIdx.x*BLOCK_SIZE+threadIdx.x] =
0.5f*((ct*XC + st*XS)*(ct*XC + st*XS)/
(ct*ct*CC + 2*ct*st*CS + st*st*SS) +
(ct*XS - st*XC)*(ct*XS - st*XC)/
(ct*ct*SS - 2*ct*st*CS + st*st*CC));
// Finish
}
#endif
| 226be986bce44adc40d8103ef1b1a329c17823ef.cu | // Copyright 2010 Rich Townsend <townsend@astro.wisc.edu>
//
// This file is part of CULSP.
//
// CULSP is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// CULSP is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with CULSP. If not, see <http://www.gnu.org/licenses/>.
#ifndef _CULSP_KERNEL_
#define _CULSP_KERNEL_
#define TWOPI 6.2831853071796f
// Kernel
__global__ void
__launch_bounds__(BLOCK_SIZE)
culsp_kernel(float *d_t, float *d_X, float *d_P, float df, int N_t)
{
__shared__ float s_t[BLOCK_SIZE];
__shared__ float s_X[BLOCK_SIZE];
// Calculate the frequency
float f = (blockIdx.x*BLOCK_SIZE+threadIdx.x+1)*df;
// Calculate the various sums
float XC = 0.f;
float XS = 0.f;
float CC = 0.f;
float CS = 0.f;
float XC_chunk = 0.f;
float XS_chunk = 0.f;
float CC_chunk = 0.f;
float CS_chunk = 0.f;
int j;
for(j = 0; j < N_t-BLOCK_SIZE; j += BLOCK_SIZE) {
// Load the chunk into shared memory
__syncthreads();
s_t[threadIdx.x] = d_t[j+threadIdx.x];
s_X[threadIdx.x] = d_X[j+threadIdx.x];
__syncthreads();
// Update the sums
#pragma unroll
for(int k = 0; k < BLOCK_SIZE; k++) {
// Range reduction
float ft = f*s_t[k];
ft -= rintf(ft);
float c;
float s;
__sincosf(TWOPI*ft, &s, &c);
XC_chunk += s_X[k]*c;
XS_chunk += s_X[k]*s;
CC_chunk += c*c;
CS_chunk += c*s;
}
XC += XC_chunk;
XS += XS_chunk;
CC += CC_chunk;
CS += CS_chunk;
XC_chunk = 0.f;
XS_chunk = 0.f;
CC_chunk = 0.f;
CS_chunk = 0.f;
}
// Handle the final chunk
__syncthreads();
if(j+threadIdx.x < N_t) {
s_t[threadIdx.x] = d_t[j+threadIdx.x];
s_X[threadIdx.x] = d_X[j+threadIdx.x];
}
__syncthreads();
for(int k = 0; k < N_t-j; k++) {
// Range reduction
float ft = f*s_t[k];
ft -= rintf(ft);
float c;
float s;
__sincosf(TWOPI*ft, &s, &c);
XC_chunk += s_X[k]*c;
XS_chunk += s_X[k]*s;
CC_chunk += c*c;
CS_chunk += c*s;
}
XC += XC_chunk;
XS += XS_chunk;
CC += CC_chunk;
CS += CS_chunk;
float SS = (float) N_t - CC;
// Calculate the tau terms
float ct;
float st;
__sincosf(0.5f*atan2(2.f*CS, CC-SS), &st, &ct);
// Calculate P
d_P[blockIdx.x*BLOCK_SIZE+threadIdx.x] =
0.5f*((ct*XC + st*XS)*(ct*XC + st*XS)/
(ct*ct*CC + 2*ct*st*CS + st*st*SS) +
(ct*XS - st*XC)*(ct*XS - st*XC)/
(ct*ct*SS - 2*ct*st*CS + st*st*CC));
// Finish
}
#endif
|
605b708217ead9170f0ffa32a796fb70bedff218.hip | // !!! This is a file automatically generated by hipify!!!
/** *****************************************************************************
* This program is the confidential and proprietary product of Overview
* Limited. Any unauthorised use, reproduction or transfer of this
* program is strictly prohibited.
* Copyright 2017 Overview Limited. (Subject to limited
* distribution and restricted disclosure only.) All rights reserved.
*
* @file DidoFusedAnalytics_3dBgSub_CUDA.h
* @author SL
* @version 1
* @date 2017-06-29
* @brief class that does the background subtraction on the GPU
*****************************************************************************
**/
/**
* description of the algorithm
* this uses a standard MOG2 based background subtractor with a few key differences -
* it checks if the background point would be in range of the lidar, and if not, adjusts the thresholds appropriately and only uses thermal instead
* if it is in range of the lidar, it maintains separate variances for each component, and because the lidar values are returned as a range, it models them
* as a gaussian with mean at the center of the range and standard deviation = half the width of the range. Then the values are based simply on the joint integrals of the two distributions
*/
#include "global_defines.h"
#include "DidoFusedAnalytics_3dBgSub_CUDA.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <algorithm>
#include "math.h"
#include "math_constants.h"
#include "hip/hip_runtime_api.h"
//for our logging we will throw an error that can then be caught by the surrounding code that is allowed to include boost
#include "CUDA_Exception.h"
/*
if the computer being used doesn't have a GPU, define DIDOLIDAR_NOGPU as 1 in the preprocessor, and this wil produce some noops instead. It still requires the nvidia sdk to compile at all, however
*/
#if DIDOLIDAR_NOGPU
#else
//error handling function
static void HandleError( hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
// hipDeviceReset();
throw overview::CUDA_Exception(hipGetErrorString( err ) , err, line, file);
}
}
#define HANDLE_ERROR(err) {HandleError((err), __FILE__, __LINE__);}
#endif
namespace overview
{
namespace bgrcuda
{
#define NOPOINTVALUE -1.0f
//convenience function for swapping with
//convenience function: exchange two elements of a device-side float array in place
__device__ __forceinline__ void swap(float * array, int ind1, int ind2)
{
    const float held = array[ind2];
    array[ind2] = array[ind1];
    array[ind1] = held;
}
//averaging downsample by an integer factor: each output pixel is the mean of the
//scale x scale input patch, with partial patches at the image edge averaged over
//the pixels that actually exist. One thread per output pixel.
template<typename T>
__global__ void downsample(const T * in, T* out, int nrows, int ncols, int scale)
{
    const int totind_x = (threadIdx.x + blockIdx.x * blockDim.x);
    const int totind_y = (threadIdx.y + blockIdx.y * blockDim.y);
    //the output is (nrows/scale) x (ncols/scale): both indices must be strictly inside it.
    //the previous test used '>' on y, letting totind_y == nrows/scale write one row past the buffer
    if (totind_x >= ncols / scale || totind_y >= nrows / scale) return;
    //accumulate in double so narrow T types don't lose precision over large patches
    double total = 0;
    int npts = 0;
    for (int i = 0; i < scale; i++)
    {
        int index_x = totind_x*scale + i;
        if (index_x >= ncols) continue;
        for (int j = 0; j < scale; j++)
        {
            int index_y = totind_y*scale + j;
            if (index_y >= nrows) continue;
            total += in[index_x + ncols*index_y];
            npts++;
        }
    }
    out[totind_x + totind_y*(ncols / scale)] = (T)(total / npts);
}
//nearest-neighbour upsample: every full-resolution pixel reads the downsampled pixel it maps onto.
//NOTE(review): columns/rows beyond scale*(ncols/scale) are never written — confirm callers
//tolerate an unwritten border when the dimensions are not multiples of scale.
__global__ void upsample(const float * in_pano, float * out_pano,int nrows, int ncols, int scale)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int src_col = col / scale;
    const int src_row = row / scale;
    if (src_col >= ncols / scale || src_row >= nrows / scale)
        return;
    out_pano[col + row*ncols] = in_pano[src_col + src_row*(ncols / scale)];
}
//the actual bgr
//per-pixel MOG2-style background subtraction over joint (thermal, lidar range) observations.
//launched with one thread per pixel of the (rows x cols) working-resolution panorama.
//the lidar return is modelled as a gaussian centred on the middle of [r_min, r_max] with spread
//proportional to the interval width; pixels with no lidar return fall back to thermal only.
//out_min/out_max get NOPOINTVALUE for background, the observed range for foreground, and +inf
//for foreground with no lidar return.
__global__ void mixturegaussians(const float * ranges_min, const float * ranges_max, float * rangemdl, float * rangevar, const thermalType * temps,
float * tempmdl, float * tempvar, float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows)
        return;
    float r_min = ranges_min[x + y*cols];
    float r_max = ranges_max[x + y*cols];
    float r_width = r_max - r_min;
    float r_center = (r_max + r_min)/2;
    float temp = temps[x + y*cols];
    //check if the observation has a range
    bool hasRange = r_center > 0;
    //calculate distances to the modes (+ sort); modes are kept sorted by descending weight
    bool background = false; // true - the pixel classified as background
    bool fitsPDF = false; //if it remains zero a new GMM mode will be added
    float totalWeight = 0.0f;
    //go through all modes
    int lastmode = 0;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
    {
        int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
        //skip modes with no weight
        if (modelweights[modeind] <= 0) continue;
        //decay the weight; prune is negative, pushing weak modes towards removal
        float weight = alpha1 * modelweights[modeind] + prune;
        lastmode++;
        //fit not found yet
        if (!fitsPDF)
        {
            bool hasModelRange = rangemdl[modeind] > 0;
            //check if it belongs to some of the remaining modes
            float t_var = tempvar[modeind];
            //our observations of range are also gaussian distributed, so we look at the distribution of the convolution
            float r_var = rangevar[modeind] + (r_width*r_width);
            //calculate difference and distance
            float t_diff = tempmdl[modeind] - temp;
            float r_diff = rangemdl[modeind] - r_center;
            //weighted distance in both directions
            float dist2 = hasRange && hasModelRange ? t_diff*t_diff*r_var + r_diff*r_diff*t_var : t_diff*t_diff;
            float bgthresh = hasRange && hasModelRange ? pars.c_Tb * t_var * r_var : pars.c_Tb*t_var;
            float genthresh = hasRange && hasModelRange ? pars.c_Tg * t_var * r_var : pars.c_Tg*t_var;
            //background? - Tb - usually larger than Tg
            if (totalWeight < pars.c_TB && dist2 < bgthresh)
                background = true;
            //check fit
            if (dist2 < genthresh)
            {
                //belongs to the mode
                fitsPDF = true;
                //update weight
                weight += alphaT;
                float k = alphaT / weight;
                //update variance
                float t_varnew = t_var + k * (t_diff*t_diff - t_var);
                //only adjust the range variance when both model and observation carry a range.
                //NB: the original expression lacked these parentheses, so '+' and '&&' bound
                //tighter than '?:' and the stored variance was discarded
                float r_varnew = rangevar[modeind] + (hasRange && hasModelRange ? k * ((r_width*r_width + 1)*(r_diff*r_diff) + pars.c_r_varInflate - rangevar[modeind]) : 0.0f);
                //update means (parenthesised so the conditional selects the increment, not the whole expression)
                tempmdl[modeind] = tempmdl[modeind] - k * t_diff;
                rangemdl[modeind] = hasModelRange ? (rangemdl[modeind] - (hasRange ? k * r_diff : 0.0f)) : r_center;
                //limit the variance
                t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t)? pars.c_varMax_t : t_varnew;
                r_varnew = (r_varnew < pars.c_varMin_r) ? pars.c_varMin_r : (r_varnew > pars.c_varMax_r)? pars.c_varMax_r : r_varnew;
                rangevar[modeind] = r_varnew;
                tempvar[modeind] = t_varnew;
                //sort: only the matched mode's weight rose, so bubble it up towards the front
                for (int i = mode; i > 0; --i)
                {
                    //check one up
                    if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                        break;
                    //swap one up
                    swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(rangemdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                }
            }
        } // !fitsPDF
        //prune modes whose decayed weight fell below the removal threshold
        if (weight < -prune)
        {
            weight = 0.0f;
            lastmode--;
        }
        modelweights[modeind] = weight; //update weight by the calculated value
        totalWeight += weight;
    }
    //renormalize weights
    totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
        modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
    //make new mode if needed and exit
    if (!fitsPDF)
    {
        if(lastmode == pars.c_nmixtures) lastmode--;
        if (lastmode == 0)
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
        else
        {
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
            // renormalize all other weights
            for (int i = lastmode - 1; i >= 0 ; i--)
                modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
        }
        // init the new mode from the current observation
        rangemdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = hasRange ? r_center : -1.0f;
        tempmdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = temp;
        tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
        rangevar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_r;
        //sort: find the new place for it
        for (int i = lastmode - 1; i > 0; --i)
        {
            // check one up
            if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                break;
            //swap one up
            swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(rangemdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
        }
    }
    //background pixels report NOPOINTVALUE; foreground reports the observed range, or inf with no lidar return
    out_min[x + y*cols] = background ? NOPOINTVALUE : hasRange ? r_min : CUDART_INF_F;
    out_max[x + y*cols] = background ? NOPOINTVALUE : hasRange ? r_max : CUDART_INF_F;
}
//thermal-only variant of the MOG2 background subtractor: the mixture models temperature alone,
//and the lidar min/max ranges are only passed through to the foreground output.
//one thread per pixel of the (rows x cols) working-resolution panorama.
//alphaT is the learning rate, alpha1 = 1 - alphaT, prune is negative (removal pressure).
__global__ void mixturegaussians_onlyTherm(const float * ranges_min, const float * ranges_max, const thermalType * temps, float * tempmdl, float * tempvar,
float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
float r_min = ranges_min[x + y*cols];
float r_max = ranges_max[x + y*cols];
float temp = temps[x + y*cols];
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool background = false; // true - the pixel classified as background
//internal:
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
float totalWeight = 0.0f;
//go through all modes
int lastmode = 0;
for (int mode = 0; mode < pars.c_nmixtures; ++mode)
{
int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
//skip modes with no weight
//need only weight if fit is found
if (modelweights[modeind] <= 0) continue;
//decay the weight; prune is negative so weak modes drift towards removal
float weight = alpha1 * modelweights[modeind] + prune;
lastmode++;
//fit not found yet
if (!fitsPDF)
{
//check if it belongs to some of the remaining modes
float t_var = tempvar[modeind];
//calculate difference and distance
float t_diff = tempmdl[modeind] - temp;
//squared temperature distance, tested against variance-scaled thresholds below
float dist2 = t_diff*t_diff;
float bgthresh = pars.c_Tb*t_var;
float genthresh = pars.c_Tg*t_var;
//background? - Tb - usually larger than Tg
if (totalWeight < pars.c_TB && dist2 < bgthresh)
background = true;
//check fit
if (dist2 < genthresh)
{
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT / weight;
//update variance
float t_varnew = t_var + k * (t_diff*t_diff - t_var);
//integrating the weighting against the probability of the observation
//update means
tempmdl[modeind] = tempmdl[modeind] - k * t_diff;
//limit the variance
t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t)? pars.c_varMax_t : t_varnew;
tempvar[modeind] = t_varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for (int i = mode; i > 0; --i)
{
//check one up
if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
break;
//swap one up
swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
}
//belongs to the mode - bFitsPDF becomes 1
}
} // !fitsPDF
//check prune
if (weight < -prune)
{
weight = 0.0f;
lastmode--;
}
modelweights[modeind] = weight; //update weight by the calculated value
totalWeight += weight;
}
//renormalize weights
totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
for (int mode = 0; mode < pars.c_nmixtures; ++mode)
modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
//make new mode if needed and exit
if (!fitsPDF)
{
if(lastmode == pars.c_nmixtures) lastmode--;
if (lastmode == 0)
modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
else
{
modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
// renormalize all other weights
for (int i = lastmode - 1; i >= 0 ; i--)
modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
}
// init the new mode from the current observation
tempmdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = temp;
tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
//sort
//find the new place for it
for (int i = lastmode - 1; i > 0; --i)
{
// check one up
if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
break;
//swap one up
swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
}
}
//background pixels report NOPOINTVALUE; foreground passes the observed ranges straight through
out_min[x + y*cols] = background ? NOPOINTVALUE : r_min;
out_max[x + y*cols] = background ? NOPOINTVALUE : r_max;
}
#define REGION_WIDTH 3
#define REGION_AREA (REGION_WIDTH*REGION_WIDTH)
#define REGION_HWIDTH (REGION_WIDTH/2)
//the actual bgr - region based method
//region-based variant: each pixel's mixture models the whole REGION_WIDTH x REGION_WIDTH patch of
//(thermal, range) values around it, matching on the capped L2 distance over the patch.
//one thread per pixel; rangemdl/tempmdl hold REGION_AREA values per mode.
__global__ void mixturegaussians_region(const float * ranges_min, const float * ranges_max, float * rangemdl, float * rangevar, const thermalType * temps,
float * tempmdl, float * tempvar, float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows)
        return;
    //gather the patch of range centres and temperatures; out-of-image samples are zeroed
    float r_center[REGION_AREA], r_width = 0, temp[REGION_AREA];
    for (int i = 0; i < REGION_AREA; i++)
    {
        int sx = x - REGION_HWIDTH + i % REGION_WIDTH;
        int sy = y - REGION_HWIDTH + i / REGION_WIDTH;
        if (sx < 0 || sy < 0 || sx >= cols || sy >= rows)
        {
            r_center[i] = 0;
            temp[i] = 0;
        }
        else
        {
            float r_min = ranges_min[sx + sy*cols];
            float r_max = ranges_max[sx + sy*cols];
            r_width += r_max - r_min;
            r_center[i] = (r_max + r_min) / 2;
            temp[i] = temps[sx + sy*cols];
        }
    }
    //normalise the variance of the range observations
    r_width /= REGION_AREA;
    //check whether the centre of the patch (index REGION_AREA/2) carries a lidar return
    bool hasRange = r_center[REGION_AREA / 2] > 0;
    //calculate distances to the modes (+ sort); modes kept sorted by descending weight
    bool background = false; // true - the pixel classified as background
    bool fitsPDF = false; //if it remains zero a new GMM mode will be added
    float totalWeight = 0.0f;
    //go through all modes
    int lastmode = 0;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
    {
        int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
        //skip modes with no weight
        if (modelweights[modeind] <= 0) continue;
        //decay the weight; prune is negative, pushing weak modes towards removal
        float weight = alpha1 * modelweights[modeind] + prune;
        lastmode++;
        //fit not found yet
        if (!fitsPDF)
        {
            //test the model's patch centre for a range; the previous index REGION_WIDTH (= 3)
            //pointed left of centre — the patch centre is REGION_AREA/2 (= 4), matching hasRange above
            bool hasModelRange = rangemdl[modeind * REGION_AREA + REGION_AREA / 2] > 0;
            //check if it belongs to some of the remaining modes
            float t_var = tempvar[modeind];
            //our observations of range are also gaussian distributed, so we look at the distribution of the convolution
            float r_var = rangevar[modeind] + (r_width*r_width);
            //calculate difference and distance
            float t_diff[REGION_AREA], r_diff[REGION_AREA];
            for (int i = 0; i < REGION_AREA; i++)
            {
                t_diff[i] = tempmdl[modeind*REGION_AREA + i] - temp[i];
                r_diff[i] = rangemdl[modeind * REGION_AREA + i] - r_center[i];
            }
            //calculate the L2 norm of the differences, capping each term so one outlier pixel cannot dominate
            float tdist = 0, rdist = 0;
            for (int i = 0; i < REGION_AREA; i++)
            {
                tdist += fmin(t_diff[i] * t_diff[i], pars.c_errrorCap * t_var);
                rdist += fmin(r_diff[i] * r_diff[i], pars.c_errrorCap * r_var);
            }
            float dist2 = hasRange && hasModelRange ? tdist*r_var + rdist*t_var : tdist;
            //weighted distance in both directions; thresholds scale with the patch area
            float bgthresh = hasRange && hasModelRange ? pars.c_Tb * t_var * r_var *REGION_AREA : pars.c_Tb*t_var *REGION_AREA;
            float genthresh = hasRange && hasModelRange ? pars.c_Tg * t_var * r_var * REGION_AREA : pars.c_Tg*t_var *REGION_AREA;
            //background? - Tb - usually larger than Tg
            if (totalWeight < pars.c_TB && dist2 < bgthresh)
                background = true;
            //check fit
            if (dist2 < genthresh)
            {
                //belongs to the mode
                fitsPDF = true;
                //update weight
                weight += alphaT;
                float k = alphaT / weight;
                //update variance
                float t_varnew = t_var + k * (tdist/ REGION_AREA + pars.c_t_varInflate - t_var);
                //only adjust the range variance when both model and observation carry a range.
                //NB: the original expression lacked these parentheses, so '+' and '&&' bound
                //tighter than '?:' and the stored variance was discarded
                float r_varnew = rangevar[modeind] + (hasRange && hasModelRange ? (k * ((r_width*r_width + 1)*(rdist/ REGION_AREA) + pars.c_r_varInflate - rangevar[modeind])) : 0.0f);
                //update means
                for (int i = 0; i < REGION_AREA; i++)
                {
                    tempmdl[modeind*REGION_AREA +i] = tempmdl[modeind * REGION_AREA + i] - k * t_diff[i];
                    rangemdl[modeind*REGION_AREA + i] = rangemdl[modeind * REGION_AREA + i] > 0 ? rangemdl[modeind * REGION_AREA + i] - k *(r_diff[i]) : r_center[i];
                }
                //limit the variance
                t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t)? pars.c_varMax_t : t_varnew;
                r_varnew = (r_varnew < pars.c_varMin_r) ? pars.c_varMin_r : (r_varnew > pars.c_varMax_r)? pars.c_varMax_r : r_varnew;
                rangevar[modeind] = r_varnew;
                tempvar[modeind] = t_varnew;
                //sort: only the matched mode's weight rose, so bubble it up towards the front
                for (int i = mode; i > 0; --i)
                {
                    //check one up
                    if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                        break;
                    //swap one up (per-mode scalars, then the per-patch model arrays)
                    swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
                    for (int k = 0; k < REGION_AREA; k++)
                    {
                        swap(rangemdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
                        swap(tempmdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) *REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
                    }
                }
            }
        } // !fitsPDF
        //prune modes whose decayed weight fell below the removal threshold
        if (weight < -prune)
        {
            weight = 0.0f;
            lastmode--;
        }
        modelweights[modeind] = weight; //update weight by the calculated value
        totalWeight += weight;
    }
    //renormalize weights
    totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
        modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
    //make new mode if needed and exit
    if (!fitsPDF)
    {
        if(lastmode == pars.c_nmixtures) lastmode--;
        if (lastmode == 0)
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
        else
        {
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
            // renormalize all other weights
            for (int i = lastmode - 1; i >= 0 ; i--)
                modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
        }
        // init the new mode from the observed patch
        for (int k = 0; k < REGION_AREA; k++)
        {
            rangemdl[((x + (y*cols))*pars.c_nmixtures + lastmode)*REGION_AREA + k] = r_center[k];
            tempmdl[((x + (y*cols))*pars.c_nmixtures + lastmode)*REGION_AREA +k] = temp[k];
        }
        tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
        rangevar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_r;
        //sort: find the new place for it
        for (int i = lastmode - 1; i > 0; --i)
        {
            // check one up
            if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                break;
            //swap one up
            swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols),i + pars.c_nmixtures*(x + y*cols));
            for (int k = 0; k < REGION_AREA; k++)
            {
                swap(rangemdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
                swap(tempmdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
            }
        }
    }
    //background pixels report NOPOINTVALUE; foreground reports the observed range, or inf with no lidar return
    out_min[x + y*cols] = background ? NOPOINTVALUE : hasRange ? ranges_min[x + y*cols]: CUDART_INF_F;
    out_max[x + y*cols] = background ? NOPOINTVALUE : hasRange ? ranges_max[x + y*cols] : CUDART_INF_F;
}
}
//called whenever we change the constants
//map a joint (bivariate) threshold onto an equivalent univariate one.
//quartic polynomial fit, numerically estimated from the normal distributions in the range 1-6.
//evaluated in Horner form in single precision instead of four separate double-precision pow() calls.
static inline float getUnivarateThresh(float thresh)
{
    return (((0.0016298f * thresh - 0.0080105f) * thresh - 0.1293664f) * thresh + 1.3835517f) * thresh - 0.6398407f;
}
//copy constructor: duplicates the device-side background model of cp
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(DidoFusedAnalytics_3dBgSub_CUDA & cp): useRegion(cp.useRegion), scale(cp.scale)
{
    cols = cp.cols;
    rows = cp.rows;
    ct = cp.ct;
    history = cp.history;
    nsteps = cp.nsteps;
    pars = cp.pars;
    //cuda allocated variables
#if DIDOLIDAR_NOGPU
#else
    //the model lives at the downsampled resolution (see the (rows, cols, ...) constructor);
    //the previous code sized it at full resolution here, over-allocating and reading past the
    //end of cp's smaller buffers during the device-to-device copies
    int modelsize = (rows / scale)*(cols / scale)*pars.c_nmixtures * sizeof(float);
    int regionsize = useRegion ? modelsize*REGION_AREA : modelsize;
    //allocate the model
    HANDLE_ERROR(hipMalloc(&rangeModel, regionsize));
    HANDLE_ERROR(hipMalloc(&rangevars, modelsize));
    HANDLE_ERROR(hipMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(hipMalloc(&tempvars, modelsize));
    HANDLE_ERROR(hipMalloc(&modelweights, modelsize));
    //copy the model state across on the device
    HANDLE_ERROR(hipMemcpy(modelweights,cp.modelweights, modelsize,hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(rangeModel, cp.rangeModel, regionsize, hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(rangevars, cp.rangevars, modelsize, hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(tempmodel, cp.tempmodel, regionsize, hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(tempvars, cp.tempvars, modelsize, hipMemcpyDeviceToDevice));
#endif
}
//move constructor: steal the device buffers from mv, leaving it safe to destroy
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(DidoFusedAnalytics_3dBgSub_CUDA && mv): useRegion(mv.useRegion), scale(mv.scale)
{
    //copy the plain-value state
    cols = mv.cols;
    rows = mv.rows;
    ct = mv.ct;
    history = mv.history;
    nsteps = mv.nsteps;
    pars = mv.pars;
    //take ownership of the device allocations
    modelweights = mv.modelweights;
    rangeModel = mv.rangeModel;
    rangevars = mv.rangevars;
    tempmodel = mv.tempmodel;
    tempvars = mv.tempvars;
    //null the source so its destructor will not free the stolen buffers
    mv.modelweights = nullptr;
    mv.rangeModel = nullptr;
    mv.rangevars = nullptr;
    mv.tempmodel = nullptr;
    mv.tempvars = nullptr;
}
//construct an empty background model at the downsampled (rows/scale x cols/scale) working resolution
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(int _rows, int _cols, bool useregion, int _scale)
: rows(_rows), cols(_cols), useRegion(useregion), scale(_scale)
{
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows/scale)*(cols/scale)*pars.c_nmixtures*sizeof(float);
    int regionsize = useRegion ? modelsize*REGION_AREA : modelsize;
    //allocate the model
    HANDLE_ERROR(hipMalloc(&rangeModel, regionsize));
    HANDLE_ERROR(hipMalloc(&rangevars, modelsize));
    HANDLE_ERROR(hipMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(hipMalloc(&tempvars, modelsize));
    HANDLE_ERROR(hipMalloc(&modelweights, modelsize));
    //hipMalloc leaves the buffers uninitialized, but the kernels read modelweights (and the models)
    //before ever writing them on the first frame; zero everything so the model starts empty and deterministic
    HANDLE_ERROR(hipMemset(rangeModel, 0, regionsize));
    HANDLE_ERROR(hipMemset(rangevars, 0, modelsize));
    HANDLE_ERROR(hipMemset(tempmodel, 0, regionsize));
    HANDLE_ERROR(hipMemset(tempvars, 0, modelsize));
    HANDLE_ERROR(hipMemset(modelweights, 0, modelsize));
    //cache the univariate equivalents of the joint thresholds
    pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
    pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
#endif
}
DidoFusedAnalytics_3dBgSub_CUDA::~DidoFusedAnalytics_3dBgSub_CUDA()
{
#if DIDOLIDAR_NOGPU
#else
    //release the device-side model; hipFree errors are deliberately ignored - destructors must not throw
    float * buffers[] = { rangeModel, rangevars, tempmodel, tempvars, modelweights };
    for (float * buf : buffers)
        if (buf != nullptr) hipFree(buf);
#endif
}
//run one frame of the fused background subtraction.
//all pointers are device pointers at full resolution; out_min/out_max receive per-pixel results
//(NOPOINTVALUE for background, the observed range for foreground, +inf for foreground with no lidar return).
//a negative learningRate selects the automatic rate 1/min(nsteps, history).
void DidoFusedAnalytics_3dBgSub_CUDA::apply(const thermalType * input_t,
const float * input_d_min, const float * input_d_max, float * out_min, float * out_max, float learningRate)
{
#if DIDOLIDAR_NOGPU
#else
    nsteps++;
    float lr;
    //pick the learning rate
    if(learningRate < 0)
    {
        lr = nsteps > history ? 1.f/history : 1.f/(nsteps);
    }
    else
    {
        lr = learningRate;
    }
    //scratch buffers at the downsampled working resolution
    float * l_d_min = nullptr, *l_d_max = nullptr, *l_o_max = nullptr, *l_o_min = nullptr;
    thermalType * l_therm = nullptr;
    try
    {
        HANDLE_ERROR(hipMalloc(&l_d_max, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_d_min, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_o_max, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_o_min, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_therm, (cols / scale)*(rows / scale) * sizeof(thermalType)));
        int blockdim = 16;
        dim3 grid((cols / scale) / blockdim + 1, (rows / scale) / blockdim + 1);
        dim3 block(blockdim, blockdim);
        //bring the inputs down to the working resolution
        if (scale == 1)
        {
            HANDLE_ERROR(hipMemcpy(l_d_max, input_d_max, cols*rows*sizeof(float), hipMemcpyDeviceToDevice));
            HANDLE_ERROR(hipMemcpy(l_d_min, input_d_min, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
            //the thermal input was previously never copied on the scale == 1 path, so the
            //bgr kernels consumed uninitialized temperatures
            HANDLE_ERROR(hipMemcpy(l_therm, input_t, cols*rows * sizeof(thermalType), hipMemcpyDeviceToDevice));
        }
        else
        {
            hipLaunchKernelGGL(( bgrcuda::downsample<float>) , dim3(grid), dim3(block) , 0, 0, input_d_max, l_d_max, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::downsample <float>), dim3(grid), dim3(block) , 0, 0, input_d_min, l_d_min, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::downsample <thermalType>) , dim3(grid), dim3(block) , 0, 0, input_t, l_therm, rows, cols, scale);
        }
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
        //run the bgr
        if(useRegion)
        {
            hipLaunchKernelGGL(( bgrcuda::mixturegaussians_region), dim3(grid), dim3(block), 0, 0, l_d_min, l_d_max, rangeModel, rangevars, l_therm,
                tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
        }
        else
        {
            hipLaunchKernelGGL(( bgrcuda::mixturegaussians), dim3(grid), dim3(block), 0, 0, l_d_min, l_d_max, rangeModel, rangevars, l_therm,
                tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
        }
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
        //bring the result back up to full resolution
        if (scale == 1)
        {
            HANDLE_ERROR(hipMemcpy(out_max, l_o_max, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
            HANDLE_ERROR(hipMemcpy(out_min, l_o_min, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
        }
        else
        {
            grid = dim3((cols) / blockdim + 1, (rows ) / blockdim + 1);
            hipLaunchKernelGGL(( bgrcuda::upsample) , dim3(grid), dim3(block) , 0, 0, l_o_max, out_max, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::upsample), dim3(grid), dim3(block) , 0, 0, l_o_min, out_min, rows, cols, scale);
        }
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
    }
    catch (...)
    {
        //HANDLE_ERROR throws CUDA_Exception; release the scratch buffers so a failed frame does not leak device memory
        if (l_o_max) hipFree(l_o_max);
        if (l_d_max) hipFree(l_d_max);
        if (l_d_min) hipFree(l_d_min);
        if (l_o_min) hipFree(l_o_min);
        if (l_therm) hipFree(l_therm);
        throw;
    }
    if (l_o_max) hipFree(l_o_max);
    if (l_d_max) hipFree(l_d_max);
    if (l_d_min) hipFree(l_d_min);
    if (l_o_min) hipFree(l_o_min);
    if (l_therm) hipFree(l_therm);
#endif
}
//set the adaptation window: the automatic learning rate becomes 1/history once nsteps exceeds it
void DidoFusedAnalytics_3dBgSub_CUDA::setHistory(int hist_)
{
history = hist_;
}
//set c_TB: modes can only classify a pixel as background while the accumulated mixture weight is below this value
void DidoFusedAnalytics_3dBgSub_CUDA::setBackgroundWeight(float TB)
{
pars.c_TB = TB;
}
//set the initial variances for newly spawned mixture components; the clamp bounds and
//inflation terms are derived from the initial values with fixed ratios
//(presumably empirically tuned — TODO confirm)
void DidoFusedAnalytics_3dBgSub_CUDA::setVariance(float initTempVar, float initRangeVar)
{
//parameters for thermal space
pars.c_varInit_t = initTempVar; // initial variance for new components
pars.c_varMax_t = 5.0f * pars.c_varInit_t;
pars.c_varMin_t = pars.c_varInit_t/1.5f;
//params for range space
pars.c_varInit_r = initRangeVar; // initial variance for new components
pars.c_varMax_r = 5.0f * pars.c_varInit_r;
pars.c_varMin_r = pars.c_varInit_r/4;
pars.c_r_varInflate = initRangeVar / 3;
pars.c_t_varInflate = initTempVar/3;
}
//set the background (Tb) and component-match (Tg) distance thresholds and refresh
//the cached univariate equivalents used when no range information is available
void DidoFusedAnalytics_3dBgSub_CUDA::setThresholds(float backgroundThresh, float generativeThresh)
{
pars.c_Tb = backgroundThresh;
pars.c_Tg = generativeThresh;
pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
}
//copy constructor: duplicates the device-side thermal background model of cp
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly & cp): scale(cp.scale)
{
    cols = cp.cols;
    rows = cp.rows;
    ct = cp.ct;
    history = cp.history;
    nsteps = cp.nsteps;
    pars = cp.pars;
    //cuda allocated variables
#if DIDOLIDAR_NOGPU
#else
    //the model lives at the downsampled resolution (see the (rows, cols, scale) constructor);
    //the previous code sized it at full resolution here, over-allocating and reading past
    //the end of cp's smaller buffers during the device-to-device copies
    int modelsize = (rows / scale)*(cols / scale)*pars.c_nmixtures * sizeof(float);
    int regionsize = modelsize;
    //allocate the model
    HANDLE_ERROR(hipMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(hipMalloc(&tempvars, modelsize));
    HANDLE_ERROR(hipMalloc(&modelweights, modelsize));
    //copy the model state across on the device
    HANDLE_ERROR(hipMemcpy(modelweights,cp.modelweights, modelsize,hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(tempmodel, cp.tempmodel, regionsize, hipMemcpyDeviceToDevice));
    HANDLE_ERROR(hipMemcpy(tempvars, cp.tempvars, modelsize, hipMemcpyDeviceToDevice));
#endif
}
//move constructor: steal the device buffers from mv, leaving it safe to destroy
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly && mv): scale(mv.scale)
{
    //copy the plain-value state
    cols = mv.cols;
    rows = mv.rows;
    ct = mv.ct;
    history = mv.history;
    nsteps = mv.nsteps;
    pars = mv.pars;
    //take ownership of the device allocations
    modelweights = mv.modelweights;
    tempmodel = mv.tempmodel;
    tempvars = mv.tempvars;
    //null the source so its destructor will not free the stolen buffers
    mv.modelweights = nullptr;
    mv.tempmodel = nullptr;
    mv.tempvars = nullptr;
}
//construct an empty thermal-only background model at the downsampled working resolution
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(int _rows, int _cols, int _scale)
: rows(_rows), cols(_cols), scale(_scale)
{
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows/scale)*(cols/scale)*pars.c_nmixtures*sizeof(float);
    int regionsize = modelsize;
    //allocate the model
    HANDLE_ERROR(hipMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(hipMalloc(&tempvars, modelsize));
    HANDLE_ERROR(hipMalloc(&modelweights, modelsize));
    //hipMalloc leaves the buffers uninitialized, but the kernel reads modelweights before ever
    //writing it on the first frame; zero everything so the model starts empty and deterministic
    HANDLE_ERROR(hipMemset(tempmodel, 0, regionsize));
    HANDLE_ERROR(hipMemset(tempvars, 0, modelsize));
    HANDLE_ERROR(hipMemset(modelweights, 0, modelsize));
    //cache the univariate equivalents of the joint thresholds
    pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
    pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
#endif
}
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::~DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly()
{
#if DIDOLIDAR_NOGPU
#else
    //release the device-side model; hipFree errors are deliberately ignored - destructors must not throw
    float * buffers[] = { tempmodel, tempvars, modelweights };
    for (float * buf : buffers)
        if (buf != nullptr) hipFree(buf);
#endif
}
//run one frame of thermal-only background subtraction. All pointers are device pointers at full
//resolution; the lidar ranges are only passed through to the foreground output.
//a negative learningRate selects the automatic rate 1/min(nsteps, history).
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::apply(const thermalType * input_t,
const float * input_d_min, const float * input_d_max, float * out_min, float * out_max, float learningRate)
{
#if DIDOLIDAR_NOGPU
#else
    nsteps++;
    float lr;
    //pick the learning rate
    if(learningRate < 0)
    {
        lr = nsteps > history ? 1.f/history : 1.f/(nsteps);
    }
    else
    {
        lr = learningRate;
    }
    //scratch buffers at the downsampled working resolution
    float * l_d_min = nullptr, *l_d_max = nullptr, *l_o_max = nullptr, *l_o_min = nullptr;
    thermalType * l_therm = nullptr;
    try
    {
        HANDLE_ERROR(hipMalloc(&l_d_max, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_d_min, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_o_max, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_o_min, (cols / scale)*(rows / scale) * sizeof(float)));
        HANDLE_ERROR(hipMalloc(&l_therm, (cols / scale)*(rows / scale) * sizeof(thermalType)));
        int blockdim = 16;
        dim3 grid((cols / scale) / blockdim + 1, (rows / scale) / blockdim + 1);
        dim3 block(blockdim, blockdim);
        //bring the inputs down to the working resolution
        if (scale == 1)
        {
            HANDLE_ERROR(hipMemcpy(l_d_max, input_d_max, cols*rows*sizeof(float), hipMemcpyDeviceToDevice));
            HANDLE_ERROR(hipMemcpy(l_d_min, input_d_min, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
            //the thermal input was previously never copied on the scale == 1 path, so the
            //bgr kernel consumed uninitialized temperatures
            HANDLE_ERROR(hipMemcpy(l_therm, input_t, cols*rows * sizeof(thermalType), hipMemcpyDeviceToDevice));
        }
        else
        {
            hipLaunchKernelGGL(( bgrcuda::downsample<float>) , dim3(grid), dim3(block) , 0, 0, input_d_max, l_d_max, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::downsample <float>), dim3(grid), dim3(block) , 0, 0, input_d_min, l_d_min, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::downsample <thermalType>) , dim3(grid), dim3(block) , 0, 0, input_t, l_therm, rows, cols, scale);
        }
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
        //run the bgr
        hipLaunchKernelGGL(( bgrcuda::mixturegaussians_onlyTherm), dim3(grid), dim3(block), 0, 0, l_d_min, l_d_max, l_therm,
            tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
        //bring the result back up to full resolution
        if (scale == 1)
        {
            HANDLE_ERROR(hipMemcpy(out_max, l_o_max, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
            HANDLE_ERROR(hipMemcpy(out_min, l_o_min, cols*rows * sizeof(float), hipMemcpyDeviceToDevice));
        }
        else
        {
            grid = dim3((cols) / blockdim + 1, (rows ) / blockdim + 1);
            hipLaunchKernelGGL(( bgrcuda::upsample) , dim3(grid), dim3(block) , 0, 0, l_o_max, out_max, rows, cols, scale);
            hipLaunchKernelGGL(( bgrcuda::upsample), dim3(grid), dim3(block) , 0, 0, l_o_min, out_min, rows, cols, scale);
        }
        hipDeviceSynchronize();
        HANDLE_ERROR(hipGetLastError());
    }
    catch (...)
    {
        //HANDLE_ERROR throws CUDA_Exception; release the scratch buffers so a failed frame does not leak device memory
        if (l_o_max) hipFree(l_o_max);
        if (l_d_max) hipFree(l_d_max);
        if (l_d_min) hipFree(l_d_min);
        if (l_o_min) hipFree(l_o_min);
        if (l_therm) hipFree(l_therm);
        throw;
    }
    if (l_o_max) hipFree(l_o_max);
    if (l_d_max) hipFree(l_d_max);
    if (l_d_min) hipFree(l_d_min);
    if (l_o_min) hipFree(l_o_min);
    if (l_therm) hipFree(l_therm);
#endif
}
//set the adaptation window: the automatic learning rate becomes 1/history once nsteps exceeds it
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setHistory(int hist_)
{
history = hist_;
}
//set c_TB: modes can only classify a pixel as background while the accumulated mixture weight is below this value
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setBackgroundWeight(float TB)
{
pars.c_TB = TB;
}
//set the initial variances for newly spawned mixture components; the range parameters are kept
//for interface parity even though this class only models temperature. The clamp bounds and
//inflation terms use fixed ratios (presumably empirically tuned — TODO confirm)
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setVariance(float initTempVar, float initRangeVar)
{
//parameters for thermal space
pars.c_varInit_t = initTempVar; // initial variance for new components
pars.c_varMax_t = 5.0f * pars.c_varInit_t;
pars.c_varMin_t = pars.c_varInit_t/1.5f;
//params for range space
pars.c_varInit_r = initRangeVar; // initial variance for new components
pars.c_varMax_r = 5.0f * pars.c_varInit_r;
pars.c_varMin_r = pars.c_varInit_r/4;
pars.c_r_varInflate = initRangeVar / 3;
pars.c_t_varInflate = initTempVar/3;
}
//set the background (Tb) and component-match (Tg) distance thresholds and refresh
//the cached univariate equivalents
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setThresholds(float backgroundThresh, float generativeThresh)
{
pars.c_Tb = backgroundThresh;
pars.c_Tg = generativeThresh;
pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
}
} | 605b708217ead9170f0ffa32a796fb70bedff218.cu | /** *****************************************************************************
* This program is the confidential and proprietary product of Overview
* Limited. Any unauthorised use, reproduction or transfer of this
* program is strictly prohibited.
* Copyright 2017 Overview Limited. (Subject to limited
* distribution and restricted disclosure only.) All rights reserved.
*
* @file DidoFusedAnalytics_3dBgSub_CUDA.h
* @author SL
* @version 1
* @date 2017-06-29
* @brief class that does the background subtraction on the GPU
*****************************************************************************
**/
/**
* description of the algorithm
* this uses a standard MOG2 based background subtractor with a few key differences -
* it checks if the background point would be in range of the lidar, and if not, adjusts the thresholds appropriately and only uses thermal instead
* if it is in range of the lidar, it maintains separate variances for each component, and because the lidar values are returned as a range, it models them
* as a gaussian with mean at the center of the range and standard deviation = half the width of the range. Then the values are based simply on the joint integrals of the two distributions
*/
#include "global_defines.h"
#include "DidoFusedAnalytics_3dBgSub_CUDA.h"
#include <cuda_runtime.h>
#include <iostream>
#include <algorithm>
#include "math.h"
#include "math_constants.h"
#include "cuda_profiler_api.h"
//for our logging we will throw an error that can then be caught by the surrounding code that is allowed to include boost
#include "CUDA_Exception.h"
/*
if the computer being used doesn't have a GPU, define DIDOLIDAR_NOGPU as 1 in the preprocessor, and this wil produce some noops instead. It still requires the nvidia sdk to compile at all, however
*/
#if DIDOLIDAR_NOGPU
#else
//error handling function
//Translate a failing CUDA status into a CUDA_Exception that carries the
//error string plus the file/line where the check was made.
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    throw overview::CUDA_Exception(cudaGetErrorString(err), err, line, file);
}
#define HANDLE_ERROR(err) {HandleError((err), __FILE__, __LINE__);}
#endif
namespace overview
{
namespace bgrcuda
{
#define NOPOINTVALUE -1.0f
//convenience function for swapping with
//exchange the values held at two indices of a float array
__device__ __forceinline__ void swap(float * array, int ind1, int ind2)
{
    const float held = array[ind2];
    array[ind2] = array[ind1];
    array[ind1] = held;
}
//Box-average downsample of a row-major image by an integer factor.
//Each thread emits one low-resolution pixel, averaging the scale x scale
//input patch it covers; partial patches at the right/bottom edge average
//only the in-bounds samples.
//Fix: the row guard used '>' instead of '>=', so the thread row at
//totind_y == nrows/scale wrote one row past the end of 'out' (and could
//divide by npts == 0 when its whole patch fell outside the image).
template<typename T>
__global__ void downsample(const T * in, T* out, int nrows, int ncols, int scale)
{
    const int totind_x = (threadIdx.x + blockIdx.x * blockDim.x);
    const int totind_y = (threadIdx.y + blockIdx.y * blockDim.y);
    if (totind_x >= ncols / scale || totind_y >= nrows / scale) return;
    double total = 0;
    int npts = 0;
    for (int i = 0; i < scale; i++)
    {
        int index_x = totind_x*scale + i;
        if (index_x >= ncols) continue;
        for (int j = 0; j < scale; j++)
        {
            int index_y = totind_y*scale + j;
            if (index_y >= nrows) continue;
            total += in[index_x + ncols*index_y];
            npts++;
        }
    }
    //npts >= 1 here: the guard above ensures the patch origin is in-bounds
    out[totind_x + totind_y*(ncols / scale)] = (T)(total / npts);
}
//Nearest-neighbour upsample: every full-resolution pixel copies the value of
//the low-resolution cell it falls inside. Pixels beyond the last whole
//low-resolution cell are left untouched, matching the downsampled extent.
__global__ void upsample(const float * in_pano, float * out_pano,int nrows, int ncols, int scale)
{
    const int index_x = (threadIdx.x + blockIdx.x * blockDim.x);
    const int index_y = (threadIdx.y + blockIdx.y * blockDim.y);
    const int lowCols = ncols / scale;
    const int src_x = index_x / scale;
    const int src_y = index_y / scale;
    if (src_x >= lowCols || src_y >= nrows / scale)
        return;
    out_pano[index_x + index_y*ncols] = in_pano[src_x + src_y*lowCols];
}
//the actual bgr
//Per-pixel MOG2-style background update fusing thermal and range data.
//Each mode keeps a thermal mean/variance and a range mean/variance; a range
//observation is the interval [r_min, r_max], modelled as a Gaussian centred
//on the interval whose spread comes from its width. Pixels or modes without
//a valid range (centre <= 0) fall back to thermal-only matching.
//Outputs: NOPOINTVALUE for background pixels, the observed range for
//foreground, +inf for foreground with no lidar return.
//Fixes vs the previous revision:
// - two operator-precedence bugs ('?:' binds looser than '+', '-' and '&&'):
//   r_varnew now adds the conditional increment to rangevar[modeind], and the
//   range-mean update now subtracts (hasRange ? k*r_diff : 0);
// - the new-mode sort started at lastmode-1, so the freshly created mode at
//   index 'lastmode' never took part in the bubble-up; it now starts at
//   'lastmode', mirroring the matched-mode sort.
__global__ void mixturegaussians(const float * ranges_min, const float * ranges_max, float * rangemdl, float * rangevar, const thermalType * temps,
    float * tempmdl, float * tempvar, float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows)
        return;
    float r_min = ranges_min[x + y*cols];
    float r_max = ranges_max[x + y*cols];
    float r_width = r_max - r_min;
    float r_center = (r_max + r_min)/2;
    float temp = temps[x + y*cols];
    //check if the observation has a range
    bool hasRange = r_center > 0;
    //walk the modes in descending weight order
    bool background = false; // true - the pixel classified as background
    bool fitsPDF = false;    //if it remains false a new GMM mode will be added
    float totalWeight = 0.0f;
    int lastmode = 0;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
    {
        int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
        //skip modes with no weight
        if (modelweights[modeind] <= 0) continue;
        float weight = alpha1 * modelweights[modeind] + prune;
        lastmode++;
        //fit not found yet
        if (!fitsPDF)
        {
            bool hasModelRange = rangemdl[modeind] > 0;
            float t_var = tempvar[modeind];
            //range observations are themselves gaussian distributed, so match
            //against the convolution of the two distributions
            float r_var = rangevar[modeind] + (r_width*r_width);
            float t_diff = tempmdl[modeind] - temp;
            float r_diff = rangemdl[modeind] - r_center;
            //cross-weighted squared distance when both sides have range,
            //thermal-only otherwise
            float dist2 = hasRange && hasModelRange ? t_diff*t_diff*r_var + r_diff*r_diff*t_var : t_diff*t_diff;
            float bgthresh = hasRange && hasModelRange ? pars.c_Tb * t_var * r_var : pars.c_Tb*t_var;
            float genthresh = hasRange && hasModelRange ? pars.c_Tg * t_var * r_var : pars.c_Tg*t_var;
            //background? - Tb - usually larger than Tg
            if (totalWeight < pars.c_TB && dist2 < bgthresh)
                background = true;
            //check fit
            if (dist2 < genthresh)
            {
                //belongs to the mode: update its distribution
                fitsPDF = true;
                weight += alphaT;
                float k = alphaT / weight;
                //update variances (the range term only moves when both the
                //mode and the observation carry range data)
                float t_varnew = t_var + k * (t_diff*t_diff - t_var);
                float r_varnew = rangevar[modeind] + ((hasRange && hasModelRange) ? k * ((r_width*r_width + 1)*(r_diff*r_diff) + pars.c_r_varInflate - rangevar[modeind]) : 0.f);
                //update means; a mode without range adopts the observed centre
                tempmdl[modeind] = tempmdl[modeind] - k * t_diff;
                rangemdl[modeind] = hasModelRange ? (rangemdl[modeind] - (hasRange ? k * r_diff : 0.f)) : r_center;
                //clamp the variances
                t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t) ? pars.c_varMax_t : t_varnew;
                r_varnew = (r_varnew < pars.c_varMin_r) ? pars.c_varMin_r : (r_varnew > pars.c_varMax_r) ? pars.c_varMax_r : r_varnew;
                rangevar[modeind] = r_varnew;
                tempvar[modeind] = t_varnew;
                //bubble the matched mode up to keep modes sorted by weight
                for (int i = mode; i > 0; --i)
                {
                    if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                        break;
                    swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(rangemdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                }
            }
        } // !fitsPDF
        //prune modes whose weight has decayed away
        if (weight < -prune)
        {
            weight = 0.0f;
            lastmode--;
        }
        modelweights[modeind] = weight; //update weight by the calculated value
        totalWeight += weight;
    }
    //renormalize weights
    totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
        modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
    //no mode matched: spawn a new one (replacing the weakest if the set is full)
    if (!fitsPDF)
    {
        if (lastmode == pars.c_nmixtures) lastmode--;
        if (lastmode == 0)
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
        else
        {
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
            // renormalize all other weights
            for (int i = lastmode - 1; i >= 0; i--)
                modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
        }
        // init the new mode from the observation
        rangemdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = hasRange ? r_center : -1.0f;
        tempmdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = temp;
        tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
        rangevar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_r;
        //sort the new mode (index lastmode) into place
        for (int i = lastmode; i > 0; --i)
        {
            if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                break;
            swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(rangemdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
        }
    }
    //return inf if we don't have a range
    out_min[x + y*cols] = background ? NOPOINTVALUE : hasRange ? r_min : CUDART_INF_F;
    out_max[x + y*cols] = background ? NOPOINTVALUE : hasRange ? r_max : CUDART_INF_F;
}
//Thermal-only MOG2-style background update. Same structure as
//mixturegaussians() but each mode keeps only a thermal mean/variance; the
//range inputs are passed straight through to the outputs for foreground
//pixels.
//Fix: the new-mode sort started one index too low (lastmode - 1), so the
//freshly created mode at index 'lastmode' never took part in the bubble-up
//and already-sorted neighbours were swapped instead; it now starts at
//'lastmode', mirroring the matched-mode sort above.
__global__ void mixturegaussians_onlyTherm(const float * ranges_min, const float * ranges_max, const thermalType * temps, float * tempmdl, float * tempvar,
    float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows)
        return;
    float r_min = ranges_min[x + y*cols];
    float r_max = ranges_max[x + y*cols];
    float temp = temps[x + y*cols];
    //walk the modes in descending weight order
    bool background = false; // true - the pixel classified as background
    bool fitsPDF = false;    //if it remains false a new GMM mode will be added
    float totalWeight = 0.0f;
    int lastmode = 0;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
    {
        int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
        //skip modes with no weight
        if (modelweights[modeind] <= 0) continue;
        float weight = alpha1 * modelweights[modeind] + prune;
        lastmode++;
        //fit not found yet
        if (!fitsPDF)
        {
            //check if it belongs to some of the remaining modes
            float t_var = tempvar[modeind];
            float t_diff = tempmdl[modeind] - temp;
            float dist2 = t_diff*t_diff;
            float bgthresh = pars.c_Tb*t_var;
            float genthresh = pars.c_Tg*t_var;
            //background? - Tb - usually larger than Tg
            if (totalWeight < pars.c_TB && dist2 < bgthresh)
                background = true;
            //check fit
            if (dist2 < genthresh)
            {
                //belongs to the mode: update its distribution
                fitsPDF = true;
                weight += alphaT;
                float k = alphaT / weight;
                //update variance and mean
                float t_varnew = t_var + k * (t_diff*t_diff - t_var);
                tempmdl[modeind] = tempmdl[modeind] - k * t_diff;
                //clamp the variance
                t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t) ? pars.c_varMax_t : t_varnew;
                tempvar[modeind] = t_varnew;
                //bubble the matched mode up to keep modes sorted by weight
                for (int i = mode; i > 0; --i)
                {
                    if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                        break;
                    swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                }
            }
        } // !fitsPDF
        //prune modes whose weight has decayed away
        if (weight < -prune)
        {
            weight = 0.0f;
            lastmode--;
        }
        modelweights[modeind] = weight; //update weight by the calculated value
        totalWeight += weight;
    }
    //renormalize weights
    totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
        modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
    //no mode matched: spawn a new one (replacing the weakest if the set is full)
    if (!fitsPDF)
    {
        if (lastmode == pars.c_nmixtures) lastmode--;
        if (lastmode == 0)
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
        else
        {
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
            // renormalize all other weights
            for (int i = lastmode - 1; i >= 0; i--)
                modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
        }
        // init the new mode from the observation
        tempmdl[(x + (y*cols))*pars.c_nmixtures + lastmode] = temp;
        tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
        //sort the new mode (index lastmode) into place
        for (int i = lastmode; i > 0; --i)
        {
            if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                break;
            swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(tempmdl, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
        }
    }
    //pass the observed range through for foreground pixels
    out_min[x + y*cols] = background ? NOPOINTVALUE : r_min;
    out_max[x + y*cols] = background ? NOPOINTVALUE : r_max;
}
#define REGION_WIDTH 3
#define REGION_AREA (REGION_WIDTH*REGION_WIDTH)
#define REGION_HWIDTH (REGION_WIDTH/2)
//the actual bgr - region based method
//Region (3x3 patch) variant of the fused background update: each mode stores
//a REGION_AREA-element thermal and range template per pixel, and matching
//uses capped per-element squared errors summed over the patch.
//Fixes vs the previous revision:
// - operator precedence: r_varnew now adds the conditional increment to
//   rangevar[modeind] instead of using '(rangevar + hasRange) && hasModelRange'
//   as the ?: condition;
// - hasModelRange indexed element REGION_WIDTH (an edge sample); it now tests
//   the patch centre REGION_AREA/2, consistent with the observation-side test;
// - the new-mode sort started at lastmode-1 so the new mode was never bubbled
//   up; it now starts at 'lastmode'.
__global__ void mixturegaussians_region(const float * ranges_min, const float * ranges_max, float * rangemdl, float * rangevar, const thermalType * temps,
    float * tempmdl, float * tempvar, float * modelweights, float* out_min, float* out_max, int rows, int cols, float alphaT, float alpha1, float prune, DidoFusedAnalytics_3dBgSub_CUDA::bgrPars pars)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows)
        return;
    //gather the 3x3 patch centred on (x, y); out-of-image samples read as zero
    float r_center[REGION_AREA], r_width = 0, temp[REGION_AREA];
    for (int i = 0; i < REGION_AREA; i++)
    {
        int sx = x - REGION_HWIDTH + i % REGION_WIDTH;
        int sy = y - REGION_HWIDTH + i / REGION_WIDTH;
        if (sx < 0 || sy < 0 || sx >= cols || sy >= rows)
        {
            r_center[i] = 0;
            temp[i] = 0;
        }
        else
        {
            float r_min = ranges_min[sx + sy*cols];
            float r_max = ranges_max[sx + sy*cols];
            r_width += r_max - r_min;
            r_center[i] = (r_max + r_min) / 2;
            temp[i] = temps[sx + sy*cols];
        }
    }
    //normalise the accumulated interval widths to a mean width
    r_width /= REGION_AREA;
    //the patch centre decides whether this observation carries range data
    bool hasRange = r_center[REGION_AREA / 2] > 0;
    //walk the modes in descending weight order
    bool background = false; // true - the pixel classified as background
    bool fitsPDF = false;    //if it remains false a new GMM mode will be added
    float totalWeight = 0.0f;
    int lastmode = 0;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
    {
        int modeind = (x + (y*cols))*pars.c_nmixtures + mode;
        //skip modes with no weight
        if (modelweights[modeind] <= 0) continue;
        float weight = alpha1 * modelweights[modeind] + prune;
        lastmode++;
        //fit not found yet
        if (!fitsPDF)
        {
            //test the patch centre of the mode's range template
            bool hasModelRange = rangemdl[modeind*REGION_AREA + REGION_AREA / 2] > 0;
            float t_var = tempvar[modeind];
            //range observations are themselves gaussian distributed, so match
            //against the convolution of the two distributions
            float r_var = rangevar[modeind] + (r_width*r_width);
            //per-element differences between templates and observation
            float t_diff[REGION_AREA], r_diff[REGION_AREA];
            for (int i = 0; i < REGION_AREA; i++)
            {
                t_diff[i] = tempmdl[modeind*REGION_AREA + i] - temp[i];
                r_diff[i] = rangemdl[modeind*REGION_AREA + i] - r_center[i];
            }
            //capped L2 norms of the differences (c_errrorCap limits the
            //influence of any single outlier element)
            float tdist = 0, rdist = 0;
            for (int i = 0; i < REGION_AREA; i++)
            {
                tdist += fmin(t_diff[i] * t_diff[i], pars.c_errrorCap * t_var);
                rdist += fmin(r_diff[i] * r_diff[i], pars.c_errrorCap * r_var);
            }
            float dist2 = hasRange && hasModelRange ? tdist*r_var + rdist*t_var : tdist;
            float bgthresh = hasRange && hasModelRange ? pars.c_Tb * t_var * r_var *REGION_AREA : pars.c_Tb*t_var *REGION_AREA;
            float genthresh = hasRange && hasModelRange ? pars.c_Tg * t_var * r_var * REGION_AREA : pars.c_Tg*t_var *REGION_AREA;
            //background? - Tb - usually larger than Tg
            if (totalWeight < pars.c_TB && dist2 < bgthresh)
                background = true;
            //check fit
            if (dist2 < genthresh)
            {
                //belongs to the mode: update its distribution
                fitsPDF = true;
                weight += alphaT;
                float k = alphaT / weight;
                //update variances
                float t_varnew = t_var + k * (tdist/ REGION_AREA + pars.c_t_varInflate - t_var);
                float r_varnew = rangevar[modeind] + ((hasRange && hasModelRange) ? (k * ((r_width*r_width + 1)*(rdist/ REGION_AREA) + pars.c_r_varInflate - rangevar[modeind])) : 0.f);
                //update the templates; elements without range adopt the observation
                for (int i = 0; i < REGION_AREA; i++)
                {
                    tempmdl[modeind*REGION_AREA + i] = tempmdl[modeind*REGION_AREA + i] - k * t_diff[i];
                    rangemdl[modeind*REGION_AREA + i] = rangemdl[modeind*REGION_AREA + i] > 0 ? rangemdl[modeind*REGION_AREA + i] - k *(r_diff[i]) : r_center[i];
                }
                //clamp the variances
                t_varnew = (t_varnew < pars.c_varMin_t) ? pars.c_varMin_t : (t_varnew > pars.c_varMax_t) ? pars.c_varMax_t : t_varnew;
                r_varnew = (r_varnew < pars.c_varMin_r) ? pars.c_varMin_r : (r_varnew > pars.c_varMax_r) ? pars.c_varMax_r : r_varnew;
                rangevar[modeind] = r_varnew;
                tempvar[modeind] = t_varnew;
                //bubble the matched mode up to keep modes sorted by weight
                for (int i = mode; i > 0; --i)
                {
                    if (weight < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                        break;
                    swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
                    for (int k2 = 0; k2 < REGION_AREA; k2++)
                    {
                        swap(rangemdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k2, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k2);
                        swap(tempmdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k2, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k2);
                    }
                }
            }
        } // !fitsPDF
        //prune modes whose weight has decayed away
        if (weight < -prune)
        {
            weight = 0.0f;
            lastmode--;
        }
        modelweights[modeind] = weight; //update weight by the calculated value
        totalWeight += weight;
    }
    //renormalize weights
    totalWeight = totalWeight == 0 ? 1.f : 1.f / totalWeight;
    for (int mode = 0; mode < pars.c_nmixtures; ++mode)
        modelweights[(x + (y*cols))*pars.c_nmixtures + mode] *= totalWeight;
    //no mode matched: spawn a new one (replacing the weakest if the set is full)
    if (!fitsPDF)
    {
        if (lastmode == pars.c_nmixtures) lastmode--;
        if (lastmode == 0)
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = 1.f;
        else
        {
            modelweights[(x + (y*cols))*pars.c_nmixtures + lastmode] = alphaT;
            // renormalize all other weights
            for (int i = lastmode - 1; i >= 0; i--)
                modelweights[(x + (y*cols))*pars.c_nmixtures + i] *= alpha1;
        }
        // init the new mode from the observed patch
        for (int k = 0; k < REGION_AREA; k++)
        {
            rangemdl[((x + (y*cols))*pars.c_nmixtures + lastmode)*REGION_AREA + k] = r_center[k];
            tempmdl[((x + (y*cols))*pars.c_nmixtures + lastmode)*REGION_AREA + k] = temp[k];
        }
        tempvar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_t;
        rangevar[(x + (y*cols))*pars.c_nmixtures + lastmode] = pars.c_varInit_r;
        //sort the new mode (index lastmode) into place
        for (int i = lastmode; i > 0; --i)
        {
            if (alphaT < modelweights[(i - 1) + pars.c_nmixtures*(x + y*cols)])
                break;
            swap(modelweights, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(rangevar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            swap(tempvar, (i - 1) + pars.c_nmixtures*(x + y*cols), i + pars.c_nmixtures*(x + y*cols));
            for (int k = 0; k < REGION_AREA; k++)
            {
                swap(rangemdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
                swap(tempmdl, ((i - 1) + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k, (i + pars.c_nmixtures*(x + y*cols)) * REGION_AREA + k);
            }
        }
    }
    //return inf if we don't have a range
    out_min[x + y*cols] = background ? NOPOINTVALUE : hasRange ? ranges_min[x + y*cols] : CUDART_INF_F;
    out_max[x + y*cols] = background ? NOPOINTVALUE : hasRange ? ranges_max[x + y*cols] : CUDART_INF_F;
}
}
//called whenever we change the constants
//called whenever we change the constants
//Maps a joint squared-distance threshold onto the equivalent univariate
//threshold; the quartic coefficients were fitted numerically to the normal
//distribution over the range 1-6.
static inline float getUnivarateThresh(float thresh)
{
    const double quartic = pow(thresh, 4)*0.0016298f;
    const double cubic = pow(thresh, 3)*0.0080105f;
    const double quadratic = pow(thresh, 2)*0.1293664f;
    const double linear = thresh*1.3835517f;
    return quartic - cubic - quadratic + linear - 0.6398407f;
}
//Copy constructor: duplicates the device-side model buffers from cp.
//Fix: the buffer sizes must use the processing resolution
//(rows/scale x cols/scale), matching the allocating constructor; the original
//computed them at full resolution, over-allocating and reading past the end
//of cp's buffers in the cudaMemcpy calls whenever scale > 1.
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(DidoFusedAnalytics_3dBgSub_CUDA & cp): useRegion(cp.useRegion), scale(cp.scale)
{
    cols = cp.cols;
    rows = cp.rows;
    ct = cp.ct;
    history = cp.history;
    nsteps = cp.nsteps;
    pars = cp.pars;
    //cuda allocated variables
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows / scale)*(cols / scale)*pars.c_nmixtures * sizeof(float);
    int regionsize = useRegion ? modelsize*REGION_AREA : modelsize;
    //allocate the model
    HANDLE_ERROR(cudaMalloc(&rangeModel, regionsize));
    HANDLE_ERROR(cudaMalloc(&rangevars, modelsize));
    HANDLE_ERROR(cudaMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(cudaMalloc(&tempvars, modelsize));
    HANDLE_ERROR(cudaMalloc(&modelweights, modelsize));
    //duplicate the source model state
    HANDLE_ERROR(cudaMemcpy(modelweights, cp.modelweights, modelsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(rangeModel, cp.rangeModel, regionsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(rangevars, cp.rangevars, modelsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(tempmodel, cp.tempmodel, regionsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(tempvars, cp.tempvars, modelsize, cudaMemcpyDeviceToDevice));
#endif
}
//Move constructor: takes ownership of mv's device buffers and nulls its
//pointers so mv's destructor frees nothing.
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(DidoFusedAnalytics_3dBgSub_CUDA && mv): useRegion(mv.useRegion), scale(mv.scale)
{
    cols = mv.cols;
    rows = mv.rows;
    ct = mv.ct;
    history = mv.history;
    nsteps = mv.nsteps;
    pars = mv.pars;
    //steal the device allocations
    modelweights = mv.modelweights;
    rangeModel = mv.rangeModel;
    rangevars = mv.rangevars;
    tempmodel = mv.tempmodel;
    tempvars = mv.tempvars;
    mv.modelweights = nullptr;
    mv.rangeModel = nullptr;
    mv.rangevars = nullptr;
    mv.tempmodel = nullptr;
    mv.tempvars = nullptr;
}
//Construct a subtractor for a rows x cols panorama, processed internally at
//1/scale resolution; useregion selects the 3x3 patch-based kernel variant.
//Fix: zero the mode weights so the first apply() creates fresh modes instead
//of matching against uninitialised device memory (the kernels gate every mode
//on modelweights[modeind] <= 0).
DidoFusedAnalytics_3dBgSub_CUDA::DidoFusedAnalytics_3dBgSub_CUDA(int _rows, int _cols, bool useregion, int _scale)
    : rows(_rows), cols(_cols), useRegion(useregion), scale(_scale)
{
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows/scale)*(cols/scale)*pars.c_nmixtures*sizeof(float);
    int regionsize = useRegion ? modelsize*REGION_AREA : modelsize;
    //allocate the model (region variants store a 3x3 patch per mode)
    HANDLE_ERROR(cudaMalloc(&rangeModel, regionsize));
    HANDLE_ERROR(cudaMalloc(&rangevars, modelsize));
    HANDLE_ERROR(cudaMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(cudaMalloc(&tempvars, modelsize));
    HANDLE_ERROR(cudaMalloc(&modelweights, modelsize));
    HANDLE_ERROR(cudaMemset(modelweights, 0, modelsize));
    //cache the univariate equivalents of the default thresholds
    pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
    pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
#endif
}
//Release the device-side model buffers; safe on moved-from objects whose
//pointers have been nulled.
DidoFusedAnalytics_3dBgSub_CUDA::~DidoFusedAnalytics_3dBgSub_CUDA()
{
#if DIDOLIDAR_NOGPU
#else
    if (rangeModel) cudaFree(rangeModel);
    if (rangevars) cudaFree(rangevars);
    if (tempmodel) cudaFree(tempmodel);
    if (tempvars) cudaFree(tempvars);
    if (modelweights) cudaFree(modelweights);
#endif
}
//Run one fused background-subtraction step. All image pointers are device
//pointers at full resolution (the scale == 1 staging copies use
//cudaMemcpyDeviceToDevice). out_min/out_max receive NOPOINTVALUE for
//background pixels and the observed range (or +inf with no lidar return)
//for foreground. A negative learningRate selects the automatic
//1/min(nsteps, history) schedule.
//Fix: in the scale == 1 path the thermal frame was never copied into
//l_therm, so the kernel consumed uninitialised memory.
void DidoFusedAnalytics_3dBgSub_CUDA::apply(const thermalType * input_t,
    const float * input_d_min, const float * input_d_max, float * out_min, float * out_max, float learningRate)
{
#if DIDOLIDAR_NOGPU
#else
    nsteps++;
    float lr;
    //choose the learning rate
    if (learningRate < 0)
    {
        lr = nsteps > history ? 1.f/history : 1.f/(nsteps);
    }
    else
    {
        lr = learningRate;
    }
    //working buffers at the (downsampled) processing resolution
    //NOTE(review): these are reallocated every frame; consider caching them
    //as members if allocation shows up in profiling
    float * l_d_min, *l_d_max, *l_o_max, *l_o_min;
    thermalType * l_therm;
    HANDLE_ERROR(cudaMalloc(&l_d_max, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_d_min, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_o_max, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_o_min, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_therm, (cols / scale)*(rows / scale) * sizeof(thermalType)));
    int blockdim = 16;
    dim3 grid((cols / scale) / blockdim + 1, (rows / scale) / blockdim + 1);
    dim3 block(blockdim, blockdim);
    if (scale == 1)
    {
        HANDLE_ERROR(cudaMemcpy(l_d_max, input_d_max, cols*rows*sizeof(float), cudaMemcpyDeviceToDevice));
        HANDLE_ERROR(cudaMemcpy(l_d_min, input_d_min, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
        //fix: stage the thermal frame too - it was previously left uninitialised
        HANDLE_ERROR(cudaMemcpy(l_therm, input_t, cols*rows * sizeof(thermalType), cudaMemcpyDeviceToDevice));
    }
    else
    {
        bgrcuda::downsample<float> <<<grid, block >>> (input_d_max, l_d_max, rows, cols, scale);
        bgrcuda::downsample <float><<<grid, block >>> (input_d_min, l_d_min, rows, cols, scale);
        bgrcuda::downsample <thermalType> <<<grid, block >>> (input_t, l_therm, rows, cols, scale);
    }
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    //run the bgr
    if (useRegion)
    {
        bgrcuda::mixturegaussians_region<<<grid, block>>>(l_d_min, l_d_max, rangeModel, rangevars, l_therm,
            tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
    }
    else
    {
        bgrcuda::mixturegaussians<<<grid, block>>>(l_d_min, l_d_max, rangeModel, rangevars, l_therm,
            tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
    }
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    //bring the masked range images back to full resolution
    if (scale == 1)
    {
        HANDLE_ERROR(cudaMemcpy(out_max, l_o_max, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
        HANDLE_ERROR(cudaMemcpy(out_min, l_o_min, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
    }
    else
    {
        grid = dim3((cols) / blockdim + 1, (rows ) / blockdim + 1);
        bgrcuda::upsample <<<grid, block >>> (l_o_max, out_max, rows, cols, scale);
        bgrcuda::upsample<<<grid, block >>> (l_o_min, out_min, rows, cols, scale);
    }
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    if (l_o_max) cudaFree(l_o_max);
    if (l_d_max) cudaFree(l_d_max);
    if (l_d_min) cudaFree(l_d_min);
    if (l_o_min) cudaFree(l_o_min);
    if (l_therm) cudaFree(l_therm);
#endif
}
//Set the frame count over which the automatic learning rate is averaged.
void DidoFusedAnalytics_3dBgSub_CUDA::setHistory(int hist_)
{
    history = hist_;
}
//Set the cumulative mode-weight fraction treated as the background model.
void DidoFusedAnalytics_3dBgSub_CUDA::setBackgroundWeight(float TB)
{
    pars.c_TB = TB;
}
//Reset the variance parameters of the mixture model; the clamp limits and the
//per-update inflation terms are all derived from the two initial values.
void DidoFusedAnalytics_3dBgSub_CUDA::setVariance(float initTempVar, float initRangeVar)
{
    const float tInit = initTempVar;
    const float rInit = initRangeVar;
    //thermal-space parameters
    pars.c_varInit_t = tInit;          // variance assigned to newly created components
    pars.c_varMax_t = tInit * 5.0f;    // upper clamp
    pars.c_varMin_t = tInit / 1.5f;    // lower clamp
    //range-space parameters
    pars.c_varInit_r = rInit;          // variance assigned to newly created components
    pars.c_varMax_r = rInit * 5.0f;
    pars.c_varMin_r = rInit / 4;
    //inflation added when a mode is matched, keeping variances from collapsing
    pars.c_r_varInflate = rInit / 3;
    pars.c_t_varInflate = tInit / 3;
}
//Update the background (Tb) and generative (Tg) squared-distance thresholds
//and refresh their cached univariate equivalents.
void DidoFusedAnalytics_3dBgSub_CUDA::setThresholds(float backgroundThresh, float generativeThresh)
{
    pars.c_Tb = backgroundThresh;
    pars.c_Tg = generativeThresh;
    pars.c_Tb_u = getUnivarateThresh(backgroundThresh);
    pars.c_Tg_u = getUnivarateThresh(generativeThresh);
}
//Copy constructor: duplicates the device-side thermal model buffers from cp.
//Fix: the buffer sizes must use the processing resolution
//(rows/scale x cols/scale), matching the allocating constructor; the original
//computed them at full resolution, over-allocating and reading past the end
//of cp's buffers in the cudaMemcpy calls whenever scale > 1.
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly & cp): scale(cp.scale)
{
    cols = cp.cols;
    rows = cp.rows;
    ct = cp.ct;
    history = cp.history;
    nsteps = cp.nsteps;
    pars = cp.pars;
    //cuda allocated variables
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows / scale)*(cols / scale)*pars.c_nmixtures * sizeof(float);
    int regionsize = modelsize;
    //allocate the model
    HANDLE_ERROR(cudaMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(cudaMalloc(&tempvars, modelsize));
    HANDLE_ERROR(cudaMalloc(&modelweights, modelsize));
    //duplicate the source model state
    HANDLE_ERROR(cudaMemcpy(modelweights, cp.modelweights, modelsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(tempmodel, cp.tempmodel, regionsize, cudaMemcpyDeviceToDevice));
    HANDLE_ERROR(cudaMemcpy(tempvars, cp.tempvars, modelsize, cudaMemcpyDeviceToDevice));
#endif
}
//Move constructor: takes ownership of mv's device buffers and nulls its
//pointers so mv's destructor frees nothing.
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly && mv): scale(mv.scale)
{
    cols = mv.cols;
    rows = mv.rows;
    ct = mv.ct;
    history = mv.history;
    nsteps = mv.nsteps;
    pars = mv.pars;
    //steal the device allocations
    modelweights = mv.modelweights;
    tempmodel = mv.tempmodel;
    tempvars = mv.tempvars;
    mv.modelweights = nullptr;
    mv.tempmodel = nullptr;
    mv.tempvars = nullptr;
}
//Construct a thermal-only subtractor for a rows x cols panorama, processed
//internally at 1/scale resolution.
//Fix: zero the mode weights so the first apply() creates fresh modes instead
//of matching against uninitialised device memory (the kernel gates every mode
//on modelweights[modeind] <= 0).
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly(int _rows, int _cols, int _scale)
    : rows(_rows), cols(_cols), scale(_scale)
{
#if DIDOLIDAR_NOGPU
#else
    int modelsize = (rows/scale)*(cols/scale)*pars.c_nmixtures*sizeof(float);
    int regionsize = modelsize;
    //allocate the model
    HANDLE_ERROR(cudaMalloc(&tempmodel, regionsize));
    HANDLE_ERROR(cudaMalloc(&tempvars, modelsize));
    HANDLE_ERROR(cudaMalloc(&modelweights, modelsize));
    HANDLE_ERROR(cudaMemset(modelweights, 0, modelsize));
    //cache the univariate equivalents of the default thresholds
    pars.c_Tb_u = getUnivarateThresh(pars.c_Tb);
    pars.c_Tg_u = getUnivarateThresh(pars.c_Tg);
#endif
}
//Release the device-side model buffers; safe on moved-from objects whose
//pointers have been nulled.
DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::~DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly()
{
#if DIDOLIDAR_NOGPU
#else
    if (tempmodel) cudaFree(tempmodel);
    if (tempvars) cudaFree(tempvars);
    if (modelweights) cudaFree(modelweights);
#endif
}
//Run one thermal-only background-subtraction step. All image pointers are
//device pointers at full resolution; out_min/out_max receive NOPOINTVALUE for
//background pixels and the input range for foreground. A negative
//learningRate selects the automatic 1/min(nsteps, history) schedule.
//Fix: in the scale == 1 path the thermal frame was never copied into
//l_therm, so the kernel consumed uninitialised memory.
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::apply(const thermalType * input_t,
    const float * input_d_min, const float * input_d_max, float * out_min, float * out_max, float learningRate)
{
#if DIDOLIDAR_NOGPU
#else
    nsteps++;
    float lr;
    //choose the learning rate
    if (learningRate < 0)
    {
        lr = nsteps > history ? 1.f/history : 1.f/(nsteps);
    }
    else
    {
        lr = learningRate;
    }
    //working buffers at the (downsampled) processing resolution
    //NOTE(review): these are reallocated every frame; consider caching them
    //as members if allocation shows up in profiling
    float * l_d_min, *l_d_max, *l_o_max, *l_o_min;
    thermalType * l_therm;
    HANDLE_ERROR(cudaMalloc(&l_d_max, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_d_min, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_o_max, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_o_min, (cols / scale)*(rows / scale) * sizeof(float)));
    HANDLE_ERROR(cudaMalloc(&l_therm, (cols / scale)*(rows / scale) * sizeof(thermalType)));
    int blockdim = 16;
    dim3 grid((cols / scale) / blockdim + 1, (rows / scale) / blockdim + 1);
    dim3 block(blockdim, blockdim);
    if (scale == 1)
    {
        HANDLE_ERROR(cudaMemcpy(l_d_max, input_d_max, cols*rows*sizeof(float), cudaMemcpyDeviceToDevice));
        HANDLE_ERROR(cudaMemcpy(l_d_min, input_d_min, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
        //fix: stage the thermal frame too - it was previously left uninitialised
        HANDLE_ERROR(cudaMemcpy(l_therm, input_t, cols*rows * sizeof(thermalType), cudaMemcpyDeviceToDevice));
    }
    else
    {
        bgrcuda::downsample<float> <<<grid, block >>> (input_d_max, l_d_max, rows, cols, scale);
        bgrcuda::downsample <float><<<grid, block >>> (input_d_min, l_d_min, rows, cols, scale);
        bgrcuda::downsample <thermalType> <<<grid, block >>> (input_t, l_therm, rows, cols, scale);
    }
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    //run the thermal-only mixture-of-gaussians update
    bgrcuda::mixturegaussians_onlyTherm<<<grid, block>>>(l_d_min, l_d_max, l_therm,
        tempmodel, tempvars, modelweights, l_o_min, l_o_max, rows/scale, cols/scale, lr, 1.0f - lr, -lr*ct, pars);
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    //bring the masked range images back to full resolution
    if (scale == 1)
    {
        HANDLE_ERROR(cudaMemcpy(out_max, l_o_max, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
        HANDLE_ERROR(cudaMemcpy(out_min, l_o_min, cols*rows * sizeof(float), cudaMemcpyDeviceToDevice));
    }
    else
    {
        grid = dim3((cols) / blockdim + 1, (rows ) / blockdim + 1);
        bgrcuda::upsample <<<grid, block >>> (l_o_max, out_max, rows, cols, scale);
        bgrcuda::upsample<<<grid, block >>> (l_o_min, out_min, rows, cols, scale);
    }
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaGetLastError());
    if (l_o_max) cudaFree(l_o_max);
    if (l_d_max) cudaFree(l_d_max);
    if (l_d_min) cudaFree(l_d_min);
    if (l_o_min) cudaFree(l_o_min);
    if (l_therm) cudaFree(l_therm);
#endif
}
//Set the frame count over which the automatic learning rate is averaged.
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setHistory(int hist_)
{
    history = hist_;
}
//Set the cumulative mode-weight fraction treated as the background model.
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setBackgroundWeight(float TB)
{
    pars.c_TB = TB;
}
//Reset the variance parameters of the mixture model; the clamp limits and the
//per-update inflation terms are all derived from the two initial values.
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setVariance(float initTempVar, float initRangeVar)
{
    const float tInit = initTempVar;
    const float rInit = initRangeVar;
    //thermal-space parameters
    pars.c_varInit_t = tInit;          // variance assigned to newly created components
    pars.c_varMax_t = tInit * 5.0f;    // upper clamp
    pars.c_varMin_t = tInit / 1.5f;    // lower clamp
    //range-space parameters
    pars.c_varInit_r = rInit;          // variance assigned to newly created components
    pars.c_varMax_r = rInit * 5.0f;
    pars.c_varMin_r = rInit / 4;
    //inflation added when a mode is matched, keeping variances from collapsing
    pars.c_r_varInflate = rInit / 3;
    pars.c_t_varInflate = tInit / 3;
}
//Update the background (Tb) and generative (Tg) squared-distance thresholds
//and refresh their cached univariate equivalents.
void DidoFusedAnalytics_3dBgSub_CUDA_ThermalOnly::setThresholds(float backgroundThresh, float generativeThresh)
{
    pars.c_Tb = backgroundThresh;
    pars.c_Tg = generativeThresh;
    pars.c_Tb_u = getUnivarateThresh(backgroundThresh);
    pars.c_Tg_u = getUnivarateThresh(generativeThresh);
}
} |
828b3c65343340605af8ac4a91368f2b9fbd5552.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Thread block size
#define BLOCK_SIZE 16
#define MATRIX_SIZE 1024
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Get a matrix element
// Row-major with explicit stride: element (row, col) lives at row*stride+col.
// No bounds checking -- the caller must keep indices inside the (sub)matrix.
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
// (same addressing and same no-bounds-check contract as GetElement).
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// The result is a *view*: it aliases A.elements and keeps A's stride, so
// writes through the sub-matrix modify A.  Nothing is copied or allocated.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
#include <stdlib.h>  // for exit(): self-contained error handling below

// Abort with a diagnostic when a HIP runtime call fails.  Once one call has
// failed every later result is meaningless, so terminating is the simplest
// correct policy for this example program.
static void hipCheck(hipError_t err, const char *what)
{
    if (err != hipSuccess) {
        fprintf(stderr, "HIP error (%s): %s\n", what, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Matrix multiplication - Host code: C = A * B via the tiled MatMulKernel.
// Preconditions (not verified): all dimensions are multiples of BLOCK_SIZE
// and A.width == B.height.  Prints the measured kernel time in milliseconds.
// Fixes over the original: every runtime call is checked, the kernel launch
// is checked via hipGetLastError(), and the two timing events are destroyed
// (they were leaked before).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory (dense buffers, so stride == width).
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipCheck(hipMalloc(&d_A.elements, size), "hipMalloc d_A");
    hipCheck(hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice),
             "hipMemcpy A");
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipCheck(hipMalloc(&d_B.elements, size), "hipMalloc d_B");
    hipCheck(hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice),
             "hipMemcpy B");
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    hipCheck(hipMalloc(&d_C.elements, size), "hipMalloc d_C");
    // Events used to time the kernel.
    hipEvent_t start, stop;
    hipCheck(hipEventCreate(&start), "hipEventCreate start");
    hipCheck(hipEventCreate(&stop), "hipEventCreate stop");
    hipCheck(hipEventRecord(start), "hipEventRecord start");
    // Invoke kernel: one BLOCK_SIZE x BLOCK_SIZE block per tile of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    hipLaunchKernelGGL(MatMulKernel, dimGrid, dimBlock, 0, 0, d_A, d_B, d_C);
    hipCheck(hipGetLastError(), "MatMulKernel launch");
    // Stop the clock; hipEventSynchronize waits for the kernel to finish.
    hipCheck(hipEventRecord(stop), "hipEventRecord stop");
    hipCheck(hipEventSynchronize(stop), "hipEventSynchronize");
    float milliseconds = 0;
    hipCheck(hipEventElapsedTime(&milliseconds, start, stop),
             "hipEventElapsedTime");
    printf("Time elapsed: %f\n", milliseconds);
    // Read C from device memory (`size` still holds C's byte count).
    hipCheck(hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost),
             "hipMemcpy C");
    // Release timing events and device memory.
    hipCheck(hipEventDestroy(start), "hipEventDestroy start");
    hipCheck(hipEventDestroy(stop), "hipEventDestroy stop");
    hipCheck(hipFree(d_A.elements), "hipFree d_A");
    hipCheck(hipFree(d_B.elements), "hipFree d_B");
    hipCheck(hipFree(d_C.elements), "hipFree d_C");
}
// Matrix multiplication kernel called by MatMul()
// Tiled matrix multiply: C = A * B, one BLOCK_SIZE x BLOCK_SIZE tile of C per
// thread block and one output element per thread.
// Preconditions (not checked): blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all
// matrix dimensions are multiples of BLOCK_SIZE, so every thread loads one
// tile element and both __syncthreads() below are reached by every thread of
// the block (uniform control flow).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Prepare a dense square MATRIX_SIZE x MATRIX_SIZE matrix.
// Allocates host storage for the elements; the buffer is uninitialized and
// owned by the caller (release it with free()).  malloc is not checked -- a
// failure surfaces later as a crash, exactly as in the rest of this example.
void initMatrix(Matrix *m) {
    const size_t count = (size_t) MATRIX_SIZE * MATRIX_SIZE;
    m->width = MATRIX_SIZE;
    m->height = MATRIX_SIZE;
    m->stride = MATRIX_SIZE;
    m->elements = (float *) malloc(count * sizeof(float));
}
// Fill every element of m with a pseudo-random value from rand().
// Fix: rows are addressed with m.stride (the documented layout
// "M(row, col) = elements[row * stride + col]") instead of m.width, so the
// function stays correct if the matrix is ever a strided view.  Current
// callers set stride == width, so behavior is unchanged for them.
// Note: rand() is never seeded, so the sequence is identical on every run.
void randomMatrix(Matrix m) {
    for (int i = 0; i < m.height; i++) {
        for (int j = 0; j < m.width; j++) {
            m.elements[i * m.stride + j] = (float) rand();
        }
    }
}
// Build two random MATRIX_SIZE x MATRIX_SIZE matrices, multiply them on the
// GPU and release the host buffers.  The product is not inspected; the
// program exists to exercise and time the kernel.
int main() {
    Matrix A, B, C;
    initMatrix(&A);
    initMatrix(&B);
    initMatrix(&C);
    randomMatrix(A);
    randomMatrix(B);
    MatMul(A, B, C);
    // Fix: the original leaked all three host allocations.
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
| 828b3c65343340605af8ac4a91368f2b9fbd5552.cu | #include <stdio.h>
// Thread block size
#define BLOCK_SIZE 16
#define MATRIX_SIZE 1024
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Get a matrix element
// Row-major with explicit stride: element (row, col) lives at row*stride+col.
// No bounds checking -- the caller must keep indices inside the (sub)matrix.
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
// (same addressing and same no-bounds-check contract as GetElement).
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// The result is a *view*: it aliases A.elements and keeps A's stride, so
// writes through the sub-matrix modify A.  Nothing is copied or allocated.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
#include <stdlib.h>  // for exit(): self-contained error handling below

// Abort with a diagnostic when a CUDA runtime call fails.  Once one call has
// failed every later result is meaningless, so terminating is the simplest
// correct policy for this example program.
static void cudaCheck(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Matrix multiplication - Host code: C = A * B via the tiled MatMulKernel.
// Preconditions (not verified): all dimensions are multiples of BLOCK_SIZE
// and A.width == B.height.  Prints the measured kernel time in milliseconds.
// Fixes over the original: every runtime call is checked, the kernel launch
// is checked via cudaGetLastError(), and the two timing events are destroyed
// (they were leaked before).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory (dense buffers, so stride == width).
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaCheck(cudaMalloc(&d_A.elements, size), "cudaMalloc d_A");
    cudaCheck(cudaMemcpy(d_A.elements, A.elements, size,
                         cudaMemcpyHostToDevice), "cudaMemcpy A");
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaCheck(cudaMalloc(&d_B.elements, size), "cudaMalloc d_B");
    cudaCheck(cudaMemcpy(d_B.elements, B.elements, size,
                         cudaMemcpyHostToDevice), "cudaMemcpy B");
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaCheck(cudaMalloc(&d_C.elements, size), "cudaMalloc d_C");
    // Events used to time the kernel.
    cudaEvent_t start, stop;
    cudaCheck(cudaEventCreate(&start), "cudaEventCreate start");
    cudaCheck(cudaEventCreate(&stop), "cudaEventCreate stop");
    cudaCheck(cudaEventRecord(start), "cudaEventRecord start");
    // Invoke kernel: one BLOCK_SIZE x BLOCK_SIZE block per tile of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaCheck(cudaGetLastError(), "MatMulKernel launch");
    // Stop the clock; cudaEventSynchronize waits for the kernel to finish.
    cudaCheck(cudaEventRecord(stop), "cudaEventRecord stop");
    cudaCheck(cudaEventSynchronize(stop), "cudaEventSynchronize");
    float milliseconds = 0;
    cudaCheck(cudaEventElapsedTime(&milliseconds, start, stop),
              "cudaEventElapsedTime");
    printf("Time elapsed: %f\n", milliseconds);
    // Read C from device memory (`size` still holds C's byte count).
    cudaCheck(cudaMemcpy(C.elements, d_C.elements, size,
                         cudaMemcpyDeviceToHost), "cudaMemcpy C");
    // Release timing events and device memory.
    cudaCheck(cudaEventDestroy(start), "cudaEventDestroy start");
    cudaCheck(cudaEventDestroy(stop), "cudaEventDestroy stop");
    cudaCheck(cudaFree(d_A.elements), "cudaFree d_A");
    cudaCheck(cudaFree(d_B.elements), "cudaFree d_B");
    cudaCheck(cudaFree(d_C.elements), "cudaFree d_C");
}
// Matrix multiplication kernel called by MatMul()
// Tiled matrix multiply: C = A * B, one BLOCK_SIZE x BLOCK_SIZE tile of C per
// thread block and one output element per thread.
// Preconditions (not checked): blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all
// matrix dimensions are multiples of BLOCK_SIZE, so every thread loads one
// tile element and both __syncthreads() below are reached by every thread of
// the block (uniform control flow).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Prepare a dense square MATRIX_SIZE x MATRIX_SIZE matrix: allocates host
// storage for the elements (uninitialized; the caller owns the buffer and
// must free() it).  malloc is not checked -- a failure surfaces later.
void initMatrix(Matrix *m) {
m->height = MATRIX_SIZE;
m->width = MATRIX_SIZE;
m->stride = MATRIX_SIZE;
m->elements = (float *) malloc(m->height * m->width * sizeof(float));
}
// Fill every element of m with a pseudo-random value from rand().
// Fix: rows are addressed with m.stride (the documented layout
// "M(row, col) = elements[row * stride + col]") instead of m.width, so the
// function stays correct if the matrix is ever a strided view.  Current
// callers set stride == width, so behavior is unchanged for them.
// Note: rand() is never seeded, so the sequence is identical on every run.
void randomMatrix(Matrix m) {
    for (int i = 0; i < m.height; i++) {
        for (int j = 0; j < m.width; j++) {
            m.elements[i * m.stride + j] = (float) rand();
        }
    }
}
// Build two random MATRIX_SIZE x MATRIX_SIZE matrices, multiply them on the
// GPU and release the host buffers.  The product is not inspected; the
// program exists to exercise and time the kernel.
int main() {
    Matrix A, B, C;
    initMatrix(&A);
    initMatrix(&B);
    initMatrix(&C);
    randomMatrix(A);
    randomMatrix(B);
    MatMul(A, B, C);
    // Fix: the original leaked all three host allocations.
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
f33db7b99ea0ca44726ccf977bdfe988bac76da4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This example shows how to use the clock function to measure the performance of
// block of threads of a kernel accurately.
//
// Blocks are executed in parallel and out of order. Since there's no synchronization
// mechanism between blocks, we measure the clock once for each block. The clock
// samples are written to device memory.
// System includes
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include<stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
#include <assert.h>
// Fill a[0..n-1] with pseudo-random integers in [0, 100).
// Index addressing instead of walking the pointer; rand() is never seeded by
// this program, so the sequence repeats across runs.  n <= 0 is a no-op.
void rand_ints(int *a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 100;
}
#include<ctime>
// Return the smaller of two ints (for equal arguments the common value).
int imin(int a, int b)
{
    return (a < b) ? a : b;
}
const int N = 33 * 1024;
const int threadPerBlock = 256;
// Element-wise vector add: result[i] = first[i] + second[i] for i in [0, n).
// Grid-stride loop: each thread advances by the total thread count, so any
// <<<blocks, threads>>> configuration covers all n elements.
__global__ void add(int *first, int * second, int * result, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < n; tid += stride)
        result[tid] = first[tid] + second[tid];
}
// Driver: adds two NU-element host vectors (all 2s and all 5s) on the GPU
// and prints the first 100 results (each should be 7).
int main(int argc, char **argv)
{
    int *a, *b, *c;
    int *da, *db, *dc;
    const int NU = 100000;
    a = (int*)malloc(NU*sizeof(int));
    b = (int*)malloc(NU*sizeof(int));
    c = (int*)malloc(NU*sizeof(int));
    for (int i = 0; i < NU; ++i)
    {
        a[i] = 2;
        b[i] = 5;
    }
    hipMalloc((void**)&da, NU*sizeof(int));
    hipMalloc((void**)&db, NU*sizeof(int));
    hipMalloc((void**)&dc, NU*sizeof(int));
    hipMemcpy(da, a, NU*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(db, b, NU*sizeof(int), hipMemcpyHostToDevice);
    // Grid size is derived from the example's default N; the kernel's
    // grid-stride loop still covers all NU elements.
    hipLaunchKernelGGL(( add), dim3((N + threadPerBlock - 1)/threadPerBlock), dim3(threadPerBlock) , 0, 0, da, db, dc, NU);
    hipMemcpy(c, dc, NU*sizeof(int), hipMemcpyDeviceToHost);
    for (int i = 0; i < 100; ++i)
    {
        printf("%d\n", c[i]);
    }
    // Fix: the original leaked every host and device allocation made above.
    hipFree(da);
    hipFree(db);
    hipFree(dc);
    free(a);
    free(b);
    free(c);
    return 0;
}
| f33db7b99ea0ca44726ccf977bdfe988bac76da4.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This example shows how to use the clock function to measure the performance of
// block of threads of a kernel accurately.
//
// Blocks are executed in parallel and out of order. Since there's no synchronization
// mechanism between blocks, we measure the clock once for each block. The clock
// samples are written to device memory.
// System includes
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include<stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <assert.h>
// Fill a[0..n-1] with pseudo-random integers in [0, 100).
// Index addressing instead of walking the pointer; rand() is never seeded by
// this program, so the sequence repeats across runs.  n <= 0 is a no-op.
void rand_ints(int *a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 100;
}
#include<ctime>
// Return the smaller of two ints (for equal arguments the common value).
int imin(int a, int b)
{
    return (a < b) ? a : b;
}
const int N = 33 * 1024;
const int threadPerBlock = 256;
// Element-wise vector add: result[tid] = first[tid] + second[tid] for
// tid in [0, n).  Grid-stride loop: each thread strides by the total thread
// count, so the kernel is correct for any launch configuration and any n.
__global__ void add(int *first, int * second, int * result, int n)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
while(tid < n)
{
result[tid] = first[tid] + second[tid];
tid += blockDim.x* gridDim.x;
}
}
// Driver: adds two NU-element host vectors (all 2s and all 5s) on the GPU
// and prints the first 100 results (each should be 7).
int main(int argc, char **argv)
{
    int *a, *b, *c;
    int *da, *db, *dc;
    const int NU = 100000;
    a = (int*)malloc(NU*sizeof(int));
    b = (int*)malloc(NU*sizeof(int));
    c = (int*)malloc(NU*sizeof(int));
    for (int i = 0; i < NU; ++i)
    {
        a[i] = 2;
        b[i] = 5;
    }
    cudaMalloc((void**)&da, NU*sizeof(int));
    cudaMalloc((void**)&db, NU*sizeof(int));
    cudaMalloc((void**)&dc, NU*sizeof(int));
    cudaMemcpy(da, a, NU*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, NU*sizeof(int), cudaMemcpyHostToDevice);
    // Grid size is derived from the example's default N; the kernel's
    // grid-stride loop still covers all NU elements.
    add<<<(N + threadPerBlock - 1)/threadPerBlock, threadPerBlock >>>(da, db, dc, NU);
    cudaMemcpy(c, dc, NU*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 100; ++i)
    {
        printf("%d\n", c[i]);
    }
    // Fix: the original leaked every host and device allocation made above.
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
f1769ff6966a0f2ee5dc6bd904c5821e206d5a78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace testing {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_a __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_b __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_x __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_y __attribute__((unused)) = params_.state_vars[3];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// One thread per mechanism instance: set the initial state values
// a = 0.2, b = 1 - a, x = 0.6, y = 1 - x (each pair sums to 1).
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
_pp_var_a[tid_] = 0.20000000000000001;
_pp_var_b[tid_] = 1.0-_pp_var_a[tid_];
_pp_var_x[tid_] = 0.59999999999999998;
_pp_var_y[tid_] = 1.0-_pp_var_x[tid_];
}
}
// Scale each state variable by the per-instance multiplicity.
// blockIdx.y selects which of the state variables to scale, so the launcher
// must use a grid with gridDim.y == number of state variables (4 here).
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Advance the mechanism state by one per-CV time step dt (read via
// node_index).  The update solves, in closed form, two independent 2x2
// linear systems in (x, y) and (a, b): the a_*_ values are the matrix
// entries, t_0_/t_4_ the determinants, and the new states are the
// Cramer's-rule quotients.  The structure is consistent with an implicit
// (backward-Euler-style) step of two 2-state kinetic schemes with hard-coded
// rate constants alpha/beta/gamma/delta -- generated code; verify against
// the source mechanism description if exact semantics matter.
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type t_7_, t_5_, t_3_, t_2_, t_0_, a_7_, t_6_, a_6_, a_5_, a_4_, t_4_, alpha, a_3_, a_2_, a_1_, delta, t_1_, beta, a_0_, gamma;
alpha = 2.0;
beta = 0.59999999999999998;
gamma = 3.0;
delta = 0.69999999999999996;
a_0_ = 1.0- -1.0*alpha*dt;
a_1_ = -( -1.0* -beta*dt);
a_2_ = -(alpha*dt);
a_3_ = 1.0- -beta*dt;
a_4_ = 1.0- -1.0*gamma*dt;
a_5_ = -( -1.0* -delta*dt);
a_6_ = -(gamma*dt);
a_7_ = 1.0- -delta*dt;
t_0_ = a_7_*a_4_-a_5_*a_6_;
t_1_ = a_7_*_pp_var_x[tid_]-a_5_*_pp_var_y[tid_];
t_2_ = t_0_*a_7_;
t_3_ = t_0_*_pp_var_y[tid_]-a_6_*t_1_;
t_4_ = a_3_*a_0_-a_1_*a_2_;
t_5_ = a_3_*_pp_var_a[tid_]-a_1_*_pp_var_b[tid_];
t_6_ = t_4_*a_3_;
t_7_ = t_4_*_pp_var_b[tid_]-a_2_*t_5_;
_pp_var_a[tid_] = t_5_/t_4_;
_pp_var_b[tid_] = t_7_/t_6_;
_pp_var_x[tid_] = t_1_/t_0_;
_pp_var_y[tid_] = t_3_/t_2_;
}
}
} // namespace
// Launch `init` to set the initial state values and then, when per-instance
// multiplicities are present, launch `multiply` to scale every state
// variable by them.
void mechanism_test1_kin_diff_gpu_init_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    hipLaunchKernelGGL(init, dim3(grid_dim), dim3(block_dim), 0, 0, *p);
    if (!p->multiplicity) return;
    // Fix: hipify had split the brace initializer dim3{grid_dim, 4} at its
    // comma, emitting `dim3(dim3{grid_dim), dim3(4})`, which does not
    // compile.  The grid's y extent of 4 selects the state variable inside
    // `multiply` (see the kernel, which indexes state_vars by blockIdx.y).
    hipLaunchKernelGGL(multiply, dim3(grid_dim, 4), dim3(block_dim), 0, 0, *p);
}
// Intentionally empty: this mechanism contributes no transmembrane current.
void mechanism_test1_kin_diff_gpu_compute_currents_(arb_mechanism_ppack* p) {}
// Launch `advance_state`: one thread per mechanism instance, 128 threads per
// block, grid sized to cover p->width instances.
void mechanism_test1_kin_diff_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
// Remaining ABI entry points are intentionally empty: the mechanism writes
// no ion data, has no post-event work and handles no deliverable events.
void mechanism_test1_kin_diff_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_test1_kin_diff_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_test1_kin_diff_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace testing
| f1769ff6966a0f2ee5dc6bd904c5821e206d5a78.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace testing {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_a __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_b __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_x __attribute__((unused)) = params_.state_vars[2];\
auto* _pp_var_y __attribute__((unused)) = params_.state_vars[3];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// One thread per mechanism instance: set the initial state values
// a = 0.2, b = 1 - a, x = 0.6, y = 1 - x (each pair sums to 1).
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
_pp_var_a[tid_] = 0.20000000000000001;
_pp_var_b[tid_] = 1.0-_pp_var_a[tid_];
_pp_var_x[tid_] = 0.59999999999999998;
_pp_var_y[tid_] = 1.0-_pp_var_x[tid_];
}
}
// Scale each state variable by the per-instance multiplicity.
// blockIdx.y selects which of the state variables to scale, so the launcher
// uses a grid with gridDim.y == number of state variables (4 here).
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Advance the mechanism state by one per-CV time step dt (read via
// node_index).  The update solves, in closed form, two independent 2x2
// linear systems in (x, y) and (a, b): the a_*_ values are the matrix
// entries, t_0_/t_4_ the determinants, and the new states are the
// Cramer's-rule quotients.  The structure is consistent with an implicit
// (backward-Euler-style) step of two 2-state kinetic schemes with hard-coded
// rate constants alpha/beta/gamma/delta -- generated code; verify against
// the source mechanism description if exact semantics matter.
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type t_7_, t_5_, t_3_, t_2_, t_0_, a_7_, t_6_, a_6_, a_5_, a_4_, t_4_, alpha, a_3_, a_2_, a_1_, delta, t_1_, beta, a_0_, gamma;
alpha = 2.0;
beta = 0.59999999999999998;
gamma = 3.0;
delta = 0.69999999999999996;
a_0_ = 1.0- -1.0*alpha*dt;
a_1_ = -( -1.0* -beta*dt);
a_2_ = -(alpha*dt);
a_3_ = 1.0- -beta*dt;
a_4_ = 1.0- -1.0*gamma*dt;
a_5_ = -( -1.0* -delta*dt);
a_6_ = -(gamma*dt);
a_7_ = 1.0- -delta*dt;
t_0_ = a_7_*a_4_-a_5_*a_6_;
t_1_ = a_7_*_pp_var_x[tid_]-a_5_*_pp_var_y[tid_];
t_2_ = t_0_*a_7_;
t_3_ = t_0_*_pp_var_y[tid_]-a_6_*t_1_;
t_4_ = a_3_*a_0_-a_1_*a_2_;
t_5_ = a_3_*_pp_var_a[tid_]-a_1_*_pp_var_b[tid_];
t_6_ = t_4_*a_3_;
t_7_ = t_4_*_pp_var_b[tid_]-a_2_*t_5_;
_pp_var_a[tid_] = t_5_/t_4_;
_pp_var_b[tid_] = t_7_/t_6_;
_pp_var_x[tid_] = t_1_/t_0_;
_pp_var_y[tid_] = t_3_/t_2_;
}
}
} // namespace
// Launch `init` to set the initial state values and then, when per-instance
// multiplicities are present, launch `multiply`.  The grid's y extent of 4
// selects the state variable inside `multiply` (indexed by blockIdx.y).
void mechanism_test1_kin_diff_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 4}, block_dim>>>(*p);
}
// Intentionally empty: this mechanism contributes no transmembrane current.
void mechanism_test1_kin_diff_gpu_compute_currents_(arb_mechanism_ppack* p) {}
// Launch `advance_state`: one thread per mechanism instance, 128 threads per
// block, grid sized to cover p->width instances.
void mechanism_test1_kin_diff_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
// Remaining ABI entry points are intentionally empty: the mechanism writes
// no ion data, has no post-event work and handles no deliverable events.
void mechanism_test1_kin_diff_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_test1_kin_diff_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_test1_kin_diff_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace testing
|
1785add8d49e0d9a2819bda5f74c54b0a368af9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
namespace caffe {
// Per-element entropy term: loss_data[i] = prob[i]^prob_pow * log(prob[i])
// (logs are precomputed in log_data).  Probabilities below `threshold`
// contribute 0, which keeps log-of-near-zero terms out of the sum.
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const Dtype* prob,
const Dtype* log_data, const Dtype threshold, const Dtype prob_pow, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(prob[index] < threshold){
loss_data[index] = Dtype(0);
}
else{
loss_data[index] = pow(prob[index], prob_pow) * log_data[index];
}
}
}
// Forward pass: scalar loss = -asum over all per-element terms above.
// During the warm-up phase (now_iteration_ < iterations_num_; the counter is
// advanced in Backward_gpu) the loss is forced to zero and nothing runs.
// bottom[0]'s diff buffer is reused as scratch for the per-element terms.
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(now_iteration_ < iterations_num_){
top[0]->mutable_cpu_data()[0] = Dtype(0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
caffe_gpu_log(data_num_ * label_num_, bottom_data, log_data);
int nthreads = label_num_ * data_num_;
Dtype loss;
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// NOTE(review): the cutoff is hard-coded to 1e-5 here while Backward_gpu
// uses the threshold_ member -- confirm this asymmetry is intentional.
hipLaunchKernelGGL(( ForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, log_data, Dtype(0.00001), prob_pow_, loss_data);
// caffe_gpu_asum sums absolute values; the sign flip yields the scalar loss.
caffe_gpu_asum(label_num_ * data_num_, loss_data, &loss);
loss = -loss;
top[0]->mutable_cpu_data()[0] = loss;
}
/*
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] < 0){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] == ignore_label){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
*/
// Gradient of the per-element entropy term w.r.t. prob:
// diff[i] = -(p^(pow-1) + pow * p^(pow-1) * log p), forced to 0 below
// `threshold`.  count[i] is always set to 1/label_num; the caller sums it to
// obtain the normalization denominator.  NOTE(review): data_num and
// ignore_label are unused in this variant (the commented-out version above
// did use them) -- candidates for removal if that version stays dead.
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
const Dtype prob_pow, Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else{
diff[index] = -(
pow(data[index], prob_pow - Dtype(1.0)) +
prob_pow * pow(data[index], prob_pow - Dtype(1.0)) * log_data[index]
);
}
}
}
// Backward pass: fills bottom[0]'s diff with the entropy gradient scaled by
// loss_weight_ / (sum of count values, i.e. the effective element count).
// The warm-up counter now_iteration_ is advanced only here, once per
// backward pass, until it reaches iterations_num_.
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if(now_iteration_ < iterations_num_){
now_iteration_++;
return;
}
//if (propagate_down[1]) {
// LOG(FATAL) << this->type()
// << " Layer cannot backpropagate to label inputs.";
//}
const Dtype* bottom_data = bottom[0]->gpu_data();
//const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// log(prob) computed during Forward_gpu; its diff buffer holds the counts.
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
Dtype* count = normalized_bottom_data_.mutable_gpu_diff();
int nthreads = data_num_ * label_num_;
if (propagate_down[0]) {
hipLaunchKernelGGL(( EntropyDiff<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, log_data,
threshold_, data_num_, ignore_label_, label_num_, prob_pow_, count, bottom_diff);
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
// Guard against dividing by zero when no element contributed.
count_num = count_num > 0 ? count_num : Dtype(1);
caffe_gpu_scal(nthreads, loss_weight_ / count_num, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
| 1785add8d49e0d9a2819bda5f74c54b0a368af9e.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
namespace caffe {
// Per-element entropy term: loss_data[i] = prob[i]^prob_pow * log(prob[i])
// (logs are precomputed in log_data).  Probabilities below `threshold`
// contribute 0, which keeps log-of-near-zero terms out of the sum.
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const Dtype* prob,
const Dtype* log_data, const Dtype threshold, const Dtype prob_pow, Dtype* loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(prob[index] < threshold){
loss_data[index] = Dtype(0);
}
else{
loss_data[index] = pow(prob[index], prob_pow) * log_data[index];
}
}
}
// Forward pass: scalar loss = -asum over all per-element terms above.
// During the warm-up phase (now_iteration_ < iterations_num_; the counter is
// advanced in Backward_gpu) the loss is forced to zero and nothing runs.
// bottom[0]'s diff buffer is reused as scratch for the per-element terms.
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(now_iteration_ < iterations_num_){
top[0]->mutable_cpu_data()[0] = Dtype(0);
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
caffe_gpu_log(data_num_ * label_num_, bottom_data, log_data);
int nthreads = label_num_ * data_num_;
Dtype loss;
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// NOTE(review): the cutoff is hard-coded to 1e-5 here while Backward_gpu
// uses the threshold_ member -- confirm this asymmetry is intentional.
ForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, log_data, Dtype(0.00001), prob_pow_, loss_data);
// caffe_gpu_asum sums absolute values; the sign flip yields the scalar loss.
caffe_gpu_asum(label_num_ * data_num_, loss_data, &loss);
loss = -loss;
top[0]->mutable_cpu_data()[0] = loss;
}
/*
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype* label, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] < 0){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else if(label[(index / data_num) * 2] == ignore_label){
count[index] = Dtype(0);
diff[index] = Dtype(0);
}
else{
diff[index] = -(Dtype(1) + log_data[index]);
}
}
}
*/
// Gradient of the per-element entropy term w.r.t. prob:
// diff[i] = -(p^(pow-1) + pow * p^(pow-1) * log p), forced to 0 below
// `threshold`.  count[i] is always set to 1/label_num; the caller sums it to
// obtain the normalization denominator.  NOTE(review): data_num and
// ignore_label are unused in this variant (the commented-out version above
// did use them) -- candidates for removal if that version stays dead.
template <typename Dtype>
__global__ void EntropyDiff(const int nthreads, const Dtype* data,
const Dtype* log_data, const Dtype threshold,
const int data_num, const int ignore_label, const int label_num,
const Dtype prob_pow, Dtype* count, Dtype* diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
count[index] = Dtype(1) / label_num;
if(data[index] < threshold){
diff[index] = Dtype(0);
}
else{
diff[index] = -(
pow(data[index], prob_pow - Dtype(1.0)) +
prob_pow * pow(data[index], prob_pow - Dtype(1.0)) * log_data[index]
);
}
}
}
// Backward pass: fills bottom[0]'s diff with the entropy gradient scaled by
// loss_weight_ / (sum of count values, i.e. the effective element count).
// The warm-up counter now_iteration_ is advanced only here, once per
// backward pass, until it reaches iterations_num_.
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if(now_iteration_ < iterations_num_){
now_iteration_++;
return;
}
//if (propagate_down[1]) {
// LOG(FATAL) << this->type()
// << " Layer cannot backpropagate to label inputs.";
//}
const Dtype* bottom_data = bottom[0]->gpu_data();
//const Dtype* bottom_label = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// log(prob) computed during Forward_gpu; its diff buffer holds the counts.
Dtype* log_data = normalized_bottom_data_.mutable_gpu_data();
Dtype* count = normalized_bottom_data_.mutable_gpu_diff();
int nthreads = data_num_ * label_num_;
if (propagate_down[0]) {
EntropyDiff<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, log_data,
threshold_, data_num_, ignore_label_, label_num_, prob_pow_, count, bottom_diff);
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
// Guard against dividing by zero when no element contributed.
count_num = count_num > 0 ? count_num : Dtype(1);
caffe_gpu_scal(nthreads, loss_weight_ / count_num, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
|
4b475aeb85e2dc13d91a26ac4abc41788b3c2eb5.hip | // !!! This is a file automatically generated by hipify!!!
//
// CUDA implementation of Sobel Edge Detect Filter
//
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 16
#define FILTER_WIDTH 3
#define FILTER_HEIGHT 3
using namespace std;
// Run Sobel Edge Detect Filter on GPU
// Sobel edge detection: one thread per interior pixel of a single-channel
// 8-bit image of size width x height (row-major).  The gradient magnitude is
// approximated by |Gx| + |Gy| and saturated to 255 before the 8-bit store.
// Border pixels (within FILTER_WIDTH/2 or FILTER_HEIGHT/2 of an edge) are
// left untouched.
__global__ void sobelFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // 3x3 Sobel kernels for the horizontal (Kx) and vertical (Ky) gradients.
    const float Kx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
    const float Ky[3][3] = {{1, 2, 1}, {0, 0, 0}, {-1, -2, -1}};
    // only threads inside image (with the full stencil in bounds) write results
    if ((x >= FILTER_WIDTH/2) && (x < (width - FILTER_WIDTH/2)) && (y >= FILTER_HEIGHT/2) && (y < (height - FILTER_HEIGHT/2)))
    {
        // Accumulate both gradients in a single pass over the neighborhood
        // (the original read every source pixel twice, once per gradient).
        float Gx = 0.0f;
        float Gy = 0.0f;
        for (int ky = -FILTER_HEIGHT/2; ky <= FILTER_HEIGHT/2; ky++) {
            for (int kx = -FILTER_WIDTH/2; kx <= FILTER_WIDTH/2; kx++) {
                float fl = srcImage[(y + ky) * width + (x + kx)];
                Gx += fl * Kx[ky + FILTER_HEIGHT/2][kx + FILTER_WIDTH/2];
                Gy += fl * Ky[ky + FILTER_HEIGHT/2][kx + FILTER_WIDTH/2];
            }
        }
        // Fix: clamp to the representable range before the 8-bit store; the
        // original converted unclamped floats to unsigned char, so magnitudes
        // above 255 wrapped (undefined behavior) and produced dark artifacts
        // on strong edges.
        float mag = fabsf(Gx) + fabsf(Gy);
        dstImage[y * width + x] = (unsigned char)(mag < 255.0f ? mag : 255.0f);
    }
}
// Host wrapper for the Sobel edge-detection kernel (HIP): uploads the input
// image, launches the kernel, copies the result back into `output`, and
// prints the kernel execution time measured with HIP events.
// Assumes `input` and `output` are single-channel 8-bit cv::Mat of equal size.
extern "C" void sobelFilter_GPU_wrapper(const cv::Mat& input, cv::Mat& output)
{
    // Events used to time the kernel execution.
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Byte counts (1 byte per pixel for a single 8-bit channel).
    const int inputSize = input.cols * input.rows;
    const int outputSize = output.cols * output.rows;
    unsigned char *d_input, *d_output;

    // Allocate device memory.
    hipMalloc<unsigned char>(&d_input, inputSize);
    hipMalloc<unsigned char>(&d_output, outputSize);

    // Copy data from OpenCV input image to device memory.
    hipMemcpy(d_input, input.ptr(), inputSize, hipMemcpyHostToDevice);

    // Launch configuration covering the whole image.
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((output.cols + block.x - 1)/block.x, (output.rows + block.y - 1)/block.y);

    // Time only the kernel, not the transfers.
    hipEventRecord(start);
    hipLaunchKernelGGL(( sobelFilter), dim3(grid), dim3(block), 0, 0, d_input, d_output, output.cols, output.rows);
    hipEventRecord(stop);

    // hipMemcpy on the default stream synchronizes with the kernel, so the
    // result is complete when this copy returns.
    hipMemcpy(output.ptr(), d_output, outputSize, hipMemcpyDeviceToHost);

    // Free the device memory.
    hipFree(d_input);
    hipFree(d_output);

    hipEventSynchronize(stop);
    float milliseconds = 0;
    // Calculate elapsed time in millisecond.
    hipEventElapsedTime(&milliseconds, start, stop);
    cout<< "\nProcessing time on GPU (ms): " << milliseconds << "\n";

    // Fix: the events were previously leaked (never destroyed).
    hipEventDestroy(start);
    hipEventDestroy(stop);
}
| 4b475aeb85e2dc13d91a26ac4abc41788b3c2eb5.cu | //
// CUDA implementation of Sobel Edge Detect Filter
//
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#define BLOCK_SIZE 16
#define FILTER_WIDTH 3
#define FILTER_HEIGHT 3
using namespace std;
// Run Sobel Edge Detect Filter on GPU.
// Expects a 2D launch with one thread per pixel of a single-channel 8-bit
// image stored row-major (width x height).
__global__ void sobelFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;

    // Sobel convolution kernels (inner braces added: the original flat
    // initializer was legal but warning-prone).
    const float Kx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
    const float Ky[3][3] = {{1, 2, 1}, {0, 0, 0}, {-1, -2, -1}};

    // Threads outside the image do nothing.
    if (x >= (int)width || y >= (int)height)
        return;

    // Interior pixels: full 3x3 neighborhood is available.
    if ((x >= FILTER_WIDTH/2) && (x < (int)width - FILTER_WIDTH/2)
        && (y >= FILTER_HEIGHT/2) && (y < (int)height - FILTER_HEIGHT/2))
    {
        // Accumulate both gradients in a single pass over the neighborhood.
        float Gx = 0;
        float Gy = 0;
        for (int ky = -FILTER_HEIGHT/2; ky <= FILTER_HEIGHT/2; ky++) {
            for (int kx = -FILTER_WIDTH/2; kx <= FILTER_WIDTH/2; kx++) {
                const float fl = srcImage[(y+ky)*width + (x+kx)];
                Gx += fl * Kx[ky + FILTER_HEIGHT/2][kx + FILTER_WIDTH/2];
                Gy += fl * Ky[ky + FILTER_HEIGHT/2][kx + FILTER_WIDTH/2];
            }
        }
        // |Gx| + |Gy| approximates the gradient magnitude; it can exceed 255,
        // so clamp before narrowing. The original implicit unsigned-char
        // conversion wrapped around on strong gradients.
        const float mag = fabsf(Gx) + fabsf(Gy);
        dstImage[y*width + x] = (unsigned char)(mag > 255.0f ? 255.0f : mag);
    }
    else
    {
        // Border pixels were previously left uninitialized in dstImage;
        // write a defined value instead.
        dstImage[y*width + x] = 0;
    }
}
// Host wrapper for the Sobel edge-detection kernel (CUDA): uploads the input
// image, launches the kernel, copies the result back into `output`, and
// prints the kernel execution time measured with CUDA events.
// Assumes `input` and `output` are single-channel 8-bit cv::Mat of equal size.
extern "C" void sobelFilter_GPU_wrapper(const cv::Mat& input, cv::Mat& output)
{
    // Events used to time the kernel execution.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Byte counts (1 byte per pixel for a single 8-bit channel).
    const int inputSize = input.cols * input.rows;
    const int outputSize = output.cols * output.rows;
    unsigned char *d_input, *d_output;

    // Allocate device memory.
    cudaMalloc<unsigned char>(&d_input, inputSize);
    cudaMalloc<unsigned char>(&d_output, outputSize);

    // Copy data from OpenCV input image to device memory.
    cudaMemcpy(d_input, input.ptr(), inputSize, cudaMemcpyHostToDevice);

    // Launch configuration covering the whole image.
    const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((output.cols + block.x - 1)/block.x, (output.rows + block.y - 1)/block.y);

    // Time only the kernel, not the transfers.
    cudaEventRecord(start);
    sobelFilter<<<grid, block>>>(d_input, d_output, output.cols, output.rows);
    cudaEventRecord(stop);

    // cudaMemcpy on the default stream synchronizes with the kernel, so the
    // result is complete when this copy returns.
    cudaMemcpy(output.ptr(), d_output, outputSize, cudaMemcpyDeviceToHost);

    // Free the device memory.
    cudaFree(d_input);
    cudaFree(d_output);

    cudaEventSynchronize(stop);
    float milliseconds = 0;
    // Calculate elapsed time in millisecond.
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout<< "\nProcessing time on GPU (ms): " << milliseconds << "\n";

    // Fix: the events were previously leaked (never destroyed).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
e33100a63e54613df63abe514f95d17263b871d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CascadedCompressionGPU.h"
#include "BitPackGPU.h"
#include "CascadedMetadata.h"
#include "CascadedMetadataOnGPU.h"
#include "Check.h"
#include "CudaUtils.h"
#include "DeltaGPU.h"
#include "RunLengthEncodeGPU.h"
#include "TempSpaceBroker.h"
#include "nvcomp.h"
#include "nvcomp.hpp"
#include "type_macros.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <limits>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
namespace nvcomp
{
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
namespace
{
// Single-thread kernel (enforced by the asserts) that reads through a
// device-resident pointer-to-pointer: *outValue = **ref. Used when the
// source address is itself only known on the device.
template <typename T>
__global__ void dereferenceDevice(T* const outValue, T* const* const ref)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *outValue = **ref;
}
// Single-thread kernel that publishes the addresses of the bit-packing
// header fields (min-value slot and numBits byte) into device-side pointer
// slots so BitPackGPU::compress can write into the header directly.
template <typename T>
__global__ void configureBitPackHeader(
    CascadedMetadata::Header* const header,
    T** const minValueDevicePtr,
    unsigned char** const numBitsDevicePtr)
{
  // setup the header and pointers into it
  assert(blockIdx.x == 0);
  assert(threadIdx.x == 0);

  *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header);
  *numBitsDevicePtr = &header->numBits;
}
/**
 * @brief Asynchronously perform a device to device copy, where the destination
 * address and number of elements to copy are stored on the device.
 *
 * @tparam T The type of element to copy.
 * @tparam BLOCK_SIZE The size of each thread block.
 * @param destDPtr The pointer to the destination address to copy elements to,
 * stored on the device.
 * @param src The source address to copy elements from.
 * @param numElementsDPtr The number of elements to copy, stored on the device.
 */
template <typename T, int BLOCK_SIZE>
__global__ void deferredCopy(
    T** const destDPtr, const T* const src, const size_t* const numElementsDPtr)
{
  assert(blockDim.x == BLOCK_SIZE);

  T* const dest = *destDPtr;
  const size_t num = *numElementsDPtr;

  // Grid-stride loop. The index must be size_t: the element count is a
  // size_t, and the original `int` index overflowed (undefined behavior)
  // for copies larger than INT_MAX elements. The stride/base products are
  // also widened to 64 bits before multiplication.
  const size_t stride = static_cast<size_t>(gridDim.x) * BLOCK_SIZE;
  for (size_t idx = static_cast<size_t>(blockIdx.x) * BLOCK_SIZE + threadIdx.x;
       idx < num; idx += stride) {
    dest[idx] = src[idx];
  }
}
/**
 * @brief Asynchronously perform a device to device copy, where the number of
 * elements to copy is stored on the device.
 *
 * @tparam T The type of element to copy.
 * @tparam BLOCK_SIZE The size of each thread block to use.
 * @param dest The destination address to copy to.
 * @param src The source address to copy from.
 * @param numElementsDPtr The number of elements to copy, stored on the device.
 */
template <typename T, int BLOCK_SIZE>
__global__ void deferredCopy(
    T* const dest, const T* const src, const size_t* const numElementsDPtr)
{
  assert(blockDim.x == BLOCK_SIZE);

  const size_t num = *numElementsDPtr;

  // Grid-stride loop with a size_t index: the original `int` index
  // overflowed (undefined behavior) for copies larger than INT_MAX elements.
  const size_t stride = static_cast<size_t>(gridDim.x) * BLOCK_SIZE;
  for (size_t idx = static_cast<size_t>(blockIdx.x) * BLOCK_SIZE + threadIdx.x;
       idx < num; idx += stride) {
    dest[idx] = src[idx];
  }
}
// Single-thread kernel: *dst = src + *offset, where the offset (in elements
// of T) lives on the device. Lets a pointer be advanced without a host sync.
template <typename T>
__global__ void
offsetPointerAsync(T* const src, T** const dst, const size_t* const offset)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *dst = src + *offset;
}
// Single-thread kernel: round the device-resident byte offset up to a
// multiple of sizeof(size_t), store it back if it changed, and publish
// `src + alignedOffset` into *dst.
__global__ void offsetAndAlignPointerAsync(
    void* const src, void** const dst, size_t* const offset)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  // Align the current offset; persist only when it actually moved.
  const size_t rawOffset = *offset;
  const size_t alignedOffset = roundUpTo(rawOffset, sizeof(size_t));
  if (rawOffset != alignedOffset) {
    *offset = alignedOffset;
  }

  *dst = static_cast<char*>(src) + alignedOffset;
}
// Single-thread kernel that publishes the addresses of the three temp-space
// arrays (values, runs, delta) into device-side pointer slots, so downstream
// kernels can consume them without a host synchronization.
template <typename VALUE, typename RUN>
__global__ void configTempSpacePointers(
    VALUE* const vals,
    VALUE** const valsPtr,
    RUN* const runs,
    RUN** const runsPtr,
    VALUE* const delta,
    VALUE** const deltaPtr)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *valsPtr = vals;
  *runsPtr = runs;
  *deltaPtr = delta;
}
// Single-thread kernel: advance the device-resident output offset past a
// bit-packed layer -- length*numBits bits, rounded up to whole bytes and
// then to a multiple of sizeof(T).
template <typename T>
__global__ void increaseOffsetByBitPacking(
    size_t* const offsetDevice, const CascadedMetadata::Header* const header)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  const size_t temp_size = roundUpTo(
      roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T));
  *offsetDevice += temp_size;
}
// Single-thread kernel: advance the device-resident output offset past an
// unpacked layer of header->length elements of T.
template <typename T>
__global__ void increaseOffsetByRaw(
    size_t* const offsetDevice, const CascadedMetadata::Header* const header)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  const size_t temp_size = header->length * sizeof(T);
  *offsetDevice += temp_size;
}
/**
 * @brief This kernel allows copying to the device from a stack variable
 * asynchronously. The value travels as a kernel argument, so no host
 * memory needs to outlive the launch.
 *
 * @tparam T The type of variable to copy (must be POD).
 * @param hostValue The value to copy.
 * @param deviceValue The location to copy to.
 */
template <typename T>
__global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue)
{
  static_assert(std::is_pod<T>::value, "Must be a POD to do async copy.");
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *deviceValue = hostValue;
}
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
// Verify that `ptr` is aligned to `alignment` bytes, throwing a
// std::runtime_error that includes the offending address if it is not.
void checkAlignmentOf(void* const ptr, const size_t alignment)
{
  // std::align succeeds without moving the pointer iff `ptr` is already
  // suitably aligned.
  void* candidate = ptr;
  size_t room = alignment;
  const bool wellAligned
      = std::align(alignment, alignment, candidate, room) != nullptr
        && candidate == ptr;
  if (wellAligned) {
    return;
  }

  std::ostringstream addr;
  addr << ptr;
  throw std::runtime_error(
      "Incorrectly aligned buffer: " + addr.str() + ", should be aligned to "
      + std::to_string(alignment));
}
/**
 * @brief This copies the input to the device from a stack variable
 * asynchronously. While this is inefficient, it is better than synchronizing or
 * pinning the variable.
 *
 * @tparam T The type of variable to copy.
 * @param value The value to copy (passed as a kernel argument).
 * @param destination The device location to copy to.
 * @param stream The stream to enqueue the copy on.
 *
 * @throw std::runtime_error If the kernel fails to launch.
 */
template <typename T>
void asyncPODCopy(const T& value, T* const destination, hipStream_t stream)
{
  hipLaunchKernelGGL(( asyncPODCopyKernel), dim3(dim3(1)), dim3(dim3(1)), 0, stream, value, destination);

  // Only launch-time errors are caught here; execution errors surface at the
  // next synchronizing call on `stream`.
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    throw std::runtime_error(
        "Failed to launch asyncPODCopyKernel "
        "kernel: "
        + std::to_string(err));
  }
}
/**
 * @brief Bit pack or copy the elements to an output address.
 *
 * @tparam T The type of element to pack/copy.
 * @param headerDPtr The header, stored on the device.
 * @param temp_ptr The temporary workspace allocated (on the device).
 * @param temp_bytes The size of the temporary workspace.
 * @param outputDPtr The pointer to the location to output the elements to (on
 * the device), stored on the device.
 * @param input The input elements (on the device).
 * @param numElementsDPtr The pointer to the number of elements, stored on the
 * device.
 * @param maxNum The maximum number of elements.
 * @param offsetDPtr The current offset output, to be increased by
 * the number of bytes written by this function.
 * @param bitPacking Whether or not to perform bitpacking on this data.
 * @param stream The stream to asynchronously perform work on.
 */
template <typename T>
void packToOutput(
    CascadedMetadata::Header* const headerDPtr,
    void* const temp_ptr,
    const size_t temp_bytes,
    void** const outputDPtr,
    const T* const input,
    const size_t* const numElementsDPtr,
    const size_t maxNum,
    size_t* const offsetDPtr,
    const bool bitPacking,
    hipStream_t stream)
{
  // Record the element count of this layer in its header (device to device).
  CudaUtils::copy_async(
      &(headerDPtr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
  if (bitPacking) {
    // Carve device-side pointer slots out of the front of the workspace;
    // configureBitPackHeader fills them with addresses inside the header.
    TempSpaceBroker tempSpace(temp_ptr, temp_bytes);
    void** bitPackOutputPtr;
    void** minValueDevicePtr;
    unsigned char** numBitsDevicePtr;
    tempSpace.reserve(&bitPackOutputPtr, 1);
    tempSpace.reserve(&minValueDevicePtr, 1);
    tempSpace.reserve(&numBitsDevicePtr, 1);

    hipLaunchKernelGGL(( configureBitPackHeader), dim3(1), dim3(1), 0, stream,
        headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr);

    // Remaining workspace goes to the bit-pack kernel itself.
    void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1);
    const size_t packTempSize
        = temp_bytes
          - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr));

    BitPackGPU::compress(
        packTemp,
        packTempSize,
        getnvcompType<T>(),
        outputDPtr,
        input,
        numElementsDPtr,
        maxNum,
        minValueDevicePtr,
        numBitsDevicePtr,
        stream);

    hipLaunchKernelGGL(( increaseOffsetByBitPacking<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr);
  } else {
    // No bit packing: raw deferred copy of the elements, then advance the
    // offset by the unpacked byte size.
    constexpr const int BLOCK_SIZE = 512;
    const dim3 grid(::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE)));
    const dim3 block(BLOCK_SIZE);
    hipLaunchKernelGGL(( deferredCopy<T, BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream,
        reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr);
    hipLaunchKernelGGL(( increaseOffsetByRaw<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr);
  }
}
/**
 * @brief Compute a worst-case (upper bound) compressed output size for the
 * configured cascade, assuming no layer actually shrinks the data. Builds a
 * throwaway layer config mirroring the compression pipeline, then sizes the
 * output as metadata + one run array per RLE + one value array, each rounded
 * up to max alignment.
 */
template <typename valT, typename runT>
void generateTypedOutputUpperBound(
    const void* const /*in_ptr*/,
    const size_t in_bytes,
    const nvcompCascadedFormatOpts* const opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    size_t* const out_bytes)
{
  if (temp_bytes > 0) {
    CHECK_NOT_NULL(temp_ptr);
    // only check if its non-null
    checkAlignmentOf(temp_ptr, sizeof(size_t));
  }

  CascadedMetadata metadata(*opts, getnvcompType<valT>(), in_bytes, 0);

  const int numRLEs = metadata.getNumRLEs();
  const int numDeltas = metadata.getNumDeltas();
  const bool bitPacking = metadata.useBitPacking();

  // assume single chunk for now
  // TODO: implement a multi-chunk version
  const size_t outputSize = in_bytes / sizeof(valT);
  assert(outputSize * sizeof(valT) == in_bytes);
  int vals_id = 0;

  // initialize config
  nvcompType_t type = getnvcompType<valT>();
  nvcompIntConfig_t* config = createConfig(&metadata);

  // First past - set layers assume nothing actual compresses.
  // TODO: This will be a
  // gross over estimation of the output size, but the better option would
  // be to probably just assume 1:1 output/input, and error out during
  // compression if we fail to achieve that (maybe just set RLE, Delta, and BP
  // to 0, and do a memcpy, so that user's wont have to handle the error case
  // in their code).

  // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally
  // having bit packing applied
  const int numSteps = ::max(numRLEs, numDeltas);
  for (int r = numSteps - 1; r >= 0; r--) {
    const int inputId = vals_id;

    if (numSteps - r - 1 < numRLEs) {
      // This step includes an RLE: it produces a run layer and a value layer.
      const int runId = ++vals_id;
      const int valId = ++vals_id;

      nvcompConfigAddRLE_BP(
          config,
          inputId,
          outputSize,
          valId,
          type,
          bitPacking,
          runId,
          type,
          bitPacking);

      // store vals (apply delta if necessary)
      if (numRLEs - 1 - r < numDeltas) {
        const int deltaId = ++vals_id;
        if (r == 0) {
          nvcompConfigAddDelta_BP(
              config, valId, outputSize, deltaId, type, bitPacking);
        } else {
          nvcompConfigAddDelta_BP(
              config,
              deltaId,
              outputSize,
              valId,
              type,
              0); // no bitpacking when delta is used as an intermediate step
        }
      }
    } else {
      // RLE-less step
      const int deltaId = ++vals_id;
      if (r == 0) {
        nvcompConfigAddDelta_BP(
            config, inputId, outputSize, deltaId, type, bitPacking);
      } else {
        nvcompConfigAddDelta_BP(
            config,
            deltaId,
            outputSize,
            inputId,
            type,
            0); // no bitpacking when delta is used as an intermediate step
      }
    }
  }

  // The config was only needed to validate the layer graph; discard it.
  destroyConfig(config);

  // we will abort compression if we can't fit into out_bytes.
  const size_t serializedMetadataSize
      = CascadedMetadataOnGPU::getSerializedSizeOf(metadata);

  // This may be overkill, as most datatypes we use are aligned to size_t,
  // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a
  // smart compiler could potentially generate instructions for some of our
  // structure that at 16-byte aligned.
  const size_t wordSize = alignof(std::max_align_t);

  // space for metadata, each set of 'runs', one set of 'vals'.
  *out_bytes = roundUpTo(serializedMetadataSize, wordSize)
               + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs
               + roundUpTo(sizeof(valT) * outputSize, wordSize);
}
/**
 * @brief Run the full cascaded compression pipeline (interleaved RLE/Delta
 * layers, optional final bit packing) over a single chunk of input, writing
 * the serialized metadata followed by each packed layer into out_ptr. All
 * sizes and offsets are tracked on the device so the whole pipeline is
 * enqueued asynchronously on `stream`; *out_bytes receives the final
 * compressed size via an async device-to-host copy (valid only after the
 * stream is synchronized by the caller).
 */
template <typename valT, typename runT>
void compressTypedAsync(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompCascadedFormatOpts* const format_opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    void* const out_ptr,
    size_t* const out_bytes,
    hipStream_t stream)
{
  const nvcompType_t type = getnvcompType<valT>();
  CascadedMetadata metadata(*format_opts, type, in_bytes, 0);

  const int numRLEs = metadata.getNumRLEs();
  const int numDeltas = metadata.getNumDeltas();
  const bool bitPacking = metadata.useBitPacking();

  // assume single chunk for now
  // TODO: implement a multi-chunk version
  const size_t maxNum = in_bytes / sizeof(valT);
  int vals_id = 0;

  // Device-resident running byte offset into out_ptr.
  TempSpaceBroker tempSpace(temp_ptr, temp_bytes);
  size_t* offsetDevice;
  tempSpace.reserve(&offsetDevice, 1);

  CascadedMetadataOnGPU metadataOnGPU(out_ptr, *out_bytes);
  metadataOnGPU.copyToGPU(metadata, offsetDevice, stream);

  // Intermediate layer buffers, only needed when at least one RLE or Delta
  // layer is configured.
  valT* vals_delta = nullptr;
  valT* vals_output = nullptr;
  runT* runs_output = nullptr;
  if (numRLEs > 0 || numDeltas > 0) {
    tempSpace.reserve(&vals_output, maxNum);
    if (numRLEs > 0) {
      tempSpace.reserve(&runs_output, maxNum);
    }
    tempSpace.reserve(&vals_delta, maxNum);
  }

  size_t* numRunsDevice;
  size_t* outputSizePtr;
  tempSpace.reserve(&numRunsDevice, 1);
  tempSpace.reserve(&outputSizePtr, 1);

  // Device-side pointer slots populated by configTempSpacePointers below.
  runT** runs_output_ptr;
  valT** vals_output_ptr;
  valT** vals_delta_ptr;
  tempSpace.reserve(&runs_output_ptr, 1);
  tempSpace.reserve(&vals_output_ptr, 1);
  tempSpace.reserve(&vals_delta_ptr, 1);

  void** bit_out_ptr;
  tempSpace.reserve(&bit_out_ptr, 1);

  hipError_t* statusDevice;
  tempSpace.reserve(&statusDevice, 1);

  hipLaunchKernelGGL(( configTempSpacePointers), dim3(1), dim3(1), 0, stream,
      vals_output,
      vals_output_ptr,
      runs_output,
      runs_output_ptr,
      vals_delta,
      vals_delta_ptr);

  // Second pass - perform compression and store in the memory allocated above.

  // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally
  // having bit packing applied
  const int numSteps = ::max(numRLEs, numDeltas);
  for (int r = numSteps - 1; r >= 0; r--) {
    int nextValId;
    const bool firstLayer = r == ::max(numRLEs - 1, numDeltas - 1);
    // The first layer reads the user input; later layers read the previous
    // layer's delta buffer.
    const valT* const vals_input
        = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta;

    if (numSteps - r - 1 < numRLEs) {
      const int runId = ++vals_id;
      const int valId = ++vals_id;

      // rle always first
      if (firstLayer) {
        RunLengthEncodeGPU::compress(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            vals_output,
            getnvcompType<runT>(),
            runs_output,
            numRunsDevice,
            vals_input,
            maxNum,
            stream);
      } else {
        // Downstream variant: the input length is only known on the device.
        RunLengthEncodeGPU::compressDownstream(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            (void**)vals_output_ptr,
            getnvcompType<runT>(),
            (void**)runs_output_ptr,
            numRunsDevice,
            vals_input,
            outputSizePtr,
            maxNum,
            stream);
      }

      // Both the value and run layers have numRuns elements.
      CascadedMetadata::Header* const valHdr
          = metadataOnGPU.getHeaderLocation(valId);
      CudaUtils::copy_async(
          &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);

      CascadedMetadata::Header* const runHdr
          = metadataOnGPU.getHeaderLocation(runId);
      CudaUtils::copy_async(
          &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);

      // store vals (apply delta if necessary)
      if (numRLEs - 1 - r < numDeltas) {
        DeltaGPU::compress(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            (void**)vals_delta_ptr,
            vals_output,
            numRunsDevice,
            maxNum,
            stream);

        const int id = ++vals_id;
        nextValId = id;

        CascadedMetadata::Header* const hdr
            = metadataOnGPU.getHeaderLocation(id);
        CudaUtils::copy_async(
            &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
      } else {
        // No delta on this step: move the RLE values into the delta buffer
        // so the next iteration's input pointer is uniform.
        constexpr const int COPY_BLOCK_SIZE = 512;
        const dim3 grid(::min(
            4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE))));
        const dim3 block(COPY_BLOCK_SIZE);
        hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream,
            vals_delta, vals_output, numRunsDevice);

        nextValId = valId;
      }

      hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream,
          out_ptr, bit_out_ptr, offsetDevice);
      metadataOnGPU.saveOffset(runId, offsetDevice, stream);

      // pack runs into bytes
      packToOutput(
          metadataOnGPU.getHeaderLocation(runId),
          tempSpace.next(),
          tempSpace.spaceLeft(),
          bit_out_ptr,
          runs_output,
          numRunsDevice,
          maxNum,
          offsetDevice,
          bitPacking,
          stream);
    } else {
      // Delta-only step: the element count comes from the previous layer (or
      // is maxNum on the first layer).
      if (!firstLayer) {
        CudaUtils::copy_async(
            numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream);
      } else {
        CudaUtils::copy_async(
            numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream);
      }

      // No RLE
      DeltaGPU::compress(
          tempSpace.next(),
          tempSpace.spaceLeft(),
          getnvcompType<valT>(),
          (void**)vals_output_ptr,
          vals_input,
          numRunsDevice,
          maxNum,
          stream);

      // we need to copy the delta to final delta buffer
      {
        constexpr const int COPY_BLOCK_SIZE = 512;
        const dim3 grid(::min(
            4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE))));
        const dim3 block(COPY_BLOCK_SIZE);
        hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream,
            vals_delta, vals_output, numRunsDevice);
      }

      const int id = ++vals_id;
      nextValId = id;

      CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id);
      CudaUtils::copy_async(
          &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
    }

    if (r == 0) {
      // Final step: emit the value layer to the output.
      hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream,
          out_ptr, bit_out_ptr, offsetDevice);
      metadataOnGPU.saveOffset(nextValId, offsetDevice, stream);

      // pack runs into bytes
      packToOutput(
          metadataOnGPU.getHeaderLocation(nextValId),
          tempSpace.next(),
          tempSpace.spaceLeft(),
          bit_out_ptr,
          vals_delta,
          numRunsDevice,
          maxNum,
          offsetDevice,
          bitPacking,
          stream);
    } else {
      // update current RLE size
      CudaUtils::copy_async(
          outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
    }
  }

  // If there are no RLEs or Deltas, we will do a single BP step.
  if (numRLEs == 0 && numDeltas == 0) {
    const int nextValId = ++vals_id;
    const valT* const vals_input = static_cast<const valT*>(in_ptr);

    CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream);

    hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream,
        out_ptr, bit_out_ptr, offsetDevice);
    metadataOnGPU.saveOffset(nextValId, offsetDevice, stream);

    // pack runs into bytes
    packToOutput(
        metadataOnGPU.getHeaderLocation(nextValId),
        tempSpace.next(),
        tempSpace.spaceLeft(),
        bit_out_ptr,
        vals_input,
        numRunsDevice,
        maxNum,
        offsetDevice,
        bitPacking,
        stream);
  }

  // async copy output
  metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream);
  CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream);
}
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
/**
 * @brief Compute the temporary workspace size required by compressAsync:
 * the maximum of the per-kernel scratch requirements (they run one at a
 * time, so only the max is needed) plus room for the intermediate
 * value/run/delta buffers and a small fixed overhead for device-resident
 * counters and pointer slots.
 */
void nvcompCascadedCompressionGPU::computeWorkspaceSize(
    const void* /*in_ptr*/,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const opts,
    size_t* const temp_bytes)
{
  size_t kernelBytes = 0;

  // get at least enough for intermediate gpu values
  size_t ioBytes = 1024;

  const size_t numIn = in_bytes / sizeOfnvcompType(in_type);

  const nvcompType_t runType = selectRunsType(numIn);

  if (opts->use_bp) {
    // max of runs and values
    kernelBytes = ::max(
        kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type));
    kernelBytes = ::max(
        kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType));
  }

  if (opts->num_deltas > 0) {
    kernelBytes = ::max(
        kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type));
  }

  if (opts->num_RLEs > 0) {
    kernelBytes = ::max(
        kernelBytes,
        RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType));
    // two value-sized buffers plus one run buffer
    ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType);
  } else if (opts->num_deltas > 0) {
    ioBytes += 2 * in_bytes;
  }

  *temp_bytes = kernelBytes + ioBytes;
}
/**
 * @brief Public entry point: validate arguments and dispatch to the typed
 * generateTypedOutputUpperBound for the (value type, run-count type) pair.
 *
 * @throw Whatever CHECK_NOT_NULL / checkAlignmentOf throw on invalid input.
 */
void nvcompCascadedCompressionGPU::generateOutputUpperBound(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    size_t* const out_bytes)
{
  CHECK_NOT_NULL(in_ptr);
  CHECK_NOT_NULL(opts);
  if (temp_bytes > 0) {
    CHECK_NOT_NULL(temp_ptr);
  }
  CHECK_NOT_NULL(out_bytes);

  // The run-count type is chosen based on the number of input elements.
  const nvcompType_t countType
      = selectRunsType(in_bytes / sizeOfnvcompType(in_type));

  NVCOMP_TYPE_TWO_SWITCH(
      in_type,
      countType,
      generateTypedOutputUpperBound,
      in_ptr,
      in_bytes,
      opts,
      temp_ptr,
      temp_bytes,
      out_bytes);
}
/**
 * @brief Public entry point: validate arguments (including size_t alignment
 * of the output and temp buffers, which the pipeline relies on) and dispatch
 * to the typed compressTypedAsync for the (value type, run-count type) pair.
 * Work is enqueued on `stream`; *out_bytes is written asynchronously.
 */
void nvcompCascadedCompressionGPU::compressAsync(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const cascadedOpts,
    void* const temp_ptr,
    const size_t temp_bytes,
    void* const out_ptr,
    size_t* const out_bytes,
    hipStream_t stream)
{
  CHECK_NOT_NULL(in_ptr);
  CHECK_NOT_NULL(cascadedOpts);
  CHECK_NOT_NULL(temp_ptr);
  CHECK_NOT_NULL(out_ptr);
  CHECK_NOT_NULL(out_bytes);

  checkAlignmentOf(out_ptr, sizeof(size_t));
  checkAlignmentOf(temp_ptr, sizeof(size_t));

  // The run-count type is chosen based on the number of input elements.
  const nvcompType_t countType
      = selectRunsType(in_bytes / sizeOfnvcompType(in_type));

  NVCOMP_TYPE_TWO_SWITCH(
      in_type,
      countType,
      compressTypedAsync,
      in_ptr,
      in_bytes,
      cascadedOpts,
      temp_ptr,
      temp_bytes,
      out_ptr,
      out_bytes,
      stream);
}
} // namespace nvcomp
| e33100a63e54613df63abe514f95d17263b871d1.cu | /*
* Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CascadedCompressionGPU.h"
#include "BitPackGPU.h"
#include "CascadedMetadata.h"
#include "CascadedMetadataOnGPU.h"
#include "Check.h"
#include "CudaUtils.h"
#include "DeltaGPU.h"
#include "RunLengthEncodeGPU.h"
#include "TempSpaceBroker.h"
#include "nvcomp.h"
#include "nvcomp.hpp"
#include "type_macros.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <limits>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
namespace nvcomp
{
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
namespace
{
// Single-thread kernel (enforced by the asserts) that reads through a
// device-resident pointer-to-pointer: *outValue = **ref. Used when the
// source address is itself only known on the device.
template <typename T>
__global__ void dereferenceDevice(T* const outValue, T* const* const ref)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *outValue = **ref;
}
// Single-thread kernel that publishes the addresses of the bit-packing
// header fields (min-value slot and numBits byte) into device-side pointer
// slots so BitPackGPU::compress can write into the header directly.
template <typename T>
__global__ void configureBitPackHeader(
    CascadedMetadata::Header* const header,
    T** const minValueDevicePtr,
    unsigned char** const numBitsDevicePtr)
{
  // setup the header and pointers into it
  assert(blockIdx.x == 0);
  assert(threadIdx.x == 0);

  *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header);
  *numBitsDevicePtr = &header->numBits;
}
/**
 * @brief Asynchronously perform a device to device copy, where the destination
 * address and number of elements to copy are stored on the device.
 *
 * @tparam T The type of element to copy.
 * @tparam BLOCK_SIZE The size of each thread block.
 * @param destDPtr The pointer to the destination address to copy elements to,
 * stored on the device.
 * @param src The source address to copy elements from.
 * @param numElementsDPtr The number of elements to copy, stored on the device.
 */
template <typename T, int BLOCK_SIZE>
__global__ void deferredCopy(
    T** const destDPtr, const T* const src, const size_t* const numElementsDPtr)
{
  assert(blockDim.x == BLOCK_SIZE);

  T* const dest = *destDPtr;
  const size_t num = *numElementsDPtr;

  // Grid-stride loop. The index must be size_t: the element count is a
  // size_t, and the original `int` index overflowed (undefined behavior)
  // for copies larger than INT_MAX elements. The stride/base products are
  // also widened to 64 bits before multiplication.
  const size_t stride = static_cast<size_t>(gridDim.x) * BLOCK_SIZE;
  for (size_t idx = static_cast<size_t>(blockIdx.x) * BLOCK_SIZE + threadIdx.x;
       idx < num; idx += stride) {
    dest[idx] = src[idx];
  }
}
/**
 * @brief Asynchronously perform a device to device copy, where the number of
 * elements to copy is stored on the device.
 *
 * @tparam T The type of element to copy.
 * @tparam BLOCK_SIZE The size of each thread block to use.
 * @param dest The destination address to copy to.
 * @param src The source address to copy from.
 * @param numElementsDPtr The number of elements to copy, stored on the device.
 */
template <typename T, int BLOCK_SIZE>
__global__ void deferredCopy(
    T* const dest, const T* const src, const size_t* const numElementsDPtr)
{
  assert(blockDim.x == BLOCK_SIZE);

  const size_t num = *numElementsDPtr;

  // Grid-stride loop with a size_t index: the original `int` index
  // overflowed (undefined behavior) for copies larger than INT_MAX elements.
  const size_t stride = static_cast<size_t>(gridDim.x) * BLOCK_SIZE;
  for (size_t idx = static_cast<size_t>(blockIdx.x) * BLOCK_SIZE + threadIdx.x;
       idx < num; idx += stride) {
    dest[idx] = src[idx];
  }
}
// Single-thread kernel: *dst = src + *offset, where the offset (in elements
// of T) lives on the device. Lets a pointer be advanced without a host sync.
template <typename T>
__global__ void
offsetPointerAsync(T* const src, T** const dst, const size_t* const offset)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  *dst = src + *offset;
}
// Single-thread kernel: round the device-resident byte offset up to a
// multiple of sizeof(size_t), store it back if it changed, and publish
// `src + alignedOffset` into *dst.
__global__ void offsetAndAlignPointerAsync(
    void* const src, void** const dst, size_t* const offset)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);

  // Align the current offset; persist only when it actually moved.
  const size_t rawOffset = *offset;
  const size_t alignedOffset = roundUpTo(rawOffset, sizeof(size_t));
  if (rawOffset != alignedOffset) {
    *offset = alignedOffset;
  }

  *dst = static_cast<char*>(src) + alignedOffset;
}
/**
 * @brief Single-thread kernel: publish the temp-space buffer addresses into
 * device-visible pointer slots so later kernels can load them without any
 * host round trip.
 */
template <typename VALUE, typename RUN>
__global__ void configTempSpacePointers(
    VALUE* const vals,
    VALUE** const valsPtr,
    RUN* const runs,
    RUN** const runsPtr,
    VALUE* const delta,
    VALUE** const deltaPtr)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);
  // The three stores are independent of each other.
  *deltaPtr = delta;
  *runsPtr = runs;
  *valsPtr = vals;
}
/**
 * @brief Single-thread kernel: advance the device-side output offset by the
 * bit-packed size of a layer -- header->length values at header->numBits
 * bits each, rounded up to whole bytes and then to a multiple of sizeof(T).
 */
template <typename T>
__global__ void increaseOffsetByBitPacking(
    size_t* const offsetDevice, const CascadedMetadata::Header* const header)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);
  const size_t temp_size = roundUpTo(
      roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T));
  *offsetDevice += temp_size;
}
/**
 * @brief Single-thread kernel: advance the device-side output offset by the
 * unpacked (raw) byte size of a layer's data.
 */
template <typename T>
__global__ void increaseOffsetByRaw(
    size_t* const offsetDevice, const CascadedMetadata::Header* const header)
{
  assert(threadIdx.x == 0);
  assert(blockIdx.x == 0);
  *offsetDevice += header->length * sizeof(T);
}
/**
* @brief This kernel allows copying to the device from a stack variable
* asynchronously.
*
* @tparam T The type of variable to copy.
* @param hostValue The value to copy.
* @param deviceValue The location to copy to.
*/
template <typename T>
__global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue)
{
  static_assert(std::is_pod<T>::value, "Must be a POD to do async copy.");
  // Single-thread kernel; the value travels as a kernel argument, so no
  // host-side buffer has to outlive the launch.
  assert(blockIdx.x == 0);
  assert(threadIdx.x == 0);
  *deviceValue = hostValue;
}
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
// Verify that `ptr` is aligned to `alignment` bytes; throw a
// std::runtime_error naming the pointer and required alignment otherwise.
void checkAlignmentOf(void* const ptr, const size_t alignment)
{
  void* probe = ptr;
  size_t room = alignment;
  // std::align leaves probe == ptr exactly when ptr already satisfies the
  // requested alignment.
  const bool isAligned
      = std::align(alignment, alignment, probe, room) != nullptr
        && probe == ptr;
  if (!isAligned) {
    std::ostringstream oss;
    oss << ptr;
    throw std::runtime_error(
        "Incorrectly aligned buffer: " + oss.str() + ", should be aligned to "
        + std::to_string(alignment));
  }
}
/**
* @brief This copies the input to the device from a stack variable
* asynchronously. While this is inefficient, it is better than synchronizing or
* pinning the variable.
*
* @tparam T The type of variable to copy.
* @param hostValue The value to copy.
* @param deviceValue The location to copy to.
*/
/**
 * @brief Copy a POD stack value to the device asynchronously by launching a
 * single-thread kernel (avoids synchronizing or pinning the variable).
 *
 * @tparam T The type of variable to copy.
 * @param value The value to copy.
 * @param destination The device location to copy to.
 * @param stream The stream to perform the copy on.
 *
 * @throw std::runtime_error If the kernel fails to launch.
 */
template <typename T>
void asyncPODCopy(const T& value, T* const destination, cudaStream_t stream)
{
  asyncPODCopyKernel<<<dim3(1), dim3(1), 0, stream>>>(value, destination);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    // Use cudaGetErrorString for a readable message; the previous
    // std::to_string(err) printed only the raw enum value.
    throw std::runtime_error(
        std::string("Failed to launch asyncPODCopyKernel kernel: ")
        + cudaGetErrorString(err));
  }
}
/**
* @brief Bit pack or copy the elements to an output address.
*
* @tparam T The type of element to pack/copy.
* @param headerDPtr The header, stored on the device.
* @param temp_ptr The temporary workspace allocated (on the device).
* @param temp_bytes The size of the temporary workspace.
* @param outputDPtr The pointer to the location to output the elements to (on
* the device), stored on the device.
* @param input The input elements (on the device).
* @param numElementsDPtr The pointer to the number of elements, stored on the
* device.
* @param maxNum The maximum number of elements.
* @param offsetDPtr The current offset output, to be increased by
* the number of bytes written by this function.
* @param bitPacking Whether or not to perform bitpacking on this data.
* @param stream The stream to asynchronously perform work on.
*/
template <typename T>
void packToOutput(
    CascadedMetadata::Header* const headerDPtr,
    void* const temp_ptr,
    const size_t temp_bytes,
    void** const outputDPtr,
    const T* const input,
    const size_t* const numElementsDPtr,
    const size_t maxNum,
    size_t* const offsetDPtr,
    const bool bitPacking,
    cudaStream_t stream)
{
  // Record the element count for this layer in its header (both live on
  // the device, hence a device-to-device copy on the stream).
  CudaUtils::copy_async(
      &(headerDPtr->length), numElementsDPtr, 1, DEVICE_TO_DEVICE, stream);
  if (bitPacking) {
    // Carve device-visible pointer slots out of the front of the temp
    // space; the rest of the temp space is handed to BitPackGPU below.
    TempSpaceBroker tempSpace(temp_ptr, temp_bytes);
    void** bitPackOutputPtr;
    void** minValueDevicePtr;
    unsigned char** numBitsDevicePtr;
    tempSpace.reserve(&bitPackOutputPtr, 1);
    tempSpace.reserve(&minValueDevicePtr, 1);
    tempSpace.reserve(&numBitsDevicePtr, 1);
    // Point minValue/numBits slots at their locations inside the header so
    // BitPackGPU writes them straight into the metadata.
    configureBitPackHeader<<<1, 1, 0, stream>>>(
        headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr);
    // Remaining temp space starts right after the last reserved slot.
    void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1);
    const size_t packTempSize
        = temp_bytes
          - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr));
    BitPackGPU::compress(
        packTemp,
        packTempSize,
        getnvcompType<T>(),
        outputDPtr,
        input,
        numElementsDPtr,
        maxNum,
        minValueDevicePtr,
        numBitsDevicePtr,
        stream);
    // Advance the output offset by the (device-computed) packed size.
    increaseOffsetByBitPacking<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr);
  } else {
    // No bit packing: plain device-to-device copy, sized on the device.
    constexpr const int BLOCK_SIZE = 512;
    const dim3 grid(std::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE)));
    const dim3 block(BLOCK_SIZE);
    deferredCopy<T, BLOCK_SIZE><<<grid, block, 0, stream>>>(
        reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr);
    // Advance the output offset by the raw byte size of the layer.
    increaseOffsetByRaw<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr);
  }
}
/**
 * @brief Compute a worst-case output-size bound for cascaded compression of
 * `in_bytes` of valT data with the given options, written to *out_bytes.
 * Mirrors the layer structure built by compressTypedAsync so the two stay
 * in sync.
 */
template <typename valT, typename runT>
void generateTypedOutputUpperBound(
    const void* const /*in_ptr*/,
    const size_t in_bytes,
    const nvcompCascadedFormatOpts* const opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    size_t* const out_bytes)
{
  if (temp_bytes > 0) {
    CHECK_NOT_NULL(temp_ptr);
    // only check alignment if it's non-null
    checkAlignmentOf(temp_ptr, sizeof(size_t));
  }
  CascadedMetadata metadata(*opts, getnvcompType<valT>(), in_bytes, 0);
  const int numRLEs = metadata.getNumRLEs();
  const int numDeltas = metadata.getNumDeltas();
  const bool bitPacking = metadata.useBitPacking();
  // assume single chunk for now
  // TODO: implement a multi-chunk version
  const size_t outputSize = in_bytes / sizeof(valT);
  assert(outputSize * sizeof(valT) == in_bytes);
  // vals_id assigns the same layer ids that compression will use.
  int vals_id = 0;
  // initialize config
  nvcompType_t type = getnvcompType<valT>();
  nvcompIntConfig_t* config = createConfig(&metadata);
  // First pass - set layers, assume nothing actually compresses.
  // TODO: This will be a
  // gross over estimation of the output size, but the better option would
  // be to probably just assume 1:1 output/input, and error out during
  // compression if we fail to achieve that (maybe just set RLE, Delta, and BP
  // to 0, and do a memcpy, so that user's wont have to handle the error case
  // in their code).
  // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally
  // having bit packing applied
  const int numSteps = std::max(numRLEs, numDeltas);
  for (int r = numSteps - 1; r >= 0; r--) {
    const int inputId = vals_id;
    if (numSteps - r - 1 < numRLEs) {
      // Step with RLE: allocate ids for the run and value streams.
      const int runId = ++vals_id;
      const int valId = ++vals_id;
      nvcompConfigAddRLE_BP(
          config,
          inputId,
          outputSize,
          valId,
          type,
          bitPacking,
          runId,
          type,
          bitPacking);
      // store vals (apply delta if necessary)
      if (numRLEs - 1 - r < numDeltas) {
        const int deltaId = ++vals_id;
        if (r == 0) {
          // Final layer: delta output may be bit packed.
          nvcompConfigAddDelta_BP(
              config, valId, outputSize, deltaId, type, bitPacking);
        } else {
          nvcompConfigAddDelta_BP(
              config,
              deltaId,
              outputSize,
              valId,
              type,
              0); // no bitpacking when delta is used as an intermediate step
        }
      }
    } else {
      // RLE-less step
      const int deltaId = ++vals_id;
      if (r == 0) {
        nvcompConfigAddDelta_BP(
            config, inputId, outputSize, deltaId, type, bitPacking);
      } else {
        nvcompConfigAddDelta_BP(
            config,
            deltaId,
            outputSize,
            inputId,
            type,
            0); // no bitpacking when delta is used as an intermediate step
      }
    }
  }
  destroyConfig(config);
  // we will abort compression if we can't fit into out_bytes.
  const size_t serializedMetadataSize
      = CascadedMetadataOnGPU::getSerializedSizeOf(metadata);
  // This may be overkill, as most datatypes we use are aligned to size_t,
  // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a
  // smart compiler could potentially generate instructions for some of our
  // structure that at 16-byte aligned.
  const size_t wordSize = alignof(std::max_align_t);
  // space for metadata, each set of 'runs', one set of 'vals'.
  *out_bytes = roundUpTo(serializedMetadataSize, wordSize)
               + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs
               + roundUpTo(sizeof(valT) * outputSize, wordSize);
}
/**
 * @brief Typed implementation of cascaded compression. Runs the configured
 * sequence of RLE / Delta layers (newest layer first, r counts down),
 * optionally bit packing final outputs, entirely asynchronously on `stream`.
 *
 * NOTE: *out_bytes is read on input here (passed as the buffer size to
 * CascadedMetadataOnGPU) and overwritten with the compressed size at the
 * end -- it is an in/out parameter.
 */
template <typename valT, typename runT>
void compressTypedAsync(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompCascadedFormatOpts* const format_opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    void* const out_ptr,
    size_t* const out_bytes,
    cudaStream_t stream)
{
  const nvcompType_t type = getnvcompType<valT>();
  CascadedMetadata metadata(*format_opts, type, in_bytes, 0);
  const int numRLEs = metadata.getNumRLEs();
  const int numDeltas = metadata.getNumDeltas();
  const bool bitPacking = metadata.useBitPacking();
  // assume single chunk for now
  // TODO: implement a multi-chunk version
  const size_t maxNum = in_bytes / sizeof(valT);
  int vals_id = 0;
  // Carve all device-side scratch out of the caller-provided temp space.
  TempSpaceBroker tempSpace(temp_ptr, temp_bytes);
  size_t* offsetDevice;
  tempSpace.reserve(&offsetDevice, 1);
  // Serialize the metadata into the front of the output buffer; the
  // device-side offset tracks where compressed payloads start.
  CascadedMetadataOnGPU metadataOnGPU(out_ptr, *out_bytes);
  metadataOnGPU.copyToGPU(metadata, offsetDevice, stream);
  // Intermediate buffers: only needed when at least one layer runs.
  valT* vals_delta = nullptr;
  valT* vals_output = nullptr;
  runT* runs_output = nullptr;
  if (numRLEs > 0 || numDeltas > 0) {
    tempSpace.reserve(&vals_output, maxNum);
    if (numRLEs > 0) {
      tempSpace.reserve(&runs_output, maxNum);
    }
    tempSpace.reserve(&vals_delta, maxNum);
  }
  // Device-resident element counts: numRunsDevice is the current layer's
  // size, outputSizePtr carries the size forward between layers.
  size_t* numRunsDevice;
  size_t* outputSizePtr;
  tempSpace.reserve(&numRunsDevice, 1);
  tempSpace.reserve(&outputSizePtr, 1);
  // Device-visible pointer slots, filled by configTempSpacePointers below.
  runT** runs_output_ptr;
  valT** vals_output_ptr;
  valT** vals_delta_ptr;
  tempSpace.reserve(&runs_output_ptr, 1);
  tempSpace.reserve(&vals_output_ptr, 1);
  tempSpace.reserve(&vals_delta_ptr, 1);
  void** bit_out_ptr;
  tempSpace.reserve(&bit_out_ptr, 1);
  // NOTE(review): statusDevice is reserved but never used below -- confirm
  // whether it is a leftover or reserved for layout compatibility.
  cudaError_t* statusDevice;
  tempSpace.reserve(&statusDevice, 1);
  configTempSpacePointers<<<1, 1, 0, stream>>>(
      vals_output,
      vals_output_ptr,
      runs_output,
      runs_output_ptr,
      vals_delta,
      vals_delta_ptr);
  // Second pass - perform compression and store in the memory allocated above.
  // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally
  // having bit packing applied
  const int numSteps = std::max(numRLEs, numDeltas);
  for (int r = numSteps - 1; r >= 0; r--) {
    int nextValId;
    // The first layer reads directly from the caller's input; later layers
    // read the previous layer's delta buffer.
    const bool firstLayer = r == std::max(numRLEs - 1, numDeltas - 1);
    const valT* const vals_input
        = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta;
    if (numSteps - r - 1 < numRLEs) {
      const int runId = ++vals_id;
      const int valId = ++vals_id;
      // rle always first
      if (firstLayer) {
        // Input size (maxNum) is known on the host for the first layer.
        RunLengthEncodeGPU::compress(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            vals_output,
            getnvcompType<runT>(),
            runs_output,
            numRunsDevice,
            vals_input,
            maxNum,
            stream);
      } else {
        // Downstream layers take their input size from the device.
        RunLengthEncodeGPU::compressDownstream(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            (void**)vals_output_ptr,
            getnvcompType<runT>(),
            (void**)runs_output_ptr,
            numRunsDevice,
            vals_input,
            outputSizePtr,
            maxNum,
            stream);
      }
      // Record the resulting element count in both layer headers.
      CascadedMetadata::Header* const valHdr
          = metadataOnGPU.getHeaderLocation(valId);
      CudaUtils::copy_async(
          &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
      CascadedMetadata::Header* const runHdr
          = metadataOnGPU.getHeaderLocation(runId);
      CudaUtils::copy_async(
          &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
      // store vals (apply delta if necessary)
      if (numRLEs - 1 - r < numDeltas) {
        DeltaGPU::compress(
            tempSpace.next(),
            tempSpace.spaceLeft(),
            getnvcompType<valT>(),
            (void**)vals_delta_ptr,
            vals_output,
            numRunsDevice,
            maxNum,
            stream);
        const int id = ++vals_id;
        nextValId = id;
        CascadedMetadata::Header* const hdr
            = metadataOnGPU.getHeaderLocation(id);
        CudaUtils::copy_async(
            &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
      } else {
        // No delta on this step: move the RLE values into the delta buffer
        // so the next layer (or final pack) reads from a single place.
        constexpr const int COPY_BLOCK_SIZE = 512;
        const dim3 grid(std::min(
            4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE))));
        const dim3 block(COPY_BLOCK_SIZE);
        deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>(
            vals_delta, vals_output, numRunsDevice);
        nextValId = valId;
      }
      // Emit the run stream for this layer at the current aligned offset.
      offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>(
          out_ptr, bit_out_ptr, offsetDevice);
      metadataOnGPU.saveOffset(runId, offsetDevice, stream);
      // pack runs into bytes
      packToOutput(
          metadataOnGPU.getHeaderLocation(runId),
          tempSpace.next(),
          tempSpace.spaceLeft(),
          bit_out_ptr,
          runs_output,
          numRunsDevice,
          maxNum,
          offsetDevice,
          bitPacking,
          stream);
    } else {
      // Delta-only step: seed the element count from the previous layer
      // (device) or from the input size (host) on the first layer.
      if (!firstLayer) {
        CudaUtils::copy_async(
            numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream);
      } else {
        CudaUtils::copy_async(
            numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream);
      }
      // No RLE
      DeltaGPU::compress(
          tempSpace.next(),
          tempSpace.spaceLeft(),
          getnvcompType<valT>(),
          (void**)vals_output_ptr,
          vals_input,
          numRunsDevice,
          maxNum,
          stream);
      // we need to copy the delta to final delta buffer
      {
        constexpr const int COPY_BLOCK_SIZE = 512;
        const dim3 grid(std::min(
            4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE))));
        const dim3 block(COPY_BLOCK_SIZE);
        deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>(
            vals_delta, vals_output, numRunsDevice);
      }
      const int id = ++vals_id;
      nextValId = id;
      CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id);
      CudaUtils::copy_async(
          &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
    }
    if (r == 0) {
      // Last layer: emit the value stream itself at the next aligned offset.
      offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>(
          out_ptr, bit_out_ptr, offsetDevice);
      metadataOnGPU.saveOffset(nextValId, offsetDevice, stream);
      // pack runs into bytes
      packToOutput(
          metadataOnGPU.getHeaderLocation(nextValId),
          tempSpace.next(),
          tempSpace.spaceLeft(),
          bit_out_ptr,
          vals_delta,
          numRunsDevice,
          maxNum,
          offsetDevice,
          bitPacking,
          stream);
    } else {
      // update current RLE size
      CudaUtils::copy_async(
          outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream);
    }
  }
  // If there are no RLEs or Deltas, we will do a single BP step.
  if (numRLEs == 0 && numDeltas == 0) {
    const int nextValId = ++vals_id;
    const valT* const vals_input = static_cast<const valT*>(in_ptr);
    CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream);
    offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>(
        out_ptr, bit_out_ptr, offsetDevice);
    metadataOnGPU.saveOffset(nextValId, offsetDevice, stream);
    // pack runs into bytes
    packToOutput(
        metadataOnGPU.getHeaderLocation(nextValId),
        tempSpace.next(),
        tempSpace.spaceLeft(),
        bit_out_ptr,
        vals_input,
        numRunsDevice,
        maxNum,
        offsetDevice,
        bitPacking,
        stream);
  }
  // async copy output
  metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream);
  CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream);
}
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
void nvcompCascadedCompressionGPU::computeWorkspaceSize(
    const void* /*in_ptr*/,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const opts,
    size_t* const temp_bytes)
{
  // Workspace = the largest scratch any single enabled kernel needs (they
  // run one at a time) + buffers for intermediate values/runs.
  const size_t numIn = in_bytes / sizeOfnvcompType(in_type);
  const nvcompType_t runType = selectRunsType(numIn);
  size_t kernelBytes = 0;
  if (opts->use_bp) {
    // max of runs and values
    kernelBytes = std::max(
        kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type));
    kernelBytes = std::max(
        kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType));
  }
  if (opts->num_deltas > 0) {
    kernelBytes = std::max(
        kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type));
  }
  // get at least enough for intermediate gpu values
  size_t ioBytes = 1024;
  if (opts->num_RLEs > 0) {
    kernelBytes = std::max(
        kernelBytes,
        RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType));
    ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType);
  } else if (opts->num_deltas > 0) {
    ioBytes += 2 * in_bytes;
  }
  *temp_bytes = kernelBytes + ioBytes;
}
void nvcompCascadedCompressionGPU::generateOutputUpperBound(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const opts,
    void* const temp_ptr,
    const size_t temp_bytes,
    size_t* const out_bytes)
{
  // Validate arguments; temp_ptr may be null when no workspace is needed.
  CHECK_NOT_NULL(in_ptr);
  CHECK_NOT_NULL(opts);
  if (temp_bytes > 0) {
    CHECK_NOT_NULL(temp_ptr);
  }
  CHECK_NOT_NULL(out_bytes);
  // Pick the integer type used for run lengths from the element count, then
  // dispatch to the (value type, run type) specialization.
  const nvcompType_t countType
      = selectRunsType(in_bytes / sizeOfnvcompType(in_type));
  NVCOMP_TYPE_TWO_SWITCH(
      in_type,
      countType,
      generateTypedOutputUpperBound,
      in_ptr,
      in_bytes,
      opts,
      temp_ptr,
      temp_bytes,
      out_bytes);
}
void nvcompCascadedCompressionGPU::compressAsync(
    const void* const in_ptr,
    const size_t in_bytes,
    const nvcompType_t in_type,
    const nvcompCascadedFormatOpts* const cascadedOpts,
    void* const temp_ptr,
    const size_t temp_bytes,
    void* const out_ptr,
    size_t* const out_bytes,
    cudaStream_t stream)
{
  // Validate arguments; size_t alignment is required because size_t values
  // are written directly into both buffers.
  CHECK_NOT_NULL(in_ptr);
  CHECK_NOT_NULL(cascadedOpts);
  CHECK_NOT_NULL(temp_ptr);
  CHECK_NOT_NULL(out_ptr);
  CHECK_NOT_NULL(out_bytes);
  checkAlignmentOf(out_ptr, sizeof(size_t));
  checkAlignmentOf(temp_ptr, sizeof(size_t));
  // Pick the integer type used for run lengths from the element count, then
  // dispatch to the (value type, run type) specialization.
  const nvcompType_t countType
      = selectRunsType(in_bytes / sizeOfnvcompType(in_type));
  NVCOMP_TYPE_TWO_SWITCH(
      in_type,
      countType,
      compressTypedAsync,
      in_ptr,
      in_bytes,
      cascadedOpts,
      temp_ptr,
      temp_bytes,
      out_ptr,
      out_bytes,
      stream);
}
} // namespace nvcomp
|
241674c74511cb35e4fc8c68244c682d1ff979f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* dmv_gpu.cu -- Template for DMV GPU kernels
*
* Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2013, Vasileios Karakasis
*/
#include <stdio.h>
#include "dmv.h"
/*
* Utility function to get the thread ID within the
* global working space.
*/
__device__ int get_global_tid()
{
    // Linearize (block position in the 2-D grid, thread position in the
    // 2-D block) into a single global thread index.
    const int block_id = gridDim.x*blockIdx.y + blockIdx.x;
    const int local_id = blockDim.x*threadIdx.y + threadIdx.x;
    return block_id*blockDim.x*blockDim.y + local_id;
}
/*
* Utility function to get the thread ID within the
* local/block working space.
*/
__device__ int get_local_tid()
{
    // Row-major thread index within the block.
    return threadIdx.y*blockDim.x + threadIdx.x;
}
/*
* Naive kernel
*/
__global__ void dmv_gpu_naive(const value_t *a, const value_t *x, value_t *y,
                              size_t n)
{
    // One thread per output row; each thread walks its whole matrix row,
    // accumulating directly into global memory.
    const int row = get_global_tid();
    if (row >= n)
        return;
    y[row] = 0;
    for (int k = 0; k < n; ++k)
        y[row] += a[row*n + k]*x[k];
}
/*
* Coalesced memory acceses
*/
// NOTE(review): the body is currently identical to dmv_gpu_naive (and the
// declared loop variable j is unused) -- this appears to be a template stub
// meant to be rewritten with a coalesced (column-wise) access pattern.
// Confirm whether a[i*n+idx] indexing was intended here.
__global__ void dmv_gpu_coalesced(const value_t *a, const value_t *x,
                                  value_t *y, size_t n)
{
    int i, j;
    int idx = get_global_tid();  // output row handled by this thread
    if (idx >= n) {
        return;
    }
    y[idx] = 0;
    // Row-major traversal: adjacent threads read addresses n apart, so
    // these loads are NOT coalesced despite the kernel's name.
    for (i = 0; i < n; ++i) {
        y[idx] += a[idx*n+i]*x[i];
    }
}
/*
* Use of shared memory
*/
__global__ void dmv_gpu_shmem(const value_t *a, const value_t *x, value_t *y,
                              size_t n)
{
    int i,j;
    int idx = get_global_tid();   // global row handled by this thread
    int idx2 = get_local_tid();   // thread index within the block
    // Dynamic shared memory; indexed up to n below, so the launch must
    // provide at least n * sizeof(value_t) bytes.
    extern __shared__ value_t shmem[];
    // NOTE(review): threads with idx >= n exit here, yet the surviving
    // threads execute __syncthreads() below -- a divergent barrier if any
    // thread of a block returns early (undefined behavior). Confirm n is
    // always a multiple of the block size.
    if (idx >= n) {
        return;
    }
    y[idx] = 0;
    // Stage a blockDim.y-sized tile of x into shared memory per iteration,
    // then consume it with column-wise (coalesced) accesses to a.
    for(j = idx2; j < n; j += blockDim.y) {
        shmem[j] = x[j];
        __syncthreads();
        // NOTE(review): there is no barrier between this consumer loop and
        // the next iteration's write to shmem -- confirm intended.
        for(i = j-idx2; i < j-idx2+blockDim.y; i++) {
            y[idx] += a[i*n+idx]*shmem[i];
        }
    }
}
| 241674c74511cb35e4fc8c68244c682d1ff979f8.cu | /*
* dmv_gpu.cu -- Template for DMV GPU kernels
*
* Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2013, Vasileios Karakasis
*/
#include <stdio.h>
#include "dmv.h"
/*
* Utility function to get the thread ID within the
* global working space.
*/
__device__ int get_global_tid()
{
    // Linearize (block position in the 2-D grid, thread position in the
    // 2-D block) into a single global thread index.
    const int block_id = gridDim.x*blockIdx.y + blockIdx.x;
    const int local_id = blockDim.x*threadIdx.y + threadIdx.x;
    return block_id*blockDim.x*blockDim.y + local_id;
}
/*
* Utility function to get the thread ID within the
* local/block working space.
*/
__device__ int get_local_tid()
{
    // Row-major thread index within the block.
    return threadIdx.y*blockDim.x + threadIdx.x;
}
/*
* Naive kernel
*/
__global__ void dmv_gpu_naive(const value_t *a, const value_t *x, value_t *y,
                              size_t n)
{
    // One thread per output row; each thread walks its whole matrix row,
    // accumulating directly into global memory.
    const int row = get_global_tid();
    if (row >= n)
        return;
    y[row] = 0;
    for (int k = 0; k < n; ++k)
        y[row] += a[row*n + k]*x[k];
}
/*
* Coalesced memory acceses
*/
// NOTE(review): the body is currently identical to dmv_gpu_naive (and the
// declared loop variable j is unused) -- this appears to be a template stub
// meant to be rewritten with a coalesced (column-wise) access pattern.
// Confirm whether a[i*n+idx] indexing was intended here.
__global__ void dmv_gpu_coalesced(const value_t *a, const value_t *x,
                                  value_t *y, size_t n)
{
    int i, j;
    int idx = get_global_tid();  // output row handled by this thread
    if (idx >= n) {
        return;
    }
    y[idx] = 0;
    // Row-major traversal: adjacent threads read addresses n apart, so
    // these loads are NOT coalesced despite the kernel's name.
    for (i = 0; i < n; ++i) {
        y[idx] += a[idx*n+i]*x[i];
    }
}
/*
* Use of shared memory
*/
__global__ void dmv_gpu_shmem(const value_t *a, const value_t *x, value_t *y,
                              size_t n)
{
    int i,j;
    int idx = get_global_tid();   // global row handled by this thread
    int idx2 = get_local_tid();   // thread index within the block
    // Dynamic shared memory; indexed up to n below, so the launch must
    // provide at least n * sizeof(value_t) bytes.
    extern __shared__ value_t shmem[];
    // NOTE(review): threads with idx >= n exit here, yet the surviving
    // threads execute __syncthreads() below -- a divergent barrier if any
    // thread of a block returns early (undefined behavior). Confirm n is
    // always a multiple of the block size.
    if (idx >= n) {
        return;
    }
    y[idx] = 0;
    // Stage a blockDim.y-sized tile of x into shared memory per iteration,
    // then consume it with column-wise (coalesced) accesses to a.
    for(j = idx2; j < n; j += blockDim.y) {
        shmem[j] = x[j];
        __syncthreads();
        // NOTE(review): there is no barrier between this consumer loop and
        // the next iteration's write to shmem -- confirm intended.
        for(i = j-idx2; i < j-idx2+blockDim.y; i++) {
            y[idx] += a[i*n+idx]*shmem[i];
        }
    }
}
|
c071bd6ab48e3c12173445129cacd72ef1820225.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2022 by XGBoost Contributors
*/
#include <thrust/execution_policy.h>
#include <thrust/functional.h> // thrust:plus
#include "device_helpers_hip.cuh" // dh::Reduce, safe_cuda, dh::XGBCachingDeviceAllocator
#include "numeric.h"
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
namespace xgboost {
namespace common {
namespace cuda {
double Reduce(Context const* ctx, HostDeviceVector<float> const& values) {
  // Sum all elements on the configured device, accumulating in double
  // (the initial value 0.0 fixes the accumulator type).
  values.SetDevice(ctx->gpu_id);
  auto const span = values.ConstDeviceSpan();
  dh::XGBCachingDeviceAllocator<char> caching_alloc;
  return dh::Reduce(thrust::hip::par(caching_alloc), span.data(),
                    span.data() + span.size(), 0.0, thrust::plus<double>{});
}
} // namespace cuda
} // namespace common
} // namespace xgboost
| c071bd6ab48e3c12173445129cacd72ef1820225.cu | /*!
* Copyright 2022 by XGBoost Contributors
*/
#include <thrust/execution_policy.h>
#include <thrust/functional.h> // thrust:plus
#include "device_helpers.cuh" // dh::Reduce, safe_cuda, dh::XGBCachingDeviceAllocator
#include "numeric.h"
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
namespace xgboost {
namespace common {
namespace cuda {
double Reduce(Context const* ctx, HostDeviceVector<float> const& values) {
  // Sum all elements on the configured device, accumulating in double
  // (the initial value 0.0 fixes the accumulator type).
  values.SetDevice(ctx->gpu_id);
  auto const span = values.ConstDeviceSpan();
  dh::XGBCachingDeviceAllocator<char> caching_alloc;
  return dh::Reduce(thrust::cuda::par(caching_alloc), span.data(),
                    span.data() + span.size(), 0.0, thrust::plus<double>{});
}
} // namespace cuda
} // namespace common
} // namespace xgboost
|
4547f055b6fc4f3759b623b5ed7babe650aa4fe6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cube_select_two.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    // argv[1]: how many of the predefined matrix sizes to benchmark.
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int b = 2;
            int n = XSIZE*YSIZE;
            float radius = 1;
            // BUGFIX: allocate element-count * element-size bytes; the
            // original allocated only XSIZE*YSIZE bytes (4x too small).
            float *xyz = NULL;
            hipMalloc(&xyz, XSIZE*YSIZE*sizeof(float));
            int *idx_out = NULL;
            hipMalloc(&idx_out, XSIZE*YSIZE*sizeof(int));
            // Round the launch extents up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            hipLaunchKernelGGL((cube_select_two), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, radius, xyz, idx_out);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((cube_select_two), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, radius, xyz, idx_out);
            }
            hipDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((cube_select_two), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, radius, xyz, idx_out);
            }
            // BUGFIX: kernel launches are asynchronous -- wait for them to
            // finish so the measurement covers execution, not just launch.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: release device buffers instead of leaking one pair
            // per benchmarked configuration.
            hipFree(xyz);
            hipFree(idx_out);
        }
}} | 4547f055b6fc4f3759b623b5ed7babe650aa4fe6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cube_select_two.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // argv[1]: how many of the predefined matrix sizes to benchmark.
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int b = 2;
            int n = XSIZE*YSIZE;
            float radius = 1;
            // BUGFIX: allocate element-count * element-size bytes; the
            // original allocated only XSIZE*YSIZE bytes (4x too small).
            float *xyz = NULL;
            cudaMalloc(&xyz, XSIZE*YSIZE*sizeof(float));
            int *idx_out = NULL;
            cudaMalloc(&idx_out, XSIZE*YSIZE*sizeof(int));
            // Round the launch extents up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            cube_select_two<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cube_select_two<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out);
            }
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cube_select_two<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out);
            }
            // BUGFIX: kernel launches are asynchronous -- wait for them to
            // finish so the measurement covers execution, not just launch.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUGFIX: release device buffers instead of leaking one pair
            // per benchmarked configuration.
            cudaFree(xyz);
            cudaFree(idx_out);
        }
}} |
8caf3caf5813c0a08eab082e2f99e122429e2cce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
CUDA Radix Sort
See the paper:
"Designing Efficient Sorting Algorithms for Manycore GPUs",
Nadathur Satish, Mark Harris, and Michael Garland,
to appear in the proceedings of IEEE International Parallel &
Distributed Processing Symposium 2009.
*/
#include "new_radixsort.h"
#include <string.h>
#include <stdio.h>
#define CUDPP_STATIC_LIB
#include "cudpp/cudpp.h"
#include <algorithm>
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
extern "C"
void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
    // Debug builds only: drain outstanding GPU work, then report either an
    // execution error (from the sync) or a sticky launch error.
    hipError_t e = hipDeviceSynchronize();
    if (e == hipSuccess)
    {
        e = hipGetLastError();
    }
    if (e != hipSuccess)
    {
        fprintf(stderr, "CUDA Error %s : %s\n", msg, hipGetErrorString(e));
        exit(EXIT_FAILURE);
    }
#endif
}
// ================================================================================================
// flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
    // Make IEEE-754 bit patterns sort correctly as unsigned integers:
    // a negative float (sign bit set) has all bits inverted, a positive
    // float has only its sign bit flipped.
    if (!doFlip)
        return f;
    uint mask = -int(f >> 31) | 0x80000000;
    return f ^ mask;
}
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
    // Inverse of floatFlip: restore the original IEEE-754 bit pattern.
    if (!doFlip)
        return f;
    uint mask = ((f >> 31) - 1) | 0x80000000;
    return f ^ mask;
}
__global__ void flipFloats(uint *values, uint numValues)
{
    // Each thread transforms up to four elements, blockDim.x apart.
    uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
#pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (index < numValues) values[index] = floatFlip<true>(values[index]);
        index += blockDim.x;
    }
}
__global__ void unflipFloats(uint *values, uint numValues)
{
    // Each thread transforms up to four elements, blockDim.x apart.
    uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
#pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (index < numValues) values[index] = floatUnflip<true>(values[index]);
        index += blockDim.x;
    }
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 numElements of shared memory per thread (64 numElements per warp)
//----------------------------------------------------------------------------
// Inclusive warp-scan turned exclusive at the end. Each thread contributes
// one value; sData must hold 2 * WARP_SIZE entries per warp (the first half
// is zero-filled so the offset reads below never go out of bounds).
// maxlevel selects how many doubling steps run (log2 of participants).
template<class T, int maxlevel>
__device__ T scanwarp(T val, T* sData)
{
    // The following is the same as 2 * NewRadixSort::WARP_SIZE * warpId + threadInWarp =
    // 64*(threadIdx.x >> 5) + (threadIdx.x & (NewRadixSort::WARP_SIZE - 1))
    int idx = 2 * threadIdx.x - (threadIdx.x & (NewRadixSort::WARP_SIZE - 1));
    sData[idx] = 0;
    idx += NewRadixSort::WARP_SIZE;
    sData[idx] = val; __SYNC
    // Kogge-Stone doubling steps; within a warp these run in lockstep on
    // hardware (__SYNC is only a real barrier under device emulation).
#ifdef __DEVICE_EMULATION__
    T t = sData[idx - 1]; __SYNC
    sData[idx] += t; __SYNC
    t = sData[idx - 2]; __SYNC
    sData[idx] += t; __SYNC
    t = sData[idx - 4]; __SYNC
    sData[idx] += t; __SYNC
    t = sData[idx - 8]; __SYNC
    sData[idx] += t; __SYNC
    t = sData[idx - 16]; __SYNC
    sData[idx] += t; __SYNC
#else
    if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; } __SYNC
    if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; } __SYNC
    if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; } __SYNC
    if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; } __SYNC
    if (4 <= maxlevel) { sData[idx] += sData[idx -16]; } __SYNC
#endif
    return sData[idx] - val; // convert inclusive -> exclusive
}
//----------------------------------------------------------------------------
// scan4 scans 4*NewNewRadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
// Exclusive scan of 4 elements per thread across the whole block:
// 1) each thread serially sums its 4 inputs,
// 2) a warp-scan produces per-thread exclusive offsets,
// 3) the last lane of each warp publishes the warp total into ptr[0..31],
// 4) warp 0 scans the warp totals, and each thread adds its warp base back.
// `ptr` is the kernel's dynamic shared allocation.
__device__ uint4 scan4(uint4 idata)
{
    extern __shared__ uint ptr[];
    uint idx = threadIdx.x;
    uint4 val4 = idata;
    // Serial inclusive sums of this thread's first three inputs.
    uint sum[3];
    sum[0] = val4.x;
    sum[1] = val4.y + sum[0];
    sum[2] = val4.z + sum[1];
    uint val = val4.w + sum[2];           // thread's total of all 4 inputs
    // Exclusive scan of per-thread totals within each warp.
    val = scanwarp<uint, 4>(val, ptr);
    __syncthreads();
    // Last lane of each warp stores the warp's inclusive total at ptr[warpId].
    if ((idx & (NewRadixSort::WARP_SIZE - 1)) == NewRadixSort::WARP_SIZE - 1)
    {
        ptr[idx >> 5] = val + val4.w + sum[2];
    }
    __syncthreads();
#ifndef __DEVICE_EMULATION__
    if (idx < NewRadixSort::WARP_SIZE)
#endif
    {
        // Scan the warp totals.  maxlevel 2 performs doubling steps of
        // 1, 2 and 4, i.e. an 8-wide scan -- presumably sized for
        // CTA_SIZE == 256 (8 warps); confirm if CTA_SIZE changes.
        ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
    }
    __syncthreads();
    // Combine warp base with the thread's intra-warp exclusive offset.
    val += ptr[idx >> 5];
    val4.x = val;
    val4.y = val + sum[0];
    val4.z = val + sum[1];
    val4.w = val + sum[2];
    return val4;
}
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
// Given 4 predicates per thread, computes each element's output rank in a
// stable partition where all True elements precede all False elements.
// `ctasize` is the number of threads in the block.
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
    // Exclusive scan of the predicates = each True element's position
    // within the True section.
    uint4 address = scan4(preds);
    __shared__ uint numtrue;
    // The last thread sees the total number of True predicates
    // (exclusive scan of its last element plus that element).
    if (threadIdx.x == ctasize-1)
    {
        numtrue = address.w + preds.w;
    }
    __syncthreads();
    uint4 rank;
    uint idx = threadIdx.x << 2;   // index of this thread's first element
    // False elements keep relative order after the True section:
    // position = numtrue + (element index) - (number of Trues before it).
    rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
    rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
    rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
    rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
    return rank;
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
// Sorts the block's 4*CTA_SIZE keys (and their values) by bits
// [startbit, startbit + nbits), one bit per iteration (stable LSD split).
// Each thread holds 4 keys/values in registers; sMem1 (dynamic shared
// memory, at least 4 * CTA_SIZE uints) stages the ranked scatter/gather.
// NOTE(review): the `floatFlip` template parameter is unused in this body.
template<uint nbits, uint startbit, bool floatFlip>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
    extern __shared__ uint sMem1[];
    for(uint shift = startbit; shift < (startbit + nbits); ++shift)
    {
        // Predicate is the NEGATED bit so 0-bits rank first (ascending sort).
        uint4 lsb;
        lsb.x = !((key.x >> shift) & 0x1);
        lsb.y = !((key.y >> shift) & 0x1);
        lsb.z = !((key.z >> shift) & 0x1);
        lsb.w = !((key.w >> shift) & 0x1);
        // r = each element's destination index in the bit-partitioned order.
        uint4 r = rank4<NewRadixSort::CTA_SIZE>(lsb);
#if 1
        // This arithmetic strides the ranks across 4 CTA_SIZE regions
        sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
        sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
        sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
        sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
        __syncthreads();
        // The above allows us to read without 4-way bank conflicts:
        key.x = sMem1[threadIdx.x];
        key.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
        key.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
        key.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
        __syncthreads();
        // Same scatter/gather for the values, reusing the shared buffer.
        sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
        sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
        sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
        sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
        __syncthreads();
        value.x = sMem1[threadIdx.x];
        value.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
        value.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
        value.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
#else
        // NOTE(review): dead branch (#if 1 above).  If enabled it would not
        // compile: `sMem` is not declared in this function's scope.
        sMem1[r.x] = key.x;
        sMem1[r.y] = key.y;
        sMem1[r.z] = key.z;
        sMem1[r.w] = key.w;
        __syncthreads();
        // This access has 4-way bank conflicts
        key = sMem[threadIdx.x];
        __syncthreads();
        sMem1[r.x] = value.x;
        sMem1[r.y] = value.y;
        sMem1[r.z] = value.z;
        sMem1[r.w] = value.w;
        __syncthreads();
        value = sMem[threadIdx.x];
#endif
        __syncthreads();
    }
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts each block of data independently in shared
// memory.
//
// Done in two separate stages. This stage calls radixSortBlock on each block
// independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//----------------------------------------------------------------------------
// Kernel: each block independently sorts its 4*CTA_SIZE-element tile of
// keys/values by bits [startbit, startbit + nbits) from keysIn/valuesIn into
// keysOut/valuesOut.  `startBlock` offsets blockIdx.x when the grid is
// launched in chunks (1D grid size limit).  When fullBlocks == false, the
// tail tile is padded with UINT_MAX keys (which sort last) and only the
// in-range elements are written back.  `flip` applies the float->uint bit
// transform to keys on load.
template<uint nbits, uint startbit, bool fullBlocks, bool flip>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
                                uint4* keysIn, uint4* valuesIn,
                                uint numElements, uint startBlock)
{
    // Dynamic shared allocation; radixSortBlock and scan4 access the same
    // memory through their own extern __shared__ declarations.
    extern __shared__ uint4 sMem[];
    uint4 key, value;
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;  // uint4 index
    const uint idx = i << 2;                            // scalar element index
    // handle non-full last block if array is not multiple of 1024 numElements
    if (!fullBlocks && idx+3 >= numElements)
    {
        if (idx >= numElements)
        {
            // Entire uint4 is out of range: pure padding.
            key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
            value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
        }
        else
        {
            // for non-full block, we handle uint1 values instead of uint4
            uint *keys1 = (uint*)keysIn;
            uint *values1 = (uint*)valuesIn;
            key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
            key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
            key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
            key.w = UINT_MAX;   // idx+3 >= numElements here by construction
            value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
            value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
            value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
            value.w = UINT_MAX;
        }
    }
    else
    {
        // Fully in-range uint4 load.
        key = keysIn[i];
        value = valuesIn[i];
        if (flip)
        {
            key.x = floatFlip<flip>(key.x);
            key.y = floatFlip<flip>(key.y);
            key.z = floatFlip<flip>(key.z);
            key.w = floatFlip<flip>(key.w);
        }
    }
    __syncthreads();
    radixSortBlock<nbits, startbit, false>(key, value);
    __syncthreads();
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && idx+3 >= numElements)
    {
        if (idx < numElements)
        {
            // for non-full block, we handle uint1 values instead of uint4
            uint *keys1 = (uint*)keysOut;
            uint *values1 = (uint*)valuesOut;
            keys1[idx] = key.x;
            values1[idx] = value.x;
            if (idx + 1 < numElements)
            {
                keys1[idx + 1] = key.y;
                values1[idx + 1] = value.y;
                if (idx + 2 < numElements)
                {
                    keys1[idx + 2] = key.z;
                    values1[idx + 2] = value.z;
                }
            }
        }
    }
    else
    {
        keysOut[i] = key;
        valuesOut[i] = value;
    }
}
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. Writes the radix
// counts to counters, and the starting offsets to blockOffsets.
//----------------------------------------------------------------------------
// Kernel: for block-sorted data, computes for each block the count of each
// 4-bit digit and the offset of the digit's first occurrence in the block.
// Each thread loads 2 keys (uint2), so a block covers 2*CTA_SIZE elements.
// Requires 3*CTA_SIZE uints of dynamic shared memory:
//   sRadix1        : 2*CTA_SIZE extracted digits
//   sStartPointers : 16 per-digit entries, placed after the digits
// Outputs:
//   blockOffsets[blockId*16 + d]      = first index of digit d in the block
//   counters[d*totalBlocks + blockId] = count of digit d in the block
//     (digit-major, so one global scan of `counters` yields global offsets)
template<uint startbit, bool fullBlocks>
__global__ void findRadixOffsets(uint2 *keys,
                                 uint *counters,
                                 uint *blockOffsets,
                                 uint numElements,
                                 uint totalBlocks,
                                 uint startBlock)
{
    extern __shared__ uint2 sMem2[];
    uint2 *sRadix2 = (uint2*)sMem2;
    uint *sRadix1 = (uint*) sRadix2;
    uint *sStartPointers = (uint*)(sMem2 + NewRadixSort::CTA_SIZE);
    uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;
    uint2 radix2;
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && ((i + 1) << 1 ) > numElements )
    {
        // handle uint1 rather than uint2 for non-full blocks
        uint *keys1 = (uint*)keys;
        uint j = i << 1;
        radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
        j++;
        radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
    }
    else
    {
        radix2 = keys[i];
    }
    // Extract this pass's 4-bit digit for both elements.
    sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
    sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
    // Finds the position where the sRadix1 entries differ and stores start
    // index for each radix.  Digits absent from the block keep start 0.
    if(threadIdx.x < 16)
    {
        sStartPointers[threadIdx.x] = 0;
    }
    __syncthreads();
    // A digit boundary exists wherever adjacent digits differ; the first
    // digit's start stays 0 from the initialization above.
    if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
    {
        sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
    }
    if(sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE] != sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1])
    {
        sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE]] = threadIdx.x + NewRadixSort::CTA_SIZE;
    }
    __syncthreads();
    if(threadIdx.x < 16)
    {
        blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
    }
    __syncthreads();
    // Compute the sizes of each block.  Each boundary converts the PREVIOUS
    // digit's start offset into its run length; the final digit's length is
    // closed off by the last thread.  Absent digits remain at count 0.
    if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
    {
        sStartPointers[sRadix1[threadIdx.x - 1]] =
            threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
    }
    if(sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE] != sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1] )
    {
        sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1]] =
            threadIdx.x + NewRadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1]];
    }
    if(threadIdx.x == NewRadixSort::CTA_SIZE - 1)
    {
        sStartPointers[sRadix1[2 * NewRadixSort::CTA_SIZE - 1]] =
            2 * NewRadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * NewRadixSort::CTA_SIZE - 1]];
    }
    __syncthreads();
    if(threadIdx.x < 16)
    {
        counters[threadIdx.x * totalBlocks + blockId] =
            sStartPointers[threadIdx.x];
    }
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. Depends on NewRadixSort::CTA_SIZE being 16 * number of radices
// (i.e. 16 * 2^nbits).
//
// This is quite fast and fully coalesces memory writes, albeit by doing extra
// (potentially wasted) work allocating threads to portions of memory that are
// not written out. Significantly faster than the generic approach on G80.
//----------------------------------------------------------------------------
// Kernel: global scatter of block-sorted keys/values into their final
// positions for this 4-bit digit pass.  Inputs per block:
//   offsets[d*totalBlocks + blockId]      = global base of digit d for this block
//   blockOffsets[blockId*16 + d]          = digit d's start within the block
//   sizes[d*totalBlocks + blockId]        = digit d's count in the block
// Each thread stages 2 elements in shared memory.  When manualCoalesce is
// true, each half-warp writes out one digit's run with 16-aligned (rounded)
// start/end positions so global writes coalesce (pre-Fermi optimization);
// otherwise each thread writes its own 2 elements directly.
// `unflip` applies floatUnflip to keys on store.
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip>
__global__ void reorderData(uint *outKeys,
                            uint *outValues,
                            uint2 *keys,
                            uint2 *values,
                            uint *blockOffsets,
                            uint *offsets,
                            uint *sizes,
                            uint numElements,
                            uint totalBlocks,
                            uint startBlock)
{
    __shared__ uint2 sKeys2[NewRadixSort::CTA_SIZE];
    __shared__ uint2 sValues2[NewRadixSort::CTA_SIZE];
    __shared__ uint sOffsets[16];
    __shared__ uint sBlockOffsets[16];
    uint *sKeys1 = (uint*)sKeys2;       // scalar view of the staged keys
    uint *sValues1 = (uint*)sValues2;
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && (((i + 1) << 1) > numElements))
    {
        uint *keys1 = (uint*)keys;
        uint *values1 = (uint*)values;
        uint j = i << 1;
        sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
        sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
        j++;
        sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
        sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
    }
    else
    {
        sKeys2[threadIdx.x] = keys[i];
        sValues2[threadIdx.x] = values[i];
    }
    if (!manualCoalesce)
    {
        // Simple path: each thread scatters its own 2 elements.
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
        }
        __syncthreads();
        // Destination = digit's global base + position within the digit's run.
        uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
        uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
            outValues[globalOffset] = sValues1[threadIdx.x];
        }
        radix = (sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE] >> startbit) & 0xF;
        globalOffset = sOffsets[radix] + threadIdx.x + NewRadixSort::CTA_SIZE - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE]);
            outValues[globalOffset] = sValues1[threadIdx.x + NewRadixSort::CTA_SIZE];
        }
    }
    else
    {
        __shared__ uint sSizes[16];
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
            sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
        }
        __syncthreads();
        // 1 half-warp is responsible for writing out all values for 1 radix.
        // Loops if there are more than 16 values to be written out.
        // All start indices are rounded down to the nearest multiple of 16, and
        // all end indices are rounded up to the nearest multiple of 16.
        // Thus it can do extra work if the start and end indices are not multiples of 16
        // This is bounded by a factor of 2 (it can do 2X more work at most).
        const uint halfWarpID = threadIdx.x >> 4;
        const uint halfWarpOffset = threadIdx.x & 0xF;
        const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;   // wasted lanes before the aligned start
        uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;        // round start down to multiple of 16
        uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
            ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);  // round end up to multiple of 16
        uint numIterations = endPos - startPos;
        uint outOffset = startPos + halfWarpOffset;
        uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
        for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
        {
            // Skip the padding lanes introduced by the alignment rounding.
            if( (outOffset >= sOffsets[halfWarpID]) &&
                (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
            {
                if(blockId < totalBlocks - 1 || outOffset < numElements)
                {
                    outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
                    outValues[outOffset] = sValues1[inOffset];
                }
            }
        }
    }
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
// Performs one pass of the key-value radix sort over bits
// [startbit, startbit + nbits):
//   1. radixSortBlocks   : per-block local sort into tempKeys/tempValues
//   2. findRadixOffsets  : per-block digit counts and block-local offsets
//   3. cudppScan         : prefix sum over the 16 x numBlocks2 digit counts
//   4. reorderData       : global scatter back into keys/values
// `flip` applies the float->uint transform on load; `unflip` reverses it on
// store (used on the first/last passes of float-key sorts).
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
                   uint *values,
                   uint *tempKeys,
                   uint *tempValues,
                   uint *counters,
                   uint *countersSum,
                   uint *blockOffsets,
                   CUDPPHandle scanPlan,
                   uint numElements,
                   bool manualCoalesce)
{
    const uint eltsPerBlock = NewRadixSort::CTA_SIZE * 4;   // radixSortBlocks: 4 elements/thread
    const uint eltsPerBlock2 = NewRadixSort::CTA_SIZE * 2;  // offsets/reorder: 2 elements/thread
    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);
    // 1D grids are limited to 65535 blocks, so each phase launches in chunks.
    // Only the chunk containing the final (possibly partial) block needs the
    // bounds-checked (fullBlocks == false) kernel variant.
    // BUGFIX: the previous test `blocks < max1DBlocks` missed the case where
    // the block count is an exact multiple of max1DBlocks, launching the
    // unchecked kernel over a partial last block (out-of-bounds accesses).
    const uint max1DBlocks = 65535;
    for (uint block = 0; block < numBlocks; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks - block);
        bool lastChunk = (block + blocks == numBlocks);
        if (lastChunk && !fullBlocks)
        {
            hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, block);
        }
        else
        {
            hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, block);
        }
    }
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        bool lastChunk = (block + blocks == numBlocks2);  // see BUGFIX note above
        if (lastChunk && !fullBlocks)
        {
            hipLaunchKernelGGL(( findRadixOffsets<startbit, false>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 3 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
        else
        {
            hipLaunchKernelGGL(( findRadixOffsets<startbit, true>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 3 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
    }
    // Prefix sum of all per-block digit counts (digit-major layout) so that
    // countersSum holds each digit's global write base per block.
    cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        bool lastChunk = (block + blocks == numBlocks2);  // see BUGFIX note above
        if (lastChunk && !fullBlocks)
        {
            if (manualCoalesce)
            {
                hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                    blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                    blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
        else
        {
            if (manualCoalesce)
            {
                hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                    blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                    blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
    }
    checkCudaError("radixSortStep");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
// Sorts arrays of at most 4 * CTA_SIZE key-value pairs with a single block
// performing one full 32-bit in-place block sort, then unflips float keys
// when `flip` is set.
template <bool flip>
void radixSortSingleBlock(uint *keys,
                          uint *values,
                          uint numElements)
{
    // Select the bounds-checked kernel variant when the array does not fill
    // the block's 4 * CTA_SIZE element capacity.
    const bool partial = (numElements % (NewRadixSort::CTA_SIZE * 4)) != 0;
    if (partial)
    {
        hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip>)
            , dim3(1), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
            (uint4*)keys, (uint4*)values,
            (uint4*)keys, (uint4*)values,
            numElements, 0);
    }
    else
    {
        hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip>)
            , dim3(1), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
            (uint4*)keys, (uint4*)values,
            (uint4*)keys, (uint4*)values,
            numElements, 0);
    }
    // One CTA covers up to 4 * CTA_SIZE elements, matching the size bound above.
    if (flip)
        hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(NewRadixSort::CTA_SIZE), 0, 0, keys, numElements);
    checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
// Kernel: sorts numElements <= WARP_SIZE key-value pairs with a parallel
// insertion sort.  Must be launched with exactly numElements threads in one
// block (each thread loads keys[threadIdx.x] unguarded).
// NOTE(review): there are no __syncthreads between the shared-memory steps
// (__SYNC is active only under device emulation), so this relies on
// warp-synchronous (lockstep) execution -- verify on architectures with
// independent thread scheduling.
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
                         uint *values,
                         uint numElements)
{
    __shared__ uint sKeys[NewRadixSort::WARP_SIZE];
    __shared__ uint sValues[NewRadixSort::WARP_SIZE];
    __shared__ uint sFlags[NewRadixSort::WARP_SIZE];   // marks threads that shifted right this round
    sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
    sValues[threadIdx.x] = values[threadIdx.x];
    __SYNC // emulation only
    // Insert element i into the sorted prefix [0, i): every thread left of i
    // holding a larger key shifts its pair one slot right in parallel, then
    // the leftmost shifted slot receives (key_i, val_i).
    for(uint i = 1; i < numElements; i++)
    {
        uint key_i = sKeys[i];
        uint val_i = sValues[i];
        sFlags[threadIdx.x] = 0;
        if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
        {
            uint temp = sKeys[threadIdx.x];
            uint tempval = sValues[threadIdx.x];
            sFlags[threadIdx.x] = 1;
            sKeys[threadIdx.x + 1] = temp;
            sValues[threadIdx.x + 1] = tempval;
            sFlags[threadIdx.x + 1] = 0;   // right neighbour was overwritten, not vacated
        }
        // Only the leftmost vacated slot still has its flag set; it takes
        // the inserted element.
        if(sFlags[threadIdx.x] == 1 )
        {
            sKeys[threadIdx.x] = key_i;
            sValues[threadIdx.x] = val_i;
        }
        __SYNC // emulation only
    }
    keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
    values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
// Top-level key-value radix sort.  Sorts `keys`/`values` in place over the
// low `keyBits` bits, in 4-bit LSD passes (radixSortStep); tempKeys,
// tempValues, counters, countersSum and blockOffsets are device scratch
// buffers.  Small inputs take single-warp or single-block fast paths.
// flipBits enables the float-bits-as-uint encoding: keys are flipped on the
// first pass and unflipped on the last.
// NOTE(review): the unflipping pass only runs when keyBits > 28, so calling
// with flipBits == true and keyBits <= 28 leaves keys in the flipped
// encoding -- presumably float sorts always use keyBits == 32; confirm.
extern "C"
void radixSort(uint *keys,
               uint *values,
               uint *tempKeys,
               uint *tempValues,
               uint *counters,
               uint *countersSum,
               uint *blockOffsets,
               CUDPPHandle scanPlan,
               uint numElements,
               uint keyBits,
               bool manualCoalesce = true,
               bool flipBits = false)
{
    // Fast path: at most one warp of elements -- parallel insertion sort.
    if(numElements <= NewRadixSort::WARP_SIZE)
    {
        if (flipBits)
            hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
        else
            hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements);
        checkCudaError("radixSortSingleWarp");
        return;
    }
    // Fast path: fits in one block's tile -- a single in-place block sort.
    if(numElements <= NewRadixSort::CTA_SIZE * 4)
    {
        if (flipBits)
            radixSortSingleBlock<true>(keys, values, numElements);
        else
            radixSortSingleBlock<false>(keys, values, numElements);
        return;
    }
    // General path: one 4-bit pass per template instantiation (startbit is a
    // compile-time parameter, hence the unrolled chain of calls).
    // flip float bits on the first pass, unflip on the last pass
    if (flipBits)
    {
        radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
                                         counters, countersSum, blockOffsets,
                                         scanPlan, numElements, manualCoalesce);
    }
    else
    { radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
                                        counters, countersSum, blockOffsets,
                                        scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 4)
    {
        radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
                                          counters, countersSum, blockOffsets,
                                          scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 8)
    {
        radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
                                          counters, countersSum, blockOffsets,
                                          scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 12)
    {
        radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
                                           counters, countersSum, blockOffsets,
                                           scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 16)
    {
        radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
                                           counters, countersSum, blockOffsets,
                                           scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 20)
    {
        radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
                                           counters, countersSum, blockOffsets,
                                           scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 24)
    {
        radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
                                           counters, countersSum, blockOffsets,
                                           scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 28)
    {
        if (flipBits) // last pass
        {
            radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
                                              counters, countersSum, blockOffsets,
                                              scanPlan, numElements, manualCoalesce);
        }
        else
        {
            radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
                                               counters, countersSum, blockOffsets,
                                               scanPlan, numElements, manualCoalesce);
        }
    }
    checkCudaError("radixSort");
}
// Sorts float keys by reinterpreting their bits as uints.  When the array
// may contain negative values, `negativeKeys` enables the bit "flip" that
// turns IEEE float bit patterns into a monotonically increasing unsigned
// encoding for the duration of the sort.
extern "C"
void radixSortFloatKeys(float *keys,
                        uint *values,
                        float *tempKeys,
                        uint *tempValues,
                        uint *counters,
                        uint *countersSum,
                        uint *blockOffsets,
                        CUDPPHandle scanPlan,
                        uint numElements,
                        uint keyBits,
                        bool manualCoalesce,
                        bool negativeKeys)
{
    uint *uintKeys = (uint*)keys;
    uint *uintTempKeys = (uint*)tempKeys;
    radixSort(uintKeys, values, uintTempKeys, tempValues, counters,
              countersSum, blockOffsets, scanPlan, numElements, keyBits,
              manualCoalesce, negativeKeys);
    checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
// Sorts the block's 4*CTA_SIZE keys (no values) by bits
// [startbit, startbit + nbits), one bit per iteration (stable LSD split).
// Each thread holds 4 keys in registers; sMem1 (dynamic shared memory, at
// least 4 * CTA_SIZE uints) stages the ranked scatter/gather.
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
    extern __shared__ uint sMem1[];
    for(uint shift = startbit; shift < (startbit + nbits); ++shift)
    {
        // Predicate is the NEGATED bit so 0-bits rank first (ascending sort).
        uint4 lsb;
        lsb.x = !((key.x >> shift) & 0x1);
        lsb.y = !((key.y >> shift) & 0x1);
        lsb.z = !((key.z >> shift) & 0x1);
        lsb.w = !((key.w >> shift) & 0x1);
        // FIX: use the configured CTA size rather than a hard-coded 256, for
        // consistency with radixSortBlock (the key-value variant) and so the
        // code stays correct if NewRadixSort::CTA_SIZE changes.
        uint4 r = rank4<NewRadixSort::CTA_SIZE>(lsb);
#if 1
        // This arithmetic strides the ranks across 4 CTA_SIZE regions
        sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
        sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
        sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
        sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
        __syncthreads();
        // The above allows us to read without 4-way bank conflicts:
        key.x = sMem1[threadIdx.x];
        key.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
        key.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
        key.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
#else
        // NOTE(review): dead branch (#if 1 above).  If enabled it would not
        // compile: `sMem` is not declared in this function's scope.
        sMem1[r.x] = key.x;
        sMem1[r.y] = key.y;
        sMem1[r.z] = key.z;
        sMem1[r.w] = key.w;
        __syncthreads();
        // This access has 4-way bank conflicts
        key = sMem[threadIdx.x];
#endif
        __syncthreads();
    }
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts each block of data independently in shared
// memory.
//
// Done in two separate stages. This stage calls radixSortBlock on each block
// independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//----------------------------------------------------------------------------
// Kernel (keys-only variant): each block independently sorts its
// 4*CTA_SIZE-element tile of keys by bits [startbit, startbit + nbits) from
// keysIn into keysOut.  `startBlock` offsets blockIdx.x when the grid is
// launched in chunks.  When fullBlocks == false, the tail tile is padded
// with UINT_MAX keys (which sort last) and only in-range elements are
// written back.  `flip` applies the float->uint bit transform on load.
template<uint nbits, uint startbit, bool fullBlocks, bool flip>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint startBlock)
{
    // Dynamic shared allocation; radixSortBlockKeysOnly and scan4 access the
    // same memory through their own extern __shared__ declarations.
    extern __shared__ uint4 sMem[];
    uint4 key;
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;  // uint4 index
    const uint idx = i << 2;                            // scalar element index
    // handle non-full last block if array is not multiple of 1024 numElements
    if (!fullBlocks && idx+3 >= numElements)
    {
        if (idx >= numElements)
        {
            // Entire uint4 is out of range: pure padding.
            key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
        }
        else
        {
            // for non-full block, we handle uint1 values instead of uint4
            uint *keys1 = (uint*)keysIn;
            key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
            key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
            key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
            key.w = UINT_MAX;   // idx+3 >= numElements here by construction
        }
    }
    else
    {
        // Fully in-range uint4 load.
        key = keysIn[i];
        if (flip)
        {
            key.x = floatFlip<flip>(key.x);
            key.y = floatFlip<flip>(key.y);
            key.z = floatFlip<flip>(key.z);
            key.w = floatFlip<flip>(key.w);
        }
    }
    __syncthreads();
    radixSortBlockKeysOnly<nbits, startbit>(key);
    __syncthreads();
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && idx+3 >= numElements)
    {
        if (idx < numElements)
        {
            // for non-full block, we handle uint1 values instead of uint4
            uint *keys1 = (uint*)keysOut;
            keys1[idx] = key.x;
            if (idx + 1 < numElements)
            {
                keys1[idx + 1] = key.y;
                if (idx + 2 < numElements)
                {
                    keys1[idx + 2] = key.z;
                }
            }
        }
    }
    else
    {
        keysOut[i] = key;
    }
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. Depends on NewRadixSort::CTA_SIZE being 16 * number of radices
// (i.e. 16 * 2^nbits).
//
// This is quite fast and fully coalesces memory writes, albeit by doing extra
// (potentially wasted) work allocating threads to portions of memory that are
// not written out. Significantly faster than the generic approach on G80.
//----------------------------------------------------------------------------
// Kernel (keys-only variant): global scatter of block-sorted keys into their
// final positions for this 4-bit digit pass.  Inputs per block:
//   offsets[d*totalBlocks + blockId]      = global base of digit d for this block
//   blockOffsets[blockId*16 + d]          = digit d's start within the block
//   sizes[d*totalBlocks + blockId]        = digit d's count in the block
// Each thread stages 2 keys in shared memory.  When manualCoalesce is true,
// each half-warp writes out one digit's run with 16-aligned start/end
// positions so global writes coalesce; otherwise each thread writes its own
// 2 keys directly.  `unflip` applies floatUnflip on store.
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip>
__global__ void reorderDataKeysOnly(uint *outKeys,
                                    uint2 *keys,
                                    uint *blockOffsets,
                                    uint *offsets,
                                    uint *sizes,
                                    uint numElements,
                                    uint totalBlocks,
                                    uint startBlock)
{
    __shared__ uint2 sKeys2[NewRadixSort::CTA_SIZE];
    __shared__ uint sOffsets[16];
    __shared__ uint sBlockOffsets[16];
    uint *sKeys1 = (uint*)sKeys2;   // scalar view of the staged keys
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && (((i + 1) << 1) > numElements))
    {
        uint *keys1 = (uint*)keys;
        uint j = i << 1;
        sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
        j++;
        sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
    }
    else
    {
        sKeys2[threadIdx.x] = keys[i];
    }
    if (!manualCoalesce)
    {
        // Simple path: each thread scatters its own 2 keys.
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
        }
        __syncthreads();
        // Destination = digit's global base + position within the digit's run.
        uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
        uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
        }
        radix = (sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE] >> startbit) & 0xF;
        globalOffset = sOffsets[radix] + threadIdx.x + NewRadixSort::CTA_SIZE - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE]);
        }
    }
    else
    {
        __shared__ uint sSizes[16];
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
            sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
        }
        __syncthreads();
        // 1 half-warp is responsible for writing out all values for 1 radix.
        // Loops if there are more than 16 values to be written out.
        // All start indices are rounded down to the nearest multiple of 16, and
        // all end indices are rounded up to the nearest multiple of 16.
        // Thus it can do extra work if the start and end indices are not multiples of 16
        // This is bounded by a factor of 2 (it can do 2X more work at most).
        const uint halfWarpID = threadIdx.x >> 4;
        const uint halfWarpOffset = threadIdx.x & 0xF;
        const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;   // wasted lanes before the aligned start
        uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;        // round start down to multiple of 16
        uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
            ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);  // round end up to multiple of 16
        uint numIterations = endPos - startPos;
        uint outOffset = startPos + halfWarpOffset;
        uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
        for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
        {
            // Skip the padding lanes introduced by the alignment rounding.
            if( (outOffset >= sOffsets[halfWarpID]) &&
                (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
            {
                if(blockId < totalBlocks - 1 || outOffset < numElements)
                {
                    outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
                }
            }
        }
    }
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
// Performs one pass of the keys-only radix sort over bits
// [startbit, startbit + nbits):
//   1. radixSortBlocksKeysOnly : per-block local sort into tempKeys
//   2. findRadixOffsets        : per-block digit counts and local offsets
//   3. cudppScan               : prefix sum over the 16 x numBlocks2 counts
//   4. reorderDataKeysOnly     : global scatter back into keys
// `flip` applies the float->uint transform on load; `unflip` reverses it on
// store (used on the first/last passes of float-key sorts).
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
                           uint *tempKeys,
                           uint *counters,
                           uint *countersSum,
                           uint *blockOffsets,
                           CUDPPHandle scanPlan,
                           uint numElements,
                           bool manualCoalesce)
{
    const uint eltsPerBlock = NewRadixSort::CTA_SIZE * 4;   // block sort: 4 elements/thread
    const uint eltsPerBlock2 = NewRadixSort::CTA_SIZE * 2;  // offsets/reorder: 2 elements/thread
    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);
    // 1D grids are limited to 65535 blocks, so each phase launches in chunks.
    // Only the chunk containing the final (possibly partial) block needs the
    // bounds-checked (fullBlocks == false) kernel variant.
    // BUGFIX: the previous test `blocks < max1DBlocks` missed the case where
    // the block count is an exact multiple of max1DBlocks, launching the
    // unchecked kernel over a partial last block (out-of-bounds accesses).
    const uint max1DBlocks = 65535;
    for (uint block = 0; block < numBlocks; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks - block);
        bool lastChunk = (block + blocks == numBlocks);
        if (lastChunk && !fullBlocks)
        {
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint4*)tempKeys, (uint4*)keys, numElements, block);
        }
        else
        {
            hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 4 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint4*)tempKeys, (uint4*)keys, numElements, block);
        }
    }
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        bool lastChunk = (block + blocks == numBlocks2);  // see BUGFIX note above
        if (lastChunk && !fullBlocks)
        {
            hipLaunchKernelGGL(( findRadixOffsets<startbit, false>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 3 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
        else
        {
            hipLaunchKernelGGL(( findRadixOffsets<startbit, true>)
                , dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 3 * NewRadixSort::CTA_SIZE * sizeof(uint), 0,
                (uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
    }
    // Prefix sum of all per-block digit counts (digit-major layout) so that
    // countersSum holds each digit's global write base per block.
    cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        bool lastChunk = (block + blocks == numBlocks2);  // see BUGFIX note above
        if (lastChunk && !fullBlocks)
        {
            if (manualCoalesce)
            {
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
                    numElements, numBlocks2, block);
            }
            else
            {
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
                    numElements, numBlocks2, block);
            }
        }
        else
        {
            if (manualCoalesce)
            {
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip>), dim3(blocks), dim3(NewRadixSort::CTA_SIZE), 0, 0,
                    keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
    }
    checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
// Sorts up to 4 * CTA_SIZE keys in place using a single thread block.
// When 'flip' is set, keys are bit-transformed for float ordering inside the
// sort kernel and un-transformed afterwards.
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
                                  uint numElements)
{
    const size_t sharedBytes = 4 * NewRadixSort::CTA_SIZE * sizeof(uint);

    // The kernel's fullBlocks flag is a compile-time template parameter, so
    // two otherwise identical launches are needed.
    if (numElements % (NewRadixSort::CTA_SIZE * 4) != 0)
    {
        hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip>)
            , dim3(1), dim3(NewRadixSort::CTA_SIZE), sharedBytes, 0,
            (uint4*)keys, (uint4*)keys, numElements, 0);
    }
    else
    {
        hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip>)
            , dim3(1), dim3(NewRadixSort::CTA_SIZE), sharedBytes, 0,
            (uint4*)keys, (uint4*)keys, numElements, 0);
    }

    // Undo the float-ordering bit flip applied when the keys were loaded.
    if (flip)
        hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(NewRadixSort::CTA_SIZE), 0, 0, keys, numElements);

    checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
// Sorts numElements keys (numElements <= WARP_SIZE) with a single warp,
// using a parallel insertion-sort-style scheme in shared memory.  Launched
// with one block of exactly numElements threads; sorts 'keys' in place.
// NOTE(review): the non-emulation build has no barriers (__SYNC expands to
// nothing) and so relies on implicit warp-synchronous execution — a
// pre-Volta assumption; confirm before running on newer architectures.
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
uint numElements)
{
__shared__ uint sKeys[NewRadixSort::WARP_SIZE];
__shared__ uint sFlags[NewRadixSort::WARP_SIZE];
// Load one key per thread, applying the float-ordering transform if asked.
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
__SYNC // emulation only
// Insert element i into the (already sorted) prefix [0, i) each iteration.
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
sFlags[threadIdx.x] = 0;
// Threads holding a prefix element larger than key_i shift it one slot
// right and flag their own slot for overwrite.
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sFlags[threadIdx.x + 1] = 0;
}
// The leftmost shifted slot receives the inserted key.
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
}
__SYNC // emulation only
}
// Write back, undoing the float transform.
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
// Keys-only radix sort entry point.  Sorts 'keys' in place, 4 bits per
// pass, using tempKeys/counters/countersSum/blockOffsets as device scratch
// and cudppScan (via scanPlan) for the prefix sums.  Small inputs are
// routed to single-warp / single-block specializations.  Because startbit
// is a compile-time template parameter of radixSortStepKeysOnly, the pass
// chain below is written out explicitly instead of looped.
// flipBits enables the float bit transform (flip on the first pass,
// unflip on the last) so float keys sort in numeric order.
extern "C"
void radixSortKeysOnly(uint *keys,
uint *tempKeys,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool manualCoalesce = true,
bool flipBits = false)
{
// Tiny input: one warp suffices.
if(numElements <= NewRadixSort::WARP_SIZE)
{
if (flipBits)
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
else
hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
// Small input: one block suffices.
if(numElements <= NewRadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlockKeysOnly<true>(keys, numElements);
else
radixSortSingleBlockKeysOnly<false>(keys, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
else
{
radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
// Remaining passes run only if the key width requires them.
if (keyBits > 4)
{
radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 8)
{
radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 12)
{
radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 16)
{
radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 20)
{
radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 24)
{
radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
else
{
radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
}
checkCudaError("radixSortKeysOnly");
}
// C-linkage wrapper for sorting float keys (no values).  Floats are sorted
// by reinterpreting their bit patterns as uints; when negativeKeys is true
// the bits are flipped/unflipped inside the sort so negative values order
// correctly (IEEE-754 sign handling).
extern "C"
void radixSortFloatKeysOnly(float *keys,
                            float *tempKeys,
                            uint *counters,
                            uint *countersSum,
                            uint *blockOffsets,
                            CUDPPHandle scanPlan,
                            uint numElements,
                            uint keyBits,
                            bool manualCoalesce,
                            bool negativeKeys)
{
    // Delegate to the uint path; negativeKeys maps onto its flipBits flag.
    radixSortKeysOnly((uint*)keys, (uint*)tempKeys,
                      counters, countersSum, blockOffsets,
                      scanPlan, numElements, keyBits,
                      manualCoalesce, negativeKeys);
    checkCudaError("radixSortFloatKeys");
}
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
CUDA Radix Sort
See the paper:
"Designing Efficient Sorting Algorithms for Manycore GPUs",
Nadathur Satish, Mark Harris, and Michael Garland,
to appear in the proceedings of IEEE International Parallel &
Distributed Processing Symposium 2009.
*/
#include "new_radixsort.h"
#include <string.h>
#include <stdio.h>
#define CUDPP_STATIC_LIB
#include "cudpp/cudpp.h"
#include <algorithm>
#ifdef __DEVICE_EMULATION__
#define __SYNC __syncthreads();
#else
#define __SYNC
#endif
typedef unsigned int uint;
// Debug-only error check: synchronizes the device and aborts with a message
// on any outstanding CUDA error.  Compiles to a no-op in release builds so
// it can be called after every launch without cost.
//
// msg - context string included in the error report (typically the name of
//       the operation just issued).
extern "C"
void checkCudaError(const char *msg)
{
#if defined(_DEBUG) || defined(DEBUG)
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.  The sync surfaces asynchronous execution errors
    // from previously launched kernels.
    cudaError_t e = cudaDeviceSynchronize();
    if( e != cudaSuccess )
    {
        fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
        exit(EXIT_FAILURE);
    }
    // Also pick up launch-configuration errors recorded without a sync.
    e = cudaGetLastError();
    if( e != cudaSuccess )
    {
        fprintf(stderr, "CUDA Error %s : %s\n", msg, cudaGetErrorString(e));
        exit(EXIT_FAILURE);
    }
#endif
}
// ================================================================================================
// flip a float for sorting
// finds SIGN of fp number.
// if it's 1 (negative float), it flips all bits
// if it's 0 (positive float), it flips the sign only
// ================================================================================================
// Bijectively remaps a float's bit pattern so that unsigned integer
// comparison matches float ordering: a set sign bit means the value was
// negative, so all bits are inverted; otherwise only the sign bit is
// inverted.  With doFlip == false this is the identity (plain uint keys).
template <bool doFlip>
__device__ uint floatFlip(uint f)
{
    if (!doFlip)
        return f;
    // Sign bit set -> XOR with all ones; sign bit clear -> XOR sign bit only.
    const uint mask = (f & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u;
    return f ^ mask;
}
// ================================================================================================
// flip a float back (invert FloatFlip)
// signed was flipped from above, so:
// if sign is 1 (negative), it flips the sign bit back
// if sign is 0 (positive), it flips all bits back
// ================================================================================================
// Inverse of floatFlip: restores the original float bit pattern.  In the
// flipped encoding an MSB of 1 marks an originally non-negative value (only
// its sign bit was toggled); an MSB of 0 marks an originally negative value
// (all bits were inverted).  Identity when doFlip == false.
template <bool doFlip>
__device__ uint floatUnflip(uint f)
{
    if (!doFlip)
        return f;
    const uint mask = (f & 0x80000000u) ? 0x80000000u : 0xFFFFFFFFu;
    return f ^ mask;
}
// Applies floatFlip<true> to every element of 'values'.  Each thread
// processes four elements spaced blockDim.x apart, so a launch covers
// 4 * blockDim.x elements per block; out-of-range indices are skipped.
__global__ void flipFloats(uint *values, uint numValues)
{
    uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
    #pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (index < numValues)
            values[index] = floatFlip<true>(values[index]);
        index += blockDim.x;
    }
}
// Applies floatUnflip<true> to every element of 'values'; the exact inverse
// of flipFloats.  Same four-elements-per-thread layout and bounds guards.
__global__ void unflipFloats(uint *values, uint numValues)
{
    uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x;
    #pragma unroll
    for (int pass = 0; pass < 4; ++pass)
    {
        if (index < numValues)
            values[index] = floatUnflip<true>(values[index]);
        index += blockDim.x;
    }
}
//----------------------------------------------------------------------------
// Scans each warp in parallel ("warp-scan"), one element per thread.
// uses 2 numElements of shared memory per thread (64 numElements per warp)
//----------------------------------------------------------------------------
// Warp-scan of one value per thread.  Returns this thread's EXCLUSIVE
// prefix sum within its warp.  sData must hold 2 * WARP_SIZE entries per
// warp; the lower half of each warp's segment is zeroed so the offset
// reads below never cross into the previous warp.  maxlevel selects how
// many doubling steps run (offset 2^k for k <= maxlevel), i.e. the scan
// width is 2^(maxlevel+1) elements.
// NOTE(review): the non-emulation path has no barriers between dependent
// shared-memory steps — it relies on implicit warp-synchronous execution
// (pre-Volta assumption); confirm on architectures with independent
// thread scheduling.
template<class T, int maxlevel>
__device__ T scanwarp(T val, T* sData)
{
// The following is the same as 2 * NewRadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (NewRadixSort::WARP_SIZE - 1))
int idx = 2 * threadIdx.x - (threadIdx.x & (NewRadixSort::WARP_SIZE - 1));
sData[idx] = 0;
idx += NewRadixSort::WARP_SIZE;
sData[idx] = val; __SYNC
#ifdef __DEVICE_EMULATION__
T t = sData[idx - 1]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 2]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 4]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 8]; __SYNC
sData[idx] += t; __SYNC
t = sData[idx - 16]; __SYNC
sData[idx] += t; __SYNC
#else
if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; } __SYNC
if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; } __SYNC
if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; } __SYNC
if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; } __SYNC
if (4 <= maxlevel) { sData[idx] += sData[idx -16]; } __SYNC
#endif
return sData[idx] - val; // convert inclusive -> exclusive
}
//----------------------------------------------------------------------------
// scan4 scans 4*NewNewRadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
// Block-wide exclusive scan of 4 values per thread (4 * CTA_SIZE total).
// Steps: (1) serial scan of the thread's own 4 values, (2) warp-scan of the
// per-thread totals, (3) the last lane of each warp publishes its warp
// total, which the first warp then scans, (4) the warp base is added back.
// Requires dynamic shared memory 'ptr' — the launches in this file pass
// 4 * CTA_SIZE * sizeof(uint).
__device__ uint4 scan4(uint4 idata)
{
extern __shared__ uint ptr[];
uint idx = threadIdx.x;
uint4 val4 = idata;
// Serial inclusive scan of this thread's 4 inputs; 'val' is their total.
uint sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
uint val = val4.w + sum[2];
val = scanwarp<uint, 4>(val, ptr);
__syncthreads();
// Last lane of each warp writes the warp's inclusive total to ptr[warpId].
if ((idx & (NewRadixSort::WARP_SIZE - 1)) == NewRadixSort::WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
// First warp scans the (up to 8) warp totals; maxlevel 2 covers 8 entries.
#ifndef __DEVICE_EMULATION__
if (idx < NewRadixSort::WARP_SIZE)
#endif
{
ptr[idx] = scanwarp<uint, 2>(ptr[idx], ptr);
}
__syncthreads();
// Add the scanned warp base, then expand back to 4 exclusive results.
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
//----------------------------------------------------------------------------
//
// Rank is the core of the radix sort loop. Given a predicate, it
// computes the output position for each thread in an ordering where all
// True threads come first, followed by all False threads.
//
// This version handles 4 predicates per thread; hence, "rank4".
//
//----------------------------------------------------------------------------
// Given 4 predicate bits per thread, computes each element's output rank in
// a stable partition: all "true" elements first (in order), then all
// "false" elements.  'address' is the exclusive scan of the predicates,
// i.e. the rank among the true elements; 'numtrue' (total trues, published
// by the last thread) offsets the false elements past them.
template <int ctasize>
__device__ uint4 rank4(uint4 preds)
{
uint4 address = scan4(preds);
__shared__ uint numtrue;
if (threadIdx.x == ctasize-1)
{
numtrue = address.w + preds.w;
}
__syncthreads();
uint4 rank;
// idx is this thread's base position among the 4*ctasize elements.
uint idx = threadIdx.x << 2;
// False elements land at numtrue + (original position - trues before them).
rank.x = (preds.x) ? address.x : numtrue + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue + idx + 3 - address.w;
return rank;
}
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
// In-block LSD radix sort of 4 key/value pairs per thread on bits
// [startbit, startbit + nbits), one bit per iteration via rank4 (stable
// split).  Requires dynamic shared memory of 4 * CTA_SIZE uints (sMem1).
// NOTE(review): the floatFlip template parameter is unused in this body
// (callers pass false and flip keys before calling); the disabled #else
// branch references an undeclared 'sMem' and would not compile if enabled.
template<uint nbits, uint startbit, bool floatFlip>
__device__ void radixSortBlock(uint4 &key, uint4 &value)
{
extern __shared__ uint sMem1[];
for(uint shift = startbit; shift < (startbit + nbits); ++shift)
{
// Predicate is "bit is zero" so zeros sort before ones (ascending).
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r = rank4<NewRadixSort::CTA_SIZE>(lsb);
#if 1
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem1[threadIdx.x];
key.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
key.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
key.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
__syncthreads();
// Values are scattered/gathered through the same buffer, so the keys
// must be read out (above) before this overwrite.
sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = value.x;
sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = value.y;
sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = value.z;
sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = value.w;
__syncthreads();
value.x = sMem1[threadIdx.x];
value.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
value.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
value.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
#else
sMem1[r.x] = key.x;
sMem1[r.y] = key.y;
sMem1[r.z] = key.z;
sMem1[r.w] = key.w;
__syncthreads();
// This access has 4-way bank conflicts
key = sMem[threadIdx.x];
__syncthreads();
sMem1[r.x] = value.x;
sMem1[r.y] = value.y;
sMem1[r.z] = value.z;
sMem1[r.w] = value.w;
__syncthreads();
value = sMem[threadIdx.x];
#endif
__syncthreads();
}
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts each block of data independently in shared
// memory.
//
// Done in two separate stages. This stage calls radixSortBlock on each block
// independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//----------------------------------------------------------------------------
// Sorts each tile of 4 * CTA_SIZE key/value pairs independently on bits
// [startbit, startbit + nbits).  Each thread owns one uint4 of keys and one
// of values; 'startBlock' offsets blockIdx.x so grids larger than the 1D
// limit can be issued in chunks.  With fullBlocks == false the trailing
// partial tile is padded with UINT_MAX (which sorts last and is dropped on
// output).  Requires 4 * CTA_SIZE * sizeof(uint) dynamic shared memory.
template<uint nbits, uint startbit, bool fullBlocks, bool flip>
__global__ void radixSortBlocks(uint4* keysOut, uint4* valuesOut,
uint4* keysIn, uint4* valuesIn,
uint numElements, uint startBlock)
{
extern __shared__ uint4 sMem[];
uint4 key, value;
const uint blockId = blockIdx.x + startBlock;
const uint i = blockId * blockDim.x + threadIdx.x;
const uint idx = i << 2;
// handle non-full last block if array is not multiple of 1024 numElements
if (!fullBlocks && idx+3 >= numElements)
{
if (idx >= numElements)
{
key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
}
else
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysIn;
uint *values1 = (uint*)valuesIn;
// idx < numElements is guaranteed here; the per-lane guards cover y/z.
key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
key.w = UINT_MAX;
value.x = (idx < numElements) ? values1[idx] : UINT_MAX;
value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX;
value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX;
value.w = UINT_MAX;
}
}
else
{
// Full tile: vectorized load, then optional float bit transform.
key = keysIn[i];
value = valuesIn[i];
if (flip)
{
key.x = floatFlip<flip>(key.x);
key.y = floatFlip<flip>(key.y);
key.z = floatFlip<flip>(key.z);
key.w = floatFlip<flip>(key.w);
}
}
__syncthreads();
radixSortBlock<nbits, startbit, false>(key, value);
__syncthreads();
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && idx+3 >= numElements)
{
if (idx < numElements)
{
// for non-full block, we handle uint1 values instead of uint4
uint *keys1 = (uint*)keysOut;
uint *values1 = (uint*)valuesOut;
keys1[idx] = key.x;
values1[idx] = value.x;
if (idx + 1 < numElements)
{
keys1[idx + 1] = key.y;
values1[idx + 1] = value.y;
if (idx + 2 < numElements)
{
keys1[idx + 2] = key.z;
values1[idx + 2] = value.z;
}
}
}
}
else
{
keysOut[i] = key;
valuesOut[i] = value;
}
}
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. Writes the radix
// counts to counters, and the starting offsets to blockOffsets.
//----------------------------------------------------------------------------
// For each block of 2 * CTA_SIZE already-block-sorted keys, extracts the
// 4-bit digit at 'startbit' and computes, for each of the 16 radices:
//   - blockOffsets[blockId*16 + r]: the radix's first position in the block
//   - counters[r*totalBlocks + blockId]: how many keys have that radix
// The radix-major counters layout makes the subsequent global scan produce
// per-(radix, block) output offsets.  Shared memory (dynamic): CTA_SIZE
// uint2 for the digits plus 16 uints for the start pointers.
template<uint startbit, bool fullBlocks>
__global__ void findRadixOffsets(uint2 *keys,
uint *counters,
uint *blockOffsets,
uint numElements,
uint totalBlocks,
uint startBlock)
{
extern __shared__ uint2 sMem2[];
uint2 *sRadix2 = (uint2*)sMem2;
uint *sRadix1 = (uint*) sRadix2;
uint *sStartPointers = (uint*)(sMem2 + NewRadixSort::CTA_SIZE);
uint blockId = blockIdx.x + startBlock;
const uint i = blockId * blockDim.x + threadIdx.x;
uint2 radix2;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && ((i + 1) << 1 ) > numElements )
{
// handle uint1 rather than uint2 for non-full blocks
uint *keys1 = (uint*)keys;
uint j = i << 1;
radix2.x = (j < numElements) ? keys1[j] : UINT_MAX;
j++;
radix2.y = (j < numElements) ? keys1[j] : UINT_MAX;
}
else
{
radix2 = keys[i];
}
// Store just the 4-bit digit of each key.
sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(threadIdx.x < 16)
{
sStartPointers[threadIdx.x] = 0;
}
__syncthreads();
// A digit's start is where it differs from its left neighbor (keys within
// the block are already sorted, so each digit occupies one contiguous run).
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x;
}
if(sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE] != sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1])
{
sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE]] = threadIdx.x + NewRadixSort::CTA_SIZE;
}
__syncthreads();
if(threadIdx.x < 16)
{
blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x];
}
__syncthreads();
// Compute the sizes of each block.
// Run length of each digit = next run's start - this run's start; the last
// thread closes off the final run.
if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) )
{
sStartPointers[sRadix1[threadIdx.x - 1]] =
threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]];
}
if(sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE] != sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1] )
{
sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1]] =
threadIdx.x + NewRadixSort::CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + NewRadixSort::CTA_SIZE - 1]];
}
if(threadIdx.x == NewRadixSort::CTA_SIZE - 1)
{
sStartPointers[sRadix1[2 * NewRadixSort::CTA_SIZE - 1]] =
2 * NewRadixSort::CTA_SIZE - sStartPointers[sRadix1[2 * NewRadixSort::CTA_SIZE - 1]];
}
__syncthreads();
if(threadIdx.x < 16)
{
counters[threadIdx.x * totalBlocks + blockId] =
sStartPointers[threadIdx.x];
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. Depends on NewRadixSort::CTA_SIZE being 16 * number of radices
// (i.e. 16 * 2^nbits).
//
// This is quite fast and fully coalesces memory writes, albeit by doing extra
// (potentially wasted) work allocating threads to portions of memory that are
// not written out. Significantly faster than the generic approach on G80.
//----------------------------------------------------------------------------
// Scatters the block-sorted key/value pairs to their globally sorted
// positions using the scanned radix offsets.  Per element:
//   dst = offsets[radix of element] + (position in block - blockOffsets[radix])
// Two strategies: the simple path computes one scattered store per element;
// the manualCoalesce path assigns one half-warp per radix and rounds the
// write ranges to 16-element boundaries so stores are contiguous (doing at
// most 2x extra masked-out work).  'unflip' reverses the float bit
// transform on output.  'sizes' receives the per-(radix, block) counts
// written by findRadixOffsets into 'counters'.
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip>
__global__ void reorderData(uint *outKeys,
uint *outValues,
uint2 *keys,
uint2 *values,
uint *blockOffsets,
uint *offsets,
uint *sizes,
uint numElements,
uint totalBlocks,
uint startBlock)
{
__shared__ uint2 sKeys2[NewRadixSort::CTA_SIZE];
__shared__ uint2 sValues2[NewRadixSort::CTA_SIZE];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
uint *sKeys1 = (uint*)sKeys2;
uint *sValues1 = (uint*)sValues2;
const uint blockId = blockIdx.x + startBlock;
const uint i = blockId * blockDim.x + threadIdx.x;
// handle non-full last block if array is not multiple of 1024 numElements
if(!fullBlocks && (((i + 1) << 1) > numElements))
{
uint *keys1 = (uint*)keys;
uint *values1 = (uint*)values;
uint j = i << 1;
// Out-of-range slots are padded with UINT_MAX (never written out below).
sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX;
j++;
sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX;
}
else
{
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
}
if (!manualCoalesce)
{
// Simple path: each thread scatters its two elements directly.
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
outValues[globalOffset] = sValues1[threadIdx.x];
}
radix = (sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + NewRadixSort::CTA_SIZE - sBlockOffsets[radix];
if (fullBlocks || globalOffset < numElements)
{
outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE]);
outValues[globalOffset] = sValues1[threadIdx.x + NewRadixSort::CTA_SIZE];
}
}
else
{
__shared__ uint sSizes[16];
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
}
__syncthreads();
// 1 half-warp is responsible for writing out all values for 1 radix.
// Loops if there are more than 16 values to be written out.
// All start indices are rounded down to the nearest multiple of 16, and
// all end indices are rounded up to the nearest multiple of 16.
// Thus it can do extra work if the start and end indices are not multiples of 16
// This is bounded by a factor of 2 (it can do 2X more work at most).
const uint halfWarpID = threadIdx.x >> 4;
const uint halfWarpOffset = threadIdx.x & 0xF;
const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;
uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);
uint numIterations = endPos - startPos;
uint outOffset = startPos + halfWarpOffset;
uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
{
// Skip the rounded-in positions before the radix's true start and past
// its true end; also clamp the final block to numElements.
if( (outOffset >= sOffsets[halfWarpID]) &&
(inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
{
if(blockId < totalBlocks - 1 || outOffset < numElements)
{
outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
outValues[outOffset] = sValues1[inOffset];
}
}
}
}
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
// Performs one radix pass over 'keys'/'values' on bits
// [startbit, startbit + nbits):
//   1. radixSortBlocks  - locally sorts tiles of 4*CTA_SIZE into temp*.
//   2. findRadixOffsets - per-block radix counts and block-local offsets.
//   3. cudppScan        - exclusive scan of the counts -> global offsets.
//   4. reorderData      - scatters elements back to keys/values.
// Grids beyond the 65535 1D-grid limit are issued in chunks via the
// 'startBlock' kernel argument.  'flip'/'unflip' apply/remove the float
// ordering transform on the first/last pass of a float sort.
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStep(uint *keys,
                   uint *values,
                   uint *tempKeys,
                   uint *tempValues,
                   uint *counters,
                   uint *countersSum,
                   uint *blockOffsets,
                   CUDPPHandle scanPlan,
                   uint numElements,
                   bool manualCoalesce)
{
    const uint eltsPerBlock = NewRadixSort::CTA_SIZE * 4;
    const uint eltsPerBlock2 = NewRadixSort::CTA_SIZE * 2;

    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);

    const uint max1DBlocks = 65535;

    // BUGFIX: the guarded ("fullBlocks = false") kernel variants used to be
    // selected with "blocks < max1DBlocks && !fullBlocks".  When numBlocks
    // was an exact multiple of 65535 and the data was not tile-aligned, the
    // final chunk had blocks == max1DBlocks, so the unguarded variant ran and
    // the last block read/wrote past the end of the arrays.  The guarded
    // variants are correct for every chunk, so select them purely on
    // !fullBlocks.
    for (uint block = 0; block < numBlocks; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks - block);

        if (!fullBlocks)
        {
            radixSortBlocks<nbits, startbit, false, flip>
                <<<blocks, NewRadixSort::CTA_SIZE, 4 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, block);
        }
        else
        {
            radixSortBlocks<nbits, startbit, true, flip>
                <<<blocks, NewRadixSort::CTA_SIZE, 4 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint4*)tempKeys, (uint4*)tempValues, (uint4*)keys, (uint4*)values, numElements, block);
        }
    }

    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);

        // Note: fullBlocks tests alignment to the 4*CTA_SIZE tile; when it
        // holds, the data is also a multiple of the 2*CTA_SIZE tile used here.
        if (!fullBlocks)
        {
            findRadixOffsets<startbit, false>
                <<<blocks, NewRadixSort::CTA_SIZE, 3 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
        else
        {
            findRadixOffsets<startbit, true>
                <<<blocks, NewRadixSort::CTA_SIZE, 3 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
    }

    // Radix-major layout (16 radices x numBlocks2) turns this single scan
    // into per-(radix, block) global output offsets.
    cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);

    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);

        if (!fullBlocks)
        {
            if (manualCoalesce)
            {
                reorderData<startbit, false, true, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                     blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                reorderData<startbit, false, false, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                     blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
        else
        {
            if (manualCoalesce)
            {
                reorderData<startbit, true, true, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                     blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                reorderData<startbit, true, false, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, values, (uint2*)tempKeys, (uint2*)tempValues,
                     blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
    }

    checkCudaError("radixSortStep");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
// Sorts up to 4 * CTA_SIZE key/value pairs in place using a single thread
// block.  When 'flip' is set, keys are bit-transformed for float ordering
// inside the sort kernel and un-transformed afterwards.
template <bool flip>
void radixSortSingleBlock(uint *keys,
                          uint *values,
                          uint numElements)
{
    const size_t sharedBytes = 4 * NewRadixSort::CTA_SIZE * sizeof(uint);

    // The kernel's fullBlocks flag is a compile-time template parameter, so
    // two otherwise identical launches are needed.
    if (numElements % (NewRadixSort::CTA_SIZE * 4) != 0)
    {
        radixSortBlocks<32, 0, false, flip>
            <<<1, NewRadixSort::CTA_SIZE, sharedBytes>>>
            ((uint4*)keys, (uint4*)values,
             (uint4*)keys, (uint4*)values,
             numElements, 0);
    }
    else
    {
        radixSortBlocks<32, 0, true, flip>
            <<<1, NewRadixSort::CTA_SIZE, sharedBytes>>>
            ((uint4*)keys, (uint4*)values,
             (uint4*)keys, (uint4*)values,
             numElements, 0);
    }

    // Undo the float-ordering bit flip applied when the keys were loaded.
    if (flip)
        unflipFloats<<<1, NewRadixSort::CTA_SIZE>>>(keys, numElements);

    checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
// Sorts numElements key/value pairs (numElements <= WARP_SIZE) with a
// single warp, using a parallel insertion-sort-style scheme in shared
// memory.  Launched with one block of exactly numElements threads; sorts
// in place.
// NOTE(review): the non-emulation build has no barriers (__SYNC expands to
// nothing) and so relies on implicit warp-synchronous execution — a
// pre-Volta assumption; confirm before running on newer architectures.
template <bool flip>
__global__
void radixSortSingleWarp(uint *keys,
uint *values,
uint numElements)
{
__shared__ uint sKeys[NewRadixSort::WARP_SIZE];
__shared__ uint sValues[NewRadixSort::WARP_SIZE];
__shared__ uint sFlags[NewRadixSort::WARP_SIZE];
// Load one pair per thread, applying the float transform to keys if asked.
sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
sValues[threadIdx.x] = values[threadIdx.x];
__SYNC // emulation only
// Insert element i into the (already sorted) prefix [0, i) each iteration.
for(uint i = 1; i < numElements; i++)
{
uint key_i = sKeys[i];
uint val_i = sValues[i];
sFlags[threadIdx.x] = 0;
// Threads holding a prefix element larger than key_i shift it (and its
// value) one slot right and flag their own slot for overwrite.
if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
{
uint temp = sKeys[threadIdx.x];
uint tempval = sValues[threadIdx.x];
sFlags[threadIdx.x] = 1;
sKeys[threadIdx.x + 1] = temp;
sValues[threadIdx.x + 1] = tempval;
sFlags[threadIdx.x + 1] = 0;
}
// The leftmost shifted slot receives the inserted pair.
if(sFlags[threadIdx.x] == 1 )
{
sKeys[threadIdx.x] = key_i;
sValues[threadIdx.x] = val_i;
}
__SYNC // emulation only
}
// Write back, undoing the float transform on keys.
keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
values[threadIdx.x] = sValues[threadIdx.x];
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
// Key/value radix sort entry point.  Sorts 'keys' (and matching 'values')
// in place, 4 bits per pass, using temp*/counters/countersSum/blockOffsets
// as device scratch and cudppScan (via scanPlan) for the prefix sums.
// Small inputs are routed to single-warp / single-block specializations.
// Because startbit is a compile-time template parameter of radixSortStep,
// the pass chain below is written out explicitly instead of looped.
// flipBits enables the float bit transform (flip on the first pass, unflip
// on the last) so float keys sort in numeric order.
extern "C"
void radixSort(uint *keys,
uint *values,
uint *tempKeys,
uint *tempValues,
uint *counters,
uint *countersSum,
uint *blockOffsets,
CUDPPHandle scanPlan,
uint numElements,
uint keyBits,
bool manualCoalesce = true,
bool flipBits = false)
{
// Tiny input: one warp suffices.
if(numElements <= NewRadixSort::WARP_SIZE)
{
if (flipBits)
radixSortSingleWarp<true><<<1, numElements>>>(keys, values, numElements);
else
radixSortSingleWarp<false><<<1, numElements>>>(keys, values, numElements);
checkCudaError("radixSortSingleWarp");
return;
}
// Small input: one block suffices.
if(numElements <= NewRadixSort::CTA_SIZE * 4)
{
if (flipBits)
radixSortSingleBlock<true>(keys, values, numElements);
else
radixSortSingleBlock<false>(keys, values, numElements);
return;
}
// flip float bits on the first pass, unflip on the last pass
if (flipBits)
{
radixSortStep<4, 0, true, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
else
{ radixSortStep<4, 0, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
// Remaining passes run only if the key width requires them.
if (keyBits > 4)
{
radixSortStep<4, 4, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 8)
{
radixSortStep<4, 8, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 12)
{
radixSortStep<4, 12, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 16)
{
radixSortStep<4, 16, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 20)
{
radixSortStep<4, 20, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 24)
{
radixSortStep<4, 24, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
if (keyBits > 28)
{
if (flipBits) // last pass
{
radixSortStep<4, 28, false, true>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
else
{
radixSortStep<4, 28, false, false>(keys, values, tempKeys, tempValues,
counters, countersSum, blockOffsets,
scanPlan, numElements, manualCoalesce);
}
}
checkCudaError("radixSort");
}
// C-linkage wrapper for sorting float keys with uint values.  Floats are
// sorted by reinterpreting their bit patterns as uints; when negativeKeys
// is true the bits are flipped/unflipped inside the sort so negative
// values order correctly (IEEE-754 sign handling).
extern "C"
void radixSortFloatKeys(float *keys,
                        uint *values,
                        float *tempKeys,
                        uint *tempValues,
                        uint *counters,
                        uint *countersSum,
                        uint *blockOffsets,
                        CUDPPHandle scanPlan,
                        uint numElements,
                        uint keyBits,
                        bool manualCoalesce,
                        bool negativeKeys)
{
    // Delegate to the uint path; negativeKeys maps onto its flipBits flag.
    radixSort((uint*)keys, values, (uint*)tempKeys, tempValues,
              counters, countersSum, blockOffsets,
              scanPlan, numElements, keyBits,
              manualCoalesce, negativeKeys);
    checkCudaError("radixSortFloatKeys");
}
//----------------------------------------------------------------------------
// Key-only Sorts
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Uses rank to sort one bit at a time: Sorts a block according
// to bits startbit -> nbits + startbit
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// In-block bit-split radix sort: sorts the block's 4*CTA_SIZE keys on bits
// [startbit, startbit + nbits), one stable 1-bit split per iteration.
// Uses 4 * CTA_SIZE uints of the caller's dynamic shared memory.
//----------------------------------------------------------------------------
template<uint nbits, uint startbit>
__device__ void radixSortBlockKeysOnly(uint4 &key)
{
    // Aliased onto the dynamic shared allocation of the calling kernel.
    extern __shared__ uint sMem1[];
    for(uint shift = startbit; shift < (startbit + nbits); ++shift)
    {
        // lsb holds the *complement* of the current bit so rank4() places
        // 0-bits before 1-bits (ascending order).
        uint4 lsb;
        lsb.x = !((key.x >> shift) & 0x1);
        lsb.y = !((key.y >> shift) & 0x1);
        lsb.z = !((key.z >> shift) & 0x1);
        lsb.w = !((key.w >> shift) & 0x1);
        // r.{x,y,z,w} are the destination slots of this thread's four keys
        // in the block-wide ordering for this bit.
        uint4 r = rank4<256>(lsb);
#if 1
        // This arithmetic strides the ranks across 4 CTA_SIZE regions
        sMem1[(r.x & 3) * NewRadixSort::CTA_SIZE + (r.x >> 2)] = key.x;
        sMem1[(r.y & 3) * NewRadixSort::CTA_SIZE + (r.y >> 2)] = key.y;
        sMem1[(r.z & 3) * NewRadixSort::CTA_SIZE + (r.z >> 2)] = key.z;
        sMem1[(r.w & 3) * NewRadixSort::CTA_SIZE + (r.w >> 2)] = key.w;
        __syncthreads();
        // The above allows us to read without 4-way bank conflicts:
        key.x = sMem1[threadIdx.x];
        key.y = sMem1[threadIdx.x + NewRadixSort::CTA_SIZE];
        key.z = sMem1[threadIdx.x + 2 * NewRadixSort::CTA_SIZE];
        key.w = sMem1[threadIdx.x + 3 * NewRadixSort::CTA_SIZE];
#else
        sMem1[r.x] = key.x;
        sMem1[r.y] = key.y;
        sMem1[r.z] = key.z;
        sMem1[r.w] = key.w;
        __syncthreads();
        // This access has 4-way bank conflicts.
        // Fixed: the previous code read the undeclared identifier `sMem` and
        // assigned a uint to a uint4, so this branch did not compile. Thread
        // tid reads its four consecutive sorted keys via a uint4 view.
        key = ((uint4*)sMem1)[threadIdx.x];
#endif
        // Keys must be fully consumed before the next bit's scatter reuses
        // the same shared memory.
        __syncthreads();
    }
}
//----------------------------------------------------------------------------
//
// radixSortBlocks sorts each block of data independently in shared
// memory.
//
// Done in two separate stages. This stage calls radixSortBlock on each block
// independently, sorting on the basis of bits (startbit) -> (startbit + nbits)
//----------------------------------------------------------------------------
// Sorts each 4*CTA_SIZE-key block of the input independently in shared memory
// on bits [startbit, startbit + nbits). Each thread owns 4 consecutive keys.
// flip applies the float bit-flip transform on load (first pass of a float
// sort). startBlock offsets blockIdx.x so grids larger than the 1D launch
// limit can be processed in batches. fullBlocks==false enables tail handling
// when numElements is not a multiple of 4*CTA_SIZE.
template<uint nbits, uint startbit, bool fullBlocks, bool flip>
__global__ void radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint startBlock)
{
    extern __shared__ uint4 sMem[];
    uint4 key;
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;
    const uint idx = i << 2;  // first of this thread's 4 keys
    // handle non-full last block if array is not multiple of 1024 numElements
    if (!fullBlocks && idx+3 >= numElements)
    {
        if (idx >= numElements)
        {
            // Entirely past the end: pad with UINT_MAX so these keys sort last.
            key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX);
        }
        else
        {
            // for non-full block, we handle uint1 values instead of uint4
            uint *keys1 = (uint*)keysIn;
            key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX;
            key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX;
            key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX;
            key.w = UINT_MAX;  // idx+3 is past the end in this branch
        }
    }
    else
    {
        // Full vector load; optionally flip each component for float ordering.
        key = keysIn[i];
        if (flip)
        {
            key.x = floatFlip<flip>(key.x);
            key.y = floatFlip<flip>(key.y);
            key.z = floatFlip<flip>(key.z);
            key.w = floatFlip<flip>(key.w);
        }
    }
    __syncthreads();
    radixSortBlockKeysOnly<nbits, startbit>(key);
    __syncthreads();
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && idx+3 >= numElements)
    {
        if (idx < numElements)
        {
            // for non-full block, we handle uint1 values instead of uint4
            // (only the in-range lanes are written back; padding is dropped)
            uint *keys1 = (uint*)keysOut;
            keys1[idx] = key.x;
            if (idx + 1 < numElements)
            {
                keys1[idx + 1] = key.y;
                if (idx + 2 < numElements)
                {
                    keys1[idx + 2] = key.z;
                }
            }
        }
    }
    else
    {
        keysOut[i] = key;
    }
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. Depends on NewRadixSort::CTA_SIZE being 16 * number of radices
// (i.e. 16 * 2^nbits).
//
// This is quite fast and fully coalesces memory writes, albeit by doing extra
// (potentially wasted) work allocating threads to portions of memory that are
// not written out. Significantly faster than the generic approach on G80.
//----------------------------------------------------------------------------
// Globally scatters block-sorted keys into their final positions using the
// per-block radix offsets (blockOffsets), scanned global offsets (offsets)
// and per-block radix counts (sizes). Two strategies:
//   manualCoalesce == false: each thread writes its own two keys directly
//     (simple, potentially uncoalesced writes).
//   manualCoalesce == true: one half-warp per radix digit streams 16-aligned
//     runs so that writes are fully coalesced (fast path for G80-class HW).
// unflip undoes the float bit-flip on the final pass of a float sort.
template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip>
__global__ void reorderDataKeysOnly(uint *outKeys,
                                    uint2 *keys,
                                    uint *blockOffsets,
                                    uint *offsets,
                                    uint *sizes,
                                    uint numElements,
                                    uint totalBlocks,
                                    uint startBlock)
{
    __shared__ uint2 sKeys2[NewRadixSort::CTA_SIZE];
    __shared__ uint sOffsets[16];        // global base offset of each radix digit
    __shared__ uint sBlockOffsets[16];   // this block's local start of each digit
    uint *sKeys1 = (uint*)sKeys2;        // scalar view of the staged keys
    const uint blockId = blockIdx.x + startBlock;
    const uint i = blockId * blockDim.x + threadIdx.x;
    // handle non-full last block if array is not multiple of 1024 numElements
    if(!fullBlocks && (((i + 1) << 1) > numElements))
    {
        // Tail: load scalars, padding out-of-range slots with UINT_MAX.
        uint *keys1 = (uint*)keys;
        uint j = i << 1;
        sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX;
        j++;
        sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX;
    }
    else
    {
        sKeys2[threadIdx.x] = keys[i];
    }
    if (!manualCoalesce)
    {
        // One thread per digit loads the offset tables.
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
        }
        __syncthreads();
        // Each thread scatters its two staged keys:
        // global position = digit's global base + local rank within the block.
        uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
        uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]);
        }
        radix = (sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE] >> startbit) & 0xF;
        globalOffset = sOffsets[radix] + threadIdx.x + NewRadixSort::CTA_SIZE - sBlockOffsets[radix];
        if (fullBlocks || globalOffset < numElements)
        {
            outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + NewRadixSort::CTA_SIZE]);
        }
    }
    else
    {
        __shared__ uint sSizes[16];   // count of keys per digit in this block
        if(threadIdx.x < 16)
        {
            sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId];
            sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
            sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId];
        }
        __syncthreads();
        // 1 half-warp is responsible for writing out all values for 1 radix.
        // Loops if there are more than 16 values to be written out.
        // All start indices are rounded down to the nearest multiple of 16, and
        // all end indices are rounded up to the nearest multiple of 16.
        // Thus it can do extra work if the start and end indices are not multiples of 16
        // This is bounded by a factor of 2 (it can do 2X more work at most).
        const uint halfWarpID = threadIdx.x >> 4;      // which digit (0..15)
        const uint halfWarpOffset = threadIdx.x & 0xF; // lane within the half-warp
        const uint leadingInvalid = sOffsets[halfWarpID] & 0xF;
        uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0;  // round down to 16
        uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 -
            ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF);  // round up to 16
        uint numIterations = endPos - startPos;
        uint outOffset = startPos + halfWarpOffset;
        uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset;
        for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16)
        {
            // Skip lanes covering the rounded-down/rounded-up padding.
            if( (outOffset >= sOffsets[halfWarpID]) &&
                (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID]))
            {
                if(blockId < totalBlocks - 1 || outOffset < numElements)
                {
                    outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]);
                }
            }
        }
    }
}
//----------------------------------------------------------------------------
// Perform one step of the radix sort. Sorts by nbits key bits per step,
// starting at startbit.
//----------------------------------------------------------------------------
// One radix pass over nbits key bits starting at startbit. Three phases:
//   1. radixSortBlocksKeysOnly: sort keys within each 4*CTA_SIZE block
//      (keys -> tempKeys), 4 keys per thread.
//   2. findRadixOffsets + cudppScan: count the 16 digit populations per
//      2*CTA_SIZE block and prefix-sum them into global offsets.
//   3. reorderDataKeysOnly: scatter tempKeys back into keys at their final
//      positions for this pass.
// Grids are launched in batches of at most 65535 blocks (1D launch limit).
template<uint nbits, uint startbit, bool flip, bool unflip>
void radixSortStepKeysOnly(uint *keys,
                           uint *tempKeys,
                           uint *counters,
                           uint *countersSum,
                           uint *blockOffsets,
                           CUDPPHandle scanPlan,
                           uint numElements,
                           bool manualCoalesce)
{
    const uint eltsPerBlock = NewRadixSort::CTA_SIZE * 4;   // phase-1 tile
    const uint eltsPerBlock2 = NewRadixSort::CTA_SIZE * 2;  // phase-2/3 tile
    bool fullBlocks = ((numElements % eltsPerBlock) == 0);
    uint numBlocks = (fullBlocks) ?
        (numElements / eltsPerBlock) :
        (numElements / eltsPerBlock + 1);
    uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ?
        (numElements / eltsPerBlock2) :
        (numElements / eltsPerBlock2 + 1);
    const uint max1DBlocks = 65535;
    // Phase 1: per-block sort.
    for (uint block = 0; block < numBlocks; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks - block);
        // The tail-aware variant is only needed for the (smaller) final batch
        // of a non-multiple-sized array; full batches take the fast path.
        if (blocks < max1DBlocks && !fullBlocks)
        {
            radixSortBlocksKeysOnly<nbits, startbit, false, flip>
                <<<blocks, NewRadixSort::CTA_SIZE, 4 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint4*)tempKeys, (uint4*)keys, numElements, block);
        }
        else
        {
            radixSortBlocksKeysOnly<nbits, startbit, true, flip>
                <<<blocks, NewRadixSort::CTA_SIZE, 4 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint4*)tempKeys, (uint4*)keys, numElements, block);
        }
    }
    // Phase 2a: per-block digit histograms and local digit offsets.
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        if (blocks < max1DBlocks && !fullBlocks)
        {
            findRadixOffsets<startbit, false>
                <<<blocks, NewRadixSort::CTA_SIZE, 3 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
        else
        {
            findRadixOffsets<startbit, true>
                <<<blocks, NewRadixSort::CTA_SIZE, 3 * NewRadixSort::CTA_SIZE * sizeof(uint)>>>
                ((uint2*)tempKeys, counters, blockOffsets, numElements, numBlocks2, block);
        }
    }
    // Phase 2b: exclusive scan of all 16 * numBlocks2 digit counts.
    cudppScan(scanPlan, countersSum, counters, 16*numBlocks2);
    // Phase 3: global scatter into final positions for this pass.
    for (uint block = 0; block < numBlocks2; block += max1DBlocks)
    {
        uint blocks = min(max1DBlocks, numBlocks2 - block);
        if (blocks < max1DBlocks && !fullBlocks)
        {
            if (manualCoalesce)
            {
                reorderDataKeysOnly<startbit, false, true, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
                     numElements, numBlocks2, block);
            }
            else
            {
                reorderDataKeysOnly<startbit, false, false, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters,
                     numElements, numBlocks2, block);
            }
        }
        else
        {
            if (manualCoalesce)
            {
                reorderDataKeysOnly<startbit, true, true, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
            else
            {
                reorderDataKeysOnly<startbit, true, false, unflip><<<blocks, NewRadixSort::CTA_SIZE>>>
                    (keys, (uint2*)tempKeys, blockOffsets, countersSum, counters, numElements, numBlocks2, block);
            }
        }
    }
    checkCudaError("radixSortStepKeysOnly");
}
//----------------------------------------------------------------------------
// Optimization for sorts of fewer than 4 * CTA_SIZE elements
//----------------------------------------------------------------------------
template <bool flip>
void radixSortSingleBlockKeysOnly(uint *keys,
                                  uint numElements)
{
    // Fast path for numElements <= 4 * CTA_SIZE: sort all 32 key bits with a
    // single CTA, each thread owning four keys. flip selects the float
    // bit-flip transform on load; the bits are restored afterwards.
    const uint cta       = NewRadixSort::CTA_SIZE;
    const uint smemBytes = 4 * cta * sizeof(uint);
    const bool exactFit  = ((numElements % (cta * 4)) == 0);
    if (exactFit)
    {
        // fullBlocks == true: no tail handling needed inside the kernel.
        radixSortBlocksKeysOnly<32, 0, true, flip>
            <<<1, cta, smemBytes>>>
            ((uint4*)keys, (uint4*)keys, numElements, 0);
    }
    else
    {
        radixSortBlocksKeysOnly<32, 0, false, flip>
            <<<1, cta, smemBytes>>>
            ((uint4*)keys, (uint4*)keys, numElements, 0);
    }
    // Float keys were bit-flipped on input; restore their original patterns.
    if (flip)
        unflipFloats<<<1, cta>>>(keys, numElements);
    checkCudaError("radixSortSingleBlock");
}
//----------------------------------------------------------------------------
// Optimization for sorts of WARP_SIZE or fewer elements
//----------------------------------------------------------------------------
// Parallel insertion sort of up to WARP_SIZE keys by a single warp, launched
// as <<<1, numElements>>> so thread t owns element t.
// NOTE(review): the loop body relies on warp-lockstep execution between the
// shared-memory writes and reads (no explicit barrier beyond __SYNC, which is
// presumably a device-emulation-only sync macro). On GPUs with independent
// thread scheduling (Volta+) this would need __syncwarp() — verify target.
template <bool flip>
__global__
void radixSortSingleWarpKeysOnly(uint *keys,
                                 uint numElements)
{
    __shared__ uint sKeys[NewRadixSort::WARP_SIZE];
    __shared__ uint sFlags[NewRadixSort::WARP_SIZE];
    sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]);
    __SYNC // emulation only
    for(uint i = 1; i < numElements; i++)
    {
        // Insert sKeys[i] into the sorted prefix sKeys[0..i-1]: every thread
        // holding a larger key shifts it one slot right. Each shifting thread
        // sets its own flag and clears its right neighbor's, so only the
        // leftmost shifted slot keeps flag==1 and receives key_i below.
        uint key_i = sKeys[i];
        sFlags[threadIdx.x] = 0;
        if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) )
        {
            uint temp = sKeys[threadIdx.x];
            sFlags[threadIdx.x] = 1;
            sKeys[threadIdx.x + 1] = temp;
            sFlags[threadIdx.x + 1] = 0;
        }
        if(sFlags[threadIdx.x] == 1 )
        {
            sKeys[threadIdx.x] = key_i;
        }
        __SYNC // emulation only
    }
    // Write back, undoing the float bit-flip if it was applied.
    keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]);
}
//----------------------------------------------------------------------------
// Main radix sort function. Sorts in place in the keys and values arrays,
// but uses the other device arrays as temporary storage. All pointer
// parameters are device pointers. Uses cudppScan() for the prefix sum of
// radix counters.
//----------------------------------------------------------------------------
extern "C"
void radixSortKeysOnly(uint *keys,
                       uint *tempKeys,
                       uint *counters,
                       uint *countersSum,
                       uint *blockOffsets,
                       CUDPPHandle scanPlan,
                       uint numElements,
                       uint keyBits,
                       bool manualCoalesce = true,
                       bool flipBits = false)
{
    // Key-only radix sort driver. Tiny inputs take single-warp or
    // single-block fast paths; otherwise the sort runs 4 bits per pass up to
    // keyBits bits. For float keys (flipBits == true) the first pass flips
    // the key bit patterns and the final (bits 28..31) pass unflips them,
    // which is why those two passes have distinct template instantiations.
    if(numElements <= NewRadixSort::WARP_SIZE)
    {
        if (flipBits)
            radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements);
        else
            radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements);
        checkCudaError("radixSortSingleWarp");
        return;
    }
    if(numElements <= NewRadixSort::CTA_SIZE * 4)
    {
        if (flipBits)
            radixSortSingleBlockKeysOnly<true>(keys, numElements);
        else
            radixSortSingleBlockKeysOnly<false>(keys, numElements);
        return;
    }
    // flip float bits on the first pass, unflip on the last pass
    if (flipBits)
    {
        radixSortStepKeysOnly<4, 0, true, false>(keys, tempKeys,
                                                 counters, countersSum, blockOffsets,
                                                 scanPlan, numElements, manualCoalesce);
    }
    else
    {
        radixSortStepKeysOnly<4, 0, false, false>(keys, tempKeys,
                                                  counters, countersSum, blockOffsets,
                                                  scanPlan, numElements, manualCoalesce);
    }
    // Remaining 4-bit passes, skipped once keyBits is exhausted.
    if (keyBits > 4)
    {
        radixSortStepKeysOnly<4, 4, false, false>(keys, tempKeys,
                                                  counters, countersSum, blockOffsets,
                                                  scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 8)
    {
        radixSortStepKeysOnly<4, 8, false, false>(keys, tempKeys,
                                                  counters, countersSum, blockOffsets,
                                                  scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 12)
    {
        radixSortStepKeysOnly<4, 12, false, false>(keys, tempKeys,
                                                   counters, countersSum, blockOffsets,
                                                   scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 16)
    {
        radixSortStepKeysOnly<4, 16, false, false>(keys, tempKeys,
                                                   counters, countersSum, blockOffsets,
                                                   scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 20)
    {
        radixSortStepKeysOnly<4, 20, false, false>(keys, tempKeys,
                                                   counters, countersSum, blockOffsets,
                                                   scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 24)
    {
        radixSortStepKeysOnly<4, 24, false, false>(keys, tempKeys,
                                                   counters, countersSum, blockOffsets,
                                                   scanPlan, numElements, manualCoalesce);
    }
    if (keyBits > 28)
    {
        if (flipBits) // last pass
        {
            radixSortStepKeysOnly<4, 28, false, true>(keys, tempKeys,
                                                      counters, countersSum, blockOffsets,
                                                      scanPlan, numElements, manualCoalesce);
        }
        else
        {
            radixSortStepKeysOnly<4, 28, false, false>(keys, tempKeys,
                                                       counters, countersSum, blockOffsets,
                                                       scanPlan, numElements, manualCoalesce);
        }
    }
    checkCudaError("radixSortKeysOnly");
}
extern "C"
void radixSortFloatKeysOnly(float *keys,
                            float *tempKeys,
                            uint *counters,
                            uint *countersSum,
                            uint *blockOffsets,
                            CUDPPHandle scanPlan,
                            uint numElements,
                            uint keyBits,
                            bool manualCoalesce,
                            bool negativeKeys)
{
    // Float keys are sorted via the uint path: bit patterns are reinterpreted
    // as uints, and negativeKeys enables the flip/unflip passes that make
    // IEEE-754 floats (including negatives) sort correctly as uints.
    radixSortKeysOnly((uint*)keys, (uint*)tempKeys, counters, countersSum, blockOffsets,
                      scanPlan, numElements, keyBits, manualCoalesce, negativeKeys);
    // Fix: the label previously read "radixSortFloatKeys" (copy/paste from the
    // key-value variant), which made error reports ambiguous between the two.
    checkCudaError("radixSortFloatKeysOnly");
}
|
2b1466d5e68a70be74a8e7505d2b1d8a1edc8ace.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Comment out this line to enable debug mode
#define NDEBUG
/* time stamp function in seconds */
__host__ double getTimeStamp()
{
    // Wall-clock time since the Unix epoch, as seconds with
    // microsecond resolution.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double wholeSeconds    = (double)tv.tv_sec;
    double fractionSeconds = (double)tv.tv_usec / 1000000;
    return fractionSeconds + wholeSeconds;
}
__host__ void initX(float *X, int numRows, int numCols)
{
    // X is padded: one zero "halo" row above and one below the
    // numRows x numCols payload, for (numRows + 2) rows total (row-major).
    float *topRow    = X;
    float *bottomRow = X + (numRows + 1) * numCols;
    for (int j = 0; j < numCols; j++)
    {
        topRow[j] = 0;
        bottomRow[j] = 0;
    }
    // Payload rows start at offset (i + 1) * numCols; X[i][j] = (i + j) / 2.
    for (int i = 0; i < numRows; i++)
    {
        float *row = X + (i + 1) * numCols;
        for (int j = 0; j < numCols; j++)
        {
            row[j] = (float)(i + j) / 2.0;
        }
    }
}
__host__ void initY(float *Y, int numRows, int numCols)
{
    // Y is padded with two zero "halo" columns to the left of the
    // numRows x numCols payload, so each row holds (numCols + 2) floats.
    for (int i = 0; i < numRows; i++)
    {
        float *row = Y + i * (numCols + 2);
        row[0] = 0;
        row[1] = 0;
        // Payload: Y[i][j] = 3.25 * (i + j).
        for (int j = 0; j < numCols; j++)
        {
            row[j + 2] = (float)3.25 * (i + j);
        }
    }
}
// Row-major index helpers:
//   H_ADJ_INDEX_X(i, j): payload element (i, j) inside padded X
//                        (X has one extra zero row above and below).
//   H_ADJ_INDEX_Y(i, j): payload element (i, j) inside padded Y
//                        (Y has two extra zero columns on the left).
//   H_INDEX(i, j):       element (i, j) of the unpadded numRows x numCols Z.
// NOTE(review): the macro bodies are not fully parenthesized; this is safe in
// the current subscript-only uses but fragile inside larger expressions.
#define H_ADJ_INDEX_X(i, j) ((i) + 1) * numCols + (j)
#define H_ADJ_INDEX_Y(i, j) (i) * (numCols + 2) + (j) + 2
#define H_INDEX(i, j) (i) * numCols + (j)
// CPU reference for the f_siggen stencil; used to validate the GPU kernel.
__host__ void f_siggen_reference(float *X, float *Y, float *Z, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        for (int j = 0; j < numCols; j++)
        {
            // Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j]
            Z[H_INDEX(i, j)] =
                X[H_ADJ_INDEX_X(i - 1, j)] +
                X[H_ADJ_INDEX_X(i, j)] +
                X[H_ADJ_INDEX_X(i + 1, j)] -
                Y[H_ADJ_INDEX_Y(i, j - 2)] -
                Y[H_ADJ_INDEX_Y(i, j - 1)] -
                Y[H_ADJ_INDEX_Y(i, j)];
        }
    }
}
__host__ int checkZ(float *E, float *A, int numRows, int numCols)
{
    // Element-wise exact comparison of the expected (E) and actual (A)
    // numRows x numCols results. Returns 1 on a perfect match, 0 at the
    // first mismatch (reported in debug builds).
    for (int i = 0; i < numRows; i++)
    {
        for (int j = 0; j < numCols; j++)
        {
            int idx = i * numCols + j;
            if (E[idx] != A[idx])
            {
#ifndef NDEBUG
                printf("(i=%d, j=%d) Expected=%f Actual=%f\n", i, j, E[idx], A[idx]);
#endif
                return 0;
            }
        }
    }
    return 1;
}
// Computes Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j]
// using shared-memory tiles: X is staged transposed (s_XT) with a 1-element
// halo in y; Y is staged directly (s_Y) with a 2-element halo in x.
// Launch contract: 2D grid/block covering numCols x numRows; dynamic shared
// memory for (smemNumElemX + (blockDim.x + 2) * blockDim.y) floats, where
// smemNumElemX == blockDim.x * (blockDim.y + 2).
__global__ void f_siggen(float *X, float *Y, float *Z, int numRows, int numCols, int smemNumElemX)
{
    extern __shared__ float s_data[];
    float *s_XT = s_data;              // blockDim.x * (blockDim.y + 2)
    int s_XTWidth = (blockDim.y + 2);
    float *s_Y = s_XT + smemNumElemX;  // (blockDim.x + 2) * blockDim.y
    /* Global Coordinate */
    int globalX = blockDim.x * blockIdx.x + threadIdx.x;
    int globalY = blockDim.y * blockIdx.y + threadIdx.y;
    int globalIdx = globalY * numCols + globalX;
    int globalXIdx = (globalY + 1) * numCols + globalX;     // +1 skips X's top halo row
    int globalYIdx = globalY * (numCols + 2) + globalX + 2; // +2 skips Y's left halo cols
    // Fix: the original returned here for out-of-range threads, so they never
    // reached the __syncthreads() below — a barrier inside divergent control
    // flow is undefined behavior. Out-of-range threads now skip only the
    // loads/stores and still participate in the barrier.
    bool inBounds = (globalX < numCols && globalY < numRows);
    /* Shared-tile coordinates */
    int s_XTx = threadIdx.y + 1;
    int s_XTy = threadIdx.x;
    int s_XTIdx = s_XTy * s_XTWidth + s_XTx;
    int s_Yx = threadIdx.x + 2;
    int s_Yy = threadIdx.y;
    int s_YIdx = s_Yy * (blockDim.x + 2) + s_Yx;
    if (inBounds)
    {
        /* Set Up s_XT */
        s_XT[s_XTIdx] = X[globalXIdx];
        if (threadIdx.y == 0)
        {
            // Top halo; in range because X is padded with a row above.
            s_XT[s_XTIdx - 1] = X[globalXIdx - numCols];
        }
        if (threadIdx.y == blockDim.y - 1 || globalY == numRows - 1)
        {
            // Bottom halo; in range because X is padded with a row below.
            s_XT[s_XTIdx + 1] = X[globalXIdx + numCols];
        }
        /* Set Up s_Y */
        s_Y[s_YIdx] = Y[globalYIdx];
        if (threadIdx.x == 0)
        {
            // Left halo; in range because Y is padded with two columns.
            s_Y[s_YIdx - 2] = Y[globalYIdx - 2];
            s_Y[s_YIdx - 1] = Y[globalYIdx - 1];
        }
    }
    /* Wait for All to Set Up s_XT and s_Y */
    __syncthreads();
    /* Write Output */
    if (inBounds)
    {
        Z[globalIdx] = s_XT[s_XTIdx - 1] + s_XT[s_XTIdx] + s_XT[s_XTIdx + 1]
                     - s_Y[s_YIdx - 2] - s_Y[s_YIdx - 1] - s_Y[s_YIdx];
    }
}
// Usage: prog <numRows> <numCols>
// Computes the f_siggen stencil on both CPU (reference) and GPU, verifies
// they match exactly, and on success prints:
//   <total_GPU_time> <H2D_time> <kernel_time> <D2H_time> <Z[5][5]>
int main(int argc, char *argv[])
{
    int error = 0;
    /* Get Dimension */
    if (argc != 3)
    {
        printf("Error: The number of arguments is not exactly 2\n");
        return 0;
    }
    int numRows = atoi(argv[1]);
    int numCols = atoi(argv[2]);
    size_t numElem = numRows * numCols;
    size_t numBytes = numElem * sizeof(float);
    // X carries one padding row above and below the payload.
    int numRowsX = numRows + 2;
    int numColsX = numCols;
    size_t numElemX = numRowsX * numColsX;
    size_t numBytesX = numElemX * sizeof(float);
    // Y carries two padding columns to the left of the payload.
    int numRowsY = numRows;
    int numColsY = numCols + 2;
    size_t numElemY = numRowsY * numColsY;
    size_t numBytesY = numElemY * sizeof(float);
#ifndef NDEBUG
    printf("numRows=%d, numCols=%d, numElem=%ld, numBytes=%ld\n", numRows, numCols, numElem, numBytes);
    printf("numRowsX=%d, numColsX=%d, numElemX=%ld, numBytesX=%ld\n", numRowsX, numColsX, numElemX, numBytesX);
    printf("numRowsY=%d, numColsY=%d, numElemY=%ld, numBytesY=%ld\n", numRowsY, numColsY, numElemY, numBytesY);
#endif
    /* Allocate Host Memory */
    // X and Y share one pinned allocation so they move in a single transfer.
    // NOTE(review): the `error = error || call(...)` pattern short-circuits —
    // once error is nonzero, later allocations/copies are skipped entirely.
    float *h_XY = NULL;
    error = error || hipHostMalloc((void **)&h_XY, numBytesX + numBytesY, 0);
    float *h_X = h_XY;
    float *h_Y = h_X + numElemX;
    float *h_hZ = (float *)malloc(numBytes);  // CPU reference result (pageable)
    float *h_dZ = NULL;                       // GPU result landing buffer (pinned)
    error = error || hipHostMalloc((void **)&h_dZ, numBytes, 0);
    if (error)
    {
        printf("Error: hipHostMalloc returns error\n");
        return 0;
    }
    /* Initialize Host Memory */
    initX(h_X, numRows, numCols);
    initY(h_Y, numRows, numCols);
#ifndef NDEBUG
    double timestampPreCpuKernel = getTimeStamp();
#endif
    f_siggen_reference(h_X, h_Y, h_hZ, numRows, numCols);
#ifndef NDEBUG
    double timestampPostCpuKernel = getTimeStamp();
    printf("CPU=%.6fsec\n", timestampPostCpuKernel - timestampPreCpuKernel);
#endif
    /* Allocate Device Memory */
    float *d_XY = NULL;
    error = error || hipMalloc((void **)&d_XY, numBytesX + numBytesY);
    float *d_X = d_XY;
    float *d_Y = d_X + numElemX;
    float *d_Z = NULL;
    error = error || hipMalloc((void **)&d_Z, numBytes);
    if (error)
    {
        printf("Error: hipMalloc returns error\n");
        return 0;
    }
    /* Copy Host Memory to Device Memory */
    double timestampPreCpuGpuTransfer = getTimeStamp();
    error = error || hipMemcpy(d_XY, h_XY, numBytesX + numBytesY, hipMemcpyHostToDevice);
    if (error)
    {
        printf("Error: hipMemcpy returns error\n");
        return 0;
    }
    /* Run Kernel */
    double timestampPreKernel = getTimeStamp();
    // 32x32 blocks; grid sized by ceiling division to cover the whole output.
    dim3 d_blockDim;
    d_blockDim.x = 32;
    d_blockDim.y = 32;
    dim3 d_gridDim;
    d_gridDim.x = (numCols - 1) / d_blockDim.x + 1;
    d_gridDim.y = (numRows - 1) / d_blockDim.y + 1;
    // Dynamic shared memory: transposed X tile (1-elem y halo) + Y tile
    // (2-elem x halo); see the f_siggen kernel's launch contract.
    int d_smemNumElemX = d_blockDim.x * (d_blockDim.y + 2);
    int d_smemNumElemY = (d_blockDim.x + 2) * d_blockDim.y;
    size_t d_smemNumBytes = (d_smemNumElemX + d_smemNumElemY) * sizeof(float);
    hipLaunchKernelGGL(( f_siggen), dim3(d_gridDim), dim3(d_blockDim), d_smemNumBytes, 0, d_X, d_Y, d_Z, numRows, numCols, d_smemNumElemX);
    // Block until the kernel finishes so the timing below is meaningful.
    hipDeviceSynchronize();
    /* Copy Device Memory to Host Memory */
    double timestampPreGpuCpuTransfer = getTimeStamp();
    error = error || hipMemcpy(h_dZ, d_Z, numBytes, hipMemcpyDeviceToHost);
    if (error)
    {
        printf("Error: hipMemcpy returns error\n");
        return 0;
    }
    double timestampPostGpuCpuTransfer = getTimeStamp();
    /* Free Device Memory */
    hipFree(d_Z);
    d_Z = NULL;
    d_Y = NULL;   // aliases into d_XY; only the base pointer is freed
    d_X = NULL;
    hipFree(d_XY);
    d_XY = NULL;
    /* Verify Device Result with Host Result */
    error = error || !checkZ(h_hZ, h_dZ, numRows, numCols);
    /* Output */
#ifndef NDEBUG
    printf("d_gridDim=(%d, %d), d_blockDim=(%d, %d), d_smemNumBytes=%ld\n", d_gridDim.x, d_gridDim.y, d_blockDim.x, d_blockDim.y, d_smemNumBytes);
#endif
    if (!error)
    {
        // #ifndef NDEBUG
        // printf("<total_GPU_time> <CPU_GPU_transfer_time> <kernel_time> <GPU_CPU_transfer_time> <Z-value> <nl>\n");
        // #endif
        float totalGpuElapased = timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer;
        float cpuGpuTransferElapsed = timestampPreKernel - timestampPreCpuGpuTransfer;
        float kernelElapsed = timestampPreGpuCpuTransfer - timestampPreKernel;
        float gpuCpuTransferElapsed = timestampPostGpuCpuTransfer - timestampPreGpuCpuTransfer;
        int zValueI = 5;
        int zValueJ = 5;
        float zValue = h_dZ[H_INDEX(zValueI, zValueJ)];
        printf("%.6f %.6f %.6f %.6f %.6f\n", totalGpuElapased, cpuGpuTransferElapsed, kernelElapsed, gpuCpuTransferElapsed, zValue);
    }
    else
    {
        // NOTE(review): message is missing the word "match".
        printf("Error: GPU result does not with CPU result\n");
#ifndef NDEBUG
        for (int i = 0; i < 4; i++)
        {
            for (int j = 0; j < 4; j++)
            {
                printf("(i=%d, j=%d), CPU=%.6f, GPU=%.6f, X=%.6f, Y=%.6f\n", i, j, h_hZ[H_INDEX(i, j)], h_dZ[H_INDEX(i, j)], h_X[H_ADJ_INDEX_X(i, j)], h_Y[H_ADJ_INDEX_Y(i, j)]);
            }
        }
#endif
    }
    /* Free Host Memory */
    hipHostFree(h_dZ);
    h_dZ = NULL;
    free(h_hZ);
    h_hZ = NULL;
    h_Y = NULL;   // aliases into h_XY; only the base pointer is freed
    h_X = NULL;
    hipHostFree(h_XY);
    h_XY = NULL;
    /* Clean Up Device Resource */
    hipDeviceReset();
} | 2b1466d5e68a70be74a8e7505d2b1d8a1edc8ace.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Comment out this line to enable debug mode
#define NDEBUG
/* time stamp function in seconds */
__host__ double getTimeStamp()
{
    // Wall-clock time since the Unix epoch, in seconds with
    // microsecond resolution.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double fractional = (double)tv.tv_usec / 1000000;
    return fractional + tv.tv_sec;
}
// Fill X, a (numRows + 2) x numCols row-major array: the first and last rows
// are zero padding ("halo" rows); the payload is X[i+1][j] = (i + j) / 2.
__host__ void initX(float *X, int numRows, int numCols)
{
    int lastIBase = (numRows + 1) * numCols;
    for (int j = 0; j < numCols; j++)
    {
        X[j] = 0;              // top padding row
        X[lastIBase + j] = 0;  // bottom padding row
    }
    for (int i = 0; i < numRows; i++)
    {
        int iBase = (i + 1) * numCols;  // +1 skips the top padding row
        for (int j = 0; j < numCols; j++)
        {
            // h_X_old[i,j] = (float) (i+j)/2.0;
            X[iBase + j] = (float)(i + j) / 2.0;
        }
    }
}
// Fill Y, a numRows x (numCols + 2) row-major array: each row starts with
// two zero padding columns; the payload is Y[i][j+2] = 3.25 * (i + j).
__host__ void initY(float *Y, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        int iBase = i * (numCols + 2);
        Y[iBase] = 0;      // left padding column 0
        Y[iBase + 1] = 0;  // left padding column 1
        for (int j = 0; j < numCols; j++)
        {
            // h_Y_old[i,j] = (float) 3.25*(i+j);
            Y[iBase + j + 2] = (float)3.25 * (i + j);
        }
    }
}
// Row-major index helpers:
//   H_ADJ_INDEX_X(i, j): payload element (i, j) inside padded X
//                        (X has one extra zero row above and below).
//   H_ADJ_INDEX_Y(i, j): payload element (i, j) inside padded Y
//                        (Y has two extra zero columns on the left).
//   H_INDEX(i, j):       element (i, j) of the unpadded numRows x numCols Z.
// NOTE(review): the macro bodies are not fully parenthesized; this is safe in
// the current subscript-only uses but fragile inside larger expressions.
#define H_ADJ_INDEX_X(i, j) ((i) + 1) * numCols + (j)
#define H_ADJ_INDEX_Y(i, j) (i) * (numCols + 2) + (j) + 2
#define H_INDEX(i, j) (i) * numCols + (j)
// CPU reference for the f_siggen stencil; used to validate the GPU kernel.
__host__ void f_siggen_reference(float *X, float *Y, float *Z, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        for (int j = 0; j < numCols; j++)
        {
            // Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j]
            Z[H_INDEX(i, j)] =
                X[H_ADJ_INDEX_X(i - 1, j)] +
                X[H_ADJ_INDEX_X(i, j)] +
                X[H_ADJ_INDEX_X(i + 1, j)] -
                Y[H_ADJ_INDEX_Y(i, j - 2)] -
                Y[H_ADJ_INDEX_Y(i, j - 1)] -
                Y[H_ADJ_INDEX_Y(i, j)];
        }
    }
}
// Element-wise exact comparison of expected (E) vs actual (A) results,
// both numRows x numCols row-major. Returns 1 on a perfect match, 0 at the
// first mismatch (which is printed in debug builds).
__host__ int checkZ(float *E, float *A, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        int ibase = i * numCols;
        for (int j = 0; j < numCols; j++)
        {
            if (E[ibase + j] != A[ibase + j])
            {
#ifndef NDEBUG
                printf("(i=%d, j=%d) Expected=%f Actual=%f\n", i, j, E[ibase + j], A[ibase + j]);
#endif
                return 0;
            }
        }
    }
    return 1;
}
// Computes Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j]
// using shared-memory tiles: X is staged transposed (s_XT) with a 1-element
// halo in y; Y is staged directly (s_Y) with a 2-element halo in x.
// Launch contract: 2D grid/block covering numCols x numRows; dynamic shared
// memory for (smemNumElemX + (blockDim.x + 2) * blockDim.y) floats, where
// smemNumElemX == blockDim.x * (blockDim.y + 2).
__global__ void f_siggen(float *X, float *Y, float *Z, int numRows, int numCols, int smemNumElemX)
{
    extern __shared__ float s_data[];
    float *s_XT = s_data;              // blockDim.x * (blockDim.y + 2)
    int s_XTWidth = (blockDim.y + 2);
    float *s_Y = s_XT + smemNumElemX;  // (blockDim.x + 2) * blockDim.y
    /* Global Coordinate */
    int globalX = blockDim.x * blockIdx.x + threadIdx.x;
    int globalY = blockDim.y * blockIdx.y + threadIdx.y;
    int globalIdx = globalY * numCols + globalX;
    int globalXIdx = (globalY + 1) * numCols + globalX;     // +1 skips X's top halo row
    int globalYIdx = globalY * (numCols + 2) + globalX + 2; // +2 skips Y's left halo cols
    // Fix: the original returned here for out-of-range threads, so they never
    // reached the __syncthreads() below — a barrier inside divergent control
    // flow is undefined behavior. Out-of-range threads now skip only the
    // loads/stores and still participate in the barrier.
    bool inBounds = (globalX < numCols && globalY < numRows);
    /* Shared-tile coordinates */
    int s_XTx = threadIdx.y + 1;
    int s_XTy = threadIdx.x;
    int s_XTIdx = s_XTy * s_XTWidth + s_XTx;
    int s_Yx = threadIdx.x + 2;
    int s_Yy = threadIdx.y;
    int s_YIdx = s_Yy * (blockDim.x + 2) + s_Yx;
    if (inBounds)
    {
        /* Set Up s_XT */
        s_XT[s_XTIdx] = X[globalXIdx];
        if (threadIdx.y == 0)
        {
            // Top halo; in range because X is padded with a row above.
            s_XT[s_XTIdx - 1] = X[globalXIdx - numCols];
        }
        if (threadIdx.y == blockDim.y - 1 || globalY == numRows - 1)
        {
            // Bottom halo; in range because X is padded with a row below.
            s_XT[s_XTIdx + 1] = X[globalXIdx + numCols];
        }
        /* Set Up s_Y */
        s_Y[s_YIdx] = Y[globalYIdx];
        if (threadIdx.x == 0)
        {
            // Left halo; in range because Y is padded with two columns.
            s_Y[s_YIdx - 2] = Y[globalYIdx - 2];
            s_Y[s_YIdx - 1] = Y[globalYIdx - 1];
        }
    }
    /* Wait for All to Set Up s_XT and s_Y */
    __syncthreads();
    /* Write Output */
    if (inBounds)
    {
        Z[globalIdx] = s_XT[s_XTIdx - 1] + s_XT[s_XTIdx] + s_XT[s_XTIdx + 1]
                     - s_Y[s_YIdx - 2] - s_Y[s_YIdx - 1] - s_Y[s_YIdx];
    }
}
// Usage: prog <numRows> <numCols>
// Computes the f_siggen stencil on both CPU (reference) and GPU, verifies
// they match exactly, and on success prints:
//   <total_GPU_time> <H2D_time> <kernel_time> <D2H_time> <Z[5][5]>
int main(int argc, char *argv[])
{
    int error = 0;
    /* Get Dimension */
    if (argc != 3)
    {
        printf("Error: The number of arguments is not exactly 2\n");
        return 0;
    }
    int numRows = atoi(argv[1]);
    int numCols = atoi(argv[2]);
    size_t numElem = numRows * numCols;
    size_t numBytes = numElem * sizeof(float);
    // X carries one padding row above and below the payload.
    int numRowsX = numRows + 2;
    int numColsX = numCols;
    size_t numElemX = numRowsX * numColsX;
    size_t numBytesX = numElemX * sizeof(float);
    // Y carries two padding columns to the left of the payload.
    int numRowsY = numRows;
    int numColsY = numCols + 2;
    size_t numElemY = numRowsY * numColsY;
    size_t numBytesY = numElemY * sizeof(float);
#ifndef NDEBUG
    printf("numRows=%d, numCols=%d, numElem=%ld, numBytes=%ld\n", numRows, numCols, numElem, numBytes);
    printf("numRowsX=%d, numColsX=%d, numElemX=%ld, numBytesX=%ld\n", numRowsX, numColsX, numElemX, numBytesX);
    printf("numRowsY=%d, numColsY=%d, numElemY=%ld, numBytesY=%ld\n", numRowsY, numColsY, numElemY, numBytesY);
#endif
    /* Allocate Host Memory */
    // X and Y share one pinned allocation so they move in a single transfer.
    // NOTE(review): the `error = error || call(...)` pattern short-circuits —
    // once error is nonzero, later allocations/copies are skipped entirely.
    float *h_XY = NULL;
    error = error || cudaHostAlloc((void **)&h_XY, numBytesX + numBytesY, 0);
    float *h_X = h_XY;
    float *h_Y = h_X + numElemX;
    float *h_hZ = (float *)malloc(numBytes);  // CPU reference result (pageable)
    float *h_dZ = NULL;                       // GPU result landing buffer (pinned)
    error = error || cudaHostAlloc((void **)&h_dZ, numBytes, 0);
    if (error)
    {
        printf("Error: cudaHostAlloc returns error\n");
        return 0;
    }
    /* Initialize Host Memory */
    initX(h_X, numRows, numCols);
    initY(h_Y, numRows, numCols);
#ifndef NDEBUG
    double timestampPreCpuKernel = getTimeStamp();
#endif
    f_siggen_reference(h_X, h_Y, h_hZ, numRows, numCols);
#ifndef NDEBUG
    double timestampPostCpuKernel = getTimeStamp();
    printf("CPU=%.6fsec\n", timestampPostCpuKernel - timestampPreCpuKernel);
#endif
    /* Allocate Device Memory */
    float *d_XY = NULL;
    error = error || cudaMalloc((void **)&d_XY, numBytesX + numBytesY);
    float *d_X = d_XY;
    float *d_Y = d_X + numElemX;
    float *d_Z = NULL;
    error = error || cudaMalloc((void **)&d_Z, numBytes);
    if (error)
    {
        printf("Error: cudaMalloc returns error\n");
        return 0;
    }
    /* Copy Host Memory to Device Memory */
    double timestampPreCpuGpuTransfer = getTimeStamp();
    error = error || cudaMemcpy(d_XY, h_XY, numBytesX + numBytesY, cudaMemcpyHostToDevice);
    if (error)
    {
        printf("Error: cudaMemcpy returns error\n");
        return 0;
    }
    /* Run Kernel */
    double timestampPreKernel = getTimeStamp();
    // 32x32 blocks; grid sized by ceiling division to cover the whole output.
    dim3 d_blockDim;
    d_blockDim.x = 32;
    d_blockDim.y = 32;
    dim3 d_gridDim;
    d_gridDim.x = (numCols - 1) / d_blockDim.x + 1;
    d_gridDim.y = (numRows - 1) / d_blockDim.y + 1;
    // Dynamic shared memory: transposed X tile (1-elem y halo) + Y tile
    // (2-elem x halo); see the f_siggen kernel's launch contract.
    int d_smemNumElemX = d_blockDim.x * (d_blockDim.y + 2);
    int d_smemNumElemY = (d_blockDim.x + 2) * d_blockDim.y;
    size_t d_smemNumBytes = (d_smemNumElemX + d_smemNumElemY) * sizeof(float);
    f_siggen<<<d_gridDim, d_blockDim, d_smemNumBytes>>>(d_X, d_Y, d_Z, numRows, numCols, d_smemNumElemX);
    // Block until the kernel finishes so the timing below is meaningful.
    cudaDeviceSynchronize();
    /* Copy Device Memory to Host Memory */
    double timestampPreGpuCpuTransfer = getTimeStamp();
    error = error || cudaMemcpy(h_dZ, d_Z, numBytes, cudaMemcpyDeviceToHost);
    if (error)
    {
        printf("Error: cudaMemcpy returns error\n");
        return 0;
    }
    double timestampPostGpuCpuTransfer = getTimeStamp();
    /* Free Device Memory */
    cudaFree(d_Z);
    d_Z = NULL;
    d_Y = NULL;   // aliases into d_XY; only the base pointer is freed
    d_X = NULL;
    cudaFree(d_XY);
    d_XY = NULL;
    /* Verify Device Result with Host Result */
    error = error || !checkZ(h_hZ, h_dZ, numRows, numCols);
    /* Output */
#ifndef NDEBUG
    printf("d_gridDim=(%d, %d), d_blockDim=(%d, %d), d_smemNumBytes=%ld\n", d_gridDim.x, d_gridDim.y, d_blockDim.x, d_blockDim.y, d_smemNumBytes);
#endif
    if (!error)
    {
        // #ifndef NDEBUG
        // printf("<total_GPU_time> <CPU_GPU_transfer_time> <kernel_time> <GPU_CPU_transfer_time> <Z-value> <nl>\n");
        // #endif
        float totalGpuElapased = timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer;
        float cpuGpuTransferElapsed = timestampPreKernel - timestampPreCpuGpuTransfer;
        float kernelElapsed = timestampPreGpuCpuTransfer - timestampPreKernel;
        float gpuCpuTransferElapsed = timestampPostGpuCpuTransfer - timestampPreGpuCpuTransfer;
        int zValueI = 5;
        int zValueJ = 5;
        float zValue = h_dZ[H_INDEX(zValueI, zValueJ)];
        printf("%.6f %.6f %.6f %.6f %.6f\n", totalGpuElapased, cpuGpuTransferElapsed, kernelElapsed, gpuCpuTransferElapsed, zValue);
    }
    else
    {
        // NOTE(review): message is missing the word "match".
        printf("Error: GPU result does not with CPU result\n");
#ifndef NDEBUG
        for (int i = 0; i < 4; i++)
        {
            for (int j = 0; j < 4; j++)
            {
                printf("(i=%d, j=%d), CPU=%.6f, GPU=%.6f, X=%.6f, Y=%.6f\n", i, j, h_hZ[H_INDEX(i, j)], h_dZ[H_INDEX(i, j)], h_X[H_ADJ_INDEX_X(i, j)], h_Y[H_ADJ_INDEX_Y(i, j)]);
            }
        }
#endif
    }
    /* Free Host Memory */
    cudaFreeHost(h_dZ);
    h_dZ = NULL;
    free(h_hZ);
    h_hZ = NULL;
    h_Y = NULL;   // aliases into h_XY; only the base pointer is freed
    h_X = NULL;
    cudaFreeHost(h_XY);
    h_XY = NULL;
    /* Clean Up Device Resource */
    cudaDeviceReset();
} |
55634525bc0ce63132033ddea47f1ce89dc13efd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "hash.h"
// CUDA runtime
#include <hip/hip_runtime.h>
#define DIGEST_LENGTH 52 // Size in words
#define HASH_LENGTH 32 // Size in words
int check_cuda_errors()
{
    // Report (and clear) the most recent pending runtime error, if any.
    // Returns 1 when an error was pending, 0 otherwise.
    hipError_t status = hipGetLastError();
    if (status == hipSuccess)
    {
        return 0;
    }
    printf("Last CUDA error %s\n", hipGetErrorString(status));
    return 1;
}
// Wall-clock timestamp in nanoseconds since the Unix epoch.
long long wall_clock_time()
{
#ifdef __linux__
    struct timespec tp;
    clock_gettime(CLOCK_REALTIME, &tp);
    return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
    // Fallback: microsecond-resolution gettimeofday scaled to nanoseconds.
    // NOTE(review): this branch needs <sys/time.h> for gettimeofday/timeval,
    // which this file does not include directly — verify it is pulled in
    // transitively on non-Linux targets.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
//TODO global hash
__global__
void globalHash(
uint8_t *hash,
const uint8_t *X,
unsigned long long n,
int* found,
unsigned long long *res_nonce,
unsigned long long offset
){
//CONSTRUCTING INPUT
uint8_t input[DIGEST_LENGTH];
for (int i = 0; i < 44; i++) {
input[i] = X[i];
}
unsigned long long threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long nonce = threadID + offset;
for (int i = 44; i < DIGEST_LENGTH; i++) {
input[i] = (nonce >> 8 * (DIGEST_LENGTH - i - 1)) & 0xFF;
}
uint8_t localHash[HASH_LENGTH];
sha256(localHash, input, DIGEST_LENGTH);
// Check 64-bit prefix of SHA256(digest)
unsigned long long prefix = 0x0000;
for (int i = 0; i < 8; i++) {
prefix = prefix | ((unsigned long long) localHash[i] << (8*(7-i)));
}
if (prefix < n) {
if (atomicExch(found, 1) == 0) {
memcpy(hash, localHash, sizeof(uint8_t) * HASH_LENGTH);
memcpy(res_nonce, &nonce, sizeof(unsigned long long));
}
}
}
int main(int argc, char *argv[]) {
//INPUT HANDLING
if (argc != 2 ) {
printf("Usage: [executable] [file 1]\n");
return 1;
}
FILE* file = fopen(argv[1], "r"); /* should check the result */
char *prevDigest = (char*) malloc(64+1);
uint8_t *tid = (uint8_t*) malloc(sizeof(uint8_t));
unsigned long long n;
fscanf(file, "%s", prevDigest);
fscanf(file, "%s", tid);
fscanf(file, "%llu", &n);
fclose(file);
//PREV DIGEST
uint8_t *prev = (uint8_t*) malloc(sizeof(uint8_t) * 32);
char *buff = prevDigest;
for (int i = 0; i<32; i++){
sscanf(buff, "%02hhX", &prev[i]);
buff += 2;
}
//Getting UNIX Timestamp
uint32_t timeNow = (uint32_t) time(NULL);
uint8_t *t = (uint8_t*) malloc(sizeof(uint8_t) * 4);
t[0] = (uint8_t) (timeNow >> 24);
t[1] = (uint8_t) (timeNow >> 16);
t[2] = (uint8_t) (timeNow >> 8);
t[3] = (uint8_t) timeNow;
//HOST DATA
//CHANGE GRID AND BLOCK SIZES HERE
const uint32_t block_count = 80;
const uint32_t threads_per_block = 256;
const uint32_t thread_count = block_count * threads_per_block;
//DEVICE DATA
uint8_t *hash;
uint8_t *X;
unsigned long long *res_nonce;
unsigned long long offset = 0;
int *found;
// "Malloc" device memory
hipMallocManaged((void **)&hash, HASH_LENGTH * sizeof(uint8_t));
hipMallocManaged((void **)&X, DIGEST_LENGTH * sizeof(uint8_t));
hipMallocManaged(&res_nonce, sizeof(unsigned long long));
hipMallocManaged(&found, sizeof(int));
*found = 0;
//FILL X WITH THE INPUT VALUES
int i, j;
for (i = 0, j = 0; i < 32; i++) {
X[i] = prev[j];
j++;
}
for (i = 32, j = 0; i < 36; i++) {
X[i] = t[j];
j++;
}
for (i = 36, j = 0; i < 44; i++) {
X[i] = tid[j];
j++;
}
int start = wall_clock_time();
while (!(*found)) {
hipLaunchKernelGGL(( globalHash), dim3(block_count), dim3(threads_per_block), 0, 0, hash, X, n, found, res_nonce, offset);
hipDeviceSynchronize();
if (check_cuda_errors()){break;}
offset += thread_count;
}
int end = wall_clock_time();
printf("The process took %1.2f seconds\n", ((float)(end - start))/1000000000);
// OUTPUT
printf("%d\n", timeNow);
printf("%llu\n", *res_nonce);
for (int i = 0; i < HASH_LENGTH; i++) {
printf("%02x", hash[i]);
}
printf("\n");
// CLEANUP
hipFree(hash);
hipFree(X);
hipFree(found);
hipFree(res_nonce);
hipDeviceReset();
return 0;
} | 55634525bc0ce63132033ddea47f1ce89dc13efd.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "hash.h"
// CUDA runtime
#include <cuda_runtime.h>
#define DIGEST_LENGTH 52 // Size in words
#define HASH_LENGTH 32 // Size in words
int check_cuda_errors()
{
cudaError_t rc;
rc = cudaGetLastError();
if (rc != cudaSuccess)
{
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
return 1;
}
return 0;
}
long long wall_clock_time()
{
#ifdef __linux__
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
//TODO global hash
__global__
void globalHash(
uint8_t *hash,
const uint8_t *X,
unsigned long long n,
int* found,
unsigned long long *res_nonce,
unsigned long long offset
){
//CONSTRUCTING INPUT
uint8_t input[DIGEST_LENGTH];
for (int i = 0; i < 44; i++) {
input[i] = X[i];
}
unsigned long long threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long nonce = threadID + offset;
for (int i = 44; i < DIGEST_LENGTH; i++) {
input[i] = (nonce >> 8 * (DIGEST_LENGTH - i - 1)) & 0xFF;
}
uint8_t localHash[HASH_LENGTH];
sha256(localHash, input, DIGEST_LENGTH);
// Check 64-bit prefix of SHA256(digest)
unsigned long long prefix = 0x0000;
for (int i = 0; i < 8; i++) {
prefix = prefix | ((unsigned long long) localHash[i] << (8*(7-i)));
}
if (prefix < n) {
if (atomicExch(found, 1) == 0) {
memcpy(hash, localHash, sizeof(uint8_t) * HASH_LENGTH);
memcpy(res_nonce, &nonce, sizeof(unsigned long long));
}
}
}
int main(int argc, char *argv[]) {
//INPUT HANDLING
if (argc != 2 ) {
printf("Usage: [executable] [file 1]\n");
return 1;
}
FILE* file = fopen(argv[1], "r"); /* should check the result */
char *prevDigest = (char*) malloc(64+1);
uint8_t *tid = (uint8_t*) malloc(sizeof(uint8_t));
unsigned long long n;
fscanf(file, "%s", prevDigest);
fscanf(file, "%s", tid);
fscanf(file, "%llu", &n);
fclose(file);
//PREV DIGEST
uint8_t *prev = (uint8_t*) malloc(sizeof(uint8_t) * 32);
char *buff = prevDigest;
for (int i = 0; i<32; i++){
sscanf(buff, "%02hhX", &prev[i]);
buff += 2;
}
//Getting UNIX Timestamp
uint32_t timeNow = (uint32_t) time(NULL);
uint8_t *t = (uint8_t*) malloc(sizeof(uint8_t) * 4);
t[0] = (uint8_t) (timeNow >> 24);
t[1] = (uint8_t) (timeNow >> 16);
t[2] = (uint8_t) (timeNow >> 8);
t[3] = (uint8_t) timeNow;
//HOST DATA
//CHANGE GRID AND BLOCK SIZES HERE
const uint32_t block_count = 80;
const uint32_t threads_per_block = 256;
const uint32_t thread_count = block_count * threads_per_block;
//DEVICE DATA
uint8_t *hash;
uint8_t *X;
unsigned long long *res_nonce;
unsigned long long offset = 0;
int *found;
// "Malloc" device memory
cudaMallocManaged((void **)&hash, HASH_LENGTH * sizeof(uint8_t));
cudaMallocManaged((void **)&X, DIGEST_LENGTH * sizeof(uint8_t));
cudaMallocManaged(&res_nonce, sizeof(unsigned long long));
cudaMallocManaged(&found, sizeof(int));
*found = 0;
//FILL X WITH THE INPUT VALUES
int i, j;
for (i = 0, j = 0; i < 32; i++) {
X[i] = prev[j];
j++;
}
for (i = 32, j = 0; i < 36; i++) {
X[i] = t[j];
j++;
}
for (i = 36, j = 0; i < 44; i++) {
X[i] = tid[j];
j++;
}
int start = wall_clock_time();
while (!(*found)) {
globalHash<<<block_count, threads_per_block>>>(hash, X, n, found, res_nonce, offset);
cudaDeviceSynchronize();
if (check_cuda_errors()){break;}
offset += thread_count;
}
int end = wall_clock_time();
printf("The process took %1.2f seconds\n", ((float)(end - start))/1000000000);
// OUTPUT
printf("%d\n", timeNow);
printf("%llu\n", *res_nonce);
for (int i = 0; i < HASH_LENGTH; i++) {
printf("%02x", hash[i]);
}
printf("\n");
// CLEANUP
cudaFree(hash);
cudaFree(X);
cudaFree(found);
cudaFree(res_nonce);
cudaDeviceReset();
return 0;
} |
a269f1affcf539886ec9275a1e4526fce6a51ee9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ceil(m/NB) x ntile.
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
zsymmetrize_tiles_lower( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.y*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = MAGMA_Z_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
zsymmetrize_tiles_upper( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.y*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_Z_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/***************************************************************************//**
Purpose
-------
ZSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize_batched
*******************************************************************************/
extern "C" void
magmablas_zsymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // only one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB, 1 );
dim3 grid( magma_ceildiv( m, NB ), ntile );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( zsymmetrize_tiles_upper)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, dA, ldda, mstride, nstride );
}
else {
hipLaunchKernelGGL(( zsymmetrize_tiles_lower)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, dA, ldda, mstride, nstride );
}
}
| a269f1affcf539886ec9275a1e4526fce6a51ee9.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ceil(m/NB) x ntile.
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
zsymmetrize_tiles_lower( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.y*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = MAGMA_Z_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
zsymmetrize_tiles_upper( int m, magmaDoubleComplex *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.y*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaDoubleComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaDoubleComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_Z_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/***************************************************************************//**
Purpose
-------
ZSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa,
to make some blocks of dA into general representations of a symmetric block.
This processes NTILE blocks, typically the diagonal blocks.
Each block is offset by mstride rows and nstride columns from the previous block.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows & columns of each square block of dA. M >= 0.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The matrix dA. N = m + nstride*(ntile-1).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)).
@param[in]
ntile INTEGER
Number of blocks to symmetrize. ntile >= 0.
@param[in]
mstride INTEGER
Row offset from start of one block to start of next block. mstride >= 0.
Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles
from overlapping.
@param[in]
nstride INTEGER
Column offset from start of one block to start of next block. nstride >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symmetrize_batched
*******************************************************************************/
extern "C" void
magmablas_zsymmetrize_tiles_q(
magma_uplo_t uplo, magma_int_t m,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m + mstride*(ntile-1)) )
info = -5;
else if ( ntile < 0 )
info = -6;
else if ( mstride < 0 )
info = -7;
else if ( nstride < 0 )
info = -8;
else if ( mstride < m && nstride < m ) // only one must be >= m.
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || ntile == 0 )
return;
dim3 threads( NB, 1 );
dim3 grid( magma_ceildiv( m, NB ), ntile );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( uplo == MagmaUpper ) {
zsymmetrize_tiles_upper
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, dA, ldda, mstride, nstride );
}
else {
zsymmetrize_tiles_lower
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, dA, ldda, mstride, nstride );
}
}
|
5c4d23eddaa9c51bd6b1fdef5beb786c7792a4b2.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "term.h"
// This is the default block size
const unsigned int BLOCK_SIZE = 32;
__global__ void convolution_basic(matrix_t* image, matrix_t* filter, matrix_t* result) {
matrix_t sum;
int rowd,cold,Bx,By,Tx,Ty;
int i,j,half,start_x,start_y,end_x,end_y;
Bx = blockIdx.x;
By = blockIdx.y;
Tx = threadIdx.x;
Ty = threadIdx.y;
sum = 0;
rowd = By * BLOCK_SIZE + Ty;
cold = Bx * BLOCK_SIZE + Tx;
half = (W_SMOOTHING - 1)/2;
start_y = rowd - half >= 0 ? -half : -rowd;
end_y = rowd + half < M ? half : M - rowd - 1;
for(i=start_y; i <= end_y; ++i) {
start_x = cold - half >= 0 ? -half : - cold;
end_x = cold + half < M ? half : M - cold - 1;
for(j=start_x; j <= end_x; ++j) {
sum += image[M*(rowd+i)+(cold+j)]*filter[W_SMOOTHING*(i+half)+(j+half)];
}
}
result[M*(rowd) + cold] = sum;
}
__global__ void convolution_edge(matrix_t* image, matrix_t* filter, matrix_t* result) {
matrix_t gx,gy;
int rowd,cold,Bx,By,Tx,Ty;
int i,j,half,start_x,start_y,end_x,end_y;
Bx = blockIdx.x;
By = blockIdx.y;
Tx = threadIdx.x;
Ty = threadIdx.y;
gx = 0;
gy = 0;
rowd = By * BLOCK_SIZE + Ty;
cold = Bx * BLOCK_SIZE + Tx;
half = (W_EDGE - 1)/2;
start_y = rowd - half >= 0 ? -half : -rowd;
end_y = rowd + half < M ? half : M - rowd - 1;
for(i=start_y; i <= end_y; ++i) {
start_x = cold - half >= 0 ? -half : - cold;
end_x = cold + half < M ? half : M - cold - 1;
for(j=start_x; j <= end_x; ++j) {
gx += image[M*(rowd+i)+(cold+j)]*filter[W_EDGE*(i+half)+(j+half)];
gy += image[M*(rowd+i)+(cold+j)]*filter[W_EDGE*(j+half)+(i+half)];
}
}
result[M*(rowd) + cold] = sqrt(gx*gx+gy*gy) > 70 ? 255 : 0;
}
int main(void) {
// Pointers for host
matrix_t *image;
matrix_t *gaussian;
matrix_t *sobel;
matrix_t *result;
// Pointers for device memory
matrix_t *image_d;
matrix_t *gaussian_d;
matrix_t *sobel_d;
matrix_t *result_d;
// Used to measure performance
hipEvent_t start, stop;
// Used for timing
float msecTotal = 0.0f;
printf("Using block size %d\n", BLOCK_SIZE);
// Allocate host memory for matrices
// We have to cast our calloc/malloc
// because cuda is technically a subset
// of c++ not vanilla c
image = (matrix_t *) calloc(M*M, sizeof(matrix_t));
assert(image != NULL);
gaussian = (matrix_t *) calloc(W_SMOOTHING*W_SMOOTHING, sizeof(matrix_t));
assert(gaussian != NULL);
sobel = (matrix_t *) calloc(W_EDGE*W_EDGE, sizeof(matrix_t));
assert(sobel != NULL);
result = (matrix_t *) calloc(M*M, sizeof(matrix_t));
assert(result != NULL);
get_image(image);
generate_guassian_2d(gaussian);
generate_sobel_2d(sobel);
save_ppm("Leaves_original_cuda.ppm", image);
save_g("2d", gaussian);
// Allocate device memory for matricies
checkCudaErrors(hipMalloc((void **) &(image_d), M*M*sizeof(matrix_t)));
checkCudaErrors(hipMalloc((void **) &(gaussian_d), W_SMOOTHING*W_SMOOTHING*sizeof(matrix_t)));
checkCudaErrors(hipMalloc((void **) &(sobel_d), W_EDGE*W_EDGE*sizeof(matrix_t)));
checkCudaErrors(hipMalloc((void **) &(result_d), M*M*sizeof(matrix_t)));
// Copy host memory to device
checkCudaErrors(hipMemcpy(image_d, image, M*M*sizeof(matrix_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(gaussian_d, gaussian, W_SMOOTHING*W_SMOOTHING*sizeof(matrix_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(sobel_d, sobel, W_EDGE*W_EDGE*sizeof(matrix_t), hipMemcpyHostToDevice));
// Allocate CUDA events that we'll use for timing
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Setup execution parameters TODO
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(M / threads.x, M / threads.y);
printf("Computing result...\n");
hipDeviceSynchronize();
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
hipLaunchKernelGGL(( convolution_basic) , dim3(grid), dim3(threads) , 0, 0, image_d, gaussian_d, result_d);
checkCudaErrors(hipMemcpy(image_d, result_d, M*M*sizeof(matrix_t), hipMemcpyDefault));
hipLaunchKernelGGL(( convolution_edge) , dim3(grid), dim3(threads) , 0, 0, image_d, sobel_d, result_d);
// Copy result from device to host
checkCudaErrors(hipMemcpy(result, result_d, M*M*sizeof(matrix_t), hipMemcpyDeviceToHost));
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
printf("done in % 3f (sec)\n", msecTotal/1000);
save_ppm("Leaves_blur_cuda.ppm", result);
// Clean up memory
free(image);
free(gaussian);
free(sobel);
checkCudaErrors(hipFree(image_d));
checkCudaErrors(hipFree(gaussian_d));
checkCudaErrors(hipFree(sobel_d));
/* end multiplication */
return 0;
}
| 5c4d23eddaa9c51bd6b1fdef5beb786c7792a4b2.cu | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "term.h"
// This is the default block size
const unsigned int BLOCK_SIZE = 32;
__global__ void convolution_basic(matrix_t* image, matrix_t* filter, matrix_t* result) {
matrix_t sum;
int rowd,cold,Bx,By,Tx,Ty;
int i,j,half,start_x,start_y,end_x,end_y;
Bx = blockIdx.x;
By = blockIdx.y;
Tx = threadIdx.x;
Ty = threadIdx.y;
sum = 0;
rowd = By * BLOCK_SIZE + Ty;
cold = Bx * BLOCK_SIZE + Tx;
half = (W_SMOOTHING - 1)/2;
start_y = rowd - half >= 0 ? -half : -rowd;
end_y = rowd + half < M ? half : M - rowd - 1;
for(i=start_y; i <= end_y; ++i) {
start_x = cold - half >= 0 ? -half : - cold;
end_x = cold + half < M ? half : M - cold - 1;
for(j=start_x; j <= end_x; ++j) {
sum += image[M*(rowd+i)+(cold+j)]*filter[W_SMOOTHING*(i+half)+(j+half)];
}
}
result[M*(rowd) + cold] = sum;
}
__global__ void convolution_edge(matrix_t* image, matrix_t* filter, matrix_t* result) {
matrix_t gx,gy;
int rowd,cold,Bx,By,Tx,Ty;
int i,j,half,start_x,start_y,end_x,end_y;
Bx = blockIdx.x;
By = blockIdx.y;
Tx = threadIdx.x;
Ty = threadIdx.y;
gx = 0;
gy = 0;
rowd = By * BLOCK_SIZE + Ty;
cold = Bx * BLOCK_SIZE + Tx;
half = (W_EDGE - 1)/2;
start_y = rowd - half >= 0 ? -half : -rowd;
end_y = rowd + half < M ? half : M - rowd - 1;
for(i=start_y; i <= end_y; ++i) {
start_x = cold - half >= 0 ? -half : - cold;
end_x = cold + half < M ? half : M - cold - 1;
for(j=start_x; j <= end_x; ++j) {
gx += image[M*(rowd+i)+(cold+j)]*filter[W_EDGE*(i+half)+(j+half)];
gy += image[M*(rowd+i)+(cold+j)]*filter[W_EDGE*(j+half)+(i+half)];
}
}
result[M*(rowd) + cold] = sqrt(gx*gx+gy*gy) > 70 ? 255 : 0;
}
int main(void) {
// Pointers for host
matrix_t *image;
matrix_t *gaussian;
matrix_t *sobel;
matrix_t *result;
// Pointers for device memory
matrix_t *image_d;
matrix_t *gaussian_d;
matrix_t *sobel_d;
matrix_t *result_d;
// Used to measure performance
cudaEvent_t start, stop;
// Used for timing
float msecTotal = 0.0f;
printf("Using block size %d\n", BLOCK_SIZE);
// Allocate host memory for matrices
// We have to cast our calloc/malloc
// because cuda is technically a subset
// of c++ not vanilla c
image = (matrix_t *) calloc(M*M, sizeof(matrix_t));
assert(image != NULL);
gaussian = (matrix_t *) calloc(W_SMOOTHING*W_SMOOTHING, sizeof(matrix_t));
assert(gaussian != NULL);
sobel = (matrix_t *) calloc(W_EDGE*W_EDGE, sizeof(matrix_t));
assert(sobel != NULL);
result = (matrix_t *) calloc(M*M, sizeof(matrix_t));
assert(result != NULL);
get_image(image);
generate_guassian_2d(gaussian);
generate_sobel_2d(sobel);
save_ppm("Leaves_original_cuda.ppm", image);
save_g("2d", gaussian);
// Allocate device memory for matricies
checkCudaErrors(cudaMalloc((void **) &(image_d), M*M*sizeof(matrix_t)));
checkCudaErrors(cudaMalloc((void **) &(gaussian_d), W_SMOOTHING*W_SMOOTHING*sizeof(matrix_t)));
checkCudaErrors(cudaMalloc((void **) &(sobel_d), W_EDGE*W_EDGE*sizeof(matrix_t)));
checkCudaErrors(cudaMalloc((void **) &(result_d), M*M*sizeof(matrix_t)));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(image_d, image, M*M*sizeof(matrix_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(gaussian_d, gaussian, W_SMOOTHING*W_SMOOTHING*sizeof(matrix_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(sobel_d, sobel, W_EDGE*W_EDGE*sizeof(matrix_t), cudaMemcpyHostToDevice));
// Allocate CUDA events that we'll use for timing
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Setup execution parameters TODO
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(M / threads.x, M / threads.y);
printf("Computing result...\n");
cudaDeviceSynchronize();
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Execute the kernel
convolution_basic <<< grid, threads >>>(image_d, gaussian_d, result_d);
checkCudaErrors(cudaMemcpy(image_d, result_d, M*M*sizeof(matrix_t), cudaMemcpyDefault));
convolution_edge <<< grid, threads >>>(image_d, sobel_d, result_d);
// Copy result from device to host
checkCudaErrors(cudaMemcpy(result, result_d, M*M*sizeof(matrix_t), cudaMemcpyDeviceToHost));
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
printf("done in % 3f (sec)\n", msecTotal/1000);
save_ppm("Leaves_blur_cuda.ppm", result);
// Clean up memory
free(image);
free(gaussian);
free(sobel);
checkCudaErrors(cudaFree(image_d));
checkCudaErrors(cudaFree(gaussian_d));
checkCudaErrors(cudaFree(sobel_d));
/* end multiplication */
return 0;
}
|
e44428034cc9c21c6b1fec39e541a13e4935569a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include "BFS_APSP_GPU.cuh"
#define VIRTUAL_WARP 8
__global__ void APSP_BFS_node_kernel(int *r, int *c, int * dist, int numVertices, int numEdges, int offset_source)
{
int offset_vertices = blockIdx.x * numVertices;
int offset_edge = blockIdx.x * numEdges;
for (int i = threadIdx.x; i < numVertices; i += blockDim.x)
{
dist[offset_vertices + i] = -1;
}
int edge_index = threadIdx.x % VIRTUAL_WARP;
int vertice_index = threadIdx.x / VIRTUAL_WARP;
int source = blockIdx.x + offset_source;
if (source >= numVertices)
return;
__shared__ bool done;
done = false;
int level = 0;
dist[offset_vertices + source] = level++;
while (!done)
{
__syncthreads(); // attention: this sync is neccessary
done = true;
for (int current = vertice_index; current < numVertices; current += blockDim.x / VIRTUAL_WARP)
{
if (dist[offset_vertices + current] != level - 1)
continue;
for (int j = r[current] + edge_index; j < r[current + 1]; j += VIRTUAL_WARP)
{
int next = c[j];
int read_dist = dist[offset_vertices + next];
if (read_dist == -1)
{
dist[offset_vertices + next] = level;
done = false;
}
}
}
level ++;
__syncthreads();
}
}
void APSP_GPU(int * dist, int *r, int *c, int numVertices, int numEdges, int grid, int thread)
{
int devID;
hipDeviceProp_t deviceProps;
devID = findCudaDevice();
// get number of SMs on this GPU
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
//printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
//int thread = 256;
//int grid = 100;
// allocate device memory
int* d_r;
int* d_c;
int* d_dist;
checkCudaErrors( hipMalloc( (void**) &d_r, sizeof(int) * (numVertices + 1)));
checkCudaErrors( hipMalloc( (void**) &d_c, sizeof(int) * numEdges));
// copy host memory to device
checkCudaErrors( hipMemcpy( d_r, r, sizeof(int) * (numVertices + 1), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy( d_c, c, sizeof(int) * numEdges, hipMemcpyHostToDevice) );
// allocate device memory for result
checkCudaErrors( hipMalloc( (void**) &d_dist, sizeof(int) * numVertices * grid));
clock_t kernel_time = 0;
clock_t transfer_time = 0;
// execute the kernel
for (int offset_source = 0; offset_source < numVertices; offset_source += grid)
{
clock_t time = clock();
hipLaunchKernelGGL(( APSP_BFS_node_kernel), dim3(grid), dim3(thread), 0, 0, d_r, d_c, d_dist, numVertices, numEdges, offset_source);
// check if kernel execution generated and error
getLastCudaError("Kernel execution failed");
hipDeviceSynchronize();
time = clock() - time;
cout<<offset_source<<" done. Time = "<<time<<"ms."<<endl;
kernel_time += time;
time = clock();
// copy result from device to host
if(numVertices - offset_source > grid)
checkCudaErrors(hipMemcpy(dist + (long long)offset_source * numVertices, d_dist, sizeof(float) * numVertices * grid, hipMemcpyDeviceToHost));
else
checkCudaErrors(hipMemcpy(dist + (long long) offset_source * numVertices, d_dist, sizeof(float) * numVertices * (numVertices%grid), hipMemcpyDeviceToHost));
time = clock() - time;
transfer_time += time;
}
cout<<"total kernel time: "<<kernel_time<<"ms."<<endl;
cout<<"total transfering time: "<<transfer_time<<"ms."<<endl;
// cleanup memory
checkCudaErrors(hipFree(d_r));
checkCudaErrors(hipFree(d_c));
checkCudaErrors(hipFree(d_dist));
hipDeviceReset();
} | e44428034cc9c21c6b1fec39e541a13e4935569a.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#include "BFS_APSP_GPU.cuh"
#define VIRTUAL_WARP 8
__global__ void APSP_BFS_node_kernel(int *r, int *c, int * dist, int numVertices, int numEdges, int offset_source)
{
int offset_vertices = blockIdx.x * numVertices;
int offset_edge = blockIdx.x * numEdges;
for (int i = threadIdx.x; i < numVertices; i += blockDim.x)
{
dist[offset_vertices + i] = -1;
}
int edge_index = threadIdx.x % VIRTUAL_WARP;
int vertice_index = threadIdx.x / VIRTUAL_WARP;
int source = blockIdx.x + offset_source;
if (source >= numVertices)
return;
__shared__ bool done;
done = false;
int level = 0;
dist[offset_vertices + source] = level++;
while (!done)
{
__syncthreads(); // attention: this sync is neccessary
done = true;
for (int current = vertice_index; current < numVertices; current += blockDim.x / VIRTUAL_WARP)
{
if (dist[offset_vertices + current] != level - 1)
continue;
for (int j = r[current] + edge_index; j < r[current + 1]; j += VIRTUAL_WARP)
{
int next = c[j];
int read_dist = dist[offset_vertices + next];
if (read_dist == -1)
{
dist[offset_vertices + next] = level;
done = false;
}
}
}
level ++;
__syncthreads();
}
}
// Computes all-pairs shortest paths by launching a BFS-based kernel from
// `grid` source vertices per batch.
//
// dist        : [out] host buffer of numVertices*numVertices ints, row = source vertex
// r, c        : CSR row offsets (numVertices+1 ints) and column indices (numEdges ints)
// numVertices : vertex count
// numEdges    : edge count
// grid        : blocks per launch (= number of source vertices per batch)
// thread      : threads per block
void APSP_GPU(int * dist, int *r, int *c, int numVertices, int numEdges, int grid, int thread)
{
    int devID;
    cudaDeviceProp deviceProps;
    devID = findCudaDevice();
    // get properties of the selected GPU
    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    // allocate device memory for the CSR graph
    int* d_r;
    int* d_c;
    int* d_dist;
    checkCudaErrors( cudaMalloc( (void**) &d_r, sizeof(int) * (numVertices + 1)));
    checkCudaErrors( cudaMalloc( (void**) &d_c, sizeof(int) * numEdges));
    // copy host memory to device
    checkCudaErrors( cudaMemcpy( d_r, r, sizeof(int) * (numVertices + 1), cudaMemcpyHostToDevice) );
    checkCudaErrors( cudaMemcpy( d_c, c, sizeof(int) * numEdges, cudaMemcpyHostToDevice) );
    // allocate device memory for one batch of result rows
    checkCudaErrors( cudaMalloc( (void**) &d_dist, sizeof(int) * numVertices * grid));
    clock_t kernel_time = 0;
    clock_t transfer_time = 0;
    // process `grid` source vertices per kernel launch
    for (int offset_source = 0; offset_source < numVertices; offset_source += grid)
    {
        clock_t time = clock();
        APSP_BFS_node_kernel<<<grid, thread>>>(d_r, d_c, d_dist, numVertices, numEdges, offset_source);
        // check if kernel execution generated an error
        getLastCudaError("Kernel execution failed");
        cudaDeviceSynchronize();    // cudaThreadSynchronize() is deprecated
        time = clock() - time;
        // clock() returns ticks; convert to milliseconds before printing
        cout<<offset_source<<" done. Time = "<<(time * 1000.0 / CLOCKS_PER_SEC)<<"ms."<<endl;
        kernel_time += time;
        time = clock();
        // Copy this batch of rows back; the last batch may hold fewer than
        // `grid` rows.
        // BUGFIX: the original used sizeof(float) for int data, and sized the
        // final copy with numVertices%grid, which copied 0 bytes whenever
        // numVertices was an exact multiple of grid.
        int rows = (numVertices - offset_source < grid) ? (numVertices - offset_source) : grid;
        checkCudaErrors(cudaMemcpy(dist + (long long)offset_source * numVertices, d_dist,
                                   sizeof(int) * numVertices * rows, cudaMemcpyDeviceToHost));
        time = clock() - time;
        transfer_time += time;
    }
    cout<<"total kernel time: "<<(kernel_time * 1000.0 / CLOCKS_PER_SEC)<<"ms."<<endl;
    cout<<"total transfering time: "<<(transfer_time * 1000.0 / CLOCKS_PER_SEC)<<"ms."<<endl;
    // cleanup memory
    checkCudaErrors(cudaFree(d_r));
    checkCudaErrors(cudaFree(d_c));
    checkCudaErrors(cudaFree(d_dist));
    cudaDeviceReset();
}
2ec4763cdd831fd190c04789349cff87863e0517.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
// Stencil half-width: 4 points on each side per dimension (order-8 scheme).
#define N_RADIUS 4
// Thread-block tile extents: blockDim.x spans z, blockDim.y spans y.
#define N_THREADS_PER_PLANE_DIM_X 8
#define N_THREADS_PER_PLANE_DIM_Y 8
// Wraps a rotating-register index back into [0, 8] (modulo 9).
#define Z(val) ((val > 8) ? (val - 9) : val)
// INNER_STENCIL(R0..R8): advance the x-march by one plane and apply the
// order-8 Laplacian update at plane i for the interior (non-PML) region.
// R4 is the centre x-plane; R0..R3 / R5..R8 are the four planes behind /
// ahead; R8 is overwritten with the freshly loaded plane u[i+N_RADIUS].
// y/z neighbours come from the shared tile s_u (halo refilled each step);
// x neighbours come from the rotating zrs* registers. The macro returns
// from the kernel once i reaches x4, so it is only valid inside the
// unrolled while(true) loop of the kernels below.
// (No inline comments: '//' before a '\' continuation would break the macro.)
#define INNER_STENCIL(R0, R1, R2, R3, R4, R5, R6, R7, R8) i++; if (i >= x4) { return; } \
zrs##R8 = u[IDX3_l(i+N_RADIUS,j,k)]; \
__syncthreads(); \
if (threadIdx.y < 2 * N_RADIUS) { \
s_u[threadIdx.y + (threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y][suk] = \
u[IDX3_l(i,j0+threadIdx.y+(threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y-N_RADIUS,k)]; \
} \
if (threadIdx.x < 2 * N_RADIUS) { \
s_u[suj][threadIdx.x + (threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X] = \
u[IDX3_l(i,j,k0+threadIdx.x+(threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X-N_RADIUS)]; \
} \
s_u[suj][suk] = u[IDX3_l(i,j,k)]; \
__syncthreads(); \
if (j < y4 && k < z4) { \
float lap = __fmaf_rn(coef0, zrs##R4 \
, __fmaf_rn(coefx_1, __fadd_rn(zrs##R5,zrs##R3) \
, __fmaf_rn(coefy_1, __fadd_rn(s_u[suj+1][suk],s_u[suj-1][suk]) \
, __fmaf_rn(coefz_1, __fadd_rn(s_u[suj][suk+1],s_u[suj][suk-1]) \
, __fmaf_rn(coefx_2, __fadd_rn(zrs##R6,zrs##R2) \
, __fmaf_rn(coefy_2, __fadd_rn(s_u[suj+2][suk],s_u[suj-2][suk]) \
, __fmaf_rn(coefz_2, __fadd_rn(s_u[suj][suk+2],s_u[suj][suk-2]) \
, __fmaf_rn(coefx_3, __fadd_rn(zrs##R7,zrs##R1) \
, __fmaf_rn(coefy_3, __fadd_rn(s_u[suj+3][suk],s_u[suj-3][suk]) \
, __fmaf_rn(coefz_3, __fadd_rn(s_u[suj][suk+3],s_u[suj][suk-3]) \
, __fmaf_rn(coefx_4, __fadd_rn(zrs##R8,zrs##R0) \
, __fmaf_rn(coefy_4, __fadd_rn(s_u[suj+4][suk],s_u[suj-4][suk]) \
, __fmul_rn(coefz_4, __fadd_rn(s_u[suj][suk+4],s_u[suj][suk-4]) \
))))))))))))); \
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, zrs##R4, \
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)]) \
); \
}
// PML_STENCIL(R0..R8): same x-march / Laplacian scheme as INNER_STENCIL, but
// for the absorbing-boundary (PML) region: the v update is damped by the
// sponge profile eta, includes the auxiliary field phi, and phi itself is
// updated from the gradients of eta and u (x-gradient from registers,
// y/z-gradients from the shared tile). Register-rotation arguments R0..R8
// have the same meaning as in INNER_STENCIL; returns once i reaches x4.
// (No inline comments: '//' before a '\' continuation would break the macro.)
#define PML_STENCIL(R0, R1, R2, R3, R4, R5, R6, R7, R8) i++; if (i >= x4) { return; } \
zrs##R8 = u[IDX3_l(i+N_RADIUS,j,k)]; \
__syncthreads(); \
if (threadIdx.y < 2 * N_RADIUS) { \
s_u[threadIdx.y + (threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y][suk] = \
u[IDX3_l(i,j0+threadIdx.y+(threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y-N_RADIUS,k)]; \
} \
if (threadIdx.x < 2 * N_RADIUS) { \
s_u[suj][threadIdx.x + (threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X] = \
u[IDX3_l(i,j,k0+threadIdx.x+(threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X-N_RADIUS)]; \
} \
s_u[suj][suk] = u[IDX3_l(i,j,k)]; \
__syncthreads(); \
if (j < y4 && k < z4) { \
float lap = __fmaf_rn(coef0, zrs##R4 \
, __fmaf_rn(coefx_1, __fadd_rn(zrs##R5,zrs##R3) \
, __fmaf_rn(coefy_1, __fadd_rn(s_u[suj+1][suk],s_u[suj-1][suk]) \
, __fmaf_rn(coefz_1, __fadd_rn(s_u[suj][suk+1],s_u[suj][suk-1]) \
, __fmaf_rn(coefx_2, __fadd_rn(zrs##R6,zrs##R2) \
, __fmaf_rn(coefy_2, __fadd_rn(s_u[suj+2][suk],s_u[suj-2][suk]) \
, __fmaf_rn(coefz_2, __fadd_rn(s_u[suj][suk+2],s_u[suj][suk-2]) \
, __fmaf_rn(coefx_3, __fadd_rn(zrs##R7,zrs##R1) \
, __fmaf_rn(coefy_3, __fadd_rn(s_u[suj+3][suk],s_u[suj-3][suk]) \
, __fmaf_rn(coefz_3, __fadd_rn(s_u[suj][suk+3],s_u[suj][suk-3]) \
, __fmaf_rn(coefx_4, __fadd_rn(zrs##R8,zrs##R0) \
, __fmaf_rn(coefy_4, __fadd_rn(s_u[suj+4][suk],s_u[suj-4][suk]) \
, __fmul_rn(coefz_4, __fadd_rn(s_u[suj][suk+4],s_u[suj][suk-4]) \
))))))))))))); \
const float s_eta_c = eta[IDX3_eta1(i,j,k)]; \
v[IDX3_l(i,j,k)] = __fdiv_rn( \
__fmaf_rn( \
__fmaf_rn(2.f, s_eta_c, \
__fsub_rn(2.f, \
__fmul_rn(s_eta_c, s_eta_c) \
) \
), \
zrs##R4, \
__fmaf_rn( \
vp[IDX3(i,j,k)], \
__fadd_rn(lap, phi[IDX3(i,j,k)]), \
-v[IDX3_l(i,j,k)] \
) \
), \
__fmaf_rn(2.f, s_eta_c, 1.f) \
); \
phi[IDX3(i,j,k)] = __fdiv_rn( \
__fsub_rn( \
phi[IDX3(i,j,k)], \
__fmaf_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]), \
__fsub_rn(zrs##R5,zrs##R3) \
), hdx_2, \
__fmaf_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]), \
__fsub_rn(s_u[suj+1][suk], s_u[suj-1][suk]) \
), hdy_2, \
__fmul_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]), \
__fsub_rn(s_u[suj][suk+1], s_u[suj][suk-1]) \
), \
hdz_2) \
)) \
) \
, \
__fadd_rn(1.f, s_eta_c) \
); \
}
// Applies the interior (non-PML) wave-equation update on the sub-volume
// [x3,x4) x [y3,y4) x [z3,z4): v <- 2*u + vp*lap(u) - v (see INNER_STENCIL).
// Launch layout: 2D grid of (z,y) tiles with blockDim =
// (N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1); each thread
// marches along x, keeping 2*N_RADIUS+1 x-planes in rotating registers
// zrs0..zrs8 and the current (y,z) tile plus halo in shared memory.
// phi and eta are unused here (kept so both kernels share a signature).
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
// (y,z) tile with an N_RADIUS halo on all four sides.
__shared__ float s_u[N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
// This thread's position inside the shared tile (offset past the halo).
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Rotating register pipeline holding 9 consecutive x-planes of u.
float zrs0, zrs1, zrs2, zrs3, zrs4, zrs5, zrs6, zrs7, zrs8;
// Preparation: preload the first 8 planes; zrs8 is filled by the first
// stencil step.
zrs0 = u[IDX3_l(x3-4,j,k)];
zrs1 = u[IDX3_l(x3-3,j,k)];
zrs2 = u[IDX3_l(x3-2,j,k)];
zrs3 = u[IDX3_l(x3-1,j,k)];
zrs4 = u[IDX3_l(x3+0,j,k)];
zrs5 = u[IDX3_l(x3+1,j,k)];
zrs6 = u[IDX3_l(x3+2,j,k)];
zrs7 = u[IDX3_l(x3+3,j,k)];
llint i = x3-1;
// March along x; each macro invocation rotates the register roles by one.
// The macro itself returns from the kernel when i reaches x4.
while (true) {
INNER_STENCIL(0, 1, 2, 3, 4, 5, 6, 7, 8);
INNER_STENCIL(1, 2, 3, 4, 5, 6, 7, 8, 0);
INNER_STENCIL(2, 3, 4, 5, 6, 7, 8, 0, 1);
INNER_STENCIL(3, 4, 5, 6, 7, 8, 0, 1, 2);
INNER_STENCIL(4, 5, 6, 7, 8, 0, 1, 2, 3);
INNER_STENCIL(5, 6, 7, 8, 0, 1, 2, 3, 4);
INNER_STENCIL(6, 7, 8, 0, 1, 2, 3, 4, 5);
INNER_STENCIL(7, 8, 0, 1, 2, 3, 4, 5, 6);
INNER_STENCIL(8, 0, 1, 2, 3, 4, 5, 6, 7);
}
}
// Applies the damped (PML absorbing-boundary) wave-equation update on the
// sub-volume [x3,x4) x [y3,y4) x [z3,z4), updating both v and the auxiliary
// field phi using the sponge profile eta (see PML_STENCIL).
// Same launch layout and register/shared-memory pipeline as
// target_inner_3d_kernel; the only signature difference is that phi is
// writable here.
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
// (y,z) tile with an N_RADIUS halo on all four sides.
__shared__ float s_u[N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Rotating register pipeline holding 9 consecutive x-planes of u.
float zrs0, zrs1, zrs2, zrs3, zrs4, zrs5, zrs6, zrs7, zrs8;
// Preparation: preload the first 8 planes; zrs8 is filled by the first step.
zrs0 = u[IDX3_l(x3-4,j,k)];
zrs1 = u[IDX3_l(x3-3,j,k)];
zrs2 = u[IDX3_l(x3-2,j,k)];
zrs3 = u[IDX3_l(x3-1,j,k)];
zrs4 = u[IDX3_l(x3+0,j,k)];
zrs5 = u[IDX3_l(x3+1,j,k)];
zrs6 = u[IDX3_l(x3+2,j,k)];
zrs7 = u[IDX3_l(x3+3,j,k)];
llint i = x3-1;
// March along x; the macro returns from the kernel when i reaches x4.
while (true) {
PML_STENCIL(0, 1, 2, 3, 4, 5, 6, 7, 8);
PML_STENCIL(1, 2, 3, 4, 5, 6, 7, 8, 0);
PML_STENCIL(2, 3, 4, 5, 6, 7, 8, 0, 1);
PML_STENCIL(3, 4, 5, 6, 7, 8, 0, 1, 2);
PML_STENCIL(4, 5, 6, 7, 8, 0, 1, 2, 3);
PML_STENCIL(5, 6, 7, 8, 0, 1, 2, 3, 4);
PML_STENCIL(6, 7, 8, 0, 1, 2, 3, 4, 5);
PML_STENCIL(7, 8, 0, 1, 2, 3, 4, 5, 6);
PML_STENCIL(8, 0, 1, 2, 3, 4, 5, 6, 7);
}
}
// Injects the source term for one time step: adds `source` to the single
// field element at flattened index `idx` (computed by the caller via IDX3_l).
// Launched with a single thread (<<<1,1>>> in the host loop below).
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
// Time-steps the 3D wave equation for `nsteps` iterations on the GPU (HIP).
// Per step: the interior region runs target_inner_3d_kernel on stream 0 while
// the six PML boundary slabs (front/back in z, top/bottom in y, left/right in
// x) run target_pml_3d_kernel on streams 1-6; after a sync, the source term
// for the step is injected and the u/v buffers are ping-ponged.
// time_kernel accumulates wall-clock seconds spent in the launch+sync window.
// NOTE(review): d_phi is written by the PML kernels but never copied back to
// the host `phi` (which is const here) — presumably intentional; confirm.
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
// Buffer element counts. u/v carry an l{x,y,z}-wide halo; eta a 1-wide halo.
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
// u/v device buffers are over-allocated so y/z extents round up to a whole
// number of thread-block tiles (threads past ny/nz still read safely).
const llint size_u_ext = (nx + 2 * lx)
* ((((ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y + 1) * N_THREADS_PER_PLANE_DIM_Y) + 2 * ly)
* ((((nz+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X + 1) * N_THREADS_PER_PLANE_DIM_X) + 2 * lz);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
hipMalloc(&d_u, sizeof(float) * size_u_ext);
hipMalloc(&d_v, sizeof(float) * size_u_ext);
hipMalloc(&d_vp, sizeof(float) * size_vp);
hipMalloc(&d_phi, sizeof(float) * size_phi);
hipMalloc(&d_eta, sizeof(float) * size_eta);
hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice);
hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice);
hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1);
// One stream for the interior region plus one per PML face.
int num_streams = 7;
hipStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreateWithFlags(&(streams[i]), hipStreamNonBlocking);
}
// Progress is printed every npo steps.
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
// The seven region launches below are independent and may overlap.
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_front), dim3(threadsPerBlock), 0, streams[1], nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y2-y1+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_top), dim3(threadsPerBlock), 0, streams[2], nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_left), dim3(threadsPerBlock), 0, streams[3], nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_center), dim3(threadsPerBlock), 0, streams[0], nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_right), dim3(threadsPerBlock), 0, streams[4], nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y6-y5+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_bottom), dim3(threadsPerBlock), 0, streams[5], nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_back), dim3(threadsPerBlock), 0, streams[6], nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Wait for all seven regions before injecting the source.
for (int i = 0; i < num_streams; i++) {
hipStreamSynchronize(streams[i]);
}
hipLaunchKernelGGL(( kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
// Ping-pong the wavefield buffers for the next step.
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(streams[i]);
}
// After the final swap, d_u holds the newest wavefield; copy it back.
hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_v);
hipFree(d_vp);
hipFree(d_phi);
hipFree(d_eta);
}
| 2ec4763cdd831fd190c04789349cff87863e0517.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
// Stencil half-width: 4 points on each side per dimension (order-8 scheme).
#define N_RADIUS 4
// Thread-block tile extents: blockDim.x spans z, blockDim.y spans y.
#define N_THREADS_PER_PLANE_DIM_X 8
#define N_THREADS_PER_PLANE_DIM_Y 8
// Wraps a rotating-register index back into [0, 8] (modulo 9).
#define Z(val) ((val > 8) ? (val - 9) : val)
// INNER_STENCIL(R0..R8): advance the x-march by one plane and apply the
// order-8 Laplacian update at plane i for the interior (non-PML) region.
// R4 is the centre x-plane; R0..R3 / R5..R8 are the four planes behind /
// ahead; R8 is overwritten with the freshly loaded plane u[i+N_RADIUS].
// y/z neighbours come from the shared tile s_u (halo refilled each step);
// x neighbours come from the rotating zrs* registers. The macro returns
// from the kernel once i reaches x4, so it is only valid inside the
// unrolled while(true) loop of the kernels below.
// (No inline comments: '//' before a '\' continuation would break the macro.)
#define INNER_STENCIL(R0, R1, R2, R3, R4, R5, R6, R7, R8) i++; if (i >= x4) { return; } \
zrs##R8 = u[IDX3_l(i+N_RADIUS,j,k)]; \
__syncthreads(); \
if (threadIdx.y < 2 * N_RADIUS) { \
s_u[threadIdx.y + (threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y][suk] = \
u[IDX3_l(i,j0+threadIdx.y+(threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y-N_RADIUS,k)]; \
} \
if (threadIdx.x < 2 * N_RADIUS) { \
s_u[suj][threadIdx.x + (threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X] = \
u[IDX3_l(i,j,k0+threadIdx.x+(threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X-N_RADIUS)]; \
} \
s_u[suj][suk] = u[IDX3_l(i,j,k)]; \
__syncthreads(); \
if (j < y4 && k < z4) { \
float lap = __fmaf_rn(coef0, zrs##R4 \
, __fmaf_rn(coefx_1, __fadd_rn(zrs##R5,zrs##R3) \
, __fmaf_rn(coefy_1, __fadd_rn(s_u[suj+1][suk],s_u[suj-1][suk]) \
, __fmaf_rn(coefz_1, __fadd_rn(s_u[suj][suk+1],s_u[suj][suk-1]) \
, __fmaf_rn(coefx_2, __fadd_rn(zrs##R6,zrs##R2) \
, __fmaf_rn(coefy_2, __fadd_rn(s_u[suj+2][suk],s_u[suj-2][suk]) \
, __fmaf_rn(coefz_2, __fadd_rn(s_u[suj][suk+2],s_u[suj][suk-2]) \
, __fmaf_rn(coefx_3, __fadd_rn(zrs##R7,zrs##R1) \
, __fmaf_rn(coefy_3, __fadd_rn(s_u[suj+3][suk],s_u[suj-3][suk]) \
, __fmaf_rn(coefz_3, __fadd_rn(s_u[suj][suk+3],s_u[suj][suk-3]) \
, __fmaf_rn(coefx_4, __fadd_rn(zrs##R8,zrs##R0) \
, __fmaf_rn(coefy_4, __fadd_rn(s_u[suj+4][suk],s_u[suj-4][suk]) \
, __fmul_rn(coefz_4, __fadd_rn(s_u[suj][suk+4],s_u[suj][suk-4]) \
))))))))))))); \
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, zrs##R4, \
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)]) \
); \
}
// PML_STENCIL(R0..R8): same x-march / Laplacian scheme as INNER_STENCIL, but
// for the absorbing-boundary (PML) region: the v update is damped by the
// sponge profile eta, includes the auxiliary field phi, and phi itself is
// updated from the gradients of eta and u (x-gradient from registers,
// y/z-gradients from the shared tile). Register-rotation arguments R0..R8
// have the same meaning as in INNER_STENCIL; returns once i reaches x4.
// (No inline comments: '//' before a '\' continuation would break the macro.)
#define PML_STENCIL(R0, R1, R2, R3, R4, R5, R6, R7, R8) i++; if (i >= x4) { return; } \
zrs##R8 = u[IDX3_l(i+N_RADIUS,j,k)]; \
__syncthreads(); \
if (threadIdx.y < 2 * N_RADIUS) { \
s_u[threadIdx.y + (threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y][suk] = \
u[IDX3_l(i,j0+threadIdx.y+(threadIdx.y/N_RADIUS)*N_THREADS_PER_PLANE_DIM_Y-N_RADIUS,k)]; \
} \
if (threadIdx.x < 2 * N_RADIUS) { \
s_u[suj][threadIdx.x + (threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X] = \
u[IDX3_l(i,j,k0+threadIdx.x+(threadIdx.x/N_RADIUS)*N_THREADS_PER_PLANE_DIM_X-N_RADIUS)]; \
} \
s_u[suj][suk] = u[IDX3_l(i,j,k)]; \
__syncthreads(); \
if (j < y4 && k < z4) { \
float lap = __fmaf_rn(coef0, zrs##R4 \
, __fmaf_rn(coefx_1, __fadd_rn(zrs##R5,zrs##R3) \
, __fmaf_rn(coefy_1, __fadd_rn(s_u[suj+1][suk],s_u[suj-1][suk]) \
, __fmaf_rn(coefz_1, __fadd_rn(s_u[suj][suk+1],s_u[suj][suk-1]) \
, __fmaf_rn(coefx_2, __fadd_rn(zrs##R6,zrs##R2) \
, __fmaf_rn(coefy_2, __fadd_rn(s_u[suj+2][suk],s_u[suj-2][suk]) \
, __fmaf_rn(coefz_2, __fadd_rn(s_u[suj][suk+2],s_u[suj][suk-2]) \
, __fmaf_rn(coefx_3, __fadd_rn(zrs##R7,zrs##R1) \
, __fmaf_rn(coefy_3, __fadd_rn(s_u[suj+3][suk],s_u[suj-3][suk]) \
, __fmaf_rn(coefz_3, __fadd_rn(s_u[suj][suk+3],s_u[suj][suk-3]) \
, __fmaf_rn(coefx_4, __fadd_rn(zrs##R8,zrs##R0) \
, __fmaf_rn(coefy_4, __fadd_rn(s_u[suj+4][suk],s_u[suj-4][suk]) \
, __fmul_rn(coefz_4, __fadd_rn(s_u[suj][suk+4],s_u[suj][suk-4]) \
))))))))))))); \
const float s_eta_c = eta[IDX3_eta1(i,j,k)]; \
v[IDX3_l(i,j,k)] = __fdiv_rn( \
__fmaf_rn( \
__fmaf_rn(2.f, s_eta_c, \
__fsub_rn(2.f, \
__fmul_rn(s_eta_c, s_eta_c) \
) \
), \
zrs##R4, \
__fmaf_rn( \
vp[IDX3(i,j,k)], \
__fadd_rn(lap, phi[IDX3(i,j,k)]), \
-v[IDX3_l(i,j,k)] \
) \
), \
__fmaf_rn(2.f, s_eta_c, 1.f) \
); \
phi[IDX3(i,j,k)] = __fdiv_rn( \
__fsub_rn( \
phi[IDX3(i,j,k)], \
__fmaf_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]), \
__fsub_rn(zrs##R5,zrs##R3) \
), hdx_2, \
__fmaf_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]), \
__fsub_rn(s_u[suj+1][suk], s_u[suj-1][suk]) \
), hdy_2, \
__fmul_rn( \
__fmul_rn( \
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]), \
__fsub_rn(s_u[suj][suk+1], s_u[suj][suk-1]) \
), \
hdz_2) \
)) \
) \
, \
__fadd_rn(1.f, s_eta_c) \
); \
}
// Applies the interior (non-PML) wave-equation update on the sub-volume
// [x3,x4) x [y3,y4) x [z3,z4): v <- 2*u + vp*lap(u) - v (see INNER_STENCIL).
// Launch layout: 2D grid of (z,y) tiles with blockDim =
// (N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1); each thread
// marches along x, keeping 2*N_RADIUS+1 x-planes in rotating registers
// zrs0..zrs8 and the current (y,z) tile plus halo in shared memory.
// phi and eta are unused here (kept so both kernels share a signature).
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
// (y,z) tile with an N_RADIUS halo on all four sides.
__shared__ float s_u[N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
// This thread's position inside the shared tile (offset past the halo).
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Rotating register pipeline holding 9 consecutive x-planes of u.
float zrs0, zrs1, zrs2, zrs3, zrs4, zrs5, zrs6, zrs7, zrs8;
// Preparation: preload the first 8 planes; zrs8 is filled by the first
// stencil step.
zrs0 = u[IDX3_l(x3-4,j,k)];
zrs1 = u[IDX3_l(x3-3,j,k)];
zrs2 = u[IDX3_l(x3-2,j,k)];
zrs3 = u[IDX3_l(x3-1,j,k)];
zrs4 = u[IDX3_l(x3+0,j,k)];
zrs5 = u[IDX3_l(x3+1,j,k)];
zrs6 = u[IDX3_l(x3+2,j,k)];
zrs7 = u[IDX3_l(x3+3,j,k)];
llint i = x3-1;
// March along x; each macro invocation rotates the register roles by one.
// The macro itself returns from the kernel when i reaches x4.
while (true) {
INNER_STENCIL(0, 1, 2, 3, 4, 5, 6, 7, 8);
INNER_STENCIL(1, 2, 3, 4, 5, 6, 7, 8, 0);
INNER_STENCIL(2, 3, 4, 5, 6, 7, 8, 0, 1);
INNER_STENCIL(3, 4, 5, 6, 7, 8, 0, 1, 2);
INNER_STENCIL(4, 5, 6, 7, 8, 0, 1, 2, 3);
INNER_STENCIL(5, 6, 7, 8, 0, 1, 2, 3, 4);
INNER_STENCIL(6, 7, 8, 0, 1, 2, 3, 4, 5);
INNER_STENCIL(7, 8, 0, 1, 2, 3, 4, 5, 6);
INNER_STENCIL(8, 0, 1, 2, 3, 4, 5, 6, 7);
}
}
// Applies the damped (PML absorbing-boundary) wave-equation update on the
// sub-volume [x3,x4) x [y3,y4) x [z3,z4), updating both v and the auxiliary
// field phi using the sponge profile eta (see PML_STENCIL).
// Same launch layout and register/shared-memory pipeline as
// target_inner_3d_kernel; the only signature difference is that phi is
// writable here.
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
// (y,z) tile with an N_RADIUS halo on all four sides.
__shared__ float s_u[N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Rotating register pipeline holding 9 consecutive x-planes of u.
float zrs0, zrs1, zrs2, zrs3, zrs4, zrs5, zrs6, zrs7, zrs8;
// Preparation: preload the first 8 planes; zrs8 is filled by the first step.
zrs0 = u[IDX3_l(x3-4,j,k)];
zrs1 = u[IDX3_l(x3-3,j,k)];
zrs2 = u[IDX3_l(x3-2,j,k)];
zrs3 = u[IDX3_l(x3-1,j,k)];
zrs4 = u[IDX3_l(x3+0,j,k)];
zrs5 = u[IDX3_l(x3+1,j,k)];
zrs6 = u[IDX3_l(x3+2,j,k)];
zrs7 = u[IDX3_l(x3+3,j,k)];
llint i = x3-1;
// March along x; the macro returns from the kernel when i reaches x4.
while (true) {
PML_STENCIL(0, 1, 2, 3, 4, 5, 6, 7, 8);
PML_STENCIL(1, 2, 3, 4, 5, 6, 7, 8, 0);
PML_STENCIL(2, 3, 4, 5, 6, 7, 8, 0, 1);
PML_STENCIL(3, 4, 5, 6, 7, 8, 0, 1, 2);
PML_STENCIL(4, 5, 6, 7, 8, 0, 1, 2, 3);
PML_STENCIL(5, 6, 7, 8, 0, 1, 2, 3, 4);
PML_STENCIL(6, 7, 8, 0, 1, 2, 3, 4, 5);
PML_STENCIL(7, 8, 0, 1, 2, 3, 4, 5, 6);
PML_STENCIL(8, 0, 1, 2, 3, 4, 5, 6, 7);
}
}
// Injects the source term for one time step: adds `source` to the single
// field element at flattened index `idx` (computed by the caller via IDX3_l).
// Launched with a single thread (<<<1,1>>> in the host loop below).
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
// Time-steps the 3D wave equation for `nsteps` iterations on the GPU (CUDA).
// Per step: the interior region runs target_inner_3d_kernel on stream 0 while
// the six PML boundary slabs (front/back in z, top/bottom in y, left/right in
// x) run target_pml_3d_kernel on streams 1-6; after a sync, the source term
// for the step is injected and the u/v buffers are ping-ponged.
// time_kernel accumulates wall-clock seconds spent in the launch+sync window.
// NOTE(review): d_phi is written by the PML kernels but never copied back to
// the host `phi` (which is const here) — presumably intentional; confirm.
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
// Buffer element counts. u/v carry an l{x,y,z}-wide halo; eta a 1-wide halo.
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
// u/v device buffers are over-allocated so y/z extents round up to a whole
// number of thread-block tiles (threads past ny/nz still read safely).
const llint size_u_ext = (nx + 2 * lx)
* ((((ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y + 1) * N_THREADS_PER_PLANE_DIM_Y) + 2 * ly)
* ((((nz+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X + 1) * N_THREADS_PER_PLANE_DIM_X) + 2 * lz);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
cudaMalloc(&d_u, sizeof(float) * size_u_ext);
cudaMalloc(&d_v, sizeof(float) * size_u_ext);
cudaMalloc(&d_vp, sizeof(float) * size_vp);
cudaMalloc(&d_phi, sizeof(float) * size_phi);
cudaMalloc(&d_eta, sizeof(float) * size_eta);
cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice);
cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice);
cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice);
cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1);
// One stream for the interior region plus one per PML face.
int num_streams = 7;
cudaStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreateWithFlags(&(streams[i]), cudaStreamNonBlocking);
}
// Progress is printed every npo steps.
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
// The seven region launches below are independent and may overlap.
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_pml_3d_kernel<<<n_block_front, threadsPerBlock, 0, streams[1]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y2-y1+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_pml_3d_kernel<<<n_block_top, threadsPerBlock, 0, streams[2]>>>(nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_left, threadsPerBlock, 0, streams[3]>>>(nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_inner_3d_kernel<<<n_block_center, threadsPerBlock, 0, streams[0]>>>(nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_right, threadsPerBlock, 0, streams[4]>>>(nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y6-y5+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_bottom, threadsPerBlock, 0, streams[5]>>>(nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_back, threadsPerBlock, 0, streams[6]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
// Wait for all seven regions before injecting the source.
for (int i = 0; i < num_streams; i++) {
cudaStreamSynchronize(streams[i]);
}
kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
// Ping-pong the wavefield buffers for the next step.
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(streams[i]);
}
// After the final swap, d_u holds the newest wavefield; copy it back.
cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_v);
cudaFree(d_vp);
cudaFree(d_phi);
cudaFree(d_eta);
}
|
f344af4cab9eb9e84add7383f2e284e6ae373273.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "knet.h"
// Applies dropout to the n-element float array x, writing into y:
// y[i] is zeroed when xmask[i] falls below `dropout`, otherwise set to
// x[i] rescaled by `scale`. Grid-stride loop, so any launch config covers n.
__global__ void _drop32(int n, float *x, float *y, float *xmask, double dropout, double scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        y[idx] = (xmask[idx] < dropout) ? 0 : x[idx] * scale;
    }
}
// Double-precision variant of _drop32: y[i] = 0 where xmask[i] < dropout,
// otherwise y[i] = x[i] * scale. Grid-stride loop over all n elements.
__global__ void _drop64(int n, double *x, double *y, double *xmask, double dropout, double scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        y[idx] = (xmask[idx] < dropout) ? 0 : x[idx] * scale;
    }
}
extern "C" {
// Host-callable launch wrappers for the dropout kernels. KCALL (defined in
// knet.h — not visible here) expands to the kernel-launch boilerplate.
void drop32(int n, float *x, float *y, float *xmask, double dropout, double scale) KCALL(_drop32,n,x,y,xmask,dropout,scale);
void drop64(int n, double *x, double *y, double *xmask, double dropout, double scale) KCALL(_drop64,n,x,y,xmask,dropout,scale);
}
| f344af4cab9eb9e84add7383f2e284e6ae373273.cu | #include "knet.h"
// Applies dropout to the n-element float array x, writing into y:
// y[i] is zeroed when xmask[i] falls below `dropout`, otherwise set to
// x[i] rescaled by `scale`. Grid-stride loop, so any launch config covers n.
__global__ void _drop32(int n, float *x, float *y, float *xmask, double dropout, double scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        y[idx] = (xmask[idx] < dropout) ? 0 : x[idx] * scale;
    }
}
// Double-precision variant of _drop32: y[i] = 0 where xmask[i] < dropout,
// otherwise y[i] = x[i] * scale. Grid-stride loop over all n elements.
__global__ void _drop64(int n, double *x, double *y, double *xmask, double dropout, double scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        y[idx] = (xmask[idx] < dropout) ? 0 : x[idx] * scale;
    }
}
extern "C" {
// Host-callable launch wrappers for the dropout kernels. KCALL (defined in
// knet.h — not visible here) expands to the kernel-launch boilerplate.
void drop32(int n, float *x, float *y, float *xmask, double dropout, double scale) KCALL(_drop32,n,x,y,xmask,dropout,scale);
void drop64(int n, double *x, double *y, double *xmask, double dropout, double scale) KCALL(_drop64,n,x,y,xmask,dropout,scale);
}
|
8f81a070e0f6c7380b44e0df30cf72fa0a275085.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
void saxpy (float* X, float* Y, float* Z, int n);
float avg (float* arr, int n);
// Device kernel computing z[i] = a*x[i] + y[i] for n elements,
// one thread per element; threads with index >= n do nothing.
__global__
void saxpyKernel(float *x, float *y, float *z, float a, int n) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    z[tid] = a * x[tid] + y[tid];
}
// Driver: fills two 1M-element host vectors (x = 1, y = 2), runs SAXPY on
// the GPU via saxpy(), and prints the average of the result vector.
int main () {
    const int N = 1 << 20;
    const int size = N * sizeof(float);

    // Host input and output vectors.
    float *h_x = (float*)malloc(size);
    float *h_y = (float*)malloc(size);
    float *h_z = (float*)malloc(size);

    for (int i = 0; i < N; i++) {
        h_x[i] = 1.0;
        h_y[i] = 2.0;
    }

    // Perform SAXPY on 1M elements.
    saxpy(h_x, h_y, h_z, N);
    printf("AVG = %f\n", avg(h_z, N));

    // Release host memory.
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
}
// Computes Z = 2*X + Y on the GPU for n elements (HIP runtime).
// X, Y : host input vectors of n floats
// Z    : host output vector of n floats
void saxpy (float* X, float* Y, float* Z, int n) {
    // Device input and output vectors.
    float *d_x, *d_y, *d_z;
    int size = n*sizeof(float);
    // Allocate device memory.
    hipMalloc((void**)&d_x, size);
    hipMalloc((void**)&d_y, size);
    hipError_t z_err = hipMalloc((void**)&d_z, size);
    if (z_err != hipSuccess) {
        printf("%s in %s at line %d\n", hipGetErrorString(z_err), __FILE__, __LINE__);}
    // Copy X and Y vectors to device.
    hipMemcpy(d_x, X, size, hipMemcpyHostToDevice);
    hipMemcpy(d_y, Y, size, hipMemcpyHostToDevice);
    // number of threads per block
    int blockSize = 1024;
    // BUGFIX: round the grid size up (ceil-div) so the tail elements are
    // still processed when n is not a multiple of blockSize; the old
    // n/blockSize silently dropped them. The kernel's bounds check makes the
    // extra threads harmless.
    int gridSize = (n + blockSize - 1) / blockSize;
    hipLaunchKernelGGL(( saxpyKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_x, d_y, d_z, 2.0, n);
    // Copy z from device to host (synchronizes with the kernel).
    hipMemcpy(Z, d_z, size, hipMemcpyDeviceToHost);
    // Free device memory.
    hipFree(d_x);
    hipFree(d_y);
    hipFree(d_z);
}
// Returns the arithmetic mean of the first n elements of arr.
// Returns 0 for n <= 0 instead of dividing by zero.
// NOTE: the sum is accumulated in float, so precision degrades for large n.
float avg (float* arr, int n) {
    if (n <= 0) {
        return 0.0f;
    }
    float total = 0;
    for (int i = 0; i < n; i++) {
        total += arr[i];
    }
    return total / n;
}
| 8f81a070e0f6c7380b44e0df30cf72fa0a275085.cu | #include <cuda.h>
#include <stdio.h>
void saxpy (float* X, float* Y, float* Z, int n);
float avg (float* arr, int n);
// Device kernel computing z[i] = a*x[i] + y[i] for n elements,
// one thread per element; threads with index >= n do nothing.
__global__
void saxpyKernel(float *x, float *y, float *z, float a, int n) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    z[tid] = a * x[tid] + y[tid];
}
// Driver: fills two 1M-element host vectors (x = 1, y = 2), runs SAXPY on
// the GPU via saxpy(), and prints the average of the result vector.
int main () {
    const int N = 1 << 20;
    const int size = N * sizeof(float);

    // Host input and output vectors.
    float *h_x = (float*)malloc(size);
    float *h_y = (float*)malloc(size);
    float *h_z = (float*)malloc(size);

    for (int i = 0; i < N; i++) {
        h_x[i] = 1.0;
        h_y[i] = 2.0;
    }

    // Perform SAXPY on 1M elements.
    saxpy(h_x, h_y, h_z, N);
    printf("AVG = %f\n", avg(h_z, N));

    // Release host memory.
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
}
// Computes Z = 2*X + Y on the GPU for n elements (CUDA runtime).
// X, Y : host input vectors of n floats
// Z    : host output vector of n floats
void saxpy (float* X, float* Y, float* Z, int n) {
    // Device input and output vectors.
    float *d_x, *d_y, *d_z;
    int size = n*sizeof(float);
    // Allocate device memory.
    cudaMalloc((void**)&d_x, size);
    cudaMalloc((void**)&d_y, size);
    cudaError_t z_err = cudaMalloc((void**)&d_z, size);
    if (z_err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(z_err), __FILE__, __LINE__);}
    // Copy X and Y vectors to device.
    cudaMemcpy(d_x, X, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, Y, size, cudaMemcpyHostToDevice);
    // number of threads per block
    int blockSize = 1024;
    // BUGFIX: round the grid size up (ceil-div) so the tail elements are
    // still processed when n is not a multiple of blockSize; the old
    // n/blockSize silently dropped them. The kernel's bounds check makes the
    // extra threads harmless.
    int gridSize = (n + blockSize - 1) / blockSize;
    saxpyKernel<<<gridSize, blockSize>>>(d_x, d_y, d_z, 2.0, n);
    // Copy z from device to host (synchronizes with the kernel).
    cudaMemcpy(Z, d_z, size, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
}
/* Returns the arithmetic mean of the first n entries of arr. */
float avg (float* arr, int n) {
	float sum = 0;
	for (int idx = 0; idx < n; ++idx)
		sum += arr[idx];
	return sum / n;
}
|
44ffcd45cdd7350b9a9ac8518ca12fb58a2eb815.hip | // !!! This is a file automatically generated by hipify!!!
#include "Indice2D.h"
#include "hip/hip_runtime.h"
#include "cudaTools.h"
#include "KernelFilterImpl.h"
#include "Chronos.h"
#include <iostream>
/*--------------------------------------*\
|* UTILS *|
\*-------------------------------------*/
typedef bool (*compare) ( const uint8_t val1, const uint8_t val2 );
// Comparator used with minOrMax: returns true when val2 is the smaller
// value, i.e. when val2 should be selected. NOTE(review): shadows the
// builtin min for uint8_t operands.
__device__ bool min ( const uint8_t val1, const uint8_t val2 ) {
	return val1 > val2;
}
// Comparator used with minOrMax: returns true when val2 is the larger
// value, i.e. when val2 should be selected. NOTE(review): shadows the
// builtin max for uint8_t operands.
__device__ bool max ( const uint8_t val1, const uint8_t val2 ) {
	return val1 < val2;
}
/**
* Computes min-or-max using comparaison function.
*
* @param val1 the first value to compare
* @param val2 the second value to compare
* @param comp the function used to compare
* @param result the result from min-or-max computation
*/
__device__ void minOrMax ( const uint8_t val1, const uint8_t val2, const compare comp, uint8_t* result ) {
	// Select val2 when the comparator votes for it, otherwise keep val1.
	*result = comp ( val1, val2 ) ? val2 : val1;
}
/**
* Computes Min-or-Max from RGB channels.
*
* @param r red channel
* @param g green channel
* @param b blue channel
* @param comp compare function
* @param result resulting value from comparaison
*/
__device__ void minOrMax3 ( const uint8_t r, const uint8_t g, const uint8_t b, const compare comp, uint8_t* result ) {
	// Pairwise reduction of the three channels with the same comparator.
	uint8_t bestRG;
	uint8_t bestGB;
	minOrMax ( r, g, comp, &bestRG );
	minOrMax ( g, b, comp, &bestGB );
	minOrMax ( bestRG, bestGB, comp, result );
}
/*--------------------------------------*\
|* GPU Globals *|
\*-------------------------------------*/
texture<uchar4, 2, hipReadModeElementType> texBWImage;
__constant__ float k_KERNEL[81];
/**
* Computes Grayscale image from RGB image.
* <p> Uses lightness algorithm:
* <pre>
* bw = (max(R, G, B) + min(R, G, B)) / 2
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Lightness ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	while ( s < size ) {
		uint8_t minresult, maxresult;
		// max / min comparators pick the largest / smallest channel value.
		minOrMax3 ( ptrDevRGBImage[s].x, ptrDevRGBImage[s].y, ptrDevRGBImage[s].z, max, &maxresult );
		minOrMax3 ( ptrDevRGBImage[s].x, ptrDevRGBImage[s].y, ptrDevRGBImage[s].z, min, &minresult );
		uint8_t gray = ( maxresult + minresult ) / 2;
		// Gray is written to all four channels, alpha included.
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
		s += nbThreads;
	}
}
/**
* Computes Grayscale image from RGB image.
* <p> Uses average algorithm:
* <pre>
* bw = (R + G + B) / 3
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Average ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	// Each thread walks the pixel array with a stride equal to the total
	// thread count; the channel sum promotes to int, so no overflow.
	const int step = Indice2D::nbThread ();
	size_t total = h * w;
	for ( int s = Indice2D::tid (); s < total; s += step ) {
		const uchar4 rgb = ptrDevRGBImage[s];
		const uint8_t gray = ( rgb.x + rgb.y + rgb.z ) / 3;
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
	}
}
/**
* Computes Grayscale image from RGB image.
* <p> Uses lightness algorithm:
* <pre>
* bw = (0.21 * R) + (0.71 * G) + (0.07 * B)
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Luminance ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	// Perceptual channel weights (0.21/0.71/0.07, summing to 0.99).
	const float R_FACTOR = 0.21f;
	const float G_FACTOR = 0.71f;
	const float B_FACTOR = 0.07f;
	while ( s < size ) {
		// Weighted sum truncated to uint8_t on assignment.
		uint8_t gray = ( ptrDevRGBImage[s].x * R_FACTOR ) + ( ptrDevRGBImage[s].y * G_FACTOR ) + ( ptrDevRGBImage[s].z * B_FACTOR );
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and global memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
__device__ float convolutionKernelGlobalImageTexture ( const float* ptrDevKernel, const uint32_t k, const uint32_t center,
	const uint32_t kHalf, const uint32_t i, const uint32_t j ) {
	// Full k x k convolution around (i, j): the four quadrants first, then
	// the centre row and column, then the centre tap exactly once.
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += ptrDevKernel[center + ( v * k ) + u] * tex2D ( texBWImage, j + v, i + u ).x;
			sum += ptrDevKernel[center + ( v * k ) - u] * tex2D ( texBWImage, j + v, i - u ).x;
			sum += ptrDevKernel[center - ( v * k ) + u] * tex2D ( texBWImage, j - v, i + u ).x;
			sum += ptrDevKernel[center - ( v * k ) - u] * tex2D ( texBWImage, j - v, i - u ).x;
		}
	}
	// Fixed: the old cross loop ran u in [-k/2, k/2), which dropped the
	// +kHalf taps of the centre row/column and accumulated the centre tap
	// three times in total.
	for ( int u = 1; u <= kHalf; u++ ) {
		sum += ptrDevKernel[center + u] * tex2D ( texBWImage, j, i + u ).x;
		sum += ptrDevKernel[center - u] * tex2D ( texBWImage, j, i - u ).x;
		sum += ptrDevKernel[center + k * u] * tex2D ( texBWImage, j + u, i ).x;
		sum += ptrDevKernel[center - k * u] * tex2D ( texBWImage, j - u, i ).x;
	}
	sum += ptrDevKernel[center] * tex2D ( texBWImage, j, i ).x;
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionGlobalImageTexture ( const uint32_t w, const uint32_t h, const float* ptrDevKernel, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelGlobalImageTexture ( ptrDevKernel, k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and constant memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
__device__ float convolutionKernelConstantImageTexture ( const uint32_t k, const uint32_t center, const uint32_t kHalf, const uint32_t i,
	const uint32_t j ) {
	// Full k x k convolution around (i, j) with the kernel in constant
	// memory: four quadrants, then the centre row/column, then the centre
	// tap exactly once.
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += k_KERNEL[center + ( v * k ) + u] * tex2D ( texBWImage, j + v, i + u ).x;
			sum += k_KERNEL[center + ( v * k ) - u] * tex2D ( texBWImage, j + v, i - u ).x;
			sum += k_KERNEL[center - ( v * k ) + u] * tex2D ( texBWImage, j - v, i + u ).x;
			sum += k_KERNEL[center - ( v * k ) - u] * tex2D ( texBWImage, j - v, i - u ).x;
		}
	}
	// Fixed: the old cross loop ran u in [-k/2, k/2), dropping the +kHalf
	// taps and adding the centre tap three times overall.
	for ( int u = 1; u <= kHalf; u++ ) {
		sum += k_KERNEL[center + u] * tex2D ( texBWImage, j, i + u ).x;
		sum += k_KERNEL[center - u] * tex2D ( texBWImage, j, i - u ).x;
		sum += k_KERNEL[center + k * u] * tex2D ( texBWImage, j + u, i ).x;
		sum += k_KERNEL[center - k * u] * tex2D ( texBWImage, j - u, i ).x;
	}
	sum += k_KERNEL[center] * tex2D ( texBWImage, j, i ).x;
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionConstantImageTexture ( const uint32_t w, const uint32_t h, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelConstantImageTexture ( k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and constant memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
// NOTE(review): __mul24 is a 24-bit *integer* multiply; the float kernel
// weights are converted to int first, so fractional weights truncate to 0
// and this variant only makes sense for integer-valued kernels — confirm
// intent before relying on it.
// NOTE(review): the last loop runs u in [-k/2, k/2): it misses the +kHalf
// taps and the centre tap ends up accumulated three times (same issue as
// the non-mul24 variants).
__device__ float convolutionKernelConstantImageTexture_mul24 ( const uint32_t k, const uint32_t center, const uint32_t kHalf,
	const uint32_t i, const uint32_t j ) {
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += __mul24 ( k_KERNEL[center + ( v * k ) + u], tex2D ( texBWImage, j + v, i + u ).x );
			sum += __mul24 ( k_KERNEL[center + ( v * k ) - u], tex2D ( texBWImage, j + v, i - u ).x );
			sum += __mul24 ( k_KERNEL[center - ( v * k ) + u], tex2D ( texBWImage, j - v, i + u ).x );
			sum += __mul24 ( k_KERNEL[center - ( v * k ) - u], tex2D ( texBWImage, j - v, i - u ).x );
		}
	}
	for ( int u = -k / 2; u < k / 2; u++ ) {
		sum += __mul24 ( k_KERNEL[center + u], tex2D ( texBWImage, j, i + u ).x );
		sum += __mul24 ( k_KERNEL[center + k * u], tex2D ( texBWImage, j + u, i ).x );
	}
	sum += ( __mul24 ( k_KERNEL[center], tex2D ( texBWImage, j, i ).x ) ); // Center computed twice.
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionConstantImageTexture_mul24 ( const uint32_t w, const uint32_t h, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	// Same as kernelConvolutionConstantImageTexture but uses the __mul24
	// device helper. Fixed: this wrapper previously called the non-mul24
	// helper, leaving the _mul24 variant as dead code.
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelConstantImageTexture_mul24 ( k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Displays given black&white image to the OpenGL context.
*
* @param ptrDevConvolution image resulting from convolution
* @param w width of the image
* @param h heigth of the image
* @param ptrDevImageGL OpenGL context image
*/
__global__ void kernelDisplayConvolution ( const float* ptrDevConvolution, const uint32_t w, const uint32_t h, uchar4* ptrDevImageGL ) {
	// Copies the grayscale convolution result into the RGBA OpenGL buffer
	// (alpha forced to 255). The old i/j pixelIJ computation was unused
	// and has been removed.
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	int s = tid;
	size_t size = h * w;
	while ( s < size ) {
		ptrDevImageGL[s].w = 255;
		ptrDevImageGL[s].x = ptrDevImageGL[s].y = ptrDevImageGL[s].z = (uint8_t) ( ptrDevConvolution[s] );
		s += nbThreads;
	}
}
/*--------------------------------------*\
|* CPU Globals *|
\*-------------------------------------*/
// Cached geometry and buffer sizes shared by init/launch/free.
struct CudaImagesSizes {
	uint32_t w;          // image width in pixels
	uint32_t h;          // image height in pixels
	size_t rgb_pitch;    // bytes per row of the RGB image
	size_t rgb_size;     // total bytes of the RGB image
	size_t bw_pitch;     // bytes per row of the B&W image
	size_t bw_size;      // total bytes of the B&W image
	size_t conv_size;    // total bytes of the float convolution buffer
	size_t kernel_size;  // bytes of the convolution kernel
};
static uchar4* ptrDevCudaRGBImage = NULL;
static uchar4* ptrDevCudaBWImage = NULL;
static float* ptrDevKernel = NULL;
static float* ptrDevCudaImageConvolution = NULL;
static CudaImagesSizes sizes;
/**
* Initializes memory and everythings
*
* @param w width of the image
* @param h heigth of the image
*/
void initKernelFillImage ( const uint32_t w, const uint32_t h, const float* kernel, const size_t kernelSize ) {
	// Allocates all device buffers, uploads the convolution kernel to both
	// global and constant memory, and binds the B&W image to the 2D
	// texture. kernelSize is a float count; it must not exceed 81, the
	// capacity of k_KERNEL (HANDLE_ERROR traps the oversize copy).
	size_t rgb_size = sizeof(uchar4) * h * w;
	size_t bw_size = sizeof(uchar4) * h * w;
	size_t conv_size = sizeof(float) * h * w;
	sizes.w = w;
	sizes.h = h;
	sizes.rgb_pitch = sizeof(uchar4) * w;
	sizes.rgb_size = rgb_size;
	sizes.bw_pitch = sizeof(uchar4) * w;
	sizes.bw_size = bw_size;
	sizes.conv_size = conv_size;
	sizes.kernel_size = kernelSize * sizeof(float);
	HANDLE_ERROR( hipMalloc((void**) &ptrDevCudaRGBImage, rgb_size) );
	HANDLE_ERROR( hipMalloc((void**) &ptrDevCudaBWImage, bw_size ) );
	HANDLE_ERROR( hipMalloc((void**) &ptrDevCudaImageConvolution, conv_size ) );
	HANDLE_ERROR( hipMalloc((void**) &ptrDevKernel, sizes.kernel_size) );
	// Copy kernel to global memory
	HANDLE_ERROR( hipMemcpy( ptrDevKernel, kernel, sizes.kernel_size, hipMemcpyHostToDevice ) );
	// Fixed: pass the symbol itself, not its name as a string — HIP's
	// hipMemcpyToSymbol takes the device symbol (wrapped in HIP_SYMBOL),
	// and the string-name form was removed from CUDA in 5.0, so this call
	// could never succeed.
	HANDLE_ERROR( hipMemcpyToSymbol( HIP_SYMBOL( k_KERNEL ), kernel, sizes.kernel_size, 0, hipMemcpyHostToDevice ) );
	// Create tex, bind tex to ptrDevCudaBWImage
	texBWImage.addressMode[0] = hipAddressModeWrap;
	texBWImage.addressMode[1] = hipAddressModeWrap;
	//texBWImage.filterMode = hipFilterModePoint;
	texBWImage.normalized = false; // coordinate not in [0, 1]
	hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar4> ();
	HANDLE_ERROR( hipBindTexture2D(NULL, &texBWImage, ptrDevCudaBWImage, &channelDesc, w, h, sizes.bw_pitch ) );
}
/**
*
*/
void freeKernelFillImageKernel () {
	// Releases everything acquired in initKernelFillImage: the texture
	// binding and the four device buffers. Previously this was a stub and
	// every init leaked its allocations. Pointers are reset so a later
	// re-init is safe (hipFree(NULL) is a no-op).
	HANDLE_ERROR( hipUnbindTexture( &texBWImage ) );
	HANDLE_ERROR( hipFree( ptrDevCudaRGBImage ) );
	HANDLE_ERROR( hipFree( ptrDevCudaBWImage ) );
	HANDLE_ERROR( hipFree( ptrDevCudaImageConvolution ) );
	HANDLE_ERROR( hipFree( ptrDevKernel ) );
	ptrDevCudaRGBImage = NULL;
	ptrDevCudaBWImage = NULL;
	ptrDevCudaImageConvolution = NULL;
	ptrDevKernel = NULL;
}
/**
* Launches kernel which fill image.
* Kernel called are:
* <ul>
* <ol>RGB to BW kernel (3 algorithms)</ol>
* <ol>Convolution kernel (different memory usage)</ol>
* </ul>
*
* @param ptrDevImageGL OpenGL context image (DEVICE)
* @param ptrCudaImage image read from video (HOST)
* @param w width of the image
* @param h heigth of the image
*/
double launchKernelFillImageKernel ( uchar4* ptrDevImageGL, const uchar4* ptrCudaImage, const uint32_t w, const uint32_t h,
	const KernelKind kind, const GrayscaleMethod grayscale, const dim3 dg, const dim3 db ) {
	Chronos chrono;
	// Upload the new frame before starting the timer.
	HANDLE_ERROR( hipMemcpy( ptrDevCudaRGBImage, ptrCudaImage, sizes.rgb_size, hipMemcpyHostToDevice ) );
	chrono.start ();
	// Grayscale conversion; AVERAGE doubles as the fallback case.
	switch ( grayscale ) {
	default:
	case AVERAGE:
	hipLaunchKernelGGL(( kernelRGBImageToBW_Average), dim3(dg), dim3(db) , 0, 0, ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	case LIGHTNESS:
	hipLaunchKernelGGL(( kernelRGBImageToBW_Lightness), dim3(dg), dim3(db) , 0, 0, ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	case LUMINANCE:
	hipLaunchKernelGGL(( kernelRGBImageToBW_Luminance), dim3(dg), dim3(db) , 0, 0, ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	}
	// Convolution; kernel width hard-coded to 9 (k_KERNEL holds 9x9 = 81
	// floats). NOTE(review): no hipGetLastError() after the launches, so a
	// bad launch configuration would go unnoticed until a later call.
	switch ( kind ) {
	default:
	case TEXTURE_GLOBAL:
	hipLaunchKernelGGL(( kernelConvolutionGlobalImageTexture), dim3(dg), dim3(db) , 0, 0, w, h, ptrDevKernel, 9, ptrDevCudaImageConvolution );
	break;
	case TEXTURE_CONSTANT:
	hipLaunchKernelGGL(( kernelConvolutionConstantImageTexture), dim3(dg), dim3(db) , 0, 0, w, h, 9, ptrDevCudaImageConvolution );
	break;
	case TEXTURE_CONSTANT_MUL24:
	hipLaunchKernelGGL(( kernelConvolutionConstantImageTexture_mul24), dim3(dg), dim3(db) , 0, 0, w, h, 9, ptrDevCudaImageConvolution );
	break;
	}
	// Block until both kernels finish so the measured time is meaningful.
	HANDLE_ERROR( hipDeviceSynchronize() );
	double time = chrono.stop ();
	// The display pass runs outside the timed region.
	hipLaunchKernelGGL(( kernelDisplayConvolution), dim3(dg), dim3(db) , 0, 0, ptrDevCudaImageConvolution, w, h, ptrDevImageGL );
	; //
	return time;
}
| 44ffcd45cdd7350b9a9ac8518ca12fb58a2eb815.cu | #include "Indice2D.h"
#include "cuda_runtime.h"
#include "cudaTools.h"
#include "KernelFilterImpl.h"
#include "Chronos.h"
#include <iostream>
/*--------------------------------------*\
|* UTILS *|
\*-------------------------------------*/
typedef bool (*compare) ( const uint8_t val1, const uint8_t val2 );
// Comparator used with minOrMax: returns true when val2 is the smaller
// value, i.e. when val2 should be selected. NOTE(review): shadows the
// builtin min for uint8_t operands.
__device__ bool min ( const uint8_t val1, const uint8_t val2 ) {
	return val1 > val2;
}
// Comparator used with minOrMax: returns true when val2 is the larger
// value, i.e. when val2 should be selected. NOTE(review): shadows the
// builtin max for uint8_t operands.
__device__ bool max ( const uint8_t val1, const uint8_t val2 ) {
	return val1 < val2;
}
/**
* Computes min-or-max using comparaison function.
*
* @param val1 the first value to compare
* @param val2 the second value to compare
* @param comp the function used to compare
* @param result the result from min-or-max computation
*/
__device__ void minOrMax ( const uint8_t val1, const uint8_t val2, const compare comp, uint8_t* result ) {
	// Select val2 when the comparator votes for it, otherwise keep val1.
	*result = comp ( val1, val2 ) ? val2 : val1;
}
/**
* Computes Min-or-Max from RGB channels.
*
* @param r red channel
* @param g green channel
* @param b blue channel
* @param comp compare function
* @param result resulting value from comparaison
*/
__device__ void minOrMax3 ( const uint8_t r, const uint8_t g, const uint8_t b, const compare comp, uint8_t* result ) {
	// Pairwise reduction of the three channels with the same comparator.
	uint8_t bestRG;
	uint8_t bestGB;
	minOrMax ( r, g, comp, &bestRG );
	minOrMax ( g, b, comp, &bestGB );
	minOrMax ( bestRG, bestGB, comp, result );
}
/*--------------------------------------*\
|* GPU Globals *|
\*-------------------------------------*/
texture<uchar4, 2, cudaReadModeElementType> texBWImage;
__constant__ float k_KERNEL[81];
/**
* Computes Grayscale image from RGB image.
* <p> Uses lightness algorithm:
* <pre>
* bw = (max(R, G, B) + min(R, G, B)) / 2
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Lightness ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	while ( s < size ) {
		uint8_t minresult, maxresult;
		// max / min comparators pick the largest / smallest channel value.
		minOrMax3 ( ptrDevRGBImage[s].x, ptrDevRGBImage[s].y, ptrDevRGBImage[s].z, max, &maxresult );
		minOrMax3 ( ptrDevRGBImage[s].x, ptrDevRGBImage[s].y, ptrDevRGBImage[s].z, min, &minresult );
		uint8_t gray = ( maxresult + minresult ) / 2;
		// Gray is written to all four channels, alpha included.
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
		s += nbThreads;
	}
}
/**
* Computes Grayscale image from RGB image.
* <p> Uses average algorithm:
* <pre>
* bw = (R + G + B) / 3
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Average ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	// Each thread walks the pixel array with a stride equal to the total
	// thread count; the channel sum promotes to int, so no overflow.
	const int step = Indice2D::nbThread ();
	size_t total = h * w;
	for ( int s = Indice2D::tid (); s < total; s += step ) {
		const uchar4 rgb = ptrDevRGBImage[s];
		const uint8_t gray = ( rgb.x + rgb.y + rgb.z ) / 3;
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
	}
}
/**
* Computes Grayscale image from RGB image.
* <p> Uses lightness algorithm:
* <pre>
* bw = (0.21 * R) + (0.71 * G) + (0.07 * B)
* </pre>
*
* @param ptrDevRGBImage RGB image read from video
* @param w width of the image
* @param h height of the image
* @param ptrDevBWImage black&white image computed
*/
__global__ void kernelRGBImageToBW_Luminance ( const uchar4* ptrDevRGBImage, const uint32_t w, const uint32_t h, uchar4* ptrDevBWImage ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	// Perceptual channel weights (0.21/0.71/0.07, summing to 0.99).
	const float R_FACTOR = 0.21f;
	const float G_FACTOR = 0.71f;
	const float B_FACTOR = 0.07f;
	while ( s < size ) {
		// Weighted sum truncated to uint8_t on assignment.
		uint8_t gray = ( ptrDevRGBImage[s].x * R_FACTOR ) + ( ptrDevRGBImage[s].y * G_FACTOR ) + ( ptrDevRGBImage[s].z * B_FACTOR );
		ptrDevBWImage[s].x = ptrDevBWImage[s].y = ptrDevBWImage[s].z = ptrDevBWImage[s].w = gray;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and global memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
__device__ float convolutionKernelGlobalImageTexture ( const float* ptrDevKernel, const uint32_t k, const uint32_t center,
	const uint32_t kHalf, const uint32_t i, const uint32_t j ) {
	// Full k x k convolution around (i, j): the four quadrants first, then
	// the centre row and column, then the centre tap exactly once.
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += ptrDevKernel[center + ( v * k ) + u] * tex2D ( texBWImage, j + v, i + u ).x;
			sum += ptrDevKernel[center + ( v * k ) - u] * tex2D ( texBWImage, j + v, i - u ).x;
			sum += ptrDevKernel[center - ( v * k ) + u] * tex2D ( texBWImage, j - v, i + u ).x;
			sum += ptrDevKernel[center - ( v * k ) - u] * tex2D ( texBWImage, j - v, i - u ).x;
		}
	}
	// Fixed: the old cross loop ran u in [-k/2, k/2), which dropped the
	// +kHalf taps of the centre row/column and accumulated the centre tap
	// three times in total.
	for ( int u = 1; u <= kHalf; u++ ) {
		sum += ptrDevKernel[center + u] * tex2D ( texBWImage, j, i + u ).x;
		sum += ptrDevKernel[center - u] * tex2D ( texBWImage, j, i - u ).x;
		sum += ptrDevKernel[center + k * u] * tex2D ( texBWImage, j + u, i ).x;
		sum += ptrDevKernel[center - k * u] * tex2D ( texBWImage, j - u, i ).x;
	}
	sum += ptrDevKernel[center] * tex2D ( texBWImage, j, i ).x;
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionGlobalImageTexture ( const uint32_t w, const uint32_t h, const float* ptrDevKernel, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelGlobalImageTexture ( ptrDevKernel, k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and constant memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
__device__ float convolutionKernelConstantImageTexture ( const uint32_t k, const uint32_t center, const uint32_t kHalf, const uint32_t i,
	const uint32_t j ) {
	// Full k x k convolution around (i, j) with the kernel in constant
	// memory: four quadrants, then the centre row/column, then the centre
	// tap exactly once.
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += k_KERNEL[center + ( v * k ) + u] * tex2D ( texBWImage, j + v, i + u ).x;
			sum += k_KERNEL[center + ( v * k ) - u] * tex2D ( texBWImage, j + v, i - u ).x;
			sum += k_KERNEL[center - ( v * k ) + u] * tex2D ( texBWImage, j - v, i + u ).x;
			sum += k_KERNEL[center - ( v * k ) - u] * tex2D ( texBWImage, j - v, i - u ).x;
		}
	}
	// Fixed: the old cross loop ran u in [-k/2, k/2), dropping the +kHalf
	// taps and adding the centre tap three times overall.
	for ( int u = 1; u <= kHalf; u++ ) {
		sum += k_KERNEL[center + u] * tex2D ( texBWImage, j, i + u ).x;
		sum += k_KERNEL[center - u] * tex2D ( texBWImage, j, i - u ).x;
		sum += k_KERNEL[center + k * u] * tex2D ( texBWImage, j + u, i ).x;
		sum += k_KERNEL[center - k * u] * tex2D ( texBWImage, j - u, i ).x;
	}
	sum += k_KERNEL[center] * tex2D ( texBWImage, j, i ).x;
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionConstantImageTexture ( const uint32_t w, const uint32_t h, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelConstantImageTexture ( k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Convolution function using texture for the image and constant memory for the kernel.
*
* @param kernel the kernel used for convolution
* @param k number of column from the kernel
* @param center center point of the kernel
* @param kHalf half of the kernel column size
* @param i i-th position of the image
* @param j j-th position of the image
*/
// NOTE(review): __mul24 is a 24-bit *integer* multiply; the float kernel
// weights are converted to int first, so fractional weights truncate to 0
// and this variant only makes sense for integer-valued kernels — confirm
// intent before relying on it.
// NOTE(review): the last loop runs u in [-k/2, k/2): it misses the +kHalf
// taps and the centre tap ends up accumulated three times (same issue as
// the non-mul24 variants).
__device__ float convolutionKernelConstantImageTexture_mul24 ( const uint32_t k, const uint32_t center, const uint32_t kHalf,
	const uint32_t i, const uint32_t j ) {
	float sum = 0.0f;
	for ( int v = 1; v <= kHalf; v++ ) {
		for ( int u = 1; u <= kHalf; u++ ) {
			sum += __mul24 ( k_KERNEL[center + ( v * k ) + u], tex2D ( texBWImage, j + v, i + u ).x );
			sum += __mul24 ( k_KERNEL[center + ( v * k ) - u], tex2D ( texBWImage, j + v, i - u ).x );
			sum += __mul24 ( k_KERNEL[center - ( v * k ) + u], tex2D ( texBWImage, j - v, i + u ).x );
			sum += __mul24 ( k_KERNEL[center - ( v * k ) - u], tex2D ( texBWImage, j - v, i - u ).x );
		}
	}
	for ( int u = -k / 2; u < k / 2; u++ ) {
		sum += __mul24 ( k_KERNEL[center + u], tex2D ( texBWImage, j, i + u ).x );
		sum += __mul24 ( k_KERNEL[center + k * u], tex2D ( texBWImage, j + u, i ).x );
	}
	sum += ( __mul24 ( k_KERNEL[center], tex2D ( texBWImage, j, i ).x ) ); // Center computed twice.
	return sum;
}
/**
* Kernel stored in global memory which compute convolution using texture mapping to the image.
*
* @param w width of the image
* @param h heigth of the image
* @param kernel kernel used for the convolution
* @param k number of column from the kernel
* @param ptrDevCudaImageConvolution the convolution result
*/
__global__ void kernelConvolutionConstantImageTexture_mul24 ( const uint32_t w, const uint32_t h, const uint32_t k,
	float* ptrDevCudaImageConvolution ) {
	// Same as kernelConvolutionConstantImageTexture but uses the __mul24
	// device helper. Fixed: this wrapper previously called the non-mul24
	// helper, leaving the _mul24 variant as dead code.
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	// Linear pixel index, advanced by the total thread count each round.
	int s = tid;
	size_t size = h * w;
	int i, j;
	int kHalf = ( k / 2 );
	// Linear index of the centre tap in the row-major k x k kernel.
	int center = k * ( k / 2 ) + kHalf;
	float convolution;
	while ( s < size ) {
		Indice2D::pixelIJ ( s, w, i, j );
		convolution = convolutionKernelConstantImageTexture_mul24 ( k, center, kHalf, i, j );
		ptrDevCudaImageConvolution[s] = convolution;
		s += nbThreads;
	}
}
/**
* Displays given black&white image to the OpenGL context.
*
* @param ptrDevConvolution image resulting from convolution
* @param w width of the image
* @param h heigth of the image
* @param ptrDevImageGL OpenGL context image
*/
__global__ void kernelDisplayConvolution ( const float* ptrDevConvolution, const uint32_t w, const uint32_t h, uchar4* ptrDevImageGL ) {
	// Copies the grayscale convolution result into the RGBA OpenGL buffer
	// (alpha forced to 255). The old i/j pixelIJ computation was unused
	// and has been removed.
	int tid = Indice2D::tid ();
	int nbThreads = Indice2D::nbThread ();
	int s = tid;
	size_t size = h * w;
	while ( s < size ) {
		ptrDevImageGL[s].w = 255;
		ptrDevImageGL[s].x = ptrDevImageGL[s].y = ptrDevImageGL[s].z = (uint8_t) ( ptrDevConvolution[s] );
		s += nbThreads;
	}
}
/*--------------------------------------*\
|* CPU Globals *|
\*-------------------------------------*/
// Cached geometry and buffer sizes shared by init/launch/free.
struct CudaImagesSizes {
	uint32_t w;          // image width in pixels
	uint32_t h;          // image height in pixels
	size_t rgb_pitch;    // bytes per row of the RGB image
	size_t rgb_size;     // total bytes of the RGB image
	size_t bw_pitch;     // bytes per row of the B&W image
	size_t bw_size;      // total bytes of the B&W image
	size_t conv_size;    // total bytes of the float convolution buffer
	size_t kernel_size;  // bytes of the convolution kernel
};
static uchar4* ptrDevCudaRGBImage = NULL;
static uchar4* ptrDevCudaBWImage = NULL;
static float* ptrDevKernel = NULL;
static float* ptrDevCudaImageConvolution = NULL;
static CudaImagesSizes sizes;
/**
* Initializes memory and everythings
*
* @param w width of the image
* @param h heigth of the image
*/
void initKernelFillImage ( const uint32_t w, const uint32_t h, const float* kernel, const size_t kernelSize ) {
	// Allocates all device buffers, uploads the convolution kernel to both
	// global and constant memory, and binds the B&W image to the 2D
	// texture. kernelSize is a float count; it must not exceed 81, the
	// capacity of k_KERNEL (HANDLE_ERROR traps the oversize copy).
	size_t rgb_size = sizeof(uchar4) * h * w;
	size_t bw_size = sizeof(uchar4) * h * w;
	size_t conv_size = sizeof(float) * h * w;
	sizes.w = w;
	sizes.h = h;
	sizes.rgb_pitch = sizeof(uchar4) * w;
	sizes.rgb_size = rgb_size;
	sizes.bw_pitch = sizeof(uchar4) * w;
	sizes.bw_size = bw_size;
	sizes.conv_size = conv_size;
	sizes.kernel_size = kernelSize * sizeof(float);
	HANDLE_ERROR( cudaMalloc((void**) &ptrDevCudaRGBImage, rgb_size) );
	HANDLE_ERROR( cudaMalloc((void**) &ptrDevCudaBWImage, bw_size ) );
	HANDLE_ERROR( cudaMalloc((void**) &ptrDevCudaImageConvolution, conv_size ) );
	HANDLE_ERROR( cudaMalloc((void**) &ptrDevKernel, sizes.kernel_size) );
	// Copy kernel to global memory
	HANDLE_ERROR( cudaMemcpy( ptrDevKernel, kernel, sizes.kernel_size, cudaMemcpyHostToDevice ) );
	// Fixed: pass the symbol itself, not its name as a string — the
	// string-name form of cudaMemcpyToSymbol was removed in CUDA 5.0, so
	// this call could never succeed on a modern toolkit.
	HANDLE_ERROR( cudaMemcpyToSymbol( k_KERNEL, kernel, sizes.kernel_size, 0, cudaMemcpyHostToDevice ) );
	// Create tex, bind tex to ptrDevCudaBWImage
	texBWImage.addressMode[0] = cudaAddressModeWrap;
	texBWImage.addressMode[1] = cudaAddressModeWrap;
	//texBWImage.filterMode = cudaFilterModePoint;
	texBWImage.normalized = false; // coordinate not in [0, 1]
	cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar4> ();
	HANDLE_ERROR( cudaBindTexture2D(NULL, &texBWImage, ptrDevCudaBWImage, &channelDesc, w, h, sizes.bw_pitch ) );
}
/**
*
*/
void freeKernelFillImageKernel () {
	// Releases everything acquired in initKernelFillImage: the texture
	// binding and the four device buffers. Previously this was a stub and
	// every init leaked its allocations. Pointers are reset so a later
	// re-init is safe (cudaFree(NULL) is a no-op).
	HANDLE_ERROR( cudaUnbindTexture( texBWImage ) );
	HANDLE_ERROR( cudaFree( ptrDevCudaRGBImage ) );
	HANDLE_ERROR( cudaFree( ptrDevCudaBWImage ) );
	HANDLE_ERROR( cudaFree( ptrDevCudaImageConvolution ) );
	HANDLE_ERROR( cudaFree( ptrDevKernel ) );
	ptrDevCudaRGBImage = NULL;
	ptrDevCudaBWImage = NULL;
	ptrDevCudaImageConvolution = NULL;
	ptrDevKernel = NULL;
}
/**
* Launches kernel which fill image.
* Kernel called are:
* <ul>
* <ol>RGB to BW kernel (3 algorithms)</ol>
* <ol>Convolution kernel (different memory usage)</ol>
* </ul>
*
* @param ptrDevImageGL OpenGL context image (DEVICE)
* @param ptrCudaImage image read from video (HOST)
* @param w width of the image
* @param h heigth of the image
*/
double launchKernelFillImageKernel ( uchar4* ptrDevImageGL, const uchar4* ptrCudaImage, const uint32_t w, const uint32_t h,
	const KernelKind kind, const GrayscaleMethod grayscale, const dim3 dg, const dim3 db ) {
	Chronos chrono;
	// Upload the new frame before starting the timer.
	HANDLE_ERROR( cudaMemcpy( ptrDevCudaRGBImage, ptrCudaImage, sizes.rgb_size, cudaMemcpyHostToDevice ) );
	chrono.start ();
	// Grayscale conversion; AVERAGE doubles as the fallback case.
	switch ( grayscale ) {
	default:
	case AVERAGE:
	kernelRGBImageToBW_Average<<< dg, db >>> ( ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	case LIGHTNESS:
	kernelRGBImageToBW_Lightness<<< dg, db >>> ( ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	case LUMINANCE:
	kernelRGBImageToBW_Luminance<<< dg, db >>> ( ptrDevCudaRGBImage, w, h, ptrDevCudaBWImage );
	break;
	}
	// Convolution; kernel width hard-coded to 9 (k_KERNEL holds 9x9 = 81
	// floats). NOTE(review): no cudaGetLastError() after the launches, so
	// a bad launch configuration would go unnoticed until a later call.
	switch ( kind ) {
	default:
	case TEXTURE_GLOBAL:
	kernelConvolutionGlobalImageTexture<<< dg, db >>> ( w, h, ptrDevKernel, 9, ptrDevCudaImageConvolution );
	break;
	case TEXTURE_CONSTANT:
	kernelConvolutionConstantImageTexture<<< dg, db >>> ( w, h, 9, ptrDevCudaImageConvolution );
	break;
	case TEXTURE_CONSTANT_MUL24:
	kernelConvolutionConstantImageTexture_mul24<<< dg, db >>> ( w, h, 9, ptrDevCudaImageConvolution );
	break;
	}
	// Block until both kernels finish so the measured time is meaningful.
	HANDLE_ERROR( cudaDeviceSynchronize() );
	double time = chrono.stop ();
	// The display pass runs outside the timed region.
	kernelDisplayConvolution<<< dg, db >>> ( ptrDevCudaImageConvolution, w, h, ptrDevImageGL );
	; //
	return time;
}
|
e678f25ce78627ebe5837063fb9bf1ab3af72288.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
float * get_network_output_layer_gpu(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
/* Forward pass on the GPU: runs every layer in order, wiring each layer's
 * device-side output buffer to the next layer's input. */
void forward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        if(l.delta_gpu){
            /* Zero this layer's gradient buffer so stale deltas from a
             * previous iteration do not leak into backprop. */
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        l.forward_gpu(l, state);
        /* The next layer consumes this layer's GPU output. */
        state.input = l.output_gpu;
    }
}
/* Backward pass on the GPU: walks the layers in reverse, pointing each
 * layer at its predecessor's output (activations) and delta (gradient)
 * buffers. Layer 0 gets the original network input/delta instead. */
void backward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    float * original_input = state.input;
    float * original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if(i == 0){
            /* First layer backprops into the network-level buffers. */
            state.input = original_input;
            state.delta = original_delta;
        }else{
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        l.backward_gpu(l, state);
    }
}
/* Applies one optimizer step to every layer that implements update_gpu,
 * using the effective batch size (batch * subdivisions) and the current
 * learning-rate schedule. */
void update_network_gpu(network net)
{
    cuda_set_device(net.gpu_index);
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        /* Record the current batch number on the layer (used by
         * schedule-dependent updates). */
        l.t = get_current_batch(net);
        if(l.update_gpu){
            l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}
/* One forward+backward pass for a single host batch (x, y).
 * The persistent device input/truth buffers are lazily allocated on first
 * use and reused (refilled with cuda_push_array) on later calls. */
void forward_backward_network_gpu(network net, float *x, float *y)
{
    network_state state;
    state.index = 0;
    state.net = net;
    int x_size = get_network_input_size(net)*net.batch;
    int y_size = get_network_output_size(net)*net.batch;
    /* Detection-style heads override the truth size via l.truths. */
    if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
    if(!*net.input_gpu){
        *net.input_gpu = cuda_make_array(x, x_size);
        *net.truth_gpu = cuda_make_array(y, y_size);
    }else{
        cuda_push_array(*net.input_gpu, x, x_size);
        cuda_push_array(*net.truth_gpu, y, y_size);
    }
    state.input = *net.input_gpu;
    state.delta = 0;
    state.truth = *net.truth_gpu;
    state.train = 1;
    forward_network_gpu(net, state);
    backward_network_gpu(net, state);
}
/* Trains on one batch: forward+backward, then applies the optimizer step
 * only once every net.subdivisions batches (gradient accumulation).
 * Returns the batch cost. */
float train_network_datum_gpu(network net, float *x, float *y)
{
    *net.seen += net.batch;
    forward_backward_network_gpu(net, x, y);
    float error = get_network_cost(net);
    if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
    return error;
}
/* Argument bundle handed to train_thread: the network to train, the data
 * shard, and an output slot for the resulting error. */
typedef struct {
    network net;
    data1 d;
    float *err;
} train_args;
/* pthread entry point: copies the heap-allocated train_args, frees it,
 * binds the thread to the network's GPU, and trains on the data shard. */
void *train_thread(void *ptr)
{
    train_args args = *(train_args*)ptr;
    free(ptr);
    cuda_set_device(args.net.gpu_index);
    *args.err = train_network(args.net, args.d);
    return 0;
}
/* Launches train_network(net, d) on a background pthread.
 * The heap-allocated args struct is freed by train_thread.
 * Returns the thread handle; the caller must pthread_join it. */
pthread_t train_network_in_thread(network net, data1 d, float *err)
{
    pthread_t thread;
    train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
    /* calloc can fail; the original dereferenced the result unchecked. */
    if(!ptr) error("Allocation failed");
    ptr->net = net;
    ptr->d = d;
    ptr->err = err;
    if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
    return thread;
}
/* Copies a layer's accumulated gradient (update) buffers from device to
 * host. Only CONVOLUTIONAL and CONNECTED layers carry these buffers. */
void pull_updates(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
        /* n*size*size*c = total number of convolutional weights */
        cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
        if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
        cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}
/* Copies a layer's gradient (update) buffers from host to device.
 * Mirror of pull_updates. */
void push_updates(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
        cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
        if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
        cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}
/* Applies one optimizer step to a single layer, using the network's
 * effective batch size (batch * subdivisions) and current schedule. */
void update_layer(layer l, network net)
{
    int effective_batch = net.batch * net.subdivisions;
    float rate = get_current_rate(net);
    l.t = get_current_batch(net);
    if (!l.update_gpu) return;
    l.update_gpu(l, effective_batch, rate, net.momentum, net.decay);
}
/* Accumulates (adds) one replica layer's parameters into the matching
 * base layer's host-side buffers. No-op for other layer types. */
void merge_weights(layer l, layer base)
{
    if (l.type == CONVOLUTIONAL) {
        int nweights = l.n * l.size * l.size * l.c;
        axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
        axpy_cpu(nweights, 1, l.weights, 1, base.weights, 1);
        if (l.scales) {
            /* Batch-norm scale factors are merged as well when present. */
            axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
        }
    } else if (l.type == CONNECTED) {
        int nweights = l.outputs * l.inputs;
        axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
        axpy_cpu(nweights, 1, l.weights, 1, base.weights, 1);
    }
}
/* Multiplies all learnable parameters of a layer by the scalar s.
 * Used after merge_weights to turn the replica sum into an average. */
void scale_weights(layer l, float s)
{
    if (l.type == CONVOLUTIONAL) {
        int nweights = l.n * l.size * l.size * l.c;
        scal_cpu(l.n, s, l.biases, 1);
        scal_cpu(nweights, s, l.weights, 1);
        if (l.scales) {
            scal_cpu(l.n, s, l.scales, 1);
        }
    } else if (l.type == CONNECTED) {
        int nweights = l.outputs * l.inputs;
        scal_cpu(l.outputs, s, l.biases, 1);
        scal_cpu(nweights, s, l.weights, 1);
    }
}
/* Copies a layer's learnable parameters from device to host.
 * Only CONVOLUTIONAL and CONNECTED layers are handled. */
void pull_weights(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_pull_array(l.biases_gpu, l.biases, l.n);
        cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
        if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
        cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
    }
}
/* Copies a layer's learnable parameters from host to device.
 * Mirror of pull_weights. */
void push_weights(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.biases_gpu, l.biases, l.n);
        cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
        if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.biases_gpu, l.biases, l.outputs);
        cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
    }
}
/* Pushes the (averaged) base layer's host-side parameters onto this
 * replica layer's device buffers. */
void distribute_weights(layer l, layer base)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.biases_gpu, base.biases, l.n);
        cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
        /* Note: gated on base.scales (the source), not l.scales. */
        if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.biases_gpu, base.biases, l.outputs);
        cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
    }
}
/* Accumulates (adds) one replica layer's gradient buffers into the
 * matching base layer's host-side gradient buffers. */
void merge_updates(layer l, layer base)
{
    if (l.type == CONVOLUTIONAL) {
        int nweights = l.n * l.size * l.size * l.c;
        axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(nweights, 1, l.weight_updates, 1, base.weight_updates, 1);
        if (l.scale_updates) {
            axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
        }
    } else if (l.type == CONNECTED) {
        int nweights = l.outputs * l.inputs;
        axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(nweights, 1, l.weight_updates, 1, base.weight_updates, 1);
    }
}
/* Pushes the base layer's host-side gradient buffers onto this replica
 * layer's device buffers. Mirror of distribute_weights for gradients. */
void distribute_updates(layer l, layer base)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
        if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
    }
}
/* Averages the weights of layer j across n replicated networks (one per
 * GPU) and pushes the averaged result back to every replica.
 * cuda_set_device must be called before touching each replica's GPU. */
void sync_layer(network *nets, int n, int j)
{
    //printf("Syncing layer %d\n", j);
    int i;
    network net = nets[0];
    layer base = net.layers[j];
    cuda_set_device(net.gpu_index);
    pull_weights(base);
    /* Accumulate every other replica's weights into base on the host. */
    for (i = 1; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        pull_weights(l);
        merge_weights(l, base);
    }
    /* base now holds the sum over replicas; scale to the mean. */
    scale_weights(base, 1./n);
    for (i = 0; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        distribute_weights(l, base);
    }
    //printf("Done syncing layer %d\n", j);
}
/* Argument bundle for sync_layer_thread: the replica array, its length,
 * and the index of the layer to synchronize. */
typedef struct{
    network *nets;
    int n;
    int j;
} sync_args;
/* pthread entry point: copies the heap-allocated sync_args, runs
 * sync_layer for one layer, and frees the args. */
void *sync_layer_thread(void *ptr)
{
    sync_args args = *(sync_args*)ptr;
    sync_layer(args.nets, args.n, args.j);
    free(ptr);
    return 0;
}
/* Launches sync_layer(nets, n, j) on a background pthread.
 * The heap-allocated args struct is freed by sync_layer_thread.
 * Returns the thread handle; the caller must pthread_join it. */
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
    pthread_t thread;
    sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
    /* calloc can fail; the original dereferenced the result unchecked. */
    if(!ptr) error("Allocation failed");
    ptr->nets = nets;
    ptr->n = n;
    ptr->j = j;
    if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
    return thread;
}
/* Synchronizes all layers of n replica networks, one thread per layer.
 * Also reconciles the global sample counter across replicas. */
void sync_nets(network *nets, int n, int interval)
{
    int j;
    int layers = nets[0].n;
    /* NOTE(review): calloc result is used unchecked below. */
    pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
    /* Account for samples processed by the other n-1 replicas since the
     * last sync, then broadcast the counter to every replica. */
    *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
    for (j = 0; j < n; ++j){
        *nets[j].seen = *nets[0].seen;
    }
    for (j = 0; j < layers; ++j) {
        threads[j] = sync_layer_in_thread(nets, n, j);
    }
    for (j = 0; j < layers; ++j) {
        pthread_join(threads[j], 0);
    }
    free(threads);
}
/* Trains n replica networks in parallel (one pthread per replica), each on
 * a 1/n shard of d. Every `interval` batches the replicas' weights are
 * averaged via sync_nets. Returns the mean error across replicas. */
float train_networks(network *nets, int n, data1 d, int interval)
{
    int i;
    int batch = nets[0].batch;
    int subdivisions = nets[0].subdivisions;
    /* The data must split exactly into n full effective batches. */
    assert(batch * subdivisions * n == d.X.rows);
    /* NOTE(review): both calloc results are used unchecked below. */
    pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
    float *errors = (float *) calloc(n, sizeof(float));
    float sum = 0;
    for(i = 0; i < n; ++i){
        data1 p = get_data_part(d, i, n);
        threads[i] = train_network_in_thread(nets[i], p, errors + i);
    }
    for(i = 0; i < n; ++i){
        pthread_join(threads[i], 0);
        //printf("%f\n", errors[i]);
        sum += errors[i];
    }
    //hipDeviceSynchronize();
    if (get_current_batch(nets[0]) % interval == 0) {
        printf("Syncing... ");
        fflush(stdout);
        sync_nets(nets, n, interval);
        printf("Done!\n");
    }
    //hipDeviceSynchronize();
    free(threads);
    free(errors);
    return (float)sum/(n);
}
/* Returns the host-side output buffer of layer i, pulling it from the
 * device first (REGION layers are skipped: their output is assumed to be
 * handled elsewhere -- TODO confirm against region_layer). */
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}
/* Returns the host-side output of the last non-COST layer, walking
 * backwards past any trailing COST layers. */
float *get_network_output_gpu(network net)
{
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return get_network_output_layer_gpu(net, i);
}
/* Inference entry point: uploads `input` to a fresh device buffer, runs
 * the forward pass, and returns the host-side output of the last
 * non-COST layer. The temporary input buffer is freed before returning. */
float *network_predict_gpu(network net, float *input)
{
    cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    forward_network_gpu(net, state);
    /* get_network_output_gpu pulls the result to the host. */
    float *out = get_network_output_gpu(net);
    cuda_free(state.input);
    return out;
}
| e678f25ce78627ebe5837063fb9bf1ab3af72288.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
float * get_network_output_layer_gpu(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
}
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data1 d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data1 d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data1 d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data1 p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
cuda_free(state.input);
return out;
}
|
56e85734572c0e41f3f750ceff399bdf68b55f9d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cudpp.h>
#include <unistd.h>
size_t pitch;
/* Allocates a pitched size_x x size_y matrix of doubles on the device.
 * The row pitch (in bytes) is stored in the file-scope global 'pitch'.
 * Exits the process on allocation failure. */
extern "C" double* matrix_alloc_gpu (int size_x, int size_y) {
    void *p = NULL;
    hipError_t err = hipMallocPitch (&p, &pitch, sizeof(double)*size_x, size_y);
    /* Check the API status explicitly: on failure the output pointer is
     * not guaranteed to be set, so testing an uninitialized 'p' against
     * NULL (as the original did) could miss the error. */
    if (err != hipSuccess || p == NULL) {
        fprintf (stderr, "I cannot reserve device memory for the matrix.\n");
        exit (EXIT_FAILURE);
    }
    return (double *)p;
}
/* Host-to-device copy of a size_x x size_y double matrix: the host buffer
 * is densely packed (row stride size_x*sizeof(double)), the device buffer
 * uses the pitched stride returned by matrix_alloc_gpu. */
extern "C" void H2D (double *m_gpu, double *m_cpu, int size_x, int size_y) {
    hipMemcpy2D (m_gpu, pitch, m_cpu, size_x*sizeof(double),\
            size_x*sizeof(double), size_y,hipMemcpyHostToDevice);
}
/* Device-to-host copy of a size_x x size_y double matrix: mirror of H2D
 * with source/destination pitches swapped (device rows use the global
 * 'pitch', host rows are densely packed). The original call was left
 * unfinished ("hipMemcpy2D(m_cpu") and did not compile. */
extern "C" void D2H (double *m_cpu, double *m_gpu, int size_x, int size_y) {
    hipMemcpy2D (m_cpu, size_x*sizeof(double), m_gpu, pitch,
            size_x*sizeof(double), size_y, hipMemcpyDeviceToHost);
}
/* Runs a backward exclusive sum scan over each row of a pitched
 * size_x x size_y device matrix using CUDPP, writing into d_odata. */
extern "C" void main_gpu (double *d_odata, double *d_idata, int size_x, int size_y) {
    CUDPPHandle theCudpp;
    CUDPPConfiguration config;
    CUDPPHandle scanplan = 0;
    CUDPPResult res;
    cudppCreate (&theCudpp);
    config.op = CUDPP_ADD;
    config.datatype = CUDPP_DOUBLE;
    config.algorithm = CUDPP_SCAN;
    config.options = CUDPP_OPTION_BACKWARD | CUDPP_OPTION_EXCLUSIVE;
    /* rowPitch is expressed in elements, hence pitch/sizeof(double). */
    res = cudppPlan (theCudpp, &scanplan, config, size_x, size_y, pitch/sizeof(double));
    if (CUDPP_SUCCESS != res) {
        printf("Error creating CUDPPPlan\n");
        exit(-1);
    }
    /* cudppMultiScan takes the row length and the number of rows as two
     * separate arguments; the original passed a single fused element
     * count (size_x*size_y), which does not match the CUDPP API. */
    res = cudppMultiScan(scanplan, d_odata, d_idata, size_x, size_y);
    if (CUDPP_SUCCESS != res) {
        printf("Error in cudppScan()\n");
        exit(-1);
    }
    /* Release the plan and library handle (the original leaked both). */
    cudppDestroyPlan (scanplan);
    cudppDestroy (theCudpp);
}
| 56e85734572c0e41f3f750ceff399bdf68b55f9d.cu | #include <cuda.h>
#include <stdio.h>
#include <cudpp.h>
#include <unistd.h>
size_t pitch;
/* Allocates a pitched size_x x size_y matrix of doubles on the device.
 * The row pitch (in bytes) is stored in the file-scope global 'pitch'.
 * Exits the process on allocation failure. */
extern "C" double* matrix_alloc_gpu (int size_x, int size_y) {
    void *p = NULL;
    cudaError_t err = cudaMallocPitch (&p, &pitch, sizeof(double)*size_x, size_y);
    /* Check the API status explicitly: on failure the output pointer is
     * not guaranteed to be set, so testing an uninitialized 'p' against
     * NULL (as the original did) could miss the error. */
    if (err != cudaSuccess || p == NULL) {
        fprintf (stderr, "I cannot reserve device memory for the matrix.\n");
        exit (EXIT_FAILURE);
    }
    return (double *)p;
}
/* Host-to-device copy of a size_x x size_y double matrix: the host buffer
 * is densely packed (row stride size_x*sizeof(double)), the device buffer
 * uses the pitched stride returned by matrix_alloc_gpu. */
extern "C" void H2D (double *m_gpu, double *m_cpu, int size_x, int size_y) {
    cudaMemcpy2D (m_gpu, pitch, m_cpu, size_x*sizeof(double),\
            size_x*sizeof(double), size_y,cudaMemcpyHostToDevice);
}
/* Device-to-host copy of a size_x x size_y double matrix: mirror of H2D
 * with source/destination pitches swapped (device rows use the global
 * 'pitch', host rows are densely packed). The original call was left
 * unfinished ("cudaMemcpy2D(m_cpu") and did not compile. */
extern "C" void D2H (double *m_cpu, double *m_gpu, int size_x, int size_y) {
    cudaMemcpy2D (m_cpu, size_x*sizeof(double), m_gpu, pitch,
            size_x*sizeof(double), size_y, cudaMemcpyDeviceToHost);
}
/* Runs a backward exclusive sum scan over each row of a pitched
 * size_x x size_y device matrix using CUDPP, writing into d_odata. */
extern "C" void main_gpu (double *d_odata, double *d_idata, int size_x, int size_y) {
    CUDPPHandle theCudpp;
    CUDPPConfiguration config;
    CUDPPHandle scanplan = 0;
    CUDPPResult res;
    cudppCreate (&theCudpp);
    config.op = CUDPP_ADD;
    config.datatype = CUDPP_DOUBLE;
    config.algorithm = CUDPP_SCAN;
    config.options = CUDPP_OPTION_BACKWARD | CUDPP_OPTION_EXCLUSIVE;
    /* rowPitch is expressed in elements, hence pitch/sizeof(double). */
    res = cudppPlan (theCudpp, &scanplan, config, size_x, size_y, pitch/sizeof(double));
    if (CUDPP_SUCCESS != res) {
        printf("Error creating CUDPPPlan\n");
        exit(-1);
    }
    /* cudppMultiScan takes the row length and the number of rows as two
     * separate arguments; the original passed a single fused element
     * count (size_x*size_y), which does not match the CUDPP API. */
    res = cudppMultiScan(scanplan, d_odata, d_idata, size_x, size_y);
    if (CUDPP_SUCCESS != res) {
        printf("Error in cudppScan()\n");
        exit(-1);
    }
    /* Release the plan and library handle (the original leaked both). */
    cudppDestroyPlan (scanplan);
    cudppDestroy (theCudpp);
}
|
4d69cefbf82d7397e802cdb19cd6c49b81e5f1ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file test_getSystemMatrixFromWorkqueue.cu */
/*
* Author: malte
*
* Created on 22. Oktober 2014, 10:29
*/
#include <cstdlib>
#include "FileTalk.hpp"
#include "getSystemMatrixFromWorkqueue.cu"
#include "getWorkqueue.hpp"
#include "VoxelGrid.hpp"
#include "MeasurementSetup.hpp"
#include "MeasurementSetupLinIndex.hpp"
#include "MeasurementSetupTrafo2CartCoord.hpp"
#include "H5File2DefaultMeasurementList.h"
#include "H5DensityWriter.hpp"
#include "GridAdapter.hpp"
#include "real_measurementsetup_defines.h"
#include "voxelgrid_defines.h"
#include <iostream>
#include "CUDA_HandleError.hpp"
#include "typedefs.hpp"
#include "device_constant_memory.hpp"
/*
* Simple C++ Test Suite
*/
#define NBLOCKS 32
#define TPB 256
int main(int argc, char** argv) {
int const nargs(3);
if(argc!=nargs+1) {
std::cerr << "Error: Wrong number of arguments. Exspected: "
<< nargs << ":" << std::endl
<< " filename of measurement" << std::endl
<< " filename of output" << std::endl
<< " number of rays" << std::endl;
exit(EXIT_FAILURE);
}
std::string const fn(argv[1]);
std::string const on(argv[2]);
int const nrays(atoi(argv[3]));
MS setup =
MS(
POS0X, POS1X,
NA, N0Z, N0Y, N1Z, N1Y,
DA, SEGX, SEGY, SEGZ);
HANDLE_ERROR(hipMemcpyToSymbol(setup_const, &setup, sizeof(MS)));
VG grid =
VG(
GRIDOX, GRIDOY, GRIDOZ,
GRIDDX, GRIDDY, GRIDDZ,
GRIDNX, GRIDNY, GRIDNZ);
HANDLE_ERROR(hipMemcpyToSymbol(grid_const, &grid, sizeof(grid)));
ML list =
H5File2DefaultMeasurementList<val_t>(fn, NA*N0Z*N0Y*N1Z*N1Y);
// Allocate memory for workqueue on host
SAYLINE(__LINE__-1);
std::vector<int> wqCnlId_host;;
std::vector<int> wqVxlId_host;
// Get Workqueue
SAYLINE(__LINE__-1);
int listId(0); int vxlId(0);
int nFound =
getWorkqueue<
val_t,
ML,
VG, Idx, Idy, Idz,
MS, Id0z, Id0y, Id1z, Id1y, Ida,
Trafo0, Trafo1> (
wqCnlId_host, wqVxlId_host, listId, vxlId, &list, &grid, &setup);
// Allocate memory for sparse matrix (=workqueue + matrix values) on device
int * wqCnlId_devi = NULL;
int * wqVxlId_devi = NULL;
val_t * val_devi = NULL;
HANDLE_ERROR(hipMalloc((void**)&wqCnlId_devi, sizeof(wqCnlId_devi[0]) *nFound));
HANDLE_ERROR(hipMalloc((void**)&wqVxlId_devi, sizeof(wqVxlId_devi[0]) *nFound));
HANDLE_ERROR(hipMalloc((void**)&val_devi, sizeof(val_devi[0]) *nFound));
HANDLE_ERROR(hipDeviceSynchronize());
// Copy Workqueue to device
SAYLINE(__LINE__-1);
HANDLE_ERROR(hipMemcpy(
wqCnlId_devi, &(*wqCnlId_host.begin()), sizeof(wqCnlId_devi[0]) *nFound, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(
wqVxlId_devi, &(*wqVxlId_host.begin()), sizeof(wqVxlId_devi[0]) *nFound, hipMemcpyHostToDevice));
HANDLE_ERROR(hipDeviceSynchronize());
// Kernel launch
SAYLINE(__LINE__-1);
hipLaunchKernelGGL(( getSystemMatrixFromWorkqueue<
val_t, VG, Idx, Idy, Idz, MS, Id0z, Id0y, Id1z, Id1y, Ida, Trafo0, Trafo1>)
, dim3(NBLOCKS), dim3(TPB), 0, 0,
wqCnlId_devi, wqVxlId_devi, val_devi, nFound, nrays);
HANDLE_ERROR(hipDeviceSynchronize());
// Allocate memory for matrix values on host
std::vector<val_t> val_host(nFound, 0);
// Copy matrix values to host
SAYLINE(__LINE__-1);
HANDLE_ERROR(hipGetLastError());
HANDLE_ERROR(hipMemcpy(
&(*val_host.begin()), val_devi, sizeof(val_host[0]) * nFound, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipDeviceSynchronize());
// Sum up values
SAYLINE(__LINE__-1);
val_t sum(0);
for(int i=0; i<nFound; i++) {
sum += val_host[i];
}
std::cout << "Sum is: " << sum << std::endl;
// Create grid memory for backprojection
SAYLINE(__LINE__-1);
int const gridsize(grid.gridnx()*grid.gridny()*grid.gridnz());
val_t * mem = new val_t[gridsize];
for(int vxlId=0; vxlId<gridsize; vxlId++) {
mem[vxlId] = 0.;
}
// Backproject workqueue on grid
SAYLINE(__LINE__-1);
for(int wqId=0; wqId<nFound; wqId++) {
int vxlId = wqVxlId_host[wqId];
mem[vxlId] += val_host[ wqId];
}
// Write to hdf5
SAYLINE(__LINE__-1);
H5DensityWriter<GridAdapter<VG, val_t> > writer(on);
GridAdapter<VG, val_t> ga(&grid);
writer.write(mem, ga);
return (EXIT_SUCCESS);
}
| 4d69cefbf82d7397e802cdb19cd6c49b81e5f1ce.cu | /** @file test_getSystemMatrixFromWorkqueue.cu */
/*
* Author: malte
*
* Created on 22. Oktober 2014, 10:29
*/
#include <cstdlib>
#include "FileTalk.hpp"
#include "getSystemMatrixFromWorkqueue.cu"
#include "getWorkqueue.hpp"
#include "VoxelGrid.hpp"
#include "MeasurementSetup.hpp"
#include "MeasurementSetupLinIndex.hpp"
#include "MeasurementSetupTrafo2CartCoord.hpp"
#include "H5File2DefaultMeasurementList.h"
#include "H5DensityWriter.hpp"
#include "GridAdapter.hpp"
#include "real_measurementsetup_defines.h"
#include "voxelgrid_defines.h"
#include <iostream>
#include "CUDA_HandleError.hpp"
#include "typedefs.hpp"
#include "device_constant_memory.hpp"
/*
* Simple C++ Test Suite
*/
#define NBLOCKS 32
#define TPB 256
int main(int argc, char** argv) {
int const nargs(3);
if(argc!=nargs+1) {
std::cerr << "Error: Wrong number of arguments. Exspected: "
<< nargs << ":" << std::endl
<< " filename of measurement" << std::endl
<< " filename of output" << std::endl
<< " number of rays" << std::endl;
exit(EXIT_FAILURE);
}
std::string const fn(argv[1]);
std::string const on(argv[2]);
int const nrays(atoi(argv[3]));
MS setup =
MS(
POS0X, POS1X,
NA, N0Z, N0Y, N1Z, N1Y,
DA, SEGX, SEGY, SEGZ);
HANDLE_ERROR(cudaMemcpyToSymbol(setup_const, &setup, sizeof(MS)));
VG grid =
VG(
GRIDOX, GRIDOY, GRIDOZ,
GRIDDX, GRIDDY, GRIDDZ,
GRIDNX, GRIDNY, GRIDNZ);
HANDLE_ERROR(cudaMemcpyToSymbol(grid_const, &grid, sizeof(grid)));
ML list =
H5File2DefaultMeasurementList<val_t>(fn, NA*N0Z*N0Y*N1Z*N1Y);
// Allocate memory for workqueue on host
SAYLINE(__LINE__-1);
std::vector<int> wqCnlId_host;;
std::vector<int> wqVxlId_host;
// Get Workqueue
SAYLINE(__LINE__-1);
int listId(0); int vxlId(0);
int nFound =
getWorkqueue<
val_t,
ML,
VG, Idx, Idy, Idz,
MS, Id0z, Id0y, Id1z, Id1y, Ida,
Trafo0, Trafo1> (
wqCnlId_host, wqVxlId_host, listId, vxlId, &list, &grid, &setup);
// Allocate memory for sparse matrix (=workqueue + matrix values) on device
int * wqCnlId_devi = NULL;
int * wqVxlId_devi = NULL;
val_t * val_devi = NULL;
HANDLE_ERROR(cudaMalloc((void**)&wqCnlId_devi, sizeof(wqCnlId_devi[0]) *nFound));
HANDLE_ERROR(cudaMalloc((void**)&wqVxlId_devi, sizeof(wqVxlId_devi[0]) *nFound));
HANDLE_ERROR(cudaMalloc((void**)&val_devi, sizeof(val_devi[0]) *nFound));
HANDLE_ERROR(cudaDeviceSynchronize());
// Copy Workqueue to device
SAYLINE(__LINE__-1);
HANDLE_ERROR(cudaMemcpy(
wqCnlId_devi, &(*wqCnlId_host.begin()), sizeof(wqCnlId_devi[0]) *nFound, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(
wqVxlId_devi, &(*wqVxlId_host.begin()), sizeof(wqVxlId_devi[0]) *nFound, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaDeviceSynchronize());
// Kernel launch
SAYLINE(__LINE__-1);
getSystemMatrixFromWorkqueue<
val_t, VG, Idx, Idy, Idz, MS, Id0z, Id0y, Id1z, Id1y, Ida, Trafo0, Trafo1>
<<<NBLOCKS, TPB>>> (
wqCnlId_devi, wqVxlId_devi, val_devi, nFound, nrays);
HANDLE_ERROR(cudaDeviceSynchronize());
// Allocate memory for matrix values on host
std::vector<val_t> val_host(nFound, 0);
// Copy matrix values to host
SAYLINE(__LINE__-1);
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaMemcpy(
&(*val_host.begin()), val_devi, sizeof(val_host[0]) * nFound, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaDeviceSynchronize());
// Sum up values
SAYLINE(__LINE__-1);
val_t sum(0);
for(int i=0; i<nFound; i++) {
sum += val_host[i];
}
std::cout << "Sum is: " << sum << std::endl;
// Create grid memory for backprojection
SAYLINE(__LINE__-1);
int const gridsize(grid.gridnx()*grid.gridny()*grid.gridnz());
val_t * mem = new val_t[gridsize];
for(int vxlId=0; vxlId<gridsize; vxlId++) {
mem[vxlId] = 0.;
}
// Backproject workqueue on grid
SAYLINE(__LINE__-1);
for(int wqId=0; wqId<nFound; wqId++) {
int vxlId = wqVxlId_host[wqId];
mem[vxlId] += val_host[ wqId];
}
// Write to hdf5
SAYLINE(__LINE__-1);
H5DensityWriter<GridAdapter<VG, val_t> > writer(on);
GridAdapter<VG, val_t> ga(&grid);
writer.write(mem, ga);
return (EXIT_SUCCESS);
}
|
5c760ed12da80e23479c543255d49dd8c6fde7f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <vector>
#include "../../../../src/common/categorical.h"
#include "../../../../src/tree/gpu_hist/histogram.cuh"
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../categorical_helpers.h"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
// Verifies that GPU histogram construction is deterministic: building the
// same histogram kRounds times from identical gradients must produce
// bit-identical quantised sums, and a multi-feature-group build must agree
// with a single-group baseline (within a small relative tolerance).
//
// is_dense  - when false, the generated matrix is 50% sparse.
// shm_size  - shared-memory budget handed to FeatureGroups; varying it
//             changes how features are grouped into kernel launches.
void TestDeterministicHistogram(bool is_dense, int shm_size) {
  Context ctx = CreateEmptyGenericParam(0);
  size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16;
  float constexpr kLower = -1e-2, kUpper = 1e2;
  // Sparsity toggles the dense/sparse EllpackPage code paths.
  float sparsity = is_dense ? 0.0f : 0.5f;
  auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix();
  BatchParam batch_param{0, static_cast<int32_t>(kBins)};
  for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) {
    auto* page = batch.Impl();
    // All rows start in a single partition; ridx covers every row.
    tree::RowPartitioner row_partitioner(0, kRows);
    auto ridx = row_partitioner.GetRows(0);
    int num_bins = kBins * kCols;
    dh::device_vector<GradientPairInt64> histogram(num_bins);
    auto d_histogram = dh::ToSpan(histogram);
    auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
    gpair.SetDevice(0);
    // Feature grouping depends on the shared-memory budget under test.
    FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size,
                                 sizeof(GradientPairInt64));
    auto quantiser = GradientQuantiser(gpair.DeviceSpan());
    // Reference histogram for this batch.
    BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                           feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_histogram,
                           quantiser);
    std::vector<GradientPairInt64> histogram_h(num_bins);
    dh::safe_cuda(hipMemcpy(histogram_h.data(), d_histogram.data(),
                            num_bins * sizeof(GradientPairInt64),
                            hipMemcpyDeviceToHost));
    // Rebuild kRounds times: every rebuild must match the reference exactly
    // (integer quantisation makes bitwise equality achievable).
    for (size_t i = 0; i < kRounds; ++i) {
      dh::device_vector<GradientPairInt64> new_histogram(num_bins);
      auto d_new_histogram = dh::ToSpan(new_histogram);
      auto quantiser = GradientQuantiser(gpair.DeviceSpan());
      BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                             feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
                             d_new_histogram, quantiser);
      std::vector<GradientPairInt64> new_histogram_h(num_bins);
      dh::safe_cuda(hipMemcpy(new_histogram_h.data(), d_new_histogram.data(),
                              num_bins * sizeof(GradientPairInt64),
                              hipMemcpyDeviceToHost));
      for (size_t j = 0; j < new_histogram_h.size(); ++j) {
        ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
        ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
      }
    }
    {
      // Cross-check against a single-feature-group build.  NOTE(review):
      // gpair is regenerated here — presumably GenerateRandomGradients is
      // deterministically seeded so this matches the gradients above; confirm.
      auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
      gpair.SetDevice(0);
      // Use a single feature group to compute the baseline.
      FeatureGroups single_group(page->Cuts());
      dh::device_vector<GradientPairInt64> baseline(num_bins);
      BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                             single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
                             dh::ToSpan(baseline), quantiser);
      std::vector<GradientPairInt64> baseline_h(num_bins);
      dh::safe_cuda(hipMemcpy(baseline_h.data(), baseline.data().get(),
                              num_bins * sizeof(GradientPairInt64),
                              hipMemcpyDeviceToHost));
      // Grouped vs ungrouped builds may differ in summation order, so allow
      // a small relative tolerance instead of exact equality.
      for (size_t i = 0; i < baseline.size(); ++i) {
        EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),
                    baseline_h[i].GetQuantisedGrad() * 1e-3);
      }
    }
  }
}
// Exercise the determinism check over both density modes and several
// shared-memory budgets (48 KiB, 64 KiB, 160 KiB).
TEST(Histogram, GPUDeterministic) {
  for (bool dense : {false, true}) {
    for (int shm_kib : {48, 64, 160}) {
      TestDeterministicHistogram(dense, shm_kib * 1024);
    }
  }
}
// Checks that a one-vs-rest categorical histogram agrees with the histogram
// built from the one-hot-encoded version of the same column.
//
// onehot - 2 bins per category; NOTE(review): layout assumed to be
//          bin 2c = rows where the encoded value is 0 (feature != c) and
//          bin 2c+1 = rows where it is 1 (feature == c) — confirm against
//          OneHotEncodeFeature.
// cat    - 1 bin per category holding the gradient sum of rows in category c.
// For every category, the "chosen" one-hot bin must equal the categorical
// bin, and the "not chosen" bin must equal the total minus that bin.
void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot,
                                  common::Span<GradientPairInt64> cat) {
  // Total gradient across all categories (value-initialised accumulator).
  auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{});
  for (size_t c = 0; c < n_categories; ++c) {
    auto zero = onehot[c * 2];
    auto one = onehot[c * 2 + 1];
    auto chosen = cat[c];
    auto not_chosen = cat_sum - chosen;
    ASSERT_EQ(zero, not_chosen);
    ASSERT_EQ(one, chosen);
  }
}
// Test 1 vs rest categorical histogram is equivalent to one hot encoded data.
// Builds one histogram from a single categorical column and one from its
// one-hot encoding, then validates they are equivalent via
// ValidateCategoricalHistogram.  Both builds share the same gradients and
// row partition, so any discrepancy points at the categorical binning path.
void TestGPUHistogramCategorical(size_t num_categories) {
  auto ctx = CreateEmptyGenericParam(0);
  size_t constexpr kRows = 340;
  size_t constexpr kBins = 256;
  auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories);
  auto cat_m = GetDMatrixFromData(x, kRows, 1);
  // Mark the single column as categorical so the builder takes the 1-vs-rest path.
  cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
  BatchParam batch_param{0, static_cast<int32_t>(kBins)};
  tree::RowPartitioner row_partitioner(0, kRows);
  auto ridx = row_partitioner.GetRows(0);
  // One histogram bin per category for the categorical build.
  dh::device_vector<GradientPairInt64> cat_hist(num_categories);
  auto gpair = GenerateRandomGradients(kRows, 0, 2);
  gpair.SetDevice(0);
  auto quantiser = GradientQuantiser(gpair.DeviceSpan());
  /**
   * Generate hist with cat data.
   */
  for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) {
    auto* page = batch.Impl();
    FeatureGroups single_group(page->Cuts());
    BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                           single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
                           dh::ToSpan(cat_hist), quantiser);
  }
  /**
   * Generate hist with one hot encoded data.
   */
  auto x_encoded = OneHotEncodeFeature(x, num_categories);
  auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories);
  // Two bins (value 0 / value 1) per encoded column.
  dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories);
  for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) {
    auto* page = batch.Impl();
    FeatureGroups single_group(page->Cuts());
    BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                           single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
                           dh::ToSpan(encode_hist), quantiser);
  }
  // Copy both histograms to the host and compare category-by-category.
  std::vector<GradientPairInt64> h_cat_hist(cat_hist.size());
  thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin());
  std::vector<GradientPairInt64> h_encode_hist(encode_hist.size());
  thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin());
  ValidateCategoricalHistogram(num_categories,
                               common::Span<GradientPairInt64>{h_encode_hist},
                               common::Span<GradientPairInt64>{h_cat_hist});
}
// Run the one-hot-vs-categorical equivalence check for 2..7 categories.
TEST(Histogram, GPUHistCategorical) {
  size_t n_cats = 2;
  while (n_cats < 8) {
    TestGPUHistogramCategorical(n_cats);
    ++n_cats;
  }
}
namespace {
// Atomic add as type cast for test.
// Reference int64 atomic add built on the native unsigned-64 atomicAdd.
// The bit-casts are sound because two's-complement and unsigned addition
// produce identical bit patterns; used only to validate AtomicAdd64As32.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
  uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
  uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
  uint64_t ret = ::atomicAdd(u_dst, u_src);
  // Return the pre-add value, re-interpreted back to signed.
  return *reinterpret_cast<int64_t*>(&ret);
}
}  // namespace
// Validates AtomicAdd64As32 (a 64-bit add emulated via two 32-bit atomics)
// against the bit-cast uint64 atomicAdd reference above.  Three regimes are
// covered: mixed-sign small values, positive totals that exceed uint32
// range, and negative totals below int32 range — the cases where carry
// propagation between the two 32-bit halves matters.
void TestAtomicAdd() {
  size_t n_elements = 1024;
  dh::device_vector<int64_t> result_a(1, 0);  // accumulator for AtomicAdd64As32
  auto d_result_a = result_a.data().get();
  dh::device_vector<int64_t> result_b(1, 0);  // accumulator for the reference atomicAdd
  auto d_result_b = result_b.data().get();
  /**
   * Test for simple inputs
   */
  std::vector<int64_t> h_inputs(n_elements);
  for (size_t i = 0; i < h_inputs.size(); ++i) {
    // Alternate signs so partial sums cross zero repeatedly.
    h_inputs[i] = (i % 2 == 0) ? i : -i;
  }
  dh::device_vector<int64_t> inputs(h_inputs);
  auto d_inputs = inputs.data().get();
  dh::LaunchN(n_elements, [=] __device__(size_t i) {
    AtomicAdd64As32(d_result_a, d_inputs[i]);
    atomicAdd(d_result_b, d_inputs[i]);
  });
  ASSERT_EQ(result_a[0], result_b[0]);
  /**
   * Test for positive values that don't fit into 32 bit integer.
   */
  // 1024 * (UINT32_MAX / 2) overflows the low 32-bit half many times over.
  thrust::fill(inputs.begin(), inputs.end(),
               (std::numeric_limits<uint32_t>::max() / 2));
  thrust::fill(result_a.begin(), result_a.end(), 0);
  thrust::fill(result_b.begin(), result_b.end(), 0);
  dh::LaunchN(n_elements, [=] __device__(size_t i) {
    AtomicAdd64As32(d_result_a, d_inputs[i]);
    atomicAdd(d_result_b, d_inputs[i]);
  });
  ASSERT_EQ(result_a[0], result_b[0]);
  ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
  // Also cross-check against a host-side reduction of the inputs.
  CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
  /**
   * Test for negative values that don't fit into 32 bit integer.
   */
  thrust::fill(inputs.begin(), inputs.end(),
               (std::numeric_limits<int32_t>::min() / 2));
  thrust::fill(result_a.begin(), result_a.end(), 0);
  thrust::fill(result_b.begin(), result_b.end(), 0);
  dh::LaunchN(n_elements, [=] __device__(size_t i) {
    AtomicAdd64As32(d_result_a, d_inputs[i]);
    atomicAdd(d_result_b, d_inputs[i]);
  });
  ASSERT_EQ(result_a[0], result_b[0]);
  ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
  CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
// Entry point for the 64-bit-as-two-32-bit atomic add equivalence test.
TEST(Histogram, AtomicAddInt64) { TestAtomicAdd(); }
} // namespace tree
} // namespace xgboost
| 5c760ed12da80e23479c543255d49dd8c6fde7f6.cu | #include <gtest/gtest.h>
#include <vector>
#include "../../../../src/common/categorical.h"
#include "../../../../src/tree/gpu_hist/histogram.cuh"
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../categorical_helpers.h"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
void TestDeterministicHistogram(bool is_dense, int shm_size) {
Context ctx = CreateEmptyGenericParam(0);
size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16;
float constexpr kLower = -1e-2, kUpper = 1e2;
float sparsity = is_dense ? 0.0f : 0.5f;
auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix();
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
int num_bins = kBins * kCols;
dh::device_vector<GradientPairInt64> histogram(num_bins);
auto d_histogram = dh::ToSpan(histogram);
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size,
sizeof(GradientPairInt64));
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx, d_histogram,
quantiser);
std::vector<GradientPairInt64> histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < kRounds; ++i) {
dh::device_vector<GradientPairInt64> new_histogram(num_bins);
auto d_new_histogram = dh::ToSpan(new_histogram);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
d_new_histogram, quantiser);
std::vector<GradientPairInt64> new_histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t j = 0; j < new_histogram_h.size(); ++j) {
ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
}
}
{
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
// Use a single feature group to compute the baseline.
FeatureGroups single_group(page->Cuts());
dh::device_vector<GradientPairInt64> baseline(num_bins);
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
dh::ToSpan(baseline), quantiser);
std::vector<GradientPairInt64> baseline_h(num_bins);
dh::safe_cuda(cudaMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < baseline.size(); ++i) {
EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),
baseline_h[i].GetQuantisedGrad() * 1e-3);
}
}
}
}
TEST(Histogram, GPUDeterministic) {
std::vector<bool> is_dense_array{false, true};
std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024};
for (bool is_dense : is_dense_array) {
for (int shm_size : shm_sizes) {
TestDeterministicHistogram(is_dense, shm_size);
}
}
}
void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot,
common::Span<GradientPairInt64> cat) {
auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{});
for (size_t c = 0; c < n_categories; ++c) {
auto zero = onehot[c * 2];
auto one = onehot[c * 2 + 1];
auto chosen = cat[c];
auto not_chosen = cat_sum - chosen;
ASSERT_EQ(zero, not_chosen);
ASSERT_EQ(one, chosen);
}
}
// Test 1 vs rest categorical histogram is equivalent to one hot encoded data.
void TestGPUHistogramCategorical(size_t num_categories) {
auto ctx = CreateEmptyGenericParam(0);
size_t constexpr kRows = 340;
size_t constexpr kBins = 256;
auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories);
auto cat_m = GetDMatrixFromData(x, kRows, 1);
cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
dh::device_vector<GradientPairInt64> cat_hist(num_categories);
auto gpair = GenerateRandomGradients(kRows, 0, 2);
gpair.SetDevice(0);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
/**
* Generate hist with cat data.
*/
for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
dh::ToSpan(cat_hist), quantiser);
}
/**
* Generate hist with one hot encoded data.
*/
auto x_encoded = OneHotEncodeFeature(x, num_categories);
auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories);
dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories);
for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0), gpair.DeviceSpan(), ridx,
dh::ToSpan(encode_hist), quantiser);
}
std::vector<GradientPairInt64> h_cat_hist(cat_hist.size());
thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin());
std::vector<GradientPairInt64> h_encode_hist(encode_hist.size());
thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin());
ValidateCategoricalHistogram(num_categories,
common::Span<GradientPairInt64>{h_encode_hist},
common::Span<GradientPairInt64>{h_cat_hist});
}
TEST(Histogram, GPUHistCategorical) {
for (size_t num_categories = 2; num_categories < 8; ++num_categories) {
TestGPUHistogramCategorical(num_categories);
}
}
namespace {
// Atomic add as type cast for test.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
return *reinterpret_cast<int64_t*>(&ret);
}
}
void TestAtomicAdd() {
size_t n_elements = 1024;
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
TEST(Histogram, AtomicAddInt64) {
TestAtomicAdd();
}
} // namespace tree
} // namespace xgboost
|
3d979e87f4f417ff080d5f08ad5c0db3484a2d73.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Lower-bound search on an ascending-sorted array.
//
// Returns the number of elements of A that are strictly less than `value`,
// i.e. the index at which `value` would be inserted to keep A sorted
// (N when value is greater than every element).  O(log N).
//
// Preconditions: A sorted ascending, N > 0.
//
// Fix: the original used a closed-interval loop (`min <= max`) seeded with
// `max = N` — a textbook off-by-one pattern that only avoided reading A[N]
// thanks to an early `value > A[N-1]` bailout — and tracked the answer in a
// separate `sum` variable.  The canonical half-open lower_bound below needs
// neither the special case nor the extra state, and returns the same index
// for every input.
__device__ int binarySearch(const int value, const int *A, const int N)
{
    // Invariant: A[0..lo) < value  and  value <= A[hi..N).
    int lo = 0;
    int hi = N;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;   // overflow-safe midpoint
        if (A[mid] < value) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return lo;
}
// Counts the entries of A that are strictly smaller than `value`.
// O(N) scan; unlike binarySearch it does not require A to be sorted.
__device__ int linearSearch(const int value, const int *A, const int N)
{
    int count = 0;
    for (int j = 0; j < N; ++j) {
        if (A[j] < value) {
            count += 1;
        }
    }
    return count;
}
// Merges two sorted arrays A and B (each length N) into sorted C (length 2N),
// one thread per index i, writing each element directly at its final rank:
//
//   * A[i] is preceded in C by the i earlier elements of A plus every
//     element of B strictly smaller than it, so it lands at C[i + iA].
//   * B[i] is preceded by the i earlier elements of B plus every element of
//     A that is <= B[i]; searching for B[i] + 1 with a strict comparison
//     realises "<=" and breaks ties, so values present in both arrays do
//     not collide in C.
//
// NOTE(review): B[i] + 1 overflows if B contains INT_MAX — confirm the
// input range excludes it.
__global__ void merge(int *C, const int *A, const int *B, const int N)
{
    int i, iA, iB;
    i = blockIdx.x * blockDim.x + threadIdx.x;  // one (A[i], B[i]) pair per thread
    if (i < N){
        iA = linearSearch(A[i], B, N);      // # of B-elements <  A[i]
        iB = linearSearch(B[i] + 1, A, N);  // # of A-elements <= B[i]
        C[i + iA] = A[i];
        C[i + iB] = B[i];
    }
}
// Test harness (WebGPU/libwb style): imports two sorted integer arrays,
// merges them on the device with the `merge` kernel and hands the result to
// wbSolution for checking.
// NOTE(review): both wbImport calls write into the same N, so the two input
// files are assumed to be the same length — confirm the dataset guarantees
// this.  Device API return codes are not checked.
int main(int argc, char **argv) {
    wbArg_t args;
    int N;
    int* A;
    int* B;
    int* C;
    int* deviceA;
    int* deviceB;
    int* deviceC;
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    A = (int *)wbImport(wbArg_getInputFile(args, 0), &N, NULL, "Integer");
    B = (int *)wbImport(wbArg_getInputFile(args, 1), &N, NULL, "Integer");
    // Output holds the merged contents of both inputs.
    C = (int *)malloc(2 * N * sizeof(int));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The input length is ", N);
    int threads = 256;
    // Ceil-divide so the grid covers all N elements.
    int blocks = N / threads + ((N%threads == 0) ? 0 : 1);
    wbTime_start(GPU, "Allocating GPU memory.");
    hipMalloc((void **)&deviceA, N * sizeof(int));
    hipMalloc((void **)&deviceB, N * sizeof(int));
    hipMalloc((void **)&deviceC, 2 * N * sizeof(int));
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    hipMemcpy(deviceA, A, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(deviceB, B, N * sizeof(int), hipMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    // Perform on CUDA.
    const dim3 blockSize(threads, 1, 1);
    const dim3 gridSize(blocks, 1, 1);
    wbTime_start(Compute, "Performing CUDA computation");
    merge << < gridSize, blockSize >> >(deviceC, deviceA, deviceB, N);
    // Block until the kernel finishes before timing stops / copying back.
    hipDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    hipMemcpy(C, deviceC, 2 * N * sizeof(int), hipMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");
    wbTime_start(GPU, "Freeing GPU Memory");
    hipFree(deviceA);
    hipFree(deviceB);
    hipFree(deviceC);
    wbTime_stop(GPU, "Freeing GPU Memory");
    wbSolution(args, C, 2*N);
    free(A);
    free(B);
    free(C);
#if LAB_DEBUG
    system("pause");
#endif
    return 0;
}
| 3d979e87f4f417ff080d5f08ad5c0db3484a2d73.cu | #include <wb.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Lower-bound search on an ascending-sorted array.
//
// Returns the number of elements of A that are strictly less than `value`,
// i.e. the index at which `value` would be inserted to keep A sorted
// (N when value is greater than every element).  O(log N).
//
// Preconditions: A sorted ascending, N > 0.
//
// Fix: the original used a closed-interval loop (`min <= max`) seeded with
// `max = N` — a textbook off-by-one pattern that only avoided reading A[N]
// thanks to an early `value > A[N-1]` bailout — and tracked the answer in a
// separate `sum` variable.  The canonical half-open lower_bound below needs
// neither the special case nor the extra state, and returns the same index
// for every input.
__device__ int binarySearch(const int value, const int *A, const int N)
{
    // Invariant: A[0..lo) < value  and  value <= A[hi..N).
    int lo = 0;
    int hi = N;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;   // overflow-safe midpoint
        if (A[mid] < value) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return lo;
}
// Counts the entries of A that are strictly smaller than `value`.
// O(N) scan; unlike binarySearch it does not require A to be sorted.
__device__ int linearSearch(const int value, const int *A, const int N)
{
    int count = 0;
    for (int j = 0; j < N; ++j) {
        if (A[j] < value) {
            count += 1;
        }
    }
    return count;
}
__global__ void merge(int *C, const int *A, const int *B, const int N)
{
// TODO: Merge arrays A and B into C. To make it
// easier you can assume the following:
//
// 1) A and B are both size N
//
// 2) C is size 2N
//
// 3) Both A and B are sorted arrays
//
// The algorithm should work as follows:
// Given inputs A and B as follows:
// A = [0 2 4 10]
// B = [1 5 7 9]
//
// Step 1:
// Find for each element in array A the index i that
// would A[i] be inserted in array B or in other
// words find the smallest j where A[i] < B[j].
int i, iA, iB;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N){
iA = linearSearch(A[i], B, N);
iB = linearSearch(B[i] + 1, A, N);
C[i + iA] = A[i];
C[i + iB] = B[i];
}
//
// Step 2:
// Do the same for B, but this time find the j
// where B[i] < A[j].
//
// Step 3:
// Since we know how many elements come before
// A[i] in array A and we know how many elements
// come before A[i] in array B, which is given by
// are calculation of j. We should know where A[i]
// is inserted into C, given i and j.
//
// This same logic can be used to find where B[i]
// should be inserted into C. Although you will have
// to make a minor change to handle duplicates in A
// and B. Or in other words if A and B intersect at
// all some values in C will be incorrect. This
// occurs because A and B will want to put the values
// in the same place in C.
}
int main(int argc, char **argv) {
wbArg_t args;
int N;
int* A;
int* B;
int* C;
int* deviceA;
int* deviceB;
int* deviceC;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
A = (int *)wbImport(wbArg_getInputFile(args, 0), &N, NULL, "Integer");
B = (int *)wbImport(wbArg_getInputFile(args, 1), &N, NULL, "Integer");
C = (int *)malloc(2 * N * sizeof(int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", N);
int threads = 256;
int blocks = N / threads + ((N%threads == 0) ? 0 : 1);
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **)&deviceA, N * sizeof(int));
cudaMalloc((void **)&deviceB, N * sizeof(int));
cudaMalloc((void **)&deviceC, 2 * N * sizeof(int));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceA, A, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, B, N * sizeof(int), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Perform on CUDA.
const dim3 blockSize(threads, 1, 1);
const dim3 gridSize(blocks, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
merge << < gridSize, blockSize >> >(deviceC, deviceA, deviceB, N);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(C, deviceC, 2 * N * sizeof(int), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, C, 2*N);
free(A);
free(B);
free(C);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
91144a3f47323a751d46ea09614456fa420a4b53.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////////////////////////
// cuda_lut_test.cu
//
// Contains implementation of tests for CUDA lut module
///////////////////////////////////////////////////////////////////////////////
#include "doctest/doctest.h"
#include <algorithm>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "cuda_proc.cuh"
#include "cuda_lut.cuh"
// Helper: fills destination, source and LUT with constants, applies the LUT
// on the device, and verifies every pixel copied back equals lut_v.
// Because the LUT is uniform (every entry == lut_v), the result must be
// lut_v regardless of src_v; dst_v is a sentinel — any surviving dst_v
// pixel would show the LUT pass failed to overwrite the destination.
// NOTE(review): assumes h_dst.data / h_dst.dataend delimit a contiguous
// uchar buffer (cv::Mat-style) — confirm against CudaHostImage.
static void cuda_apply_lut_filled_test(
    CudaHostImage& h_dst, CudaImage& d_dst,
    CudaImage& src, CudaLUT& lut,
    uchar dst_v, uchar src_v, uchar lut_v)
{
    cuda_image_fill_async(d_dst, dst_v);
    cuda_image_fill_async(src, src_v);
    cuda_lut_fill_async(lut, lut_v);
    cuda_lut_set_async(lut);
    cuda_apply_lut_async(d_dst, src);
    cuda_image_copy_to_host_async(h_dst, d_dst);
    // All calls above are asynchronous; synchronize before reading host data.
    checkCudaErrors(hipDeviceSynchronize());
    CHECK(std::all_of((const uchar*) h_dst.data, h_dst.dataend,
                      [lut_v](uchar v) { return v == lut_v; }));
}
// End-to-end LUT application test on an 8x8 image, for three uniform LUT
// fill values (0x00, 0x01, 0xFF).  dst_v/src_v are arbitrary sentinels;
// the uniform LUT must map every source pixel to lut_v.
// Fix: the second subcase name had the typo "oned" and the third was
// truncated ("should make image also"); code is otherwise unchanged.
TEST_CASE("LUTs can be applied to images")
{
    const auto cols = 8;
    const auto rows = 8;
    cuda_proc_init();
    auto h_dst = cuda_create_host_image(cols, rows);
    auto d_dst = cuda_create_image(cols, rows);
    auto src = cuda_create_image(cols, rows);
    auto lut = cuda_create_lut();
    SUBCASE("Applying LUT with zeros should make image all zeros")
    {
        const auto dst_v = 0xAB;
        const auto src_v = 0xCD;
        const auto lut_v = 0x00;
        cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
    }
    SUBCASE("Applying LUT with ones should make image all ones")
    {
        const auto dst_v = 0xAB;
        const auto src_v = 0xCD;
        const auto lut_v = 0x01;
        cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
    }
    SUBCASE("Applying LUT with 0xFF should make image all 0xFF")
    {
        const auto dst_v = 0xAB;
        const auto src_v = 0xCD;
        const auto lut_v = 0xFF;
        cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
    }
    // Release device/host resources in reverse order of creation.
    cuda_free_lut(lut);
    cuda_free_image(src);
    cuda_free_image(d_dst);
    cuda_free_host_image(h_dst);
    cuda_proc_deinit();
}
| 91144a3f47323a751d46ea09614456fa420a4b53.cu | ///////////////////////////////////////////////////////////////////////////////
// cuda_lut_test.cu
//
// Contains implementation of tests for CUDA lut module
///////////////////////////////////////////////////////////////////////////////
#include "doctest/doctest.h"
#include <algorithm>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "cuda_proc.cuh"
#include "cuda_lut.cuh"
static void cuda_apply_lut_filled_test(
CudaHostImage& h_dst, CudaImage& d_dst,
CudaImage& src, CudaLUT& lut,
uchar dst_v, uchar src_v, uchar lut_v)
{
cuda_image_fill_async(d_dst, dst_v);
cuda_image_fill_async(src, src_v);
cuda_lut_fill_async(lut, lut_v);
cuda_lut_set_async(lut);
cuda_apply_lut_async(d_dst, src);
cuda_image_copy_to_host_async(h_dst, d_dst);
checkCudaErrors(cudaDeviceSynchronize());
CHECK(std::all_of((const uchar*) h_dst.data, h_dst.dataend,
[lut_v](uchar v) { return v == lut_v; }));
}
TEST_CASE("LUTs can be applied to images")
{
const auto cols = 8;
const auto rows = 8;
cuda_proc_init();
auto h_dst = cuda_create_host_image(cols, rows);
auto d_dst = cuda_create_image(cols, rows);
auto src = cuda_create_image(cols, rows);
auto lut = cuda_create_lut();
SUBCASE("Applying LUT with zeros should make image also zeroed")
{
const auto dst_v = 0xAB;
const auto src_v = 0xCD;
const auto lut_v = 0x00;
cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
}
SUBCASE("Applying LUT with ones should make image also oned")
{
const auto dst_v = 0xAB;
const auto src_v = 0xCD;
const auto lut_v = 0x01;
cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
}
SUBCASE("Applying LUT with 0xFF should make image also")
{
const auto dst_v = 0xAB;
const auto src_v = 0xCD;
const auto lut_v = 0xFF;
cuda_apply_lut_filled_test(h_dst, d_dst, src, lut, dst_v, src_v, lut_v);
}
cuda_free_lut(lut);
cuda_free_image(src);
cuda_free_image(d_dst);
cuda_free_host_image(h_dst);
cuda_proc_deinit();
}
|
5d840d3d8d69f0e1f4cf8f9b301b2c885974dd58.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bcnn_op_cuda_tanh_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks bcnn_op_cuda_tanh_kernel over a grid of matrix sizes and launch
// configurations, printing [usecs, (BLOCKX,BLOCKY), (XSIZE,YSIZE)] per combo.
//
// argv[1] - number of matrix sizes from matrices_ to benchmark (clamped to
//           the table size).
//
// Fixes over the original:
//  * hipMalloc was called with XSIZE*YSIZE *bytes* for a float buffer the
//    kernel indexes over n = XSIZE*YSIZE elements — 4x undersized, i.e.
//    out-of-bounds device writes; now allocates n * sizeof(float).
//  * argv[1] was read without checking argc (crash when run with no args).
//  * matrix_len was not clamped, allowing out-of-bounds reads of matrices_.
//  * x and y were never freed, leaking device memory across all 140 configs.
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_matrix_sizes (1-7)>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // Clamp to the size of the matrices_ table to avoid OOB indexing.
    const int kNumMatrices = (int)(sizeof(matrices_) / sizeof(matrices_[0]));
    if (matrix_len > kNumMatrices) matrix_len = kNumMatrices;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            float *x = NULL;
            hipMalloc(&x, n * sizeof(float));   // was n bytes: 4x too small
            float *y = NULL;
            hipMalloc(&y, n * sizeof(float));
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // forces context creation before timing
            hipLaunchKernelGGL((
                bcnn_op_cuda_tanh_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
            hipDeviceSynchronize();
            // Warm-up launches (excluded from timing).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((
                    bcnn_op_cuda_tanh_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((
                    bcnn_op_cuda_tanh_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release the buffers for this configuration (was leaked).
            hipFree(x);
            hipFree(y);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "bcnn_op_cuda_tanh_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks bcnn_op_cuda_tanh_kernel over the first argv[1] matrix sizes from
// matrices_ crossed with all 20 launch configurations from blocks_.
// Prints one line per combination: [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
// BUG FIX: was cudaMalloc(&x, XSIZE*YSIZE) — that is a byte count, but the
// kernel reads/writes n floats, so the old allocation was 4x too small
// (out-of-bounds device accesses). Allocate n * sizeof(float).
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
// Round each dimension up so the grid covers the whole XSIZE x YSIZE domain.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // force context creation before timing
bcnn_op_cuda_tanh_kernel<<<gridBlock,threadBlock>>>(n,x,y); // warm-up launch
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
bcnn_op_cuda_tanh_kernel<<<gridBlock,threadBlock>>>(n,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
bcnn_op_cuda_tanh_kernel<<<gridBlock,threadBlock>>>(n,x,y);
}
// BUG FIX: kernel launches are asynchronous — without a synchronize here the
// timer only measured launch enqueue cost, not kernel execution.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: x and y were leaked on every iteration of the sweep.
cudaFree(x);
cudaFree(y);
}
}}
c8bdb7ea993d396f8dbf4a812c7c800c0ab047d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#ifndef BAD_TOPOLOGY_LABEL
#define BAD_TOPOLOGY_LABEL -2
#endif
#ifndef NUM_OF_CHANNELS
#define NUM_OF_CHANNELS 3
#endif
#ifndef USE_COUNTS
#define USE_COUNTS 1
#endif
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#define THREADS_PER_BLOCK 512
#include "update_seg.h"
#include "sp.h"
#include <stdio.h>
#ifndef WIN32
#include <unistd.h>
#endif
/*
__device__ static float atomicMaxFloat(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
*/
// Atomic max for float, built from integer atomics (CUDA/HIP have no native
// float atomicMax). Exploits the IEEE-754 bit layout: for values >= 0 the
// signed-int bit patterns are ordered the same way as the floats, so atomicMax
// on the int view is correct; for negative values the unsigned bit patterns
// are reverse-ordered, so atomicMin on the unsigned view is used instead.
// Returns the value previously stored at *addr, as observed by the atomic.
__device__ __forceinline__ float atomicMaxFloat (float * addr, float value) {
float old;
old = (value >= 0) ? __int_as_float(atomicMax((int *)addr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value)));
return old;
}
// Host launcher: marks boundary pixels of segmentation `seg` into `border`,
// one thread per pixel (see find_border_pixels for the marking rules).
__host__ void CudaFindBorderPixels(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int nBlocks = (nPixels + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    dim3 threads(THREADS_PER_BLOCK, 1);
    dim3 blocks(nBlocks, 1);
    hipLaunchKernelGGL(find_border_pixels, blocks, threads, 0, 0,
                       seg, border, nPixels, xdim, ydim, single_border);
}
// Host launcher for the single-sided border pass (find_border_pixels_end),
// one thread per pixel.
__host__ void CudaFindBorderPixels_end(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int nBlocks = (nPixels + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    dim3 threads(THREADS_PER_BLOCK, 1);
    dim3 blocks(nBlocks, 1);
    hipLaunchKernelGGL(find_border_pixels_end, blocks, threads, 0, 0,
                       seg, border, nPixels, xdim, ydim, single_border);
}
// One thread per pixel: sets border[idx]=1 when the pixel lies on the image
// frame (row 0, column 0, last row, last column) or when any 4-neighbour
// carries a different segmentation label. Pixels that are not borders are left
// untouched — the caller is expected to have zeroed `border` beforehand.
// `single_border` is accepted for interface compatibility but unused here.
__global__ void find_border_pixels(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int pix = threadIdx.x + blockIdx.x * blockDim.x;
    if (pix >= nPixels) return;

    const int col = pix % xdim;
    const int row = pix / xdim;

    // The one-pixel image frame is always treated as border; this also
    // guarantees the neighbour reads below stay in bounds.
    if ((row < 1) || (col < 1) || (row >= ydim - 1) || (col >= xdim - 1))
    {
        border[pix] = 1;
        return;
    }

    // Read-only-cache loads of the centre label and its 4-neighbourhood.
    const int centre = __ldg(&seg[pix]);
    const int above  = __ldg(&seg[pix - xdim]);
    const int left   = __ldg(&seg[pix - 1]);
    const int below  = __ldg(&seg[pix + xdim]);
    const int right  = __ldg(&seg[pix + 1]);

    // Any differing neighbour makes this a border pixel.
    if ((centre != above) || (centre != below) || (centre != right) || (centre != left))
    {
        border[pix] = 1;
    }
    return;
}
// Like find_border_pixels, but draws each superpixel boundary single-sided:
// a pixel is marked only when some in-bounds neighbour has a *smaller* label,
// so the border stays one pixel wide. Unlike the other kernel, this one fully
// rewrites `border` (explicit init to 0 per pixel).
// `single_border` is accepted for interface compatibility but unused here.
__global__ void find_border_pixels_end(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx>=nPixels) return;
border[idx]=0; // init: this pass owns the whole mask
int x = idx % xdim;
int y = idx / xdim;
int C = seg[idx]; // center label
int N,S,E,W; // north, south, east, west neighbour labels
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init
// BUG FIX: bounds tests were `y>1` / `x>1`, which skipped the perfectly valid
// north/west neighbours of row 1 and column 1. A neighbour exists whenever
// y>0 / x>0, matching the south/east tests below.
if (y>0){
N = seg[idx-xdim]; // above
}
if (x>0){
W = seg[idx-1]; // left
}
if (y<ydim-1){
S = seg[idx+xdim]; // below
}
if (x<xdim-1){
E = seg[idx+1]; // right
}
// If a nbr is different from the central pixel and is not out-of-bounds,
// this is a border pixel; mark it only on the higher-label side.
if ( (N>=0 && C!=N) || (S>=0 && C!=S) || (E>=0 && C!=E) || (W>=0 && C!=W) ){
if (N>=0 && C>N) border[idx]=1;
if (S>=0 && C>S) border[idx]=1;
if (E>=0 && C>E) border[idx]=1;
if (W>=0 && C>W) border[idx]=1;
}
return;
}
// Scores one (pixel, candidate-label) pair per thread: 4 consecutive threads
// share a pixel, each evaluating one of the 4 candidate labels stored in
// post_changes[idx].changes[]. The score is a Gaussian appearance term
// (diagonal precision J_i) plus a Gaussian spatial term (per-superpixel
// sigma_s), and the best score is raced into post_changes[idx].post[4] via
// atomicMaxFloat; the winning thread also writes its label into seg[idx].
// NOTE(review): the compare-then-store on seg[idx] is not atomic with the max
// update, so concurrent winners can race; presumably acceptable by design.
// `changes` and `s_std`/`i_std` are part of the interface but unused here.
__global__ void cal_posterior( float* img, int* seg, bool* border, superpixel_params* sp_params, float3 J_i, float logdet_Sigma_i, float i_std, int s_std, int* changes, int nPts , int xdim,post_changes_helper* post_changes)
{
    const int flat = threadIdx.x + blockIdx.x * blockDim.x;
    const int cand = flat % 4;   // which of the pixel's 4 candidates
    const int idx  = flat / 4;   // pixel index
    if (flat >= 4 * nPts) return;
    if (border[idx] == 0) return;      // only border pixels may relabel

    const int skip    = post_changes[idx].skip_post[cand];
    const int seg_idx = post_changes[idx].changes[cand];
    if (skip || !seg_idx) return;      // label 0 doubles as "no candidate"

    float res = -1000;                 // large negative base score
    float* pixel = img + idx * 3;      // interleaved 3-channel pixel
    const int x = idx % xdim;
    const int y = idx / xdim;

    // Residuals against the candidate superpixel's colour and position means.
    const float x0 = pixel[0] - sp_params[seg_idx].mu_i.x;
    const float x1 = pixel[1] - sp_params[seg_idx].mu_i.y;
    const float x2 = pixel[2] - sp_params[seg_idx].mu_i.z;
    const int d0 = x - sp_params[seg_idx].mu_s.x;
    const int d1 = y - sp_params[seg_idx].mu_s.y;

    const float J_i_x = J_i.x;
    const float J_i_y = J_i.y;
    const float J_i_z = J_i.z;
    const float sigma_s_x = sp_params[seg_idx].sigma_s.x;
    const float sigma_s_y = sp_params[seg_idx].sigma_s.y;
    const float sigma_s_z = sp_params[seg_idx].sigma_s.z;
    const float logdet_sigma_s = sp_params[seg_idx].logdet_Sigma_s;

    // Colour Mahalanobis term under the shared diagonal precision J_i.
    res = res - (x0*x0*J_i_x + x1*x1*J_i_y + x2*x2*J_i_z);
    res = res - logdet_Sigma_i;
    // Spatial Mahalanobis term; sigma_s fields appear to hold the 2x2
    // precision entries (x=xx, y=xy, z=yy) — TODO confirm against sp.h.
    res = res - d0*d0*sigma_s_x;
    res = res - d1*d1*sigma_s_z;
    res = res - 2*d0*d1*sigma_s_y;
    res = res - logdet_sigma_s;

    // Race the score into the per-pixel running maximum; the strict winner
    // commits its candidate label.
    if (res > atomicMaxFloat(&post_changes[idx].post[4], res))
        seg[idx] = seg_idx;
    return;
}
// Per-pixel argmax over the 4 candidate posteriors filled in by cal_posterior:
// commits the best-scoring candidate label to seg[idx], keeping the current
// label when no valid candidate exists (candidate label 0 means "none").
// `changes` is accepted for interface compatibility but unused here.
__global__ void change_seg(int* seg, int* changes, int nPts ,post_changes_helper* post_changes)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= nPts) return;

    float best_score = -99999;
    int best_label = seg[idx];   // default: keep current assignment
    for (int k = 0; k < 4; k++)
    {
        const int cand = post_changes[idx].changes[k];
        if (cand)
        {
            const float score = post_changes[idx].post[k];
            if (score > best_score)
            {
                best_score = score;
                best_label = cand;
            }
        }
    }
    seg[idx] = best_label;
    return;
}
// Host driver for one round of superpixel boundary refinement.
// For nInnerIters iterations: recompute the border mask, then sweep the four
// 2x2 checkerboard phases (xmod3, ymod3) so that neighbouring border pixels
// are never updated in the same kernel launch (avoids read/write races on
// `seg`). Finishes by recomputing the border mask for the caller.
// NOTE(review): several parameters (seg_potts_label, cal_cov, i_std, s_std,
// nSPs_buffer) are forwarded or unused here — semantics live in the kernels.
__host__ void update_seg(float* img, int* seg, int* seg_potts_label ,bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
int nInnerIters,
const int nPixels, const int nSPs, int nSPs_buffer, int xdim, int ydim, float beta_potts_term, post_changes_helper* post_changes){
// One thread per pixel (num_block) and per pixel-candidate (num_block2).
int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) );
int num_block2 = ceil( double(nPixels*4) / double(THREADS_PER_BLOCK) );
dim3 ThreadPerBlock(THREADS_PER_BLOCK,1);
dim3 BlockPerGrid(num_block,1);
dim3 BlockPerGrid2(num_block2,1);
// 2D launch shapes kept for the commented-out alternative kernels below.
dim3 blockSize(8, 8);
dim3 gridSize((int)ceil((float)xdim*2 / (float)blockSize.x), (int)ceil((float)ydim*2 / (float)blockSize.y));
int single_border = 0 ;
// Clear candidate/score scratch once for the whole refinement round.
hipMemset(post_changes, 0, nPixels*sizeof(post_changes_helper));
for (int iter = 0 ; iter < nInnerIters; iter++){
// strides of 2*2
hipMemset(border, 0, nPixels*sizeof(bool));
hipLaunchKernelGGL(( find_border_pixels), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg, border, nPixels, xdim, ydim, single_border);
for (int xmod3 = 0 ; xmod3 <2; xmod3++){
for (int ymod3 = 0; ymod3 <2; ymod3++){
//find the border pixels
//find_border_pixels<<<BlockPerGrid,ThreadPerBlock>>>(seg, border, nPixels, xdim, ydim, single_border);
//update_seg_subset<<<BlockPerGrid2,ThreadPerBlock>>>(img, seg, seg_potts_label,border, sp_params, J_i, logdet_Sigma_i, cal_cov, i_std, s_std, nPixels, nSPs,xdim, ydim, xmod3, ymod3, beta_potts_term,post_changes);
//update_seg_subset<<<gridSize,blockSize>>>(img, seg, seg_potts_label,border, sp_params, J_i, logdet_Sigma_i, cal_cov, i_std, s_std, nPixels, nSPs,xdim, ydim, xmod3, ymod3, beta_potts_term,post_changes);
hipLaunchKernelGGL(( update_seg_subset), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, img, seg, seg_potts_label,border, sp_params, J_i, logdet_Sigma_i, cal_cov, i_std, s_std, nPixels, nSPs,xdim, ydim, xmod3, ymod3, beta_potts_term,post_changes);
//cal_posterior<<<BlockPerGrid2,ThreadPerBlock>>>(img, seg, border, sp_params, J_i, logdet_Sigma_i, i_std, s_std, 0, nPixels ,xdim, post_changes);
//change_seg<<<BlockPerGrid,ThreadPerBlock>>>(seg, 0, nPixels ,post_changes);
}
}
}
// Leave a fresh border mask for downstream consumers.
hipMemset(border, 0, nPixels*sizeof(bool));
hipLaunchKernelGGL(( find_border_pixels), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg, border, nPixels, xdim, ydim, single_border);
}
/*
* Update the superpixel labels for pixels
* that are on the boundary of the superpixels
* and on the (xmod3, ymod3) position of 3*3 block
*/
/*
__global__ void update_seg_subset(
float* img, int* seg, int* seg_potts_label, bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
const int nPts,const int nSuperpixels,
const int xdim, const int ydim,
const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// idx = idx_img;
if (idx >= 4*nPts) return;
int seg_idx = idx/4;
int idx_inside = idx%4;
if (border[seg_idx]==0) return;
// strides of 2*2
int x = seg_idx % xdim;
if (x % 2 != xmod3) return;
int y = seg_idx / xdim;
if (y % 2 != ymod3) return;
int idx_cache = threadIdx.x/4;
float beta = 0;
//printf("(%d, %d) - %d, %d, %d \n", x,y , idx_cache,threadIdx.x );
const bool x_greater_than_1 = (x>1);
const bool y_greater_than_1 = (y>1);
const bool x_smaller_than_xdim_minus_1 = x<(xdim-1);
const bool y_smaller_than_ydim_minus_1 = y<(ydim-1);
if ((!x_greater_than_1)||(!y_greater_than_1)||(!x_smaller_than_xdim_minus_1)||(!y_smaller_than_ydim_minus_1)) return;
__shared__ int N_shared[THREADS_PER_BLOCK/4];
__shared__ int S_shared[THREADS_PER_BLOCK/4];
__shared__ int E_shared[THREADS_PER_BLOCK/4];
__shared__ int W_shared[THREADS_PER_BLOCK/4];
__shared__ int SW_shared[THREADS_PER_BLOCK/4];
__shared__ int SE_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Prev_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Count[THREADS_PER_BLOCK/4];
__shared__ int S_Count[THREADS_PER_BLOCK/4];
__shared__ int E_Count[THREADS_PER_BLOCK/4];
__shared__ int W_Count[THREADS_PER_BLOCK/4];
//int C = seg[seg_idx]; // center
int N,S,E,W, NW, NE , SW, SE; // north, south, east,west
// N = S = W = E = OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[9];
//float potts_term[4];
//potts_term[0] = potts_term[1] = potts_term[2] = potts_term[3] = 0;
bool isNvalid = 0 ;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0 ;
int count_diff_nbrs_N=0;
int count_diff_nbrs_S=0;
int count_diff_nbrs_E=0;
int count_diff_nbrs_W=0;
if(idx_inside==0)
{
//NW =N = NE =W = E = SW = S = SE=5 ;
// init
post_changes[seg_idx].post[4] = -9999;
NW =__ldg(&seg[seg_idx-xdim-1]);
N = __ldg(&seg[seg_idx-xdim]);
NE = __ldg(&seg[seg_idx-xdim+1]);
}
if(idx_inside==1)
{
W = __ldg(&seg[seg_idx-1]);
E = __ldg(&seg[seg_idx+1]);
W_shared[idx_cache] = W;
E_shared[idx_cache] = E;
}
if(idx_inside==2)
{
SW = __ldg(&seg[seg_idx+xdim-1]);
S = __ldg(&seg[seg_idx+xdim]);
SE =__ldg(&seg[seg_idx+xdim+1]);
SW_shared[idx_cache] = SW;
SE_shared[idx_cache] = SE;
S_shared[idx_cache] = S;
}
__syncthreads();
if(idx_inside==0)
{
W = W_shared[idx_cache] ;
E = E_shared[idx_cache] ;
SW = SW_shared[idx_cache] ;
SE = SE_shared[idx_cache] ;
S = S_shared[idx_cache];
//N :
set_nbrs(NW, N, NE, W, E, SW, S, SE,N, nbrs);
count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
isNvalid = nbrs[8];
//potts_term[0] = calc_potts(beta,count_diff_nbrs_N);
//W :
set_nbrs(NW, N, NE, W, E, SW, S, SE,W, nbrs);
count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
isWvalid = nbrs[8];
// potts_term[1] = calc_potts(beta,count_diff_nbrs_W);
//S :
set_nbrs(NW, N, NE, W, E, SW, S, SE,S, nbrs);
count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
isSvalid = nbrs[8];
//potts_term[2] = calc_potts(beta,count_diff_nbrs_S);
//E:
set_nbrs(NW, N, NE, W, E, SW, S, SE,E, nbrs);
// check 8 nbrs and save result if valid to change to the last place of array
// return how many nbrs different for potts term calculation
count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
isEvalid = nbrs[8];
//potts_term[3] = calc_potts(beta,count_diff_nbrs_E);
if(!isNvalid) N = 0;
if(!isWvalid) W = 0;
if(!isSvalid) S = 0;
if(!isEvalid) E = 0;
N_Count[idx_cache] = count_diff_nbrs_N;
S_Count[idx_cache] = count_diff_nbrs_S ;
E_Count[idx_cache] = count_diff_nbrs_E;
W_Count[idx_cache] = count_diff_nbrs_W;
N_Prev_shared[idx_cache] = (int(isNvalid))+(int(isWvalid))+(int(isSvalid))+(int(isEvalid));
}
__syncthreads();
if (N_Prev_shared[idx_cache]<4) return; //BuG for less than 4
switch (idx_inside)
{
case (0):
if(!N_shared[idx_cache]) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,N_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,N_Count[idx_cache],beta);
break;
case (1):
if(!S_shared[idx_cache]) return;
if(S_shared[idx_cache]!=N_shared[idx_cache])
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,S_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,S_Count[idx_cache],beta);
break;
case (2):
if(!W_shared[idx_cache]) return;
if((W_shared[idx_cache]!=S_shared[idx_cache])&&(W_shared[idx_cache]!=N_shared[idx_cache]))
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,W_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,W_Count[idx_cache],beta);
case(3):
if(!E_shared[idx_cache]) return;
if((E_shared[idx_cache]!=W_shared[idx_cache])&&(E_shared[idx_cache]!=S_shared[idx_cache])&&(E_shared[idx_cache]!=N_shared[idx_cache]))
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,E_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,E_Count[idx_cache],beta);
break;
}
return;
}
*/
/*
 * Relabels one border pixel per thread during the (xmod3, ymod3) phase of the
 * 2x2 checkerboard sweep. A pixel participates only if it is on the border
 * mask, matches the phase parity, and is at least 2 pixels away from the
 * image edge. For each of its N/W/S/E neighbour labels that passes the
 * topology check (set_nbrs / ischangbale_by_nbrs), the candidate posterior is
 * evaluated via cal_posterior_new, which folds the Potts smoothness term
 * (weight `beta`) into the score; the best label is committed to seg.
 * NOTE(review): res_max.y (the running-best label) is only ever set inside
 * cal_posterior_new — the unconditional N evaluation presumably initialises
 * it; confirm cal_posterior_new always writes .y on first call.
 * NOTE(review): `seg[seg_idx] = res_max.y` narrows float -> int; assumes the
 * label survives the float round-trip exactly (true for labels < 2^24).
 */
__global__ void update_seg_subset(
float* img, int* seg, int* seg_potts_label, bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
const int nPts,const int nSuperpixels,
const int xdim, const int ydim,
const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
int label_check;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// idx = idx_img;
int seg_idx = idx;
if (seg_idx>=nPts) return;
// Checkerboard phase filter: only pixels with matching (x%2, y%2) update now.
int x = seg_idx % xdim;
if (x % 2 != xmod3) return;
int y = seg_idx / xdim;
if (y % 2 != ymod3) return;
// Only pixels on the current superpixel boundary may change label.
if (border[seg_idx]==0) return;
// strides of 2*2
//float beta = 4;
//printf("(%d, %d) - %d, %d, %d \n", x,y , idx_cache,threadIdx.x );
// Require a full 8-neighbourhood (2-pixel margin) so the __ldg reads below
// never leave the image.
const bool x_greater_than_1 = (x>1);
const bool y_greater_than_1 = (y>1);
const bool x_smaller_than_xdim_minus_1 = x<(xdim-1);
const bool y_smaller_than_ydim_minus_1 = y<(ydim-1);
if ((!x_greater_than_1)||(!y_greater_than_1)||(!x_smaller_than_xdim_minus_1)||(!y_smaller_than_ydim_minus_1)) return;
/*if(sp_params[ seg[seg_idx]].count==1)
{
seg[seg_idx]=seg[seg_idx-1];
return;
}*/
//int C = seg[seg_idx]; // center
// N = S = W = E = OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
// nbrs[0..7]: 8-neighbourhood match flags; nbrs[8]: topology-validity flag.
bool nbrs[9];
//float potts_term[4];
//potts_term[0] = potts_term[1] = potts_term[2] = potts_term[3] = 0;
bool isNvalid = 0 ;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0 ;
float beta = beta_potts_term;
//printf("Beta: %f", beta);
int count_diff_nbrs_N=0;
int count_diff_nbrs_S=0;
int count_diff_nbrs_E=0;
int count_diff_nbrs_W=0;
//NW =N = NE =W = E = SW = S = SE=5 ;
// init
// res_max.x: best score so far; res_max.y: label achieving it (set by
// cal_posterior_new).
float2 res_max;
res_max.x = -9999;
//post_changes[seg_idx].post[4] = -9999;
// Labels of the full 8-neighbourhood, via the read-only data cache.
int NW =__ldg(&seg[seg_idx-xdim-1]);
int N = __ldg(&seg[seg_idx-xdim]);
int NE = __ldg(&seg[seg_idx-xdim+1]);
int W = __ldg(&seg[seg_idx-1]);
int E = __ldg(&seg[seg_idx+1]);
int SW = __ldg(&seg[seg_idx+xdim-1]);
int S = __ldg(&seg[seg_idx+xdim]);
int SE =__ldg(&seg[seg_idx+xdim+1]);
// For each candidate direction: build the neighbour-match mask, count the
// differing neighbours (Potts contribution), and bail out of the whole
// update if the candidate would break superpixel topology.
//N :
set_nbrs(NW, N, NE, W, E, SW, S, SE,N, nbrs);
count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
isNvalid = nbrs[8];
//potts_term[0] = calc_potts(beta,count_diff_nbrs_N);
if(!isNvalid) return;
//W :
set_nbrs(NW, N, NE, W, E, SW, S, SE,W, nbrs);
count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
isWvalid = nbrs[8];
if(!isWvalid) return;
// potts_term[1] = calc_potts(beta,count_diff_nbrs_W);
//S :
set_nbrs(NW, N, NE, W, E, SW, S, SE,S, nbrs);
count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
isSvalid = nbrs[8];
if(!isSvalid) return;
//potts_term[2] = calc_potts(beta,count_diff_nbrs_S);
//E:
set_nbrs(NW, N, NE, W, E, SW, S, SE,E, nbrs);
// check 8 nbrs and save result if valid to change to the last place of array
// return how many nbrs different for potts term calculation
count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
isEvalid = nbrs[8];
if(!isEvalid) return;
//potts_term[3] = calc_potts(beta,count_diff_nbrs_E);
//N_Prev_shared[idx_cache] = (int(isNvalid))+(int(isWvalid))+(int(isSvalid))+(int(isEvalid));
// Evaluate each distinct candidate label once, keeping the running best.
label_check = N;
res_max =cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,count_diff_nbrs_N,beta,res_max);
label_check = S;
if(label_check!=N)
res_max = cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,count_diff_nbrs_S,beta,res_max);
label_check = W;
if((label_check!=S)&&(label_check!=N))
res_max = cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,count_diff_nbrs_W,beta,res_max);
label_check = E;
if((label_check!=W)&&(label_check!=S)&&(label_check!=N))
res_max= cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,count_diff_nbrs_E,beta,res_max);
// Commit the winning label (float -> int narrowing; see NOTE above).
seg[seg_idx] = res_max.y;
return;
}
/*
__global__ void update_seg_subset(
float* img, int* seg, int* seg_potts_label, bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
const int nPts,const int nSuperpixels,
const int xdim, const int ydim,
const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
int label_check;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// idx = idx_img;
if (idx >= 4*nPts) return;
int seg_idx = idx/4;
int idx_inside = idx%4;
int x = seg_idx % xdim;
if (x % 2 != xmod3) return;
int y = seg_idx / xdim;
if (y % 2 != ymod3) return;
if (border[seg_idx]==0) return;
// strides of 2*2
int idx_cache = threadIdx.x/4;
float beta = 0;
//printf("(%d, %d) - %d, %d, %d \n", x,y , idx_cache,threadIdx.x );
const bool x_greater_than_1 = (x>1);
const bool y_greater_than_1 = (y>1);
const bool x_smaller_than_xdim_minus_1 = x<(xdim-1);
const bool y_smaller_than_ydim_minus_1 = y<(ydim-1);
if ((!x_greater_than_1)||(!y_greater_than_1)||(!x_smaller_than_xdim_minus_1)||(!y_smaller_than_ydim_minus_1)) return;
__shared__ int N_shared[THREADS_PER_BLOCK/4];
__shared__ int S_shared[THREADS_PER_BLOCK/4];
__shared__ int E_shared[THREADS_PER_BLOCK/4];
__shared__ int W_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Prev_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Count[THREADS_PER_BLOCK/4];
__shared__ int S_Count[THREADS_PER_BLOCK/4];
__shared__ int E_Count[THREADS_PER_BLOCK/4];
__shared__ int W_Count[THREADS_PER_BLOCK/4];
if(idx_inside==0)
{
//int C = seg[seg_idx]; // center
int N,S,E,W, NW, NE , SW, SE; // north, south, east,west
// N = S = W = E = OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[9];
//float potts_term[4];
//potts_term[0] = potts_term[1] = potts_term[2] = potts_term[3] = 0;
bool isNvalid = 0 ;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0 ;
float beta = beta_potts_term;
int count_diff_nbrs_N=0;
int count_diff_nbrs_S=0;
int count_diff_nbrs_E=0;
int count_diff_nbrs_W=0;
//NW =N = NE =W = E = SW = S = SE=5 ;
// init
post_changes[seg_idx].post[4] = -9999;
NW =__ldg(&seg[seg_idx-xdim-1]);
N = __ldg(&seg[seg_idx-xdim]);
NE = __ldg(&seg[seg_idx-xdim+1]);
W = __ldg(&seg[seg_idx-1]);
E = __ldg(&seg[seg_idx+1]);
SW = __ldg(&seg[seg_idx+xdim-1]);
S = __ldg(&seg[seg_idx+xdim]);
SE =__ldg(&seg[seg_idx+xdim+1]);
//N :
set_nbrs(NW, N, NE, W, E, SW, S, SE,N, nbrs);
count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
isNvalid = nbrs[8];
//potts_term[0] = calc_potts(beta,count_diff_nbrs_N);
//W :
set_nbrs(NW, N, NE, W, E, SW, S, SE,W, nbrs);
count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
isWvalid = nbrs[8];
// potts_term[1] = calc_potts(beta,count_diff_nbrs_W);
//S :
set_nbrs(NW, N, NE, W, E, SW, S, SE,S, nbrs);
count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
isSvalid = nbrs[8];
//potts_term[2] = calc_potts(beta,count_diff_nbrs_S);
//E:
set_nbrs(NW, N, NE, W, E, SW, S, SE,E, nbrs);
// check 8 nbrs and save result if valid to change to the last place of array
// return how many nbrs different for potts term calculation
count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
isEvalid = nbrs[8];
//potts_term[3] = calc_potts(beta,count_diff_nbrs_E);
if(!isNvalid) N = 0;
if(!isWvalid) W = 0;
if(!isSvalid) S = 0;
if(!isEvalid) E = 0;
N_shared[idx_cache] = N;
W_shared[idx_cache] = W;
E_shared[idx_cache] = E;
S_shared[idx_cache] = S;
N_Count[idx_cache] = count_diff_nbrs_N;
S_Count[idx_cache] = count_diff_nbrs_S ;
E_Count[idx_cache] = count_diff_nbrs_E;
W_Count[idx_cache] = count_diff_nbrs_W;
if((!N)||(!W)||(!E)||(!S)) N_Prev_shared[idx_cache] = 1;
else N_Prev_shared[idx_cache] = 0;
//N_Prev_shared[idx_cache] = (int(isNvalid))+(int(isWvalid))+(int(isSvalid))+(int(isEvalid));
}
__syncthreads();
if (N_Prev_shared[idx_cache]) return; //BuG for less than 4
switch (idx_inside)
{
case (0):
label_check = N_shared[idx_cache];
if(!label_check) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,N_Count[idx_cache],beta);
break;
case (1):
label_check = S_shared[idx_cache];
if(!label_check) return;
if(label_check!=N_shared[idx_cache])
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,S_Count[idx_cache],beta);
break;
case (2):
label_check = W_shared[idx_cache];
if(!label_check) return;
if((label_check==S_shared[idx_cache])||(label_check==N_shared[idx_cache])) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,W_Count[idx_cache],beta);
break;
case(3):
label_check = E_shared[idx_cache];
if(!label_check) return;
if((label_check==W_shared[idx_cache])||(label_check==S_shared[idx_cache])||(label_check==N_shared[idx_cache])) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,E_Count[idx_cache],beta);
break;
}
return;
}
*/
| c8bdb7ea993d396f8dbf4a812c7c800c0ab047d3.cu | #ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#ifndef BAD_TOPOLOGY_LABEL
#define BAD_TOPOLOGY_LABEL -2
#endif
#ifndef NUM_OF_CHANNELS
#define NUM_OF_CHANNELS 3
#endif
#ifndef USE_COUNTS
#define USE_COUNTS 1
#endif
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#define THREADS_PER_BLOCK 512
#include "update_seg.h"
#include "sp.h"
#include <stdio.h>
#ifndef WIN32
#include <unistd.h>
#endif
/*
__device__ static float atomicMaxFloat(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
*/
// Atomic max for float, built from integer atomics (CUDA has no native float
// atomicMax). Exploits the IEEE-754 bit layout: for values >= 0 the signed-int
// bit patterns order the same way as the floats, so atomicMax on the int view
// is correct; for negative values the unsigned bit patterns are
// reverse-ordered, so atomicMin on the unsigned view is used instead.
// Returns the value previously stored at *addr, as observed by the atomic.
__device__ __forceinline__ float atomicMaxFloat (float * addr, float value) {
float old;
old = (value >= 0) ? __int_as_float(atomicMax((int *)addr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value)));
return old;
}
// Host launcher: marks boundary pixels of segmentation `seg` into `border`,
// one thread per pixel (see find_border_pixels for the marking rules).
__host__ void CudaFindBorderPixels(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int nBlocks = (nPixels + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    dim3 threads(THREADS_PER_BLOCK, 1);
    dim3 blocks(nBlocks, 1);
    find_border_pixels<<<blocks, threads>>>(seg, border, nPixels, xdim, ydim, single_border);
}
// Host launcher for the single-sided border pass (find_border_pixels_end),
// one thread per pixel.
__host__ void CudaFindBorderPixels_end(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int nBlocks = (nPixels + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    dim3 threads(THREADS_PER_BLOCK, 1);
    dim3 blocks(nBlocks, 1);
    find_border_pixels_end<<<blocks, threads>>>(seg, border, nPixels, xdim, ydim, single_border);
}
// One thread per pixel: sets border[idx]=1 when the pixel lies on the image
// frame (row 0, column 0, last row, last column) or when any 4-neighbour
// carries a different segmentation label. Non-border pixels are left
// untouched — the caller is expected to have zeroed `border` beforehand.
// `single_border` is accepted for interface compatibility but unused here.
__global__ void find_border_pixels(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
    const int pix = threadIdx.x + blockIdx.x * blockDim.x;
    if (pix >= nPixels) return;

    const int col = pix % xdim;
    const int row = pix / xdim;

    // The one-pixel image frame is always treated as border; this also
    // guarantees the neighbour reads below stay in bounds.
    if ((row < 1) || (col < 1) || (row >= ydim - 1) || (col >= xdim - 1))
    {
        border[pix] = 1;
        return;
    }

    // Read-only-cache loads of the centre label and its 4-neighbourhood.
    const int centre = __ldg(&seg[pix]);
    const int above  = __ldg(&seg[pix - xdim]);
    const int left   = __ldg(&seg[pix - 1]);
    const int below  = __ldg(&seg[pix + xdim]);
    const int right  = __ldg(&seg[pix + 1]);

    // Any differing neighbour makes this a border pixel.
    if ((centre != above) || (centre != below) || (centre != right) || (centre != left))
    {
        border[pix] = 1;
    }
    return;
}
// Like find_border_pixels, but draws each superpixel boundary single-sided:
// a pixel is marked only when some in-bounds neighbour has a *smaller* label,
// so the border stays one pixel wide. Unlike the other kernel, this one fully
// rewrites `border` (explicit init to 0 per pixel).
// `single_border` is accepted for interface compatibility but unused here.
__global__ void find_border_pixels_end(const int* seg, bool* border, const int nPixels, const int xdim, const int ydim, const int single_border){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx>=nPixels) return;
border[idx]=0; // init: this pass owns the whole mask
int x = idx % xdim;
int y = idx / xdim;
int C = seg[idx]; // center label
int N,S,E,W; // north, south, east, west neighbour labels
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init
// BUG FIX: bounds tests were `y>1` / `x>1`, which skipped the perfectly valid
// north/west neighbours of row 1 and column 1. A neighbour exists whenever
// y>0 / x>0, matching the south/east tests below.
if (y>0){
N = seg[idx-xdim]; // above
}
if (x>0){
W = seg[idx-1]; // left
}
if (y<ydim-1){
S = seg[idx+xdim]; // below
}
if (x<xdim-1){
E = seg[idx+1]; // right
}
// If a nbr is different from the central pixel and is not out-of-bounds,
// this is a border pixel; mark it only on the higher-label side.
if ( (N>=0 && C!=N) || (S>=0 && C!=S) || (E>=0 && C!=E) || (W>=0 && C!=W) ){
if (N>=0 && C>N) border[idx]=1;
if (S>=0 && C>S) border[idx]=1;
if (E>=0 && C>E) border[idx]=1;
if (W>=0 && C>W) border[idx]=1;
}
return;
}
// Scores one (pixel, candidate-label) pair per thread: 4 consecutive threads
// share a pixel, each evaluating one of the 4 candidate labels stored in
// post_changes[idx].changes[]. The score is a Gaussian appearance term
// (diagonal precision J_i) plus a Gaussian spatial term (per-superpixel
// sigma_s), and the best score is raced into post_changes[idx].post[4] via
// atomicMaxFloat; the winning thread also writes its label into seg[idx].
// NOTE(review): the compare-then-store on seg[idx] is not atomic with the max
// update, so concurrent winners can race; presumably acceptable by design.
// `changes` and `s_std`/`i_std` are part of the interface but unused here.
__global__ void cal_posterior( float* img, int* seg, bool* border, superpixel_params* sp_params, float3 J_i, float logdet_Sigma_i, float i_std, int s_std, int* changes, int nPts , int xdim,post_changes_helper* post_changes)
{
    const int flat = threadIdx.x + blockIdx.x * blockDim.x;
    const int cand = flat % 4;   // which of the pixel's 4 candidates
    const int idx  = flat / 4;   // pixel index
    if (flat >= 4 * nPts) return;
    if (border[idx] == 0) return;      // only border pixels may relabel

    const int skip    = post_changes[idx].skip_post[cand];
    const int seg_idx = post_changes[idx].changes[cand];
    if (skip || !seg_idx) return;      // label 0 doubles as "no candidate"

    float res = -1000;                 // large negative base score
    float* pixel = img + idx * 3;      // interleaved 3-channel pixel
    const int x = idx % xdim;
    const int y = idx / xdim;

    // Residuals against the candidate superpixel's colour and position means.
    const float x0 = pixel[0] - sp_params[seg_idx].mu_i.x;
    const float x1 = pixel[1] - sp_params[seg_idx].mu_i.y;
    const float x2 = pixel[2] - sp_params[seg_idx].mu_i.z;
    const int d0 = x - sp_params[seg_idx].mu_s.x;
    const int d1 = y - sp_params[seg_idx].mu_s.y;

    const float J_i_x = J_i.x;
    const float J_i_y = J_i.y;
    const float J_i_z = J_i.z;
    const float sigma_s_x = sp_params[seg_idx].sigma_s.x;
    const float sigma_s_y = sp_params[seg_idx].sigma_s.y;
    const float sigma_s_z = sp_params[seg_idx].sigma_s.z;
    const float logdet_sigma_s = sp_params[seg_idx].logdet_Sigma_s;

    // Colour Mahalanobis term under the shared diagonal precision J_i.
    res = res - (x0*x0*J_i_x + x1*x1*J_i_y + x2*x2*J_i_z);
    res = res - logdet_Sigma_i;
    // Spatial Mahalanobis term; sigma_s fields appear to hold the 2x2
    // precision entries (x=xx, y=xy, z=yy) — TODO confirm against sp.h.
    res = res - d0*d0*sigma_s_x;
    res = res - d1*d1*sigma_s_z;
    res = res - 2*d0*d1*sigma_s_y;
    res = res - logdet_sigma_s;

    // Race the score into the per-pixel running maximum; the strict winner
    // commits its candidate label.
    if (res > atomicMaxFloat(&post_changes[idx].post[4], res))
        seg[idx] = seg_idx;
    return;
}
__global__ void change_seg(int* seg, int* changes, int nPts ,post_changes_helper* post_changes)
{
    // One thread per pixel: commit the best-scoring of the (up to four)
    // candidate label changes recorded in post_changes by the update pass.
    // A zero entry in changes[] means "no candidate in this slot"; when no
    // slot holds a candidate the pixel keeps its current label.
    const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    if (pixel >= nPts) return;

    int   best_label = seg[pixel];   // default: keep the current label
    float best_score = -99999;       // sentinel below any recorded posterior
    for (int cand = 0; cand < 4; ++cand)
    {
        const int cand_label = post_changes[pixel].changes[cand];
        if (cand_label == 0) continue;           // empty slot
        const float cand_score = post_changes[pixel].post[cand];
        if (cand_score > best_score)             // strictly better wins
        {
            best_score = cand_score;
            best_label = cand_label;
        }
    }
    seg[pixel] = best_label;
    return;
}
__host__ void update_seg(float* img, int* seg, int* seg_potts_label ,bool* border,
                         superpixel_params* sp_params,
                         const float3 J_i, const float logdet_Sigma_i,
                         bool cal_cov, float i_std, int s_std,
                         int nInnerIters,
                         const int nPixels, const int nSPs, int nSPs_buffer, int xdim, int ydim, float beta_potts_term, post_changes_helper* post_changes){
    // Host driver for the segmentation-update sweep.
    //
    // Runs nInnerIters passes; each pass recomputes the border mask once and
    // then visits the four (xmod3, ymod3) phases of a 2x2 stride pattern so
    // that adjacent border pixels are never relabelled in the same launch.
    // Finishes by refreshing the border mask so callers observe a mask that
    // is consistent with the final labels.
    //
    // Cleanup: removed unused locals (num_block2, BlockPerGrid2, blockSize,
    // gridSize) that were only referenced by commented-out launch variants.
    int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) );
    dim3 ThreadPerBlock(THREADS_PER_BLOCK, 1);
    dim3 BlockPerGrid(num_block, 1);
    int single_border = 0;

    // Zero the per-pixel candidate/score scratch once for the whole sweep.
    cudaMemset(post_changes, 0, nPixels*sizeof(post_changes_helper));

    for (int iter = 0; iter < nInnerIters; iter++){
        // Recompute which pixels sit on a superpixel boundary.
        cudaMemset(border, 0, nPixels*sizeof(bool));
        find_border_pixels<<<BlockPerGrid,ThreadPerBlock>>>(seg, border, nPixels, xdim, ydim, single_border);
        // Visit the four phases of the 2x2 stride pattern.
        for (int xmod3 = 0; xmod3 < 2; xmod3++){
            for (int ymod3 = 0; ymod3 < 2; ymod3++){
                update_seg_subset<<<BlockPerGrid,ThreadPerBlock>>>(img, seg, seg_potts_label, border, sp_params, J_i, logdet_Sigma_i, cal_cov, i_std, s_std, nPixels, nSPs, xdim, ydim, xmod3, ymod3, beta_potts_term, post_changes);
            }
        }
    }

    // Leave the border mask in sync with the final segmentation.
    cudaMemset(border, 0, nPixels*sizeof(bool));
    find_border_pixels<<<BlockPerGrid,ThreadPerBlock>>>(seg, border, nPixels, xdim, ydim, single_border);
}
/*
* Update the superpixel labels for pixels
* that are on the boundary of the superpixels
* and on the (xmod3, ymod3) position of 3*3 block
*/
/*
__global__ void update_seg_subset(
float* img, int* seg, int* seg_potts_label, bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
const int nPts,const int nSuperpixels,
const int xdim, const int ydim,
const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// idx = idx_img;
if (idx >= 4*nPts) return;
int seg_idx = idx/4;
int idx_inside = idx%4;
if (border[seg_idx]==0) return;
// strides of 2*2
int x = seg_idx % xdim;
if (x % 2 != xmod3) return;
int y = seg_idx / xdim;
if (y % 2 != ymod3) return;
int idx_cache = threadIdx.x/4;
float beta = 0;
//printf("(%d, %d) - %d, %d, %d \n", x,y , idx_cache,threadIdx.x );
const bool x_greater_than_1 = (x>1);
const bool y_greater_than_1 = (y>1);
const bool x_smaller_than_xdim_minus_1 = x<(xdim-1);
const bool y_smaller_than_ydim_minus_1 = y<(ydim-1);
if ((!x_greater_than_1)||(!y_greater_than_1)||(!x_smaller_than_xdim_minus_1)||(!y_smaller_than_ydim_minus_1)) return;
__shared__ int N_shared[THREADS_PER_BLOCK/4];
__shared__ int S_shared[THREADS_PER_BLOCK/4];
__shared__ int E_shared[THREADS_PER_BLOCK/4];
__shared__ int W_shared[THREADS_PER_BLOCK/4];
__shared__ int SW_shared[THREADS_PER_BLOCK/4];
__shared__ int SE_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Prev_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Count[THREADS_PER_BLOCK/4];
__shared__ int S_Count[THREADS_PER_BLOCK/4];
__shared__ int E_Count[THREADS_PER_BLOCK/4];
__shared__ int W_Count[THREADS_PER_BLOCK/4];
//int C = seg[seg_idx]; // center
int N,S,E,W, NW, NE , SW, SE; // north, south, east,west
// N = S = W = E = OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[9];
//float potts_term[4];
//potts_term[0] = potts_term[1] = potts_term[2] = potts_term[3] = 0;
bool isNvalid = 0 ;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0 ;
int count_diff_nbrs_N=0;
int count_diff_nbrs_S=0;
int count_diff_nbrs_E=0;
int count_diff_nbrs_W=0;
if(idx_inside==0)
{
//NW =N = NE =W = E = SW = S = SE=5 ;
// init
post_changes[seg_idx].post[4] = -9999;
NW =__ldg(&seg[seg_idx-xdim-1]);
N = __ldg(&seg[seg_idx-xdim]);
NE = __ldg(&seg[seg_idx-xdim+1]);
}
if(idx_inside==1)
{
W = __ldg(&seg[seg_idx-1]);
E = __ldg(&seg[seg_idx+1]);
W_shared[idx_cache] = W;
E_shared[idx_cache] = E;
}
if(idx_inside==2)
{
SW = __ldg(&seg[seg_idx+xdim-1]);
S = __ldg(&seg[seg_idx+xdim]);
SE =__ldg(&seg[seg_idx+xdim+1]);
SW_shared[idx_cache] = SW;
SE_shared[idx_cache] = SE;
S_shared[idx_cache] = S;
}
__syncthreads();
if(idx_inside==0)
{
W = W_shared[idx_cache] ;
E = E_shared[idx_cache] ;
SW = SW_shared[idx_cache] ;
SE = SE_shared[idx_cache] ;
S = S_shared[idx_cache];
//N :
set_nbrs(NW, N, NE, W, E, SW, S, SE,N, nbrs);
count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
isNvalid = nbrs[8];
//potts_term[0] = calc_potts(beta,count_diff_nbrs_N);
//W :
set_nbrs(NW, N, NE, W, E, SW, S, SE,W, nbrs);
count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
isWvalid = nbrs[8];
// potts_term[1] = calc_potts(beta,count_diff_nbrs_W);
//S :
set_nbrs(NW, N, NE, W, E, SW, S, SE,S, nbrs);
count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
isSvalid = nbrs[8];
//potts_term[2] = calc_potts(beta,count_diff_nbrs_S);
//E:
set_nbrs(NW, N, NE, W, E, SW, S, SE,E, nbrs);
// check 8 nbrs and save result if valid to change to the last place of array
// return how many nbrs different for potts term calculation
count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
isEvalid = nbrs[8];
//potts_term[3] = calc_potts(beta,count_diff_nbrs_E);
if(!isNvalid) N = 0;
if(!isWvalid) W = 0;
if(!isSvalid) S = 0;
if(!isEvalid) E = 0;
N_Count[idx_cache] = count_diff_nbrs_N;
S_Count[idx_cache] = count_diff_nbrs_S ;
E_Count[idx_cache] = count_diff_nbrs_E;
W_Count[idx_cache] = count_diff_nbrs_W;
N_Prev_shared[idx_cache] = (int(isNvalid))+(int(isWvalid))+(int(isSvalid))+(int(isEvalid));
}
__syncthreads();
if (N_Prev_shared[idx_cache]<4) return; //BuG for less than 4
switch (idx_inside)
{
case (0):
if(!N_shared[idx_cache]) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,N_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,N_Count[idx_cache],beta);
break;
case (1):
if(!S_shared[idx_cache]) return;
if(S_shared[idx_cache]!=N_shared[idx_cache])
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,S_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,S_Count[idx_cache],beta);
break;
case (2):
if(!W_shared[idx_cache]) return;
if((W_shared[idx_cache]!=S_shared[idx_cache])&&(W_shared[idx_cache]!=N_shared[idx_cache]))
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,W_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,W_Count[idx_cache],beta);
case(3):
if(!E_shared[idx_cache]) return;
if((E_shared[idx_cache]!=W_shared[idx_cache])&&(E_shared[idx_cache]!=S_shared[idx_cache])&&(E_shared[idx_cache]!=N_shared[idx_cache]))
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,E_shared[idx_cache],J_i,logdet_Sigma_i,i_std,s_std,post_changes,E_Count[idx_cache],beta);
break;
}
return;
}
*/
__global__ void update_seg_subset(
    float* img, int* seg, int* seg_potts_label, bool* border,
    superpixel_params* sp_params,
    const float3 J_i, const float logdet_Sigma_i,
    bool cal_cov, float i_std, int s_std,
    const int nPts,const int nSuperpixels,
    const int xdim, const int ydim,
    const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
    // One thread per pixel. Border pixels on the (xmod3, ymod3) phase of a
    // 2x2 stride pattern may take the label of a N/S/W/E neighbour; the
    // candidates are scored via cal_posterior_new and the best one is
    // committed at the end.
    int label_check;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int seg_idx = idx;
    if (seg_idx >= nPts) return;

    // Only process the (xmod3, ymod3) phase of the 2x2 stride pattern.
    int x = seg_idx % xdim;
    if (x % 2 != xmod3) return;
    int y = seg_idx / xdim;
    if (y % 2 != ymod3) return;

    // Only boundary pixels are candidates for relabelling.
    if (border[seg_idx] == 0) return;

    // Skip the one-pixel image frame so all 8 neighbour reads stay in bounds.
    const bool x_greater_than_1 = (x > 1);
    const bool y_greater_than_1 = (y > 1);
    const bool x_smaller_than_xdim_minus_1 = x < (xdim - 1);
    const bool y_smaller_than_ydim_minus_1 = y < (ydim - 1);
    if ((!x_greater_than_1) || (!y_greater_than_1) ||
        (!x_smaller_than_xdim_minus_1) || (!y_smaller_than_ydim_minus_1)) return;

    bool nbrs[9];
    bool isNvalid = 0;
    bool isSvalid = 0;
    bool isEvalid = 0;
    bool isWvalid = 0;
    float beta = beta_potts_term;
    int count_diff_nbrs_N = 0;
    int count_diff_nbrs_S = 0;
    int count_diff_nbrs_E = 0;
    int count_diff_nbrs_W = 0;

    // Best (score, label) pair seen so far.
    // BUGFIX: res_max.y was previously left uninitialized; if no candidate
    // ever updated res_max (all scores <= -9999) the final write stored an
    // indeterminate label. Seed it with the pixel's current label instead.
    float2 res_max;
    res_max.x = -9999;
    res_max.y = seg[seg_idx];

    // 8-connected neighbour labels (in bounds thanks to the frame guard).
    int NW = __ldg(&seg[seg_idx - xdim - 1]);
    int N  = __ldg(&seg[seg_idx - xdim]);
    int NE = __ldg(&seg[seg_idx - xdim + 1]);
    int W  = __ldg(&seg[seg_idx - 1]);
    int E  = __ldg(&seg[seg_idx + 1]);
    int SW = __ldg(&seg[seg_idx + xdim - 1]);
    int S  = __ldg(&seg[seg_idx + xdim]);
    int SE = __ldg(&seg[seg_idx + xdim + 1]);

    // For each candidate label, count differing neighbours (feeds the Potts
    // term) and read a validity flag from nbrs[8]; any invalid candidate
    // aborts the whole update for this pixel (matches original behaviour).
    set_nbrs(NW, N, NE, W, E, SW, S, SE, N, nbrs);
    count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
    isNvalid = nbrs[8];
    if (!isNvalid) return;

    set_nbrs(NW, N, NE, W, E, SW, S, SE, W, nbrs);
    count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
    isWvalid = nbrs[8];
    if (!isWvalid) return;

    set_nbrs(NW, N, NE, W, E, SW, S, SE, S, nbrs);
    count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
    isSvalid = nbrs[8];
    if (!isSvalid) return;

    set_nbrs(NW, N, NE, W, E, SW, S, SE, E, nbrs);
    count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
    isEvalid = nbrs[8];
    if (!isEvalid) return;

    // Score each distinct candidate label; cal_posterior_new appears to
    // return the running (best score, best label) pair -- TODO confirm
    // against its definition (not visible in this chunk).
    label_check = N;
    res_max = cal_posterior_new(img, seg, x, y, sp_params, seg_idx, label_check, J_i, logdet_Sigma_i, i_std, s_std, post_changes, count_diff_nbrs_N, beta, res_max);
    label_check = S;
    if (label_check != N)
        res_max = cal_posterior_new(img, seg, x, y, sp_params, seg_idx, label_check, J_i, logdet_Sigma_i, i_std, s_std, post_changes, count_diff_nbrs_S, beta, res_max);
    label_check = W;
    if ((label_check != S) && (label_check != N))
        res_max = cal_posterior_new(img, seg, x, y, sp_params, seg_idx, label_check, J_i, logdet_Sigma_i, i_std, s_std, post_changes, count_diff_nbrs_W, beta, res_max);
    label_check = E;
    if ((label_check != W) && (label_check != S) && (label_check != N))
        res_max = cal_posterior_new(img, seg, x, y, sp_params, seg_idx, label_check, J_i, logdet_Sigma_i, i_std, s_std, post_changes, count_diff_nbrs_E, beta, res_max);

    // Commit the winning label (float -> int narrowing; exact for label
    // values below 2^24).
    seg[seg_idx] = res_max.y;
    return;
}
/*
__global__ void update_seg_subset(
float* img, int* seg, int* seg_potts_label, bool* border,
superpixel_params* sp_params,
const float3 J_i, const float logdet_Sigma_i,
bool cal_cov, float i_std, int s_std,
const int nPts,const int nSuperpixels,
const int xdim, const int ydim,
const int xmod3, const int ymod3, const float beta_potts_term, post_changes_helper* post_changes)
{
int label_check;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// idx = idx_img;
if (idx >= 4*nPts) return;
int seg_idx = idx/4;
int idx_inside = idx%4;
int x = seg_idx % xdim;
if (x % 2 != xmod3) return;
int y = seg_idx / xdim;
if (y % 2 != ymod3) return;
if (border[seg_idx]==0) return;
// strides of 2*2
int idx_cache = threadIdx.x/4;
float beta = 0;
//printf("(%d, %d) - %d, %d, %d \n", x,y , idx_cache,threadIdx.x );
const bool x_greater_than_1 = (x>1);
const bool y_greater_than_1 = (y>1);
const bool x_smaller_than_xdim_minus_1 = x<(xdim-1);
const bool y_smaller_than_ydim_minus_1 = y<(ydim-1);
if ((!x_greater_than_1)||(!y_greater_than_1)||(!x_smaller_than_xdim_minus_1)||(!y_smaller_than_ydim_minus_1)) return;
__shared__ int N_shared[THREADS_PER_BLOCK/4];
__shared__ int S_shared[THREADS_PER_BLOCK/4];
__shared__ int E_shared[THREADS_PER_BLOCK/4];
__shared__ int W_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Prev_shared[THREADS_PER_BLOCK/4];
__shared__ int N_Count[THREADS_PER_BLOCK/4];
__shared__ int S_Count[THREADS_PER_BLOCK/4];
__shared__ int E_Count[THREADS_PER_BLOCK/4];
__shared__ int W_Count[THREADS_PER_BLOCK/4];
if(idx_inside==0)
{
//int C = seg[seg_idx]; // center
int N,S,E,W, NW, NE , SW, SE; // north, south, east,west
// N = S = W = E = OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[9];
//float potts_term[4];
//potts_term[0] = potts_term[1] = potts_term[2] = potts_term[3] = 0;
bool isNvalid = 0 ;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0 ;
float beta = beta_potts_term;
int count_diff_nbrs_N=0;
int count_diff_nbrs_S=0;
int count_diff_nbrs_E=0;
int count_diff_nbrs_W=0;
//NW =N = NE =W = E = SW = S = SE=5 ;
// init
post_changes[seg_idx].post[4] = -9999;
NW =__ldg(&seg[seg_idx-xdim-1]);
N = __ldg(&seg[seg_idx-xdim]);
NE = __ldg(&seg[seg_idx-xdim+1]);
W = __ldg(&seg[seg_idx-1]);
E = __ldg(&seg[seg_idx+1]);
SW = __ldg(&seg[seg_idx+xdim-1]);
S = __ldg(&seg[seg_idx+xdim]);
SE =__ldg(&seg[seg_idx+xdim+1]);
//N :
set_nbrs(NW, N, NE, W, E, SW, S, SE,N, nbrs);
count_diff_nbrs_N = ischangbale_by_nbrs(nbrs);
isNvalid = nbrs[8];
//potts_term[0] = calc_potts(beta,count_diff_nbrs_N);
//W :
set_nbrs(NW, N, NE, W, E, SW, S, SE,W, nbrs);
count_diff_nbrs_W = ischangbale_by_nbrs(nbrs);
isWvalid = nbrs[8];
// potts_term[1] = calc_potts(beta,count_diff_nbrs_W);
//S :
set_nbrs(NW, N, NE, W, E, SW, S, SE,S, nbrs);
count_diff_nbrs_S = ischangbale_by_nbrs(nbrs);
isSvalid = nbrs[8];
//potts_term[2] = calc_potts(beta,count_diff_nbrs_S);
//E:
set_nbrs(NW, N, NE, W, E, SW, S, SE,E, nbrs);
// check 8 nbrs and save result if valid to change to the last place of array
// return how many nbrs different for potts term calculation
count_diff_nbrs_E = ischangbale_by_nbrs(nbrs);
isEvalid = nbrs[8];
//potts_term[3] = calc_potts(beta,count_diff_nbrs_E);
if(!isNvalid) N = 0;
if(!isWvalid) W = 0;
if(!isSvalid) S = 0;
if(!isEvalid) E = 0;
N_shared[idx_cache] = N;
W_shared[idx_cache] = W;
E_shared[idx_cache] = E;
S_shared[idx_cache] = S;
N_Count[idx_cache] = count_diff_nbrs_N;
S_Count[idx_cache] = count_diff_nbrs_S ;
E_Count[idx_cache] = count_diff_nbrs_E;
W_Count[idx_cache] = count_diff_nbrs_W;
if((!N)||(!W)||(!E)||(!S)) N_Prev_shared[idx_cache] = 1;
else N_Prev_shared[idx_cache] = 0;
//N_Prev_shared[idx_cache] = (int(isNvalid))+(int(isWvalid))+(int(isSvalid))+(int(isEvalid));
}
__syncthreads();
if (N_Prev_shared[idx_cache]) return; //BuG for less than 4
switch (idx_inside)
{
case (0):
label_check = N_shared[idx_cache];
if(!label_check) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,N_Count[idx_cache],beta);
break;
case (1):
label_check = S_shared[idx_cache];
if(!label_check) return;
if(label_check!=N_shared[idx_cache])
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,S_Count[idx_cache],beta);
break;
case (2):
label_check = W_shared[idx_cache];
if(!label_check) return;
if((label_check==S_shared[idx_cache])||(label_check==N_shared[idx_cache])) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,W_Count[idx_cache],beta);
break;
case(3):
label_check = E_shared[idx_cache];
if(!label_check) return;
if((label_check==W_shared[idx_cache])||(label_check==S_shared[idx_cache])||(label_check==N_shared[idx_cache])) return;
cal_posterior_new(img,seg,x,y,sp_params,seg_idx,label_check,J_i,logdet_Sigma_i,i_std,s_std,post_changes,E_Count[idx_cache],beta);
break;
}
return;
}
*/
|
87171933af5ec85f3c318a9126687ce2d0640b42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2013-2015, Gregory P. Meyer
University of Illinois Board of Trustees
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <dip/common/error.h>
#include <dip/common/types.h>
#define BLOCK_WIDTH 16
namespace dip {
__global__ void ComputeVertices(int width, int height, float fx, float fy,
                                float cx, float cy, const Depth *depth,
                                Vertices vertices) {
  // Back-projects every valid depth sample into a 3-D vertex using the
  // pinhole intrinsics (fx, fy = focal lengths, cx, cy = principal point).
  // Samples with depth <= 0 produce a (0, 0, 0) vertex.
  // Expects a BLOCK_WIDTH x BLOCK_WIDTH thread block.
  const int col = threadIdx.x + blockIdx.x * BLOCK_WIDTH;
  const int row = threadIdx.y + blockIdx.y * BLOCK_WIDTH;

  // Guard the partial tiles at the right/bottom image edges.
  if ((col >= width) || (row >= height))
    return;

  const int i = col + row * width;
  const Depth d = depth[i];
  if (d > 0) {
    vertices.x[i] = d * ((col - cx) / fx);
    vertices.y[i] = d * ((row - cy) / fy);
    vertices.z[i] = d;
  }
  else {
    vertices.x[i] = 0.0f;
    vertices.y[i] = 0.0f;
    vertices.z[i] = 0.0f;
  }
}
__global__ void ComputeNormals(int width, int height, Vertices vertices,
                               Normals normals) {
  // Computes per-pixel surface normals from the vertex map: each normal is
  // the normalized cross product of the vectors to the west (left) and
  // north (up) neighbouring vertices. Pixels whose own vertex or either
  // neighbour is invalid (z == 0) receive a zero normal.
  // Expects a BLOCK_WIDTH x BLOCK_WIDTH thread block.
  // Allocate Shared Memory
  __shared__ Vertex vs[BLOCK_WIDTH][BLOCK_WIDTH];

  // Get Block and Thread Id
  int bx = blockIdx.x; int by = blockIdx.y;
  int tx = threadIdx.x; int ty = threadIdx.y;

  // Calculate Row & Column
  int col = tx + bx * BLOCK_WIDTH;
  int row = ty + by * BLOCK_WIDTH;

  // Cooperative Load of the Tile
  if ((col < width) && (row < height)) {
    vs[ty][tx].x = vertices.x[col + row * width];
    vs[ty][tx].y = vertices.y[col + row * width];
    vs[ty][tx].z = vertices.z[col + row * width];
  }
  else {
    // Out-of-image threads stage a zero (invalid) vertex so every in-tile
    // shared-memory read below is defined.
    vs[ty][tx].x = 0.0f;
    vs[ty][tx].y = 0.0f;
    vs[ty][tx].z = 0.0f;
  }

  // Sync Threads in Block: the tile must be fully staged before any thread
  // reads a neighbour's entry.
  __syncthreads();

  // Compute Normals
  if ((col < width) && (row < height)) {
    int i = col + row * width;

    // Load Center Vertex
    Vertex center;

    center.x = vs[ty][tx].x;
    center.y = vs[ty][tx].y;
    center.z = vs[ty][tx].z;

    // Load Neighboring Vertices: from shared memory when the neighbour lies
    // inside this tile, otherwise fall back to a global-memory read.
    Vertex west, north;

    if ((col - 1) >= 0) {
      if ((tx - 1) >= 0) {
        west.x = vs[ty][tx - 1].x;
        west.y = vs[ty][tx - 1].y;
        west.z = vs[ty][tx - 1].z;
      }
      else {
        west.x = vertices.x[i - 1];
        west.y = vertices.y[i - 1];
        west.z = vertices.z[i - 1];
      }
    }
    else {
      // No west neighbour on the image border; z = 0 marks it invalid below.
      west.x = 0.0f;
      west.y = 0.0f;
      west.z = 0.0f;
    }

    if ((row - 1) >= 0) {
      if ((ty - 1) >= 0) {
        north.x = vs[ty - 1][tx].x;
        north.y = vs[ty - 1][tx].y;
        north.z = vs[ty - 1][tx].z;
      }
      else {
        north.x = vertices.x[i - width];
        north.y = vertices.y[i - width];
        north.z = vertices.z[i - width];
      }
    }
    else {
      // No north neighbour on the image border; z = 0 marks it invalid below.
      north.x = 0.0f;
      north.y = 0.0f;
      north.z = 0.0f;
    }

    if((center.z > 0.0f) && (west.z > 0.0f) && (north.z > 0.0f)) {
      // Compute Vectors from the centre vertex to its two neighbours.
      Vector left, up;

      left.x = west.x - center.x;
      left.y = west.y - center.y;
      left.z = west.z - center.z;

      up.x = north.x - center.x;
      up.y = north.y - center.y;
      up.z = north.z - center.z;

      // Perform Cross Product (left x up)
      Vector normal;

      normal.x = left.y * up.z - up.y * left.z;
      normal.y = up.x * left.z - left.x * up.z;
      normal.z = left.x * up.y - up.x * left.y;

      // Normalize to unit length.
      float inorm = rsqrt(normal.x * normal.x + normal.y * normal.y +
                          normal.z * normal.z);

      normals.x[i] = normal.x * inorm;
      normals.y[i] = normal.y * inorm;
      normals.z[i] = normal.z * inorm;
    }
    else {
      // Invalid depth at the centre or a neighbour: emit a zero normal.
      normals.x[i] = 0.0f;
      normals.y[i] = 0.0f;
      normals.z[i] = 0.0f;
    }
  }
}
void BackProjectionKernel(int width, int height, float fx, float fy,
                          float cx, float cy, const Depth *depth,
                          Vertices vertices, Normals normals) {
  // Host wrapper: computes the vertex map then the normal map for a
  // width x height depth image. The grid covers the image with
  // BLOCK_WIDTH x BLOCK_WIDTH tiles (ceil-div includes partial edge tiles).
  // Launch Back Projection Kernel
  int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;

  dim3 grid_dim(grid_width, grid_height, 1);
  dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);

  hipLaunchKernelGGL(( ComputeVertices), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, fx, fy, cx, cy, depth,
                                           vertices);
  // Kernel launches return no status directly: launch-configuration errors
  // only surface via hipGetLastError(); execution errors via the sync below.
  CUDA_ERROR_CHECK(hipGetLastError());
  CUDA_ERROR_CHECK(hipDeviceSynchronize());

  hipLaunchKernelGGL(( ComputeNormals), dim3(grid_dim), dim3(block_dim), 0, 0, width, height, vertices, normals);
  CUDA_ERROR_CHECK(hipGetLastError());
  CUDA_ERROR_CHECK(hipDeviceSynchronize());
}
} // namespace dip
| 87171933af5ec85f3c318a9126687ce2d0640b42.cu | /*
Copyright (c) 2013-2015, Gregory P. Meyer
University of Illinois Board of Trustees
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <dip/common/error.h>
#include <dip/common/types.h>
#define BLOCK_WIDTH 16
namespace dip {
__global__ void ComputeVertices(int width, int height, float fx, float fy,
                                float cx, float cy, const Depth *depth,
                                Vertices vertices) {
  // Back-projects each depth sample into a 3-D vertex using the pinhole
  // intrinsics (fx, fy = focal lengths, cx, cy = principal point).
  // Samples with depth <= 0 are treated as invalid and produce (0, 0, 0).
  // Expects a BLOCK_WIDTH x BLOCK_WIDTH thread block.
  // Get Block and Thread Id
  int bx = blockIdx.x; int by = blockIdx.y;
  int tx = threadIdx.x; int ty = threadIdx.y;

  // Calculate Row & Column
  int col = tx + bx * BLOCK_WIDTH;
  int row = ty + by * BLOCK_WIDTH;

  // Compute Vertices (bounds check covers partial tiles at the image edges)
  if ((col < width) && (row < height)) {
    int i = col + row * width;

    Depth depth_value = depth[i];

    if (depth_value > 0) {
      vertices.x[i] = depth_value * ((col - cx) / fx);
      vertices.y[i] = depth_value * ((row - cy) / fy);
      vertices.z[i] = depth_value;
    }
    else {
      // Invalid depth: zero vertex, recognised by ComputeNormals via z == 0.
      vertices.x[i] = 0.0f;
      vertices.y[i] = 0.0f;
      vertices.z[i] = 0.0f;
    }
  }
}
__global__ void ComputeNormals(int width, int height, Vertices vertices,
                               Normals normals) {
  // Computes per-pixel surface normals from the vertex map: each normal is
  // the normalized cross product of the vectors to the west (left) and
  // north (up) neighbouring vertices. Pixels whose own vertex or either
  // neighbour is invalid (z == 0) receive a zero normal.
  // Expects a BLOCK_WIDTH x BLOCK_WIDTH thread block.
  // Allocate Shared Memory
  __shared__ Vertex vs[BLOCK_WIDTH][BLOCK_WIDTH];

  // Get Block and Thread Id
  int bx = blockIdx.x; int by = blockIdx.y;
  int tx = threadIdx.x; int ty = threadIdx.y;

  // Calculate Row & Column
  int col = tx + bx * BLOCK_WIDTH;
  int row = ty + by * BLOCK_WIDTH;

  // Cooperative Load of the Tile
  if ((col < width) && (row < height)) {
    vs[ty][tx].x = vertices.x[col + row * width];
    vs[ty][tx].y = vertices.y[col + row * width];
    vs[ty][tx].z = vertices.z[col + row * width];
  }
  else {
    // Out-of-image threads stage a zero (invalid) vertex so every in-tile
    // shared-memory read below is defined.
    vs[ty][tx].x = 0.0f;
    vs[ty][tx].y = 0.0f;
    vs[ty][tx].z = 0.0f;
  }

  // Sync Threads in Block: the tile must be fully staged before any thread
  // reads a neighbour's entry.
  __syncthreads();

  // Compute Normals
  if ((col < width) && (row < height)) {
    int i = col + row * width;

    // Load Center Vertex
    Vertex center;

    center.x = vs[ty][tx].x;
    center.y = vs[ty][tx].y;
    center.z = vs[ty][tx].z;

    // Load Neighboring Vertices: from shared memory when the neighbour lies
    // inside this tile, otherwise fall back to a global-memory read.
    Vertex west, north;

    if ((col - 1) >= 0) {
      if ((tx - 1) >= 0) {
        west.x = vs[ty][tx - 1].x;
        west.y = vs[ty][tx - 1].y;
        west.z = vs[ty][tx - 1].z;
      }
      else {
        west.x = vertices.x[i - 1];
        west.y = vertices.y[i - 1];
        west.z = vertices.z[i - 1];
      }
    }
    else {
      // No west neighbour on the image border; z = 0 marks it invalid below.
      west.x = 0.0f;
      west.y = 0.0f;
      west.z = 0.0f;
    }

    if ((row - 1) >= 0) {
      if ((ty - 1) >= 0) {
        north.x = vs[ty - 1][tx].x;
        north.y = vs[ty - 1][tx].y;
        north.z = vs[ty - 1][tx].z;
      }
      else {
        north.x = vertices.x[i - width];
        north.y = vertices.y[i - width];
        north.z = vertices.z[i - width];
      }
    }
    else {
      // No north neighbour on the image border; z = 0 marks it invalid below.
      north.x = 0.0f;
      north.y = 0.0f;
      north.z = 0.0f;
    }

    if((center.z > 0.0f) && (west.z > 0.0f) && (north.z > 0.0f)) {
      // Compute Vectors from the centre vertex to its two neighbours.
      Vector left, up;

      left.x = west.x - center.x;
      left.y = west.y - center.y;
      left.z = west.z - center.z;

      up.x = north.x - center.x;
      up.y = north.y - center.y;
      up.z = north.z - center.z;

      // Perform Cross Product (left x up)
      Vector normal;

      normal.x = left.y * up.z - up.y * left.z;
      normal.y = up.x * left.z - left.x * up.z;
      normal.z = left.x * up.y - up.x * left.y;

      // Normalize to unit length.
      float inorm = rsqrt(normal.x * normal.x + normal.y * normal.y +
                          normal.z * normal.z);

      normals.x[i] = normal.x * inorm;
      normals.y[i] = normal.y * inorm;
      normals.z[i] = normal.z * inorm;
    }
    else {
      // Invalid depth at the centre or a neighbour: emit a zero normal.
      normals.x[i] = 0.0f;
      normals.y[i] = 0.0f;
      normals.z[i] = 0.0f;
    }
  }
}
void BackProjectionKernel(int width, int height, float fx, float fy,
                          float cx, float cy, const Depth *depth,
                          Vertices vertices, Normals normals) {
  // Host wrapper: computes the vertex map then the normal map for a
  // width x height depth image. The grid covers the image with
  // BLOCK_WIDTH x BLOCK_WIDTH tiles (ceil-div includes partial edge tiles).
  // Launch Back Projection Kernel
  int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;
  int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH;

  dim3 grid_dim(grid_width, grid_height, 1);
  dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);

  ComputeVertices<<<grid_dim, block_dim>>>(width, height, fx, fy, cx, cy, depth,
                                           vertices);
  // Kernel launches return no status directly: launch-configuration errors
  // only surface via cudaGetLastError(); execution errors via the sync below.
  CUDA_ERROR_CHECK(cudaGetLastError());
  CUDA_ERROR_CHECK(cudaDeviceSynchronize());

  ComputeNormals<<<grid_dim, block_dim>>>(width, height, vertices, normals);
  CUDA_ERROR_CHECK(cudaGetLastError());
  CUDA_ERROR_CHECK(cudaDeviceSynchronize());
}
} // namespace dip
|
67c7699104a16e4540a770f114c6baacbaa38e07.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "errors.hpp"
#define INT_PER_LINE 2
#define NFPGAS 48
#define NCHAN_COARSE 336
#define NCHAN_FINE_IN 32
#define NCHAN_FINE_OUT 27
#define NACCUMULATE 128
#define NPOL 2
#define NSAMPS 4
#define NSAMPS_SUMMED 2
#define NCHAN_SUM 16
#define NSAMP_PER_PACKET 128
#define NCHAN_PER_PACKET 7
__global__ void UnpackKernel(int2 *__restrict__ in, hipfftComplex *__restrict__ out) {
    // Unpacks NACCUMULATE packets for one 7-channel group (selected by
    // blockIdx.x) into per-polarisation hipfftComplex streams, performing a
    // corner-turn (time-major -> channel-major) through shared memory and a
    // 16-bit byte swap on each sample.
    // Assumes blockDim.x == NSAMP_PER_PACKET (128): the staging buffer holds
    // 7 * 128 entries and the read phase indexes it with threadIdx.x.
    int skip = 0;
    __shared__ int2 accblock[896];   // 7 channels x 128 samples staging buffer
    int chan = 0;
    int time = 0;
    int line = 0;
    hipfftComplex cpol;
    int polint;
    int outskip = 0;
    for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
        // NOTE: This is skipping whole words as in will be cast to int2
        skip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
        // Stage one packet: input is (time, chan) interleaved, buffer is
        // (chan, time) so the output loop below reads contiguously per channel.
        for (int ichunk = 0; ichunk < 7; ++ichunk) {
            line = ichunk * blockDim.x + threadIdx.x;
            chan = line % 7;
            time = line / 7;
            accblock[chan * NSAMP_PER_PACKET + time] = in[skip + line];
        }
        __syncthreads();   // staging complete before any thread reads it
        skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE;   // pol-1 output offset
        outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET;
        for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) {
            // Each 32-bit word holds two big-endian 16-bit components; the
            // shift/mask pairs byte-swap them into host-endian shorts.
            polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].y;
            cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
            cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
            out[outskip + threadIdx.x] = cpol;
            polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].x;
            cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
            cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
            out[skip + outskip + threadIdx.x] = cpol;
            outskip += NSAMP_PER_PACKET * NACCUMULATE;
        }
        // BUGFIX: barrier before the next iteration overwrites accblock.
        // Without it, threads still reading this packet's staged data race
        // with the next iteration's shared-memory writes.
        __syncthreads();
    }
}
__global__ void DetectScrunchKernel(
        hipComplex* __restrict__ in, // PFTF <-- FFT output order
        float* __restrict__ out // TF <-- Filterbank order
        )
{
    /**
     * This block is going to do 2 timesamples for all coarse channels.
     * The fine channels are dealt with by the lanes
     */
    // gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
    // Stage-1 scratch: detected power summed over pols and samples, one entry
    // per (coarse channel, kept fine channel) = 336 * 27 entries.
    __shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
    int warp_idx = threadIdx.x >> 0x5;   // warp index within the block
    int lane_idx = threadIdx.x & 0x1f;   // lane index within the warp
    // Stride between polarisations in the PFTF input layout.
    int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
    // Stride between coarse channels ("offet" typo kept: identifier, not text).
    int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
    int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
    int nwarps_per_block = blockDim.x/warpSize;
    //Drop first 3 fine channels and last 2 fine channels
    // (lanes 3..29 keep NCHAN_FINE_OUT == 27 fine channels; bitwise '&' acts
    // as logical AND here since both comparisons yield 0/1)
    if ((lane_idx > 2) & (lane_idx < 30))
    {
        // This warp
        // first sample in inner dimension = (32 * 2 * blockIdx.x)
        // This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
        for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
        {
            float real = 0.0f;
            float imag = 0.0f;
            int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + lane_idx;
            for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
            {
                int offset = base_offset + pol_offset * pol_idx;
                for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
                {
                    //Get first channel
                    // IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
                    // + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
                    // + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
                    // + NCHAN_FINE_IN * sample_idx
                    // + lane_idx;
                    hipComplex val = in[offset + NCHAN_FINE_IN * sample_idx];
                    real += val.x * val.x;
                    imag += val.y * val.y;
                }
                // 3 is the leading dead lane count
                // sketchy
                // NOTE(review): this store sits inside the pol loop, so the
                // buffer is written once per pol and only the final (all-pol)
                // sum survives. Harmless, but it looks like it was intended to
                // sit one brace later -- confirm before relying on it.
                freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx - 3] = real + imag;
            }
        }
    }
    __syncthreads();
    /**
     * Here each warp will reduce 32 channels into 2 channels
     * The last warp will have a problem that there will only be 16 values to process
     *
     */
    // Stage 2: sum groups of NCHAN_SUM adjacent fine channels into the
    // output. Needs blockDim.x >= NCHAN_FINE_OUT*NCHAN_COARSE/NCHAN_SUM
    // (567) to cover all output channels -- TODO confirm against the launch
    // configuration (not visible in this file chunk).
    if (threadIdx.x < (NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM))
    {
        float sum = 0.0;
        for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx)
        {
            sum += freq_sum_buffer[chan_idx];
        }
        out[NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM * blockIdx.x + threadIdx.x] = sum;
    }
    return;
}
int main(int argc, char *argv[])
{
// unsigned short polai;
// unsigned short polaq;
//
// unsigned short polbi;
// unsigned short polbq;
size_t toread = 8 * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NFPGAS * NACCUMULATE;
unsigned char *codifarray = new unsigned char[toread];
for (int ifpga = 0; ifpga < 48; ++ifpga) {
for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
for (int isamp = 0; isamp < 128; ++isamp) {
for (int ichan = 0; ichan < 7; ++ichan) {
// polai = ((ifpga << 10) | (isamp << 2) | 0x0);
// polaq = ((ifpga << 10) | (isamp << 2) | 0x2);
// polbi = ((ifpga << 10) | (isamp << 2) | 0x1);
// polbq = ((ifpga << 10) | (isamp << 2) | 0x3);
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 0] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 1] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 2] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 3] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 4] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 5] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 6] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 7] = 0;
if((ifpga == 0) && (ichan == 0)) {
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 0] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 1] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 2] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 3] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 4] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 5] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 6] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 7] = 2;
}
}
}
}
}
unsigned char *devdata;
cudaCheckError(hipMalloc((void**)&devdata, toread * sizeof(unsigned char)));
cudaCheckError(hipMemcpy(devdata, codifarray, toread * sizeof(unsigned char), hipMemcpyHostToDevice));
hipfftComplex *unpacked;
cudaCheckError(hipMalloc((void**)&unpacked, toread / 8 * sizeof(hipfftComplex)));
int sizes[] = {32};
hipfftHandle fftplan;
cufftCheckError(hipfftPlanMany(&fftplan, 1, sizes, NULL, 1, sizes[0], NULL, 1, sizes[0], HIPFFT_C2C, 336 * NACCUMULATE * 4));
float *detected;
cudaCheckError(hipMalloc((void**)&detected, NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED * sizeof(float)));
std::cout << "Running the kernels..." << std::endl;
hipLaunchKernelGGL(( UnpackKernel), dim3(48), dim3(128), 0, 0, reinterpret_cast<int2*>(devdata), unpacked);
cufftCheckError(hipfftExecC2C(fftplan, unpacked, unpacked, HIPFFT_FORWARD));
hipLaunchKernelGGL(( DetectScrunchKernel), dim3(2 * NACCUMULATE), dim3(1024), 0, 0, unpacked, detected);
cudaCheckError(hipDeviceSynchronize());
std::cout << "Copying the data back..." << std::endl;
float *dataarray = new float[NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED];
cudaCheckError(hipMemcpy(dataarray, detected, NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED * sizeof(float), hipMemcpyDeviceToHost));
std::ofstream outdata("detected.dat");
if (!outdata) {
std::cerr << "Could not create the output file!" << std::endl;
exit(EXIT_FAILURE);
}
for (int isamp = 0; isamp < NACCUMULATE * 128 / 32 / NSAMPS_SUMMED; ++isamp) {
for (int ichan = 0; ichan < NCHAN_COARSE * NCHAN_FINE_OUT / 16; ++ichan) {
outdata << dataarray[isamp * NCHAN_COARSE * NCHAN_FINE_OUT / 16 + ichan] << " ";
}
outdata << std::endl;
}
outdata.close();
delete [] dataarray;
cudaCheckError(hipFree(detected));
cudaCheckError(hipFree(devdata));
delete [] codifarray;
return 0;
}
| 67c7699104a16e4540a770f114c6baacbaa38e07.cu | #include <cstdlib>
#include <fstream>
#include <iostream>
#include <cuda.h>
#include <cufft.h>
#include "errors.hpp"
#define INT_PER_LINE 2
#define NFPGAS 48
#define NCHAN_COARSE 336
#define NCHAN_FINE_IN 32
#define NCHAN_FINE_OUT 27
#define NACCUMULATE 128
#define NPOL 2
#define NSAMPS 4
#define NSAMPS_SUMMED 2
#define NCHAN_SUM 16
#define NSAMP_PER_PACKET 128
#define NCHAN_PER_PACKET 7
__global__ void UnpackKernel(int2 *__restrict__ in, cufftComplex *__restrict__ out) {
int skip = 0;
__shared__ int2 accblock[896];
int chan = 0;
int time = 0;
int line = 0;
cufftComplex cpol;
int polint;
int outskip = 0;
for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
// NOTE: This is skipping whole words as in will be cast to int2
// skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
skip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
for (int ichunk = 0; ichunk < 7; ++ichunk) {
line = ichunk * blockDim.x + threadIdx.x;
chan = line % 7;
time = line / 7;
accblock[chan * NSAMP_PER_PACKET + time] = in[skip + line];
}
__syncthreads();
skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE;
outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET;
for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) {
polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].y;
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[outskip + threadIdx.x] = cpol;
polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].x;
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[skip + outskip + threadIdx.x] = cpol;
outskip += NSAMP_PER_PACKET * NACCUMULATE;
}
}
}
__global__ void DetectScrunchKernel(
cuComplex* __restrict__ in, // PFTF <-- FFT output order
float* __restrict__ out // TF <-- Filterbank order
)
{
/**
* This block is going to do 2 timesamples for all coarse channels.
* The fine channels are dealt with by the lanes
*/
// gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
__shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
int warp_idx = threadIdx.x >> 0x5;
int lane_idx = threadIdx.x & 0x1f;
int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
int nwarps_per_block = blockDim.x/warpSize;
//Drop first 3 fine channels and last 2 fine channels
if ((lane_idx > 2) & (lane_idx < 30))
{
// This warp
// first sample in inner dimension = (32 * 2 * blockIdx.x)
// This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
{
float real = 0.0f;
float imag = 0.0f;
int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + lane_idx;
for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
{
int offset = base_offset + pol_offset * pol_idx;
for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
{
//Get first channel
// IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
// + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
// + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
// + NCHAN_FINE_IN * sample_idx
// + lane_idx;
cuComplex val = in[offset + NCHAN_FINE_IN * sample_idx];
real += val.x * val.x;
imag += val.y * val.y;
}
// 3 is the leading dead lane count
// sketchy
freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx - 3] = real + imag;
}
}
}
__syncthreads();
/**
* Here each warp will reduce 32 channels into 2 channels
* The last warp will have a problem that there will only be 16 values to process
*
*/
if (threadIdx.x < (NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM))
{
float sum = 0.0;
for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx)
{
sum += freq_sum_buffer[chan_idx];
}
out[NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM * blockIdx.x + threadIdx.x] = sum;
}
return;
}
int main(int argc, char *argv[])
{
// unsigned short polai;
// unsigned short polaq;
//
// unsigned short polbi;
// unsigned short polbq;
size_t toread = 8 * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NFPGAS * NACCUMULATE;
unsigned char *codifarray = new unsigned char[toread];
for (int ifpga = 0; ifpga < 48; ++ifpga) {
for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
for (int isamp = 0; isamp < 128; ++isamp) {
for (int ichan = 0; ichan < 7; ++ichan) {
// polai = ((ifpga << 10) | (isamp << 2) | 0x0);
// polaq = ((ifpga << 10) | (isamp << 2) | 0x2);
// polbi = ((ifpga << 10) | (isamp << 2) | 0x1);
// polbq = ((ifpga << 10) | (isamp << 2) | 0x3);
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 0] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 1] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 2] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 3] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 4] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 5] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 6] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 7] = 0;
if((ifpga == 0) && (ichan == 0)) {
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 0] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 1] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 2] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 3] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 4] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 5] = 2;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 6] = 0;
codifarray[(ifpga * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET * NCHAN_PER_PACKET + isamp * NCHAN_PER_PACKET + ichan) * 8 + 7] = 2;
}
}
}
}
}
unsigned char *devdata;
cudaCheckError(cudaMalloc((void**)&devdata, toread * sizeof(unsigned char)));
cudaCheckError(cudaMemcpy(devdata, codifarray, toread * sizeof(unsigned char), cudaMemcpyHostToDevice));
cufftComplex *unpacked;
cudaCheckError(cudaMalloc((void**)&unpacked, toread / 8 * sizeof(cufftComplex)));
int sizes[] = {32};
cufftHandle fftplan;
cufftCheckError(cufftPlanMany(&fftplan, 1, sizes, NULL, 1, sizes[0], NULL, 1, sizes[0], CUFFT_C2C, 336 * NACCUMULATE * 4));
float *detected;
cudaCheckError(cudaMalloc((void**)&detected, NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED * sizeof(float)));
std::cout << "Running the kernels..." << std::endl;
UnpackKernel<<<48, 128, 0>>>(reinterpret_cast<int2*>(devdata), unpacked);
cufftCheckError(cufftExecC2C(fftplan, unpacked, unpacked, CUFFT_FORWARD));
DetectScrunchKernel<<<2 * NACCUMULATE, 1024, 0>>>(unpacked, detected);
cudaCheckError(cudaDeviceSynchronize());
std::cout << "Copying the data back..." << std::endl;
float *dataarray = new float[NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED];
cudaCheckError(cudaMemcpy(dataarray, detected, NCHAN_COARSE * NCHAN_FINE_OUT / 16 * NACCUMULATE * 128 / 32 / NSAMPS_SUMMED * sizeof(float), cudaMemcpyDeviceToHost));
std::ofstream outdata("detected.dat");
if (!outdata) {
std::cerr << "Could not create the output file!" << std::endl;
exit(EXIT_FAILURE);
}
for (int isamp = 0; isamp < NACCUMULATE * 128 / 32 / NSAMPS_SUMMED; ++isamp) {
for (int ichan = 0; ichan < NCHAN_COARSE * NCHAN_FINE_OUT / 16; ++ichan) {
outdata << dataarray[isamp * NCHAN_COARSE * NCHAN_FINE_OUT / 16 + ichan] << " ";
}
outdata << std::endl;
}
outdata.close();
delete [] dataarray;
cudaCheckError(cudaFree(detected));
cudaCheckError(cudaFree(devdata));
delete [] codifarray;
return 0;
}
|
763e962edda1119a3f751fff583713080e42f013.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/hip/detail/IndexUtils.cuh"
#include "ATen/hip/detail/TensorInfo.cuh"
#include "hiprand/hiprand_kernel.h"
#include <THH/THHGeneral.h>
#include <THH/THHTensorRandom.h>
#include <THH/THHGenerator.hpp>
THCGenerator* THCRandom_getGenerator(THCState* state);
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
const int UNROLL = 4;
std::pair<uint64_t, uint64_t> next_philox_seed(uint64_t increment) {
auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
return std::make_pair(gen_->state.initial_seed, offset);
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(256,8)
#endif
__global__ void
fused_dropout_add_kernel(at::cuda::detail::TensorInfo<scalar_t, IndexType> input,
at::cuda::detail::TensorInfo<scalar_t, IndexType> input_add,
at::cuda::detail::TensorInfo<scalar_t, IndexType> ret,
at::cuda::detail::TensorInfo<uint8_t, IndexType> mask,
IndexType totalElements, accscalar_t prob, std::pair<uint64_t, uint64_t> seeds
) {
//accscalar_t pinv = accscalar_t(1)/prob;
float pinv = 1.0/(float)prob;
IndexType idx = blockIdx.x * blockDim.x*UNROLL + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
float4 rand = hiprand_uniform4(&state);
scalar_t src[UNROLL];
scalar_t src_add[UNROLL];
rand.x = rand.x < prob;
rand.y = rand.y < prob;
rand.z = rand.z < prob;
rand.w = rand.w < prob;
IndexType offset = idx;
for (int ii = 0; ii < UNROLL; ii++) {
if (offset < totalElements) {
src[ii] = input.data[offset];
src_add[ii] = input_add.data[offset];
}
offset += blockDim.x;
}
offset = idx;
for (int ii = 0; ii < UNROLL; ii++) {
if (offset < totalElements) {
ret.data[offset] = src[ii]*(&rand.x)[ii]*pinv + src_add[ii];
mask.data[offset] = (uint8_t)(&rand.x)[ii];
}
offset += blockDim.x;
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){
ret_val = (float)mask_val * src_val * scale;
});
}
std::vector<at::Tensor>
fused_dropout_add_cuda(const at::Tensor& input, const at::Tensor& input_add, double prob){
at::Tensor ret = at::empty_like(input);
at::Tensor mask = at::empty(input.sizes(), input.options().dtype(at::kByte));
const int64_t nelem = input.numel();
const int64_t block_size = 256;
dim3 dim_block(block_size);
dim3 grid((nelem + (block_size*UNROLL) -1)/(block_size*UNROLL));
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = UNROLL ; //((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
if (at::cuda::detail::canUse32BitIndexMath(input)){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "fused_dropout_add", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(prob);
auto input_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(input);
auto input_add_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(input_add);
auto ret_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
auto mask_info = at::cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
input_info.collapseDims();
input_add_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
switch (input_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_add_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
break;
default:
hipLaunchKernelGGL(( fused_dropout_add_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
}
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "fused_dropout_add", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(prob);
auto input_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(input);
auto input_add_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(input_add);
auto ret_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
auto mask_info = at::cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
input_info.collapseDims();
input_add_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
switch (input_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_add_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
break;
default:
hipLaunchKernelGGL(( fused_dropout_add_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
}
});
}
THCudaCheck(hipGetLastError());
return {ret, mask};
}
at::Tensor fused_dropout_add_backward_cuda(const at::Tensor& grad, const at::Tensor& mask, double scale){
at::Tensor ret = at::empty_like(grad);
AT_CHECK(mask.type().scalarType() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "masked_scale", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(scale);
masked_scale_kernel<scalar_t, accscalar_t>(ret, grad, mask, proba);
});
return ret;
}
| 763e962edda1119a3f751fff583713080e42f013.cu | #include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include "ATen/cuda/detail/TensorInfo.cuh"
#include "curand_kernel.h"
#include <THC/THCGeneral.h>
#include <THC/THCTensorRandom.h>
#include <THC/THCGenerator.hpp>
THCGenerator* THCRandom_getGenerator(THCState* state);
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
const int UNROLL = 4;
std::pair<uint64_t, uint64_t> next_philox_seed(uint64_t increment) {
auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState());
uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment);
return std::make_pair(gen_->state.initial_seed, offset);
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(256,8)
#endif
__global__ void
fused_dropout_add_kernel(at::cuda::detail::TensorInfo<scalar_t, IndexType> input,
at::cuda::detail::TensorInfo<scalar_t, IndexType> input_add,
at::cuda::detail::TensorInfo<scalar_t, IndexType> ret,
at::cuda::detail::TensorInfo<uint8_t, IndexType> mask,
IndexType totalElements, accscalar_t prob, std::pair<uint64_t, uint64_t> seeds
) {
//accscalar_t pinv = accscalar_t(1)/prob;
float pinv = 1.0/(float)prob;
IndexType idx = blockIdx.x * blockDim.x*UNROLL + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
idx,
seeds.second,
&state);
float4 rand = curand_uniform4(&state);
scalar_t src[UNROLL];
scalar_t src_add[UNROLL];
rand.x = rand.x < prob;
rand.y = rand.y < prob;
rand.z = rand.z < prob;
rand.w = rand.w < prob;
IndexType offset = idx;
for (int ii = 0; ii < UNROLL; ii++) {
if (offset < totalElements) {
src[ii] = input.data[offset];
src_add[ii] = input_add.data[offset];
}
offset += blockDim.x;
}
offset = idx;
for (int ii = 0; ii < UNROLL; ii++) {
if (offset < totalElements) {
ret.data[offset] = src[ii]*(&rand.x)[ii]*pinv + src_add[ii];
mask.data[offset] = (uint8_t)(&rand.x)[ii];
}
offset += blockDim.x;
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){
ret_val = (float)mask_val * src_val * scale;
});
}
std::vector<at::Tensor>
fused_dropout_add_cuda(const at::Tensor& input, const at::Tensor& input_add, double prob){
at::Tensor ret = at::empty_like(input);
at::Tensor mask = at::empty(input.sizes(), input.options().dtype(at::kByte));
const int64_t nelem = input.numel();
const int64_t block_size = 256;
dim3 dim_block(block_size);
dim3 grid((nelem + (block_size*UNROLL) -1)/(block_size*UNROLL));
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = UNROLL ; //((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
if (at::cuda::detail::canUse32BitIndexMath(input)){
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "fused_dropout_add", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(prob);
auto input_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(input);
auto input_add_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(input_add);
auto ret_info = at::cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
auto mask_info = at::cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
input_info.collapseDims();
input_add_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
switch (input_info.dims) {
case 1:
fused_dropout_add_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
break;
default:
fused_dropout_add_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
}
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "fused_dropout_add", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(prob);
auto input_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(input);
auto input_add_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(input_add);
auto ret_info = at::cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
auto mask_info = at::cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
input_info.collapseDims();
input_add_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
switch (input_info.dims) {
case 1:
fused_dropout_add_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
break;
default:
fused_dropout_add_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(input_info, input_add_info, ret_info, mask_info, nelem, proba, next_philox_seed(counter_offset));
}
});
}
THCudaCheck(cudaGetLastError());
return {ret, mask};
}
at::Tensor fused_dropout_add_backward_cuda(const at::Tensor& grad, const at::Tensor& mask, double scale){
at::Tensor ret = at::empty_like(grad);
AT_CHECK(mask.type().scalarType() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.type(), "masked_scale", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
accscalar_t proba = (accscalar_t)(scale);
masked_scale_kernel<scalar_t, accscalar_t>(ret, grad, mask, proba);
});
return ret;
}
|
f8cf71ffe018414d71e4213b7b491cf2d9b59d88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SahDecompress.cuh"
namespace sahdecompress {
void initHash(KeyValuePair * primitiveIndirections,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( initHash_kernel), dim3(grid), dim3(block), 0, 0, primitiveIndirections,
n);
}
void countLeaves(uint * leafLengths,
int * qelements,
int2 * nodes,
KeyValuePair * indirections,
uint * runHeads,
uint numHeads,
uint numPrimitives,
uint numNodes,
uint scanLength)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(scanLength, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( countLeaves_kernel), dim3(grid), dim3(block), 0, 0, leafLengths,
qelements,
nodes,
indirections,
runHeads,
numHeads,
numPrimitives,
numNodes,
scanLength);
}
void copyHash(KeyValuePair * dst, KeyValuePair * src,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( copyHash_kernel), dim3(grid), dim3(block), 0, 0, dst,
src,
n);
}
void decompressIndices(uint * decompressedIndices,
uint * compressedIndices,
KeyValuePair * sorted,
uint * offset,
uint * runLength,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( decompressIndices_kernel), dim3(grid), dim3(block), 0, 0, decompressedIndices,
compressedIndices,
sorted,
offset,
runLength,
n);
}
void decompressPrimitives(KeyValuePair * dst,
KeyValuePair * src,
int2 * nodes,
KeyValuePair * indirections,
uint* leafOffset,
uint * runHeads,
uint numHeads,
uint numPrimitives,
uint numNodes)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(numNodes, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( decompressPrimitives_kernel), dim3(grid), dim3(block), 0, 0, dst,
src,
nodes,
indirections,
leafOffset,
runHeads,
numHeads,
numPrimitives,
numNodes);
}
void writeSortedHash(KeyValuePair * dst,
KeyValuePair * src,
uint * indices,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( writeSortedHash_kernel), dim3(grid), dim3(block), 0, 0, dst,
src,
indices,
n);
}
void rearrangeIndices(KeyValuePair * dst,
KeyValuePair * src,
uint * compressedIndices,
KeyValuePair * sorted,
uint * offset,
uint * runLength,
uint nunRuns)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(nunRuns, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( rearrangeIndices_kernel), dim3(grid), dim3(block), 0, 0, dst,
src,
compressedIndices,
sorted,
offset,
runLength,
nunRuns);
}
}
| f8cf71ffe018414d71e4213b7b491cf2d9b59d88.cu | #include "SahDecompress.cuh"
namespace sahdecompress {
void initHash(KeyValuePair * primitiveIndirections,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
initHash_kernel<<< grid, block>>>(primitiveIndirections,
n);
}
void countLeaves(uint * leafLengths,
int * qelements,
int2 * nodes,
KeyValuePair * indirections,
uint * runHeads,
uint numHeads,
uint numPrimitives,
uint numNodes,
uint scanLength)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(scanLength, tpb);
dim3 grid(nblk, 1, 1);
countLeaves_kernel<<< grid, block>>>(leafLengths,
qelements,
nodes,
indirections,
runHeads,
numHeads,
numPrimitives,
numNodes,
scanLength);
}
void copyHash(KeyValuePair * dst, KeyValuePair * src,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
copyHash_kernel<<< grid, block>>>(dst,
src,
n);
}
void decompressIndices(uint * decompressedIndices,
uint * compressedIndices,
KeyValuePair * sorted,
uint * offset,
uint * runLength,
uint n)
{
const int tpb = 512;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(n, tpb);
dim3 grid(nblk, 1, 1);
decompressIndices_kernel<<< grid, block>>>(decompressedIndices,
compressedIndices,
sorted,
offset,
runLength,
n);
}
// Host-side launcher for decompressPrimitives_kernel.
// Grid is sized over numNodes work items (512 threads per 1-D block);
// all other arguments are forwarded to the kernel untouched.
void decompressPrimitives(KeyValuePair * dst,
                          KeyValuePair * src,
                          int2 * nodes,
                          KeyValuePair * indirections,
                          uint* leafOffset,
                          uint * runHeads,
                          uint numHeads,
                          uint numPrimitives,
                          uint numNodes)
{
    const int threadsPerBlock = 512;
    const unsigned blockCount = iDivUp(numNodes, threadsPerBlock);
    decompressPrimitives_kernel<<< dim3(blockCount, 1, 1), dim3(threadsPerBlock, 1, 1) >>>(
        dst, src, nodes, indirections, leafOffset,
        runHeads, numHeads, numPrimitives, numNodes);
}
// Host-side launcher for writeSortedHash_kernel: writes n pairs from src
// to dst according to indices. Grid = ceil(n / 512) blocks of 512 threads.
void writeSortedHash(KeyValuePair * dst,
                     KeyValuePair * src,
                     uint * indices,
                     uint n)
{
    const int threadsPerBlock = 512;
    const unsigned blockCount = iDivUp(n, threadsPerBlock);
    writeSortedHash_kernel<<< dim3(blockCount, 1, 1), dim3(threadsPerBlock, 1, 1) >>>(
        dst, src, indices, n);
}
// Host-side launcher for rearrangeIndices_kernel.
// Covers nunRuns work items with 1-D blocks of 512 threads
// (grid size = ceil(nunRuns / 512) via iDivUp).
void rearrangeIndices(KeyValuePair * dst,
                      KeyValuePair * src,
                      uint * compressedIndices,
                      KeyValuePair * sorted,
                      uint * offset,
                      uint * runLength,
                      uint nunRuns)
{
    const int threadsPerBlock = 512;
    const unsigned blockCount = iDivUp(nunRuns, threadsPerBlock);
    rearrangeIndices_kernel<<< dim3(blockCount, 1, 1), dim3(threadsPerBlock, 1, 1) >>>(
        dst, src, compressedIndices, sorted,
        offset, runLength, nunRuns);
}
}
|
2f0592b262f94ac2a9c93cdb2fe9d5f1bf713c25.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file bfs_app.cu
*
* @brief Gunrock breadth-first search (BFS) application
*/
#include <gunrock/app/app.cuh>
// breadth-first search includes
// #include <gunrock/app/lp/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_enactor.cuh>
// #include <gunrock/app/lp/bfs_test.cuh>
// #include <gunrock/app/lp/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_test.cuh>
namespace gunrock {
namespace app {
namespace bfs {
// Registers the BFS-specific command-line parameters on top of the
// generic app / problem / enactor parameter sets.
// Fix: the parameter declaration was garbled by an HTML-entity
// conversion ("&para" rendered as the pilcrow sign), breaking the
// reference parameter; restored to `&parameters`.
// \return hipError_t error message(s), if any
hipError_t UseParameters(util::Parameters &parameters) {
  hipError_t retval = hipSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));
  // "src": one or more source vertices, or a selection strategy keyword.
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));
  // "src-seed": RNG seed used when sources are selected randomly.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run BFS tests
* @tparam GraphT Type of the graph
* @tparam LabelT Type of the labels
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_labels Reference labels
* @param[in] target Whether to perform the BFS
* \return hipError_t error message(s), if any
*/
// Runs BFS num-runs times (sources consumed round-robin from "srcs"),
// optionally validating each run or only the last one, then collects
// traversal statistics and releases all GPU state.
// Fix: restored the HTML-entity-garbled `&parameters` reference in the
// signature (was rendered as a pilcrow sign).
template <typename GraphT, typename LabelT = typename GraphT::VertexT>
hipError_t RunTests(util::Parameters &parameters, GraphT &graph,
                    LabelT **ref_labels = NULL,
                    util::Location target = util::DEVICE) {
  hipError_t retval = hipSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();

  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  bool quick_mode = parameters.Get<bool>("quick");
  bool mark_pred = parameters.Get<bool>("mark-pred");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();

  util::Info info("BFS", parameters, graph);  // initialize Info structure

  // Allocate host-side arrays (for both reference and GPU-computed results);
  // predecessors are only materialized when mark-pred is requested.
  LabelT *h_labels = new LabelT[graph.nodes];
  VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());

  // perform BFS; sources are consumed round-robin:
  // run i uses srcs[i % num_srcs].
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    src = srcs[run_num % num_srcs];
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run.
    if (!quick_mode && validation == "each") {
      GUARD_CU(problem.Extract(h_labels, h_preds));
      SizeT num_errors = app::bfs::Validate_Results(
          parameters, graph, src, h_labels, h_preds,
          ref_labels == NULL ? NULL : ref_labels[run_num % num_srcs], (VertexT*)NULL,
          false);
    }
  }

  cpu_timer.Start();
  // Copy out results; validate only the final run if requested.
  GUARD_CU(problem.Extract(h_labels, h_preds));
  if (!quick_mode && validation == "last") {
    SizeT num_errors = app::bfs::Validate_Results(
        parameters, graph, src, h_labels, h_preds,
        ref_labels == NULL ? NULL : ref_labels[(num_runs - 1) % num_srcs]);
  }

  // compute running statistics
  info.ComputeTraversalStats(enactor, h_labels);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif

  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_labels;
  h_labels = NULL;
  delete[] h_preds;
  h_preds = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace bfs
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bfs function
* @tparam GraphT Type of the graph
* @tparam LabelT Type of the labels
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] labels Return shortest hop distance from source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Library entry point: runs BFS num-runs times, extracting labels (and
// optionally predecessors) per source, and returns the accumulated
// Enact() time in milliseconds (reset/extract time is not counted).
// Fix: restored the HTML-entity-garbled `&parameters` reference in the
// signature (was rendered as a pilcrow sign).
template <typename GraphT, typename LabelT = typename GraphT::VertexT>
double gunrock_bfs(gunrock::util::Parameters &parameters, GraphT &graph,
                   LabelT **labels, typename GraphT::VertexT **preds = NULL) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::bfs::Problem<GraphT> ProblemT;
  typedef gunrock::app::bfs::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);

  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_runs = parameters.Get<int>("num-runs");
  // NOTE(review): assumes "srcs" is non-empty; the modulo below divides
  // by num_srcs.
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
    // Results for run i land in the slot of the source it used.
    problem.Extract(labels[src_num], preds == NULL ? NULL : preds[src_num]);
  }

  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BFS
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Wraps a caller-provided CSR graph (zero-copy: host pointers are adopted,
// not duplicated) in a Gunrock TestGraph, configures run parameters, and
// dispatches to gunrock_bfs. Returns accumulated elapsed time (ms).
template <typename VertexT = int, typename SizeT = int,
          typename LabelT = VertexT>
double bfs(const SizeT num_nodes, const SizeT num_edges,
           const SizeT *row_offsets, const VertexT *col_indices,
           const int num_runs, VertexT *sources, const bool mark_pred,
           const bool direction_optimized, const bool idempotence,
           LabelT **labels, VertexT **preds = NULL) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, VertexT,
                                           gunrock::graph::HAS_CSR |
                                               gunrock::graph::HAS_CSC>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters: registration must happen before any Set/Get,
  // and all values must be in place before LoadGraph below.
  gunrock::util::Parameters parameters("bfs");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::bfs::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("mark-pred", mark_pred);
  parameters.Set("num-runs", num_runs);
  parameters.Set("direction-optimized", direction_optimized);
  parameters.Set("idempotence", idempotence);
  // One source per run.
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (const is cast away, but the
  // arrays are only read; they must outlive the call).
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
                                     gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
                                        gunrock::util::HOST);
  graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
  gunrock::graphio::LoadGraph(parameters, graph);
  // Run the BFS
  double elapsed_time = gunrock_bfs(parameters, graph, labels, preds);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
/*
* @brief Simple C-interface take in graph as CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] source Source to begin traverse
* @param[in] mark_preds Whether to output predecessor info
* @param[in] direction_optimized Whether to use directional optimizing BFS
* @param[in] idempotence Whether to use idempotence
* @param[out] labels Return shortest hop distances to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Single-source C-style convenience overload: forwards to the multi-run
// template above with exactly one run and one source.
double bfs(const int num_nodes, const int num_edges, const int *row_offsets,
           const int *col_indices, int source, const bool mark_pred,
           const bool direction_optimized, const bool idempotence,
           int *distances, int *preds) {
  int *label_arrays[1] = {distances};
  int *pred_arrays[1] = {preds};
  return bfs(num_nodes, num_edges, row_offsets, col_indices, 1, &source,
             mark_pred, direction_optimized, idempotence, label_arrays,
             pred_arrays);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 2f0592b262f94ac2a9c93cdb2fe9d5f1bf713c25.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file bfs_app.cu
*
* @brief Gunrock breadth-first search (BFS) application
*/
#include <gunrock/app/app.cuh>
// breadth-first search includes
// #include <gunrock/app/lp/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_enactor.cuh>
// #include <gunrock/app/lp/bfs_test.cuh>
// #include <gunrock/app/lp/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_test.cuh>
namespace gunrock {
namespace app {
namespace bfs {
// Registers the BFS-specific command-line parameters on top of the
// generic app / problem / enactor parameter sets.
// Fix: the parameter declaration was garbled by an HTML-entity
// conversion ("&para" rendered as the pilcrow sign), breaking the
// reference parameter; restored to `&parameters`.
// \return cudaError_t error message(s), if any
cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));
  // "src": one or more source vertices, or a selection strategy keyword.
  GUARD_CU(parameters.Use<std::string>(
      "src",
      util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
      "0",
      "<Vertex-ID|random|largestdegree> The source vertices\n"
      "\tIf random, randomly select non-zero degree vertices;\n"
      "\tIf largestdegree, select vertices with largest degrees",
      __FILE__, __LINE__));
  // "src-seed": RNG seed used when sources are selected randomly.
  GUARD_CU(parameters.Use<int>(
      "src-seed",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
      util::PreDefinedValues<int>::InvalidValue,
      "seed to generate random sources", __FILE__, __LINE__));
  return retval;
}
/**
* @brief Run BFS tests
* @tparam GraphT Type of the graph
* @tparam LabelT Type of the labels
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_labels Reference labels
* @param[in] target Whether to perform the BFS
* \return cudaError_t error message(s), if any
*/
// Runs BFS num-runs times (sources consumed round-robin from "srcs"),
// optionally validating each run or only the last one, then collects
// traversal statistics and releases all GPU state.
// Fix: restored the HTML-entity-garbled `&parameters` reference in the
// signature (was rendered as a pilcrow sign).
template <typename GraphT, typename LabelT = typename GraphT::VertexT>
cudaError_t RunTests(util::Parameters &parameters, GraphT &graph,
                     LabelT **ref_labels = NULL,
                     util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  typedef typename GraphT::VertexT VertexT;
  typedef typename GraphT::SizeT SizeT;
  typedef Problem<GraphT> ProblemT;
  typedef Enactor<ProblemT> EnactorT;
  util::CpuTimer cpu_timer, total_timer;
  cpu_timer.Start();
  total_timer.Start();

  // parse configurations from parameters
  bool quiet_mode = parameters.Get<bool>("quiet");
  bool quick_mode = parameters.Get<bool>("quick");
  bool mark_pred = parameters.Get<bool>("mark-pred");
  int num_runs = parameters.Get<int>("num-runs");
  std::string validation = parameters.Get<std::string>("validation");
  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_srcs = srcs.size();

  util::Info info("BFS", parameters, graph);  // initialize Info structure

  // Allocate host-side arrays (for both reference and GPU-computed results);
  // predecessors are only materialized when mark-pred is requested.
  LabelT *h_labels = new LabelT[graph.nodes];
  VertexT *h_preds = (mark_pred) ? new VertexT[graph.nodes] : NULL;

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  GUARD_CU(problem.Init(graph, target));
  GUARD_CU(enactor.Init(problem, target));
  cpu_timer.Stop();
  parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());

  // perform BFS; sources are consumed round-robin:
  // run i uses srcs[i % num_srcs].
  VertexT src;
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    src = srcs[run_num % num_srcs];
    GUARD_CU(problem.Reset(src, target));
    GUARD_CU(enactor.Reset(src, target));
    util::PrintMsg("__________________________", !quiet_mode);
    cpu_timer.Start();
    GUARD_CU(enactor.Enact(src));
    cpu_timer.Stop();
    info.CollectSingleRun(cpu_timer.ElapsedMillis());
    util::PrintMsg(
        "--------------------------\nRun " + std::to_string(run_num) +
            " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
            " ms, src = " + std::to_string(src) + ", #iterations = " +
            std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
        !quiet_mode);
    // Optionally validate after every run.
    if (!quick_mode && validation == "each") {
      GUARD_CU(problem.Extract(h_labels, h_preds));
      SizeT num_errors = app::bfs::Validate_Results(
          parameters, graph, src, h_labels, h_preds,
          ref_labels == NULL ? NULL : ref_labels[run_num % num_srcs], (VertexT*)NULL,
          false);
    }
  }

  cpu_timer.Start();
  // Copy out results; validate only the final run if requested.
  GUARD_CU(problem.Extract(h_labels, h_preds));
  if (!quick_mode && validation == "last") {
    SizeT num_errors = app::bfs::Validate_Results(
        parameters, graph, src, h_labels, h_preds,
        ref_labels == NULL ? NULL : ref_labels[(num_runs - 1) % num_srcs]);
  }

  // compute running statistics
  info.ComputeTraversalStats(enactor, h_labels);
  // Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
  // Display_Performance_Profiling(&enactor);
#endif

  // Clean up
  GUARD_CU(enactor.Release(target));
  GUARD_CU(problem.Release(target));
  delete[] h_labels;
  h_labels = NULL;
  delete[] h_preds;
  h_preds = NULL;
  cpu_timer.Stop();
  total_timer.Stop();
  info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
  return retval;
}
} // namespace bfs
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bfs function
* @tparam GraphT Type of the graph
* @tparam LabelT Type of the labels
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] labels Return shortest hop distance from source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Library entry point: runs BFS num-runs times, extracting labels (and
// optionally predecessors) per source, and returns the accumulated
// Enact() time in milliseconds (reset/extract time is not counted).
// Fix: restored the HTML-entity-garbled `&parameters` reference in the
// signature (was rendered as a pilcrow sign).
template <typename GraphT, typename LabelT = typename GraphT::VertexT>
double gunrock_bfs(gunrock::util::Parameters &parameters, GraphT &graph,
                   LabelT **labels, typename GraphT::VertexT **preds = NULL) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::bfs::Problem<GraphT> ProblemT;
  typedef gunrock::app::bfs::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, target);
  enactor.Init(problem, target);

  std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
  int num_runs = parameters.Get<int>("num-runs");
  // NOTE(review): assumes "srcs" is non-empty; the modulo below divides
  // by num_srcs.
  int num_srcs = srcs.size();
  for (int run_num = 0; run_num < num_runs; ++run_num) {
    int src_num = run_num % num_srcs;
    VertexT src = srcs[src_num];
    problem.Reset(src, target);
    enactor.Reset(src, target);
    cpu_timer.Start();
    enactor.Enact(src);
    cpu_timer.Stop();
    total_time += cpu_timer.ElapsedMillis();
    // Results for run i land in the slot of the source it used.
    problem.Extract(labels[src_num], preds == NULL ? NULL : preds[src_num]);
  }

  enactor.Release(target);
  problem.Release(target);
  srcs.clear();
  return total_time;
}
/*
* @brief Simple interface take in graph as CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BFS
* @param[in] sources Sources to begin traverse, one for each run
* @param[in] mark_preds Whether to output predecessor info
* @param[out] distances Return shortest distance to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Wraps a caller-provided CSR graph (zero-copy: host pointers are adopted,
// not duplicated) in a Gunrock TestGraph, configures run parameters, and
// dispatches to gunrock_bfs. Returns accumulated elapsed time (ms).
template <typename VertexT = int, typename SizeT = int,
          typename LabelT = VertexT>
double bfs(const SizeT num_nodes, const SizeT num_edges,
           const SizeT *row_offsets, const VertexT *col_indices,
           const int num_runs, VertexT *sources, const bool mark_pred,
           const bool direction_optimized, const bool idempotence,
           LabelT **labels, VertexT **preds = NULL) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, VertexT,
                                           gunrock::graph::HAS_CSR |
                                               gunrock::graph::HAS_CSC>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters: registration must happen before any Set/Get,
  // and all values must be in place before LoadGraph below.
  gunrock::util::Parameters parameters("bfs");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::bfs::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  parameters.Set("mark-pred", mark_pred);
  parameters.Set("num-runs", num_runs);
  parameters.Set("direction-optimized", direction_optimized);
  parameters.Set("idempotence", idempotence);
  // One source per run.
  std::vector<VertexT> srcs;
  for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
  parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");
  GraphT graph;
  // Assign pointers into gunrock graph format (const is cast away, but the
  // arrays are only read; they must outlive the call).
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
                                     gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
                                        gunrock::util::HOST);
  graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
  gunrock::graphio::LoadGraph(parameters, graph);
  // Run the BFS
  double elapsed_time = gunrock_bfs(parameters, graph, labels, preds);
  // Cleanup
  graph.Release();
  srcs.clear();
  return elapsed_time;
}
/*
* @brief Simple C-interface take in graph as CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] source Source to begin traverse
* @param[in] mark_preds Whether to output predecessor info
* @param[in] direction_optimized Whether to use directional optimizing BFS
* @param[in] idempotence Whether to use idempotence
* @param[out] labels Return shortest hop distances to source per vertex
* @param[out] preds Return predecessors of each vertex
* \return double Return accumulated elapsed times for all runs
*/
// Single-source C-style convenience overload: forwards to the multi-run
// template above with exactly one run and one source.
double bfs(const int num_nodes, const int num_edges, const int *row_offsets,
           const int *col_indices, int source, const bool mark_pred,
           const bool direction_optimized, const bool idempotence,
           int *distances, int *preds) {
  int *label_arrays[1] = {distances};
  int *pred_arrays[1] = {preds};
  return bfs(num_nodes, num_edges, row_offsets, col_indices, 1, &source,
             mark_pred, direction_optimized, idempotence, label_arrays,
             pred_arrays);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.